Revert "Revert "cmd/compile: adjust RISCV64 rewrite rules to use typed aux fields""

This reverts commit 98c32670fd454939794504225dca1d4ec55045d5.

Rolling forward with a trivial format-string fix.

cmd/compile: adjust RISCV64 rewrite rules to use typed aux fields

Also add a typed version of mergeSym to rewrite.go to assist with a few
rules that used mergeSym in the untyped form.

Remove a few extra int32 overflow checks that no longer make sense, as
adding two int8s or int16s should never overflow an int32.
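
For context, the bound is easy to verify: the widest sums of two int8 or int16 values stay well inside the int32 range, so an is32Bit check on such a sum can never fail. A minimal standalone Go sketch (not part of the change):

package main

import (
	"fmt"
	"math"
)

func main() {
	// Widest possible sums of two 8-bit and two 16-bit values.
	max8 := int32(math.MaxInt8) + int32(math.MaxInt8)    // 254
	min16 := int32(math.MinInt16) + int32(math.MinInt16) // -65536
	fmt.Println(max8 < math.MaxInt32, min16 > math.MinInt32) // prints: true true
}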

Passes toolstash-check -all.

Original review: https://go-review.googlesource.com/c/go/+/228882

Change-Id: Ib63db4ee1687446f0f3d9f11575a40dd85cbce55
Reviewed-on: https://go-review.googlesource.com/c/go/+/229126
Run-TryBot: Than McIntosh <thanm@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Than McIntosh <thanm@google.com>
Authored by David Finkel on 2020-04-20 18:15:50 -04:00; committed by Than McIntosh
Parent: f38fad4aaa
Commit: 1cca496c5e
4 changed files with 1040 additions and 1005 deletions


@@ -112,6 +112,7 @@ var knownFormats = map[string]string{
 	"cmd/compile/internal/ssa.Location %s": "",
 	"cmd/compile/internal/ssa.Op %s": "",
 	"cmd/compile/internal/ssa.Op %v": "",
+	"cmd/compile/internal/ssa.Sym %v": "",
 	"cmd/compile/internal/ssa.ValAndOff %s": "",
 	"cmd/compile/internal/ssa.domain %v": "",
 	"cmd/compile/internal/ssa.posetNode %v": "",


@@ -17,86 +17,86 @@
 // * Avoid using Neq32 for writeBarrier.enabled checks.
 // Lowering arithmetic
-(Add64 ...) -> (ADD ...)
+(Add64 ...) => (ADD ...)
-(AddPtr ...) -> (ADD ...)
+(AddPtr ...) => (ADD ...)
-(Add32 ...) -> (ADD ...)
+(Add32 ...) => (ADD ...)
-(Add16 ...) -> (ADD ...)
+(Add16 ...) => (ADD ...)
-(Add8 ...) -> (ADD ...)
+(Add8 ...) => (ADD ...)
-(Add32F ...) -> (FADDS ...)
+(Add32F ...) => (FADDS ...)
-(Add64F ...) -> (FADDD ...)
+(Add64F ...) => (FADDD ...)
-(Sub64 ...) -> (SUB ...)
+(Sub64 ...) => (SUB ...)
-(SubPtr ...) -> (SUB ...)
+(SubPtr ...) => (SUB ...)
-(Sub32 ...) -> (SUB ...)
+(Sub32 ...) => (SUB ...)
-(Sub16 ...) -> (SUB ...)
+(Sub16 ...) => (SUB ...)
-(Sub8 ...) -> (SUB ...)
+(Sub8 ...) => (SUB ...)
-(Sub32F ...) -> (FSUBS ...)
+(Sub32F ...) => (FSUBS ...)
-(Sub64F ...) -> (FSUBD ...)
+(Sub64F ...) => (FSUBD ...)
-(Mul64 ...) -> (MUL ...)
+(Mul64 ...) => (MUL ...)
-(Mul32 ...) -> (MULW ...)
+(Mul32 ...) => (MULW ...)
-(Mul16 x y) -> (MULW (SignExt16to32 x) (SignExt16to32 y))
+(Mul16 x y) => (MULW (SignExt16to32 x) (SignExt16to32 y))
-(Mul8 x y) -> (MULW (SignExt8to32 x) (SignExt8to32 y))
+(Mul8 x y) => (MULW (SignExt8to32 x) (SignExt8to32 y))
-(Mul32F ...) -> (FMULS ...)
+(Mul32F ...) => (FMULS ...)
-(Mul64F ...) -> (FMULD ...)
+(Mul64F ...) => (FMULD ...)
-(Div32F ...) -> (FDIVS ...)
+(Div32F ...) => (FDIVS ...)
-(Div64F ...) -> (FDIVD ...)
+(Div64F ...) => (FDIVD ...)
-(Div64 ...) -> (DIV ...)
+(Div64 x y [false]) => (DIV x y)
-(Div64u ...) -> (DIVU ...)
+(Div64u ...) => (DIVU ...)
-(Div32 ...) -> (DIVW ...)
+(Div32 x y [false]) => (DIVW x y)
-(Div32u ...) -> (DIVUW ...)
+(Div32u ...) => (DIVUW ...)
-(Div16 x y) -> (DIVW (SignExt16to32 x) (SignExt16to32 y))
+(Div16 x y [false]) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
-(Div16u x y) -> (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div16u x y) => (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
-(Div8 x y) -> (DIVW (SignExt8to32 x) (SignExt8to32 y))
+(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
-(Div8u x y) -> (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Div8u x y) => (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y))
-(Hmul64 ...) -> (MULH ...)
+(Hmul64 ...) => (MULH ...)
-(Hmul64u ...) -> (MULHU ...)
+(Hmul64u ...) => (MULHU ...)
-(Hmul32 x y) -> (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y)))
+(Hmul32 x y) => (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y)))
-(Hmul32u x y) -> (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Hmul32u x y) => (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y)))
-// (x + y) / 2 -> (x / 2) + (y / 2) + (x & y & 1)
+// (x + y) / 2 => (x / 2) + (y / 2) + (x & y & 1)
-(Avg64u <t> x y) -> (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))
+(Avg64u <t> x y) => (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))
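
The comment above states the identity the Avg64u lowering relies on; a Go sketch of the same computation, illustrative only:

// Average of two uint64 values without risking overflow in x + y:
// halve each operand, then add back the carry bit that is lost only
// when both low bits are set.
func avg64u(x, y uint64) uint64 {
	return x>>1 + y>>1 + (x & y & 1)
}
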
-(Mod64 ...) -> (REM ...)
+(Mod64 x y [false]) => (REM x y)
-(Mod64u ...) -> (REMU ...)
+(Mod64u ...) => (REMU ...)
-(Mod32 ...) -> (REMW ...)
+(Mod32 x y [false]) => (REMW x y)
-(Mod32u ...) -> (REMUW ...)
+(Mod32u ...) => (REMUW ...)
-(Mod16 x y) -> (REMW (SignExt16to32 x) (SignExt16to32 y))
+(Mod16 x y [false]) => (REMW (SignExt16to32 x) (SignExt16to32 y))
-(Mod16u x y) -> (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod16u x y) => (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
-(Mod8 x y) -> (REMW (SignExt8to32 x) (SignExt8to32 y))
+(Mod8 x y) => (REMW (SignExt8to32 x) (SignExt8to32 y))
-(Mod8u x y) -> (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Mod8u x y) => (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))
-(And64 ...) -> (AND ...)
+(And64 ...) => (AND ...)
-(And32 ...) -> (AND ...)
+(And32 ...) => (AND ...)
-(And16 ...) -> (AND ...)
+(And16 ...) => (AND ...)
-(And8 ...) -> (AND ...)
+(And8 ...) => (AND ...)
-(Or64 ...) -> (OR ...)
+(Or64 ...) => (OR ...)
-(Or32 ...) -> (OR ...)
+(Or32 ...) => (OR ...)
-(Or16 ...) -> (OR ...)
+(Or16 ...) => (OR ...)
-(Or8 ...) -> (OR ...)
+(Or8 ...) => (OR ...)
-(Xor64 ...) -> (XOR ...)
+(Xor64 ...) => (XOR ...)
-(Xor32 ...) -> (XOR ...)
+(Xor32 ...) => (XOR ...)
-(Xor16 ...) -> (XOR ...)
+(Xor16 ...) => (XOR ...)
-(Xor8 ...) -> (XOR ...)
+(Xor8 ...) => (XOR ...)
-(Neg64 ...) -> (NEG ...)
+(Neg64 ...) => (NEG ...)
-(Neg32 ...) -> (NEG ...)
+(Neg32 ...) => (NEG ...)
-(Neg16 ...) -> (NEG ...)
+(Neg16 ...) => (NEG ...)
-(Neg8 ...) -> (NEG ...)
+(Neg8 ...) => (NEG ...)
-(Neg32F ...) -> (FNEGS ...)
+(Neg32F ...) => (FNEGS ...)
-(Neg64F ...) -> (FNEGD ...)
+(Neg64F ...) => (FNEGD ...)
-(Com64 ...) -> (NOT ...)
+(Com64 ...) => (NOT ...)
-(Com32 ...) -> (NOT ...)
+(Com32 ...) => (NOT ...)
-(Com16 ...) -> (NOT ...)
+(Com16 ...) => (NOT ...)
-(Com8 ...) -> (NOT ...)
+(Com8 ...) => (NOT ...)
-(Sqrt ...) -> (FSQRTD ...)
+(Sqrt ...) => (FSQRTD ...)
 // Zero and sign extension
 // Shift left until the bits we want are at the top of the register.
@@ -104,37 +104,37 @@
 // We always extend to 64 bits; there's no reason not to,
 // and optimization rules can then collapse some extensions.
-(SignExt8to16 <t> x) -> (SRAI [56] (SLLI <t> [56] x))
+(SignExt8to16 <t> x) => (SRAI [56] (SLLI <t> [56] x))
-(SignExt8to32 <t> x) -> (SRAI [56] (SLLI <t> [56] x))
+(SignExt8to32 <t> x) => (SRAI [56] (SLLI <t> [56] x))
-(SignExt8to64 <t> x) -> (SRAI [56] (SLLI <t> [56] x))
+(SignExt8to64 <t> x) => (SRAI [56] (SLLI <t> [56] x))
-(SignExt16to32 <t> x) -> (SRAI [48] (SLLI <t> [48] x))
+(SignExt16to32 <t> x) => (SRAI [48] (SLLI <t> [48] x))
-(SignExt16to64 <t> x) -> (SRAI [48] (SLLI <t> [48] x))
+(SignExt16to64 <t> x) => (SRAI [48] (SLLI <t> [48] x))
-(SignExt32to64 <t> x) -> (ADDIW [0] x)
+(SignExt32to64 <t> x) => (ADDIW [0] x)
-(ZeroExt8to16 <t> x) -> (SRLI [56] (SLLI <t> [56] x))
+(ZeroExt8to16 <t> x) => (SRLI [56] (SLLI <t> [56] x))
-(ZeroExt8to32 <t> x) -> (SRLI [56] (SLLI <t> [56] x))
+(ZeroExt8to32 <t> x) => (SRLI [56] (SLLI <t> [56] x))
-(ZeroExt8to64 <t> x) -> (SRLI [56] (SLLI <t> [56] x))
+(ZeroExt8to64 <t> x) => (SRLI [56] (SLLI <t> [56] x))
-(ZeroExt16to32 <t> x) -> (SRLI [48] (SLLI <t> [48] x))
+(ZeroExt16to32 <t> x) => (SRLI [48] (SLLI <t> [48] x))
-(ZeroExt16to64 <t> x) -> (SRLI [48] (SLLI <t> [48] x))
+(ZeroExt16to64 <t> x) => (SRLI [48] (SLLI <t> [48] x))
-(ZeroExt32to64 <t> x) -> (SRLI [32] (SLLI <t> [32] x))
+(ZeroExt32to64 <t> x) => (SRLI [32] (SLLI <t> [32] x))
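
As the comments above describe, extension shifts the narrow value to the top of the register and shifts it back down; a Go model of the equivalent arithmetic, illustrative only:

// SignExt8to64 as SRAI [56] (SLLI [56] x): the arithmetic shift back
// refills the upper bits with the sign bit.
func signExt8to64(x int64) int64 { return (x << 56) >> 56 }

// ZeroExt8to64 as SRLI [56] (SLLI [56] x): the logical shift back
// refills the upper bits with zeros.
func zeroExt8to64(x uint64) uint64 { return (x << 56) >> 56 }
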
-(Cvt32to32F ...) -> (FCVTSW ...)
+(Cvt32to32F ...) => (FCVTSW ...)
-(Cvt32to64F ...) -> (FCVTDW ...)
+(Cvt32to64F ...) => (FCVTDW ...)
-(Cvt64to32F ...) -> (FCVTSL ...)
+(Cvt64to32F ...) => (FCVTSL ...)
-(Cvt64to64F ...) -> (FCVTDL ...)
+(Cvt64to64F ...) => (FCVTDL ...)
-(Cvt32Fto32 ...) -> (FCVTWS ...)
+(Cvt32Fto32 ...) => (FCVTWS ...)
-(Cvt32Fto64 ...) -> (FCVTLS ...)
+(Cvt32Fto64 ...) => (FCVTLS ...)
-(Cvt64Fto32 ...) -> (FCVTWD ...)
+(Cvt64Fto32 ...) => (FCVTWD ...)
-(Cvt64Fto64 ...) -> (FCVTLD ...)
+(Cvt64Fto64 ...) => (FCVTLD ...)
-(Cvt32Fto64F ...) -> (FCVTDS ...)
+(Cvt32Fto64F ...) => (FCVTDS ...)
-(Cvt64Fto32F ...) -> (FCVTSD ...)
+(Cvt64Fto32F ...) => (FCVTSD ...)
-(CvtBoolToUint8 ...) -> (Copy ...)
+(CvtBoolToUint8 ...) => (Copy ...)
-(Round32F ...) -> (Copy ...)
+(Round32F ...) => (Copy ...)
-(Round64F ...) -> (Copy ...)
+(Round64F ...) => (Copy ...)
 // From genericOps.go:
 // "0 if arg0 == 0, -1 if arg0 > 0, undef if arg0<0"
@@ -143,16 +143,16 @@
 // For positive x, bit 63 of x-1 is always 0, so the result is -1.
 // For zero x, bit 63 of x-1 is 1, so the result is 0.
 //
-(Slicemask <t> x) -> (NOT (SRAI <t> [63] (ADDI <t> [-1] x)))
+(Slicemask <t> x) => (NOT (SRAI <t> [63] (ADDI <t> [-1] x)))
 // Truncations
 // We ignore the unused high parts of registers, so truncates are just copies.
-(Trunc16to8 ...) -> (Copy ...)
+(Trunc16to8 ...) => (Copy ...)
-(Trunc32to8 ...) -> (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
-(Trunc32to16 ...) -> (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
-(Trunc64to8 ...) -> (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
-(Trunc64to16 ...) -> (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
-(Trunc64to32 ...) -> (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
 // Shifts
@@ -166,41 +166,41 @@
 // If y < 64, this is the value we want. Otherwise, we want zero.
 //
 // So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise.
-(Lsh8x8 <t> x y) -> (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh8x8 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Lsh8x16 <t> x y) -> (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh8x16 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Lsh8x32 <t> x y) -> (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh8x32 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Lsh8x64 <t> x y) -> (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
+(Lsh8x64 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
-(Lsh16x8 <t> x y) -> (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh16x8 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Lsh16x16 <t> x y) -> (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh16x16 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Lsh16x32 <t> x y) -> (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh16x32 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Lsh16x64 <t> x y) -> (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
+(Lsh16x64 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
-(Lsh32x8 <t> x y) -> (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh32x8 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Lsh32x16 <t> x y) -> (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh32x16 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Lsh32x32 <t> x y) -> (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh32x32 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Lsh32x64 <t> x y) -> (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
+(Lsh32x64 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
-(Lsh64x8 <t> x y) -> (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh64x8 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Lsh64x16 <t> x y) -> (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh64x16 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Lsh64x32 <t> x y) -> (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh64x32 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Lsh64x64 <t> x y) -> (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+(Lsh64x64 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
 // SRL only considers the bottom 6 bits of y. If y > 64, the result should
 // always be 0. See Lsh above for a detailed description.
-(Rsh8Ux8 <t> x y) -> (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh8Ux8 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh8Ux16 <t> x y) -> (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh8Ux16 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh8Ux32 <t> x y) -> (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh8Ux32 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh8Ux64 <t> x y) -> (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
+(Rsh8Ux64 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
-(Rsh16Ux8 <t> x y) -> (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh16Ux8 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh16Ux16 <t> x y) -> (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh16Ux16 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh16Ux32 <t> x y) -> (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh16Ux32 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh16Ux64 <t> x y) -> (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
+(Rsh16Ux64 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
-(Rsh32Ux8 <t> x y) -> (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh32Ux8 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh32Ux16 <t> x y) -> (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh32Ux16 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh32Ux32 <t> x y) -> (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh32Ux32 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh32Ux64 <t> x y) -> (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
+(Rsh32Ux64 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
-(Rsh64Ux8 <t> x y) -> (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh64Ux8 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh64Ux16 <t> x y) -> (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh64Ux16 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh64Ux32 <t> x y) -> (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh64Ux32 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh64Ux64 <t> x y) -> (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+(Rsh64Ux64 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
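
A Go model of the unsigned-shift lowering described in the comments above, illustrative only: SLL/SRL look only at the low 6 bits of y, so the result is masked with -1 * (y < 64).

func lsh64x64(x, y uint64) uint64 {
	mask := uint64(0)
	if y < 64 { // SLTIU [64] y
		mask = ^uint64(0) // Neg64 turns the 0/1 result into a full mask
	}
	return (x << (y & 63)) & mask // AND (SLL x y) mask
}
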
 // SRA only considers the bottom 6 bits of y. If y > 64, the result should
 // be either 0 or -1 based on the sign bit.
@@ -212,226 +212,226 @@
 //
 // We don't need to sign-extend the OR result, as it will be at minimum 8 bits,
 // more than the 6 bits SRA cares about.
-(Rsh8x8 <t> x y) -> (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh8x8 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh8x16 <t> x y) -> (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh8x16 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh8x32 <t> x y) -> (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh8x32 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh8x64 <t> x y) -> (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh8x64 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-(Rsh16x8 <t> x y) -> (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh16x8 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh16x16 <t> x y) -> (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh16x16 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh16x32 <t> x y) -> (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh16x32 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh16x64 <t> x y) -> (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh16x64 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-(Rsh32x8 <t> x y) -> (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh32x8 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh32x16 <t> x y) -> (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh32x16 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh32x32 <t> x y) -> (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh32x32 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh32x64 <t> x y) -> (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh32x64 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-(Rsh64x8 <t> x y) -> (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh64x8 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh64x16 <t> x y) -> (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh64x16 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh64x32 <t> x y) -> (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh64x32 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh64x64 <t> x y) -> (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh64x64 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
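
The signed shifts above clamp the shift amount instead of masking the result; roughly, in Go (illustrative only):

func rsh64x64(x int64, y uint64) int64 {
	// If y >= 64, SLTIU yields 0, ADDI [-1] turns that into -1, and the OR
	// forces the shift amount to all ones, so SRA effectively shifts by 63.
	if y >= 64 {
		y = 63
	}
	return x >> (y & 63) // SRA uses only the low 6 bits
}
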
 // rotates
-(RotateLeft8 <t> x (MOVBconst [c])) -> (Or8 (Lsh8x64 <t> x (MOVBconst [c&7])) (Rsh8Ux64 <t> x (MOVBconst [-c&7])))
+(RotateLeft8 <t> x (MOVBconst [c])) => (Or8 (Lsh8x64 <t> x (MOVBconst [c&7])) (Rsh8Ux64 <t> x (MOVBconst [-c&7])))
-(RotateLeft16 <t> x (MOVHconst [c])) -> (Or16 (Lsh16x64 <t> x (MOVHconst [c&15])) (Rsh16Ux64 <t> x (MOVHconst [-c&15])))
+(RotateLeft16 <t> x (MOVHconst [c])) => (Or16 (Lsh16x64 <t> x (MOVHconst [c&15])) (Rsh16Ux64 <t> x (MOVHconst [-c&15])))
-(RotateLeft32 <t> x (MOVWconst [c])) -> (Or32 (Lsh32x64 <t> x (MOVWconst [c&31])) (Rsh32Ux64 <t> x (MOVWconst [-c&31])))
+(RotateLeft32 <t> x (MOVWconst [c])) => (Or32 (Lsh32x64 <t> x (MOVWconst [c&31])) (Rsh32Ux64 <t> x (MOVWconst [-c&31])))
-(RotateLeft64 <t> x (MOVDconst [c])) -> (Or64 (Lsh64x64 <t> x (MOVDconst [c&63])) (Rsh64Ux64 <t> x (MOVDconst [-c&63])))
+(RotateLeft64 <t> x (MOVDconst [c])) => (Or64 (Lsh64x64 <t> x (MOVDconst [c&63])) (Rsh64Ux64 <t> x (MOVDconst [-c&63])))
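
The rotate rules split a rotate by a constant into two shifts joined by an OR; a Go sketch of the 8-bit case, illustrative only:

func rotateLeft8(x uint8, c uint) uint8 {
	// Lsh8x64 by c&7 combined with Rsh8Ux64 by -c&7.
	return x<<(c&7) | x>>(-c&7)
}
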
-(Less64 ...) -> (SLT ...)
+(Less64 ...) => (SLT ...)
-(Less32 x y) -> (SLT (SignExt32to64 x) (SignExt32to64 y))
+(Less32 x y) => (SLT (SignExt32to64 x) (SignExt32to64 y))
-(Less16 x y) -> (SLT (SignExt16to64 x) (SignExt16to64 y))
+(Less16 x y) => (SLT (SignExt16to64 x) (SignExt16to64 y))
-(Less8 x y) -> (SLT (SignExt8to64 x) (SignExt8to64 y))
+(Less8 x y) => (SLT (SignExt8to64 x) (SignExt8to64 y))
-(Less64U ...) -> (SLTU ...)
+(Less64U ...) => (SLTU ...)
-(Less32U x y) -> (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Less32U x y) => (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
-(Less16U x y) -> (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Less16U x y) => (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
-(Less8U x y) -> (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
+(Less8U x y) => (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
-(Less64F ...) -> (FLTD ...)
+(Less64F ...) => (FLTD ...)
-(Less32F ...) -> (FLTS ...)
+(Less32F ...) => (FLTS ...)
 // Convert x <= y to !(y > x).
-(Leq64 x y) -> (Not (Less64 y x))
+(Leq64 x y) => (Not (Less64 y x))
-(Leq32 x y) -> (Not (Less32 y x))
+(Leq32 x y) => (Not (Less32 y x))
-(Leq16 x y) -> (Not (Less16 y x))
+(Leq16 x y) => (Not (Less16 y x))
-(Leq8 x y) -> (Not (Less8 y x))
+(Leq8 x y) => (Not (Less8 y x))
-(Leq64U x y) -> (Not (Less64U y x))
+(Leq64U x y) => (Not (Less64U y x))
-(Leq32U x y) -> (Not (Less32U y x))
+(Leq32U x y) => (Not (Less32U y x))
-(Leq16U x y) -> (Not (Less16U y x))
+(Leq16U x y) => (Not (Less16U y x))
-(Leq8U x y) -> (Not (Less8U y x))
+(Leq8U x y) => (Not (Less8U y x))
-(Leq64F ...) -> (FLED ...)
+(Leq64F ...) => (FLED ...)
-(Leq32F ...) -> (FLES ...)
+(Leq32F ...) => (FLES ...)
-(EqPtr x y) -> (SEQZ (SUB <x.Type> x y))
+(EqPtr x y) => (SEQZ (SUB <x.Type> x y))
-(Eq64 x y) -> (SEQZ (SUB <x.Type> x y))
+(Eq64 x y) => (SEQZ (SUB <x.Type> x y))
-(Eq32 x y) -> (SEQZ (SUBW <x.Type> x y))
+(Eq32 x y) => (SEQZ (SUBW <x.Type> x y))
-(Eq16 x y) -> (SEQZ (ZeroExt16to64 (SUB <x.Type> x y)))
+(Eq16 x y) => (SEQZ (ZeroExt16to64 (SUB <x.Type> x y)))
-(Eq8 x y) -> (SEQZ (ZeroExt8to64 (SUB <x.Type> x y)))
+(Eq8 x y) => (SEQZ (ZeroExt8to64 (SUB <x.Type> x y)))
-(Eq64F ...) -> (FEQD ...)
+(Eq64F ...) => (FEQD ...)
-(Eq32F ...) -> (FEQS ...)
+(Eq32F ...) => (FEQS ...)
-(NeqPtr x y) -> (SNEZ (SUB <x.Type> x y))
+(NeqPtr x y) => (SNEZ (SUB <x.Type> x y))
-(Neq64 x y) -> (SNEZ (SUB <x.Type> x y))
+(Neq64 x y) => (SNEZ (SUB <x.Type> x y))
-(Neq32 x y) -> (SNEZ (SUBW <x.Type> x y))
+(Neq32 x y) => (SNEZ (SUBW <x.Type> x y))
-(Neq16 x y) -> (SNEZ (ZeroExt16to64 (SUB <x.Type> x y)))
+(Neq16 x y) => (SNEZ (ZeroExt16to64 (SUB <x.Type> x y)))
-(Neq8 x y) -> (SNEZ (ZeroExt8to64 (SUB <x.Type> x y)))
+(Neq8 x y) => (SNEZ (ZeroExt8to64 (SUB <x.Type> x y)))
-(Neq64F ...) -> (FNED ...)
+(Neq64F ...) => (FNED ...)
-(Neq32F ...) -> (FNES ...)
+(Neq32F ...) => (FNES ...)
 // Loads
-(Load <t> ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem)
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
-(Load <t> ptr mem) && ( is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem)
+(Load <t> ptr mem) && ( is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
-(Load <t> ptr mem) && ( is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem)
+(Load <t> ptr mem) && ( is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
-(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
-(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
-(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) -> (MOVWload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) => (MOVWload ptr mem)
-(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) -> (MOVWUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) => (MOVWUload ptr mem)
-(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
-(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVWload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (FMOVWload ptr mem)
-(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
 // Stores
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVDstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)
 // We need to fold MOVaddr into the LD/MOVDstore ops so that the live variable analysis
 // knows what variables are being read/written by the ops.
-(MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+(MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
-	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+	(MOVBUload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
-(MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+(MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
-	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+	(MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
-(MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+(MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
-	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+	(MOVHUload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
-(MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+(MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
-	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+	(MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
-(MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+(MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
-	(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+	(MOVWUload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
-(MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+(MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
-	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+	(MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
-(MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+(MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
-	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+	(MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
-(MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+(MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
-	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	(MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
-(MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+(MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
-	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	(MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
-(MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+(MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
-	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	(MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
-(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
-	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	(MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
-(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+	(MOVBstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
-(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+	(MOVHstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
-(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+	(MOVWstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
-(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+	(MOVDstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
-(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
+(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
-	(MOVBUload [off1+off2] {sym} base mem)
+	(MOVBUload [off1+int32(off2)] {sym} base mem)
-(MOVBload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
+(MOVBload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
-	(MOVBload [off1+off2] {sym} base mem)
+	(MOVBload [off1+int32(off2)] {sym} base mem)
-(MOVHUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
+(MOVHUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
-	(MOVHUload [off1+off2] {sym} base mem)
+	(MOVHUload [off1+int32(off2)] {sym} base mem)
-(MOVHload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
+(MOVHload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
-	(MOVHload [off1+off2] {sym} base mem)
+	(MOVHload [off1+int32(off2)] {sym} base mem)
-(MOVWUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
+(MOVWUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
-	(MOVWUload [off1+off2] {sym} base mem)
+	(MOVWUload [off1+int32(off2)] {sym} base mem)
-(MOVWload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
+(MOVWload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
-	(MOVWload [off1+off2] {sym} base mem)
+	(MOVWload [off1+int32(off2)] {sym} base mem)
-(MOVDload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
+(MOVDload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
-	(MOVDload [off1+off2] {sym} base mem)
+	(MOVDload [off1+int32(off2)] {sym} base mem)
-(MOVBstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) ->
+(MOVBstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
-	(MOVBstore [off1+off2] {sym} base val mem)
+	(MOVBstore [off1+int32(off2)] {sym} base val mem)
-(MOVHstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) ->
+(MOVHstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
-	(MOVHstore [off1+off2] {sym} base val mem)
+	(MOVHstore [off1+int32(off2)] {sym} base val mem)
-(MOVWstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) ->
+(MOVWstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
-	(MOVWstore [off1+off2] {sym} base val mem)
+	(MOVWstore [off1+int32(off2)] {sym} base val mem)
-(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) ->
+(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
-	(MOVDstore [off1+off2] {sym} base val mem)
+	(MOVDstore [off1+int32(off2)] {sym} base val mem)
-(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBstorezero [off1+off2] {sym} ptr mem)
+(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
-(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHstorezero [off1+off2] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
-(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWstorezero [off1+off2] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
-(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVDstorezero [off1+off2] {sym} ptr mem)
+(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
 // Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
 // with OffPtr -> ADDI.
-(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+d) -> (MOVaddr [c+d] {s} x)
+(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)
 // Zeroing
 // TODO: more optimized zeroing, including attempting to use aligned accesses.
-(Zero [0] _ mem) -> mem
+(Zero [0] _ mem) => mem
-(Zero [1] ptr mem) -> (MOVBstore ptr (MOVBconst) mem)
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVBconst) mem)
-(Zero [2] ptr mem) -> (MOVHstore ptr (MOVHconst) mem)
+(Zero [2] ptr mem) => (MOVHstore ptr (MOVHconst) mem)
-(Zero [4] ptr mem) -> (MOVWstore ptr (MOVWconst) mem)
+(Zero [4] ptr mem) => (MOVWstore ptr (MOVWconst) mem)
-(Zero [8] ptr mem) -> (MOVDstore ptr (MOVDconst) mem)
+(Zero [8] ptr mem) => (MOVDstore ptr (MOVDconst) mem)
 // Generic zeroing uses a loop
-(Zero [s] {t} ptr mem) ->
+(Zero [s] {t} ptr mem) =>
-	(LoweredZero [t.(*types.Type).Alignment()]
+	(LoweredZero [t.Alignment()]
 		ptr
-		(ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.(*types.Type).Alignment(), config)]))
+		(ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)]))
 		mem)
-(Convert ...) -> (MOVconvert ...)
+(Convert ...) => (MOVconvert ...)
 // Checks
-(IsNonNil p) -> (NeqPtr (MOVDconst) p)
+(IsNonNil p) => (NeqPtr (MOVDconst) p)
-(IsInBounds ...) -> (Less64U ...)
+(IsInBounds ...) => (Less64U ...)
-(IsSliceInBounds ...) -> (Leq64U ...)
+(IsSliceInBounds ...) => (Leq64U ...)
 // Trivial lowering
-(NilCheck ...) -> (LoweredNilCheck ...)
+(NilCheck ...) => (LoweredNilCheck ...)
-(GetClosurePtr ...) -> (LoweredGetClosurePtr ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
-(GetCallerSP ...) -> (LoweredGetCallerSP ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
-(GetCallerPC ...) -> (LoweredGetCallerPC ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
 // Write barrier.
-(WB ...) -> (LoweredWB ...)
+(WB ...) => (LoweredWB ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 -> (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 -> (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 -> (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
 // Moves
 // TODO: more optimized moves, including attempting to use aligned accesses.
-(Move [0] _ _ mem) -> mem
+(Move [0] _ _ mem) => mem
-(Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem)
+(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
-(Move [2] dst src mem) -> (MOVHstore dst (MOVHload src mem) mem)
+(Move [2] dst src mem) => (MOVHstore dst (MOVHload src mem) mem)
-(Move [4] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] dst src mem) => (MOVWstore dst (MOVWload src mem) mem)
-(Move [8] dst src mem) -> (MOVDstore dst (MOVDload src mem) mem)
+(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
 // Generic move uses a loop
-(Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) ->
+(Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) =>
-	(LoweredMove [t.(*types.Type).Alignment()]
+	(LoweredMove [t.Alignment()]
 		dst
 		src
-		(ADDI <src.Type> [s-moveSize(t.(*types.Type).Alignment(), config)] src)
+		(ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src)
 		mem)
 // Boolean ops; 0=false, 1=true
-(AndB ...) -> (AND ...)
+(AndB ...) => (AND ...)
-(OrB ...) -> (OR ...)
+(OrB ...) => (OR ...)
-(EqB x y) -> (XORI [1] (XOR <typ.Bool> x y))
+(EqB x y) => (XORI [1] (XOR <typ.Bool> x y))
-(NeqB ...) -> (XOR ...)
+(NeqB ...) => (XOR ...)
-(Not x) -> (XORI [1] x)
+(Not x) => (XORI [1] x)
 // Lowering pointer arithmetic
 // TODO: Special handling for SP offsets, like ARM
-(OffPtr [off] ptr:(SP)) -> (MOVaddr [off] ptr)
+(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVaddr [int32(off)] ptr)
-(OffPtr [off] ptr) && is32Bit(off) -> (ADDI [off] ptr)
+(OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr)
-(OffPtr [off] ptr) -> (ADD (MOVDconst [off]) ptr)
+(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)
-(Const8 ...) -> (MOVBconst ...)
+(Const8 ...) => (MOVBconst ...)
-(Const16 ...) -> (MOVHconst ...)
+(Const16 ...) => (MOVHconst ...)
-(Const32 ...) -> (MOVWconst ...)
+(Const32 ...) => (MOVWconst ...)
-(Const64 ...) -> (MOVDconst ...)
+(Const64 ...) => (MOVDconst ...)
-(Const32F [val]) -> (FMVSX (MOVWconst [int64(int32(math.Float32bits(float32(math.Float64frombits(uint64(val))))))]))
+(Const32F [val]) => (FMVSX (MOVWconst [int32(math.Float32bits(val))]))
-(Const64F [val]) -> (FMVDX (MOVDconst [val]))
+(Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
-(ConstNil) -> (MOVDconst [0])
+(ConstNil) => (MOVDconst [0])
-(ConstBool ...) -> (MOVBconst ...)
+(ConstBool [val]) => (MOVBconst [int8(b2i(val))])
 // Convert 64 bit immediate to two 32 bit immediates, combine with add and shift.
 // The lower 32 bit immediate will be treated as signed,
@@ -439,11 +439,11 @@
 // We don't have to worry about overflow from the increment,
 // because if the top half is all 1s, and int32(c) is negative,
 // then the overall constant fits in an int32.
-(MOVDconst <t> [c]) && !is32Bit(c) && int32(c) < 0 -> (ADD (SLLI <t> [32] (MOVDconst [c>>32+1])) (MOVDconst [int64(int32(c))]))
+(MOVDconst <t> [c]) && !is32Bit(c) && int32(c) < 0 => (ADD (SLLI <t> [32] (MOVDconst [c>>32+1])) (MOVDconst [int64(int32(c))]))
-(MOVDconst <t> [c]) && !is32Bit(c) && int32(c) >= 0 -> (ADD (SLLI <t> [32] (MOVDconst [c>>32+0])) (MOVDconst [int64(int32(c))]))
+(MOVDconst <t> [c]) && !is32Bit(c) && int32(c) >= 0 => (ADD (SLLI <t> [32] (MOVDconst [c>>32+0])) (MOVDconst [int64(int32(c))]))
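
A Go sketch of the splitting identity these two rules rely on, illustrative only: the low half is materialized sign-extended, and the high half gets an extra 1 whenever that low half is negative.

func splitConst(c int64) (hi, lo int64) {
	lo = int64(int32(c)) // the MOVDconst [int64(int32(c))] operand
	hi = c >> 32
	if int32(c) < 0 {
		hi++ // compensate for the sign extension of lo
	}
	return hi, lo // c == hi<<32 + lo
}
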
 (Addr {sym} base) => (MOVaddr {sym} [0] base)
-(LocalAddr {sym} base _) -> (MOVaddr {sym} base)
+(LocalAddr {sym} base _) => (MOVaddr {sym} base)
 // Conditional branches
 //
@@ -453,103 +453,103 @@
 // so we could generate more efficient code by computing the condition in the
 // branch itself. This should be revisited now that the compiler has support
 // for two control values (https://golang.org/cl/196557).
-(If cond yes no) -> (BNE cond yes no)
+(If cond yes no) => (BNE cond yes no)
 // Calls
-(StaticCall ...) -> (CALLstatic ...)
+(StaticCall ...) => (CALLstatic ...)
-(ClosureCall ...) -> (CALLclosure ...)
+(ClosureCall ...) => (CALLclosure ...)
-(InterCall ...) -> (CALLinter ...)
+(InterCall ...) => (CALLinter ...)
 // Atomic Intrinsics
-(AtomicLoad8 ...) -> (LoweredAtomicLoad8 ...)
+(AtomicLoad8 ...) => (LoweredAtomicLoad8 ...)
-(AtomicLoad32 ...) -> (LoweredAtomicLoad32 ...)
+(AtomicLoad32 ...) => (LoweredAtomicLoad32 ...)
-(AtomicLoad64 ...) -> (LoweredAtomicLoad64 ...)
+(AtomicLoad64 ...) => (LoweredAtomicLoad64 ...)
-(AtomicLoadPtr ...) -> (LoweredAtomicLoad64 ...)
+(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)
-(AtomicStore8 ...) -> (LoweredAtomicStore8 ...)
+(AtomicStore8 ...) => (LoweredAtomicStore8 ...)
-(AtomicStore32 ...) -> (LoweredAtomicStore32 ...)
+(AtomicStore32 ...) => (LoweredAtomicStore32 ...)
-(AtomicStore64 ...) -> (LoweredAtomicStore64 ...)
+(AtomicStore64 ...) => (LoweredAtomicStore64 ...)
-(AtomicStorePtrNoWB ...) -> (LoweredAtomicStore64 ...)
+(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)
-(AtomicAdd32 ...) -> (LoweredAtomicAdd32 ...)
+(AtomicAdd32 ...) => (LoweredAtomicAdd32 ...)
-(AtomicAdd64 ...) -> (LoweredAtomicAdd64 ...)
+(AtomicAdd64 ...) => (LoweredAtomicAdd64 ...)
-(AtomicCompareAndSwap32 ...) -> (LoweredAtomicCas32 ...)
+(AtomicCompareAndSwap32 ...) => (LoweredAtomicCas32 ...)
-(AtomicCompareAndSwap64 ...) -> (LoweredAtomicCas64 ...)
+(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
-(AtomicExchange32 ...) -> (LoweredAtomicExchange32 ...)
+(AtomicExchange32 ...) => (LoweredAtomicExchange32 ...)
-(AtomicExchange64 ...) -> (LoweredAtomicExchange64 ...)
+(AtomicExchange64 ...) => (LoweredAtomicExchange64 ...)
 // Optimizations
 // Absorb SNEZ into branch.
-(BNE (SNEZ x) yes no) -> (BNE x yes no)
+(BNE (SNEZ x) yes no) => (BNE x yes no)
 // Store zero
-(MOVBstore [off] {sym} ptr (MOVBconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVBconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
-(MOVHstore [off] {sym} ptr (MOVHconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVHconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
-(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
-(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem)
+(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
 // Fold constant into immediate instructions where possible.
-(ADD (MOVBconst [val]) x) && is32Bit(val) -> (ADDI [val] x)
+(ADD (MOVBconst [val]) x) => (ADDI [int64(val)] x)
-(ADD (MOVHconst [val]) x) && is32Bit(val) -> (ADDI [val] x)
+(ADD (MOVHconst [val]) x) => (ADDI [int64(val)] x)
-(ADD (MOVWconst [val]) x) && is32Bit(val) -> (ADDI [val] x)
+(ADD (MOVWconst [val]) x) => (ADDI [int64(val)] x)
-(ADD (MOVDconst [val]) x) && is32Bit(val) -> (ADDI [val] x)
+(ADD (MOVDconst [val]) x) && is32Bit(val) => (ADDI [val] x)
-(AND (MOVBconst [val]) x) && is32Bit(val) -> (ANDI [val] x)
+(AND (MOVBconst [val]) x) => (ANDI [int64(val)] x)
-(AND (MOVHconst [val]) x) && is32Bit(val) -> (ANDI [val] x)
+(AND (MOVHconst [val]) x) => (ANDI [int64(val)] x)
-(AND (MOVWconst [val]) x) && is32Bit(val) -> (ANDI [val] x)
+(AND (MOVWconst [val]) x) => (ANDI [int64(val)] x)
-(AND (MOVDconst [val]) x) && is32Bit(val) -> (ANDI [val] x)
+(AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
-(OR (MOVBconst [val]) x) && is32Bit(val) -> (ORI [val] x)
+(OR (MOVBconst [val]) x) => (ORI [int64(val)] x)
-(OR (MOVHconst [val]) x) && is32Bit(val) -> (ORI [val] x)
+(OR (MOVHconst [val]) x) => (ORI [int64(val)] x)
-(OR (MOVWconst [val]) x) && is32Bit(val) -> (ORI [val] x)
+(OR (MOVWconst [val]) x) => (ORI [int64(val)] x)
-(OR (MOVDconst [val]) x) && is32Bit(val) -> (ORI [val] x)
+(OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x)
-(XOR (MOVBconst [val]) x) && is32Bit(val) -> (XORI [val] x)
+(XOR (MOVBconst [val]) x) => (XORI [int64(val)] x)
-(XOR (MOVHconst [val]) x) && is32Bit(val) -> (XORI [val] x)
+(XOR (MOVHconst [val]) x) => (XORI [int64(val)] x)
-(XOR (MOVWconst [val]) x) && is32Bit(val) -> (XORI [val] x)
+(XOR (MOVWconst [val]) x) => (XORI [int64(val)] x)
-(XOR (MOVDconst [val]) x) && is32Bit(val) -> (XORI [val] x)
+(XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
-(SLL x (MOVBconst [val])) -> (SLLI [val&63] x)
+(SLL x (MOVBconst [val])) => (SLLI [int64(val&63)] x)
-(SLL x (MOVHconst [val])) -> (SLLI [val&63] x)
+(SLL x (MOVHconst [val])) => (SLLI [int64(val&63)] x)
-(SLL x (MOVWconst [val])) -> (SLLI [val&63] x)
+(SLL x (MOVWconst [val])) => (SLLI [int64(val&63)] x)
-(SLL x (MOVDconst [val])) -> (SLLI [val&63] x)
+(SLL x (MOVDconst [val])) => (SLLI [int64(val&63)] x)
-(SRL x (MOVBconst [val])) -> (SRLI [val&63] x)
+(SRL x (MOVBconst [val])) => (SRLI [int64(val&63)] x)
-(SRL x (MOVHconst [val])) -> (SRLI [val&63] x)
+(SRL x (MOVHconst [val])) => (SRLI [int64(val&63)] x)
-(SRL x (MOVWconst [val])) -> (SRLI [val&63] x)
+(SRL x (MOVWconst [val])) => (SRLI [int64(val&63)] x)
-(SRL x (MOVDconst [val])) -> (SRLI [val&63] x)
+(SRL x (MOVDconst [val])) => (SRLI [int64(val&63)] x)
-(SRA x (MOVBconst [val])) -> (SRAI [val&63] x)
+(SRA x (MOVBconst [val])) => (SRAI [int64(val&63)] x)
-(SRA x (MOVHconst [val])) -> (SRAI [val&63] x)
+(SRA x (MOVHconst [val])) => (SRAI [int64(val&63)] x)
-(SRA x (MOVWconst [val])) -> (SRAI [val&63] x)
+(SRA x (MOVWconst [val])) => (SRAI [int64(val&63)] x)
-(SRA x (MOVDconst [val])) -> (SRAI [val&63] x)
+(SRA x (MOVDconst [val])) => (SRAI [int64(val&63)] x)
 // Convert subtraction of a const into ADDI with negative immediate, where possible.
-(SUB x (MOVBconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
+(SUB x (MOVBconst [val])) => (ADDI [-int64(val)] x)
-(SUB x (MOVHconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
+(SUB x (MOVHconst [val])) => (ADDI [-int64(val)] x)
-(SUB x (MOVWconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
+(SUB x (MOVWconst [val])) && is32Bit(-int64(val)) => (ADDI [-int64(val)] x)
-(SUB x (MOVDconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
+(SUB x (MOVDconst [val])) && is32Bit(-val) => (ADDI [-val] x)
 // Subtraction of zero.
-(SUB x (MOVBconst [0])) -> x
+(SUB x (MOVBconst [0])) => x
-(SUB x (MOVHconst [0])) -> x
+(SUB x (MOVHconst [0])) => x
-(SUB x (MOVWconst [0])) -> x
+(SUB x (MOVWconst [0])) => x
-(SUB x (MOVDconst [0])) -> x
+(SUB x (MOVDconst [0])) => x
 // Subtraction of zero with sign extension.
-(SUBW x (MOVWconst [0])) -> (ADDIW [0] x)
+(SUBW x (MOVWconst [0])) => (ADDIW [0] x)
 // Subtraction from zero.
-(SUB (MOVBconst [0]) x) -> (NEG x)
+(SUB (MOVBconst [0]) x) => (NEG x)
-(SUB (MOVHconst [0]) x) -> (NEG x)
+(SUB (MOVHconst [0]) x) => (NEG x)
-(SUB (MOVWconst [0]) x) -> (NEG x)
+(SUB (MOVWconst [0]) x) => (NEG x)
-(SUB (MOVDconst [0]) x) -> (NEG x)
+(SUB (MOVDconst [0]) x) => (NEG x)
 // Subtraction from zero with sign extension.
-(SUBW (MOVDconst [0]) x) -> (NEGW x)
+(SUBW (MOVDconst [0]) x) => (NEGW x)
 // Addition of zero.
-(ADDI [0] x) -> x
+(ADDI [0] x) => x


@@ -210,6 +210,15 @@ func mergeSym(x, y interface{}) interface{} {
 func canMergeSym(x, y interface{}) bool {
 	return x == nil || y == nil
 }
+func mergeSymTyped(x, y Sym) Sym {
+	if x == nil {
+		return y
+	}
+	if y == nil {
+		return x
+	}
+	panic(fmt.Sprintf("mergeSym with two non-nil syms %v %v", x, y))
+}
 // canMergeLoadClobber reports whether the load can be merged into target without
 // invalidating the schedule.
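
A hypothetical caller shape, only to illustrate why the panic branch is unreachable from the rules above: the rewrites fire only when canMergeSym holds, so at most one of the two symbols is non-nil by the time mergeSymTyped runs.

// Illustrative helper, not from this commit's generated code.
func mergeIfPossible(sym1, sym2 Sym) (Sym, bool) {
	if !canMergeSym(sym1, sym2) { // both non-nil: cannot merge
		return nil, false
	}
	return mergeSymTyped(sym1, sym2), true // at most one is non-nil here
}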

File diff suppressed because it is too large.