cmd/compile: use a single const MOV operand for riscv64

Most platforms only use a single MOV const operand - remove the MOV{B,H,W}const
operands from riscv64 and consistently use MOVDconst instead. The implementation
of all four is the same and there is no benefit gained from having multiple const
operands (in fact it requires a lot more rewrite rules).

Change-Id: I0ba7d7554e371a1de762ef5f3745e9c0c30d41ac
Reviewed-on: https://go-review.googlesource.com/c/go/+/302610
Trust: Joel Sing <joel@sing.id.au>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Reviewed-by: Michael Munday <mike.munday@lowrisc.org>
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
This commit is contained in:
Joel Sing 2021-03-18 03:37:58 +11:00
parent f5e6d3e879
commit 6517844129
5 changed files with 180 additions and 711 deletions

View file

@@ -301,7 +301,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpRISCV64MOVBconst, ssa.OpRISCV64MOVHconst, ssa.OpRISCV64MOVWconst, ssa.OpRISCV64MOVDconst:
case ssa.OpRISCV64MOVDconst:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt

View file

@@ -222,9 +222,9 @@
(Rsh64x64 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
// rotates
(RotateLeft8 <t> x (MOVBconst [c])) => (Or8 (Lsh8x64 <t> x (MOVBconst [c&7])) (Rsh8Ux64 <t> x (MOVBconst [-c&7])))
(RotateLeft16 <t> x (MOVHconst [c])) => (Or16 (Lsh16x64 <t> x (MOVHconst [c&15])) (Rsh16Ux64 <t> x (MOVHconst [-c&15])))
(RotateLeft32 <t> x (MOVWconst [c])) => (Or32 (Lsh32x64 <t> x (MOVWconst [c&31])) (Rsh32Ux64 <t> x (MOVWconst [-c&31])))
(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
(RotateLeft32 <t> x (MOVDconst [c])) => (Or32 (Lsh32x64 <t> x (MOVDconst [c&31])) (Rsh32Ux64 <t> x (MOVDconst [-c&31])))
(RotateLeft64 <t> x (MOVDconst [c])) => (Or64 (Lsh64x64 <t> x (MOVDconst [c&63])) (Rsh64Ux64 <t> x (MOVDconst [-c&63])))
(Less64 ...) => (SLT ...)
@@ -354,45 +354,45 @@
// Small zeroing
(Zero [0] _ mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVBconst [0]) mem)
(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
(MOVHstore ptr (MOVHconst [0]) mem)
(MOVHstore ptr (MOVDconst [0]) mem)
(Zero [2] ptr mem) =>
(MOVBstore [1] ptr (MOVBconst [0])
(MOVBstore ptr (MOVBconst [0]) mem))
(MOVBstore [1] ptr (MOVDconst [0])
(MOVBstore ptr (MOVDconst [0]) mem))
(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
(MOVWstore ptr (MOVWconst [0]) mem)
(MOVWstore ptr (MOVDconst [0]) mem)
(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
(MOVHstore [2] ptr (MOVHconst [0])
(MOVHstore ptr (MOVHconst [0]) mem))
(MOVHstore [2] ptr (MOVDconst [0])
(MOVHstore ptr (MOVDconst [0]) mem))
(Zero [4] ptr mem) =>
(MOVBstore [3] ptr (MOVBconst [0])
(MOVBstore [2] ptr (MOVBconst [0])
(MOVBstore [1] ptr (MOVBconst [0])
(MOVBstore ptr (MOVBconst [0]) mem))))
(MOVBstore [3] ptr (MOVDconst [0])
(MOVBstore [2] ptr (MOVDconst [0])
(MOVBstore [1] ptr (MOVDconst [0])
(MOVBstore ptr (MOVDconst [0]) mem))))
(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
(MOVDstore ptr (MOVDconst [0]) mem)
(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
(MOVWstore [4] ptr (MOVWconst [0])
(MOVWstore ptr (MOVWconst [0]) mem))
(MOVWstore [4] ptr (MOVDconst [0])
(MOVWstore ptr (MOVDconst [0]) mem))
(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
(MOVHstore [6] ptr (MOVHconst [0])
(MOVHstore [4] ptr (MOVHconst [0])
(MOVHstore [2] ptr (MOVHconst [0])
(MOVHstore ptr (MOVHconst [0]) mem))))
(MOVHstore [6] ptr (MOVDconst [0])
(MOVHstore [4] ptr (MOVDconst [0])
(MOVHstore [2] ptr (MOVDconst [0])
(MOVHstore ptr (MOVDconst [0]) mem))))
(Zero [3] ptr mem) =>
(MOVBstore [2] ptr (MOVBconst [0])
(MOVBstore [1] ptr (MOVBconst [0])
(MOVBstore ptr (MOVBconst [0]) mem)))
(MOVBstore [2] ptr (MOVDconst [0])
(MOVBstore [1] ptr (MOVDconst [0])
(MOVBstore ptr (MOVDconst [0]) mem)))
(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
(MOVHstore [4] ptr (MOVHconst [0])
(MOVHstore [2] ptr (MOVHconst [0])
(MOVHstore ptr (MOVHconst [0]) mem)))
(MOVHstore [4] ptr (MOVDconst [0])
(MOVHstore [2] ptr (MOVDconst [0])
(MOVHstore ptr (MOVDconst [0]) mem)))
(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
(MOVWstore [8] ptr (MOVWconst [0])
(MOVWstore [4] ptr (MOVWconst [0])
(MOVWstore ptr (MOVWconst [0]) mem)))
(MOVWstore [8] ptr (MOVDconst [0])
(MOVWstore [4] ptr (MOVDconst [0])
(MOVWstore ptr (MOVDconst [0]) mem)))
(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
(MOVDstore [8] ptr (MOVDconst [0])
(MOVDstore ptr (MOVDconst [0]) mem))
@@ -522,16 +522,14 @@
(OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr)
(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)
// TODO(jsing): Check if we actually need MOV{B,H,W}const as most platforms
// use a single MOVDconst op.
(Const8 ...) => (MOVBconst ...)
(Const16 ...) => (MOVHconst ...)
(Const32 ...) => (MOVWconst ...)
(Const64 ...) => (MOVDconst ...)
(Const32F [val]) => (FMVSX (MOVWconst [int32(math.Float32bits(val))]))
(Const8 [val]) => (MOVDconst [int64(val)])
(Const16 [val]) => (MOVDconst [int64(val)])
(Const32 [val]) => (MOVDconst [int64(val)])
(Const64 [val]) => (MOVDconst [int64(val)])
(Const32F [val]) => (FMVSX (MOVDconst [int64(math.Float32bits(val))]))
(Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
(ConstNil) => (MOVDconst [0])
(ConstBool [val]) => (MOVBconst [int8(b2i(val))])
(ConstBool [val]) => (MOVDconst [int64(b2i(val))])
// Convert 64 bit immediate to two 32 bit immediates, combine with add and shift.
// The lower 32 bit immediate will be treated as signed,
@@ -612,24 +610,18 @@
(BNE cond (MOVDconst [0]) yes no) => (BNEZ cond yes no)
// Store zero
(MOVBstore [off] {sym} ptr (MOVBconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVHconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
// Avoid sign/zero extension for consts.
(MOVBreg (MOVBconst [c])) => (MOVDconst [int64(c)])
(MOVHreg (MOVBconst [c])) => (MOVDconst [int64(c)])
(MOVHreg (MOVHconst [c])) => (MOVDconst [int64(c)])
(MOVWreg (MOVBconst [c])) => (MOVDconst [int64(c)])
(MOVWreg (MOVHconst [c])) => (MOVDconst [int64(c)])
(MOVWreg (MOVWconst [c])) => (MOVDconst [int64(c)])
(MOVBUreg (MOVBconst [c])) => (MOVDconst [int64(uint8(c))])
(MOVHUreg (MOVBconst [c])) => (MOVDconst [int64(uint16(c))])
(MOVHUreg (MOVHconst [c])) => (MOVDconst [int64(uint16(c))])
(MOVWUreg (MOVBconst [c])) => (MOVDconst [int64(uint32(c))])
(MOVWUreg (MOVHconst [c])) => (MOVDconst [int64(uint32(c))])
(MOVWUreg (MOVWconst [c])) => (MOVDconst [int64(uint32(c))])
(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(c)])
(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(c)])
(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(c)])
(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
// Avoid sign/zero extension after properly typed load.
(MOVBreg x:(MOVBload _ _)) => (MOVDreg x)
@@ -695,60 +687,24 @@
(MOVDnop (MOVDconst [c])) => (MOVDconst [c])
// Fold constant into immediate instructions where possible.
(ADD (MOVBconst [val]) x) => (ADDI [int64(val)] x)
(ADD (MOVHconst [val]) x) => (ADDI [int64(val)] x)
(ADD (MOVWconst [val]) x) => (ADDI [int64(val)] x)
(ADD (MOVDconst [val]) x) && is32Bit(val) => (ADDI [val] x)
(AND (MOVBconst [val]) x) => (ANDI [int64(val)] x)
(AND (MOVHconst [val]) x) => (ANDI [int64(val)] x)
(AND (MOVWconst [val]) x) => (ANDI [int64(val)] x)
(AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
(OR (MOVBconst [val]) x) => (ORI [int64(val)] x)
(OR (MOVHconst [val]) x) => (ORI [int64(val)] x)
(OR (MOVWconst [val]) x) => (ORI [int64(val)] x)
(OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x)
(XOR (MOVBconst [val]) x) => (XORI [int64(val)] x)
(XOR (MOVHconst [val]) x) => (XORI [int64(val)] x)
(XOR (MOVWconst [val]) x) => (XORI [int64(val)] x)
(OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x)
(XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
(SLL x (MOVBconst [val])) => (SLLI [int64(val&63)] x)
(SLL x (MOVHconst [val])) => (SLLI [int64(val&63)] x)
(SLL x (MOVWconst [val])) => (SLLI [int64(val&63)] x)
(SLL x (MOVDconst [val])) => (SLLI [int64(val&63)] x)
(SRL x (MOVBconst [val])) => (SRLI [int64(val&63)] x)
(SRL x (MOVHconst [val])) => (SRLI [int64(val&63)] x)
(SRL x (MOVWconst [val])) => (SRLI [int64(val&63)] x)
(SRL x (MOVDconst [val])) => (SRLI [int64(val&63)] x)
(SRA x (MOVBconst [val])) => (SRAI [int64(val&63)] x)
(SRA x (MOVHconst [val])) => (SRAI [int64(val&63)] x)
(SRA x (MOVWconst [val])) => (SRAI [int64(val&63)] x)
(SRA x (MOVDconst [val])) => (SRAI [int64(val&63)] x)
// Convert subtraction of a const into ADDI with negative immediate, where possible.
(SUB x (MOVBconst [val])) => (ADDI [-int64(val)] x)
(SUB x (MOVHconst [val])) => (ADDI [-int64(val)] x)
(SUB x (MOVWconst [val])) && is32Bit(-int64(val)) => (ADDI [-int64(val)] x)
(SUB x (MOVDconst [val])) && is32Bit(-val) => (ADDI [-val] x)
// Subtraction of zero.
(SUB x (MOVBconst [0])) => x
(SUB x (MOVHconst [0])) => x
(SUB x (MOVWconst [0])) => x
(SUB x (MOVDconst [0])) => x
// Subtraction of zero with sign extension.
(SUBW x (MOVWconst [0])) => (ADDIW [0] x)
(SUBW x (MOVDconst [0])) => (ADDIW [0] x)
// Subtraction from zero.
(SUB (MOVBconst [0]) x) => (NEG x)
(SUB (MOVHconst [0]) x) => (NEG x)
(SUB (MOVWconst [0]) x) => (NEG x)
(SUB (MOVDconst [0]) x) => (NEG x)
// Subtraction from zero with sign extension.

View file

@@ -168,9 +168,6 @@ func init() {
{name: "MOVaddr", argLength: 1, reg: gp11sb, asm: "MOV", aux: "SymOff", rematerializeable: true, symEffect: "RdWr"}, // arg0 + auxint + offset encoded in aux
// auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
{name: "MOVBconst", reg: gp01, asm: "MOV", typ: "UInt8", aux: "Int8", rematerializeable: true}, // 8 low bits of auxint
{name: "MOVHconst", reg: gp01, asm: "MOV", typ: "UInt16", aux: "Int16", rematerializeable: true}, // 16 low bits of auxint
{name: "MOVWconst", reg: gp01, asm: "MOV", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint
{name: "MOVDconst", reg: gp01, asm: "MOV", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint
// Loads: load <size> bits from arg0+auxint+aux and extend to 64 bits; arg1=mem

View file

@@ -2089,9 +2089,6 @@ const (
OpRISCV64REMW
OpRISCV64REMUW
OpRISCV64MOVaddr
OpRISCV64MOVBconst
OpRISCV64MOVHconst
OpRISCV64MOVWconst
OpRISCV64MOVDconst
OpRISCV64MOVBload
OpRISCV64MOVHload
@@ -27904,42 +27901,6 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "MOVBconst",
auxType: auxInt8,
argLen: 0,
rematerializeable: true,
asm: riscv.AMOV,
reg: regInfo{
outputs: []outputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
{
name: "MOVHconst",
auxType: auxInt16,
argLen: 0,
rematerializeable: true,
asm: riscv.AMOV,
reg: regInfo{
outputs: []outputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
{
name: "MOVWconst",
auxType: auxInt32,
argLen: 0,
rematerializeable: true,
asm: riscv.AMOV,
reg: regInfo{
outputs: []outputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
{
name: "MOVDconst",
auxType: auxInt64,

File diff suppressed because it is too large. Load diff