// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main

import (
	"fmt"
)
// Notes:
// - Boolean types occupy the entire register. 0=false, 1=true.

// Suffixes encode the bit width of various instructions:
//
//	D (double word) = 64 bit int
//	W (word)        = 32 bit int
//	H (half word)   = 16 bit int
//	B (byte)        = 8 bit int
//	S (single)      = 32 bit float
//	D (double)      = 64 bit float
//	L               = 64 bit int, used when the opcode starts with F
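//
// For example, MOVH moves a sign extended 16 bit integer, MOVWU moves a
// zero extended 32 bit integer, FADDS adds 32 bit floats and FADDD adds
// 64 bit floats.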

const (
	riscv64REG_G    = 27
	riscv64REG_CTXT = 26
	riscv64REG_LR   = 1
	riscv64REG_SP   = 2
	riscv64REG_GP   = 3
	riscv64REG_TP   = 4
	riscv64REG_TMP  = 31
	riscv64REG_ZERO = 0
)

func riscv64RegName(r int) string {
	switch {
	case r == riscv64REG_G:
		return "g"
	case r == riscv64REG_SP:
		return "SP"
	case 0 <= r && r <= 31:
		return fmt.Sprintf("X%d", r)
	case 32 <= r && r <= 63:
		return fmt.Sprintf("F%d", r-32)
	default:
		panic(fmt.Sprintf("unknown register %d", r))
	}
}
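
// For example (illustrative): riscv64RegName(2) returns "SP",
// riscv64RegName(5) returns "X5" and riscv64RegName(34) returns "F2".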

func init() {
	var regNamesRISCV64 []string
	var gpMask, fpMask, gpgMask, gpspMask, gpspsbMask, gpspsbgMask regMask
	regNamed := make(map[string]regMask)

	// Build the list of register names, creating an appropriately indexed
	// regMask for the gp and fp registers as we go.
	//
	// If name is specified, use it rather than the riscv reg number.
	addreg := func(r int, name string) regMask {
		mask := regMask(1) << uint(len(regNamesRISCV64))
		if name == "" {
			name = riscv64RegName(r)
		}
		regNamesRISCV64 = append(regNamesRISCV64, name)
		regNamed[name] = mask
		return mask
	}
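
	// For example, addreg(riscv64REG_SP, "") appends the default name "SP"
	// (from riscv64RegName) and records its single-bit mask in regNamed["SP"].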

	// General purpose registers.
	for r := 0; r <= 31; r++ {
		if r == riscv64REG_LR {
			// LR is not used by regalloc, so we skip it to leave
			// room for pseudo-register SB.
			continue
		}

		mask := addreg(r, "")

		// Add general purpose registers to gpMask.
		switch r {
		// ZERO, GP, TP and TMP are not in any gp mask.
		case riscv64REG_ZERO, riscv64REG_GP, riscv64REG_TP, riscv64REG_TMP:
		case riscv64REG_G:
			gpgMask |= mask
			gpspsbgMask |= mask
		case riscv64REG_SP:
			gpspMask |= mask
			gpspsbMask |= mask
			gpspsbgMask |= mask
		default:
			gpMask |= mask
			gpgMask |= mask
			gpspMask |= mask
			gpspsbMask |= mask
			gpspsbgMask |= mask
		}
	}

	// Floating point registers.
	for r := 32; r <= 63; r++ {
		mask := addreg(r, "")
		fpMask |= mask
	}

	// Pseudo-register: SB
	mask := addreg(-1, "SB")
	gpspsbMask |= mask
	gpspsbgMask |= mask

	if len(regNamesRISCV64) > 64 {
		// regMask is only 64 bits.
		panic("Too many RISCV64 registers")
	}

	regCtxt := regNamed["X26"]
	callerSave := gpMask | fpMask | regNamed["g"]

	var (
		gpstore     = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register
		gpstore0    = regInfo{inputs: []regMask{gpspsbMask}}
		gp01        = regInfo{outputs: []regMask{gpMask}}
		gp11        = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}
		gp21        = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask}}
		gp22        = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask, gpMask}}
		gpload      = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{gpMask}}
		gp11sb      = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}}
		gpxchg      = regInfo{inputs: []regMask{gpspsbgMask, gpgMask}, outputs: []regMask{gpMask}}
		gpcas       = regInfo{inputs: []regMask{gpspsbgMask, gpgMask, gpgMask}, outputs: []regMask{gpMask}}
		gpatomic    = regInfo{inputs: []regMask{gpspsbgMask, gpgMask}}
		fp11        = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{fpMask}}
		fp21        = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{fpMask}}
		fp31        = regInfo{inputs: []regMask{fpMask, fpMask, fpMask}, outputs: []regMask{fpMask}}
		gpfp        = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{fpMask}}
		fpgp        = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{gpMask}}
		fpstore     = regInfo{inputs: []regMask{gpspsbMask, fpMask, 0}}
		fpload      = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{fpMask}}
		fp2gp       = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{gpMask}}
		call        = regInfo{clobbers: callerSave}
		callClosure = regInfo{inputs: []regMask{gpspMask, regCtxt, 0}, clobbers: callerSave}
		callInter   = regInfo{inputs: []regMask{gpMask}, clobbers: callerSave}
	)
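
	// Naming convention for the regInfo values above: gp21 means two general
	// purpose register inputs and one general purpose register output; fp31
	// means three floating point inputs and one floating point output, etc.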

	RISCV64ops := []opData{
		{name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
		{name: "ADDI", argLength: 1, reg: gp11sb, asm: "ADDI", aux: "Int64"},  // arg0 + auxint
		{name: "ADDIW", argLength: 1, reg: gp11, asm: "ADDIW", aux: "Int64"},  // 32 low bits of arg0 + auxint, sign extended to 64 bits
		{name: "NEG", argLength: 1, reg: gp11, asm: "NEG"},                    // -arg0
		{name: "NEGW", argLength: 1, reg: gp11, asm: "NEGW"},                  // -arg0 of 32 bits, sign extended to 64 bits
		{name: "SUB", argLength: 2, reg: gp21, asm: "SUB"},                    // arg0 - arg1
		{name: "SUBW", argLength: 2, reg: gp21, asm: "SUBW"},                  // 32 low bits of arg0 - 32 low bits of arg1, sign extended to 64 bits

		// M extension. H means high (i.e., it returns the top bits of
		// the result). U means unsigned. W means word (i.e., 32-bit).
		{name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true, typ: "Int64"}, // arg0 * arg1
		{name: "MULW", argLength: 2, reg: gp21, asm: "MULW", commutative: true, typ: "Int32"},
		{name: "MULH", argLength: 2, reg: gp21, asm: "MULH", commutative: true, typ: "Int64"},
		{name: "MULHU", argLength: 2, reg: gp21, asm: "MULHU", commutative: true, typ: "UInt64"},
		{name: "LoweredMuluhilo", argLength: 2, reg: gp22, resultNotInArgs: true}, // arg0 * arg1, return (hi, lo)
		{name: "LoweredMuluover", argLength: 2, reg: gp22, resultNotInArgs: true}, // arg0 * arg1, return (64 bits of arg0*arg1, overflow)

		{name: "DIV", argLength: 2, reg: gp21, asm: "DIV", typ: "Int64"}, // arg0 / arg1
		{name: "DIVU", argLength: 2, reg: gp21, asm: "DIVU", typ: "UInt64"},
		{name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", typ: "Int32"},
		{name: "DIVUW", argLength: 2, reg: gp21, asm: "DIVUW", typ: "UInt32"},
		{name: "REM", argLength: 2, reg: gp21, asm: "REM", typ: "Int64"}, // arg0 % arg1
		{name: "REMU", argLength: 2, reg: gp21, asm: "REMU", typ: "UInt64"},
		{name: "REMW", argLength: 2, reg: gp21, asm: "REMW", typ: "Int32"},
		{name: "REMUW", argLength: 2, reg: gp21, asm: "REMUW", typ: "UInt32"},

		{name: "MOVaddr", argLength: 1, reg: gp11sb, asm: "MOV", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
		// auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address

		{name: "MOVDconst", reg: gp01, asm: "MOV", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint

		// Loads: load <size> bits from arg0+auxint+aux and extend to 64 bits; arg1=mem
		{name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"},     // 8 bits, sign extend
		{name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"},    // 16 bits, sign extend
		{name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"},    // 32 bits, sign extend
		{name: "MOVDload", argLength: 2, reg: gpload, asm: "MOV", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"},     // 64 bits
		{name: "MOVBUload", argLength: 2, reg: gpload, asm: "MOVBU", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"},  // 8 bits, zero extend
		{name: "MOVHUload", argLength: 2, reg: gpload, asm: "MOVHU", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // 16 bits, zero extend
		{name: "MOVWUload", argLength: 2, reg: gpload, asm: "MOVWU", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // 32 bits, zero extend

		// Stores: store <size> lowest bits in arg1 to arg0+auxint+aux; arg2=mem
		{name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 8 bits
		{name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 16 bits
		{name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
		{name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOV", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},  // 64 bits

		// Stores: store <size> of zero in arg0+auxint+aux; arg1=mem
		{name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 8 bits
		{name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 16 bits
		{name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
		{name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},  // 64 bits

		// Conversions
		{name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"},   // move from arg0, sign-extended from byte
		{name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"},   // move from arg0, sign-extended from half
		{name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"},   // move from arg0, sign-extended from word
		{name: "MOVDreg", argLength: 1, reg: gp11, asm: "MOV"},    // move from arg0
		{name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, zero-extended from byte
		{name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, zero-extended from half
		{name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, zero-extended from word

		{name: "MOVDnop", argLength: 1, reg: regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}, resultInArg0: true}, // nop, return arg0 in same register

		// Shift ops
		{name: "SLL", argLength: 2, reg: gp21, asm: "SLL"},   // arg0 << (arg1 & 63)
		{name: "SRA", argLength: 2, reg: gp21, asm: "SRA"},   // arg0 >> (arg1 & 63), signed
		{name: "SRAW", argLength: 2, reg: gp21, asm: "SRAW"}, // arg0 >> (arg1 & 31), signed
		{name: "SRL", argLength: 2, reg: gp21, asm: "SRL"},   // arg0 >> (arg1 & 63), unsigned
		{name: "SRLW", argLength: 2, reg: gp21, asm: "SRLW"}, // arg0 >> (arg1 & 31), unsigned
		{name: "SLLI", argLength: 1, reg: gp11, asm: "SLLI", aux: "Int64"}, // arg0 << auxint, shift amount 0-63
		{name: "SRAI", argLength: 1, reg: gp11, asm: "SRAI", aux: "Int64"}, // arg0 >> auxint, signed, shift amount 0-63
		{name: "SRAIW", argLength: 1, reg: gp11, asm: "SRAIW", aux: "Int64"}, // arg0 >> auxint, signed, shift amount 0-31
		{name: "SRLI", argLength: 1, reg: gp11, asm: "SRLI", aux: "Int64"},   // arg0 >> auxint, unsigned, shift amount 0-63
		{name: "SRLIW", argLength: 1, reg: gp11, asm: "SRLIW", aux: "Int64"}, // arg0 >> auxint, unsigned, shift amount 0-31

		// Bitwise ops
		{name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true}, // arg0 ^ arg1
		{name: "XORI", argLength: 1, reg: gp11, asm: "XORI", aux: "Int64"},    // arg0 ^ auxint
		{name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true},   // arg0 | arg1
		{name: "ORI", argLength: 1, reg: gp11, asm: "ORI", aux: "Int64"},      // arg0 | auxint
		{name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
		{name: "ANDI", argLength: 1, reg: gp11, asm: "ANDI", aux: "Int64"},    // arg0 & auxint
		{name: "NOT", argLength: 1, reg: gp11, asm: "NOT"},                    // ^arg0

		// Generate boolean values
		{name: "SEQZ", argLength: 1, reg: gp11, asm: "SEQZ"},                 // arg0 == 0, result is 0 or 1
		{name: "SNEZ", argLength: 1, reg: gp11, asm: "SNEZ"},                 // arg0 != 0, result is 0 or 1
		{name: "SLT", argLength: 2, reg: gp21, asm: "SLT"},                   // arg0 < arg1, result is 0 or 1
		{name: "SLTI", argLength: 1, reg: gp11, asm: "SLTI", aux: "Int64"},   // arg0 < auxint, result is 0 or 1
		{name: "SLTU", argLength: 2, reg: gp21, asm: "SLTU"},                 // arg0 < arg1, unsigned, result is 0 or 1
		{name: "SLTIU", argLength: 1, reg: gp11, asm: "SLTIU", aux: "Int64"}, // arg0 < auxint, unsigned, result is 0 or 1

		// Round ops to block fused-multiply-add extraction.
		{name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true},
		{name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true},

		// Calls
		{name: "CALLstatic", argLength: -1, reg: call, aux: "CallOff", call: true},               // call static function aux.(*gc.Sym). last arg=mem, auxint=argsize, returns mem
		{name: "CALLtail", argLength: -1, reg: call, aux: "CallOff", call: true, tailCall: true}, // tail call static function aux.(*gc.Sym). last arg=mem, auxint=argsize, returns mem
		{name: "CALLclosure", argLength: -1, reg: callClosure, aux: "CallOff", call: true},       // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
		{name: "CALLinter", argLength: -1, reg: callInter, aux: "CallOff", call: true},           // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem

		// duffzero
		// arg0 = address of memory to zero (in X25, changed as side effect)
		// arg1 = mem
		// auxint = offset into duffzero code to start executing
		// X1 (link register) changed because of function call
		// returns mem
		{
			name:      "DUFFZERO",
			aux:       "Int64",
			argLength: 2,
			reg: regInfo{
				inputs:   []regMask{regNamed["X25"]},
				clobbers: regNamed["X1"] | regNamed["X25"],
			},
			typ:            "Mem",
			faultOnNilArg0: true,
		},

		// duffcopy
		// arg0 = address of dst memory (in X25, changed as side effect)
		// arg1 = address of src memory (in X24, changed as side effect)
		// arg2 = mem
		// auxint = offset into duffcopy code to start executing
		// X1 (link register) changed because of function call
		// returns mem
		{
			name:      "DUFFCOPY",
			aux:       "Int64",
			argLength: 3,
			reg: regInfo{
				inputs:   []regMask{regNamed["X25"], regNamed["X24"]},
				clobbers: regNamed["X1"] | regNamed["X24"] | regNamed["X25"],
			},
			typ:            "Mem",
			faultOnNilArg0: true,
			faultOnNilArg1: true,
		},

		// Generic moves and zeros

		// general unaligned zeroing
		// arg0 = address of memory to zero (in X5, changed as side effect)
		// arg1 = address of the last element to zero (inclusive)
		// arg2 = mem
		// auxint = element size
		// returns mem
		//	mov	ZERO, (X5)
		//	ADD	$sz, X5
		//	BGEU	Rarg1, X5, -2(PC)
		{
			name:      "LoweredZero",
			aux:       "Int64",
			argLength: 3,
			reg: regInfo{
				inputs:   []regMask{regNamed["X5"], gpMask},
				clobbers: regNamed["X5"],
			},
			typ:            "Mem",
			faultOnNilArg0: true,
		},

		// general unaligned move
		// arg0 = address of dst memory (in X5, changed as side effect)
		// arg1 = address of src memory (in X6, changed as side effect)
		// arg2 = address of the last element of src (can't be X7 as we clobber it before using arg2)
		// arg3 = mem
		// auxint = alignment
		// clobbers X7 as a tmp register.
		// returns mem
		//	mov	(X6), X7
		//	mov	X7, (X5)
		//	ADD	$sz, X5
		//	ADD	$sz, X6
		//	BGEU	Rarg2, X5, -4(PC)
		{
			name:      "LoweredMove",
			aux:       "Int64",
			argLength: 4,
			reg: regInfo{
				inputs:   []regMask{regNamed["X5"], regNamed["X6"], gpMask &^ regNamed["X7"]},
				clobbers: regNamed["X5"] | regNamed["X6"] | regNamed["X7"],
			},
			typ:            "Mem",
			faultOnNilArg0: true,
			faultOnNilArg1: true,
		},

		// Atomic loads.
		// load from arg0. arg1=mem.
		// returns <value,memory> so they can be properly ordered with other loads.
		{name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true},
		{name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true},
		{name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true},

		// Atomic stores.
		// store arg1 to *arg0. arg2=mem. returns memory.
		{name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
		{name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
		{name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},

		// Atomic exchange.
		// store arg1 to *arg0. arg2=mem. returns <old content of *arg0, memory>.
		{name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
		{name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},

		// Atomic add.
		// *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>.
		{name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
		{name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},

		// Atomic compare and swap.
		// arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
		// if *arg0 == arg1 {
		//   *arg0 = arg2
		//   return (true, memory)
		// } else {
		//   return (false, memory)
		// }
		//	MOV	$0, Rout
		//	LR	(Rarg0), Rtmp
		//	BNE	Rtmp, Rarg1, 3(PC)
		//	SC	Rarg2, (Rarg0), Rtmp
		//	BNE	Rtmp, ZERO, -3(PC)
		//	MOV	$1, Rout
		{name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
		{name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},

		// Atomic 32 bit AND/OR.
		// *arg0 &= (|=) arg1. arg2=mem. returns nil.
		{name: "LoweredAtomicAnd32", argLength: 3, reg: gpatomic, asm: "AMOANDW", faultOnNilArg0: true, hasSideEffects: true},
		{name: "LoweredAtomicOr32", argLength: 3, reg: gpatomic, asm: "AMOORW", faultOnNilArg0: true, hasSideEffects: true},

		// Lowering pass-throughs
		{name: "LoweredNilCheck", argLength: 2, faultOnNilArg0: true, nilCheck: true, reg: regInfo{inputs: []regMask{gpspMask}}}, // arg0=ptr, arg1=mem, returns void. Faults if ptr is nil.
		{name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{regCtxt}}},                                                // scheduler ensures only at beginning of entry block

		// LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem.
		{name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},

		// LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
		// I.e., if f calls g, and g "calls" getcallerpc,
		// the result should be the PC within f that g will return to.
		// See runtime/stubs.go for a more detailed discussion.
		{name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},

		// LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of buffer entries needed.
		// It saves all GP registers if necessary,
		// but clobbers RA (LR) because it's a call
		// and T6 (REG_TMP).
		// Returns a pointer to a write barrier buffer in X24.
		{name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ (gpMask | regNamed["g"])) | regNamed["X1"], outputs: []regMask{regNamed["X24"]}}, clobberFlags: true, aux: "Int64"},

		// Do data barrier. arg0=memory.
		{name: "LoweredPubBarrier", argLength: 1, asm: "FENCE", hasSideEffects: true},

		// There are three of these functions so that they can have three different register inputs.
		// When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
		// default registers to match so we don't need to copy registers around unnecessarily.
		{name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X7"], regNamed["X28"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
		{name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X6"], regNamed["X7"]}}, typ: "Mem", call: true},  // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
		{name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X5"], regNamed["X6"]}}, typ: "Mem", call: true},  // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
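
		// For example, LoweredPanicBoundsA expects the failing index in X7 and
		// the length (or capacity) in X28, the registers the preceding bounds
		// check sequence is expected to already be using.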

		// F extension.
		{name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true, typ: "Float32"},   // arg0 + arg1
		{name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS", commutative: false, typ: "Float32"},  // arg0 - arg1
		{name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true, typ: "Float32"},   // arg0 * arg1
		{name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS", commutative: false, typ: "Float32"},  // arg0 / arg1
		{name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS", commutative: true, typ: "Float32"}, // (arg0 * arg1) + arg2
		{name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS", commutative: true, typ: "Float32"}, // (arg0 * arg1) - arg2
		{name: "FNMADDS", argLength: 3, reg: fp31, asm: "FNMADDS", commutative: true, typ: "Float32"}, // -(arg0 * arg1) + arg2
		{name: "FNMSUBS", argLength: 3, reg: fp31, asm: "FNMSUBS", commutative: true, typ: "Float32"}, // -(arg0 * arg1) - arg2
		{name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS", typ: "Float32"},                    // sqrt(arg0)
		{name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS", typ: "Float32"},                      // -arg0
		{name: "FMVSX", argLength: 1, reg: gpfp, asm: "FMVSX", typ: "Float32"},                      // reinterpret arg0 as float
		{name: "FCVTSW", argLength: 1, reg: gpfp, asm: "FCVTSW", typ: "Float32"},                    // float32(low 32 bits of arg0)
		{name: "FCVTSL", argLength: 1, reg: gpfp, asm: "FCVTSL", typ: "Float32"},                    // float32(arg0)
		{name: "FCVTWS", argLength: 1, reg: fpgp, asm: "FCVTWS", typ: "Int32"},                      // int32(arg0)
		{name: "FCVTLS", argLength: 1, reg: fpgp, asm: "FCVTLS", typ: "Int64"},                      // int64(arg0)
		{name: "FMOVWload", argLength: 2, reg: fpload, asm: "MOVF", aux: "SymOff", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load float32 from arg0+auxint+aux
		{name: "FMOVWstore", argLength: 3, reg: fpstore, asm: "MOVF", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},  // store float32 to arg0+auxint+aux
		{name: "FEQS", argLength: 2, reg: fp2gp, asm: "FEQS", commutative: true},                    // arg0 == arg1
		{name: "FNES", argLength: 2, reg: fp2gp, asm: "FNES", commutative: true},                    // arg0 != arg1
		{name: "FLTS", argLength: 2, reg: fp2gp, asm: "FLTS"},                                       // arg0 < arg1
		{name: "FLES", argLength: 2, reg: fp2gp, asm: "FLES"},                                       // arg0 <= arg1

		// D extension.
		{name: "FADDD", argLength: 2, reg: fp21, asm: "FADDD", commutative: true, typ: "Float64"},   // arg0 + arg1
		{name: "FSUBD", argLength: 2, reg: fp21, asm: "FSUBD", commutative: false, typ: "Float64"},  // arg0 - arg1
		{name: "FMULD", argLength: 2, reg: fp21, asm: "FMULD", commutative: true, typ: "Float64"},   // arg0 * arg1
		{name: "FDIVD", argLength: 2, reg: fp21, asm: "FDIVD", commutative: false, typ: "Float64"},  // arg0 / arg1
		{name: "FMADDD", argLength: 3, reg: fp31, asm: "FMADDD", commutative: true, typ: "Float64"}, // (arg0 * arg1) + arg2
		{name: "FMSUBD", argLength: 3, reg: fp31, asm: "FMSUBD", commutative: true, typ: "Float64"}, // (arg0 * arg1) - arg2
		{name: "FNMADDD", argLength: 3, reg: fp31, asm: "FNMADDD", commutative: true, typ: "Float64"}, // -(arg0 * arg1) + arg2
		{name: "FNMSUBD", argLength: 3, reg: fp31, asm: "FNMSUBD", commutative: true, typ: "Float64"}, // -(arg0 * arg1) - arg2
		{name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD", typ: "Float64"},                    // sqrt(arg0)
		{name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD", typ: "Float64"},                      // -arg0
		{name: "FABSD", argLength: 1, reg: fp11, asm: "FABSD", typ: "Float64"},                      // abs(arg0)
		{name: "FSGNJD", argLength: 2, reg: fp21, asm: "FSGNJD", typ: "Float64"},                    // copy sign of arg1 to arg0
		{name: "FMVDX", argLength: 1, reg: gpfp, asm: "FMVDX", typ: "Float64"},                      // reinterpret arg0 as float
		{name: "FCVTDW", argLength: 1, reg: gpfp, asm: "FCVTDW", typ: "Float64"},                    // float64(low 32 bits of arg0)
		{name: "FCVTDL", argLength: 1, reg: gpfp, asm: "FCVTDL", typ: "Float64"},                    // float64(arg0)
		{name: "FCVTWD", argLength: 1, reg: fpgp, asm: "FCVTWD", typ: "Int32"},                      // int32(arg0)
		{name: "FCVTLD", argLength: 1, reg: fpgp, asm: "FCVTLD", typ: "Int64"},                      // int64(arg0)
		{name: "FCVTDS", argLength: 1, reg: fp11, asm: "FCVTDS", typ: "Float64"},                    // float64(arg0)
		{name: "FCVTSD", argLength: 1, reg: fp11, asm: "FCVTSD", typ: "Float32"},                    // float32(arg0)
		{name: "FMOVDload", argLength: 2, reg: fpload, asm: "MOVD", aux: "SymOff", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load float64 from arg0+auxint+aux
		{name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},  // store float64 to arg0+auxint+aux
		{name: "FEQD", argLength: 2, reg: fp2gp, asm: "FEQD", commutative: true},                    // arg0 == arg1
		{name: "FNED", argLength: 2, reg: fp2gp, asm: "FNED", commutative: true},                    // arg0 != arg1
		{name: "FLTD", argLength: 2, reg: fp2gp, asm: "FLTD"},                                       // arg0 < arg1
		{name: "FLED", argLength: 2, reg: fp2gp, asm: "FLED"},                                       // arg0 <= arg1
	}

	RISCV64blocks := []blockData{
		{name: "BEQ", controls: 2},
		{name: "BNE", controls: 2},
		{name: "BLT", controls: 2},
		{name: "BGE", controls: 2},
		{name: "BLTU", controls: 2},
		{name: "BGEU", controls: 2},
		{name: "BEQZ", controls: 1},
		{name: "BNEZ", controls: 1},
		{name: "BLEZ", controls: 1},
		{name: "BGEZ", controls: 1},
		{name: "BLTZ", controls: 1},
		{name: "BGTZ", controls: 1},
	}

	archs = append(archs, arch{
		name:            "RISCV64",
		pkg:             "cmd/internal/obj/riscv",
		genfile:         "../../riscv64/ssa.go",
		ops:             RISCV64ops,
		blocks:          RISCV64blocks,
		regnames:        regNamesRISCV64,
		gpregmask:       gpMask,
		fpregmask:       fpMask,
		framepointerreg: -1, // not used

		// Integer parameters are passed in registers X10-X17, X8-X9, X18-X23.
		ParamIntRegNames: "X10 X11 X12 X13 X14 X15 X16 X17 X8 X9 X18 X19 X20 X21 X22 X23",
		// Float parameters are passed in registers F10-F17, F8-F9, F18-F23.
		ParamFloatRegNames: "F10 F11 F12 F13 F14 F15 F16 F17 F8 F9 F18 F19 F20 F21 F22 F23",
	})
}