2019-09-08 01:56:26 +10:00
|
|
|
// Copyright © 2015 The Go Authors. All rights reserved.
|
|
|
|
//
|
|
|
|
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
// of this software and associated documentation files (the "Software"), to deal
|
|
|
|
// in the Software without restriction, including without limitation the rights
|
|
|
|
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
|
|
// copies of the Software, and to permit persons to whom the Software is
|
|
|
|
// furnished to do so, subject to the following conditions:
|
|
|
|
//
|
|
|
|
// The above copyright notice and this permission notice shall be included in
|
|
|
|
// all copies or substantial portions of the Software.
|
|
|
|
//
|
|
|
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
|
|
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
|
|
// THE SOFTWARE.
|
|
|
|
|
|
|
|
package riscv
|
|
|
|
|
|
|
|
import (
|
|
|
|
"cmd/internal/obj"
|
2019-11-04 02:31:37 +11:00
|
|
|
"cmd/internal/objabi"
|
2024-10-15 15:18:08 +08:00
|
|
|
"cmd/internal/src"
|
2019-09-08 01:56:26 +10:00
|
|
|
"cmd/internal/sys"
|
|
|
|
"fmt"
|
2023-04-17 15:58:47 -04:00
|
|
|
"internal/abi"
|
2023-12-09 19:18:00 +11:00
|
|
|
"internal/buildcfg"
|
cmd/asm, cmd/link, runtime: introduce FuncInfo flag bits
The runtime traceback code has its own definition of which functions
mark the top frame of a stack, separate from the TOPFRAME bits that
exist in the assembly and are passed along in DWARF information.
It's error-prone and redundant to have two different sources of truth.
This CL provides the actual TOPFRAME bits to the runtime, so that
the runtime can use those bits instead of reinventing its own category.
This CL also adds a new bit, SPWRITE, which marks functions that
write directly to SP (anything but adding and subtracting constants).
Such functions must stop a traceback, because the traceback has no
way to rederive the SP on entry. Again, the runtime has its own definition
which is mostly correct, but also missing some functions. During ordinary
goroutine context switches, such functions do not appear on the stack,
so the incompleteness in the runtime usually doesn't matter.
But profiling signals can arrive at any moment, and the runtime may
crash during traceback if it attempts to unwind an SP-writing frame
and gets out-of-sync with the actual stack. The runtime contains code
to try to detect likely candidates but again it is incomplete.
Deriving the SPWRITE bit automatically from the actual assembly code
provides the complete truth, and passing it to the runtime lets the
runtime use it.
This CL is part of a stack adding windows/arm64
support (#36439), intended to land in the Go 1.17 cycle.
This CL is, however, not windows/arm64-specific.
It is cleanup meant to make the port (and future ports) easier.
Change-Id: I227f53b23ac5b3dabfcc5e8ee3f00df4e113cf58
Reviewed-on: https://go-review.googlesource.com/c/go/+/288800
Trust: Russ Cox <rsc@golang.org>
Trust: Jason A. Donenfeld <Jason@zx2c4.com>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Reviewed-by: Jason A. Donenfeld <Jason@zx2c4.com>
2021-01-28 15:21:33 -05:00
|
|
|
"log"
|
2022-11-08 15:39:27 +08:00
|
|
|
"math/bits"
|
2023-06-20 11:16:56 +08:00
|
|
|
"strings"
|
2019-09-08 01:56:26 +10:00
|
|
|
)
|
|
|
|
|
|
|
|
// buildop is intentionally a no-op on riscv64: no operand or encoding
// tables need to be constructed here. It exists to satisfy the obj
// package's per-architecture initialization hook.
func buildop(ctxt *obj.Link) {}
|
|
|
|
|
cmd/internal/obj/riscv,cmd/link/internal/riscv64: add call trampolines for riscv64
CALL and JMP on riscv64 are currently implemented as an AUIPC+JALR pair. This means
that every call requires two instructions and makes use of the REG_TMP register,
even when the symbol would be directly reachable via a single JAL instruction.
Add support for call trampolines - CALL and JMP are now implemented as a single JAL
instruction, with the linker generating trampolines in the case where the symbol is
not reachable (more than +/-1MiB from the JAL instruction), is an unknown symbol or
does not yet have an address assigned. Each trampoline contains an AUIPC+JALR pair,
which the relocation is applied to.
Due to the limited reachability of the JAL instruction, combined with the way that
the Go linker currently assigns symbol addresses, there are cases where a call is to
a symbol that has no address currently assigned. In this situation we have to assume
that a trampoline will be required, however we can patch this up during relocation,
potentially calling directly instead. This means that we will end up with trampolines
that are unused. In the case of the Go binary, there are around 3,500 trampolines of
which approximately 2,300 are unused (around 9200 bytes of machine instructions).
Overall, this removes over 72,000 AUIPC instructions from the Go binary.
Change-Id: I2d9ecfb85dfc285c7729a3cd0b3a77b6f6c98be0
Reviewed-on: https://go-review.googlesource.com/c/go/+/345051
Trust: Joel Sing <joel@sing.id.au>
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
2021-08-26 01:33:29 +10:00
|
|
|
func jalToSym(ctxt *obj.Link, p *obj.Prog, lr int16) {
|
|
|
|
switch p.As {
|
|
|
|
case obj.ACALL, obj.AJMP, obj.ARET, obj.ADUFFZERO, obj.ADUFFCOPY:
|
|
|
|
default:
|
|
|
|
ctxt.Diag("unexpected Prog in jalToSym: %v", p)
|
|
|
|
return
|
2019-11-04 02:31:37 +11:00
|
|
|
}
|
|
|
|
|
cmd/internal/obj/riscv,cmd/link/internal/riscv64: add call trampolines for riscv64
CALL and JMP on riscv64 are currently implemented as an AUIPC+JALR pair. This means
that every call requires two instructions and makes use of the REG_TMP register,
even when the symbol would be directly reachable via a single JAL instruction.
Add support for call trampolines - CALL and JMP are now implemented as a single JAL
instruction, with the linker generating trampolines in the case where the symbol is
not reachable (more than +/-1MiB from the JAL instruction), is an unknown symbol or
does not yet have an address assigned. Each trampoline contains an AUIPC+JALR pair,
which the relocation is applied to.
Due to the limited reachability of the JAL instruction, combined with the way that
the Go linker currently assigns symbol addresses, there are cases where a call is to
a symbol that has no address currently assigned. In this situation we have to assume
that a trampoline will be required, however we can patch this up during relocation,
potentially calling directly instead. This means that we will end up with trampolines
that are unused. In the case of the Go binary, there are around 3,500 trampolines of
which approximately 2,300 are unused (around 9200 bytes of machine instructions).
Overall, this removes over 72,000 AUIPC instructions from the Go binary.
Change-Id: I2d9ecfb85dfc285c7729a3cd0b3a77b6f6c98be0
Reviewed-on: https://go-review.googlesource.com/c/go/+/345051
Trust: Joel Sing <joel@sing.id.au>
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
2021-08-26 01:33:29 +10:00
|
|
|
p.As = AJAL
|
2023-08-17 01:13:32 +10:00
|
|
|
p.Mark |= NEED_JAL_RELOC
|
2019-11-04 02:31:37 +11:00
|
|
|
p.From.Type = obj.TYPE_REG
|
|
|
|
p.From.Reg = lr
|
2021-08-25 20:08:37 +10:00
|
|
|
p.Reg = obj.REG_NONE
|
2019-11-04 02:31:37 +11:00
|
|
|
}
|
|
|
|
|
2019-09-08 04:11:07 +10:00
|
|
|
// progedit is called individually for each *obj.Prog. It normalizes instruction
// formats and eliminates as many pseudo-instructions as possible.
func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
	insData, err := instructionDataForAs(p.As)
	if err != nil {
		// A failed lookup means an opcode reached progedit that has no
		// instruction data - an internal inconsistency, not bad input.
		panic(fmt.Sprintf("failed to lookup instruction data for %v: %v", p.As, err))
	}

	// Expand binary instructions to ternary ones.
	if p.Reg == obj.REG_NONE {
		if insData.ternary {
			// In the binary form the destination also acts as the
			// second source operand.
			p.Reg = p.To.Reg
		}
	}

	// Rewrite instructions with constant operands to refer to the immediate
	// form of the instruction.
	if p.From.Type == obj.TYPE_CONST {
		switch p.As {
		case ASUB:
			// There is no SUB-immediate; negate the constant and use ADDI.
			p.As, p.From.Offset = AADDI, -p.From.Offset
		case ASUBW:
			p.As, p.From.Offset = AADDIW, -p.From.Offset
		default:
			if insData.immForm != obj.AXXX {
				p.As = insData.immForm
			}
		}
	}

	switch p.As {
	case obj.AJMP:
		// Turn JMP into JAL ZERO or JALR ZERO.
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_ZERO

		switch p.To.Type {
		case obj.TYPE_BRANCH:
			p.As = AJAL
		case obj.TYPE_MEM:
			switch p.To.Name {
			case obj.NAME_NONE:
				p.As = AJALR
			case obj.NAME_EXTERN, obj.NAME_STATIC:
				// Handled in preprocess.
			default:
				ctxt.Diag("unsupported name %d for %v", p.To.Name, p)
			}
		default:
			panic(fmt.Sprintf("unhandled type %+v", p.To.Type))
		}

	case obj.ACALL:
		switch p.To.Type {
		case obj.TYPE_MEM:
			// Handled in preprocess.
		case obj.TYPE_REG:
			// Indirect call through a register: JALR, linking via LR.
			p.As = AJALR
			p.From.Type = obj.TYPE_REG
			p.From.Reg = REG_LR
		default:
			ctxt.Diag("unknown destination type %+v in CALL: %v", p.To.Type, p)
		}

	case obj.AUNDEF:
		// UNDEF is materialized as a breakpoint instruction.
		p.As = AEBREAK

	case AFMVXS:
		// FMVXS is the old name for FMVXW.
		p.As = AFMVXW

	case AFMVSX:
		// FMVSX is the old name for FMVWX.
		p.As = AFMVWX

	case ASCALL:
		// SCALL is the old name for ECALL.
		p.As = AECALL

	case ASBREAK:
		// SBREAK is the old name for EBREAK.
		p.As = AEBREAK

	case AMOV:
		// Rewrite constants that do not fit in 32 bits (and are not a
		// 32-bit value shifted left by trailing zeros) into loads from
		// a memory symbol.
		if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE && p.From.Reg == obj.REG_NONE && int64(int32(p.From.Offset)) != p.From.Offset {
			ctz := bits.TrailingZeros64(uint64(p.From.Offset))
			val := p.From.Offset >> ctz
			if int64(int32(val)) == val {
				// It's ok. We can handle constants with many trailing zeros.
				break
			}
			// Put >32-bit constants in memory and load them.
			p.From.Type = obj.TYPE_MEM
			p.From.Sym = ctxt.Int64Sym(p.From.Offset)
			p.From.Name = obj.NAME_EXTERN
			p.From.Offset = 0
		}

	case AMOVD:
		// Put float64 constants in memory and load them.
		if p.From.Type == obj.TYPE_FCONST && p.From.Name == obj.NAME_NONE && p.From.Reg == obj.REG_NONE {
			f64 := p.From.Val.(float64)
			p.From.Type = obj.TYPE_MEM
			p.From.Sym = ctxt.Float64Sym(f64)
			p.From.Name = obj.NAME_EXTERN
			p.From.Offset = 0
		}
	}
}
|
|
|
|
|
2019-10-04 04:02:38 +10:00
|
|
|
// addrToReg extracts the register from an Addr, handling special Addr.Names.
|
|
|
|
func addrToReg(a obj.Addr) int16 {
|
|
|
|
switch a.Name {
|
|
|
|
case obj.NAME_PARAM, obj.NAME_AUTO:
|
|
|
|
return REG_SP
|
|
|
|
}
|
|
|
|
return a.Reg
|
|
|
|
}
|
|
|
|
|
|
|
|
// movToLoad converts a MOV mnemonic into the corresponding load instruction.
|
|
|
|
func movToLoad(mnemonic obj.As) obj.As {
|
|
|
|
switch mnemonic {
|
|
|
|
case AMOV:
|
|
|
|
return ALD
|
|
|
|
case AMOVB:
|
|
|
|
return ALB
|
|
|
|
case AMOVH:
|
|
|
|
return ALH
|
|
|
|
case AMOVW:
|
|
|
|
return ALW
|
|
|
|
case AMOVBU:
|
|
|
|
return ALBU
|
|
|
|
case AMOVHU:
|
|
|
|
return ALHU
|
|
|
|
case AMOVWU:
|
|
|
|
return ALWU
|
|
|
|
case AMOVF:
|
|
|
|
return AFLW
|
|
|
|
case AMOVD:
|
|
|
|
return AFLD
|
|
|
|
default:
|
|
|
|
panic(fmt.Sprintf("%+v is not a MOV", mnemonic))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// movToStore converts a MOV mnemonic into the corresponding store instruction.
|
|
|
|
func movToStore(mnemonic obj.As) obj.As {
|
|
|
|
switch mnemonic {
|
|
|
|
case AMOV:
|
|
|
|
return ASD
|
|
|
|
case AMOVB:
|
|
|
|
return ASB
|
|
|
|
case AMOVH:
|
|
|
|
return ASH
|
|
|
|
case AMOVW:
|
|
|
|
return ASW
|
|
|
|
case AMOVF:
|
|
|
|
return AFSW
|
|
|
|
case AMOVD:
|
|
|
|
return AFSD
|
|
|
|
default:
|
|
|
|
panic(fmt.Sprintf("%+v is not a MOV", mnemonic))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-08-21 09:06:25 +00:00
|
|
|
// markRelocs marks an obj.Prog that specifies a MOV pseudo-instruction and
|
|
|
|
// requires relocation.
|
|
|
|
func markRelocs(p *obj.Prog) {
|
2019-10-04 04:02:38 +10:00
|
|
|
switch p.As {
|
|
|
|
case AMOV, AMOVB, AMOVH, AMOVW, AMOVBU, AMOVHU, AMOVWU, AMOVF, AMOVD:
|
2021-08-21 09:06:25 +00:00
|
|
|
switch {
|
|
|
|
case p.From.Type == obj.TYPE_ADDR && p.To.Type == obj.TYPE_REG:
|
|
|
|
switch p.From.Name {
|
|
|
|
case obj.NAME_EXTERN, obj.NAME_STATIC:
|
|
|
|
p.Mark |= NEED_PCREL_ITYPE_RELOC
|
2024-09-12 20:03:59 +08:00
|
|
|
case obj.NAME_GOTREF:
|
|
|
|
p.Mark |= NEED_GOT_PCREL_ITYPE_RELOC
|
2021-08-21 09:06:25 +00:00
|
|
|
}
|
|
|
|
case p.From.Type == obj.TYPE_MEM && p.To.Type == obj.TYPE_REG:
|
|
|
|
switch p.From.Name {
|
|
|
|
case obj.NAME_EXTERN, obj.NAME_STATIC:
|
|
|
|
p.Mark |= NEED_PCREL_ITYPE_RELOC
|
2024-09-12 20:03:59 +08:00
|
|
|
case obj.NAME_GOTREF:
|
|
|
|
p.Mark |= NEED_GOT_PCREL_ITYPE_RELOC
|
2021-08-21 09:06:25 +00:00
|
|
|
}
|
|
|
|
case p.From.Type == obj.TYPE_REG && p.To.Type == obj.TYPE_MEM:
|
|
|
|
switch p.To.Name {
|
|
|
|
case obj.NAME_EXTERN, obj.NAME_STATIC:
|
|
|
|
p.Mark |= NEED_PCREL_STYPE_RELOC
|
|
|
|
}
|
2019-10-04 04:02:38 +10:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-04 04:40:47 +11:00
|
|
|
// InvertBranch inverts the condition of a conditional branch.
|
2020-03-31 01:57:52 +11:00
|
|
|
func InvertBranch(as obj.As) obj.As {
|
|
|
|
switch as {
|
2019-11-04 02:31:37 +11:00
|
|
|
case ABEQ:
|
|
|
|
return ABNE
|
2020-03-31 01:57:52 +11:00
|
|
|
case ABEQZ:
|
|
|
|
return ABNEZ
|
2019-11-04 02:31:37 +11:00
|
|
|
case ABGE:
|
|
|
|
return ABLT
|
|
|
|
case ABGEU:
|
|
|
|
return ABLTU
|
2020-03-31 01:57:52 +11:00
|
|
|
case ABGEZ:
|
|
|
|
return ABLTZ
|
|
|
|
case ABGT:
|
|
|
|
return ABLE
|
|
|
|
case ABGTU:
|
|
|
|
return ABLEU
|
|
|
|
case ABGTZ:
|
|
|
|
return ABLEZ
|
|
|
|
case ABLE:
|
|
|
|
return ABGT
|
|
|
|
case ABLEU:
|
|
|
|
return ABGTU
|
|
|
|
case ABLEZ:
|
|
|
|
return ABGTZ
|
|
|
|
case ABLT:
|
|
|
|
return ABGE
|
|
|
|
case ABLTU:
|
|
|
|
return ABGEU
|
|
|
|
case ABLTZ:
|
|
|
|
return ABGEZ
|
|
|
|
case ABNE:
|
|
|
|
return ABEQ
|
|
|
|
case ABNEZ:
|
|
|
|
return ABEQZ
|
2019-11-04 02:31:37 +11:00
|
|
|
default:
|
2019-11-04 04:40:47 +11:00
|
|
|
panic("InvertBranch: not a branch")
|
2019-11-04 02:31:37 +11:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-04 04:32:32 +11:00
|
|
|
// containsCall reports whether the symbol contains a CALL (or equivalent)
|
|
|
|
// instruction. Must be called after progedit.
|
|
|
|
func containsCall(sym *obj.LSym) bool {
|
|
|
|
// CALLs are CALL or JAL(R) with link register LR.
|
2020-07-19 00:30:12 -04:00
|
|
|
for p := sym.Func().Text; p != nil; p = p.Link {
|
2019-11-04 04:32:32 +11:00
|
|
|
switch p.As {
|
2020-06-14 00:06:24 +02:00
|
|
|
case obj.ACALL, obj.ADUFFZERO, obj.ADUFFCOPY:
|
2019-11-04 04:32:32 +11:00
|
|
|
return true
|
|
|
|
case AJAL, AJALR:
|
2020-08-19 03:07:26 +10:00
|
|
|
if p.From.Type == obj.TYPE_REG && p.From.Reg == REG_LR {
|
2019-11-04 04:32:32 +11:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2019-09-08 01:56:26 +10:00
|
|
|
// setPCs sets the Pc field in all instructions reachable from p.
|
2021-10-16 03:59:41 +11:00
|
|
|
// It uses pc as the initial value and returns the next available pc.
|
|
|
|
func setPCs(p *obj.Prog, pc int64) int64 {
|
2019-09-08 01:56:26 +10:00
|
|
|
for ; p != nil; p = p.Link {
|
|
|
|
p.Pc = pc
|
2019-12-19 02:09:45 +11:00
|
|
|
for _, ins := range instructionsForProg(p) {
|
|
|
|
pc += int64(ins.length())
|
|
|
|
}
|
2023-11-12 17:05:57 +08:00
|
|
|
|
|
|
|
if p.As == obj.APCALIGN {
|
|
|
|
alignedValue := p.From.Offset
|
|
|
|
v := pcAlignPadLength(pc, alignedValue)
|
|
|
|
pc += int64(v)
|
|
|
|
}
|
2019-09-08 01:56:26 +10:00
|
|
|
}
|
2021-10-16 03:59:41 +11:00
|
|
|
return pc
|
2019-09-08 01:56:26 +10:00
|
|
|
}
|
|
|
|
|
2019-10-04 04:02:38 +10:00
|
|
|
// stackOffset updates Addr offsets based on the current stack size.
|
|
|
|
//
|
|
|
|
// The stack looks like:
|
|
|
|
// -------------------
|
|
|
|
// | |
|
|
|
|
// | PARAMs |
|
|
|
|
// | |
|
|
|
|
// | |
|
|
|
|
// -------------------
|
|
|
|
// | Parent RA | SP on function entry
|
|
|
|
// -------------------
|
|
|
|
// | |
|
|
|
|
// | |
|
|
|
|
// | AUTOs |
|
|
|
|
// | |
|
|
|
|
// | |
|
|
|
|
// -------------------
|
|
|
|
// | RA | SP during function execution
|
|
|
|
// -------------------
|
|
|
|
//
|
|
|
|
// FixedFrameSize makes other packages aware of the space allocated for RA.
|
|
|
|
//
|
|
|
|
// A nicer version of this diagram can be found on slide 21 of the presentation
|
2022-01-29 19:07:27 -05:00
|
|
|
// attached to https://golang.org/issue/16922#issuecomment-243748180.
|
2019-10-04 04:02:38 +10:00
|
|
|
func stackOffset(a *obj.Addr, stacksize int64) {
|
|
|
|
switch a.Name {
|
|
|
|
case obj.NAME_AUTO:
|
|
|
|
// Adjust to the top of AUTOs.
|
|
|
|
a.Offset += stacksize
|
|
|
|
case obj.NAME_PARAM:
|
|
|
|
// Adjust to the bottom of PARAMs.
|
|
|
|
a.Offset += stacksize + 8
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-04 04:32:32 +11:00
|
|
|
// preprocess generates prologue and epilogue code, computes PC-relative branch
|
|
|
|
// and jump offsets, and resolves pseudo-registers.
|
|
|
|
//
|
|
|
|
// preprocess is called once per linker symbol.
|
|
|
|
//
|
|
|
|
// When preprocess finishes, all instructions in the symbol are either
|
|
|
|
// concrete, real RISC-V instructions or directive pseudo-ops like TEXT,
|
|
|
|
// PCDATA, and FUNCDATA.
|
2019-09-08 01:56:26 +10:00
|
|
|
func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
|
2020-07-19 00:30:12 -04:00
|
|
|
if cursym.Func().Text == nil || cursym.Func().Text.Link == nil {
|
2019-09-08 01:56:26 +10:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-11-04 04:32:32 +11:00
|
|
|
// Generate the prologue.
|
2020-07-19 00:30:12 -04:00
|
|
|
text := cursym.Func().Text
|
2019-09-08 01:56:26 +10:00
|
|
|
if text.As != obj.ATEXT {
|
|
|
|
ctxt.Diag("preprocess: found symbol that does not start with TEXT directive")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
stacksize := text.To.Offset
|
|
|
|
if stacksize == -8 {
|
|
|
|
// Historical way to mark NOFRAME.
|
|
|
|
text.From.Sym.Set(obj.AttrNoFrame, true)
|
|
|
|
stacksize = 0
|
|
|
|
}
|
|
|
|
if stacksize < 0 {
|
|
|
|
ctxt.Diag("negative frame size %d - did you mean NOFRAME?", stacksize)
|
|
|
|
}
|
|
|
|
if text.From.Sym.NoFrame() {
|
|
|
|
if stacksize != 0 {
|
|
|
|
ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", stacksize)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-04 04:32:32 +11:00
|
|
|
if !containsCall(cursym) {
|
|
|
|
text.From.Sym.Set(obj.AttrLeaf, true)
|
|
|
|
if stacksize == 0 {
|
|
|
|
// A leaf function with no locals has no frame.
|
|
|
|
text.From.Sym.Set(obj.AttrNoFrame, true)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Save LR unless there is no frame.
|
|
|
|
if !text.From.Sym.NoFrame() {
|
2022-04-18 13:41:08 -04:00
|
|
|
stacksize += ctxt.Arch.FixedFrameSize
|
2019-11-04 04:32:32 +11:00
|
|
|
}
|
|
|
|
|
2020-07-19 00:30:12 -04:00
|
|
|
cursym.Func().Args = text.To.Val.(int32)
|
|
|
|
cursym.Func().Locals = int32(stacksize)
|
2019-09-08 01:56:26 +10:00
|
|
|
|
2019-11-04 04:32:32 +11:00
|
|
|
prologue := text
|
|
|
|
|
2020-07-19 00:30:12 -04:00
|
|
|
if !cursym.Func().Text.From.Sym.NoSplit() {
|
2019-11-04 04:32:32 +11:00
|
|
|
prologue = stacksplit(ctxt, prologue, cursym, newprog, stacksize) // emit split check
|
|
|
|
}
|
|
|
|
|
2024-10-15 15:18:08 +08:00
|
|
|
q := prologue
|
|
|
|
|
2019-11-04 04:32:32 +11:00
|
|
|
if stacksize != 0 {
|
|
|
|
prologue = ctxt.StartUnsafePoint(prologue, newprog)
|
|
|
|
|
|
|
|
// Actually save LR.
|
|
|
|
prologue = obj.Appendp(prologue, newprog)
|
|
|
|
prologue.As = AMOV
|
2024-10-15 15:18:08 +08:00
|
|
|
prologue.Pos = q.Pos
|
2019-11-04 04:32:32 +11:00
|
|
|
prologue.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_LR}
|
|
|
|
prologue.To = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_SP, Offset: -stacksize}
|
|
|
|
|
|
|
|
// Insert stack adjustment.
|
|
|
|
prologue = obj.Appendp(prologue, newprog)
|
|
|
|
prologue.As = AADDI
|
2024-10-15 15:18:08 +08:00
|
|
|
prologue.Pos = q.Pos
|
|
|
|
prologue.Pos = prologue.Pos.WithXlogue(src.PosPrologueEnd)
|
2019-11-04 04:32:32 +11:00
|
|
|
prologue.From = obj.Addr{Type: obj.TYPE_CONST, Offset: -stacksize}
|
|
|
|
prologue.Reg = REG_SP
|
|
|
|
prologue.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_SP}
|
|
|
|
prologue.Spadj = int32(stacksize)
|
|
|
|
|
|
|
|
prologue = ctxt.EndUnsafePoint(prologue, newprog, -1)
|
2022-06-22 16:28:41 -04:00
|
|
|
|
|
|
|
// On Linux, in a cgo binary we may get a SIGSETXID signal early on
|
|
|
|
// before the signal stack is set, as glibc doesn't allow us to block
|
|
|
|
// SIGSETXID. So a signal may land on the current stack and clobber
|
|
|
|
// the content below the SP. We store the LR again after the SP is
|
|
|
|
// decremented.
|
|
|
|
prologue = obj.Appendp(prologue, newprog)
|
|
|
|
prologue.As = AMOV
|
|
|
|
prologue.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_LR}
|
|
|
|
prologue.To = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_SP, Offset: 0}
|
2019-11-04 04:32:32 +11:00
|
|
|
}
|
|
|
|
|
2020-07-19 00:30:12 -04:00
|
|
|
if cursym.Func().Text.From.Sym.Wrapper() {
|
2019-11-04 04:32:32 +11:00
|
|
|
// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
|
|
|
|
//
|
2022-03-29 19:14:24 +08:00
|
|
|
// MOV g_panic(g), X5
|
|
|
|
// BNE X5, ZERO, adjust
|
2019-11-04 04:32:32 +11:00
|
|
|
// end:
|
|
|
|
// NOP
|
|
|
|
// ...rest of function..
|
|
|
|
// adjust:
|
2022-03-29 19:14:24 +08:00
|
|
|
// MOV panic_argp(X5), X6
|
|
|
|
// ADD $(autosize+FIXED_FRAME), SP, X7
|
|
|
|
// BNE X6, X7, end
|
|
|
|
// ADD $FIXED_FRAME, SP, X6
|
|
|
|
// MOV X6, panic_argp(X5)
|
2019-11-04 04:32:32 +11:00
|
|
|
// JMP end
|
|
|
|
//
|
|
|
|
// The NOP is needed to give the jumps somewhere to land.
|
|
|
|
|
|
|
|
ldpanic := obj.Appendp(prologue, newprog)
|
|
|
|
|
|
|
|
ldpanic.As = AMOV
|
|
|
|
ldpanic.From = obj.Addr{Type: obj.TYPE_MEM, Reg: REGG, Offset: 4 * int64(ctxt.Arch.PtrSize)} // G.panic
|
2021-08-25 20:08:37 +10:00
|
|
|
ldpanic.Reg = obj.REG_NONE
|
2022-03-29 19:14:24 +08:00
|
|
|
ldpanic.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X5}
|
2019-11-04 04:32:32 +11:00
|
|
|
|
|
|
|
bneadj := obj.Appendp(ldpanic, newprog)
|
|
|
|
bneadj.As = ABNE
|
2022-03-29 19:14:24 +08:00
|
|
|
bneadj.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X5}
|
2019-11-04 04:32:32 +11:00
|
|
|
bneadj.Reg = REG_ZERO
|
|
|
|
bneadj.To.Type = obj.TYPE_BRANCH
|
|
|
|
|
|
|
|
endadj := obj.Appendp(bneadj, newprog)
|
|
|
|
endadj.As = obj.ANOP
|
|
|
|
|
|
|
|
last := endadj
|
|
|
|
for last.Link != nil {
|
|
|
|
last = last.Link
|
|
|
|
}
|
|
|
|
|
|
|
|
getargp := obj.Appendp(last, newprog)
|
|
|
|
getargp.As = AMOV
|
2022-03-29 19:14:24 +08:00
|
|
|
getargp.From = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_X5, Offset: 0} // Panic.argp
|
2021-08-25 20:08:37 +10:00
|
|
|
getargp.Reg = obj.REG_NONE
|
2022-03-29 19:14:24 +08:00
|
|
|
getargp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X6}
|
2019-11-04 04:32:32 +11:00
|
|
|
|
2020-08-28 17:10:32 +00:00
|
|
|
bneadj.To.SetTarget(getargp)
|
2019-11-04 04:32:32 +11:00
|
|
|
|
|
|
|
calcargp := obj.Appendp(getargp, newprog)
|
|
|
|
calcargp.As = AADDI
|
2022-04-18 13:41:08 -04:00
|
|
|
calcargp.From = obj.Addr{Type: obj.TYPE_CONST, Offset: stacksize + ctxt.Arch.FixedFrameSize}
|
2019-11-04 04:32:32 +11:00
|
|
|
calcargp.Reg = REG_SP
|
2022-03-29 19:14:24 +08:00
|
|
|
calcargp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X7}
|
2019-11-04 04:32:32 +11:00
|
|
|
|
|
|
|
testargp := obj.Appendp(calcargp, newprog)
|
|
|
|
testargp.As = ABNE
|
2022-03-29 19:14:24 +08:00
|
|
|
testargp.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X6}
|
|
|
|
testargp.Reg = REG_X7
|
2019-11-04 04:32:32 +11:00
|
|
|
testargp.To.Type = obj.TYPE_BRANCH
|
2020-08-28 17:10:32 +00:00
|
|
|
testargp.To.SetTarget(endadj)
|
2019-11-04 04:32:32 +11:00
|
|
|
|
|
|
|
adjargp := obj.Appendp(testargp, newprog)
|
|
|
|
adjargp.As = AADDI
|
|
|
|
adjargp.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(ctxt.Arch.PtrSize)}
|
|
|
|
adjargp.Reg = REG_SP
|
2022-03-29 19:14:24 +08:00
|
|
|
adjargp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X6}
|
2019-11-04 04:32:32 +11:00
|
|
|
|
|
|
|
setargp := obj.Appendp(adjargp, newprog)
|
|
|
|
setargp.As = AMOV
|
2022-03-29 19:14:24 +08:00
|
|
|
setargp.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X6}
|
2021-08-25 20:08:37 +10:00
|
|
|
setargp.Reg = obj.REG_NONE
|
2022-03-29 19:14:24 +08:00
|
|
|
setargp.To = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_X5, Offset: 0} // Panic.argp
|
2019-11-04 04:32:32 +11:00
|
|
|
|
|
|
|
godone := obj.Appendp(setargp, newprog)
|
|
|
|
godone.As = AJAL
|
|
|
|
godone.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO}
|
|
|
|
godone.To.Type = obj.TYPE_BRANCH
|
2020-08-28 17:10:32 +00:00
|
|
|
godone.To.SetTarget(endadj)
|
2019-11-04 04:32:32 +11:00
|
|
|
}
|
2019-09-08 01:56:26 +10:00
|
|
|
|
2019-10-04 04:02:38 +10:00
|
|
|
// Update stack-based offsets.
|
2020-07-19 00:30:12 -04:00
|
|
|
for p := cursym.Func().Text; p != nil; p = p.Link {
|
2019-10-04 04:02:38 +10:00
|
|
|
stackOffset(&p.From, stacksize)
|
|
|
|
stackOffset(&p.To, stacksize)
|
|
|
|
}
|
|
|
|
|
2019-11-04 04:32:32 +11:00
|
|
|
// Additional instruction rewriting.
|
2020-07-19 00:30:12 -04:00
|
|
|
for p := cursym.Func().Text; p != nil; p = p.Link {
|
2020-01-01 01:28:22 +11:00
|
|
|
switch p.As {
|
|
|
|
case obj.AGETCALLERPC:
|
2019-11-04 04:31:20 +11:00
|
|
|
if cursym.Leaf() {
|
|
|
|
// MOV LR, Rd
|
|
|
|
p.As = AMOV
|
|
|
|
p.From.Type = obj.TYPE_REG
|
|
|
|
p.From.Reg = REG_LR
|
|
|
|
} else {
|
|
|
|
// MOV (RSP), Rd
|
|
|
|
p.As = AMOV
|
|
|
|
p.From.Type = obj.TYPE_MEM
|
|
|
|
p.From.Reg = REG_SP
|
|
|
|
}
|
2019-11-04 02:31:37 +11:00
|
|
|
|
2020-06-14 00:06:24 +02:00
|
|
|
case obj.ACALL, obj.ADUFFZERO, obj.ADUFFCOPY:
|
2019-11-04 02:31:37 +11:00
|
|
|
switch p.To.Type {
|
|
|
|
case obj.TYPE_MEM:
|
cmd/internal/obj/riscv,cmd/link/internal/riscv64: add call trampolines for riscv64
CALL and JMP on riscv64 are currently implemented as an AUIPC+JALR pair. This means
that every call requires two instructions and makes use of the REG_TMP register,
even when the symbol would be directly reachable via a single JAL instruction.
Add support for call trampolines - CALL and JMP are now implemented as a single JAL
instruction, with the linker generating trampolines in the case where the symbol is
not reachable (more than +/-1MiB from the JAL instruction), is an unknown symbol or
does not yet have an address assigned. Each trampoline contains an AUIPC+JALR pair,
which the relocation is applied to.
Due to the limited reachability of the JAL instruction, combined with the way that
the Go linker currently assigns symbol addresses, there are cases where a call is to
a symbol that has no address currently assigned. In this situation we have to assume
that a trampoline will be required, however we can patch this up during relocation,
potentially calling directly instead. This means that we will end up with trampolines
that are unused. In the case of the Go binary, there are around 3,500 trampolines of
which approximately 2,300 are unused (around 9200 bytes of machine instructions).
Overall, this removes over 72,000 AUIPC instructions from the Go binary.
Change-Id: I2d9ecfb85dfc285c7729a3cd0b3a77b6f6c98be0
Reviewed-on: https://go-review.googlesource.com/c/go/+/345051
Trust: Joel Sing <joel@sing.id.au>
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
2021-08-26 01:33:29 +10:00
|
|
|
jalToSym(ctxt, p, REG_LR)
|
2019-11-04 02:31:37 +11:00
|
|
|
}
|
|
|
|
|
|
|
|
case obj.AJMP:
|
|
|
|
switch p.To.Type {
|
|
|
|
case obj.TYPE_MEM:
|
|
|
|
switch p.To.Name {
|
2021-02-15 13:58:45 -05:00
|
|
|
case obj.NAME_EXTERN, obj.NAME_STATIC:
|
cmd/internal/obj/riscv,cmd/link/internal/riscv64: add call trampolines for riscv64
CALL and JMP on riscv64 are currently implemented as an AUIPC+JALR pair. This means
that every call requires two instructions and makes use of the REG_TMP register,
even when the symbol would be directly reachable via a single JAL instruction.
Add support for call trampolines - CALL and JMP are now implemented as a single JAL
instruction, with the linker generating trampolines in the case where the symbol is
not reachable (more than +/-1MiB from the JAL instruction), is an unknown symbol or
does not yet have an address assigned. Each trampoline contains an AUIPC+JALR pair,
which the relocation is applied to.
Due to the limited reachability of the JAL instruction, combined with the way that
the Go linker currently assigns symbol addresses, there are cases where a call is to
a symbol that has no address currently assigned. In this situation we have to assume
that a trampoline will be required, however we can patch this up during relocation,
potentially calling directly instead. This means that we will end up with trampolines
that are unused. In the case of the Go binary, there are around 3,500 trampolines of
which approximately 2,300 are unused (around 9200 bytes of machine instructions).
Overall, this removes over 72,000 AUIPC instructions from the Go binary.
Change-Id: I2d9ecfb85dfc285c7729a3cd0b3a77b6f6c98be0
Reviewed-on: https://go-review.googlesource.com/c/go/+/345051
Trust: Joel Sing <joel@sing.id.au>
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
2021-08-26 01:33:29 +10:00
|
|
|
jalToSym(ctxt, p, REG_ZERO)
|
2019-11-04 02:31:37 +11:00
|
|
|
}
|
|
|
|
}
|
2019-11-04 04:08:26 +11:00
|
|
|
|
2019-11-04 04:32:32 +11:00
|
|
|
case obj.ARET:
|
|
|
|
// Replace RET with epilogue.
|
|
|
|
retJMP := p.To.Sym
|
|
|
|
|
|
|
|
if stacksize != 0 {
|
|
|
|
// Restore LR.
|
|
|
|
p.As = AMOV
|
|
|
|
p.From = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_SP, Offset: 0}
|
|
|
|
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_LR}
|
|
|
|
p = obj.Appendp(p, newprog)
|
|
|
|
|
|
|
|
p.As = AADDI
|
|
|
|
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: stacksize}
|
|
|
|
p.Reg = REG_SP
|
|
|
|
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_SP}
|
|
|
|
p.Spadj = int32(-stacksize)
|
|
|
|
p = obj.Appendp(p, newprog)
|
|
|
|
}
|
|
|
|
|
|
|
|
if retJMP != nil {
|
|
|
|
p.As = obj.ARET
|
|
|
|
p.To.Sym = retJMP
|
cmd/internal/obj/riscv,cmd/link/internal/riscv64: add call trampolines for riscv64
CALL and JMP on riscv64 are currently implemented as an AUIPC+JALR pair. This means
that every call requires two instructions and makes use of the REG_TMP register,
even when the symbol would be directly reachable via a single JAL instruction.
Add support for call trampolines - CALL and JMP are now implemented as a single JAL
instruction, with the linker generating trampolines in the case where the symbol is
not reachable (more than +/-1MiB from the JAL instruction), is an unknown symbol or
does not yet have an address assigned. Each trampoline contains an AUIPC+JALR pair,
which the relocation is applied to.
Due to the limited reachability of the JAL instruction, combined with the way that
the Go linker currently assigns symbol addresses, there are cases where a call is to
a symbol that has no address currently assigned. In this situation we have to assume
that a trampoline will be required, however we can patch this up during relocation,
potentially calling directly instead. This means that we will end up with trampolines
that are unused. In the case of the Go binary, there are around 3,500 trampolines of
which approximately 2,300 are unused (around 9200 bytes of machine instructions).
Overall, this removes over 72,000 AUIPC instructions from the Go binary.
Change-Id: I2d9ecfb85dfc285c7729a3cd0b3a77b6f6c98be0
Reviewed-on: https://go-review.googlesource.com/c/go/+/345051
Trust: Joel Sing <joel@sing.id.au>
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
2021-08-26 01:33:29 +10:00
|
|
|
jalToSym(ctxt, p, REG_ZERO)
|
2019-11-04 04:32:32 +11:00
|
|
|
} else {
|
|
|
|
p.As = AJALR
|
2020-08-19 03:07:26 +10:00
|
|
|
p.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO}
|
2021-08-25 20:08:37 +10:00
|
|
|
p.Reg = obj.REG_NONE
|
2020-08-19 03:07:26 +10:00
|
|
|
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_LR}
|
2019-11-04 04:32:32 +11:00
|
|
|
}
|
|
|
|
|
|
|
|
// "Add back" the stack removed in the previous instruction.
|
|
|
|
//
|
|
|
|
// This is to avoid confusing pctospadj, which sums
|
|
|
|
// Spadj from function entry to each PC, and shouldn't
|
|
|
|
// count adjustments from earlier epilogues, since they
|
|
|
|
// won't affect later PCs.
|
|
|
|
p.Spadj = int32(stacksize)
|
2020-04-15 13:23:52 +00:00
|
|
|
|
|
|
|
case AADDI:
|
|
|
|
// Refine Spadjs account for adjustment via ADDI instruction.
|
|
|
|
if p.To.Type == obj.TYPE_REG && p.To.Reg == REG_SP && p.From.Type == obj.TYPE_CONST {
|
|
|
|
p.Spadj = int32(-p.From.Offset)
|
|
|
|
}
|
2019-10-04 04:02:38 +10:00
|
|
|
}
|
cmd/asm, cmd/link, runtime: introduce FuncInfo flag bits
The runtime traceback code has its own definition of which functions
mark the top frame of a stack, separate from the TOPFRAME bits that
exist in the assembly and are passed along in DWARF information.
It's error-prone and redundant to have two different sources of truth.
This CL provides the actual TOPFRAME bits to the runtime, so that
the runtime can use those bits instead of reinventing its own category.
This CL also adds a new bit, SPWRITE, which marks functions that
write directly to SP (anything but adding and subtracting constants).
Such functions must stop a traceback, because the traceback has no
way to rederive the SP on entry. Again, the runtime has its own definition
which is mostly correct, but also missing some functions. During ordinary
goroutine context switches, such functions do not appear on the stack,
so the incompleteness in the runtime usually doesn't matter.
But profiling signals can arrive at any moment, and the runtime may
crash during traceback if it attempts to unwind an SP-writing frame
and gets out-of-sync with the actual stack. The runtime contains code
to try to detect likely candidates but again it is incomplete.
Deriving the SPWRITE bit automatically from the actual assembly code
provides the complete truth, and passing it to the runtime lets the
runtime use it.
This CL is part of a stack adding windows/arm64
support (#36439), intended to land in the Go 1.17 cycle.
This CL is, however, not windows/arm64-specific.
It is cleanup meant to make the port (and future ports) easier.
Change-Id: I227f53b23ac5b3dabfcc5e8ee3f00df4e113cf58
Reviewed-on: https://go-review.googlesource.com/c/go/+/288800
Trust: Russ Cox <rsc@golang.org>
Trust: Jason A. Donenfeld <Jason@zx2c4.com>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Reviewed-by: Jason A. Donenfeld <Jason@zx2c4.com>
2021-01-28 15:21:33 -05:00
|
|
|
|
|
|
|
if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.Spadj == 0 {
|
|
|
|
f := cursym.Func()
|
2023-04-17 15:58:47 -04:00
|
|
|
if f.FuncFlag&abi.FuncFlagSPWrite == 0 {
|
|
|
|
f.FuncFlag |= abi.FuncFlagSPWrite
|
cmd/asm, cmd/link, runtime: introduce FuncInfo flag bits
The runtime traceback code has its own definition of which functions
mark the top frame of a stack, separate from the TOPFRAME bits that
exist in the assembly and are passed along in DWARF information.
It's error-prone and redundant to have two different sources of truth.
This CL provides the actual TOPFRAME bits to the runtime, so that
the runtime can use those bits instead of reinventing its own category.
This CL also adds a new bit, SPWRITE, which marks functions that
write directly to SP (anything but adding and subtracting constants).
Such functions must stop a traceback, because the traceback has no
way to rederive the SP on entry. Again, the runtime has its own definition
which is mostly correct, but also missing some functions. During ordinary
goroutine context switches, such functions do not appear on the stack,
so the incompleteness in the runtime usually doesn't matter.
But profiling signals can arrive at any moment, and the runtime may
crash during traceback if it attempts to unwind an SP-writing frame
and gets out-of-sync with the actual stack. The runtime contains code
to try to detect likely candidates but again it is incomplete.
Deriving the SPWRITE bit automatically from the actual assembly code
provides the complete truth, and passing it to the runtime lets the
runtime use it.
This CL is part of a stack adding windows/arm64
support (#36439), intended to land in the Go 1.17 cycle.
This CL is, however, not windows/arm64-specific.
It is cleanup meant to make the port (and future ports) easier.
Change-Id: I227f53b23ac5b3dabfcc5e8ee3f00df4e113cf58
Reviewed-on: https://go-review.googlesource.com/c/go/+/288800
Trust: Russ Cox <rsc@golang.org>
Trust: Jason A. Donenfeld <Jason@zx2c4.com>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Reviewed-by: Jason A. Donenfeld <Jason@zx2c4.com>
2021-01-28 15:21:33 -05:00
|
|
|
if ctxt.Debugvlog || !ctxt.IsAsm {
|
|
|
|
ctxt.Logf("auto-SPWRITE: %s %v\n", cursym.Name, p)
|
|
|
|
if !ctxt.IsAsm {
|
|
|
|
ctxt.Diag("invalid auto-SPWRITE in non-assembly")
|
|
|
|
ctxt.DiagFlush()
|
|
|
|
log.Fatalf("bad SPWRITE")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-10-04 04:02:38 +10:00
|
|
|
}
|
|
|
|
|
2021-10-16 03:59:41 +11:00
|
|
|
var callCount int
|
2020-07-19 00:30:12 -04:00
|
|
|
for p := cursym.Func().Text; p != nil; p = p.Link {
|
2021-08-21 09:06:25 +00:00
|
|
|
markRelocs(p)
|
2023-08-17 01:13:32 +10:00
|
|
|
if p.Mark&NEED_JAL_RELOC == NEED_JAL_RELOC {
|
2021-10-16 03:59:41 +11:00
|
|
|
callCount++
|
|
|
|
}
|
2020-01-01 01:28:22 +11:00
|
|
|
}
|
2021-10-16 03:59:41 +11:00
|
|
|
const callTrampSize = 8 // 2 machine instructions.
|
|
|
|
maxTrampSize := int64(callCount * callTrampSize)
|
2020-01-01 01:28:22 +11:00
|
|
|
|
2019-11-04 02:31:37 +11:00
|
|
|
// Compute instruction addresses. Once we do that, we need to check for
|
|
|
|
// overextended jumps and branches. Within each iteration, Pc differences
|
|
|
|
// are always lower bounds (since the program gets monotonically longer,
|
|
|
|
// a fixed point will be reached). No attempt to handle functions > 2GiB.
|
|
|
|
for {
|
2021-10-16 03:59:41 +11:00
|
|
|
big, rescan := false, false
|
|
|
|
maxPC := setPCs(cursym.Func().Text, 0)
|
|
|
|
if maxPC+maxTrampSize > (1 << 20) {
|
|
|
|
big = true
|
|
|
|
}
|
2019-09-08 01:56:26 +10:00
|
|
|
|
2020-07-19 00:30:12 -04:00
|
|
|
for p := cursym.Func().Text; p != nil; p = p.Link {
|
2019-11-04 02:31:37 +11:00
|
|
|
switch p.As {
|
2020-03-31 01:57:52 +11:00
|
|
|
case ABEQ, ABEQZ, ABGE, ABGEU, ABGEZ, ABGT, ABGTU, ABGTZ, ABLE, ABLEU, ABLEZ, ABLT, ABLTU, ABLTZ, ABNE, ABNEZ:
|
2019-11-04 02:31:37 +11:00
|
|
|
if p.To.Type != obj.TYPE_BRANCH {
|
|
|
|
panic("assemble: instruction with branch-like opcode lacks destination")
|
|
|
|
}
|
2020-08-28 17:10:32 +00:00
|
|
|
offset := p.To.Target().Pc - p.Pc
|
2019-11-04 02:31:37 +11:00
|
|
|
if offset < -4096 || 4096 <= offset {
|
|
|
|
// Branch is long. Replace it with a jump.
|
|
|
|
jmp := obj.Appendp(p, newprog)
|
|
|
|
jmp.As = AJAL
|
|
|
|
jmp.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO}
|
|
|
|
jmp.To = obj.Addr{Type: obj.TYPE_BRANCH}
|
2020-08-28 17:10:32 +00:00
|
|
|
jmp.To.SetTarget(p.To.Target())
|
2019-11-04 02:31:37 +11:00
|
|
|
|
2019-11-04 04:40:47 +11:00
|
|
|
p.As = InvertBranch(p.As)
|
2020-08-28 17:10:32 +00:00
|
|
|
p.To.SetTarget(jmp.Link)
|
2019-11-04 02:31:37 +11:00
|
|
|
|
|
|
|
// We may have made previous branches too long,
|
|
|
|
// so recheck them.
|
|
|
|
rescan = true
|
|
|
|
}
|
|
|
|
case AJAL:
|
cmd/internal/obj/riscv,cmd/link/internal/riscv64: add call trampolines for riscv64
CALL and JMP on riscv64 are currently implemented as an AUIPC+JALR pair. This means
that every call requires two instructions and makes use of the REG_TMP register,
even when the symbol would be directly reachable via a single JAL instruction.
Add support for call trampolines - CALL and JMP are now implemented as a single JAL
instruction, with the linker generating trampolines in the case where the symbol is
not reachable (more than +/-1MiB from the JAL instruction), is an unknown symbol or
does not yet have an address assigned. Each trampoline contains an AUIPC+JALR pair,
which the relocation is applied to.
Due to the limited reachability of the JAL instruction, combined with the way that
the Go linker currently assigns symbol addresses, there are cases where a call is to
a symbol that has no address currently assigned. In this situation we have to assume
that a trampoline will be required, however we can patch this up during relocation,
potentially calling directly instead. This means that we will end up with trampolines
that are unused. In the case of the Go binary, there are around 3,500 trampolines of
which approximately 2,300 are unused (around 9200 bytes of machine instructions).
Overall, this removes over 72,000 AUIPC instructions from the Go binary.
Change-Id: I2d9ecfb85dfc285c7729a3cd0b3a77b6f6c98be0
Reviewed-on: https://go-review.googlesource.com/c/go/+/345051
Trust: Joel Sing <joel@sing.id.au>
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
2021-08-26 01:33:29 +10:00
|
|
|
// Linker will handle the intersymbol case and trampolines.
|
2020-08-28 17:10:32 +00:00
|
|
|
if p.To.Target() == nil {
|
2021-10-16 03:59:41 +11:00
|
|
|
if !big {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
// This function is going to be too large for JALs
|
|
|
|
// to reach trampolines. Replace with AUIPC+JALR.
|
|
|
|
jmp := obj.Appendp(p, newprog)
|
|
|
|
jmp.As = AJALR
|
|
|
|
jmp.From = p.From
|
|
|
|
jmp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP}
|
|
|
|
|
|
|
|
p.As = AAUIPC
|
2023-08-17 01:13:32 +10:00
|
|
|
p.Mark = (p.Mark &^ NEED_JAL_RELOC) | NEED_CALL_RELOC
|
2023-04-12 11:23:13 +08:00
|
|
|
p.AddRestSource(obj.Addr{Type: obj.TYPE_CONST, Offset: p.To.Offset, Sym: p.To.Sym})
|
2021-10-16 03:59:41 +11:00
|
|
|
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: 0}
|
|
|
|
p.Reg = obj.REG_NONE
|
|
|
|
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP}
|
|
|
|
|
|
|
|
rescan = true
|
cmd/internal/obj/riscv,cmd/link/internal/riscv64: add call trampolines for riscv64
CALL and JMP on riscv64 are currently implemented as an AUIPC+JALR pair. This means
that every call requires two instructions and makes use of the REG_TMP register,
even when the symbol would be directly reachable via a single JAL instruction.
Add support for call trampolines - CALL and JMP are now implemented as a single JAL
instruction, with the linker generating trampolines in the case where the symbol is
not reachable (more than +/-1MiB from the JAL instruction), is an unknown symbol or
does not yet have an address assigned. Each trampoline contains an AUIPC+JALR pair,
which the relocation is applied to.
Due to the limited reachability of the JAL instruction, combined with the way that
the Go linker currently assigns symbol addresses, there are cases where a call is to
a symbol that has no address currently assigned. In this situation we have to assume
that a trampoline will be required, however we can patch this up during relocation,
potentially calling directly instead. This means that we will end up with trampolines
that are unused. In the case of the Go binary, there are around 3,500 trampolines of
which approximately 2,300 are unused (around 9200 bytes of machine instructions).
Overall, this removes over 72,000 AUIPC instructions from the Go binary.
Change-Id: I2d9ecfb85dfc285c7729a3cd0b3a77b6f6c98be0
Reviewed-on: https://go-review.googlesource.com/c/go/+/345051
Trust: Joel Sing <joel@sing.id.au>
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
2021-08-26 01:33:29 +10:00
|
|
|
break
|
2019-11-04 02:31:37 +11:00
|
|
|
}
|
2020-08-28 17:10:32 +00:00
|
|
|
offset := p.To.Target().Pc - p.Pc
|
2019-11-04 02:31:37 +11:00
|
|
|
if offset < -(1<<20) || (1<<20) <= offset {
|
|
|
|
// Replace with 2-instruction sequence. This assumes
|
|
|
|
// that TMP is not live across J instructions, since
|
|
|
|
// it is reserved by SSA.
|
|
|
|
jmp := obj.Appendp(p, newprog)
|
|
|
|
jmp.As = AJALR
|
2020-08-19 03:07:26 +10:00
|
|
|
jmp.From = p.From
|
|
|
|
jmp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP}
|
2019-11-04 02:31:37 +11:00
|
|
|
|
|
|
|
// p.From is not generally valid, however will be
|
|
|
|
// fixed up in the next loop.
|
|
|
|
p.As = AAUIPC
|
|
|
|
p.From = obj.Addr{Type: obj.TYPE_BRANCH, Sym: p.From.Sym}
|
2020-08-28 17:10:32 +00:00
|
|
|
p.From.SetTarget(p.To.Target())
|
2021-08-25 20:08:37 +10:00
|
|
|
p.Reg = obj.REG_NONE
|
2019-11-04 02:31:37 +11:00
|
|
|
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP}
|
|
|
|
|
|
|
|
rescan = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if !rescan {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now that there are no long branches, resolve branch and jump targets.
|
|
|
|
// At this point, instruction rewriting which changes the number of
|
|
|
|
// instructions will break everything--don't do it!
|
2020-07-19 00:30:12 -04:00
|
|
|
for p := cursym.Func().Text; p != nil; p = p.Link {
|
2019-09-19 03:53:50 +10:00
|
|
|
switch p.As {
|
cmd/internal/obj/riscv,cmd/link/internal/riscv64: add call trampolines for riscv64
CALL and JMP on riscv64 are currently implemented as an AUIPC+JALR pair. This means
that every call requires two instructions and makes use of the REG_TMP register,
even when the symbol would be directly reachable via a single JAL instruction.
Add support for call trampolines - CALL and JMP are now implemented as a single JAL
instruction, with the linker generating trampolines in the case where the symbol is
not reachable (more than +/-1MiB from the JAL instruction), is an unknown symbol or
does not yet have an address assigned. Each trampoline contains an AUIPC+JALR pair,
which the relocation is applied to.
Due to the limited reachability of the JAL instruction, combined with the way that
the Go linker currently assigns symbol addresses, there are cases where a call is to
a symbol that has no address currently assigned. In this situation we have to assume
that a trampoline will be required, however we can patch this up during relocation,
potentially calling directly instead. This means that we will end up with trampolines
that are unused. In the case of the Go binary, there are around 3,500 trampolines of
which approximately 2,300 are unused (around 9200 bytes of machine instructions).
Overall, this removes over 72,000 AUIPC instructions from the Go binary.
Change-Id: I2d9ecfb85dfc285c7729a3cd0b3a77b6f6c98be0
Reviewed-on: https://go-review.googlesource.com/c/go/+/345051
Trust: Joel Sing <joel@sing.id.au>
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
2021-08-26 01:33:29 +10:00
|
|
|
case ABEQ, ABEQZ, ABGE, ABGEU, ABGEZ, ABGT, ABGTU, ABGTZ, ABLE, ABLEU, ABLEZ, ABLT, ABLTU, ABLTZ, ABNE, ABNEZ:
|
2019-09-19 03:53:50 +10:00
|
|
|
switch p.To.Type {
|
|
|
|
case obj.TYPE_BRANCH:
|
2020-08-28 17:10:32 +00:00
|
|
|
p.To.Type, p.To.Offset = obj.TYPE_CONST, p.To.Target().Pc-p.Pc
|
2019-09-19 03:53:50 +10:00
|
|
|
case obj.TYPE_MEM:
|
|
|
|
panic("unhandled type")
|
|
|
|
}
|
2019-11-04 02:31:37 +11:00
|
|
|
|
cmd/internal/obj/riscv,cmd/link/internal/riscv64: add call trampolines for riscv64
CALL and JMP on riscv64 are currently implemented as an AUIPC+JALR pair. This means
that every call requires two instructions and makes use of the REG_TMP register,
even when the symbol would be directly reachable via a single JAL instruction.
Add support for call trampolines - CALL and JMP are now implemented as a single JAL
instruction, with the linker generating trampolines in the case where the symbol is
not reachable (more than +/-1MiB from the JAL instruction), is an unknown symbol or
does not yet have an address assigned. Each trampoline contains an AUIPC+JALR pair,
which the relocation is applied to.
Due to the limited reachability of the JAL instruction, combined with the way that
the Go linker currently assigns symbol addresses, there are cases where a call is to
a symbol that has no address currently assigned. In this situation we have to assume
that a trampoline will be required, however we can patch this up during relocation,
potentially calling directly instead. This means that we will end up with trampolines
that are unused. In the case of the Go binary, there are around 3,500 trampolines of
which approximately 2,300 are unused (around 9200 bytes of machine instructions).
Overall, this removes over 72,000 AUIPC instructions from the Go binary.
Change-Id: I2d9ecfb85dfc285c7729a3cd0b3a77b6f6c98be0
Reviewed-on: https://go-review.googlesource.com/c/go/+/345051
Trust: Joel Sing <joel@sing.id.au>
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
2021-08-26 01:33:29 +10:00
|
|
|
case AJAL:
|
|
|
|
// Linker will handle the intersymbol case and trampolines.
|
|
|
|
if p.To.Target() != nil {
|
|
|
|
p.To.Type, p.To.Offset = obj.TYPE_CONST, p.To.Target().Pc-p.Pc
|
|
|
|
}
|
|
|
|
|
2019-11-04 02:31:37 +11:00
|
|
|
case AAUIPC:
|
|
|
|
if p.From.Type == obj.TYPE_BRANCH {
|
2020-08-28 17:10:32 +00:00
|
|
|
low, high, err := Split32BitImmediate(p.From.Target().Pc - p.Pc)
|
2019-11-04 02:31:37 +11:00
|
|
|
if err != nil {
|
2020-08-28 17:10:32 +00:00
|
|
|
ctxt.Diag("%v: jump displacement %d too large", p, p.To.Target().Pc-p.Pc)
|
2019-11-04 02:31:37 +11:00
|
|
|
}
|
|
|
|
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: high, Sym: cursym}
|
2023-10-27 14:48:25 +08:00
|
|
|
p.Link.To.Offset = low
|
2019-11-04 02:31:37 +11:00
|
|
|
}
|
2023-11-12 17:05:57 +08:00
|
|
|
|
|
|
|
case obj.APCALIGN:
|
|
|
|
alignedValue := p.From.Offset
|
|
|
|
if (alignedValue&(alignedValue-1) != 0) || 4 > alignedValue || alignedValue > 2048 {
|
|
|
|
ctxt.Diag("alignment value of an instruction must be a power of two and in the range [4, 2048], got %d\n", alignedValue)
|
|
|
|
}
|
|
|
|
// Update the current text symbol alignment value.
|
|
|
|
if int32(alignedValue) > cursym.Func().Align {
|
|
|
|
cursym.Func().Align = int32(alignedValue)
|
|
|
|
}
|
2019-09-19 03:53:50 +10:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-09-08 01:56:26 +10:00
|
|
|
// Validate all instructions - this provides nice error messages.
|
2020-07-19 00:30:12 -04:00
|
|
|
for p := cursym.Func().Text; p != nil; p = p.Link {
|
2019-12-19 02:09:45 +11:00
|
|
|
for _, ins := range instructionsForProg(p) {
|
|
|
|
ins.validate(ctxt)
|
|
|
|
}
|
2019-09-08 01:56:26 +10:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-11-12 17:05:57 +08:00
|
|
|
// pcAlignPadLength returns the number of bytes of padding required to
// advance pc to the next multiple of alignedValue. alignedValue must
// be a power of two.
func pcAlignPadLength(pc int64, alignedValue int64) int {
	mask := alignedValue - 1
	return int((alignedValue - pc&mask) & mask)
}
|
|
|
|
|
2019-11-04 04:32:32 +11:00
|
|
|
// stacksplit inserts the stack-bounds check and the conditional call to
// runtime.morestack at the start of a function with the given framesize.
// It appends the check sequence after p and returns the last instruction
// emitted. X6 holds the loaded stack guard and X7 is used as scratch for
// the large-frame comparison; on a failed check, register arguments are
// spilled, morestack is called via JAL with X5 as the link register, and
// control jumps back to re-run the check.
func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgAlloc, framesize int64) *obj.Prog {
	// Leaf function with no frame is effectively NOSPLIT.
	if framesize == 0 {
		return p
	}

	if ctxt.Flag_maymorestack != "" {
		// Save LR and REGCTXT
		const frameSize = 16
		p = ctxt.StartUnsafePoint(p, newprog)

		// Spill Arguments. This has to happen before we open
		// any more frame space.
		p = cursym.Func().SpillRegisterArgs(p, newprog)

		// MOV LR, -16(SP)
		p = obj.Appendp(p, newprog)
		p.As = AMOV
		p.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_LR}
		p.To = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_SP, Offset: -frameSize}
		// ADDI $-16, SP
		p = obj.Appendp(p, newprog)
		p.As = AADDI
		p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: -frameSize}
		p.Reg = REG_SP
		p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_SP}
		p.Spadj = frameSize
		// MOV REGCTXT, 8(SP)
		p = obj.Appendp(p, newprog)
		p.As = AMOV
		p.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_CTXT}
		p.To = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_SP, Offset: 8}

		// CALL maymorestack
		p = obj.Appendp(p, newprog)
		p.As = obj.ACALL
		p.To.Type = obj.TYPE_BRANCH
		// See ../x86/obj6.go
		p.To.Sym = ctxt.LookupABI(ctxt.Flag_maymorestack, cursym.ABI())
		jalToSym(ctxt, p, REG_X5)

		// Restore LR and REGCTXT

		// MOV 8(SP), REGCTXT
		p = obj.Appendp(p, newprog)
		p.As = AMOV
		p.From = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_SP, Offset: 8}
		p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_CTXT}
		// MOV (SP), LR
		p = obj.Appendp(p, newprog)
		p.As = AMOV
		p.From = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_SP, Offset: 0}
		p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_LR}
		// ADDI $16, SP
		p = obj.Appendp(p, newprog)
		p.As = AADDI
		p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: frameSize}
		p.Reg = REG_SP
		p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_SP}
		p.Spadj = -frameSize

		// Unspill arguments
		p = cursym.Func().UnspillRegisterArgs(p, newprog)
		p = ctxt.EndUnsafePoint(p, newprog, -1)
	}

	// Jump back to here after morestack returns.
	startPred := p

	// MOV	g_stackguard(g), X6
	p = obj.Appendp(p, newprog)
	p.As = AMOV
	p.From.Type = obj.TYPE_MEM
	p.From.Reg = REGG
	p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
	if cursym.CFunc() {
		p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
	}
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_X6

	// Mark the stack bound check and morestack call async nonpreemptible.
	// If we get preempted here, when resumed the preemption request is
	// cleared, but we'll still call morestack, which will double the stack
	// unnecessarily. See issue #35470.
	p = ctxt.StartUnsafePoint(p, newprog)

	// to_done branches past the morestack call when the stack is big
	// enough; to_more (large frames only) branches to the morestack call
	// when SP-framesize would underflow.
	var to_done, to_more *obj.Prog

	if framesize <= abi.StackSmall {
		// small stack
		//	// if SP > stackguard { goto done }
		//	BLTU	stackguard, SP, done
		p = obj.Appendp(p, newprog)
		p.As = ABLTU
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_X6
		p.Reg = REG_SP
		p.To.Type = obj.TYPE_BRANCH
		to_done = p
	} else {
		// large stack: SP-framesize < stackguard-StackSmall
		offset := int64(framesize) - abi.StackSmall
		if framesize > abi.StackBig {
			// Such a large stack we need to protect against underflow.
			// The runtime guarantees SP > objabi.StackBig, but
			// framesize is large enough that SP-framesize may
			// underflow, causing a direct comparison with the
			// stack guard to incorrectly succeed. We explicitly
			// guard against underflow.
			//
			//	MOV	$(framesize-StackSmall), X7
			//	BLTU	SP, X7, label-of-call-to-morestack

			p = obj.Appendp(p, newprog)
			p.As = AMOV
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = offset
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REG_X7

			p = obj.Appendp(p, newprog)
			p.As = ABLTU
			p.From.Type = obj.TYPE_REG
			p.From.Reg = REG_SP
			p.Reg = REG_X7
			p.To.Type = obj.TYPE_BRANCH
			to_more = p
		}

		// Check against the stack guard. We've ensured this won't underflow.
		//	ADD	$-(framesize-StackSmall), SP, X7
		//	// if X7 > stackguard { goto done }
		//	BLTU	stackguard, X7, done
		p = obj.Appendp(p, newprog)
		p.As = AADDI
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = -offset
		p.Reg = REG_SP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_X7

		p = obj.Appendp(p, newprog)
		p.As = ABLTU
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_X6
		p.Reg = REG_X7
		p.To.Type = obj.TYPE_BRANCH
		to_done = p
	}

	// Spill the register args that could be clobbered by the
	// morestack code
	p = ctxt.EmitEntryStackMap(cursym, p, newprog)
	p = cursym.Func().SpillRegisterArgs(p, newprog)

	// CALL runtime.morestack(SB)
	p = obj.Appendp(p, newprog)
	p.As = obj.ACALL
	p.To.Type = obj.TYPE_BRANCH

	if cursym.CFunc() {
		p.To.Sym = ctxt.Lookup("runtime.morestackc")
	} else if !cursym.Func().Text.From.Sym.NeedCtxt() {
		p.To.Sym = ctxt.Lookup("runtime.morestack_noctxt")
	} else {
		p.To.Sym = ctxt.Lookup("runtime.morestack")
	}
	if to_more != nil {
		to_more.To.SetTarget(p)
	}
	jalToSym(ctxt, p, REG_X5)

	// The instructions which unspill regs should be preemptible.
	p = ctxt.EndUnsafePoint(p, newprog, -1)
	p = cursym.Func().UnspillRegisterArgs(p, newprog)

	// JMP start
	p = obj.Appendp(p, newprog)
	p.As = AJAL
	p.To = obj.Addr{Type: obj.TYPE_BRANCH}
	p.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO}
	p.To.SetTarget(startPred.Link)

	// placeholder for to_done's jump target
	p = obj.Appendp(p, newprog)
	p.As = obj.ANOP // zero-width place holder
	to_done.To.SetTarget(p)

	return p
}
|
|
|
|
|
2019-10-04 04:02:38 +10:00
|
|
|
// signExtend sign extends val starting at bit bit.
|
|
|
|
// signExtend interprets the low bit bits of val as a signed value and
// returns it sign-extended to 64 bits.
func signExtend(val int64, bit uint) int64 {
	shift := 64 - bit
	return (val << shift) >> shift
}
|
|
|
|
|
2019-11-04 01:05:46 +11:00
|
|
|
// Split32BitImmediate splits a signed 32-bit immediate into a signed 20-bit
|
2019-10-04 04:02:38 +10:00
|
|
|
// upper immediate and a signed 12-bit lower immediate to be added to the upper
|
|
|
|
// result. For example, high may be used in LUI and low in a following ADDI to
|
|
|
|
// generate a full 32-bit constant.
|
2019-11-04 01:05:46 +11:00
|
|
|
func Split32BitImmediate(imm int64) (low, high int64, err error) {
|
2023-08-28 01:22:02 +10:00
|
|
|
if err := immIFits(imm, 32); err != nil {
|
|
|
|
return 0, 0, err
|
2019-10-04 04:02:38 +10:00
|
|
|
}
|
|
|
|
|
2021-08-26 01:15:23 +10:00
|
|
|
// Nothing special needs to be done if the immediate fits in 12 bits.
|
2023-08-28 01:22:02 +10:00
|
|
|
if err := immIFits(imm, 12); err == nil {
|
2019-10-04 04:02:38 +10:00
|
|
|
return imm, 0, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
high = imm >> 12
|
|
|
|
|
|
|
|
// The bottom 12 bits will be treated as signed.
|
|
|
|
//
|
|
|
|
// If that will result in a negative 12 bit number, add 1 to
|
|
|
|
// our upper bits to adjust for the borrow.
|
|
|
|
//
|
|
|
|
// It is not possible for this increment to overflow. To
|
|
|
|
// overflow, the 20 top bits would be 1, and the sign bit for
|
|
|
|
// the low 12 bits would be set, in which case the entire 32
|
|
|
|
// bit pattern fits in a 12 bit signed value.
|
|
|
|
if imm&(1<<11) != 0 {
|
|
|
|
high++
|
|
|
|
}
|
|
|
|
|
|
|
|
low = signExtend(imm, 12)
|
|
|
|
high = signExtend(high, 20)
|
|
|
|
|
|
|
|
return low, high, nil
|
|
|
|
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// regVal maps register r, which must lie in [min, max], to its zero-based
// encoding value. It panics if r falls outside that range.
func regVal(r, min, max uint32) uint32 {
	if min <= r && r <= max {
		return r - min
	}
	panic(fmt.Sprintf("register out of range, want %d <= %d <= %d", min, r, max))
}
|
|
|
|
|
|
|
|
// regI returns the encoding value for integer register r (X0-X31).
// It panics, via regVal, if r is not an integer register.
func regI(r uint32) uint32 {
	return regVal(r, REG_X0, REG_X31)
}
|
|
|
|
|
2019-09-19 01:01:07 +10:00
|
|
|
// regF returns the encoding value for floating point register r (F0-F31).
// It panics, via regVal, if r is not a float register.
func regF(r uint32) uint32 {
	return regVal(r, REG_F0, REG_F31)
}
|
|
|
|
|
2024-06-28 00:03:53 +10:00
|
|
|
// regV returns the encoding value for vector register r (V0-V31).
// It panics, via regVal, if r is not a vector register.
func regV(r uint32) uint32 {
	return regVal(r, REG_V0, REG_V31)
}
|
|
|
|
|
2019-09-08 04:11:07 +10:00
|
|
|
// regAddr extracts a register from an Addr.
|
2019-12-19 02:09:45 +11:00
|
|
|
func regAddr(a obj.Addr, min, max uint32) uint32 {
|
2019-09-08 04:11:07 +10:00
|
|
|
if a.Type != obj.TYPE_REG {
|
|
|
|
panic(fmt.Sprintf("ill typed: %+v", a))
|
|
|
|
}
|
2019-12-19 02:09:45 +11:00
|
|
|
return regVal(uint32(a.Reg), min, max)
|
2019-09-08 04:11:07 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
// regIAddr extracts the integer register (X0-X31) from Addr a and
// returns its encoding value. It panics if a does not hold one.
func regIAddr(a obj.Addr) uint32 {
	return regAddr(a, REG_X0, REG_X31)
}
|
|
|
|
|
2019-09-19 01:01:07 +10:00
|
|
|
// regFAddr extracts the float register (F0-F31) from Addr a and
// returns its encoding value. It panics if a does not hold one.
func regFAddr(a obj.Addr) uint32 {
	return regAddr(a, REG_F0, REG_F31)
}
|
|
|
|
|
2023-08-28 01:22:02 +10:00
|
|
|
// immEven returns nil if x is a multiple of two and a descriptive
// error otherwise.
func immEven(x int64) error {
	if x%2 == 0 {
		return nil
	}
	return fmt.Errorf("immediate %#x is not a multiple of two", x)
}
|
|
|
|
|
cmd/asm,cmd/internal/obj/riscv: implement vector configuration setting instructions
Implement vector configuration setting instructions (VSETVLI,
VSETIVLI, VSETL). These allow the vector length (vl) and vector
type (vtype) CSRs to be configured via a single instruction.
Unfortunately each instruction has its own dedicated encoding.
In the case of VSETVLI/VSETIVLI, the vector type is specified via
a series of special operands, which specify the selected element
width (E8, E16, E32, E64), the vector register group multiplier
(M1, M2, M4, M8, MF2, MF4, MF8), the vector tail policy (TU, TA)
and vector mask policy (MU, MA). Note that the order of these
special operands matches non-Go assemblers.
Partially based on work by Pengcheng Wang <wangpengcheng.pp@bytedance.com>.
Cq-Include-Trybots: luci.golang.try:gotip-linux-riscv64
Change-Id: I431f59c1e048a3e84754f0643a963da473a741fe
Reviewed-on: https://go-review.googlesource.com/c/go/+/631936
Reviewed-by: Mark Ryan <markdryan@rivosinc.com>
Reviewed-by: Meng Zhuo <mengzhuo1203@gmail.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
2024-11-24 12:39:20 +11:00
|
|
|
// immFits reports (via a nil or non-nil error) whether the immediate x
// is representable in nbits bits, treating it as signed or unsigned
// according to the signed argument.
func immFits(x int64, nbits uint, signed bool) error {
	label, min, max := "unsigned", int64(0), int64(1)<<nbits-1
	if signed {
		label = "signed"
		min, max = int64(-1)<<(nbits-1), int64(1)<<(nbits-1)-1
	}
	if min <= x && x <= max {
		return nil
	}
	// Narrow immediates read most naturally in decimal, wider ones in hex.
	if nbits <= 16 {
		return fmt.Errorf("%s immediate %d must be in range [%d, %d] (%d bits)", label, x, min, max, nbits)
	}
	return fmt.Errorf("%s immediate %#x must be in range [%#x, %#x] (%d bits)", label, x, min, max, nbits)
}
|
|
|
|
|
cmd/asm,cmd/internal/obj/riscv: implement vector configuration setting instructions
Implement vector configuration setting instructions (VSETVLI,
VSETIVLI, VSETL). These allow the vector length (vl) and vector
type (vtype) CSRs to be configured via a single instruction.
Unfortunately each instruction has its own dedicated encoding.
In the case of VSETVLI/VSETIVLI, the vector type is specified via
a series of special operands, which specify the selected element
width (E8, E16, E32, E64), the vector register group multiplier
(M1, M2, M4, M8, MF2, MF4, MF8), the vector tail policy (TU, TA)
and vector mask policy (MU, MA). Note that the order of these
special operands matches non-Go assemblers.
Partially based on work by Pengcheng Wang <wangpengcheng.pp@bytedance.com>.
Cq-Include-Trybots: luci.golang.try:gotip-linux-riscv64
Change-Id: I431f59c1e048a3e84754f0643a963da473a741fe
Reviewed-on: https://go-review.googlesource.com/c/go/+/631936
Reviewed-by: Mark Ryan <markdryan@rivosinc.com>
Reviewed-by: Meng Zhuo <mengzhuo1203@gmail.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
2024-11-24 12:39:20 +11:00
|
|
|
// immIFits checks whether the immediate value x fits in nbits bits
// as a signed integer. If it does not, an error is returned.
func immIFits(x int64, nbits uint) error {
	return immFits(x, nbits, true)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// immI extracts the signed integer of the specified size from an immediate,
// returning its low nbits bits. It panics if the immediate does not fit,
// so out-of-range values must be diagnosed before reaching this point.
func immI(as obj.As, imm int64, nbits uint) uint32 {
	if err := immIFits(imm, nbits); err != nil {
		panic(fmt.Sprintf("%v: %v", as, err))
	}
	// Mask to the requested width (drops the sign-extension bits).
	return uint32(imm) & ((1 << nbits) - 1)
}
|
|
|
|
|
2023-08-28 02:08:56 +10:00
|
|
|
// wantImmI emits a diagnostic if imm does not fit in nbits bits as a
// signed integer.
func wantImmI(ctxt *obj.Link, ins *instruction, imm int64, nbits uint) {
	if err := immIFits(imm, nbits); err != nil {
		ctxt.Diag("%v: %v", ins, err)
	}
}
|
|
|
|
|
cmd/asm,cmd/internal/obj/riscv: implement vector configuration setting instructions
Implement vector configuration setting instructions (VSETVLI,
VSETIVLI, VSETL). These allow the vector length (vl) and vector
type (vtype) CSRs to be configured via a single instruction.
Unfortunately each instruction has its own dedicated encoding.
In the case of VSETVLI/VSETIVLI, the vector type is specified via
a series of special operands, which specify the selected element
width (E8, E16, E32, E64), the vector register group multiplier
(M1, M2, M4, M8, MF2, MF4, MF8), the vector tail policy (TU, TA)
and vector mask policy (MU, MA). Note that the order of these
special operands matches non-Go assemblers.
Partially based on work by Pengcheng Wang <wangpengcheng.pp@bytedance.com>.
Cq-Include-Trybots: luci.golang.try:gotip-linux-riscv64
Change-Id: I431f59c1e048a3e84754f0643a963da473a741fe
Reviewed-on: https://go-review.googlesource.com/c/go/+/631936
Reviewed-by: Mark Ryan <markdryan@rivosinc.com>
Reviewed-by: Meng Zhuo <mengzhuo1203@gmail.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
2024-11-24 12:39:20 +11:00
|
|
|
// immUFits checks whether the immediate value x fits in nbits bits
// as an unsigned integer. If it does not, an error is returned.
func immUFits(x int64, nbits uint) error {
	return immFits(x, nbits, false)
}
|
|
|
|
|
|
|
|
// immU extracts the unsigned integer of the specified size from an immediate,
// returning its low nbits bits. It panics if the immediate does not fit,
// so out-of-range values must be diagnosed before reaching this point.
func immU(as obj.As, imm int64, nbits uint) uint32 {
	if err := immUFits(imm, nbits); err != nil {
		panic(fmt.Sprintf("%v: %v", as, err))
	}
	return uint32(imm) & ((1 << nbits) - 1)
}
|
|
|
|
|
|
|
|
// wantImmU emits a diagnostic if imm does not fit in nbits bits as an
// unsigned integer.
func wantImmU(ctxt *obj.Link, ins *instruction, imm int64, nbits uint) {
	if err := immUFits(imm, nbits); err != nil {
		ctxt.Diag("%v: %v", ins, err)
	}
}
|
|
|
|
|
2023-08-28 02:08:56 +10:00
|
|
|
// wantReg emits a diagnostic if r is not a register in [min, max].
// descr names the register class ("integer", "float", ...) and pos
// names the operand position ("rd", "rs1", ...) for the message.
func wantReg(ctxt *obj.Link, ins *instruction, pos string, descr string, r, min, max uint32) {
	if r < min || r > max {
		// Distinguish a register of the wrong class from a missing one.
		var suffix string
		if r != obj.REG_NONE {
			suffix = fmt.Sprintf(" but got non-%s register %s", descr, RegName(int(r)))
		}
		ctxt.Diag("%v: expected %s register in %s position%s", ins, descr, pos, suffix)
	}
}
|
|
|
|
|
2023-08-28 02:08:56 +10:00
|
|
|
// wantNoneReg emits a diagnostic if r is a real register rather than
// obj.REG_NONE, i.e. if an operand is present where none is expected.
func wantNoneReg(ctxt *obj.Link, ins *instruction, pos string, r uint32) {
	if r != obj.REG_NONE {
		ctxt.Diag("%v: expected no register in %s but got register %s", ins, pos, RegName(int(r)))
	}
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// wantIntReg checks that r is an integer register.
func wantIntReg(ctxt *obj.Link, ins *instruction, pos string, r uint32) {
	wantReg(ctxt, ins, pos, "integer", r, REG_X0, REG_X31)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// wantFloatReg checks that r is a floating-point register.
func wantFloatReg(ctxt *obj.Link, ins *instruction, pos string, r uint32) {
	wantReg(ctxt, ins, pos, "float", r, REG_F0, REG_F31)
}
|
|
|
|
|
2024-06-28 00:03:53 +10:00
|
|
|
// wantVectorReg checks that r is a vector register.
func wantVectorReg(ctxt *obj.Link, ins *instruction, pos string, r uint32) {
	wantReg(ctxt, ins, pos, "vector", r, REG_V0, REG_V31)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// wantEvenOffset checks that the offset is a multiple of two, emitting
// a diagnostic if it is not.
func wantEvenOffset(ctxt *obj.Link, ins *instruction, offset int64) {
	if err := immEven(offset); err != nil {
		ctxt.Diag("%v: %v", ins, err)
	}
}
|
|
|
|
|
2024-01-31 10:37:35 +08:00
|
|
|
// validateRII checks an R-type instruction with integer rd and rs1
// operands and no rs2 or rs3.
func validateRII(ctxt *obj.Link, ins *instruction) {
	wantIntReg(ctxt, ins, "rd", ins.rd)
	wantIntReg(ctxt, ins, "rs1", ins.rs1)
	wantNoneReg(ctxt, ins, "rs2", ins.rs2)
	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// validateRIII checks an R-type instruction with integer rd, rs1 and
// rs2 operands and no rs3.
func validateRIII(ctxt *obj.Link, ins *instruction) {
	wantIntReg(ctxt, ins, "rd", ins.rd)
	wantIntReg(ctxt, ins, "rs1", ins.rs1)
	wantIntReg(ctxt, ins, "rs2", ins.rs2)
	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// validateRFFF checks an R-type instruction with float rd, rs1 and
// rs2 operands and no rs3.
func validateRFFF(ctxt *obj.Link, ins *instruction) {
	wantFloatReg(ctxt, ins, "rd", ins.rd)
	wantFloatReg(ctxt, ins, "rs1", ins.rs1)
	wantFloatReg(ctxt, ins, "rs2", ins.rs2)
	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
|
|
|
// validateRFFFF checks an R4-type instruction with float rd, rs1, rs2
// and rs3 operands.
func validateRFFFF(ctxt *obj.Link, ins *instruction) {
	wantFloatReg(ctxt, ins, "rd", ins.rd)
	wantFloatReg(ctxt, ins, "rs1", ins.rs1)
	wantFloatReg(ctxt, ins, "rs2", ins.rs2)
	wantFloatReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// validateRFFI checks an R-type instruction with an integer rd and
// float rs1 and rs2 operands, and no rs3.
func validateRFFI(ctxt *obj.Link, ins *instruction) {
	wantIntReg(ctxt, ins, "rd", ins.rd)
	wantFloatReg(ctxt, ins, "rs1", ins.rs1)
	wantFloatReg(ctxt, ins, "rs2", ins.rs2)
	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// validateRFI checks an R-type instruction with an integer rd, a float
// rs2 and no rs1 or rs3.
func validateRFI(ctxt *obj.Link, ins *instruction) {
	wantIntReg(ctxt, ins, "rd", ins.rd)
	wantNoneReg(ctxt, ins, "rs1", ins.rs1)
	wantFloatReg(ctxt, ins, "rs2", ins.rs2)
	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// validateRIF checks an R-type instruction with a float rd, an integer
// rs2 and no rs1 or rs3.
func validateRIF(ctxt *obj.Link, ins *instruction) {
	wantFloatReg(ctxt, ins, "rd", ins.rd)
	wantNoneReg(ctxt, ins, "rs1", ins.rs1)
	wantIntReg(ctxt, ins, "rs2", ins.rs2)
	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// validateRFF checks an R-type instruction with float rd and rs2
// operands and no rs1 or rs3.
func validateRFF(ctxt *obj.Link, ins *instruction) {
	wantFloatReg(ctxt, ins, "rd", ins.rd)
	wantNoneReg(ctxt, ins, "rs1", ins.rs1)
	wantFloatReg(ctxt, ins, "rs2", ins.rs2)
	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
2024-09-13 09:33:03 +02:00
|
|
|
// validateIII checks an I-type instruction with a 12 bit signed
// immediate and integer rd and rs1 operands.
func validateIII(ctxt *obj.Link, ins *instruction) {
	wantImmI(ctxt, ins, ins.imm, 12)
	wantIntReg(ctxt, ins, "rd", ins.rd)
	wantIntReg(ctxt, ins, "rs1", ins.rs1)
	wantNoneReg(ctxt, ins, "rs2", ins.rs2)
	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// validateIF checks an I-type instruction with a 12 bit signed
// immediate, a float rd and an integer rs1.
func validateIF(ctxt *obj.Link, ins *instruction) {
	wantImmI(ctxt, ins, ins.imm, 12)
	wantFloatReg(ctxt, ins, "rd", ins.rd)
	wantIntReg(ctxt, ins, "rs1", ins.rs1)
	wantNoneReg(ctxt, ins, "rs2", ins.rs2)
	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// validateSI checks an S-type instruction with a 12 bit signed
// immediate and integer rd and rs1 operands.
func validateSI(ctxt *obj.Link, ins *instruction) {
	wantImmI(ctxt, ins, ins.imm, 12)
	wantIntReg(ctxt, ins, "rd", ins.rd)
	wantIntReg(ctxt, ins, "rs1", ins.rs1)
	wantNoneReg(ctxt, ins, "rs2", ins.rs2)
	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// validateSF checks an S-type instruction with a 12 bit signed
// immediate, an integer rd and a float rs1.
func validateSF(ctxt *obj.Link, ins *instruction) {
	wantImmI(ctxt, ins, ins.imm, 12)
	wantIntReg(ctxt, ins, "rd", ins.rd)
	wantFloatReg(ctxt, ins, "rs1", ins.rs1)
	wantNoneReg(ctxt, ins, "rs2", ins.rs2)
	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// validateB checks a B-type (branch) instruction: an even immediate
// and integer rs1 and rs2 operands, with no rd.
func validateB(ctxt *obj.Link, ins *instruction) {
	// Offsets are multiples of two, so accept 13 bit immediates for the
	// 12 bit slot. We implicitly drop the least significant bit in encodeB.
	wantEvenOffset(ctxt, ins, ins.imm)
	wantImmI(ctxt, ins, ins.imm, 13)
	wantNoneReg(ctxt, ins, "rd", ins.rd)
	wantIntReg(ctxt, ins, "rs1", ins.rs1)
	wantIntReg(ctxt, ins, "rs2", ins.rs2)
	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// validateU checks a U-type instruction: a 20 bit signed immediate and
// an integer rd, with no source registers.
func validateU(ctxt *obj.Link, ins *instruction) {
	wantImmI(ctxt, ins, ins.imm, 20)
	wantIntReg(ctxt, ins, "rd", ins.rd)
	wantNoneReg(ctxt, ins, "rs1", ins.rs1)
	wantNoneReg(ctxt, ins, "rs2", ins.rs2)
	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// validateJ checks a J-type (jump) instruction: an even immediate and
// an integer rd, with no source registers.
func validateJ(ctxt *obj.Link, ins *instruction) {
	// Offsets are multiples of two, so accept 21 bit immediates for the
	// 20 bit slot. We implicitly drop the least significant bit in encodeJ.
	wantEvenOffset(ctxt, ins, ins.imm)
	wantImmI(ctxt, ins, ins.imm, 21)
	wantIntReg(ctxt, ins, "rd", ins.rd)
	wantNoneReg(ctxt, ins, "rs1", ins.rs1)
	wantNoneReg(ctxt, ins, "rs2", ins.rs2)
	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
cmd/asm,cmd/internal/obj/riscv: implement vector configuration setting instructions
Implement vector configuration setting instructions (VSETVLI,
VSETIVLI, VSETL). These allow the vector length (vl) and vector
type (vtype) CSRs to be configured via a single instruction.
Unfortunately each instruction has its own dedicated encoding.
In the case of VSETVLI/VSETIVLI, the vector type is specified via
a series of special operands, which specify the selected element
width (E8, E16, E32, E64), the vector register group multiplier
(M1, M2, M4, M8, MF2, MF4, MF8), the vector tail policy (TU, TA)
and vector mask policy (MU, MA). Note that the order of these
special operands matches non-Go assemblers.
Partially based on work by Pengcheng Wang <wangpengcheng.pp@bytedance.com>.
Cq-Include-Trybots: luci.golang.try:gotip-linux-riscv64
Change-Id: I431f59c1e048a3e84754f0643a963da473a741fe
Reviewed-on: https://go-review.googlesource.com/c/go/+/631936
Reviewed-by: Mark Ryan <markdryan@rivosinc.com>
Reviewed-by: Meng Zhuo <mengzhuo1203@gmail.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
2024-11-24 12:39:20 +11:00
|
|
|
// validateVsetvli checks a VSETVLI instruction: an 11 bit unsigned
// immediate (the vtype encoding) and integer rd and rs1 operands.
func validateVsetvli(ctxt *obj.Link, ins *instruction) {
	wantImmU(ctxt, ins, ins.imm, 11)
	wantIntReg(ctxt, ins, "rd", ins.rd)
	wantIntReg(ctxt, ins, "rs1", ins.rs1)
	wantNoneReg(ctxt, ins, "rs2", ins.rs2)
	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
|
|
|
// validateVsetivli checks a VSETIVLI instruction: a 10 bit unsigned
// immediate (the vtype encoding), an integer rd, and a 5 bit unsigned
// immediate carried in the rs1 field.
func validateVsetivli(ctxt *obj.Link, ins *instruction) {
	wantImmU(ctxt, ins, ins.imm, 10)
	wantIntReg(ctxt, ins, "rd", ins.rd)
	wantImmU(ctxt, ins, int64(ins.rs1), 5)
	wantNoneReg(ctxt, ins, "rs2", ins.rs2)
	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
|
|
|
// validateVsetvl checks a VSETVL instruction: integer rd, rs1 and rs2
// operands and no rs3.
func validateVsetvl(ctxt *obj.Link, ins *instruction) {
	wantIntReg(ctxt, ins, "rd", ins.rd)
	wantIntReg(ctxt, ins, "rs1", ins.rs1)
	wantIntReg(ctxt, ins, "rs2", ins.rs2)
	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// validateRaw checks an instruction whose immediate is emitted verbatim
// as the machine-code word.
func validateRaw(ctxt *obj.Link, ins *instruction) {
	// Treat the raw value specially as a 32-bit unsigned integer.
	// Nobody wants to enter negative machine code.
	if ins.imm < 0 || 1<<32 <= ins.imm {
		ctxt.Diag("%v: immediate %d in raw position cannot be larger than 32 bits", ins.as, ins.imm)
	}
}
|
|
|
|
|
2022-08-31 18:18:19 +10:00
|
|
|
// extractBitAndShift extracts the specified bit from the given immediate,
// before shifting it to the requested position and returning it.
func extractBitAndShift(imm uint32, bit, pos int) uint32 {
	return (imm & (1 << bit)) >> bit << pos
}
|
|
|
|
|
2019-09-08 04:11:07 +10:00
|
|
|
// encodeR encodes an R-type RISC-V instruction. The funct3/funct7
// arguments and rs2 are ORed with the values from the encoding table,
// so either source may supply each field.
func encodeR(as obj.As, rs1, rs2, rd, funct3, funct7 uint32) uint32 {
	enc := encode(as)
	if enc == nil {
		panic("encodeR: could not encode instruction")
	}
	// The rs2 slot can be filled by the table (e.g. as a sub-opcode) or
	// by the caller, but never both.
	if enc.rs2 != 0 && rs2 != 0 {
		panic("encodeR: instruction uses rs2, but rs2 was nonzero")
	}
	return funct7<<25 | enc.funct7<<25 | enc.rs2<<20 | rs2<<20 | rs1<<15 | enc.funct3<<12 | funct3<<12 | rd<<7 | enc.opcode
}
|
|
|
|
|
2021-02-17 15:00:34 +00:00
|
|
|
// encodeR4 encodes an R4-type RISC-V instruction (three source
// registers plus a 2 bit funct2 field).
func encodeR4(as obj.As, rs1, rs2, rs3, rd, funct3, funct2 uint32) uint32 {
	enc := encode(as)
	if enc == nil {
		panic("encodeR4: could not encode instruction")
	}
	if enc.rs2 != 0 {
		panic("encodeR4: instruction uses rs2")
	}
	// funct2 may come from the caller and/or the encoding table, but the
	// combined value must still fit in two bits.
	funct2 |= enc.funct7
	if funct2&^3 != 0 {
		panic("encodeR4: funct2 requires more than 2 bits")
	}
	return rs3<<27 | funct2<<25 | rs2<<20 | rs1<<15 | enc.funct3<<12 | funct3<<12 | rd<<7 | enc.opcode
}
|
|
|
|
|
2024-01-31 10:37:35 +08:00
|
|
|
// encodeRII encodes an R-type instruction with integer rd and rs1 and
// no rs2.
func encodeRII(ins *instruction) uint32 {
	return encodeR(ins.as, regI(ins.rs1), 0, regI(ins.rd), ins.funct3, ins.funct7)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// encodeRIII encodes an R-type instruction with integer rd, rs1 and rs2.
func encodeRIII(ins *instruction) uint32 {
	return encodeR(ins.as, regI(ins.rs1), regI(ins.rs2), regI(ins.rd), ins.funct3, ins.funct7)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// encodeRFFF encodes an R-type instruction with float rd, rs1 and rs2.
func encodeRFFF(ins *instruction) uint32 {
	return encodeR(ins.as, regF(ins.rs1), regF(ins.rs2), regF(ins.rd), ins.funct3, ins.funct7)
}
|
|
|
|
|
2021-02-17 15:00:34 +00:00
|
|
|
// encodeRFFFF encodes an R4-type instruction with float rd, rs1, rs2
// and rs3.
func encodeRFFFF(ins *instruction) uint32 {
	return encodeR4(ins.as, regF(ins.rs1), regF(ins.rs2), regF(ins.rs3), regF(ins.rd), ins.funct3, ins.funct7)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// encodeRFFI encodes an R-type instruction with an integer rd and
// float rs1 and rs2.
func encodeRFFI(ins *instruction) uint32 {
	return encodeR(ins.as, regF(ins.rs1), regF(ins.rs2), regI(ins.rd), ins.funct3, ins.funct7)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// encodeRFI encodes an R-type instruction with an integer rd and a
// float source. Note that ins.rs2 is placed in the rs1 slot of the
// encoding.
func encodeRFI(ins *instruction) uint32 {
	return encodeR(ins.as, regF(ins.rs2), 0, regI(ins.rd), ins.funct3, ins.funct7)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// encodeRIF encodes an R-type instruction with a float rd and an
// integer source. Note that ins.rs2 is placed in the rs1 slot of the
// encoding.
func encodeRIF(ins *instruction) uint32 {
	return encodeR(ins.as, regI(ins.rs2), 0, regF(ins.rd), ins.funct3, ins.funct7)
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// encodeRFF encodes an R-type instruction with float rd and a float
// source. Note that ins.rs2 is placed in the rs1 slot of the encoding.
func encodeRFF(ins *instruction) uint32 {
	return encodeR(ins.as, regF(ins.rs2), 0, regF(ins.rd), ins.funct3, ins.funct7)
}
|
|
|
|
|
2019-09-08 04:11:07 +10:00
|
|
|
// encodeI encodes an I-type RISC-V instruction. The immediate is ORed
// with the encoding table's csr value, which lets CSR instructions
// supply their CSR number through the same 12 bit field.
func encodeI(as obj.As, rs1, rd, imm uint32) uint32 {
	enc := encode(as)
	if enc == nil {
		panic("encodeI: could not encode instruction")
	}
	imm |= uint32(enc.csr)
	return imm<<20 | rs1<<15 | enc.funct3<<12 | rd<<7 | enc.opcode
}
|
|
|
|
|
2024-09-13 09:33:03 +02:00
|
|
|
// encodeIII encodes an I-type instruction with integer rd and rs1.
func encodeIII(ins *instruction) uint32 {
	return encodeI(ins.as, regI(ins.rs1), regI(ins.rd), uint32(ins.imm))
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// encodeIF encodes an I-type instruction with a float rd and an
// integer rs1.
func encodeIF(ins *instruction) uint32 {
	return encodeI(ins.as, regI(ins.rs1), regF(ins.rd), uint32(ins.imm))
}
|
|
|
|
|
2019-09-17 04:23:23 +10:00
|
|
|
// encodeS encodes an S-type RISC-V instruction. The 12 bit immediate
// is split between bits 31:25 (imm[11:5]) and 11:7 (imm[4:0]).
func encodeS(as obj.As, rs1, rs2, imm uint32) uint32 {
	enc := encode(as)
	if enc == nil {
		panic("encodeS: could not encode instruction")
	}
	return (imm>>5)<<25 | rs2<<20 | rs1<<15 | enc.funct3<<12 | (imm&0x1f)<<7 | enc.opcode
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// encodeSI encodes an S-type instruction with integer registers.
// Note that ins.rd goes in the encoding's rs1 slot and ins.rs1 in the
// rs2 slot.
func encodeSI(ins *instruction) uint32 {
	return encodeS(ins.as, regI(ins.rd), regI(ins.rs1), uint32(ins.imm))
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
// encodeSF encodes an S-type instruction with a float source register.
// Note that ins.rd goes in the encoding's rs1 slot and ins.rs1 in the
// rs2 slot.
func encodeSF(ins *instruction) uint32 {
	return encodeS(ins.as, regI(ins.rd), regF(ins.rs1), uint32(ins.imm))
}
|
|
|
|
|
2022-08-31 18:18:19 +10:00
|
|
|
// encodeBImmediate encodes an immediate for a B-type RISC-V instruction.
// The immediate's bits land as imm[12] -> 31, imm[10:5] -> 30:25,
// imm[4:1] -> 11:8 and imm[11] -> 7.
func encodeBImmediate(imm uint32) uint32 {
	bits := (imm >> 12) << 31
	bits |= ((imm >> 5) & 0x3f) << 25
	bits |= ((imm >> 1) & 0xf) << 8
	bits |= ((imm >> 11) & 0x1) << 7
	return bits
}
|
|
|
|
|
2019-09-19 03:53:50 +10:00
|
|
|
// encodeB encodes a B-type RISC-V instruction. The low bit of the
// immediate is implicitly dropped by encodeBImmediate. Note that
// ins.rs1 is placed in the encoding's rs2 slot and ins.rs2 in the rs1
// slot.
func encodeB(ins *instruction) uint32 {
	imm := immI(ins.as, ins.imm, 13)
	rs2 := regI(ins.rs1)
	rs1 := regI(ins.rs2)
	enc := encode(ins.as)
	if enc == nil {
		panic("encodeB: could not encode instruction")
	}
	return encodeBImmediate(imm) | rs2<<20 | rs1<<15 | enc.funct3<<12 | enc.opcode
}
|
|
|
|
|
2019-09-19 02:34:06 +10:00
|
|
|
// encodeU encodes a U-type RISC-V instruction.
func encodeU(ins *instruction) uint32 {
	// The immediates for encodeU are the upper 20 bits of a 32 bit value.
	// Rather than have the user/compiler generate a 32 bit constant, the
	// bottommost bits of which must all be zero, instead accept just the
	// top bits.
	imm := immI(ins.as, ins.imm, 20)
	rd := regI(ins.rd)
	enc := encode(ins.as)
	if enc == nil {
		panic("encodeU: could not encode instruction")
	}
	return imm<<12 | rd<<7 | enc.opcode
}
|
|
|
|
|
cmd/internal/obj/riscv,cmd/link/internal/riscv64: add call trampolines for riscv64
CALL and JMP on riscv64 are currently implemented as an AUIPC+JALR pair. This means
that every call requires two instructions and makes use of the REG_TMP register,
even when the symbol would be directly reachable via a single JAL instruction.
Add support for call trampolines - CALL and JMP are now implemented as a single JAL
instruction, with the linker generating trampolines in the case where the symbol is
not reachable (more than +/-1MiB from the JAL instruction), is an unknown symbol or
does not yet have an address assigned. Each trampoline contains an AUIPC+JALR pair,
which the relocation is applied to.
Due to the limited reachability of the JAL instruction, combined with the way that
the Go linker currently assigns symbol addresses, there are cases where a call is to
a symbol that has no address currently assigned. In this situation we have to assume
that a trampoline will be required, however we can patch this up during relocation,
potentially calling directly instead. This means that we will end up with trampolines
that are unused. In the case of the Go binary, there are around 3,500 trampolines of
which approximately 2,300 are unused (around 9200 bytes of machine instructions).
Overall, this removes over 72,000 AUIPC instructions from the Go binary.
Change-Id: I2d9ecfb85dfc285c7729a3cd0b3a77b6f6c98be0
Reviewed-on: https://go-review.googlesource.com/c/go/+/345051
Trust: Joel Sing <joel@sing.id.au>
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
2021-08-26 01:33:29 +10:00
|
|
|
// encodeJImmediate encodes an immediate for a J-type RISC-V instruction.
// The immediate's bits land as imm[20] -> 31, imm[10:1] -> 30:21,
// imm[11] -> 20 and imm[19:12] -> 19:12.
func encodeJImmediate(imm uint32) uint32 {
	bits := (imm >> 20) << 31
	bits |= ((imm >> 1) & 0x3ff) << 21
	bits |= ((imm >> 11) & 0x1) << 20
	bits |= ((imm >> 12) & 0xff) << 12
	return bits
}
|
|
|
|
|
2019-09-19 03:53:50 +10:00
|
|
|
// encodeJ encodes a J-type RISC-V instruction. The low bit of the
// immediate is implicitly dropped by encodeJImmediate.
func encodeJ(ins *instruction) uint32 {
	imm := immI(ins.as, ins.imm, 21)
	rd := regI(ins.rd)
	enc := encode(ins.as)
	if enc == nil {
		panic("encodeJ: could not encode instruction")
	}
	return encodeJImmediate(imm) | rd<<7 | enc.opcode
}
|
|
|
|
|
2022-08-31 18:18:19 +10:00
|
|
|
// encodeCBImmediate encodes an immediate for a CB-type RISC-V instruction.
|
|
|
|
func encodeCBImmediate(imm uint32) uint32 {
|
|
|
|
// Bit order - [8|4:3|7:6|2:1|5]
|
|
|
|
bits := extractBitAndShift(imm, 8, 7)
|
|
|
|
bits |= extractBitAndShift(imm, 4, 6)
|
|
|
|
bits |= extractBitAndShift(imm, 3, 5)
|
|
|
|
bits |= extractBitAndShift(imm, 7, 4)
|
|
|
|
bits |= extractBitAndShift(imm, 6, 3)
|
|
|
|
bits |= extractBitAndShift(imm, 2, 2)
|
|
|
|
bits |= extractBitAndShift(imm, 1, 1)
|
|
|
|
bits |= extractBitAndShift(imm, 5, 0)
|
|
|
|
return (bits>>5)<<10 | (bits&0x1f)<<2
|
|
|
|
}
|
|
|
|
|
|
|
|
// encodeCJImmediate encodes an immediate for a CJ-type RISC-V instruction.
|
|
|
|
func encodeCJImmediate(imm uint32) uint32 {
|
|
|
|
// Bit order - [11|4|9:8|10|6|7|3:1|5]
|
|
|
|
bits := extractBitAndShift(imm, 11, 10)
|
|
|
|
bits |= extractBitAndShift(imm, 4, 9)
|
|
|
|
bits |= extractBitAndShift(imm, 9, 8)
|
|
|
|
bits |= extractBitAndShift(imm, 8, 7)
|
|
|
|
bits |= extractBitAndShift(imm, 10, 6)
|
|
|
|
bits |= extractBitAndShift(imm, 6, 5)
|
|
|
|
bits |= extractBitAndShift(imm, 7, 4)
|
|
|
|
bits |= extractBitAndShift(imm, 3, 3)
|
|
|
|
bits |= extractBitAndShift(imm, 2, 2)
|
|
|
|
bits |= extractBitAndShift(imm, 1, 1)
|
|
|
|
bits |= extractBitAndShift(imm, 5, 0)
|
|
|
|
return bits << 2
|
|
|
|
}
|
|
|
|
|
cmd/asm,cmd/internal/obj/riscv: implement vector configuration setting instructions
Implement vector configuration setting instructions (VSETVLI,
VSETIVLI, VSETL). These allow the vector length (vl) and vector
type (vtype) CSRs to be configured via a single instruction.
Unfortunately each instruction has its own dedicated encoding.
In the case of VSETVLI/VSETIVLI, the vector type is specified via
a series of special operands, which specify the selected element
width (E8, E16, E32, E64), the vector register group multiplier
(M1, M2, M4, M8, MF2, MF4, MF8), the vector tail policy (TU, TA)
and vector mask policy (MU, MA). Note that the order of these
special operands matches non-Go assemblers.
Partially based on work by Pengcheng Wang <wangpengcheng.pp@bytedance.com>.
Cq-Include-Trybots: luci.golang.try:gotip-linux-riscv64
Change-Id: I431f59c1e048a3e84754f0643a963da473a741fe
Reviewed-on: https://go-review.googlesource.com/c/go/+/631936
Reviewed-by: Mark Ryan <markdryan@rivosinc.com>
Reviewed-by: Meng Zhuo <mengzhuo1203@gmail.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
2024-11-24 12:39:20 +11:00
|
|
|
func encodeVset(as obj.As, rs1, rs2, rd uint32) uint32 {
|
|
|
|
enc := encode(as)
|
|
|
|
if enc == nil {
|
|
|
|
panic("encodeVset: could not encode instruction")
|
|
|
|
}
|
|
|
|
return enc.funct7<<25 | rs2<<20 | rs1<<15 | enc.funct3<<12 | rd<<7 | enc.opcode
|
|
|
|
}
|
|
|
|
|
|
|
|
func encodeVsetvli(ins *instruction) uint32 {
|
|
|
|
vtype := immU(ins.as, ins.imm, 11)
|
|
|
|
return encodeVset(ins.as, regI(ins.rs1), vtype, regI(ins.rd))
|
|
|
|
}
|
|
|
|
|
|
|
|
// encodeVsetivli encodes a VSETIVLI instruction: a 10 bit vtype immediate,
// with the 5 bit AVL immediate carried in the rs1 field position.
func encodeVsetivli(ins *instruction) uint32 {
	vtype := immU(ins.as, ins.imm, 10)
	avl := immU(ins.as, int64(ins.rs1), 5)
	return encodeVset(ins.as, avl, vtype, regI(ins.rd))
}
|
|
|
|
|
|
|
|
func encodeVsetvl(ins *instruction) uint32 {
|
|
|
|
return encodeVset(ins.as, regI(ins.rs1), regI(ins.rs2), regI(ins.rd))
|
|
|
|
}
|
|
|
|
|
2019-12-19 02:09:45 +11:00
|
|
|
func encodeRawIns(ins *instruction) uint32 {
|
2019-09-08 01:56:26 +10:00
|
|
|
// Treat the raw value specially as a 32-bit unsigned integer.
|
|
|
|
// Nobody wants to enter negative machine code.
|
2019-12-19 02:09:45 +11:00
|
|
|
if ins.imm < 0 || 1<<32 <= ins.imm {
|
|
|
|
panic(fmt.Sprintf("immediate %d cannot fit in 32 bits", ins.imm))
|
2019-09-08 01:56:26 +10:00
|
|
|
}
|
2019-12-19 02:09:45 +11:00
|
|
|
return uint32(ins.imm)
|
2019-09-08 01:56:26 +10:00
|
|
|
}
|
|
|
|
|
2022-08-31 18:18:19 +10:00
|
|
|
func EncodeBImmediate(imm int64) (int64, error) {
|
2023-08-28 01:22:02 +10:00
|
|
|
if err := immIFits(imm, 13); err != nil {
|
|
|
|
return 0, err
|
cmd/internal/obj/riscv,cmd/link/internal/riscv64: add call trampolines for riscv64
CALL and JMP on riscv64 are currently implemented as an AUIPC+JALR pair. This means
that every call requires two instructions and makes use of the REG_TMP register,
even when the symbol would be directly reachable via a single JAL instruction.
Add support for call trampolines - CALL and JMP are now implemented as a single JAL
instruction, with the linker generating trampolines in the case where the symbol is
not reachable (more than +/-1MiB from the JAL instruction), is an unknown symbol or
does not yet have an address assigned. Each trampoline contains an AUIPC+JALR pair,
which the relocation is applied to.
Due to the limited reachability of the JAL instruction, combined with the way that
the Go linker currently assigns symbol addresses, there are cases where a call is to
a symbol that has no address currently assigned. In this situation we have to assume
that a trampoline will be required, however we can patch this up during relocation,
potentially calling directly instead. This means that we will end up with trampolines
that are unused. In the case of the Go binary, there are around 3,500 trampolines of
which approximately 2,300 are unused (around 9200 bytes of machine instructions).
Overall, this removes over 72,000 AUIPC instructions from the Go binary.
Change-Id: I2d9ecfb85dfc285c7729a3cd0b3a77b6f6c98be0
Reviewed-on: https://go-review.googlesource.com/c/go/+/345051
Trust: Joel Sing <joel@sing.id.au>
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
2021-08-26 01:33:29 +10:00
|
|
|
}
|
2023-08-28 01:22:02 +10:00
|
|
|
if err := immEven(imm); err != nil {
|
|
|
|
return 0, err
|
cmd/internal/obj/riscv,cmd/link/internal/riscv64: add call trampolines for riscv64
CALL and JMP on riscv64 are currently implemented as an AUIPC+JALR pair. This means
that every call requires two instructions and makes use of the REG_TMP register,
even when the symbol would be directly reachable via a single JAL instruction.
Add support for call trampolines - CALL and JMP are now implemented as a single JAL
instruction, with the linker generating trampolines in the case where the symbol is
not reachable (more than +/-1MiB from the JAL instruction), is an unknown symbol or
does not yet have an address assigned. Each trampoline contains an AUIPC+JALR pair,
which the relocation is applied to.
Due to the limited reachability of the JAL instruction, combined with the way that
the Go linker currently assigns symbol addresses, there are cases where a call is to
a symbol that has no address currently assigned. In this situation we have to assume
that a trampoline will be required, however we can patch this up during relocation,
potentially calling directly instead. This means that we will end up with trampolines
that are unused. In the case of the Go binary, there are around 3,500 trampolines of
which approximately 2,300 are unused (around 9200 bytes of machine instructions).
Overall, this removes over 72,000 AUIPC instructions from the Go binary.
Change-Id: I2d9ecfb85dfc285c7729a3cd0b3a77b6f6c98be0
Reviewed-on: https://go-review.googlesource.com/c/go/+/345051
Trust: Joel Sing <joel@sing.id.au>
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
2021-08-26 01:33:29 +10:00
|
|
|
}
|
2022-08-31 18:18:19 +10:00
|
|
|
return int64(encodeBImmediate(uint32(imm))), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func EncodeCBImmediate(imm int64) (int64, error) {
|
2023-08-28 01:22:02 +10:00
|
|
|
if err := immIFits(imm, 9); err != nil {
|
|
|
|
return 0, err
|
2022-08-31 18:18:19 +10:00
|
|
|
}
|
2023-08-28 01:22:02 +10:00
|
|
|
if err := immEven(imm); err != nil {
|
|
|
|
return 0, err
|
2022-08-31 18:18:19 +10:00
|
|
|
}
|
|
|
|
return int64(encodeCBImmediate(uint32(imm))), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func EncodeCJImmediate(imm int64) (int64, error) {
|
2023-08-28 01:22:02 +10:00
|
|
|
if err := immIFits(imm, 12); err != nil {
|
|
|
|
return 0, err
|
2022-08-31 18:18:19 +10:00
|
|
|
}
|
2023-08-28 01:22:02 +10:00
|
|
|
if err := immEven(imm); err != nil {
|
|
|
|
return 0, err
|
2022-08-31 18:18:19 +10:00
|
|
|
}
|
|
|
|
return int64(encodeCJImmediate(uint32(imm))), nil
|
cmd/internal/obj/riscv,cmd/link/internal/riscv64: add call trampolines for riscv64
CALL and JMP on riscv64 are currently implemented as an AUIPC+JALR pair. This means
that every call requires two instructions and makes use of the REG_TMP register,
even when the symbol would be directly reachable via a single JAL instruction.
Add support for call trampolines - CALL and JMP are now implemented as a single JAL
instruction, with the linker generating trampolines in the case where the symbol is
not reachable (more than +/-1MiB from the JAL instruction), is an unknown symbol or
does not yet have an address assigned. Each trampoline contains an AUIPC+JALR pair,
which the relocation is applied to.
Due to the limited reachability of the JAL instruction, combined with the way that
the Go linker currently assigns symbol addresses, there are cases where a call is to
a symbol that has no address currently assigned. In this situation we have to assume
that a trampoline will be required, however we can patch this up during relocation,
potentially calling directly instead. This means that we will end up with trampolines
that are unused. In the case of the Go binary, there are around 3,500 trampolines of
which approximately 2,300 are unused (around 9200 bytes of machine instructions).
Overall, this removes over 72,000 AUIPC instructions from the Go binary.
Change-Id: I2d9ecfb85dfc285c7729a3cd0b3a77b6f6c98be0
Reviewed-on: https://go-review.googlesource.com/c/go/+/345051
Trust: Joel Sing <joel@sing.id.au>
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
2021-08-26 01:33:29 +10:00
|
|
|
}
|
|
|
|
|
2019-11-04 01:05:46 +11:00
|
|
|
func EncodeIImmediate(imm int64) (int64, error) {
|
2023-08-28 01:22:02 +10:00
|
|
|
if err := immIFits(imm, 12); err != nil {
|
|
|
|
return 0, err
|
2019-11-04 01:05:46 +11:00
|
|
|
}
|
|
|
|
return imm << 20, nil
|
|
|
|
}
|
|
|
|
|
2022-08-31 18:18:19 +10:00
|
|
|
func EncodeJImmediate(imm int64) (int64, error) {
|
2023-08-28 01:22:02 +10:00
|
|
|
if err := immIFits(imm, 21); err != nil {
|
|
|
|
return 0, err
|
2022-08-31 18:18:19 +10:00
|
|
|
}
|
2023-08-28 01:22:02 +10:00
|
|
|
if err := immEven(imm); err != nil {
|
|
|
|
return 0, err
|
2022-08-31 18:18:19 +10:00
|
|
|
}
|
|
|
|
return int64(encodeJImmediate(uint32(imm))), nil
|
|
|
|
}
|
|
|
|
|
2019-11-04 01:05:46 +11:00
|
|
|
func EncodeSImmediate(imm int64) (int64, error) {
|
2023-08-28 01:22:02 +10:00
|
|
|
if err := immIFits(imm, 12); err != nil {
|
|
|
|
return 0, err
|
2019-11-04 01:05:46 +11:00
|
|
|
}
|
|
|
|
return ((imm >> 5) << 25) | ((imm & 0x1f) << 7), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func EncodeUImmediate(imm int64) (int64, error) {
|
2023-08-28 01:22:02 +10:00
|
|
|
if err := immIFits(imm, 20); err != nil {
|
|
|
|
return 0, err
|
2019-11-04 01:05:46 +11:00
|
|
|
}
|
|
|
|
return imm << 12, nil
|
|
|
|
}
|
|
|
|
|
cmd/asm,cmd/internal/obj/riscv: implement vector configuration setting instructions
Implement vector configuration setting instructions (VSETVLI,
VSETIVLI, VSETL). These allow the vector length (vl) and vector
type (vtype) CSRs to be configured via a single instruction.
Unfortunately each instruction has its own dedicated encoding.
In the case of VSETVLI/VSETIVLI, the vector type is specified via
a series of special operands, which specify the selected element
width (E8, E16, E32, E64), the vector register group multiplier
(M1, M2, M4, M8, MF2, MF4, MF8), the vector tail policy (TU, TA)
and vector mask policy (MU, MA). Note that the order of these
special operands matches non-Go assemblers.
Partially based on work by Pengcheng Wang <wangpengcheng.pp@bytedance.com>.
Cq-Include-Trybots: luci.golang.try:gotip-linux-riscv64
Change-Id: I431f59c1e048a3e84754f0643a963da473a741fe
Reviewed-on: https://go-review.googlesource.com/c/go/+/631936
Reviewed-by: Mark Ryan <markdryan@rivosinc.com>
Reviewed-by: Meng Zhuo <mengzhuo1203@gmail.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
2024-11-24 12:39:20 +11:00
|
|
|
func EncodeVectorType(vsew, vlmul, vtail, vmask int64) (int64, error) {
|
|
|
|
vsewSO := SpecialOperand(vsew)
|
|
|
|
if vsewSO < SPOP_E8 || vsewSO > SPOP_E64 {
|
|
|
|
return -1, fmt.Errorf("invalid vector selected element width %q", vsewSO)
|
|
|
|
}
|
|
|
|
vlmulSO := SpecialOperand(vlmul)
|
|
|
|
if vlmulSO < SPOP_M1 || vlmulSO > SPOP_MF8 {
|
|
|
|
return -1, fmt.Errorf("invalid vector register group multiplier %q", vlmulSO)
|
|
|
|
}
|
|
|
|
vtailSO := SpecialOperand(vtail)
|
|
|
|
if vtailSO != SPOP_TA && vtailSO != SPOP_TU {
|
|
|
|
return -1, fmt.Errorf("invalid vector tail policy %q", vtailSO)
|
|
|
|
}
|
|
|
|
vmaskSO := SpecialOperand(vmask)
|
|
|
|
if vmaskSO != SPOP_MA && vmaskSO != SPOP_MU {
|
|
|
|
return -1, fmt.Errorf("invalid vector mask policy %q", vmaskSO)
|
|
|
|
}
|
|
|
|
vtype := vmaskSO.encode()<<7 | vtailSO.encode()<<6 | vsewSO.encode()<<3 | vlmulSO.encode()
|
|
|
|
return int64(vtype), nil
|
|
|
|
}
|
|
|
|
|
2019-09-08 01:56:26 +10:00
|
|
|
// encoding describes how an instruction is encoded and validated, and how
// many bytes of machine code it occupies.
type encoding struct {
	encode   func(*instruction) uint32     // encode returns the machine code for an instruction
	validate func(*obj.Link, *instruction) // validate validates an instruction
	length   int                           // length of encoded instruction; 0 for pseudo-ops, 4 otherwise
}
|
|
|
|
|
|
|
|
var (
	// Encodings have the following naming convention:
	//
	//  1. the instruction encoding (R/I/S/B/U/J), in lowercase
	//  2. zero or more register operand identifiers (I = integer
	//     register, F = float register), in uppercase
	//  3. the word "Encoding"
	//
	// For example, rIIIEncoding indicates an R-type instruction with two
	// integer register inputs and an integer register output; sFEncoding
	// indicates an S-type instruction with rs2 being a float register.

	// R-type instruction encodings.
	rIIIEncoding  = encoding{encode: encodeRIII, validate: validateRIII, length: 4}
	rIIEncoding   = encoding{encode: encodeRII, validate: validateRII, length: 4}
	rFFFEncoding  = encoding{encode: encodeRFFF, validate: validateRFFF, length: 4}
	rFFFFEncoding = encoding{encode: encodeRFFFF, validate: validateRFFFF, length: 4}
	rFFIEncoding  = encoding{encode: encodeRFFI, validate: validateRFFI, length: 4}
	rFIEncoding   = encoding{encode: encodeRFI, validate: validateRFI, length: 4}
	rIFEncoding   = encoding{encode: encodeRIF, validate: validateRIF, length: 4}
	rFFEncoding   = encoding{encode: encodeRFF, validate: validateRFF, length: 4}

	// I-type instruction encodings.
	iIIEncoding = encoding{encode: encodeIII, validate: validateIII, length: 4}
	iFEncoding  = encoding{encode: encodeIF, validate: validateIF, length: 4}

	// S-type instruction encodings.
	sIEncoding = encoding{encode: encodeSI, validate: validateSI, length: 4}
	sFEncoding = encoding{encode: encodeSF, validate: validateSF, length: 4}

	// B/U/J-type instruction encodings.
	bEncoding = encoding{encode: encodeB, validate: validateB, length: 4}
	uEncoding = encoding{encode: encodeU, validate: validateU, length: 4}
	jEncoding = encoding{encode: encodeJ, validate: validateJ, length: 4}

	// Encodings for vector configuration setting instruction.
	vsetvliEncoding  = encoding{encode: encodeVsetvli, validate: validateVsetvli, length: 4}
	vsetivliEncoding = encoding{encode: encodeVsetivli, validate: validateVsetivli, length: 4}
	vsetvlEncoding   = encoding{encode: encodeVsetvl, validate: validateVsetvl, length: 4}

	// rawEncoding encodes a raw instruction byte sequence.
	rawEncoding = encoding{encode: encodeRawIns, validate: validateRaw, length: 4}

	// pseudoOpEncoding panics if encoding is attempted, but does no validation.
	pseudoOpEncoding = encoding{encode: nil, validate: func(*obj.Link, *instruction) {}, length: 0}

	// badEncoding is used when an invalid op is encountered.
	// An error has already been generated, so let anything else through.
	badEncoding = encoding{encode: func(*instruction) uint32 { return 0 }, validate: func(*obj.Link, *instruction) {}, length: 0}
)
|
|
|
|
|
2024-08-15 02:48:09 +10:00
|
|
|
// instructionData specifies details relating to a RISC-V instruction.
type instructionData struct {
	enc     encoding // how this instruction is encoded and validated
	immForm obj.As   // immediate form of this instruction
	ternary bool     // NOTE(review): presumably marks instructions that accept a three operand form — confirm at use sites
}
|
2019-09-08 01:56:26 +10:00
|
|
|
|
2024-08-15 02:48:09 +10:00
|
|
|
// instructions contains details of RISC-V instructions, including
|
|
|
|
// their encoding type. Entries are masked with obj.AMask to keep
|
|
|
|
// indices small.
|
|
|
|
var instructions = [ALAST & obj.AMask]instructionData{
|
2024-11-24 14:38:33 +11:00
|
|
|
//
|
2019-09-08 04:11:07 +10:00
|
|
|
// Unprivileged ISA
|
2024-11-24 14:38:33 +11:00
|
|
|
//
|
2019-09-08 04:11:07 +10:00
|
|
|
|
|
|
|
// 2.4: Integer Computational Instructions
|
2024-08-15 02:48:09 +10:00
|
|
|
AADDI & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
ASLTI & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
ASLTIU & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
AANDI & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
AORI & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
AXORI & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
ASLLI & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
ASRLI & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
ASRAI & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
ALUI & obj.AMask: {enc: uEncoding},
|
|
|
|
AAUIPC & obj.AMask: {enc: uEncoding},
|
|
|
|
AADD & obj.AMask: {enc: rIIIEncoding, immForm: AADDI, ternary: true},
|
|
|
|
ASLT & obj.AMask: {enc: rIIIEncoding, immForm: ASLTI, ternary: true},
|
|
|
|
ASLTU & obj.AMask: {enc: rIIIEncoding, immForm: ASLTIU, ternary: true},
|
|
|
|
AAND & obj.AMask: {enc: rIIIEncoding, immForm: AANDI, ternary: true},
|
|
|
|
AOR & obj.AMask: {enc: rIIIEncoding, immForm: AORI, ternary: true},
|
|
|
|
AXOR & obj.AMask: {enc: rIIIEncoding, immForm: AXORI, ternary: true},
|
|
|
|
ASLL & obj.AMask: {enc: rIIIEncoding, immForm: ASLLI, ternary: true},
|
|
|
|
ASRL & obj.AMask: {enc: rIIIEncoding, immForm: ASRLI, ternary: true},
|
|
|
|
ASUB & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
ASRA & obj.AMask: {enc: rIIIEncoding, immForm: ASRAI, ternary: true},
|
2019-09-08 04:11:07 +10:00
|
|
|
|
2019-09-19 03:53:50 +10:00
|
|
|
// 2.5: Control Transfer Instructions
|
2024-08-15 02:48:09 +10:00
|
|
|
AJAL & obj.AMask: {enc: jEncoding},
|
|
|
|
AJALR & obj.AMask: {enc: iIIEncoding},
|
|
|
|
ABEQ & obj.AMask: {enc: bEncoding},
|
|
|
|
ABNE & obj.AMask: {enc: bEncoding},
|
|
|
|
ABLT & obj.AMask: {enc: bEncoding},
|
|
|
|
ABLTU & obj.AMask: {enc: bEncoding},
|
|
|
|
ABGE & obj.AMask: {enc: bEncoding},
|
|
|
|
ABGEU & obj.AMask: {enc: bEncoding},
|
2019-09-19 03:53:50 +10:00
|
|
|
|
2019-09-17 04:23:23 +10:00
|
|
|
// 2.6: Load and Store Instructions
|
2024-08-15 02:48:09 +10:00
|
|
|
ALW & obj.AMask: {enc: iIIEncoding},
|
|
|
|
ALWU & obj.AMask: {enc: iIIEncoding},
|
|
|
|
ALH & obj.AMask: {enc: iIIEncoding},
|
|
|
|
ALHU & obj.AMask: {enc: iIIEncoding},
|
|
|
|
ALB & obj.AMask: {enc: iIIEncoding},
|
|
|
|
ALBU & obj.AMask: {enc: iIIEncoding},
|
|
|
|
ASW & obj.AMask: {enc: sIEncoding},
|
|
|
|
ASH & obj.AMask: {enc: sIEncoding},
|
|
|
|
ASB & obj.AMask: {enc: sIEncoding},
|
2019-09-17 04:23:23 +10:00
|
|
|
|
2020-02-21 02:28:37 +11:00
|
|
|
// 2.7: Memory Ordering
|
2024-08-15 02:48:09 +10:00
|
|
|
AFENCE & obj.AMask: {enc: iIIEncoding},
|
2020-02-21 02:28:37 +11:00
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 4.2: Integer Computational Instructions (RV64I)
|
2024-08-15 02:48:09 +10:00
|
|
|
AADDIW & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
ASLLIW & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
ASRLIW & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
ASRAIW & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
AADDW & obj.AMask: {enc: rIIIEncoding, immForm: AADDIW, ternary: true},
|
|
|
|
ASLLW & obj.AMask: {enc: rIIIEncoding, immForm: ASLLIW, ternary: true},
|
|
|
|
ASRLW & obj.AMask: {enc: rIIIEncoding, immForm: ASRLIW, ternary: true},
|
|
|
|
ASUBW & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
ASRAW & obj.AMask: {enc: rIIIEncoding, immForm: ASRAIW, ternary: true},
|
2019-09-19 00:59:26 +10:00
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 4.3: Load and Store Instructions (RV64I)
|
2024-08-15 02:48:09 +10:00
|
|
|
ALD & obj.AMask: {enc: iIIEncoding},
|
|
|
|
ASD & obj.AMask: {enc: sIEncoding},
|
2019-09-17 04:23:23 +10:00
|
|
|
|
2024-10-24 00:56:07 +11:00
|
|
|
// 7.1: CSR Instructions
|
2024-08-15 02:48:09 +10:00
|
|
|
ACSRRS & obj.AMask: {enc: iIIEncoding},
|
2024-10-24 00:56:07 +11:00
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 13.1: Multiplication Operations
|
2024-08-15 02:48:09 +10:00
|
|
|
AMUL & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
AMULH & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
AMULHU & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
AMULHSU & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
AMULW & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
ADIV & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
ADIVU & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
AREM & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
AREMU & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
ADIVW & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
ADIVUW & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
AREMW & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
AREMUW & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
2019-09-17 04:23:23 +10:00
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 14.2: Load-Reserved/Store-Conditional Instructions (Zalrsc)
|
2024-08-15 02:48:09 +10:00
|
|
|
ALRW & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
ALRD & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
ASCW & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
ASCD & obj.AMask: {enc: rIIIEncoding},
|
2020-02-21 02:30:09 +11:00
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 14.4: Atomic Memory Operations (Zaamo)
|
2024-08-15 02:48:09 +10:00
|
|
|
AAMOSWAPW & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
AAMOSWAPD & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
AAMOADDW & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
AAMOADDD & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
AAMOANDW & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
AAMOANDD & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
AAMOORW & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
AAMOORD & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
AAMOXORW & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
AAMOXORD & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
AAMOMAXW & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
AAMOMAXD & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
AAMOMAXUW & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
AAMOMAXUD & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
AAMOMINW & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
AAMOMIND & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
AAMOMINUW & obj.AMask: {enc: rIIIEncoding},
|
|
|
|
AAMOMINUD & obj.AMask: {enc: rIIIEncoding},
|
2020-02-21 02:50:57 +11:00
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 20.5: Single-Precision Load and Store Instructions
|
2024-08-15 02:48:09 +10:00
|
|
|
AFLW & obj.AMask: {enc: iFEncoding},
|
|
|
|
AFSW & obj.AMask: {enc: sFEncoding},
|
2019-09-19 01:01:07 +10:00
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 20.6: Single-Precision Floating-Point Computational Instructions
|
2024-08-15 02:48:09 +10:00
|
|
|
AFADDS & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFSUBS & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFMULS & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFDIVS & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFMINS & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFMAXS & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFSQRTS & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFMADDS & obj.AMask: {enc: rFFFFEncoding},
|
|
|
|
AFMSUBS & obj.AMask: {enc: rFFFFEncoding},
|
|
|
|
AFNMSUBS & obj.AMask: {enc: rFFFFEncoding},
|
|
|
|
AFNMADDS & obj.AMask: {enc: rFFFFEncoding},
|
2019-09-19 01:01:07 +10:00
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 20.7: Single-Precision Floating-Point Conversion and Move Instructions
|
2024-08-15 02:48:09 +10:00
|
|
|
AFCVTWS & obj.AMask: {enc: rFIEncoding},
|
|
|
|
AFCVTLS & obj.AMask: {enc: rFIEncoding},
|
|
|
|
AFCVTSW & obj.AMask: {enc: rIFEncoding},
|
|
|
|
AFCVTSL & obj.AMask: {enc: rIFEncoding},
|
|
|
|
AFCVTWUS & obj.AMask: {enc: rFIEncoding},
|
|
|
|
AFCVTLUS & obj.AMask: {enc: rFIEncoding},
|
|
|
|
AFCVTSWU & obj.AMask: {enc: rIFEncoding},
|
|
|
|
AFCVTSLU & obj.AMask: {enc: rIFEncoding},
|
|
|
|
AFSGNJS & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFSGNJNS & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFSGNJXS & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFMVXW & obj.AMask: {enc: rFIEncoding},
|
|
|
|
AFMVWX & obj.AMask: {enc: rIFEncoding},
|
2019-09-19 01:01:07 +10:00
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 20.8: Single-Precision Floating-Point Compare Instructions
|
2024-08-15 02:48:09 +10:00
|
|
|
AFEQS & obj.AMask: {enc: rFFIEncoding},
|
|
|
|
AFLTS & obj.AMask: {enc: rFFIEncoding},
|
|
|
|
AFLES & obj.AMask: {enc: rFFIEncoding},
|
2019-09-19 01:01:07 +10:00
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 20.9: Single-Precision Floating-Point Classify Instruction
|
2024-08-15 02:48:09 +10:00
|
|
|
AFCLASSS & obj.AMask: {enc: rFIEncoding},
|
2020-02-21 03:03:57 +11:00
|
|
|
|
2019-09-19 01:01:07 +10:00
|
|
|
// 12.3: Double-Precision Load and Store Instructions
|
2024-08-15 02:48:09 +10:00
|
|
|
AFLD & obj.AMask: {enc: iFEncoding},
|
|
|
|
AFSD & obj.AMask: {enc: sFEncoding},
|
2019-09-19 01:01:07 +10:00
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 21.4: Double-Precision Floating-Point Computational Instructions
|
2024-08-15 02:48:09 +10:00
|
|
|
AFADDD & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFSUBD & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFMULD & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFDIVD & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFMIND & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFMAXD & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFSQRTD & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFMADDD & obj.AMask: {enc: rFFFFEncoding},
|
|
|
|
AFMSUBD & obj.AMask: {enc: rFFFFEncoding},
|
|
|
|
AFNMSUBD & obj.AMask: {enc: rFFFFEncoding},
|
|
|
|
AFNMADDD & obj.AMask: {enc: rFFFFEncoding},
|
2019-09-19 01:01:07 +10:00
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 21.5: Double-Precision Floating-Point Conversion and Move Instructions
|
2024-08-15 02:48:09 +10:00
|
|
|
AFCVTWD & obj.AMask: {enc: rFIEncoding},
|
|
|
|
AFCVTLD & obj.AMask: {enc: rFIEncoding},
|
|
|
|
AFCVTDW & obj.AMask: {enc: rIFEncoding},
|
|
|
|
AFCVTDL & obj.AMask: {enc: rIFEncoding},
|
|
|
|
AFCVTWUD & obj.AMask: {enc: rFIEncoding},
|
|
|
|
AFCVTLUD & obj.AMask: {enc: rFIEncoding},
|
|
|
|
AFCVTDWU & obj.AMask: {enc: rIFEncoding},
|
|
|
|
AFCVTDLU & obj.AMask: {enc: rIFEncoding},
|
|
|
|
AFCVTSD & obj.AMask: {enc: rFFEncoding},
|
|
|
|
AFCVTDS & obj.AMask: {enc: rFFEncoding},
|
|
|
|
AFSGNJD & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFSGNJND & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFSGNJXD & obj.AMask: {enc: rFFFEncoding},
|
|
|
|
AFMVXD & obj.AMask: {enc: rFIEncoding},
|
|
|
|
AFMVDX & obj.AMask: {enc: rIFEncoding},
|
2019-09-19 01:01:07 +10:00
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 21.6: Double-Precision Floating-Point Compare Instructions
|
2024-08-15 02:48:09 +10:00
|
|
|
AFEQD & obj.AMask: {enc: rFFIEncoding},
|
|
|
|
AFLTD & obj.AMask: {enc: rFFIEncoding},
|
|
|
|
AFLED & obj.AMask: {enc: rFFIEncoding},
|
2019-09-19 01:01:07 +10:00
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 21.7: Double-Precision Floating-Point Classify Instruction
|
2024-08-15 02:48:09 +10:00
|
|
|
AFCLASSD & obj.AMask: {enc: rFIEncoding},
|
2020-02-21 03:03:57 +11:00
|
|
|
|
2024-01-31 10:37:35 +08:00
|
|
|
//
|
2024-11-24 14:38:33 +11:00
|
|
|
// "B" Extension for Bit Manipulation, Version 1.0.0
|
2024-01-31 10:37:35 +08:00
|
|
|
//
|
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 28.4.1: Address Generation Instructions (Zba)
|
2024-08-15 02:48:09 +10:00
|
|
|
AADDUW & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
ASH1ADD & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
ASH1ADDUW & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
ASH2ADD & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
ASH2ADDUW & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
ASH3ADD & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
ASH3ADDUW & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
ASLLIUW & obj.AMask: {enc: iIIEncoding, ternary: true},
|
2024-01-31 10:37:35 +08:00
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 28.4.2: Basic Bit Manipulation (Zbb)
|
2024-08-15 02:48:09 +10:00
|
|
|
AANDN & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
ACLZ & obj.AMask: {enc: rIIEncoding},
|
|
|
|
ACLZW & obj.AMask: {enc: rIIEncoding},
|
|
|
|
ACPOP & obj.AMask: {enc: rIIEncoding},
|
|
|
|
ACPOPW & obj.AMask: {enc: rIIEncoding},
|
|
|
|
ACTZ & obj.AMask: {enc: rIIEncoding},
|
|
|
|
ACTZW & obj.AMask: {enc: rIIEncoding},
|
|
|
|
AMAX & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
AMAXU & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
AMIN & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
AMINU & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
AORN & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
ASEXTB & obj.AMask: {enc: rIIEncoding},
|
|
|
|
ASEXTH & obj.AMask: {enc: rIIEncoding},
|
|
|
|
AXNOR & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
AZEXTH & obj.AMask: {enc: rIIEncoding},
|
2024-01-31 10:37:35 +08:00
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 28.4.3: Bitwise Rotation (Zbb)
|
2024-08-15 02:48:09 +10:00
|
|
|
AROL & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
AROLW & obj.AMask: {enc: rIIIEncoding, ternary: true},
|
|
|
|
AROR & obj.AMask: {enc: rIIIEncoding, immForm: ARORI, ternary: true},
|
|
|
|
ARORI & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
ARORIW & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
ARORW & obj.AMask: {enc: rIIIEncoding, immForm: ARORIW, ternary: true},
|
|
|
|
AORCB & obj.AMask: {enc: iIIEncoding},
|
|
|
|
AREV8 & obj.AMask: {enc: iIIEncoding},
|
2024-01-31 10:37:35 +08:00
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
// 28.4.4: Single-bit Instructions (Zbs)
|
2024-08-15 02:48:09 +10:00
|
|
|
ABCLR & obj.AMask: {enc: rIIIEncoding, immForm: ABCLRI, ternary: true},
|
|
|
|
ABCLRI & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
ABEXT & obj.AMask: {enc: rIIIEncoding, immForm: ABEXTI, ternary: true},
|
|
|
|
ABEXTI & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
ABINV & obj.AMask: {enc: rIIIEncoding, immForm: ABINVI, ternary: true},
|
|
|
|
ABINVI & obj.AMask: {enc: iIIEncoding, ternary: true},
|
|
|
|
ABSET & obj.AMask: {enc: rIIIEncoding, immForm: ABSETI, ternary: true},
|
|
|
|
ABSETI & obj.AMask: {enc: iIIEncoding, ternary: true},
|
2024-01-31 10:37:35 +08:00
|
|
|
|
cmd/asm,cmd/internal/obj/riscv: implement vector configuration setting instructions
Implement vector configuration setting instructions (VSETVLI,
VSETIVLI, VSETL). These allow the vector length (vl) and vector
type (vtype) CSRs to be configured via a single instruction.
Unfortunately each instruction has its own dedicated encoding.
In the case of VSETVLI/VSETIVLI, the vector type is specified via
a series of special operands, which specify the selected element
width (E8, E16, E32, E64), the vector register group multiplier
(M1, M2, M4, M8, MF2, MF4, MF8), the vector tail policy (TU, TA)
and vector mask policy (MU, MA). Note that the order of these
special operands matches non-Go assemblers.
Partially based on work by Pengcheng Wang <wangpengcheng.pp@bytedance.com>.
Cq-Include-Trybots: luci.golang.try:gotip-linux-riscv64
Change-Id: I431f59c1e048a3e84754f0643a963da473a741fe
Reviewed-on: https://go-review.googlesource.com/c/go/+/631936
Reviewed-by: Mark Ryan <markdryan@rivosinc.com>
Reviewed-by: Meng Zhuo <mengzhuo1203@gmail.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
2024-11-24 12:39:20 +11:00
|
|
|
//
|
|
|
|
// "V" Standard Extension for Vector Operations, Version 1.0
|
|
|
|
//
|
|
|
|
|
|
|
|
// 31.6. Vector Configuration-Setting Instructions
|
|
|
|
AVSETVLI & obj.AMask: {enc: vsetvliEncoding, immForm: AVSETIVLI},
|
|
|
|
AVSETIVLI & obj.AMask: {enc: vsetivliEncoding},
|
|
|
|
AVSETVL & obj.AMask: {enc: vsetvlEncoding},
|
|
|
|
|
2024-11-24 14:38:33 +11:00
|
|
|
//
|
|
|
|
// Privileged ISA
|
|
|
|
//
|
|
|
|
|
|
|
|
// 3.3.1: Environment Call and Breakpoint
|
|
|
|
AECALL & obj.AMask: {enc: iIIEncoding},
|
|
|
|
AEBREAK & obj.AMask: {enc: iIIEncoding},
|
|
|
|
|
2019-09-08 01:56:26 +10:00
|
|
|
// Escape hatch
|
2024-08-15 02:48:09 +10:00
|
|
|
AWORD & obj.AMask: {enc: rawEncoding},
|
2019-09-08 01:56:26 +10:00
|
|
|
|
|
|
|
// Pseudo-operations
|
2024-08-15 02:48:09 +10:00
|
|
|
obj.AFUNCDATA: {enc: pseudoOpEncoding},
|
|
|
|
obj.APCDATA: {enc: pseudoOpEncoding},
|
|
|
|
obj.ATEXT: {enc: pseudoOpEncoding},
|
|
|
|
obj.ANOP: {enc: pseudoOpEncoding},
|
|
|
|
obj.ADUFFZERO: {enc: pseudoOpEncoding},
|
|
|
|
obj.ADUFFCOPY: {enc: pseudoOpEncoding},
|
|
|
|
obj.APCALIGN: {enc: pseudoOpEncoding},
|
2019-09-08 01:56:26 +10:00
|
|
|
}
|
|
|
|
|
2024-08-15 02:48:09 +10:00
|
|
|
// instructionDataForAs returns the instruction data for an obj.As.
|
|
|
|
func instructionDataForAs(as obj.As) (*instructionData, error) {
|
2019-12-19 02:09:45 +11:00
|
|
|
if base := as &^ obj.AMask; base != obj.ABaseRISCV && base != 0 {
|
2024-08-15 02:48:09 +10:00
|
|
|
return nil, fmt.Errorf("%v is not a RISC-V instruction", as)
|
2019-09-08 01:56:26 +10:00
|
|
|
}
|
2019-12-19 02:09:45 +11:00
|
|
|
asi := as & obj.AMask
|
2024-08-15 02:48:09 +10:00
|
|
|
if int(asi) >= len(instructions) {
|
|
|
|
return nil, fmt.Errorf("bad RISC-V instruction %v", as)
|
|
|
|
}
|
|
|
|
return &instructions[asi], nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// encodingForAs returns the encoding for an obj.As.
|
|
|
|
func encodingForAs(as obj.As) (*encoding, error) {
|
|
|
|
insData, err := instructionDataForAs(as)
|
|
|
|
if err != nil {
|
|
|
|
return &badEncoding, err
|
2019-09-08 01:56:26 +10:00
|
|
|
}
|
2024-08-15 02:48:09 +10:00
|
|
|
if insData.enc.validate == nil {
|
|
|
|
return &badEncoding, fmt.Errorf("no encoding for instruction %s", as)
|
2019-09-08 01:56:26 +10:00
|
|
|
}
|
2024-08-15 02:48:09 +10:00
|
|
|
return &insData.enc, nil
|
2019-12-19 02:09:45 +11:00
|
|
|
}
|
|
|
|
|
|
|
|
// instruction is the unpacked form of a single RISC-V machine instruction:
// the assembler opcode together with the register operands, immediate and
// function fields needed to encode it into a machine code word.
type instruction struct {
	p      *obj.Prog // Prog that instruction is for
	as     obj.As    // Assembler opcode
	rd     uint32    // Destination register
	rs1    uint32    // Source register 1
	rs2    uint32    // Source register 2
	rs3    uint32    // Source register 3
	imm    int64     // Immediate
	funct3 uint32    // Function 3
	funct7 uint32    // Function 7 (or Function 2)
}
|
|
|
|
|
|
|
|
func (ins *instruction) String() string {
|
|
|
|
if ins.p == nil {
|
|
|
|
return ins.as.String()
|
|
|
|
}
|
|
|
|
var suffix string
|
|
|
|
if ins.p.As != ins.as {
|
|
|
|
suffix = fmt.Sprintf(" (%v)", ins.as)
|
|
|
|
}
|
|
|
|
return fmt.Sprintf("%v%v", ins.p, suffix)
|
2019-12-19 02:09:45 +11:00
|
|
|
}
|
|
|
|
|
|
|
|
func (ins *instruction) encode() (uint32, error) {
|
|
|
|
enc, err := encodingForAs(ins.as)
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
2023-08-28 00:13:34 +10:00
|
|
|
if enc.length <= 0 {
|
|
|
|
return 0, fmt.Errorf("%v: encoding called for a pseudo instruction", ins.as)
|
2019-12-19 02:09:45 +11:00
|
|
|
}
|
2023-08-28 00:13:34 +10:00
|
|
|
return enc.encode(ins), nil
|
2019-12-19 02:09:45 +11:00
|
|
|
}
|
|
|
|
|
|
|
|
func (ins *instruction) length() int {
|
|
|
|
enc, err := encodingForAs(ins.as)
|
|
|
|
if err != nil {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
return enc.length
|
|
|
|
}
|
|
|
|
|
|
|
|
func (ins *instruction) validate(ctxt *obj.Link) {
|
|
|
|
enc, err := encodingForAs(ins.as)
|
|
|
|
if err != nil {
|
|
|
|
ctxt.Diag(err.Error())
|
|
|
|
return
|
|
|
|
}
|
|
|
|
enc.validate(ctxt, ins)
|
|
|
|
}
|
|
|
|
|
2021-08-18 18:14:52 +00:00
|
|
|
func (ins *instruction) usesRegTmp() bool {
|
|
|
|
return ins.rd == REG_TMP || ins.rs1 == REG_TMP || ins.rs2 == REG_TMP
|
|
|
|
}
|
|
|
|
|
2021-08-24 01:01:06 +10:00
|
|
|
// instructionForProg returns the default *obj.Prog to instruction mapping.
|
|
|
|
func instructionForProg(p *obj.Prog) *instruction {
|
2019-12-19 02:09:45 +11:00
|
|
|
ins := &instruction{
|
|
|
|
as: p.As,
|
|
|
|
rd: uint32(p.To.Reg),
|
|
|
|
rs1: uint32(p.Reg),
|
|
|
|
rs2: uint32(p.From.Reg),
|
|
|
|
imm: p.From.Offset,
|
|
|
|
}
|
2021-02-17 15:00:34 +00:00
|
|
|
if len(p.RestArgs) == 1 {
|
|
|
|
ins.rs3 = uint32(p.RestArgs[0].Reg)
|
|
|
|
}
|
2021-08-24 01:01:06 +10:00
|
|
|
return ins
|
|
|
|
}
|
2021-02-17 15:00:34 +00:00
|
|
|
|
2023-03-31 00:00:19 +08:00
|
|
|
// instructionsForOpImmediate returns the machine instructions for an immediate
|
2021-08-20 17:04:35 +00:00
|
|
|
// operand. The instruction is specified by as and the source register is
|
|
|
|
// specified by rs, instead of the obj.Prog.
|
|
|
|
func instructionsForOpImmediate(p *obj.Prog, as obj.As, rs int16) []*instruction {
|
|
|
|
// <opi> $imm, REG, TO
|
|
|
|
ins := instructionForProg(p)
|
2021-09-14 02:21:22 +10:00
|
|
|
ins.as, ins.rs1, ins.rs2 = as, uint32(rs), obj.REG_NONE
|
2021-08-20 17:04:35 +00:00
|
|
|
|
|
|
|
low, high, err := Split32BitImmediate(ins.imm)
|
|
|
|
if err != nil {
|
|
|
|
p.Ctxt.Diag("%v: constant %d too large", p, ins.imm, err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if high == 0 {
|
|
|
|
return []*instruction{ins}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Split into two additions, if possible.
|
2021-10-25 12:18:40 -04:00
|
|
|
// Do not split SP-writing instructions, as otherwise the recorded SP delta may be wrong.
|
|
|
|
if p.Spadj == 0 && ins.as == AADDI && ins.imm >= -(1<<12) && ins.imm < 1<<12-1 {
|
2021-08-20 17:04:35 +00:00
|
|
|
imm0 := ins.imm / 2
|
|
|
|
imm1 := ins.imm - imm0
|
|
|
|
|
|
|
|
// ADDI $(imm/2), REG, TO
|
|
|
|
// ADDI $(imm-imm/2), TO, TO
|
|
|
|
ins.imm = imm0
|
|
|
|
insADDI := &instruction{as: AADDI, rd: ins.rd, rs1: ins.rd, imm: imm1}
|
|
|
|
return []*instruction{ins, insADDI}
|
|
|
|
}
|
|
|
|
|
|
|
|
// LUI $high, TMP
|
2022-09-02 22:26:05 +08:00
|
|
|
// ADDIW $low, TMP, TMP
|
2021-08-20 17:04:35 +00:00
|
|
|
// <op> TMP, REG, TO
|
|
|
|
insLUI := &instruction{as: ALUI, rd: REG_TMP, imm: high}
|
|
|
|
insADDIW := &instruction{as: AADDIW, rd: REG_TMP, rs1: REG_TMP, imm: low}
|
|
|
|
switch ins.as {
|
|
|
|
case AADDI:
|
|
|
|
ins.as = AADD
|
|
|
|
case AANDI:
|
|
|
|
ins.as = AAND
|
|
|
|
case AORI:
|
|
|
|
ins.as = AOR
|
|
|
|
case AXORI:
|
|
|
|
ins.as = AXOR
|
|
|
|
default:
|
|
|
|
p.Ctxt.Diag("unsupported immediate instruction %v for splitting", p)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
ins.rs2 = REG_TMP
|
cmd/internal/obj/riscv: improve code generation for loading of constants
Loading of constants that are 12 bits or smaller is currently performed using a single
ADDIW instruction, while constants between 13 bits and 32 bits are loaded using a
LUI+ADDIW pair.
Instead, use a single ADDI instruction for the 12 bits or smaller case - this
translates to the LI pseudo-instruction, making objdump more readable and giving:
11c7c: fff00293 li t0,-1
11c80: 00000313 li t1,0
Rather than:
11c7c: fff0029b addiw t0,zero,-1
11c80: 0000031b sext.w t1,zero
In the case where a constant exceeds 12 bits, an LUI instruction is required,
however if the lower 12 bits are zero, the ADDIW instruction can be omitted.
The same applies to the case where immediate splitting is performed for other
immediate instructions.
This removes around 900 instructions from the Go binary.
Change-Id: Id6c77774b3b429fa525da018a6926b85df838a2f
Reviewed-on: https://go-review.googlesource.com/c/go/+/344457
Trust: Joel Sing <joel@sing.id.au>
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
2021-08-09 08:14:04 +00:00
|
|
|
if low == 0 {
|
|
|
|
return []*instruction{insLUI, ins}
|
|
|
|
}
|
2021-08-20 17:04:35 +00:00
|
|
|
return []*instruction{insLUI, insADDIW, ins}
|
|
|
|
}
|
|
|
|
|
2021-03-19 14:09:59 +00:00
|
|
|
// instructionsForLoad returns the machine instructions for a load. The load
|
|
|
|
// instruction is specified by as and the base/source register is specified
|
|
|
|
// by rs, instead of the obj.Prog.
|
|
|
|
func instructionsForLoad(p *obj.Prog, as obj.As, rs int16) []*instruction {
|
2021-08-18 18:14:52 +00:00
|
|
|
if p.From.Type != obj.TYPE_MEM {
|
|
|
|
p.Ctxt.Diag("%v requires memory for source", p)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-03-19 14:09:59 +00:00
|
|
|
switch as {
|
2021-08-18 18:14:52 +00:00
|
|
|
case ALD, ALB, ALH, ALW, ALBU, ALHU, ALWU, AFLW, AFLD:
|
|
|
|
default:
|
2021-03-19 14:09:59 +00:00
|
|
|
p.Ctxt.Diag("%v: unknown load instruction %v", p, as)
|
2021-08-18 18:14:52 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// <load> $imm, REG, TO (load $imm+(REG), TO)
|
|
|
|
ins := instructionForProg(p)
|
2021-03-19 14:09:59 +00:00
|
|
|
ins.as, ins.rs1, ins.rs2 = as, uint32(rs), obj.REG_NONE
|
2021-08-18 18:14:52 +00:00
|
|
|
ins.imm = p.From.Offset
|
|
|
|
|
|
|
|
low, high, err := Split32BitImmediate(ins.imm)
|
|
|
|
if err != nil {
|
|
|
|
p.Ctxt.Diag("%v: constant %d too large", p, ins.imm)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if high == 0 {
|
|
|
|
return []*instruction{ins}
|
|
|
|
}
|
|
|
|
|
|
|
|
// LUI $high, TMP
|
|
|
|
// ADD TMP, REG, TMP
|
|
|
|
// <load> $low, TMP, TO
|
|
|
|
insLUI := &instruction{as: ALUI, rd: REG_TMP, imm: high}
|
|
|
|
insADD := &instruction{as: AADD, rd: REG_TMP, rs1: REG_TMP, rs2: ins.rs1}
|
|
|
|
ins.rs1, ins.imm = REG_TMP, low
|
|
|
|
|
|
|
|
return []*instruction{insLUI, insADD, ins}
|
|
|
|
}
|
|
|
|
|
2021-08-17 18:25:05 +00:00
|
|
|
// instructionsForStore returns the machine instructions for a store. The store
|
|
|
|
// instruction is specified by as and the target/source register is specified
|
|
|
|
// by rd, instead of the obj.Prog.
|
|
|
|
func instructionsForStore(p *obj.Prog, as obj.As, rd int16) []*instruction {
|
2021-08-19 05:33:01 +00:00
|
|
|
if p.To.Type != obj.TYPE_MEM {
|
|
|
|
p.Ctxt.Diag("%v requires memory for destination", p)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-08-17 18:25:05 +00:00
|
|
|
switch as {
|
2021-08-19 05:33:01 +00:00
|
|
|
case ASW, ASH, ASB, ASD, AFSW, AFSD:
|
|
|
|
default:
|
2021-08-17 18:25:05 +00:00
|
|
|
p.Ctxt.Diag("%v: unknown store instruction %v", p, as)
|
2021-08-19 05:33:01 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// <store> $imm, REG, TO (store $imm+(TO), REG)
|
|
|
|
ins := instructionForProg(p)
|
2021-08-17 18:25:05 +00:00
|
|
|
ins.as, ins.rd, ins.rs1, ins.rs2 = as, uint32(rd), uint32(p.From.Reg), obj.REG_NONE
|
2021-08-19 05:33:01 +00:00
|
|
|
ins.imm = p.To.Offset
|
|
|
|
|
|
|
|
low, high, err := Split32BitImmediate(ins.imm)
|
|
|
|
if err != nil {
|
|
|
|
p.Ctxt.Diag("%v: constant %d too large", p, ins.imm)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if high == 0 {
|
|
|
|
return []*instruction{ins}
|
|
|
|
}
|
|
|
|
|
|
|
|
// LUI $high, TMP
|
|
|
|
// ADD TMP, TO, TMP
|
|
|
|
// <store> $low, REG, TMP
|
|
|
|
insLUI := &instruction{as: ALUI, rd: REG_TMP, imm: high}
|
|
|
|
insADD := &instruction{as: AADD, rd: REG_TMP, rs1: REG_TMP, rs2: ins.rd}
|
|
|
|
ins.rd, ins.imm = REG_TMP, low
|
|
|
|
|
|
|
|
return []*instruction{insLUI, insADD, ins}
|
|
|
|
}
|
|
|
|
|
2022-09-16 02:29:12 +10:00
|
|
|
func instructionsForTLS(p *obj.Prog, ins *instruction) []*instruction {
|
|
|
|
insAddTP := &instruction{as: AADD, rd: REG_TMP, rs1: REG_TMP, rs2: REG_TP}
|
|
|
|
|
|
|
|
var inss []*instruction
|
|
|
|
if p.Ctxt.Flag_shared {
|
|
|
|
// TLS initial-exec mode - load TLS offset from GOT, add the thread pointer
|
|
|
|
// register, then load from or store to the resulting memory location.
|
|
|
|
insAUIPC := &instruction{as: AAUIPC, rd: REG_TMP}
|
|
|
|
insLoadTLSOffset := &instruction{as: ALD, rd: REG_TMP, rs1: REG_TMP}
|
|
|
|
inss = []*instruction{insAUIPC, insLoadTLSOffset, insAddTP, ins}
|
|
|
|
} else {
|
|
|
|
// TLS local-exec mode - load upper TLS offset, add the lower TLS offset,
|
|
|
|
// add the thread pointer register, then load from or store to the resulting
|
|
|
|
// memory location. Note that this differs from the suggested three
|
|
|
|
// instruction sequence, as the Go linker does not currently have an
|
|
|
|
// easy way to handle relocation across 12 bytes of machine code.
|
|
|
|
insLUI := &instruction{as: ALUI, rd: REG_TMP}
|
|
|
|
insADDIW := &instruction{as: AADDIW, rd: REG_TMP, rs1: REG_TMP}
|
|
|
|
inss = []*instruction{insLUI, insADDIW, insAddTP, ins}
|
|
|
|
}
|
|
|
|
return inss
|
|
|
|
}
|
|
|
|
|
|
|
|
func instructionsForTLSLoad(p *obj.Prog) []*instruction {
|
|
|
|
if p.From.Sym.Type != objabi.STLSBSS {
|
|
|
|
p.Ctxt.Diag("%v: %v is not a TLS symbol", p, p.From.Sym)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
ins := instructionForProg(p)
|
|
|
|
ins.as, ins.rs1, ins.rs2, ins.imm = movToLoad(p.As), REG_TMP, obj.REG_NONE, 0
|
|
|
|
|
|
|
|
return instructionsForTLS(p, ins)
|
|
|
|
}
|
|
|
|
|
|
|
|
func instructionsForTLSStore(p *obj.Prog) []*instruction {
|
|
|
|
if p.To.Sym.Type != objabi.STLSBSS {
|
|
|
|
p.Ctxt.Diag("%v: %v is not a TLS symbol", p, p.To.Sym)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
ins := instructionForProg(p)
|
|
|
|
ins.as, ins.rd, ins.rs1, ins.rs2, ins.imm = movToStore(p.As), REG_TMP, uint32(p.From.Reg), obj.REG_NONE, 0
|
|
|
|
|
|
|
|
return instructionsForTLS(p, ins)
|
|
|
|
}
|
|
|
|
|
2021-08-24 01:01:06 +10:00
|
|
|
// instructionsForMOV returns the machine instructions for an *obj.Prog that
|
|
|
|
// uses a MOV pseudo-instruction.
|
|
|
|
func instructionsForMOV(p *obj.Prog) []*instruction {
|
|
|
|
ins := instructionForProg(p)
|
2019-12-19 02:09:45 +11:00
|
|
|
inss := []*instruction{ins}
|
|
|
|
|
2022-03-31 14:34:32 +00:00
|
|
|
if p.Reg != 0 {
|
|
|
|
p.Ctxt.Diag("%v: illegal MOV instruction", p)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-08-24 01:01:06 +10:00
|
|
|
switch {
|
2021-08-17 18:45:11 +00:00
|
|
|
case p.From.Type == obj.TYPE_CONST && p.To.Type == obj.TYPE_REG:
|
|
|
|
// Handle constant to register moves.
|
2021-08-21 09:06:25 +00:00
|
|
|
if p.As != AMOV {
|
|
|
|
p.Ctxt.Diag("%v: unsupported constant load", p)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-11-08 15:39:27 +08:00
|
|
|
// For constants larger than 32 bits in size that have trailing zeros,
|
|
|
|
// use the value with the trailing zeros removed and then use a SLLI
|
|
|
|
// instruction to restore the original constant.
|
|
|
|
// For example:
|
|
|
|
// MOV $0x8000000000000000, X10
|
|
|
|
// becomes
|
|
|
|
// MOV $1, X10
|
|
|
|
// SLLI $63, X10, X10
|
|
|
|
var insSLLI *instruction
|
2023-08-28 01:22:02 +10:00
|
|
|
if err := immIFits(ins.imm, 32); err != nil {
|
2022-11-08 15:39:27 +08:00
|
|
|
ctz := bits.TrailingZeros64(uint64(ins.imm))
|
2023-08-28 01:22:02 +10:00
|
|
|
if err := immIFits(ins.imm>>ctz, 32); err == nil {
|
2022-11-08 15:39:27 +08:00
|
|
|
ins.imm = ins.imm >> ctz
|
|
|
|
insSLLI = &instruction{as: ASLLI, rd: ins.rd, rs1: ins.rd, imm: int64(ctz)}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-08-17 18:45:11 +00:00
|
|
|
low, high, err := Split32BitImmediate(ins.imm)
|
|
|
|
if err != nil {
|
|
|
|
p.Ctxt.Diag("%v: constant %d too large: %v", p, ins.imm, err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// MOV $c, R -> ADD $c, ZERO, R
|
cmd/internal/obj/riscv: improve code generation for loading of constants
Loading of constants that are 12 bits or smaller is currently performed using a single
ADDIW instruction, while constants between 13 bits and 32 bits are loaded using a
LUI+ADDIW pair.
Instead, use a single ADDI instruction for the 12 bits or smaller case - this
translates to the LI pseudo-instruction, making objdump more readable and giving:
11c7c: fff00293 li t0,-1
11c80: 00000313 li t1,0
Rather than:
11c7c: fff0029b addiw t0,zero,-1
11c80: 0000031b sext.w t1,zero
In the case where a constant exceeds 12 bits, an LUI instruction is required,
however if the lower 12 bits are zero, the ADDIW instruction can be omitted.
The same applies to the case where immediate splitting is performed for other
immediate instructions.
This removes around 900 instructions from the Go binary.
Change-Id: Id6c77774b3b429fa525da018a6926b85df838a2f
Reviewed-on: https://go-review.googlesource.com/c/go/+/344457
Trust: Joel Sing <joel@sing.id.au>
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
2021-08-09 08:14:04 +00:00
|
|
|
ins.as, ins.rs1, ins.rs2, ins.imm = AADDI, REG_ZERO, obj.REG_NONE, low
|
2021-08-17 18:45:11 +00:00
|
|
|
|
|
|
|
// LUI is only necessary if the constant does not fit in 12 bits.
|
2023-08-28 02:14:22 +10:00
|
|
|
if high != 0 {
|
|
|
|
// LUI top20bits(c), R
|
|
|
|
// ADD bottom12bits(c), R, R
|
|
|
|
insLUI := &instruction{as: ALUI, rd: ins.rd, imm: high}
|
|
|
|
inss = []*instruction{insLUI}
|
|
|
|
if low != 0 {
|
|
|
|
ins.as, ins.rs1 = AADDIW, ins.rd
|
|
|
|
inss = append(inss, ins)
|
2022-11-08 15:39:27 +08:00
|
|
|
}
|
cmd/internal/obj/riscv: improve code generation for loading of constants
Loading of constants that are 12 bits or smaller is currently performed using a single
ADDIW instruction, while constants between 13 bits and 32 bits are loaded using a
LUI+ADDIW pair.
Instead, use a single ADDI instruction for the 12 bits or smaller case - this
translates to the LI pseudo-instruction, making objdump more readable and giving:
11c7c: fff00293 li t0,-1
11c80: 00000313 li t1,0
Rather than:
11c7c: fff0029b addiw t0,zero,-1
11c80: 0000031b sext.w t1,zero
In the case where a constant exceeds 12 bits, an LUI instruction is required,
however if the lower 12 bits are zero, the ADDIW instruction can be omitted.
The same applies to the case where immediate splitting is performed for other
immediate instructions.
This removes around 900 instructions from the Go binary.
Change-Id: Id6c77774b3b429fa525da018a6926b85df838a2f
Reviewed-on: https://go-review.googlesource.com/c/go/+/344457
Trust: Joel Sing <joel@sing.id.au>
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
2021-08-09 08:14:04 +00:00
|
|
|
}
|
2022-11-08 15:39:27 +08:00
|
|
|
if insSLLI != nil {
|
|
|
|
inss = append(inss, insSLLI)
|
|
|
|
}
|
2021-08-17 18:45:11 +00:00
|
|
|
|
2021-08-21 09:06:25 +00:00
|
|
|
case p.From.Type == obj.TYPE_CONST && p.To.Type != obj.TYPE_REG:
|
|
|
|
p.Ctxt.Diag("%v: constant load must target register", p)
|
|
|
|
return nil
|
|
|
|
|
2021-08-24 01:01:06 +10:00
|
|
|
case p.From.Type == obj.TYPE_REG && p.To.Type == obj.TYPE_REG:
|
2020-10-24 03:53:53 +11:00
|
|
|
// Handle register to register moves.
|
|
|
|
switch p.As {
|
|
|
|
case AMOV: // MOV Ra, Rb -> ADDI $0, Ra, Rb
|
|
|
|
ins.as, ins.rs1, ins.rs2, ins.imm = AADDI, uint32(p.From.Reg), obj.REG_NONE, 0
|
|
|
|
case AMOVW: // MOVW Ra, Rb -> ADDIW $0, Ra, Rb
|
|
|
|
ins.as, ins.rs1, ins.rs2, ins.imm = AADDIW, uint32(p.From.Reg), obj.REG_NONE, 0
|
|
|
|
case AMOVBU: // MOVBU Ra, Rb -> ANDI $255, Ra, Rb
|
|
|
|
ins.as, ins.rs1, ins.rs2, ins.imm = AANDI, uint32(p.From.Reg), obj.REG_NONE, 255
|
|
|
|
case AMOVF: // MOVF Ra, Rb -> FSGNJS Ra, Ra, Rb
|
|
|
|
ins.as, ins.rs1 = AFSGNJS, uint32(p.From.Reg)
|
|
|
|
case AMOVD: // MOVD Ra, Rb -> FSGNJD Ra, Ra, Rb
|
|
|
|
ins.as, ins.rs1 = AFSGNJD, uint32(p.From.Reg)
|
|
|
|
case AMOVB, AMOVH:
|
2023-12-09 19:18:00 +11:00
|
|
|
if buildcfg.GORISCV64 >= 22 {
|
|
|
|
// Use SEXTB or SEXTH to extend.
|
|
|
|
ins.as, ins.rs1, ins.rs2 = ASEXTB, uint32(p.From.Reg), obj.REG_NONE
|
|
|
|
if p.As == AMOVH {
|
|
|
|
ins.as = ASEXTH
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Use SLLI/SRAI sequence to extend.
|
|
|
|
ins.as, ins.rs1, ins.rs2 = ASLLI, uint32(p.From.Reg), obj.REG_NONE
|
|
|
|
if p.As == AMOVB {
|
|
|
|
ins.imm = 56
|
|
|
|
} else if p.As == AMOVH {
|
|
|
|
ins.imm = 48
|
|
|
|
}
|
|
|
|
ins2 := &instruction{as: ASRAI, rd: ins.rd, rs1: ins.rd, imm: ins.imm}
|
|
|
|
inss = append(inss, ins2)
|
2020-10-24 03:53:53 +11:00
|
|
|
}
|
|
|
|
case AMOVHU, AMOVWU:
|
2023-12-09 19:18:00 +11:00
|
|
|
if buildcfg.GORISCV64 >= 22 {
|
|
|
|
// Use ZEXTH or ADDUW to extend.
|
|
|
|
ins.as, ins.rs1, ins.rs2, ins.imm = AZEXTH, uint32(p.From.Reg), obj.REG_NONE, 0
|
|
|
|
if p.As == AMOVWU {
|
|
|
|
ins.as, ins.rs2 = AADDUW, REG_ZERO
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Use SLLI/SRLI sequence to extend.
|
|
|
|
ins.as, ins.rs1, ins.rs2 = ASLLI, uint32(p.From.Reg), obj.REG_NONE
|
|
|
|
if p.As == AMOVHU {
|
|
|
|
ins.imm = 48
|
|
|
|
} else if p.As == AMOVWU {
|
|
|
|
ins.imm = 32
|
|
|
|
}
|
|
|
|
ins2 := &instruction{as: ASRLI, rd: ins.rd, rs1: ins.rd, imm: ins.imm}
|
|
|
|
inss = append(inss, ins2)
|
2020-10-24 03:53:53 +11:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-19 14:09:59 +00:00
|
|
|
case p.From.Type == obj.TYPE_MEM && p.To.Type == obj.TYPE_REG:
|
|
|
|
// Memory to register loads.
|
|
|
|
switch p.From.Name {
|
|
|
|
case obj.NAME_AUTO, obj.NAME_PARAM, obj.NAME_NONE:
|
|
|
|
// MOV c(Rs), Rd -> L $c, Rs, Rd
|
|
|
|
inss = instructionsForLoad(p, movToLoad(p.As), addrToReg(p.From))
|
|
|
|
|
2024-09-12 20:03:59 +08:00
|
|
|
case obj.NAME_EXTERN, obj.NAME_STATIC, obj.NAME_GOTREF:
|
2022-09-16 02:29:12 +10:00
|
|
|
if p.From.Sym.Type == objabi.STLSBSS {
|
|
|
|
return instructionsForTLSLoad(p)
|
|
|
|
}
|
|
|
|
|
2021-08-20 17:04:35 +00:00
|
|
|
// Note that the values for $off_hi and $off_lo are currently
|
2024-07-02 00:31:53 +10:00
|
|
|
// zero and will be assigned during relocation. If the destination
|
|
|
|
// is an integer register then we can use the same register for the
|
|
|
|
// address computation, otherwise we need to use the temporary register.
|
2021-08-20 17:04:35 +00:00
|
|
|
//
|
2021-03-19 14:09:59 +00:00
|
|
|
// AUIPC $off_hi, Rd
|
|
|
|
// L $off_lo, Rd, Rd
|
2024-07-02 00:31:53 +10:00
|
|
|
//
|
|
|
|
addrReg := ins.rd
|
|
|
|
if addrReg < REG_X0 || addrReg > REG_X31 {
|
|
|
|
addrReg = REG_TMP
|
|
|
|
}
|
|
|
|
insAUIPC := &instruction{as: AAUIPC, rd: addrReg}
|
|
|
|
ins.as, ins.rs1, ins.rs2, ins.imm = movToLoad(p.As), addrReg, obj.REG_NONE, 0
|
2021-03-19 14:09:59 +00:00
|
|
|
inss = []*instruction{insAUIPC, ins}
|
2021-08-21 09:06:25 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
p.Ctxt.Diag("unsupported name %d for %v", p.From.Name, p)
|
|
|
|
return nil
|
2021-03-19 14:09:59 +00:00
|
|
|
}
|
|
|
|
|
2021-08-17 18:25:05 +00:00
|
|
|
case p.From.Type == obj.TYPE_REG && p.To.Type == obj.TYPE_MEM:
|
|
|
|
// Register to memory stores.
|
|
|
|
switch p.As {
|
|
|
|
case AMOVBU, AMOVHU, AMOVWU:
|
2021-08-21 09:06:25 +00:00
|
|
|
p.Ctxt.Diag("%v: unsupported unsigned store", p)
|
2021-08-17 18:25:05 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
switch p.To.Name {
|
|
|
|
case obj.NAME_AUTO, obj.NAME_PARAM, obj.NAME_NONE:
|
|
|
|
// MOV Rs, c(Rd) -> S $c, Rs, Rd
|
|
|
|
inss = instructionsForStore(p, movToStore(p.As), addrToReg(p.To))
|
|
|
|
|
|
|
|
case obj.NAME_EXTERN, obj.NAME_STATIC:
|
2022-09-16 02:29:12 +10:00
|
|
|
if p.To.Sym.Type == objabi.STLSBSS {
|
|
|
|
return instructionsForTLSStore(p)
|
|
|
|
}
|
|
|
|
|
2021-08-20 17:04:35 +00:00
|
|
|
// Note that the values for $off_hi and $off_lo are currently
|
|
|
|
// zero and will be assigned during relocation.
|
|
|
|
//
|
2021-08-17 18:25:05 +00:00
|
|
|
// AUIPC $off_hi, Rtmp
|
|
|
|
// S $off_lo, Rtmp, Rd
|
|
|
|
insAUIPC := &instruction{as: AAUIPC, rd: REG_TMP}
|
|
|
|
ins.as, ins.rd, ins.rs1, ins.rs2, ins.imm = movToStore(p.As), REG_TMP, uint32(p.From.Reg), obj.REG_NONE, 0
|
|
|
|
inss = []*instruction{insAUIPC, ins}
|
2021-08-21 09:06:25 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
p.Ctxt.Diag("unsupported name %d for %v", p.From.Name, p)
|
|
|
|
return nil
|
2021-08-17 18:25:05 +00:00
|
|
|
}
|
|
|
|
|
2021-08-20 17:04:35 +00:00
|
|
|
case p.From.Type == obj.TYPE_ADDR && p.To.Type == obj.TYPE_REG:
|
|
|
|
// MOV $sym+off(SP/SB), R
|
2021-08-21 09:06:25 +00:00
|
|
|
if p.As != AMOV {
|
|
|
|
p.Ctxt.Diag("%v: unsupported address load", p)
|
|
|
|
return nil
|
|
|
|
}
|
2021-08-20 17:04:35 +00:00
|
|
|
switch p.From.Name {
|
|
|
|
case obj.NAME_AUTO, obj.NAME_PARAM, obj.NAME_NONE:
|
|
|
|
inss = instructionsForOpImmediate(p, AADDI, addrToReg(p.From))
|
|
|
|
|
|
|
|
case obj.NAME_EXTERN, obj.NAME_STATIC:
|
|
|
|
// Note that the values for $off_hi and $off_lo are currently
|
|
|
|
// zero and will be assigned during relocation.
|
|
|
|
//
|
|
|
|
// AUIPC $off_hi, R
|
|
|
|
// ADDI $off_lo, R
|
|
|
|
insAUIPC := &instruction{as: AAUIPC, rd: ins.rd}
|
|
|
|
ins.as, ins.rs1, ins.rs2, ins.imm = AADDI, ins.rd, obj.REG_NONE, 0
|
|
|
|
inss = []*instruction{insAUIPC, ins}
|
|
|
|
|
2021-08-21 09:06:25 +00:00
|
|
|
default:
|
|
|
|
p.Ctxt.Diag("unsupported name %d for %v", p.From.Name, p)
|
2021-08-24 01:01:06 +10:00
|
|
|
return nil
|
|
|
|
}
|
2021-08-21 09:06:25 +00:00
|
|
|
|
|
|
|
case p.From.Type == obj.TYPE_ADDR && p.To.Type != obj.TYPE_REG:
|
|
|
|
p.Ctxt.Diag("%v: address load must target register", p)
|
|
|
|
return nil
|
|
|
|
|
|
|
|
default:
|
|
|
|
p.Ctxt.Diag("%v: unsupported MOV", p)
|
|
|
|
return nil
|
2021-08-24 01:01:06 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
return inss
|
|
|
|
}
|
|
|
|
|
cmd/compile,cmd/internal/obj: provide rotation pseudo-instructions for riscv64
Provide and use rotation pseudo-instructions for riscv64. The RISC-V bitmanip
extension adds support for hardware rotation instructions in the form of ROL,
ROLW, ROR, RORI, RORIW and RORW. These are easily implemented in the assembler
as pseudo-instructions for CPUs that do not support the bitmanip extension.
This approach provides a number of advantages, including reducing the rewrite
rules needed in the compiler, simplifying codegen tests and most importantly,
allowing these instructions to be used in assembly (for example, riscv64
optimised versions of SHA-256 and SHA-512). When bitmanip support is added,
these instruction sequences can simply be replaced with a single instruction
if permitted by the GORISCV64 profile.
Change-Id: Ia23402e1a82f211ac760690deb063386056ae1fa
Reviewed-on: https://go-review.googlesource.com/c/go/+/565015
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Reviewed-by: M Zhuo <mengzhuo1203@gmail.com>
Reviewed-by: Carlos Amedee <carlos@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Run-TryBot: Joel Sing <joel@sing.id.au>
2024-02-08 13:54:10 +11:00
|
|
|
// instructionsForRotate returns the machine instructions for a bitwise rotation.
func instructionsForRotate(p *obj.Prog, ins *instruction) []*instruction {
	if buildcfg.GORISCV64 >= 22 {
		// Rotation instructions are supported natively.
		return []*instruction{ins}
	}

	switch ins.as {
	case AROL, AROLW, AROR, ARORW:
		// Synthesize register rotations from shifts:
		// ROL -> OR (SLL x y) (SRL x (NEG y))
		// ROR -> OR (SRL x y) (SLL x (NEG y))
		sllOp, srlOp := ASLL, ASRL
		if ins.as == AROLW || ins.as == ARORW {
			// Word variants use the 32-bit shift instructions.
			sllOp, srlOp = ASLLW, ASRLW
		}
		shift1, shift2 := sllOp, srlOp
		if ins.as == AROR || ins.as == ARORW {
			// Right rotation swaps the roles of the two shifts.
			shift1, shift2 = shift2, shift1
		}
		return []*instruction{
			// Negate the shift amount (SUB from zero) into TMP.
			&instruction{as: ASUB, rs1: REG_ZERO, rs2: ins.rs2, rd: REG_TMP},
			// Shift by the negated amount into TMP.
			&instruction{as: shift2, rs1: ins.rs1, rs2: REG_TMP, rd: REG_TMP},
			// Shift by the original amount into the destination.
			&instruction{as: shift1, rs1: ins.rs1, rs2: ins.rs2, rd: ins.rd},
			// Combine the two halves of the rotation.
			&instruction{as: AOR, rs1: REG_TMP, rs2: ins.rd, rd: ins.rd},
		}

	case ARORI, ARORIW:
		// Synthesize immediate right rotations from immediate shifts:
		// ROR -> OR (SLLI -x y) (SRLI x y)
		sllOp, srlOp := ASLLI, ASRLI
		// The left-shift amount is the negated immediate, masked to the
		// register width (63 for 64-bit, 31 for the word variant).
		sllImm := int64(int8(-ins.imm) & 63)
		if ins.as == ARORIW {
			sllOp, srlOp = ASLLIW, ASRLIW
			sllImm = int64(int8(-ins.imm) & 31)
		}
		return []*instruction{
			&instruction{as: srlOp, rs1: ins.rs1, rd: REG_TMP, imm: ins.imm},
			&instruction{as: sllOp, rs1: ins.rs1, rd: ins.rd, imm: sllImm},
			&instruction{as: AOR, rs1: REG_TMP, rs2: ins.rd, rd: ins.rd},
		}

	default:
		p.Ctxt.Diag("%v: unknown rotation", p)
		return nil
	}
}
|
|
|
|
|
2021-08-24 01:01:06 +10:00
|
|
|
// instructionsForProg returns the machine instructions for an *obj.Prog.
//
// It translates a single Prog into one or more concrete RISC-V
// instructions: pseudo-instructions (NEG, NOT, SEQZ, branches with
// reversed operands, FNE, rotations, etc.) are rewritten in terms of
// real opcodes, and multi-instruction expansions (MOV, loads, stores,
// immediate ops) are delegated to their dedicated helpers.
func instructionsForProg(p *obj.Prog) []*instruction {
	ins := instructionForProg(p)
	inss := []*instruction{ins}

	// Validate the number of additional (Rest) arguments up front:
	// VSETVLI/VSETIVLI carry exactly four special operands describing
	// the vector type; everything else allows at most one extra source.
	if ins.as == AVSETVLI || ins.as == AVSETIVLI {
		if len(p.RestArgs) != 4 {
			p.Ctxt.Diag("incorrect number of arguments for instruction")
			return nil
		}
	} else if len(p.RestArgs) > 1 {
		p.Ctxt.Diag("too many source registers")
		return nil
	}

	switch ins.as {
	case AJAL, AJALR:
		// Jumps take their target from p.To; rd comes from p.From.
		ins.rd, ins.rs1, ins.rs2 = uint32(p.From.Reg), uint32(p.To.Reg), obj.REG_NONE
		ins.imm = p.To.Offset

	case ABEQ, ABEQZ, ABGE, ABGEU, ABGEZ, ABGT, ABGTU, ABGTZ, ABLE, ABLEU, ABLEZ, ABLT, ABLTU, ABLTZ, ABNE, ABNEZ:
		// Normalize branch pseudo-instructions onto the six real branch
		// opcodes (BEQ/BNE/BLT/BLTU/BGE/BGEU), either by comparing
		// against the zero register (the *Z forms) or by swapping the
		// operand order (BGT -> BLT, BLE -> BGE, etc.).
		switch ins.as {
		case ABEQZ:
			ins.as, ins.rs1, ins.rs2 = ABEQ, REG_ZERO, uint32(p.From.Reg)
		case ABGEZ:
			ins.as, ins.rs1, ins.rs2 = ABGE, REG_ZERO, uint32(p.From.Reg)
		case ABGT:
			ins.as, ins.rs1, ins.rs2 = ABLT, uint32(p.From.Reg), uint32(p.Reg)
		case ABGTU:
			ins.as, ins.rs1, ins.rs2 = ABLTU, uint32(p.From.Reg), uint32(p.Reg)
		case ABGTZ:
			ins.as, ins.rs1, ins.rs2 = ABLT, uint32(p.From.Reg), REG_ZERO
		case ABLE:
			ins.as, ins.rs1, ins.rs2 = ABGE, uint32(p.From.Reg), uint32(p.Reg)
		case ABLEU:
			ins.as, ins.rs1, ins.rs2 = ABGEU, uint32(p.From.Reg), uint32(p.Reg)
		case ABLEZ:
			ins.as, ins.rs1, ins.rs2 = ABGE, uint32(p.From.Reg), REG_ZERO
		case ABLTZ:
			ins.as, ins.rs1, ins.rs2 = ABLT, REG_ZERO, uint32(p.From.Reg)
		case ABNEZ:
			ins.as, ins.rs1, ins.rs2 = ABNE, REG_ZERO, uint32(p.From.Reg)
		}
		ins.imm = p.To.Offset

	case AMOV, AMOVB, AMOVH, AMOVW, AMOVBU, AMOVHU, AMOVWU, AMOVF, AMOVD:
		inss = instructionsForMOV(p)

	case ALW, ALWU, ALH, ALHU, ALB, ALBU, ALD, AFLW, AFLD:
		inss = instructionsForLoad(p, ins.as, p.From.Reg)

	case ASW, ASH, ASB, ASD, AFSW, AFSD:
		inss = instructionsForStore(p, ins.as, p.To.Reg)

	case ALRW, ALRD:
		// Set aq to use acquire access ordering
		ins.funct7 = 2
		ins.rs1, ins.rs2 = uint32(p.From.Reg), REG_ZERO

	case AADDI, AANDI, AORI, AXORI:
		inss = instructionsForOpImmediate(p, ins.as, p.Reg)

	case ASCW, ASCD:
		// Set release access ordering
		ins.funct7 = 1
		ins.rd, ins.rs1, ins.rs2 = uint32(p.RegTo2), uint32(p.To.Reg), uint32(p.From.Reg)

	case AAMOSWAPW, AAMOSWAPD, AAMOADDW, AAMOADDD, AAMOANDW, AAMOANDD, AAMOORW, AAMOORD,
		AAMOXORW, AAMOXORD, AAMOMINW, AAMOMIND, AAMOMINUW, AAMOMINUD, AAMOMAXW, AAMOMAXD, AAMOMAXUW, AAMOMAXUD:
		// Set aqrl to use acquire & release access ordering
		ins.funct7 = 3
		ins.rd, ins.rs1, ins.rs2 = uint32(p.RegTo2), uint32(p.To.Reg), uint32(p.From.Reg)

	case AECALL, AEBREAK:
		// The distinguishing function code lives in the CSR field of
		// the encoding table entry.
		insEnc := encode(p.As)
		if p.To.Type == obj.TYPE_NONE {
			ins.rd = REG_ZERO
		}
		ins.rs1 = REG_ZERO
		ins.imm = insEnc.csr

	case ARDCYCLE, ARDTIME, ARDINSTRET:
		// Read-counter pseudo-instructions become CSRRS with rs1=x0.
		ins.as = ACSRRS
		if p.To.Type == obj.TYPE_NONE {
			ins.rd = REG_ZERO
		}
		ins.rs1 = REG_ZERO
		// The immediates are the counter CSR numbers expressed as
		// sign-extended 12-bit values (-1024 == 0xc00 cycle,
		// -1023 == 0xc01 time, -1022 == 0xc02 instret).
		switch p.As {
		case ARDCYCLE:
			ins.imm = -1024
		case ARDTIME:
			ins.imm = -1023
		case ARDINSTRET:
			ins.imm = -1022
		}

	case AFENCE:
		// 0x0ff sets all predecessor/successor ordering bits (iorw,iorw).
		ins.rd, ins.rs1, ins.rs2 = REG_ZERO, REG_ZERO, obj.REG_NONE
		ins.imm = 0x0ff

	case AFCVTWS, AFCVTLS, AFCVTWUS, AFCVTLUS, AFCVTWD, AFCVTLD, AFCVTWUD, AFCVTLUD:
		// Set the default rounding mode in funct3 to round to zero.
		// An explicit rounding-mode suffix (parsed into p.Scond) takes
		// precedence over the default.
		if p.Scond&rmSuffixBit == 0 {
			ins.funct3 = uint32(RM_RTZ)
		} else {
			ins.funct3 = uint32(p.Scond &^ rmSuffixBit)
		}

	case AFNES, AFNED:
		// Replace FNE[SD] with FEQ[SD] and NOT.
		if p.To.Type != obj.TYPE_REG {
			p.Ctxt.Diag("%v needs an integer register output", p)
			return nil
		}
		if ins.as == AFNES {
			ins.as = AFEQS
		} else {
			ins.as = AFEQD
		}
		ins2 := &instruction{
			as:  AXORI, // [bit] xor 1 = not [bit]
			rd:  ins.rd,
			rs1: ins.rd,
			imm: 1,
		}
		inss = append(inss, ins2)

	case AFSQRTS, AFSQRTD:
		// These instructions expect a zero (i.e. float register 0)
		// to be the second input operand.
		ins.rs1 = uint32(p.From.Reg)
		ins.rs2 = REG_F0

	case AFMADDS, AFMSUBS, AFNMADDS, AFNMSUBS,
		AFMADDD, AFMSUBD, AFNMADDD, AFNMSUBD:
		// Swap the first two operands so that the operands are in the same
		// order as they are in the specification: RS1, RS2, RS3, RD.
		ins.rs1, ins.rs2 = ins.rs2, ins.rs1

	case ANEG, ANEGW:
		// NEG rs, rd -> SUB rs, X0, rd
		ins.as = ASUB
		if p.As == ANEGW {
			ins.as = ASUBW
		}
		ins.rs1 = REG_ZERO
		if ins.rd == obj.REG_NONE {
			// Single-operand form negates the register in place.
			ins.rd = ins.rs2
		}

	case ANOT:
		// NOT rs, rd -> XORI $-1, rs, rd
		ins.as = AXORI
		ins.rs1, ins.rs2 = uint32(p.From.Reg), obj.REG_NONE
		if ins.rd == obj.REG_NONE {
			// Single-operand form inverts the register in place.
			ins.rd = ins.rs1
		}
		ins.imm = -1

	case ASEQZ:
		// SEQZ rs, rd -> SLTIU $1, rs, rd
		ins.as = ASLTIU
		ins.rs1, ins.rs2 = uint32(p.From.Reg), obj.REG_NONE
		ins.imm = 1

	case ASNEZ:
		// SNEZ rs, rd -> SLTU rs, x0, rd
		ins.as = ASLTU
		ins.rs1 = REG_ZERO

	case AFABSS:
		// FABSS rs, rd -> FSGNJXS rs, rs, rd
		ins.as = AFSGNJXS
		ins.rs1 = uint32(p.From.Reg)

	case AFABSD:
		// FABSD rs, rd -> FSGNJXD rs, rs, rd
		ins.as = AFSGNJXD
		ins.rs1 = uint32(p.From.Reg)

	case AFNEGS:
		// FNEGS rs, rd -> FSGNJNS rs, rs, rd
		ins.as = AFSGNJNS
		ins.rs1 = uint32(p.From.Reg)

	case AFNEGD:
		// FNEGD rs, rd -> FSGNJND rs, rs, rd
		ins.as = AFSGNJND
		ins.rs1 = uint32(p.From.Reg)

	case AROL, AROLW, AROR, ARORW:
		inss = instructionsForRotate(p, ins)

	case ARORI:
		if ins.imm < 0 || ins.imm > 63 {
			p.Ctxt.Diag("%v: immediate out of range 0 to 63", p)
		}
		inss = instructionsForRotate(p, ins)

	case ARORIW:
		if ins.imm < 0 || ins.imm > 31 {
			p.Ctxt.Diag("%v: immediate out of range 0 to 31", p)
		}
		inss = instructionsForRotate(p, ins)

	case ASLLI, ASRLI, ASRAI:
		if ins.imm < 0 || ins.imm > 63 {
			p.Ctxt.Diag("%v: immediate out of range 0 to 63", p)
		}

	case ASLLIW, ASRLIW, ASRAIW:
		if ins.imm < 0 || ins.imm > 31 {
			p.Ctxt.Diag("%v: immediate out of range 0 to 31", p)
		}

	case ACLZ, ACLZW, ACTZ, ACTZW, ACPOP, ACPOPW, ASEXTB, ASEXTH, AZEXTH:
		ins.rs1, ins.rs2 = uint32(p.From.Reg), obj.REG_NONE

	case AORCB, AREV8:
		ins.rd, ins.rs1, ins.rs2 = uint32(p.To.Reg), uint32(p.From.Reg), obj.REG_NONE

	case AANDN, AORN:
		if buildcfg.GORISCV64 >= 22 {
			// ANDN and ORN instructions are supported natively.
			break
		}
		// ANDN -> (AND (NOT x) y)
		// ORN -> (OR (NOT x) y)
		bitwiseOp, notReg := AAND, ins.rd
		if ins.as == AORN {
			bitwiseOp = AOR
		}
		// If rd aliases rs1, inverting into rd would clobber an input;
		// use the temporary register instead.
		if ins.rs1 == notReg {
			notReg = REG_TMP
		}
		inss = []*instruction{
			&instruction{as: AXORI, rs1: ins.rs2, rs2: obj.REG_NONE, rd: notReg, imm: -1},
			&instruction{as: bitwiseOp, rs1: ins.rs1, rs2: notReg, rd: ins.rd},
		}

	case AXNOR:
		if buildcfg.GORISCV64 >= 22 {
			// XNOR instruction is supported natively.
			break
		}
		// XNOR -> (NOT (XOR x y))
		ins.as = AXOR
		inss = append(inss, &instruction{as: AXORI, rs1: ins.rd, rs2: obj.REG_NONE, rd: ins.rd, imm: -1})

	case AVSETVLI, AVSETIVLI:
		// The four RestArgs (element width, group multiplier, tail and
		// mask policy) are packed into the vtype immediate.
		ins.rs1, ins.rs2 = ins.rs2, obj.REG_NONE
		vtype, err := EncodeVectorType(p.RestArgs[0].Offset, p.RestArgs[1].Offset, p.RestArgs[2].Offset, p.RestArgs[3].Offset)
		if err != nil {
			p.Ctxt.Diag("%v: %v", p, err)
		}
		ins.imm = int64(vtype)
		if ins.as == AVSETIVLI {
			// VSETIVLI carries its AVL as a 5-bit immediate in rs1.
			if p.From.Type != obj.TYPE_CONST {
				p.Ctxt.Diag("%v: expected immediate value", p)
			}
			ins.rs1 = uint32(p.From.Offset)
		}
	}

	// Associate every generated instruction with the originating Prog
	// so diagnostics and relocations can refer back to it.
	for _, ins := range inss {
		ins.p = p
	}

	return inss
}
|
|
|
|
|
|
|
|
// assemble emits machine code.
// It is called at the very end of the assembly process.
//
// For each Prog in the function it records any required relocations on
// cursym, expands the Prog into machine instructions and writes their
// encodings into the symbol's data, then marks unsafe points for
// asynchronous preemption.
func assemble(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
	if ctxt.Retpoline {
		ctxt.Diag("-spectre=ret not supported on riscv")
		ctxt.Retpoline = false // don't keep printing
	}

	// If errors were encountered during preprocess/validation, proceeding
	// and attempting to encode said instructions will only lead to panics.
	if ctxt.Errors > 0 {
		return
	}

	for p := cursym.Func().Text; p != nil; p = p.Link {
		switch p.As {
		case AJAL:
			// Direct calls/jumps get a JAL relocation; the linker may
			// redirect them through a trampoline if out of range.
			if p.Mark&NEED_JAL_RELOC == NEED_JAL_RELOC {
				cursym.AddRel(ctxt, obj.Reloc{
					Type: objabi.R_RISCV_JAL,
					Off:  int32(p.Pc),
					Siz:  4,
					Sym:  p.To.Sym,
					Add:  p.To.Offset,
				})
			}
		case AJALR:
			// Indirect jumps must not carry a symbol target.
			if p.To.Sym != nil {
				ctxt.Diag("%v: unexpected AJALR with to symbol", p)
			}

		case AAUIPC, AMOV, AMOVB, AMOVH, AMOVW, AMOVBU, AMOVHU, AMOVWU, AMOVF, AMOVD:
			// PC-relative address materialization: pick the relocation
			// type and the operand holding the symbol based on the
			// marks set during preprocessing.
			var addr *obj.Addr
			var rt objabi.RelocType
			if p.Mark&NEED_CALL_RELOC == NEED_CALL_RELOC {
				rt = objabi.R_RISCV_CALL
				addr = &p.From
			} else if p.Mark&NEED_PCREL_ITYPE_RELOC == NEED_PCREL_ITYPE_RELOC {
				rt = objabi.R_RISCV_PCREL_ITYPE
				addr = &p.From
			} else if p.Mark&NEED_PCREL_STYPE_RELOC == NEED_PCREL_STYPE_RELOC {
				rt = objabi.R_RISCV_PCREL_STYPE
				addr = &p.To
			} else if p.Mark&NEED_GOT_PCREL_ITYPE_RELOC == NEED_GOT_PCREL_ITYPE_RELOC {
				rt = objabi.R_RISCV_GOT_PCREL_ITYPE
				addr = &p.From
			} else {
				break
			}
			if p.As == AAUIPC {
				// An AUIPC needing a PC-relative relocation forms a
				// pair with the following instruction; the symbol is
				// carried in its first Rest argument.
				if p.Link == nil {
					ctxt.Diag("AUIPC needing PC-relative reloc missing following instruction")
					break
				}
				addr = &p.RestArgs[0].Addr
			}
			if addr.Sym == nil {
				ctxt.Diag("PC-relative relocation missing symbol")
				break
			}
			if addr.Sym.Type == objabi.STLSBSS {
				// Thread-local symbols use a TLS relocation: initial
				// exec when building shared code, local exec otherwise.
				if ctxt.Flag_shared {
					rt = objabi.R_RISCV_TLS_IE
				} else {
					rt = objabi.R_RISCV_TLS_LE
				}
			}

			// Siz is 8: the relocation covers a two-instruction
			// (AUIPC + follow-on) pair.
			cursym.AddRel(ctxt, obj.Reloc{
				Type: rt,
				Off:  int32(p.Pc),
				Siz:  8,
				Sym:  addr.Sym,
				Add:  addr.Offset,
			})

		case obj.APCALIGN:
			// Pad with NOPs up to the requested alignment, then skip
			// normal encoding for this Prog.
			alignedValue := p.From.Offset
			v := pcAlignPadLength(p.Pc, alignedValue)
			offset := p.Pc
			for ; v >= 4; v -= 4 {
				// NOP
				cursym.WriteBytes(ctxt, offset, []byte{0x13, 0, 0, 0})
				offset += 4
			}
			continue
		}

		// Encode and emit the machine instructions for this Prog.
		// Encoding errors have already been diagnosed elsewhere, so
		// failed encodes are simply skipped here.
		offset := p.Pc
		for _, ins := range instructionsForProg(p) {
			if ic, err := ins.encode(); err == nil {
				cursym.WriteInt(ctxt, offset, ins.length(), int64(ic))
				offset += int64(ins.length())
			}
			if ins.usesRegTmp() {
				p.Mark |= USES_REG_TMP
			}
		}
	}

	obj.MarkUnsafePoints(ctxt, cursym.Func().Text, newprog, isUnsafePoint, nil)
}
|
|
|
|
|
|
|
|
func isUnsafePoint(p *obj.Prog) bool {
|
2021-08-18 18:14:52 +00:00
|
|
|
return p.Mark&USES_REG_TMP == USES_REG_TMP || p.From.Reg == REG_TMP || p.To.Reg == REG_TMP || p.Reg == REG_TMP
|
2019-09-08 01:56:26 +10:00
|
|
|
}
|
|
|
|
|
2023-06-20 11:16:56 +08:00
|
|
|
func ParseSuffix(prog *obj.Prog, cond string) (err error) {
|
|
|
|
switch prog.As {
|
|
|
|
case AFCVTWS, AFCVTLS, AFCVTWUS, AFCVTLUS, AFCVTWD, AFCVTLD, AFCVTWUD, AFCVTLUD:
|
|
|
|
prog.Scond, err = rmSuffixEncode(strings.TrimPrefix(cond, "."))
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-09-08 01:56:26 +10:00
|
|
|
// LinkRISCV64 is the obj.LinkArch implementation for riscv64. It wires
// the riscv64 backend's entry points (instruction building, preprocessing,
// assembly, Prog editing) and DWARF register mapping into the toolchain.
var LinkRISCV64 = obj.LinkArch{
	Arch:           sys.ArchRISCV64,
	Init:           buildop,
	Preprocess:     preprocess,
	Assemble:       assemble,
	Progedit:       progedit,
	UnaryDst:       unaryDst,
	DWARFRegisters: RISCV64DWARFRegisters,
}
|