2016-03-21 22:57:26 -07:00
|
|
|
// Copyright 2016 The Go Authors. All rights reserved.
|
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
|
|
package arm
|
|
|
|
|
|
|
|
|
|
import (
|
2016-06-17 10:34:06 -04:00
|
|
|
"fmt"
|
2021-04-15 23:05:49 -04:00
|
|
|
"internal/buildcfg"
|
2016-05-31 11:27:16 -04:00
|
|
|
"math"
|
2018-09-10 08:29:52 +00:00
|
|
|
"math/bits"
|
2016-05-31 11:27:16 -04:00
|
|
|
|
2020-11-19 20:49:23 -05:00
|
|
|
"cmd/compile/internal/base"
|
[dev.regabi] cmd/compile: introduce cmd/compile/internal/ir [generated]
If we want to break up package gc at all, we will need to move
the compiler IR it defines into a separate package that can be
imported by packages that gc itself imports. This CL does that.
It also removes the TINT8 etc aliases so that all code is clear
about which package things are coming from.
This CL is automatically generated by the script below.
See the comments in the script for details about the changes.
[git-generate]
cd src/cmd/compile/internal/gc
rf '
# These names were never fully qualified
# when the types package was added.
# Do it now, to avoid confusion about where they live.
inline -rm \
Txxx \
TINT8 \
TUINT8 \
TINT16 \
TUINT16 \
TINT32 \
TUINT32 \
TINT64 \
TUINT64 \
TINT \
TUINT \
TUINTPTR \
TCOMPLEX64 \
TCOMPLEX128 \
TFLOAT32 \
TFLOAT64 \
TBOOL \
TPTR \
TFUNC \
TSLICE \
TARRAY \
TSTRUCT \
TCHAN \
TMAP \
TINTER \
TFORW \
TANY \
TSTRING \
TUNSAFEPTR \
TIDEAL \
TNIL \
TBLANK \
TFUNCARGS \
TCHANARGS \
NTYPE \
BADWIDTH
# esc.go and escape.go do not need to be split.
# Append esc.go onto the end of escape.go.
mv esc.go escape.go
# Pull out the type format installation from func Main,
# so it can be carried into package ir.
mv Main:/Sconv.=/-0,/TypeLinkSym/-1 InstallTypeFormats
# Names that need to be exported for use by code left in gc.
mv Isconst IsConst
mv asNode AsNode
mv asNodes AsNodes
mv asTypesNode AsTypesNode
mv basicnames BasicTypeNames
mv builtinpkg BuiltinPkg
mv consttype ConstType
mv dumplist DumpList
mv fdumplist FDumpList
mv fmtMode FmtMode
mv goopnames OpNames
mv inspect Inspect
mv inspectList InspectList
mv localpkg LocalPkg
mv nblank BlankNode
mv numImport NumImport
mv opprec OpPrec
mv origSym OrigSym
mv stmtwithinit StmtWithInit
mv dump DumpAny
mv fdump FDumpAny
mv nod Nod
mv nodl NodAt
mv newname NewName
mv newnamel NewNameAt
mv assertRepresents AssertValidTypeForConst
mv represents ValidTypeForConst
mv nodlit NewLiteral
# Types and fields that need to be exported for use by gc.
mv nowritebarrierrecCallSym SymAndPos
mv SymAndPos.lineno SymAndPos.Pos
mv SymAndPos.target SymAndPos.Sym
mv Func.lsym Func.LSym
mv Func.setWBPos Func.SetWBPos
mv Func.numReturns Func.NumReturns
mv Func.numDefers Func.NumDefers
mv Func.nwbrCalls Func.NWBRCalls
# initLSym is an algorithm left behind in gc,
# not an operation on Func itself.
mv Func.initLSym initLSym
mv nodeQueue NodeQueue
mv NodeQueue.empty NodeQueue.Empty
mv NodeQueue.popLeft NodeQueue.PopLeft
mv NodeQueue.pushRight NodeQueue.PushRight
# Many methods on Node are actually algorithms that
# would apply to any node implementation.
# Those become plain functions.
mv Node.funcname FuncName
mv Node.isBlank IsBlank
mv Node.isGoConst isGoConst
mv Node.isNil IsNil
mv Node.isParamHeapCopy isParamHeapCopy
mv Node.isParamStackCopy isParamStackCopy
mv Node.isSimpleName isSimpleName
mv Node.mayBeShared MayBeShared
mv Node.pkgFuncName PkgFuncName
mv Node.backingArrayPtrLen backingArrayPtrLen
mv Node.isterminating isTermNode
mv Node.labeledControl labeledControl
mv Nodes.isterminating isTermNodes
mv Nodes.sigerr fmtSignature
mv Node.MethodName methodExprName
mv Node.MethodFunc methodExprFunc
mv Node.IsMethod IsMethod
# Every node will need to implement RawCopy;
# Copy and SepCopy algorithms will use it.
mv Node.rawcopy Node.RawCopy
mv Node.copy Copy
mv Node.sepcopy SepCopy
# Extract Node.Format method body into func FmtNode,
# but leave method wrapper behind.
mv Node.Format:0,$ FmtNode
# Formatting helpers that will apply to all node implementations.
mv Node.Line Line
mv Node.exprfmt exprFmt
mv Node.jconv jconvFmt
mv Node.modeString modeString
mv Node.nconv nconvFmt
mv Node.nodedump nodeDumpFmt
mv Node.nodefmt nodeFmt
mv Node.stmtfmt stmtFmt
# Constant support needed for code moving to ir.
mv okforconst OKForConst
mv vconv FmtConst
mv int64Val Int64Val
mv float64Val Float64Val
mv Node.ValueInterface ConstValue
# Organize code into files.
mv LocalPkg BuiltinPkg ir.go
mv NumImport InstallTypeFormats Line fmt.go
mv syntax.go Nod NodAt NewNameAt Class Pxxx PragmaFlag Nointerface SymAndPos \
AsNode AsTypesNode BlankNode OrigSym \
Node.SliceBounds Node.SetSliceBounds Op.IsSlice3 \
IsConst Node.Int64Val Node.CanInt64 Node.Uint64Val Node.BoolVal Node.StringVal \
Node.RawCopy SepCopy Copy \
IsNil IsBlank IsMethod \
Node.Typ Node.StorageClass node.go
mv ConstType ConstValue Int64Val Float64Val AssertValidTypeForConst ValidTypeForConst NewLiteral idealType OKForConst val.go
# Move files to new ir package.
mv bitset.go class_string.go dump.go fmt.go \
ir.go node.go op_string.go val.go \
sizeof_test.go cmd/compile/internal/ir
'
: # fix mkbuiltin.go to generate the changes made to builtin.go during rf
sed -i '' '
s/\[T/[types.T/g
s/\*Node/*ir.Node/g
/internal\/types/c \
fmt.Fprintln(&b, `import (`) \
fmt.Fprintln(&b, ` "cmd/compile/internal/ir"`) \
fmt.Fprintln(&b, ` "cmd/compile/internal/types"`) \
fmt.Fprintln(&b, `)`)
' mkbuiltin.go
gofmt -w mkbuiltin.go
: # update cmd/dist to add internal/ir
cd ../../../dist
sed -i '' '/compile.internal.gc/a\
"cmd/compile/internal/ir",
' buildtool.go
gofmt -w buildtool.go
: # update cmd/compile TestFormats
cd ../..
go install std cmd
cd cmd/compile
go test -u || go test # first one updates but fails; second passes
Change-Id: I5f7caf6b20629b51970279e81231a3574d5b51db
Reviewed-on: https://go-review.googlesource.com/c/go/+/273008
Trust: Russ Cox <rsc@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-11-19 21:09:22 -05:00
|
|
|
"cmd/compile/internal/ir"
|
2019-10-29 14:24:43 -04:00
|
|
|
"cmd/compile/internal/logopt"
|
2016-03-21 22:57:26 -07:00
|
|
|
"cmd/compile/internal/ssa"
|
2020-12-23 00:57:10 -05:00
|
|
|
"cmd/compile/internal/ssagen"
|
cmd/compile: change ssa.Type into *types.Type
When package ssa was created, Type was in package gc.
To avoid circular dependencies, we used an interface (ssa.Type)
to represent type information in SSA.
In the Go 1.9 cycle, gri extricated the Type type from package gc.
As a result, we can now use it in package ssa.
Now, instead of package types depending on package ssa,
it is the other way.
This is a more sensible dependency tree,
and helps compiler performance a bit.
Though this is a big CL, most of the changes are
mechanical and uninteresting.
Interesting bits:
* Add new singleton globals to package types for the special
SSA types Memory, Void, Invalid, Flags, and Int128.
* Add two new Types, TSSA for the special types,
and TTUPLE, for SSA tuple types.
ssa.MakeTuple is now types.NewTuple.
* Move type comparison result constants CMPlt, CMPeq, and CMPgt
to package types.
* We had picked the name "types" in our rules for the handy
list of types provided by ssa.Config. That conflicted with
the types package name, so change it to "typ".
* Update the type comparison routine to handle tuples and special
types inline.
* Teach gc/fmt.go how to print special types.
* We can now eliminate ElemTypes in favor of just Elem,
and probably also some other duplicated Type methods
designed to return ssa.Type instead of *types.Type.
* The ssa tests were using their own dummy types,
and they were not particularly careful about types in general.
Of necessity, this CL switches them to use *types.Type;
it does not make them more type-accurate.
Unfortunately, using types.Type means initializing a bit
of the types universe.
This is prime for refactoring and improvement.
This shrinks ssa.Value; it now fits in a smaller size class
on 64 bit systems. This doesn't have a giant impact,
though, since most Values are preallocated in a chunk.
name old alloc/op new alloc/op delta
Template 37.9MB ± 0% 37.7MB ± 0% -0.57% (p=0.000 n=10+8)
Unicode 28.9MB ± 0% 28.7MB ± 0% -0.52% (p=0.000 n=10+10)
GoTypes 110MB ± 0% 109MB ± 0% -0.88% (p=0.000 n=10+10)
Flate 24.7MB ± 0% 24.6MB ± 0% -0.66% (p=0.000 n=10+10)
GoParser 31.1MB ± 0% 30.9MB ± 0% -0.61% (p=0.000 n=10+9)
Reflect 73.9MB ± 0% 73.4MB ± 0% -0.62% (p=0.000 n=10+8)
Tar 25.8MB ± 0% 25.6MB ± 0% -0.77% (p=0.000 n=9+10)
XML 41.2MB ± 0% 40.9MB ± 0% -0.80% (p=0.000 n=10+10)
[Geo mean] 40.5MB 40.3MB -0.68%
name old allocs/op new allocs/op delta
Template 385k ± 0% 386k ± 0% ~ (p=0.356 n=10+9)
Unicode 343k ± 1% 344k ± 0% ~ (p=0.481 n=10+10)
GoTypes 1.16M ± 0% 1.16M ± 0% -0.16% (p=0.004 n=10+10)
Flate 238k ± 1% 238k ± 1% ~ (p=0.853 n=10+10)
GoParser 320k ± 0% 320k ± 0% ~ (p=0.720 n=10+9)
Reflect 957k ± 0% 957k ± 0% ~ (p=0.460 n=10+8)
Tar 252k ± 0% 252k ± 0% ~ (p=0.133 n=9+10)
XML 400k ± 0% 400k ± 0% ~ (p=0.796 n=10+10)
[Geo mean] 428k 428k -0.01%
Removing all the interface calls helps non-trivially with CPU, though.
name old time/op new time/op delta
Template 178ms ± 4% 173ms ± 3% -2.90% (p=0.000 n=94+96)
Unicode 85.0ms ± 4% 83.9ms ± 4% -1.23% (p=0.000 n=96+96)
GoTypes 543ms ± 3% 528ms ± 3% -2.73% (p=0.000 n=98+96)
Flate 116ms ± 3% 113ms ± 4% -2.34% (p=0.000 n=96+99)
GoParser 144ms ± 3% 140ms ± 4% -2.80% (p=0.000 n=99+97)
Reflect 344ms ± 3% 334ms ± 4% -3.02% (p=0.000 n=100+99)
Tar 106ms ± 5% 103ms ± 4% -3.30% (p=0.000 n=98+94)
XML 198ms ± 5% 192ms ± 4% -2.88% (p=0.000 n=92+95)
[Geo mean] 178ms 173ms -2.65%
name old user-time/op new user-time/op delta
Template 229ms ± 5% 224ms ± 5% -2.36% (p=0.000 n=95+99)
Unicode 107ms ± 6% 106ms ± 5% -1.13% (p=0.001 n=93+95)
GoTypes 696ms ± 4% 679ms ± 4% -2.45% (p=0.000 n=97+99)
Flate 137ms ± 4% 134ms ± 5% -2.66% (p=0.000 n=99+96)
GoParser 176ms ± 5% 172ms ± 8% -2.27% (p=0.000 n=98+100)
Reflect 430ms ± 6% 411ms ± 5% -4.46% (p=0.000 n=100+92)
Tar 128ms ±13% 123ms ±13% -4.21% (p=0.000 n=100+100)
XML 239ms ± 6% 233ms ± 6% -2.50% (p=0.000 n=95+97)
[Geo mean] 220ms 213ms -2.76%
Change-Id: I15c7d6268347f8358e75066dfdbd77db24e8d0c1
Reviewed-on: https://go-review.googlesource.com/42145
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
2017-04-28 14:12:28 -07:00
|
|
|
"cmd/compile/internal/types"
|
2016-03-21 22:57:26 -07:00
|
|
|
"cmd/internal/obj"
|
|
|
|
|
"cmd/internal/obj/arm"
|
|
|
|
|
)
|
|
|
|
|
|
2016-05-15 00:12:56 -04:00
|
|
|
// loadByType returns the load instruction of the given type.
|
cmd/compile: change ssa.Type into *types.Type
When package ssa was created, Type was in package gc.
To avoid circular dependencies, we used an interface (ssa.Type)
to represent type information in SSA.
In the Go 1.9 cycle, gri extricated the Type type from package gc.
As a result, we can now use it in package ssa.
Now, instead of package types depending on package ssa,
it is the other way.
This is a more sensible dependency tree,
and helps compiler performance a bit.
Though this is a big CL, most of the changes are
mechanical and uninteresting.
Interesting bits:
* Add new singleton globals to package types for the special
SSA types Memory, Void, Invalid, Flags, and Int128.
* Add two new Types, TSSA for the special types,
and TTUPLE, for SSA tuple types.
ssa.MakeTuple is now types.NewTuple.
* Move type comparison result constants CMPlt, CMPeq, and CMPgt
to package types.
* We had picked the name "types" in our rules for the handy
list of types provided by ssa.Config. That conflicted with
the types package name, so change it to "typ".
* Update the type comparison routine to handle tuples and special
types inline.
* Teach gc/fmt.go how to print special types.
* We can now eliminate ElemTypes in favor of just Elem,
and probably also some other duplicated Type methods
designed to return ssa.Type instead of *types.Type.
* The ssa tests were using their own dummy types,
and they were not particularly careful about types in general.
Of necessity, this CL switches them to use *types.Type;
it does not make them more type-accurate.
Unfortunately, using types.Type means initializing a bit
of the types universe.
This is prime for refactoring and improvement.
This shrinks ssa.Value; it now fits in a smaller size class
on 64 bit systems. This doesn't have a giant impact,
though, since most Values are preallocated in a chunk.
name old alloc/op new alloc/op delta
Template 37.9MB ± 0% 37.7MB ± 0% -0.57% (p=0.000 n=10+8)
Unicode 28.9MB ± 0% 28.7MB ± 0% -0.52% (p=0.000 n=10+10)
GoTypes 110MB ± 0% 109MB ± 0% -0.88% (p=0.000 n=10+10)
Flate 24.7MB ± 0% 24.6MB ± 0% -0.66% (p=0.000 n=10+10)
GoParser 31.1MB ± 0% 30.9MB ± 0% -0.61% (p=0.000 n=10+9)
Reflect 73.9MB ± 0% 73.4MB ± 0% -0.62% (p=0.000 n=10+8)
Tar 25.8MB ± 0% 25.6MB ± 0% -0.77% (p=0.000 n=9+10)
XML 41.2MB ± 0% 40.9MB ± 0% -0.80% (p=0.000 n=10+10)
[Geo mean] 40.5MB 40.3MB -0.68%
name old allocs/op new allocs/op delta
Template 385k ± 0% 386k ± 0% ~ (p=0.356 n=10+9)
Unicode 343k ± 1% 344k ± 0% ~ (p=0.481 n=10+10)
GoTypes 1.16M ± 0% 1.16M ± 0% -0.16% (p=0.004 n=10+10)
Flate 238k ± 1% 238k ± 1% ~ (p=0.853 n=10+10)
GoParser 320k ± 0% 320k ± 0% ~ (p=0.720 n=10+9)
Reflect 957k ± 0% 957k ± 0% ~ (p=0.460 n=10+8)
Tar 252k ± 0% 252k ± 0% ~ (p=0.133 n=9+10)
XML 400k ± 0% 400k ± 0% ~ (p=0.796 n=10+10)
[Geo mean] 428k 428k -0.01%
Removing all the interface calls helps non-trivially with CPU, though.
name old time/op new time/op delta
Template 178ms ± 4% 173ms ± 3% -2.90% (p=0.000 n=94+96)
Unicode 85.0ms ± 4% 83.9ms ± 4% -1.23% (p=0.000 n=96+96)
GoTypes 543ms ± 3% 528ms ± 3% -2.73% (p=0.000 n=98+96)
Flate 116ms ± 3% 113ms ± 4% -2.34% (p=0.000 n=96+99)
GoParser 144ms ± 3% 140ms ± 4% -2.80% (p=0.000 n=99+97)
Reflect 344ms ± 3% 334ms ± 4% -3.02% (p=0.000 n=100+99)
Tar 106ms ± 5% 103ms ± 4% -3.30% (p=0.000 n=98+94)
XML 198ms ± 5% 192ms ± 4% -2.88% (p=0.000 n=92+95)
[Geo mean] 178ms 173ms -2.65%
name old user-time/op new user-time/op delta
Template 229ms ± 5% 224ms ± 5% -2.36% (p=0.000 n=95+99)
Unicode 107ms ± 6% 106ms ± 5% -1.13% (p=0.001 n=93+95)
GoTypes 696ms ± 4% 679ms ± 4% -2.45% (p=0.000 n=97+99)
Flate 137ms ± 4% 134ms ± 5% -2.66% (p=0.000 n=99+96)
GoParser 176ms ± 5% 172ms ± 8% -2.27% (p=0.000 n=98+100)
Reflect 430ms ± 6% 411ms ± 5% -4.46% (p=0.000 n=100+92)
Tar 128ms ±13% 123ms ±13% -4.21% (p=0.000 n=100+100)
XML 239ms ± 6% 233ms ± 6% -2.50% (p=0.000 n=95+97)
[Geo mean] 220ms 213ms -2.76%
Change-Id: I15c7d6268347f8358e75066dfdbd77db24e8d0c1
Reviewed-on: https://go-review.googlesource.com/42145
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
2017-04-28 14:12:28 -07:00
|
|
|
func loadByType(t *types.Type) obj.As {
|
2016-05-15 00:12:56 -04:00
|
|
|
if t.IsFloat() {
|
2017-04-28 00:19:49 +00:00
|
|
|
switch t.Size() {
|
2016-05-31 11:27:16 -04:00
|
|
|
case 4:
|
|
|
|
|
return arm.AMOVF
|
|
|
|
|
case 8:
|
|
|
|
|
return arm.AMOVD
|
|
|
|
|
}
|
2016-05-15 00:12:56 -04:00
|
|
|
} else {
|
2017-04-28 00:19:49 +00:00
|
|
|
switch t.Size() {
|
2016-05-15 00:12:56 -04:00
|
|
|
case 1:
|
|
|
|
|
if t.IsSigned() {
|
|
|
|
|
return arm.AMOVB
|
|
|
|
|
} else {
|
|
|
|
|
return arm.AMOVBU
|
|
|
|
|
}
|
|
|
|
|
case 2:
|
|
|
|
|
if t.IsSigned() {
|
|
|
|
|
return arm.AMOVH
|
|
|
|
|
} else {
|
|
|
|
|
return arm.AMOVHU
|
|
|
|
|
}
|
|
|
|
|
case 4:
|
|
|
|
|
return arm.AMOVW
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
panic("bad load type")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// storeByType returns the store instruction of the given type.
|
cmd/compile: change ssa.Type into *types.Type
When package ssa was created, Type was in package gc.
To avoid circular dependencies, we used an interface (ssa.Type)
to represent type information in SSA.
In the Go 1.9 cycle, gri extricated the Type type from package gc.
As a result, we can now use it in package ssa.
Now, instead of package types depending on package ssa,
it is the other way.
This is a more sensible dependency tree,
and helps compiler performance a bit.
Though this is a big CL, most of the changes are
mechanical and uninteresting.
Interesting bits:
* Add new singleton globals to package types for the special
SSA types Memory, Void, Invalid, Flags, and Int128.
* Add two new Types, TSSA for the special types,
and TTUPLE, for SSA tuple types.
ssa.MakeTuple is now types.NewTuple.
* Move type comparison result constants CMPlt, CMPeq, and CMPgt
to package types.
* We had picked the name "types" in our rules for the handy
list of types provided by ssa.Config. That conflicted with
the types package name, so change it to "typ".
* Update the type comparison routine to handle tuples and special
types inline.
* Teach gc/fmt.go how to print special types.
* We can now eliminate ElemTypes in favor of just Elem,
and probably also some other duplicated Type methods
designed to return ssa.Type instead of *types.Type.
* The ssa tests were using their own dummy types,
and they were not particularly careful about types in general.
Of necessity, this CL switches them to use *types.Type;
it does not make them more type-accurate.
Unfortunately, using types.Type means initializing a bit
of the types universe.
This is prime for refactoring and improvement.
This shrinks ssa.Value; it now fits in a smaller size class
on 64 bit systems. This doesn't have a giant impact,
though, since most Values are preallocated in a chunk.
name old alloc/op new alloc/op delta
Template 37.9MB ± 0% 37.7MB ± 0% -0.57% (p=0.000 n=10+8)
Unicode 28.9MB ± 0% 28.7MB ± 0% -0.52% (p=0.000 n=10+10)
GoTypes 110MB ± 0% 109MB ± 0% -0.88% (p=0.000 n=10+10)
Flate 24.7MB ± 0% 24.6MB ± 0% -0.66% (p=0.000 n=10+10)
GoParser 31.1MB ± 0% 30.9MB ± 0% -0.61% (p=0.000 n=10+9)
Reflect 73.9MB ± 0% 73.4MB ± 0% -0.62% (p=0.000 n=10+8)
Tar 25.8MB ± 0% 25.6MB ± 0% -0.77% (p=0.000 n=9+10)
XML 41.2MB ± 0% 40.9MB ± 0% -0.80% (p=0.000 n=10+10)
[Geo mean] 40.5MB 40.3MB -0.68%
name old allocs/op new allocs/op delta
Template 385k ± 0% 386k ± 0% ~ (p=0.356 n=10+9)
Unicode 343k ± 1% 344k ± 0% ~ (p=0.481 n=10+10)
GoTypes 1.16M ± 0% 1.16M ± 0% -0.16% (p=0.004 n=10+10)
Flate 238k ± 1% 238k ± 1% ~ (p=0.853 n=10+10)
GoParser 320k ± 0% 320k ± 0% ~ (p=0.720 n=10+9)
Reflect 957k ± 0% 957k ± 0% ~ (p=0.460 n=10+8)
Tar 252k ± 0% 252k ± 0% ~ (p=0.133 n=9+10)
XML 400k ± 0% 400k ± 0% ~ (p=0.796 n=10+10)
[Geo mean] 428k 428k -0.01%
Removing all the interface calls helps non-trivially with CPU, though.
name old time/op new time/op delta
Template 178ms ± 4% 173ms ± 3% -2.90% (p=0.000 n=94+96)
Unicode 85.0ms ± 4% 83.9ms ± 4% -1.23% (p=0.000 n=96+96)
GoTypes 543ms ± 3% 528ms ± 3% -2.73% (p=0.000 n=98+96)
Flate 116ms ± 3% 113ms ± 4% -2.34% (p=0.000 n=96+99)
GoParser 144ms ± 3% 140ms ± 4% -2.80% (p=0.000 n=99+97)
Reflect 344ms ± 3% 334ms ± 4% -3.02% (p=0.000 n=100+99)
Tar 106ms ± 5% 103ms ± 4% -3.30% (p=0.000 n=98+94)
XML 198ms ± 5% 192ms ± 4% -2.88% (p=0.000 n=92+95)
[Geo mean] 178ms 173ms -2.65%
name old user-time/op new user-time/op delta
Template 229ms ± 5% 224ms ± 5% -2.36% (p=0.000 n=95+99)
Unicode 107ms ± 6% 106ms ± 5% -1.13% (p=0.001 n=93+95)
GoTypes 696ms ± 4% 679ms ± 4% -2.45% (p=0.000 n=97+99)
Flate 137ms ± 4% 134ms ± 5% -2.66% (p=0.000 n=99+96)
GoParser 176ms ± 5% 172ms ± 8% -2.27% (p=0.000 n=98+100)
Reflect 430ms ± 6% 411ms ± 5% -4.46% (p=0.000 n=100+92)
Tar 128ms ±13% 123ms ±13% -4.21% (p=0.000 n=100+100)
XML 239ms ± 6% 233ms ± 6% -2.50% (p=0.000 n=95+97)
[Geo mean] 220ms 213ms -2.76%
Change-Id: I15c7d6268347f8358e75066dfdbd77db24e8d0c1
Reviewed-on: https://go-review.googlesource.com/42145
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
2017-04-28 14:12:28 -07:00
|
|
|
func storeByType(t *types.Type) obj.As {
|
2016-05-15 00:12:56 -04:00
|
|
|
if t.IsFloat() {
|
2017-04-28 00:19:49 +00:00
|
|
|
switch t.Size() {
|
2016-05-31 11:27:16 -04:00
|
|
|
case 4:
|
|
|
|
|
return arm.AMOVF
|
|
|
|
|
case 8:
|
|
|
|
|
return arm.AMOVD
|
|
|
|
|
}
|
2016-05-15 00:12:56 -04:00
|
|
|
} else {
|
2017-04-28 00:19:49 +00:00
|
|
|
switch t.Size() {
|
2016-05-15 00:12:56 -04:00
|
|
|
case 1:
|
|
|
|
|
return arm.AMOVB
|
|
|
|
|
case 2:
|
|
|
|
|
return arm.AMOVH
|
|
|
|
|
case 4:
|
|
|
|
|
return arm.AMOVW
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
panic("bad store type")
|
|
|
|
|
}
|
|
|
|
|
|
2022-11-11 19:22:35 +08:00
|
|
|
// shift type is used as Offset in obj.TYPE_SHIFT operands to encode shifted register operands.
type shift int64

// String formats the shifted-register operand for assembly listings.
// The bit layout is copied from ../../../internal/obj/util.go:/TYPE_SHIFT.
func (v shift) String() string {
	// Bits 5-6 select the shift kind; each mnemonic is two bytes wide.
	op := "<<>>->@>"[((v>>5)&3)<<1:]
	if v&(1<<4) != 0 {
		// Bit 4 set: the shift amount comes from the register in bits 8-11.
		return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
	}
	// Bit 4 clear: a constant shift amount lives in bits 7-11.
	return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
}
|
|
|
|
|
|
2022-11-11 19:22:35 +08:00
|
|
|
// makeshift encodes a register shifted by a constant.
|
2021-09-22 13:13:08 -04:00
|
|
|
func makeshift(v *ssa.Value, reg int16, typ int64, s int64) shift {
|
|
|
|
|
if s < 0 || s >= 32 {
|
|
|
|
|
v.Fatalf("shift out of range: %d", s)
|
|
|
|
|
}
|
2016-06-17 10:34:06 -04:00
|
|
|
return shift(int64(reg&0xf) | typ | (s&31)<<7)
|
|
|
|
|
}
|
|
|
|
|
|
2022-11-11 19:22:35 +08:00
|
|
|
// genshift generates a Prog for r = r0 op (r1 shifted by n).
|
2021-09-22 13:13:08 -04:00
|
|
|
func genshift(s *ssagen.State, v *ssa.Value, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(as)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.From.Type = obj.TYPE_SHIFT
|
2021-09-22 13:13:08 -04:00
|
|
|
p.From.Offset = int64(makeshift(v, r1, typ, n))
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Reg = r0
|
|
|
|
|
if r != 0 {
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = r
|
|
|
|
|
}
|
|
|
|
|
return p
|
|
|
|
|
}
|
|
|
|
|
|
2022-11-11 19:22:35 +08:00
|
|
|
// makeregshift encodes a register shifted by a register.
|
2016-06-17 10:34:06 -04:00
|
|
|
func makeregshift(r1 int16, typ int64, r2 int16) shift {
|
|
|
|
|
return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
|
|
|
|
|
}
|
|
|
|
|
|
2022-11-11 19:22:35 +08:00
|
|
|
// genregshift generates a Prog for r = r0 op (r1 shifted by r2).
|
2020-12-23 00:57:10 -05:00
|
|
|
func genregshift(s *ssagen.State, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(as)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.From.Type = obj.TYPE_SHIFT
|
|
|
|
|
p.From.Offset = int64(makeregshift(r1, typ, r2))
|
|
|
|
|
p.Reg = r0
|
|
|
|
|
if r != 0 {
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = r
|
|
|
|
|
}
|
|
|
|
|
return p
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-10 08:29:52 +00:00
|
|
|
// getBFC finds a (lsb, width) pair for BFC.
// lsb must be in [0, 31] and width in [1, 32-lsb].
// It returns (0xffffffff, 0) when v is not of the binary form 0...01...10...0,
// i.e. when the set bits do not form a single contiguous run.
func getBFC(v uint32) (uint32, uint32) {
	// BFC is not applicable with zero: there is nothing to clear.
	if v == 0 {
		return 0xffffffff, 0
	}
	lsb := uint32(bits.TrailingZeros32(v))     // index of the lowest set bit
	msb := 32 - uint32(bits.LeadingZeros32(v)) // one past the highest set bit
	// v is a single run of ones exactly when it equals (1<<msb)-(1<<lsb).
	// Note that for uint32 a shift by 32 yields 0, which still gives the
	// right answer when the run reaches bit 31 (msb == 32 makes 1<<msb == 0
	// and the subtraction wraps to the intended mask).
	if v == (1<<msb)-(1<<lsb) {
		// msb > lsb is guaranteed here since v is non-zero.
		return lsb, msb - lsb
	}
	// Set bits are not contiguous: BFC cannot express this mask.
	return 0xffffffff, 0
}
|
|
|
|
|
|
2020-12-23 00:57:10 -05:00
|
|
|
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
2016-03-21 22:57:26 -07:00
|
|
|
switch v.Op {
|
cmd/compile: don't lower OpConvert
Currently, each architecture lowers OpConvert to an arch-specific
OpXXXconvert. This is silly because OpConvert means the same thing on
all architectures and is logically a no-op that exists only to keep
track of conversions to and from unsafe.Pointer. Furthermore, lowering
it makes it harder to recognize in other analyses, particularly
liveness analysis.
This CL eliminates the lowering of OpConvert, leaving it as the
generic op until code generation time.
The main complexity here is that we still need to register-allocate
OpConvert operations. Currently, each arch's lowered OpConvert
specifies all GP registers in its register mask. Ideally, OpConvert
wouldn't affect value homing at all, and we could just copy the home
of OpConvert's source, but this can potentially home an OpConvert in a
LocalSlot, which neither regalloc nor stackalloc expect. Rather than
try to disentangle this assumption from regalloc and stackalloc, we
continue to register-allocate OpConvert, but teach regalloc that
OpConvert can be allocated to any allocatable GP register.
For #24543.
Change-Id: I795a6aee5fd94d4444a7bafac3838a400c9f7bb6
Reviewed-on: https://go-review.googlesource.com/108496
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
2018-04-02 16:08:09 -04:00
|
|
|
case ssa.OpCopy, ssa.OpARMMOVWreg:
|
2016-05-13 15:31:14 -04:00
|
|
|
if v.Type.IsMemory() {
|
|
|
|
|
return
|
|
|
|
|
}
|
2016-09-16 09:36:00 -07:00
|
|
|
x := v.Args[0].Reg()
|
|
|
|
|
y := v.Reg()
|
2016-05-13 15:31:14 -04:00
|
|
|
if x == y {
|
|
|
|
|
return
|
|
|
|
|
}
|
2016-05-31 11:27:16 -04:00
|
|
|
as := arm.AMOVW
|
|
|
|
|
if v.Type.IsFloat() {
|
2017-04-28 00:19:49 +00:00
|
|
|
switch v.Type.Size() {
|
2016-05-31 11:27:16 -04:00
|
|
|
case 4:
|
|
|
|
|
as = arm.AMOVF
|
|
|
|
|
case 8:
|
|
|
|
|
as = arm.AMOVD
|
|
|
|
|
default:
|
|
|
|
|
panic("bad float size")
|
|
|
|
|
}
|
|
|
|
|
}
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(as)
|
2016-05-13 15:31:14 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
|
|
|
|
p.From.Reg = x
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = y
|
2016-07-15 14:07:15 -04:00
|
|
|
case ssa.OpARMMOVWnop:
|
|
|
|
|
// nothing to do
|
2016-03-21 22:57:26 -07:00
|
|
|
case ssa.OpLoadReg:
|
2016-05-15 00:12:56 -04:00
|
|
|
if v.Type.IsFlags() {
|
2016-09-14 10:01:05 -07:00
|
|
|
v.Fatalf("load flags not implemented: %v", v.LongString())
|
2016-05-15 00:12:56 -04:00
|
|
|
return
|
|
|
|
|
}
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(loadByType(v.Type))
|
2020-12-23 00:57:10 -05:00
|
|
|
ssagen.AddrAuto(&p.From, v.Args[0])
|
2016-03-21 22:57:26 -07:00
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-03-21 22:57:26 -07:00
|
|
|
case ssa.OpStoreReg:
|
2016-05-15 00:12:56 -04:00
|
|
|
if v.Type.IsFlags() {
|
2016-09-14 10:01:05 -07:00
|
|
|
v.Fatalf("store flags not implemented: %v", v.LongString())
|
2016-05-15 00:12:56 -04:00
|
|
|
return
|
|
|
|
|
}
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(storeByType(v.Type))
|
2016-03-21 22:57:26 -07:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2020-12-23 00:57:10 -05:00
|
|
|
ssagen.AddrAuto(&p.To, v)
|
2016-05-06 10:13:31 -07:00
|
|
|
case ssa.OpARMADD,
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
ssa.OpARMADC,
|
2016-05-06 10:13:31 -07:00
|
|
|
ssa.OpARMSUB,
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
ssa.OpARMSBC,
|
2016-05-06 10:13:31 -07:00
|
|
|
ssa.OpARMRSB,
|
|
|
|
|
ssa.OpARMAND,
|
|
|
|
|
ssa.OpARMOR,
|
|
|
|
|
ssa.OpARMXOR,
|
2016-05-13 15:22:56 -04:00
|
|
|
ssa.OpARMBIC,
|
2016-05-31 11:27:16 -04:00
|
|
|
ssa.OpARMMUL,
|
|
|
|
|
ssa.OpARMADDF,
|
|
|
|
|
ssa.OpARMADDD,
|
|
|
|
|
ssa.OpARMSUBF,
|
|
|
|
|
ssa.OpARMSUBD,
|
2019-04-12 14:03:39 +02:00
|
|
|
ssa.OpARMSLL,
|
|
|
|
|
ssa.OpARMSRL,
|
|
|
|
|
ssa.OpARMSRA,
|
2016-05-31 11:27:16 -04:00
|
|
|
ssa.OpARMMULF,
|
|
|
|
|
ssa.OpARMMULD,
|
2017-09-02 08:14:08 +00:00
|
|
|
ssa.OpARMNMULF,
|
|
|
|
|
ssa.OpARMNMULD,
|
2016-05-31 11:27:16 -04:00
|
|
|
ssa.OpARMDIVF,
|
|
|
|
|
ssa.OpARMDIVD:
|
2016-09-16 09:36:00 -07:00
|
|
|
r := v.Reg()
|
|
|
|
|
r1 := v.Args[0].Reg()
|
|
|
|
|
r2 := v.Args[1].Reg()
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-03-21 22:57:26 -07:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-05-06 10:13:31 -07:00
|
|
|
p.From.Reg = r2
|
|
|
|
|
p.Reg = r1
|
2016-03-21 22:57:26 -07:00
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = r
|
2019-08-02 02:20:38 +00:00
|
|
|
case ssa.OpARMSRR:
|
|
|
|
|
genregshift(s, arm.AMOVW, 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR)
|
2018-10-15 03:14:57 -04:00
|
|
|
case ssa.OpARMMULAF, ssa.OpARMMULAD, ssa.OpARMMULSF, ssa.OpARMMULSD, ssa.OpARMFMULAD:
|
2017-09-14 06:52:51 +00:00
|
|
|
r := v.Reg()
|
|
|
|
|
r0 := v.Args[0].Reg()
|
|
|
|
|
r1 := v.Args[1].Reg()
|
|
|
|
|
r2 := v.Args[2].Reg()
|
|
|
|
|
if r != r0 {
|
|
|
|
|
v.Fatalf("result and addend are not in the same register: %v", v.LongString())
|
|
|
|
|
}
|
|
|
|
|
p := s.Prog(v.Op.Asm())
|
|
|
|
|
p.From.Type = obj.TYPE_REG
|
|
|
|
|
p.From.Reg = r2
|
|
|
|
|
p.Reg = r1
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = r
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
case ssa.OpARMADDS,
|
|
|
|
|
ssa.OpARMSUBS:
|
2016-09-16 09:36:00 -07:00
|
|
|
r := v.Reg0()
|
|
|
|
|
r1 := v.Args[0].Reg()
|
|
|
|
|
r2 := v.Args[1].Reg()
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
p.Scond = arm.C_SBIT
|
|
|
|
|
p.From.Type = obj.TYPE_REG
|
|
|
|
|
p.From.Reg = r2
|
|
|
|
|
p.Reg = r1
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = r
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMSRAcond:
|
2016-05-13 15:22:56 -04:00
|
|
|
// ARM shift instructions uses only the low-order byte of the shift amount
|
|
|
|
|
// generate conditional instructions to deal with large shifts
|
2016-06-17 10:34:06 -04:00
|
|
|
// flag is already set
|
2016-05-13 15:22:56 -04:00
|
|
|
// SRA.HS $31, Rarg0, Rdst // shift 31 bits to get the sign bit
|
|
|
|
|
// SRA.LO Rarg1, Rarg0, Rdst
|
2016-09-16 09:36:00 -07:00
|
|
|
r := v.Reg()
|
|
|
|
|
r1 := v.Args[0].Reg()
|
|
|
|
|
r2 := v.Args[1].Reg()
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(arm.ASRA)
|
2016-05-13 15:22:56 -04:00
|
|
|
p.Scond = arm.C_SCOND_HS
|
|
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = 31
|
|
|
|
|
p.Reg = r1
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = r
|
2017-03-20 08:01:28 -07:00
|
|
|
p = s.Prog(arm.ASRA)
|
2016-05-13 15:22:56 -04:00
|
|
|
p.Scond = arm.C_SCOND_LO
|
|
|
|
|
p.From.Type = obj.TYPE_REG
|
|
|
|
|
p.From.Reg = r2
|
|
|
|
|
p.Reg = r1
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = r
|
2017-09-20 08:48:34 +00:00
|
|
|
case ssa.OpARMBFX, ssa.OpARMBFXU:
|
|
|
|
|
p := s.Prog(v.Op.Asm())
|
|
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = v.AuxInt >> 8
|
2023-04-12 11:23:13 +08:00
|
|
|
p.AddRestSourceConst(v.AuxInt & 0xff)
|
2017-09-20 08:48:34 +00:00
|
|
|
p.Reg = v.Args[0].Reg()
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = v.Reg()
|
2018-09-10 08:29:52 +00:00
|
|
|
case ssa.OpARMANDconst, ssa.OpARMBICconst:
|
|
|
|
|
// try to optimize ANDconst and BICconst to BFC, which saves bytes and ticks
|
|
|
|
|
// BFC is only available on ARMv7, and its result and source are in the same register
|
2023-07-29 18:25:42 -07:00
|
|
|
if buildcfg.GOARM.Version == 7 && v.Reg() == v.Args[0].Reg() {
|
2018-09-10 08:29:52 +00:00
|
|
|
var val uint32
|
|
|
|
|
if v.Op == ssa.OpARMANDconst {
|
|
|
|
|
val = ^uint32(v.AuxInt)
|
|
|
|
|
} else { // BICconst
|
|
|
|
|
val = uint32(v.AuxInt)
|
|
|
|
|
}
|
|
|
|
|
lsb, width := getBFC(val)
|
|
|
|
|
// omit BFC for ARM's imm12
|
|
|
|
|
if 8 < width && width < 24 {
|
|
|
|
|
p := s.Prog(arm.ABFC)
|
|
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = int64(width)
|
2023-04-12 11:23:13 +08:00
|
|
|
p.AddRestSourceConst(int64(lsb))
|
2018-09-10 08:29:52 +00:00
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = v.Reg()
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// fall back to ordinary form
|
|
|
|
|
fallthrough
|
2016-06-06 22:36:45 -04:00
|
|
|
case ssa.OpARMADDconst,
|
2016-06-13 16:49:09 -04:00
|
|
|
ssa.OpARMADCconst,
|
2016-06-06 22:36:45 -04:00
|
|
|
ssa.OpARMSUBconst,
|
2016-06-13 16:49:09 -04:00
|
|
|
ssa.OpARMSBCconst,
|
2016-05-06 10:13:31 -07:00
|
|
|
ssa.OpARMRSBconst,
|
2016-06-13 16:49:09 -04:00
|
|
|
ssa.OpARMRSCconst,
|
2016-05-06 10:13:31 -07:00
|
|
|
ssa.OpARMORconst,
|
|
|
|
|
ssa.OpARMXORconst,
|
2016-05-13 15:22:56 -04:00
|
|
|
ssa.OpARMSLLconst,
|
|
|
|
|
ssa.OpARMSRLconst,
|
|
|
|
|
ssa.OpARMSRAconst:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-03-21 22:57:26 -07:00
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = v.AuxInt
|
2016-09-16 09:36:00 -07:00
|
|
|
p.Reg = v.Args[0].Reg()
|
2016-03-21 22:57:26 -07:00
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-06-13 16:49:09 -04:00
|
|
|
case ssa.OpARMADDSconst,
|
|
|
|
|
ssa.OpARMSUBSconst,
|
|
|
|
|
ssa.OpARMRSBSconst:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-06-13 16:49:09 -04:00
|
|
|
p.Scond = arm.C_SBIT
|
|
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = v.AuxInt
|
2016-09-16 09:36:00 -07:00
|
|
|
p.Reg = v.Args[0].Reg()
|
2016-06-13 16:49:09 -04:00
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg0()
|
2016-05-25 23:17:42 -04:00
|
|
|
case ssa.OpARMSRRconst:
|
2021-09-22 13:13:08 -04:00
|
|
|
genshift(s, v, arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMADDshiftLL,
|
|
|
|
|
ssa.OpARMADCshiftLL,
|
|
|
|
|
ssa.OpARMSUBshiftLL,
|
|
|
|
|
ssa.OpARMSBCshiftLL,
|
|
|
|
|
ssa.OpARMRSBshiftLL,
|
|
|
|
|
ssa.OpARMRSCshiftLL,
|
|
|
|
|
ssa.OpARMANDshiftLL,
|
|
|
|
|
ssa.OpARMORshiftLL,
|
|
|
|
|
ssa.OpARMXORshiftLL,
|
|
|
|
|
ssa.OpARMBICshiftLL:
|
2021-09-22 13:13:08 -04:00
|
|
|
genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMADDSshiftLL,
|
|
|
|
|
ssa.OpARMSUBSshiftLL,
|
|
|
|
|
ssa.OpARMRSBSshiftLL:
|
2021-09-22 13:13:08 -04:00
|
|
|
p := genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Scond = arm.C_SBIT
|
|
|
|
|
case ssa.OpARMADDshiftRL,
|
|
|
|
|
ssa.OpARMADCshiftRL,
|
|
|
|
|
ssa.OpARMSUBshiftRL,
|
|
|
|
|
ssa.OpARMSBCshiftRL,
|
|
|
|
|
ssa.OpARMRSBshiftRL,
|
|
|
|
|
ssa.OpARMRSCshiftRL,
|
|
|
|
|
ssa.OpARMANDshiftRL,
|
|
|
|
|
ssa.OpARMORshiftRL,
|
|
|
|
|
ssa.OpARMXORshiftRL,
|
|
|
|
|
ssa.OpARMBICshiftRL:
|
2021-09-22 13:13:08 -04:00
|
|
|
genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMADDSshiftRL,
|
|
|
|
|
ssa.OpARMSUBSshiftRL,
|
|
|
|
|
ssa.OpARMRSBSshiftRL:
|
2021-09-22 13:13:08 -04:00
|
|
|
p := genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Scond = arm.C_SBIT
|
|
|
|
|
case ssa.OpARMADDshiftRA,
|
|
|
|
|
ssa.OpARMADCshiftRA,
|
|
|
|
|
ssa.OpARMSUBshiftRA,
|
|
|
|
|
ssa.OpARMSBCshiftRA,
|
|
|
|
|
ssa.OpARMRSBshiftRA,
|
|
|
|
|
ssa.OpARMRSCshiftRA,
|
|
|
|
|
ssa.OpARMANDshiftRA,
|
|
|
|
|
ssa.OpARMORshiftRA,
|
|
|
|
|
ssa.OpARMXORshiftRA,
|
|
|
|
|
ssa.OpARMBICshiftRA:
|
2021-09-22 13:13:08 -04:00
|
|
|
genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMADDSshiftRA,
|
|
|
|
|
ssa.OpARMSUBSshiftRA,
|
|
|
|
|
ssa.OpARMRSBSshiftRA:
|
2021-09-22 13:13:08 -04:00
|
|
|
p := genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Scond = arm.C_SBIT
|
2016-08-30 09:12:22 -04:00
|
|
|
case ssa.OpARMXORshiftRR:
|
2021-09-22 13:13:08 -04:00
|
|
|
genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMVNshiftLL:
|
2021-09-22 13:13:08 -04:00
|
|
|
genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMVNshiftRL:
|
2021-09-22 13:13:08 -04:00
|
|
|
genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMVNshiftRA:
|
2021-09-22 13:13:08 -04:00
|
|
|
genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMVNshiftLLreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMVNshiftRLreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMVNshiftRAreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMADDshiftLLreg,
|
|
|
|
|
ssa.OpARMADCshiftLLreg,
|
|
|
|
|
ssa.OpARMSUBshiftLLreg,
|
|
|
|
|
ssa.OpARMSBCshiftLLreg,
|
|
|
|
|
ssa.OpARMRSBshiftLLreg,
|
|
|
|
|
ssa.OpARMRSCshiftLLreg,
|
|
|
|
|
ssa.OpARMANDshiftLLreg,
|
|
|
|
|
ssa.OpARMORshiftLLreg,
|
|
|
|
|
ssa.OpARMXORshiftLLreg,
|
|
|
|
|
ssa.OpARMBICshiftLLreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMADDSshiftLLreg,
|
|
|
|
|
ssa.OpARMSUBSshiftLLreg,
|
|
|
|
|
ssa.OpARMRSBSshiftLLreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Scond = arm.C_SBIT
|
|
|
|
|
case ssa.OpARMADDshiftRLreg,
|
|
|
|
|
ssa.OpARMADCshiftRLreg,
|
|
|
|
|
ssa.OpARMSUBshiftRLreg,
|
|
|
|
|
ssa.OpARMSBCshiftRLreg,
|
|
|
|
|
ssa.OpARMRSBshiftRLreg,
|
|
|
|
|
ssa.OpARMRSCshiftRLreg,
|
|
|
|
|
ssa.OpARMANDshiftRLreg,
|
|
|
|
|
ssa.OpARMORshiftRLreg,
|
|
|
|
|
ssa.OpARMXORshiftRLreg,
|
|
|
|
|
ssa.OpARMBICshiftRLreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMADDSshiftRLreg,
|
|
|
|
|
ssa.OpARMSUBSshiftRLreg,
|
|
|
|
|
ssa.OpARMRSBSshiftRLreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Scond = arm.C_SBIT
|
|
|
|
|
case ssa.OpARMADDshiftRAreg,
|
|
|
|
|
ssa.OpARMADCshiftRAreg,
|
|
|
|
|
ssa.OpARMSUBshiftRAreg,
|
|
|
|
|
ssa.OpARMSBCshiftRAreg,
|
|
|
|
|
ssa.OpARMRSBshiftRAreg,
|
|
|
|
|
ssa.OpARMRSCshiftRAreg,
|
|
|
|
|
ssa.OpARMANDshiftRAreg,
|
|
|
|
|
ssa.OpARMORshiftRAreg,
|
|
|
|
|
ssa.OpARMXORshiftRAreg,
|
|
|
|
|
ssa.OpARMBICshiftRAreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMADDSshiftRAreg,
|
|
|
|
|
ssa.OpARMSUBSshiftRAreg,
|
|
|
|
|
ssa.OpARMRSBSshiftRAreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Scond = arm.C_SBIT
|
2016-05-13 15:22:56 -04:00
|
|
|
case ssa.OpARMHMUL,
|
|
|
|
|
ssa.OpARMHMULU:
|
|
|
|
|
// 32-bit high multiplication
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-05-13 15:22:56 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
|
|
|
|
p.Reg = v.Args[1].Reg()
|
2016-05-13 15:22:56 -04:00
|
|
|
p.To.Type = obj.TYPE_REGREG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-05-13 15:22:56 -04:00
|
|
|
p.To.Offset = arm.REGTMP // throw away low 32-bit into tmp register
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
case ssa.OpARMMULLU:
|
2016-07-13 16:15:54 -07:00
|
|
|
// 32-bit multiplication, results 64-bit, high 32-bit in out0, low 32-bit in out1
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
|
|
|
|
p.Reg = v.Args[1].Reg()
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
p.To.Type = obj.TYPE_REGREG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg0() // high 32-bit
|
|
|
|
|
p.To.Offset = int64(v.Reg1()) // low 32-bit
|
2017-08-25 12:07:01 +00:00
|
|
|
case ssa.OpARMMULA, ssa.OpARMMULS:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
|
|
|
|
p.Reg = v.Args[1].Reg()
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
p.To.Type = obj.TYPE_REGREG2
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg() // result
|
|
|
|
|
p.To.Offset = int64(v.Args[2].Reg()) // addend
|
2016-03-21 22:57:26 -07:00
|
|
|
case ssa.OpARMMOVWconst:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-03-21 22:57:26 -07:00
|
|
|
p.From.Type = obj.TYPE_CONST
|
2016-03-29 16:39:53 -07:00
|
|
|
p.From.Offset = v.AuxInt
|
2016-03-21 22:57:26 -07:00
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-05-31 11:27:16 -04:00
|
|
|
case ssa.OpARMMOVFconst,
|
|
|
|
|
ssa.OpARMMOVDconst:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-05-31 11:27:16 -04:00
|
|
|
p.From.Type = obj.TYPE_FCONST
|
|
|
|
|
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-05-06 10:13:31 -07:00
|
|
|
case ssa.OpARMCMP,
|
|
|
|
|
ssa.OpARMCMN,
|
|
|
|
|
ssa.OpARMTST,
|
2016-05-31 11:27:16 -04:00
|
|
|
ssa.OpARMTEQ,
|
|
|
|
|
ssa.OpARMCMPF,
|
|
|
|
|
ssa.OpARMCMPD:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-03-21 22:57:26 -07:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-04-18 12:21:51 -04:00
|
|
|
// Special layout in ARM assembly
|
|
|
|
|
// Comparing to x86, the operands of ARM's CMP are reversed.
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[1].Reg()
|
|
|
|
|
p.Reg = v.Args[0].Reg()
|
2016-05-06 10:13:31 -07:00
|
|
|
case ssa.OpARMCMPconst,
|
|
|
|
|
ssa.OpARMCMNconst,
|
|
|
|
|
ssa.OpARMTSTconst,
|
|
|
|
|
ssa.OpARMTEQconst:
|
|
|
|
|
// Special layout in ARM assembly
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-05-06 10:13:31 -07:00
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = v.AuxInt
|
2016-09-16 09:36:00 -07:00
|
|
|
p.Reg = v.Args[0].Reg()
|
2016-07-06 10:04:45 -04:00
|
|
|
case ssa.OpARMCMPF0,
|
|
|
|
|
ssa.OpARMCMPD0:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-07-06 10:04:45 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2017-10-02 03:09:28 +00:00
|
|
|
case ssa.OpARMCMPshiftLL, ssa.OpARMCMNshiftLL, ssa.OpARMTSTshiftLL, ssa.OpARMTEQshiftLL:
|
2021-09-22 13:13:08 -04:00
|
|
|
genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
|
2017-10-02 03:09:28 +00:00
|
|
|
case ssa.OpARMCMPshiftRL, ssa.OpARMCMNshiftRL, ssa.OpARMTSTshiftRL, ssa.OpARMTEQshiftRL:
|
2021-09-22 13:13:08 -04:00
|
|
|
genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
|
2017-10-02 03:09:28 +00:00
|
|
|
case ssa.OpARMCMPshiftRA, ssa.OpARMCMNshiftRA, ssa.OpARMTSTshiftRA, ssa.OpARMTEQshiftRA:
|
2021-09-22 13:13:08 -04:00
|
|
|
genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
|
2017-10-02 03:09:28 +00:00
|
|
|
case ssa.OpARMCMPshiftLLreg, ssa.OpARMCMNshiftLLreg, ssa.OpARMTSTshiftLLreg, ssa.OpARMTEQshiftLLreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
|
2017-10-02 03:09:28 +00:00
|
|
|
case ssa.OpARMCMPshiftRLreg, ssa.OpARMCMNshiftRLreg, ssa.OpARMTSTshiftRLreg, ssa.OpARMTEQshiftRLreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
|
2017-10-02 03:09:28 +00:00
|
|
|
case ssa.OpARMCMPshiftRAreg, ssa.OpARMCMNshiftRAreg, ssa.OpARMTSTshiftRAreg, ssa.OpARMTEQshiftRAreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
|
2016-06-06 22:36:45 -04:00
|
|
|
case ssa.OpARMMOVWaddr:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(arm.AMOVW)
|
2016-06-15 15:56:52 -07:00
|
|
|
p.From.Type = obj.TYPE_ADDR
|
2017-07-18 08:35:00 -04:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2016-06-15 15:56:52 -07:00
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-06-15 15:56:52 -07:00
|
|
|
|
|
|
|
|
var wantreg string
|
2016-06-06 22:36:45 -04:00
|
|
|
// MOVW $sym+off(base), R
|
|
|
|
|
// the assembler expands it as the following:
|
|
|
|
|
// - base is SP: add constant offset to SP (R13)
|
|
|
|
|
// when constant is large, tmp register (R11) may be used
|
|
|
|
|
// - base is SB: load external address from constant pool (use relocation)
|
|
|
|
|
switch v.Aux.(type) {
|
|
|
|
|
default:
|
|
|
|
|
v.Fatalf("aux is of unknown type %T", v.Aux)
|
2017-09-18 14:53:56 -07:00
|
|
|
case *obj.LSym:
|
2016-06-15 15:56:52 -07:00
|
|
|
wantreg = "SB"
|
2020-12-23 00:57:10 -05:00
|
|
|
ssagen.AddAux(&p.From, v)
|
2020-12-06 18:13:43 -08:00
|
|
|
case *ir.Name:
|
2016-06-15 15:56:52 -07:00
|
|
|
wantreg = "SP"
|
2020-12-23 00:57:10 -05:00
|
|
|
ssagen.AddAux(&p.From, v)
|
2016-06-15 15:56:52 -07:00
|
|
|
case nil:
|
|
|
|
|
// No sym, just MOVW $off(SP), R
|
|
|
|
|
wantreg = "SP"
|
|
|
|
|
p.From.Offset = v.AuxInt
|
2016-06-06 22:36:45 -04:00
|
|
|
}
|
2016-09-16 09:36:00 -07:00
|
|
|
if reg := v.Args[0].RegName(); reg != wantreg {
|
|
|
|
|
v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
|
2016-06-15 15:56:52 -07:00
|
|
|
}
|
|
|
|
|
|
2016-05-06 10:13:31 -07:00
|
|
|
case ssa.OpARMMOVBload,
|
|
|
|
|
ssa.OpARMMOVBUload,
|
|
|
|
|
ssa.OpARMMOVHload,
|
|
|
|
|
ssa.OpARMMOVHUload,
|
2016-05-31 11:27:16 -04:00
|
|
|
ssa.OpARMMOVWload,
|
|
|
|
|
ssa.OpARMMOVFload,
|
|
|
|
|
ssa.OpARMMOVDload:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-03-21 22:57:26 -07:00
|
|
|
p.From.Type = obj.TYPE_MEM
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2020-12-23 00:57:10 -05:00
|
|
|
ssagen.AddAux(&p.From, v)
|
2016-03-21 22:57:26 -07:00
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-05-06 10:13:31 -07:00
|
|
|
case ssa.OpARMMOVBstore,
|
|
|
|
|
ssa.OpARMMOVHstore,
|
2016-05-31 11:27:16 -04:00
|
|
|
ssa.OpARMMOVWstore,
|
|
|
|
|
ssa.OpARMMOVFstore,
|
|
|
|
|
ssa.OpARMMOVDstore:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-03-21 22:57:26 -07:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[1].Reg()
|
2016-03-21 22:57:26 -07:00
|
|
|
p.To.Type = obj.TYPE_MEM
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Args[0].Reg()
|
2020-12-23 00:57:10 -05:00
|
|
|
ssagen.AddAux(&p.To, v)
|
2017-08-24 10:51:34 +00:00
|
|
|
case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx:
|
2016-06-17 10:34:06 -04:00
|
|
|
// this is just shift 0 bits
|
|
|
|
|
fallthrough
|
|
|
|
|
case ssa.OpARMMOVWloadshiftLL:
|
2021-09-22 13:13:08 -04:00
|
|
|
p := genshift(s, v, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMOVWloadshiftRL:
|
2021-09-22 13:13:08 -04:00
|
|
|
p := genshift(s, v, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMOVWloadshiftRA:
|
2021-09-22 13:13:08 -04:00
|
|
|
p := genshift(s, v, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2017-08-24 10:51:34 +00:00
|
|
|
case ssa.OpARMMOVWstoreidx, ssa.OpARMMOVBstoreidx, ssa.OpARMMOVHstoreidx:
|
2016-06-17 10:34:06 -04:00
|
|
|
// this is just shift 0 bits
|
|
|
|
|
fallthrough
|
|
|
|
|
case ssa.OpARMMOVWstoreshiftLL:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-06-17 10:34:06 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[2].Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
p.To.Type = obj.TYPE_SHIFT
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Args[0].Reg()
|
2021-09-22 13:13:08 -04:00
|
|
|
p.To.Offset = int64(makeshift(v, v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt))
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMOVWstoreshiftRL:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-06-17 10:34:06 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[2].Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
p.To.Type = obj.TYPE_SHIFT
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Args[0].Reg()
|
2021-09-22 13:13:08 -04:00
|
|
|
p.To.Offset = int64(makeshift(v, v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt))
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMOVWstoreshiftRA:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-06-17 10:34:06 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[2].Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
p.To.Type = obj.TYPE_SHIFT
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Args[0].Reg()
|
2021-09-22 13:13:08 -04:00
|
|
|
p.To.Offset = int64(makeshift(v, v.Args[1].Reg(), arm.SHIFT_AR, v.AuxInt))
|
2016-05-06 10:13:31 -07:00
|
|
|
case ssa.OpARMMOVBreg,
|
|
|
|
|
ssa.OpARMMOVBUreg,
|
|
|
|
|
ssa.OpARMMOVHreg,
|
2016-06-17 10:34:06 -04:00
|
|
|
ssa.OpARMMOVHUreg:
|
|
|
|
|
a := v.Args[0]
|
2016-07-15 14:07:15 -04:00
|
|
|
for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop {
|
2016-06-17 10:34:06 -04:00
|
|
|
a = a.Args[0]
|
|
|
|
|
}
|
|
|
|
|
if a.Op == ssa.OpLoadReg {
|
|
|
|
|
t := a.Type
|
|
|
|
|
switch {
|
2017-04-28 00:19:49 +00:00
|
|
|
case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(),
|
|
|
|
|
v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(),
|
|
|
|
|
v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(),
|
|
|
|
|
v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned():
|
2016-06-17 10:34:06 -04:00
|
|
|
// arg is a proper-typed load, already zero/sign-extended, don't extend again
|
2016-09-16 09:36:00 -07:00
|
|
|
if v.Reg() == v.Args[0].Reg() {
|
2016-06-17 10:34:06 -04:00
|
|
|
return
|
|
|
|
|
}
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(arm.AMOVW)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
return
|
|
|
|
|
default:
|
|
|
|
|
}
|
|
|
|
|
}
|
2023-07-29 18:25:42 -07:00
|
|
|
if buildcfg.GOARM.Version >= 6 {
|
2017-10-20 03:50:15 +00:00
|
|
|
// generate more efficient "MOVB/MOVBU/MOVH/MOVHU Reg@>0, Reg" on ARMv6 & ARMv7
|
2021-09-22 13:13:08 -04:00
|
|
|
genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, 0)
|
2017-10-20 03:50:15 +00:00
|
|
|
return
|
|
|
|
|
}
|
2016-06-17 10:34:06 -04:00
|
|
|
fallthrough
|
|
|
|
|
case ssa.OpARMMVN,
|
2016-08-30 09:12:22 -04:00
|
|
|
ssa.OpARMCLZ,
|
2017-01-24 09:48:58 +00:00
|
|
|
ssa.OpARMREV,
|
2019-02-11 09:40:02 +00:00
|
|
|
ssa.OpARMREV16,
|
2017-01-24 09:48:58 +00:00
|
|
|
ssa.OpARMRBIT,
|
2020-12-07 19:15:15 +08:00
|
|
|
ssa.OpARMSQRTF,
|
2016-05-31 11:27:16 -04:00
|
|
|
ssa.OpARMSQRTD,
|
2016-06-29 15:20:48 -04:00
|
|
|
ssa.OpARMNEGF,
|
|
|
|
|
ssa.OpARMNEGD,
|
2019-08-02 02:41:59 +00:00
|
|
|
ssa.OpARMABSD,
|
2016-05-31 11:27:16 -04:00
|
|
|
ssa.OpARMMOVWF,
|
|
|
|
|
ssa.OpARMMOVWD,
|
|
|
|
|
ssa.OpARMMOVFW,
|
|
|
|
|
ssa.OpARMMOVDW,
|
|
|
|
|
ssa.OpARMMOVFD,
|
|
|
|
|
ssa.OpARMMOVDF:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-05-31 11:27:16 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2016-05-31 11:27:16 -04:00
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-05-31 11:27:16 -04:00
|
|
|
case ssa.OpARMMOVWUF,
|
|
|
|
|
ssa.OpARMMOVWUD,
|
|
|
|
|
ssa.OpARMMOVFWU,
|
|
|
|
|
ssa.OpARMMOVDWU:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-05-31 11:27:16 -04:00
|
|
|
p.Scond = arm.C_UBIT
|
2016-05-06 10:13:31 -07:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2016-05-06 10:13:31 -07:00
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMCMOVWHSconst:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(arm.AMOVW)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Scond = arm.C_SCOND_HS
|
|
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = v.AuxInt
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMCMOVWLSconst:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(arm.AMOVW)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Scond = arm.C_SCOND_LS
|
|
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = v.AuxInt
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2017-04-20 07:50:17 -07:00
|
|
|
case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter:
|
|
|
|
|
s.Call(v)
|
cmd/compile: restore tail call for method wrappers
For certain type of method wrappers we used to generate a tail
call. That was disabled in CL 307234 when register ABI is used,
because with the current IR it was difficult to generate a tail
call with the arguments in the right places. The problem was that
the IR does not contain a CALL-like node with arguments; instead,
it contains an OAS node that adjusts the receiver, than an
OTAILCALL node that just contains the target, but no argument
(with the assumption that the OAS node will put the adjusted
receiver in the right place). With register ABI, putting
arguments in registers are done in SSA. The assignment (OAS)
doesn't put the receiver in register.
This CL changes the IR of a tail call to take an actual OCALL
node. Specifically, a tail call is represented as
OTAILCALL (OCALL target args...)
This way, the call target and args are connected through the OCALL
node. So the call can be analyzed in SSA and the args can be passed
in the right places.
(Alternatively, we could have OTAILCALL node directly take the
target and the args, without the OCALL node. Using an OCALL node is
convenient as there are existing code that processes OCALL nodes
which do not need to be changed. Also, a tail call is similar to
ORETURN (OCALL target args...), except it doesn't preserve the
frame. I did the former but I'm open to change.)
The SSA representation is similar. Previously, the IR lowers to
a Store the receiver then a BlockRetJmp which jumps to the target
(without putting the arg in register). Now we use a TailCall op,
which takes the target and the args. The call expansion pass and
the register allocator handles TailCall pretty much like a
StaticCall, and it will do the right ABI analysis and put the args
in the right places. (Args other than the receiver are already in
the right places. For register args it generates no code for them.
For stack args currently it generates a self copy. I'll work on
optimize that out.) BlockRetJmp is still used, signaling it is a
tail call. The actual call is made in the TailCall op so
BlockRetJmp generates no code (we could use BlockExit if we like).
This slightly reduces binary size:
old new
cmd/go 14003088 13953936
cmd/link 6275552 6271456
Change-Id: I2d16d8d419fe1f17554916d317427383e17e27f0
Reviewed-on: https://go-review.googlesource.com/c/go/+/350145
Trust: Cherry Mui <cherryyz@google.com>
Run-TryBot: Cherry Mui <cherryyz@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
Reviewed-by: David Chase <drchase@google.com>
2021-09-10 22:05:55 -04:00
|
|
|
case ssa.OpARMCALLtail:
|
|
|
|
|
s.TailCall(v)
|
2017-04-20 07:50:17 -07:00
|
|
|
case ssa.OpARMCALLudiv:
|
2017-04-21 06:50:02 -04:00
|
|
|
p := s.Prog(obj.ACALL)
|
|
|
|
|
p.To.Type = obj.TYPE_MEM
|
|
|
|
|
p.To.Name = obj.NAME_EXTERN
|
[dev.regabi] cmd/compile: group known symbols, packages, names [generated]
There are a handful of pre-computed magic symbols known by
package gc, and we need a place to store them.
If we keep them together, the need for type *ir.Name means that
package ir is the lowest package in the import hierarchy that they
can go in. And package ir needs gopkg for methodSymSuffix
(in a later CL), so they can't go any higher either, at least not all together.
So package ir it is.
Rather than dump them all into the top-level package ir
namespace, however, we introduce global structs, Syms, Pkgs, and Names,
and make the known symbols, packages, and names fields of those.
[git-generate]
cd src/cmd/compile/internal/gc
rf '
add go.go:$ \
// Names holds known names. \
var Names struct{} \
\
// Syms holds known symbols. \
var Syms struct {} \
\
// Pkgs holds known packages. \
var Pkgs struct {} \
mv staticuint64s Names.Staticuint64s
mv zerobase Names.Zerobase
mv assertE2I Syms.AssertE2I
mv assertE2I2 Syms.AssertE2I2
mv assertI2I Syms.AssertI2I
mv assertI2I2 Syms.AssertI2I2
mv deferproc Syms.Deferproc
mv deferprocStack Syms.DeferprocStack
mv Deferreturn Syms.Deferreturn
mv Duffcopy Syms.Duffcopy
mv Duffzero Syms.Duffzero
mv gcWriteBarrier Syms.GCWriteBarrier
mv goschedguarded Syms.Goschedguarded
mv growslice Syms.Growslice
mv msanread Syms.Msanread
mv msanwrite Syms.Msanwrite
mv msanmove Syms.Msanmove
mv newobject Syms.Newobject
mv newproc Syms.Newproc
mv panicdivide Syms.Panicdivide
mv panicshift Syms.Panicshift
mv panicdottypeE Syms.PanicdottypeE
mv panicdottypeI Syms.PanicdottypeI
mv panicnildottype Syms.Panicnildottype
mv panicoverflow Syms.Panicoverflow
mv raceread Syms.Raceread
mv racereadrange Syms.Racereadrange
mv racewrite Syms.Racewrite
mv racewriterange Syms.Racewriterange
mv SigPanic Syms.SigPanic
mv typedmemclr Syms.Typedmemclr
mv typedmemmove Syms.Typedmemmove
mv Udiv Syms.Udiv
mv writeBarrier Syms.WriteBarrier
mv zerobaseSym Syms.Zerobase
mv arm64HasATOMICS Syms.ARM64HasATOMICS
mv armHasVFPv4 Syms.ARMHasVFPv4
mv x86HasFMA Syms.X86HasFMA
mv x86HasPOPCNT Syms.X86HasPOPCNT
mv x86HasSSE41 Syms.X86HasSSE41
mv WasmDiv Syms.WasmDiv
mv WasmMove Syms.WasmMove
mv WasmZero Syms.WasmZero
mv WasmTruncS Syms.WasmTruncS
mv WasmTruncU Syms.WasmTruncU
mv gopkg Pkgs.Go
mv itabpkg Pkgs.Itab
mv itablinkpkg Pkgs.Itablink
mv mappkg Pkgs.Map
mv msanpkg Pkgs.Msan
mv racepkg Pkgs.Race
mv Runtimepkg Pkgs.Runtime
mv trackpkg Pkgs.Track
mv unsafepkg Pkgs.Unsafe
mv Names Syms Pkgs symtab.go
mv symtab.go cmd/compile/internal/ir
'
Change-Id: Ic143862148569a3bcde8e70b26d75421aa2d00f3
Reviewed-on: https://go-review.googlesource.com/c/go/+/279235
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-12-23 00:10:25 -05:00
|
|
|
p.To.Sym = ir.Syms.Udiv
|
2017-11-15 14:54:24 -08:00
|
|
|
case ssa.OpARMLoweredWB:
|
|
|
|
|
p := s.Prog(obj.ACALL)
|
|
|
|
|
p.To.Type = obj.TYPE_MEM
|
|
|
|
|
p.To.Name = obj.NAME_EXTERN
|
2022-11-01 16:46:43 -07:00
|
|
|
// AuxInt encodes how many buffer entries we need.
|
|
|
|
|
p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
|
2019-02-06 14:12:36 -08:00
|
|
|
case ssa.OpARMLoweredPanicBoundsA, ssa.OpARMLoweredPanicBoundsB, ssa.OpARMLoweredPanicBoundsC:
|
|
|
|
|
p := s.Prog(obj.ACALL)
|
|
|
|
|
p.To.Type = obj.TYPE_MEM
|
|
|
|
|
p.To.Name = obj.NAME_EXTERN
|
2020-12-23 00:57:10 -05:00
|
|
|
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
|
2019-02-06 14:12:36 -08:00
|
|
|
s.UseArgs(8) // space used in callee args area by assembly stubs
|
|
|
|
|
case ssa.OpARMLoweredPanicExtendA, ssa.OpARMLoweredPanicExtendB, ssa.OpARMLoweredPanicExtendC:
|
|
|
|
|
p := s.Prog(obj.ACALL)
|
|
|
|
|
p.To.Type = obj.TYPE_MEM
|
|
|
|
|
p.To.Name = obj.NAME_EXTERN
|
2020-12-23 00:57:10 -05:00
|
|
|
p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
|
2019-02-06 14:12:36 -08:00
|
|
|
s.UseArgs(12) // space used in callee args area by assembly stubs
|
2016-05-13 15:31:14 -04:00
|
|
|
case ssa.OpARMDUFFZERO:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(obj.ADUFFZERO)
|
2016-05-13 15:31:14 -04:00
|
|
|
p.To.Type = obj.TYPE_MEM
|
|
|
|
|
p.To.Name = obj.NAME_EXTERN
|
[dev.regabi] cmd/compile: group known symbols, packages, names [generated]
There are a handful of pre-computed magic symbols known by
package gc, and we need a place to store them.
If we keep them together, the need for type *ir.Name means that
package ir is the lowest package in the import hierarchy that they
can go in. And package ir needs gopkg for methodSymSuffix
(in a later CL), so they can't go any higher either, at least not all together.
So package ir it is.
Rather than dump them all into the top-level package ir
namespace, however, we introduce global structs, Syms, Pkgs, and Names,
and make the known symbols, packages, and names fields of those.
[git-generate]
cd src/cmd/compile/internal/gc
rf '
add go.go:$ \
// Names holds known names. \
var Names struct{} \
\
// Syms holds known symbols. \
var Syms struct {} \
\
// Pkgs holds known packages. \
var Pkgs struct {} \
mv staticuint64s Names.Staticuint64s
mv zerobase Names.Zerobase
mv assertE2I Syms.AssertE2I
mv assertE2I2 Syms.AssertE2I2
mv assertI2I Syms.AssertI2I
mv assertI2I2 Syms.AssertI2I2
mv deferproc Syms.Deferproc
mv deferprocStack Syms.DeferprocStack
mv Deferreturn Syms.Deferreturn
mv Duffcopy Syms.Duffcopy
mv Duffzero Syms.Duffzero
mv gcWriteBarrier Syms.GCWriteBarrier
mv goschedguarded Syms.Goschedguarded
mv growslice Syms.Growslice
mv msanread Syms.Msanread
mv msanwrite Syms.Msanwrite
mv msanmove Syms.Msanmove
mv newobject Syms.Newobject
mv newproc Syms.Newproc
mv panicdivide Syms.Panicdivide
mv panicshift Syms.Panicshift
mv panicdottypeE Syms.PanicdottypeE
mv panicdottypeI Syms.PanicdottypeI
mv panicnildottype Syms.Panicnildottype
mv panicoverflow Syms.Panicoverflow
mv raceread Syms.Raceread
mv racereadrange Syms.Racereadrange
mv racewrite Syms.Racewrite
mv racewriterange Syms.Racewriterange
mv SigPanic Syms.SigPanic
mv typedmemclr Syms.Typedmemclr
mv typedmemmove Syms.Typedmemmove
mv Udiv Syms.Udiv
mv writeBarrier Syms.WriteBarrier
mv zerobaseSym Syms.Zerobase
mv arm64HasATOMICS Syms.ARM64HasATOMICS
mv armHasVFPv4 Syms.ARMHasVFPv4
mv x86HasFMA Syms.X86HasFMA
mv x86HasPOPCNT Syms.X86HasPOPCNT
mv x86HasSSE41 Syms.X86HasSSE41
mv WasmDiv Syms.WasmDiv
mv WasmMove Syms.WasmMove
mv WasmZero Syms.WasmZero
mv WasmTruncS Syms.WasmTruncS
mv WasmTruncU Syms.WasmTruncU
mv gopkg Pkgs.Go
mv itabpkg Pkgs.Itab
mv itablinkpkg Pkgs.Itablink
mv mappkg Pkgs.Map
mv msanpkg Pkgs.Msan
mv racepkg Pkgs.Race
mv Runtimepkg Pkgs.Runtime
mv trackpkg Pkgs.Track
mv unsafepkg Pkgs.Unsafe
mv Names Syms Pkgs symtab.go
mv symtab.go cmd/compile/internal/ir
'
Change-Id: Ic143862148569a3bcde8e70b26d75421aa2d00f3
Reviewed-on: https://go-review.googlesource.com/c/go/+/279235
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-12-23 00:10:25 -05:00
|
|
|
p.To.Sym = ir.Syms.Duffzero
|
2016-05-13 15:31:14 -04:00
|
|
|
p.To.Offset = v.AuxInt
|
|
|
|
|
case ssa.OpARMDUFFCOPY:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(obj.ADUFFCOPY)
|
2016-05-13 15:31:14 -04:00
|
|
|
p.To.Type = obj.TYPE_MEM
|
|
|
|
|
p.To.Name = obj.NAME_EXTERN
|
[dev.regabi] cmd/compile: group known symbols, packages, names [generated]
There are a handful of pre-computed magic symbols known by
package gc, and we need a place to store them.
If we keep them together, the need for type *ir.Name means that
package ir is the lowest package in the import hierarchy that they
can go in. And package ir needs gopkg for methodSymSuffix
(in a later CL), so they can't go any higher either, at least not all together.
So package ir it is.
Rather than dump them all into the top-level package ir
namespace, however, we introduce global structs, Syms, Pkgs, and Names,
and make the known symbols, packages, and names fields of those.
[git-generate]
cd src/cmd/compile/internal/gc
rf '
add go.go:$ \
// Names holds known names. \
var Names struct{} \
\
// Syms holds known symbols. \
var Syms struct {} \
\
// Pkgs holds known packages. \
var Pkgs struct {} \
mv staticuint64s Names.Staticuint64s
mv zerobase Names.Zerobase
mv assertE2I Syms.AssertE2I
mv assertE2I2 Syms.AssertE2I2
mv assertI2I Syms.AssertI2I
mv assertI2I2 Syms.AssertI2I2
mv deferproc Syms.Deferproc
mv deferprocStack Syms.DeferprocStack
mv Deferreturn Syms.Deferreturn
mv Duffcopy Syms.Duffcopy
mv Duffzero Syms.Duffzero
mv gcWriteBarrier Syms.GCWriteBarrier
mv goschedguarded Syms.Goschedguarded
mv growslice Syms.Growslice
mv msanread Syms.Msanread
mv msanwrite Syms.Msanwrite
mv msanmove Syms.Msanmove
mv newobject Syms.Newobject
mv newproc Syms.Newproc
mv panicdivide Syms.Panicdivide
mv panicshift Syms.Panicshift
mv panicdottypeE Syms.PanicdottypeE
mv panicdottypeI Syms.PanicdottypeI
mv panicnildottype Syms.Panicnildottype
mv panicoverflow Syms.Panicoverflow
mv raceread Syms.Raceread
mv racereadrange Syms.Racereadrange
mv racewrite Syms.Racewrite
mv racewriterange Syms.Racewriterange
mv SigPanic Syms.SigPanic
mv typedmemclr Syms.Typedmemclr
mv typedmemmove Syms.Typedmemmove
mv Udiv Syms.Udiv
mv writeBarrier Syms.WriteBarrier
mv zerobaseSym Syms.Zerobase
mv arm64HasATOMICS Syms.ARM64HasATOMICS
mv armHasVFPv4 Syms.ARMHasVFPv4
mv x86HasFMA Syms.X86HasFMA
mv x86HasPOPCNT Syms.X86HasPOPCNT
mv x86HasSSE41 Syms.X86HasSSE41
mv WasmDiv Syms.WasmDiv
mv WasmMove Syms.WasmMove
mv WasmZero Syms.WasmZero
mv WasmTruncS Syms.WasmTruncS
mv WasmTruncU Syms.WasmTruncU
mv gopkg Pkgs.Go
mv itabpkg Pkgs.Itab
mv itablinkpkg Pkgs.Itablink
mv mappkg Pkgs.Map
mv msanpkg Pkgs.Msan
mv racepkg Pkgs.Race
mv Runtimepkg Pkgs.Runtime
mv trackpkg Pkgs.Track
mv unsafepkg Pkgs.Unsafe
mv Names Syms Pkgs symtab.go
mv symtab.go cmd/compile/internal/ir
'
Change-Id: Ic143862148569a3bcde8e70b26d75421aa2d00f3
Reviewed-on: https://go-review.googlesource.com/c/go/+/279235
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-12-23 00:10:25 -05:00
|
|
|
p.To.Sym = ir.Syms.Duffcopy
|
2016-05-13 15:31:14 -04:00
|
|
|
p.To.Offset = v.AuxInt
|
2016-05-06 10:13:31 -07:00
|
|
|
case ssa.OpARMLoweredNilCheck:
|
|
|
|
|
// Issue a load which will fault if arg is nil.
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(arm.AMOVB)
|
2016-05-06 10:13:31 -07:00
|
|
|
p.From.Type = obj.TYPE_MEM
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2020-12-23 00:57:10 -05:00
|
|
|
ssagen.AddAux(&p.From, v)
|
2016-05-06 10:13:31 -07:00
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = arm.REGTMP
|
2019-10-29 14:24:43 -04:00
|
|
|
if logopt.Enabled() {
|
|
|
|
|
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
|
|
|
|
|
}
|
2020-11-19 20:49:23 -05:00
|
|
|
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
|
|
|
|
|
base.WarnfAt(v.Pos, "generated nil check")
|
2016-05-06 10:13:31 -07:00
|
|
|
}
|
2016-07-27 12:33:08 -04:00
|
|
|
case ssa.OpARMLoweredZero:
|
2016-05-13 15:31:14 -04:00
|
|
|
// MOVW.P Rarg2, 4(R1)
|
|
|
|
|
// CMP Rarg1, R1
|
2016-07-27 12:33:08 -04:00
|
|
|
// BLE -2(PC)
|
|
|
|
|
// arg1 is the address of the last element to zero
|
2016-05-13 15:31:14 -04:00
|
|
|
// arg2 is known to be zero
|
2016-07-27 12:33:08 -04:00
|
|
|
// auxint is alignment
|
|
|
|
|
var sz int64
|
|
|
|
|
var mov obj.As
|
|
|
|
|
switch {
|
|
|
|
|
case v.AuxInt%4 == 0:
|
|
|
|
|
sz = 4
|
|
|
|
|
mov = arm.AMOVW
|
|
|
|
|
case v.AuxInt%2 == 0:
|
|
|
|
|
sz = 2
|
|
|
|
|
mov = arm.AMOVH
|
|
|
|
|
default:
|
2016-06-27 16:54:57 -04:00
|
|
|
sz = 1
|
|
|
|
|
mov = arm.AMOVB
|
|
|
|
|
}
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(mov)
|
2016-05-13 15:31:14 -04:00
|
|
|
p.Scond = arm.C_PBIT
|
|
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[2].Reg()
|
2016-05-13 15:31:14 -04:00
|
|
|
p.To.Type = obj.TYPE_MEM
|
|
|
|
|
p.To.Reg = arm.REG_R1
|
2016-06-27 16:54:57 -04:00
|
|
|
p.To.Offset = sz
|
2017-03-20 08:01:28 -07:00
|
|
|
p2 := s.Prog(arm.ACMP)
|
2016-05-13 15:31:14 -04:00
|
|
|
p2.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p2.From.Reg = v.Args[1].Reg()
|
2016-05-13 15:31:14 -04:00
|
|
|
p2.Reg = arm.REG_R1
|
2017-03-20 08:01:28 -07:00
|
|
|
p3 := s.Prog(arm.ABLE)
|
2016-05-13 15:31:14 -04:00
|
|
|
p3.To.Type = obj.TYPE_BRANCH
|
2020-12-23 00:46:27 -05:00
|
|
|
p3.To.SetTarget(p)
|
2016-07-27 12:33:08 -04:00
|
|
|
case ssa.OpARMLoweredMove:
|
2016-05-13 15:31:14 -04:00
|
|
|
// MOVW.P 4(R1), Rtmp
|
|
|
|
|
// MOVW.P Rtmp, 4(R2)
|
|
|
|
|
// CMP Rarg2, R1
|
2016-07-27 12:33:08 -04:00
|
|
|
// BLE -3(PC)
|
|
|
|
|
// arg2 is the address of the last element of src
|
|
|
|
|
// auxint is alignment
|
|
|
|
|
var sz int64
|
|
|
|
|
var mov obj.As
|
|
|
|
|
switch {
|
|
|
|
|
case v.AuxInt%4 == 0:
|
|
|
|
|
sz = 4
|
|
|
|
|
mov = arm.AMOVW
|
|
|
|
|
case v.AuxInt%2 == 0:
|
|
|
|
|
sz = 2
|
|
|
|
|
mov = arm.AMOVH
|
|
|
|
|
default:
|
2016-06-27 16:54:57 -04:00
|
|
|
sz = 1
|
|
|
|
|
mov = arm.AMOVB
|
|
|
|
|
}
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(mov)
|
2016-05-13 15:31:14 -04:00
|
|
|
p.Scond = arm.C_PBIT
|
|
|
|
|
p.From.Type = obj.TYPE_MEM
|
|
|
|
|
p.From.Reg = arm.REG_R1
|
2016-06-27 16:54:57 -04:00
|
|
|
p.From.Offset = sz
|
2016-05-13 15:31:14 -04:00
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = arm.REGTMP
|
2017-03-20 08:01:28 -07:00
|
|
|
p2 := s.Prog(mov)
|
2016-05-13 15:31:14 -04:00
|
|
|
p2.Scond = arm.C_PBIT
|
|
|
|
|
p2.From.Type = obj.TYPE_REG
|
|
|
|
|
p2.From.Reg = arm.REGTMP
|
|
|
|
|
p2.To.Type = obj.TYPE_MEM
|
|
|
|
|
p2.To.Reg = arm.REG_R2
|
2016-06-27 16:54:57 -04:00
|
|
|
p2.To.Offset = sz
|
2017-03-20 08:01:28 -07:00
|
|
|
p3 := s.Prog(arm.ACMP)
|
2016-05-13 15:31:14 -04:00
|
|
|
p3.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p3.From.Reg = v.Args[2].Reg()
|
2016-05-13 15:31:14 -04:00
|
|
|
p3.Reg = arm.REG_R1
|
2017-03-20 08:01:28 -07:00
|
|
|
p4 := s.Prog(arm.ABLE)
|
2016-05-13 15:31:14 -04:00
|
|
|
p4.To.Type = obj.TYPE_BRANCH
|
2020-12-23 00:46:27 -05:00
|
|
|
p4.To.SetTarget(p)
|
2016-05-06 10:13:31 -07:00
|
|
|
case ssa.OpARMEqual,
|
|
|
|
|
ssa.OpARMNotEqual,
|
|
|
|
|
ssa.OpARMLessThan,
|
|
|
|
|
ssa.OpARMLessEqual,
|
|
|
|
|
ssa.OpARMGreaterThan,
|
|
|
|
|
ssa.OpARMGreaterEqual,
|
|
|
|
|
ssa.OpARMLessThanU,
|
|
|
|
|
ssa.OpARMLessEqualU,
|
|
|
|
|
ssa.OpARMGreaterThanU,
|
|
|
|
|
ssa.OpARMGreaterEqualU:
|
2016-05-13 11:25:07 -04:00
|
|
|
// generate boolean values
|
|
|
|
|
// use conditional move
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(arm.AMOVW)
|
2016-05-13 11:25:07 -04:00
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = 0
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2017-03-20 08:01:28 -07:00
|
|
|
p = s.Prog(arm.AMOVW)
|
2016-05-13 11:25:07 -04:00
|
|
|
p.Scond = condBits[v.Op]
|
|
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = 1
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-05-25 09:49:28 -04:00
|
|
|
case ssa.OpARMLoweredGetClosurePtr:
|
2016-07-03 13:40:03 -07:00
|
|
|
// Closure pointer is R7 (arm.REGCTXT).
|
2020-12-23 00:57:10 -05:00
|
|
|
ssagen.CheckLoweredGetClosurePtr(v)
|
2017-10-09 15:33:29 -04:00
|
|
|
case ssa.OpARMLoweredGetCallerSP:
|
|
|
|
|
// caller's SP is FixedFrameSize below the address of the first arg
|
|
|
|
|
p := s.Prog(arm.AMOVW)
|
|
|
|
|
p.From.Type = obj.TYPE_ADDR
|
2022-04-18 13:41:08 -04:00
|
|
|
p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
|
2017-10-09 15:33:29 -04:00
|
|
|
p.From.Name = obj.NAME_PARAM
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = v.Reg()
|
2018-05-02 14:25:00 +08:00
|
|
|
case ssa.OpARMLoweredGetCallerPC:
|
|
|
|
|
p := s.Prog(obj.AGETCALLERPC)
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = v.Reg()
|
2020-06-15 14:43:02 -07:00
|
|
|
case ssa.OpARMFlagConstant:
|
|
|
|
|
v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString())
|
2016-06-13 16:49:09 -04:00
|
|
|
case ssa.OpARMInvertFlags:
|
|
|
|
|
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
|
2021-03-17 19:15:38 -04:00
|
|
|
case ssa.OpClobber, ssa.OpClobberReg:
|
2016-06-08 22:02:08 -07:00
|
|
|
// TODO: implement for clobberdead experiment. Nop is ok for now.
|
2016-03-21 22:57:26 -07:00
|
|
|
default:
|
2016-09-14 10:01:05 -07:00
|
|
|
v.Fatalf("genValue not implemented: %s", v.LongString())
|
2016-03-21 22:57:26 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-05-13 11:25:07 -04:00
|
|
|
var condBits = map[ssa.Op]uint8{
|
|
|
|
|
ssa.OpARMEqual: arm.C_SCOND_EQ,
|
|
|
|
|
ssa.OpARMNotEqual: arm.C_SCOND_NE,
|
|
|
|
|
ssa.OpARMLessThan: arm.C_SCOND_LT,
|
|
|
|
|
ssa.OpARMLessThanU: arm.C_SCOND_LO,
|
|
|
|
|
ssa.OpARMLessEqual: arm.C_SCOND_LE,
|
|
|
|
|
ssa.OpARMLessEqualU: arm.C_SCOND_LS,
|
|
|
|
|
ssa.OpARMGreaterThan: arm.C_SCOND_GT,
|
|
|
|
|
ssa.OpARMGreaterThanU: arm.C_SCOND_HI,
|
|
|
|
|
ssa.OpARMGreaterEqual: arm.C_SCOND_GE,
|
|
|
|
|
ssa.OpARMGreaterEqualU: arm.C_SCOND_HS,
|
|
|
|
|
}
|
|
|
|
|
|
2016-05-06 10:13:31 -07:00
|
|
|
var blockJump = map[ssa.BlockKind]struct {
|
|
|
|
|
asm, invasm obj.As
|
|
|
|
|
}{
|
2020-06-01 11:01:14 +00:00
|
|
|
ssa.BlockARMEQ: {arm.ABEQ, arm.ABNE},
|
|
|
|
|
ssa.BlockARMNE: {arm.ABNE, arm.ABEQ},
|
|
|
|
|
ssa.BlockARMLT: {arm.ABLT, arm.ABGE},
|
|
|
|
|
ssa.BlockARMGE: {arm.ABGE, arm.ABLT},
|
|
|
|
|
ssa.BlockARMLE: {arm.ABLE, arm.ABGT},
|
|
|
|
|
ssa.BlockARMGT: {arm.ABGT, arm.ABLE},
|
|
|
|
|
ssa.BlockARMULT: {arm.ABLO, arm.ABHS},
|
|
|
|
|
ssa.BlockARMUGE: {arm.ABHS, arm.ABLO},
|
|
|
|
|
ssa.BlockARMUGT: {arm.ABHI, arm.ABLS},
|
|
|
|
|
ssa.BlockARMULE: {arm.ABLS, arm.ABHI},
|
|
|
|
|
ssa.BlockARMLTnoov: {arm.ABMI, arm.ABPL},
|
|
|
|
|
ssa.BlockARMGEnoov: {arm.ABPL, arm.ABMI},
|
|
|
|
|
}
|
|
|
|
|
|
2022-11-11 19:22:35 +08:00
|
|
|
// To model a 'LEnoov' ('<=' without overflow checking) branching.
|
2020-12-23 00:57:10 -05:00
|
|
|
var leJumps = [2][2]ssagen.IndexJump{
|
2020-06-01 11:01:14 +00:00
|
|
|
{{Jump: arm.ABEQ, Index: 0}, {Jump: arm.ABPL, Index: 1}}, // next == b.Succs[0]
|
|
|
|
|
{{Jump: arm.ABMI, Index: 0}, {Jump: arm.ABEQ, Index: 0}}, // next == b.Succs[1]
|
|
|
|
|
}
|
|
|
|
|
|
2022-11-11 19:22:35 +08:00
|
|
|
// To model a 'GTnoov' ('>' without overflow checking) branching.
|
2020-12-23 00:57:10 -05:00
|
|
|
var gtJumps = [2][2]ssagen.IndexJump{
|
2020-06-01 11:01:14 +00:00
|
|
|
{{Jump: arm.ABMI, Index: 1}, {Jump: arm.ABEQ, Index: 1}}, // next == b.Succs[0]
|
|
|
|
|
{{Jump: arm.ABEQ, Index: 1}, {Jump: arm.ABPL, Index: 0}}, // next == b.Succs[1]
|
2016-05-06 10:13:31 -07:00
|
|
|
}
|
|
|
|
|
|
2020-12-23 00:57:10 -05:00
|
|
|
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
|
2016-03-21 22:57:26 -07:00
|
|
|
switch b.Kind {
|
2025-02-19 16:47:31 -05:00
|
|
|
case ssa.BlockPlain, ssa.BlockDefer:
|
2016-05-15 00:12:56 -04:00
|
|
|
if b.Succs[0].Block() != next {
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(obj.AJMP)
|
2016-05-15 00:12:56 -04:00
|
|
|
p.To.Type = obj.TYPE_BRANCH
|
2020-12-23 00:57:10 -05:00
|
|
|
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
|
2016-05-15 00:12:56 -04:00
|
|
|
}
|
|
|
|
|
|
cmd/compile: restore tail call for method wrappers
For certain type of method wrappers we used to generate a tail
call. That was disabled in CL 307234 when register ABI is used,
because with the current IR it was difficult to generate a tail
call with the arguments in the right places. The problem was that
the IR does not contain a CALL-like node with arguments; instead,
it contains an OAS node that adjusts the receiver, than an
OTAILCALL node that just contains the target, but no argument
(with the assumption that the OAS node will put the adjusted
receiver in the right place). With register ABI, putting
arguments in registers are done in SSA. The assignment (OAS)
doesn't put the receiver in register.
This CL changes the IR of a tail call to take an actual OCALL
node. Specifically, a tail call is represented as
OTAILCALL (OCALL target args...)
This way, the call target and args are connected through the OCALL
node. So the call can be analyzed in SSA and the args can be passed
in the right places.
(Alternatively, we could have OTAILCALL node directly take the
target and the args, without the OCALL node. Using an OCALL node is
convenient as there is existing code that processes OCALL nodes
which do not need to be changed. Also, a tail call is similar to
ORETURN (OCALL target args...), except it doesn't preserve the
frame. I did the former but I'm open to change.)
The SSA representation is similar. Previously, the IR lowers to
a Store the receiver then a BlockRetJmp which jumps to the target
(without putting the arg in register). Now we use a TailCall op,
which takes the target and the args. The call expansion pass and
the register allocator handles TailCall pretty much like a
StaticCall, and it will do the right ABI analysis and put the args
in the right places. (Args other than the receiver are already in
the right places. For register args it generates no code for them.
For stack args currently it generates a self copy. I'll work on
optimizing that out.) BlockRetJmp is still used, signaling it is a
tail call. The actual call is made in the TailCall op so
BlockRetJmp generates no code (we could use BlockExit if we like).
This slightly reduces binary size:
old new
cmd/go 14003088 13953936
cmd/link 6275552 6271456
Change-Id: I2d16d8d419fe1f17554916d317427383e17e27f0
Reviewed-on: https://go-review.googlesource.com/c/go/+/350145
Trust: Cherry Mui <cherryyz@google.com>
Run-TryBot: Cherry Mui <cherryyz@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
Reviewed-by: David Chase <drchase@google.com>
2021-09-10 22:05:55 -04:00
|
|
|
case ssa.BlockExit, ssa.BlockRetJmp:
|
2016-05-13 15:22:56 -04:00
|
|
|
|
2016-03-21 22:57:26 -07:00
|
|
|
case ssa.BlockRet:
|
2017-03-20 08:01:28 -07:00
|
|
|
s.Prog(obj.ARET)
|
2016-05-06 10:13:31 -07:00
|
|
|
|
|
|
|
|
case ssa.BlockARMEQ, ssa.BlockARMNE,
|
|
|
|
|
ssa.BlockARMLT, ssa.BlockARMGE,
|
|
|
|
|
ssa.BlockARMLE, ssa.BlockARMGT,
|
|
|
|
|
ssa.BlockARMULT, ssa.BlockARMUGT,
|
2020-06-01 11:01:14 +00:00
|
|
|
ssa.BlockARMULE, ssa.BlockARMUGE,
|
|
|
|
|
ssa.BlockARMLTnoov, ssa.BlockARMGEnoov:
|
2016-05-06 10:13:31 -07:00
|
|
|
jmp := blockJump[b.Kind]
|
|
|
|
|
switch next {
|
|
|
|
|
case b.Succs[0].Block():
|
2018-04-05 16:14:42 -04:00
|
|
|
s.Br(jmp.invasm, b.Succs[1].Block())
|
2016-05-06 10:13:31 -07:00
|
|
|
case b.Succs[1].Block():
|
2018-04-05 16:14:42 -04:00
|
|
|
s.Br(jmp.asm, b.Succs[0].Block())
|
2016-05-06 10:13:31 -07:00
|
|
|
default:
|
2018-04-05 16:14:42 -04:00
|
|
|
if b.Likely != ssa.BranchUnlikely {
|
|
|
|
|
s.Br(jmp.asm, b.Succs[0].Block())
|
|
|
|
|
s.Br(obj.AJMP, b.Succs[1].Block())
|
|
|
|
|
} else {
|
|
|
|
|
s.Br(jmp.invasm, b.Succs[1].Block())
|
|
|
|
|
s.Br(obj.AJMP, b.Succs[0].Block())
|
|
|
|
|
}
|
2016-05-06 10:13:31 -07:00
|
|
|
}
|
|
|
|
|
|
2020-06-01 11:01:14 +00:00
|
|
|
case ssa.BlockARMLEnoov:
|
|
|
|
|
s.CombJump(b, next, &leJumps)
|
|
|
|
|
|
|
|
|
|
case ssa.BlockARMGTnoov:
|
|
|
|
|
s.CombJump(b, next, >Jumps)
|
|
|
|
|
|
2016-05-06 10:13:31 -07:00
|
|
|
default:
|
2019-08-12 20:19:58 +01:00
|
|
|
b.Fatalf("branch not implemented: %s", b.LongString())
|
2016-03-21 22:57:26 -07:00
|
|
|
}
|
|
|
|
|
}
|