2016-03-21 22:57:26 -07:00
|
|
|
// Copyright 2016 The Go Authors. All rights reserved.
|
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
|
|
package arm
|
|
|
|
|
|
|
|
|
|
import (
|
2016-06-17 10:34:06 -04:00
|
|
|
"fmt"
|
2016-05-31 11:27:16 -04:00
|
|
|
"math"
|
2018-09-10 08:29:52 +00:00
|
|
|
"math/bits"
|
2016-05-31 11:27:16 -04:00
|
|
|
|
2020-11-19 20:49:23 -05:00
|
|
|
"cmd/compile/internal/base"
|
2016-03-21 22:57:26 -07:00
|
|
|
"cmd/compile/internal/gc"
|
[dev.regabi] cmd/compile: introduce cmd/compile/internal/ir [generated]
If we want to break up package gc at all, we will need to move
the compiler IR it defines into a separate package that can be
imported by packages that gc itself imports. This CL does that.
It also removes the TINT8 etc aliases so that all code is clear
about which package things are coming from.
This CL is automatically generated by the script below.
See the comments in the script for details about the changes.
[git-generate]
cd src/cmd/compile/internal/gc
rf '
# These names were never fully qualified
# when the types package was added.
# Do it now, to avoid confusion about where they live.
inline -rm \
Txxx \
TINT8 \
TUINT8 \
TINT16 \
TUINT16 \
TINT32 \
TUINT32 \
TINT64 \
TUINT64 \
TINT \
TUINT \
TUINTPTR \
TCOMPLEX64 \
TCOMPLEX128 \
TFLOAT32 \
TFLOAT64 \
TBOOL \
TPTR \
TFUNC \
TSLICE \
TARRAY \
TSTRUCT \
TCHAN \
TMAP \
TINTER \
TFORW \
TANY \
TSTRING \
TUNSAFEPTR \
TIDEAL \
TNIL \
TBLANK \
TFUNCARGS \
TCHANARGS \
NTYPE \
BADWIDTH
# esc.go and escape.go do not need to be split.
# Append esc.go onto the end of escape.go.
mv esc.go escape.go
# Pull out the type format installation from func Main,
# so it can be carried into package ir.
mv Main:/Sconv.=/-0,/TypeLinkSym/-1 InstallTypeFormats
# Names that need to be exported for use by code left in gc.
mv Isconst IsConst
mv asNode AsNode
mv asNodes AsNodes
mv asTypesNode AsTypesNode
mv basicnames BasicTypeNames
mv builtinpkg BuiltinPkg
mv consttype ConstType
mv dumplist DumpList
mv fdumplist FDumpList
mv fmtMode FmtMode
mv goopnames OpNames
mv inspect Inspect
mv inspectList InspectList
mv localpkg LocalPkg
mv nblank BlankNode
mv numImport NumImport
mv opprec OpPrec
mv origSym OrigSym
mv stmtwithinit StmtWithInit
mv dump DumpAny
mv fdump FDumpAny
mv nod Nod
mv nodl NodAt
mv newname NewName
mv newnamel NewNameAt
mv assertRepresents AssertValidTypeForConst
mv represents ValidTypeForConst
mv nodlit NewLiteral
# Types and fields that need to be exported for use by gc.
mv nowritebarrierrecCallSym SymAndPos
mv SymAndPos.lineno SymAndPos.Pos
mv SymAndPos.target SymAndPos.Sym
mv Func.lsym Func.LSym
mv Func.setWBPos Func.SetWBPos
mv Func.numReturns Func.NumReturns
mv Func.numDefers Func.NumDefers
mv Func.nwbrCalls Func.NWBRCalls
# initLSym is an algorithm left behind in gc,
# not an operation on Func itself.
mv Func.initLSym initLSym
mv nodeQueue NodeQueue
mv NodeQueue.empty NodeQueue.Empty
mv NodeQueue.popLeft NodeQueue.PopLeft
mv NodeQueue.pushRight NodeQueue.PushRight
# Many methods on Node are actually algorithms that
# would apply to any node implementation.
# Those become plain functions.
mv Node.funcname FuncName
mv Node.isBlank IsBlank
mv Node.isGoConst isGoConst
mv Node.isNil IsNil
mv Node.isParamHeapCopy isParamHeapCopy
mv Node.isParamStackCopy isParamStackCopy
mv Node.isSimpleName isSimpleName
mv Node.mayBeShared MayBeShared
mv Node.pkgFuncName PkgFuncName
mv Node.backingArrayPtrLen backingArrayPtrLen
mv Node.isterminating isTermNode
mv Node.labeledControl labeledControl
mv Nodes.isterminating isTermNodes
mv Nodes.sigerr fmtSignature
mv Node.MethodName methodExprName
mv Node.MethodFunc methodExprFunc
mv Node.IsMethod IsMethod
# Every node will need to implement RawCopy;
# Copy and SepCopy algorithms will use it.
mv Node.rawcopy Node.RawCopy
mv Node.copy Copy
mv Node.sepcopy SepCopy
# Extract Node.Format method body into func FmtNode,
# but leave method wrapper behind.
mv Node.Format:0,$ FmtNode
# Formatting helpers that will apply to all node implementations.
mv Node.Line Line
mv Node.exprfmt exprFmt
mv Node.jconv jconvFmt
mv Node.modeString modeString
mv Node.nconv nconvFmt
mv Node.nodedump nodeDumpFmt
mv Node.nodefmt nodeFmt
mv Node.stmtfmt stmtFmt
# Constant support needed for code moving to ir.
mv okforconst OKForConst
mv vconv FmtConst
mv int64Val Int64Val
mv float64Val Float64Val
mv Node.ValueInterface ConstValue
# Organize code into files.
mv LocalPkg BuiltinPkg ir.go
mv NumImport InstallTypeFormats Line fmt.go
mv syntax.go Nod NodAt NewNameAt Class Pxxx PragmaFlag Nointerface SymAndPos \
AsNode AsTypesNode BlankNode OrigSym \
Node.SliceBounds Node.SetSliceBounds Op.IsSlice3 \
IsConst Node.Int64Val Node.CanInt64 Node.Uint64Val Node.BoolVal Node.StringVal \
Node.RawCopy SepCopy Copy \
IsNil IsBlank IsMethod \
Node.Typ Node.StorageClass node.go
mv ConstType ConstValue Int64Val Float64Val AssertValidTypeForConst ValidTypeForConst NewLiteral idealType OKForConst val.go
# Move files to new ir package.
mv bitset.go class_string.go dump.go fmt.go \
ir.go node.go op_string.go val.go \
sizeof_test.go cmd/compile/internal/ir
'
: # fix mkbuiltin.go to generate the changes made to builtin.go during rf
sed -i '' '
s/\[T/[types.T/g
s/\*Node/*ir.Node/g
/internal\/types/c \
fmt.Fprintln(&b, `import (`) \
fmt.Fprintln(&b, ` "cmd/compile/internal/ir"`) \
fmt.Fprintln(&b, ` "cmd/compile/internal/types"`) \
fmt.Fprintln(&b, `)`)
' mkbuiltin.go
gofmt -w mkbuiltin.go
: # update cmd/dist to add internal/ir
cd ../../../dist
sed -i '' '/compile.internal.gc/a\
"cmd/compile/internal/ir",
' buildtool.go
gofmt -w buildtool.go
: # update cmd/compile TestFormats
cd ../..
go install std cmd
cd cmd/compile
go test -u || go test # first one updates but fails; second passes
Change-Id: I5f7caf6b20629b51970279e81231a3574d5b51db
Reviewed-on: https://go-review.googlesource.com/c/go/+/273008
Trust: Russ Cox <rsc@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-11-19 21:09:22 -05:00
|
|
|
"cmd/compile/internal/ir"
|
2019-10-29 14:24:43 -04:00
|
|
|
"cmd/compile/internal/logopt"
|
2016-03-21 22:57:26 -07:00
|
|
|
"cmd/compile/internal/ssa"
|
cmd/compile: change ssa.Type into *types.Type
When package ssa was created, Type was in package gc.
To avoid circular dependencies, we used an interface (ssa.Type)
to represent type information in SSA.
In the Go 1.9 cycle, gri extricated the Type type from package gc.
As a result, we can now use it in package ssa.
Now, instead of package types depending on package ssa,
it is the other way.
This is a more sensible dependency tree,
and helps compiler performance a bit.
Though this is a big CL, most of the changes are
mechanical and uninteresting.
Interesting bits:
* Add new singleton globals to package types for the special
SSA types Memory, Void, Invalid, Flags, and Int128.
* Add two new Types, TSSA for the special types,
and TTUPLE, for SSA tuple types.
ssa.MakeTuple is now types.NewTuple.
* Move type comparison result constants CMPlt, CMPeq, and CMPgt
to package types.
* We had picked the name "types" in our rules for the handy
list of types provided by ssa.Config. That conflicted with
the types package name, so change it to "typ".
* Update the type comparison routine to handle tuples and special
types inline.
* Teach gc/fmt.go how to print special types.
* We can now eliminate ElemTypes in favor of just Elem,
and probably also some other duplicated Type methods
designed to return ssa.Type instead of *types.Type.
* The ssa tests were using their own dummy types,
and they were not particularly careful about types in general.
Of necessity, this CL switches them to use *types.Type;
it does not make them more type-accurate.
Unfortunately, using types.Type means initializing a bit
of the types universe.
This is prime for refactoring and improvement.
This shrinks ssa.Value; it now fits in a smaller size class
on 64 bit systems. This doesn't have a giant impact,
though, since most Values are preallocated in a chunk.
name old alloc/op new alloc/op delta
Template 37.9MB ± 0% 37.7MB ± 0% -0.57% (p=0.000 n=10+8)
Unicode 28.9MB ± 0% 28.7MB ± 0% -0.52% (p=0.000 n=10+10)
GoTypes 110MB ± 0% 109MB ± 0% -0.88% (p=0.000 n=10+10)
Flate 24.7MB ± 0% 24.6MB ± 0% -0.66% (p=0.000 n=10+10)
GoParser 31.1MB ± 0% 30.9MB ± 0% -0.61% (p=0.000 n=10+9)
Reflect 73.9MB ± 0% 73.4MB ± 0% -0.62% (p=0.000 n=10+8)
Tar 25.8MB ± 0% 25.6MB ± 0% -0.77% (p=0.000 n=9+10)
XML 41.2MB ± 0% 40.9MB ± 0% -0.80% (p=0.000 n=10+10)
[Geo mean] 40.5MB 40.3MB -0.68%
name old allocs/op new allocs/op delta
Template 385k ± 0% 386k ± 0% ~ (p=0.356 n=10+9)
Unicode 343k ± 1% 344k ± 0% ~ (p=0.481 n=10+10)
GoTypes 1.16M ± 0% 1.16M ± 0% -0.16% (p=0.004 n=10+10)
Flate 238k ± 1% 238k ± 1% ~ (p=0.853 n=10+10)
GoParser 320k ± 0% 320k ± 0% ~ (p=0.720 n=10+9)
Reflect 957k ± 0% 957k ± 0% ~ (p=0.460 n=10+8)
Tar 252k ± 0% 252k ± 0% ~ (p=0.133 n=9+10)
XML 400k ± 0% 400k ± 0% ~ (p=0.796 n=10+10)
[Geo mean] 428k 428k -0.01%
Removing all the interface calls helps non-trivially with CPU, though.
name old time/op new time/op delta
Template 178ms ± 4% 173ms ± 3% -2.90% (p=0.000 n=94+96)
Unicode 85.0ms ± 4% 83.9ms ± 4% -1.23% (p=0.000 n=96+96)
GoTypes 543ms ± 3% 528ms ± 3% -2.73% (p=0.000 n=98+96)
Flate 116ms ± 3% 113ms ± 4% -2.34% (p=0.000 n=96+99)
GoParser 144ms ± 3% 140ms ± 4% -2.80% (p=0.000 n=99+97)
Reflect 344ms ± 3% 334ms ± 4% -3.02% (p=0.000 n=100+99)
Tar 106ms ± 5% 103ms ± 4% -3.30% (p=0.000 n=98+94)
XML 198ms ± 5% 192ms ± 4% -2.88% (p=0.000 n=92+95)
[Geo mean] 178ms 173ms -2.65%
name old user-time/op new user-time/op delta
Template 229ms ± 5% 224ms ± 5% -2.36% (p=0.000 n=95+99)
Unicode 107ms ± 6% 106ms ± 5% -1.13% (p=0.001 n=93+95)
GoTypes 696ms ± 4% 679ms ± 4% -2.45% (p=0.000 n=97+99)
Flate 137ms ± 4% 134ms ± 5% -2.66% (p=0.000 n=99+96)
GoParser 176ms ± 5% 172ms ± 8% -2.27% (p=0.000 n=98+100)
Reflect 430ms ± 6% 411ms ± 5% -4.46% (p=0.000 n=100+92)
Tar 128ms ±13% 123ms ±13% -4.21% (p=0.000 n=100+100)
XML 239ms ± 6% 233ms ± 6% -2.50% (p=0.000 n=95+97)
[Geo mean] 220ms 213ms -2.76%
Change-Id: I15c7d6268347f8358e75066dfdbd77db24e8d0c1
Reviewed-on: https://go-review.googlesource.com/42145
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
2017-04-28 14:12:28 -07:00
|
|
|
"cmd/compile/internal/types"
|
2016-03-21 22:57:26 -07:00
|
|
|
"cmd/internal/obj"
|
|
|
|
|
"cmd/internal/obj/arm"
|
2017-10-20 03:50:15 +00:00
|
|
|
"cmd/internal/objabi"
|
2016-03-21 22:57:26 -07:00
|
|
|
)
|
|
|
|
|
|
2016-05-15 00:12:56 -04:00
|
|
|
// loadByType returns the load instruction of the given type.
|
cmd/compile: change ssa.Type into *types.Type
When package ssa was created, Type was in package gc.
To avoid circular dependencies, we used an interface (ssa.Type)
to represent type information in SSA.
In the Go 1.9 cycle, gri extricated the Type type from package gc.
As a result, we can now use it in package ssa.
Now, instead of package types depending on package ssa,
it is the other way.
This is a more sensible dependency tree,
and helps compiler performance a bit.
Though this is a big CL, most of the changes are
mechanical and uninteresting.
Interesting bits:
* Add new singleton globals to package types for the special
SSA types Memory, Void, Invalid, Flags, and Int128.
* Add two new Types, TSSA for the special types,
and TTUPLE, for SSA tuple types.
ssa.MakeTuple is now types.NewTuple.
* Move type comparison result constants CMPlt, CMPeq, and CMPgt
to package types.
* We had picked the name "types" in our rules for the handy
list of types provided by ssa.Config. That conflicted with
the types package name, so change it to "typ".
* Update the type comparison routine to handle tuples and special
types inline.
* Teach gc/fmt.go how to print special types.
* We can now eliminate ElemTypes in favor of just Elem,
and probably also some other duplicated Type methods
designed to return ssa.Type instead of *types.Type.
* The ssa tests were using their own dummy types,
and they were not particularly careful about types in general.
Of necessity, this CL switches them to use *types.Type;
it does not make them more type-accurate.
Unfortunately, using types.Type means initializing a bit
of the types universe.
This is prime for refactoring and improvement.
This shrinks ssa.Value; it now fits in a smaller size class
on 64 bit systems. This doesn't have a giant impact,
though, since most Values are preallocated in a chunk.
name old alloc/op new alloc/op delta
Template 37.9MB ± 0% 37.7MB ± 0% -0.57% (p=0.000 n=10+8)
Unicode 28.9MB ± 0% 28.7MB ± 0% -0.52% (p=0.000 n=10+10)
GoTypes 110MB ± 0% 109MB ± 0% -0.88% (p=0.000 n=10+10)
Flate 24.7MB ± 0% 24.6MB ± 0% -0.66% (p=0.000 n=10+10)
GoParser 31.1MB ± 0% 30.9MB ± 0% -0.61% (p=0.000 n=10+9)
Reflect 73.9MB ± 0% 73.4MB ± 0% -0.62% (p=0.000 n=10+8)
Tar 25.8MB ± 0% 25.6MB ± 0% -0.77% (p=0.000 n=9+10)
XML 41.2MB ± 0% 40.9MB ± 0% -0.80% (p=0.000 n=10+10)
[Geo mean] 40.5MB 40.3MB -0.68%
name old allocs/op new allocs/op delta
Template 385k ± 0% 386k ± 0% ~ (p=0.356 n=10+9)
Unicode 343k ± 1% 344k ± 0% ~ (p=0.481 n=10+10)
GoTypes 1.16M ± 0% 1.16M ± 0% -0.16% (p=0.004 n=10+10)
Flate 238k ± 1% 238k ± 1% ~ (p=0.853 n=10+10)
GoParser 320k ± 0% 320k ± 0% ~ (p=0.720 n=10+9)
Reflect 957k ± 0% 957k ± 0% ~ (p=0.460 n=10+8)
Tar 252k ± 0% 252k ± 0% ~ (p=0.133 n=9+10)
XML 400k ± 0% 400k ± 0% ~ (p=0.796 n=10+10)
[Geo mean] 428k 428k -0.01%
Removing all the interface calls helps non-trivially with CPU, though.
name old time/op new time/op delta
Template 178ms ± 4% 173ms ± 3% -2.90% (p=0.000 n=94+96)
Unicode 85.0ms ± 4% 83.9ms ± 4% -1.23% (p=0.000 n=96+96)
GoTypes 543ms ± 3% 528ms ± 3% -2.73% (p=0.000 n=98+96)
Flate 116ms ± 3% 113ms ± 4% -2.34% (p=0.000 n=96+99)
GoParser 144ms ± 3% 140ms ± 4% -2.80% (p=0.000 n=99+97)
Reflect 344ms ± 3% 334ms ± 4% -3.02% (p=0.000 n=100+99)
Tar 106ms ± 5% 103ms ± 4% -3.30% (p=0.000 n=98+94)
XML 198ms ± 5% 192ms ± 4% -2.88% (p=0.000 n=92+95)
[Geo mean] 178ms 173ms -2.65%
name old user-time/op new user-time/op delta
Template 229ms ± 5% 224ms ± 5% -2.36% (p=0.000 n=95+99)
Unicode 107ms ± 6% 106ms ± 5% -1.13% (p=0.001 n=93+95)
GoTypes 696ms ± 4% 679ms ± 4% -2.45% (p=0.000 n=97+99)
Flate 137ms ± 4% 134ms ± 5% -2.66% (p=0.000 n=99+96)
GoParser 176ms ± 5% 172ms ± 8% -2.27% (p=0.000 n=98+100)
Reflect 430ms ± 6% 411ms ± 5% -4.46% (p=0.000 n=100+92)
Tar 128ms ±13% 123ms ±13% -4.21% (p=0.000 n=100+100)
XML 239ms ± 6% 233ms ± 6% -2.50% (p=0.000 n=95+97)
[Geo mean] 220ms 213ms -2.76%
Change-Id: I15c7d6268347f8358e75066dfdbd77db24e8d0c1
Reviewed-on: https://go-review.googlesource.com/42145
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
2017-04-28 14:12:28 -07:00
|
|
|
func loadByType(t *types.Type) obj.As {
|
2016-05-15 00:12:56 -04:00
|
|
|
if t.IsFloat() {
|
2017-04-28 00:19:49 +00:00
|
|
|
switch t.Size() {
|
2016-05-31 11:27:16 -04:00
|
|
|
case 4:
|
|
|
|
|
return arm.AMOVF
|
|
|
|
|
case 8:
|
|
|
|
|
return arm.AMOVD
|
|
|
|
|
}
|
2016-05-15 00:12:56 -04:00
|
|
|
} else {
|
2017-04-28 00:19:49 +00:00
|
|
|
switch t.Size() {
|
2016-05-15 00:12:56 -04:00
|
|
|
case 1:
|
|
|
|
|
if t.IsSigned() {
|
|
|
|
|
return arm.AMOVB
|
|
|
|
|
} else {
|
|
|
|
|
return arm.AMOVBU
|
|
|
|
|
}
|
|
|
|
|
case 2:
|
|
|
|
|
if t.IsSigned() {
|
|
|
|
|
return arm.AMOVH
|
|
|
|
|
} else {
|
|
|
|
|
return arm.AMOVHU
|
|
|
|
|
}
|
|
|
|
|
case 4:
|
|
|
|
|
return arm.AMOVW
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
panic("bad load type")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// storeByType returns the store instruction of the given type.
|
cmd/compile: change ssa.Type into *types.Type
When package ssa was created, Type was in package gc.
To avoid circular dependencies, we used an interface (ssa.Type)
to represent type information in SSA.
In the Go 1.9 cycle, gri extricated the Type type from package gc.
As a result, we can now use it in package ssa.
Now, instead of package types depending on package ssa,
it is the other way.
This is a more sensible dependency tree,
and helps compiler performance a bit.
Though this is a big CL, most of the changes are
mechanical and uninteresting.
Interesting bits:
* Add new singleton globals to package types for the special
SSA types Memory, Void, Invalid, Flags, and Int128.
* Add two new Types, TSSA for the special types,
and TTUPLE, for SSA tuple types.
ssa.MakeTuple is now types.NewTuple.
* Move type comparison result constants CMPlt, CMPeq, and CMPgt
to package types.
* We had picked the name "types" in our rules for the handy
list of types provided by ssa.Config. That conflicted with
the types package name, so change it to "typ".
* Update the type comparison routine to handle tuples and special
types inline.
* Teach gc/fmt.go how to print special types.
* We can now eliminate ElemTypes in favor of just Elem,
and probably also some other duplicated Type methods
designed to return ssa.Type instead of *types.Type.
* The ssa tests were using their own dummy types,
and they were not particularly careful about types in general.
Of necessity, this CL switches them to use *types.Type;
it does not make them more type-accurate.
Unfortunately, using types.Type means initializing a bit
of the types universe.
This is prime for refactoring and improvement.
This shrinks ssa.Value; it now fits in a smaller size class
on 64 bit systems. This doesn't have a giant impact,
though, since most Values are preallocated in a chunk.
name old alloc/op new alloc/op delta
Template 37.9MB ± 0% 37.7MB ± 0% -0.57% (p=0.000 n=10+8)
Unicode 28.9MB ± 0% 28.7MB ± 0% -0.52% (p=0.000 n=10+10)
GoTypes 110MB ± 0% 109MB ± 0% -0.88% (p=0.000 n=10+10)
Flate 24.7MB ± 0% 24.6MB ± 0% -0.66% (p=0.000 n=10+10)
GoParser 31.1MB ± 0% 30.9MB ± 0% -0.61% (p=0.000 n=10+9)
Reflect 73.9MB ± 0% 73.4MB ± 0% -0.62% (p=0.000 n=10+8)
Tar 25.8MB ± 0% 25.6MB ± 0% -0.77% (p=0.000 n=9+10)
XML 41.2MB ± 0% 40.9MB ± 0% -0.80% (p=0.000 n=10+10)
[Geo mean] 40.5MB 40.3MB -0.68%
name old allocs/op new allocs/op delta
Template 385k ± 0% 386k ± 0% ~ (p=0.356 n=10+9)
Unicode 343k ± 1% 344k ± 0% ~ (p=0.481 n=10+10)
GoTypes 1.16M ± 0% 1.16M ± 0% -0.16% (p=0.004 n=10+10)
Flate 238k ± 1% 238k ± 1% ~ (p=0.853 n=10+10)
GoParser 320k ± 0% 320k ± 0% ~ (p=0.720 n=10+9)
Reflect 957k ± 0% 957k ± 0% ~ (p=0.460 n=10+8)
Tar 252k ± 0% 252k ± 0% ~ (p=0.133 n=9+10)
XML 400k ± 0% 400k ± 0% ~ (p=0.796 n=10+10)
[Geo mean] 428k 428k -0.01%
Removing all the interface calls helps non-trivially with CPU, though.
name old time/op new time/op delta
Template 178ms ± 4% 173ms ± 3% -2.90% (p=0.000 n=94+96)
Unicode 85.0ms ± 4% 83.9ms ± 4% -1.23% (p=0.000 n=96+96)
GoTypes 543ms ± 3% 528ms ± 3% -2.73% (p=0.000 n=98+96)
Flate 116ms ± 3% 113ms ± 4% -2.34% (p=0.000 n=96+99)
GoParser 144ms ± 3% 140ms ± 4% -2.80% (p=0.000 n=99+97)
Reflect 344ms ± 3% 334ms ± 4% -3.02% (p=0.000 n=100+99)
Tar 106ms ± 5% 103ms ± 4% -3.30% (p=0.000 n=98+94)
XML 198ms ± 5% 192ms ± 4% -2.88% (p=0.000 n=92+95)
[Geo mean] 178ms 173ms -2.65%
name old user-time/op new user-time/op delta
Template 229ms ± 5% 224ms ± 5% -2.36% (p=0.000 n=95+99)
Unicode 107ms ± 6% 106ms ± 5% -1.13% (p=0.001 n=93+95)
GoTypes 696ms ± 4% 679ms ± 4% -2.45% (p=0.000 n=97+99)
Flate 137ms ± 4% 134ms ± 5% -2.66% (p=0.000 n=99+96)
GoParser 176ms ± 5% 172ms ± 8% -2.27% (p=0.000 n=98+100)
Reflect 430ms ± 6% 411ms ± 5% -4.46% (p=0.000 n=100+92)
Tar 128ms ±13% 123ms ±13% -4.21% (p=0.000 n=100+100)
XML 239ms ± 6% 233ms ± 6% -2.50% (p=0.000 n=95+97)
[Geo mean] 220ms 213ms -2.76%
Change-Id: I15c7d6268347f8358e75066dfdbd77db24e8d0c1
Reviewed-on: https://go-review.googlesource.com/42145
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
2017-04-28 14:12:28 -07:00
|
|
|
func storeByType(t *types.Type) obj.As {
|
2016-05-15 00:12:56 -04:00
|
|
|
if t.IsFloat() {
|
2017-04-28 00:19:49 +00:00
|
|
|
switch t.Size() {
|
2016-05-31 11:27:16 -04:00
|
|
|
case 4:
|
|
|
|
|
return arm.AMOVF
|
|
|
|
|
case 8:
|
|
|
|
|
return arm.AMOVD
|
|
|
|
|
}
|
2016-05-15 00:12:56 -04:00
|
|
|
} else {
|
2017-04-28 00:19:49 +00:00
|
|
|
switch t.Size() {
|
2016-05-15 00:12:56 -04:00
|
|
|
case 1:
|
|
|
|
|
return arm.AMOVB
|
|
|
|
|
case 2:
|
|
|
|
|
return arm.AMOVH
|
|
|
|
|
case 4:
|
|
|
|
|
return arm.AMOVW
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
panic("bad store type")
|
|
|
|
|
}
|
|
|
|
|
|
2016-06-17 10:34:06 -04:00
|
|
|
// shift type is used as Offset in obj.TYPE_SHIFT operands to encode shifted register operands
type shift int64

// String returns the assembler notation of the encoded shift operand.
// copied from ../../../internal/obj/util.go:/TYPE_SHIFT
func (v shift) String() string {
	// bits 5-6 select the operator; each operator is two characters wide
	ops := "<<>>->@>"
	i := ((v >> 5) & 3) << 1
	c0, c1 := ops[i], ops[i+1]
	if v&(1<<4) != 0 {
		// register shift: shift amount lives in bits 8-11
		return fmt.Sprintf("R%d%c%cR%d", v&15, c0, c1, (v>>8)&15)
	}
	// constant shift: shift amount lives in bits 7-11
	return fmt.Sprintf("R%d%c%c%d", v&15, c0, c1, (v>>7)&31)
}
|
|
|
|
|
|
|
|
|
|
// makeshift encodes a register shifted by a constant
|
|
|
|
|
func makeshift(reg int16, typ int64, s int64) shift {
|
|
|
|
|
return shift(int64(reg&0xf) | typ | (s&31)<<7)
|
|
|
|
|
}
|
|
|
|
|
|
2017-03-20 08:01:28 -07:00
|
|
|
// genshift generates a Prog for r = r0 op (r1 shifted by n)
|
|
|
|
|
func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
|
|
|
|
|
p := s.Prog(as)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.From.Type = obj.TYPE_SHIFT
|
2017-03-20 08:01:28 -07:00
|
|
|
p.From.Offset = int64(makeshift(r1, typ, n))
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Reg = r0
|
|
|
|
|
if r != 0 {
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = r
|
|
|
|
|
}
|
|
|
|
|
return p
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// makeregshift encodes a register shifted by a register
|
|
|
|
|
func makeregshift(r1 int16, typ int64, r2 int16) shift {
|
|
|
|
|
return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
|
2017-03-20 08:01:28 -07:00
|
|
|
func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
|
|
|
|
|
p := s.Prog(as)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.From.Type = obj.TYPE_SHIFT
|
|
|
|
|
p.From.Offset = int64(makeregshift(r1, typ, r2))
|
|
|
|
|
p.Reg = r0
|
|
|
|
|
if r != 0 {
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = r
|
|
|
|
|
}
|
|
|
|
|
return p
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-10 08:29:52 +00:00
|
|
|
// find a (lsb, width) pair for BFC
// lsb must be in [0, 31], width must be in [1, 32 - lsb]
// return (0xffffffff, 0) if v is not a binary like 0...01...10...0
func getBFC(v uint32) (uint32, uint32) {
	// BFC is not applicable with zero
	if v == 0 {
		return 0xffffffff, 0
	}
	// lowest set bit index, e.g. lsb=2 for 0x3ffffffc
	lsb := uint32(bits.TrailingZeros32(v))
	// one past the highest set bit index, e.g. msb=30 for 0x3ffffffc
	msb := 32 - uint32(bits.LeadingZeros32(v))
	// v is a single contiguous run of ones exactly when
	// (1<<msb)-(1<<lsb) reproduces it (msb > lsb holds for non-zero v)
	if v == (1<<msb)-(1<<lsb) {
		return lsb, msb - lsb
	}
	// invalid: the set bits are not contiguous
	return 0xffffffff, 0
}
|
|
|
|
|
|
2016-03-21 22:57:26 -07:00
|
|
|
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
|
|
|
|
|
switch v.Op {
|
cmd/compile: don't lower OpConvert
Currently, each architecture lowers OpConvert to an arch-specific
OpXXXconvert. This is silly because OpConvert means the same thing on
all architectures and is logically a no-op that exists only to keep
track of conversions to and from unsafe.Pointer. Furthermore, lowering
it makes it harder to recognize in other analyses, particularly
liveness analysis.
This CL eliminates the lowering of OpConvert, leaving it as the
generic op until code generation time.
The main complexity here is that we still need to register-allocate
OpConvert operations. Currently, each arch's lowered OpConvert
specifies all GP registers in its register mask. Ideally, OpConvert
wouldn't affect value homing at all, and we could just copy the home
of OpConvert's source, but this can potentially home an OpConvert in a
LocalSlot, which neither regalloc nor stackalloc expect. Rather than
try to disentangle this assumption from regalloc and stackalloc, we
continue to register-allocate OpConvert, but teach regalloc that
OpConvert can be allocated to any allocatable GP register.
For #24543.
Change-Id: I795a6aee5fd94d4444a7bafac3838a400c9f7bb6
Reviewed-on: https://go-review.googlesource.com/108496
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
2018-04-02 16:08:09 -04:00
|
|
|
case ssa.OpCopy, ssa.OpARMMOVWreg:
|
2016-05-13 15:31:14 -04:00
|
|
|
if v.Type.IsMemory() {
|
|
|
|
|
return
|
|
|
|
|
}
|
2016-09-16 09:36:00 -07:00
|
|
|
x := v.Args[0].Reg()
|
|
|
|
|
y := v.Reg()
|
2016-05-13 15:31:14 -04:00
|
|
|
if x == y {
|
|
|
|
|
return
|
|
|
|
|
}
|
2016-05-31 11:27:16 -04:00
|
|
|
as := arm.AMOVW
|
|
|
|
|
if v.Type.IsFloat() {
|
2017-04-28 00:19:49 +00:00
|
|
|
switch v.Type.Size() {
|
2016-05-31 11:27:16 -04:00
|
|
|
case 4:
|
|
|
|
|
as = arm.AMOVF
|
|
|
|
|
case 8:
|
|
|
|
|
as = arm.AMOVD
|
|
|
|
|
default:
|
|
|
|
|
panic("bad float size")
|
|
|
|
|
}
|
|
|
|
|
}
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(as)
|
2016-05-13 15:31:14 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
|
|
|
|
p.From.Reg = x
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = y
|
2016-07-15 14:07:15 -04:00
|
|
|
case ssa.OpARMMOVWnop:
|
2016-09-16 09:36:00 -07:00
|
|
|
if v.Reg() != v.Args[0].Reg() {
|
2016-07-15 14:07:15 -04:00
|
|
|
v.Fatalf("input[0] and output not in same register %s", v.LongString())
|
|
|
|
|
}
|
|
|
|
|
// nothing to do
|
2016-03-21 22:57:26 -07:00
|
|
|
case ssa.OpLoadReg:
|
2016-05-15 00:12:56 -04:00
|
|
|
if v.Type.IsFlags() {
|
2016-09-14 10:01:05 -07:00
|
|
|
v.Fatalf("load flags not implemented: %v", v.LongString())
|
2016-05-15 00:12:56 -04:00
|
|
|
return
|
|
|
|
|
}
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(loadByType(v.Type))
|
2016-10-03 12:26:25 -07:00
|
|
|
gc.AddrAuto(&p.From, v.Args[0])
|
2016-03-21 22:57:26 -07:00
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-03-21 22:57:26 -07:00
|
|
|
case ssa.OpStoreReg:
|
2016-05-15 00:12:56 -04:00
|
|
|
if v.Type.IsFlags() {
|
2016-09-14 10:01:05 -07:00
|
|
|
v.Fatalf("store flags not implemented: %v", v.LongString())
|
2016-05-15 00:12:56 -04:00
|
|
|
return
|
|
|
|
|
}
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(storeByType(v.Type))
|
2016-03-21 22:57:26 -07:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2016-10-03 12:26:25 -07:00
|
|
|
gc.AddrAuto(&p.To, v)
|
2016-05-06 10:13:31 -07:00
|
|
|
case ssa.OpARMADD,
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
ssa.OpARMADC,
|
2016-05-06 10:13:31 -07:00
|
|
|
ssa.OpARMSUB,
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
ssa.OpARMSBC,
|
2016-05-06 10:13:31 -07:00
|
|
|
ssa.OpARMRSB,
|
|
|
|
|
ssa.OpARMAND,
|
|
|
|
|
ssa.OpARMOR,
|
|
|
|
|
ssa.OpARMXOR,
|
2016-05-13 15:22:56 -04:00
|
|
|
ssa.OpARMBIC,
|
2016-05-31 11:27:16 -04:00
|
|
|
ssa.OpARMMUL,
|
|
|
|
|
ssa.OpARMADDF,
|
|
|
|
|
ssa.OpARMADDD,
|
|
|
|
|
ssa.OpARMSUBF,
|
|
|
|
|
ssa.OpARMSUBD,
|
2019-04-12 14:03:39 +02:00
|
|
|
ssa.OpARMSLL,
|
|
|
|
|
ssa.OpARMSRL,
|
|
|
|
|
ssa.OpARMSRA,
|
2016-05-31 11:27:16 -04:00
|
|
|
ssa.OpARMMULF,
|
|
|
|
|
ssa.OpARMMULD,
|
2017-09-02 08:14:08 +00:00
|
|
|
ssa.OpARMNMULF,
|
|
|
|
|
ssa.OpARMNMULD,
|
2016-05-31 11:27:16 -04:00
|
|
|
ssa.OpARMDIVF,
|
|
|
|
|
ssa.OpARMDIVD:
|
2016-09-16 09:36:00 -07:00
|
|
|
r := v.Reg()
|
|
|
|
|
r1 := v.Args[0].Reg()
|
|
|
|
|
r2 := v.Args[1].Reg()
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-03-21 22:57:26 -07:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-05-06 10:13:31 -07:00
|
|
|
p.From.Reg = r2
|
|
|
|
|
p.Reg = r1
|
2016-03-21 22:57:26 -07:00
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = r
|
2019-08-02 02:20:38 +00:00
|
|
|
case ssa.OpARMSRR:
|
|
|
|
|
genregshift(s, arm.AMOVW, 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR)
|
2018-10-15 03:14:57 -04:00
|
|
|
case ssa.OpARMMULAF, ssa.OpARMMULAD, ssa.OpARMMULSF, ssa.OpARMMULSD, ssa.OpARMFMULAD:
|
2017-09-14 06:52:51 +00:00
|
|
|
r := v.Reg()
|
|
|
|
|
r0 := v.Args[0].Reg()
|
|
|
|
|
r1 := v.Args[1].Reg()
|
|
|
|
|
r2 := v.Args[2].Reg()
|
|
|
|
|
if r != r0 {
|
|
|
|
|
v.Fatalf("result and addend are not in the same register: %v", v.LongString())
|
|
|
|
|
}
|
|
|
|
|
p := s.Prog(v.Op.Asm())
|
|
|
|
|
p.From.Type = obj.TYPE_REG
|
|
|
|
|
p.From.Reg = r2
|
|
|
|
|
p.Reg = r1
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = r
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
case ssa.OpARMADDS,
|
|
|
|
|
ssa.OpARMSUBS:
|
2016-09-16 09:36:00 -07:00
|
|
|
r := v.Reg0()
|
|
|
|
|
r1 := v.Args[0].Reg()
|
|
|
|
|
r2 := v.Args[1].Reg()
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
p.Scond = arm.C_SBIT
|
|
|
|
|
p.From.Type = obj.TYPE_REG
|
|
|
|
|
p.From.Reg = r2
|
|
|
|
|
p.Reg = r1
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = r
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMSRAcond:
|
2016-05-13 15:22:56 -04:00
|
|
|
// ARM shift instructions uses only the low-order byte of the shift amount
|
|
|
|
|
// generate conditional instructions to deal with large shifts
|
2016-06-17 10:34:06 -04:00
|
|
|
// flag is already set
|
2016-05-13 15:22:56 -04:00
|
|
|
// SRA.HS $31, Rarg0, Rdst // shift 31 bits to get the sign bit
|
|
|
|
|
// SRA.LO Rarg1, Rarg0, Rdst
|
2016-09-16 09:36:00 -07:00
|
|
|
r := v.Reg()
|
|
|
|
|
r1 := v.Args[0].Reg()
|
|
|
|
|
r2 := v.Args[1].Reg()
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(arm.ASRA)
|
2016-05-13 15:22:56 -04:00
|
|
|
p.Scond = arm.C_SCOND_HS
|
|
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = 31
|
|
|
|
|
p.Reg = r1
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = r
|
2017-03-20 08:01:28 -07:00
|
|
|
p = s.Prog(arm.ASRA)
|
2016-05-13 15:22:56 -04:00
|
|
|
p.Scond = arm.C_SCOND_LO
|
|
|
|
|
p.From.Type = obj.TYPE_REG
|
|
|
|
|
p.From.Reg = r2
|
|
|
|
|
p.Reg = r1
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = r
|
2017-09-20 08:48:34 +00:00
|
|
|
case ssa.OpARMBFX, ssa.OpARMBFXU:
|
|
|
|
|
p := s.Prog(v.Op.Asm())
|
|
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = v.AuxInt >> 8
|
|
|
|
|
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt & 0xff})
|
|
|
|
|
p.Reg = v.Args[0].Reg()
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = v.Reg()
|
2018-09-10 08:29:52 +00:00
|
|
|
case ssa.OpARMANDconst, ssa.OpARMBICconst:
|
|
|
|
|
// try to optimize ANDconst and BICconst to BFC, which saves bytes and ticks
|
|
|
|
|
// BFC is only available on ARMv7, and its result and source are in the same register
|
|
|
|
|
if objabi.GOARM == 7 && v.Reg() == v.Args[0].Reg() {
|
|
|
|
|
var val uint32
|
|
|
|
|
if v.Op == ssa.OpARMANDconst {
|
|
|
|
|
val = ^uint32(v.AuxInt)
|
|
|
|
|
} else { // BICconst
|
|
|
|
|
val = uint32(v.AuxInt)
|
|
|
|
|
}
|
|
|
|
|
lsb, width := getBFC(val)
|
|
|
|
|
// omit BFC for ARM's imm12
|
|
|
|
|
if 8 < width && width < 24 {
|
|
|
|
|
p := s.Prog(arm.ABFC)
|
|
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = int64(width)
|
|
|
|
|
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(lsb)})
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = v.Reg()
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// fall back to ordinary form
|
|
|
|
|
fallthrough
|
2016-06-06 22:36:45 -04:00
|
|
|
case ssa.OpARMADDconst,
|
2016-06-13 16:49:09 -04:00
|
|
|
ssa.OpARMADCconst,
|
2016-06-06 22:36:45 -04:00
|
|
|
ssa.OpARMSUBconst,
|
2016-06-13 16:49:09 -04:00
|
|
|
ssa.OpARMSBCconst,
|
2016-05-06 10:13:31 -07:00
|
|
|
ssa.OpARMRSBconst,
|
2016-06-13 16:49:09 -04:00
|
|
|
ssa.OpARMRSCconst,
|
2016-05-06 10:13:31 -07:00
|
|
|
ssa.OpARMORconst,
|
|
|
|
|
ssa.OpARMXORconst,
|
2016-05-13 15:22:56 -04:00
|
|
|
ssa.OpARMSLLconst,
|
|
|
|
|
ssa.OpARMSRLconst,
|
|
|
|
|
ssa.OpARMSRAconst:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-03-21 22:57:26 -07:00
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = v.AuxInt
|
2016-09-16 09:36:00 -07:00
|
|
|
p.Reg = v.Args[0].Reg()
|
2016-03-21 22:57:26 -07:00
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-06-13 16:49:09 -04:00
|
|
|
case ssa.OpARMADDSconst,
|
|
|
|
|
ssa.OpARMSUBSconst,
|
|
|
|
|
ssa.OpARMRSBSconst:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-06-13 16:49:09 -04:00
|
|
|
p.Scond = arm.C_SBIT
|
|
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = v.AuxInt
|
2016-09-16 09:36:00 -07:00
|
|
|
p.Reg = v.Args[0].Reg()
|
2016-06-13 16:49:09 -04:00
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg0()
|
2016-05-25 23:17:42 -04:00
|
|
|
case ssa.OpARMSRRconst:
|
2017-03-20 08:01:28 -07:00
|
|
|
genshift(s, arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMADDshiftLL,
|
|
|
|
|
ssa.OpARMADCshiftLL,
|
|
|
|
|
ssa.OpARMSUBshiftLL,
|
|
|
|
|
ssa.OpARMSBCshiftLL,
|
|
|
|
|
ssa.OpARMRSBshiftLL,
|
|
|
|
|
ssa.OpARMRSCshiftLL,
|
|
|
|
|
ssa.OpARMANDshiftLL,
|
|
|
|
|
ssa.OpARMORshiftLL,
|
|
|
|
|
ssa.OpARMXORshiftLL,
|
|
|
|
|
ssa.OpARMBICshiftLL:
|
2017-03-20 08:01:28 -07:00
|
|
|
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMADDSshiftLL,
|
|
|
|
|
ssa.OpARMSUBSshiftLL,
|
|
|
|
|
ssa.OpARMRSBSshiftLL:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Scond = arm.C_SBIT
|
|
|
|
|
case ssa.OpARMADDshiftRL,
|
|
|
|
|
ssa.OpARMADCshiftRL,
|
|
|
|
|
ssa.OpARMSUBshiftRL,
|
|
|
|
|
ssa.OpARMSBCshiftRL,
|
|
|
|
|
ssa.OpARMRSBshiftRL,
|
|
|
|
|
ssa.OpARMRSCshiftRL,
|
|
|
|
|
ssa.OpARMANDshiftRL,
|
|
|
|
|
ssa.OpARMORshiftRL,
|
|
|
|
|
ssa.OpARMXORshiftRL,
|
|
|
|
|
ssa.OpARMBICshiftRL:
|
2017-03-20 08:01:28 -07:00
|
|
|
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMADDSshiftRL,
|
|
|
|
|
ssa.OpARMSUBSshiftRL,
|
|
|
|
|
ssa.OpARMRSBSshiftRL:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Scond = arm.C_SBIT
|
|
|
|
|
case ssa.OpARMADDshiftRA,
|
|
|
|
|
ssa.OpARMADCshiftRA,
|
|
|
|
|
ssa.OpARMSUBshiftRA,
|
|
|
|
|
ssa.OpARMSBCshiftRA,
|
|
|
|
|
ssa.OpARMRSBshiftRA,
|
|
|
|
|
ssa.OpARMRSCshiftRA,
|
|
|
|
|
ssa.OpARMANDshiftRA,
|
|
|
|
|
ssa.OpARMORshiftRA,
|
|
|
|
|
ssa.OpARMXORshiftRA,
|
|
|
|
|
ssa.OpARMBICshiftRA:
|
2017-03-20 08:01:28 -07:00
|
|
|
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMADDSshiftRA,
|
|
|
|
|
ssa.OpARMSUBSshiftRA,
|
|
|
|
|
ssa.OpARMRSBSshiftRA:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Scond = arm.C_SBIT
|
2016-08-30 09:12:22 -04:00
|
|
|
case ssa.OpARMXORshiftRR:
|
2017-03-20 08:01:28 -07:00
|
|
|
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMVNshiftLL:
|
2017-03-20 08:01:28 -07:00
|
|
|
genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMVNshiftRL:
|
2017-03-20 08:01:28 -07:00
|
|
|
genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMVNshiftRA:
|
2017-03-20 08:01:28 -07:00
|
|
|
genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMVNshiftLLreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMVNshiftRLreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMVNshiftRAreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMADDshiftLLreg,
|
|
|
|
|
ssa.OpARMADCshiftLLreg,
|
|
|
|
|
ssa.OpARMSUBshiftLLreg,
|
|
|
|
|
ssa.OpARMSBCshiftLLreg,
|
|
|
|
|
ssa.OpARMRSBshiftLLreg,
|
|
|
|
|
ssa.OpARMRSCshiftLLreg,
|
|
|
|
|
ssa.OpARMANDshiftLLreg,
|
|
|
|
|
ssa.OpARMORshiftLLreg,
|
|
|
|
|
ssa.OpARMXORshiftLLreg,
|
|
|
|
|
ssa.OpARMBICshiftLLreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMADDSshiftLLreg,
|
|
|
|
|
ssa.OpARMSUBSshiftLLreg,
|
|
|
|
|
ssa.OpARMRSBSshiftLLreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Scond = arm.C_SBIT
|
|
|
|
|
case ssa.OpARMADDshiftRLreg,
|
|
|
|
|
ssa.OpARMADCshiftRLreg,
|
|
|
|
|
ssa.OpARMSUBshiftRLreg,
|
|
|
|
|
ssa.OpARMSBCshiftRLreg,
|
|
|
|
|
ssa.OpARMRSBshiftRLreg,
|
|
|
|
|
ssa.OpARMRSCshiftRLreg,
|
|
|
|
|
ssa.OpARMANDshiftRLreg,
|
|
|
|
|
ssa.OpARMORshiftRLreg,
|
|
|
|
|
ssa.OpARMXORshiftRLreg,
|
|
|
|
|
ssa.OpARMBICshiftRLreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMADDSshiftRLreg,
|
|
|
|
|
ssa.OpARMSUBSshiftRLreg,
|
|
|
|
|
ssa.OpARMRSBSshiftRLreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Scond = arm.C_SBIT
|
|
|
|
|
case ssa.OpARMADDshiftRAreg,
|
|
|
|
|
ssa.OpARMADCshiftRAreg,
|
|
|
|
|
ssa.OpARMSUBshiftRAreg,
|
|
|
|
|
ssa.OpARMSBCshiftRAreg,
|
|
|
|
|
ssa.OpARMRSBshiftRAreg,
|
|
|
|
|
ssa.OpARMRSCshiftRAreg,
|
|
|
|
|
ssa.OpARMANDshiftRAreg,
|
|
|
|
|
ssa.OpARMORshiftRAreg,
|
|
|
|
|
ssa.OpARMXORshiftRAreg,
|
|
|
|
|
ssa.OpARMBICshiftRAreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR)
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMADDSshiftRAreg,
|
|
|
|
|
ssa.OpARMSUBSshiftRAreg,
|
|
|
|
|
ssa.OpARMRSBSshiftRAreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Scond = arm.C_SBIT
|
2016-05-13 15:22:56 -04:00
|
|
|
case ssa.OpARMHMUL,
|
|
|
|
|
ssa.OpARMHMULU:
|
|
|
|
|
// 32-bit high multiplication
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-05-13 15:22:56 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
|
|
|
|
p.Reg = v.Args[1].Reg()
|
2016-05-13 15:22:56 -04:00
|
|
|
p.To.Type = obj.TYPE_REGREG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-05-13 15:22:56 -04:00
|
|
|
p.To.Offset = arm.REGTMP // throw away low 32-bit into tmp register
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
case ssa.OpARMMULLU:
|
2016-07-13 16:15:54 -07:00
|
|
|
// 32-bit multiplication, results 64-bit, high 32-bit in out0, low 32-bit in out1
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
|
|
|
|
p.Reg = v.Args[1].Reg()
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
p.To.Type = obj.TYPE_REGREG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg0() // high 32-bit
|
|
|
|
|
p.To.Offset = int64(v.Reg1()) // low 32-bit
|
2017-08-25 12:07:01 +00:00
|
|
|
case ssa.OpARMMULA, ssa.OpARMMULS:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
|
|
|
|
p.Reg = v.Args[1].Reg()
|
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integer on
32-bit architectures. 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support of KeepAlive, to fix build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
|
|
|
p.To.Type = obj.TYPE_REGREG2
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg() // result
|
|
|
|
|
p.To.Offset = int64(v.Args[2].Reg()) // addend
|
2016-03-21 22:57:26 -07:00
|
|
|
case ssa.OpARMMOVWconst:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-03-21 22:57:26 -07:00
|
|
|
p.From.Type = obj.TYPE_CONST
|
2016-03-29 16:39:53 -07:00
|
|
|
p.From.Offset = v.AuxInt
|
2016-03-21 22:57:26 -07:00
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-05-31 11:27:16 -04:00
|
|
|
case ssa.OpARMMOVFconst,
|
|
|
|
|
ssa.OpARMMOVDconst:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-05-31 11:27:16 -04:00
|
|
|
p.From.Type = obj.TYPE_FCONST
|
|
|
|
|
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-05-06 10:13:31 -07:00
|
|
|
case ssa.OpARMCMP,
|
|
|
|
|
ssa.OpARMCMN,
|
|
|
|
|
ssa.OpARMTST,
|
2016-05-31 11:27:16 -04:00
|
|
|
ssa.OpARMTEQ,
|
|
|
|
|
ssa.OpARMCMPF,
|
|
|
|
|
ssa.OpARMCMPD:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-03-21 22:57:26 -07:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-04-18 12:21:51 -04:00
|
|
|
// Special layout in ARM assembly
|
|
|
|
|
// Comparing to x86, the operands of ARM's CMP are reversed.
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[1].Reg()
|
|
|
|
|
p.Reg = v.Args[0].Reg()
|
2016-05-06 10:13:31 -07:00
|
|
|
case ssa.OpARMCMPconst,
|
|
|
|
|
ssa.OpARMCMNconst,
|
|
|
|
|
ssa.OpARMTSTconst,
|
|
|
|
|
ssa.OpARMTEQconst:
|
|
|
|
|
// Special layout in ARM assembly
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-05-06 10:13:31 -07:00
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = v.AuxInt
|
2016-09-16 09:36:00 -07:00
|
|
|
p.Reg = v.Args[0].Reg()
|
2016-07-06 10:04:45 -04:00
|
|
|
case ssa.OpARMCMPF0,
|
|
|
|
|
ssa.OpARMCMPD0:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-07-06 10:04:45 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2017-10-02 03:09:28 +00:00
|
|
|
case ssa.OpARMCMPshiftLL, ssa.OpARMCMNshiftLL, ssa.OpARMTSTshiftLL, ssa.OpARMTEQshiftLL:
|
2017-03-20 08:01:28 -07:00
|
|
|
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
|
2017-10-02 03:09:28 +00:00
|
|
|
case ssa.OpARMCMPshiftRL, ssa.OpARMCMNshiftRL, ssa.OpARMTSTshiftRL, ssa.OpARMTEQshiftRL:
|
2017-03-20 08:01:28 -07:00
|
|
|
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
|
2017-10-02 03:09:28 +00:00
|
|
|
case ssa.OpARMCMPshiftRA, ssa.OpARMCMNshiftRA, ssa.OpARMTSTshiftRA, ssa.OpARMTEQshiftRA:
|
2017-03-20 08:01:28 -07:00
|
|
|
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
|
2017-10-02 03:09:28 +00:00
|
|
|
case ssa.OpARMCMPshiftLLreg, ssa.OpARMCMNshiftLLreg, ssa.OpARMTSTshiftLLreg, ssa.OpARMTEQshiftLLreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
|
2017-10-02 03:09:28 +00:00
|
|
|
case ssa.OpARMCMPshiftRLreg, ssa.OpARMCMNshiftRLreg, ssa.OpARMTSTshiftRLreg, ssa.OpARMTEQshiftRLreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
|
2017-10-02 03:09:28 +00:00
|
|
|
case ssa.OpARMCMPshiftRAreg, ssa.OpARMCMNshiftRAreg, ssa.OpARMTSTshiftRAreg, ssa.OpARMTEQshiftRAreg:
|
2017-03-20 08:01:28 -07:00
|
|
|
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
|
2016-06-06 22:36:45 -04:00
|
|
|
case ssa.OpARMMOVWaddr:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(arm.AMOVW)
|
2016-06-15 15:56:52 -07:00
|
|
|
p.From.Type = obj.TYPE_ADDR
|
2017-07-18 08:35:00 -04:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2016-06-15 15:56:52 -07:00
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-06-15 15:56:52 -07:00
|
|
|
|
|
|
|
|
var wantreg string
|
2016-06-06 22:36:45 -04:00
|
|
|
// MOVW $sym+off(base), R
|
|
|
|
|
// the assembler expands it as the following:
|
|
|
|
|
// - base is SP: add constant offset to SP (R13)
|
|
|
|
|
// when constant is large, tmp register (R11) may be used
|
|
|
|
|
// - base is SB: load external address from constant pool (use relocation)
|
|
|
|
|
switch v.Aux.(type) {
|
|
|
|
|
default:
|
|
|
|
|
v.Fatalf("aux is of unknown type %T", v.Aux)
|
2017-09-18 14:53:56 -07:00
|
|
|
case *obj.LSym:
|
2016-06-15 15:56:52 -07:00
|
|
|
wantreg = "SB"
|
|
|
|
|
gc.AddAux(&p.From, v)
|
[dev.regabi] cmd/compile: replace *Node type with an interface Node [generated]
The plan is to introduce a Node interface that replaces the old *Node pointer-to-struct.
The previous CL defined an interface INode modeling a *Node.
This CL:
- Changes all references outside internal/ir to use INode,
along with many references inside internal/ir as well.
- Renames Node to node.
- Renames INode to Node
So now ir.Node is an interface implemented by *ir.node, which is otherwise inaccessible,
and the code outside package ir is now (clearly) using only the interface.
The usual rule is never to redefine an existing name with a new meaning,
so that old code that hasn't been updated gets a "unknown name" error
instead of more mysterious errors or silent misbehavior. That rule would
caution against replacing Node-the-struct with Node-the-interface,
as in this CL, because code that says *Node would now be using a pointer
to an interface. But this CL is being landed at the same time as another that
moves Node from gc to ir. So the net effect is to replace *gc.Node with ir.Node,
which does follow the rule: any lingering references to gc.Node will be told
it's gone, not silently start using pointers to interfaces. So the rule is followed
by the CL sequence, just not this specific CL.
Overall, the loss of inlining caused by using interfaces cuts the compiler speed
by about 6%, a not insignificant amount. However, as we convert the representation
to concrete structs that are not the giant Node over the next weeks, that speed
should come back as more of the compiler starts operating directly on concrete types
and the memory taken up by the graph of Nodes drops due to the more precise
structs. Honestly, I was expecting worse.
% benchstat bench.old bench.new
name old time/op new time/op delta
Template 168ms ± 4% 182ms ± 2% +8.34% (p=0.000 n=9+9)
Unicode 72.2ms ±10% 82.5ms ± 6% +14.38% (p=0.000 n=9+9)
GoTypes 563ms ± 8% 598ms ± 2% +6.14% (p=0.006 n=9+9)
Compiler 2.89s ± 4% 3.04s ± 2% +5.37% (p=0.000 n=10+9)
SSA 6.45s ± 4% 7.25s ± 5% +12.41% (p=0.000 n=9+10)
Flate 105ms ± 2% 115ms ± 1% +9.66% (p=0.000 n=10+8)
GoParser 144ms ±10% 152ms ± 2% +5.79% (p=0.011 n=9+8)
Reflect 345ms ± 9% 370ms ± 4% +7.28% (p=0.001 n=10+9)
Tar 149ms ± 9% 161ms ± 5% +8.05% (p=0.001 n=10+9)
XML 190ms ± 3% 209ms ± 2% +9.54% (p=0.000 n=9+8)
LinkCompiler 327ms ± 2% 325ms ± 2% ~ (p=0.382 n=8+8)
ExternalLinkCompiler 1.77s ± 4% 1.73s ± 6% ~ (p=0.113 n=9+10)
LinkWithoutDebugCompiler 214ms ± 4% 211ms ± 2% ~ (p=0.360 n=10+8)
StdCmd 14.8s ± 3% 15.9s ± 1% +6.98% (p=0.000 n=10+9)
[Geo mean] 480ms 510ms +6.31%
name old user-time/op new user-time/op delta
Template 223ms ± 3% 237ms ± 3% +6.16% (p=0.000 n=9+10)
Unicode 103ms ± 6% 113ms ± 3% +9.53% (p=0.000 n=9+9)
GoTypes 758ms ± 8% 800ms ± 2% +5.55% (p=0.003 n=10+9)
Compiler 3.95s ± 2% 4.12s ± 2% +4.34% (p=0.000 n=10+9)
SSA 9.43s ± 1% 9.74s ± 4% +3.25% (p=0.000 n=8+10)
Flate 132ms ± 2% 141ms ± 2% +6.89% (p=0.000 n=9+9)
GoParser 177ms ± 9% 183ms ± 4% ~ (p=0.050 n=9+9)
Reflect 467ms ±10% 495ms ± 7% +6.17% (p=0.029 n=10+10)
Tar 183ms ± 9% 197ms ± 5% +7.92% (p=0.001 n=10+10)
XML 249ms ± 5% 268ms ± 4% +7.82% (p=0.000 n=10+9)
LinkCompiler 544ms ± 5% 544ms ± 6% ~ (p=0.863 n=9+9)
ExternalLinkCompiler 1.79s ± 4% 1.75s ± 6% ~ (p=0.075 n=10+10)
LinkWithoutDebugCompiler 248ms ± 6% 246ms ± 2% ~ (p=0.965 n=10+8)
[Geo mean] 483ms 504ms +4.41%
[git-generate]
cd src/cmd/compile/internal/ir
: # We need to do the conversion in multiple steps, so we introduce
: # a temporary type alias that will start out meaning the pointer-to-struct
: # and then change to mean the interface.
rf '
mv Node OldNode
add node.go \
type Node = *OldNode
'
: # It should work to do this ex in ir, but it misses test files, due to a bug in rf.
: # Run the command in gc to handle gc's tests, and then again in ssa for ssa's tests.
cd ../gc
rf '
ex . ../arm ../riscv64 ../arm64 ../mips64 ../ppc64 ../mips ../wasm {
import "cmd/compile/internal/ir"
*ir.OldNode -> ir.Node
}
'
cd ../ssa
rf '
ex {
import "cmd/compile/internal/ir"
*ir.OldNode -> ir.Node
}
'
: # Back in ir, finish conversion clumsily with sed,
: # because type checking and circular aliases do not mix.
cd ../ir
sed -i '' '
/type Node = \*OldNode/d
s/\*OldNode/Node/g
s/^func (n Node)/func (n *OldNode)/
s/OldNode/node/g
s/type INode interface/type Node interface/
s/var _ INode = (Node)(nil)/var _ Node = (*node)(nil)/
' *.go
gofmt -w *.go
sed -i '' '
s/{Func{}, 136, 248}/{Func{}, 152, 280}/
s/{Name{}, 32, 56}/{Name{}, 44, 80}/
s/{Param{}, 24, 48}/{Param{}, 44, 88}/
s/{node{}, 76, 128}/{node{}, 88, 152}/
' sizeof_test.go
cd ../ssa
sed -i '' '
s/{LocalSlot{}, 28, 40}/{LocalSlot{}, 32, 48}/
' sizeof_test.go
cd ../gc
sed -i '' 's/\*ir.Node/ir.Node/' mkbuiltin.go
cd ../../../..
go install std cmd
cd cmd/compile
go test -u || go test -u
Change-Id: I196bbe3b648e4701662e4a2bada40bf155e2a553
Reviewed-on: https://go-review.googlesource.com/c/go/+/272935
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-11-25 01:11:56 -05:00
|
|
|
case ir.Node:
|
2016-06-15 15:56:52 -07:00
|
|
|
wantreg = "SP"
|
|
|
|
|
gc.AddAux(&p.From, v)
|
|
|
|
|
case nil:
|
|
|
|
|
// No sym, just MOVW $off(SP), R
|
|
|
|
|
wantreg = "SP"
|
|
|
|
|
p.From.Offset = v.AuxInt
|
2016-06-06 22:36:45 -04:00
|
|
|
}
|
2016-09-16 09:36:00 -07:00
|
|
|
if reg := v.Args[0].RegName(); reg != wantreg {
|
|
|
|
|
v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
|
2016-06-15 15:56:52 -07:00
|
|
|
}
|
|
|
|
|
|
2016-05-06 10:13:31 -07:00
|
|
|
case ssa.OpARMMOVBload,
|
|
|
|
|
ssa.OpARMMOVBUload,
|
|
|
|
|
ssa.OpARMMOVHload,
|
|
|
|
|
ssa.OpARMMOVHUload,
|
2016-05-31 11:27:16 -04:00
|
|
|
ssa.OpARMMOVWload,
|
|
|
|
|
ssa.OpARMMOVFload,
|
|
|
|
|
ssa.OpARMMOVDload:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-03-21 22:57:26 -07:00
|
|
|
p.From.Type = obj.TYPE_MEM
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2016-03-21 22:57:26 -07:00
|
|
|
gc.AddAux(&p.From, v)
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-05-06 10:13:31 -07:00
|
|
|
case ssa.OpARMMOVBstore,
|
|
|
|
|
ssa.OpARMMOVHstore,
|
2016-05-31 11:27:16 -04:00
|
|
|
ssa.OpARMMOVWstore,
|
|
|
|
|
ssa.OpARMMOVFstore,
|
|
|
|
|
ssa.OpARMMOVDstore:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-03-21 22:57:26 -07:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[1].Reg()
|
2016-03-21 22:57:26 -07:00
|
|
|
p.To.Type = obj.TYPE_MEM
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Args[0].Reg()
|
2016-03-21 22:57:26 -07:00
|
|
|
gc.AddAux(&p.To, v)
|
2017-08-24 10:51:34 +00:00
|
|
|
case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx:
|
2016-06-17 10:34:06 -04:00
|
|
|
// this is just shift 0 bits
|
|
|
|
|
fallthrough
|
|
|
|
|
case ssa.OpARMMOVWloadshiftLL:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMOVWloadshiftRL:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMOVWloadshiftRA:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2017-08-24 10:51:34 +00:00
|
|
|
case ssa.OpARMMOVWstoreidx, ssa.OpARMMOVBstoreidx, ssa.OpARMMOVHstoreidx:
|
2016-06-17 10:34:06 -04:00
|
|
|
// this is just shift 0 bits
|
|
|
|
|
fallthrough
|
|
|
|
|
case ssa.OpARMMOVWstoreshiftLL:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-06-17 10:34:06 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[2].Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
p.To.Type = obj.TYPE_SHIFT
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Args[0].Reg()
|
|
|
|
|
p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt))
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMOVWstoreshiftRL:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-06-17 10:34:06 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[2].Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
p.To.Type = obj.TYPE_SHIFT
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Args[0].Reg()
|
|
|
|
|
p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt))
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMMOVWstoreshiftRA:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-06-17 10:34:06 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[2].Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
p.To.Type = obj.TYPE_SHIFT
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Args[0].Reg()
|
|
|
|
|
p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_AR, v.AuxInt))
|
2016-05-06 10:13:31 -07:00
|
|
|
case ssa.OpARMMOVBreg,
|
|
|
|
|
ssa.OpARMMOVBUreg,
|
|
|
|
|
ssa.OpARMMOVHreg,
|
2016-06-17 10:34:06 -04:00
|
|
|
ssa.OpARMMOVHUreg:
|
|
|
|
|
a := v.Args[0]
|
2016-07-15 14:07:15 -04:00
|
|
|
for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop {
|
2016-06-17 10:34:06 -04:00
|
|
|
a = a.Args[0]
|
|
|
|
|
}
|
|
|
|
|
if a.Op == ssa.OpLoadReg {
|
|
|
|
|
t := a.Type
|
|
|
|
|
switch {
|
2017-04-28 00:19:49 +00:00
|
|
|
case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(),
|
|
|
|
|
v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(),
|
|
|
|
|
v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(),
|
|
|
|
|
v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned():
|
2016-06-17 10:34:06 -04:00
|
|
|
// arg is a proper-typed load, already zero/sign-extended, don't extend again
|
2016-09-16 09:36:00 -07:00
|
|
|
if v.Reg() == v.Args[0].Reg() {
|
2016-06-17 10:34:06 -04:00
|
|
|
return
|
|
|
|
|
}
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(arm.AMOVW)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
return
|
|
|
|
|
default:
|
|
|
|
|
}
|
|
|
|
|
}
|
2017-10-20 03:50:15 +00:00
|
|
|
if objabi.GOARM >= 6 {
|
|
|
|
|
// generate more efficient "MOVB/MOVBU/MOVH/MOVHU Reg@>0, Reg" on ARMv6 & ARMv7
|
|
|
|
|
genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, 0)
|
|
|
|
|
return
|
|
|
|
|
}
|
2016-06-17 10:34:06 -04:00
|
|
|
fallthrough
|
|
|
|
|
case ssa.OpARMMVN,
|
2016-08-30 09:12:22 -04:00
|
|
|
ssa.OpARMCLZ,
|
2017-01-24 09:48:58 +00:00
|
|
|
ssa.OpARMREV,
|
2019-02-11 09:40:02 +00:00
|
|
|
ssa.OpARMREV16,
|
2017-01-24 09:48:58 +00:00
|
|
|
ssa.OpARMRBIT,
|
2016-05-31 11:27:16 -04:00
|
|
|
ssa.OpARMSQRTD,
|
2016-06-29 15:20:48 -04:00
|
|
|
ssa.OpARMNEGF,
|
|
|
|
|
ssa.OpARMNEGD,
|
2019-08-02 02:41:59 +00:00
|
|
|
ssa.OpARMABSD,
|
2016-05-31 11:27:16 -04:00
|
|
|
ssa.OpARMMOVWF,
|
|
|
|
|
ssa.OpARMMOVWD,
|
|
|
|
|
ssa.OpARMMOVFW,
|
|
|
|
|
ssa.OpARMMOVDW,
|
|
|
|
|
ssa.OpARMMOVFD,
|
|
|
|
|
ssa.OpARMMOVDF:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-05-31 11:27:16 -04:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2016-05-31 11:27:16 -04:00
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-05-31 11:27:16 -04:00
|
|
|
case ssa.OpARMMOVWUF,
|
|
|
|
|
ssa.OpARMMOVWUD,
|
|
|
|
|
ssa.OpARMMOVFWU,
|
|
|
|
|
ssa.OpARMMOVDWU:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(v.Op.Asm())
|
2016-05-31 11:27:16 -04:00
|
|
|
p.Scond = arm.C_UBIT
|
2016-05-06 10:13:31 -07:00
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2016-05-06 10:13:31 -07:00
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMCMOVWHSconst:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(arm.AMOVW)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Scond = arm.C_SCOND_HS
|
|
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = v.AuxInt
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-06-17 10:34:06 -04:00
|
|
|
case ssa.OpARMCMOVWLSconst:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(arm.AMOVW)
|
2016-06-17 10:34:06 -04:00
|
|
|
p.Scond = arm.C_SCOND_LS
|
|
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = v.AuxInt
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2017-04-20 07:50:17 -07:00
|
|
|
case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter:
|
|
|
|
|
s.Call(v)
|
|
|
|
|
case ssa.OpARMCALLudiv:
|
2017-04-21 06:50:02 -04:00
|
|
|
p := s.Prog(obj.ACALL)
|
|
|
|
|
p.To.Type = obj.TYPE_MEM
|
|
|
|
|
p.To.Name = obj.NAME_EXTERN
|
|
|
|
|
p.To.Sym = gc.Udiv
|
2017-11-15 14:54:24 -08:00
|
|
|
case ssa.OpARMLoweredWB:
|
|
|
|
|
p := s.Prog(obj.ACALL)
|
|
|
|
|
p.To.Type = obj.TYPE_MEM
|
|
|
|
|
p.To.Name = obj.NAME_EXTERN
|
|
|
|
|
p.To.Sym = v.Aux.(*obj.LSym)
|
2019-02-06 14:12:36 -08:00
|
|
|
case ssa.OpARMLoweredPanicBoundsA, ssa.OpARMLoweredPanicBoundsB, ssa.OpARMLoweredPanicBoundsC:
|
|
|
|
|
p := s.Prog(obj.ACALL)
|
|
|
|
|
p.To.Type = obj.TYPE_MEM
|
|
|
|
|
p.To.Name = obj.NAME_EXTERN
|
|
|
|
|
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
|
|
|
|
|
s.UseArgs(8) // space used in callee args area by assembly stubs
|
|
|
|
|
case ssa.OpARMLoweredPanicExtendA, ssa.OpARMLoweredPanicExtendB, ssa.OpARMLoweredPanicExtendC:
|
|
|
|
|
p := s.Prog(obj.ACALL)
|
|
|
|
|
p.To.Type = obj.TYPE_MEM
|
|
|
|
|
p.To.Name = obj.NAME_EXTERN
|
|
|
|
|
p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
|
|
|
|
|
s.UseArgs(12) // space used in callee args area by assembly stubs
|
2016-05-13 15:31:14 -04:00
|
|
|
case ssa.OpARMDUFFZERO:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(obj.ADUFFZERO)
|
2016-05-13 15:31:14 -04:00
|
|
|
p.To.Type = obj.TYPE_MEM
|
|
|
|
|
p.To.Name = obj.NAME_EXTERN
|
2017-02-06 14:46:48 -08:00
|
|
|
p.To.Sym = gc.Duffzero
|
2016-05-13 15:31:14 -04:00
|
|
|
p.To.Offset = v.AuxInt
|
|
|
|
|
case ssa.OpARMDUFFCOPY:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(obj.ADUFFCOPY)
|
2016-05-13 15:31:14 -04:00
|
|
|
p.To.Type = obj.TYPE_MEM
|
|
|
|
|
p.To.Name = obj.NAME_EXTERN
|
2017-02-06 14:46:48 -08:00
|
|
|
p.To.Sym = gc.Duffcopy
|
2016-05-13 15:31:14 -04:00
|
|
|
p.To.Offset = v.AuxInt
|
2016-05-06 10:13:31 -07:00
|
|
|
case ssa.OpARMLoweredNilCheck:
|
|
|
|
|
// Issue a load which will fault if arg is nil.
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(arm.AMOVB)
|
2016-05-06 10:13:31 -07:00
|
|
|
p.From.Type = obj.TYPE_MEM
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[0].Reg()
|
2016-05-06 10:13:31 -07:00
|
|
|
gc.AddAux(&p.From, v)
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = arm.REGTMP
|
2019-10-29 14:24:43 -04:00
|
|
|
if logopt.Enabled() {
|
|
|
|
|
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
|
|
|
|
|
}
|
2020-11-19 20:49:23 -05:00
|
|
|
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
|
|
|
|
|
base.WarnfAt(v.Pos, "generated nil check")
|
2016-05-06 10:13:31 -07:00
|
|
|
}
|
2016-07-27 12:33:08 -04:00
|
|
|
case ssa.OpARMLoweredZero:
|
2016-05-13 15:31:14 -04:00
|
|
|
// MOVW.P Rarg2, 4(R1)
|
|
|
|
|
// CMP Rarg1, R1
|
2016-07-27 12:33:08 -04:00
|
|
|
// BLE -2(PC)
|
|
|
|
|
// arg1 is the address of the last element to zero
|
2016-05-13 15:31:14 -04:00
|
|
|
// arg2 is known to be zero
|
2016-07-27 12:33:08 -04:00
|
|
|
// auxint is alignment
|
|
|
|
|
var sz int64
|
|
|
|
|
var mov obj.As
|
|
|
|
|
switch {
|
|
|
|
|
case v.AuxInt%4 == 0:
|
|
|
|
|
sz = 4
|
|
|
|
|
mov = arm.AMOVW
|
|
|
|
|
case v.AuxInt%2 == 0:
|
|
|
|
|
sz = 2
|
|
|
|
|
mov = arm.AMOVH
|
|
|
|
|
default:
|
2016-06-27 16:54:57 -04:00
|
|
|
sz = 1
|
|
|
|
|
mov = arm.AMOVB
|
|
|
|
|
}
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(mov)
|
2016-05-13 15:31:14 -04:00
|
|
|
p.Scond = arm.C_PBIT
|
|
|
|
|
p.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.From.Reg = v.Args[2].Reg()
|
2016-05-13 15:31:14 -04:00
|
|
|
p.To.Type = obj.TYPE_MEM
|
|
|
|
|
p.To.Reg = arm.REG_R1
|
2016-06-27 16:54:57 -04:00
|
|
|
p.To.Offset = sz
|
2017-03-20 08:01:28 -07:00
|
|
|
p2 := s.Prog(arm.ACMP)
|
2016-05-13 15:31:14 -04:00
|
|
|
p2.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p2.From.Reg = v.Args[1].Reg()
|
2016-05-13 15:31:14 -04:00
|
|
|
p2.Reg = arm.REG_R1
|
2017-03-20 08:01:28 -07:00
|
|
|
p3 := s.Prog(arm.ABLE)
|
2016-05-13 15:31:14 -04:00
|
|
|
p3.To.Type = obj.TYPE_BRANCH
|
|
|
|
|
gc.Patch(p3, p)
|
2016-07-27 12:33:08 -04:00
|
|
|
case ssa.OpARMLoweredMove:
|
2016-05-13 15:31:14 -04:00
|
|
|
// MOVW.P 4(R1), Rtmp
|
|
|
|
|
// MOVW.P Rtmp, 4(R2)
|
|
|
|
|
// CMP Rarg2, R1
|
2016-07-27 12:33:08 -04:00
|
|
|
// BLE -3(PC)
|
|
|
|
|
// arg2 is the address of the last element of src
|
|
|
|
|
// auxint is alignment
|
|
|
|
|
var sz int64
|
|
|
|
|
var mov obj.As
|
|
|
|
|
switch {
|
|
|
|
|
case v.AuxInt%4 == 0:
|
|
|
|
|
sz = 4
|
|
|
|
|
mov = arm.AMOVW
|
|
|
|
|
case v.AuxInt%2 == 0:
|
|
|
|
|
sz = 2
|
|
|
|
|
mov = arm.AMOVH
|
|
|
|
|
default:
|
2016-06-27 16:54:57 -04:00
|
|
|
sz = 1
|
|
|
|
|
mov = arm.AMOVB
|
|
|
|
|
}
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(mov)
|
2016-05-13 15:31:14 -04:00
|
|
|
p.Scond = arm.C_PBIT
|
|
|
|
|
p.From.Type = obj.TYPE_MEM
|
|
|
|
|
p.From.Reg = arm.REG_R1
|
2016-06-27 16:54:57 -04:00
|
|
|
p.From.Offset = sz
|
2016-05-13 15:31:14 -04:00
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = arm.REGTMP
|
2017-03-20 08:01:28 -07:00
|
|
|
p2 := s.Prog(mov)
|
2016-05-13 15:31:14 -04:00
|
|
|
p2.Scond = arm.C_PBIT
|
|
|
|
|
p2.From.Type = obj.TYPE_REG
|
|
|
|
|
p2.From.Reg = arm.REGTMP
|
|
|
|
|
p2.To.Type = obj.TYPE_MEM
|
|
|
|
|
p2.To.Reg = arm.REG_R2
|
2016-06-27 16:54:57 -04:00
|
|
|
p2.To.Offset = sz
|
2017-03-20 08:01:28 -07:00
|
|
|
p3 := s.Prog(arm.ACMP)
|
2016-05-13 15:31:14 -04:00
|
|
|
p3.From.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p3.From.Reg = v.Args[2].Reg()
|
2016-05-13 15:31:14 -04:00
|
|
|
p3.Reg = arm.REG_R1
|
2017-03-20 08:01:28 -07:00
|
|
|
p4 := s.Prog(arm.ABLE)
|
2016-05-13 15:31:14 -04:00
|
|
|
p4.To.Type = obj.TYPE_BRANCH
|
|
|
|
|
gc.Patch(p4, p)
|
2016-05-06 10:13:31 -07:00
|
|
|
case ssa.OpARMEqual,
|
|
|
|
|
ssa.OpARMNotEqual,
|
|
|
|
|
ssa.OpARMLessThan,
|
|
|
|
|
ssa.OpARMLessEqual,
|
|
|
|
|
ssa.OpARMGreaterThan,
|
|
|
|
|
ssa.OpARMGreaterEqual,
|
|
|
|
|
ssa.OpARMLessThanU,
|
|
|
|
|
ssa.OpARMLessEqualU,
|
|
|
|
|
ssa.OpARMGreaterThanU,
|
|
|
|
|
ssa.OpARMGreaterEqualU:
|
2016-05-13 11:25:07 -04:00
|
|
|
// generate boolean values
|
|
|
|
|
// use conditional move
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(arm.AMOVW)
|
2016-05-13 11:25:07 -04:00
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = 0
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2017-03-20 08:01:28 -07:00
|
|
|
p = s.Prog(arm.AMOVW)
|
2016-05-13 11:25:07 -04:00
|
|
|
p.Scond = condBits[v.Op]
|
|
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = 1
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
2016-09-16 09:36:00 -07:00
|
|
|
p.To.Reg = v.Reg()
|
2016-05-25 09:49:28 -04:00
|
|
|
case ssa.OpARMLoweredGetClosurePtr:
|
2016-07-03 13:40:03 -07:00
|
|
|
// Closure pointer is R7 (arm.REGCTXT).
|
|
|
|
|
gc.CheckLoweredGetClosurePtr(v)
|
2017-10-09 15:33:29 -04:00
|
|
|
case ssa.OpARMLoweredGetCallerSP:
|
|
|
|
|
// caller's SP is FixedFrameSize below the address of the first arg
|
|
|
|
|
p := s.Prog(arm.AMOVW)
|
|
|
|
|
p.From.Type = obj.TYPE_ADDR
|
2020-11-19 20:49:23 -05:00
|
|
|
p.From.Offset = -base.Ctxt.FixedFrameSize()
|
2017-10-09 15:33:29 -04:00
|
|
|
p.From.Name = obj.NAME_PARAM
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = v.Reg()
|
2018-05-02 14:25:00 +08:00
|
|
|
case ssa.OpARMLoweredGetCallerPC:
|
|
|
|
|
p := s.Prog(obj.AGETCALLERPC)
|
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
|
p.To.Reg = v.Reg()
|
2020-06-15 14:43:02 -07:00
|
|
|
case ssa.OpARMFlagConstant:
|
|
|
|
|
v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString())
|
2016-06-13 16:49:09 -04:00
|
|
|
case ssa.OpARMInvertFlags:
|
|
|
|
|
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
|
2016-06-08 22:02:08 -07:00
|
|
|
case ssa.OpClobber:
|
|
|
|
|
// TODO: implement for clobberdead experiment. Nop is ok for now.
|
2016-03-21 22:57:26 -07:00
|
|
|
default:
|
2016-09-14 10:01:05 -07:00
|
|
|
v.Fatalf("genValue not implemented: %s", v.LongString())
|
2016-03-21 22:57:26 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-05-13 11:25:07 -04:00
|
|
|
var condBits = map[ssa.Op]uint8{
|
|
|
|
|
ssa.OpARMEqual: arm.C_SCOND_EQ,
|
|
|
|
|
ssa.OpARMNotEqual: arm.C_SCOND_NE,
|
|
|
|
|
ssa.OpARMLessThan: arm.C_SCOND_LT,
|
|
|
|
|
ssa.OpARMLessThanU: arm.C_SCOND_LO,
|
|
|
|
|
ssa.OpARMLessEqual: arm.C_SCOND_LE,
|
|
|
|
|
ssa.OpARMLessEqualU: arm.C_SCOND_LS,
|
|
|
|
|
ssa.OpARMGreaterThan: arm.C_SCOND_GT,
|
|
|
|
|
ssa.OpARMGreaterThanU: arm.C_SCOND_HI,
|
|
|
|
|
ssa.OpARMGreaterEqual: arm.C_SCOND_GE,
|
|
|
|
|
ssa.OpARMGreaterEqualU: arm.C_SCOND_HS,
|
|
|
|
|
}
|
|
|
|
|
|
2016-05-06 10:13:31 -07:00
|
|
|
var blockJump = map[ssa.BlockKind]struct {
|
|
|
|
|
asm, invasm obj.As
|
|
|
|
|
}{
|
2020-06-01 11:01:14 +00:00
|
|
|
ssa.BlockARMEQ: {arm.ABEQ, arm.ABNE},
|
|
|
|
|
ssa.BlockARMNE: {arm.ABNE, arm.ABEQ},
|
|
|
|
|
ssa.BlockARMLT: {arm.ABLT, arm.ABGE},
|
|
|
|
|
ssa.BlockARMGE: {arm.ABGE, arm.ABLT},
|
|
|
|
|
ssa.BlockARMLE: {arm.ABLE, arm.ABGT},
|
|
|
|
|
ssa.BlockARMGT: {arm.ABGT, arm.ABLE},
|
|
|
|
|
ssa.BlockARMULT: {arm.ABLO, arm.ABHS},
|
|
|
|
|
ssa.BlockARMUGE: {arm.ABHS, arm.ABLO},
|
|
|
|
|
ssa.BlockARMUGT: {arm.ABHI, arm.ABLS},
|
|
|
|
|
ssa.BlockARMULE: {arm.ABLS, arm.ABHI},
|
|
|
|
|
ssa.BlockARMLTnoov: {arm.ABMI, arm.ABPL},
|
|
|
|
|
ssa.BlockARMGEnoov: {arm.ABPL, arm.ABMI},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// To model a 'LEnoov' ('<=' without overflow checking) branching
|
|
|
|
|
var leJumps = [2][2]gc.IndexJump{
|
|
|
|
|
{{Jump: arm.ABEQ, Index: 0}, {Jump: arm.ABPL, Index: 1}}, // next == b.Succs[0]
|
|
|
|
|
{{Jump: arm.ABMI, Index: 0}, {Jump: arm.ABEQ, Index: 0}}, // next == b.Succs[1]
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// To model a 'GTnoov' ('>' without overflow checking) branching
|
|
|
|
|
var gtJumps = [2][2]gc.IndexJump{
|
|
|
|
|
{{Jump: arm.ABMI, Index: 1}, {Jump: arm.ABEQ, Index: 1}}, // next == b.Succs[0]
|
|
|
|
|
{{Jump: arm.ABEQ, Index: 1}, {Jump: arm.ABPL, Index: 0}}, // next == b.Succs[1]
|
2016-05-06 10:13:31 -07:00
|
|
|
}
|
|
|
|
|
|
2016-03-21 22:57:26 -07:00
|
|
|
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
|
|
|
|
|
switch b.Kind {
|
2016-09-13 17:01:01 -07:00
|
|
|
case ssa.BlockPlain:
|
2016-04-28 16:52:47 -07:00
|
|
|
if b.Succs[0].Block() != next {
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(obj.AJMP)
|
2016-03-21 22:57:26 -07:00
|
|
|
p.To.Type = obj.TYPE_BRANCH
|
2016-04-28 16:52:47 -07:00
|
|
|
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
|
2016-03-21 22:57:26 -07:00
|
|
|
}
|
2016-05-06 10:13:31 -07:00
|
|
|
|
2016-05-15 00:12:56 -04:00
|
|
|
case ssa.BlockDefer:
|
|
|
|
|
// defer returns in R0:
|
|
|
|
|
// 0 if we should continue executing
|
|
|
|
|
// 1 if we should jump to deferreturn call
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(arm.ACMP)
|
2016-05-15 00:12:56 -04:00
|
|
|
p.From.Type = obj.TYPE_CONST
|
|
|
|
|
p.From.Offset = 0
|
|
|
|
|
p.Reg = arm.REG_R0
|
2017-03-20 08:01:28 -07:00
|
|
|
p = s.Prog(arm.ABNE)
|
2016-05-15 00:12:56 -04:00
|
|
|
p.To.Type = obj.TYPE_BRANCH
|
|
|
|
|
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
|
|
|
|
|
if b.Succs[0].Block() != next {
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(obj.AJMP)
|
2016-05-15 00:12:56 -04:00
|
|
|
p.To.Type = obj.TYPE_BRANCH
|
|
|
|
|
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
|
|
|
|
|
}
|
|
|
|
|
|
2016-05-13 15:22:56 -04:00
|
|
|
case ssa.BlockExit:
|
|
|
|
|
|
2016-03-21 22:57:26 -07:00
|
|
|
case ssa.BlockRet:
|
2017-03-20 08:01:28 -07:00
|
|
|
s.Prog(obj.ARET)
|
2016-05-06 10:13:31 -07:00
|
|
|
|
2016-05-15 00:12:56 -04:00
|
|
|
case ssa.BlockRetJmp:
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(obj.ARET)
|
2016-05-15 00:12:56 -04:00
|
|
|
p.To.Type = obj.TYPE_MEM
|
|
|
|
|
p.To.Name = obj.NAME_EXTERN
|
2017-02-06 13:30:40 -08:00
|
|
|
p.To.Sym = b.Aux.(*obj.LSym)
|
2016-05-15 00:12:56 -04:00
|
|
|
|
2016-05-06 10:13:31 -07:00
|
|
|
case ssa.BlockARMEQ, ssa.BlockARMNE,
|
|
|
|
|
ssa.BlockARMLT, ssa.BlockARMGE,
|
|
|
|
|
ssa.BlockARMLE, ssa.BlockARMGT,
|
|
|
|
|
ssa.BlockARMULT, ssa.BlockARMUGT,
|
2020-06-01 11:01:14 +00:00
|
|
|
ssa.BlockARMULE, ssa.BlockARMUGE,
|
|
|
|
|
ssa.BlockARMLTnoov, ssa.BlockARMGEnoov:
|
2016-05-06 10:13:31 -07:00
|
|
|
jmp := blockJump[b.Kind]
|
|
|
|
|
switch next {
|
|
|
|
|
case b.Succs[0].Block():
|
2018-04-05 16:14:42 -04:00
|
|
|
s.Br(jmp.invasm, b.Succs[1].Block())
|
2016-05-06 10:13:31 -07:00
|
|
|
case b.Succs[1].Block():
|
2018-04-05 16:14:42 -04:00
|
|
|
s.Br(jmp.asm, b.Succs[0].Block())
|
2016-05-06 10:13:31 -07:00
|
|
|
default:
|
2018-04-05 16:14:42 -04:00
|
|
|
if b.Likely != ssa.BranchUnlikely {
|
|
|
|
|
s.Br(jmp.asm, b.Succs[0].Block())
|
|
|
|
|
s.Br(obj.AJMP, b.Succs[1].Block())
|
|
|
|
|
} else {
|
|
|
|
|
s.Br(jmp.invasm, b.Succs[1].Block())
|
|
|
|
|
s.Br(obj.AJMP, b.Succs[0].Block())
|
|
|
|
|
}
|
2016-05-06 10:13:31 -07:00
|
|
|
}
|
|
|
|
|
|
2020-06-01 11:01:14 +00:00
|
|
|
case ssa.BlockARMLEnoov:
|
|
|
|
|
s.CombJump(b, next, &leJumps)
|
|
|
|
|
|
|
|
|
|
case ssa.BlockARMGTnoov:
|
|
|
|
|
s.CombJump(b, next, >Jumps)
|
|
|
|
|
|
2016-05-06 10:13:31 -07:00
|
|
|
default:
|
2019-08-12 20:19:58 +01:00
|
|
|
b.Fatalf("branch not implemented: %s", b.LongString())
|
2016-03-21 22:57:26 -07:00
|
|
|
}
|
|
|
|
|
}
|