mirror of
https://github.com/golang/go.git
synced 2025-12-08 06:10:04 +00:00
cmd/compile/internal/ssa: more constant folding rules for ARM
(ADDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) -> (SUBconst [int64(int32(-c))] x) (SUBconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) -> (ADDconst [int64(int32(-c))] x) Currently a = a + 0xfffffff1 is compiled to (variable a is in R0) MVN $14, R11 ADD R11, R0, R0 After applying the above 2 rules, it becomes SUB $15, R0, R0 (BICconst [c] (BICconst [d] x)) -> (BICconst [int64(int32(c|d))] x) This rule also optimizes the generated ARM code. The other rules are added to avoid generating less optimized ARM code when ADD->SUB substitutions happen. Change-Id: I3ead9aae2b446b674e2ab42d37259d38ceb93a4d Reviewed-on: https://go-review.googlesource.com/41679 Reviewed-by: Keith Randall <khr@golang.org>
This commit is contained in:
parent
c4335f81a2
commit
38fbada557
3 changed files with 340 additions and 0 deletions
|
|
@ -624,3 +624,15 @@ func reciprocalExact32(c float32) bool {
|
|||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// isARMImmRot reports whether v can be encoded as an ARM data-processing
// immediate, i.e. an 8-bit constant rotated right by an even amount (0..30).
func isARMImmRot(v uint32) bool {
	rotated := v
	// Try all 16 even rotations; if the value ever fits in the low
	// 8 bits, some rotation of an 8-bit constant produces v.
	for count := 16; count > 0; count-- {
		if rotated <= 0xff {
			return true
		}
		// Rotate left by 2 (equivalently, undo one more step of an
		// even right-rotation of the candidate 8-bit constant).
		rotated = rotated<<2 | rotated>>30
	}
	return false
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue