mirror of https://github.com/golang/go.git
synced 2025-12-08 06:10:04 +00:00

cmd/compile: simplify naming for arm64 bitfield accessors

They are already methods on an arm64-specific type, so they don't need
to have arm64-specific names.

Change-Id: I2be29907f9892891d88d52cced043ca248aa4e08
Reviewed-on: https://go-review.googlesource.com/c/go/+/615196
Auto-Submit: Keith Randall <khr@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Cuong Manh Le <cuong.manhle.vn@gmail.com>
Reviewed-by: Keith Randall <khr@google.com>

This commit is contained in:
parent 1e5a72e980
commit b92f3f29c1

4 changed files with 142 additions and 142 deletions
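The whole change is mechanical: the arm64BitField auxInt type keeps its packed layout (lsb above bit 8, width in the low byte) and only the accessor names shrink. As orientation before the diff, a self-contained sketch of the type; the two accessor bodies are taken from the rewrite.go hunk below, while the underlying integer type and the body of armBFAuxInt are assumptions inferred from them:

package main

import "fmt"

// arm64BitField packs a bitfield spec into one auxInt value:
// lsb above bit 8, width in the low 8 bits. The layout follows
// from the accessor bodies in the rewrite.go hunk; the underlying
// integer type here is an assumption for this sketch.
type arm64BitField int16

// armBFAuxInt builds the packed value. This body is inferred from
// the accessors below, not copied from the commit.
func armBFAuxInt(lsb, width int64) arm64BitField {
	return arm64BitField(lsb<<8 | width)
}

// lsb was named getARM64BFlsb before this commit.
func (bfc arm64BitField) lsb() int64 { return int64(uint64(bfc) >> 8) }

// width was named getARM64BFwidth before this commit.
func (bfc arm64BitField) width() int64 { return int64(bfc) & 0xff }

func main() {
	bfc := armBFAuxInt(8, 16)           // a 16-bit field starting at bit 8
	fmt.Println(bfc.lsb(), bfc.width()) // prints: 8 16
}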
src/cmd/compile/internal/ssa/_gen/ARM64.rules

@@ -1779,24 +1779,24 @@
 (SRAconst [rc] (MOVHreg x)) && rc < 16 => (SBFX [armBFAuxInt(rc, 16-rc)] x)
 (SRAconst [rc] (MOVBreg x)) && rc < 8 => (SBFX [armBFAuxInt(rc, 8-rc)] x)
 // merge sbfx and sign-extension into sbfx
-(MOVWreg (SBFX [bfc] x)) && bfc.getARM64BFwidth() <= 32 => (SBFX [bfc] x)
-(MOVHreg (SBFX [bfc] x)) && bfc.getARM64BFwidth() <= 16 => (SBFX [bfc] x)
-(MOVBreg (SBFX [bfc] x)) && bfc.getARM64BFwidth() <= 8 => (SBFX [bfc] x)
+(MOVWreg (SBFX [bfc] x)) && bfc.width() <= 32 => (SBFX [bfc] x)
+(MOVHreg (SBFX [bfc] x)) && bfc.width() <= 16 => (SBFX [bfc] x)
+(MOVBreg (SBFX [bfc] x)) && bfc.width() <= 8 => (SBFX [bfc] x)
 
 // sbfiz/sbfx combinations: merge shifts into bitfield ops
-(SRAconst [sc] (SBFIZ [bfc] x)) && sc < bfc.getARM64BFlsb()
-	=> (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
-(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= bfc.getARM64BFlsb()
-	&& sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
-	=> (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+(SRAconst [sc] (SBFIZ [bfc] x)) && sc < bfc.lsb()
+	=> (SBFIZ [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
+(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= bfc.lsb()
+	&& sc < bfc.lsb()+bfc.width()
+	=> (SBFX [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
 (SBFX [bfc] s:(SLLconst [sc] x))
 	&& s.Uses == 1
-	&& sc <= bfc.getARM64BFlsb()
-	=> (SBFX [armBFAuxInt(bfc.getARM64BFlsb() - sc, bfc.getARM64BFwidth())] x)
+	&& sc <= bfc.lsb()
+	=> (SBFX [armBFAuxInt(bfc.lsb() - sc, bfc.width())] x)
 (SBFX [bfc] s:(SLLconst [sc] x))
 	&& s.Uses == 1
-	&& sc > bfc.getARM64BFlsb()
-	=> (SBFIZ [armBFAuxInt(sc - bfc.getARM64BFlsb(), bfc.getARM64BFwidth() - (sc-bfc.getARM64BFlsb()))] x)
+	&& sc > bfc.lsb()
+	=> (SBFIZ [armBFAuxInt(sc - bfc.lsb(), bfc.width() - (sc-bfc.lsb()))] x)
 
 // ubfiz
 // (x << lc) >> rc
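A quick sanity check of the sign-extension rules above: SBFX already sign-extends the field it extracts, so a following MOVBreg/MOVHreg/MOVWreg that re-extends from at least width() bits is a no-op. A scalar model (the helper names here are hypothetical; the real rules operate on SSA values):

package main

import "fmt"

// sbfx models ARM64 SBFX: extract width bits starting at lsb,
// then sign-extend to 64 bits via an arithmetic right shift.
func sbfx(x int64, lsb, width uint) int64 {
	return x << (64 - lsb - width) >> (64 - width)
}

// movbreg models MOVBreg: sign-extend from the low 8 bits.
func movbreg(x int64) int64 { return int64(int8(x)) }

func main() {
	x := int64(0xdeadbeef)
	v := sbfx(x, 4, 8)           // width 8 <= 8, so MOVBreg cannot change it
	fmt.Println(movbreg(v) == v) // prints: true
}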
@@ -1838,57 +1838,57 @@
 	=> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
 // merge ANDconst and ubfx into ubfx
 (ANDconst [c] (UBFX [bfc] x)) && isARM64BFMask(0, c, 0) =>
-	(UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), arm64BFWidth(c, 0)))] x)
-(UBFX [bfc] (ANDconst [c] x)) && isARM64BFMask(0, c, 0) && bfc.getARM64BFlsb() + bfc.getARM64BFwidth() <= arm64BFWidth(c, 0) =>
+	(UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), arm64BFWidth(c, 0)))] x)
+(UBFX [bfc] (ANDconst [c] x)) && isARM64BFMask(0, c, 0) && bfc.lsb() + bfc.width() <= arm64BFWidth(c, 0) =>
 	(UBFX [bfc] x)
 // merge ubfx and zero-extension into ubfx
-(MOVWUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 32 => (UBFX [bfc] x)
-(MOVHUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 16 => (UBFX [bfc] x)
-(MOVBUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 8 => (UBFX [bfc] x)
+(MOVWUreg (UBFX [bfc] x)) && bfc.width() <= 32 => (UBFX [bfc] x)
+(MOVHUreg (UBFX [bfc] x)) && bfc.width() <= 16 => (UBFX [bfc] x)
+(MOVBUreg (UBFX [bfc] x)) && bfc.width() <= 8 => (UBFX [bfc] x)
 
 // Extracting bits from across a zero-extension boundary.
 (UBFX [bfc] e:(MOVWUreg x))
 	&& e.Uses == 1
-	&& bfc.getARM64BFlsb() < 32
-	=> (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 32-bfc.getARM64BFlsb()))] x)
+	&& bfc.lsb() < 32
+	=> (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 32-bfc.lsb()))] x)
 (UBFX [bfc] e:(MOVHUreg x))
 	&& e.Uses == 1
-	&& bfc.getARM64BFlsb() < 16
-	=> (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 16-bfc.getARM64BFlsb()))] x)
+	&& bfc.lsb() < 16
+	=> (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 16-bfc.lsb()))] x)
 (UBFX [bfc] e:(MOVBUreg x))
 	&& e.Uses == 1
-	&& bfc.getARM64BFlsb() < 8
-	=> (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 8-bfc.getARM64BFlsb()))] x)
+	&& bfc.lsb() < 8
+	=> (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 8-bfc.lsb()))] x)
 
 // ubfiz/ubfx combinations: merge shifts into bitfield ops
-(SRLconst [sc] (UBFX [bfc] x)) && sc < bfc.getARM64BFwidth()
-	=> (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
-(UBFX [bfc] (SRLconst [sc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
-	=> (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
-(SLLconst [sc] (UBFIZ [bfc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
-	=> (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
-(UBFIZ [bfc] (SLLconst [sc] x)) && sc < bfc.getARM64BFwidth()
-	=> (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+(SRLconst [sc] (UBFX [bfc] x)) && sc < bfc.width()
+	=> (UBFX [armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc)] x)
+(UBFX [bfc] (SRLconst [sc] x)) && sc+bfc.width()+bfc.lsb() < 64
+	=> (UBFX [armBFAuxInt(bfc.lsb()+sc, bfc.width())] x)
+(SLLconst [sc] (UBFIZ [bfc] x)) && sc+bfc.width()+bfc.lsb() < 64
+	=> (UBFIZ [armBFAuxInt(bfc.lsb()+sc, bfc.width())] x)
+(UBFIZ [bfc] (SLLconst [sc] x)) && sc < bfc.width()
+	=> (UBFIZ [armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc)] x)
 // ((x << c1) >> c2) >> c3
-(SRLconst [sc] (UBFIZ [bfc] x)) && sc == bfc.getARM64BFlsb()
-	=> (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
-(SRLconst [sc] (UBFIZ [bfc] x)) && sc < bfc.getARM64BFlsb()
-	=> (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
-(SRLconst [sc] (UBFIZ [bfc] x)) && sc > bfc.getARM64BFlsb()
-	&& sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
-	=> (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc == bfc.lsb()
+	=> (ANDconst [1<<uint(bfc.width())-1] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc < bfc.lsb()
+	=> (UBFIZ [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc > bfc.lsb()
+	&& sc < bfc.lsb()+bfc.width()
+	=> (UBFX [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
 // ((x << c1) << c2) >> c3
-(UBFX [bfc] (SLLconst [sc] x)) && sc == bfc.getARM64BFlsb()
-	=> (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
-(UBFX [bfc] (SLLconst [sc] x)) && sc < bfc.getARM64BFlsb()
-	=> (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
-(UBFX [bfc] (SLLconst [sc] x)) && sc > bfc.getARM64BFlsb()
-	&& sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
-	=> (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc == bfc.lsb()
+	=> (ANDconst [1<<uint(bfc.width())-1] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc < bfc.lsb()
+	=> (UBFX [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc > bfc.lsb()
+	&& sc < bfc.lsb()+bfc.width()
+	=> (UBFIZ [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
 
 // bfi
 (OR (UBFIZ [bfc] x) (ANDconst [ac] y))
-	&& ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
+	&& ac == ^((1<<uint(bfc.width())-1) << uint(bfc.lsb()))
 	=> (BFI [bfc] y x)
 (ORshiftLL [s] (ANDconst [xc] x) (ANDconst [yc] y))
 	&& xc == ^(yc << s) // opposite masks
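The bfi condition at the end of this hunk says that y's mask must be the exact complement of the bits the UBFIZ writes. A worked instance of the mask arithmetic, with lsb and width chosen for illustration:

package main

import "fmt"

func main() {
	lsb, width := uint(8), uint(16)
	mask := (uint64(1)<<width - 1) << lsb // bits the UBFIZ writes
	ac := ^mask                           // the ANDconst the rule requires
	fmt.Printf("%#x %#x\n", mask, ac)     // 0xffff00 0xffffffffff0000ff
}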
@@ -1900,9 +1900,9 @@
 	&& lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
 	=> (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
 // bfxil
-(OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(bfc.getARM64BFwidth())-1)
+(OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(bfc.width())-1)
 	=> (BFXIL [bfc] y x)
-(ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == bfc.getARM64BFwidth()
+(ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == bfc.width()
 	=> (BFXIL [bfc] y x)
 (ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x)) && lc < rc && ac == ^((1<<uint(64-rc)-1))
 	=> (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
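For the bfxil rules, BFXIL extracts a width() field from the source and inserts it at bit 0 of the destination, so the OR form must leave y's low width() bits masked off. A scalar model under that reading of the instruction (helper names hypothetical):

package main

import "fmt"

// ubfx models ARM64 UBFX: unsigned extract of width bits at lsb.
func ubfx(x uint64, lsb, width uint) uint64 {
	return (x >> lsb) & (1<<width - 1)
}

// bfxil models BFXIL [lsb,width] y x: extract from x, insert into
// the low width bits of y.
func bfxil(y, x uint64, lsb, width uint) uint64 {
	return y&^(1<<width-1) | ubfx(x, lsb, width)
}

func main() {
	lsb, width := uint(4), uint(8)
	x, y := uint64(0x12345678), uint64(0xaaaaaaaa)
	or := ubfx(x, lsb, width) | y&^(1<<width-1) // the matched OR form
	fmt.Println(or == bfxil(y, x, lsb, width))  // prints: true
}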
src/cmd/compile/internal/ssa/rewrite.go

@@ -1808,12 +1808,12 @@ func armBFAuxInt(lsb, width int64) arm64BitField {
 }
 
 // returns the lsb part of the auxInt field of arm64 bitfield ops.
-func (bfc arm64BitField) getARM64BFlsb() int64 {
+func (bfc arm64BitField) lsb() int64 {
 	return int64(uint64(bfc) >> 8)
 }
 
 // returns the width part of the auxInt field of arm64 bitfield ops.
-func (bfc arm64BitField) getARM64BFwidth() int64 {
+func (bfc arm64BitField) width() int64 {
 	return int64(bfc) & 0xff
 }
 
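This hunk is the commit message's point in miniature: Go method names are scoped by their receiver type, so the short names cannot collide with same-named methods on other types. A toy illustration (the second type is invented for the example):

package main

import "fmt"

type arm64BitField int16
type fakeBitField int16 // invented type, just for the illustration

func (b arm64BitField) width() int64 { return int64(b) & 0xff }
func (b fakeBitField) width() int64  { return int64(b) & 0x3f }

func main() {
	// Same method name, different receiver types: no collision.
	fmt.Println(arm64BitField(0x1ff).width(), fakeBitField(0x1ff).width()) // 255 63
}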
src/cmd/compile/internal/ssa/rewriteARM64.go

@@ -2010,7 +2010,7 @@ func rewriteValueARM64_OpARM64ANDconst(v *Value) bool {
 	}
 	// match: (ANDconst [c] (UBFX [bfc] x))
 	// cond: isARM64BFMask(0, c, 0)
-	// result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), arm64BFWidth(c, 0)))] x)
+	// result: (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), arm64BFWidth(c, 0)))] x)
 	for {
 		c := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFX {
@@ -2022,7 +2022,7 @@ func rewriteValueARM64_OpARM64ANDconst(v *Value) bool {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), arm64BFWidth(c, 0))))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb(), min(bfc.width(), arm64BFWidth(c, 0))))
 		v.AddArg(x)
 		return true
 	}
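The min() in this rewrite encodes that masking after an unsigned extract can only narrow the field: if c is a contiguous mask of n low bits — the reading suggested by the isARM64BFMask(0, c, 0) guard and arm64BFWidth(c, 0) — the AND caps the effective width at n. A scalar check with the same hypothetical ubfx model as above:

package main

import "fmt"

// ubfx models ARM64 UBFX: unsigned extract of width bits at lsb.
func ubfx(x uint64, lsb, width uint) uint64 {
	return (x >> lsb) & (1<<width - 1)
}

func main() {
	x := uint64(0xdeadbeefdeadbeef)
	lsb, width, n := uint(8), uint(24), uint(16)
	c := uint64(1)<<n - 1                        // a contiguous low mask
	merged := ubfx(x, lsb, min(width, n))        // the rewritten single UBFX
	fmt.Println(ubfx(x, lsb, width)&c == merged) // prints: true
}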
@@ -8414,7 +8414,7 @@ func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool {
 		return true
 	}
 	// match: (MOVBUreg (UBFX [bfc] x))
-	// cond: bfc.getARM64BFwidth() <= 8
+	// cond: bfc.width() <= 8
 	// result: (UBFX [bfc] x)
 	for {
 		if v_0.Op != OpARM64UBFX {
@@ -8422,7 +8422,7 @@ func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool {
 		}
 		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(bfc.getARM64BFwidth() <= 8) {
+		if !(bfc.width() <= 8) {
 			break
 		}
 		v.reset(OpARM64UBFX)
@@ -8642,7 +8642,7 @@ func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool {
 		return true
 	}
 	// match: (MOVBreg (SBFX [bfc] x))
-	// cond: bfc.getARM64BFwidth() <= 8
+	// cond: bfc.width() <= 8
 	// result: (SBFX [bfc] x)
 	for {
 		if v_0.Op != OpARM64SBFX {
@@ -8650,7 +8650,7 @@ func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool {
 		}
 		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(bfc.getARM64BFwidth() <= 8) {
+		if !(bfc.width() <= 8) {
 			break
 		}
 		v.reset(OpARM64SBFX)
@@ -10240,7 +10240,7 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool {
 		return true
 	}
 	// match: (MOVHUreg (UBFX [bfc] x))
-	// cond: bfc.getARM64BFwidth() <= 16
+	// cond: bfc.width() <= 16
 	// result: (UBFX [bfc] x)
 	for {
 		if v_0.Op != OpARM64UBFX {
@@ -10248,7 +10248,7 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool {
 		}
 		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(bfc.getARM64BFwidth() <= 16) {
+		if !(bfc.width() <= 16) {
 			break
 		}
 		v.reset(OpARM64UBFX)
@@ -10574,7 +10574,7 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool {
 		return true
 	}
 	// match: (MOVHreg (SBFX [bfc] x))
-	// cond: bfc.getARM64BFwidth() <= 16
+	// cond: bfc.width() <= 16
 	// result: (SBFX [bfc] x)
 	for {
 		if v_0.Op != OpARM64SBFX {
@@ -10582,7 +10582,7 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool {
 		}
 		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(bfc.getARM64BFwidth() <= 16) {
+		if !(bfc.width() <= 16) {
 			break
 		}
 		v.reset(OpARM64SBFX)
@@ -11655,7 +11655,7 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool {
 		return true
 	}
 	// match: (MOVWUreg (UBFX [bfc] x))
-	// cond: bfc.getARM64BFwidth() <= 32
+	// cond: bfc.width() <= 32
 	// result: (UBFX [bfc] x)
 	for {
 		if v_0.Op != OpARM64UBFX {
@@ -11663,7 +11663,7 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool {
 		}
 		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(bfc.getARM64BFwidth() <= 32) {
+		if !(bfc.width() <= 32) {
 			break
 		}
 		v.reset(OpARM64UBFX)
@@ -11970,7 +11970,7 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool {
 		return true
 	}
 	// match: (MOVWreg (SBFX [bfc] x))
-	// cond: bfc.getARM64BFwidth() <= 32
+	// cond: bfc.width() <= 32
 	// result: (SBFX [bfc] x)
 	for {
 		if v_0.Op != OpARM64SBFX {
@@ -11978,7 +11978,7 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool {
 		}
 		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(bfc.getARM64BFwidth() <= 32) {
+		if !(bfc.width() <= 32) {
 			break
 		}
 		v.reset(OpARM64SBFX)
@@ -14671,7 +14671,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
 		break
 	}
 	// match: (OR (UBFIZ [bfc] x) (ANDconst [ac] y))
-	// cond: ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
+	// cond: ac == ^((1<<uint(bfc.width())-1) << uint(bfc.lsb()))
 	// result: (BFI [bfc] y x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -14685,7 +14685,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
 			}
 			ac := auxIntToInt64(v_1.AuxInt)
 			y := v_1.Args[0]
-			if !(ac == ^((1<<uint(bfc.getARM64BFwidth()) - 1) << uint(bfc.getARM64BFlsb()))) {
+			if !(ac == ^((1<<uint(bfc.width()) - 1) << uint(bfc.lsb()))) {
 				continue
 			}
 			v.reset(OpARM64BFI)
@@ -14696,7 +14696,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
 		break
 	}
 	// match: (OR (UBFX [bfc] x) (ANDconst [ac] y))
-	// cond: ac == ^(1<<uint(bfc.getARM64BFwidth())-1)
+	// cond: ac == ^(1<<uint(bfc.width())-1)
 	// result: (BFXIL [bfc] y x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -14710,7 +14710,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
 			}
 			ac := auxIntToInt64(v_1.AuxInt)
 			y := v_1.Args[0]
-			if !(ac == ^(1<<uint(bfc.getARM64BFwidth()) - 1)) {
+			if !(ac == ^(1<<uint(bfc.width()) - 1)) {
 				continue
 			}
 			v.reset(OpARM64BFXIL)
@@ -15229,7 +15229,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
 		return true
 	}
 	// match: (ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y))
-	// cond: sc == bfc.getARM64BFwidth()
+	// cond: sc == bfc.width()
 	// result: (BFXIL [bfc] y x)
 	for {
 		sc := auxIntToInt64(v.AuxInt)
@@ -15242,7 +15242,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
 			break
 		}
 		y := v_1.Args[0]
-		if !(sc == bfc.getARM64BFwidth()) {
+		if !(sc == bfc.width()) {
 			break
 		}
 		v.reset(OpARM64BFXIL)
@@ -15574,8 +15574,8 @@ func rewriteValueARM64_OpARM64SBCSflags(v *Value) bool {
 func rewriteValueARM64_OpARM64SBFX(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (SBFX [bfc] s:(SLLconst [sc] x))
-	// cond: s.Uses == 1 && sc <= bfc.getARM64BFlsb()
-	// result: (SBFX [armBFAuxInt(bfc.getARM64BFlsb() - sc, bfc.getARM64BFwidth())] x)
+	// cond: s.Uses == 1 && sc <= bfc.lsb()
+	// result: (SBFX [armBFAuxInt(bfc.lsb() - sc, bfc.width())] x)
 	for {
 		bfc := auxIntToArm64BitField(v.AuxInt)
 		s := v_0
@@ -15584,17 +15584,17 @@ func rewriteValueARM64_OpARM64SBFX(v *Value) bool {
 		}
 		sc := auxIntToInt64(s.AuxInt)
 		x := s.Args[0]
-		if !(s.Uses == 1 && sc <= bfc.getARM64BFlsb()) {
+		if !(s.Uses == 1 && sc <= bfc.lsb()) {
 			break
 		}
 		v.reset(OpARM64SBFX)
-		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb()-sc, bfc.width()))
 		v.AddArg(x)
 		return true
 	}
 	// match: (SBFX [bfc] s:(SLLconst [sc] x))
-	// cond: s.Uses == 1 && sc > bfc.getARM64BFlsb()
-	// result: (SBFIZ [armBFAuxInt(sc - bfc.getARM64BFlsb(), bfc.getARM64BFwidth() - (sc-bfc.getARM64BFlsb()))] x)
+	// cond: s.Uses == 1 && sc > bfc.lsb()
+	// result: (SBFIZ [armBFAuxInt(sc - bfc.lsb(), bfc.width() - (sc-bfc.lsb()))] x)
 	for {
 		bfc := auxIntToArm64BitField(v.AuxInt)
 		s := v_0
@@ -15603,11 +15603,11 @@ func rewriteValueARM64_OpARM64SBFX(v *Value) bool {
 		}
 		sc := auxIntToInt64(s.AuxInt)
 		x := s.Args[0]
-		if !(s.Uses == 1 && sc > bfc.getARM64BFlsb()) {
+		if !(s.Uses == 1 && sc > bfc.lsb()) {
 			break
 		}
 		v.reset(OpARM64SBFIZ)
-		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFwidth()-(sc-bfc.getARM64BFlsb())))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.lsb(), bfc.width()-(sc-bfc.lsb())))
 		v.AddArg(x)
 		return true
 	}
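The two SBFX-of-SLLconst cases split on whether the shift crosses the field's lsb. For sc <= lsb(), extracting at lsb from x<<sc is the same as extracting at lsb-sc from x, which is exactly what the first rewrite computes. A scalar check with the sbfx model from earlier:

package main

import "fmt"

// sbfx models ARM64 SBFX: signed extract of width bits at lsb.
func sbfx(x int64, lsb, width uint) int64 {
	return x << (64 - lsb - width) >> (64 - width)
}

func main() {
	x := int64(0x123456789abcdef0)
	sc, lsb, width := uint(4), uint(12), uint(16) // sc <= lsb
	fmt.Println(sbfx(x<<sc, lsb, width) == sbfx(x, lsb-sc, width)) // true
}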
@@ -15771,8 +15771,8 @@ func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
 		return true
 	}
 	// match: (SLLconst [sc] (UBFIZ [bfc] x))
-	// cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
-	// result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+	// cond: sc+bfc.width()+bfc.lsb() < 64
+	// result: (UBFIZ [armBFAuxInt(bfc.lsb()+sc, bfc.width())] x)
 	for {
 		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFIZ {
@@ -15780,11 +15780,11 @@ func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
 		}
 		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
+		if !(sc+bfc.width()+bfc.lsb() < 64) {
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb()+sc, bfc.width()))
 		v.AddArg(x)
 		return true
 	}
@@ -15922,8 +15922,8 @@ func rewriteValueARM64_OpARM64SRAconst(v *Value) bool {
 		return true
 	}
 	// match: (SRAconst [sc] (SBFIZ [bfc] x))
-	// cond: sc < bfc.getARM64BFlsb()
-	// result: (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+	// cond: sc < bfc.lsb()
+	// result: (SBFIZ [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
 	for {
 		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64SBFIZ {
@@ -15931,17 +15931,17 @@ func rewriteValueARM64_OpARM64SRAconst(v *Value) bool {
 		}
 		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc < bfc.getARM64BFlsb()) {
+		if !(sc < bfc.lsb()) {
 			break
 		}
 		v.reset(OpARM64SBFIZ)
-		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb()-sc, bfc.width()))
 		v.AddArg(x)
 		return true
 	}
 	// match: (SRAconst [sc] (SBFIZ [bfc] x))
-	// cond: sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
-	// result: (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+	// cond: sc >= bfc.lsb() && sc < bfc.lsb()+bfc.width()
+	// result: (SBFX [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
 	for {
 		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64SBFIZ {
@@ -15949,11 +15949,11 @@ func rewriteValueARM64_OpARM64SRAconst(v *Value) bool {
 		}
 		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
+		if !(sc >= bfc.lsb() && sc < bfc.lsb()+bfc.width()) {
 			break
 		}
 		v.reset(OpARM64SBFX)
-		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc))
 		v.AddArg(x)
 		return true
 	}
@@ -16171,8 +16171,8 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
 		return true
 	}
 	// match: (SRLconst [sc] (UBFX [bfc] x))
-	// cond: sc < bfc.getARM64BFwidth()
-	// result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+	// cond: sc < bfc.width()
+	// result: (UBFX [armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc)] x)
 	for {
 		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFX {
@@ -16180,17 +16180,17 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
 		}
 		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc < bfc.getARM64BFwidth()) {
+		if !(sc < bfc.width()) {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc))
 		v.AddArg(x)
 		return true
 	}
 	// match: (SRLconst [sc] (UBFIZ [bfc] x))
-	// cond: sc == bfc.getARM64BFlsb()
-	// result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+	// cond: sc == bfc.lsb()
+	// result: (ANDconst [1<<uint(bfc.width())-1] x)
 	for {
 		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFIZ {
@@ -16198,17 +16198,17 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
 		}
 		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc == bfc.getARM64BFlsb()) {
+		if !(sc == bfc.lsb()) {
 			break
 		}
 		v.reset(OpARM64ANDconst)
-		v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
+		v.AuxInt = int64ToAuxInt(1<<uint(bfc.width()) - 1)
 		v.AddArg(x)
 		return true
 	}
 	// match: (SRLconst [sc] (UBFIZ [bfc] x))
-	// cond: sc < bfc.getARM64BFlsb()
-	// result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+	// cond: sc < bfc.lsb()
+	// result: (UBFIZ [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
 	for {
 		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFIZ {
@@ -16216,17 +16216,17 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
 		}
 		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc < bfc.getARM64BFlsb()) {
+		if !(sc < bfc.lsb()) {
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb()-sc, bfc.width()))
 		v.AddArg(x)
 		return true
 	}
 	// match: (SRLconst [sc] (UBFIZ [bfc] x))
-	// cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
-	// result: (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+	// cond: sc > bfc.lsb() && sc < bfc.lsb()+bfc.width()
+	// result: (UBFX [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
 	for {
 		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFIZ {
@@ -16234,11 +16234,11 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
 		}
 		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
+		if !(sc > bfc.lsb() && sc < bfc.lsb()+bfc.width()) {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc))
 		v.AddArg(x)
 		return true
 	}
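The sc == bfc.lsb() case above has a tidy reading: UBFIZ deposits a width()-bit field at lsb(), and shifting right by exactly lsb() brings it back down to bit 0, leaving a plain mask. A scalar check (the ubfiz model is hypothetical):

package main

import "fmt"

// ubfiz models ARM64 UBFIZ: take the low width bits of x and
// deposit them at lsb, zeroing everything else.
func ubfiz(x uint64, lsb, width uint) uint64 {
	return (x & (1<<width - 1)) << lsb
}

func main() {
	x := uint64(0xfeedfacecafef00d)
	lsb, width := uint(8), uint(16)
	// Shifting right by exactly lsb undoes the deposit: just a mask.
	fmt.Println(ubfiz(x, lsb, width)>>lsb == x&(1<<width-1)) // true
}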
@@ -16974,8 +16974,8 @@ func rewriteValueARM64_OpARM64TSTshiftRO(v *Value) bool {
 func rewriteValueARM64_OpARM64UBFIZ(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (UBFIZ [bfc] (SLLconst [sc] x))
-	// cond: sc < bfc.getARM64BFwidth()
-	// result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+	// cond: sc < bfc.width()
+	// result: (UBFIZ [armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc)] x)
 	for {
 		bfc := auxIntToArm64BitField(v.AuxInt)
 		if v_0.Op != OpARM64SLLconst {
@@ -16983,11 +16983,11 @@ func rewriteValueARM64_OpARM64UBFIZ(v *Value) bool {
 		}
 		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc < bfc.getARM64BFwidth()) {
+		if !(sc < bfc.width()) {
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc))
 		v.AddArg(x)
 		return true
 	}
@@ -16996,7 +16996,7 @@
 func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (UBFX [bfc] (ANDconst [c] x))
-	// cond: isARM64BFMask(0, c, 0) && bfc.getARM64BFlsb() + bfc.getARM64BFwidth() <= arm64BFWidth(c, 0)
+	// cond: isARM64BFMask(0, c, 0) && bfc.lsb() + bfc.width() <= arm64BFWidth(c, 0)
 	// result: (UBFX [bfc] x)
 	for {
 		bfc := auxIntToArm64BitField(v.AuxInt)
@@ -17005,7 +17005,7 @@ func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
 		}
 		c := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(isARM64BFMask(0, c, 0) && bfc.getARM64BFlsb()+bfc.getARM64BFwidth() <= arm64BFWidth(c, 0)) {
+		if !(isARM64BFMask(0, c, 0) && bfc.lsb()+bfc.width() <= arm64BFWidth(c, 0)) {
 			break
 		}
 		v.reset(OpARM64UBFX)
@@ -17014,8 +17014,8 @@ func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
 		return true
 	}
 	// match: (UBFX [bfc] e:(MOVWUreg x))
-	// cond: e.Uses == 1 && bfc.getARM64BFlsb() < 32
-	// result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 32-bfc.getARM64BFlsb()))] x)
+	// cond: e.Uses == 1 && bfc.lsb() < 32
+	// result: (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 32-bfc.lsb()))] x)
 	for {
 		bfc := auxIntToArm64BitField(v.AuxInt)
 		e := v_0
@@ -17023,17 +17023,17 @@ func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
 			break
 		}
 		x := e.Args[0]
-		if !(e.Uses == 1 && bfc.getARM64BFlsb() < 32) {
+		if !(e.Uses == 1 && bfc.lsb() < 32) {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 32-bfc.getARM64BFlsb())))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb(), min(bfc.width(), 32-bfc.lsb())))
 		v.AddArg(x)
 		return true
 	}
 	// match: (UBFX [bfc] e:(MOVHUreg x))
-	// cond: e.Uses == 1 && bfc.getARM64BFlsb() < 16
-	// result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 16-bfc.getARM64BFlsb()))] x)
+	// cond: e.Uses == 1 && bfc.lsb() < 16
+	// result: (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 16-bfc.lsb()))] x)
 	for {
 		bfc := auxIntToArm64BitField(v.AuxInt)
 		e := v_0
@@ -17041,17 +17041,17 @@ func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
 			break
 		}
 		x := e.Args[0]
-		if !(e.Uses == 1 && bfc.getARM64BFlsb() < 16) {
+		if !(e.Uses == 1 && bfc.lsb() < 16) {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 16-bfc.getARM64BFlsb())))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb(), min(bfc.width(), 16-bfc.lsb())))
 		v.AddArg(x)
 		return true
 	}
 	// match: (UBFX [bfc] e:(MOVBUreg x))
-	// cond: e.Uses == 1 && bfc.getARM64BFlsb() < 8
-	// result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 8-bfc.getARM64BFlsb()))] x)
+	// cond: e.Uses == 1 && bfc.lsb() < 8
+	// result: (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 8-bfc.lsb()))] x)
 	for {
 		bfc := auxIntToArm64BitField(v.AuxInt)
 		e := v_0
@@ -17059,17 +17059,17 @@ func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
 			break
 		}
 		x := e.Args[0]
-		if !(e.Uses == 1 && bfc.getARM64BFlsb() < 8) {
+		if !(e.Uses == 1 && bfc.lsb() < 8) {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 8-bfc.getARM64BFlsb())))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb(), min(bfc.width(), 8-bfc.lsb())))
 		v.AddArg(x)
 		return true
 	}
 	// match: (UBFX [bfc] (SRLconst [sc] x))
-	// cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
-	// result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+	// cond: sc+bfc.width()+bfc.lsb() < 64
+	// result: (UBFX [armBFAuxInt(bfc.lsb()+sc, bfc.width())] x)
 	for {
 		bfc := auxIntToArm64BitField(v.AuxInt)
 		if v_0.Op != OpARM64SRLconst {
@@ -17077,17 +17077,17 @@ func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
 		}
 		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
+		if !(sc+bfc.width()+bfc.lsb() < 64) {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb()+sc, bfc.width()))
 		v.AddArg(x)
 		return true
 	}
 	// match: (UBFX [bfc] (SLLconst [sc] x))
-	// cond: sc == bfc.getARM64BFlsb()
-	// result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+	// cond: sc == bfc.lsb()
+	// result: (ANDconst [1<<uint(bfc.width())-1] x)
 	for {
 		bfc := auxIntToArm64BitField(v.AuxInt)
 		if v_0.Op != OpARM64SLLconst {
@@ -17095,17 +17095,17 @@ func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
 		}
 		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc == bfc.getARM64BFlsb()) {
+		if !(sc == bfc.lsb()) {
 			break
 		}
 		v.reset(OpARM64ANDconst)
-		v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
+		v.AuxInt = int64ToAuxInt(1<<uint(bfc.width()) - 1)
 		v.AddArg(x)
 		return true
 	}
 	// match: (UBFX [bfc] (SLLconst [sc] x))
-	// cond: sc < bfc.getARM64BFlsb()
-	// result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+	// cond: sc < bfc.lsb()
+	// result: (UBFX [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
 	for {
 		bfc := auxIntToArm64BitField(v.AuxInt)
 		if v_0.Op != OpARM64SLLconst {
@@ -17113,17 +17113,17 @@ func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
 		}
 		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc < bfc.getARM64BFlsb()) {
+		if !(sc < bfc.lsb()) {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb()-sc, bfc.width()))
 		v.AddArg(x)
 		return true
 	}
 	// match: (UBFX [bfc] (SLLconst [sc] x))
-	// cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
-	// result: (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+	// cond: sc > bfc.lsb() && sc < bfc.lsb()+bfc.width()
+	// result: (UBFIZ [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
 	for {
 		bfc := auxIntToArm64BitField(v.AuxInt)
 		if v_0.Op != OpARM64SLLconst {
@@ -17131,11 +17131,11 @@ func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
 		}
 		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
+		if !(sc > bfc.lsb() && sc < bfc.lsb()+bfc.width()) {
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc))
 		v.AddArg(x)
 		return true
 	}
src/cmd/compile/internal/ssa/value.go

@@ -200,8 +200,8 @@ func (v *Value) auxString() string {
 	case auxUInt8:
 		return fmt.Sprintf(" [%d]", v.AuxUInt8())
 	case auxARM64BitField:
-		lsb := v.AuxArm64BitField().getARM64BFlsb()
-		width := v.AuxArm64BitField().getARM64BFwidth()
+		lsb := v.AuxArm64BitField().lsb()
+		width := v.AuxArm64BitField().width()
 		return fmt.Sprintf(" [lsb=%d,width=%d]", lsb, width)
 	case auxFloat32, auxFloat64:
 		return fmt.Sprintf(" [%g]", v.AuxFloat())
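With the rename the debug output itself is unchanged: per the Sprintf format above, a value whose auxInt was built as armBFAuxInt(8, 16) still prints with the suffix " [lsb=8,width=16]".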