// Scraped page header (GitHub file-view chrome, not part of the source file):
// go/src/cmd/compile/internal/ssa/rewriteAMD64.go — 10602 lines, 228 KiB, Go.
// (Raw / Normal View / History)
// autogenerated from gen/AMD64.rules: do not edit!
// generated with: cd gen; go run *.go
package ssa
func rewriteValueAMD64(v *Value, config *Config) bool {
b := v.Block
switch v.Op {
case OpAMD64ADDB:
// match: (ADDB x (MOVBconst [c]))
// cond:
// result: (ADDBconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto endab690db69bfd8192eea57a2f9f76bf84
}
c := v.Args[1].AuxInt
v.Op = OpAMD64ADDBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endab690db69bfd8192eea57a2f9f76bf84
endab690db69bfd8192eea57a2f9f76bf84:
;
// match: (ADDB (MOVBconst [c]) x)
// cond:
// result: (ADDBconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVBconst {
goto end28aa1a4abe7e1abcdd64135e9967d39d
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64ADDBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end28aa1a4abe7e1abcdd64135e9967d39d
end28aa1a4abe7e1abcdd64135e9967d39d:
;
// match: (ADDB x (NEGB y))
// cond:
// result: (SUBB x y)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64NEGB {
goto end9464509b8874ffb00b43b843da01f0bc
}
y := v.Args[1].Args[0]
v.Op = OpAMD64SUBB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end9464509b8874ffb00b43b843da01f0bc
end9464509b8874ffb00b43b843da01f0bc:
;
case OpAMD64ADDBconst:
// match: (ADDBconst [c] (MOVBconst [d]))
// cond:
// result: (MOVBconst [c+d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVBconst {
goto enda9b1e9e31ccdf0af5f4fe57bf4b1343f
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c + d
return true
}
goto enda9b1e9e31ccdf0af5f4fe57bf4b1343f
enda9b1e9e31ccdf0af5f4fe57bf4b1343f:
;
// match: (ADDBconst [c] (ADDBconst [d] x))
// cond:
// result: (ADDBconst [c+d] x)
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64ADDBconst {
goto end9b1e6890adbf9d9e447d591b4148cbd0
}
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
v.Op = OpAMD64ADDBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c + d
v.AddArg(x)
return true
}
goto end9b1e6890adbf9d9e447d591b4148cbd0
end9b1e6890adbf9d9e447d591b4148cbd0:
;
case OpAMD64ADDL:
// match: (ADDL x (MOVLconst [c]))
// cond:
// result: (ADDLconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto end8d6d3b99a7be8da6b7a254b7e709cc95
}
c := v.Args[1].AuxInt
v.Op = OpAMD64ADDLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end8d6d3b99a7be8da6b7a254b7e709cc95
end8d6d3b99a7be8da6b7a254b7e709cc95:
;
// match: (ADDL (MOVLconst [c]) x)
// cond:
// result: (ADDLconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVLconst {
goto end739561e08a561e26ce3634dc0d5ec733
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64ADDLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end739561e08a561e26ce3634dc0d5ec733
end739561e08a561e26ce3634dc0d5ec733:
;
// match: (ADDL x (NEGL y))
// cond:
// result: (SUBL x y)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64NEGL {
goto end9596df31f2685a49df67c6fb912a521d
}
y := v.Args[1].Args[0]
v.Op = OpAMD64SUBL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end9596df31f2685a49df67c6fb912a521d
end9596df31f2685a49df67c6fb912a521d:
;
case OpAMD64ADDLconst:
// match: (ADDLconst [c] (MOVLconst [d]))
// cond:
// result: (MOVLconst [c+d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVLconst {
goto ende04850e987890abf1d66199042a19c23
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c + d
return true
}
goto ende04850e987890abf1d66199042a19c23
ende04850e987890abf1d66199042a19c23:
;
// match: (ADDLconst [c] (ADDLconst [d] x))
// cond:
// result: (ADDLconst [c+d] x)
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64ADDLconst {
goto endf1dd8673b2fef4950aec87aa7523a236
}
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
v.Op = OpAMD64ADDLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c + d
v.AddArg(x)
return true
}
goto endf1dd8673b2fef4950aec87aa7523a236
endf1dd8673b2fef4950aec87aa7523a236:
;
case OpAMD64ADDQ:
// match: (ADDQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (ADDQconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end1de8aeb1d043e0dadcffd169a99ce5c0
}
c := v.Args[1].AuxInt
if !(is32Bit(c)) {
goto end1de8aeb1d043e0dadcffd169a99ce5c0
}
v.Op = OpAMD64ADDQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end1de8aeb1d043e0dadcffd169a99ce5c0
end1de8aeb1d043e0dadcffd169a99ce5c0:
;
// match: (ADDQ (MOVQconst [c]) x)
// cond: is32Bit(c)
// result: (ADDQconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVQconst {
goto endca635e3bdecd9e3aeb892f841021dfaa
}
c := v.Args[0].AuxInt
x := v.Args[1]
if !(is32Bit(c)) {
goto endca635e3bdecd9e3aeb892f841021dfaa
}
v.Op = OpAMD64ADDQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endca635e3bdecd9e3aeb892f841021dfaa
endca635e3bdecd9e3aeb892f841021dfaa:
;
// match: (ADDQ x (SHLQconst [3] y))
// cond:
// result: (LEAQ8 x y)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64SHLQconst {
goto endc02313d35a0525d1d680cd58992e820d
}
if v.Args[1].AuxInt != 3 {
goto endc02313d35a0525d1d680cd58992e820d
}
y := v.Args[1].Args[0]
v.Op = OpAMD64LEAQ8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endc02313d35a0525d1d680cd58992e820d
endc02313d35a0525d1d680cd58992e820d:
;
// match: (ADDQ x (NEGQ y))
// cond:
// result: (SUBQ x y)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64NEGQ {
goto endec8f899c6e175a0147a90750f9bfe0a2
}
y := v.Args[1].Args[0]
v.Op = OpAMD64SUBQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endec8f899c6e175a0147a90750f9bfe0a2
endec8f899c6e175a0147a90750f9bfe0a2:
;
case OpAMD64ADDQconst:
// match: (ADDQconst [c] (LEAQ8 [d] x y))
// cond:
// result: (LEAQ8 [addOff(c, d)] x y)
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64LEAQ8 {
goto ende2cc681c9abf9913288803fb1b39e639
}
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
v.Op = OpAMD64LEAQ8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(c, d)
v.AddArg(x)
v.AddArg(y)
return true
}
goto ende2cc681c9abf9913288803fb1b39e639
ende2cc681c9abf9913288803fb1b39e639:
;
// match: (ADDQconst [0] x)
// cond:
// result: x
{
if v.AuxInt != 0 {
goto end03d9f5a3e153048b0afa781401e2a849
}
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto end03d9f5a3e153048b0afa781401e2a849
end03d9f5a3e153048b0afa781401e2a849:
;
// match: (ADDQconst [c] (MOVQconst [d]))
// cond:
// result: (MOVQconst [c+d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVQconst {
goto end09dc54395b4e96e8332cf8e4e7481c52
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c + d
return true
}
goto end09dc54395b4e96e8332cf8e4e7481c52
end09dc54395b4e96e8332cf8e4e7481c52:
;
// match: (ADDQconst [c] (ADDQconst [d] x))
// cond:
// result: (ADDQconst [c+d] x)
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64ADDQconst {
goto endd4cb539641f0dc40bfd0cb7fbb9b0405
}
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
v.Op = OpAMD64ADDQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c + d
v.AddArg(x)
return true
}
goto endd4cb539641f0dc40bfd0cb7fbb9b0405
endd4cb539641f0dc40bfd0cb7fbb9b0405:
;
case OpAMD64ADDW:
// match: (ADDW x (MOVWconst [c]))
// cond:
// result: (ADDWconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto end1aabd2317de77c7dfc4876fd7e4c5011
}
c := v.Args[1].AuxInt
v.Op = OpAMD64ADDWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end1aabd2317de77c7dfc4876fd7e4c5011
end1aabd2317de77c7dfc4876fd7e4c5011:
;
// match: (ADDW (MOVWconst [c]) x)
// cond:
// result: (ADDWconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVWconst {
goto ende3aede99966f388afc624f9e86676fd2
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64ADDWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto ende3aede99966f388afc624f9e86676fd2
ende3aede99966f388afc624f9e86676fd2:
;
// match: (ADDW x (NEGW y))
// cond:
// result: (SUBW x y)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64NEGW {
goto end55cf2af0d75f3ec413528eeb799e94d5
}
y := v.Args[1].Args[0]
v.Op = OpAMD64SUBW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end55cf2af0d75f3ec413528eeb799e94d5
end55cf2af0d75f3ec413528eeb799e94d5:
;
case OpAMD64ADDWconst:
// match: (ADDWconst [c] (MOVWconst [d]))
// cond:
// result: (MOVWconst [c+d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVWconst {
goto end32541920f2f5a920dfae41d8ebbef00f
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c + d
return true
}
goto end32541920f2f5a920dfae41d8ebbef00f
end32541920f2f5a920dfae41d8ebbef00f:
;
// match: (ADDWconst [c] (ADDWconst [d] x))
// cond:
// result: (ADDWconst [c+d] x)
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64ADDWconst {
goto end73944f6ddda7e4c050f11d17484ff9a5
}
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
v.Op = OpAMD64ADDWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c + d
v.AddArg(x)
return true
}
goto end73944f6ddda7e4c050f11d17484ff9a5
end73944f6ddda7e4c050f11d17484ff9a5:
;
case OpAMD64ANDB:
// match: (ANDB x (MOVLconst [c]))
// cond:
// result: (ANDBconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto end01100cd255396e29bfdb130f4fbc9bbc
}
c := v.Args[1].AuxInt
v.Op = OpAMD64ANDBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end01100cd255396e29bfdb130f4fbc9bbc
end01100cd255396e29bfdb130f4fbc9bbc:
;
// match: (ANDB (MOVLconst [c]) x)
// cond:
// result: (ANDBconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVLconst {
goto end70830ce2834dc5f8d786fa6789460926
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64ANDBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end70830ce2834dc5f8d786fa6789460926
end70830ce2834dc5f8d786fa6789460926:
;
// match: (ANDB x (MOVBconst [c]))
// cond:
// result: (ANDBconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto endd275ec2e73768cb3d201478fc934e06c
}
c := v.Args[1].AuxInt
v.Op = OpAMD64ANDBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endd275ec2e73768cb3d201478fc934e06c
endd275ec2e73768cb3d201478fc934e06c:
;
// match: (ANDB (MOVBconst [c]) x)
// cond:
// result: (ANDBconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVBconst {
goto end4068edac2ae0f354cf581db210288b98
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64ANDBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end4068edac2ae0f354cf581db210288b98
end4068edac2ae0f354cf581db210288b98:
;
// match: (ANDB x x)
// cond:
// result: x
{
x := v.Args[0]
if v.Args[1] != x {
goto endb8ff272a1456513da708603abe37541c
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto endb8ff272a1456513da708603abe37541c
endb8ff272a1456513da708603abe37541c:
;
case OpAMD64ANDBconst:
// match: (ANDBconst [c] _)
// cond: int8(c)==0
// result: (MOVBconst [0])
{
c := v.AuxInt
if !(int8(c) == 0) {
goto end2106d410c949da14d7c00041f40eca76
}
v.Op = OpAMD64MOVBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto end2106d410c949da14d7c00041f40eca76
end2106d410c949da14d7c00041f40eca76:
;
// match: (ANDBconst [c] x)
// cond: int8(c)==-1
// result: x
{
c := v.AuxInt
x := v.Args[0]
if !(int8(c) == -1) {
goto enda0b78503c204c8225de1433949a71fe4
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto enda0b78503c204c8225de1433949a71fe4
enda0b78503c204c8225de1433949a71fe4:
;
// match: (ANDBconst [c] (MOVBconst [d]))
// cond:
// result: (MOVBconst [c&d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVBconst {
goto end946312b1f216933da86febe293eb956f
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & d
return true
}
goto end946312b1f216933da86febe293eb956f
end946312b1f216933da86febe293eb956f:
;
case OpAMD64ANDL:
// match: (ANDL x (MOVLconst [c]))
// cond:
// result: (ANDLconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto end0a4c49d9a26759c0fd21369dafcd7abb
}
c := v.Args[1].AuxInt
v.Op = OpAMD64ANDLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end0a4c49d9a26759c0fd21369dafcd7abb
end0a4c49d9a26759c0fd21369dafcd7abb:
;
// match: (ANDL (MOVLconst [c]) x)
// cond:
// result: (ANDLconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVLconst {
goto end0529ba323d9b6f15c41add401ef67959
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64ANDLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end0529ba323d9b6f15c41add401ef67959
end0529ba323d9b6f15c41add401ef67959:
;
// match: (ANDL x x)
// cond:
// result: x
{
x := v.Args[0]
if v.Args[1] != x {
goto enddfb08a0d0c262854db3905cb323388c7
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto enddfb08a0d0c262854db3905cb323388c7
enddfb08a0d0c262854db3905cb323388c7:
;
case OpAMD64ANDLconst:
// match: (ANDLconst [c] _)
// cond: int32(c)==0
// result: (MOVLconst [0])
{
c := v.AuxInt
if !(int32(c) == 0) {
goto end5efb241208aef28c950b7bcf8d85d5de
}
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto end5efb241208aef28c950b7bcf8d85d5de
end5efb241208aef28c950b7bcf8d85d5de:
;
// match: (ANDLconst [c] x)
// cond: int32(c)==-1
// result: x
{
c := v.AuxInt
x := v.Args[0]
if !(int32(c) == -1) {
goto end0e852ae30bb8289d6ffee0c9267e3e0c
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto end0e852ae30bb8289d6ffee0c9267e3e0c
end0e852ae30bb8289d6ffee0c9267e3e0c:
;
// match: (ANDLconst [c] (MOVLconst [d]))
// cond:
// result: (MOVLconst [c&d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVLconst {
goto end7bfd24059369753eadd235f07e2dd7b8
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & d
return true
}
goto end7bfd24059369753eadd235f07e2dd7b8
end7bfd24059369753eadd235f07e2dd7b8:
;
case OpAMD64ANDQ:
// match: (ANDQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (ANDQconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end048fadc69e81103480015b84b9cafff7
}
c := v.Args[1].AuxInt
if !(is32Bit(c)) {
goto end048fadc69e81103480015b84b9cafff7
}
v.Op = OpAMD64ANDQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end048fadc69e81103480015b84b9cafff7
end048fadc69e81103480015b84b9cafff7:
;
// match: (ANDQ (MOVQconst [c]) x)
// cond: is32Bit(c)
// result: (ANDQconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVQconst {
goto end3035a3bf650b708705fd27dd857ab0a4
}
c := v.Args[0].AuxInt
x := v.Args[1]
if !(is32Bit(c)) {
goto end3035a3bf650b708705fd27dd857ab0a4
}
v.Op = OpAMD64ANDQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end3035a3bf650b708705fd27dd857ab0a4
end3035a3bf650b708705fd27dd857ab0a4:
;
// match: (ANDQ x x)
// cond:
// result: x
{
x := v.Args[0]
if v.Args[1] != x {
goto end06b5ec19efdd4e79f03a5e4a2c3c3427
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto end06b5ec19efdd4e79f03a5e4a2c3c3427
end06b5ec19efdd4e79f03a5e4a2c3c3427:
;
case OpAMD64ANDQconst:
// match: (ANDQconst [0] _)
// cond:
// result: (MOVQconst [0])
{
if v.AuxInt != 0 {
goto end57018c1d0f54fd721521095b4832bab2
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto end57018c1d0f54fd721521095b4832bab2
end57018c1d0f54fd721521095b4832bab2:
;
// match: (ANDQconst [-1] x)
// cond:
// result: x
{
if v.AuxInt != -1 {
goto endb542c4b42ab94a7bedb32dec8f610d67
}
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto endb542c4b42ab94a7bedb32dec8f610d67
endb542c4b42ab94a7bedb32dec8f610d67:
;
// match: (ANDQconst [c] (MOVQconst [d]))
// cond:
// result: (MOVQconst [c&d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVQconst {
goto end67ca66494705b0345a5f22c710225292
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & d
return true
}
goto end67ca66494705b0345a5f22c710225292
end67ca66494705b0345a5f22c710225292:
;
case OpAMD64ANDW:
// match: (ANDW x (MOVLconst [c]))
// cond:
// result: (ANDWconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto endce6f557823ee2fdd7a8f47b6f925fc7c
}
c := v.Args[1].AuxInt
v.Op = OpAMD64ANDWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endce6f557823ee2fdd7a8f47b6f925fc7c
endce6f557823ee2fdd7a8f47b6f925fc7c:
;
// match: (ANDW (MOVLconst [c]) x)
// cond:
// result: (ANDWconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVLconst {
goto endc46af0d9265c08b09f1f1fba24feda80
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64ANDWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endc46af0d9265c08b09f1f1fba24feda80
endc46af0d9265c08b09f1f1fba24feda80:
;
// match: (ANDW x (MOVWconst [c]))
// cond:
// result: (ANDWconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto enda77a39f65a5eb3436a5842eab69a3103
}
c := v.Args[1].AuxInt
v.Op = OpAMD64ANDWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto enda77a39f65a5eb3436a5842eab69a3103
enda77a39f65a5eb3436a5842eab69a3103:
;
// match: (ANDW (MOVWconst [c]) x)
// cond:
// result: (ANDWconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVWconst {
goto endea2a25eb525a5dbf6d5132d84ea4e7a5
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64ANDWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endea2a25eb525a5dbf6d5132d84ea4e7a5
endea2a25eb525a5dbf6d5132d84ea4e7a5:
;
// match: (ANDW x x)
// cond:
// result: x
{
x := v.Args[0]
if v.Args[1] != x {
goto end3a26cf52dd1b77f07cc9e005760dbb11
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto end3a26cf52dd1b77f07cc9e005760dbb11
end3a26cf52dd1b77f07cc9e005760dbb11:
;
case OpAMD64ANDWconst:
// match: (ANDWconst [c] _)
// cond: int16(c)==0
// result: (MOVWconst [0])
{
c := v.AuxInt
if !(int16(c) == 0) {
goto end336ece33b4f0fb44dfe1f24981df7b74
}
v.Op = OpAMD64MOVWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto end336ece33b4f0fb44dfe1f24981df7b74
end336ece33b4f0fb44dfe1f24981df7b74:
;
// match: (ANDWconst [c] x)
// cond: int16(c)==-1
// result: x
{
c := v.AuxInt
x := v.Args[0]
if !(int16(c) == -1) {
goto endfb111c3afa8c5c4040fa6000fadee810
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto endfb111c3afa8c5c4040fa6000fadee810
endfb111c3afa8c5c4040fa6000fadee810:
;
// match: (ANDWconst [c] (MOVWconst [d]))
// cond:
// result: (MOVWconst [c&d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVWconst {
goto end250eb27fcac10bf6c0d96ce66a21726e
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & d
return true
}
goto end250eb27fcac10bf6c0d96ce66a21726e
end250eb27fcac10bf6c0d96ce66a21726e:
;
case OpAdd16:
// match: (Add16 x y)
// cond:
// result: (ADDW x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ADDW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
// --- git blame annotation captured by the page scrape (not part of the generated source) ---
// [dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled
//
// For integer types less than a machine register, we have to decide what the
// invariants are for the high bits of the register. We used to set the high
// bits to the correct extension (sign or zero, as determined by the type) of
// the low bits. This CL makes the compiler ignore the high bits of the
// register altogether (they are junk).
//
// On the plus side, this means ops that generate subword results don't have
// to worry about correctly extending them. On the minus side, ops that
// consume subword arguments have to deal with the input registers not being
// correctly extended.
//
// For x86, this tradeoff is probably worth it. Almost all opcodes have
// versions that use only the correct subword piece of their inputs. (The one
// big exception is array indexing.) Not many opcodes can correctly sign
// extend on output.
//
// For other architectures, the tradeoff is probably not so clear, as they
// don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the
// high 16/48 bits). Fortunately we can decide whether we do this
// per-architecture.
//
// For the machine-independent opcodes, we pretend that the "register" size
// is equal to the type width, so sign extension is immaterial. Opcodes that
// care about the signedness of the input (e.g. compare, right shift) have
// two different variants.
//
// Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d
// Reviewed-on: https://go-review.googlesource.com/12600
// Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
// 2015-07-23 14:35:02 -07:00
goto ende604481c6de9fe4574cb2954ba2ddc67
ende604481c6de9fe4574cb2954ba2ddc67:
;
case OpAdd32:
// match: (Add32 x y)
// cond:
// result: (ADDL x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ADDL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
// --- git blame annotation captured by the page scrape (not part of the generated source) ---
// [dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled
//
// For integer types less than a machine register, we have to decide what the
// invariants are for the high bits of the register. We used to set the high
// bits to the correct extension (sign or zero, as determined by the type) of
// the low bits. This CL makes the compiler ignore the high bits of the
// register altogether (they are junk).
//
// On the plus side, this means ops that generate subword results don't have
// to worry about correctly extending them. On the minus side, ops that
// consume subword arguments have to deal with the input registers not being
// correctly extended.
//
// For x86, this tradeoff is probably worth it. Almost all opcodes have
// versions that use only the correct subword piece of their inputs. (The one
// big exception is array indexing.) Not many opcodes can correctly sign
// extend on output.
//
// For other architectures, the tradeoff is probably not so clear, as they
// don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the
// high 16/48 bits). Fortunately we can decide whether we do this
// per-architecture.
//
// For the machine-independent opcodes, we pretend that the "register" size
// is equal to the type width, so sign extension is immaterial. Opcodes that
// care about the signedness of the input (e.g. compare, right shift) have
// two different variants.
//
// Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d
// Reviewed-on: https://go-review.googlesource.com/12600
// Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
// 2015-07-23 14:35:02 -07:00
goto endc445ea2a65385445676cd684ae9a42b5
endc445ea2a65385445676cd684ae9a42b5:
;
case OpAdd32F:
// match: (Add32F x y)
// cond:
// result: (ADDSS x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ADDSS
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end5d82e1c10823774894c036b7c5b8fed4
end5d82e1c10823774894c036b7c5b8fed4:
;
case OpAdd64:
// match: (Add64 x y)
// cond:
// result: (ADDQ x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ADDQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endd88f18b3f39e3ccc201477a616f0abc0
endd88f18b3f39e3ccc201477a616f0abc0:
;
case OpAdd64F:
// match: (Add64F x y)
// cond:
// result: (ADDSD x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ADDSD
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end62f2de6c70abd214e6987ee37976653a
end62f2de6c70abd214e6987ee37976653a:
;
case OpAdd8:
// match: (Add8 x y)
// cond:
// result: (ADDB x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ADDB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
// --- git blame annotation captured by the page scrape (not part of the generated source) ---
// [dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled
//
// For integer types less than a machine register, we have to decide what the
// invariants are for the high bits of the register. We used to set the high
// bits to the correct extension (sign or zero, as determined by the type) of
// the low bits. This CL makes the compiler ignore the high bits of the
// register altogether (they are junk).
//
// On the plus side, this means ops that generate subword results don't have
// to worry about correctly extending them. On the minus side, ops that
// consume subword arguments have to deal with the input registers not being
// correctly extended.
//
// For x86, this tradeoff is probably worth it. Almost all opcodes have
// versions that use only the correct subword piece of their inputs. (The one
// big exception is array indexing.) Not many opcodes can correctly sign
// extend on output.
//
// For other architectures, the tradeoff is probably not so clear, as they
// don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the
// high 16/48 bits). Fortunately we can decide whether we do this
// per-architecture.
//
// For the machine-independent opcodes, we pretend that the "register" size
// is equal to the type width, so sign extension is immaterial. Opcodes that
// care about the signedness of the input (e.g. compare, right shift) have
// two different variants.
//
// Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d
// Reviewed-on: https://go-review.googlesource.com/12600
// Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
// 2015-07-23 14:35:02 -07:00
goto end6117c84a6b75c1b816b3fb095bc5f656
end6117c84a6b75c1b816b3fb095bc5f656:
;
case OpAddPtr:
// match: (AddPtr x y)
// cond:
// result: (ADDQ x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ADDQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto enda1d5640788c7157996f9d4af602dec1c
enda1d5640788c7157996f9d4af602dec1c:
;
case OpAddr:
// match: (Addr {sym} base)
// cond:
// result: (LEAQ {sym} base)
{
sym := v.Aux
base := v.Args[0]
v.Op = OpAMD64LEAQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Aux = sym
v.AddArg(base)
return true
}
goto end53cad0c3c9daa5575680e77c14e05e72
end53cad0c3c9daa5575680e77c14e05e72:
;
case OpAnd16:
// match: (And16 x y)
// cond:
// result: (ANDW x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
// --- git blame annotation captured by the page scrape (not part of the generated source) ---
// [dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled
//
// For integer types less than a machine register, we have to decide what the
// invariants are for the high bits of the register. We used to set the high
// bits to the correct extension (sign or zero, as determined by the type) of
// the low bits. This CL makes the compiler ignore the high bits of the
// register altogether (they are junk).
//
// On the plus side, this means ops that generate subword results don't have
// to worry about correctly extending them. On the minus side, ops that
// consume subword arguments have to deal with the input registers not being
// correctly extended.
//
// For x86, this tradeoff is probably worth it. Almost all opcodes have
// versions that use only the correct subword piece of their inputs. (The one
// big exception is array indexing.) Not many opcodes can correctly sign
// extend on output.
//
// For other architectures, the tradeoff is probably not so clear, as they
// don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the
// high 16/48 bits). Fortunately we can decide whether we do this
// per-architecture.
//
// For the machine-independent opcodes, we pretend that the "register" size
// is equal to the type width, so sign extension is immaterial. Opcodes that
// care about the signedness of the input (e.g. compare, right shift) have
// two different variants.
//
// Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d
// Reviewed-on: https://go-review.googlesource.com/12600
// Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
// 2015-07-23 14:35:02 -07:00
goto end1c01f04a173d86ce1a6d1ef59e753014
end1c01f04a173d86ce1a6d1ef59e753014:
;
case OpAnd32:
// match: (And32 x y)
// cond:
// result: (ANDL x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
// --- git blame annotation captured by the page scrape (not part of the generated source) ---
// [dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled
//
// For integer types less than a machine register, we have to decide what the
// invariants are for the high bits of the register. We used to set the high
// bits to the correct extension (sign or zero, as determined by the type) of
// the low bits. This CL makes the compiler ignore the high bits of the
// register altogether (they are junk).
//
// On the plus side, this means ops that generate subword results don't have
// to worry about correctly extending them. On the minus side, ops that
// consume subword arguments have to deal with the input registers not being
// correctly extended.
//
// For x86, this tradeoff is probably worth it. Almost all opcodes have
// versions that use only the correct subword piece of their inputs. (The one
// big exception is array indexing.) Not many opcodes can correctly sign
// extend on output.
//
// For other architectures, the tradeoff is probably not so clear, as they
// don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the
// high 16/48 bits). Fortunately we can decide whether we do this
// per-architecture.
//
// For the machine-independent opcodes, we pretend that the "register" size
// is equal to the type width, so sign extension is immaterial. Opcodes that
// care about the signedness of the input (e.g. compare, right shift) have
// two different variants.
//
// Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d
// Reviewed-on: https://go-review.googlesource.com/12600
// Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
// 2015-07-23 14:35:02 -07:00
goto end6b9eb9375b3a859028a6ba6bf6b8ec88
end6b9eb9375b3a859028a6ba6bf6b8ec88:
;
case OpAnd64:
// match: (And64 x y)
// cond:
// result: (ANDQ x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto enda0bde5853819d05fa2b7d3b723629552
enda0bde5853819d05fa2b7d3b723629552:
;
case OpAnd8:
// match: (And8 x y)
// cond:
// result: (ANDB x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
// --- git blame annotation captured by the page scrape (not part of the generated source) ---
// [dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled
//
// For integer types less than a machine register, we have to decide what the
// invariants are for the high bits of the register. We used to set the high
// bits to the correct extension (sign or zero, as determined by the type) of
// the low bits. This CL makes the compiler ignore the high bits of the
// register altogether (they are junk).
//
// On the plus side, this means ops that generate subword results don't have
// to worry about correctly extending them. On the minus side, ops that
// consume subword arguments have to deal with the input registers not being
// correctly extended.
//
// For x86, this tradeoff is probably worth it. Almost all opcodes have
// versions that use only the correct subword piece of their inputs. (The one
// big exception is array indexing.) Not many opcodes can correctly sign
// extend on output.
//
// For other architectures, the tradeoff is probably not so clear, as they
// don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the
// high 16/48 bits). Fortunately we can decide whether we do this
// per-architecture.
//
// For the machine-independent opcodes, we pretend that the "register" size
// is equal to the type width, so sign extension is immaterial. Opcodes that
// care about the signedness of the input (e.g. compare, right shift) have
// two different variants.
//
// Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d
// Reviewed-on: https://go-review.googlesource.com/12600
// Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
// 2015-07-23 14:35:02 -07:00
goto end0f53bee6291f1229b43aa1b5f977b4f2
end0f53bee6291f1229b43aa1b5f977b4f2:
;
case OpAMD64CMPB:
// match: (CMPB x (MOVBconst [c]))
// cond:
// result: (CMPBconst x [c])
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto end52190c0b8759133aa6c540944965c4c0
}
c := v.Args[1].AuxInt
v.Op = OpAMD64CMPBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AuxInt = c
return true
}
goto end52190c0b8759133aa6c540944965c4c0
end52190c0b8759133aa6c540944965c4c0:
;
// match: (CMPB (MOVBconst [c]) x)
// cond:
// result: (InvertFlags (CMPBconst <TypeFlags> x [c]))
{
if v.Args[0].Op != OpAMD64MOVBconst {
goto end6798593f4f9a27e90de089b3248187fd
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64InvertFlags
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AuxInt = c
v.AddArg(v0)
return true
}
goto end6798593f4f9a27e90de089b3248187fd
end6798593f4f9a27e90de089b3248187fd:
;
case OpAMD64CMPL:
// match: (CMPL x (MOVLconst [c]))
// cond:
// result: (CMPLconst x [c])
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto end49ff4559c4bdecb2aef0c905e2d9a6cf
}
c := v.Args[1].AuxInt
v.Op = OpAMD64CMPLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AuxInt = c
return true
}
goto end49ff4559c4bdecb2aef0c905e2d9a6cf
end49ff4559c4bdecb2aef0c905e2d9a6cf:
;
// match: (CMPL (MOVLconst [c]) x)
// cond:
// result: (InvertFlags (CMPLconst <TypeFlags> x [c]))
{
if v.Args[0].Op != OpAMD64MOVLconst {
goto end3c04e861f07a442be9e2f5e0e0d07cce
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64InvertFlags
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AuxInt = c
v.AddArg(v0)
return true
}
goto end3c04e861f07a442be9e2f5e0e0d07cce
end3c04e861f07a442be9e2f5e0e0d07cce:
;
case OpAMD64CMPQ:
// match: (CMPQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (CMPQconst x [c])
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end3bbb2c6caa57853a7561738ce3c0c630
}
c := v.Args[1].AuxInt
if !(is32Bit(c)) {
goto end3bbb2c6caa57853a7561738ce3c0c630
}
v.Op = OpAMD64CMPQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AuxInt = c
return true
}
goto end3bbb2c6caa57853a7561738ce3c0c630
end3bbb2c6caa57853a7561738ce3c0c630:
;
// match: (CMPQ (MOVQconst [c]) x)
// cond: is32Bit(c)
// result: (InvertFlags (CMPQconst <TypeFlags> x [c]))
{
if v.Args[0].Op != OpAMD64MOVQconst {
goto end5edbe48a495a51ecabd3b2c0ed44a3d3
}
c := v.Args[0].AuxInt
x := v.Args[1]
if !(is32Bit(c)) {
goto end5edbe48a495a51ecabd3b2c0ed44a3d3
}
v.Op = OpAMD64InvertFlags
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AuxInt = c
v.AddArg(v0)
return true
}
goto end5edbe48a495a51ecabd3b2c0ed44a3d3
end5edbe48a495a51ecabd3b2c0ed44a3d3:
;
case OpAMD64CMPW:
// match: (CMPW x (MOVWconst [c]))
// cond:
// result: (CMPWconst x [c])
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto end310a9ba58ac35c97587e08c63fe8a46c
}
c := v.Args[1].AuxInt
v.Op = OpAMD64CMPWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AuxInt = c
return true
}
goto end310a9ba58ac35c97587e08c63fe8a46c
end310a9ba58ac35c97587e08c63fe8a46c:
;
// match: (CMPW (MOVWconst [c]) x)
// cond:
// result: (InvertFlags (CMPWconst <TypeFlags> x [c]))
{
if v.Args[0].Op != OpAMD64MOVWconst {
goto end1ce191aaab0f4dd3b98dafdfbfac13ce
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64InvertFlags
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AuxInt = c
v.AddArg(v0)
return true
}
goto end1ce191aaab0f4dd3b98dafdfbfac13ce
end1ce191aaab0f4dd3b98dafdfbfac13ce:
;
case OpClosureCall:
// match: (ClosureCall [argwid] entry closure mem)
// cond:
// result: (CALLclosure [argwid] entry closure mem)
{
argwid := v.AuxInt
entry := v.Args[0]
closure := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64CALLclosure
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = argwid
v.AddArg(entry)
v.AddArg(closure)
v.AddArg(mem)
return true
}
goto endfd75d26316012d86cb71d0dd1214259b
endfd75d26316012d86cb71d0dd1214259b:
;
case OpCom16:
// match: (Com16 x)
// cond:
// result: (NOTW x)
{
x := v.Args[0]
v.Op = OpAMD64NOTW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end1b14ba8d7d7aa585ec0a211827f280ae
end1b14ba8d7d7aa585ec0a211827f280ae:
;
case OpCom32:
// match: (Com32 x)
// cond:
// result: (NOTL x)
{
x := v.Args[0]
v.Op = OpAMD64NOTL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end6eb124ba3bdb3fd6031414370852feb6
end6eb124ba3bdb3fd6031414370852feb6:
;
case OpCom64:
// match: (Com64 x)
// cond:
// result: (NOTQ x)
{
x := v.Args[0]
v.Op = OpAMD64NOTQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endf5f3b355a87779c347e305719dddda05
endf5f3b355a87779c347e305719dddda05:
;
case OpCom8:
// match: (Com8 x)
// cond:
// result: (NOTB x)
{
x := v.Args[0]
v.Op = OpAMD64NOTB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end1c7c5c055d663ccf1f05fbc4883030c6
end1c7c5c055d663ccf1f05fbc4883030c6:
;
case OpConst16:
// match: (Const16 [val])
// cond:
// result: (MOVWconst [val])
{
val := v.AuxInt
v.Op = OpAMD64MOVWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = val
return true
}
goto end2c6c92f297873b8ac12bd035d56d001e
end2c6c92f297873b8ac12bd035d56d001e:
;
case OpConst32:
// match: (Const32 [val])
// cond:
// result: (MOVLconst [val])
{
val := v.AuxInt
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = val
return true
}
goto enddae5807662af67143a3ac3ad9c63bae5
enddae5807662af67143a3ac3ad9c63bae5:
;
case OpConst32F:
// match: (Const32F {val})
// cond:
// result: (MOVSSconst {val})
{
val := v.Aux
v.Op = OpAMD64MOVSSconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Aux = val
return true
}
goto end30a68b43982e55971cc58f893ae2c04a
end30a68b43982e55971cc58f893ae2c04a:
;
case OpConst64:
// match: (Const64 [val])
// cond:
// result: (MOVQconst [val])
{
val := v.AuxInt
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = val
return true
}
goto endc630434ae7f143ab69d5f482a9b52b5f
endc630434ae7f143ab69d5f482a9b52b5f:
;
case OpConst64F:
// match: (Const64F {val})
// cond:
// result: (MOVSDconst {val})
{
val := v.Aux
v.Op = OpAMD64MOVSDconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Aux = val
return true
}
goto end958041a44a2ee8fc571cbc0832fad285
end958041a44a2ee8fc571cbc0832fad285:
;
case OpConst8:
// match: (Const8 [val])
// cond:
// result: (MOVBconst [val])
{
val := v.AuxInt
v.Op = OpAMD64MOVBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = val
return true
}
goto end200524c722ed14ca935ba47f8f30327d
end200524c722ed14ca935ba47f8f30327d:
;
case OpConstBool:
// match: (ConstBool {b})
// cond: !b.(bool)
// result: (MOVBconst [0])
{
b := v.Aux
if !(!b.(bool)) {
goto end876159ea073d2dcefcc251667c1a7780
}
v.Op = OpAMD64MOVBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto end876159ea073d2dcefcc251667c1a7780
end876159ea073d2dcefcc251667c1a7780:
;
// match: (ConstBool {b})
// cond: b.(bool)
// result: (MOVBconst [1])
{
b := v.Aux
if !(b.(bool)) {
goto end0dacad3f7cad53905aad5303391447f6
}
v.Op = OpAMD64MOVBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 1
return true
}
goto end0dacad3f7cad53905aad5303391447f6
end0dacad3f7cad53905aad5303391447f6:
;
case OpConstNil:
// match: (ConstNil)
// cond:
// result: (MOVQconst [0])
{
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto endea557d921056c25b945a49649e4b9b91
endea557d921056c25b945a49649e4b9b91:
;
case OpConstPtr:
// match: (ConstPtr [val])
// cond:
// result: (MOVQconst [val])
{
val := v.AuxInt
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = val
return true
}
goto endc395c0a53eeccf597e225a07b53047d1
endc395c0a53eeccf597e225a07b53047d1:
;
case OpDiv16:
// match: (Div16 x y)
// cond:
// result: (DIVW x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64DIVW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endb60a86e606726640c84d3e1e5a5ce890
endb60a86e606726640c84d3e1e5a5ce890:
;
case OpDiv16u:
// match: (Div16u x y)
// cond:
// result: (DIVWU x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64DIVWU
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end6af9e212a865593e506bfdf7db67c9ec
end6af9e212a865593e506bfdf7db67c9ec:
;
case OpDiv32:
// match: (Div32 x y)
// cond:
// result: (DIVL x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64DIVL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endf20ac71407e57c2904684d3cc33cf697
endf20ac71407e57c2904684d3cc33cf697:
;
case OpDiv32F:
// match: (Div32F x y)
// cond:
// result: (DIVSS x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64DIVSS
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto enddca0462c7b176c4138854d7d5627ab5b
enddca0462c7b176c4138854d7d5627ab5b:
;
case OpDiv32u:
// match: (Div32u x y)
// cond:
// result: (DIVLU x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64DIVLU
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto enda22604d23eeb1298008c97b817f60bbd
enda22604d23eeb1298008c97b817f60bbd:
;
case OpDiv64:
// match: (Div64 x y)
// cond:
// result: (DIVQ x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64DIVQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end86490d9b337333dfc09a413e1e0120a9
end86490d9b337333dfc09a413e1e0120a9:
;
case OpDiv64F:
// match: (Div64F x y)
// cond:
// result: (DIVSD x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64DIVSD
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end12299d76db5144a60f564d34ba97eb43
end12299d76db5144a60f564d34ba97eb43:
;
case OpDiv64u:
// match: (Div64u x y)
// cond:
// result: (DIVQU x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64DIVQU
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endf871d8b397e5fad6a5b500cc0c759a8d
endf871d8b397e5fad6a5b500cc0c759a8d:
;
case OpDiv8:
// match: (Div8 x y)
// cond:
// result: (DIVW (SignExt8to16 <config.Frontend().TypeInt16()> x) (SignExt8to16 <config.Frontend().TypeInt16()> y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64DIVW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpSignExt8to16, TypeInvalid)
v0.Type = config.Frontend().TypeInt16()
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpSignExt8to16, TypeInvalid)
v1.Type = config.Frontend().TypeInt16()
v1.AddArg(y)
v.AddArg(v1)
return true
}
goto ende25a7899b9c7a869f74226b4b6033084
ende25a7899b9c7a869f74226b4b6033084:
;
case OpDiv8u:
// match: (Div8u x y)
// cond:
// result: (DIVWU (ZeroExt8to16 <config.Frontend().TypeUInt16()> x) (ZeroExt8to16 <config.Frontend().TypeUInt16()> y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64DIVWU
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpZeroExt8to16, TypeInvalid)
v0.Type = config.Frontend().TypeUInt16()
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpZeroExt8to16, TypeInvalid)
v1.Type = config.Frontend().TypeUInt16()
v1.AddArg(y)
v.AddArg(v1)
return true
}
goto ende655b41d48feafc4d139b815a3b7b55c
ende655b41d48feafc4d139b815a3b7b55c:
;
case OpEq16:
// match: (Eq16 x y)
// cond:
// result: (SETEQ (CMPW <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETEQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end66a03470b5b3e8457ba205ccfcaccea6
end66a03470b5b3e8457ba205ccfcaccea6:
;
case OpEq32:
// match: (Eq32 x y)
// cond:
// result: (SETEQ (CMPL <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETEQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end4d77d0b016f93817fd6e5f60fa0e7ef2
end4d77d0b016f93817fd6e5f60fa0e7ef2:
;
case OpEq64:
// match: (Eq64 x y)
// cond:
// result: (SETEQ (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETEQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endae6c62e4e20b4f62694b6ee40dbd9211
endae6c62e4e20b4f62694b6ee40dbd9211:
;
case OpEq8:
// match: (Eq8 x y)
// cond:
// result: (SETEQ (CMPB <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETEQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end84a692e769900e3adbfe00718d2169e0
end84a692e769900e3adbfe00718d2169e0:
;
case OpEqPtr:
// match: (EqPtr x y)
// cond:
// result: (SETEQ (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETEQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end6de1d39c9d151e5e503d643bd835356e
end6de1d39c9d151e5e503d643bd835356e:
;
case OpGeq16:
// match: (Geq16 x y)
// cond:
// result: (SETGE (CMPW <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETGE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end26084bf821f9e418934fee812632b774
end26084bf821f9e418934fee812632b774:
;
case OpGeq16U:
// match: (Geq16U x y)
// cond:
// result: (SETAE (CMPW <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETAE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end20b00f850ca834cb2013414645c19ad9
end20b00f850ca834cb2013414645c19ad9:
;
case OpGeq32:
// match: (Geq32 x y)
// cond:
// result: (SETGE (CMPL <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETGE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end713c3dfa0f7247dcc232bcfc916fb044
end713c3dfa0f7247dcc232bcfc916fb044:
;
case OpGeq32U:
// match: (Geq32U x y)
// cond:
// result: (SETAE (CMPL <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETAE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endac2cde17ec6ab0107eabbda6407d1004
endac2cde17ec6ab0107eabbda6407d1004:
;
case OpGeq64:
// match: (Geq64 x y)
// cond:
// result: (SETGE (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETGE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end63f44e3fec8d92723b5bde42d6d7eea0
end63f44e3fec8d92723b5bde42d6d7eea0:
;
case OpGeq64U:
// match: (Geq64U x y)
// cond:
// result: (SETAE (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETAE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endd8d2d9faa19457f6a7b0635a756d234f
endd8d2d9faa19457f6a7b0635a756d234f:
;
case OpGeq8:
// match: (Geq8 x y)
// cond:
// result: (SETGE (CMPB <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETGE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endb5f40ee158007e675b2113c3ce962382
endb5f40ee158007e675b2113c3ce962382:
;
case OpGeq8U:
// match: (Geq8U x y)
// cond:
// result: (SETAE (CMPB <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETAE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endd30ee67afc0284c419cef70261f61452
endd30ee67afc0284c419cef70261f61452:
;
case OpGetG:
// match: (GetG)
// cond:
// result: (LoweredGetG)
{
v.Op = OpAMD64LoweredGetG
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
return true
}
goto endb17140e71dd641aa4d89e14479160260
endb17140e71dd641aa4d89e14479160260:
;
case OpGreater16:
// match: (Greater16 x y)
// cond:
// result: (SETG (CMPW <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETG
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end5bc9fdb7e563a6b949e42d721903cb58
end5bc9fdb7e563a6b949e42d721903cb58:
;
case OpGreater16U:
// match: (Greater16U x y)
// cond:
// result: (SETA (CMPW <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETA
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endd5b646f04fd839d11082a9ff6adb4a3f
endd5b646f04fd839d11082a9ff6adb4a3f:
;
case OpGreater32:
// match: (Greater32 x y)
// cond:
// result: (SETG (CMPL <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETG
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endbf0b2b1368aadff48969a7386eee5795
endbf0b2b1368aadff48969a7386eee5795:
;
case OpGreater32U:
// match: (Greater32U x y)
// cond:
// result: (SETA (CMPL <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETA
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end033c944272dc0af6fafe33f667cf7485
end033c944272dc0af6fafe33f667cf7485:
;
case OpGreater64:
// match: (Greater64 x y)
// cond:
// result: (SETG (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETG
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endaef0cfa5e27e23cf5e527061cf251069
endaef0cfa5e27e23cf5e527061cf251069:
;
case OpGreater64U:
// match: (Greater64U x y)
// cond:
// result: (SETA (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETA
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end2afc16a19fe1073dfa86770a78eba2b4
end2afc16a19fe1073dfa86770a78eba2b4:
;
case OpGreater8:
// match: (Greater8 x y)
// cond:
// result: (SETG (CMPB <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETG
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endbdb1e5f6b760cf02e0fc2f474622e6be
endbdb1e5f6b760cf02e0fc2f474622e6be:
;
case OpGreater8U:
// match: (Greater8U x y)
// cond:
// result: (SETA (CMPB <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETA
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end22eaafbcfe70447f79d9b3e6cc395bbd
end22eaafbcfe70447f79d9b3e6cc395bbd:
;
case OpHmul16:
// match: (Hmul16 x y)
// cond:
// result: (HMULW x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64HMULW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end1b9ff394bb3b06fc109637656b6875f5
end1b9ff394bb3b06fc109637656b6875f5:
;
case OpHmul16u:
// match: (Hmul16u x y)
// cond:
// result: (HMULWU x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64HMULWU
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endee9089e794a43f2ce1619a6ef61670f4
endee9089e794a43f2ce1619a6ef61670f4:
;
case OpHmul32:
// match: (Hmul32 x y)
// cond:
// result: (HMULL x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64HMULL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end7c83c91ef2634f0b1da4f49350b437b1
end7c83c91ef2634f0b1da4f49350b437b1:
;
case OpHmul32u:
// match: (Hmul32u x y)
// cond:
// result: (HMULLU x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64HMULLU
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end3c4f36611dc8815aa2a63d4ec0eaa06d
end3c4f36611dc8815aa2a63d4ec0eaa06d:
;
case OpHmul8:
// match: (Hmul8 x y)
// cond:
// result: (HMULB x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64HMULB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end51b2cc9f1ed15314e68fc81024f281a7
end51b2cc9f1ed15314e68fc81024f281a7:
;
case OpHmul8u:
// match: (Hmul8u x y)
// cond:
// result: (HMULBU x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64HMULBU
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto ende68d7b3a3c774cedc3522af9d635c39d
ende68d7b3a3c774cedc3522af9d635c39d:
;
case OpITab:
// match: (ITab (Load ptr mem))
// cond:
// result: (MOVQload ptr mem)
{
if v.Args[0].Op != OpLoad {
goto enda49fcae3630a097c78aa58189c90a97a
}
ptr := v.Args[0].Args[0]
mem := v.Args[0].Args[1]
v.Op = OpAMD64MOVQload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto enda49fcae3630a097c78aa58189c90a97a
enda49fcae3630a097c78aa58189c90a97a:
;
case OpIsInBounds:
// match: (IsInBounds idx len)
// cond:
// result: (SETB (CMPQ <TypeFlags> idx len))
{
idx := v.Args[0]
len := v.Args[1]
v.Op = OpAMD64SETB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(idx)
v0.AddArg(len)
v.AddArg(v0)
return true
}
goto endb51d371171154c0f1613b687757e0576
endb51d371171154c0f1613b687757e0576:
;
case OpIsNonNil:
// match: (IsNonNil p)
// cond:
// result: (SETNE (TESTQ <TypeFlags> p p))
{
p := v.Args[0]
v.Op = OpAMD64SETNE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(p)
v0.AddArg(p)
v.AddArg(v0)
return true
}
goto endff508c3726edfb573abc6128c177e76c
endff508c3726edfb573abc6128c177e76c:
;
case OpLeq16:
// match: (Leq16 x y)
// cond:
// result: (SETLE (CMPW <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETLE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endc1916dfcb3eae58ab237e40a57e1ff16
endc1916dfcb3eae58ab237e40a57e1ff16:
;
case OpLeq16U:
// match: (Leq16U x y)
// cond:
// result: (SETBE (CMPW <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETBE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end627e261aea217b5d17177b52711b8c82
end627e261aea217b5d17177b52711b8c82:
;
case OpLeq32:
// match: (Leq32 x y)
// cond:
// result: (SETLE (CMPL <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETLE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endf422ecc8da0033e22242de9c67112537
endf422ecc8da0033e22242de9c67112537:
;
case OpLeq32U:
// match: (Leq32U x y)
// cond:
// result: (SETBE (CMPL <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETBE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end1b39c9661896abdff8a29de509311b96
end1b39c9661896abdff8a29de509311b96:
;
case OpLeq64:
// match: (Leq64 x y)
// cond:
// result: (SETLE (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETLE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endf03da5e28dccdb4797671f39e824fb10
endf03da5e28dccdb4797671f39e824fb10:
;
case OpLeq64U:
// match: (Leq64U x y)
// cond:
// result: (SETBE (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETBE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end37302777dd91a5d0c6f410a5444ccb38
end37302777dd91a5d0c6f410a5444ccb38:
;
case OpLeq8:
// match: (Leq8 x y)
// cond:
// result: (SETLE (CMPB <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETLE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end03be536eea60fdd98d48b17681acaf5a
end03be536eea60fdd98d48b17681acaf5a:
;
case OpLeq8U:
// match: (Leq8U x y)
// cond:
// result: (SETBE (CMPB <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETBE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end661377f6745450bb1fa7fd0608ef0a86
end661377f6745450bb1fa7fd0608ef0a86:
;
case OpLess16:
// match: (Less16 x y)
// cond:
// result: (SETL (CMPW <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endeb09704ef62ba2695a967b6fcb42e562
endeb09704ef62ba2695a967b6fcb42e562:
;
case OpLess16U:
// match: (Less16U x y)
// cond:
// result: (SETB (CMPW <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end2209a57bd887f68ad732aa7da2bc7286
end2209a57bd887f68ad732aa7da2bc7286:
;
case OpLess32:
// match: (Less32 x y)
// cond:
// result: (SETL (CMPL <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end8da8d2030c0a323a84503c1240c566ae
end8da8d2030c0a323a84503c1240c566ae:
;
case OpLess32U:
// match: (Less32U x y)
// cond:
// result: (SETB (CMPL <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto enddcfbbb482eb194146f4f7c8f12029a7a
enddcfbbb482eb194146f4f7c8f12029a7a:
;
case OpLess64:
// match: (Less64 x y)
// cond:
// result: (SETL (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endf8e7a24c25692045bbcfd2c9356d1a8c
endf8e7a24c25692045bbcfd2c9356d1a8c:
;
case OpLess64U:
// match: (Less64U x y)
// cond:
// result: (SETB (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end2fac0a2c2e972b5e04b5062d5786b87d
end2fac0a2c2e972b5e04b5062d5786b87d:
;
case OpLess8:
// match: (Less8 x y)
// cond:
// result: (SETL (CMPB <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end445ad05f8d23dfecf246ce083f1ea167
end445ad05f8d23dfecf246ce083f1ea167:
;
case OpLess8U:
// match: (Less8U x y)
// cond:
// result: (SETB (CMPB <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end816d1dff858c45836dfa337262e04649
end816d1dff858c45836dfa337262e04649:
;
case OpLoad:
// match: (Load <t> ptr mem)
// cond: (is64BitInt(t) || isPtr(t))
// result: (MOVQload ptr mem)
{
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is64BitInt(t) || isPtr(t)) {
goto end7c4c53acf57ebc5f03273652ba1d5934
}
v.Op = OpAMD64MOVQload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto end7c4c53acf57ebc5f03273652ba1d5934
end7c4c53acf57ebc5f03273652ba1d5934:
;
// match: (Load <t> ptr mem)
// cond: is32BitInt(t)
// result: (MOVLload ptr mem)
{
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is32BitInt(t)) {
goto ende1cfcb15bfbcfd448ce303d0882a4057
}
v.Op = OpAMD64MOVLload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto ende1cfcb15bfbcfd448ce303d0882a4057
ende1cfcb15bfbcfd448ce303d0882a4057:
;
// match: (Load <t> ptr mem)
// cond: is16BitInt(t)
// result: (MOVWload ptr mem)
{
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is16BitInt(t)) {
goto end2d0a1304501ed9f4e9e2d288505a9c7c
}
v.Op = OpAMD64MOVWload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto end2d0a1304501ed9f4e9e2d288505a9c7c
end2d0a1304501ed9f4e9e2d288505a9c7c:
;
// match: (Load <t> ptr mem)
// cond: (t.IsBoolean() || is8BitInt(t))
// result: (MOVBload ptr mem)
{
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(t.IsBoolean() || is8BitInt(t)) {
goto end8f83bf72293670e75b22d6627bd13f0b
}
v.Op = OpAMD64MOVBload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto end8f83bf72293670e75b22d6627bd13f0b
end8f83bf72293670e75b22d6627bd13f0b:
;
// match: (Load <t> ptr mem)
// cond: is32BitFloat(t)
// result: (MOVSSload ptr mem)
{
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is32BitFloat(t)) {
goto end63383c4895805881aabceebea3c4c533
}
v.Op = OpAMD64MOVSSload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto end63383c4895805881aabceebea3c4c533
end63383c4895805881aabceebea3c4c533:
;
// match: (Load <t> ptr mem)
// cond: is64BitFloat(t)
// result: (MOVSDload ptr mem)
{
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is64BitFloat(t)) {
goto end99d0858c0a5bb72f0fe4decc748da812
}
v.Op = OpAMD64MOVSDload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto end99d0858c0a5bb72f0fe4decc748da812
end99d0858c0a5bb72f0fe4decc748da812:
;
	case OpLrot16:
		// match: (Lrot16 <t> x [c])
		// cond:
		// result: (ROLWconst <t> [c&15] x)
		// Constant left-rotates lower directly to the machine rotate op.
		// The count is masked to the operand width (here 0-15); the
		// Lrot32/Lrot64/Lrot8 cases below follow the same shape with
		// masks 31, 63, and 7 respectively.
		{
			t := v.Type
			x := v.Args[0]
			c := v.AuxInt
			v.Op = OpAMD64ROLWconst
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v.Type = t
			v.AuxInt = c & 15
			v.AddArg(x)
			return true
		}
		goto endb23dfa24c619d0068f925899d53ee7fd
	endb23dfa24c619d0068f925899d53ee7fd:
		;
	case OpLrot32:
		// match: (Lrot32 <t> x [c])
		// cond:
		// result: (ROLLconst <t> [c&31] x)
		{
			t := v.Type
			x := v.Args[0]
			c := v.AuxInt
			v.Op = OpAMD64ROLLconst
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v.Type = t
			v.AuxInt = c & 31
			v.AddArg(x)
			return true
		}
		goto end38b2215c011896c36845f72ecb72b1b0
	end38b2215c011896c36845f72ecb72b1b0:
		;
	case OpLrot64:
		// match: (Lrot64 <t> x [c])
		// cond:
		// result: (ROLQconst <t> [c&63] x)
		{
			t := v.Type
			x := v.Args[0]
			c := v.AuxInt
			v.Op = OpAMD64ROLQconst
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v.Type = t
			v.AuxInt = c & 63
			v.AddArg(x)
			return true
		}
		goto end5cb355e4f3ca387f252ef4f6a55f9f68
	end5cb355e4f3ca387f252ef4f6a55f9f68:
		;
	case OpLrot8:
		// match: (Lrot8 <t> x [c])
		// cond:
		// result: (ROLBconst <t> [c&7] x)
		{
			t := v.Type
			x := v.Args[0]
			c := v.AuxInt
			v.Op = OpAMD64ROLBconst
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v.Type = t
			v.AuxInt = c & 7
			v.AddArg(x)
			return true
		}
		goto end26bfb3dd5b537cf13ac9f2978d94ed71
	end26bfb3dd5b537cf13ac9f2978d94ed71:
		;
	case OpLsh16x16:
		// match: (Lsh16x16 <t> x y)
		// cond:
		// result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [16] y)))
		// All sixteen LshAxB cases below share one shape: emit the hardware
		// shift (SHLW/SHLL/SHLQ/SHLB), then AND its result with a mask built
		// from SBB{L,Q}carrymask over CMP{W,L,Q,B}const [width] y.  The mask
		// is all ones when the shift count y is below the operand width and
		// all zeros otherwise, which implements Go's semantics that a shift
		// by >= the width yields 0.  A = result width (16/32/64/8),
		// B = type of the shift count.
		{
			t := v.Type
			x := v.Args[0]
			y := v.Args[1]
			v.Op = OpAMD64ANDW
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v0 := b.NewValue0(v.Line, OpAMD64SHLW, TypeInvalid)
			v0.Type = t
			v0.AddArg(x)
			v0.AddArg(y)
			v.AddArg(v0)
			v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
			v1.Type = t
			v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
			v2.Type = TypeFlags
			v2.AuxInt = 16
			v2.AddArg(y)
			v1.AddArg(v2)
			v.AddArg(v1)
			return true
		}
		goto end5b63495f0e75ac68c4ce9d4afa1472d4
	end5b63495f0e75ac68c4ce9d4afa1472d4:
		;
	case OpLsh16x32:
		// match: (Lsh16x32 <t> x y)
		// cond:
		// result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [16] y)))
		{
			t := v.Type
			x := v.Args[0]
			y := v.Args[1]
			v.Op = OpAMD64ANDW
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v0 := b.NewValue0(v.Line, OpAMD64SHLW, TypeInvalid)
			v0.Type = t
			v0.AddArg(x)
			v0.AddArg(y)
			v.AddArg(v0)
			v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
			v1.Type = t
			v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
			v2.Type = TypeFlags
			v2.AuxInt = 16
			v2.AddArg(y)
			v1.AddArg(v2)
			v.AddArg(v1)
			return true
		}
		goto end6384dd9bdcec3046732d7347250d49f6
	end6384dd9bdcec3046732d7347250d49f6:
		;
	case OpLsh16x64:
		// match: (Lsh16x64 <t> x y)
		// cond:
		// result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [16] y)))
		{
			t := v.Type
			x := v.Args[0]
			y := v.Args[1]
			v.Op = OpAMD64ANDW
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v0 := b.NewValue0(v.Line, OpAMD64SHLW, TypeInvalid)
			v0.Type = t
			v0.AddArg(x)
			v0.AddArg(y)
			v.AddArg(v0)
			v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
			v1.Type = t
			v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
			v2.Type = TypeFlags
			v2.AuxInt = 16
			v2.AddArg(y)
			v1.AddArg(v2)
			v.AddArg(v1)
			return true
		}
		goto end0975ca28988350db0ad556c925d8af07
	end0975ca28988350db0ad556c925d8af07:
		;
	case OpLsh16x8:
		// match: (Lsh16x8 <t> x y)
		// cond:
		// result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [16] y)))
		{
			t := v.Type
			x := v.Args[0]
			y := v.Args[1]
			v.Op = OpAMD64ANDW
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v0 := b.NewValue0(v.Line, OpAMD64SHLW, TypeInvalid)
			v0.Type = t
			v0.AddArg(x)
			v0.AddArg(y)
			v.AddArg(v0)
			v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
			v1.Type = t
			v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
			v2.Type = TypeFlags
			v2.AuxInt = 16
			v2.AddArg(y)
			v1.AddArg(v2)
			v.AddArg(v1)
			return true
		}
		goto endd17c913707f29d59cfcb5d57d5f5c6ff
	endd17c913707f29d59cfcb5d57d5f5c6ff:
		;
	case OpLsh32x16:
		// match: (Lsh32x16 <t> x y)
		// cond:
		// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [32] y)))
		{
			t := v.Type
			x := v.Args[0]
			y := v.Args[1]
			v.Op = OpAMD64ANDL
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v0 := b.NewValue0(v.Line, OpAMD64SHLL, TypeInvalid)
			v0.Type = t
			v0.AddArg(x)
			v0.AddArg(y)
			v.AddArg(v0)
			v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
			v1.Type = t
			v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
			v2.Type = TypeFlags
			v2.AuxInt = 32
			v2.AddArg(y)
			v1.AddArg(v2)
			v.AddArg(v1)
			return true
		}
		goto end027b6f888054cc1dd8911fe16a6315a1
	end027b6f888054cc1dd8911fe16a6315a1:
		;
	case OpLsh32x32:
		// match: (Lsh32x32 <t> x y)
		// cond:
		// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [32] y)))
		{
			t := v.Type
			x := v.Args[0]
			y := v.Args[1]
			v.Op = OpAMD64ANDL
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v0 := b.NewValue0(v.Line, OpAMD64SHLL, TypeInvalid)
			v0.Type = t
			v0.AddArg(x)
			v0.AddArg(y)
			v.AddArg(v0)
			v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
			v1.Type = t
			v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
			v2.Type = TypeFlags
			v2.AuxInt = 32
			v2.AddArg(y)
			v1.AddArg(v2)
			v.AddArg(v1)
			return true
		}
		goto endbcc31e2bd8800d5ddb27c09d37f867b9
	endbcc31e2bd8800d5ddb27c09d37f867b9:
		;
	case OpLsh32x64:
		// match: (Lsh32x64 <t> x y)
		// cond:
		// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [32] y)))
		{
			t := v.Type
			x := v.Args[0]
			y := v.Args[1]
			v.Op = OpAMD64ANDL
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v0 := b.NewValue0(v.Line, OpAMD64SHLL, TypeInvalid)
			v0.Type = t
			v0.AddArg(x)
			v0.AddArg(y)
			v.AddArg(v0)
			v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
			v1.Type = t
			v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
			v2.Type = TypeFlags
			v2.AuxInt = 32
			v2.AddArg(y)
			v1.AddArg(v2)
			v.AddArg(v1)
			return true
		}
		goto end6797e3a3bbb0fe7eda819fe19a4d4b49
	end6797e3a3bbb0fe7eda819fe19a4d4b49:
		;
	case OpLsh32x8:
		// match: (Lsh32x8 <t> x y)
		// cond:
		// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [32] y)))
		{
			t := v.Type
			x := v.Args[0]
			y := v.Args[1]
			v.Op = OpAMD64ANDL
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v0 := b.NewValue0(v.Line, OpAMD64SHLL, TypeInvalid)
			v0.Type = t
			v0.AddArg(x)
			v0.AddArg(y)
			v.AddArg(v0)
			v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
			v1.Type = t
			v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
			v2.Type = TypeFlags
			v2.AuxInt = 32
			v2.AddArg(y)
			v1.AddArg(v2)
			v.AddArg(v1)
			return true
		}
		goto end7dd2c717933f46750e8a0871aab6fc63
	end7dd2c717933f46750e8a0871aab6fc63:
		;
	case OpLsh64x16:
		// match: (Lsh64x16 <t> x y)
		// cond:
		// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst <TypeFlags> [64] y)))
		{
			t := v.Type
			x := v.Args[0]
			y := v.Args[1]
			v.Op = OpAMD64ANDQ
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v0 := b.NewValue0(v.Line, OpAMD64SHLQ, TypeInvalid)
			v0.Type = t
			v0.AddArg(x)
			v0.AddArg(y)
			v.AddArg(v0)
			v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
			v1.Type = t
			v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
			v2.Type = TypeFlags
			v2.AuxInt = 64
			v2.AddArg(y)
			v1.AddArg(v2)
			v.AddArg(v1)
			return true
		}
		goto end3a2fda1dddb29e49f46ccde6f5397222
	end3a2fda1dddb29e49f46ccde6f5397222:
		;
	case OpLsh64x32:
		// match: (Lsh64x32 <t> x y)
		// cond:
		// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst <TypeFlags> [64] y)))
		{
			t := v.Type
			x := v.Args[0]
			y := v.Args[1]
			v.Op = OpAMD64ANDQ
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v0 := b.NewValue0(v.Line, OpAMD64SHLQ, TypeInvalid)
			v0.Type = t
			v0.AddArg(x)
			v0.AddArg(y)
			v.AddArg(v0)
			v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
			v1.Type = t
			v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
			v2.Type = TypeFlags
			v2.AuxInt = 64
			v2.AddArg(y)
			v1.AddArg(v2)
			v.AddArg(v1)
			return true
		}
		goto end147322aba732027ac2290fd8173d806a
	end147322aba732027ac2290fd8173d806a:
		;
	case OpLsh64x64:
		// match: (Lsh64x64 <t> x y)
		// cond:
		// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst <TypeFlags> [64] y)))
		{
			t := v.Type
			x := v.Args[0]
			y := v.Args[1]
			v.Op = OpAMD64ANDQ
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v0 := b.NewValue0(v.Line, OpAMD64SHLQ, TypeInvalid)
			v0.Type = t
			v0.AddArg(x)
			v0.AddArg(y)
			v.AddArg(v0)
			v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
			v1.Type = t
			v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
			v2.Type = TypeFlags
			v2.AuxInt = 64
			v2.AddArg(y)
			v1.AddArg(v2)
			v.AddArg(v1)
			return true
		}
		goto endeb8e78c9c960fa12e29ea07a8519649b
	endeb8e78c9c960fa12e29ea07a8519649b:
		;
	case OpLsh64x8:
		// match: (Lsh64x8 <t> x y)
		// cond:
		// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst <TypeFlags> [64] y)))
		{
			t := v.Type
			x := v.Args[0]
			y := v.Args[1]
			v.Op = OpAMD64ANDQ
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v0 := b.NewValue0(v.Line, OpAMD64SHLQ, TypeInvalid)
			v0.Type = t
			v0.AddArg(x)
			v0.AddArg(y)
			v.AddArg(v0)
			v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
			v1.Type = t
			v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
			v2.Type = TypeFlags
			v2.AuxInt = 64
			v2.AddArg(y)
			v1.AddArg(v2)
			v.AddArg(v1)
			return true
		}
		goto end42cdc11c34c81bbd5e8b4ad19ceec1ef
	end42cdc11c34c81bbd5e8b4ad19ceec1ef:
		;
	case OpLsh8x16:
		// match: (Lsh8x16 <t> x y)
		// cond:
		// result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [8] y)))
		{
			t := v.Type
			x := v.Args[0]
			y := v.Args[1]
			v.Op = OpAMD64ANDB
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v0 := b.NewValue0(v.Line, OpAMD64SHLB, TypeInvalid)
			v0.Type = t
			v0.AddArg(x)
			v0.AddArg(y)
			v.AddArg(v0)
			v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
			v1.Type = t
			v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
			v2.Type = TypeFlags
			v2.AuxInt = 8
			v2.AddArg(y)
			v1.AddArg(v2)
			v.AddArg(v1)
			return true
		}
		goto end60bf962bf5256e20b547e18e3c886aa5
	end60bf962bf5256e20b547e18e3c886aa5:
		;
	case OpLsh8x32:
		// match: (Lsh8x32 <t> x y)
		// cond:
		// result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [8] y)))
		{
			t := v.Type
			x := v.Args[0]
			y := v.Args[1]
			v.Op = OpAMD64ANDB
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v0 := b.NewValue0(v.Line, OpAMD64SHLB, TypeInvalid)
			v0.Type = t
			v0.AddArg(x)
			v0.AddArg(y)
			v.AddArg(v0)
			v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
			v1.Type = t
			v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
			v2.Type = TypeFlags
			v2.AuxInt = 8
			v2.AddArg(y)
			v1.AddArg(v2)
			v.AddArg(v1)
			return true
		}
		goto end8ed3445f6dbba1a87c80b140371445ce
	end8ed3445f6dbba1a87c80b140371445ce:
		;
	case OpLsh8x64:
		// match: (Lsh8x64 <t> x y)
		// cond:
		// result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [8] y)))
		{
			t := v.Type
			x := v.Args[0]
			y := v.Args[1]
			v.Op = OpAMD64ANDB
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v0 := b.NewValue0(v.Line, OpAMD64SHLB, TypeInvalid)
			v0.Type = t
			v0.AddArg(x)
			v0.AddArg(y)
			v.AddArg(v0)
			v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
			v1.Type = t
			v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
			v2.Type = TypeFlags
			v2.AuxInt = 8
			v2.AddArg(y)
			v1.AddArg(v2)
			v.AddArg(v1)
			return true
		}
		goto end0a03c9cc48ef1bfd74973de5f5fb02b0
	end0a03c9cc48ef1bfd74973de5f5fb02b0:
		;
	case OpLsh8x8:
		// match: (Lsh8x8 <t> x y)
		// cond:
		// result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [8] y)))
		{
			t := v.Type
			x := v.Args[0]
			y := v.Args[1]
			v.Op = OpAMD64ANDB
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v0 := b.NewValue0(v.Line, OpAMD64SHLB, TypeInvalid)
			v0.Type = t
			v0.AddArg(x)
			v0.AddArg(y)
			v.AddArg(v0)
			v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
			v1.Type = t
			v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
			v2.Type = TypeFlags
			v2.AuxInt = 8
			v2.AddArg(y)
			v1.AddArg(v2)
			v.AddArg(v1)
			return true
		}
		goto end781e3a47b186cf99fcb7137afd3432b9
	end781e3a47b186cf99fcb7137afd3432b9:
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled For integer types less than a machine register, we have to decide what the invariants are for the high bits of the register. We used to set the high bits to the correct extension (sign or zero, as determined by the type) of the low bits. This CL makes the compiler ignore the high bits of the register altogether (they are junk). On this plus side, this means ops that generate subword results don't have to worry about correctly extending them. On the minus side, ops that consume subword arguments have to deal with the input registers not being correctly extended. For x86, this tradeoff is probably worth it. Almost all opcodes have versions that use only the correct subword piece of their inputs. (The one big exception is array indexing.) Not many opcodes can correctly sign extend on output. For other architectures, the tradeoff is probably not so clear, as they don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the high 16/48 bits). Fortunately we can decide whether we do this per-architecture. For the machine-independent opcodes, we pretend that the "register" size is equal to the type width, so sign extension is immaterial. Opcodes that care about the signedness of the input (e.g. compare, right shift) have two different variants. Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d Reviewed-on: https://go-review.googlesource.com/12600 Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2015-07-23 14:35:02 -07:00
;
case OpAMD64MOVBQSX:
// match: (MOVBQSX (MOVBload [off] {sym} ptr mem))
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled For integer types less than a machine register, we have to decide what the invariants are for the high bits of the register. We used to set the high bits to the correct extension (sign or zero, as determined by the type) of the low bits. This CL makes the compiler ignore the high bits of the register altogether (they are junk). On this plus side, this means ops that generate subword results don't have to worry about correctly extending them. On the minus side, ops that consume subword arguments have to deal with the input registers not being correctly extended. For x86, this tradeoff is probably worth it. Almost all opcodes have versions that use only the correct subword piece of their inputs. (The one big exception is array indexing.) Not many opcodes can correctly sign extend on output. For other architectures, the tradeoff is probably not so clear, as they don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the high 16/48 bits). Fortunately we can decide whether we do this per-architecture. For the machine-independent opcodes, we pretend that the "register" size is equal to the type width, so sign extension is immaterial. Opcodes that care about the signedness of the input (e.g. compare, right shift) have two different variants. Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d Reviewed-on: https://go-review.googlesource.com/12600 Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2015-07-23 14:35:02 -07:00
// cond:
// result: (MOVBQSXload [off] {sym} ptr mem)
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled For integer types less than a machine register, we have to decide what the invariants are for the high bits of the register. We used to set the high bits to the correct extension (sign or zero, as determined by the type) of the low bits. This CL makes the compiler ignore the high bits of the register altogether (they are junk). On this plus side, this means ops that generate subword results don't have to worry about correctly extending them. On the minus side, ops that consume subword arguments have to deal with the input registers not being correctly extended. For x86, this tradeoff is probably worth it. Almost all opcodes have versions that use only the correct subword piece of their inputs. (The one big exception is array indexing.) Not many opcodes can correctly sign extend on output. For other architectures, the tradeoff is probably not so clear, as they don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the high 16/48 bits). Fortunately we can decide whether we do this per-architecture. For the machine-independent opcodes, we pretend that the "register" size is equal to the type width, so sign extension is immaterial. Opcodes that care about the signedness of the input (e.g. compare, right shift) have two different variants. Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d Reviewed-on: https://go-review.googlesource.com/12600 Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2015-07-23 14:35:02 -07:00
{
if v.Args[0].Op != OpAMD64MOVBload {
goto end9de452216bde3b2e2a2d01f43da1f78e
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled For integer types less than a machine register, we have to decide what the invariants are for the high bits of the register. We used to set the high bits to the correct extension (sign or zero, as determined by the type) of the low bits. This CL makes the compiler ignore the high bits of the register altogether (they are junk). On this plus side, this means ops that generate subword results don't have to worry about correctly extending them. On the minus side, ops that consume subword arguments have to deal with the input registers not being correctly extended. For x86, this tradeoff is probably worth it. Almost all opcodes have versions that use only the correct subword piece of their inputs. (The one big exception is array indexing.) Not many opcodes can correctly sign extend on output. For other architectures, the tradeoff is probably not so clear, as they don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the high 16/48 bits). Fortunately we can decide whether we do this per-architecture. For the machine-independent opcodes, we pretend that the "register" size is equal to the type width, so sign extension is immaterial. Opcodes that care about the signedness of the input (e.g. compare, right shift) have two different variants. Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d Reviewed-on: https://go-review.googlesource.com/12600 Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2015-07-23 14:35:02 -07:00
}
off := v.Args[0].AuxInt
sym := v.Args[0].Aux
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled For integer types less than a machine register, we have to decide what the invariants are for the high bits of the register. We used to set the high bits to the correct extension (sign or zero, as determined by the type) of the low bits. This CL makes the compiler ignore the high bits of the register altogether (they are junk). On this plus side, this means ops that generate subword results don't have to worry about correctly extending them. On the minus side, ops that consume subword arguments have to deal with the input registers not being correctly extended. For x86, this tradeoff is probably worth it. Almost all opcodes have versions that use only the correct subword piece of their inputs. (The one big exception is array indexing.) Not many opcodes can correctly sign extend on output. For other architectures, the tradeoff is probably not so clear, as they don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the high 16/48 bits). Fortunately we can decide whether we do this per-architecture. For the machine-independent opcodes, we pretend that the "register" size is equal to the type width, so sign extension is immaterial. Opcodes that care about the signedness of the input (e.g. compare, right shift) have two different variants. Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d Reviewed-on: https://go-review.googlesource.com/12600 Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2015-07-23 14:35:02 -07:00
ptr := v.Args[0].Args[0]
mem := v.Args[0].Args[1]
v.Op = OpAMD64MOVBQSXload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = off
v.Aux = sym
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled For integer types less than a machine register, we have to decide what the invariants are for the high bits of the register. We used to set the high bits to the correct extension (sign or zero, as determined by the type) of the low bits. This CL makes the compiler ignore the high bits of the register altogether (they are junk). On this plus side, this means ops that generate subword results don't have to worry about correctly extending them. On the minus side, ops that consume subword arguments have to deal with the input registers not being correctly extended. For x86, this tradeoff is probably worth it. Almost all opcodes have versions that use only the correct subword piece of their inputs. (The one big exception is array indexing.) Not many opcodes can correctly sign extend on output. For other architectures, the tradeoff is probably not so clear, as they don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the high 16/48 bits). Fortunately we can decide whether we do this per-architecture. For the machine-independent opcodes, we pretend that the "register" size is equal to the type width, so sign extension is immaterial. Opcodes that care about the signedness of the input (e.g. compare, right shift) have two different variants. Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d Reviewed-on: https://go-review.googlesource.com/12600 Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2015-07-23 14:35:02 -07:00
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto end9de452216bde3b2e2a2d01f43da1f78e
end9de452216bde3b2e2a2d01f43da1f78e:
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled For integer types less than a machine register, we have to decide what the invariants are for the high bits of the register. We used to set the high bits to the correct extension (sign or zero, as determined by the type) of the low bits. This CL makes the compiler ignore the high bits of the register altogether (they are junk). On this plus side, this means ops that generate subword results don't have to worry about correctly extending them. On the minus side, ops that consume subword arguments have to deal with the input registers not being correctly extended. For x86, this tradeoff is probably worth it. Almost all opcodes have versions that use only the correct subword piece of their inputs. (The one big exception is array indexing.) Not many opcodes can correctly sign extend on output. For other architectures, the tradeoff is probably not so clear, as they don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the high 16/48 bits). Fortunately we can decide whether we do this per-architecture. For the machine-independent opcodes, we pretend that the "register" size is equal to the type width, so sign extension is immaterial. Opcodes that care about the signedness of the input (e.g. compare, right shift) have two different variants. Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d Reviewed-on: https://go-review.googlesource.com/12600 Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2015-07-23 14:35:02 -07:00
;
case OpAMD64MOVBQZX:
// match: (MOVBQZX (MOVBload [off] {sym} ptr mem))
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled For integer types less than a machine register, we have to decide what the invariants are for the high bits of the register. We used to set the high bits to the correct extension (sign or zero, as determined by the type) of the low bits. This CL makes the compiler ignore the high bits of the register altogether (they are junk). On this plus side, this means ops that generate subword results don't have to worry about correctly extending them. On the minus side, ops that consume subword arguments have to deal with the input registers not being correctly extended. For x86, this tradeoff is probably worth it. Almost all opcodes have versions that use only the correct subword piece of their inputs. (The one big exception is array indexing.) Not many opcodes can correctly sign extend on output. For other architectures, the tradeoff is probably not so clear, as they don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the high 16/48 bits). Fortunately we can decide whether we do this per-architecture. For the machine-independent opcodes, we pretend that the "register" size is equal to the type width, so sign extension is immaterial. Opcodes that care about the signedness of the input (e.g. compare, right shift) have two different variants. Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d Reviewed-on: https://go-review.googlesource.com/12600 Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2015-07-23 14:35:02 -07:00
// cond:
// result: (MOVBQZXload [off] {sym} ptr mem)
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled For integer types less than a machine register, we have to decide what the invariants are for the high bits of the register. We used to set the high bits to the correct extension (sign or zero, as determined by the type) of the low bits. This CL makes the compiler ignore the high bits of the register altogether (they are junk). On this plus side, this means ops that generate subword results don't have to worry about correctly extending them. On the minus side, ops that consume subword arguments have to deal with the input registers not being correctly extended. For x86, this tradeoff is probably worth it. Almost all opcodes have versions that use only the correct subword piece of their inputs. (The one big exception is array indexing.) Not many opcodes can correctly sign extend on output. For other architectures, the tradeoff is probably not so clear, as they don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the high 16/48 bits). Fortunately we can decide whether we do this per-architecture. For the machine-independent opcodes, we pretend that the "register" size is equal to the type width, so sign extension is immaterial. Opcodes that care about the signedness of the input (e.g. compare, right shift) have two different variants. Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d Reviewed-on: https://go-review.googlesource.com/12600 Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2015-07-23 14:35:02 -07:00
{
if v.Args[0].Op != OpAMD64MOVBload {
goto end573f4e6a6fe8032338b85fddd4d1bab4
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled For integer types less than a machine register, we have to decide what the invariants are for the high bits of the register. We used to set the high bits to the correct extension (sign or zero, as determined by the type) of the low bits. This CL makes the compiler ignore the high bits of the register altogether (they are junk). On this plus side, this means ops that generate subword results don't have to worry about correctly extending them. On the minus side, ops that consume subword arguments have to deal with the input registers not being correctly extended. For x86, this tradeoff is probably worth it. Almost all opcodes have versions that use only the correct subword piece of their inputs. (The one big exception is array indexing.) Not many opcodes can correctly sign extend on output. For other architectures, the tradeoff is probably not so clear, as they don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the high 16/48 bits). Fortunately we can decide whether we do this per-architecture. For the machine-independent opcodes, we pretend that the "register" size is equal to the type width, so sign extension is immaterial. Opcodes that care about the signedness of the input (e.g. compare, right shift) have two different variants. Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d Reviewed-on: https://go-review.googlesource.com/12600 Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2015-07-23 14:35:02 -07:00
}
off := v.Args[0].AuxInt
sym := v.Args[0].Aux
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled For integer types less than a machine register, we have to decide what the invariants are for the high bits of the register. We used to set the high bits to the correct extension (sign or zero, as determined by the type) of the low bits. This CL makes the compiler ignore the high bits of the register altogether (they are junk). On this plus side, this means ops that generate subword results don't have to worry about correctly extending them. On the minus side, ops that consume subword arguments have to deal with the input registers not being correctly extended. For x86, this tradeoff is probably worth it. Almost all opcodes have versions that use only the correct subword piece of their inputs. (The one big exception is array indexing.) Not many opcodes can correctly sign extend on output. For other architectures, the tradeoff is probably not so clear, as they don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the high 16/48 bits). Fortunately we can decide whether we do this per-architecture. For the machine-independent opcodes, we pretend that the "register" size is equal to the type width, so sign extension is immaterial. Opcodes that care about the signedness of the input (e.g. compare, right shift) have two different variants. Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d Reviewed-on: https://go-review.googlesource.com/12600 Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2015-07-23 14:35:02 -07:00
ptr := v.Args[0].Args[0]
mem := v.Args[0].Args[1]
v.Op = OpAMD64MOVBQZXload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto end573f4e6a6fe8032338b85fddd4d1bab4
end573f4e6a6fe8032338b85fddd4d1bab4:
;
case OpAMD64MOVBload:
// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVBload [addOff(off1, off2)] {sym} ptr mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto end7ec9147ab863c1bd59190fed81f894b6
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
v.Op = OpAMD64MOVBload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled For integer types less than a machine register, we have to decide what the invariants are for the high bits of the register. We used to set the high bits to the correct extension (sign or zero, as determined by the type) of the low bits. This CL makes the compiler ignore the high bits of the register altogether (they are junk). On this plus side, this means ops that generate subword results don't have to worry about correctly extending them. On the minus side, ops that consume subword arguments have to deal with the input registers not being correctly extended. For x86, this tradeoff is probably worth it. Almost all opcodes have versions that use only the correct subword piece of their inputs. (The one big exception is array indexing.) Not many opcodes can correctly sign extend on output. For other architectures, the tradeoff is probably not so clear, as they don't have many subword-safe opcodes (e.g. 16-bit compare, ignoring the high 16/48 bits). Fortunately we can decide whether we do this per-architecture. For the machine-independent opcodes, we pretend that the "register" size is equal to the type width, so sign extension is immaterial. Opcodes that care about the signedness of the input (e.g. compare, right shift) have two different variants. Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d Reviewed-on: https://go-review.googlesource.com/12600 Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2015-07-23 14:35:02 -07:00
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto end7ec9147ab863c1bd59190fed81f894b6
end7ec9147ab863c1bd59190fed81f894b6:
;
// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVBload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ {
goto end3771a59cf66b0df99120d76f4c358fab
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
goto end3771a59cf66b0df99120d76f4c358fab
}
v.Op = OpAMD64MOVBload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
goto end3771a59cf66b0df99120d76f4c358fab
end3771a59cf66b0df99120d76f4c358fab:
;
	case OpAMD64MOVBstore:
		// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
		// cond:
		// result: (MOVBstore [off] {sym} ptr x mem)
		// The byte store writes only the low byte of the value, so a
		// preceding sign extension of it is irrelevant and can be dropped.
		{
			off := v.AuxInt
			sym := v.Aux
			ptr := v.Args[0]
			if v.Args[1].Op != OpAMD64MOVBQSX {
				goto end5b3f41f0770d566ff1647dea1d4a40e8
			}
			x := v.Args[1].Args[0]
			mem := v.Args[2]
			v.Op = OpAMD64MOVBstore
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v.AuxInt = off
			v.Aux = sym
			v.AddArg(ptr)
			v.AddArg(x)
			v.AddArg(mem)
			return true
		}
		goto end5b3f41f0770d566ff1647dea1d4a40e8
	end5b3f41f0770d566ff1647dea1d4a40e8:
		;
		// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
		// cond:
		// result: (MOVBstore [off] {sym} ptr x mem)
		// Same as above for a zero extension of the stored value.
		{
			off := v.AuxInt
			sym := v.Aux
			ptr := v.Args[0]
			if v.Args[1].Op != OpAMD64MOVBQZX {
				goto end3a2e55db7e03920700c4875f6a55de3b
			}
			x := v.Args[1].Args[0]
			mem := v.Args[2]
			v.Op = OpAMD64MOVBstore
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v.AuxInt = off
			v.Aux = sym
			v.AddArg(ptr)
			v.AddArg(x)
			v.AddArg(mem)
			return true
		}
		goto end3a2e55db7e03920700c4875f6a55de3b
	end3a2e55db7e03920700c4875f6a55de3b:
		;
		// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
		// cond:
		// result: (MOVBstore [addOff(off1, off2)] {sym} ptr val mem)
		// Fold a constant pointer adjustment into the store's offset.
		{
			off1 := v.AuxInt
			sym := v.Aux
			if v.Args[0].Op != OpAMD64ADDQconst {
				goto ende6347ac19d0469ee59d2e7f2e18d1070
			}
			off2 := v.Args[0].AuxInt
			ptr := v.Args[0].Args[0]
			val := v.Args[1]
			mem := v.Args[2]
			v.Op = OpAMD64MOVBstore
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v.AuxInt = addOff(off1, off2)
			v.Aux = sym
			v.AddArg(ptr)
			v.AddArg(val)
			v.AddArg(mem)
			return true
		}
		goto ende6347ac19d0469ee59d2e7f2e18d1070
	ende6347ac19d0469ee59d2e7f2e18d1070:
		;
		// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
		// cond: canMergeSym(sym1, sym2)
		// result: (MOVBstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
		// Fold a LEAQ address computation into the store when the two
		// symbols can be merged.
		{
			off1 := v.AuxInt
			sym1 := v.Aux
			if v.Args[0].Op != OpAMD64LEAQ {
				goto enda7086cf7f6b8cf81972e2c3d4b12f3fc
			}
			off2 := v.Args[0].AuxInt
			sym2 := v.Args[0].Aux
			base := v.Args[0].Args[0]
			val := v.Args[1]
			mem := v.Args[2]
			if !(canMergeSym(sym1, sym2)) {
				goto enda7086cf7f6b8cf81972e2c3d4b12f3fc
			}
			v.Op = OpAMD64MOVBstore
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v.AuxInt = addOff(off1, off2)
			v.Aux = mergeSym(sym1, sym2)
			v.AddArg(base)
			v.AddArg(val)
			v.AddArg(mem)
			return true
		}
		goto enda7086cf7f6b8cf81972e2c3d4b12f3fc
	enda7086cf7f6b8cf81972e2c3d4b12f3fc:
		;
case OpAMD64MOVLload:
// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVLload [addOff(off1, off2)] {sym} ptr mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto end0c8b8a40360c5c581d92723eca04d340
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
v.Op = OpAMD64MOVLload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto end0c8b8a40360c5c581d92723eca04d340
end0c8b8a40360c5c581d92723eca04d340:
;
// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVLload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ {
goto enddb9e59335876d8a565c425731438a1b3
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
goto enddb9e59335876d8a565c425731438a1b3
}
v.Op = OpAMD64MOVLload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
goto enddb9e59335876d8a565c425731438a1b3
enddb9e59335876d8a565c425731438a1b3:
;
case OpAMD64MOVLstore:
// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
// cond:
// result: (MOVLstore [off] {sym} ptr x mem)
{
off := v.AuxInt
sym := v.Aux
ptr := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLQSX {
goto end1fb7b2ae707c76d30927c21f85d77472
}
x := v.Args[1].Args[0]
mem := v.Args[2]
v.Op = OpAMD64MOVLstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(x)
v.AddArg(mem)
return true
}
goto end1fb7b2ae707c76d30927c21f85d77472
end1fb7b2ae707c76d30927c21f85d77472:
;
// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
// cond:
// result: (MOVLstore [off] {sym} ptr x mem)
{
off := v.AuxInt
sym := v.Aux
ptr := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLQZX {
goto end199e8c23a5e7e99728a43d6a83b2c2cf
}
x := v.Args[1].Args[0]
mem := v.Args[2]
v.Op = OpAMD64MOVLstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(x)
v.AddArg(mem)
return true
}
goto end199e8c23a5e7e99728a43d6a83b2c2cf
end199e8c23a5e7e99728a43d6a83b2c2cf:
;
// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond:
// result: (MOVLstore [addOff(off1, off2)] {sym} ptr val mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto end43bffdb8d9c1fc85a95778d4911955f1
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64MOVLstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end43bffdb8d9c1fc85a95778d4911955f1
end43bffdb8d9c1fc85a95778d4911955f1:
;
// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVLstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ {
goto endd57b1e4313fc7a3331340a9af00ba116
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
goto endd57b1e4313fc7a3331340a9af00ba116
}
v.Op = OpAMD64MOVLstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto endd57b1e4313fc7a3331340a9af00ba116
endd57b1e4313fc7a3331340a9af00ba116:
;
case OpAMD64MOVQload:
// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVQload [addOff(off1, off2)] {sym} ptr mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto end0b8c50dd7faefb7d046f9a27e054df77
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
v.Op = OpAMD64MOVQload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto end0b8c50dd7faefb7d046f9a27e054df77
end0b8c50dd7faefb7d046f9a27e054df77:
;
// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVQload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ {
goto endd0c093adc4f05f2037005734c77d3cc4
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
goto endd0c093adc4f05f2037005734c77d3cc4
}
v.Op = OpAMD64MOVQload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
goto endd0c093adc4f05f2037005734c77d3cc4
endd0c093adc4f05f2037005734c77d3cc4:
;
// match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVQloadidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ8 {
goto end74a50d810fb3945e809f608cd094a59c
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
goto end74a50d810fb3945e809f608cd094a59c
}
v.Op = OpAMD64MOVQloadidx8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(mem)
return true
}
goto end74a50d810fb3945e809f608cd094a59c
end74a50d810fb3945e809f608cd094a59c:
;
case OpAMD64MOVQloadidx8:
// match: (MOVQloadidx8 [off1] {sym} (ADDQconst [off2] ptr) idx mem)
// cond:
// result: (MOVQloadidx8 [addOff(off1, off2)] {sym} ptr idx mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto endb138bf9b0b33ec824bf0aff619f8bafa
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
idx := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64MOVQloadidx8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(mem)
return true
}
goto endb138bf9b0b33ec824bf0aff619f8bafa
endb138bf9b0b33ec824bf0aff619f8bafa:
;
case OpAMD64MOVQstore:
// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond:
// result: (MOVQstore [addOff(off1, off2)] {sym} ptr val mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto end0a110b5e42a4576c32fda50590092848
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64MOVQstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end0a110b5e42a4576c32fda50590092848
end0a110b5e42a4576c32fda50590092848:
;
// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVQstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ {
goto end9a0cfe20b3b0f587e252760907c1b5c0
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
goto end9a0cfe20b3b0f587e252760907c1b5c0
}
v.Op = OpAMD64MOVQstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end9a0cfe20b3b0f587e252760907c1b5c0
end9a0cfe20b3b0f587e252760907c1b5c0:
;
// match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVQstoreidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ8 {
goto end442c322e6719e280b6be1c12858e49d7
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
goto end442c322e6719e280b6be1c12858e49d7
}
v.Op = OpAMD64MOVQstoreidx8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end442c322e6719e280b6be1c12858e49d7
end442c322e6719e280b6be1c12858e49d7:
;
case OpAMD64MOVQstoreidx8:
// match: (MOVQstoreidx8 [off1] {sym} (ADDQconst [off2] ptr) idx val mem)
// cond:
// result: (MOVQstoreidx8 [addOff(off1, off2)] {sym} ptr idx val mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto end50671766fdab364c1edbd2072fb8e525
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
v.Op = OpAMD64MOVQstoreidx8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end50671766fdab364c1edbd2072fb8e525
end50671766fdab364c1edbd2072fb8e525:
;
case OpAMD64MOVSDload:
// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVSDload [addOff(off1, off2)] {sym} ptr mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto end6dad9bf78e7368bb095eb2dfba7e244a
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
v.Op = OpAMD64MOVSDload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto end6dad9bf78e7368bb095eb2dfba7e244a
end6dad9bf78e7368bb095eb2dfba7e244a:
;
// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVSDload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ {
goto end96fa9c439e31050aa91582bc2a9f2c20
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
goto end96fa9c439e31050aa91582bc2a9f2c20
}
v.Op = OpAMD64MOVSDload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
goto end96fa9c439e31050aa91582bc2a9f2c20
end96fa9c439e31050aa91582bc2a9f2c20:
;
// match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVSDloadidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ8 {
goto endbcb2ce441824d0e3a4b501018cfa7f60
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
goto endbcb2ce441824d0e3a4b501018cfa7f60
}
v.Op = OpAMD64MOVSDloadidx8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(mem)
return true
}
goto endbcb2ce441824d0e3a4b501018cfa7f60
endbcb2ce441824d0e3a4b501018cfa7f60:
;
case OpAMD64MOVSDloadidx8:
// match: (MOVSDloadidx8 [off1] {sym} (ADDQconst [off2] {sym} ptr) idx mem)
// cond:
// result: (MOVSDloadidx8 [addOff(off1, off2)] {sym} ptr idx mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto end84f0f457e271104a92343e3b1d2804c6
}
off2 := v.Args[0].AuxInt
if v.Args[0].Aux != v.Aux {
goto end84f0f457e271104a92343e3b1d2804c6
}
ptr := v.Args[0].Args[0]
idx := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64MOVSDloadidx8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(mem)
return true
}
goto end84f0f457e271104a92343e3b1d2804c6
end84f0f457e271104a92343e3b1d2804c6:
;
case OpAMD64MOVSDstore:
// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond:
// result: (MOVSDstore [addOff(off1, off2)] {sym} ptr val mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto end6c6160664143cc66e63e67b9aa43a7ef
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64MOVSDstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end6c6160664143cc66e63e67b9aa43a7ef
end6c6160664143cc66e63e67b9aa43a7ef:
;
// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVSDstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ {
goto end415dde14f3400bec1b2756174a5d7179
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
goto end415dde14f3400bec1b2756174a5d7179
}
v.Op = OpAMD64MOVSDstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end415dde14f3400bec1b2756174a5d7179
end415dde14f3400bec1b2756174a5d7179:
;
// match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVSDstoreidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ8 {
goto end1ad6fc0c5b59610dabf7f9595a48a230
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
goto end1ad6fc0c5b59610dabf7f9595a48a230
}
v.Op = OpAMD64MOVSDstoreidx8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end1ad6fc0c5b59610dabf7f9595a48a230
end1ad6fc0c5b59610dabf7f9595a48a230:
;
case OpAMD64MOVSDstoreidx8:
// match: (MOVSDstoreidx8 [off1] {sym} (ADDQconst [off2] {sym} ptr) idx val mem)
// cond:
// result: (MOVSDstoreidx8 [addOff(off1, off2)] {sym} ptr idx val mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto endc0e28f57697cb6038d5d09eafe26c947
}
off2 := v.Args[0].AuxInt
if v.Args[0].Aux != v.Aux {
goto endc0e28f57697cb6038d5d09eafe26c947
}
ptr := v.Args[0].Args[0]
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
v.Op = OpAMD64MOVSDstoreidx8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto endc0e28f57697cb6038d5d09eafe26c947
endc0e28f57697cb6038d5d09eafe26c947:
;
case OpAMD64MOVSSload:
// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVSSload [addOff(off1, off2)] {sym} ptr mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto end96d63dbb64b0adfa944684c9e939c972
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
v.Op = OpAMD64MOVSSload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto end96d63dbb64b0adfa944684c9e939c972
end96d63dbb64b0adfa944684c9e939c972:
;
// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVSSload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ {
goto end15f2583bd72ad7fc077b3952634a1c85
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
goto end15f2583bd72ad7fc077b3952634a1c85
}
v.Op = OpAMD64MOVSSload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
goto end15f2583bd72ad7fc077b3952634a1c85
end15f2583bd72ad7fc077b3952634a1c85:
;
// match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVSSloadidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ4 {
goto end49722f4a0adba31bb143601ce1d2aae0
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
goto end49722f4a0adba31bb143601ce1d2aae0
}
v.Op = OpAMD64MOVSSloadidx4
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(mem)
return true
}
goto end49722f4a0adba31bb143601ce1d2aae0
end49722f4a0adba31bb143601ce1d2aae0:
;
case OpAMD64MOVSSloadidx4:
// match: (MOVSSloadidx4 [off1] {sym} (ADDQconst [off2] {sym} ptr) idx mem)
// cond:
// result: (MOVSSloadidx4 [addOff(off1, off2)] {sym} ptr idx mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto end7eb5a1ab1e2508683d879ec25286754b
}
off2 := v.Args[0].AuxInt
if v.Args[0].Aux != v.Aux {
goto end7eb5a1ab1e2508683d879ec25286754b
}
ptr := v.Args[0].Args[0]
idx := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64MOVSSloadidx4
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(mem)
return true
}
goto end7eb5a1ab1e2508683d879ec25286754b
end7eb5a1ab1e2508683d879ec25286754b:
;
case OpAMD64MOVSSstore:
// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond:
// result: (MOVSSstore [addOff(off1, off2)] {sym} ptr val mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto endf711aa4081a9b2924b55387d4f70cfd6
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64MOVSSstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto endf711aa4081a9b2924b55387d4f70cfd6
endf711aa4081a9b2924b55387d4f70cfd6:
;
// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVSSstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ {
goto end70ebc170131920e515e3f416a6b952c5
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
goto end70ebc170131920e515e3f416a6b952c5
}
v.Op = OpAMD64MOVSSstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end70ebc170131920e515e3f416a6b952c5
end70ebc170131920e515e3f416a6b952c5:
;
// match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVSSstoreidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ4 {
goto end1622dc435e45833eda4d29d44df7cc34
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
goto end1622dc435e45833eda4d29d44df7cc34
}
v.Op = OpAMD64MOVSSstoreidx4
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end1622dc435e45833eda4d29d44df7cc34
end1622dc435e45833eda4d29d44df7cc34:
;
case OpAMD64MOVSSstoreidx4:
// match: (MOVSSstoreidx4 [off1] {sym} (ADDQconst [off2] {sym} ptr) idx val mem)
// cond:
// result: (MOVSSstoreidx4 [addOff(off1, off2)] {sym} ptr idx val mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto end66e4853026306cd46f414c22d281254f
}
off2 := v.Args[0].AuxInt
if v.Args[0].Aux != v.Aux {
goto end66e4853026306cd46f414c22d281254f
}
ptr := v.Args[0].Args[0]
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
v.Op = OpAMD64MOVSSstoreidx4
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end66e4853026306cd46f414c22d281254f
end66e4853026306cd46f414c22d281254f:
;
case OpAMD64MOVWload:
// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVWload [addOff(off1, off2)] {sym} ptr mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto endfcb0ce76f96e8b0c2eb19a9b827c1b73
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
v.Op = OpAMD64MOVWload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto endfcb0ce76f96e8b0c2eb19a9b827c1b73
endfcb0ce76f96e8b0c2eb19a9b827c1b73:
;
// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVWload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ {
goto end7a79314cb49bf53d79c38c3077d87457
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
goto end7a79314cb49bf53d79c38c3077d87457
}
v.Op = OpAMD64MOVWload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
goto end7a79314cb49bf53d79c38c3077d87457
end7a79314cb49bf53d79c38c3077d87457:
;
case OpAMD64MOVWstore:
// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
// cond:
// result: (MOVWstore [off] {sym} ptr x mem)
{
off := v.AuxInt
sym := v.Aux
ptr := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWQSX {
goto endca90c534e75c7f5cb803504d119a853f
}
x := v.Args[1].Args[0]
mem := v.Args[2]
v.Op = OpAMD64MOVWstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(x)
v.AddArg(mem)
return true
}
goto endca90c534e75c7f5cb803504d119a853f
endca90c534e75c7f5cb803504d119a853f:
;
// match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
// cond:
// result: (MOVWstore [off] {sym} ptr x mem)
{
off := v.AuxInt
sym := v.Aux
ptr := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWQZX {
goto end187fe73dfaf9cf5f4c349283b4dfd9d1
}
x := v.Args[1].Args[0]
mem := v.Args[2]
v.Op = OpAMD64MOVWstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(x)
v.AddArg(mem)
return true
}
goto end187fe73dfaf9cf5f4c349283b4dfd9d1
end187fe73dfaf9cf5f4c349283b4dfd9d1:
;
// match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond:
// result: (MOVWstore [addOff(off1, off2)] {sym} ptr val mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto endda15fdd59aa956ded0440188f38de1aa
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64MOVWstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto endda15fdd59aa956ded0440188f38de1aa
endda15fdd59aa956ded0440188f38de1aa:
;
// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVWstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ {
goto end4cc466ede8e64e415c899ccac81c0f27
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
goto end4cc466ede8e64e415c899ccac81c0f27
}
v.Op = OpAMD64MOVWstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end4cc466ede8e64e415c899ccac81c0f27
end4cc466ede8e64e415c899ccac81c0f27:
;
case OpAMD64MULB:
// match: (MULB x (MOVBconst [c]))
// cond:
// result: (MULBconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto end66c6419213ddeb52b1c53fb589a70e5f
}
c := v.Args[1].AuxInt
v.Op = OpAMD64MULBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end66c6419213ddeb52b1c53fb589a70e5f
end66c6419213ddeb52b1c53fb589a70e5f:
;
// match: (MULB (MOVBconst [c]) x)
// cond:
// result: (MULBconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVBconst {
goto end7e82c8dbbba265b78035ca7df394bb06
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64MULBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end7e82c8dbbba265b78035ca7df394bb06
end7e82c8dbbba265b78035ca7df394bb06:
;
case OpAMD64MULBconst:
// match: (MULBconst [c] (MOVBconst [d]))
// cond:
// result: (MOVBconst [c*d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVBconst {
goto endf2db9f96016085f8cb4082b4af01b2aa
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c * d
return true
}
goto endf2db9f96016085f8cb4082b4af01b2aa
endf2db9f96016085f8cb4082b4af01b2aa:
;
case OpAMD64MULL:
// match: (MULL x (MOVLconst [c]))
// cond:
// result: (MULLconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto end893477a261bcad6c2821b77c83075c6c
}
c := v.Args[1].AuxInt
v.Op = OpAMD64MULLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end893477a261bcad6c2821b77c83075c6c
end893477a261bcad6c2821b77c83075c6c:
;
// match: (MULL (MOVLconst [c]) x)
// cond:
// result: (MULLconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVLconst {
goto end8a0f957c528a54eecb0dbfc5d96e017a
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64MULLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end8a0f957c528a54eecb0dbfc5d96e017a
end8a0f957c528a54eecb0dbfc5d96e017a:
;
case OpAMD64MULLconst:
// match: (MULLconst [c] (MOVLconst [d]))
// cond:
// result: (MOVLconst [c*d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVLconst {
goto endd5732835ed1276ef8b728bcfc1289f73
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c * d
return true
}
goto endd5732835ed1276ef8b728bcfc1289f73
endd5732835ed1276ef8b728bcfc1289f73:
;
case OpAMD64MULQ:
// match: (MULQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (MULQconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto endb38c6e3e0ddfa25ba0ef9684ac1528c0
}
c := v.Args[1].AuxInt
if !(is32Bit(c)) {
goto endb38c6e3e0ddfa25ba0ef9684ac1528c0
}
v.Op = OpAMD64MULQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endb38c6e3e0ddfa25ba0ef9684ac1528c0
endb38c6e3e0ddfa25ba0ef9684ac1528c0:
;
// match: (MULQ (MOVQconst [c]) x)
// cond: is32Bit(c)
// result: (MULQconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVQconst {
goto end9cb4f29b0bd7141639416735dcbb3b87
}
c := v.Args[0].AuxInt
x := v.Args[1]
if !(is32Bit(c)) {
goto end9cb4f29b0bd7141639416735dcbb3b87
}
v.Op = OpAMD64MULQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end9cb4f29b0bd7141639416735dcbb3b87
end9cb4f29b0bd7141639416735dcbb3b87:
;
case OpAMD64MULQconst:
// match: (MULQconst [-1] x)
// cond:
// result: (NEGQ x)
{
if v.AuxInt != -1 {
goto end82501cca6b5fb121a7f8b197e55f2fec
}
x := v.Args[0]
v.Op = OpAMD64NEGQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end82501cca6b5fb121a7f8b197e55f2fec
end82501cca6b5fb121a7f8b197e55f2fec:
;
// match: (MULQconst [0] _)
// cond:
// result: (MOVQconst [0])
{
if v.AuxInt != 0 {
goto endcb9faa068e3558ff44daaf1d47d091b5
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto endcb9faa068e3558ff44daaf1d47d091b5
endcb9faa068e3558ff44daaf1d47d091b5:
;
// match: (MULQconst [1] x)
// cond:
// result: x
{
if v.AuxInt != 1 {
goto end0b527e71db2b288b2841a1f757aa580d
}
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto end0b527e71db2b288b2841a1f757aa580d
end0b527e71db2b288b2841a1f757aa580d:
;
// match: (MULQconst [3] x)
// cond:
// result: (LEAQ2 x x)
{
if v.AuxInt != 3 {
goto end34a86f261671b5852bec6c57155fe0da
}
x := v.Args[0]
v.Op = OpAMD64LEAQ2
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(x)
return true
}
goto end34a86f261671b5852bec6c57155fe0da
end34a86f261671b5852bec6c57155fe0da:
;
// match: (MULQconst [5] x)
// cond:
// result: (LEAQ4 x x)
{
if v.AuxInt != 5 {
goto end534601906c45a9171a9fec3e4b82b189
}
x := v.Args[0]
v.Op = OpAMD64LEAQ4
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(x)
return true
}
goto end534601906c45a9171a9fec3e4b82b189
end534601906c45a9171a9fec3e4b82b189:
;
// match: (MULQconst [9] x)
// cond:
// result: (LEAQ8 x x)
{
if v.AuxInt != 9 {
goto end48a2280b6459821289c56073b8354997
}
x := v.Args[0]
v.Op = OpAMD64LEAQ8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(x)
return true
}
goto end48a2280b6459821289c56073b8354997
end48a2280b6459821289c56073b8354997:
;
// match: (MULQconst [c] x)
// cond: isPowerOfTwo(c)
// result: (SHLQconst [log2(c)] x)
{
c := v.AuxInt
x := v.Args[0]
if !(isPowerOfTwo(c)) {
goto end75076953dbfe022526a153eda99b39b2
}
v.Op = OpAMD64SHLQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = log2(c)
v.AddArg(x)
return true
}
goto end75076953dbfe022526a153eda99b39b2
end75076953dbfe022526a153eda99b39b2:
;
// match: (MULQconst [c] (MOVQconst [d]))
// cond:
// result: (MOVQconst [c*d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVQconst {
goto end55c38c5c405101e610d7ba7fc702ddc0
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c * d
return true
}
goto end55c38c5c405101e610d7ba7fc702ddc0
end55c38c5c405101e610d7ba7fc702ddc0:
;
case OpAMD64MULW:
// match: (MULW x (MOVWconst [c]))
// cond:
// result: (MULWconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto end542112cc08217d4bdffc1a645d290ffb
}
c := v.Args[1].AuxInt
v.Op = OpAMD64MULWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end542112cc08217d4bdffc1a645d290ffb
end542112cc08217d4bdffc1a645d290ffb:
;
// match: (MULW (MOVWconst [c]) x)
// cond:
// result: (MULWconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVWconst {
goto endd97b4245ced2b3d27d8c555b06281de4
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64MULWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endd97b4245ced2b3d27d8c555b06281de4
endd97b4245ced2b3d27d8c555b06281de4:
;
case OpAMD64MULWconst:
// match: (MULWconst [c] (MOVWconst [d]))
// cond:
// result: (MOVWconst [c*d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVWconst {
goto end61dbc9d9e93dd6946a20a1f475b3f74b
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c * d
return true
}
goto end61dbc9d9e93dd6946a20a1f475b3f74b
end61dbc9d9e93dd6946a20a1f475b3f74b:
;
case OpMod16:
// match: (Mod16 x y)
// cond:
// result: (MODW x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MODW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end036bac694be9fe0d6b00b86c2e625990
end036bac694be9fe0d6b00b86c2e625990:
;
case OpMod16u:
// match: (Mod16u x y)
// cond:
// result: (MODWU x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MODWU
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto enda75d900097f1510ca1c6df786bef0c24
enda75d900097f1510ca1c6df786bef0c24:
;
case OpMod32:
// match: (Mod32 x y)
// cond:
// result: (MODL x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MODL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end12c8c0ecf3296810b8217cd4e40f7707
end12c8c0ecf3296810b8217cd4e40f7707:
;
case OpMod32u:
// match: (Mod32u x y)
// cond:
// result: (MODLU x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MODLU
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end1f0892076cfd58733a08d3ab175a3c1c
end1f0892076cfd58733a08d3ab175a3c1c:
;
case OpMod64:
// match: (Mod64 x y)
// cond:
// result: (MODQ x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MODQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endaae75f449baf5dc108be4e0439af97f2
endaae75f449baf5dc108be4e0439af97f2:
;
case OpMod64u:
// match: (Mod64u x y)
// cond:
// result: (MODQU x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MODQU
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end0d4c8b9df77e59289fb14e2496559d1d
end0d4c8b9df77e59289fb14e2496559d1d:
;
case OpMod8:
// match: (Mod8 x y)
// cond:
// result: (MODW (SignExt8to16 <config.Frontend().TypeInt16()> x) (SignExt8to16 <config.Frontend().TypeInt16()> y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MODW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpSignExt8to16, TypeInvalid)
v0.Type = config.Frontend().TypeInt16()
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpSignExt8to16, TypeInvalid)
v1.Type = config.Frontend().TypeInt16()
v1.AddArg(y)
v.AddArg(v1)
return true
}
goto end13bfd4e75ea363f7b6926fa05136e193
end13bfd4e75ea363f7b6926fa05136e193:
;
case OpMod8u:
// match: (Mod8u x y)
// cond:
// result: (MODWU (ZeroExt8to16 <config.Frontend().TypeUInt16()> x) (ZeroExt8to16 <config.Frontend().TypeUInt16()> y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MODWU
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpZeroExt8to16, TypeInvalid)
v0.Type = config.Frontend().TypeUInt16()
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpZeroExt8to16, TypeInvalid)
v1.Type = config.Frontend().TypeUInt16()
v1.AddArg(y)
v.AddArg(v1)
return true
}
goto end4c0e16e55b5f8f6d19811fc8d07eacf2
end4c0e16e55b5f8f6d19811fc8d07eacf2:
;
case OpMove:
// match: (Move [size] dst src mem)
// cond:
// result: (REPMOVSB dst src (MOVQconst <config.Frontend().TypeUInt64()> [size]) mem)
{
size := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64REPMOVSB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(dst)
v.AddArg(src)
v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, TypeInvalid)
v0.Type = config.Frontend().TypeUInt64()
v0.AuxInt = size
v.AddArg(v0)
v.AddArg(mem)
return true
}
goto end4dd156b33beb9981378c91e46f055a56
end4dd156b33beb9981378c91e46f055a56:
;
case OpMul16:
// match: (Mul16 x y)
// cond:
// result: (MULW x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MULW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end1addf5ea2c885aa1729b8f944859d00c
end1addf5ea2c885aa1729b8f944859d00c:
;
case OpMul32:
// match: (Mul32 x y)
// cond:
// result: (MULL x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MULL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto ende144381f85808e5144782804768e2859
ende144381f85808e5144782804768e2859:
;
case OpMul32F:
// match: (Mul32F x y)
// cond:
// result: (MULSS x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MULSS
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end32105a3bfe0237b799b69d83b3f171ca
end32105a3bfe0237b799b69d83b3f171ca:
;
case OpMul64:
// match: (Mul64 x y)
// cond:
// result: (MULQ x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MULQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end38da21e77ac329eb643b20e7d97d5853
end38da21e77ac329eb643b20e7d97d5853:
;
case OpMul64F:
// match: (Mul64F x y)
// cond:
// result: (MULSD x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MULSD
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end0ff6e1919fb0a3e549eb82b43edf1f52
end0ff6e1919fb0a3e549eb82b43edf1f52:
;
case OpMul8:
// match: (Mul8 x y)
// cond:
// result: (MULB x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MULB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endd876d6bc42a2285b801f42dadbd8757c
endd876d6bc42a2285b801f42dadbd8757c:
;
case OpMulPtr:
// match: (MulPtr x y)
// cond:
// result: (MULQ x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MULQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endbbedad106c011a93243e2062afdcc75f
endbbedad106c011a93243e2062afdcc75f:
;
case OpAMD64NEGB:
// match: (NEGB (MOVBconst [c]))
// cond:
// result: (MOVBconst [-c])
{
if v.Args[0].Op != OpAMD64MOVBconst {
goto end36d0300ba9eab8c9da86246ff653ca96
}
c := v.Args[0].AuxInt
v.Op = OpAMD64MOVBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -c
return true
}
goto end36d0300ba9eab8c9da86246ff653ca96
end36d0300ba9eab8c9da86246ff653ca96:
;
case OpAMD64NEGL:
// match: (NEGL (MOVLconst [c]))
// cond:
// result: (MOVLconst [-c])
{
if v.Args[0].Op != OpAMD64MOVLconst {
goto end7a245ec67e56bd51911e5ba2d0aa0a16
}
c := v.Args[0].AuxInt
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -c
return true
}
goto end7a245ec67e56bd51911e5ba2d0aa0a16
end7a245ec67e56bd51911e5ba2d0aa0a16:
;
case OpAMD64NEGQ:
// match: (NEGQ (MOVQconst [c]))
// cond:
// result: (MOVQconst [-c])
{
if v.Args[0].Op != OpAMD64MOVQconst {
goto end04ddd98bc6724ecb85c80c2a4e2bca5a
}
c := v.Args[0].AuxInt
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -c
return true
}
goto end04ddd98bc6724ecb85c80c2a4e2bca5a
end04ddd98bc6724ecb85c80c2a4e2bca5a:
;
case OpAMD64NEGW:
// match: (NEGW (MOVWconst [c]))
// cond:
// result: (MOVWconst [-c])
{
if v.Args[0].Op != OpAMD64MOVWconst {
goto end1db6636f0a51848d8a34f6561ecfe7ae
}
c := v.Args[0].AuxInt
v.Op = OpAMD64MOVWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -c
return true
}
goto end1db6636f0a51848d8a34f6561ecfe7ae
end1db6636f0a51848d8a34f6561ecfe7ae:
;
case OpAMD64NOTB:
// match: (NOTB (MOVBconst [c]))
// cond:
// result: (MOVBconst [^c])
{
if v.Args[0].Op != OpAMD64MOVBconst {
goto end9e383a9ceb29a9e2bf890ec6a67212a8
}
c := v.Args[0].AuxInt
v.Op = OpAMD64MOVBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = ^c
return true
}
goto end9e383a9ceb29a9e2bf890ec6a67212a8
end9e383a9ceb29a9e2bf890ec6a67212a8:
;
case OpAMD64NOTL:
// match: (NOTL (MOVLconst [c]))
// cond:
// result: (MOVLconst [^c])
{
if v.Args[0].Op != OpAMD64MOVLconst {
goto endcc73972c088d5e652a1370a96e56502d
}
c := v.Args[0].AuxInt
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = ^c
return true
}
goto endcc73972c088d5e652a1370a96e56502d
endcc73972c088d5e652a1370a96e56502d:
;
case OpAMD64NOTQ:
// match: (NOTQ (MOVQconst [c]))
// cond:
// result: (MOVQconst [^c])
{
if v.Args[0].Op != OpAMD64MOVQconst {
goto endb39ddb6bf7339d46f74114baad4333b6
}
c := v.Args[0].AuxInt
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = ^c
return true
}
goto endb39ddb6bf7339d46f74114baad4333b6
endb39ddb6bf7339d46f74114baad4333b6:
;
case OpAMD64NOTW:
// match: (NOTW (MOVWconst [c]))
// cond:
// result: (MOVWconst [^c])
{
if v.Args[0].Op != OpAMD64MOVWconst {
goto end35848095ebcf894c6957ad3be5f82c43
}
c := v.Args[0].AuxInt
v.Op = OpAMD64MOVWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = ^c
return true
}
goto end35848095ebcf894c6957ad3be5f82c43
end35848095ebcf894c6957ad3be5f82c43:
;
case OpNeg16:
// match: (Neg16 x)
// cond:
// result: (NEGW x)
{
x := v.Args[0]
v.Op = OpAMD64NEGW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end7a8c652f4ffeb49656119af69512edb2
end7a8c652f4ffeb49656119af69512edb2:
;
case OpNeg32:
// match: (Neg32 x)
// cond:
// result: (NEGL x)
{
x := v.Args[0]
v.Op = OpAMD64NEGL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endce1f7e17fc193f6c076e47d5e401e126
endce1f7e17fc193f6c076e47d5e401e126:
;
case OpNeg64:
// match: (Neg64 x)
// cond:
// result: (NEGQ x)
{
x := v.Args[0]
v.Op = OpAMD64NEGQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto enda06c5b1718f2b96aba10bf5a5c437c6c
enda06c5b1718f2b96aba10bf5a5c437c6c:
;
case OpNeg8:
// match: (Neg8 x)
// cond:
// result: (NEGB x)
{
x := v.Args[0]
v.Op = OpAMD64NEGB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end1e5f495a2ac6cdea47b1ae5ba62aa95d
end1e5f495a2ac6cdea47b1ae5ba62aa95d:
;
case OpNeq16:
// match: (Neq16 x y)
// cond:
// result: (SETNE (CMPW <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETNE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endf177c3b3868606824e43e11da7804572
endf177c3b3868606824e43e11da7804572:
;
case OpNeq32:
// match: (Neq32 x y)
// cond:
// result: (SETNE (CMPL <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETNE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end39c4bf6d063f8a0b6f0064c96ce25173
end39c4bf6d063f8a0b6f0064c96ce25173:
;
case OpNeq64:
// match: (Neq64 x y)
// cond:
// result: (SETNE (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETNE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end8ab0bcb910c0d3213dd8726fbcc4848e
end8ab0bcb910c0d3213dd8726fbcc4848e:
;
case OpNeq8:
// match: (Neq8 x y)
// cond:
// result: (SETNE (CMPB <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETNE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end4aaff28af59a65b3684f4f1897299932
end4aaff28af59a65b3684f4f1897299932:
;
case OpNeqPtr:
// match: (NeqPtr x y)
// cond:
// result: (SETNE (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETNE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end6e180ffd9583cd55361ed3e465158a4c
end6e180ffd9583cd55361ed3e465158a4c:
;
case OpNot:
// match: (Not x)
// cond:
// result: (XORBconst [1] x)
{
x := v.Args[0]
v.Op = OpAMD64XORBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 1
v.AddArg(x)
return true
}
goto end73973101aad60079c62fa64624e21db1
end73973101aad60079c62fa64624e21db1:
;
case OpAMD64ORB:
// match: (ORB x (MOVBconst [c]))
// cond:
// result: (ORBconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto end7b63870decde2515cb77ec4f8f76817c
}
c := v.Args[1].AuxInt
v.Op = OpAMD64ORBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end7b63870decde2515cb77ec4f8f76817c
end7b63870decde2515cb77ec4f8f76817c:
;
// match: (ORB (MOVBconst [c]) x)
// cond:
// result: (ORBconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVBconst {
goto end70b43d531e2097a4f6293f66256a642e
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64ORBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end70b43d531e2097a4f6293f66256a642e
end70b43d531e2097a4f6293f66256a642e:
;
// match: (ORB x x)
// cond:
// result: x
{
x := v.Args[0]
if v.Args[1] != x {
goto enddca5ce800a9eca157f243cb2fdb1408a
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto enddca5ce800a9eca157f243cb2fdb1408a
enddca5ce800a9eca157f243cb2fdb1408a:
;
case OpAMD64ORBconst:
// match: (ORBconst [c] x)
// cond: int8(c)==0
// result: x
{
c := v.AuxInt
x := v.Args[0]
if !(int8(c) == 0) {
goto end565f78e3a843dc73943b59227b39a1b3
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto end565f78e3a843dc73943b59227b39a1b3
end565f78e3a843dc73943b59227b39a1b3:
;
// match: (ORBconst [c] _)
// cond: int8(c)==-1
// result: (MOVBconst [-1])
{
c := v.AuxInt
if !(int8(c) == -1) {
goto end6033c7910d8cd536b31446e179e4610d
}
v.Op = OpAMD64MOVBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -1
return true
}
goto end6033c7910d8cd536b31446e179e4610d
end6033c7910d8cd536b31446e179e4610d:
;
// match: (ORBconst [c] (MOVBconst [d]))
// cond:
// result: (MOVBconst [c|d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVBconst {
goto endbe5263f022dc10a5cf53c118937d79dd
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c | d
return true
}
goto endbe5263f022dc10a5cf53c118937d79dd
endbe5263f022dc10a5cf53c118937d79dd:
;
case OpAMD64ORL:
// match: (ORL x (MOVLconst [c]))
// cond:
// result: (ORLconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto end1b883e30d860b6fac14ae98462c4f61a
}
c := v.Args[1].AuxInt
v.Op = OpAMD64ORLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end1b883e30d860b6fac14ae98462c4f61a
end1b883e30d860b6fac14ae98462c4f61a:
;
// match: (ORL (MOVLconst [c]) x)
// cond:
// result: (ORLconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVLconst {
goto enda5bc49524a0cbd2241f792837d0a48a8
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64ORLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto enda5bc49524a0cbd2241f792837d0a48a8
enda5bc49524a0cbd2241f792837d0a48a8:
;
// match: (ORL x x)
// cond:
// result: x
{
x := v.Args[0]
if v.Args[1] != x {
goto end2dd719b68f4938777ef0d820aab93659
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto end2dd719b68f4938777ef0d820aab93659
end2dd719b68f4938777ef0d820aab93659:
;
case OpAMD64ORLconst:
// match: (ORLconst [c] x)
// cond: int32(c)==0
// result: x
{
c := v.AuxInt
x := v.Args[0]
if !(int32(c) == 0) {
goto end5b52623a724e8a7167c71289fb7192f1
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto end5b52623a724e8a7167c71289fb7192f1
end5b52623a724e8a7167c71289fb7192f1:
;
// match: (ORLconst [c] _)
// cond: int32(c)==-1
// result: (MOVLconst [-1])
{
c := v.AuxInt
if !(int32(c) == -1) {
goto end345a8ea439ef2ef54bd84fc8a0f73e97
}
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -1
return true
}
goto end345a8ea439ef2ef54bd84fc8a0f73e97
end345a8ea439ef2ef54bd84fc8a0f73e97:
;
// match: (ORLconst [c] (MOVLconst [d]))
// cond:
// result: (MOVLconst [c|d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVLconst {
goto ende9ca05024248f782c88084715f81d727
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c | d
return true
}
goto ende9ca05024248f782c88084715f81d727
ende9ca05024248f782c88084715f81d727:
;
case OpAMD64ORQ:
// match: (ORQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (ORQconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end601f2bb3ccda102e484ff60adeaf6d26
}
c := v.Args[1].AuxInt
if !(is32Bit(c)) {
goto end601f2bb3ccda102e484ff60adeaf6d26
}
v.Op = OpAMD64ORQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end601f2bb3ccda102e484ff60adeaf6d26
end601f2bb3ccda102e484ff60adeaf6d26:
;
// match: (ORQ (MOVQconst [c]) x)
// cond: is32Bit(c)
// result: (ORQconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVQconst {
goto end010afbebcd314e288509d79a16a6d5cc
}
c := v.Args[0].AuxInt
x := v.Args[1]
if !(is32Bit(c)) {
goto end010afbebcd314e288509d79a16a6d5cc
}
v.Op = OpAMD64ORQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end010afbebcd314e288509d79a16a6d5cc
end010afbebcd314e288509d79a16a6d5cc:
;
// match: (ORQ x x)
// cond:
// result: x
{
x := v.Args[0]
if v.Args[1] != x {
goto end47a27d30b82db576978c5a3a57b520fb
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto end47a27d30b82db576978c5a3a57b520fb
end47a27d30b82db576978c5a3a57b520fb:
;
case OpAMD64ORQconst:
// match: (ORQconst [0] x)
// cond:
// result: x
{
if v.AuxInt != 0 {
goto end44534da6b9ce98d33fad7e20f0be1fbd
}
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto end44534da6b9ce98d33fad7e20f0be1fbd
end44534da6b9ce98d33fad7e20f0be1fbd:
;
// match: (ORQconst [-1] _)
// cond:
// result: (MOVQconst [-1])
{
if v.AuxInt != -1 {
goto endcde9b9d7c4527eaa5d50b252f50b43c1
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -1
return true
}
goto endcde9b9d7c4527eaa5d50b252f50b43c1
endcde9b9d7c4527eaa5d50b252f50b43c1:
;
// match: (ORQconst [c] (MOVQconst [d]))
// cond:
// result: (MOVQconst [c|d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVQconst {
goto enda2488509b71db9abcb06a5115c4ddc2c
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c | d
return true
}
goto enda2488509b71db9abcb06a5115c4ddc2c
enda2488509b71db9abcb06a5115c4ddc2c:
;
case OpAMD64ORW:
// match: (ORW x (MOVWconst [c]))
// cond:
// result: (ORWconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto end9f98df10892dbf170b49aace86ee0d7f
}
c := v.Args[1].AuxInt
v.Op = OpAMD64ORWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end9f98df10892dbf170b49aace86ee0d7f
end9f98df10892dbf170b49aace86ee0d7f:
;
// match: (ORW (MOVWconst [c]) x)
// cond:
// result: (ORWconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVWconst {
goto end96405942c9ceb5fcb0ddb85a8709d015
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64ORWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end96405942c9ceb5fcb0ddb85a8709d015
end96405942c9ceb5fcb0ddb85a8709d015:
;
// match: (ORW x x)
// cond:
// result: x
{
x := v.Args[0]
if v.Args[1] != x {
goto endc6a23b64e541dc9cfc6a90fd7028e8c1
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto endc6a23b64e541dc9cfc6a90fd7028e8c1
endc6a23b64e541dc9cfc6a90fd7028e8c1:
;
case OpAMD64ORWconst:
// match: (ORWconst [c] x)
// cond: int16(c)==0
// result: x
{
c := v.AuxInt
x := v.Args[0]
if !(int16(c) == 0) {
goto endbbbdec9091c8b4c58e587eac8a43402d
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto endbbbdec9091c8b4c58e587eac8a43402d
endbbbdec9091c8b4c58e587eac8a43402d:
;
// match: (ORWconst [c] _)
// cond: int16(c)==-1
// result: (MOVWconst [-1])
{
c := v.AuxInt
if !(int16(c) == -1) {
goto ended87a5775f5e04b2d2a117a63d82dd9b
}
v.Op = OpAMD64MOVWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -1
return true
}
goto ended87a5775f5e04b2d2a117a63d82dd9b
ended87a5775f5e04b2d2a117a63d82dd9b:
;
// match: (ORWconst [c] (MOVWconst [d]))
// cond:
// result: (MOVWconst [c|d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVWconst {
goto endba9221a8462b5c62e8d7c686f64c2778
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c | d
return true
}
goto endba9221a8462b5c62e8d7c686f64c2778
endba9221a8462b5c62e8d7c686f64c2778:
;
case OpOffPtr:
// match: (OffPtr [off] ptr)
// cond:
// result: (ADDQconst [off] ptr)
{
off := v.AuxInt
ptr := v.Args[0]
v.Op = OpAMD64ADDQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = off
v.AddArg(ptr)
return true
}
goto end0429f947ee7ac49ff45a243e461a5290
end0429f947ee7ac49ff45a243e461a5290:
;
case OpOr16:
// match: (Or16 x y)
// cond:
// result: (ORW x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ORW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end8fedf2c79d5607b7056b0ff015199cbd
end8fedf2c79d5607b7056b0ff015199cbd:
;
case OpOr32:
// match: (Or32 x y)
// cond:
// result: (ORL x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ORL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endea45bed9ca97d2995b68b53e6012d384
endea45bed9ca97d2995b68b53e6012d384:
;
case OpOr64:
// match: (Or64 x y)
// cond:
// result: (ORQ x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ORQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end3a446becaf2461f4f1a41faeef313f41
end3a446becaf2461f4f1a41faeef313f41:
;
case OpOr8:
// match: (Or8 x y)
// cond:
// result: (ORB x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ORB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end6f8a8c559a167d1f0a5901d09a1fb248
end6f8a8c559a167d1f0a5901d09a1fb248:
;
case OpPanicIndexCheck:
// match: (PanicIndexCheck mem)
// cond:
// result: (LoweredPanicIndexCheck mem)
{
mem := v.Args[0]
v.Op = OpAMD64LoweredPanicIndexCheck
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(mem)
return true
}
goto enda5014ba73d3550a5b66424044395c70f
enda5014ba73d3550a5b66424044395c70f:
;
case OpPanicNilCheck:
// match: (PanicNilCheck ptr mem)
// cond:
// result: (LoweredPanicNilCheck ptr mem)
{
ptr := v.Args[0]
mem := v.Args[1]
v.Op = OpAMD64LoweredPanicNilCheck
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto enda02b1ad5a6f929b782190145f2c8628b
enda02b1ad5a6f929b782190145f2c8628b:
;
case OpPanicSliceCheck:
// match: (PanicSliceCheck mem)
// cond:
// result: (LoweredPanicSliceCheck mem)
{
mem := v.Args[0]
v.Op = OpAMD64LoweredPanicSliceCheck
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(mem)
return true
}
goto end238ed0074810b55bd2bba7b45cdeed68
end238ed0074810b55bd2bba7b45cdeed68:
;
case OpRsh16Ux16:
// match: (Rsh16Ux16 <t> x y)
// cond:
// result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [16] y)))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SHRW, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v1.Type = t
v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto end73239750a306668023d2c49875ac442f
end73239750a306668023d2c49875ac442f:
;
case OpRsh16Ux32:
// match: (Rsh16Ux32 <t> x y)
// cond:
// result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [16] y)))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SHRW, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v1.Type = t
v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto end9951e3b2e92c892256feece722b32219
end9951e3b2e92c892256feece722b32219:
;
case OpRsh16Ux64:
// match: (Rsh16Ux64 <t> x y)
// cond:
// result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [16] y)))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SHRW, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v1.Type = t
v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto end610d56d808c204abfa40d653447b2c17
end610d56d808c204abfa40d653447b2c17:
;
case OpRsh16Ux8:
// match: (Rsh16Ux8 <t> x y)
// cond:
// result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [16] y)))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SHRW, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v1.Type = t
v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto end45e76a8d2b004e6802d53cf12b4757b3
end45e76a8d2b004e6802d53cf12b4757b3:
;
case OpRsh16x16:
// match: (Rsh16x16 <t> x y)
// cond:
// result: (SARW <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst <TypeFlags> [16] y)))))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SARW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = t
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpAMD64ORW, TypeInvalid)
v0.Type = y.Type
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
v1.Type = y.Type
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v2.Type = y.Type
v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
v3.Type = TypeFlags
v3.AuxInt = 16
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
goto endbcd8fd69ada08517f6f94f35da91e1c3
endbcd8fd69ada08517f6f94f35da91e1c3:
;
case OpRsh16x32:
// match: (Rsh16x32 <t> x y)
// cond:
// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst <TypeFlags> [16] y)))))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SARW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = t
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpAMD64ORL, TypeInvalid)
v0.Type = y.Type
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
v1.Type = y.Type
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v2.Type = y.Type
v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
v3.Type = TypeFlags
v3.AuxInt = 16
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
goto endec3994083e7f82857ecec05906c29aa6
endec3994083e7f82857ecec05906c29aa6:
;
case OpRsh16x64:
// match: (Rsh16x64 <t> x y)
// cond:
// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst <TypeFlags> [16] y)))))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SARW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = t
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpAMD64ORQ, TypeInvalid)
v0.Type = y.Type
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTQ, TypeInvalid)
v1.Type = y.Type
v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
v2.Type = y.Type
v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
v3.Type = TypeFlags
v3.AuxInt = 16
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
goto end19da3883e21ffa3a45d7fc648ef38b66
end19da3883e21ffa3a45d7fc648ef38b66:
;
case OpRsh16x8:
// match: (Rsh16x8 <t> x y)
// cond:
// result: (SARW <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst <TypeFlags> [16] y)))))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SARW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = t
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpAMD64ORB, TypeInvalid)
v0.Type = y.Type
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
v1.Type = y.Type
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v2.Type = y.Type
v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
v3.Type = TypeFlags
v3.AuxInt = 16
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
goto end3c989f6931d059ea04e4ba93601b6c51
end3c989f6931d059ea04e4ba93601b6c51:
;
case OpRsh32Ux16:
// match: (Rsh32Ux16 <t> x y)
// cond:
// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [32] y)))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SHRL, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v1.Type = t
v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto end056ede9885a9fc2f32615a2a03b35388
end056ede9885a9fc2f32615a2a03b35388:
;
case OpRsh32Ux32:
// match: (Rsh32Ux32 <t> x y)
// cond:
// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [32] y)))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SHRL, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v1.Type = t
v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto end30439bdc3517479ea25ae7f54408ba7f
end30439bdc3517479ea25ae7f54408ba7f:
;
case OpRsh32Ux64:
// match: (Rsh32Ux64 <t> x y)
// cond:
// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [32] y)))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SHRL, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v1.Type = t
v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto end49b47fd18b54461d8eea51f6e5889cd2
end49b47fd18b54461d8eea51f6e5889cd2:
;
case OpRsh32Ux8:
// match: (Rsh32Ux8 <t> x y)
// cond:
// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [32] y)))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SHRL, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v1.Type = t
v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto end46e045970a8b1afb9035605fc0e50c69
end46e045970a8b1afb9035605fc0e50c69:
;
case OpRsh32x16:
// match: (Rsh32x16 <t> x y)
// cond:
// result: (SARL <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst <TypeFlags> [32] y)))))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SARL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = t
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpAMD64ORW, TypeInvalid)
v0.Type = y.Type
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
v1.Type = y.Type
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v2.Type = y.Type
v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
v3.Type = TypeFlags
v3.AuxInt = 32
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
goto end5d1b8d7e1d1e53e621d13bb0eafc9102
end5d1b8d7e1d1e53e621d13bb0eafc9102:
;
case OpRsh32x32:
// match: (Rsh32x32 <t> x y)
// cond:
// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst <TypeFlags> [32] y)))))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SARL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = t
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpAMD64ORL, TypeInvalid)
v0.Type = y.Type
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
v1.Type = y.Type
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v2.Type = y.Type
v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
v3.Type = TypeFlags
v3.AuxInt = 32
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
goto end9c27383961c2161a9955012fce808cab
end9c27383961c2161a9955012fce808cab:
;
case OpRsh32x64:
// match: (Rsh32x64 <t> x y)
// cond:
// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst <TypeFlags> [32] y)))))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SARL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = t
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpAMD64ORQ, TypeInvalid)
v0.Type = y.Type
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTQ, TypeInvalid)
v1.Type = y.Type
v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
v2.Type = y.Type
v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
v3.Type = TypeFlags
v3.AuxInt = 32
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
goto end75dc7144497705c800e0c60dcd4a2828
end75dc7144497705c800e0c60dcd4a2828:
;
case OpRsh32x8:
// match: (Rsh32x8 <t> x y)
// cond:
// result: (SARL <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst <TypeFlags> [32] y)))))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SARL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = t
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpAMD64ORB, TypeInvalid)
v0.Type = y.Type
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
v1.Type = y.Type
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v2.Type = y.Type
v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
v3.Type = TypeFlags
v3.AuxInt = 32
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
goto enda7b94b2fd5cbcd12bb2dcd576bdca481
enda7b94b2fd5cbcd12bb2dcd576bdca481:
;
case OpRsh64Ux16:
// match: (Rsh64Ux16 <t> x y)
// cond:
// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst <TypeFlags> [64] y)))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SHRQ, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
v1.Type = t
v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto endc4bdfdc375a5c94978d936bd0db89cc5
endc4bdfdc375a5c94978d936bd0db89cc5:
;
case OpRsh64Ux32:
// match: (Rsh64Ux32 <t> x y)
// cond:
// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst <TypeFlags> [64] y)))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SHRQ, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
v1.Type = t
v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto end217f32bca5f6744b9a7de052f4fae13e
end217f32bca5f6744b9a7de052f4fae13e:
;
case OpRsh64Ux64:
// match: (Rsh64Ux64 <t> x y)
// cond:
// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst <TypeFlags> [64] y)))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SHRQ, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
v1.Type = t
v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto end530dee0bcadf1cf5d092894b6210ffcd
end530dee0bcadf1cf5d092894b6210ffcd:
;
case OpRsh64Ux8:
// match: (Rsh64Ux8 <t> x y)
// cond:
// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst <TypeFlags> [64] y)))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SHRQ, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
v1.Type = t
v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto endf09baf4e0005c5eb4905f71ce4c8b306
endf09baf4e0005c5eb4905f71ce4c8b306:
;
case OpRsh64x16:
// match: (Rsh64x16 <t> x y)
// cond:
// result: (SARQ <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst <TypeFlags> [64] y)))))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SARQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = t
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpAMD64ORW, TypeInvalid)
v0.Type = y.Type
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
v1.Type = y.Type
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v2.Type = y.Type
v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
v3.Type = TypeFlags
v3.AuxInt = 64
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
goto endb370ee74ca256a604138321ddca9d543
endb370ee74ca256a604138321ddca9d543:
;
case OpRsh64x32:
// match: (Rsh64x32 <t> x y)
// cond:
// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst <TypeFlags> [64] y)))))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SARQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = t
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpAMD64ORL, TypeInvalid)
v0.Type = y.Type
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
v1.Type = y.Type
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v2.Type = y.Type
v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
v3.Type = TypeFlags
v3.AuxInt = 64
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
goto end3cc6edf5b286a449332757ea12d2d601
end3cc6edf5b286a449332757ea12d2d601:
;
case OpRsh64x64:
// match: (Rsh64x64 <t> x y)
// cond:
// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst <TypeFlags> [64] y)))))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SARQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = t
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpAMD64ORQ, TypeInvalid)
v0.Type = y.Type
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTQ, TypeInvalid)
v1.Type = y.Type
v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
v2.Type = y.Type
v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
v3.Type = TypeFlags
v3.AuxInt = 64
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
goto end45de7b33396d9fd2ba377bd095f1d7a6
end45de7b33396d9fd2ba377bd095f1d7a6:
;
case OpRsh64x8:
// match: (Rsh64x8 <t> x y)
// cond:
// result: (SARQ <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst <TypeFlags> [64] y)))))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SARQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = t
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpAMD64ORB, TypeInvalid)
v0.Type = y.Type
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
v1.Type = y.Type
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v2.Type = y.Type
v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
v3.Type = TypeFlags
v3.AuxInt = 64
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
goto ende03fa68104fd18bb9b2bb94370e0c8b3
ende03fa68104fd18bb9b2bb94370e0c8b3:
;
case OpRsh8Ux16:
// match: (Rsh8Ux16 <t> x y)
// cond:
// result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [8] y)))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SHRB, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v1.Type = t
v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto enda1adfc560334e10d5e83fbff27a8752f
enda1adfc560334e10d5e83fbff27a8752f:
;
case OpRsh8Ux32:
// match: (Rsh8Ux32 <t> x y)
// cond:
// result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [8] y)))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SHRB, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v1.Type = t
v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto end17f63b4b712e715a33ac780193b59c2e
end17f63b4b712e715a33ac780193b59c2e:
;
case OpRsh8Ux64:
// match: (Rsh8Ux64 <t> x y)
// cond:
// result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [8] y)))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SHRB, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v1.Type = t
v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto end77d5c3ef9982ebd27c135d3461b7430b
end77d5c3ef9982ebd27c135d3461b7430b:
;
case OpRsh8Ux8:
// match: (Rsh8Ux8 <t> x y)
// cond:
// result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [8] y)))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SHRB, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v1.Type = t
v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto end206712ffbda924142afbf384aeb8f09e
end206712ffbda924142afbf384aeb8f09e:
;
case OpRsh8x16:
// match: (Rsh8x16 <t> x y)
// cond:
// result: (SARB <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst <TypeFlags> [8] y)))))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SARB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = t
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpAMD64ORW, TypeInvalid)
v0.Type = y.Type
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
v1.Type = y.Type
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v2.Type = y.Type
v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
v3.Type = TypeFlags
v3.AuxInt = 8
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
goto endd303f390b49d9716dc783d5c4d57ddd1
endd303f390b49d9716dc783d5c4d57ddd1:
;
case OpRsh8x32:
// match: (Rsh8x32 <t> x y)
// cond:
// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst <TypeFlags> [8] y)))))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SARB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = t
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpAMD64ORL, TypeInvalid)
v0.Type = y.Type
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
v1.Type = y.Type
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v2.Type = y.Type
v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
v3.Type = TypeFlags
v3.AuxInt = 8
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
goto ende12a524a6fc68eb245140c6919034337
ende12a524a6fc68eb245140c6919034337:
;
case OpRsh8x64:
// match: (Rsh8x64 <t> x y)
// cond:
// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst <TypeFlags> [8] y)))))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SARB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = t
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpAMD64ORQ, TypeInvalid)
v0.Type = y.Type
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTQ, TypeInvalid)
v1.Type = y.Type
v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
v2.Type = y.Type
v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
v3.Type = TypeFlags
v3.AuxInt = 8
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
goto end6ee53459daa5458d163c86ea02dd2f31
end6ee53459daa5458d163c86ea02dd2f31:
;
case OpRsh8x8:
// match: (Rsh8x8 <t> x y)
// cond:
// result: (SARB <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst <TypeFlags> [8] y)))))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SARB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = t
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpAMD64ORB, TypeInvalid)
v0.Type = y.Type
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
v1.Type = y.Type
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
v2.Type = y.Type
v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
v3.Type = TypeFlags
v3.AuxInt = 8
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
goto end07f447a7e25b048c41d412c242330ec0
end07f447a7e25b048c41d412c242330ec0:
;
case OpAMD64SARB:
// match: (SARB x (MOVBconst [c]))
// cond:
// result: (SARBconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto end3bf3d17717aa6c04462e56d1c87902ce
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SARBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end3bf3d17717aa6c04462e56d1c87902ce
end3bf3d17717aa6c04462e56d1c87902ce:
;
case OpAMD64SARBconst:
// match: (SARBconst [c] (MOVQconst [d]))
// cond:
// result: (MOVQconst [d>>uint64(c)])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVQconst {
goto end06e0e38775f0650ed672427d19cd8fff
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = d >> uint64(c)
return true
}
goto end06e0e38775f0650ed672427d19cd8fff
end06e0e38775f0650ed672427d19cd8fff:
;
case OpAMD64SARL:
// match: (SARL x (MOVLconst [c]))
// cond:
// result: (SARLconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto ende586a72c1b232ee0b63e37c71eeb8470
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SARLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto ende586a72c1b232ee0b63e37c71eeb8470
ende586a72c1b232ee0b63e37c71eeb8470:
;
case OpAMD64SARLconst:
// match: (SARLconst [c] (MOVQconst [d]))
// cond:
// result: (MOVQconst [d>>uint64(c)])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVQconst {
goto end8f34dc94323303e75b7bcc8e731cf1db
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = d >> uint64(c)
return true
}
goto end8f34dc94323303e75b7bcc8e731cf1db
end8f34dc94323303e75b7bcc8e731cf1db:
;
case OpAMD64SARQ:
// match: (SARQ x (MOVQconst [c]))
// cond:
// result: (SARQconst [c&63] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end25e720ab203be2745dded5550e6d8a7c
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SARQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 63
v.AddArg(x)
return true
}
goto end25e720ab203be2745dded5550e6d8a7c
end25e720ab203be2745dded5550e6d8a7c:
;
case OpAMD64SARQconst:
// match: (SARQconst [c] (MOVQconst [d]))
// cond:
// result: (MOVQconst [d>>uint64(c)])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVQconst {
goto endd949ba69a1ff71ba62c49b39c68f269e
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = d >> uint64(c)
return true
}
goto endd949ba69a1ff71ba62c49b39c68f269e
endd949ba69a1ff71ba62c49b39c68f269e:
;
case OpAMD64SARW:
// match: (SARW x (MOVWconst [c]))
// cond:
// result: (SARWconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto endc46e3f211f94238f9a0aec3c498af490
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SARWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto endc46e3f211f94238f9a0aec3c498af490
endc46e3f211f94238f9a0aec3c498af490:
;
case OpAMD64SARWconst:
// match: (SARWconst [c] (MOVQconst [d]))
// cond:
// result: (MOVQconst [d>>uint64(c)])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVQconst {
goto endca23e80dba22ab574f843c7a4cef24ab
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = d >> uint64(c)
return true
}
goto endca23e80dba22ab574f843c7a4cef24ab
endca23e80dba22ab574f843c7a4cef24ab:
;
case OpAMD64SBBLcarrymask:
// match: (SBBLcarrymask (CMPQconst [c] (MOVQconst [d])))
// cond: inBounds64(d, c)
// result: (MOVLconst [-1])
{
if v.Args[0].Op != OpAMD64CMPQconst {
goto end490c8a7039bab41e90e564fbb8500233
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVQconst {
goto end490c8a7039bab41e90e564fbb8500233
}
d := v.Args[0].Args[0].AuxInt
if !(inBounds64(d, c)) {
goto end490c8a7039bab41e90e564fbb8500233
}
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -1
return true
}
goto end490c8a7039bab41e90e564fbb8500233
end490c8a7039bab41e90e564fbb8500233:
;
// match: (SBBLcarrymask (CMPQconst [c] (MOVQconst [d])))
// cond: !inBounds64(d, c)
// result: (MOVLconst [0])
{
if v.Args[0].Op != OpAMD64CMPQconst {
goto end95e703eabe71d831b7a3d2f9fabe7de9
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVQconst {
goto end95e703eabe71d831b7a3d2f9fabe7de9
}
d := v.Args[0].Args[0].AuxInt
if !(!inBounds64(d, c)) {
goto end95e703eabe71d831b7a3d2f9fabe7de9
}
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto end95e703eabe71d831b7a3d2f9fabe7de9
end95e703eabe71d831b7a3d2f9fabe7de9:
;
// match: (SBBLcarrymask (CMPLconst [c] (MOVLconst [d])))
// cond: inBounds32(d, c)
// result: (MOVLconst [-1])
{
if v.Args[0].Op != OpAMD64CMPLconst {
goto end00c0a561340b0172c9a21f63648b86e2
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVLconst {
goto end00c0a561340b0172c9a21f63648b86e2
}
d := v.Args[0].Args[0].AuxInt
if !(inBounds32(d, c)) {
goto end00c0a561340b0172c9a21f63648b86e2
}
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -1
return true
}
goto end00c0a561340b0172c9a21f63648b86e2
end00c0a561340b0172c9a21f63648b86e2:
;
// match: (SBBLcarrymask (CMPLconst [c] (MOVLconst [d])))
// cond: !inBounds32(d, c)
// result: (MOVLconst [0])
{
if v.Args[0].Op != OpAMD64CMPLconst {
goto enda73c8bf14f7b45dd97c6a006e317b0b8
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVLconst {
goto enda73c8bf14f7b45dd97c6a006e317b0b8
}
d := v.Args[0].Args[0].AuxInt
if !(!inBounds32(d, c)) {
goto enda73c8bf14f7b45dd97c6a006e317b0b8
}
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto enda73c8bf14f7b45dd97c6a006e317b0b8
enda73c8bf14f7b45dd97c6a006e317b0b8:
;
// match: (SBBLcarrymask (CMPWconst [c] (MOVWconst [d])))
// cond: inBounds16(d, c)
// result: (MOVLconst [-1])
{
if v.Args[0].Op != OpAMD64CMPWconst {
goto endb94dc44cd77f66ed3bf3742874b666fc
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVWconst {
goto endb94dc44cd77f66ed3bf3742874b666fc
}
d := v.Args[0].Args[0].AuxInt
if !(inBounds16(d, c)) {
goto endb94dc44cd77f66ed3bf3742874b666fc
}
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -1
return true
}
goto endb94dc44cd77f66ed3bf3742874b666fc
endb94dc44cd77f66ed3bf3742874b666fc:
;
// match: (SBBLcarrymask (CMPWconst [c] (MOVWconst [d])))
// cond: !inBounds16(d, c)
// result: (MOVLconst [0])
{
if v.Args[0].Op != OpAMD64CMPWconst {
goto end7a02def6194822f7ab937d78088504d2
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVWconst {
goto end7a02def6194822f7ab937d78088504d2
}
d := v.Args[0].Args[0].AuxInt
if !(!inBounds16(d, c)) {
goto end7a02def6194822f7ab937d78088504d2
}
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto end7a02def6194822f7ab937d78088504d2
end7a02def6194822f7ab937d78088504d2:
;
// match: (SBBLcarrymask (CMPBconst [c] (MOVBconst [d])))
// cond: inBounds8(d, c)
// result: (MOVLconst [-1])
{
if v.Args[0].Op != OpAMD64CMPBconst {
goto end79c8e4a20761df731521e6cd956c4245
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVBconst {
goto end79c8e4a20761df731521e6cd956c4245
}
d := v.Args[0].Args[0].AuxInt
if !(inBounds8(d, c)) {
goto end79c8e4a20761df731521e6cd956c4245
}
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -1
return true
}
goto end79c8e4a20761df731521e6cd956c4245
end79c8e4a20761df731521e6cd956c4245:
;
// match: (SBBLcarrymask (CMPBconst [c] (MOVBconst [d])))
// cond: !inBounds8(d, c)
// result: (MOVLconst [0])
{
if v.Args[0].Op != OpAMD64CMPBconst {
goto end95b5b21dd7756ae41575759a1eff2bea
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVBconst {
goto end95b5b21dd7756ae41575759a1eff2bea
}
d := v.Args[0].Args[0].AuxInt
if !(!inBounds8(d, c)) {
goto end95b5b21dd7756ae41575759a1eff2bea
}
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto end95b5b21dd7756ae41575759a1eff2bea
end95b5b21dd7756ae41575759a1eff2bea:
;
case OpAMD64SBBQcarrymask:
// match: (SBBQcarrymask (CMPQconst [c] (MOVQconst [d])))
// cond: inBounds64(d, c)
// result: (MOVQconst [-1])
{
if v.Args[0].Op != OpAMD64CMPQconst {
goto end0c26df98feb38f149eca12f33c15de1b
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVQconst {
goto end0c26df98feb38f149eca12f33c15de1b
}
d := v.Args[0].Args[0].AuxInt
if !(inBounds64(d, c)) {
goto end0c26df98feb38f149eca12f33c15de1b
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -1
return true
}
goto end0c26df98feb38f149eca12f33c15de1b
end0c26df98feb38f149eca12f33c15de1b:
;
// match: (SBBQcarrymask (CMPQconst [c] (MOVQconst [d])))
// cond: !inBounds64(d, c)
// result: (MOVQconst [0])
{
if v.Args[0].Op != OpAMD64CMPQconst {
goto end8965aa1e1153e5ecd123bbb31a618570
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVQconst {
goto end8965aa1e1153e5ecd123bbb31a618570
}
d := v.Args[0].Args[0].AuxInt
if !(!inBounds64(d, c)) {
goto end8965aa1e1153e5ecd123bbb31a618570
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto end8965aa1e1153e5ecd123bbb31a618570
end8965aa1e1153e5ecd123bbb31a618570:
;
// match: (SBBQcarrymask (CMPLconst [c] (MOVLconst [d])))
// cond: inBounds32(d, c)
// result: (MOVQconst [-1])
{
if v.Args[0].Op != OpAMD64CMPLconst {
goto end8772ede6098981a61af0f478841d7d54
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVLconst {
goto end8772ede6098981a61af0f478841d7d54
}
d := v.Args[0].Args[0].AuxInt
if !(inBounds32(d, c)) {
goto end8772ede6098981a61af0f478841d7d54
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -1
return true
}
goto end8772ede6098981a61af0f478841d7d54
end8772ede6098981a61af0f478841d7d54:
;
// match: (SBBQcarrymask (CMPLconst [c] (MOVLconst [d])))
// cond: !inBounds32(d, c)
// result: (MOVQconst [0])
{
if v.Args[0].Op != OpAMD64CMPLconst {
goto end2d535e90075ee777fc616e6b9847a384
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVLconst {
goto end2d535e90075ee777fc616e6b9847a384
}
d := v.Args[0].Args[0].AuxInt
if !(!inBounds32(d, c)) {
goto end2d535e90075ee777fc616e6b9847a384
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto end2d535e90075ee777fc616e6b9847a384
end2d535e90075ee777fc616e6b9847a384:
;
// match: (SBBQcarrymask (CMPWconst [c] (MOVWconst [d])))
// cond: inBounds16(d, c)
// result: (MOVQconst [-1])
{
if v.Args[0].Op != OpAMD64CMPWconst {
goto end3103c51e14b4fc894b4170f16f37eebc
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVWconst {
goto end3103c51e14b4fc894b4170f16f37eebc
}
d := v.Args[0].Args[0].AuxInt
if !(inBounds16(d, c)) {
goto end3103c51e14b4fc894b4170f16f37eebc
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -1
return true
}
goto end3103c51e14b4fc894b4170f16f37eebc
end3103c51e14b4fc894b4170f16f37eebc:
;
// match: (SBBQcarrymask (CMPWconst [c] (MOVWconst [d])))
// cond: !inBounds16(d, c)
// result: (MOVQconst [0])
{
if v.Args[0].Op != OpAMD64CMPWconst {
goto enddae2191a59cfef5efb04ebab9354745c
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVWconst {
goto enddae2191a59cfef5efb04ebab9354745c
}
d := v.Args[0].Args[0].AuxInt
if !(!inBounds16(d, c)) {
goto enddae2191a59cfef5efb04ebab9354745c
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto enddae2191a59cfef5efb04ebab9354745c
enddae2191a59cfef5efb04ebab9354745c:
;
// match: (SBBQcarrymask (CMPBconst [c] (MOVBconst [d])))
// cond: inBounds8(d, c)
// result: (MOVQconst [-1])
{
if v.Args[0].Op != OpAMD64CMPBconst {
goto end72e088325ca005b0251b1ee82da3c5d9
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVBconst {
goto end72e088325ca005b0251b1ee82da3c5d9
}
d := v.Args[0].Args[0].AuxInt
if !(inBounds8(d, c)) {
goto end72e088325ca005b0251b1ee82da3c5d9
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -1
return true
}
goto end72e088325ca005b0251b1ee82da3c5d9
end72e088325ca005b0251b1ee82da3c5d9:
;
// match: (SBBQcarrymask (CMPBconst [c] (MOVBconst [d])))
// cond: !inBounds8(d, c)
// result: (MOVQconst [0])
{
if v.Args[0].Op != OpAMD64CMPBconst {
goto endcb388100f5b933aa94095096d2bb425e
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVBconst {
goto endcb388100f5b933aa94095096d2bb425e
}
d := v.Args[0].Args[0].AuxInt
if !(!inBounds8(d, c)) {
goto endcb388100f5b933aa94095096d2bb425e
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto endcb388100f5b933aa94095096d2bb425e
endcb388100f5b933aa94095096d2bb425e:
;
case OpAMD64SETA:
// match: (SETA (InvertFlags x))
// cond:
// result: (SETB x)
{
if v.Args[0].Op != OpAMD64InvertFlags {
goto enda4ac36e94fc279d762b5a6c7c6cc665d
}
x := v.Args[0].Args[0]
v.Op = OpAMD64SETB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto enda4ac36e94fc279d762b5a6c7c6cc665d
enda4ac36e94fc279d762b5a6c7c6cc665d:
;
case OpAMD64SETAE:
// match: (SETAE (InvertFlags x))
// cond:
// result: (SETBE x)
{
if v.Args[0].Op != OpAMD64InvertFlags {
goto end0468f5be6caf682fdea6b91d6648991e
}
x := v.Args[0].Args[0]
v.Op = OpAMD64SETBE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end0468f5be6caf682fdea6b91d6648991e
end0468f5be6caf682fdea6b91d6648991e:
;
case OpAMD64SETB:
// match: (SETB (InvertFlags x))
// cond:
// result: (SETA x)
{
if v.Args[0].Op != OpAMD64InvertFlags {
goto endc9eba7aa1e54a228570d2f5cc96f3565
}
x := v.Args[0].Args[0]
v.Op = OpAMD64SETA
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endc9eba7aa1e54a228570d2f5cc96f3565
endc9eba7aa1e54a228570d2f5cc96f3565:
;
case OpAMD64SETBE:
// match: (SETBE (InvertFlags x))
// cond:
// result: (SETAE x)
{
if v.Args[0].Op != OpAMD64InvertFlags {
goto end9d9031643469798b14b8cad1f5a7a1ba
}
x := v.Args[0].Args[0]
v.Op = OpAMD64SETAE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end9d9031643469798b14b8cad1f5a7a1ba
end9d9031643469798b14b8cad1f5a7a1ba:
;
case OpAMD64SETEQ:
// match: (SETEQ (InvertFlags x))
// cond:
// result: (SETEQ x)
{
if v.Args[0].Op != OpAMD64InvertFlags {
goto end5d2039c9368d8c0cfba23b5a85b459e1
}
x := v.Args[0].Args[0]
v.Op = OpAMD64SETEQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end5d2039c9368d8c0cfba23b5a85b459e1
end5d2039c9368d8c0cfba23b5a85b459e1:
;
case OpAMD64SETG:
// match: (SETG (InvertFlags x))
// cond:
// result: (SETL x)
{
if v.Args[0].Op != OpAMD64InvertFlags {
goto endf7586738694c9cd0b74ae28bbadb649f
}
x := v.Args[0].Args[0]
v.Op = OpAMD64SETL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endf7586738694c9cd0b74ae28bbadb649f
endf7586738694c9cd0b74ae28bbadb649f:
;
case OpAMD64SETGE:
// match: (SETGE (InvertFlags x))
// cond:
// result: (SETLE x)
{
if v.Args[0].Op != OpAMD64InvertFlags {
goto end82c11eff6f842159f564f2dad3d2eedc
}
x := v.Args[0].Args[0]
v.Op = OpAMD64SETLE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end82c11eff6f842159f564f2dad3d2eedc
end82c11eff6f842159f564f2dad3d2eedc:
;
case OpAMD64SETL:
// match: (SETL (InvertFlags x))
// cond:
// result: (SETG x)
{
if v.Args[0].Op != OpAMD64InvertFlags {
goto ende33160cd86b9d4d3b77e02fb4658d5d3
}
x := v.Args[0].Args[0]
v.Op = OpAMD64SETG
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto ende33160cd86b9d4d3b77e02fb4658d5d3
ende33160cd86b9d4d3b77e02fb4658d5d3:
;
case OpAMD64SETLE:
// match: (SETLE (InvertFlags x))
// cond:
// result: (SETGE x)
{
if v.Args[0].Op != OpAMD64InvertFlags {
goto end9307d96753efbeb888d1c98a6aba7a29
}
x := v.Args[0].Args[0]
v.Op = OpAMD64SETGE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end9307d96753efbeb888d1c98a6aba7a29
end9307d96753efbeb888d1c98a6aba7a29:
;
case OpAMD64SETNE:
// match: (SETNE (InvertFlags x))
// cond:
// result: (SETNE x)
{
if v.Args[0].Op != OpAMD64InvertFlags {
goto endbc71811b789475308014550f638026eb
}
x := v.Args[0].Args[0]
v.Op = OpAMD64SETNE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endbc71811b789475308014550f638026eb
endbc71811b789475308014550f638026eb:
;
case OpAMD64SHLB:
// match: (SHLB x (MOVBconst [c]))
// cond:
// result: (SHLBconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto end2d0d0111d831d8a575b5627284a6337a
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHLBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end2d0d0111d831d8a575b5627284a6337a
end2d0d0111d831d8a575b5627284a6337a:
;
case OpAMD64SHLL:
// match: (SHLL x (MOVLconst [c]))
// cond:
// result: (SHLLconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto end633f9ddcfbb63374c895a5f78da75d25
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHLLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end633f9ddcfbb63374c895a5f78da75d25
end633f9ddcfbb63374c895a5f78da75d25:
;
case OpAMD64SHLQ:
// match: (SHLQ x (MOVQconst [c]))
// cond:
// result: (SHLQconst [c&63] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end4d7e3a945cacdd6b6c8c0de6f465d4ae
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHLQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 63
v.AddArg(x)
return true
}
goto end4d7e3a945cacdd6b6c8c0de6f465d4ae
end4d7e3a945cacdd6b6c8c0de6f465d4ae:
;
case OpAMD64SHLW:
// match: (SHLW x (MOVWconst [c]))
// cond:
// result: (SHLWconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto endba96a52aa58d28b3357828051e0e695c
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHLWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto endba96a52aa58d28b3357828051e0e695c
endba96a52aa58d28b3357828051e0e695c:
;
case OpAMD64SHRB:
// match: (SHRB x (MOVBconst [c]))
// cond:
// result: (SHRBconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto enddb1cd5aaa826d43fa4f6d1b2b8795e58
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHRBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto enddb1cd5aaa826d43fa4f6d1b2b8795e58
enddb1cd5aaa826d43fa4f6d1b2b8795e58:
;
case OpAMD64SHRL:
// match: (SHRL x (MOVLconst [c]))
// cond:
// result: (SHRLconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto end344b8b9202e1925e8d0561f1c21412fc
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHRLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end344b8b9202e1925e8d0561f1c21412fc
end344b8b9202e1925e8d0561f1c21412fc:
;
case OpAMD64SHRQ:
// match: (SHRQ x (MOVQconst [c]))
// cond:
// result: (SHRQconst [c&63] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end699d35e2d5cfa08b8a3b1c8a183ddcf3
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHRQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 63
v.AddArg(x)
return true
}
goto end699d35e2d5cfa08b8a3b1c8a183ddcf3
end699d35e2d5cfa08b8a3b1c8a183ddcf3:
;
case OpAMD64SHRW:
// match: (SHRW x (MOVWconst [c]))
// cond:
// result: (SHRWconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto endd75ff1f9b3e9ec9c942a39b6179da1b3
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHRWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto endd75ff1f9b3e9ec9c942a39b6179da1b3
endd75ff1f9b3e9ec9c942a39b6179da1b3:
;
case OpAMD64SUBB:
// match: (SUBB x (MOVBconst [c]))
// cond:
// result: (SUBBconst x [c])
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto end9ca5d2a70e2df1a5a3ed6786bce1f7b2
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SUBBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AuxInt = c
return true
}
goto end9ca5d2a70e2df1a5a3ed6786bce1f7b2
end9ca5d2a70e2df1a5a3ed6786bce1f7b2:
;
// match: (SUBB (MOVBconst [c]) x)
// cond:
// result: (NEGB (SUBBconst <v.Type> x [c]))
{
if v.Args[0].Op != OpAMD64MOVBconst {
goto endc288755d69b04d24a6aac32a73956411
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64NEGB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SUBBconst, TypeInvalid)
v0.Type = v.Type
v0.AddArg(x)
v0.AuxInt = c
v.AddArg(v0)
return true
}
goto endc288755d69b04d24a6aac32a73956411
endc288755d69b04d24a6aac32a73956411:
;
// match: (SUBB x x)
// cond:
// result: (MOVBconst [0])
{
x := v.Args[0]
if v.Args[1] != x {
goto ende8904403d937d95b0d6133d3ec92bb45
}
v.Op = OpAMD64MOVBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto ende8904403d937d95b0d6133d3ec92bb45
ende8904403d937d95b0d6133d3ec92bb45:
;
case OpAMD64SUBBconst:
// match: (SUBBconst [c] (MOVBconst [d]))
// cond:
// result: (MOVBconst [d-c])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVBconst {
goto enddc5383558e2f3eae507afcb94eada964
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = d - c
return true
}
goto enddc5383558e2f3eae507afcb94eada964
enddc5383558e2f3eae507afcb94eada964:
;
// match: (SUBBconst [c] (SUBBconst [d] x))
// cond:
// result: (ADDBconst [-c-d] x)
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64SUBBconst {
goto end035c57413a46eb347ecb3736d1510915
}
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
v.Op = OpAMD64ADDBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -c - d
v.AddArg(x)
return true
}
goto end035c57413a46eb347ecb3736d1510915
end035c57413a46eb347ecb3736d1510915:
;
case OpAMD64SUBL:
// match: (SUBL x (MOVLconst [c]))
// cond:
// result: (SUBLconst x [c])
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto end178c1d6c86f9c16f6497586c2f7d8625
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SUBLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AuxInt = c
return true
}
goto end178c1d6c86f9c16f6497586c2f7d8625
end178c1d6c86f9c16f6497586c2f7d8625:
;
// match: (SUBL (MOVLconst [c]) x)
// cond:
// result: (NEGL (SUBLconst <v.Type> x [c]))
{
if v.Args[0].Op != OpAMD64MOVLconst {
goto endb0efe6e15ec20486b849534a00483ae2
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64NEGL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SUBLconst, TypeInvalid)
v0.Type = v.Type
v0.AddArg(x)
v0.AuxInt = c
v.AddArg(v0)
return true
}
goto endb0efe6e15ec20486b849534a00483ae2
endb0efe6e15ec20486b849534a00483ae2:
;
// match: (SUBL x x)
// cond:
// result: (MOVLconst [0])
{
x := v.Args[0]
if v.Args[1] != x {
goto end332f1f641f875c69bea7289191e69133
}
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto end332f1f641f875c69bea7289191e69133
end332f1f641f875c69bea7289191e69133:
;
case OpAMD64SUBLconst:
// match: (SUBLconst [c] (MOVLconst [d]))
// cond:
// result: (MOVLconst [d-c])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVLconst {
goto end6c5c6d58d4bdd0a5c2f7bf10b343b41e
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = d - c
return true
}
goto end6c5c6d58d4bdd0a5c2f7bf10b343b41e
end6c5c6d58d4bdd0a5c2f7bf10b343b41e:
;
// match: (SUBLconst [c] (SUBLconst [d] x))
// cond:
// result: (ADDLconst [-c-d] x)
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64SUBLconst {
goto end0c9ffb11e8a56ced1b14dbf6bf9a6737
}
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
v.Op = OpAMD64ADDLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -c - d
v.AddArg(x)
return true
}
goto end0c9ffb11e8a56ced1b14dbf6bf9a6737
end0c9ffb11e8a56ced1b14dbf6bf9a6737:
;
case OpAMD64SUBQ:
// match: (SUBQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (SUBQconst x [c])
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end9bbb7b20824a498752c605942fad89c2
}
c := v.Args[1].AuxInt
if !(is32Bit(c)) {
goto end9bbb7b20824a498752c605942fad89c2
}
v.Op = OpAMD64SUBQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AuxInt = c
return true
}
goto end9bbb7b20824a498752c605942fad89c2
end9bbb7b20824a498752c605942fad89c2:
;
// match: (SUBQ (MOVQconst [c]) x)
// cond: is32Bit(c)
// result: (NEGQ (SUBQconst <v.Type> x [c]))
{
if v.Args[0].Op != OpAMD64MOVQconst {
goto end8beb96de3efee9206d1bd4b7d777d2cb
}
c := v.Args[0].AuxInt
x := v.Args[1]
if !(is32Bit(c)) {
goto end8beb96de3efee9206d1bd4b7d777d2cb
}
v.Op = OpAMD64NEGQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SUBQconst, TypeInvalid)
v0.Type = v.Type
v0.AddArg(x)
v0.AuxInt = c
v.AddArg(v0)
return true
}
goto end8beb96de3efee9206d1bd4b7d777d2cb
end8beb96de3efee9206d1bd4b7d777d2cb:
;
// match: (SUBQ x x)
// cond:
// result: (MOVQconst [0])
{
x := v.Args[0]
if v.Args[1] != x {
goto endd87d1d839d2dc54d9c90fa4f73383480
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto endd87d1d839d2dc54d9c90fa4f73383480
endd87d1d839d2dc54d9c90fa4f73383480:
;
case OpAMD64SUBQconst:
// match: (SUBQconst [c] (MOVQconst [d]))
// cond:
// result: (MOVQconst [d-c])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVQconst {
goto endb0daebe6831cf381377c3e4248070f25
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = d - c
return true
}
goto endb0daebe6831cf381377c3e4248070f25
endb0daebe6831cf381377c3e4248070f25:
;
// match: (SUBQconst [c] (SUBQconst [d] x))
// cond:
// result: (ADDQconst [-c-d] x)
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64SUBQconst {
goto end2d40ddb5ae9e90679456254c61858d9d
}
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
v.Op = OpAMD64ADDQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -c - d
v.AddArg(x)
return true
}
goto end2d40ddb5ae9e90679456254c61858d9d
end2d40ddb5ae9e90679456254c61858d9d:
;
case OpAMD64SUBW:
// match: (SUBW x (MOVWconst [c]))
// cond:
// result: (SUBWconst x [c])
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto end135aa9100b2f61d58b37cede37b63731
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SUBWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AuxInt = c
return true
}
goto end135aa9100b2f61d58b37cede37b63731
end135aa9100b2f61d58b37cede37b63731:
;
// match: (SUBW (MOVWconst [c]) x)
// cond:
// result: (NEGW (SUBWconst <v.Type> x [c]))
{
if v.Args[0].Op != OpAMD64MOVWconst {
goto end44d23f7e65a4b1c42d0e6463f8e493b6
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64NEGW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64SUBWconst, TypeInvalid)
v0.Type = v.Type
v0.AddArg(x)
v0.AuxInt = c
v.AddArg(v0)
return true
}
goto end44d23f7e65a4b1c42d0e6463f8e493b6
end44d23f7e65a4b1c42d0e6463f8e493b6:
;
// match: (SUBW x x)
// cond:
// result: (MOVWconst [0])
{
x := v.Args[0]
if v.Args[1] != x {
goto endb970e7c318d04a1afe1dfe08a7ca0d9c
}
v.Op = OpAMD64MOVWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto endb970e7c318d04a1afe1dfe08a7ca0d9c
endb970e7c318d04a1afe1dfe08a7ca0d9c:
;
case OpAMD64SUBWconst:
// match: (SUBWconst [c] (MOVWconst [d]))
// cond:
// result: (MOVWconst [d-c])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVWconst {
goto endae629a229c399eaed7dbb95b1b0e6f8a
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = d - c
return true
}
goto endae629a229c399eaed7dbb95b1b0e6f8a
endae629a229c399eaed7dbb95b1b0e6f8a:
;
// match: (SUBWconst [c] (SUBWconst [d] x))
// cond:
// result: (ADDWconst [-c-d] x)
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64SUBWconst {
goto enda59f08d12aa08717b0443b7bb1b71374
}
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
v.Op = OpAMD64ADDWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -c - d
v.AddArg(x)
return true
}
goto enda59f08d12aa08717b0443b7bb1b71374
enda59f08d12aa08717b0443b7bb1b71374:
;
case OpSignExt16to32:
// match: (SignExt16to32 x)
// cond:
// result: (MOVWQSX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVWQSX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end21e4271c2b48a5aa3561ccfa8fa67cd9
end21e4271c2b48a5aa3561ccfa8fa67cd9:
;
case OpSignExt16to64:
// match: (SignExt16to64 x)
// cond:
// result: (MOVWQSX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVWQSX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endc6d242ee3a3e195ef0f9e8dae47ada75
endc6d242ee3a3e195ef0f9e8dae47ada75:
;
case OpSignExt32to64:
// match: (SignExt32to64 x)
// cond:
// result: (MOVLQSX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVLQSX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endb9f1a8b2d01eee44964a71a01bca165c
endb9f1a8b2d01eee44964a71a01bca165c:
;
case OpSignExt8to16:
// match: (SignExt8to16 x)
// cond:
// result: (MOVBQSX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVBQSX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end372869f08e147404b80634e5f83fd506
end372869f08e147404b80634e5f83fd506:
;
case OpSignExt8to32:
// match: (SignExt8to32 x)
// cond:
// result: (MOVBQSX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVBQSX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end913e3575e5b4cf7f60585c108db40464
end913e3575e5b4cf7f60585c108db40464:
;
case OpSignExt8to64:
// match: (SignExt8to64 x)
// cond:
// result: (MOVBQSX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVBQSX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endcef6d6001d3f25cf5dacee11a46e5c8c
endcef6d6001d3f25cf5dacee11a46e5c8c:
;
case OpStaticCall:
// match: (StaticCall [argwid] {target} mem)
// cond:
// result: (CALLstatic [argwid] {target} mem)
{
argwid := v.AuxInt
target := v.Aux
mem := v.Args[0]
v.Op = OpAMD64CALLstatic
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = argwid
v.Aux = target
v.AddArg(mem)
return true
}
goto end32c5cbec813d1c2ae94fc9b1090e4b2a
end32c5cbec813d1c2ae94fc9b1090e4b2a:
;
case OpStore:
// match: (Store [8] ptr val mem)
// cond: is64BitFloat(val.Type)
// result: (MOVSDstore ptr val mem)
{
if v.AuxInt != 8 {
goto endaeec4f61bc8e67dbf3fa2f79fe4c2b9e
}
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is64BitFloat(val.Type)) {
goto endaeec4f61bc8e67dbf3fa2f79fe4c2b9e
}
v.Op = OpAMD64MOVSDstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto endaeec4f61bc8e67dbf3fa2f79fe4c2b9e
endaeec4f61bc8e67dbf3fa2f79fe4c2b9e:
;
// match: (Store [4] ptr val mem)
// cond: is32BitFloat(val.Type)
// result: (MOVSSstore ptr val mem)
{
if v.AuxInt != 4 {
goto endf638ca0a75871b5062da15324d0e0384
}
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is32BitFloat(val.Type)) {
goto endf638ca0a75871b5062da15324d0e0384
}
v.Op = OpAMD64MOVSSstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto endf638ca0a75871b5062da15324d0e0384
endf638ca0a75871b5062da15324d0e0384:
;
// match: (Store [8] ptr val mem)
// cond:
// result: (MOVQstore ptr val mem)
{
if v.AuxInt != 8 {
goto endd1eb7c3ea0c806e7a53ff3be86186eb7
}
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64MOVQstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto endd1eb7c3ea0c806e7a53ff3be86186eb7
endd1eb7c3ea0c806e7a53ff3be86186eb7:
;
// match: (Store [4] ptr val mem)
// cond:
// result: (MOVLstore ptr val mem)
{
if v.AuxInt != 4 {
goto end44e3b22360da76ecd59be9a8c2dd1347
}
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64MOVLstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end44e3b22360da76ecd59be9a8c2dd1347
end44e3b22360da76ecd59be9a8c2dd1347:
;
// match: (Store [2] ptr val mem)
// cond:
// result: (MOVWstore ptr val mem)
{
if v.AuxInt != 2 {
goto endd0342b7fd3d0713f3e26922660047c71
}
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64MOVWstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto endd0342b7fd3d0713f3e26922660047c71
endd0342b7fd3d0713f3e26922660047c71:
;
// match: (Store [1] ptr val mem)
// cond:
// result: (MOVBstore ptr val mem)
{
if v.AuxInt != 1 {
goto end8e76e20031197ca875889d2b4d0eb1d1
}
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64MOVBstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end8e76e20031197ca875889d2b4d0eb1d1
end8e76e20031197ca875889d2b4d0eb1d1:
;
case OpSub16:
// match: (Sub16 x y)
// cond:
// result: (SUBW x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SUBW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end54adc5de883c0460ca71c6ee464d4244
end54adc5de883c0460ca71c6ee464d4244:
;
case OpSub32:
// match: (Sub32 x y)
// cond:
// result: (SUBL x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SUBL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto enddc3a2a488bda8c5856f93343e5ffe5f8
enddc3a2a488bda8c5856f93343e5ffe5f8:
;
case OpSub32F:
// match: (Sub32F x y)
// cond:
// result: (SUBSS x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SUBSS
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end20193c1804b0e707702a884fb8abd60d
end20193c1804b0e707702a884fb8abd60d:
;
case OpSub64:
// match: (Sub64 x y)
// cond:
// result: (SUBQ x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SUBQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endd88d5646309fd9174584888ecc8aca2c
endd88d5646309fd9174584888ecc8aca2c:
;
case OpSub64F:
// match: (Sub64F x y)
// cond:
// result: (SUBSD x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SUBSD
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end5d5af7b8a3326bf9151f00a0013b73d7
end5d5af7b8a3326bf9151f00a0013b73d7:
;
case OpSub8:
// match: (Sub8 x y)
// cond:
// result: (SUBB x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SUBB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end7d33bf9bdfa505f96b930563eca7955f
end7d33bf9bdfa505f96b930563eca7955f:
;
case OpTrunc16to8:
// match: (Trunc16to8 x)
// cond:
// result: x
{
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto end8e2f5e0a6e3a06423c077747de6c2bdd
end8e2f5e0a6e3a06423c077747de6c2bdd:
;
case OpTrunc32to16:
// match: (Trunc32to16 x)
// cond:
// result: x
{
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto end5bed0e3a3c1c6374d86beb5a4397708c
end5bed0e3a3c1c6374d86beb5a4397708c:
;
case OpTrunc32to8:
// match: (Trunc32to8 x)
// cond:
// result: x
{
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto endef0b8032ce91979ce6cd0004260c04ee
endef0b8032ce91979ce6cd0004260c04ee:
;
case OpTrunc64to16:
// match: (Trunc64to16 x)
// cond:
// result: x
{
x := v.Args[0]
			// [blame-view residue removed: CL 12600 "[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled", 2015-07-23]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
v.AddArg(x)
return true
}
goto endd32fd6e0ce970c212835e6f71c3dcbfd
endd32fd6e0ce970c212835e6f71c3dcbfd:
	// [blame-view residue removed: CL 12600 "[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled", 2015-07-23]
;
case OpTrunc64to32:
// match: (Trunc64to32 x)
// cond:
// result: x
		// [blame-view residue removed: CL 12600 "[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled", 2015-07-23]
{
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
			// [blame-view residue removed: CL 12600 "[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled", 2015-07-23]
v.AddArg(x)
return true
}
goto end1212c4e84153210aff7fd630fb3e1883
end1212c4e84153210aff7fd630fb3e1883:
	// [blame-view residue removed: CL 12600 "[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled", 2015-07-23]
;
case OpTrunc64to8:
// match: (Trunc64to8 x)
// cond:
// result: x
		// [blame-view residue removed: CL 12600 "[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled", 2015-07-23]
{
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = x.Type
			// [blame-view residue removed: CL 12600 "[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled", 2015-07-23]
v.AddArg(x)
return true
}
goto end734f017d4b2810ca2288f7037365824c
end734f017d4b2810ca2288f7037365824c:
;
case OpAMD64XORB:
// match: (XORB x (MOVBconst [c]))
// cond:
// result: (XORBconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto enda9ed9fdd115ffdffa8127c007c34d7b7
}
c := v.Args[1].AuxInt
v.Op = OpAMD64XORBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto enda9ed9fdd115ffdffa8127c007c34d7b7
enda9ed9fdd115ffdffa8127c007c34d7b7:
;
// match: (XORB (MOVBconst [c]) x)
// cond:
// result: (XORBconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVBconst {
goto endb02a07d9dc7b802c59f013116e952f3f
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64XORBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endb02a07d9dc7b802c59f013116e952f3f
endb02a07d9dc7b802c59f013116e952f3f:
;
// match: (XORB x x)
// cond:
// result: (MOVBconst [0])
{
x := v.Args[0]
if v.Args[1] != x {
goto end2afddc39503d04d572a3a07878f6c9c9
}
v.Op = OpAMD64MOVBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto end2afddc39503d04d572a3a07878f6c9c9
end2afddc39503d04d572a3a07878f6c9c9:
;
case OpAMD64XORBconst:
// match: (XORBconst [c] (MOVBconst [d]))
// cond:
// result: (MOVBconst [c^d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVBconst {
goto end6d8d1b612af9d253605c8bc69b822903
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c ^ d
return true
}
goto end6d8d1b612af9d253605c8bc69b822903
end6d8d1b612af9d253605c8bc69b822903:
;
case OpAMD64XORL:
// match: (XORL x (MOVLconst [c]))
// cond:
// result: (XORLconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto enda9459d509d3416da67d13a22dd074a9c
}
c := v.Args[1].AuxInt
v.Op = OpAMD64XORLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto enda9459d509d3416da67d13a22dd074a9c
enda9459d509d3416da67d13a22dd074a9c:
;
// match: (XORL (MOVLconst [c]) x)
// cond:
// result: (XORLconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVLconst {
goto end9c1a0af00eeadd8aa325e55f1f3fb89c
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64XORLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end9c1a0af00eeadd8aa325e55f1f3fb89c
end9c1a0af00eeadd8aa325e55f1f3fb89c:
;
// match: (XORL x x)
// cond:
// result: (MOVLconst [0])
{
x := v.Args[0]
if v.Args[1] != x {
goto end7bcf9cfeb69a0d7647389124eb53ce2a
}
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto end7bcf9cfeb69a0d7647389124eb53ce2a
end7bcf9cfeb69a0d7647389124eb53ce2a:
;
case OpAMD64XORLconst:
// match: (XORLconst [c] (MOVLconst [d]))
// cond:
// result: (MOVLconst [c^d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVLconst {
goto end71238075b10b68a226903cc453c4715c
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c ^ d
return true
}
goto end71238075b10b68a226903cc453c4715c
end71238075b10b68a226903cc453c4715c:
;
case OpAMD64XORQ:
// match: (XORQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (XORQconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end452341f950062e0483f16438fb9ec500
}
c := v.Args[1].AuxInt
if !(is32Bit(c)) {
goto end452341f950062e0483f16438fb9ec500
}
v.Op = OpAMD64XORQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end452341f950062e0483f16438fb9ec500
end452341f950062e0483f16438fb9ec500:
;
// match: (XORQ (MOVQconst [c]) x)
// cond: is32Bit(c)
// result: (XORQconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVQconst {
goto endd221a7e3daaaaa29ee385ad36e061b57
}
c := v.Args[0].AuxInt
x := v.Args[1]
if !(is32Bit(c)) {
goto endd221a7e3daaaaa29ee385ad36e061b57
}
v.Op = OpAMD64XORQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endd221a7e3daaaaa29ee385ad36e061b57
endd221a7e3daaaaa29ee385ad36e061b57:
;
// match: (XORQ x x)
// cond:
// result: (MOVQconst [0])
{
x := v.Args[0]
if v.Args[1] != x {
goto end10575a5d711cf14e6d4dffbb0e8dfaeb
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto end10575a5d711cf14e6d4dffbb0e8dfaeb
end10575a5d711cf14e6d4dffbb0e8dfaeb:
;
case OpAMD64XORQconst:
// match: (XORQconst [c] (MOVQconst [d]))
// cond:
// result: (MOVQconst [c^d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVQconst {
goto end3f404d4f07362319fbad2e1ba0827a9f
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c ^ d
return true
}
goto end3f404d4f07362319fbad2e1ba0827a9f
end3f404d4f07362319fbad2e1ba0827a9f:
;
case OpAMD64XORW:
// match: (XORW x (MOVWconst [c]))
// cond:
// result: (XORWconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto end2ca109efd66c221a5691a4da95ec6c67
}
c := v.Args[1].AuxInt
v.Op = OpAMD64XORWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end2ca109efd66c221a5691a4da95ec6c67
end2ca109efd66c221a5691a4da95ec6c67:
;
// match: (XORW (MOVWconst [c]) x)
// cond:
// result: (XORWconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVWconst {
goto end51ee62a06d4301e5a4aed7a6639b1d53
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64XORWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end51ee62a06d4301e5a4aed7a6639b1d53
end51ee62a06d4301e5a4aed7a6639b1d53:
;
// match: (XORW x x)
// cond:
// result: (MOVWconst [0])
{
x := v.Args[0]
if v.Args[1] != x {
goto end07f332e857be0c2707797ed480a2faf4
}
v.Op = OpAMD64MOVWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto end07f332e857be0c2707797ed480a2faf4
end07f332e857be0c2707797ed480a2faf4:
;
case OpAMD64XORWconst:
// match: (XORWconst [c] (MOVWconst [d]))
// cond:
// result: (MOVWconst [c^d])
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVWconst {
goto ende24881ccdfa8486c4593fd9aa5df1ed6
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c ^ d
return true
}
goto ende24881ccdfa8486c4593fd9aa5df1ed6
ende24881ccdfa8486c4593fd9aa5df1ed6:
;
case OpXor16:
// match: (Xor16 x y)
// cond:
// result: (XORW x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64XORW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end20efdd5dfd5130abf818de5546a991a0
end20efdd5dfd5130abf818de5546a991a0:
;
case OpXor32:
// match: (Xor32 x y)
// cond:
// result: (XORL x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64XORL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end9da6bce98b437e2609488346116a75d8
end9da6bce98b437e2609488346116a75d8:
;
case OpXor64:
// match: (Xor64 x y)
// cond:
// result: (XORQ x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64XORQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endc88cd189c2a6f07ecff324ed94809f8f
endc88cd189c2a6f07ecff324ed94809f8f:
;
case OpXor8:
// match: (Xor8 x y)
// cond:
// result: (XORB x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64XORB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end50f4434ef96916d3e65ad3cc236d1723
end50f4434ef96916d3e65ad3cc236d1723:
;
case OpZero:
// match: (Zero [0] _ mem)
// cond:
// result: mem
{
if v.AuxInt != 0 {
goto endc9a38a60f0322f93682daa824611272c
}
mem := v.Args[1]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = mem.Type
v.AddArg(mem)
return true
}
goto endc9a38a60f0322f93682daa824611272c
endc9a38a60f0322f93682daa824611272c:
;
// match: (Zero [1] destptr mem)
// cond:
// result: (MOVBstore destptr (MOVBconst <config.Frontend().TypeInt8()> [0]) mem)
{
if v.AuxInt != 1 {
goto end56bcaef03cce4d15c03efff669bb5585
}
destptr := v.Args[0]
mem := v.Args[1]
v.Op = OpAMD64MOVBstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpAMD64MOVBconst, TypeInvalid)
v0.Type = config.Frontend().TypeInt8()
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
goto end56bcaef03cce4d15c03efff669bb5585
end56bcaef03cce4d15c03efff669bb5585:
;
// match: (Zero [2] destptr mem)
// cond:
// result: (MOVWstore destptr (MOVWconst <config.Frontend().TypeInt16()> [0]) mem)
{
if v.AuxInt != 2 {
goto endf52f08f1f7b0ae220c4cfca6586a8586
}
destptr := v.Args[0]
mem := v.Args[1]
v.Op = OpAMD64MOVWstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpAMD64MOVWconst, TypeInvalid)
v0.Type = config.Frontend().TypeInt16()
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
goto endf52f08f1f7b0ae220c4cfca6586a8586
endf52f08f1f7b0ae220c4cfca6586a8586:
;
// match: (Zero [4] destptr mem)
// cond:
// result: (MOVLstore destptr (MOVLconst <config.Frontend().TypeInt32()> [0]) mem)
{
if v.AuxInt != 4 {
goto end41c91e0c7a23e233de77812b5264fd10
}
destptr := v.Args[0]
mem := v.Args[1]
v.Op = OpAMD64MOVLstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpAMD64MOVLconst, TypeInvalid)
v0.Type = config.Frontend().TypeInt32()
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
goto end41c91e0c7a23e233de77812b5264fd10
end41c91e0c7a23e233de77812b5264fd10:
;
// match: (Zero [8] destptr mem)
// cond:
// result: (MOVQstore destptr (MOVQconst <config.Frontend().TypeInt64()> [0]) mem)
{
if v.AuxInt != 8 {
goto end157ad586af643d8dac6cc84a776000ca
}
destptr := v.Args[0]
mem := v.Args[1]
v.Op = OpAMD64MOVQstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, TypeInvalid)
v0.Type = config.Frontend().TypeInt64()
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
goto end157ad586af643d8dac6cc84a776000ca
end157ad586af643d8dac6cc84a776000ca:
;
// match: (Zero [size] destptr mem)
// cond: size < 4*8
// result: (MOVXzero [size] destptr mem)
{
size := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(size < 4*8) {
goto endf0a22f1506977610ac0a310eee152075
}
v.Op = OpAMD64MOVXzero
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = size
v.AddArg(destptr)
v.AddArg(mem)
return true
}
goto endf0a22f1506977610ac0a310eee152075
endf0a22f1506977610ac0a310eee152075:
;
// match: (Zero [size] destptr mem)
// cond: size >= 4*8
// result: (Zero [size%8] (OffPtr <config.Frontend().TypeUInt64()> [size-(size%8)] destptr) (REPSTOSQ <TypeMem> destptr (MOVQconst <config.Frontend().TypeUInt64()> [size/8]) mem))
{
size := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(size >= 4*8) {
goto end84c39fe2e8d40e0042a10741a0ef16bd
}
v.Op = OpZero
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = size % 8
v0 := b.NewValue0(v.Line, OpOffPtr, TypeInvalid)
v0.Type = config.Frontend().TypeUInt64()
v0.AuxInt = size - (size % 8)
v0.AddArg(destptr)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64REPSTOSQ, TypeInvalid)
v1.Type = TypeMem
v1.AddArg(destptr)
v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, TypeInvalid)
v2.Type = config.Frontend().TypeUInt64()
v2.AuxInt = size / 8
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
goto end84c39fe2e8d40e0042a10741a0ef16bd
end84c39fe2e8d40e0042a10741a0ef16bd:
	// [blame-view residue removed: CL 12600 "[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled", 2015-07-23]
;
case OpZeroExt16to32:
// match: (ZeroExt16to32 x)
// cond:
// result: (MOVWQZX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVWQZX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endbfff79412a2cc96095069c66812844b4
endbfff79412a2cc96095069c66812844b4:
;
case OpZeroExt16to64:
// match: (ZeroExt16to64 x)
// cond:
// result: (MOVWQZX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVWQZX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end7a40262c5c856101058d2bd518ed0910
end7a40262c5c856101058d2bd518ed0910:
;
case OpZeroExt32to64:
// match: (ZeroExt32to64 x)
// cond:
// result: (MOVLQZX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVLQZX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto enddf83bdc8cc6c5673a9ef7aca7affe45a
enddf83bdc8cc6c5673a9ef7aca7affe45a:
;
case OpZeroExt8to16:
// match: (ZeroExt8to16 x)
// cond:
// result: (MOVBQZX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVBQZX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endd03d53d2a585727e4107ae1a3cc55479
endd03d53d2a585727e4107ae1a3cc55479:
;
case OpZeroExt8to32:
// match: (ZeroExt8to32 x)
// cond:
// result: (MOVBQZX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVBQZX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endcbd33e965b3dab14fced5ae93d8949de
endcbd33e965b3dab14fced5ae93d8949de:
;
case OpZeroExt8to64:
// match: (ZeroExt8to64 x)
// cond:
// result: (MOVBQZX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVBQZX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end63ae7cc15db9d15189b2f1342604b2cb
end63ae7cc15db9d15189b2f1342604b2cb:
}
return false
}
func rewriteBlockAMD64(b *Block) bool {
switch b.Kind {
case BlockAMD64EQ:
// match: (EQ (InvertFlags cmp) yes no)
// cond:
// result: (EQ cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto end6b8e9afc73b1c4d528f31a60d2575fae
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64EQ
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end6b8e9afc73b1c4d528f31a60d2575fae
end6b8e9afc73b1c4d528f31a60d2575fae:
;
case BlockAMD64GE:
// match: (GE (InvertFlags cmp) yes no)
// cond:
// result: (LE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto end0610f000a6988ee8310307ec2ea138f8
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64LE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end0610f000a6988ee8310307ec2ea138f8
end0610f000a6988ee8310307ec2ea138f8:
;
case BlockAMD64GT:
// match: (GT (InvertFlags cmp) yes no)
// cond:
// result: (LT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto endf60c0660b6a8aa9565c97fc87f04eb34
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64LT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto endf60c0660b6a8aa9565c97fc87f04eb34
endf60c0660b6a8aa9565c97fc87f04eb34:
;
case BlockIf:
// match: (If (SETL cmp) yes no)
// cond:
// result: (LT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETL {
goto end94277282f4b83f0c035b23711a075801
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64LT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end94277282f4b83f0c035b23711a075801
end94277282f4b83f0c035b23711a075801:
;
// match: (If (SETLE cmp) yes no)
// cond:
// result: (LE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETLE {
goto enda84798dd797927b54a9a2987421b2ba2
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64LE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto enda84798dd797927b54a9a2987421b2ba2
enda84798dd797927b54a9a2987421b2ba2:
;
// match: (If (SETG cmp) yes no)
// cond:
// result: (GT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETG {
goto end3434ef985979cbf394455ab5b559567c
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64GT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end3434ef985979cbf394455ab5b559567c
end3434ef985979cbf394455ab5b559567c:
;
// match: (If (SETGE cmp) yes no)
// cond:
// result: (GE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETGE {
goto endee147d81d8620a5e23cb92bd9f13cf8d
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64GE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto endee147d81d8620a5e23cb92bd9f13cf8d
endee147d81d8620a5e23cb92bd9f13cf8d:
;
// match: (If (SETEQ cmp) yes no)
// cond:
// result: (EQ cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETEQ {
goto ende7d85ccc850fc3963c50a91df096de17
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64EQ
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto ende7d85ccc850fc3963c50a91df096de17
ende7d85ccc850fc3963c50a91df096de17:
;
// match: (If (SETNE cmp) yes no)
// cond:
// result: (NE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETNE {
goto endba4b54260ecda1b5731b129c0eb493d0
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64NE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto endba4b54260ecda1b5731b129c0eb493d0
endba4b54260ecda1b5731b129c0eb493d0:
;
// match: (If (SETB cmp) yes no)
// cond:
// result: (ULT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETB {
goto endf84eedfcd3f18f5c9c3f3d1045a24330
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64ULT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto endf84eedfcd3f18f5c9c3f3d1045a24330
endf84eedfcd3f18f5c9c3f3d1045a24330:
;
// match: (If (SETBE cmp) yes no)
// cond:
// result: (ULE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETBE {
goto endfe0178f6f4406945ca8966817d04be60
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64ULE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto endfe0178f6f4406945ca8966817d04be60
endfe0178f6f4406945ca8966817d04be60:
;
// match: (If (SETA cmp) yes no)
// cond:
// result: (UGT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETA {
goto end2b5a2d7756bdba01a732bf54d9acdb73
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64UGT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end2b5a2d7756bdba01a732bf54d9acdb73
end2b5a2d7756bdba01a732bf54d9acdb73:
;
// match: (If (SETAE cmp) yes no)
// cond:
// result: (UGE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETAE {
goto end9bea9963c3c5dfb97249a5feb8287f94
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64UGE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end9bea9963c3c5dfb97249a5feb8287f94
end9bea9963c3c5dfb97249a5feb8287f94:
;
// match: (If cond yes no)
// cond:
// result: (NE (TESTB <TypeFlags> cond cond) yes no)
{
v := b.Control
cond := v
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64NE
v0 := b.NewValue0(v.Line, OpAMD64TESTB, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(cond)
v0.AddArg(cond)
b.Control = v0
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end012351592edfc708bd3181d7e53f3993
end012351592edfc708bd3181d7e53f3993:
;
case BlockAMD64LE:
// match: (LE (InvertFlags cmp) yes no)
// cond:
// result: (GE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto end0d49d7d087fe7578e8015cf13dae37e3
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64GE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end0d49d7d087fe7578e8015cf13dae37e3
end0d49d7d087fe7578e8015cf13dae37e3:
;
case BlockAMD64LT:
// match: (LT (InvertFlags cmp) yes no)
// cond:
// result: (GT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto end6a408cde0fee0ae7b7da0443c8d902bf
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64GT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end6a408cde0fee0ae7b7da0443c8d902bf
end6a408cde0fee0ae7b7da0443c8d902bf:
;
case BlockAMD64NE:
// match: (NE (TESTB (SETL cmp)) yes no)
// cond:
// result: (LT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64TESTB {
goto end0b9ca165d6b395de676eebef94bc62f7
}
if v.Args[0].Op != OpAMD64SETL {
goto end0b9ca165d6b395de676eebef94bc62f7
}
cmp := v.Args[0].Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64LT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end0b9ca165d6b395de676eebef94bc62f7
end0b9ca165d6b395de676eebef94bc62f7:
;
// match: (NE (TESTB (SETLE cmp)) yes no)
// cond:
// result: (LE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64TESTB {
goto endaaba0ee4d0ff8c66a1c3107d2a14c4bc
}
if v.Args[0].Op != OpAMD64SETLE {
goto endaaba0ee4d0ff8c66a1c3107d2a14c4bc
}
cmp := v.Args[0].Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64LE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto endaaba0ee4d0ff8c66a1c3107d2a14c4bc
endaaba0ee4d0ff8c66a1c3107d2a14c4bc:
;
// match: (NE (TESTB (SETG cmp)) yes no)
// cond:
// result: (GT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64TESTB {
goto end1b689463137526b36ba9ceed1e76e512
}
if v.Args[0].Op != OpAMD64SETG {
goto end1b689463137526b36ba9ceed1e76e512
}
cmp := v.Args[0].Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64GT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end1b689463137526b36ba9ceed1e76e512
end1b689463137526b36ba9ceed1e76e512:
;
// match: (NE (TESTB (SETGE cmp)) yes no)
// cond:
// result: (GE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64TESTB {
goto end99eefee595c658b997f41577ed853c2e
}
if v.Args[0].Op != OpAMD64SETGE {
goto end99eefee595c658b997f41577ed853c2e
}
cmp := v.Args[0].Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64GE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end99eefee595c658b997f41577ed853c2e
end99eefee595c658b997f41577ed853c2e:
;
// match: (NE (TESTB (SETEQ cmp)) yes no)
// cond:
// result: (EQ cmp yes no)
{
v := b.Control
if v.Op != OpAMD64TESTB {
goto end371b67d3d63e9b92d848b09c3324e8b9
}
if v.Args[0].Op != OpAMD64SETEQ {
goto end371b67d3d63e9b92d848b09c3324e8b9
}
cmp := v.Args[0].Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64EQ
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end371b67d3d63e9b92d848b09c3324e8b9
end371b67d3d63e9b92d848b09c3324e8b9:
;
// match: (NE (TESTB (SETNE cmp)) yes no)
// cond:
// result: (NE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64TESTB {
goto endd245f2aac2191d32e57cd2e321daa453
}
if v.Args[0].Op != OpAMD64SETNE {
goto endd245f2aac2191d32e57cd2e321daa453
}
cmp := v.Args[0].Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64NE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto endd245f2aac2191d32e57cd2e321daa453
endd245f2aac2191d32e57cd2e321daa453:
;
// match: (NE (TESTB (SETB cmp)) yes no)
// cond:
// result: (ULT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64TESTB {
goto end90c4bec851e734d37457d611b1a5ff28
}
if v.Args[0].Op != OpAMD64SETB {
goto end90c4bec851e734d37457d611b1a5ff28
}
cmp := v.Args[0].Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64ULT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end90c4bec851e734d37457d611b1a5ff28
end90c4bec851e734d37457d611b1a5ff28:
;
// match: (NE (TESTB (SETBE cmp)) yes no)
// cond:
// result: (ULE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64TESTB {
goto end3a68a28114e9b89ee0708823386bc1ee
}
if v.Args[0].Op != OpAMD64SETBE {
goto end3a68a28114e9b89ee0708823386bc1ee
}
cmp := v.Args[0].Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64ULE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end3a68a28114e9b89ee0708823386bc1ee
end3a68a28114e9b89ee0708823386bc1ee:
;
// match: (NE (TESTB (SETA cmp)) yes no)
// cond:
// result: (UGT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64TESTB {
goto end16496f57185756e960d536b057c776c0
}
if v.Args[0].Op != OpAMD64SETA {
goto end16496f57185756e960d536b057c776c0
}
cmp := v.Args[0].Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64UGT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end16496f57185756e960d536b057c776c0
end16496f57185756e960d536b057c776c0:
;
// match: (NE (TESTB (SETAE cmp)) yes no)
// cond:
// result: (UGE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64TESTB {
goto endbd122fd599aeb9e60881a0fa735e2fde
}
if v.Args[0].Op != OpAMD64SETAE {
goto endbd122fd599aeb9e60881a0fa735e2fde
}
cmp := v.Args[0].Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64UGE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto endbd122fd599aeb9e60881a0fa735e2fde
endbd122fd599aeb9e60881a0fa735e2fde:
;
// match: (NE (InvertFlags cmp) yes no)
// cond:
// result: (NE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto end713001aba794e50b582fbff930e110af
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64NE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end713001aba794e50b582fbff930e110af
end713001aba794e50b582fbff930e110af:
;
case BlockAMD64UGE:
// match: (UGE (InvertFlags cmp) yes no)
// cond:
// result: (ULE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto ende3e4ddc183ca1a46598b11c2d0d13966
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64ULE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto ende3e4ddc183ca1a46598b11c2d0d13966
ende3e4ddc183ca1a46598b11c2d0d13966:
;
case BlockAMD64UGT:
// match: (UGT (InvertFlags cmp) yes no)
// cond:
// result: (ULT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto end49818853af2e5251175d06c62768cae7
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64ULT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end49818853af2e5251175d06c62768cae7
end49818853af2e5251175d06c62768cae7:
;
case BlockAMD64ULE:
// match: (ULE (InvertFlags cmp) yes no)
// cond:
// result: (UGE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto endd6698aac0d67261293b558c95ea17b4f
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64UGE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto endd6698aac0d67261293b558c95ea17b4f
endd6698aac0d67261293b558c95ea17b4f:
;
case BlockAMD64ULT:
// match: (ULT (InvertFlags cmp) yes no)
// cond:
// result: (UGT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto end35105dbc9646f02577167e45ae2f2fd2
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64UGT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end35105dbc9646f02577167e45ae2f2fd2
end35105dbc9646f02577167e45ae2f2fd2:
}
return false
}