go/src/cmd/compile/internal/ssa/rewriteAMD64.go


// autogenerated from gen/AMD64.rules: do not edit!
// generated with: cd gen; go run *.go
package ssa
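// rewriteValueAMD64 applies the AMD64-specific rewrite rules to v. Each rule
// below is a generated block: it checks the shape of v and its arguments,
// jumps past the block (via the hash-named label) if the match fails, and on
// success rewrites v in place by resetting Op, AuxInt, Aux, and Args. The
// function reports whether any rule fired.
//
// Each block corresponds to one line in gen/AMD64.rules. For the first ADDQ
// rule, that line presumably looks roughly like this (assumed DSL syntax,
// not verbatim):
//
//	(ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x)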
func rewriteValueAMD64(v *Value, config *Config) bool {
switch v.Op {
case OpAMD64ADDQ:
// match: (ADDQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (ADDQconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end1de8aeb1d043e0dadcffd169a99ce5c0
}
c := v.Args[1].AuxInt
if !(is32Bit(c)) {
goto end1de8aeb1d043e0dadcffd169a99ce5c0
}
v.Op = OpAMD64ADDQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end1de8aeb1d043e0dadcffd169a99ce5c0
end1de8aeb1d043e0dadcffd169a99ce5c0:
;
// match: (ADDQ (MOVQconst [c]) x)
// cond: is32Bit(c)
// result: (ADDQconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVQconst {
goto endca635e3bdecd9e3aeb892f841021dfaa
}
c := v.Args[0].AuxInt
x := v.Args[1]
if !(is32Bit(c)) {
goto endca635e3bdecd9e3aeb892f841021dfaa
}
v.Op = OpAMD64ADDQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endca635e3bdecd9e3aeb892f841021dfaa
endca635e3bdecd9e3aeb892f841021dfaa:
;
// match: (ADDQ x (SHLQconst [3] y))
// cond:
// result: (LEAQ8 x y)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64SHLQconst {
goto endc02313d35a0525d1d680cd58992e820d
}
if v.Args[1].AuxInt != 3 {
goto endc02313d35a0525d1d680cd58992e820d
}
y := v.Args[1].Args[0]
v.Op = OpAMD64LEAQ8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endc02313d35a0525d1d680cd58992e820d
endc02313d35a0525d1d680cd58992e820d:
;
case OpAMD64ADDQconst:
// match: (ADDQconst [c] (LEAQ8 [d] x y))
// cond:
// result: (LEAQ8 [addOff(c, d)] x y)
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64LEAQ8 {
goto ende2cc681c9abf9913288803fb1b39e639
}
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
v.Op = OpAMD64LEAQ8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(c, d)
v.AddArg(x)
v.AddArg(y)
return true
}
goto ende2cc681c9abf9913288803fb1b39e639
ende2cc681c9abf9913288803fb1b39e639:
;
// match: (ADDQconst [0] x)
// cond:
// result: (Copy x)
{
if v.AuxInt != 0 {
goto end288952f259d4a1842f1e8d5c389b3f28
}
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end288952f259d4a1842f1e8d5c389b3f28
end288952f259d4a1842f1e8d5c389b3f28:
;
case OpAMD64ANDQ:
// match: (ANDQ x (MOVQconst [c]))
// cond:
// result: (ANDQconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto endb98096e3bbb90933e39c88bf41c688a9
}
c := v.Args[1].AuxInt
v.Op = OpAMD64ANDQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endb98096e3bbb90933e39c88bf41c688a9
endb98096e3bbb90933e39c88bf41c688a9:
;
// match: (ANDQ (MOVQconst [c]) x)
// cond:
// result: (ANDQconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVQconst {
goto endd313fd1897a0d2bc79eff70159a81b6b
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64ANDQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endd313fd1897a0d2bc79eff70159a81b6b
endd313fd1897a0d2bc79eff70159a81b6b:
;
case OpAMD64ANDQconst:
// match: (ANDQconst [0] _)
// cond:
// result: (MOVQconst [0])
{
if v.AuxInt != 0 {
goto endf2afa4d9d31c344d6638dcdced383cf1
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto endf2afa4d9d31c344d6638dcdced383cf1
endf2afa4d9d31c344d6638dcdced383cf1:
;
// match: (ANDQconst [-1] x)
// cond:
// result: (Copy x)
{
if v.AuxInt != -1 {
goto end646afc7b328db89ad16ebfa156ae26e5
}
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end646afc7b328db89ad16ebfa156ae26e5
end646afc7b328db89ad16ebfa156ae26e5:
;
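// Note on the sub-register lowerings below (ADDW, ADDL, ADDB, ANDW, MULW,
// NEGW, ...): per the CL that introduced these rules, the high bits of a
// register holding a value narrower than 64 bits are treated as junk, so
// these ops do not need to re-extend their results; ops that care about the
// full width are expected to extend their inputs themselves.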
case OpAdd16:
// match: (Add16 x y)
// cond:
// result: (ADDW x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ADDW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto ende604481c6de9fe4574cb2954ba2ddc67
ende604481c6de9fe4574cb2954ba2ddc67:
;
case OpAdd32:
// match: (Add32 x y)
// cond:
// result: (ADDL x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ADDL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endc445ea2a65385445676cd684ae9a42b5
endc445ea2a65385445676cd684ae9a42b5:
;
case OpAdd64:
// match: (Add64 x y)
// cond:
// result: (ADDQ x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ADDQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endd88f18b3f39e3ccc201477a616f0abc0
endd88f18b3f39e3ccc201477a616f0abc0:
;
case OpAdd8:
// match: (Add8 x y)
// cond:
// result: (ADDB x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ADDB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end6117c84a6b75c1b816b3fb095bc5f656
end6117c84a6b75c1b816b3fb095bc5f656:
;
case OpAddPtr:
// match: (AddPtr x y)
// cond:
// result: (ADDQ x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ADDQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto enda1d5640788c7157996f9d4af602dec1c
enda1d5640788c7157996f9d4af602dec1c:
;
case OpAddr:
// match: (Addr {sym} base)
// cond:
// result: (LEAQ {sym} base)
{
sym := v.Aux
base := v.Args[0]
v.Op = OpAMD64LEAQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Aux = sym
v.AddArg(base)
return true
}
goto end53cad0c3c9daa5575680e77c14e05e72
end53cad0c3c9daa5575680e77c14e05e72:
;
case OpAnd16:
// match: (And16 x y)
// cond:
// result: (ANDW x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end1c01f04a173d86ce1a6d1ef59e753014
end1c01f04a173d86ce1a6d1ef59e753014:
;
case OpAnd32:
// match: (And32 x y)
// cond:
// result: (ANDL x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end6b9eb9375b3a859028a6ba6bf6b8ec88
end6b9eb9375b3a859028a6ba6bf6b8ec88:
;
case OpAnd64:
// match: (And64 x y)
// cond:
// result: (ANDQ x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto enda0bde5853819d05fa2b7d3b723629552
enda0bde5853819d05fa2b7d3b723629552:
;
case OpAnd8:
// match: (And8 x y)
// cond:
// result: (ANDB x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64ANDB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end0f53bee6291f1229b43aa1b5f977b4f2
end0f53bee6291f1229b43aa1b5f977b4f2:
;
case OpAMD64CMOVQCC:
// match: (CMOVQCC (CMPQconst [c] (MOVQconst [d])) _ x)
// cond: inBounds(d, c)
// result: (Copy x)
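// When the controlling comparison is between two constants, the conditional
// move can be decided at compile time: inBounds(d, c) determines which of the
// two value arguments the CMOV would select, and this rule and the next keep
// just that argument for the two possible outcomes.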
{
if v.Args[0].Op != OpAMD64CMPQconst {
goto endd5357f3fd5516dcc859c8c5b3c9efaa4
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVQconst {
goto endd5357f3fd5516dcc859c8c5b3c9efaa4
}
d := v.Args[0].Args[0].AuxInt
x := v.Args[2]
if !(inBounds(d, c)) {
goto endd5357f3fd5516dcc859c8c5b3c9efaa4
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endd5357f3fd5516dcc859c8c5b3c9efaa4
endd5357f3fd5516dcc859c8c5b3c9efaa4:
;
// match: (CMOVQCC (CMPQconst [c] (MOVQconst [d])) x _)
// cond: !inBounds(d, c)
// result: (Copy x)
{
if v.Args[0].Op != OpAMD64CMPQconst {
goto end6ad8b1758415a9afe758272b34970d5d
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVQconst {
goto end6ad8b1758415a9afe758272b34970d5d
}
d := v.Args[0].Args[0].AuxInt
x := v.Args[1]
if !(!inBounds(d, c)) {
goto end6ad8b1758415a9afe758272b34970d5d
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end6ad8b1758415a9afe758272b34970d5d
end6ad8b1758415a9afe758272b34970d5d:
;
case OpAMD64CMPQ:
// match: (CMPQ x (MOVQconst [c]))
// cond:
// result: (CMPQconst x [c])
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end32ef1328af280ac18fa8045a3502dae9
}
c := v.Args[1].AuxInt
v.Op = OpAMD64CMPQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AuxInt = c
return true
}
goto end32ef1328af280ac18fa8045a3502dae9
end32ef1328af280ac18fa8045a3502dae9:
;
// match: (CMPQ (MOVQconst [c]) x)
// cond:
// result: (InvertFlags (CMPQconst <TypeFlags> x [c]))
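// CMPQconst only takes the constant as its immediate, so when the constant is
// the first operand the comparison is emitted with the operands swapped and
// wrapped in InvertFlags, which records the reversed operand order so that
// consumers of the flags can compensate.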
{
if v.Args[0].Op != OpAMD64MOVQconst {
goto endf8ca12fe79290bc82b11cfa463bc9413
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64InvertFlags
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AuxInt = c
v.AddArg(v0)
return true
}
goto endf8ca12fe79290bc82b11cfa463bc9413
endf8ca12fe79290bc82b11cfa463bc9413:
;
case OpClosureCall:
// match: (ClosureCall [argwid] entry closure mem)
// cond:
// result: (CALLclosure [argwid] entry closure mem)
{
argwid := v.AuxInt
entry := v.Args[0]
closure := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64CALLclosure
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = argwid
v.AddArg(entry)
v.AddArg(closure)
v.AddArg(mem)
return true
}
goto endfd75d26316012d86cb71d0dd1214259b
endfd75d26316012d86cb71d0dd1214259b:
;
case OpConst:
// match: (Const <t> [val])
// cond: t.IsInteger()
// result: (MOVQconst [val])
{
t := v.Type
val := v.AuxInt
if !(t.IsInteger()) {
goto end4c8bfe9df26fc5aa2bd76b211792732a
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = val
return true
}
goto end4c8bfe9df26fc5aa2bd76b211792732a
end4c8bfe9df26fc5aa2bd76b211792732a:
;
// match: (Const <t>)
// cond: t.IsPtr()
// result: (MOVQconst [0])
{
t := v.Type
if !(t.IsPtr()) {
goto endd23abe8d7061f11c260b162e24eec060
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto endd23abe8d7061f11c260b162e24eec060
endd23abe8d7061f11c260b162e24eec060:
;
// match: (Const <t>)
// cond: t.IsBoolean() && !v.Aux.(bool)
// result: (MOVQconst [0])
{
t := v.Type
if !(t.IsBoolean() && !v.Aux.(bool)) {
goto end7b1347fd0902b990ee1e49145c7e8c31
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto end7b1347fd0902b990ee1e49145c7e8c31
end7b1347fd0902b990ee1e49145c7e8c31:
;
// match: (Const <t>)
// cond: t.IsBoolean() && v.Aux.(bool)
// result: (MOVQconst [1])
{
t := v.Type
if !(t.IsBoolean() && v.Aux.(bool)) {
goto ende0d1c954b5ab5af7227bff9635774f1c
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 1
return true
}
goto ende0d1c954b5ab5af7227bff9635774f1c
ende0d1c954b5ab5af7227bff9635774f1c:
;
case OpConvNop:
// match: (ConvNop <t> x)
// cond: t == x.Type
// result: (Copy x)
{
t := v.Type
x := v.Args[0]
if !(t == x.Type) {
goto end6c588ed8aedc7dca8c06b4ada77e3ddd
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end6c588ed8aedc7dca8c06b4ada77e3ddd
end6c588ed8aedc7dca8c06b4ada77e3ddd:
;
// match: (ConvNop <t> x)
// cond: t.IsInteger() && x.Type.IsInteger() && t.Size() == x.Type.Size()
// result: (Copy x)
{
t := v.Type
x := v.Args[0]
if !(t.IsInteger() && x.Type.IsInteger() && t.Size() == x.Type.Size()) {
goto endfb3563f9df3468ad8123dbaa962cdbf7
}
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endfb3563f9df3468ad8123dbaa962cdbf7
endfb3563f9df3468ad8123dbaa962cdbf7:
;
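// The generic comparisons below all lower to the same two-step pattern: a CMP
// of the appropriate width produces the flags, and a SETcc materializes the
// boolean result from them.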
case OpEq16:
// match: (Eq16 x y)
// cond:
// result: (SETEQ (CMPW <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETEQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end66a03470b5b3e8457ba205ccfcaccea6
end66a03470b5b3e8457ba205ccfcaccea6:
;
case OpEq32:
// match: (Eq32 x y)
// cond:
// result: (SETEQ (CMPL <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETEQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end4d77d0b016f93817fd6e5f60fa0e7ef2
end4d77d0b016f93817fd6e5f60fa0e7ef2:
;
case OpEq64:
// match: (Eq64 x y)
// cond:
// result: (SETEQ (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETEQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endae6c62e4e20b4f62694b6ee40dbd9211
endae6c62e4e20b4f62694b6ee40dbd9211:
;
case OpEq8:
// match: (Eq8 x y)
// cond:
// result: (SETEQ (CMPB <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETEQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end84a692e769900e3adbfe00718d2169e0
end84a692e769900e3adbfe00718d2169e0:
;
case OpEqPtr:
// match: (EqPtr x y)
// cond:
// result: (SETEQ (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETEQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end6de1d39c9d151e5e503d643bd835356e
end6de1d39c9d151e5e503d643bd835356e:
;
case OpGeq64:
// match: (Geq64 x y)
// cond:
// result: (SETGE (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETGE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end63f44e3fec8d92723b5bde42d6d7eea0
end63f44e3fec8d92723b5bde42d6d7eea0:
;
case OpGreater64:
// match: (Greater64 x y)
// cond:
// result: (SETG (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETG
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endaef0cfa5e27e23cf5e527061cf251069
endaef0cfa5e27e23cf5e527061cf251069:
;
case OpIsInBounds:
// match: (IsInBounds idx len)
// cond:
// result: (SETB (CMPQ <TypeFlags> idx len))
{
idx := v.Args[0]
len := v.Args[1]
v.Op = OpAMD64SETB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(idx)
v0.AddArg(len)
v.AddArg(v0)
return true
}
goto endb51d371171154c0f1613b687757e0576
endb51d371171154c0f1613b687757e0576:
;
case OpIsNonNil:
// match: (IsNonNil p)
// cond:
// result: (SETNE (TESTQ <TypeFlags> p p))
{
p := v.Args[0]
v.Op = OpAMD64SETNE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64TESTQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(p)
v0.AddArg(p)
v.AddArg(v0)
return true
}
goto endff508c3726edfb573abc6128c177e76c
endff508c3726edfb573abc6128c177e76c:
;
case OpLeq64:
// match: (Leq64 x y)
// cond:
// result: (SETLE (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETLE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endf03da5e28dccdb4797671f39e824fb10
endf03da5e28dccdb4797671f39e824fb10:
;
case OpLess64:
// match: (Less64 x y)
// cond:
// result: (SETL (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endf8e7a24c25692045bbcfd2c9356d1a8c
endf8e7a24c25692045bbcfd2c9356d1a8c:
;
case OpLoad:
// match: (Load <t> ptr mem)
// cond: (is64BitInt(t) || isPtr(t))
// result: (MOVQload ptr mem)
{
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is64BitInt(t) || isPtr(t)) {
goto end7c4c53acf57ebc5f03273652ba1d5934
}
v.Op = OpAMD64MOVQload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto end7c4c53acf57ebc5f03273652ba1d5934
end7c4c53acf57ebc5f03273652ba1d5934:
;
// match: (Load <t> ptr mem)
// cond: is32BitInt(t)
// result: (MOVLload ptr mem)
{
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is32BitInt(t)) {
goto ende1cfcb15bfbcfd448ce303d0882a4057
}
v.Op = OpAMD64MOVLload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto ende1cfcb15bfbcfd448ce303d0882a4057
ende1cfcb15bfbcfd448ce303d0882a4057:
;
// match: (Load <t> ptr mem)
// cond: is16BitInt(t)
// result: (MOVWload ptr mem)
{
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is16BitInt(t)) {
goto end2d0a1304501ed9f4e9e2d288505a9c7c
}
v.Op = OpAMD64MOVWload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto end2d0a1304501ed9f4e9e2d288505a9c7c
end2d0a1304501ed9f4e9e2d288505a9c7c:
;
// match: (Load <t> ptr mem)
// cond: (t.IsBoolean() || is8BitInt(t))
// result: (MOVBload ptr mem)
{
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(t.IsBoolean() || is8BitInt(t)) {
goto end8f83bf72293670e75b22d6627bd13f0b
}
v.Op = OpAMD64MOVBload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto end8f83bf72293670e75b22d6627bd13f0b
end8f83bf72293670e75b22d6627bd13f0b:
;
case OpLsh64:
// match: (Lsh64 <t> x y)
// cond: y.Type.Size() == 8
// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst <TypeFlags> [64] y)))
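// The AND/SBBQcarrymask combination clamps oversized shifts: SHLQ only uses
// the low 6 bits of the count, so a shift by 64 or more would otherwise wrap.
// CMPQconst [64] y sets the carry flag exactly when y is (unsigned) below 64,
// SBBQcarrymask turns that into an all-ones or all-zeros mask, and the ANDQ
// therefore passes the shifted value through for y < 64 and forces the result
// to zero for y >= 64.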
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
if !(y.Type.Size() == 8) {
goto end04273c7a426341c8f3ecfaa5d653dc6b
}
v.Op = OpAMD64ANDQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64SHLQ, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := v.Block.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
v1.Type = t
v2 := v.Block.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto end04273c7a426341c8f3ecfaa5d653dc6b
end04273c7a426341c8f3ecfaa5d653dc6b:
;
case OpAMD64MOVBQSX:
// match: (MOVBQSX (MOVBload ptr mem))
// cond:
// result: (MOVBQSXload ptr mem)
{
if v.Args[0].Op != OpAMD64MOVBload {
goto enda3a5eeb5767e31f42b0b6c1db8311ebb
}
ptr := v.Args[0].Args[0]
mem := v.Args[0].Args[1]
v.Op = OpAMD64MOVBQSXload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto enda3a5eeb5767e31f42b0b6c1db8311ebb
enda3a5eeb5767e31f42b0b6c1db8311ebb:
;
case OpAMD64MOVBQZX:
// match: (MOVBQZX (MOVBload ptr mem))
// cond:
// result: (MOVBQZXload ptr mem)
{
if v.Args[0].Op != OpAMD64MOVBload {
goto end9510a482da21d9945d53c4233b19e825
}
ptr := v.Args[0].Args[0]
mem := v.Args[0].Args[1]
v.Op = OpAMD64MOVBQZXload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto end9510a482da21d9945d53c4233b19e825
end9510a482da21d9945d53c4233b19e825:
;
case OpAMD64MOVBstore:
// match: (MOVBstore ptr (MOVBQSX x) mem)
// cond:
// result: (MOVBstore ptr x mem)
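// A byte store only writes the low 8 bits, so a sign or zero extension feeding
// it is redundant and the store can use the unextended value directly (this
// rule and the next; the MOVW and MOVL stores below get the same treatment).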
{
ptr := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBQSX {
goto endc356ef104095b9217b36b594f85171c6
}
x := v.Args[1].Args[0]
mem := v.Args[2]
v.Op = OpAMD64MOVBstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(x)
v.AddArg(mem)
return true
}
goto endc356ef104095b9217b36b594f85171c6
endc356ef104095b9217b36b594f85171c6:
;
// match: (MOVBstore ptr (MOVBQZX x) mem)
// cond:
// result: (MOVBstore ptr x mem)
{
ptr := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBQZX {
goto end25841a70cce7ac32c6d5e561b992d3df
}
x := v.Args[1].Args[0]
mem := v.Args[2]
v.Op = OpAMD64MOVBstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(x)
v.AddArg(mem)
return true
}
goto end25841a70cce7ac32c6d5e561b992d3df
end25841a70cce7ac32c6d5e561b992d3df:
;
case OpAMD64MOVLstore:
// match: (MOVLstore ptr (MOVLQSX x) mem)
// cond:
// result: (MOVLstore ptr x mem)
{
ptr := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLQSX {
goto endf79c699f70cb356abb52dc28f4abf46b
}
x := v.Args[1].Args[0]
mem := v.Args[2]
v.Op = OpAMD64MOVLstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(x)
v.AddArg(mem)
return true
}
goto endf79c699f70cb356abb52dc28f4abf46b
endf79c699f70cb356abb52dc28f4abf46b:
;
// match: (MOVLstore ptr (MOVLQZX x) mem)
// cond:
// result: (MOVLstore ptr x mem)
{
ptr := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLQZX {
goto end67d1549d16d373e4ad6a89298866d1bc
}
x := v.Args[1].Args[0]
mem := v.Args[2]
v.Op = OpAMD64MOVLstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(x)
v.AddArg(mem)
return true
}
goto end67d1549d16d373e4ad6a89298866d1bc
end67d1549d16d373e4ad6a89298866d1bc:
;
case OpAMD64MOVQload:
// match: (MOVQload [off1] (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVQload [addOff(off1, off2)] ptr mem)
{
off1 := v.AuxInt
if v.Args[0].Op != OpAMD64ADDQconst {
goto end843d29b538c4483b432b632e5666d6e3
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
v.Op = OpAMD64MOVQload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
goto end843d29b538c4483b432b632e5666d6e3
end843d29b538c4483b432b632e5666d6e3:
;
// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: (sym1 == nil || sym2 == nil)
// result: (MOVQload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
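// Folding a LEAQ into the load combines the two offsets and symbols. The
// sym1 == nil || sym2 == nil condition guards the merge: presumably mergeSym
// can carry at most one non-nil symbol, so the rule only fires when the other
// side has none.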
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ {
goto end227426af95e74caddcf59fdcd30ca8bc
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
if !(sym1 == nil || sym2 == nil) {
goto end227426af95e74caddcf59fdcd30ca8bc
}
v.Op = OpAMD64MOVQload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
goto end227426af95e74caddcf59fdcd30ca8bc
end227426af95e74caddcf59fdcd30ca8bc:
;
// match: (MOVQload [off1] (LEAQ8 [off2] ptr idx) mem)
// cond:
// result: (MOVQloadidx8 [addOff(off1, off2)] ptr idx mem)
{
off1 := v.AuxInt
if v.Args[0].Op != OpAMD64LEAQ8 {
goto end02f5ad148292c46463e7c20d3b821735
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
mem := v.Args[1]
v.Op = OpAMD64MOVQloadidx8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(mem)
return true
}
goto end02f5ad148292c46463e7c20d3b821735
end02f5ad148292c46463e7c20d3b821735:
;
case OpAMD64MOVQloadidx8:
// match: (MOVQloadidx8 [off1] (ADDQconst [off2] ptr) idx mem)
// cond:
// result: (MOVQloadidx8 [addOff(off1, off2)] ptr idx mem)
{
off1 := v.AuxInt
if v.Args[0].Op != OpAMD64ADDQconst {
goto ende81e44bcfb11f90916ccb440c590121f
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
idx := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64MOVQloadidx8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(mem)
return true
}
goto ende81e44bcfb11f90916ccb440c590121f
ende81e44bcfb11f90916ccb440c590121f:
;
case OpAMD64MOVQstore:
// match: (MOVQstore [off1] (ADDQconst [off2] ptr) val mem)
// cond:
// result: (MOVQstore [addOff(off1, off2)] ptr val mem)
{
off1 := v.AuxInt
if v.Args[0].Op != OpAMD64ADDQconst {
goto end2108c693a43c79aed10b9246c39c80aa
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64MOVQstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end2108c693a43c79aed10b9246c39c80aa
end2108c693a43c79aed10b9246c39c80aa:
;
// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: (sym1 == nil || sym2 == nil)
// result: (MOVQstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ {
goto end5061f48193268a5eb1e1740bdd23c43d
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(sym1 == nil || sym2 == nil) {
goto end5061f48193268a5eb1e1740bdd23c43d
}
v.Op = OpAMD64MOVQstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end5061f48193268a5eb1e1740bdd23c43d
end5061f48193268a5eb1e1740bdd23c43d:
;
// match: (MOVQstore [off1] (LEAQ8 [off2] ptr idx) val mem)
// cond:
// result: (MOVQstoreidx8 [addOff(off1, off2)] ptr idx val mem)
{
off1 := v.AuxInt
if v.Args[0].Op != OpAMD64LEAQ8 {
goto endce1db8c8d37c8397c500a2068a65c215
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64MOVQstoreidx8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto endce1db8c8d37c8397c500a2068a65c215
endce1db8c8d37c8397c500a2068a65c215:
;
case OpAMD64MOVQstoreidx8:
// match: (MOVQstoreidx8 [off1] (ADDQconst [off2] ptr) idx val mem)
// cond:
// result: (MOVQstoreidx8 [addOff(off1, off2)] ptr idx val mem)
{
off1 := v.AuxInt
if v.Args[0].Op != OpAMD64ADDQconst {
goto end01c970657b0fdefeab82458c15022163
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
v.Op = OpAMD64MOVQstoreidx8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end01c970657b0fdefeab82458c15022163
end01c970657b0fdefeab82458c15022163:
;
case OpAMD64MOVWstore:
// match: (MOVWstore ptr (MOVWQSX x) mem)
// cond:
// result: (MOVWstore ptr x mem)
{
ptr := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWQSX {
goto endcc13af07a951a61fcfec3299342f7e1f
}
x := v.Args[1].Args[0]
mem := v.Args[2]
v.Op = OpAMD64MOVWstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(x)
v.AddArg(mem)
return true
}
goto endcc13af07a951a61fcfec3299342f7e1f
endcc13af07a951a61fcfec3299342f7e1f:
;
// match: (MOVWstore ptr (MOVWQZX x) mem)
// cond:
// result: (MOVWstore ptr x mem)
{
ptr := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWQZX {
goto end4e7df15ee55bdd73d8ecd61b759134d4
}
x := v.Args[1].Args[0]
mem := v.Args[2]
v.Op = OpAMD64MOVWstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(x)
v.AddArg(mem)
return true
}
goto end4e7df15ee55bdd73d8ecd61b759134d4
end4e7df15ee55bdd73d8ecd61b759134d4:
;
case OpAMD64MULQ:
// match: (MULQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (MULQconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto endb38c6e3e0ddfa25ba0ef9684ac1528c0
}
c := v.Args[1].AuxInt
if !(is32Bit(c)) {
goto endb38c6e3e0ddfa25ba0ef9684ac1528c0
}
v.Op = OpAMD64MULQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endb38c6e3e0ddfa25ba0ef9684ac1528c0
endb38c6e3e0ddfa25ba0ef9684ac1528c0:
;
// match: (MULQ (MOVQconst [c]) x)
// cond: is32Bit(c)
// result: (MULQconst [c] x)
{
if v.Args[0].Op != OpAMD64MOVQconst {
goto end9cb4f29b0bd7141639416735dcbb3b87
}
c := v.Args[0].AuxInt
x := v.Args[1]
if !(is32Bit(c)) {
goto end9cb4f29b0bd7141639416735dcbb3b87
}
v.Op = OpAMD64MULQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end9cb4f29b0bd7141639416735dcbb3b87
end9cb4f29b0bd7141639416735dcbb3b87:
;
case OpAMD64MULQconst:
// match: (MULQconst [-1] x)
// cond:
// result: (NEGQ x)
{
if v.AuxInt != -1 {
goto end82501cca6b5fb121a7f8b197e55f2fec
}
x := v.Args[0]
v.Op = OpAMD64NEGQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end82501cca6b5fb121a7f8b197e55f2fec
end82501cca6b5fb121a7f8b197e55f2fec:
;
// match: (MULQconst [0] _)
// cond:
// result: (MOVQconst [0])
{
if v.AuxInt != 0 {
goto endcb9faa068e3558ff44daaf1d47d091b5
}
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto endcb9faa068e3558ff44daaf1d47d091b5
endcb9faa068e3558ff44daaf1d47d091b5:
;
// match: (MULQconst [1] x)
// cond:
// result: (Copy x)
{
if v.AuxInt != 1 {
goto endd7217a7c6311fc7a3e0736a1b0b5be73
}
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endd7217a7c6311fc7a3e0736a1b0b5be73
endd7217a7c6311fc7a3e0736a1b0b5be73:
;
// match: (MULQconst [3] x)
// cond:
// result: (LEAQ2 x x)
{
if v.AuxInt != 3 {
goto end34a86f261671b5852bec6c57155fe0da
}
x := v.Args[0]
v.Op = OpAMD64LEAQ2
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(x)
return true
}
goto end34a86f261671b5852bec6c57155fe0da
end34a86f261671b5852bec6c57155fe0da:
;
// match: (MULQconst [5] x)
// cond:
// result: (LEAQ4 x x)
{
if v.AuxInt != 5 {
goto end534601906c45a9171a9fec3e4b82b189
}
x := v.Args[0]
v.Op = OpAMD64LEAQ4
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(x)
return true
}
goto end534601906c45a9171a9fec3e4b82b189
end534601906c45a9171a9fec3e4b82b189:
;
// match: (MULQconst [9] x)
// cond:
// result: (LEAQ8 x x)
{
if v.AuxInt != 9 {
goto end48a2280b6459821289c56073b8354997
}
x := v.Args[0]
v.Op = OpAMD64LEAQ8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(x)
return true
}
goto end48a2280b6459821289c56073b8354997
end48a2280b6459821289c56073b8354997:
;
// match: (MULQconst [c] x)
// cond: isPowerOfTwo(c)
// result: (SHLQconst [log2(c)] x)
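// Strength reduction for constant multiplies: the 3, 5, and 9 cases above map
// to LEAQ2/LEAQ4/LEAQ8 x x (that is, x + 2x, x + 4x, x + 8x), and any power of
// two becomes a left shift by log2(c).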
{
c := v.AuxInt
x := v.Args[0]
if !(isPowerOfTwo(c)) {
goto end75076953dbfe022526a153eda99b39b2
}
v.Op = OpAMD64SHLQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = log2(c)
v.AddArg(x)
return true
}
goto end75076953dbfe022526a153eda99b39b2
end75076953dbfe022526a153eda99b39b2:
;
case OpMove:
// match: (Move [size] dst src mem)
// cond:
// result: (REPMOVSB dst src (Const <TypeUInt64> [size]) mem)
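// The generic Move is lowered to REP MOVSB: the byte count is materialized as
// a 64-bit constant operand and the copy proceeds byte by byte.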
{
size := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64REPMOVSB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(dst)
v.AddArg(src)
v0 := v.Block.NewValue0(v.Line, OpConst, TypeInvalid)
v0.Type = TypeUInt64
v0.AuxInt = size
v.AddArg(v0)
v.AddArg(mem)
return true
}
goto end1b2d226705fd31dbbe74e3286af178ea
end1b2d226705fd31dbbe74e3286af178ea:
;
case OpMul16:
// match: (Mul16 x y)
// cond:
// result: (MULW x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MULW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end1addf5ea2c885aa1729b8f944859d00c
end1addf5ea2c885aa1729b8f944859d00c:
;
case OpMul32:
// match: (Mul32 x y)
// cond:
// result: (MULL x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MULL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto ende144381f85808e5144782804768e2859
ende144381f85808e5144782804768e2859:
;
case OpMul64:
// match: (Mul64 x y)
// cond:
// result: (MULQ x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MULQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end38da21e77ac329eb643b20e7d97d5853
end38da21e77ac329eb643b20e7d97d5853:
;
case OpMul8:
// match: (Mul8 x y)
// cond:
// result: (MULW x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MULW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end861428e804347e8489a6424f2e6ce71c
end861428e804347e8489a6424f2e6ce71c:
;
case OpMulPtr:
// match: (MulPtr x y)
// cond:
// result: (MULQ x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MULQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endbbedad106c011a93243e2062afdcc75f
endbbedad106c011a93243e2062afdcc75f:
;
case OpNeg16:
// match: (Neg16 x)
// cond:
// result: (NEGW x)
{
x := v.Args[0]
v.Op = OpAMD64NEGW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end7a8c652f4ffeb49656119af69512edb2
end7a8c652f4ffeb49656119af69512edb2:
;
case OpNeg32:
// match: (Neg32 x)
// cond:
// result: (NEGL x)
{
x := v.Args[0]
v.Op = OpAMD64NEGL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endce1f7e17fc193f6c076e47d5e401e126
endce1f7e17fc193f6c076e47d5e401e126:
;
case OpNeg64:
// match: (Neg64 x)
// cond:
// result: (NEGQ x)
{
x := v.Args[0]
v.Op = OpAMD64NEGQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto enda06c5b1718f2b96aba10bf5a5c437c6c
enda06c5b1718f2b96aba10bf5a5c437c6c:
;
case OpNeg8:
// match: (Neg8 x)
// cond:
// result: (NEGB x)
{
x := v.Args[0]
v.Op = OpAMD64NEGB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end1e5f495a2ac6cdea47b1ae5ba62aa95d
end1e5f495a2ac6cdea47b1ae5ba62aa95d:
;
case OpNeq16:
// match: (Neq16 x y)
// cond:
// result: (SETNE (CMPW <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETNE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto endf177c3b3868606824e43e11da7804572
endf177c3b3868606824e43e11da7804572:
;
case OpNeq32:
// match: (Neq32 x y)
// cond:
// result: (SETNE (CMPL <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETNE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end39c4bf6d063f8a0b6f0064c96ce25173
end39c4bf6d063f8a0b6f0064c96ce25173:
;
case OpNeq64:
// match: (Neq64 x y)
// cond:
// result: (SETNE (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETNE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end8ab0bcb910c0d3213dd8726fbcc4848e
end8ab0bcb910c0d3213dd8726fbcc4848e:
;
case OpNeq8:
// match: (Neq8 x y)
// cond:
// result: (SETNE (CMPB <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETNE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end4aaff28af59a65b3684f4f1897299932
end4aaff28af59a65b3684f4f1897299932:
;
case OpNeqPtr:
// match: (NeqPtr x y)
// cond:
// result: (SETNE (CMPQ <TypeFlags> x y))
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETNE
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end6e180ffd9583cd55361ed3e465158a4c
end6e180ffd9583cd55361ed3e465158a4c:
;
case OpNot:
// match: (Not x)
// cond:
// result: (XORQconst [1] x)
{
x := v.Args[0]
v.Op = OpAMD64XORQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 1
v.AddArg(x)
return true
}
goto endaabd7f5e27417cf3182cd5e4f4360410
endaabd7f5e27417cf3182cd5e4f4360410:
;
case OpOffPtr:
// match: (OffPtr [off] ptr)
// cond:
// result: (ADDQconst [off] ptr)
{
off := v.AuxInt
ptr := v.Args[0]
v.Op = OpAMD64ADDQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = off
v.AddArg(ptr)
return true
}
goto end0429f947ee7ac49ff45a243e461a5290
end0429f947ee7ac49ff45a243e461a5290:
;
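	// The two cases below lower variable-count 64-bit right shifts while
	// preserving Go's semantics for shift counts >= 64: the signed form
	// clamps the count to 63 via CMOVQCC so SARQ still produces all sign
	// bits, and the unsigned form ANDs the SHRQ result with an
	// SBBQcarrymask, which is -1 for counts < 64 and 0 otherwise.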
case OpRsh64:
// match: (Rsh64 <t> x y)
// cond: y.Type.Size() == 8
// result: (SARQ <t> x (CMOVQCC <t> (CMPQconst <TypeFlags> [64] y) (Const <t> [63]) y))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
if !(y.Type.Size() == 8) {
goto end16bda9bd1611d415969fdbec55ed4330
}
v.Op = OpAMD64SARQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.Type = t
v.AddArg(x)
v0 := v.Block.NewValue0(v.Line, OpAMD64CMOVQCC, TypeInvalid)
v0.Type = t
v1 := v.Block.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
v1.Type = TypeFlags
v1.AuxInt = 64
v1.AddArg(y)
v0.AddArg(v1)
v2 := v.Block.NewValue0(v.Line, OpConst, TypeInvalid)
v2.Type = t
v2.AuxInt = 63
v0.AddArg(v2)
v0.AddArg(y)
v.AddArg(v0)
return true
}
goto end16bda9bd1611d415969fdbec55ed4330
end16bda9bd1611d415969fdbec55ed4330:
;
case OpRsh64U:
// match: (Rsh64U <t> x y)
// cond: y.Type.Size() == 8
// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst <TypeFlags> [64] y)))
{
t := v.Type
x := v.Args[0]
y := v.Args[1]
if !(y.Type.Size() == 8) {
goto endfd6815c0dc9f8dff6c3ec6add7a23569
}
v.Op = OpAMD64ANDQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64SHRQ, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := v.Block.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
v1.Type = t
v2 := v.Block.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
v2.Type = TypeFlags
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
goto endfd6815c0dc9f8dff6c3ec6add7a23569
endfd6815c0dc9f8dff6c3ec6add7a23569:
;
case OpAMD64SARQ:
// match: (SARQ x (MOVQconst [c]))
// cond:
// result: (SARQconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end031712b4008075e25a5827dcb8dd3ebb
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SARQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto end031712b4008075e25a5827dcb8dd3ebb
end031712b4008075e25a5827dcb8dd3ebb:
;
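	// SBBQcarrymask materializes the carry flag as a full-width mask
	// (-1 if the carry was set, 0 if it was clear). The two rules below
	// fold it to a constant when the compared operands are themselves
	// constants.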
case OpAMD64SBBQcarrymask:
// match: (SBBQcarrymask (CMPQconst [c] (MOVQconst [d])))
// cond: inBounds(d, c)
// result: (Const [-1])
{
if v.Args[0].Op != OpAMD64CMPQconst {
goto endf67d323ecef000dbcd15d7e031c3475e
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVQconst {
goto endf67d323ecef000dbcd15d7e031c3475e
}
d := v.Args[0].Args[0].AuxInt
if !(inBounds(d, c)) {
goto endf67d323ecef000dbcd15d7e031c3475e
}
v.Op = OpConst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = -1
return true
}
goto endf67d323ecef000dbcd15d7e031c3475e
endf67d323ecef000dbcd15d7e031c3475e:
;
// match: (SBBQcarrymask (CMPQconst [c] (MOVQconst [d])))
// cond: !inBounds(d, c)
// result: (Const [0])
{
if v.Args[0].Op != OpAMD64CMPQconst {
goto end4157ddea9c4f71bfabfd6fa50e1208ed
}
c := v.Args[0].AuxInt
if v.Args[0].Args[0].Op != OpAMD64MOVQconst {
goto end4157ddea9c4f71bfabfd6fa50e1208ed
}
d := v.Args[0].Args[0].AuxInt
if !(!inBounds(d, c)) {
goto end4157ddea9c4f71bfabfd6fa50e1208ed
}
v.Op = OpConst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = 0
return true
}
goto end4157ddea9c4f71bfabfd6fa50e1208ed
end4157ddea9c4f71bfabfd6fa50e1208ed:
;
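	// InvertFlags records that the operands of the producing comparison
	// were swapped, so condition codes that depend on operand order
	// (SETG, SETL) are exchanged when it is seen.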
case OpAMD64SETG:
// match: (SETG (InvertFlags x))
// cond:
// result: (SETL x)
{
if v.Args[0].Op != OpAMD64InvertFlags {
goto endf7586738694c9cd0b74ae28bbadb649f
}
x := v.Args[0].Args[0]
v.Op = OpAMD64SETL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endf7586738694c9cd0b74ae28bbadb649f
endf7586738694c9cd0b74ae28bbadb649f:
;
case OpAMD64SETL:
// match: (SETL (InvertFlags x))
// cond:
// result: (SETG x)
{
if v.Args[0].Op != OpAMD64InvertFlags {
goto ende33160cd86b9d4d3b77e02fb4658d5d3
}
x := v.Args[0].Args[0]
v.Op = OpAMD64SETG
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto ende33160cd86b9d4d3b77e02fb4658d5d3
ende33160cd86b9d4d3b77e02fb4658d5d3:
;
case OpAMD64SHLQ:
// match: (SHLQ x (MOVQconst [c]))
// cond:
// result: (SHLQconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto endcca412bead06dc3d56ef034a82d184d6
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHLQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endcca412bead06dc3d56ef034a82d184d6
endcca412bead06dc3d56ef034a82d184d6:
;
case OpAMD64SHRQ:
// match: (SHRQ x (MOVQconst [c]))
// cond:
// result: (SHRQconst [c] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto endbb0d3a04dd2b810cb3dbdf7ef665f22b
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHRQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endbb0d3a04dd2b810cb3dbdf7ef665f22b
endbb0d3a04dd2b810cb3dbdf7ef665f22b:
;
case OpAMD64SUBQ:
// match: (SUBQ x (MOVQconst [c]))
// cond:
// result: (SUBQconst x [c])
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end5a74a63bd9ad15437717c6df3b25eebb
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SUBQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AuxInt = c
return true
}
goto end5a74a63bd9ad15437717c6df3b25eebb
end5a74a63bd9ad15437717c6df3b25eebb:
;
// match: (SUBQ <t> (MOVQconst [c]) x)
// cond:
// result: (NEGQ (SUBQconst <t> x [c]))
{
t := v.Type
if v.Args[0].Op != OpAMD64MOVQconst {
goto end78e66b6fc298684ff4ac8aec5ce873c9
}
c := v.Args[0].AuxInt
x := v.Args[1]
v.Op = OpAMD64NEGQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAMD64SUBQconst, TypeInvalid)
v0.Type = t
v0.AddArg(x)
v0.AuxInt = c
v.AddArg(v0)
return true
}
goto end78e66b6fc298684ff4ac8aec5ce873c9
end78e66b6fc298684ff4ac8aec5ce873c9:
;
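	// The SignExtNtoM ops lower to the matching MOVBQSX/MOVWQSX/MOVLQSX
	// instruction, which sign-extends the low bits into a full 64-bit
	// register for consumers that need a correctly extended value.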
case OpSignExt16to32:
// match: (SignExt16to32 x)
// cond:
// result: (MOVWQSX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVWQSX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end21e4271c2b48a5aa3561ccfa8fa67cd9
end21e4271c2b48a5aa3561ccfa8fa67cd9:
;
case OpSignExt16to64:
// match: (SignExt16to64 x)
// cond:
// result: (MOVWQSX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVWQSX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endc6d242ee3a3e195ef0f9e8dae47ada75
endc6d242ee3a3e195ef0f9e8dae47ada75:
;
case OpSignExt32to64:
// match: (SignExt32to64 x)
// cond:
// result: (MOVLQSX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVLQSX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endb9f1a8b2d01eee44964a71a01bca165c
endb9f1a8b2d01eee44964a71a01bca165c:
;
case OpSignExt8to16:
// match: (SignExt8to16 x)
// cond:
// result: (MOVBQSX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVBQSX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end372869f08e147404b80634e5f83fd506
end372869f08e147404b80634e5f83fd506:
;
case OpSignExt8to32:
// match: (SignExt8to32 x)
// cond:
// result: (MOVBQSX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVBQSX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end913e3575e5b4cf7f60585c108db40464
end913e3575e5b4cf7f60585c108db40464:
;
case OpSignExt8to64:
// match: (SignExt8to64 x)
// cond:
// result: (MOVBQSX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVBQSX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endcef6d6001d3f25cf5dacee11a46e5c8c
endcef6d6001d3f25cf5dacee11a46e5c8c:
;
case OpStaticCall:
// match: (StaticCall [argwid] {target} mem)
// cond:
// result: (CALLstatic [argwid] {target} mem)
{
argwid := v.AuxInt
target := v.Aux
mem := v.Args[0]
v.Op = OpAMD64CALLstatic
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = argwid
v.Aux = target
v.AddArg(mem)
return true
}
goto end32c5cbec813d1c2ae94fc9b1090e4b2a
end32c5cbec813d1c2ae94fc9b1090e4b2a:
;
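	// Store is dispatched on the type of the value being stored: 64-bit
	// integers and pointers use MOVQstore, narrower integers use the
	// correspondingly sized store, and booleans use a byte store.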
case OpStore:
// match: (Store ptr val mem)
// cond: (is64BitInt(val.Type) || isPtr(val.Type))
// result: (MOVQstore ptr val mem)
{
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is64BitInt(val.Type) || isPtr(val.Type)) {
goto endbaeb60123806948cd2433605820d5af1
}
v.Op = OpAMD64MOVQstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto endbaeb60123806948cd2433605820d5af1
endbaeb60123806948cd2433605820d5af1:
;
// match: (Store ptr val mem)
// cond: is32BitInt(val.Type)
// result: (MOVLstore ptr val mem)
{
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is32BitInt(val.Type)) {
goto end582e895008657c728c141c6b95070de7
}
v.Op = OpAMD64MOVLstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end582e895008657c728c141c6b95070de7
end582e895008657c728c141c6b95070de7:
;
// match: (Store ptr val mem)
// cond: is16BitInt(val.Type)
// result: (MOVWstore ptr val mem)
{
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is16BitInt(val.Type)) {
goto enda3f6a985b6ebb277665f80ad30b178df
}
v.Op = OpAMD64MOVWstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto enda3f6a985b6ebb277665f80ad30b178df
enda3f6a985b6ebb277665f80ad30b178df:
;
// match: (Store ptr val mem)
// cond: is8BitInt(val.Type)
// result: (MOVBstore ptr val mem)
{
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is8BitInt(val.Type)) {
goto ende2dee0bc82f631e3c6b0031bf8d224c1
}
v.Op = OpAMD64MOVBstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto ende2dee0bc82f631e3c6b0031bf8d224c1
ende2dee0bc82f631e3c6b0031bf8d224c1:
;
// match: (Store ptr val mem)
// cond: val.Type.IsBoolean()
// result: (MOVBstore ptr val mem)
{
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(val.Type.IsBoolean()) {
goto end6f343b676bf49740054e459f972b24f5
}
v.Op = OpAMD64MOVBstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end6f343b676bf49740054e459f972b24f5
end6f343b676bf49740054e459f972b24f5:
;
case OpSub16:
// match: (Sub16 x y)
// cond:
// result: (SUBW x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SUBW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end54adc5de883c0460ca71c6ee464d4244
end54adc5de883c0460ca71c6ee464d4244:
;
case OpSub32:
// match: (Sub32 x y)
// cond:
// result: (SUBL x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SUBL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto enddc3a2a488bda8c5856f93343e5ffe5f8
enddc3a2a488bda8c5856f93343e5ffe5f8:
;
case OpSub64:
// match: (Sub64 x y)
// cond:
// result: (SUBQ x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SUBQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endd88d5646309fd9174584888ecc8aca2c
endd88d5646309fd9174584888ecc8aca2c:
;
case OpSub8:
// match: (Sub8 x y)
// cond:
// result: (SUBB x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SUBB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end7d33bf9bdfa505f96b930563eca7955f
end7d33bf9bdfa505f96b930563eca7955f:
;
case OpTrunc16to8:
// match: (Trunc16to8 x)
// cond:
// result: (Copy x)
{
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end18a19bd8418f9079595720df0874e90a
end18a19bd8418f9079595720df0874e90a:
;
case OpTrunc32to16:
// match: (Trunc32to16 x)
// cond:
// result: (Copy x)
{
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end217b00780a8b1139d068680ed9d61cb0
end217b00780a8b1139d068680ed9d61cb0:
;
case OpTrunc32to8:
// match: (Trunc32to8 x)
// cond:
// result: (Copy x)
{
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end05d10e0a1c707d66b11b2d342634efd0
end05d10e0a1c707d66b11b2d342634efd0:
;
case OpTrunc64to16:
// match: (Trunc64to16 x)
// cond:
// result: (Copy x)
{
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end4623ae65eb76feca3936354f22d45fa7
end4623ae65eb76feca3936354f22d45fa7:
;
case OpTrunc64to32:
// match: (Trunc64to32 x)
// cond:
// result: (Copy x)
{
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end93e0b16b58a717a3e4f5c2ca67b6be87
end93e0b16b58a717a3e4f5c2ca67b6be87:
;
case OpTrunc64to8:
// match: (Trunc64to8 x)
// cond:
// result: (Copy x)
{
x := v.Args[0]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endc4c1a1b86edd0f082339d17eb5096ad0
endc4c1a1b86edd0f082339d17eb5096ad0:
;
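	// Zero is lowered by size: 0 is a no-op, sizes 1/2/4/8 become a single
	// typed store of constant 0, other sizes below 32 bytes use MOVXzero,
	// and larger sizes use REP STOSQ (see the rules below).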
case OpZero:
// match: (Zero [0] _ mem)
// cond:
// result: (Copy mem)
{
if v.AuxInt != 0 {
goto endb85a34a7d102b0e0d801454f437db5bf
}
mem := v.Args[1]
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(mem)
return true
}
goto endb85a34a7d102b0e0d801454f437db5bf
endb85a34a7d102b0e0d801454f437db5bf:
;
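		// Sizes 1, 2, 4, and 8 each store a zero constant of the matching width.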
// match: (Zero [1] destptr mem)
// cond:
// result: (MOVBstore destptr (Const <TypeInt8> [0]) mem)
{
if v.AuxInt != 1 {
goto end09ec7b1fc5ad40534e0e25c896323f5c
}
destptr := v.Args[0]
mem := v.Args[1]
v.Op = OpAMD64MOVBstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(destptr)
v0 := v.Block.NewValue0(v.Line, OpConst, TypeInvalid)
v0.Type = TypeInt8
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
goto end09ec7b1fc5ad40534e0e25c896323f5c
end09ec7b1fc5ad40534e0e25c896323f5c:
;
// match: (Zero [2] destptr mem)
// cond:
// result: (MOVWstore destptr (Const <TypeInt16> [0]) mem)
{
if v.AuxInt != 2 {
goto end2dee246789dbd305bb1eaec768bdae14
}
destptr := v.Args[0]
mem := v.Args[1]
v.Op = OpAMD64MOVWstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(destptr)
v0 := v.Block.NewValue0(v.Line, OpConst, TypeInvalid)
v0.Type = TypeInt16
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
goto end2dee246789dbd305bb1eaec768bdae14
end2dee246789dbd305bb1eaec768bdae14:
;
// match: (Zero [4] destptr mem)
// cond:
// result: (MOVLstore destptr (Const <TypeInt32> [0]) mem)
{
if v.AuxInt != 4 {
goto ende2bf4ecf21bc9e76700a9c5f62546e78
}
destptr := v.Args[0]
mem := v.Args[1]
v.Op = OpAMD64MOVLstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(destptr)
v0 := v.Block.NewValue0(v.Line, OpConst, TypeInvalid)
v0.Type = TypeInt32
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
goto ende2bf4ecf21bc9e76700a9c5f62546e78
ende2bf4ecf21bc9e76700a9c5f62546e78:
;
// match: (Zero [8] destptr mem)
// cond:
// result: (MOVQstore destptr (Const <TypeInt64> [0]) mem)
{
if v.AuxInt != 8 {
goto enda65d5d60783daf9b9405f04c44f7adaf
}
destptr := v.Args[0]
mem := v.Args[1]
v.Op = OpAMD64MOVQstore
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(destptr)
v0 := v.Block.NewValue0(v.Line, OpConst, TypeInvalid)
v0.Type = TypeInt64
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
goto enda65d5d60783daf9b9405f04c44f7adaf
enda65d5d60783daf9b9405f04c44f7adaf:
;
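		// Remaining sizes below 4*8 = 32 bytes are cleared with a single
		// MOVXzero, which carries the byte count in AuxInt.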
// match: (Zero [size] destptr mem)
// cond: size < 4*8
// result: (MOVXzero [size] destptr mem)
{
size := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(size < 4*8) {
goto endf0a22f1506977610ac0a310eee152075
}
v.Op = OpAMD64MOVXzero
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = size
v.AddArg(destptr)
v.AddArg(mem)
return true
}
goto endf0a22f1506977610ac0a310eee152075
endf0a22f1506977610ac0a310eee152075:
;
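		// For 32 bytes or more, REPSTOSQ clears size/8 quadwords and a
		// recursive Zero handles the remaining size%8 bytes at offset
		// size-(size%8).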
// match: (Zero [size] destptr mem)
// cond: size >= 4*8
// result: (Zero [size%8] (OffPtr <TypeUInt64> [size-(size%8)] destptr) (REPSTOSQ <TypeMem> destptr (Const <TypeUInt64> [size/8]) mem))
{
size := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(size >= 4*8) {
goto end7a358169d20d6834b21f2e03fbf351b2
}
v.Op = OpZero
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = size % 8
v0 := v.Block.NewValue0(v.Line, OpOffPtr, TypeInvalid)
v0.Type = TypeUInt64
v0.AuxInt = size - (size % 8)
v0.AddArg(destptr)
v.AddArg(v0)
v1 := v.Block.NewValue0(v.Line, OpAMD64REPSTOSQ, TypeInvalid)
v1.Type = TypeMem
v1.AddArg(destptr)
v2 := v.Block.NewValue0(v.Line, OpConst, TypeInvalid)
v2.Type = TypeUInt64
v2.AuxInt = size / 8
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
goto end7a358169d20d6834b21f2e03fbf351b2
end7a358169d20d6834b21f2e03fbf351b2:
;
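	// Unlike truncation, zero-extension needs a real instruction: the
	// MOV*QZX forms clear the otherwise-junk high bits of the source.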
case OpZeroExt16to32:
// match: (ZeroExt16to32 x)
// cond:
// result: (MOVWQZX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVWQZX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endbfff79412a2cc96095069c66812844b4
endbfff79412a2cc96095069c66812844b4:
;
case OpZeroExt16to64:
// match: (ZeroExt16to64 x)
// cond:
// result: (MOVWQZX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVWQZX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end7a40262c5c856101058d2bd518ed0910
end7a40262c5c856101058d2bd518ed0910:
;
case OpZeroExt32to64:
// match: (ZeroExt32to64 x)
// cond:
// result: (MOVLQZX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVLQZX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto enddf83bdc8cc6c5673a9ef7aca7affe45a
enddf83bdc8cc6c5673a9ef7aca7affe45a:
;
case OpZeroExt8to16:
// match: (ZeroExt8to16 x)
// cond:
// result: (MOVBQZX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVBQZX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endd03d53d2a585727e4107ae1a3cc55479
endd03d53d2a585727e4107ae1a3cc55479:
;
case OpZeroExt8to32:
// match: (ZeroExt8to32 x)
// cond:
// result: (MOVBQZX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVBQZX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto endcbd33e965b3dab14fced5ae93d8949de
endcbd33e965b3dab14fced5ae93d8949de:
;
case OpZeroExt8to64:
// match: (ZeroExt8to64 x)
// cond:
// result: (MOVBQZX x)
{
x := v.Args[0]
v.Op = OpAMD64MOVBQZX
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
return true
}
goto end63ae7cc15db9d15189b2f1342604b2cb
end63ae7cc15db9d15189b2f1342604b2cb:
}
return false
}
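// rewriteBlockAMD64 applies the block rewrite rules generated from
// gen/AMD64.rules to b, updating its kind, control value, and successors
// when a rule matches. It reports whether a rewrite was applied.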
func rewriteBlockAMD64(b *Block) bool {
switch b.Kind {
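	// A control of (InvertFlags cmp) means the flags were computed with the
	// comparison operands swapped, so ordered conditions flip direction
	// (GE<->LE, GT<->LT, UGE<->ULE, UGT<->ULT) while EQ and NE are kept.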
case BlockAMD64EQ:
// match: (EQ (InvertFlags cmp) yes no)
// cond:
// result: (EQ cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto end6b8e9afc73b1c4d528f31a60d2575fae
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64EQ
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end6b8e9afc73b1c4d528f31a60d2575fae
end6b8e9afc73b1c4d528f31a60d2575fae:
;
case BlockAMD64GE:
// match: (GE (InvertFlags cmp) yes no)
// cond:
// result: (LE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto end0610f000a6988ee8310307ec2ea138f8
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64LE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end0610f000a6988ee8310307ec2ea138f8
end0610f000a6988ee8310307ec2ea138f8:
;
case BlockAMD64GT:
// match: (GT (InvertFlags cmp) yes no)
// cond:
// result: (LT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto endf60c0660b6a8aa9565c97fc87f04eb34
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64LT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto endf60c0660b6a8aa9565c97fc87f04eb34
endf60c0660b6a8aa9565c97fc87f04eb34:
;
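	// A plain If whose condition is a SETx flag materialization is fused
	// into the corresponding flag-based block, branching on the flags
	// directly.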
case BlockIf:
// match: (If (SETL cmp) yes no)
// cond:
// result: (LT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETL {
goto ende4d36879bb8e1bd8facaa8c91ba99dcc
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64LT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto ende4d36879bb8e1bd8facaa8c91ba99dcc
ende4d36879bb8e1bd8facaa8c91ba99dcc:
;
// match: (If (SETLE cmp) yes no)
// cond:
// result: (LE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETLE {
goto end40df18679690e8f9005d8642fab44654
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64LE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end40df18679690e8f9005d8642fab44654
end40df18679690e8f9005d8642fab44654:
;
// match: (If (SETG cmp) yes no)
// cond:
// result: (GT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETG {
goto endb1faff07a84ae08a4b05a4a7e71eb740
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64GT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto endb1faff07a84ae08a4b05a4a7e71eb740
endb1faff07a84ae08a4b05a4a7e71eb740:
;
// match: (If (SETGE cmp) yes no)
// cond:
// result: (GE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETGE {
goto enda9211ccfa5b0ab8eafc0017630c542b6
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64GE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto enda9211ccfa5b0ab8eafc0017630c542b6
enda9211ccfa5b0ab8eafc0017630c542b6:
;
// match: (If (SETEQ cmp) yes no)
// cond:
// result: (EQ cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETEQ {
goto endf113deb06abc88613840e6282942921a
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64EQ
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto endf113deb06abc88613840e6282942921a
endf113deb06abc88613840e6282942921a:
;
// match: (If (SETNE cmp) yes no)
// cond:
// result: (NE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETNE {
goto end5ff1403aaf7b543bc454177ab584e4f5
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64NE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end5ff1403aaf7b543bc454177ab584e4f5
end5ff1403aaf7b543bc454177ab584e4f5:
;
// match: (If (SETB cmp) yes no)
// cond:
// result: (ULT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETB {
goto end04935012db9defeafceef8175f803ea2
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64ULT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end04935012db9defeafceef8175f803ea2
end04935012db9defeafceef8175f803ea2:
;
// match: (If (SETBE cmp) yes no)
// cond:
// result: (ULE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETBE {
goto endfe0178f6f4406945ca8966817d04be60
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64ULE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto endfe0178f6f4406945ca8966817d04be60
endfe0178f6f4406945ca8966817d04be60:
;
// match: (If (SETA cmp) yes no)
// cond:
// result: (UGT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETA {
goto endbd22a7d56a98d85e4e132ff952dae262
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64UGT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto endbd22a7d56a98d85e4e132ff952dae262
endbd22a7d56a98d85e4e132ff952dae262:
;
// match: (If (SETAE cmp) yes no)
// cond:
// result: (UGE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64SETAE {
goto end9bea9963c3c5dfb97249a5feb8287f94
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64UGE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end9bea9963c3c5dfb97249a5feb8287f94
end9bea9963c3c5dfb97249a5feb8287f94:
;
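		// A boolean condition loaded from memory is branched on by testing
		// it against itself (TESTB cond cond) and using NE.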
// match: (If cond yes no)
// cond: cond.Op == OpAMD64MOVBload
// result: (NE (TESTB <TypeFlags> cond cond) yes no)
{
v := b.Control
cond := v
yes := b.Succs[0]
no := b.Succs[1]
if !(cond.Op == OpAMD64MOVBload) {
goto end7e22019fb0effc80f85c05ea30bdb5d9
}
b.Kind = BlockAMD64NE
v0 := v.Block.NewValue0(v.Line, OpAMD64TESTB, TypeInvalid)
v0.Type = TypeFlags
v0.AddArg(cond)
v0.AddArg(cond)
b.Control = v0
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end7e22019fb0effc80f85c05ea30bdb5d9
end7e22019fb0effc80f85c05ea30bdb5d9:
;
case BlockAMD64LE:
// match: (LE (InvertFlags cmp) yes no)
// cond:
// result: (GE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto end0d49d7d087fe7578e8015cf13dae37e3
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64GE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end0d49d7d087fe7578e8015cf13dae37e3
end0d49d7d087fe7578e8015cf13dae37e3:
;
case BlockAMD64LT:
// match: (LT (InvertFlags cmp) yes no)
// cond:
// result: (GT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto end6a408cde0fee0ae7b7da0443c8d902bf
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64GT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end6a408cde0fee0ae7b7da0443c8d902bf
end6a408cde0fee0ae7b7da0443c8d902bf:
;
case BlockAMD64NE:
// match: (NE (InvertFlags cmp) yes no)
// cond:
// result: (NE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto end713001aba794e50b582fbff930e110af
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64NE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end713001aba794e50b582fbff930e110af
end713001aba794e50b582fbff930e110af:
;
case BlockAMD64UGE:
// match: (UGE (InvertFlags cmp) yes no)
// cond:
// result: (ULE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto ende3e4ddc183ca1a46598b11c2d0d13966
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64ULE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto ende3e4ddc183ca1a46598b11c2d0d13966
ende3e4ddc183ca1a46598b11c2d0d13966:
;
case BlockAMD64UGT:
// match: (UGT (InvertFlags cmp) yes no)
// cond:
// result: (ULT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto end49818853af2e5251175d06c62768cae7
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64ULT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end49818853af2e5251175d06c62768cae7
end49818853af2e5251175d06c62768cae7:
;
case BlockAMD64ULE:
// match: (ULE (InvertFlags cmp) yes no)
// cond:
// result: (UGE cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto endd6698aac0d67261293b558c95ea17b4f
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64UGE
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto endd6698aac0d67261293b558c95ea17b4f
endd6698aac0d67261293b558c95ea17b4f:
;
case BlockAMD64ULT:
// match: (ULT (InvertFlags cmp) yes no)
// cond:
// result: (UGT cmp yes no)
{
v := b.Control
if v.Op != OpAMD64InvertFlags {
goto end35105dbc9646f02577167e45ae2f2fd2
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockAMD64UGT
b.Control = cmp
b.Succs[0] = yes
b.Succs[1] = no
return true
}
goto end35105dbc9646f02577167e45ae2f2fd2
end35105dbc9646f02577167e45ae2f2fd2:
}
return false
}