// autogenerated from gen/PPC64.rules: do not edit!
// generated with: cd gen; go run *.go
package ssa
import "math"
var _ = math.MinInt8 // in case not otherwise used
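// rewriteValuePPC64 dispatches on v.Op and applies the first matching
// PPC64 rewrite rule, reporting whether v was rewritten in place.
// Each case delegates to a per-op helper generated from gen/PPC64.rules.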
func rewriteValuePPC64(v *Value, config *Config) bool {
switch v.Op {
case OpAdd16:
return rewriteValuePPC64_OpAdd16(v, config)
case OpAdd32:
return rewriteValuePPC64_OpAdd32(v, config)
case OpAdd32F:
return rewriteValuePPC64_OpAdd32F(v, config)
case OpAdd64:
return rewriteValuePPC64_OpAdd64(v, config)
case OpAdd64F:
return rewriteValuePPC64_OpAdd64F(v, config)
case OpAdd8:
return rewriteValuePPC64_OpAdd8(v, config)
case OpAddPtr:
return rewriteValuePPC64_OpAddPtr(v, config)
case OpAddr:
return rewriteValuePPC64_OpAddr(v, config)
case OpAnd16:
return rewriteValuePPC64_OpAnd16(v, config)
case OpAnd32:
return rewriteValuePPC64_OpAnd32(v, config)
case OpAnd64:
return rewriteValuePPC64_OpAnd64(v, config)
case OpAnd8:
return rewriteValuePPC64_OpAnd8(v, config)
case OpAndB:
return rewriteValuePPC64_OpAndB(v, config)
case OpAvg64u:
return rewriteValuePPC64_OpAvg64u(v, config)
case OpClosureCall:
return rewriteValuePPC64_OpClosureCall(v, config)
case OpCom16:
return rewriteValuePPC64_OpCom16(v, config)
case OpCom32:
return rewriteValuePPC64_OpCom32(v, config)
case OpCom64:
return rewriteValuePPC64_OpCom64(v, config)
case OpCom8:
return rewriteValuePPC64_OpCom8(v, config)
case OpConst16:
return rewriteValuePPC64_OpConst16(v, config)
case OpConst32:
return rewriteValuePPC64_OpConst32(v, config)
case OpConst32F:
return rewriteValuePPC64_OpConst32F(v, config)
case OpConst64:
return rewriteValuePPC64_OpConst64(v, config)
case OpConst64F:
return rewriteValuePPC64_OpConst64F(v, config)
case OpConst8:
return rewriteValuePPC64_OpConst8(v, config)
case OpConstBool:
return rewriteValuePPC64_OpConstBool(v, config)
case OpConstNil:
return rewriteValuePPC64_OpConstNil(v, config)
case OpConvert:
return rewriteValuePPC64_OpConvert(v, config)
case OpCvt32Fto32:
return rewriteValuePPC64_OpCvt32Fto32(v, config)
case OpCvt32Fto64:
return rewriteValuePPC64_OpCvt32Fto64(v, config)
case OpCvt32Fto64F:
return rewriteValuePPC64_OpCvt32Fto64F(v, config)
case OpCvt32to32F:
return rewriteValuePPC64_OpCvt32to32F(v, config)
case OpCvt32to64F:
return rewriteValuePPC64_OpCvt32to64F(v, config)
case OpCvt64Fto32:
return rewriteValuePPC64_OpCvt64Fto32(v, config)
case OpCvt64Fto32F:
return rewriteValuePPC64_OpCvt64Fto32F(v, config)
case OpCvt64Fto64:
return rewriteValuePPC64_OpCvt64Fto64(v, config)
case OpCvt64to32F:
return rewriteValuePPC64_OpCvt64to32F(v, config)
case OpCvt64to64F:
return rewriteValuePPC64_OpCvt64to64F(v, config)
case OpDeferCall:
return rewriteValuePPC64_OpDeferCall(v, config)
case OpDiv16:
return rewriteValuePPC64_OpDiv16(v, config)
case OpDiv16u:
return rewriteValuePPC64_OpDiv16u(v, config)
case OpDiv32:
return rewriteValuePPC64_OpDiv32(v, config)
case OpDiv32F:
return rewriteValuePPC64_OpDiv32F(v, config)
case OpDiv32u:
return rewriteValuePPC64_OpDiv32u(v, config)
case OpDiv64:
return rewriteValuePPC64_OpDiv64(v, config)
case OpDiv64F:
return rewriteValuePPC64_OpDiv64F(v, config)
case OpDiv64u:
return rewriteValuePPC64_OpDiv64u(v, config)
case OpDiv8:
return rewriteValuePPC64_OpDiv8(v, config)
case OpDiv8u:
return rewriteValuePPC64_OpDiv8u(v, config)
case OpEq16:
return rewriteValuePPC64_OpEq16(v, config)
case OpEq32:
return rewriteValuePPC64_OpEq32(v, config)
case OpEq32F:
return rewriteValuePPC64_OpEq32F(v, config)
case OpEq64:
return rewriteValuePPC64_OpEq64(v, config)
case OpEq64F:
return rewriteValuePPC64_OpEq64F(v, config)
case OpEq8:
return rewriteValuePPC64_OpEq8(v, config)
case OpEqB:
return rewriteValuePPC64_OpEqB(v, config)
case OpEqPtr:
return rewriteValuePPC64_OpEqPtr(v, config)
case OpGeq16:
return rewriteValuePPC64_OpGeq16(v, config)
case OpGeq16U:
return rewriteValuePPC64_OpGeq16U(v, config)
case OpGeq32:
return rewriteValuePPC64_OpGeq32(v, config)
case OpGeq32F:
return rewriteValuePPC64_OpGeq32F(v, config)
case OpGeq32U:
return rewriteValuePPC64_OpGeq32U(v, config)
case OpGeq64:
return rewriteValuePPC64_OpGeq64(v, config)
case OpGeq64F:
return rewriteValuePPC64_OpGeq64F(v, config)
case OpGeq64U:
return rewriteValuePPC64_OpGeq64U(v, config)
case OpGeq8:
return rewriteValuePPC64_OpGeq8(v, config)
case OpGeq8U:
return rewriteValuePPC64_OpGeq8U(v, config)
case OpGetClosurePtr:
return rewriteValuePPC64_OpGetClosurePtr(v, config)
case OpGoCall:
return rewriteValuePPC64_OpGoCall(v, config)
case OpGreater16:
return rewriteValuePPC64_OpGreater16(v, config)
case OpGreater16U:
return rewriteValuePPC64_OpGreater16U(v, config)
case OpGreater32:
return rewriteValuePPC64_OpGreater32(v, config)
case OpGreater32F:
return rewriteValuePPC64_OpGreater32F(v, config)
case OpGreater32U:
return rewriteValuePPC64_OpGreater32U(v, config)
case OpGreater64:
return rewriteValuePPC64_OpGreater64(v, config)
case OpGreater64F:
return rewriteValuePPC64_OpGreater64F(v, config)
case OpGreater64U:
return rewriteValuePPC64_OpGreater64U(v, config)
case OpGreater8:
return rewriteValuePPC64_OpGreater8(v, config)
case OpGreater8U:
return rewriteValuePPC64_OpGreater8U(v, config)
case OpHmul16:
return rewriteValuePPC64_OpHmul16(v, config)
case OpHmul16u:
return rewriteValuePPC64_OpHmul16u(v, config)
case OpHmul32:
return rewriteValuePPC64_OpHmul32(v, config)
case OpHmul32u:
return rewriteValuePPC64_OpHmul32u(v, config)
case OpHmul64:
return rewriteValuePPC64_OpHmul64(v, config)
case OpHmul64u:
return rewriteValuePPC64_OpHmul64u(v, config)
case OpHmul8:
return rewriteValuePPC64_OpHmul8(v, config)
case OpHmul8u:
return rewriteValuePPC64_OpHmul8u(v, config)
case OpInterCall:
return rewriteValuePPC64_OpInterCall(v, config)
case OpIsInBounds:
return rewriteValuePPC64_OpIsInBounds(v, config)
case OpIsNonNil:
return rewriteValuePPC64_OpIsNonNil(v, config)
case OpIsSliceInBounds:
return rewriteValuePPC64_OpIsSliceInBounds(v, config)
case OpLeq16:
return rewriteValuePPC64_OpLeq16(v, config)
case OpLeq16U:
return rewriteValuePPC64_OpLeq16U(v, config)
case OpLeq32:
return rewriteValuePPC64_OpLeq32(v, config)
case OpLeq32F:
return rewriteValuePPC64_OpLeq32F(v, config)
case OpLeq32U:
return rewriteValuePPC64_OpLeq32U(v, config)
case OpLeq64:
return rewriteValuePPC64_OpLeq64(v, config)
case OpLeq64F:
return rewriteValuePPC64_OpLeq64F(v, config)
case OpLeq64U:
return rewriteValuePPC64_OpLeq64U(v, config)
case OpLeq8:
return rewriteValuePPC64_OpLeq8(v, config)
case OpLeq8U:
return rewriteValuePPC64_OpLeq8U(v, config)
case OpLess16:
return rewriteValuePPC64_OpLess16(v, config)
case OpLess16U:
return rewriteValuePPC64_OpLess16U(v, config)
case OpLess32:
return rewriteValuePPC64_OpLess32(v, config)
case OpLess32F:
return rewriteValuePPC64_OpLess32F(v, config)
case OpLess32U:
return rewriteValuePPC64_OpLess32U(v, config)
case OpLess64:
return rewriteValuePPC64_OpLess64(v, config)
case OpLess64F:
return rewriteValuePPC64_OpLess64F(v, config)
case OpLess64U:
return rewriteValuePPC64_OpLess64U(v, config)
case OpLess8:
return rewriteValuePPC64_OpLess8(v, config)
case OpLess8U:
return rewriteValuePPC64_OpLess8U(v, config)
case OpLoad:
return rewriteValuePPC64_OpLoad(v, config)
case OpLsh16x16:
return rewriteValuePPC64_OpLsh16x16(v, config)
case OpLsh16x32:
return rewriteValuePPC64_OpLsh16x32(v, config)
case OpLsh16x64:
return rewriteValuePPC64_OpLsh16x64(v, config)
case OpLsh16x8:
return rewriteValuePPC64_OpLsh16x8(v, config)
case OpLsh32x16:
return rewriteValuePPC64_OpLsh32x16(v, config)
case OpLsh32x32:
return rewriteValuePPC64_OpLsh32x32(v, config)
case OpLsh32x64:
return rewriteValuePPC64_OpLsh32x64(v, config)
case OpLsh32x8:
return rewriteValuePPC64_OpLsh32x8(v, config)
case OpLsh64x16:
return rewriteValuePPC64_OpLsh64x16(v, config)
case OpLsh64x32:
return rewriteValuePPC64_OpLsh64x32(v, config)
case OpLsh64x64:
return rewriteValuePPC64_OpLsh64x64(v, config)
case OpLsh64x8:
return rewriteValuePPC64_OpLsh64x8(v, config)
case OpLsh8x16:
return rewriteValuePPC64_OpLsh8x16(v, config)
case OpLsh8x32:
return rewriteValuePPC64_OpLsh8x32(v, config)
case OpLsh8x64:
return rewriteValuePPC64_OpLsh8x64(v, config)
case OpLsh8x8:
return rewriteValuePPC64_OpLsh8x8(v, config)
case OpMod16:
return rewriteValuePPC64_OpMod16(v, config)
case OpMod16u:
return rewriteValuePPC64_OpMod16u(v, config)
case OpMod32:
return rewriteValuePPC64_OpMod32(v, config)
case OpMod32u:
return rewriteValuePPC64_OpMod32u(v, config)
case OpMod64:
return rewriteValuePPC64_OpMod64(v, config)
case OpMod64u:
return rewriteValuePPC64_OpMod64u(v, config)
case OpMod8:
return rewriteValuePPC64_OpMod8(v, config)
case OpMod8u:
return rewriteValuePPC64_OpMod8u(v, config)
case OpMove:
return rewriteValuePPC64_OpMove(v, config)
case OpMul16:
return rewriteValuePPC64_OpMul16(v, config)
case OpMul32:
return rewriteValuePPC64_OpMul32(v, config)
case OpMul32F:
return rewriteValuePPC64_OpMul32F(v, config)
case OpMul64:
return rewriteValuePPC64_OpMul64(v, config)
case OpMul64F:
return rewriteValuePPC64_OpMul64F(v, config)
case OpMul8:
return rewriteValuePPC64_OpMul8(v, config)
case OpNeg16:
return rewriteValuePPC64_OpNeg16(v, config)
case OpNeg32:
return rewriteValuePPC64_OpNeg32(v, config)
case OpNeg32F:
return rewriteValuePPC64_OpNeg32F(v, config)
case OpNeg64:
return rewriteValuePPC64_OpNeg64(v, config)
case OpNeg64F:
return rewriteValuePPC64_OpNeg64F(v, config)
case OpNeg8:
return rewriteValuePPC64_OpNeg8(v, config)
case OpNeq16:
return rewriteValuePPC64_OpNeq16(v, config)
case OpNeq32:
return rewriteValuePPC64_OpNeq32(v, config)
case OpNeq32F:
return rewriteValuePPC64_OpNeq32F(v, config)
case OpNeq64:
return rewriteValuePPC64_OpNeq64(v, config)
case OpNeq64F:
return rewriteValuePPC64_OpNeq64F(v, config)
case OpNeq8:
return rewriteValuePPC64_OpNeq8(v, config)
case OpNeqB:
return rewriteValuePPC64_OpNeqB(v, config)
case OpNeqPtr:
return rewriteValuePPC64_OpNeqPtr(v, config)
case OpNilCheck:
return rewriteValuePPC64_OpNilCheck(v, config)
case OpNot:
return rewriteValuePPC64_OpNot(v, config)
case OpOffPtr:
return rewriteValuePPC64_OpOffPtr(v, config)
case OpOr16:
return rewriteValuePPC64_OpOr16(v, config)
case OpOr32:
return rewriteValuePPC64_OpOr32(v, config)
case OpOr64:
return rewriteValuePPC64_OpOr64(v, config)
case OpOr8:
return rewriteValuePPC64_OpOr8(v, config)
case OpOrB:
return rewriteValuePPC64_OpOrB(v, config)
case OpPPC64ADD:
return rewriteValuePPC64_OpPPC64ADD(v, config)
case OpPPC64CMPUconst:
return rewriteValuePPC64_OpPPC64CMPUconst(v, config)
case OpPPC64CMPWUconst:
return rewriteValuePPC64_OpPPC64CMPWUconst(v, config)
case OpPPC64CMPWconst:
return rewriteValuePPC64_OpPPC64CMPWconst(v, config)
case OpPPC64CMPconst:
return rewriteValuePPC64_OpPPC64CMPconst(v, config)
case OpPPC64Equal:
return rewriteValuePPC64_OpPPC64Equal(v, config)
case OpPPC64FMOVDload:
return rewriteValuePPC64_OpPPC64FMOVDload(v, config)
case OpPPC64FMOVDstore:
return rewriteValuePPC64_OpPPC64FMOVDstore(v, config)
case OpPPC64FMOVSload:
return rewriteValuePPC64_OpPPC64FMOVSload(v, config)
case OpPPC64FMOVSstore:
return rewriteValuePPC64_OpPPC64FMOVSstore(v, config)
case OpPPC64GreaterEqual:
return rewriteValuePPC64_OpPPC64GreaterEqual(v, config)
case OpPPC64GreaterThan:
return rewriteValuePPC64_OpPPC64GreaterThan(v, config)
case OpPPC64LessEqual:
return rewriteValuePPC64_OpPPC64LessEqual(v, config)
case OpPPC64LessThan:
return rewriteValuePPC64_OpPPC64LessThan(v, config)
case OpPPC64MOVBZload:
return rewriteValuePPC64_OpPPC64MOVBZload(v, config)
case OpPPC64MOVBZreg:
return rewriteValuePPC64_OpPPC64MOVBZreg(v, config)
case OpPPC64MOVBload:
return rewriteValuePPC64_OpPPC64MOVBload(v, config)
case OpPPC64MOVBreg:
return rewriteValuePPC64_OpPPC64MOVBreg(v, config)
case OpPPC64MOVBstore:
return rewriteValuePPC64_OpPPC64MOVBstore(v, config)
case OpPPC64MOVBstorezero:
return rewriteValuePPC64_OpPPC64MOVBstorezero(v, config)
case OpPPC64MOVDload:
return rewriteValuePPC64_OpPPC64MOVDload(v, config)
case OpPPC64MOVDstore:
return rewriteValuePPC64_OpPPC64MOVDstore(v, config)
case OpPPC64MOVDstorezero:
return rewriteValuePPC64_OpPPC64MOVDstorezero(v, config)
case OpPPC64MOVHZload:
return rewriteValuePPC64_OpPPC64MOVHZload(v, config)
case OpPPC64MOVHZreg:
return rewriteValuePPC64_OpPPC64MOVHZreg(v, config)
case OpPPC64MOVHload:
return rewriteValuePPC64_OpPPC64MOVHload(v, config)
case OpPPC64MOVHreg:
return rewriteValuePPC64_OpPPC64MOVHreg(v, config)
case OpPPC64MOVHstore:
return rewriteValuePPC64_OpPPC64MOVHstore(v, config)
case OpPPC64MOVHstorezero:
return rewriteValuePPC64_OpPPC64MOVHstorezero(v, config)
case OpPPC64MOVWZload:
return rewriteValuePPC64_OpPPC64MOVWZload(v, config)
case OpPPC64MOVWload:
return rewriteValuePPC64_OpPPC64MOVWload(v, config)
case OpPPC64MOVWstore:
return rewriteValuePPC64_OpPPC64MOVWstore(v, config)
case OpPPC64MOVWstorezero:
return rewriteValuePPC64_OpPPC64MOVWstorezero(v, config)
case OpPPC64NotEqual:
return rewriteValuePPC64_OpPPC64NotEqual(v, config)
case OpRsh16Ux16:
return rewriteValuePPC64_OpRsh16Ux16(v, config)
case OpRsh16Ux32:
return rewriteValuePPC64_OpRsh16Ux32(v, config)
case OpRsh16Ux64:
return rewriteValuePPC64_OpRsh16Ux64(v, config)
case OpRsh16Ux8:
return rewriteValuePPC64_OpRsh16Ux8(v, config)
case OpRsh16x16:
return rewriteValuePPC64_OpRsh16x16(v, config)
case OpRsh16x32:
return rewriteValuePPC64_OpRsh16x32(v, config)
case OpRsh16x64:
return rewriteValuePPC64_OpRsh16x64(v, config)
case OpRsh16x8:
return rewriteValuePPC64_OpRsh16x8(v, config)
case OpRsh32Ux16:
return rewriteValuePPC64_OpRsh32Ux16(v, config)
case OpRsh32Ux32:
return rewriteValuePPC64_OpRsh32Ux32(v, config)
case OpRsh32Ux64:
return rewriteValuePPC64_OpRsh32Ux64(v, config)
case OpRsh32Ux8:
return rewriteValuePPC64_OpRsh32Ux8(v, config)
case OpRsh32x16:
return rewriteValuePPC64_OpRsh32x16(v, config)
case OpRsh32x32:
return rewriteValuePPC64_OpRsh32x32(v, config)
case OpRsh32x64:
return rewriteValuePPC64_OpRsh32x64(v, config)
case OpRsh32x8:
return rewriteValuePPC64_OpRsh32x8(v, config)
case OpRsh64Ux16:
return rewriteValuePPC64_OpRsh64Ux16(v, config)
case OpRsh64Ux32:
return rewriteValuePPC64_OpRsh64Ux32(v, config)
case OpRsh64Ux64:
return rewriteValuePPC64_OpRsh64Ux64(v, config)
case OpRsh64Ux8:
return rewriteValuePPC64_OpRsh64Ux8(v, config)
case OpRsh64x16:
return rewriteValuePPC64_OpRsh64x16(v, config)
case OpRsh64x32:
return rewriteValuePPC64_OpRsh64x32(v, config)
case OpRsh64x64:
return rewriteValuePPC64_OpRsh64x64(v, config)
case OpRsh64x8:
return rewriteValuePPC64_OpRsh64x8(v, config)
case OpRsh8Ux16:
return rewriteValuePPC64_OpRsh8Ux16(v, config)
case OpRsh8Ux32:
return rewriteValuePPC64_OpRsh8Ux32(v, config)
case OpRsh8Ux64:
return rewriteValuePPC64_OpRsh8Ux64(v, config)
case OpRsh8Ux8:
return rewriteValuePPC64_OpRsh8Ux8(v, config)
case OpRsh8x16:
return rewriteValuePPC64_OpRsh8x16(v, config)
case OpRsh8x32:
return rewriteValuePPC64_OpRsh8x32(v, config)
case OpRsh8x64:
return rewriteValuePPC64_OpRsh8x64(v, config)
case OpRsh8x8:
return rewriteValuePPC64_OpRsh8x8(v, config)
case OpSignExt16to32:
return rewriteValuePPC64_OpSignExt16to32(v, config)
case OpSignExt16to64:
return rewriteValuePPC64_OpSignExt16to64(v, config)
case OpSignExt32to64:
return rewriteValuePPC64_OpSignExt32to64(v, config)
case OpSignExt8to16:
return rewriteValuePPC64_OpSignExt8to16(v, config)
case OpSignExt8to32:
return rewriteValuePPC64_OpSignExt8to32(v, config)
case OpSignExt8to64:
return rewriteValuePPC64_OpSignExt8to64(v, config)
case OpSqrt:
return rewriteValuePPC64_OpSqrt(v, config)
case OpStaticCall:
return rewriteValuePPC64_OpStaticCall(v, config)
case OpStore:
return rewriteValuePPC64_OpStore(v, config)
case OpSub16:
return rewriteValuePPC64_OpSub16(v, config)
case OpSub32:
return rewriteValuePPC64_OpSub32(v, config)
case OpSub32F:
return rewriteValuePPC64_OpSub32F(v, config)
case OpSub64:
return rewriteValuePPC64_OpSub64(v, config)
case OpSub64F:
return rewriteValuePPC64_OpSub64F(v, config)
case OpSub8:
return rewriteValuePPC64_OpSub8(v, config)
case OpSubPtr:
return rewriteValuePPC64_OpSubPtr(v, config)
case OpTrunc16to8:
return rewriteValuePPC64_OpTrunc16to8(v, config)
case OpTrunc32to16:
return rewriteValuePPC64_OpTrunc32to16(v, config)
case OpTrunc32to8:
return rewriteValuePPC64_OpTrunc32to8(v, config)
case OpTrunc64to16:
return rewriteValuePPC64_OpTrunc64to16(v, config)
case OpTrunc64to32:
return rewriteValuePPC64_OpTrunc64to32(v, config)
case OpTrunc64to8:
return rewriteValuePPC64_OpTrunc64to8(v, config)
case OpXor16:
return rewriteValuePPC64_OpXor16(v, config)
case OpXor32:
return rewriteValuePPC64_OpXor32(v, config)
case OpXor64:
return rewriteValuePPC64_OpXor64(v, config)
case OpXor8:
return rewriteValuePPC64_OpXor8(v, config)
case OpZero:
return rewriteValuePPC64_OpZero(v, config)
case OpZeroExt16to32:
return rewriteValuePPC64_OpZeroExt16to32(v, config)
case OpZeroExt16to64:
return rewriteValuePPC64_OpZeroExt16to64(v, config)
case OpZeroExt32to64:
return rewriteValuePPC64_OpZeroExt32to64(v, config)
case OpZeroExt8to16:
return rewriteValuePPC64_OpZeroExt8to16(v, config)
case OpZeroExt8to32:
return rewriteValuePPC64_OpZeroExt8to32(v, config)
case OpZeroExt8to64:
return rewriteValuePPC64_OpZeroExt8to64(v, config)
}
return false
}
func rewriteValuePPC64_OpAdd16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Add16 x y)
// cond:
// result: (ADD x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64ADD)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpAdd32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Add32 x y)
// cond:
// result: (ADD x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64ADD)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpAdd32F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Add32F x y)
// cond:
// result: (FADDS x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FADDS)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpAdd64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Add64 x y)
// cond:
// result: (ADD x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64ADD)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpAdd64F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Add64F x y)
// cond:
// result: (FADD x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FADD)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpAdd8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Add8 x y)
// cond:
// result: (ADD x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64ADD)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpAddPtr(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (AddPtr x y)
// cond:
// result: (ADD x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64ADD)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpAddr(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Addr {sym} base)
// cond:
// result: (MOVDaddr {sym} base)
for {
sym := v.Aux
base := v.Args[0]
v.reset(OpPPC64MOVDaddr)
v.Aux = sym
v.AddArg(base)
return true
}
}
func rewriteValuePPC64_OpAnd16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (And16 x y)
// cond:
// result: (AND x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64AND)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpAnd32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (And32 x y)
// cond:
// result: (AND x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64AND)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpAnd64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (And64 x y)
// cond:
// result: (AND x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64AND)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpAnd8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (And8 x y)
// cond:
// result: (AND x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64AND)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpAndB(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (AndB x y)
// cond:
// result: (AND x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64AND)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpAvg64u(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Avg64u <t> x y)
// cond:
// result: (ADD (ADD <t> (SRD <t> x (MOVDconst <t> [1])) (SRD <t> y (MOVDconst <t> [1]))) (ANDconst <t> (AND <t> x y) [1]))
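// The expansion computes floor((x+y)/2) without overflowing 64 bits:
// x>>1 + y>>1 recovers everything except the carry out of the low bits,
// and (x&y)&1 adds back the 1 that is lost only when both low bits are set.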
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64ADD)
v0 := b.NewValue0(v.Line, OpPPC64ADD, t)
v1 := b.NewValue0(v.Line, OpPPC64SRD, t)
v1.AddArg(x)
v2 := b.NewValue0(v.Line, OpPPC64MOVDconst, t)
v2.AuxInt = 1
v1.AddArg(v2)
v0.AddArg(v1)
v3 := b.NewValue0(v.Line, OpPPC64SRD, t)
v3.AddArg(y)
v4 := b.NewValue0(v.Line, OpPPC64MOVDconst, t)
v4.AuxInt = 1
v3.AddArg(v4)
v0.AddArg(v3)
v.AddArg(v0)
v5 := b.NewValue0(v.Line, OpPPC64ANDconst, t)
v5.AuxInt = 1
v6 := b.NewValue0(v.Line, OpPPC64AND, t)
v6.AddArg(x)
v6.AddArg(y)
v5.AddArg(v6)
v.AddArg(v5)
return true
}
}
func rewriteValuePPC64_OpClosureCall(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ClosureCall [argwid] entry closure mem)
// cond:
// result: (CALLclosure [argwid] entry closure mem)
for {
argwid := v.AuxInt
entry := v.Args[0]
closure := v.Args[1]
mem := v.Args[2]
v.reset(OpPPC64CALLclosure)
v.AuxInt = argwid
v.AddArg(entry)
v.AddArg(closure)
v.AddArg(mem)
return true
}
}
func rewriteValuePPC64_OpCom16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Com16 x)
// cond:
// result: (XORconst [-1] x)
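// Bitwise complement is lowered as xor with all ones.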
for {
x := v.Args[0]
v.reset(OpPPC64XORconst)
v.AuxInt = -1
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpCom32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Com32 x)
// cond:
// result: (XORconst [-1] x)
for {
x := v.Args[0]
v.reset(OpPPC64XORconst)
v.AuxInt = -1
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpCom64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Com64 x)
// cond:
// result: (XORconst [-1] x)
for {
x := v.Args[0]
v.reset(OpPPC64XORconst)
v.AuxInt = -1
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpCom8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Com8 x)
// cond:
// result: (XORconst [-1] x)
for {
x := v.Args[0]
v.reset(OpPPC64XORconst)
v.AuxInt = -1
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpConst16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Const16 [val])
// cond:
// result: (MOVWconst [val])
for {
val := v.AuxInt
v.reset(OpPPC64MOVWconst)
v.AuxInt = val
return true
}
}
func rewriteValuePPC64_OpConst32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Const32 [val])
// cond:
// result: (MOVWconst [val])
for {
val := v.AuxInt
v.reset(OpPPC64MOVWconst)
v.AuxInt = val
return true
}
}
func rewriteValuePPC64_OpConst32F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Const32F [val])
// cond:
// result: (FMOVSconst [val])
for {
val := v.AuxInt
v.reset(OpPPC64FMOVSconst)
v.AuxInt = val
return true
}
}
func rewriteValuePPC64_OpConst64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Const64 [val])
// cond:
// result: (MOVDconst [val])
for {
val := v.AuxInt
v.reset(OpPPC64MOVDconst)
v.AuxInt = val
return true
}
}
func rewriteValuePPC64_OpConst64F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Const64F [val])
// cond:
// result: (FMOVDconst [val])
for {
val := v.AuxInt
v.reset(OpPPC64FMOVDconst)
v.AuxInt = val
return true
}
}
func rewriteValuePPC64_OpConst8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Const8 [val])
// cond:
// result: (MOVWconst [val])
for {
val := v.AuxInt
v.reset(OpPPC64MOVWconst)
v.AuxInt = val
return true
}
}
func rewriteValuePPC64_OpConstBool(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ConstBool [b])
// cond:
// result: (MOVWconst [b])
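// Note: b below rebinds the rule's aux name, shadowing the enclosing
// b := v.Block that every helper declares (and may leave unused).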
for {
b := v.AuxInt
v.reset(OpPPC64MOVWconst)
v.AuxInt = b
return true
}
}
func rewriteValuePPC64_OpConstNil(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ConstNil)
// cond:
// result: (MOVDconst [0])
for {
v.reset(OpPPC64MOVDconst)
v.AuxInt = 0
return true
}
}
func rewriteValuePPC64_OpConvert(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Convert <t> x mem)
// cond:
// result: (MOVDconvert <t> x mem)
for {
t := v.Type
x := v.Args[0]
mem := v.Args[1]
v.reset(OpPPC64MOVDconvert)
v.Type = t
v.AddArg(x)
v.AddArg(mem)
return true
}
}
func rewriteValuePPC64_OpCvt32Fto32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Cvt32Fto32 x)
// cond:
// result: (Xf2i64 (FCTIWZ x))
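// FCTIWZ converts the float to a 32-bit integer, truncating toward zero;
// Xf2i64 then moves the raw FPR bits into an integer register.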
for {
x := v.Args[0]
v.reset(OpPPC64Xf2i64)
v0 := b.NewValue0(v.Line, OpPPC64FCTIWZ, config.fe.TypeFloat64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpCvt32Fto64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Cvt32Fto64 x)
// cond:
// result: (Xf2i64 (FCTIDZ x))
for {
x := v.Args[0]
v.reset(OpPPC64Xf2i64)
v0 := b.NewValue0(v.Line, OpPPC64FCTIDZ, config.fe.TypeFloat64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpCvt32Fto64F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Cvt32Fto64F x)
// cond:
// result: x
for {
x := v.Args[0]
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpCvt32to32F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Cvt32to32F x)
// cond:
// result: (FRSP (FCFID (Xi2f64 (SignExt32to64 x))))
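// The int32 is sign-extended to 64 bits and moved into an FPR (Xi2f64),
// FCFID converts the signed doubleword to float64, and FRSP rounds the
// result to single precision.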
for {
x := v.Args[0]
v.reset(OpPPC64FRSP)
v0 := b.NewValue0(v.Line, OpPPC64FCFID, config.fe.TypeFloat64())
v1 := b.NewValue0(v.Line, OpPPC64Xi2f64, config.fe.TypeFloat64())
v2 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
v2.AddArg(x)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpCvt32to64F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Cvt32to64F x)
// cond:
// result: (FCFID (Xi2f64 (SignExt32to64 x)))
for {
x := v.Args[0]
v.reset(OpPPC64FCFID)
v0 := b.NewValue0(v.Line, OpPPC64Xi2f64, config.fe.TypeFloat64())
v1 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpCvt64Fto32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Cvt64Fto32 x)
// cond:
// result: (Xf2i64 (FCTIWZ x))
for {
x := v.Args[0]
v.reset(OpPPC64Xf2i64)
v0 := b.NewValue0(v.Line, OpPPC64FCTIWZ, config.fe.TypeFloat64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpCvt64Fto32F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Cvt64Fto32F x)
// cond:
// result: (FRSP x)
for {
x := v.Args[0]
v.reset(OpPPC64FRSP)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpCvt64Fto64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Cvt64Fto64 x)
// cond:
// result: (Xf2i64 (FCTIDZ x))
for {
x := v.Args[0]
v.reset(OpPPC64Xf2i64)
v0 := b.NewValue0(v.Line, OpPPC64FCTIDZ, config.fe.TypeFloat64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpCvt64to32F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Cvt64to32F x)
// cond:
// result: (FRSP (FCFID (Xi2f64 x)))
for {
x := v.Args[0]
v.reset(OpPPC64FRSP)
v0 := b.NewValue0(v.Line, OpPPC64FCFID, config.fe.TypeFloat64())
v1 := b.NewValue0(v.Line, OpPPC64Xi2f64, config.fe.TypeFloat64())
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpCvt64to64F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Cvt64to64F x)
// cond:
// result: (FCFID (Xi2f64 x))
for {
x := v.Args[0]
v.reset(OpPPC64FCFID)
v0 := b.NewValue0(v.Line, OpPPC64Xi2f64, config.fe.TypeFloat64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpDeferCall(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (DeferCall [argwid] mem)
// cond:
// result: (CALLdefer [argwid] mem)
for {
argwid := v.AuxInt
mem := v.Args[0]
v.reset(OpPPC64CALLdefer)
v.AuxInt = argwid
v.AddArg(mem)
return true
}
}
func rewriteValuePPC64_OpDiv16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Div16 x y)
// cond:
// result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
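// PPC64 has no 16-bit divide, so both operands are sign-extended to
// 32 bits and DIVW performs a 32-bit signed division.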
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64DIVW)
v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpDiv16u(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Div16u x y)
// cond:
// result: (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64DIVWU)
v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpDiv32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Div32 x y)
// cond:
// result: (DIVW x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64DIVW)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpDiv32F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Div32F x y)
// cond:
// result: (FDIVS x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FDIVS)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpDiv32u(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Div32u x y)
// cond:
// result: (DIVWU x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64DIVWU)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpDiv64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Div64 x y)
// cond:
// result: (DIVD x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64DIVD)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpDiv64F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Div64F x y)
// cond:
// result: (FDIV x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FDIV)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpDiv64u(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Div64u x y)
// cond:
// result: (DIVDU x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64DIVDU)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpDiv8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Div8 x y)
// cond:
// result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64DIVW)
v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpDiv8u(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Div8u x y)
// cond:
// result: (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64DIVWU)
v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpEq16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Eq16 x y)
// cond: isSigned(x.Type) && isSigned(y.Type)
// result: (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
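// When both operands are statically signed, equality is checked on the
// sign-extended values; the fallback rule below zero-extends instead.
// Either consistent widening preserves equality of the low 16 bits.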
for {
x := v.Args[0]
y := v.Args[1]
if !(isSigned(x.Type) && isSigned(y.Type)) {
break
}
v.reset(OpPPC64Equal)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
// match: (Eq16 x y)
// cond:
// result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64Equal)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpEq32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Eq32 x y)
// cond:
// result: (Equal (CMPW x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64Equal)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpEq32F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Eq32F x y)
// cond:
// result: (Equal (FCMPU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64Equal)
v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpEq64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Eq64 x y)
// cond:
// result: (Equal (CMP x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64Equal)
v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpEq64F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Eq64F x y)
// cond:
// result: (Equal (FCMPU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64Equal)
v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpEq8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Eq8 x y)
// cond: isSigned(x.Type) && isSigned(y.Type)
// result: (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
if !(isSigned(x.Type) && isSigned(y.Type)) {
break
}
v.reset(OpPPC64Equal)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
// match: (Eq8 x y)
// cond:
// result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64Equal)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpEqB(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (EqB x y)
// cond:
// result: (ANDconst [1] (EQV x y))
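// EQV is bitwise equivalence (xnor), so bit 0 of (EQV x y) is 1 exactly
// when the two booleans agree; ANDconst [1] masks the result to 0 or 1.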
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64ANDconst)
v.AuxInt = 1
v0 := b.NewValue0(v.Line, OpPPC64EQV, config.fe.TypeInt64())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpEqPtr(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (EqPtr x y)
// cond:
// result: (Equal (CMP x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64Equal)
v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGeq16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Geq16 x y)
// cond:
// result: (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGeq16U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Geq16U x y)
// cond:
// result: (GreaterEqual (CMPU (ZeroExt16to32 x) (ZeroExt16to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGeq32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Geq32 x y)
// cond:
// result: (GreaterEqual (CMPW x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGeq32F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Geq32F x y)
// cond:
// result: (FGreaterEqual (FCMPU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FGreaterEqual)
v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGeq32U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Geq32U x y)
// cond:
// result: (GreaterEqual (CMPU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGeq64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Geq64 x y)
// cond:
// result: (GreaterEqual (CMP x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGeq64F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Geq64F x y)
// cond:
// result: (FGreaterEqual (FCMPU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FGreaterEqual)
v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGeq64U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Geq64U x y)
// cond:
// result: (GreaterEqual (CMPU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGeq8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Geq8 x y)
// cond:
// result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGeq8U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Geq8U x y)
// cond:
// result: (GreaterEqual (CMPU (ZeroExt8to32 x) (ZeroExt8to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGetClosurePtr(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (GetClosurePtr)
// cond:
// result: (LoweredGetClosurePtr)
for {
v.reset(OpPPC64LoweredGetClosurePtr)
return true
}
}
func rewriteValuePPC64_OpGoCall(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (GoCall [argwid] mem)
// cond:
// result: (CALLgo [argwid] mem)
for {
argwid := v.AuxInt
mem := v.Args[0]
v.reset(OpPPC64CALLgo)
v.AuxInt = argwid
v.AddArg(mem)
return true
}
}
func rewriteValuePPC64_OpGreater16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Greater16 x y)
// cond:
// result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGreater16U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Greater16U x y)
// cond:
// result: (GreaterThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGreater32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Greater32 x y)
// cond:
// result: (GreaterThan (CMPW x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGreater32F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Greater32F x y)
// cond:
// result: (FGreaterThan (FCMPU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FGreaterThan)
v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGreater32U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Greater32U x y)
// cond:
// result: (GreaterThan (CMPWU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGreater64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Greater64 x y)
// cond:
// result: (GreaterThan (CMP x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGreater64F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Greater64F x y)
// cond:
// result: (FGreaterThan (FCMPU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FGreaterThan)
v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGreater64U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Greater64U x y)
// cond:
// result: (GreaterThan (CMPU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGreater8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Greater8 x y)
// cond:
// result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpGreater8U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Greater8U x y)
// cond:
// result: (GreaterThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpHmul16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Hmul16 x y)
// cond:
// result: (SRAWconst (MULLW <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
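// The high 16 bits of the product are obtained by a full 32-bit signed
// multiply of the sign-extended operands followed by an arithmetic
// right shift of 16.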
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAWconst)
v.AuxInt = 16
v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeInt32())
v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpHmul16u(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Hmul16u x y)
// cond:
// result: (SRWconst (MULLW <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRWconst)
v.AuxInt = 16
v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeUInt32())
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpHmul32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Hmul32 x y)
// cond:
// result: (MULHW x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64MULHW)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpHmul32u(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Hmul32u x y)
// cond:
// result: (MULHWU x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64MULHWU)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpHmul64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Hmul64 x y)
// cond:
// result: (MULHD x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64MULHD)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpHmul64u(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Hmul64u x y)
// cond:
// result: (MULHDU x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64MULHDU)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpHmul8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Hmul8 x y)
// cond:
// result: (SRAWconst (MULLW <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAWconst)
v.AuxInt = 8
v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeInt16())
v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpHmul8u(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Hmul8u x y)
// cond:
// result: (SRWconst (MULLW <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRWconst)
v.AuxInt = 8
v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeUInt16())
v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpInterCall(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (InterCall [argwid] entry mem)
// cond:
// result: (CALLinter [argwid] entry mem)
for {
argwid := v.AuxInt
entry := v.Args[0]
mem := v.Args[1]
v.reset(OpPPC64CALLinter)
v.AuxInt = argwid
v.AddArg(entry)
v.AddArg(mem)
return true
}
}
func rewriteValuePPC64_OpIsInBounds(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (IsInBounds idx len)
// cond:
// result: (LessThan (CMPU idx len))
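// A single unsigned compare implements 0 <= idx < len: a negative idx
// becomes a huge unsigned value and so also fails idx < len.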
for {
idx := v.Args[0]
len := v.Args[1]
v.reset(OpPPC64LessThan)
v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
v0.AddArg(idx)
v0.AddArg(len)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpIsNonNil(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (IsNonNil ptr)
// cond:
// result: (NotEqual (CMPconst [0] ptr))
for {
ptr := v.Args[0]
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPconst, TypeFlags)
v0.AuxInt = 0
v0.AddArg(ptr)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpIsSliceInBounds(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (IsSliceInBounds idx len)
// cond:
// result: (LessEqual (CMPU idx len))
for {
idx := v.Args[0]
len := v.Args[1]
v.reset(OpPPC64LessEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
v0.AddArg(idx)
v0.AddArg(len)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLeq16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Leq16 x y)
// cond:
// result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLeq16U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Leq16U x y)
// cond:
// result: (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLeq32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Leq32 x y)
// cond:
// result: (LessEqual (CMPW x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLeq32F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Leq32F x y)
// cond:
// result: (FLessEqual (FCMPU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FLessEqual)
v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLeq32U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Leq32U x y)
// cond:
// result: (LessEqual (CMPWU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLeq64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Leq64 x y)
// cond:
// result: (LessEqual (CMP x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLeq64F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Leq64F x y)
// cond:
// result: (FLessEqual (FCMPU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FLessEqual)
v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLeq64U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Leq64U x y)
// cond:
// result: (LessEqual (CMPU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLeq8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Leq8 x y)
// cond:
// result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLeq8U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Leq8U x y)
// cond:
// result: (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLess16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Less16 x y)
// cond:
// result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessThan)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLess16U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Less16U x y)
// cond:
// result: (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessThan)
v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLess32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Less32 x y)
// cond:
// result: (LessThan (CMPW x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessThan)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLess32F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Less32F x y)
// cond:
// result: (FLessThan (FCMPU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FLessThan)
v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLess32U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Less32U x y)
// cond:
// result: (LessThan (CMPWU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessThan)
v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLess64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Less64 x y)
// cond:
// result: (LessThan (CMP x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessThan)
v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLess64F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Less64F x y)
// cond:
// result: (FLessThan (FCMPU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FLessThan)
v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLess64U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Less64U x y)
// cond:
// result: (LessThan (CMPU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessThan)
v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLess8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Less8 x y)
// cond:
// result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessThan)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLess8U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Less8U x y)
// cond:
// result: (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessThan)
v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLoad(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Load <t> ptr mem)
// cond: (is64BitInt(t) || isPtr(t))
// result: (MOVDload ptr mem)
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is64BitInt(t) || isPtr(t)) {
break
}
v.reset(OpPPC64MOVDload)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (Load <t> ptr mem)
// cond: is32BitInt(t) && isSigned(t)
// result: (MOVWload ptr mem)
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is32BitInt(t) && isSigned(t)) {
break
}
v.reset(OpPPC64MOVWload)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (Load <t> ptr mem)
// cond: is32BitInt(t) && !isSigned(t)
// result: (MOVWZload ptr mem)
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is32BitInt(t) && !isSigned(t)) {
break
}
v.reset(OpPPC64MOVWZload)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (Load <t> ptr mem)
// cond: is16BitInt(t) && isSigned(t)
// result: (MOVHload ptr mem)
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is16BitInt(t) && isSigned(t)) {
break
}
v.reset(OpPPC64MOVHload)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (Load <t> ptr mem)
// cond: is16BitInt(t) && !isSigned(t)
// result: (MOVHZload ptr mem)
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is16BitInt(t) && !isSigned(t)) {
break
}
v.reset(OpPPC64MOVHZload)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (Load <t> ptr mem)
// cond: (t.IsBoolean() || (is8BitInt(t) && isSigned(t)))
// result: (MOVBload ptr mem)
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(t.IsBoolean() || (is8BitInt(t) && isSigned(t))) {
break
}
v.reset(OpPPC64MOVBload)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (Load <t> ptr mem)
// cond: is8BitInt(t) && !isSigned(t)
// result: (MOVBZload ptr mem)
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is8BitInt(t) && !isSigned(t)) {
break
}
v.reset(OpPPC64MOVBZload)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (Load <t> ptr mem)
// cond: is32BitFloat(t)
// result: (FMOVSload ptr mem)
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is32BitFloat(t)) {
break
}
v.reset(OpPPC64FMOVSload)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (Load <t> ptr mem)
// cond: is64BitFloat(t)
// result: (FMOVDload ptr mem)
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is64BitFloat(t)) {
break
}
v.reset(OpPPC64FMOVDload)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
return false
}
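// The shift rules below enforce Go semantics: a shift count >= the operand
// width must produce 0, while PPC64's SLW/SLD look only at the low bits of
// the count. ADDconstForCarry [-width] y sets the carry bit iff y >= width;
// MaskIfNotCarry then yields 0 on carry and all ones otherwise, so
// ORN y mask passes an in-range y through unchanged and turns an
// out-of-range y into all ones, a count the hardware shifts to zero.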
func rewriteValuePPC64_OpLsh16x16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh16x16 x y)
// cond:
// result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -16
v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLsh16x32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh16x32 x y)
// cond:
// result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -16
v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLsh16x64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh16x64 x y)
// cond:
// result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -16
v2.AddArg(y)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLsh16x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh16x8 x y)
// cond:
// result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -16
v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLsh32x16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh32x16 x y)
// cond:
// result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLsh32x32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh32x32 x y)
// cond:
// result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLsh32x64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh32x64 x y)
// cond:
// result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
v2.AddArg(y)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLsh32x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh32x8 x y)
// cond:
// result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLsh64x16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh64x16 x y)
// cond:
// result: (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLD)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLsh64x32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh64x32 x y)
// cond:
// result: (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLD)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLsh64x64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh64x64 x y)
// cond:
// result: (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLD)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
v2.AddArg(y)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLsh64x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh64x8 x y)
// cond:
// result: (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLD)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLsh8x16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh8x16 x y)
// cond:
// result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -8
v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLsh8x32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh8x32 x y)
// cond:
// result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -8
v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLsh8x64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh8x64 x y)
// cond:
// result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -8
v2.AddArg(y)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpLsh8x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh8x8 x y)
// cond:
// result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -8
v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
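// Modulus has no direct PPC64 instruction; it is rewritten as
// x - y*(x/y), with 8- and 16-bit operands first widened to 32 bits.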
func rewriteValuePPC64_OpMod16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mod16 x y)
// cond:
// result: (Mod32 (SignExt16to32 x) (SignExt16to32 y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32)
v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpMod16u(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mod16u x y)
// cond:
// result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32u)
v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpMod32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mod32 x y)
// cond:
// result: (SUB x (MULLW y (DIVW x y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SUB)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeInt32())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64DIVW, config.fe.TypeInt32())
v1.AddArg(x)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpMod32u(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mod32u x y)
// cond:
// result: (SUB x (MULLW y (DIVWU x y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SUB)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeInt32())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64DIVWU, config.fe.TypeInt32())
v1.AddArg(x)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpMod64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mod64 x y)
// cond:
// result: (SUB x (MULLD y (DIVD x y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SUB)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64MULLD, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64DIVD, config.fe.TypeInt64())
v1.AddArg(x)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpMod64u(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mod64u x y)
// cond:
// result: (SUB x (MULLD y (DIVDU x y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SUB)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64MULLD, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64DIVDU, config.fe.TypeInt64())
v1.AddArg(x)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpMod8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mod8 x y)
// cond:
// result: (Mod32 (SignExt8to32 x) (SignExt8to32 y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32)
v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpMod8u(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mod8u x y)
// cond:
// result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32u)
v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
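// Small Moves are expanded inline into load/store pairs chosen by size and
// alignment; moves that are large, not 8-byte aligned, or compiled with
// Duff's device disabled fall through to the LoweredMove loop.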
func rewriteValuePPC64_OpMove(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Move [s] _ _ mem)
// cond: SizeAndAlign(s).Size() == 0
// result: mem
for {
s := v.AuxInt
mem := v.Args[2]
if !(SizeAndAlign(s).Size() == 0) {
break
}
v.reset(OpCopy)
v.Type = mem.Type
v.AddArg(mem)
return true
}
// match: (Move [s] dst src mem)
// cond: SizeAndAlign(s).Size() == 1
// result: (MOVBstore dst (MOVBZload src mem) mem)
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
if !(SizeAndAlign(s).Size() == 1) {
break
}
v.reset(OpPPC64MOVBstore)
v.AddArg(dst)
v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Move [s] dst src mem)
// cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
// result: (MOVHstore dst (MOVHZload src mem) mem)
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
break
}
v.reset(OpPPC64MOVHstore)
v.AddArg(dst)
v0 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Move [s] dst src mem)
// cond: SizeAndAlign(s).Size() == 2
// result: (MOVBstore [1] dst (MOVBZload [1] src mem) (MOVBstore dst (MOVBZload src mem) mem))
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
if !(SizeAndAlign(s).Size() == 2) {
break
}
v.reset(OpPPC64MOVBstore)
v.AuxInt = 1
v.AddArg(dst)
v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
v0.AuxInt = 1
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
v1.AddArg(dst)
v2 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Move [s] dst src mem)
// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
// result: (MOVWstore dst (MOVWload src mem) mem)
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
break
}
v.reset(OpPPC64MOVWstore)
v.AddArg(dst)
v0 := b.NewValue0(v.Line, OpPPC64MOVWload, config.fe.TypeInt32())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Move [s] dst src mem)
// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
// result: (MOVHstore [2] dst (MOVHZload [2] src mem) (MOVHstore dst (MOVHZload src mem) mem))
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
break
}
v.reset(OpPPC64MOVHstore)
v.AuxInt = 2
v.AddArg(dst)
v0 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64MOVHstore, TypeMem)
v1.AddArg(dst)
v2 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Move [s] dst src mem)
// cond: SizeAndAlign(s).Size() == 4
// result: (MOVBstore [3] dst (MOVBZload [3] src mem) (MOVBstore [2] dst (MOVBZload [2] src mem) (MOVBstore [1] dst (MOVBZload [1] src mem) (MOVBstore dst (MOVBZload src mem) mem))))
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
if !(SizeAndAlign(s).Size() == 4) {
break
}
v.reset(OpPPC64MOVBstore)
v.AuxInt = 3
v.AddArg(dst)
v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
v0.AuxInt = 3
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
v1.AuxInt = 2
v1.AddArg(dst)
v2 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
v2.AuxInt = 2
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v3 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
v3.AuxInt = 1
v3.AddArg(dst)
v4 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
v4.AuxInt = 1
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v5 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
v5.AddArg(dst)
v6 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
v5.AddArg(mem)
v3.AddArg(v5)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
// match: (Move [s] dst src mem)
// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0
// result: (MOVDstore dst (MOVDload src mem) mem)
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0) {
break
}
v.reset(OpPPC64MOVDstore)
v.AddArg(dst)
v0 := b.NewValue0(v.Line, OpPPC64MOVDload, config.fe.TypeInt64())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Move [s] dst src mem)
// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
// result: (MOVWstore [4] dst (MOVWZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
break
}
v.reset(OpPPC64MOVWstore)
v.AuxInt = 4
v.AddArg(dst)
v0 := b.NewValue0(v.Line, OpPPC64MOVWZload, config.fe.TypeUInt32())
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64MOVWstore, TypeMem)
v1.AddArg(dst)
v2 := b.NewValue0(v.Line, OpPPC64MOVWZload, config.fe.TypeUInt32())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Move [s] dst src mem)
// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0
// result: (MOVHstore [6] dst (MOVHZload [6] src mem) (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVHstore [2] dst (MOVHZload [2] src mem) (MOVHstore dst (MOVHZload src mem) mem))))
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0) {
break
}
v.reset(OpPPC64MOVHstore)
v.AuxInt = 6
v.AddArg(dst)
v0 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
v0.AuxInt = 6
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64MOVHstore, TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
v2 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
v2.AuxInt = 4
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v3 := b.NewValue0(v.Line, OpPPC64MOVHstore, TypeMem)
v3.AuxInt = 2
v3.AddArg(dst)
v4 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
v4.AuxInt = 2
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v5 := b.NewValue0(v.Line, OpPPC64MOVHstore, TypeMem)
v5.AddArg(dst)
v6 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
v5.AddArg(mem)
v3.AddArg(v5)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
// match: (Move [s] dst src mem)
// cond: SizeAndAlign(s).Size() == 3
// result: (MOVBstore [2] dst (MOVBZload [2] src mem) (MOVBstore [1] dst (MOVBZload [1] src mem) (MOVBstore dst (MOVBZload src mem) mem)))
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
if !(SizeAndAlign(s).Size() == 3) {
break
}
v.reset(OpPPC64MOVBstore)
v.AuxInt = 2
v.AddArg(dst)
v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
v1.AuxInt = 1
v1.AddArg(dst)
v2 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
v2.AuxInt = 1
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v3 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
v3.AddArg(dst)
v4 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v3.AddArg(mem)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
// match: (Move [s] dst src mem)
// cond: (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0
// result: (LoweredMove [SizeAndAlign(s).Align()] dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) mem)
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
if !((SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0) {
break
}
v.reset(OpPPC64LoweredMove)
v.AuxInt = SizeAndAlign(s).Align()
v.AddArg(dst)
v.AddArg(src)
v0 := b.NewValue0(v.Line, OpPPC64ADDconst, src.Type)
v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
v0.AddArg(src)
v.AddArg(v0)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpMul16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mul16 x y)
// cond:
// result: (MULLW x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64MULLW)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpMul32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mul32 x y)
// cond:
// result: (MULLW x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64MULLW)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpMul32F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mul32F x y)
// cond:
// result: (FMULS x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FMULS)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpMul64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mul64 x y)
// cond:
// result: (MULLD x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64MULLD)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpMul64F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mul64F x y)
// cond:
// result: (FMUL x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FMUL)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpMul8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mul8 x y)
// cond:
// result: (MULLW x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64MULLW)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpNeg16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neg16 x)
// cond:
// result: (NEG x)
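	// NEG negates the full register; that is fine for sub-word values, whose upper bits carry no meaning.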
for {
x := v.Args[0]
v.reset(OpPPC64NEG)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpNeg32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neg32 x)
// cond:
// result: (NEG x)
for {
x := v.Args[0]
v.reset(OpPPC64NEG)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpNeg32F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neg32F x)
// cond:
// result: (FNEG x)
for {
x := v.Args[0]
v.reset(OpPPC64FNEG)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpNeg64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neg64 x)
// cond:
// result: (NEG x)
for {
x := v.Args[0]
v.reset(OpPPC64NEG)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpNeg64F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neg64F x)
// cond:
// result: (FNEG x)
for {
x := v.Args[0]
v.reset(OpPPC64FNEG)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpNeg8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neg8 x)
// cond:
// result: (NEG x)
for {
x := v.Args[0]
v.reset(OpPPC64NEG)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpNeq16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neq16 x y)
// cond: isSigned(x.Type) && isSigned(y.Type)
// result: (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
if !(isSigned(x.Type) && isSigned(y.Type)) {
break
}
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
// match: (Neq16 x y)
// cond:
// result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
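	// This rule is reached only when the signed-operand rule above fails its condition.
	// Zero extension preserves 16-bit (in)equality just as sign extension does, so it
	// covers the remaining unsigned and mixed-type cases.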
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpNeq32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neq32 x y)
// cond:
// result: (NotEqual (CMPW x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpNeq32F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neq32F x y)
// cond:
// result: (NotEqual (FCMPU x y))
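	// FCMPU is an unordered compare: a NaN operand leaves EQ unset, so NotEqual
	// reports true, matching Go's != semantics for floats.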
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpNeq64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neq64 x y)
// cond:
// result: (NotEqual (CMP x y))
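	// Full-width operands need no extension; CMP compares doublewords directly.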
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpNeq64F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neq64F x y)
// cond:
// result: (NotEqual (FCMPU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpNeq8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neq8 x y)
// cond: isSigned(x.Type) && isSigned(y.Type)
// result: (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
if !(isSigned(x.Type) && isSigned(y.Type)) {
break
}
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
// match: (Neq8 x y)
// cond:
// result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
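	// As with Neq16, zero extension preserves (in)equality of the low 8 bits for
	// the cases the signed rule above does not handle.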
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpNeqB(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (NeqB x y)
// cond:
// result: (XOR x y)
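	// Booleans are materialized as 0 or 1, so XOR computes boolean inequality directly.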
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64XOR)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpNeqPtr(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (NeqPtr x y)
// cond:
// result: (NotEqual (CMP x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpNilCheck(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (NilCheck ptr mem)
// cond:
// result: (LoweredNilCheck ptr mem)
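	// LoweredNilCheck is the machine-dependent pseudo-op; later passes schedule it
	// and turn it into the actual check.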
for {
ptr := v.Args[0]
mem := v.Args[1]
v.reset(OpPPC64LoweredNilCheck)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
}
func rewriteValuePPC64_OpNot(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Not x)
// cond:
// result: (XORconst [1] x)
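	// Not relies on booleans being 0 or 1: XORing with 1 flips between them.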
for {
x := v.Args[0]
v.reset(OpPPC64XORconst)
v.AuxInt = 1
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpOffPtr(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (OffPtr [off] ptr)
// cond:
// result: (ADD (MOVDconst <config.Frontend().TypeInt64()> [off]) ptr)
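	// The offset is not folded into an addressing mode here; it is materialized
	// with MOVDconst and added to the pointer.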
for {
off := v.AuxInt
ptr := v.Args[0]
v.reset(OpPPC64ADD)
v0 := b.NewValue0(v.Line, OpPPC64MOVDconst, config.Frontend().TypeInt64())
v0.AuxInt = off
v.AddArg(v0)
v.AddArg(ptr)
return true
}
}
func rewriteValuePPC64_OpOr16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Or16 x y)
// cond:
// result: (OR x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64OR)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpOr32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Or32 x y)
// cond:
// result: (OR x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64OR)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpOr64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Or64 x y)
// cond:
// result: (OR x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64OR)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpOr8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Or8 x y)
// cond:
// result: (OR x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64OR)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpOrB(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (OrB x y)
// cond:
// result: (OR x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64OR)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpPPC64ADD(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ADD (MOVDconst [c]) x)
// cond: int64(int32(c)) == c
// result: (ADDconst [c] x)
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
if !(int64(int32(c)) == c) {
break
}
v.reset(OpPPC64ADDconst)
v.AuxInt = c
v.AddArg(x)
return true
}
// match: (ADD x (MOVDconst [c]))
// cond: int64(int32(c)) == c
// result: (ADDconst [c] x)
for {
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpPPC64MOVDconst {
break
}
c := v_1.AuxInt
if !(int64(int32(c)) == c) {
break
}
v.reset(OpPPC64ADDconst)
v.AuxInt = c
v.AddArg(x)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64CMPUconst(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (CMPUconst (MOVDconst [x]) [y])
// cond: int64(x)==int64(y)
// result: (FlagEQ)
for {
y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDconst {
break
}
x := v_0.AuxInt
if !(int64(x) == int64(y)) {
break
}
v.reset(OpPPC64FlagEQ)
return true
}
// match: (CMPUconst (MOVDconst [x]) [y])
// cond: uint64(x)<uint64(y)
// result: (FlagLT)
for {
y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDconst {
break
}
x := v_0.AuxInt
if !(uint64(x) < uint64(y)) {
break
}
v.reset(OpPPC64FlagLT)
return true
}
// match: (CMPUconst (MOVDconst [x]) [y])
// cond: uint64(x)>uint64(y)
// result: (FlagGT)
for {
y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDconst {
break
}
x := v_0.AuxInt
if !(uint64(x) > uint64(y)) {
break
}
v.reset(OpPPC64FlagGT)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64CMPWUconst(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (CMPWUconst (MOVWconst [x]) [y])
// cond: int32(x)==int32(y)
// result: (FlagEQ)
for {
y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVWconst {
break
}
x := v_0.AuxInt
if !(int32(x) == int32(y)) {
break
}
v.reset(OpPPC64FlagEQ)
return true
}
// match: (CMPWUconst (MOVWconst [x]) [y])
// cond: uint32(x)<uint32(y)
// result: (FlagLT)
for {
y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVWconst {
break
}
x := v_0.AuxInt
if !(uint32(x) < uint32(y)) {
break
}
v.reset(OpPPC64FlagLT)
return true
}
// match: (CMPWUconst (MOVWconst [x]) [y])
// cond: uint32(x)>uint32(y)
// result: (FlagGT)
for {
y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVWconst {
break
}
x := v_0.AuxInt
if !(uint32(x) > uint32(y)) {
break
}
v.reset(OpPPC64FlagGT)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64CMPWconst(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (CMPWconst (MOVWconst [x]) [y])
// cond: int32(x)==int32(y)
// result: (FlagEQ)
for {
y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVWconst {
break
}
x := v_0.AuxInt
if !(int32(x) == int32(y)) {
break
}
v.reset(OpPPC64FlagEQ)
return true
}
// match: (CMPWconst (MOVWconst [x]) [y])
// cond: int32(x)<int32(y)
// result: (FlagLT)
for {
y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVWconst {
break
}
x := v_0.AuxInt
if !(int32(x) < int32(y)) {
break
}
v.reset(OpPPC64FlagLT)
return true
}
// match: (CMPWconst (MOVWconst [x]) [y])
// cond: int32(x)>int32(y)
// result: (FlagGT)
for {
y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVWconst {
break
}
x := v_0.AuxInt
if !(int32(x) > int32(y)) {
break
}
v.reset(OpPPC64FlagGT)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64CMPconst(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (CMPconst (MOVDconst [x]) [y])
// cond: int64(x)==int64(y)
// result: (FlagEQ)
for {
y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDconst {
break
}
x := v_0.AuxInt
if !(int64(x) == int64(y)) {
break
}
v.reset(OpPPC64FlagEQ)
return true
}
// match: (CMPconst (MOVDconst [x]) [y])
// cond: int64(x)<int64(y)
// result: (FlagLT)
for {
y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDconst {
break
}
x := v_0.AuxInt
if !(int64(x) < int64(y)) {
break
}
v.reset(OpPPC64FlagLT)
return true
}
// match: (CMPconst (MOVDconst [x]) [y])
// cond: int64(x)>int64(y)
// result: (FlagGT)
for {
y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDconst {
break
}
x := v_0.AuxInt
if !(int64(x) > int64(y)) {
break
}
v.reset(OpPPC64FlagGT)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64Equal(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Equal (FlagEQ))
// cond:
// result: (MOVWconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagEQ {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 1
return true
}
// match: (Equal (FlagLT))
// cond:
// result: (MOVWconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagLT {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 0
return true
}
// match: (Equal (FlagGT))
// cond:
// result: (MOVWconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagGT {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 0
return true
}
// match: (Equal (InvertFlags x))
// cond:
// result: (Equal x)
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64InvertFlags {
break
}
x := v_0.Args[0]
v.reset(OpPPC64Equal)
v.AddArg(x)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64FMOVDload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDaddr {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpPPC64FMOVDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is16Bit(off1+off2)
// result: (FMOVDload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64FMOVDload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is16Bit(off1+off2)
// result: (FMOVDstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64FMOVDstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
// result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDaddr {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpPPC64FMOVDstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64FMOVSload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDaddr {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpPPC64FMOVSload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is16Bit(off1+off2)
// result: (FMOVSload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64FMOVSload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is16Bit(off1+off2)
// result: (FMOVSstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64FMOVSstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
// result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDaddr {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpPPC64FMOVSstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64GreaterEqual(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (GreaterEqual (FlagEQ))
// cond:
// result: (MOVWconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagEQ {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 1
return true
}
// match: (GreaterEqual (FlagLT))
// cond:
// result: (MOVWconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagLT {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 0
return true
}
// match: (GreaterEqual (FlagGT))
// cond:
// result: (MOVWconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagGT {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 1
return true
}
// match: (GreaterEqual (InvertFlags x))
// cond:
// result: (LessEqual x)
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64InvertFlags {
break
}
x := v_0.Args[0]
v.reset(OpPPC64LessEqual)
v.AddArg(x)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64GreaterThan(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (GreaterThan (FlagEQ))
// cond:
// result: (MOVWconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagEQ {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 0
return true
}
// match: (GreaterThan (FlagLT))
// cond:
// result: (MOVWconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagLT {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 0
return true
}
// match: (GreaterThan (FlagGT))
// cond:
// result: (MOVWconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagGT {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 1
return true
}
// match: (GreaterThan (InvertFlags x))
// cond:
// result: (LessThan x)
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64InvertFlags {
break
}
x := v_0.Args[0]
v.reset(OpPPC64LessThan)
v.AddArg(x)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64LessEqual(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (LessEqual (FlagEQ))
// cond:
// result: (MOVWconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagEQ {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 1
return true
}
// match: (LessEqual (FlagLT))
// cond:
// result: (MOVWconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagLT {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 1
return true
}
// match: (LessEqual (FlagGT))
// cond:
// result: (MOVWconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagGT {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 0
return true
}
// match: (LessEqual (InvertFlags x))
// cond:
// result: (GreaterEqual x)
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64InvertFlags {
break
}
x := v_0.Args[0]
v.reset(OpPPC64GreaterEqual)
v.AddArg(x)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64LessThan(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (LessThan (FlagEQ))
// cond:
// result: (MOVWconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagEQ {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 0
return true
}
// match: (LessThan (FlagLT))
// cond:
// result: (MOVWconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagLT {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 1
return true
}
// match: (LessThan (FlagGT))
// cond:
// result: (MOVWconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagGT {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 0
return true
}
// match: (LessThan (InvertFlags x))
// cond:
// result: (GreaterThan x)
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64InvertFlags {
break
}
x := v_0.Args[0]
v.reset(OpPPC64GreaterThan)
v.AddArg(x)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVBZload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDaddr {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpPPC64MOVBZload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVBZload [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVBZload [off1+off2] {sym} x mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVBZload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVBZreg x:(MOVBZload _ _))
// cond:
// result: x
for {
x := v.Args[0]
if x.Op != OpPPC64MOVBZload {
break
}
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVBload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDaddr {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpPPC64MOVBload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVBload [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVBload [off1+off2] {sym} x mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVBload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVBreg(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVBreg x:(MOVBload _ _))
// cond:
// result: x
for {
x := v.Args[0]
if x.Op != OpPPC64MOVBload {
break
}
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVBstore(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: is16Bit(off1+off2)
// result: (MOVBstore [off1+off2] {sym} x val mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVBstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDaddr {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpPPC64MOVBstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem)
// cond: c == 0
// result: (MOVBstorezero [off] {sym} ptr mem)
for {
off := v.AuxInt
sym := v.Aux
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpPPC64MOVDconst {
break
}
c := v_1.AuxInt
mem := v.Args[2]
if !(c == 0) {
break
}
v.reset(OpPPC64MOVBstorezero)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVBstorezero [off1+off2] {sym} x mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVBstorezero)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVDload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDaddr {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpPPC64MOVDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVDload [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVDload [off1+off2] {sym} x mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVDload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVDstore(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: is16Bit(off1+off2)
// result: (MOVDstore [off1+off2] {sym} x val mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVDstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDaddr {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpPPC64MOVDstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem)
// cond: c == 0
// result: (MOVDstorezero [off] {sym} ptr mem)
for {
off := v.AuxInt
sym := v.Aux
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpPPC64MOVDconst {
break
}
c := v_1.AuxInt
mem := v.Args[2]
if !(c == 0) {
break
}
v.reset(OpPPC64MOVDstorezero)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVDstorezero [off1+off2] {sym} x mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVDstorezero)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVHZload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDaddr {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpPPC64MOVHZload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVHZload [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVHZload [off1+off2] {sym} x mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVHZload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVHZreg x:(MOVHZload _ _))
// cond:
// result: x
for {
x := v.Args[0]
if x.Op != OpPPC64MOVHZload {
break
}
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVHload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDaddr {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpPPC64MOVHload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVHload [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVHload [off1+off2] {sym} x mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVHload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVHreg(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVHreg x:(MOVHload _ _))
// cond:
// result: x
for {
x := v.Args[0]
if x.Op != OpPPC64MOVHload {
break
}
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
return false
}
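// MOVHZreg and MOVHreg of a matching half-word load are dropped above
// because MOVHZload already zero-extends and MOVHload already sign-extends
// to 64 bits, making the explicit extension redundant. Sketch:
//
//	(MOVHZreg x:(MOVHZload ptr mem)) -> x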
func rewriteValuePPC64_OpPPC64MOVHstore(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: is16Bit(off1+off2)
// result: (MOVHstore [off1+off2] {sym} x val mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVHstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDaddr {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpPPC64MOVHstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem)
// cond: c == 0
// result: (MOVHstorezero [off] {sym} ptr mem)
for {
off := v.AuxInt
sym := v.Aux
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpPPC64MOVDconst {
break
}
c := v_1.AuxInt
mem := v.Args[2]
if !(c == 0) {
break
}
v.reset(OpPPC64MOVHstorezero)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVHstorezero [off1+off2] {sym} x mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVHstorezero)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVWZload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDaddr {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpPPC64MOVWZload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVWZload [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVWZload [off1+off2] {sym} x mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVWZload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVWload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDaddr {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpPPC64MOVWload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVWload [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVWload [off1+off2] {sym} x mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVWload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVWstore(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: is16Bit(off1+off2)
// result: (MOVWstore [off1+off2] {sym} x val mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVWstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDaddr {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpPPC64MOVWstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem)
// cond: c == 0
// result: (MOVWstorezero [off] {sym} ptr mem)
for {
off := v.AuxInt
sym := v.Aux
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpPPC64MOVDconst {
break
}
c := v_1.AuxInt
mem := v.Args[2]
if !(c == 0) {
break
}
v.reset(OpPPC64MOVWstorezero)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVWstorezero [off1+off2] {sym} x mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVWstorezero)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64NotEqual(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (NotEqual (FlagEQ))
// cond:
// result: (MOVWconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagEQ {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 0
return true
}
// match: (NotEqual (FlagLT))
// cond:
// result: (MOVWconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagLT {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 1
return true
}
// match: (NotEqual (FlagGT))
// cond:
// result: (MOVWconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FlagGT {
break
}
v.reset(OpPPC64MOVWconst)
v.AuxInt = 1
return true
}
// match: (NotEqual (InvertFlags x))
// cond:
// result: (NotEqual x)
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64InvertFlags {
break
}
x := v_0.Args[0]
v.reset(OpPPC64NotEqual)
v.AddArg(x)
return true
}
return false
}
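// NotEqual of a known flag value constant-folds: FlagEQ yields 0, while
// FlagLT and FlagGT both yield 1. InvertFlags is simply discarded, since
// swapping a comparison's operands cannot change whether they are equal.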
func rewriteValuePPC64_OpRsh16Ux16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh16Ux16 x y)
// cond:
// result: (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v1.AddArg(y)
v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -16
v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
}
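// All of the Rsh* lowerings in this file share one idiom for Go's shift
// semantics, where a count >= the operand width must yield 0 for unsigned
// shifts or all sign bits for signed ones. Reading the ops roughly:
// ADDconstForCarry [-16] y sets carry iff y >= 16 unsigned,
// MaskIfNotCarry produces 0 on carry and -1 otherwise, and ORN computes
// y | ^mask. An in-range count therefore passes through unchanged, while
// an out-of-range count collapses to -1, whose low six bits (63) make
// SRW/SRAW saturate. The narrow variants also pre-extend x so the machine
// shift sees clean upper bits. Hedged sketch of the clamp for width 16:
//
//	y < 16:  mask = -1, ORN y mask == y | 0  == y   (shift by y)
//	y >= 16: mask =  0, ORN y mask == y | -1 == -1  (shift by 63)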
func rewriteValuePPC64_OpRsh16Ux32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh16Ux32 x y)
// cond:
// result: (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v1.AddArg(y)
v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -16
v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpRsh16Ux64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh16Ux64 x y)
// cond:
// result: (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v1.AddArg(y)
v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -16
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpRsh16Ux8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh16Ux8 x y)
// cond:
// result: (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v1.AddArg(y)
v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -16
v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpRsh16x16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh16x16 x y)
// cond:
// result: (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v1.AddArg(y)
v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -16
v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpRsh16x32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh16x32 x y)
// cond:
// result: (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v1.AddArg(y)
v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -16
v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpRsh16x64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh16x64 x y)
// cond:
// result: (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v1.AddArg(y)
v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -16
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpRsh16x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh16x8 x y)
// cond:
// result: (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v1.AddArg(y)
v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -16
v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpRsh32Ux16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh32Ux16 x y)
// cond:
// result: (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpRsh32Ux32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh32Ux32 x y)
// cond:
// result: (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpRsh32Ux64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh32Ux64 x y)
// cond:
// result: (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
v2.AddArg(y)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpRsh32Ux8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh32Ux8 x y)
// cond:
// result: (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpRsh32x16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh32x16 x y)
// cond:
// result: (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpRsh32x32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh32x32 x y)
// cond:
// result: (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpRsh32x64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh32x64 x y)
// cond:
// result: (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
v2.AddArg(y)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpRsh32x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh32x8 x y)
// cond:
// result: (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpRsh64Ux16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh64Ux16 x y)
// cond:
// result: (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRD)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpRsh64Ux32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh64Ux32 x y)
// cond:
// result: (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRD)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpRsh64Ux64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh64Ux64 x y)
// cond:
// result: (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRD)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
v2.AddArg(y)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpRsh64Ux8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh64Ux8 x y)
// cond:
// result: (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRD)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpRsh64x16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh64x16 x y)
// cond:
// result: (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAD)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpRsh64x32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh64x32 x y)
// cond:
// result: (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAD)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpRsh64x64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh64x64 x y)
// cond:
// result: (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAD)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
v2.AddArg(y)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpRsh64x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh64x8 x y)
// cond:
// result: (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAD)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
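// The 64-bit variants above follow the same clamp pattern with SRD/SRAD
// and a -64 constant, but skip the pre-extension of x: the value already
// fills the register, so only the shift count needs normalizing.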
func rewriteValuePPC64_OpRsh8Ux16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh8Ux16 x y)
// cond:
// result: (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v1.AddArg(y)
v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -8
v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpRsh8Ux32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh8Ux32 x y)
// cond:
// result: (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v1.AddArg(y)
v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -8
v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpRsh8Ux64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh8Ux64 x y)
// cond:
// result: (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v1.AddArg(y)
v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -8
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpRsh8Ux8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh8Ux8 x y)
// cond:
// result: (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v1.AddArg(y)
v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -8
v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpRsh8x16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh8x16 x y)
// cond:
// result: (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v1.AddArg(y)
v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -8
v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpRsh8x32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh8x32 x y)
// cond:
// result: (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v1.AddArg(y)
v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -8
v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpRsh8x64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh8x64 x y)
// cond:
// result: (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v1.AddArg(y)
v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -8
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpRsh8x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh8x8 x y)
// cond:
// result: (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
v1.AddArg(y)
v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -8
v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
}
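// Sign extensions lower to the sign-extending register moves MOVBreg,
// MOVHreg and MOVWreg. The target width does not matter (SignExt16to32 and
// SignExt16to64 both become MOVHreg), since this port appears to keep
// sub-64-bit values extended to the full register.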
func rewriteValuePPC64_OpSignExt16to32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SignExt16to32 x)
// cond:
// result: (MOVHreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVHreg)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpSignExt16to64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SignExt16to64 x)
// cond:
// result: (MOVHreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVHreg)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpSignExt32to64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SignExt32to64 x)
// cond:
// result: (MOVWreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVWreg)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpSignExt8to16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SignExt8to16 x)
// cond:
// result: (MOVBreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVBreg)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpSignExt8to32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SignExt8to32 x)
// cond:
// result: (MOVBreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVBreg)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpSignExt8to64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SignExt8to64 x)
// cond:
// result: (MOVBreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVBreg)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpSqrt(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Sqrt x)
// cond:
// result: (FSQRT x)
for {
x := v.Args[0]
v.reset(OpPPC64FSQRT)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpStaticCall(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (StaticCall [argwid] {target} mem)
// cond:
// result: (CALLstatic [argwid] {target} mem)
for {
argwid := v.AuxInt
target := v.Aux
mem := v.Args[0]
v.reset(OpPPC64CALLstatic)
v.AuxInt = argwid
v.Aux = target
v.AddArg(mem)
return true
}
}
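// Store dispatches on the store size in AuxInt plus the type of the stored
// value: floats go through FMOVDstore/FMOVSstore, integers and pointers
// through MOVDstore..MOVBstore. An 8-byte store of a value typed as a
// 32-bit float also uses FMOVDstore, presumably because the value is held
// in double format in the floating-point register.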
func rewriteValuePPC64_OpStore(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Store [8] ptr val mem)
// cond: is64BitFloat(val.Type)
// result: (FMOVDstore ptr val mem)
for {
if v.AuxInt != 8 {
break
}
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is64BitFloat(val.Type)) {
break
}
v.reset(OpPPC64FMOVDstore)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (Store [8] ptr val mem)
// cond: is32BitFloat(val.Type)
// result: (FMOVDstore ptr val mem)
for {
if v.AuxInt != 8 {
break
}
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is32BitFloat(val.Type)) {
break
}
v.reset(OpPPC64FMOVDstore)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (Store [4] ptr val mem)
// cond: is32BitFloat(val.Type)
// result: (FMOVSstore ptr val mem)
for {
if v.AuxInt != 4 {
break
}
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is32BitFloat(val.Type)) {
break
}
v.reset(OpPPC64FMOVSstore)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (Store [8] ptr val mem)
// cond: (is64BitInt(val.Type) || isPtr(val.Type))
// result: (MOVDstore ptr val mem)
for {
if v.AuxInt != 8 {
break
}
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is64BitInt(val.Type) || isPtr(val.Type)) {
break
}
v.reset(OpPPC64MOVDstore)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (Store [4] ptr val mem)
// cond: is32BitInt(val.Type)
// result: (MOVWstore ptr val mem)
for {
if v.AuxInt != 4 {
break
}
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is32BitInt(val.Type)) {
break
}
v.reset(OpPPC64MOVWstore)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (Store [2] ptr val mem)
// cond:
// result: (MOVHstore ptr val mem)
for {
if v.AuxInt != 2 {
break
}
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.reset(OpPPC64MOVHstore)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (Store [1] ptr val mem)
// cond:
// result: (MOVBstore ptr val mem)
for {
if v.AuxInt != 1 {
break
}
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.reset(OpPPC64MOVBstore)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpSub16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Sub16 x y)
// cond:
// result: (SUB x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SUB)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpSub32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Sub32 x y)
// cond:
// result: (SUB x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SUB)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpSub32F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Sub32F x y)
// cond:
// result: (FSUBS x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FSUBS)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpSub64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Sub64 x y)
// cond:
// result: (SUB x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SUB)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpSub64F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Sub64F x y)
// cond:
// result: (FSUB x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FSUB)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpSub8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Sub8 x y)
// cond:
// result: (SUB x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SUB)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpSubPtr(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SubPtr x y)
// cond:
// result: (SUB x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SUB)
v.AddArg(x)
v.AddArg(y)
return true
}
}
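// Truncations also lower to the sign-extending moves: re-extending the low
// bits appears to be how this port renormalizes a register after narrowing,
// rather than emitting a separate mask.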
func rewriteValuePPC64_OpTrunc16to8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Trunc16to8 x)
// cond:
// result: (MOVBreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVBreg)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpTrunc32to16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Trunc32to16 x)
// cond:
// result: (MOVHreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVHreg)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpTrunc32to8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Trunc32to8 x)
// cond:
// result: (MOVBreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVBreg)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpTrunc64to16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Trunc64to16 x)
// cond:
// result: (MOVHreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVHreg)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpTrunc64to32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Trunc64to32 x)
// cond:
// result: (MOVWreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVWreg)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpTrunc64to8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Trunc64to8 x)
// cond:
// result: (MOVBreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVBreg)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpXor16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Xor16 x y)
// cond:
// result: (XOR x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64XOR)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpXor32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Xor32 x y)
// cond:
// result: (XOR x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64XOR)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpXor64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Xor64 x y)
// cond:
// result: (XOR x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64XOR)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpXor8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Xor8 x y)
// cond:
// result: (XOR x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64XOR)
v.AddArg(x)
v.AddArg(y)
return true
}
}
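// Zero unrolls small constant sizes into chains of MOV*storezero ops, using
// the widest store the known alignment permits and falling back to byte
// stores when nothing better is guaranteed. The final rule hands sizes over
// 512 bytes, targets with config.noDuffDevice set, and buffers without
// 8-byte alignment to the LoweredZero loop.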
func rewriteValuePPC64_OpZero(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Zero [s] _ mem)
// cond: SizeAndAlign(s).Size() == 0
// result: mem
for {
s := v.AuxInt
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 0) {
break
}
v.reset(OpCopy)
v.Type = mem.Type
v.AddArg(mem)
return true
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size() == 1
// result: (MOVBstorezero destptr mem)
for {
s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 1) {
break
}
v.reset(OpPPC64MOVBstorezero)
v.AddArg(destptr)
v.AddArg(mem)
return true
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
// result: (MOVHstorezero destptr mem)
for {
s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
break
}
v.reset(OpPPC64MOVHstorezero)
v.AddArg(destptr)
v.AddArg(mem)
return true
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size() == 2
// result: (MOVBstorezero [1] destptr (MOVBstorezero [0] destptr mem))
for {
s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 2) {
break
}
v.reset(OpPPC64MOVBstorezero)
v.AuxInt = 1
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
v.AddArg(v0)
return true
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
// result: (MOVWstorezero destptr mem)
for {
s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
break
}
v.reset(OpPPC64MOVWstorezero)
v.AddArg(destptr)
v.AddArg(mem)
return true
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
// result: (MOVHstorezero [2] destptr (MOVHstorezero [0] destptr mem))
for {
s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
break
}
v.reset(OpPPC64MOVHstorezero)
v.AuxInt = 2
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpPPC64MOVHstorezero, TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
v.AddArg(v0)
return true
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size() == 4
// result: (MOVBstorezero [3] destptr (MOVBstorezero [2] destptr (MOVBstorezero [1] destptr (MOVBstorezero [0] destptr mem))))
for {
s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 4) {
break
}
v.reset(OpPPC64MOVBstorezero)
v.AuxInt = 3
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
v0.AuxInt = 2
v0.AddArg(destptr)
v1 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
v1.AuxInt = 1
v1.AddArg(destptr)
v2 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
v2.AuxInt = 0
v2.AddArg(destptr)
v2.AddArg(mem)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0
// result: (MOVDstorezero [0] destptr mem)
for {
s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0) {
break
}
v.reset(OpPPC64MOVDstorezero)
v.AuxInt = 0
v.AddArg(destptr)
v.AddArg(mem)
return true
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
// result: (MOVWstorezero [4] destptr (MOVWstorezero [0] destptr mem))
for {
s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
break
}
v.reset(OpPPC64MOVWstorezero)
v.AuxInt = 4
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpPPC64MOVWstorezero, TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
v.AddArg(v0)
return true
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0
// result: (MOVHstorezero [6] destptr (MOVHstorezero [4] destptr (MOVHstorezero [2] destptr (MOVHstorezero [0] destptr mem))))
for {
s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0) {
break
}
v.reset(OpPPC64MOVHstorezero)
v.AuxInt = 6
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpPPC64MOVHstorezero, TypeMem)
v0.AuxInt = 4
v0.AddArg(destptr)
v1 := b.NewValue0(v.Line, OpPPC64MOVHstorezero, TypeMem)
v1.AuxInt = 2
v1.AddArg(destptr)
v2 := b.NewValue0(v.Line, OpPPC64MOVHstorezero, TypeMem)
v2.AuxInt = 0
v2.AddArg(destptr)
v2.AddArg(mem)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size() == 3
// result: (MOVBstorezero [2] destptr (MOVBstorezero [1] destptr (MOVBstorezero [0] destptr mem)))
for {
s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 3) {
break
}
v.reset(OpPPC64MOVBstorezero)
v.AuxInt = 2
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
v0.AuxInt = 1
v0.AddArg(destptr)
v1 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
v1.AuxInt = 0
v1.AddArg(destptr)
v1.AddArg(mem)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0
// result: (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))
for {
s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0) {
break
}
v.reset(OpPPC64MOVDstorezero)
v.AuxInt = 8
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
v.AddArg(v0)
return true
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0
// result: (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)))
for {
s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0) {
break
}
v.reset(OpPPC64MOVDstorezero)
v.AuxInt = 16
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
v0.AuxInt = 8
v0.AddArg(destptr)
v1 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
v1.AuxInt = 0
v1.AddArg(destptr)
v1.AddArg(mem)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size() == 32 && SizeAndAlign(s).Align()%8 == 0
// result: (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))))
for {
s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(SizeAndAlign(s).Size() == 32 && SizeAndAlign(s).Align()%8 == 0) {
break
}
v.reset(OpPPC64MOVDstorezero)
v.AuxInt = 24
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
v0.AuxInt = 16
v0.AddArg(destptr)
v1 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
v1.AuxInt = 8
v1.AddArg(destptr)
v2 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
v2.AuxInt = 0
v2.AddArg(destptr)
v2.AddArg(mem)
v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
// match: (Zero [s] ptr mem)
// cond: (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0
// result: (LoweredZero [SizeAndAlign(s).Align()] ptr (ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) mem)
for {
s := v.AuxInt
ptr := v.Args[0]
mem := v.Args[1]
if !((SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0) {
break
}
v.reset(OpPPC64LoweredZero)
v.AuxInt = SizeAndAlign(s).Align()
v.AddArg(ptr)
v0 := b.NewValue0(v.Line, OpPPC64ADDconst, ptr.Type)
v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
v0.AddArg(ptr)
v.AddArg(v0)
v.AddArg(mem)
return true
}
return false
}
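// Zero extensions mirror the sign extensions above, lowering to the
// zero-extending moves MOVBZreg, MOVHZreg and MOVWZreg regardless of the
// target width.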
func rewriteValuePPC64_OpZeroExt16to32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ZeroExt16to32 x)
// cond:
// result: (MOVHZreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVHZreg)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpZeroExt16to64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ZeroExt16to64 x)
// cond:
// result: (MOVHZreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVHZreg)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpZeroExt32to64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ZeroExt32to64 x)
// cond:
// result: (MOVWZreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVWZreg)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpZeroExt8to16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ZeroExt8to16 x)
// cond:
// result: (MOVBZreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVBZreg)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpZeroExt8to32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ZeroExt8to32 x)
// cond:
// result: (MOVBZreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVBZreg)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpZeroExt8to64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ZeroExt8to64 x)
// cond:
// result: (MOVBZreg x)
for {
x := v.Args[0]
v.reset(OpPPC64MOVBZreg)
v.AddArg(x)
return true
}
}
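// rewriteBlockPPC64 optimizes control flow. For each conditional block kind
// it folds a constant-flag control (FlagEQ/FlagLT/FlagGT) into an
// unconditional First block, swapping the successors when the condition is
// statically false, and it absorbs InvertFlags by flipping the block to the
// mirrored comparison (GE <-> LE, GT <-> LT; EQ and NE are self-inverse).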
func rewriteBlockPPC64(b *Block, config *Config) bool {
switch b.Kind {
case BlockPPC64EQ:
// match: (EQ (FlagEQ) yes no)
// cond:
// result: (First nil yes no)
for {
v := b.Control
if v.Op != OpPPC64FlagEQ {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
_ = yes
_ = no
return true
}
// match: (EQ (FlagLT) yes no)
// cond:
// result: (First nil no yes)
for {
v := b.Control
if v.Op != OpPPC64FlagLT {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
b.swapSuccessors()
_ = no
_ = yes
return true
}
// match: (EQ (FlagGT) yes no)
// cond:
// result: (First nil no yes)
for {
v := b.Control
if v.Op != OpPPC64FlagGT {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
b.swapSuccessors()
_ = no
_ = yes
return true
}
// match: (EQ (InvertFlags cmp) yes no)
// cond:
// result: (EQ cmp yes no)
for {
v := b.Control
if v.Op != OpPPC64InvertFlags {
break
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64EQ
b.SetControl(cmp)
_ = yes
_ = no
return true
}
case BlockPPC64GE:
// match: (GE (FlagEQ) yes no)
// cond:
// result: (First nil yes no)
for {
v := b.Control
if v.Op != OpPPC64FlagEQ {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
_ = yes
_ = no
return true
}
// match: (GE (FlagLT) yes no)
// cond:
// result: (First nil no yes)
for {
v := b.Control
if v.Op != OpPPC64FlagLT {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
b.swapSuccessors()
_ = no
_ = yes
return true
}
// match: (GE (FlagGT) yes no)
// cond:
// result: (First nil yes no)
for {
v := b.Control
if v.Op != OpPPC64FlagGT {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
_ = yes
_ = no
return true
}
// match: (GE (InvertFlags cmp) yes no)
// cond:
// result: (LE cmp yes no)
for {
v := b.Control
if v.Op != OpPPC64InvertFlags {
break
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64LE
b.SetControl(cmp)
_ = yes
_ = no
return true
}
case BlockPPC64GT:
// match: (GT (FlagEQ) yes no)
// cond:
// result: (First nil no yes)
for {
v := b.Control
if v.Op != OpPPC64FlagEQ {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
b.swapSuccessors()
_ = no
_ = yes
return true
}
// match: (GT (FlagLT) yes no)
// cond:
// result: (First nil no yes)
for {
v := b.Control
if v.Op != OpPPC64FlagLT {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
b.swapSuccessors()
_ = no
_ = yes
return true
}
// match: (GT (FlagGT) yes no)
// cond:
// result: (First nil yes no)
for {
v := b.Control
if v.Op != OpPPC64FlagGT {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
_ = yes
_ = no
return true
}
// match: (GT (InvertFlags cmp) yes no)
// cond:
// result: (LT cmp yes no)
for {
v := b.Control
if v.Op != OpPPC64InvertFlags {
break
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64LT
b.SetControl(cmp)
_ = yes
_ = no
return true
}
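	// If blocks whose control is one of the comparison pseudo-ops (Equal,
	// LessThan, FGreaterEqual, ...) branch directly on the underlying
	// condition register; any other boolean control falls through to the
	// last rule in this case, which tests it against zero with
	// (NE (CMPWconst [0] cond)).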
case BlockIf:
// match: (If (Equal cc) yes no)
// cond:
// result: (EQ cc yes no)
for {
v := b.Control
if v.Op != OpPPC64Equal {
break
}
cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64EQ
b.SetControl(cc)
_ = yes
_ = no
return true
}
// match: (If (NotEqual cc) yes no)
// cond:
// result: (NE cc yes no)
for {
v := b.Control
if v.Op != OpPPC64NotEqual {
break
}
cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64NE
b.SetControl(cc)
_ = yes
_ = no
return true
}
// match: (If (LessThan cc) yes no)
// cond:
// result: (LT cc yes no)
for {
v := b.Control
if v.Op != OpPPC64LessThan {
break
}
cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64LT
b.SetControl(cc)
_ = yes
_ = no
return true
}
// match: (If (LessEqual cc) yes no)
// cond:
// result: (LE cc yes no)
for {
v := b.Control
if v.Op != OpPPC64LessEqual {
break
}
cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64LE
b.SetControl(cc)
_ = yes
_ = no
return true
}
// match: (If (GreaterThan cc) yes no)
// cond:
// result: (GT cc yes no)
for {
v := b.Control
if v.Op != OpPPC64GreaterThan {
break
}
cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64GT
b.SetControl(cc)
_ = yes
_ = no
return true
}
// match: (If (GreaterEqual cc) yes no)
// cond:
// result: (GE cc yes no)
for {
v := b.Control
if v.Op != OpPPC64GreaterEqual {
break
}
cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64GE
b.SetControl(cc)
_ = yes
_ = no
return true
}
// match: (If (FLessThan cc) yes no)
// cond:
// result: (FLT cc yes no)
for {
v := b.Control
if v.Op != OpPPC64FLessThan {
break
}
cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64FLT
b.SetControl(cc)
_ = yes
_ = no
return true
}
// match: (If (FLessEqual cc) yes no)
// cond:
// result: (FLE cc yes no)
for {
v := b.Control
if v.Op != OpPPC64FLessEqual {
break
}
cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64FLE
b.SetControl(cc)
_ = yes
_ = no
return true
}
// match: (If (FGreaterThan cc) yes no)
// cond:
// result: (FGT cc yes no)
for {
v := b.Control
if v.Op != OpPPC64FGreaterThan {
break
}
cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64FGT
b.SetControl(cc)
_ = yes
_ = no
return true
}
// match: (If (FGreaterEqual cc) yes no)
// cond:
// result: (FGE cc yes no)
for {
v := b.Control
if v.Op != OpPPC64FGreaterEqual {
break
}
cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64FGE
b.SetControl(cc)
_ = yes
_ = no
return true
}
// match: (If cond yes no)
// cond:
// result: (NE (CMPWconst [0] cond) yes no)
for {
v := b.Control
_ = v
cond := b.Control
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64NE
v0 := b.NewValue0(v.Line, OpPPC64CMPWconst, TypeFlags)
v0.AuxInt = 0
v0.AddArg(cond)
b.SetControl(v0)
_ = yes
_ = no
return true
}
case BlockPPC64LE:
// match: (LE (FlagEQ) yes no)
// cond:
// result: (First nil yes no)
for {
v := b.Control
if v.Op != OpPPC64FlagEQ {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
_ = yes
_ = no
return true
}
// match: (LE (FlagLT) yes no)
// cond:
// result: (First nil yes no)
for {
v := b.Control
if v.Op != OpPPC64FlagLT {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
_ = yes
_ = no
return true
}
// match: (LE (FlagGT) yes no)
// cond:
// result: (First nil no yes)
for {
v := b.Control
if v.Op != OpPPC64FlagGT {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
b.swapSuccessors()
_ = no
_ = yes
return true
}
// match: (LE (InvertFlags cmp) yes no)
// cond:
// result: (GE cmp yes no)
for {
v := b.Control
if v.Op != OpPPC64InvertFlags {
break
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64GE
b.SetControl(cmp)
_ = yes
_ = no
return true
}
case BlockPPC64LT:
// match: (LT (FlagEQ) yes no)
// cond:
// result: (First nil no yes)
for {
v := b.Control
if v.Op != OpPPC64FlagEQ {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
b.swapSuccessors()
_ = no
_ = yes
return true
}
// match: (LT (FlagLT) yes no)
// cond:
// result: (First nil yes no)
for {
v := b.Control
if v.Op != OpPPC64FlagLT {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
_ = yes
_ = no
return true
}
// match: (LT (FlagGT) yes no)
// cond:
// result: (First nil no yes)
for {
v := b.Control
if v.Op != OpPPC64FlagGT {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
b.swapSuccessors()
_ = no
_ = yes
return true
}
// match: (LT (InvertFlags cmp) yes no)
// cond:
// result: (GT cmp yes no)
for {
v := b.Control
if v.Op != OpPPC64InvertFlags {
break
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64GT
b.SetControl(cmp)
_ = yes
_ = no
return true
}
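	// The (NE (CMPWconst [0] (...))) rules below undo boolean
	// materialization: when a branch tests a flag-derived boolean against
	// zero, the test is re-expressed as a direct conditional branch on the
	// original flags value.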
case BlockPPC64NE:
// match: (NE (CMPWconst [0] (Equal cc)) yes no)
// cond:
// result: (EQ cc yes no)
for {
v := b.Control
if v.Op != OpPPC64CMPWconst {
break
}
if v.AuxInt != 0 {
break
}
v_0 := v.Args[0]
if v_0.Op != OpPPC64Equal {
break
}
cc := v_0.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64EQ
b.SetControl(cc)
_ = yes
_ = no
return true
}
// match: (NE (CMPWconst [0] (NotEqual cc)) yes no)
// cond:
// result: (NE cc yes no)
for {
v := b.Control
if v.Op != OpPPC64CMPWconst {
break
}
if v.AuxInt != 0 {
break
}
v_0 := v.Args[0]
if v_0.Op != OpPPC64NotEqual {
break
}
cc := v_0.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64NE
b.SetControl(cc)
_ = yes
_ = no
return true
}
// match: (NE (CMPWconst [0] (LessThan cc)) yes no)
// cond:
// result: (LT cc yes no)
for {
v := b.Control
if v.Op != OpPPC64CMPWconst {
break
}
if v.AuxInt != 0 {
break
}
v_0 := v.Args[0]
if v_0.Op != OpPPC64LessThan {
break
}
cc := v_0.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64LT
b.SetControl(cc)
_ = yes
_ = no
return true
}
// match: (NE (CMPWconst [0] (LessEqual cc)) yes no)
// cond:
// result: (LE cc yes no)
for {
v := b.Control
if v.Op != OpPPC64CMPWconst {
break
}
if v.AuxInt != 0 {
break
}
v_0 := v.Args[0]
if v_0.Op != OpPPC64LessEqual {
break
}
cc := v_0.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64LE
b.SetControl(cc)
_ = yes
_ = no
return true
}
// match: (NE (CMPWconst [0] (GreaterThan cc)) yes no)
// cond:
// result: (GT cc yes no)
for {
v := b.Control
if v.Op != OpPPC64CMPWconst {
break
}
if v.AuxInt != 0 {
break
}
v_0 := v.Args[0]
if v_0.Op != OpPPC64GreaterThan {
break
}
cc := v_0.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64GT
b.SetControl(cc)
_ = yes
_ = no
return true
}
// match: (NE (CMPWconst [0] (GreaterEqual cc)) yes no)
// cond:
// result: (GE cc yes no)
for {
v := b.Control
if v.Op != OpPPC64CMPWconst {
break
}
if v.AuxInt != 0 {
break
}
v_0 := v.Args[0]
if v_0.Op != OpPPC64GreaterEqual {
break
}
cc := v_0.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64GE
b.SetControl(cc)
_ = yes
_ = no
return true
}
// match: (NE (FlagEQ) yes no)
// cond:
// result: (First nil no yes)
for {
v := b.Control
if v.Op != OpPPC64FlagEQ {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
b.swapSuccessors()
_ = no
_ = yes
return true
}
// match: (NE (FlagLT) yes no)
// cond:
// result: (First nil yes no)
for {
v := b.Control
if v.Op != OpPPC64FlagLT {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
_ = yes
_ = no
return true
}
// match: (NE (FlagGT) yes no)
// cond:
// result: (First nil yes no)
for {
v := b.Control
if v.Op != OpPPC64FlagGT {
break
}
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockFirst
b.SetControl(nil)
_ = yes
_ = no
return true
}
// match: (NE (InvertFlags cmp) yes no)
// cond:
// result: (NE cmp yes no)
for {
v := b.Control
if v.Op != OpPPC64InvertFlags {
break
}
cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockPPC64NE
b.SetControl(cmp)
_ = yes
_ = no
return true
}
}
return false
}