go/src/cmd/compile/internal/ssa/gen/genericOps.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

// Generic opcodes typically specify a width. The inputs and outputs
// of that op are the given number of bits wide. There is no notion of
// "sign", so Add32 can be used both for signed and unsigned 32-bit
// addition.
// Signed/unsigned is explicit with the extension ops
// (SignExt*/ZeroExt*) and implicit as the arg to some opcodes
// (e.g. the second argument to shifts is unsigned). If not mentioned,
// all args take signed inputs, or don't care whether their inputs
// are signed or unsigned.
// Unused portions of AuxInt are filled by sign-extending the used portion.
// Users of AuxInt which interpret AuxInt as unsigned (e.g. shifts) must be careful.
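//
// Each entry below is one opData value. The fields used in this file are,
// roughly: name (the generator emits a matching Op<name> constant),
// argLength (number of value arguments, -1 meaning variable, e.g. Phi),
// commutative (the first two arguments may be swapped, which the rewrite-rule
// generator and the register allocator exploit), and aux/typ (the kind of the
// aux/auxint payload and the default result type).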
var genericOps = []opData{
// 2-input arithmetic
// Types must be consistent with Go typing. Add, for example, must take two values
// of the same type and produce a value of that same type.
{name: "Add8", argLength: 2, commutative: true}, // arg0 + arg1
{name: "Add16", argLength: 2, commutative: true},
{name: "Add32", argLength: 2, commutative: true},
{name: "Add64", argLength: 2, commutative: true},
{name: "AddPtr", argLength: 2}, // For address calculations. arg0 is a pointer and arg1 is an int.
{name: "Add32F", argLength: 2, commutative: true},
{name: "Add64F", argLength: 2, commutative: true},
{name: "Sub8", argLength: 2}, // arg0 - arg1
{name: "Sub16", argLength: 2},
{name: "Sub32", argLength: 2},
{name: "Sub64", argLength: 2},
{name: "SubPtr", argLength: 2},
{name: "Sub32F", argLength: 2},
{name: "Sub64F", argLength: 2},
{name: "Mul8", argLength: 2, commutative: true}, // arg0 * arg1
{name: "Mul16", argLength: 2, commutative: true},
{name: "Mul32", argLength: 2, commutative: true},
{name: "Mul64", argLength: 2, commutative: true},
{name: "Mul32F", argLength: 2, commutative: true},
{name: "Mul64F", argLength: 2, commutative: true},
{name: "Div32F", argLength: 2}, // arg0 / arg1
{name: "Div64F", argLength: 2},
{name: "Hmul32", argLength: 2, commutative: true},
{name: "Hmul32u", argLength: 2, commutative: true},
{name: "Hmul64", argLength: 2, commutative: true},
{name: "Hmul64u", argLength: 2, commutative: true},
{name: "Mul32uhilo", argLength: 2, typ: "(UInt32,UInt32)", commutative: true}, // arg0 * arg1, returns (hi, lo)
{name: "Mul64uhilo", argLength: 2, typ: "(UInt64,UInt64)", commutative: true}, // arg0 * arg1, returns (hi, lo)
// Weird special instructions for use in the strength reduction of divides.
// These ops compute unsigned (arg0 + arg1) / 2, correct to all
// 32/64 bits, even when the intermediate result of the add has 33/65 bits.
// These ops can assume arg0 >= arg1.
// Note: these ops aren't commutative!
{name: "Avg32u", argLength: 2, typ: "UInt32"}, // 32-bit platforms only
{name: "Avg64u", argLength: 2, typ: "UInt64"}, // 64-bit platforms only
{name: "Div8", argLength: 2}, // arg0 / arg1, signed
{name: "Div8u", argLength: 2}, // arg0 / arg1, unsigned
{name: "Div16", argLength: 2},
{name: "Div16u", argLength: 2},
{name: "Div32", argLength: 2},
{name: "Div32u", argLength: 2},
{name: "Div64", argLength: 2},
{name: "Div64u", argLength: 2},
{name: "Div128u", argLength: 3}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r)
{name: "Mod8", argLength: 2}, // arg0 % arg1, signed
{name: "Mod8u", argLength: 2}, // arg0 % arg1, unsigned
{name: "Mod16", argLength: 2},
{name: "Mod16u", argLength: 2},
{name: "Mod32", argLength: 2},
{name: "Mod32u", argLength: 2},
{name: "Mod64", argLength: 2},
{name: "Mod64u", argLength: 2},
{name: "And8", argLength: 2, commutative: true}, // arg0 & arg1
{name: "And16", argLength: 2, commutative: true},
{name: "And32", argLength: 2, commutative: true},
{name: "And64", argLength: 2, commutative: true},
{name: "Or8", argLength: 2, commutative: true}, // arg0 | arg1
{name: "Or16", argLength: 2, commutative: true},
{name: "Or32", argLength: 2, commutative: true},
{name: "Or64", argLength: 2, commutative: true},
{name: "Xor8", argLength: 2, commutative: true}, // arg0 ^ arg1
{name: "Xor16", argLength: 2, commutative: true},
{name: "Xor32", argLength: 2, commutative: true},
{name: "Xor64", argLength: 2, commutative: true},
// For shifts, AxB means the shifted value has A bits and the shift amount has B bits.
// Shift amounts are considered unsigned.
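// For example, Lsh32x8 shifts a 32-bit value left by an 8-bit (unsigned) shift amount.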
{name: "Lsh8x8", argLength: 2}, // arg0 << arg1
{name: "Lsh8x16", argLength: 2},
{name: "Lsh8x32", argLength: 2},
{name: "Lsh8x64", argLength: 2},
{name: "Lsh16x8", argLength: 2},
{name: "Lsh16x16", argLength: 2},
{name: "Lsh16x32", argLength: 2},
{name: "Lsh16x64", argLength: 2},
{name: "Lsh32x8", argLength: 2},
{name: "Lsh32x16", argLength: 2},
{name: "Lsh32x32", argLength: 2},
{name: "Lsh32x64", argLength: 2},
{name: "Lsh64x8", argLength: 2},
{name: "Lsh64x16", argLength: 2},
{name: "Lsh64x32", argLength: 2},
{name: "Lsh64x64", argLength: 2},
{name: "Rsh8x8", argLength: 2}, // arg0 >> arg1, signed
{name: "Rsh8x16", argLength: 2},
{name: "Rsh8x32", argLength: 2},
{name: "Rsh8x64", argLength: 2},
{name: "Rsh16x8", argLength: 2},
{name: "Rsh16x16", argLength: 2},
{name: "Rsh16x32", argLength: 2},
{name: "Rsh16x64", argLength: 2},
{name: "Rsh32x8", argLength: 2},
{name: "Rsh32x16", argLength: 2},
{name: "Rsh32x32", argLength: 2},
{name: "Rsh32x64", argLength: 2},
{name: "Rsh64x8", argLength: 2},
{name: "Rsh64x16", argLength: 2},
{name: "Rsh64x32", argLength: 2},
{name: "Rsh64x64", argLength: 2},
{name: "Rsh8Ux8", argLength: 2}, // arg0 >> arg1, unsigned
{name: "Rsh8Ux16", argLength: 2},
{name: "Rsh8Ux32", argLength: 2},
{name: "Rsh8Ux64", argLength: 2},
{name: "Rsh16Ux8", argLength: 2},
{name: "Rsh16Ux16", argLength: 2},
{name: "Rsh16Ux32", argLength: 2},
{name: "Rsh16Ux64", argLength: 2},
{name: "Rsh32Ux8", argLength: 2},
{name: "Rsh32Ux16", argLength: 2},
{name: "Rsh32Ux32", argLength: 2},
{name: "Rsh32Ux64", argLength: 2},
{name: "Rsh64Ux8", argLength: 2},
{name: "Rsh64Ux16", argLength: 2},
{name: "Rsh64Ux32", argLength: 2},
{name: "Rsh64Ux64", argLength: 2},
// 2-input comparisons
{name: "Eq8", argLength: 2, commutative: true, typ: "Bool"}, // arg0 == arg1
{name: "Eq16", argLength: 2, commutative: true, typ: "Bool"},
{name: "Eq32", argLength: 2, commutative: true, typ: "Bool"},
{name: "Eq64", argLength: 2, commutative: true, typ: "Bool"},
{name: "EqPtr", argLength: 2, commutative: true, typ: "Bool"},
{name: "EqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
{name: "EqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
{name: "Eq32F", argLength: 2, commutative: true, typ: "Bool"},
{name: "Eq64F", argLength: 2, commutative: true, typ: "Bool"},
{name: "Neq8", argLength: 2, commutative: true, typ: "Bool"}, // arg0 != arg1
{name: "Neq16", argLength: 2, commutative: true, typ: "Bool"},
{name: "Neq32", argLength: 2, commutative: true, typ: "Bool"},
{name: "Neq64", argLength: 2, commutative: true, typ: "Bool"},
{name: "NeqPtr", argLength: 2, commutative: true, typ: "Bool"},
{name: "NeqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
{name: "NeqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
{name: "Neq32F", argLength: 2, commutative: true, typ: "Bool"},
{name: "Neq64F", argLength: 2, commutative: true, typ: "Bool"},
{name: "Less8", argLength: 2, typ: "Bool"}, // arg0 < arg1, signed
{name: "Less8U", argLength: 2, typ: "Bool"}, // arg0 < arg1, unsigned
{name: "Less16", argLength: 2, typ: "Bool"},
{name: "Less16U", argLength: 2, typ: "Bool"},
{name: "Less32", argLength: 2, typ: "Bool"},
{name: "Less32U", argLength: 2, typ: "Bool"},
{name: "Less64", argLength: 2, typ: "Bool"},
{name: "Less64U", argLength: 2, typ: "Bool"},
{name: "Less32F", argLength: 2, typ: "Bool"},
{name: "Less64F", argLength: 2, typ: "Bool"},
{name: "Leq8", argLength: 2, typ: "Bool"}, // arg0 <= arg1, signed
{name: "Leq8U", argLength: 2, typ: "Bool"}, // arg0 <= arg1, unsigned
{name: "Leq16", argLength: 2, typ: "Bool"},
{name: "Leq16U", argLength: 2, typ: "Bool"},
{name: "Leq32", argLength: 2, typ: "Bool"},
{name: "Leq32U", argLength: 2, typ: "Bool"},
{name: "Leq64", argLength: 2, typ: "Bool"},
{name: "Leq64U", argLength: 2, typ: "Bool"},
{name: "Leq32F", argLength: 2, typ: "Bool"},
{name: "Leq64F", argLength: 2, typ: "Bool"},
{name: "Greater8", argLength: 2, typ: "Bool"}, // arg0 > arg1, signed
{name: "Greater8U", argLength: 2, typ: "Bool"}, // arg0 > arg1, unsigned
{name: "Greater16", argLength: 2, typ: "Bool"},
{name: "Greater16U", argLength: 2, typ: "Bool"},
{name: "Greater32", argLength: 2, typ: "Bool"},
{name: "Greater32U", argLength: 2, typ: "Bool"},
{name: "Greater64", argLength: 2, typ: "Bool"},
{name: "Greater64U", argLength: 2, typ: "Bool"},
{name: "Greater32F", argLength: 2, typ: "Bool"},
{name: "Greater64F", argLength: 2, typ: "Bool"},
{name: "Geq8", argLength: 2, typ: "Bool"}, // arg0 <= arg1, signed
{name: "Geq8U", argLength: 2, typ: "Bool"}, // arg0 <= arg1, unsigned
{name: "Geq16", argLength: 2, typ: "Bool"},
{name: "Geq16U", argLength: 2, typ: "Bool"},
{name: "Geq32", argLength: 2, typ: "Bool"},
{name: "Geq32U", argLength: 2, typ: "Bool"},
{name: "Geq64", argLength: 2, typ: "Bool"},
{name: "Geq64U", argLength: 2, typ: "Bool"},
{name: "Geq32F", argLength: 2, typ: "Bool"},
{name: "Geq64F", argLength: 2, typ: "Bool"},
// boolean ops
{name: "AndB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 && arg1 (not shortcircuited)
{name: "OrB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 || arg1 (not shortcircuited)
{name: "EqB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 == arg1
{name: "NeqB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 != arg1
{name: "Not", argLength: 1, typ: "Bool"}, // !arg0, boolean
// 1-input ops
{name: "Neg8", argLength: 1}, // -arg0
{name: "Neg16", argLength: 1},
{name: "Neg32", argLength: 1},
{name: "Neg64", argLength: 1},
{name: "Neg32F", argLength: 1},
{name: "Neg64F", argLength: 1},
{name: "Com8", argLength: 1}, // ^arg0
{name: "Com16", argLength: 1},
{name: "Com32", argLength: 1},
{name: "Com64", argLength: 1},
{name: "Ctz32", argLength: 1}, // Count trailing (low order) zeroes (returns 0-32)
{name: "Ctz64", argLength: 1}, // Count trailing zeroes (returns 0-64)
{name: "BitLen32", argLength: 1}, // Number of bits in arg[0] (returns 0-32)
{name: "BitLen64", argLength: 1}, // Number of bits in arg[0] (returns 0-64)
{name: "Bswap32", argLength: 1}, // Swap bytes
{name: "Bswap64", argLength: 1}, // Swap bytes
{name: "BitRev8", argLength: 1}, // Reverse the bits in arg[0]
{name: "BitRev16", argLength: 1}, // Reverse the bits in arg[0]
{name: "BitRev32", argLength: 1}, // Reverse the bits in arg[0]
{name: "BitRev64", argLength: 1}, // Reverse the bits in arg[0]
{name: "PopCount8", argLength: 1}, // Count bits in arg[0]
{name: "PopCount16", argLength: 1}, // Count bits in arg[0]
{name: "PopCount32", argLength: 1}, // Count bits in arg[0]
{name: "PopCount64", argLength: 1}, // Count bits in arg[0]
// Square root, float64 only.
// Special cases:
// +∞ → +∞
// ±0 → ±0 (sign preserved)
// x<0 → NaN
// NaN → NaN
{name: "Sqrt", argLength: 1}, // √arg0
// Round to integer, float64 only.
// Special cases:
// ±∞ → ±∞ (sign preserved)
// ±0 → ±0 (sign preserved)
// NaN → NaN
{name: "Floor", argLength: 1}, // round arg0 toward -∞
{name: "Ceil", argLength: 1}, // round arg0 toward +∞
{name: "Trunc", argLength: 1}, // round arg0 toward 0
{name: "Round", argLength: 1}, // round arg0 to nearest, ties away from 0
{name: "RoundToEven", argLength: 1}, // round arg0 to nearest, ties to even
// Modify the sign bit
{name: "Abs", argLength: 1}, // absolute value arg0
{name: "Copysign", argLength: 2}, // copy sign from arg0 to arg1
// Data movement, max argument length for Phi is indefinite so just pick
// a really large number
{name: "Phi", argLength: -1}, // select an argument based on which predecessor block we came from
{name: "Copy", argLength: 1}, // output = arg0
// Convert converts between pointers and integers.
// We have a special op for this so as to not confuse GC
// (particularly stack maps). It takes a memory arg so it
// gets correctly ordered with respect to GC safepoints.
// arg0=ptr/int arg1=mem, output=int/ptr
{name: "Convert", argLength: 2},
// constants. Constant values are stored in the aux or
// auxint fields.
{name: "ConstBool", aux: "Bool"}, // auxint is 0 for false and 1 for true
{name: "ConstString", aux: "String"}, // value is aux.(string)
{name: "ConstNil", typ: "BytePtr"}, // nil pointer
{name: "Const8", aux: "Int8"}, // auxint is sign-extended 8 bits
{name: "Const16", aux: "Int16"}, // auxint is sign-extended 16 bits
{name: "Const32", aux: "Int32"}, // auxint is sign-extended 32 bits
// Note: ConstX are sign-extended even when the type of the value is unsigned.
// For instance, uint8(0xaa) is stored as auxint=0xffffffffffffffaa.
{name: "Const64", aux: "Int64"}, // value is auxint
{name: "Const32F", aux: "Float32"}, // value is math.Float64frombits(uint64(auxint)) and is exactly prepresentable as float 32
{name: "Const64F", aux: "Float64"}, // value is math.Float64frombits(uint64(auxint))
{name: "ConstInterface"}, // nil interface
{name: "ConstSlice"}, // nil slice
// Constant-like things
{name: "InitMem"}, // memory input to the function.
{name: "Arg", aux: "SymOff", symEffect: "Read"}, // argument to the function. aux=GCNode of arg, off = offset in that arg.
// The address of a variable. arg0 is the base pointer.
// If the variable is a global, the base pointer will be SB and
// the Aux field will be a *obj.LSym.
// If the variable is a local, the base pointer will be SP and
// the Aux field will be a *gc.Node.
{name: "Addr", argLength: 1, aux: "Sym", symEffect: "Addr"}, // Address of a variable. Arg0=SP or SB. Aux identifies the variable.
{name: "SP"}, // stack pointer
{name: "SB", typ: "Uintptr"}, // static base pointer (a.k.a. globals pointer)
{name: "Invalid"}, // unused value
// Memory operations
{name: "Load", argLength: 2}, // Load from arg0. arg1=memory
{name: "Store", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory.
// The source and destination of Move may overlap in some cases. See e.g.
// memmove inlining in generic.rules. When inlineablememmovesize (in ../rewrite.go)
// returns true, we must do all loads before all stores, when lowering Move.
{name: "Move", argLength: 3, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size, aux=type. Returns memory.
{name: "Zero", argLength: 2, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=mem, auxint=size, aux=type. Returns memory.
// Memory operations with write barriers.
// Expand to runtime calls. Write barrier will be removed if write on stack.
{name: "StoreWB", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory.
{name: "MoveWB", argLength: 3, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size, aux=type. Returns memory.
{name: "ZeroWB", argLength: 2, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=mem, auxint=size, aux=type. Returns memory.
// WB invokes runtime.gcWriteBarrier. This is not a normal
// call: it takes arguments in registers, doesn't clobber
// general-purpose registers (the exact clobber set is
// arch-dependent), and is not a safe-point.
{name: "WB", argLength: 3, typ: "Mem", aux: "Sym", symEffect: "None"}, // arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
// Function calls. Arguments to the call have already been written to the stack.
// Return values appear on the stack. The method receiver, if any, is treated
// as a phantom first argument.
{name: "ClosureCall", argLength: 3, aux: "Int64", call: true}, // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory.
{name: "StaticCall", argLength: 1, aux: "SymOff", call: true, symEffect: "None"}, // call function aux.(*obj.LSym), arg0=memory. auxint=arg size. Returns memory.
{name: "InterCall", argLength: 2, aux: "Int64", call: true}, // interface call. arg0=code pointer, arg1=memory, auxint=arg size. Returns memory.
// Conversions: signed extensions, zero (unsigned) extensions, truncations
{name: "SignExt8to16", argLength: 1, typ: "Int16"},
{name: "SignExt8to32", argLength: 1, typ: "Int32"},
{name: "SignExt8to64", argLength: 1, typ: "Int64"},
{name: "SignExt16to32", argLength: 1, typ: "Int32"},
{name: "SignExt16to64", argLength: 1, typ: "Int64"},
{name: "SignExt32to64", argLength: 1, typ: "Int64"},
{name: "ZeroExt8to16", argLength: 1, typ: "UInt16"},
{name: "ZeroExt8to32", argLength: 1, typ: "UInt32"},
{name: "ZeroExt8to64", argLength: 1, typ: "UInt64"},
{name: "ZeroExt16to32", argLength: 1, typ: "UInt32"},
{name: "ZeroExt16to64", argLength: 1, typ: "UInt64"},
{name: "ZeroExt32to64", argLength: 1, typ: "UInt64"},
{name: "Trunc16to8", argLength: 1},
{name: "Trunc32to8", argLength: 1},
{name: "Trunc32to16", argLength: 1},
{name: "Trunc64to8", argLength: 1},
{name: "Trunc64to16", argLength: 1},
{name: "Trunc64to32", argLength: 1},
{name: "Cvt32to32F", argLength: 1},
{name: "Cvt32to64F", argLength: 1},
{name: "Cvt64to32F", argLength: 1},
{name: "Cvt64to64F", argLength: 1},
{name: "Cvt32Fto32", argLength: 1},
{name: "Cvt32Fto64", argLength: 1},
{name: "Cvt64Fto32", argLength: 1},
{name: "Cvt64Fto64", argLength: 1},
{name: "Cvt32Fto64F", argLength: 1},
{name: "Cvt64Fto32F", argLength: 1},
// Force rounding to precision of type.
{name: "Round32F", argLength: 1},
{name: "Round64F", argLength: 1},
// Automatically inserted safety checks
{name: "IsNonNil", argLength: 1, typ: "Bool"}, // arg0 != nil
{name: "IsInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 < arg1. arg1 is guaranteed >= 0.
{name: "IsSliceInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 <= arg1. arg1 is guaranteed >= 0.
{name: "NilCheck", argLength: 2, typ: "Void"}, // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns void.
// Pseudo-ops
{name: "GetG", argLength: 1}, // runtime.getg() (read g pointer). arg0=mem
{name: "GetClosurePtr"}, // get closure pointer from dedicated register
{name: "GetCallerPC"}, // for getcallerpc intrinsic
{name: "GetCallerSP"}, // for getcallersp intrinsic
// Indexing operations
{name: "PtrIndex", argLength: 2}, // arg0=ptr, arg1=index. Computes ptr+sizeof(*v.type)*index, where index is extended to ptrwidth type
{name: "OffPtr", argLength: 1, aux: "Int64"}, // arg0 + auxint (arg0 and result are pointers)
// Slices
{name: "SliceMake", argLength: 3}, // arg0=ptr, arg1=len, arg2=cap
{name: "SlicePtr", argLength: 1, typ: "BytePtr"}, // ptr(arg0)
{name: "SliceLen", argLength: 1}, // len(arg0)
{name: "SliceCap", argLength: 1}, // cap(arg0)
// Complex (part/whole)
{name: "ComplexMake", argLength: 2}, // arg0=real, arg1=imag
{name: "ComplexReal", argLength: 1}, // real(arg0)
{name: "ComplexImag", argLength: 1}, // imag(arg0)
// Strings
{name: "StringMake", argLength: 2}, // arg0=ptr, arg1=len
{name: "StringPtr", argLength: 1, typ: "BytePtr"}, // ptr(arg0)
{name: "StringLen", argLength: 1, typ: "Int"}, // len(arg0)
// Interfaces
{name: "IMake", argLength: 2}, // arg0=itab, arg1=data
{name: "ITab", argLength: 1, typ: "BytePtr"}, // arg0=interface, returns itable field
{name: "IData", argLength: 1}, // arg0=interface, returns data field
// Structs
{name: "StructMake0"}, // Returns struct with 0 fields.
{name: "StructMake1", argLength: 1}, // arg0=field0. Returns struct.
{name: "StructMake2", argLength: 2}, // arg0,arg1=field0,field1. Returns struct.
{name: "StructMake3", argLength: 3}, // arg0..2=field0..2. Returns struct.
{name: "StructMake4", argLength: 4}, // arg0..3=field0..3. Returns struct.
{name: "StructSelect", argLength: 1, aux: "Int64"}, // arg0=struct, auxint=field index. Returns the auxint'th field.
// Arrays
{name: "ArrayMake0"}, // Returns array with 0 elements
{name: "ArrayMake1", argLength: 1}, // Returns array with 1 element
{name: "ArraySelect", argLength: 1, aux: "Int64"}, // arg0=array, auxint=index. Returns a[i].
// Spill&restore ops for the register allocator. These are
// semantically identical to OpCopy; they do not take/return
// stores like regular memory ops do. We can get away without memory
// args because we know there is no aliasing of spill slots on the stack.
{name: "StoreReg", argLength: 1},
{name: "LoadReg", argLength: 1},
// Used during ssa construction. Like Copy, but the arg has not been specified yet.
{name: "FwdRef", aux: "Sym", symEffect: "None"},
// Unknown value. Used for Values whose values don't matter because they are dead code.
{name: "Unknown"},
{name: "VarDef", argLength: 1, aux: "Sym", typ: "Mem", symEffect: "None"}, // aux is a *gc.Node of a variable that is about to be initialized. arg0=mem, returns mem
{name: "VarKill", argLength: 1, aux: "Sym", symEffect: "None"}, // aux is a *gc.Node of a variable that is known to be dead. arg0=mem, returns mem
{name: "VarLive", argLength: 1, aux: "Sym", symEffect: "Read"}, // aux is a *gc.Node of a variable that must be kept live. arg0=mem, returns mem
{name: "KeepAlive", argLength: 2, typ: "Mem"}, // arg[0] is a value that must be kept alive until this mark. arg[1]=mem, returns mem
// Ops for breaking 64-bit operations on 32-bit architectures
{name: "Int64Make", argLength: 2, typ: "UInt64"}, // arg0=hi, arg1=lo
{name: "Int64Hi", argLength: 1, typ: "UInt32"}, // high 32-bit of arg0
{name: "Int64Lo", argLength: 1, typ: "UInt32"}, // low 32-bit of arg0
{name: "Add32carry", argLength: 2, commutative: true, typ: "(UInt32,Flags)"}, // arg0 + arg1, returns (value, carry)
{name: "Add32withcarry", argLength: 3, commutative: true}, // arg0 + arg1 + arg2, arg2=carry (0 or 1)
{name: "Sub32carry", argLength: 2, typ: "(UInt32,Flags)"}, // arg0 - arg1, returns (value, carry)
{name: "Sub32withcarry", argLength: 3}, // arg0 - arg1 - arg2, arg2=carry (0 or 1)
{name: "Signmask", argLength: 1, typ: "Int32"}, // 0 if arg0 >= 0, -1 if arg0 < 0
{name: "Zeromask", argLength: 1, typ: "UInt32"}, // 0 if arg0 == 0, 0xffffffff if arg0 != 0
{name: "Slicemask", argLength: 1}, // 0 if arg0 == 0, -1 if arg0 > 0, undef if arg0<0. Type is native int size.
{name: "Cvt32Uto32F", argLength: 1}, // uint32 -> float32, only used on 32-bit arch
{name: "Cvt32Uto64F", argLength: 1}, // uint32 -> float64, only used on 32-bit arch
{name: "Cvt32Fto32U", argLength: 1}, // float32 -> uint32, only used on 32-bit arch
{name: "Cvt64Fto32U", argLength: 1}, // float64 -> uint32, only used on 32-bit arch
{name: "Cvt64Uto32F", argLength: 1}, // uint64 -> float32, only used on archs that has the instruction
{name: "Cvt64Uto64F", argLength: 1}, // uint64 -> float64, only used on archs that has the instruction
{name: "Cvt32Fto64U", argLength: 1}, // float32 -> uint64, only used on archs that has the instruction
{name: "Cvt64Fto64U", argLength: 1}, // float64 -> uint64, only used on archs that has the instruction
// pseudo-ops for breaking Tuple
{name: "Select0", argLength: 1}, // the first component of a tuple
{name: "Select1", argLength: 1}, // the second component of a tuple
// Atomic operations used for semantically inlining runtime/internal/atomic.
// Atomic loads return a new memory so that the loads are properly ordered
// with respect to other loads and stores.
// TODO: use for sync/atomic at some point.
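// Every atomic op takes the current memory as its last argument and produces a
// new memory, either directly or as the second component of its result tuple,
// so later memory operations are ordered after it.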
{name: "AtomicLoad32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
{name: "AtomicLoad64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
{name: "AtomicLoadPtr", argLength: 2, typ: "(BytePtr,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
{name: "AtomicStore32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
{name: "AtomicStore64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
{name: "AtomicStorePtrNoWB", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
{name: "AtomicExchange32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
{name: "AtomicExchange64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
{name: "AtomicAdd32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
{name: "AtomicAdd64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
{name: "AtomicCompareAndSwap32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true iff store happens and new memory.
{name: "AtomicCompareAndSwap64", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true iff store happens and new memory.
{name: "AtomicAnd8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
{name: "AtomicOr8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
// Clobber experiment op
{name: "Clobber", argLength: 0, typ: "Void", aux: "SymOff", symEffect: "None"}, // write an invalid pointer value to the given pointer slot of a stack variable
}

//    kind        control          successors       implicit exit
//  ----------------------------------------------------------------
//    Exit        return mem       []               yes
//    Ret         return mem       []               yes
//    RetJmp      return mem       []               yes
//    Plain       nil              [next]
//    If          a boolean Value  [then, else]
//    Call        mem              [next]           yes  (control opcode should be OpCall or OpStaticCall)
//    Check       void             [next]           yes  (control opcode should be Op{Lowered}NilCheck)
//    First       nil              [always,never]
var genericBlocks = []blockData{
{name: "Plain"}, // a single successor
{name: "If"}, // 2 successors, if control goto Succs[0] else goto Succs[1]
{name: "Defer"}, // 2 successors, Succs[0]=defer queued, Succs[1]=defer recovered. control is call op (of memory type)
{name: "Ret"}, // no successors, control value is memory result
{name: "RetJmp"}, // no successors, jumps to b.Aux.(*gc.Sym)
{name: "Exit"}, // no successors, control value generates a panic
// transient block state used for dead code removal
{name: "First"}, // 2 successors, always takes the first one (second is dead)
}
func init() {
archs = append(archs, arch{
name: "generic",
ops: genericOps,
blocks: genericBlocks,
generic: true,
})
}