// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

// Generic opcodes typically specify a width. The inputs and outputs
// of that op are the given number of bits wide. There is no notion of
// "sign", so Add32 can be used both for signed and unsigned 32-bit
// addition.
// Signed/unsigned is explicit with the extension ops
// (SignExt*/ZeroExt*) and implicit as the arg to some opcodes
// (e.g. the second argument to shifts is unsigned). If not mentioned,
// all args take signed inputs, or don't care whether their inputs
// are signed or unsigned.
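//
// For example, with arg0 = 0xFFFFFFFF and arg1 = 1, Add32 produces 0 whether
// the operands came from uint32 or int32 (-1 + 1) Go values; the distinction
// only shows up in ops such as Div32 vs. Div32u, Rsh32x* vs. Rsh32Ux*, and
// SignExt8to32 vs. ZeroExt8to32.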

var genericOps = []opData{
	// 2-input arithmetic
	// Types must be consistent with Go typing. Add, for example, must take two values
	// of the same type and produce that same type.
	{name: "Add8", argLength: 2, commutative: true}, // arg0 + arg1
	{name: "Add16", argLength: 2, commutative: true},
	{name: "Add32", argLength: 2, commutative: true},
	{name: "Add64", argLength: 2, commutative: true},
	{name: "AddPtr", argLength: 2}, // For address calculations. arg0 is a pointer and arg1 is an int.
	{name: "Add32F", argLength: 2, commutative: true},
	{name: "Add64F", argLength: 2, commutative: true},

	{name: "Sub8", argLength: 2}, // arg0 - arg1
	{name: "Sub16", argLength: 2},
	{name: "Sub32", argLength: 2},
	{name: "Sub64", argLength: 2},
	{name: "SubPtr", argLength: 2},
	{name: "Sub32F", argLength: 2},
	{name: "Sub64F", argLength: 2},

	{name: "Mul8", argLength: 2, commutative: true}, // arg0 * arg1
	{name: "Mul16", argLength: 2, commutative: true},
	{name: "Mul32", argLength: 2, commutative: true},
	{name: "Mul64", argLength: 2, commutative: true},
	{name: "Mul32F", argLength: 2, commutative: true},
	{name: "Mul64F", argLength: 2, commutative: true},

	{name: "Div32F", argLength: 2}, // arg0 / arg1
	{name: "Div64F", argLength: 2},

	{name: "Hmul32", argLength: 2, commutative: true},
	{name: "Hmul32u", argLength: 2, commutative: true},
	{name: "Hmul64", argLength: 2, commutative: true},
	{name: "Hmul64u", argLength: 2, commutative: true},

	{name: "Mul32uhilo", argLength: 2, typ: "(UInt32,UInt32)", commutative: true}, // arg0 * arg1, returns (hi, lo)
	{name: "Mul64uhilo", argLength: 2, typ: "(UInt64,UInt64)", commutative: true}, // arg0 * arg1, returns (hi, lo)

	{name: "Mul32uover", argLength: 2, typ: "(UInt32,Bool)", commutative: true}, // Let x = arg0*arg1 (full 32x32->64 unsigned multiply), returns (uint32(x), (uint32(x) != x))
	{name: "Mul64uover", argLength: 2, typ: "(UInt64,Bool)", commutative: true}, // Let x = arg0*arg1 (full 64x64->128 unsigned multiply), returns (uint64(x), (uint64(x) != x))

	// Weird special instructions for use in the strength reduction of divides.
	// These ops compute unsigned (arg0 + arg1) / 2, correct to all
	// 32/64 bits, even when the intermediate result of the add has 33/65 bits.
	// These ops can assume arg0 >= arg1.
	// Note: these ops aren't commutative!
	{name: "Avg32u", argLength: 2, typ: "UInt32"}, // 32-bit platforms only
	{name: "Avg64u", argLength: 2, typ: "UInt64"}, // 64-bit platforms only
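	// For example, given the arg0 >= arg1 guarantee, one overflow-free way to
	// get this result is arg1 + (arg0-arg1)/2, since arg0-arg1 cannot wrap;
	// how each target actually lowers Avg32u/Avg64u is left to its own rules.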

	// For Div16, Div32 and Div64, AuxInt non-zero means that the divisor has been proved to be not -1
	// or that the dividend is not the most negative value.
	{name: "Div8", argLength: 2}, // arg0 / arg1, signed
	{name: "Div8u", argLength: 2}, // arg0 / arg1, unsigned
	{name: "Div16", argLength: 2, aux: "Bool"},
	{name: "Div16u", argLength: 2},
	{name: "Div32", argLength: 2, aux: "Bool"},
	{name: "Div32u", argLength: 2},
	{name: "Div64", argLength: 2, aux: "Bool"},
	{name: "Div64u", argLength: 2},
	{name: "Div128u", argLength: 3}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r)

	// For Mod16, Mod32 and Mod64, AuxInt non-zero means that the divisor has been proved to be not -1.
	{name: "Mod8", argLength: 2}, // arg0 % arg1, signed
	{name: "Mod8u", argLength: 2}, // arg0 % arg1, unsigned
	{name: "Mod16", argLength: 2, aux: "Bool"},
	{name: "Mod16u", argLength: 2},
	{name: "Mod32", argLength: 2, aux: "Bool"},
	{name: "Mod32u", argLength: 2},
	{name: "Mod64", argLength: 2, aux: "Bool"},
	{name: "Mod64u", argLength: 2},

	{name: "And8", argLength: 2, commutative: true}, // arg0 & arg1
	{name: "And16", argLength: 2, commutative: true},
	{name: "And32", argLength: 2, commutative: true},
	{name: "And64", argLength: 2, commutative: true},

	{name: "Or8", argLength: 2, commutative: true}, // arg0 | arg1
	{name: "Or16", argLength: 2, commutative: true},
	{name: "Or32", argLength: 2, commutative: true},
	{name: "Or64", argLength: 2, commutative: true},

	{name: "Xor8", argLength: 2, commutative: true}, // arg0 ^ arg1
	{name: "Xor16", argLength: 2, commutative: true},
	{name: "Xor32", argLength: 2, commutative: true},
	{name: "Xor64", argLength: 2, commutative: true},

	// For shifts, AxB means the shifted value has A bits and the shift amount has B bits.
	// Shift amounts are considered unsigned.
	// If arg1 is known to be less than the number of bits in arg0,
	// then auxInt may be set to 1.
	// This enables better code generation on some platforms.
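	// For example, a rule that sees (Lsh64x64 x (Const64 [c])) with 0 <= c < 64
	// may set that flag, letting a target emit a plain shift instruction instead
	// of the compare-and-mask sequence otherwise needed to honor Go's
	// shift-past-width semantics.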
	{name: "Lsh8x8", argLength: 2, aux: "Bool"}, // arg0 << arg1
	{name: "Lsh8x16", argLength: 2, aux: "Bool"},
	{name: "Lsh8x32", argLength: 2, aux: "Bool"},
	{name: "Lsh8x64", argLength: 2, aux: "Bool"},
	{name: "Lsh16x8", argLength: 2, aux: "Bool"},
	{name: "Lsh16x16", argLength: 2, aux: "Bool"},
	{name: "Lsh16x32", argLength: 2, aux: "Bool"},
	{name: "Lsh16x64", argLength: 2, aux: "Bool"},
	{name: "Lsh32x8", argLength: 2, aux: "Bool"},
	{name: "Lsh32x16", argLength: 2, aux: "Bool"},
	{name: "Lsh32x32", argLength: 2, aux: "Bool"},
	{name: "Lsh32x64", argLength: 2, aux: "Bool"},
	{name: "Lsh64x8", argLength: 2, aux: "Bool"},
	{name: "Lsh64x16", argLength: 2, aux: "Bool"},
	{name: "Lsh64x32", argLength: 2, aux: "Bool"},
	{name: "Lsh64x64", argLength: 2, aux: "Bool"},
	{name: "Rsh8x8", argLength: 2, aux: "Bool"}, // arg0 >> arg1, signed
	{name: "Rsh8x16", argLength: 2, aux: "Bool"},
	{name: "Rsh8x32", argLength: 2, aux: "Bool"},
	{name: "Rsh8x64", argLength: 2, aux: "Bool"},
	{name: "Rsh16x8", argLength: 2, aux: "Bool"},
	{name: "Rsh16x16", argLength: 2, aux: "Bool"},
	{name: "Rsh16x32", argLength: 2, aux: "Bool"},
	{name: "Rsh16x64", argLength: 2, aux: "Bool"},
	{name: "Rsh32x8", argLength: 2, aux: "Bool"},
	{name: "Rsh32x16", argLength: 2, aux: "Bool"},
	{name: "Rsh32x32", argLength: 2, aux: "Bool"},
	{name: "Rsh32x64", argLength: 2, aux: "Bool"},
	{name: "Rsh64x8", argLength: 2, aux: "Bool"},
	{name: "Rsh64x16", argLength: 2, aux: "Bool"},
	{name: "Rsh64x32", argLength: 2, aux: "Bool"},
	{name: "Rsh64x64", argLength: 2, aux: "Bool"},
	{name: "Rsh8Ux8", argLength: 2, aux: "Bool"}, // arg0 >> arg1, unsigned
	{name: "Rsh8Ux16", argLength: 2, aux: "Bool"},
	{name: "Rsh8Ux32", argLength: 2, aux: "Bool"},
	{name: "Rsh8Ux64", argLength: 2, aux: "Bool"},
	{name: "Rsh16Ux8", argLength: 2, aux: "Bool"},
	{name: "Rsh16Ux16", argLength: 2, aux: "Bool"},
	{name: "Rsh16Ux32", argLength: 2, aux: "Bool"},
	{name: "Rsh16Ux64", argLength: 2, aux: "Bool"},
	{name: "Rsh32Ux8", argLength: 2, aux: "Bool"},
	{name: "Rsh32Ux16", argLength: 2, aux: "Bool"},
	{name: "Rsh32Ux32", argLength: 2, aux: "Bool"},
	{name: "Rsh32Ux64", argLength: 2, aux: "Bool"},
	{name: "Rsh64Ux8", argLength: 2, aux: "Bool"},
	{name: "Rsh64Ux16", argLength: 2, aux: "Bool"},
	{name: "Rsh64Ux32", argLength: 2, aux: "Bool"},
	{name: "Rsh64Ux64", argLength: 2, aux: "Bool"},

	// 2-input comparisons
	{name: "Eq8", argLength: 2, commutative: true, typ: "Bool"}, // arg0 == arg1
	{name: "Eq16", argLength: 2, commutative: true, typ: "Bool"},
	{name: "Eq32", argLength: 2, commutative: true, typ: "Bool"},
	{name: "Eq64", argLength: 2, commutative: true, typ: "Bool"},
	{name: "EqPtr", argLength: 2, commutative: true, typ: "Bool"},
	{name: "EqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
	{name: "EqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
	{name: "Eq32F", argLength: 2, commutative: true, typ: "Bool"},
	{name: "Eq64F", argLength: 2, commutative: true, typ: "Bool"},

	{name: "Neq8", argLength: 2, commutative: true, typ: "Bool"}, // arg0 != arg1
	{name: "Neq16", argLength: 2, commutative: true, typ: "Bool"},
	{name: "Neq32", argLength: 2, commutative: true, typ: "Bool"},
	{name: "Neq64", argLength: 2, commutative: true, typ: "Bool"},
	{name: "NeqPtr", argLength: 2, commutative: true, typ: "Bool"},
	{name: "NeqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
	{name: "NeqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
	{name: "Neq32F", argLength: 2, commutative: true, typ: "Bool"},
	{name: "Neq64F", argLength: 2, commutative: true, typ: "Bool"},

	{name: "Less8", argLength: 2, typ: "Bool"}, // arg0 < arg1, signed
	{name: "Less8U", argLength: 2, typ: "Bool"}, // arg0 < arg1, unsigned
	{name: "Less16", argLength: 2, typ: "Bool"},
	{name: "Less16U", argLength: 2, typ: "Bool"},
	{name: "Less32", argLength: 2, typ: "Bool"},
	{name: "Less32U", argLength: 2, typ: "Bool"},
	{name: "Less64", argLength: 2, typ: "Bool"},
	{name: "Less64U", argLength: 2, typ: "Bool"},
	{name: "Less32F", argLength: 2, typ: "Bool"},
	{name: "Less64F", argLength: 2, typ: "Bool"},

	{name: "Leq8", argLength: 2, typ: "Bool"}, // arg0 <= arg1, signed
	{name: "Leq8U", argLength: 2, typ: "Bool"}, // arg0 <= arg1, unsigned
	{name: "Leq16", argLength: 2, typ: "Bool"},
	{name: "Leq16U", argLength: 2, typ: "Bool"},
	{name: "Leq32", argLength: 2, typ: "Bool"},
	{name: "Leq32U", argLength: 2, typ: "Bool"},
	{name: "Leq64", argLength: 2, typ: "Bool"},
	{name: "Leq64U", argLength: 2, typ: "Bool"},
	{name: "Leq32F", argLength: 2, typ: "Bool"},
	{name: "Leq64F", argLength: 2, typ: "Bool"},

	{name: "Greater8", argLength: 2, typ: "Bool"}, // arg0 > arg1, signed
	{name: "Greater8U", argLength: 2, typ: "Bool"}, // arg0 > arg1, unsigned
	{name: "Greater16", argLength: 2, typ: "Bool"},
	{name: "Greater16U", argLength: 2, typ: "Bool"},
	{name: "Greater32", argLength: 2, typ: "Bool"},
	{name: "Greater32U", argLength: 2, typ: "Bool"},
	{name: "Greater64", argLength: 2, typ: "Bool"},
	{name: "Greater64U", argLength: 2, typ: "Bool"},
	{name: "Greater32F", argLength: 2, typ: "Bool"},
	{name: "Greater64F", argLength: 2, typ: "Bool"},

	{name: "Geq8", argLength: 2, typ: "Bool"}, // arg0 >= arg1, signed
	{name: "Geq8U", argLength: 2, typ: "Bool"}, // arg0 >= arg1, unsigned
	{name: "Geq16", argLength: 2, typ: "Bool"},
	{name: "Geq16U", argLength: 2, typ: "Bool"},
	{name: "Geq32", argLength: 2, typ: "Bool"},
	{name: "Geq32U", argLength: 2, typ: "Bool"},
	{name: "Geq64", argLength: 2, typ: "Bool"},
	{name: "Geq64U", argLength: 2, typ: "Bool"},
	{name: "Geq32F", argLength: 2, typ: "Bool"},
	{name: "Geq64F", argLength: 2, typ: "Bool"},

	// the type of a CondSelect is the same as the type of its first
	// two arguments, which should be register-width scalars; the third
	// argument should be a boolean
	{name: "CondSelect", argLength: 3}, // arg2 ? arg0 : arg1
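	// For example, a branchless 64-bit minimum can be expressed as
	// (CondSelect x y (Less64 x y)): the result is x when x < y, and y otherwise.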

	// boolean ops
	{name: "AndB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 && arg1 (not shortcircuited)
	{name: "OrB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 || arg1 (not shortcircuited)
	{name: "EqB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 == arg1
	{name: "NeqB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 != arg1
	{name: "Not", argLength: 1, typ: "Bool"}, // !arg0, boolean

	// 1-input ops
	{name: "Neg8", argLength: 1}, // -arg0
	{name: "Neg16", argLength: 1},
	{name: "Neg32", argLength: 1},
	{name: "Neg64", argLength: 1},
	{name: "Neg32F", argLength: 1},
	{name: "Neg64F", argLength: 1},

	{name: "Com8", argLength: 1}, // ^arg0
	{name: "Com16", argLength: 1},
	{name: "Com32", argLength: 1},
	{name: "Com64", argLength: 1},

	{name: "Ctz8", argLength: 1}, // Count trailing (low order) zeroes (returns 0-8)
	{name: "Ctz16", argLength: 1}, // Count trailing (low order) zeroes (returns 0-16)
	{name: "Ctz32", argLength: 1}, // Count trailing (low order) zeroes (returns 0-32)
	{name: "Ctz64", argLength: 1}, // Count trailing (low order) zeroes (returns 0-64)
	{name: "Ctz8NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-7
	{name: "Ctz16NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-15
	{name: "Ctz32NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-31
	{name: "Ctz64NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-63
	{name: "BitLen8", argLength: 1}, // Number of bits in arg[0] (returns 0-8)
	{name: "BitLen16", argLength: 1}, // Number of bits in arg[0] (returns 0-16)
	{name: "BitLen32", argLength: 1}, // Number of bits in arg[0] (returns 0-32)
	{name: "BitLen64", argLength: 1}, // Number of bits in arg[0] (returns 0-64)

	{name: "Bswap32", argLength: 1}, // Swap bytes
	{name: "Bswap64", argLength: 1}, // Swap bytes

	{name: "BitRev8", argLength: 1}, // Reverse the bits in arg[0]
	{name: "BitRev16", argLength: 1}, // Reverse the bits in arg[0]
	{name: "BitRev32", argLength: 1}, // Reverse the bits in arg[0]
	{name: "BitRev64", argLength: 1}, // Reverse the bits in arg[0]

	{name: "PopCount8", argLength: 1}, // Count bits in arg[0]
	{name: "PopCount16", argLength: 1}, // Count bits in arg[0]
	{name: "PopCount32", argLength: 1}, // Count bits in arg[0]
	{name: "PopCount64", argLength: 1}, // Count bits in arg[0]

	{name: "RotateLeft8", argLength: 2}, // Rotate bits in arg[0] left by arg[1]
	{name: "RotateLeft16", argLength: 2}, // Rotate bits in arg[0] left by arg[1]
	{name: "RotateLeft32", argLength: 2}, // Rotate bits in arg[0] left by arg[1]
	{name: "RotateLeft64", argLength: 2}, // Rotate bits in arg[0] left by arg[1]

	// Square root, float64 only.
	// Special cases:
	//   +∞  → +∞
	//   ±0  → ±0 (sign preserved)
	//   x<0 → NaN
	//   NaN → NaN
	{name: "Sqrt", argLength: 1}, // √arg0

	// Round to integer, float64 only.
	// Special cases:
	//   ±∞  → ±∞ (sign preserved)
	//   ±0  → ±0 (sign preserved)
	//   NaN → NaN
	{name: "Floor", argLength: 1}, // round arg0 toward -∞
	{name: "Ceil", argLength: 1}, // round arg0 toward +∞
	{name: "Trunc", argLength: 1}, // round arg0 toward 0
	{name: "Round", argLength: 1}, // round arg0 to nearest, ties away from 0
	{name: "RoundToEven", argLength: 1}, // round arg0 to nearest, ties to even

	// Modify the sign bit
	{name: "Abs", argLength: 1}, // absolute value arg0
	{name: "Copysign", argLength: 2}, // copy sign from arg0 to arg1

	// Data movement, max argument length for Phi is indefinite so just pick
	// a really large number
	{name: "Phi", argLength: -1, zeroWidth: true}, // select an argument based on which predecessor block we came from
	{name: "Copy", argLength: 1}, // output = arg0

	// Convert converts between pointers and integers.
	// We have a special op for this so as to not confuse GC
	// (particularly stack maps). It takes a memory arg so it
	// gets correctly ordered with respect to GC safepoints.
	// It gets compiled to nothing, so its result must be in the same
	// register as its argument. regalloc knows it can use any
	// allocatable integer register for OpConvert.
	// arg0=ptr/int arg1=mem, output=int/ptr
	{name: "Convert", argLength: 2, zeroWidth: true, resultInArg0: true},
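	// A conversion such as uintptr(unsafe.Pointer(p)) and back, for instance,
	// is kept as Convert values so the pointer stays visible to stack maps
	// across the round trip.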

	// constants. Constant values are stored in the aux or
	// auxint fields.
	{name: "ConstBool", aux: "Bool"}, // auxint is 0 for false and 1 for true
	{name: "ConstString", aux: "String"}, // value is aux.(string)
	{name: "ConstNil", typ: "BytePtr"}, // nil pointer
	{name: "Const8", aux: "Int8"}, // auxint is sign-extended 8 bits
	{name: "Const16", aux: "Int16"}, // auxint is sign-extended 16 bits
	{name: "Const32", aux: "Int32"}, // auxint is sign-extended 32 bits
	// Note: ConstX are sign-extended even when the type of the value is unsigned.
	// For instance, uint8(0xaa) is stored as auxint=0xffffffffffffffaa.
	{name: "Const64", aux: "Int64"}, // value is auxint
	{name: "Const32F", aux: "Float32"}, // value is math.Float64frombits(uint64(auxint)) and is exactly representable as float32
	{name: "Const64F", aux: "Float64"}, // value is math.Float64frombits(uint64(auxint))
	{name: "ConstInterface"}, // nil interface
	{name: "ConstSlice"}, // nil slice

	// Constant-like things
	{name: "InitMem", zeroWidth: true}, // memory input to the function.
	{name: "Arg", aux: "SymOff", symEffect: "Read", zeroWidth: true}, // argument to the function. aux=GCNode of arg, off = offset in that arg.

	// The address of a variable. arg0 is the base pointer.
	// If the variable is a global, the base pointer will be SB and
	// the Aux field will be a *obj.LSym.
	// If the variable is a local, the base pointer will be SP and
	// the Aux field will be a *gc.Node.
	{name: "Addr", argLength: 1, aux: "Sym", symEffect: "Addr"}, // Address of a variable. Arg0=SB. Aux identifies the variable.
	{name: "LocalAddr", argLength: 2, aux: "Sym", symEffect: "Addr"}, // Address of a variable. Arg0=SP. Arg1=mem. Aux identifies the variable.

	{name: "SP", zeroWidth: true}, // stack pointer
	{name: "SB", typ: "Uintptr", zeroWidth: true}, // static base pointer (a.k.a. globals pointer)
	{name: "Invalid"}, // unused value

	// Memory operations
	{name: "Load", argLength: 2}, // Load from arg0. arg1=memory
	{name: "Store", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory.
	// The source and destination of Move may overlap in some cases. See e.g.
	// memmove inlining in generic.rules. When inlineablememmovesize (in ../rewrite.go)
	// returns true, we must do all loads before all stores, when lowering Move.
	{name: "Move", argLength: 3, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size, aux=type. Returns memory.
	{name: "Zero", argLength: 2, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=mem, auxint=size, aux=type. Returns memory.

	// Memory operations with write barriers.
	// Expand to runtime calls. The write barrier will be removed if the write is to the stack.
	{name: "StoreWB", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory.
	{name: "MoveWB", argLength: 3, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size, aux=type. Returns memory.
	{name: "ZeroWB", argLength: 2, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=mem, auxint=size, aux=type. Returns memory.

	// WB invokes runtime.gcWriteBarrier. This is not a normal
	// call: it takes arguments in registers, doesn't clobber
	// general-purpose registers (the exact clobber set is
	// arch-dependent), and is not a safe-point.
	{name: "WB", argLength: 3, typ: "Mem", aux: "Sym", symEffect: "None"}, // arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier

	// PanicBounds and PanicExtend generate a runtime panic.
	// Their arguments provide index values to use in panic messages.
	// Both PanicBounds and PanicExtend have an AuxInt value from the BoundsKind type (in ../op.go).
	// PanicBounds' index is int sized.
	// PanicExtend's index is int64 sized. (PanicExtend is only used on 32-bit archs.)
	{name: "PanicBounds", argLength: 3, aux: "Int64", typ: "Mem"}, // arg0=idx, arg1=len, arg2=mem, returns memory.
	{name: "PanicExtend", argLength: 4, aux: "Int64", typ: "Mem"}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory.

	// Function calls. Arguments to the call have already been written to the stack.
	// Return values appear on the stack. The method receiver, if any, is treated
	// as a phantom first argument.
	{name: "ClosureCall", argLength: 3, aux: "Int64", call: true}, // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory.
	{name: "StaticCall", argLength: 1, aux: "SymOff", call: true, symEffect: "None"}, // call function aux.(*obj.LSym), arg0=memory. auxint=arg size. Returns memory.
	{name: "InterCall", argLength: 2, aux: "Int64", call: true}, // interface call. arg0=code pointer, arg1=memory, auxint=arg size. Returns memory.
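	// For example, a call f(x) is built roughly as a Store of x to its argument
	// slot followed by (StaticCall {f} mem); any result is then loaded back from
	// the stack out of the returned memory.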

	// Conversions: signed extensions, zero (unsigned) extensions, truncations
	{name: "SignExt8to16", argLength: 1, typ: "Int16"},
	{name: "SignExt8to32", argLength: 1, typ: "Int32"},
	{name: "SignExt8to64", argLength: 1, typ: "Int64"},
	{name: "SignExt16to32", argLength: 1, typ: "Int32"},
	{name: "SignExt16to64", argLength: 1, typ: "Int64"},
	{name: "SignExt32to64", argLength: 1, typ: "Int64"},
	{name: "ZeroExt8to16", argLength: 1, typ: "UInt16"},
	{name: "ZeroExt8to32", argLength: 1, typ: "UInt32"},
	{name: "ZeroExt8to64", argLength: 1, typ: "UInt64"},
	{name: "ZeroExt16to32", argLength: 1, typ: "UInt32"},
	{name: "ZeroExt16to64", argLength: 1, typ: "UInt64"},
	{name: "ZeroExt32to64", argLength: 1, typ: "UInt64"},
	{name: "Trunc16to8", argLength: 1},
	{name: "Trunc32to8", argLength: 1},
	{name: "Trunc32to16", argLength: 1},
	{name: "Trunc64to8", argLength: 1},
	{name: "Trunc64to16", argLength: 1},
	{name: "Trunc64to32", argLength: 1},

	{name: "Cvt32to32F", argLength: 1},
	{name: "Cvt32to64F", argLength: 1},
	{name: "Cvt64to32F", argLength: 1},
	{name: "Cvt64to64F", argLength: 1},
	{name: "Cvt32Fto32", argLength: 1},
	{name: "Cvt32Fto64", argLength: 1},
	{name: "Cvt64Fto32", argLength: 1},
	{name: "Cvt64Fto64", argLength: 1},
	{name: "Cvt32Fto64F", argLength: 1},
	{name: "Cvt64Fto32F", argLength: 1},

	// Force rounding to precision of type.
	{name: "Round32F", argLength: 1},
	{name: "Round64F", argLength: 1},

	// Automatically inserted safety checks
	{name: "IsNonNil", argLength: 1, typ: "Bool"}, // arg0 != nil
	{name: "IsInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 < arg1. arg1 is guaranteed >= 0.
	{name: "IsSliceInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 <= arg1. arg1 is guaranteed >= 0.
	{name: "NilCheck", argLength: 2, typ: "Void"}, // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns void.
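	// For example, an index expression a[i] on a slice is guarded roughly by
	// (IsInBounds i (SliceLen a)); if the check fails, control transfers to a
	// block that raises the bounds panic (see PanicBounds above).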

	// Pseudo-ops
	{name: "GetG", argLength: 1, zeroWidth: true}, // runtime.getg() (read g pointer). arg0=mem
	{name: "GetClosurePtr"}, // get closure pointer from dedicated register
	{name: "GetCallerPC"}, // for getcallerpc intrinsic
	{name: "GetCallerSP"}, // for getcallersp intrinsic

	// Indexing operations
	{name: "PtrIndex", argLength: 2}, // arg0=ptr, arg1=index. Computes ptr+sizeof(*v.type)*index, where index is extended to ptrwidth type
	{name: "OffPtr", argLength: 1, aux: "Int64"}, // arg0 + auxint (arg0 and result are pointers)

	// Slices
	{name: "SliceMake", argLength: 3}, // arg0=ptr, arg1=len, arg2=cap
	{name: "SlicePtr", argLength: 1, typ: "BytePtr"}, // ptr(arg0)
	{name: "SliceLen", argLength: 1}, // len(arg0)
	{name: "SliceCap", argLength: 1}, // cap(arg0)

	// Complex (part/whole)
	{name: "ComplexMake", argLength: 2}, // arg0=real, arg1=imag
	{name: "ComplexReal", argLength: 1}, // real(arg0)
	{name: "ComplexImag", argLength: 1}, // imag(arg0)

	// Strings
	{name: "StringMake", argLength: 2}, // arg0=ptr, arg1=len
	{name: "StringPtr", argLength: 1, typ: "BytePtr"}, // ptr(arg0)
	{name: "StringLen", argLength: 1, typ: "Int"}, // len(arg0)

	// Interfaces
	{name: "IMake", argLength: 2}, // arg0=itab, arg1=data
	{name: "ITab", argLength: 1, typ: "Uintptr"}, // arg0=interface, returns itable field
	{name: "IData", argLength: 1}, // arg0=interface, returns data field

	// Structs
	{name: "StructMake0"}, // Returns struct with 0 fields.
	{name: "StructMake1", argLength: 1}, // arg0=field0. Returns struct.
	{name: "StructMake2", argLength: 2}, // arg0,arg1=field0,field1. Returns struct.
	{name: "StructMake3", argLength: 3}, // arg0..2=field0..2. Returns struct.
	{name: "StructMake4", argLength: 4}, // arg0..3=field0..3. Returns struct.
	{name: "StructSelect", argLength: 1, aux: "Int64"}, // arg0=struct, auxint=field index. Returns the auxint'th field.

	// Arrays
	{name: "ArrayMake0"}, // Returns array with 0 elements
	{name: "ArrayMake1", argLength: 1}, // Returns array with 1 element
	{name: "ArraySelect", argLength: 1, aux: "Int64"}, // arg0=array, auxint=index. Returns a[i].

	// Spill&restore ops for the register allocator. These are
	// semantically identical to OpCopy; they do not take/return
	// stores like regular memory ops do. We can get away without memory
	// args because we know there is no aliasing of spill slots on the stack.
	{name: "StoreReg", argLength: 1},
	{name: "LoadReg", argLength: 1},

	// Used during ssa construction. Like Copy, but the arg has not been specified yet.
	{name: "FwdRef", aux: "Sym", symEffect: "None"},

	// Unknown value. Used for Values whose values don't matter because they are dead code.
	{name: "Unknown"},

	{name: "VarDef", argLength: 1, aux: "Sym", typ: "Mem", symEffect: "None", zeroWidth: true}, // aux is a *gc.Node of a variable that is about to be initialized. arg0=mem, returns mem
	{name: "VarKill", argLength: 1, aux: "Sym", symEffect: "None"}, // aux is a *gc.Node of a variable that is known to be dead. arg0=mem, returns mem
	// TODO: what's the difference between VarLive and KeepAlive?
	{name: "VarLive", argLength: 1, aux: "Sym", symEffect: "Read", zeroWidth: true}, // aux is a *gc.Node of a variable that must be kept live. arg0=mem, returns mem
	{name: "KeepAlive", argLength: 2, typ: "Mem", zeroWidth: true}, // arg[0] is a value that must be kept alive until this mark. arg[1]=mem, returns mem

	// InlMark marks the start of an inlined function body. Its AuxInt field
	// distinguishes which entry in the local inline tree it is marking.
	{name: "InlMark", argLength: 1, aux: "Int32", typ: "Void"}, // arg[0]=mem, returns void.

	// Ops for breaking 64-bit operations on 32-bit architectures
	{name: "Int64Make", argLength: 2, typ: "UInt64"}, // arg0=hi, arg1=lo
	{name: "Int64Hi", argLength: 1, typ: "UInt32"}, // high 32-bit of arg0
	{name: "Int64Lo", argLength: 1, typ: "UInt32"}, // low 32-bit of arg0

	{name: "Add32carry", argLength: 2, commutative: true, typ: "(UInt32,Flags)"}, // arg0 + arg1, returns (value, carry)
	{name: "Add32withcarry", argLength: 3, commutative: true}, // arg0 + arg1 + arg2, arg2=carry (0 or 1)
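	// On 32-bit architectures the dec64 rules use these ops to split 64-bit
	// arithmetic; roughly, a 64-bit add becomes
	//   (Int64Make
	//       (Add32withcarry hi0 hi1 (Select1 (Add32carry lo0 lo1)))
	//       (Select0 (Add32carry lo0 lo1)))
	// where Select0 is the low 32-bit sum, Select1 is the carry flags, and the
	// two Add32carry values are CSE'd into one.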

	{name: "Sub32carry", argLength: 2, typ: "(UInt32,Flags)"}, // arg0 - arg1, returns (value, carry)
	{name: "Sub32withcarry", argLength: 3}, // arg0 - arg1 - arg2, arg2=carry (0 or 1)

	{name: "Add64carry", argLength: 3, commutative: true, typ: "(UInt64,UInt64)"}, // arg0 + arg1 + arg2, arg2 must be 0 or 1. returns (value, value>>64)
	{name: "Sub64borrow", argLength: 3, typ: "(UInt64,UInt64)"}, // arg0 - (arg1 + arg2), arg2 must be 0 or 1. returns (value, value>>64&1)

	{name: "Signmask", argLength: 1, typ: "Int32"}, // 0 if arg0 >= 0, -1 if arg0 < 0
	{name: "Zeromask", argLength: 1, typ: "UInt32"}, // 0 if arg0 == 0, 0xffffffff if arg0 != 0
	{name: "Slicemask", argLength: 1}, // 0 if arg0 == 0, -1 if arg0 > 0, undef if arg0<0. Type is native int size.

	{name: "Cvt32Uto32F", argLength: 1}, // uint32 -> float32, only used on 32-bit arch
	{name: "Cvt32Uto64F", argLength: 1}, // uint32 -> float64, only used on 32-bit arch
	{name: "Cvt32Fto32U", argLength: 1}, // float32 -> uint32, only used on 32-bit arch
	{name: "Cvt64Fto32U", argLength: 1}, // float64 -> uint32, only used on 32-bit arch
	{name: "Cvt64Uto32F", argLength: 1}, // uint64 -> float32, only used on archs that have the instruction
	{name: "Cvt64Uto64F", argLength: 1}, // uint64 -> float64, only used on archs that have the instruction
	{name: "Cvt32Fto64U", argLength: 1}, // float32 -> uint64, only used on archs that have the instruction
	{name: "Cvt64Fto64U", argLength: 1}, // float64 -> uint64, only used on archs that have the instruction

	// pseudo-ops for breaking Tuple
	{name: "Select0", argLength: 1, zeroWidth: true}, // the first component of a tuple
	{name: "Select1", argLength: 1, zeroWidth: true}, // the second component of a tuple

	// Atomic operations used for semantically inlining runtime/internal/atomic.
	// Atomic loads return a new memory so that the loads are properly ordered
	// with respect to other loads and stores.
	// TODO: use for sync/atomic at some point.
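	// For example, an intrinsified runtime/internal/atomic.Xadd(&x, 1) becomes
	// roughly (AtomicAdd32 ptr (Const32 [1]) mem); Select0 of the tuple is the
	// new value and Select1 is the new memory.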
	{name: "AtomicLoad8", argLength: 2, typ: "(UInt8,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
	{name: "AtomicLoad32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
	{name: "AtomicLoad64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
	{name: "AtomicLoadPtr", argLength: 2, typ: "(BytePtr,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
	{name: "AtomicLoadAcq32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Lock acquisition, returns loaded value and new memory.
	{name: "AtomicStore32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
	{name: "AtomicStore64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
	{name: "AtomicStorePtrNoWB", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
	{name: "AtomicStoreRel32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Lock release, returns memory.
	{name: "AtomicExchange32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
	{name: "AtomicExchange64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
	{name: "AtomicAdd32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
	{name: "AtomicAdd64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
	{name: "AtomicCompareAndSwap32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
	{name: "AtomicCompareAndSwap64", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
	{name: "AtomicCompareAndSwapRel32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Lock release, reports whether store happens and new memory.
	{name: "AtomicAnd8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
	{name: "AtomicOr8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.

	// Atomic operation variants
	// These variants have the same semantics as above atomic operations.
	// But they are used for generating more efficient code on certain modern machines, with run-time CPU feature detection.
	// Currently, they are used on ARM64 only.
	{name: "AtomicAdd32Variant", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
	{name: "AtomicAdd64Variant", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.

	// Clobber experiment op
	{name: "Clobber", argLength: 0, typ: "Void", aux: "SymOff", symEffect: "None"}, // write an invalid pointer value to the given pointer slot of a stack variable
}

//    kind          control          successors       implicit exit
//  -----------------------------------------------------------------
//    Exit       return mem          []               yes
//    Ret        return mem          []               yes
//    RetJmp     return mem          []               yes
//    Plain      nil                 [next]
//    If         a boolean Value     [then, else]
//    Call       mem                 [next]           yes (control opcode should be OpCall or OpStaticCall)
//    Check      void                [next]           yes (control opcode should be Op{Lowered}NilCheck)
//    First      nil                 [always, never]
var genericBlocks = []blockData{
	{name: "Plain"}, // a single successor
	{name: "If"}, // 2 successors, if control goto Succs[0] else goto Succs[1]
	{name: "Defer"}, // 2 successors, Succs[0]=defer queued, Succs[1]=defer recovered. control is call op (of memory type)
	{name: "Ret"}, // no successors, control value is memory result
	{name: "RetJmp"}, // no successors, jumps to b.Aux.(*gc.Sym)
	{name: "Exit"}, // no successors, control value generates a panic

	// transient block state used for dead code removal
	{name: "First"}, // 2 successors, always takes the first one (second is dead)
}

func init() {
	archs = append(archs, arch{
		name:    "generic",
		ops:     genericOps,
		blocks:  genericBlocks,
		generic: true,
	})
}