// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package reflectdata

import (
	"fmt"

	"cmd/compile/internal/base"
	"cmd/compile/internal/compare"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/objw"
	"cmd/compile/internal/typecheck"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/src"
)

// AlgType returns the fixed-width AMEMxx variants instead of the general
// AMEM kind when possible.
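//
// For example, a type with the memory layout of a uint32 (size 4,
// 4-byte aligned on typical targets) reports AMEM32, letting callers
// use a fixed-width runtime routine such as memhash32 instead of the
// generic variable-length AMEM path. (Illustrative; the switch below
// is the authoritative mapping.)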
func AlgType(t *types.Type) types.AlgKind {
	a, _ := types.AlgType(t)
	if a == types.AMEM {
		if t.Alignment() < int64(base.Ctxt.Arch.Alignment) && t.Alignment() < t.Size() {
			// For example, we can't treat [2]int16 as an int32 if int32s require
			// 4-byte alignment. See issue 46283.
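			// ([2]int16 has size 4 but only 2-byte alignment, so on a
			// strict-alignment architecture a single 4-byte load of it
			// could fault; fall back to generic AMEM in that case.)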
			return a
		}
		switch t.Size() {
		case 0:
			return types.AMEM0
		case 1:
			return types.AMEM8
		case 2:
			return types.AMEM16
		case 4:
			return types.AMEM32
		case 8:
			return types.AMEM64
		case 16:
			return types.AMEM128
		}
	}

	return a
}

// genhash returns a symbol which is the closure used to compute
// the hash of a value of type t.
// Note: the generated function must match runtime.typehash exactly.
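//
// For the fixed algorithm kinds below, the closure simply wraps the
// corresponding runtime function (e.g. strhash for string keys). For
// the variable-length AMEM fallback, it is a two-word closure whose
// second word carries the size, as built in the AMEM case below.
// (Orientation summary; the per-kind cases are authoritative.)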
func genhash(t *types.Type) *obj.LSym {
	switch AlgType(t) {
	default:
		// genhash is only called for types that have equality
		base.Fatalf("genhash %v", t)
	case types.AMEM0:
		return sysClosure("memhash0")
	case types.AMEM8:
		return sysClosure("memhash8")
	case types.AMEM16:
		return sysClosure("memhash16")
	case types.AMEM32:
		return sysClosure("memhash32")
	case types.AMEM64:
		return sysClosure("memhash64")
	case types.AMEM128:
		return sysClosure("memhash128")
	case types.ASTRING:
		return sysClosure("strhash")
	case types.AINTER:
		return sysClosure("interhash")
	case types.ANILINTER:
		return sysClosure("nilinterhash")
	case types.AFLOAT32:
		return sysClosure("f32hash")
	case types.AFLOAT64:
		return sysClosure("f64hash")
	case types.ACPLX64:
		return sysClosure("c64hash")
	case types.ACPLX128:
		return sysClosure("c128hash")
	case types.AMEM:
		// For other sizes of plain memory, we build a closure
		// that calls memhash_varlen. The size of the memory is
		// encoded in the first slot of the closure.
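		//
		// Sketch of the resulting read-only closure symbol for a
		// hypothetical 24-byte type (assuming 8-byte pointers):
		//
		//	.hashfunc24:
		//		uintptr: pointer to memhash_varlen
		//		uintptr: 24  // t.Size(), read by memhash_varlen from its closure
		//
		// This illustrates the objw writes below; it is not a separate
		// data structure.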
|
2021-08-26 12:11:14 -07:00
|
|
|
closure := TypeLinksymLookup(fmt.Sprintf(".hashfunc%d", t.Size()))
|
cmd/compile,runtime: generate hash functions only for types which are map keys
Right now we generate hash functions for all types, just in case they
are used as map keys. That's a lot of wasted effort and binary size
for types which will never be used as a map key. Instead, generate
hash functions only for types that we know are map keys.
Just doing that is a bit too simple, since maps with an interface type
as a key might have to hash any concrete key type that implements that
interface. So for that case, implement hashing of such types at
runtime (instead of with generated code). It will be slower, but only
for maps with interface types as keys, and maybe only a bit slower as
the aeshash time probably dominates the dispatch time.
Reorg where we keep the equals and hash functions. Move the hash function
from the key type to the map type, saving a field in every non-map type.
That leaves only one function in the alg structure, so get rid of that and
just keep the equal function in the type descriptor itself.
cmd/go now has 10 generated hash functions, instead of 504. Makes
cmd/go 1.0% smaller. Update #6853.
Speed on non-interface keys is unchanged. Speed on interface keys
is ~20% slower:
name old time/op new time/op delta
MapInterfaceString-8 23.0ns ±21% 27.6ns ±14% +20.01% (p=0.002 n=10+10)
MapInterfacePtr-8 19.4ns ±16% 23.7ns ± 7% +22.48% (p=0.000 n=10+8)
Change-Id: I7c2e42292a46b5d4e288aaec4029bdbb01089263
Reviewed-on: https://go-review.googlesource.com/c/go/+/191198
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Martin Möhrmann <moehrmann@google.com>
2019-08-06 15:22:51 -07:00
|
|
|
if len(closure.P) > 0 { // already generated
|
|
|
|
|
return closure
|
|
|
|
|
}
|
|
|
|
|
if memhashvarlen == nil {
|
[dev.regabi] cmd/compile: split out package typecheck [generated]
This commit splits the typechecking logic into its own package,
the first of a sequence of CLs to break package gc into more
manageable units.
[git-generate]
cd src/cmd/compile/internal/gc
rf '
# The binary import/export has to be part of typechecking,
# because we load inlined function bodies lazily, but "exporter"
# should not be. Move that out of bexport.go.
mv exporter exporter.markObject exporter.markType export.go
# Use the typechecking helpers, so that the calls left behind
# in package gc do not need access to ctxExpr etc.
ex {
import "cmd/compile/internal/ir"
# TODO(rsc): Should not be necessary.
avoid TypecheckExpr
avoid TypecheckStmt
avoid TypecheckExprs
avoid TypecheckStmts
avoid TypecheckAssignExpr
avoid TypecheckCallee
var n ir.Node
var ns []ir.Node
typecheck(n, ctxExpr) -> TypecheckExpr(n)
typecheck(n, ctxStmt) -> TypecheckStmt(n)
typecheckslice(ns, ctxExpr) -> TypecheckExprs(ns)
typecheckslice(ns, ctxStmt) -> TypecheckStmts(ns)
typecheck(n, ctxExpr|ctxAssign) -> TypecheckAssignExpr(n)
typecheck(n, ctxExpr|ctxCallee) -> TypecheckCallee(n)
}
# Move some typechecking API to typecheck.
mv syslook LookupRuntime
mv substArgTypes SubstArgTypes
mv LookupRuntime SubstArgTypes syms.go
mv conv Conv
mv convnop ConvNop
mv Conv ConvNop typecheck.go
mv colasdefn AssignDefn
mv colasname assignableName
mv Target target.go
mv initname autoexport exportsym dcl.go
mv exportsym Export
# Export API to be called from outside typecheck.
# The ones with "Typecheck" prefixes will be renamed later to drop the prefix.
mv adddot AddImplicitDots
mv assignconv AssignConv
mv expandmeth CalcMethods
mv capturevarscomplete CaptureVarsComplete
mv checkMapKeys CheckMapKeys
mv checkreturn CheckReturn
mv dclcontext DeclContext
mv dclfunc DeclFunc
mv declare Declare
mv dotImportRefs DotImportRefs
mv declImporter DeclImporter
mv variter DeclVars
mv defaultlit DefaultLit
mv evalConst EvalConst
mv expandInline ImportBody
mv finishUniverse declareUniverse
mv funcbody FinishFuncBody
mv funchdr StartFuncBody
mv indexconst IndexConst
mv initTodo InitTodoFunc
mv lookup Lookup
mv resolve Resolve
mv lookupN LookupNum
mv nodAddr NodAddr
mv nodAddrAt NodAddrAt
mv nodnil NodNil
mv origBoolConst OrigBool
mv origConst OrigConst
mv origIntConst OrigInt
mv redeclare Redeclared
mv tostruct NewStructType
mv functype NewFuncType
mv methodfunc NewMethodType
mv structargs NewFuncParams
mv temp Temp
mv tempAt TempAt
mv typecheckok TypecheckAllowed
mv typecheck _typecheck # make room for typecheck pkg
mv typecheckinl TypecheckImportedBody
mv typecheckFunc TypecheckFunc
mv iimport ReadImports
mv iexport WriteExports
mv sysfunc LookupRuntimeFunc
mv sysvar LookupRuntimeVar
# Move function constructors to typecheck.
mv mkdotargslice MakeDotArgs
mv fixVariadicCall FixVariadicCall
mv closureType ClosureType
mv partialCallType PartialCallType
mv capturevars CaptureVars
mv MakeDotArgs FixVariadicCall ClosureType PartialCallType CaptureVars typecheckclosure func.go
mv autolabel AutoLabel
mv AutoLabel syms.go
mv Dlist dlist
mv Symlink symlink
mv \
AssignDefn assignableName \
AssignConv \
CaptureVarsComplete \
DeclContext \
DeclFunc \
DeclImporter \
DeclVars \
Declare \
DotImportRefs \
Export \
InitTodoFunc \
Lookup \
LookupNum \
LookupRuntimeFunc \
LookupRuntimeVar \
NewFuncParams \
NewName \
NodAddr \
NodAddrAt \
NodNil \
Redeclared \
StartFuncBody \
FinishFuncBody \
TypecheckImportedBody \
AddImplicitDots \
CalcMethods \
CheckFuncStack \
NewFuncType \
NewMethodType \
NewStructType \
TypecheckAllowed \
Temp \
TempAt \
adddot1 \
dotlist \
addmethod \
assignconvfn \
assignop \
autotmpname \
autoexport \
bexport.go \
checkdupfields \
checkembeddedtype \
closurename \
convertop \
declare_typegen \
decldepth \
dlist \
dotpath \
expand0 \
expand1 \
expandDecl \
fakeRecvField \
fnpkg \
funcStack \
funcStackEnt \
funcarg \
funcarg2 \
funcargs \
funcargs2 \
globClosgen \
ifacelookdot \
implements \
importalias \
importconst \
importfunc \
importobj \
importsym \
importtype \
importvar \
inimport \
initname \
isptrto \
loadsys \
lookdot0 \
lookdot1 \
makepartialcall \
okfor \
okforlen \
operandType \
slist \
symlink \
tointerface \
typeSet \
typeSet.add \
typeSetEntry \
typecheckExprSwitch \
typecheckTypeSwitch \
typecheckpartialcall \
typecheckrange \
typecheckrangeExpr \
typecheckselect \
typecheckswitch \
vargen \
builtin.go \
builtin_test.go \
const.go \
func.go \
iexport.go \
iimport.go \
mapfile_mmap.go \
syms.go \
target.go \
typecheck.go \
unsafe.go \
universe.go \
cmd/compile/internal/typecheck
'
rm gen.go types.go types_acc.go
sed -i '' 's/package gc/package typecheck/' mapfile_read.go mkbuiltin.go
mv mapfile_read.go ../typecheck # not part of default build
mv mkbuiltin.go ../typecheck # package main helper
mv builtin ../typecheck
cd ../typecheck
mv dcl.go dcl1.go
mv typecheck.go typecheck1.go
mv universe.go universe1.go
rf '
# Sweep some small files into larger ones.
# "mv sym... file1.go file.go" (after the mv file1.go file.go above)
# lets us insert sym... at the top of file.go.
mv okfor okforeq universe1.go universe.go
mv DeclContext vargen dcl1.go Temp TempAt autotmpname NewMethodType dcl.go
mv InitTodoFunc inimport decldepth TypecheckAllowed typecheck1.go typecheck.go
mv inl.go closure.go func.go
mv range.go select.go swt.go stmt.go
mv Lookup loadsys LookupRuntimeFunc LookupRuntimeVar syms.go
mv unsafe.go const.go
mv TypecheckAssignExpr AssignExpr
mv TypecheckExpr Expr
mv TypecheckStmt Stmt
mv TypecheckExprs Exprs
mv TypecheckStmts Stmts
mv TypecheckCall Call
mv TypecheckCallee Callee
mv _typecheck check
mv TypecheckFunc Func
mv TypecheckFuncBody FuncBody
mv TypecheckImports AllImportedBodies
mv TypecheckImportedBody ImportedBody
mv TypecheckInit Init
mv TypecheckPackage Package
'
rm gen.go go.go init.go main.go reflect.go
Change-Id: Iea6a7aaf6407d690670ec58aeb36cc0b280f80b0
Reviewed-on: https://go-review.googlesource.com/c/go/+/279236
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-12-23 00:41:49 -05:00
|
|
|
memhashvarlen = typecheck.LookupRuntimeFunc("memhash_varlen")
|
cmd/compile,runtime: generate hash functions only for types which are map keys
Right now we generate hash functions for all types, just in case they
are used as map keys. That's a lot of wasted effort and binary size
for types which will never be used as a map key. Instead, generate
hash functions only for types that we know are map keys.
Just doing that is a bit too simple, since maps with an interface type
as a key might have to hash any concrete key type that implements that
interface. So for that case, implement hashing of such types at
runtime (instead of with generated code). It will be slower, but only
for maps with interface types as keys, and maybe only a bit slower as
the aeshash time probably dominates the dispatch time.
Reorg where we keep the equals and hash functions. Move the hash function
from the key type to the map type, saving a field in every non-map type.
That leaves only one function in the alg structure, so get rid of that and
just keep the equal function in the type descriptor itself.
cmd/go now has 10 generated hash functions, instead of 504. Makes
cmd/go 1.0% smaller. Update #6853.
Speed on non-interface keys is unchanged. Speed on interface keys
is ~20% slower:
name old time/op new time/op delta
MapInterfaceString-8 23.0ns ±21% 27.6ns ±14% +20.01% (p=0.002 n=10+10)
MapInterfacePtr-8 19.4ns ±16% 23.7ns ± 7% +22.48% (p=0.000 n=10+8)
Change-Id: I7c2e42292a46b5d4e288aaec4029bdbb01089263
Reviewed-on: https://go-review.googlesource.com/c/go/+/191198
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Martin Möhrmann <moehrmann@google.com>
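(Sketch of the trade-off above, as seen from user code. With a concrete
key type the hash function is chosen or generated at compile time; an
interface key type must hash the dynamic key type at runtime:

	m1 := map[string]int{}      // key alg known statically: strhash
	m2 := map[interface{}]int{} // key hashed by nilinterhash, which
	m2["x"] = 1                 // dispatches on the dynamic type (string)

hence the slower interface-key benchmarks.)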
		}
		ot := 0
		ot = objw.SymPtr(closure, ot, memhashvarlen, 0)
		ot = objw.Uintptr(closure, ot, uint64(t.Size())) // size encoded in closure
		objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
		return closure
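		// For reference, a sketch of the runtime side of this closure:
		// memhash_varlen reads the size word stored after the function
		// pointer (roughly as in runtime/alg.go):
		//
		//	func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr {
		//		ptr := getclosureptr()
		//		size := *(*uintptr)(unsafe.Pointer(ptr + unsafe.Sizeof(h)))
		//		return memhash(p, h, size)
		//	}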
	case types.ASPECIAL:
		break
	}

	closure := TypeLinksymPrefix(".hashfunc", t)
	if len(closure.P) > 0 { // already generated
		return closure
	}

	// Generate hash functions for subtypes.
	// There are cases where we might not use these hashes,
	// but in that case they will get dead-code eliminated.
	// (And the closure generated by genhash will also get
	// dead-code eliminated, as we call the subtype hashers
	// directly.)
	switch t.Kind() {
	case types.TARRAY:
		genhash(t.Elem())
	case types.TSTRUCT:
		for _, f := range t.Fields() {
			genhash(f.Type)
		}
	}

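	// Illustration (sketch): for a key type such as
	//
	//	type K struct {
	//		S string
	//		A [4]string
	//	}
	//
	// the TSTRUCT case above calls genhash for string and [4]string, so
	// K's generated hasher can call those subtype hashers directly.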
	if base.Flag.LowerR != 0 {
		fmt.Printf("genhash %v %v\n", closure, t)
	}

	fn := hashFunc(t)

	// Build closure. It doesn't close over any variables, so
	// it contains just the function pointer.
	objw.SymPtr(closure, 0, fn.Linksym(), 0)
	objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)

	return closure
}

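// For reference (sketch): a closure that captures nothing is a single
// word holding only the code pointer, matching the layout of the
// runtime's funcval:
//
//	type funcval struct {
//		fn uintptr // code pointer; captured variables would follow
//	}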
func hashFunc(t *types.Type) *ir.Func {
	sym := TypeSymPrefix(".hash", t)
	if sym.Def != nil {
		return sym.Def.(*ir.Name).Func
	}

	pos := base.AutogeneratedPos // less confusing than end of input
	base.Pos = pos

	// func sym(p *T, h uintptr) uintptr
	fn := ir.NewFunc(pos, pos, sym, types.NewSignature(nil,
		[]*types.Field{
			types.NewField(pos, typecheck.Lookup("p"), types.NewPtr(t)),
			types.NewField(pos, typecheck.Lookup("h"), types.Types[types.TUINTPTR]),
		},
		[]*types.Field{
			types.NewField(pos, nil, types.Types[types.TUINTPTR]),
		},
	))
	sym.Def = fn.Nname
	fn.Pragma |= ir.Noinline // TODO(mdempsky): We need to emit this during the unified frontend instead, to allow inlining.

	typecheck.DeclFunc(fn)
	np := fn.Dcl[0]
	nh := fn.Dcl[1]

	switch t.Kind() {
	case types.TARRAY:
		// An array of pure memory would be handled by the
		// standard algorithm, so the element type must not be
		// pure memory.
		hashel := hashfor(t.Elem())

		// for i := 0; i < nelem; i++
		ni := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
		init := ir.NewAssignStmt(base.Pos, ni, ir.NewInt(base.Pos, 0))
		cond := ir.NewBinaryExpr(base.Pos, ir.OLT, ni, ir.NewInt(base.Pos, t.NumElem()))
		post := ir.NewAssignStmt(base.Pos, ni, ir.NewBinaryExpr(base.Pos, ir.OADD, ni, ir.NewInt(base.Pos, 1)))
		loop := ir.NewForStmt(base.Pos, nil, cond, post, nil, false)
		loop.PtrInit().Append(init)

		// h = hashel(&p[i], h)
		call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
[dev.regabi] cmd/compile: remove ir.Nod [generated]
Rewrite all uses of ir.Nod and friends to call the IR constructors directly.
This gives the results a more specific type and will play nicely with
introduction of more specific types throughout the code in a followup CL.
Passes buildall w/ toolstash -cmp.
[git-generate]
cd src/cmd/compile/internal/gc
rf '
ex . ../ir {
import "cmd/compile/internal/ir"
import "cmd/compile/internal/types"
import "cmd/compile/internal/syntax"
import "cmd/internal/src"
var p *noder
var orig syntax.Node
var op ir.Op
var l, r ir.Node
var sym *types.Sym
p.nod(orig, op, l, r) -> ir.NodAt(p.pos(orig), op, l, r)
p.nodSym(orig, op, l, sym) -> nodlSym(p.pos(orig), op, l, sym)
var xpos src.XPos
var ns ir.Nodes
npos(xpos, nodSym(op, l, sym)) -> nodlSym(xpos, op, l, sym)
npos(xpos, liststmt(ns)) -> ir.NewBlockStmt(xpos, ns)
}
ex . ../ir {
import "cmd/compile/internal/base"
import "cmd/compile/internal/ir"
import "cmd/compile/internal/types"
var op ir.Op
var l, r ir.Node
ir.Nod(op, l, r) -> ir.NodAt(base.Pos, op, l, r)
var sym *types.Sym
nodSym(op, l, sym) -> nodlSym(base.Pos, op, l, sym)
}
ex . ../ir {
import "cmd/compile/internal/ir"
import "cmd/internal/src"
# rf overlapping match handling is not quite good enough
# for certain nested rewrites, so handle these two - which often contain other ir.NodAt calls - early.
var l, r ir.Node
var xpos src.XPos
ir.NodAt(xpos, ir.OAS, l, r) -> ir.NewAssignStmt(xpos, l, r)
ir.NodAt(xpos, ir.OIF, l, nil) -> ir.NewIfStmt(xpos, l, nil, nil)
}
ex . ../ir {
import "cmd/compile/internal/ir"
import "cmd/compile/internal/types"
import "cmd/internal/src"
var l, r ir.Node
var sym *types.Sym
var xpos src.XPos
nodlSym(xpos, ir.ODOT, l, sym) -> ir.NewSelectorExpr(xpos, ir.ODOT, l, sym)
nodlSym(xpos, ir.OXDOT, l, sym) -> ir.NewSelectorExpr(xpos, ir.OXDOT, l, sym)
nodlSym(xpos, ir.ODOTPTR, l, sym) -> ir.NewSelectorExpr(xpos, ir.ODOTPTR, l, sym)
nodlSym(xpos, ir.OGOTO, nil, sym) -> ir.NewBranchStmt(xpos, ir.OGOTO, sym)
nodlSym(xpos, ir.ORETJMP, nil, sym) -> ir.NewBranchStmt(xpos, ir.ORETJMP, sym)
nodlSym(xpos, ir.OLABEL, nil, sym) -> ir.NewLabelStmt(xpos, sym)
nodlSym(xpos, ir.OSTRUCTKEY, l, sym) -> ir.NewStructKeyExpr(xpos, sym, l)
ir.NodAt(xpos, ir.OADD, l, r) -> ir.NewBinaryExpr(xpos, ir.OADD, l, r)
ir.NodAt(xpos, ir.OAND, l, r) -> ir.NewBinaryExpr(xpos, ir.OAND, l, r)
ir.NodAt(xpos, ir.OANDNOT, l, r) -> ir.NewBinaryExpr(xpos, ir.OANDNOT, l, r)
ir.NodAt(xpos, ir.ODIV, l, r) -> ir.NewBinaryExpr(xpos, ir.ODIV, l, r)
ir.NodAt(xpos, ir.OEQ, l, r) -> ir.NewBinaryExpr(xpos, ir.OEQ, l, r)
ir.NodAt(xpos, ir.OGE, l, r) -> ir.NewBinaryExpr(xpos, ir.OGE, l, r)
ir.NodAt(xpos, ir.OGT, l, r) -> ir.NewBinaryExpr(xpos, ir.OGT, l, r)
ir.NodAt(xpos, ir.OLE, l, r) -> ir.NewBinaryExpr(xpos, ir.OLE, l, r)
ir.NodAt(xpos, ir.OLSH, l, r) -> ir.NewBinaryExpr(xpos, ir.OLSH, l, r)
ir.NodAt(xpos, ir.OLT, l, r) -> ir.NewBinaryExpr(xpos, ir.OLT, l, r)
ir.NodAt(xpos, ir.OMOD, l, r) -> ir.NewBinaryExpr(xpos, ir.OMOD, l, r)
ir.NodAt(xpos, ir.OMUL, l, r) -> ir.NewBinaryExpr(xpos, ir.OMUL, l, r)
ir.NodAt(xpos, ir.ONE, l, r) -> ir.NewBinaryExpr(xpos, ir.ONE, l, r)
ir.NodAt(xpos, ir.OOR, l, r) -> ir.NewBinaryExpr(xpos, ir.OOR, l, r)
ir.NodAt(xpos, ir.ORSH, l, r) -> ir.NewBinaryExpr(xpos, ir.ORSH, l, r)
ir.NodAt(xpos, ir.OSUB, l, r) -> ir.NewBinaryExpr(xpos, ir.OSUB, l, r)
ir.NodAt(xpos, ir.OXOR, l, r) -> ir.NewBinaryExpr(xpos, ir.OXOR, l, r)
ir.NodAt(xpos, ir.OCOPY, l, r) -> ir.NewBinaryExpr(xpos, ir.OCOPY, l, r)
ir.NodAt(xpos, ir.OCOMPLEX, l, r) -> ir.NewBinaryExpr(xpos, ir.OCOMPLEX, l, r)
ir.NodAt(xpos, ir.OEFACE, l, r) -> ir.NewBinaryExpr(xpos, ir.OEFACE, l, r)
ir.NodAt(xpos, ir.OADDR, l, nil) -> ir.NewAddrExpr(xpos, l)
ir.NodAt(xpos, ir.OADDSTR, nil, nil) -> ir.NewAddStringExpr(xpos, nil)
ir.NodAt(xpos, ir.OANDAND, l, r) -> ir.NewLogicalExpr(xpos, ir.OANDAND, l, r)
ir.NodAt(xpos, ir.OOROR, l, r) -> ir.NewLogicalExpr(xpos, ir.OOROR, l, r)
ir.NodAt(xpos, ir.OARRAYLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OARRAYLIT, nil, nil)
ir.NodAt(xpos, ir.OCOMPLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OCOMPLIT, nil, nil)
ir.NodAt(xpos, ir.OMAPLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OMAPLIT, nil, nil)
ir.NodAt(xpos, ir.OSTRUCTLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OSTRUCTLIT, nil, nil)
ir.NodAt(xpos, ir.OSLICELIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OSLICELIT, nil, nil)
ir.NodAt(xpos, ir.OARRAYLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OARRAYLIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OCOMPLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OCOMPLIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OMAPLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OMAPLIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OSTRUCTLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OSTRUCTLIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OSLICELIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OSLICELIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OAS2, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2, nil, nil)
ir.NodAt(xpos, ir.OAS2DOTTYPE, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2DOTTYPE, nil, nil)
ir.NodAt(xpos, ir.OAS2FUNC, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2FUNC, nil, nil)
ir.NodAt(xpos, ir.OAS2MAPR, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2MAPR, nil, nil)
ir.NodAt(xpos, ir.OAS2RECV, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2RECV, nil, nil)
ir.NodAt(xpos, ir.OSELRECV2, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OSELRECV2, nil, nil)
ir.NodAt(xpos, ir.OASOP, l, r) -> ir.NewAssignOpStmt(xpos, ir.OXXX, l, r)
ir.NodAt(xpos, ir.OBITNOT, l, nil) -> ir.NewUnaryExpr(xpos, ir.OBITNOT, l)
ir.NodAt(xpos, ir.ONEG, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONEG, l)
ir.NodAt(xpos, ir.ONOT, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONOT, l)
ir.NodAt(xpos, ir.OPLUS, l, nil) -> ir.NewUnaryExpr(xpos, ir.OPLUS, l)
ir.NodAt(xpos, ir.ORECV, l, nil) -> ir.NewUnaryExpr(xpos, ir.ORECV, l)
ir.NodAt(xpos, ir.OALIGNOF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OALIGNOF, l)
ir.NodAt(xpos, ir.OCAP, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCAP, l)
ir.NodAt(xpos, ir.OCLOSE, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCLOSE, l)
ir.NodAt(xpos, ir.OIMAG, l, nil) -> ir.NewUnaryExpr(xpos, ir.OIMAG, l)
ir.NodAt(xpos, ir.OLEN, l, nil) -> ir.NewUnaryExpr(xpos, ir.OLEN, l)
ir.NodAt(xpos, ir.ONEW, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONEW, l)
ir.NodAt(xpos, ir.ONEWOBJ, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONEWOBJ, l)
ir.NodAt(xpos, ir.OOFFSETOF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OOFFSETOF, l)
ir.NodAt(xpos, ir.OPANIC, l, nil) -> ir.NewUnaryExpr(xpos, ir.OPANIC, l)
ir.NodAt(xpos, ir.OREAL, l, nil) -> ir.NewUnaryExpr(xpos, ir.OREAL, l)
ir.NodAt(xpos, ir.OSIZEOF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OSIZEOF, l)
ir.NodAt(xpos, ir.OCHECKNIL, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCHECKNIL, l)
ir.NodAt(xpos, ir.OCFUNC, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCFUNC, l)
ir.NodAt(xpos, ir.OIDATA, l, nil) -> ir.NewUnaryExpr(xpos, ir.OIDATA, l)
ir.NodAt(xpos, ir.OITAB, l, nil) -> ir.NewUnaryExpr(xpos, ir.OITAB, l)
ir.NodAt(xpos, ir.OSPTR, l, nil) -> ir.NewUnaryExpr(xpos, ir.OSPTR, l)
ir.NodAt(xpos, ir.OVARDEF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OVARDEF, l)
ir.NodAt(xpos, ir.OVARKILL, l, nil) -> ir.NewUnaryExpr(xpos, ir.OVARKILL, l)
ir.NodAt(xpos, ir.OVARLIVE, l, nil) -> ir.NewUnaryExpr(xpos, ir.OVARLIVE, l)
ir.NodAt(xpos, ir.OBLOCK, nil, nil) -> ir.NewBlockStmt(xpos, nil)
ir.NodAt(xpos, ir.OBREAK, nil, nil) -> ir.NewBranchStmt(xpos, ir.OBREAK, nil)
ir.NodAt(xpos, ir.OCONTINUE, nil, nil) -> ir.NewBranchStmt(xpos, ir.OCONTINUE, nil)
ir.NodAt(xpos, ir.OFALL, nil, nil) -> ir.NewBranchStmt(xpos, ir.OFALL, nil)
ir.NodAt(xpos, ir.OGOTO, nil, nil) -> ir.NewBranchStmt(xpos, ir.OGOTO, nil)
ir.NodAt(xpos, ir.ORETJMP, nil, nil) -> ir.NewBranchStmt(xpos, ir.ORETJMP, nil)
ir.NodAt(xpos, ir.OCALL, l, nil) -> ir.NewCallExpr(xpos, ir.OCALL, l, nil)
ir.NodAt(xpos, ir.OCALLFUNC, l, nil) -> ir.NewCallExpr(xpos, ir.OCALLFUNC, l, nil)
ir.NodAt(xpos, ir.OCALLINTER, l, nil) -> ir.NewCallExpr(xpos, ir.OCALLINTER, l, nil)
ir.NodAt(xpos, ir.OCALLMETH, l, nil) -> ir.NewCallExpr(xpos, ir.OCALLMETH, l, nil)
ir.NodAt(xpos, ir.OAPPEND, l, nil) -> ir.NewCallExpr(xpos, ir.OAPPEND, l, nil)
ir.NodAt(xpos, ir.ODELETE, l, nil) -> ir.NewCallExpr(xpos, ir.ODELETE, l, nil)
ir.NodAt(xpos, ir.OGETG, l, nil) -> ir.NewCallExpr(xpos, ir.OGETG, l, nil)
ir.NodAt(xpos, ir.OMAKE, l, nil) -> ir.NewCallExpr(xpos, ir.OMAKE, l, nil)
ir.NodAt(xpos, ir.OPRINT, l, nil) -> ir.NewCallExpr(xpos, ir.OPRINT, l, nil)
ir.NodAt(xpos, ir.OPRINTN, l, nil) -> ir.NewCallExpr(xpos, ir.OPRINTN, l, nil)
ir.NodAt(xpos, ir.ORECOVER, l, nil) -> ir.NewCallExpr(xpos, ir.ORECOVER, l, nil)
ir.NodAt(xpos, ir.OCASE, nil, nil) -> ir.NewCaseStmt(xpos, nil, nil)
ir.NodAt(xpos, ir.OCONV, l, nil) -> ir.NewConvExpr(xpos, ir.OCONV, nil, l)
ir.NodAt(xpos, ir.OCONVIFACE, l, nil) -> ir.NewConvExpr(xpos, ir.OCONVIFACE, nil, l)
ir.NodAt(xpos, ir.OCONVNOP, l, nil) -> ir.NewConvExpr(xpos, ir.OCONVNOP, nil, l)
ir.NodAt(xpos, ir.ORUNESTR, l, nil) -> ir.NewConvExpr(xpos, ir.ORUNESTR, nil, l)
ir.NodAt(xpos, ir.ODCL, l, nil) -> ir.NewDecl(xpos, ir.ODCL, l)
ir.NodAt(xpos, ir.ODCLCONST, l, nil) -> ir.NewDecl(xpos, ir.ODCLCONST, l)
ir.NodAt(xpos, ir.ODCLTYPE, l, nil) -> ir.NewDecl(xpos, ir.ODCLTYPE, l)
ir.NodAt(xpos, ir.ODCLFUNC, nil, nil) -> ir.NewFunc(xpos)
ir.NodAt(xpos, ir.ODEFER, l, nil) -> ir.NewGoDeferStmt(xpos, ir.ODEFER, l)
ir.NodAt(xpos, ir.OGO, l, nil) -> ir.NewGoDeferStmt(xpos, ir.OGO, l)
ir.NodAt(xpos, ir.ODEREF, l, nil) -> ir.NewStarExpr(xpos, l)
ir.NodAt(xpos, ir.ODOT, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOT, l, nil)
ir.NodAt(xpos, ir.ODOTPTR, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTPTR, l, nil)
ir.NodAt(xpos, ir.ODOTMETH, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTMETH, l, nil)
ir.NodAt(xpos, ir.ODOTINTER, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTINTER, l, nil)
ir.NodAt(xpos, ir.OXDOT, l, nil) -> ir.NewSelectorExpr(xpos, ir.OXDOT, l, nil)
ir.NodAt(xpos, ir.ODOTTYPE, l, nil) -> ir.NewTypeAssertExpr(xpos, l, nil)
ir.NodAt(xpos, ir.ODOTTYPE, l, r) -> ir.NewTypeAssertExpr(xpos, l, r.(ir.Ntype))
ir.NodAt(xpos, ir.OFOR, l, r) -> ir.NewForStmt(xpos, nil, l, r, nil)
ir.NodAt(xpos, ir.OINDEX, l, r) -> ir.NewIndexExpr(xpos, l, r)
ir.NodAt(xpos, ir.OINLMARK, nil, nil) -> ir.NewInlineMarkStmt(xpos, types.BADWIDTH)
ir.NodAt(xpos, ir.OKEY, l, r) -> ir.NewKeyExpr(xpos, l, r)
ir.NodAt(xpos, ir.OLABEL, nil, nil) -> ir.NewLabelStmt(xpos, nil)
ir.NodAt(xpos, ir.OMAKECHAN, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKECHAN, l, r)
ir.NodAt(xpos, ir.OMAKEMAP, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKEMAP, l, r)
ir.NodAt(xpos, ir.OMAKESLICE, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKESLICE, l, r)
ir.NodAt(xpos, ir.OMAKESLICECOPY, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKESLICECOPY, l, r)
ir.NodAt(xpos, ir.ONIL, nil, nil) -> ir.NewNilExpr(xpos)
ir.NodAt(xpos, ir.OPACK, nil, nil) -> ir.NewPkgName(xpos, nil, nil)
ir.NodAt(xpos, ir.OPAREN, l, nil) -> ir.NewParenExpr(xpos, l)
ir.NodAt(xpos, ir.ORANGE, nil, r) -> ir.NewRangeStmt(xpos, nil, r, nil)
ir.NodAt(xpos, ir.ORESULT, nil, nil) -> ir.NewResultExpr(xpos, nil, types.BADWIDTH)
ir.NodAt(xpos, ir.ORETURN, nil, nil) -> ir.NewReturnStmt(xpos, nil)
ir.NodAt(xpos, ir.OSELECT, nil, nil) -> ir.NewSelectStmt(xpos, nil)
ir.NodAt(xpos, ir.OSEND, l, r) -> ir.NewSendStmt(xpos, l, r)
ir.NodAt(xpos, ir.OSLICE, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE, l)
ir.NodAt(xpos, ir.OSLICEARR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICEARR, l)
ir.NodAt(xpos, ir.OSLICESTR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICESTR, l)
ir.NodAt(xpos, ir.OSLICE3, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE3, l)
ir.NodAt(xpos, ir.OSLICE3ARR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE3ARR, l)
ir.NodAt(xpos, ir.OSLICEHEADER, l, nil) -> ir.NewSliceHeaderExpr(xpos, nil, l, nil, nil)
ir.NodAt(xpos, ir.OSWITCH, l, nil) -> ir.NewSwitchStmt(xpos, l, nil)
ir.NodAt(xpos, ir.OINLCALL, nil, nil) -> ir.NewInlinedCallExpr(xpos, nil, nil)
}
rm noder.nod noder.nodSym nodSym nodlSym ir.NodAt ir.Nod
'
Change-Id: Ibf1eb708de8463ae74ccc47d7966cc263a18295e
Reviewed-on: https://go-review.googlesource.com/c/go/+/277933
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
		nx := ir.NewIndexExpr(base.Pos, np, ni)
		nx.SetBounded(true)
[dev.regabi] cmd/compile: split out package typecheck [generated]
This commit splits the typechecking logic into its own package,
the first of a sequence of CLs to break package gc into more
manageable units.
[git-generate]
cd src/cmd/compile/internal/gc
rf '
# The binary import/export has to be part of typechecking,
# because we load inlined function bodies lazily, but "exporter"
# should not be. Move that out of bexport.go.
mv exporter exporter.markObject exporter.markType export.go
# Use the typechecking helpers, so that the calls left behind
# in package gc do not need access to ctxExpr etc.
ex {
import "cmd/compile/internal/ir"
# TODO(rsc): Should not be necessary.
avoid TypecheckExpr
avoid TypecheckStmt
avoid TypecheckExprs
avoid TypecheckStmts
avoid TypecheckAssignExpr
avoid TypecheckCallee
var n ir.Node
var ns []ir.Node
typecheck(n, ctxExpr) -> TypecheckExpr(n)
typecheck(n, ctxStmt) -> TypecheckStmt(n)
typecheckslice(ns, ctxExpr) -> TypecheckExprs(ns)
typecheckslice(ns, ctxStmt) -> TypecheckStmts(ns)
typecheck(n, ctxExpr|ctxAssign) -> TypecheckAssignExpr(n)
typecheck(n, ctxExpr|ctxCallee) -> TypecheckCallee(n)
}
# Move some typechecking API to typecheck.
mv syslook LookupRuntime
mv substArgTypes SubstArgTypes
mv LookupRuntime SubstArgTypes syms.go
mv conv Conv
mv convnop ConvNop
mv Conv ConvNop typecheck.go
mv colasdefn AssignDefn
mv colasname assignableName
mv Target target.go
mv initname autoexport exportsym dcl.go
mv exportsym Export
# Export API to be called from outside typecheck.
# The ones with "Typecheck" prefixes will be renamed later to drop the prefix.
mv adddot AddImplicitDots
mv assignconv AssignConv
mv expandmeth CalcMethods
mv capturevarscomplete CaptureVarsComplete
mv checkMapKeys CheckMapKeys
mv checkreturn CheckReturn
mv dclcontext DeclContext
mv dclfunc DeclFunc
mv declare Declare
mv dotImportRefs DotImportRefs
mv declImporter DeclImporter
mv variter DeclVars
mv defaultlit DefaultLit
mv evalConst EvalConst
mv expandInline ImportBody
mv finishUniverse declareUniverse
mv funcbody FinishFuncBody
mv funchdr StartFuncBody
mv indexconst IndexConst
mv initTodo InitTodoFunc
mv lookup Lookup
mv resolve Resolve
mv lookupN LookupNum
mv nodAddr NodAddr
mv nodAddrAt NodAddrAt
mv nodnil NodNil
mv origBoolConst OrigBool
mv origConst OrigConst
mv origIntConst OrigInt
mv redeclare Redeclared
mv tostruct NewStructType
mv functype NewFuncType
mv methodfunc NewMethodType
mv structargs NewFuncParams
mv temp Temp
mv tempAt TempAt
mv typecheckok TypecheckAllowed
mv typecheck _typecheck # make room for typecheck pkg
mv typecheckinl TypecheckImportedBody
mv typecheckFunc TypecheckFunc
mv iimport ReadImports
mv iexport WriteExports
mv sysfunc LookupRuntimeFunc
mv sysvar LookupRuntimeVar
# Move function constructors to typecheck.
mv mkdotargslice MakeDotArgs
mv fixVariadicCall FixVariadicCall
mv closureType ClosureType
mv partialCallType PartialCallType
mv capturevars CaptureVars
mv MakeDotArgs FixVariadicCall ClosureType PartialCallType CaptureVars typecheckclosure func.go
mv autolabel AutoLabel
mv AutoLabel syms.go
mv Dlist dlist
mv Symlink symlink
mv \
AssignDefn assignableName \
AssignConv \
CaptureVarsComplete \
DeclContext \
DeclFunc \
DeclImporter \
DeclVars \
Declare \
DotImportRefs \
Export \
InitTodoFunc \
Lookup \
LookupNum \
LookupRuntimeFunc \
LookupRuntimeVar \
NewFuncParams \
NewName \
NodAddr \
NodAddrAt \
NodNil \
Redeclared \
StartFuncBody \
FinishFuncBody \
TypecheckImportedBody \
AddImplicitDots \
CalcMethods \
CheckFuncStack \
NewFuncType \
NewMethodType \
NewStructType \
TypecheckAllowed \
Temp \
TempAt \
adddot1 \
dotlist \
addmethod \
assignconvfn \
assignop \
autotmpname \
autoexport \
bexport.go \
checkdupfields \
checkembeddedtype \
closurename \
convertop \
declare_typegen \
decldepth \
dlist \
dotpath \
expand0 \
expand1 \
expandDecl \
fakeRecvField \
fnpkg \
funcStack \
funcStackEnt \
funcarg \
funcarg2 \
funcargs \
funcargs2 \
globClosgen \
ifacelookdot \
implements \
importalias \
importconst \
importfunc \
importobj \
importsym \
importtype \
importvar \
inimport \
initname \
isptrto \
loadsys \
lookdot0 \
lookdot1 \
makepartialcall \
okfor \
okforlen \
operandType \
slist \
symlink \
tointerface \
typeSet \
typeSet.add \
typeSetEntry \
typecheckExprSwitch \
typecheckTypeSwitch \
typecheckpartialcall \
typecheckrange \
typecheckrangeExpr \
typecheckselect \
typecheckswitch \
vargen \
builtin.go \
builtin_test.go \
const.go \
func.go \
iexport.go \
iimport.go \
mapfile_mmap.go \
syms.go \
target.go \
typecheck.go \
unsafe.go \
universe.go \
cmd/compile/internal/typecheck
'
rm gen.go types.go types_acc.go
sed -i '' 's/package gc/package typecheck/' mapfile_read.go mkbuiltin.go
mv mapfile_read.go ../typecheck # not part of default build
mv mkbuiltin.go ../typecheck # package main helper
mv builtin ../typecheck
cd ../typecheck
mv dcl.go dcl1.go
mv typecheck.go typecheck1.go
mv universe.go universe1.go
rf '
# Sweep some small files into larger ones.
# "mv sym... file1.go file.go" (after the mv file1.go file.go above)
# lets us insert sym... at the top of file.go.
mv okfor okforeq universe1.go universe.go
mv DeclContext vargen dcl1.go Temp TempAt autotmpname NewMethodType dcl.go
mv InitTodoFunc inimport decldepth TypecheckAllowed typecheck1.go typecheck.go
mv inl.go closure.go func.go
mv range.go select.go swt.go stmt.go
mv Lookup loadsys LookupRuntimeFunc LookupRuntimeVar syms.go
mv unsafe.go const.go
mv TypecheckAssignExpr AssignExpr
mv TypecheckExpr Expr
mv TypecheckStmt Stmt
mv TypecheckExprs Exprs
mv TypecheckStmts Stmts
mv TypecheckCall Call
mv TypecheckCallee Callee
mv _typecheck check
mv TypecheckFunc Func
mv TypecheckFuncBody FuncBody
mv TypecheckImports AllImportedBodies
mv TypecheckImportedBody ImportedBody
mv TypecheckInit Init
mv TypecheckPackage Package
'
rm gen.go go.go init.go main.go reflect.go
Change-Id: Iea6a7aaf6407d690670ec58aeb36cc0b280f80b0
Reviewed-on: https://go-review.googlesource.com/c/go/+/279236
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
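(Sketch: a reading of the rf commands above. "mv old New" renames a
declaration, and "mv sym... file.go" sweeps declarations into a file;
"ex { ... }" applies example-based rewrites, so that, e.g.,

	ex {
		var n ir.Node
		typecheck(n, ctxExpr) -> TypecheckExpr(n)
	}

rewrites every call site matching the left-hand pattern.)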
		na := typecheck.NodAddr(nx)
		call.Args.Append(na)
		call.Args.Append(nh)
		loop.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))

		fn.Body.Append(loop)
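		// Illustration (sketch): for t = [4]string the IR assembled above
		// corresponds to a body like
		//
		//	for i := 0; i < 4; i++ {
		//		h = strhash(&p[i], h)
		//	}
		//
		// (the final "return h" is appended after the switch, below).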
	case types.TSTRUCT:
		// Walk the struct using memhash for runs of AMEM
		// and calling specific hash functions for the others.
		for i, fields := 0, t.Fields(); i < len(fields); {
			f := fields[i]

			// Skip blank fields.
			if f.Sym.IsBlank() {
				i++
				continue
			}

			// Hash non-memory fields with appropriate hash function.
			if !compare.IsRegularMemory(f.Type) {
				hashel := hashfor(f.Type)
				call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
				na := typecheck.NodAddr(typecheck.DotField(base.Pos, np, i))
				call.Args.Append(na)
				call.Args.Append(nh)
				fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
				i++
				continue
			}

			// Otherwise, hash a maximal length run of raw memory.
			size, next := compare.Memrun(t, i)

			// h = hashel(&p.first, h, size)
			hashel := hashmem(f.Type)
			call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
			na := typecheck.NodAddr(typecheck.DotField(base.Pos, np, i))
			call.Args.Append(na)
			call.Args.Append(nh)
			call.Args.Append(ir.NewInt(base.Pos, size))
			fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
			i = next
		}
	}

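	// Illustration (sketch): for a key type such as
	//
	//	type T struct {
	//		A, B int32 // one maximal run of plain memory
	//		S    string
	//	}
	//
	// the walk above emits, in effect:
	//
	//	h = memhash(&p.A, h, 8) // compare.Memrun reports size 8, next = 2
	//	h = strhash(&p.S, h)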
	r := ir.NewReturnStmt(base.Pos, nil)
	r.Results.Append(nh)
	fn.Body.Append(r)

	if base.Flag.LowerR != 0 {
		ir.DumpList("genhash body", fn.Body)
	}

	typecheck.FinishFuncBody()

	fn.SetDupok(true)
	ir.WithFunc(fn, func() {
		typecheck.Stmts(fn.Body)
	})

	fn.SetNilCheckDisabled(true)
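	// Assumption (explanatory): the runtime only calls the generated
	// hasher with a valid key pointer, so nil checks in its body would
	// only add code; disabling them is safe here.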
cmd/compile,runtime: generate hash functions only for types which are map keys
Right now we generate hash functions for all types, just in case they
are used as map keys. That's a lot of wasted effort and binary size
for types which will never be used as a map key. Instead, generate
hash functions only for types that we know are map keys.
Just doing that is a bit too simple, since maps with an interface type
as a key might have to hash any concrete key type that implements that
interface. So for that case, implement hashing of such types at
runtime (instead of with generated code). It will be slower, but only
for maps with interface types as keys, and maybe only a bit slower as
the aeshash time probably dominates the dispatch time.
Reorg where we keep the equals and hash functions. Move the hash function
from the key type to the map type, saving a field in every non-map type.
That leaves only one function in the alg structure, so get rid of that and
just keep the equal function in the type descriptor itself.
cmd/go now has 10 generated hash functions, instead of 504. Makes
cmd/go 1.0% smaller. Update #6853.
Speed on non-interface keys is unchanged. Speed on interface keys
is ~20% slower:
name old time/op new time/op delta
MapInterfaceString-8 23.0ns ±21% 27.6ns ±14% +20.01% (p=0.002 n=10+10)
MapInterfacePtr-8 19.4ns ±16% 23.7ns ± 7% +22.48% (p=0.000 n=10+8)
Change-Id: I7c2e42292a46b5d4e288aaec4029bdbb01089263
Reviewed-on: https://go-review.googlesource.com/c/go/+/191198
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Martin Möhrmann <moehrmann@google.com>
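
// A minimal sketch of the kind of hasher the generator completed above
// produces, assuming a hypothetical struct T with an int64 field a and
// a string field s. Illustrative only: the generator emits compiler IR,
// not source text, and field handling is more involved than shown.
//
//	func hashT(p *T, h uintptr) uintptr {
//		h = memhash64(unsafe.Pointer(&p.a), h) // fixed-size field: memory hash
//		h = strhash(unsafe.Pointer(&p.s), h)   // string field: runtime hasher
//		return h
//	}
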
func runtimeHashFor(name string, t *types.Type) *ir.Name {
	return typecheck.LookupRuntime(name, t)
}

// hashfor returns the function to compute the hash of a value of type t.
func hashfor(t *types.Type) *ir.Name {
	switch a, _ := types.AlgType(t); a {
	case types.AMEM:
		base.Fatalf("hashfor with AMEM type")
	case types.AINTER:
		return runtimeHashFor("interhash", t)
	case types.ANILINTER:
		return runtimeHashFor("nilinterhash", t)
	case types.ASTRING:
		return runtimeHashFor("strhash", t)
	case types.AFLOAT32:
		return runtimeHashFor("f32hash", t)
	case types.AFLOAT64:
		return runtimeHashFor("f64hash", t)
	case types.ACPLX64:
		return runtimeHashFor("c64hash", t)
	case types.ACPLX128:
		return runtimeHashFor("c128hash", t)
	}

	fn := hashFunc(t)
	return fn.Nname
}
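
// The names passed to runtimeHashFor above refer to hashers defined in
// runtime/alg.go. They share a single shape: p points at the value to
// hash, h is the seed, and the result is the updated hash. For example:
//
//	func strhash(p unsafe.Pointer, h uintptr) uintptr
//	func interhash(p unsafe.Pointer, h uintptr) uintptr
//	func nilinterhash(p unsafe.Pointer, h uintptr) uintptr
//	func f32hash(p unsafe.Pointer, h uintptr) uintptr
//	func c128hash(p unsafe.Pointer, h uintptr) uintptr
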
// sysClosure returns a closure which will call the
// given runtime function (with no closed-over variables).
func sysClosure(name string) *obj.LSym {
	s := typecheck.LookupRuntimeVar(name + "·f")
	if len(s.P) == 0 {
		f := typecheck.LookupRuntimeFunc(name)
		objw.SymPtr(s, 0, f, 0)
		objw.Global(s, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
	}
	return s
}
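
// In runtime terms, the symbol sysClosure emits is a funcval with no
// captured variables: a single code pointer in read-only memory. The
// runtime's definition (runtime/runtime2.go) is:
//
//	type funcval struct {
//		fn uintptr
//		// variable-size, fn-specific data here
//	}
//
// The variable-length memequal closure built in geneq below extends
// this layout with one extra word holding the size to compare.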

// geneq returns a symbol which is the closure used to compute
// equality for two objects of type t.
func geneq(t *types.Type) *obj.LSym {
	switch AlgType(t) {
	case types.ANOEQ:
		// The runtime will panic if it tries to compare
		// a type with a nil equality function.
		return nil
	case types.AMEM0:
		return sysClosure("memequal0")
	case types.AMEM8:
		return sysClosure("memequal8")
	case types.AMEM16:
		return sysClosure("memequal16")
	case types.AMEM32:
		return sysClosure("memequal32")
	case types.AMEM64:
		return sysClosure("memequal64")
	case types.AMEM128:
		return sysClosure("memequal128")
	case types.ASTRING:
		return sysClosure("strequal")
	case types.AINTER:
		return sysClosure("interequal")
	case types.ANILINTER:
		return sysClosure("nilinterequal")
	case types.AFLOAT32:
		return sysClosure("f32equal")
	case types.AFLOAT64:
		return sysClosure("f64equal")
	case types.ACPLX64:
		return sysClosure("c64equal")
	case types.ACPLX128:
		return sysClosure("c128equal")
	case types.AMEM:
		// make equality closure. The size of the type
		// is encoded in the closure.
		closure := TypeLinksymLookup(fmt.Sprintf(".eqfunc%d", t.Size()))
		if len(closure.P) != 0 {
			return closure
		}
		if memequalvarlen == nil {
			memequalvarlen = typecheck.LookupRuntimeFunc("memequal_varlen")
		}
		ot := 0
		ot = objw.SymPtr(closure, ot, memequalvarlen, 0)
		ot = objw.Uintptr(closure, ot, uint64(t.Size()))
		objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
		return closure
	case types.ASPECIAL:
		break
	}

	closure := TypeLinksymPrefix(".eqfunc", t)
	if len(closure.P) > 0 { // already generated
		return closure
	}

	if base.Flag.LowerR != 0 {
|
|
|
fmt.Printf("geneq %v\n", t)
|
2016-02-26 14:56:31 -08:00
|
|
|
}
|
|
|
|
|
|
2022-09-28 10:20:25 +07:00
|
|
|
fn := eqFunc(t)

	// Generate a closure which points at the function we just generated.
	objw.SymPtr(closure, 0, fn.Linksym(), 0)
	objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
	return closure
}
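
// eqFunc returns the function definition for type t's equality
// algorithm, generating and declaring it if necessary.
//
// As a sketch (adapted from the example in CL 230210), for
//
//	type T [3]string
//
// the generated function looks roughly like:
//
//	func eqT(p, q *T) (r bool) {
//		if len(p[0]) == len(q[0]) &&
//			len(p[1]) == len(q[1]) &&
//			len(p[2]) == len(q[2]) {
//		} else {
//			return
//		}
//		for i := 0; i < len(p); i++ {
//			if runtime.memequal(p[i].ptr, q[i].ptr, len(p[i])) {
//			} else {
//				return
//			}
//		}
//		return true
//	}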
func eqFunc(t *types.Type) *ir.Func {
	// Autogenerate code for equality of structs and arrays.
	sym := TypeSymPrefix(".eq", t)
	if sym.Def != nil {
		return sym.Def.(*ir.Name).Func
	}

	pos := base.AutogeneratedPos // less confusing than end of input
	base.Pos = pos // nodes constructed below pick up base.Pos by default

	// func sym(p, q *T) bool
	fn := ir.NewFunc(pos, pos, sym, types.NewSignature(nil,
		[]*types.Field{
			types.NewField(pos, typecheck.Lookup("p"), types.NewPtr(t)),
			types.NewField(pos, typecheck.Lookup("q"), types.NewPtr(t)),
		},
		[]*types.Field{
			types.NewField(pos, typecheck.Lookup("r"), types.Types[types.TBOOL]),
		},
	))
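	// Cache the definition on the symbol so the check at the top of
	// eqFunc reuses this function on later calls.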
	sym.Def = fn.Nname
	fn.Pragma |= ir.Noinline // TODO(mdempsky): We need to emit this during the unified frontend instead, to allow inlining.
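
	// Declare the function and pull out the parameters p and q and the
	// result r for use below.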
	typecheck.DeclFunc(fn)
	np := fn.Dcl[0]
	nq := fn.Dcl[1]
	nr := fn.Dcl[2]

	// Label to jump to if an equality test fails.
	neq := typecheck.AutoLabel(".neq")

	// We reach here only for types that have equality but
	// cannot be handled by the standard algorithms,
	// so t must be either an array or a struct.
	switch t.Kind() {
	default:
		base.Fatalf("geneq %v", t)

	case types.TARRAY:
		nelem := t.NumElem()

		// checkAll generates code to check the equality of all array elements.
		// If unroll is greater than nelem, checkAll generates:
		//
		// if eq(p[0], q[0]) && eq(p[1], q[1]) && ... {
		// } else {
		//   goto neq
		// }
		//
		// And so on.
		//
		// Otherwise it generates:
		//
		// iterateTo := nelem/unroll*unroll
		// for i := 0; i < iterateTo; i += unroll {
		//   if eq(p[i+0], q[i+0]) && eq(p[i+1], q[i+1]) && ... && eq(p[i+unroll-1], q[i+unroll-1]) {
		//   } else {
		//     goto neq
		//   }
		// }
		// if eq(p[iterateTo+0], q[iterateTo+0]) && eq(p[iterateTo+1], q[iterateTo+1]) && ... {
		// } else {
		//   goto neq
		// }
		//
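		// For example, with nelem == 5 and unroll == 2, iterateTo is
		// 5/2*2 == 4: the loop compares elements 0-3 two at a time and
		// the trailing check compares the remaining element 4.
		//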
		checkAll := func(unroll int64, last bool, eq func(pi, qi ir.Node) ir.Node) {
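			// unroll is the number of element comparisons emitted per
			// batch; eq builds the comparison node for one pair of
			// elements pi and qi.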
			// checkIdx generates a node to check for equality at index i.
|
[dev.regabi] cmd/compile: replace *Node type with an interface Node [generated]
The plan is to introduce a Node interface that replaces the old *Node pointer-to-struct.
The previous CL defined an interface INode modeling a *Node.
This CL:
- Changes all references outside internal/ir to use INode,
along with many references inside internal/ir as well.
- Renames Node to node.
- Renames INode to Node
So now ir.Node is an interface implemented by *ir.node, which is otherwise inaccessible,
and the code outside package ir is now (clearly) using only the interface.
The usual rule is never to redefine an existing name with a new meaning,
so that old code that hasn't been updated gets a "unknown name" error
instead of more mysterious errors or silent misbehavior. That rule would
caution against replacing Node-the-struct with Node-the-interface,
as in this CL, because code that says *Node would now be using a pointer
to an interface. But this CL is being landed at the same time as another that
moves Node from gc to ir. So the net effect is to replace *gc.Node with ir.Node,
which does follow the rule: any lingering references to gc.Node will be told
it's gone, not silently start using pointers to interfaces. So the rule is followed
by the CL sequence, just not this specific CL.
Overall, the loss of inlining caused by using interfaces cuts the compiler speed
by about 6%, a not insignificant amount. However, as we convert the representation
to concrete structs that are not the giant Node over the next weeks, that speed
should come back as more of the compiler starts operating directly on concrete types
and the memory taken up by the graph of Nodes drops due to the more precise
structs. Honestly, I was expecting worse.
% benchstat bench.old bench.new
name old time/op new time/op delta
Template 168ms ± 4% 182ms ± 2% +8.34% (p=0.000 n=9+9)
Unicode 72.2ms ±10% 82.5ms ± 6% +14.38% (p=0.000 n=9+9)
GoTypes 563ms ± 8% 598ms ± 2% +6.14% (p=0.006 n=9+9)
Compiler 2.89s ± 4% 3.04s ± 2% +5.37% (p=0.000 n=10+9)
SSA 6.45s ± 4% 7.25s ± 5% +12.41% (p=0.000 n=9+10)
Flate 105ms ± 2% 115ms ± 1% +9.66% (p=0.000 n=10+8)
GoParser 144ms ±10% 152ms ± 2% +5.79% (p=0.011 n=9+8)
Reflect 345ms ± 9% 370ms ± 4% +7.28% (p=0.001 n=10+9)
Tar 149ms ± 9% 161ms ± 5% +8.05% (p=0.001 n=10+9)
XML 190ms ± 3% 209ms ± 2% +9.54% (p=0.000 n=9+8)
LinkCompiler 327ms ± 2% 325ms ± 2% ~ (p=0.382 n=8+8)
ExternalLinkCompiler 1.77s ± 4% 1.73s ± 6% ~ (p=0.113 n=9+10)
LinkWithoutDebugCompiler 214ms ± 4% 211ms ± 2% ~ (p=0.360 n=10+8)
StdCmd 14.8s ± 3% 15.9s ± 1% +6.98% (p=0.000 n=10+9)
[Geo mean] 480ms 510ms +6.31%
name old user-time/op new user-time/op delta
Template 223ms ± 3% 237ms ± 3% +6.16% (p=0.000 n=9+10)
Unicode 103ms ± 6% 113ms ± 3% +9.53% (p=0.000 n=9+9)
GoTypes 758ms ± 8% 800ms ± 2% +5.55% (p=0.003 n=10+9)
Compiler 3.95s ± 2% 4.12s ± 2% +4.34% (p=0.000 n=10+9)
SSA 9.43s ± 1% 9.74s ± 4% +3.25% (p=0.000 n=8+10)
Flate 132ms ± 2% 141ms ± 2% +6.89% (p=0.000 n=9+9)
GoParser 177ms ± 9% 183ms ± 4% ~ (p=0.050 n=9+9)
Reflect 467ms ±10% 495ms ± 7% +6.17% (p=0.029 n=10+10)
Tar 183ms ± 9% 197ms ± 5% +7.92% (p=0.001 n=10+10)
XML 249ms ± 5% 268ms ± 4% +7.82% (p=0.000 n=10+9)
LinkCompiler 544ms ± 5% 544ms ± 6% ~ (p=0.863 n=9+9)
ExternalLinkCompiler 1.79s ± 4% 1.75s ± 6% ~ (p=0.075 n=10+10)
LinkWithoutDebugCompiler 248ms ± 6% 246ms ± 2% ~ (p=0.965 n=10+8)
[Geo mean] 483ms 504ms +4.41%
[git-generate]
cd src/cmd/compile/internal/ir
: # We need to do the conversion in multiple steps, so we introduce
: # a temporary type alias that will start out meaning the pointer-to-struct
: # and then change to mean the interface.
rf '
mv Node OldNode
add node.go \
type Node = *OldNode
'
: # It should work to do this ex in ir, but it misses test files, due to a bug in rf.
: # Run the command in gc to handle gc's tests, and then again in ssa for ssa's tests.
cd ../gc
rf '
ex . ../arm ../riscv64 ../arm64 ../mips64 ../ppc64 ../mips ../wasm {
import "cmd/compile/internal/ir"
*ir.OldNode -> ir.Node
}
'
cd ../ssa
rf '
ex {
import "cmd/compile/internal/ir"
*ir.OldNode -> ir.Node
}
'
: # Back in ir, finish conversion clumsily with sed,
: # because type checking and circular aliases do not mix.
cd ../ir
sed -i '' '
/type Node = \*OldNode/d
s/\*OldNode/Node/g
s/^func (n Node)/func (n *OldNode)/
s/OldNode/node/g
s/type INode interface/type Node interface/
s/var _ INode = (Node)(nil)/var _ Node = (*node)(nil)/
' *.go
gofmt -w *.go
sed -i '' '
s/{Func{}, 136, 248}/{Func{}, 152, 280}/
s/{Name{}, 32, 56}/{Name{}, 44, 80}/
s/{Param{}, 24, 48}/{Param{}, 44, 88}/
s/{node{}, 76, 128}/{node{}, 88, 152}/
' sizeof_test.go
cd ../ssa
sed -i '' '
s/{LocalSlot{}, 28, 40}/{LocalSlot{}, 32, 48}/
' sizeof_test.go
cd ../gc
sed -i '' 's/\*ir.Node/ir.Node/' mkbuiltin.go
cd ../../../..
go install std cmd
cd cmd/compile
go test -u || go test -u
Change-Id: I196bbe3b648e4701662e4a2bada40bf155e2a553
Reviewed-on: https://go-review.googlesource.com/c/go/+/272935
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-11-25 01:11:56 -05:00
|
|
|
checkIdx := func(i ir.Node) ir.Node {
				// pi := p[i]
|
[dev.regabi] cmd/compile: remove ir.Nod [generated]
Rewrite all uses of ir.Nod and friends to call the IR constructors directly.
This gives the results a more specific type and will play nicely with
introduction of more specific types throughout the code in a followup CL.
Passes buildall w/ toolstash -cmp.
[git-generate]
cd src/cmd/compile/internal/gc
rf '
ex . ../ir {
import "cmd/compile/internal/ir"
import "cmd/compile/internal/types"
import "cmd/compile/internal/syntax"
import "cmd/internal/src"
var p *noder
var orig syntax.Node
var op ir.Op
var l, r ir.Node
var sym *types.Sym
p.nod(orig, op, l, r) -> ir.NodAt(p.pos(orig), op, l, r)
p.nodSym(orig, op, l, sym) -> nodlSym(p.pos(orig), op, l, sym)
var xpos src.XPos
var ns ir.Nodes
npos(xpos, nodSym(op, l, sym)) -> nodlSym(xpos, op, l, sym)
npos(xpos, liststmt(ns)) -> ir.NewBlockStmt(xpos, ns)
}
ex . ../ir {
import "cmd/compile/internal/base"
import "cmd/compile/internal/ir"
import "cmd/compile/internal/types"
var op ir.Op
var l, r ir.Node
ir.Nod(op, l, r) -> ir.NodAt(base.Pos, op, l, r)
var sym *types.Sym
nodSym(op, l, sym) -> nodlSym(base.Pos, op, l, sym)
}
ex . ../ir {
import "cmd/compile/internal/ir"
import "cmd/internal/src"
# rf overlapping match handling is not quite good enough
# for certain nested rewrites, so handle these two - which often contain other ir.NodAt calls - early.
var l, r ir.Node
var xpos src.XPos
ir.NodAt(xpos, ir.OAS, l, r) -> ir.NewAssignStmt(xpos, l, r)
ir.NodAt(xpos, ir.OIF, l, nil) -> ir.NewIfStmt(xpos, l, nil, nil)
}
ex . ../ir {
import "cmd/compile/internal/ir"
import "cmd/compile/internal/types"
import "cmd/internal/src"
var l, r ir.Node
var sym *types.Sym
var xpos src.XPos
nodlSym(xpos, ir.ODOT, l, sym) -> ir.NewSelectorExpr(xpos, ir.ODOT, l, sym)
nodlSym(xpos, ir.OXDOT, l, sym) -> ir.NewSelectorExpr(xpos, ir.OXDOT, l, sym)
nodlSym(xpos, ir.ODOTPTR, l, sym) -> ir.NewSelectorExpr(xpos, ir.ODOTPTR, l, sym)
nodlSym(xpos, ir.OGOTO, nil, sym) -> ir.NewBranchStmt(xpos, ir.OGOTO, sym)
nodlSym(xpos, ir.ORETJMP, nil, sym) -> ir.NewBranchStmt(xpos, ir.ORETJMP, sym)
nodlSym(xpos, ir.OLABEL, nil, sym) -> ir.NewLabelStmt(xpos, sym)
nodlSym(xpos, ir.OSTRUCTKEY, l, sym) -> ir.NewStructKeyExpr(xpos, sym, l)
ir.NodAt(xpos, ir.OADD, l, r) -> ir.NewBinaryExpr(xpos, ir.OADD, l, r)
ir.NodAt(xpos, ir.OAND, l, r) -> ir.NewBinaryExpr(xpos, ir.OAND, l, r)
ir.NodAt(xpos, ir.OANDNOT, l, r) -> ir.NewBinaryExpr(xpos, ir.OANDNOT, l, r)
ir.NodAt(xpos, ir.ODIV, l, r) -> ir.NewBinaryExpr(xpos, ir.ODIV, l, r)
ir.NodAt(xpos, ir.OEQ, l, r) -> ir.NewBinaryExpr(xpos, ir.OEQ, l, r)
ir.NodAt(xpos, ir.OGE, l, r) -> ir.NewBinaryExpr(xpos, ir.OGE, l, r)
ir.NodAt(xpos, ir.OGT, l, r) -> ir.NewBinaryExpr(xpos, ir.OGT, l, r)
ir.NodAt(xpos, ir.OLE, l, r) -> ir.NewBinaryExpr(xpos, ir.OLE, l, r)
ir.NodAt(xpos, ir.OLSH, l, r) -> ir.NewBinaryExpr(xpos, ir.OLSH, l, r)
ir.NodAt(xpos, ir.OLT, l, r) -> ir.NewBinaryExpr(xpos, ir.OLT, l, r)
ir.NodAt(xpos, ir.OMOD, l, r) -> ir.NewBinaryExpr(xpos, ir.OMOD, l, r)
ir.NodAt(xpos, ir.OMUL, l, r) -> ir.NewBinaryExpr(xpos, ir.OMUL, l, r)
ir.NodAt(xpos, ir.ONE, l, r) -> ir.NewBinaryExpr(xpos, ir.ONE, l, r)
ir.NodAt(xpos, ir.OOR, l, r) -> ir.NewBinaryExpr(xpos, ir.OOR, l, r)
ir.NodAt(xpos, ir.ORSH, l, r) -> ir.NewBinaryExpr(xpos, ir.ORSH, l, r)
ir.NodAt(xpos, ir.OSUB, l, r) -> ir.NewBinaryExpr(xpos, ir.OSUB, l, r)
ir.NodAt(xpos, ir.OXOR, l, r) -> ir.NewBinaryExpr(xpos, ir.OXOR, l, r)
ir.NodAt(xpos, ir.OCOPY, l, r) -> ir.NewBinaryExpr(xpos, ir.OCOPY, l, r)
ir.NodAt(xpos, ir.OCOMPLEX, l, r) -> ir.NewBinaryExpr(xpos, ir.OCOMPLEX, l, r)
ir.NodAt(xpos, ir.OEFACE, l, r) -> ir.NewBinaryExpr(xpos, ir.OEFACE, l, r)
ir.NodAt(xpos, ir.OADDR, l, nil) -> ir.NewAddrExpr(xpos, l)
ir.NodAt(xpos, ir.OADDSTR, nil, nil) -> ir.NewAddStringExpr(xpos, nil)
ir.NodAt(xpos, ir.OANDAND, l, r) -> ir.NewLogicalExpr(xpos, ir.OANDAND, l, r)
ir.NodAt(xpos, ir.OOROR, l, r) -> ir.NewLogicalExpr(xpos, ir.OOROR, l, r)
ir.NodAt(xpos, ir.OARRAYLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OARRAYLIT, nil, nil)
ir.NodAt(xpos, ir.OCOMPLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OCOMPLIT, nil, nil)
ir.NodAt(xpos, ir.OMAPLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OMAPLIT, nil, nil)
ir.NodAt(xpos, ir.OSTRUCTLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OSTRUCTLIT, nil, nil)
ir.NodAt(xpos, ir.OSLICELIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OSLICELIT, nil, nil)
ir.NodAt(xpos, ir.OARRAYLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OARRAYLIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OCOMPLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OCOMPLIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OMAPLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OMAPLIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OSTRUCTLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OSTRUCTLIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OSLICELIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OSLICELIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OAS2, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2, nil, nil)
ir.NodAt(xpos, ir.OAS2DOTTYPE, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2DOTTYPE, nil, nil)
ir.NodAt(xpos, ir.OAS2FUNC, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2FUNC, nil, nil)
ir.NodAt(xpos, ir.OAS2MAPR, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2MAPR, nil, nil)
ir.NodAt(xpos, ir.OAS2RECV, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2RECV, nil, nil)
ir.NodAt(xpos, ir.OSELRECV2, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OSELRECV2, nil, nil)
ir.NodAt(xpos, ir.OASOP, l, r) -> ir.NewAssignOpStmt(xpos, ir.OXXX, l, r)
ir.NodAt(xpos, ir.OBITNOT, l, nil) -> ir.NewUnaryExpr(xpos, ir.OBITNOT, l)
ir.NodAt(xpos, ir.ONEG, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONEG, l)
ir.NodAt(xpos, ir.ONOT, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONOT, l)
ir.NodAt(xpos, ir.OPLUS, l, nil) -> ir.NewUnaryExpr(xpos, ir.OPLUS, l)
ir.NodAt(xpos, ir.ORECV, l, nil) -> ir.NewUnaryExpr(xpos, ir.ORECV, l)
ir.NodAt(xpos, ir.OALIGNOF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OALIGNOF, l)
ir.NodAt(xpos, ir.OCAP, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCAP, l)
ir.NodAt(xpos, ir.OCLOSE, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCLOSE, l)
ir.NodAt(xpos, ir.OIMAG, l, nil) -> ir.NewUnaryExpr(xpos, ir.OIMAG, l)
ir.NodAt(xpos, ir.OLEN, l, nil) -> ir.NewUnaryExpr(xpos, ir.OLEN, l)
ir.NodAt(xpos, ir.ONEW, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONEW, l)
ir.NodAt(xpos, ir.ONEWOBJ, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONEWOBJ, l)
ir.NodAt(xpos, ir.OOFFSETOF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OOFFSETOF, l)
ir.NodAt(xpos, ir.OPANIC, l, nil) -> ir.NewUnaryExpr(xpos, ir.OPANIC, l)
ir.NodAt(xpos, ir.OREAL, l, nil) -> ir.NewUnaryExpr(xpos, ir.OREAL, l)
ir.NodAt(xpos, ir.OSIZEOF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OSIZEOF, l)
ir.NodAt(xpos, ir.OCHECKNIL, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCHECKNIL, l)
ir.NodAt(xpos, ir.OCFUNC, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCFUNC, l)
ir.NodAt(xpos, ir.OIDATA, l, nil) -> ir.NewUnaryExpr(xpos, ir.OIDATA, l)
ir.NodAt(xpos, ir.OITAB, l, nil) -> ir.NewUnaryExpr(xpos, ir.OITAB, l)
ir.NodAt(xpos, ir.OSPTR, l, nil) -> ir.NewUnaryExpr(xpos, ir.OSPTR, l)
ir.NodAt(xpos, ir.OVARDEF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OVARDEF, l)
ir.NodAt(xpos, ir.OVARKILL, l, nil) -> ir.NewUnaryExpr(xpos, ir.OVARKILL, l)
ir.NodAt(xpos, ir.OVARLIVE, l, nil) -> ir.NewUnaryExpr(xpos, ir.OVARLIVE, l)
ir.NodAt(xpos, ir.OBLOCK, nil, nil) -> ir.NewBlockStmt(xpos, nil)
ir.NodAt(xpos, ir.OBREAK, nil, nil) -> ir.NewBranchStmt(xpos, ir.OBREAK, nil)
ir.NodAt(xpos, ir.OCONTINUE, nil, nil) -> ir.NewBranchStmt(xpos, ir.OCONTINUE, nil)
ir.NodAt(xpos, ir.OFALL, nil, nil) -> ir.NewBranchStmt(xpos, ir.OFALL, nil)
ir.NodAt(xpos, ir.OGOTO, nil, nil) -> ir.NewBranchStmt(xpos, ir.OGOTO, nil)
ir.NodAt(xpos, ir.ORETJMP, nil, nil) -> ir.NewBranchStmt(xpos, ir.ORETJMP, nil)
ir.NodAt(xpos, ir.OCALL, l, nil) -> ir.NewCallExpr(xpos, ir.OCALL, l, nil)
ir.NodAt(xpos, ir.OCALLFUNC, l, nil) -> ir.NewCallExpr(xpos, ir.OCALLFUNC, l, nil)
ir.NodAt(xpos, ir.OCALLINTER, l, nil) -> ir.NewCallExpr(xpos, ir.OCALLINTER, l, nil)
ir.NodAt(xpos, ir.OCALLMETH, l, nil) -> ir.NewCallExpr(xpos, ir.OCALLMETH, l, nil)
ir.NodAt(xpos, ir.OAPPEND, l, nil) -> ir.NewCallExpr(xpos, ir.OAPPEND, l, nil)
ir.NodAt(xpos, ir.ODELETE, l, nil) -> ir.NewCallExpr(xpos, ir.ODELETE, l, nil)
ir.NodAt(xpos, ir.OGETG, l, nil) -> ir.NewCallExpr(xpos, ir.OGETG, l, nil)
ir.NodAt(xpos, ir.OMAKE, l, nil) -> ir.NewCallExpr(xpos, ir.OMAKE, l, nil)
ir.NodAt(xpos, ir.OPRINT, l, nil) -> ir.NewCallExpr(xpos, ir.OPRINT, l, nil)
ir.NodAt(xpos, ir.OPRINTN, l, nil) -> ir.NewCallExpr(xpos, ir.OPRINTN, l, nil)
ir.NodAt(xpos, ir.ORECOVER, l, nil) -> ir.NewCallExpr(xpos, ir.ORECOVER, l, nil)
ir.NodAt(xpos, ir.OCASE, nil, nil) -> ir.NewCaseStmt(xpos, nil, nil)
ir.NodAt(xpos, ir.OCONV, l, nil) -> ir.NewConvExpr(xpos, ir.OCONV, nil, l)
ir.NodAt(xpos, ir.OCONVIFACE, l, nil) -> ir.NewConvExpr(xpos, ir.OCONVIFACE, nil, l)
ir.NodAt(xpos, ir.OCONVNOP, l, nil) -> ir.NewConvExpr(xpos, ir.OCONVNOP, nil, l)
ir.NodAt(xpos, ir.ORUNESTR, l, nil) -> ir.NewConvExpr(xpos, ir.ORUNESTR, nil, l)
ir.NodAt(xpos, ir.ODCL, l, nil) -> ir.NewDecl(xpos, ir.ODCL, l)
ir.NodAt(xpos, ir.ODCLCONST, l, nil) -> ir.NewDecl(xpos, ir.ODCLCONST, l)
ir.NodAt(xpos, ir.ODCLTYPE, l, nil) -> ir.NewDecl(xpos, ir.ODCLTYPE, l)
ir.NodAt(xpos, ir.ODCLFUNC, nil, nil) -> ir.NewFunc(xpos)
ir.NodAt(xpos, ir.ODEFER, l, nil) -> ir.NewGoDeferStmt(xpos, ir.ODEFER, l)
ir.NodAt(xpos, ir.OGO, l, nil) -> ir.NewGoDeferStmt(xpos, ir.OGO, l)
ir.NodAt(xpos, ir.ODEREF, l, nil) -> ir.NewStarExpr(xpos, l)
ir.NodAt(xpos, ir.ODOT, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOT, l, nil)
ir.NodAt(xpos, ir.ODOTPTR, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTPTR, l, nil)
ir.NodAt(xpos, ir.ODOTMETH, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTMETH, l, nil)
ir.NodAt(xpos, ir.ODOTINTER, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTINTER, l, nil)
ir.NodAt(xpos, ir.OXDOT, l, nil) -> ir.NewSelectorExpr(xpos, ir.OXDOT, l, nil)
ir.NodAt(xpos, ir.ODOTTYPE, l, nil) -> ir.NewTypeAssertExpr(xpos, l, nil)
ir.NodAt(xpos, ir.ODOTTYPE, l, r) -> ir.NewTypeAssertExpr(xpos, l, r.(ir.Ntype))
ir.NodAt(xpos, ir.OFOR, l, r) -> ir.NewForStmt(xpos, nil, l, r, nil)
ir.NodAt(xpos, ir.OINDEX, l, r) -> ir.NewIndexExpr(xpos, l, r)
ir.NodAt(xpos, ir.OINLMARK, nil, nil) -> ir.NewInlineMarkStmt(xpos, types.BADWIDTH)
ir.NodAt(xpos, ir.OKEY, l, r) -> ir.NewKeyExpr(xpos, l, r)
ir.NodAt(xpos, ir.OLABEL, nil, nil) -> ir.NewLabelStmt(xpos, nil)
ir.NodAt(xpos, ir.OMAKECHAN, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKECHAN, l, r)
ir.NodAt(xpos, ir.OMAKEMAP, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKEMAP, l, r)
ir.NodAt(xpos, ir.OMAKESLICE, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKESLICE, l, r)
ir.NodAt(xpos, ir.OMAKESLICECOPY, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKESLICECOPY, l, r)
ir.NodAt(xpos, ir.ONIL, nil, nil) -> ir.NewNilExpr(xpos)
ir.NodAt(xpos, ir.OPACK, nil, nil) -> ir.NewPkgName(xpos, nil, nil)
ir.NodAt(xpos, ir.OPAREN, l, nil) -> ir.NewParenExpr(xpos, l)
ir.NodAt(xpos, ir.ORANGE, nil, r) -> ir.NewRangeStmt(xpos, nil, r, nil)
ir.NodAt(xpos, ir.ORESULT, nil, nil) -> ir.NewResultExpr(xpos, nil, types.BADWIDTH)
ir.NodAt(xpos, ir.ORETURN, nil, nil) -> ir.NewReturnStmt(xpos, nil)
ir.NodAt(xpos, ir.OSELECT, nil, nil) -> ir.NewSelectStmt(xpos, nil)
ir.NodAt(xpos, ir.OSEND, l, r) -> ir.NewSendStmt(xpos, l, r)
ir.NodAt(xpos, ir.OSLICE, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE, l)
ir.NodAt(xpos, ir.OSLICEARR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICEARR, l)
ir.NodAt(xpos, ir.OSLICESTR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICESTR, l)
ir.NodAt(xpos, ir.OSLICE3, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE3, l)
ir.NodAt(xpos, ir.OSLICE3ARR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE3ARR, l)
ir.NodAt(xpos, ir.OSLICEHEADER, l, nil) -> ir.NewSliceHeaderExpr(xpos, nil, l, nil, nil)
ir.NodAt(xpos, ir.OSWITCH, l, nil) -> ir.NewSwitchStmt(xpos, l, nil)
ir.NodAt(xpos, ir.OINLCALL, nil, nil) -> ir.NewInlinedCallExpr(xpos, nil, nil)
}
rm noder.nod noder.nodSym nodSym nodlSym ir.NodAt ir.Nod
'
Change-Id: Ibf1eb708de8463ae74ccc47d7966cc263a18295e
Reviewed-on: https://go-review.googlesource.com/c/go/+/277933
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-12-22 23:55:29 -05:00
|
|
|
pi := ir.NewIndexExpr(base.Pos, np, i)
				pi.SetBounded(true) // i is known to be in range; no bounds check needed
				pi.SetType(t.Elem())
				// qi := q[i]
ir.NodAt(xpos, ir.OCALLINTER, l, nil) -> ir.NewCallExpr(xpos, ir.OCALLINTER, l, nil)
ir.NodAt(xpos, ir.OCALLMETH, l, nil) -> ir.NewCallExpr(xpos, ir.OCALLMETH, l, nil)
ir.NodAt(xpos, ir.OAPPEND, l, nil) -> ir.NewCallExpr(xpos, ir.OAPPEND, l, nil)
ir.NodAt(xpos, ir.ODELETE, l, nil) -> ir.NewCallExpr(xpos, ir.ODELETE, l, nil)
ir.NodAt(xpos, ir.OGETG, l, nil) -> ir.NewCallExpr(xpos, ir.OGETG, l, nil)
ir.NodAt(xpos, ir.OMAKE, l, nil) -> ir.NewCallExpr(xpos, ir.OMAKE, l, nil)
ir.NodAt(xpos, ir.OPRINT, l, nil) -> ir.NewCallExpr(xpos, ir.OPRINT, l, nil)
ir.NodAt(xpos, ir.OPRINTN, l, nil) -> ir.NewCallExpr(xpos, ir.OPRINTN, l, nil)
ir.NodAt(xpos, ir.ORECOVER, l, nil) -> ir.NewCallExpr(xpos, ir.ORECOVER, l, nil)
ir.NodAt(xpos, ir.OCASE, nil, nil) -> ir.NewCaseStmt(xpos, nil, nil)
ir.NodAt(xpos, ir.OCONV, l, nil) -> ir.NewConvExpr(xpos, ir.OCONV, nil, l)
ir.NodAt(xpos, ir.OCONVIFACE, l, nil) -> ir.NewConvExpr(xpos, ir.OCONVIFACE, nil, l)
ir.NodAt(xpos, ir.OCONVNOP, l, nil) -> ir.NewConvExpr(xpos, ir.OCONVNOP, nil, l)
ir.NodAt(xpos, ir.ORUNESTR, l, nil) -> ir.NewConvExpr(xpos, ir.ORUNESTR, nil, l)
ir.NodAt(xpos, ir.ODCL, l, nil) -> ir.NewDecl(xpos, ir.ODCL, l)
ir.NodAt(xpos, ir.ODCLCONST, l, nil) -> ir.NewDecl(xpos, ir.ODCLCONST, l)
ir.NodAt(xpos, ir.ODCLTYPE, l, nil) -> ir.NewDecl(xpos, ir.ODCLTYPE, l)
ir.NodAt(xpos, ir.ODCLFUNC, nil, nil) -> ir.NewFunc(xpos)
ir.NodAt(xpos, ir.ODEFER, l, nil) -> ir.NewGoDeferStmt(xpos, ir.ODEFER, l)
ir.NodAt(xpos, ir.OGO, l, nil) -> ir.NewGoDeferStmt(xpos, ir.OGO, l)
ir.NodAt(xpos, ir.ODEREF, l, nil) -> ir.NewStarExpr(xpos, l)
ir.NodAt(xpos, ir.ODOT, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOT, l, nil)
ir.NodAt(xpos, ir.ODOTPTR, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTPTR, l, nil)
ir.NodAt(xpos, ir.ODOTMETH, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTMETH, l, nil)
ir.NodAt(xpos, ir.ODOTINTER, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTINTER, l, nil)
ir.NodAt(xpos, ir.OXDOT, l, nil) -> ir.NewSelectorExpr(xpos, ir.OXDOT, l, nil)
ir.NodAt(xpos, ir.ODOTTYPE, l, nil) -> ir.NewTypeAssertExpr(xpos, l, nil)
ir.NodAt(xpos, ir.ODOTTYPE, l, r) -> ir.NewTypeAssertExpr(xpos, l, r.(ir.Ntype))
ir.NodAt(xpos, ir.OFOR, l, r) -> ir.NewForStmt(xpos, nil, l, r, nil)
ir.NodAt(xpos, ir.OINDEX, l, r) -> ir.NewIndexExpr(xpos, l, r)
ir.NodAt(xpos, ir.OINLMARK, nil, nil) -> ir.NewInlineMarkStmt(xpos, types.BADWIDTH)
ir.NodAt(xpos, ir.OKEY, l, r) -> ir.NewKeyExpr(xpos, l, r)
ir.NodAt(xpos, ir.OLABEL, nil, nil) -> ir.NewLabelStmt(xpos, nil)
ir.NodAt(xpos, ir.OMAKECHAN, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKECHAN, l, r)
ir.NodAt(xpos, ir.OMAKEMAP, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKEMAP, l, r)
ir.NodAt(xpos, ir.OMAKESLICE, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKESLICE, l, r)
ir.NodAt(xpos, ir.OMAKESLICECOPY, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKESLICECOPY, l, r)
ir.NodAt(xpos, ir.ONIL, nil, nil) -> ir.NewNilExpr(xpos)
ir.NodAt(xpos, ir.OPACK, nil, nil) -> ir.NewPkgName(xpos, nil, nil)
ir.NodAt(xpos, ir.OPAREN, l, nil) -> ir.NewParenExpr(xpos, l)
ir.NodAt(xpos, ir.ORANGE, nil, r) -> ir.NewRangeStmt(xpos, nil, r, nil)
ir.NodAt(xpos, ir.ORESULT, nil, nil) -> ir.NewResultExpr(xpos, nil, types.BADWIDTH)
ir.NodAt(xpos, ir.ORETURN, nil, nil) -> ir.NewReturnStmt(xpos, nil)
ir.NodAt(xpos, ir.OSELECT, nil, nil) -> ir.NewSelectStmt(xpos, nil)
ir.NodAt(xpos, ir.OSEND, l, r) -> ir.NewSendStmt(xpos, l, r)
ir.NodAt(xpos, ir.OSLICE, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE, l)
ir.NodAt(xpos, ir.OSLICEARR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICEARR, l)
ir.NodAt(xpos, ir.OSLICESTR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICESTR, l)
ir.NodAt(xpos, ir.OSLICE3, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE3, l)
ir.NodAt(xpos, ir.OSLICE3ARR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE3ARR, l)
ir.NodAt(xpos, ir.OSLICEHEADER, l, nil) -> ir.NewSliceHeaderExpr(xpos, nil, l, nil, nil)
ir.NodAt(xpos, ir.OSWITCH, l, nil) -> ir.NewSwitchStmt(xpos, l, nil)
ir.NodAt(xpos, ir.OINLCALL, nil, nil) -> ir.NewInlinedCallExpr(xpos, nil, nil)
}
rm noder.nod noder.nodSym nodSym nodlSym ir.NodAt ir.Nod
'
Change-Id: Ibf1eb708de8463ae74ccc47d7966cc263a18295e
Reviewed-on: https://go-review.googlesource.com/c/go/+/277933
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
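
To make the ex rules above concrete: each rule is a pattern -> replacement pair matched against typed Go syntax, so a single call site gets rewritten in stages. A sketch, with x and y standing for arbitrary operand expressions (not names from the script):

	// Before the script runs: generic constructor, operator as an argument.
	n := ir.Nod(ir.OADD, x, y)

	// After the base.Pos rule: the position is made explicit.
	n := ir.NodAt(base.Pos, ir.OADD, x, y)

	// After the OADD rule: the specific typed constructor.
	n := ir.NewBinaryExpr(base.Pos, ir.OADD, x, y)
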
		qi := ir.NewIndexExpr(base.Pos, nq, i)
cmd/compile: eliminate some array equality alg loops

type T [3]string

Prior to this change, we generated this equality alg for T:

func eqT(p, q *T) (r bool) {
	for i := range *p {
		if len(p[i]) == len(q[i]) {
		} else {
			return
		}
	}
	for j := range *p {
		if runtime.memeq(p[j].ptr, q[j].ptr, len(p[j])) {
		} else {
			return
		}
	}
	return true
}

That first loop can be profitably eliminated;
it's cheaper to spell out 3 length equality checks.
We now generate:

func eqT(p, q *T) (r bool) {
	if len(p[0]) == len(q[0]) &&
		len(p[1]) == len(q[1]) &&
		len(p[2]) == len(q[2]) {
	} else {
		return
	}
	for i := 0; i < len(p); i++ {
		if runtime.memeq(p[i].ptr, q[i].ptr, len(p[i])) {
		} else {
			return
		}
	}
	return true
}

We now also eliminate loops for small float arrays as well,
and for any array of size 1.
These cutoffs were selected to minimize code size on amd64
at this moment, for lack of a more compelling methodology.
Any smallish number would do.
The switch from range loops to plain for loops allowed me
to use a temp instead of a named var, which eliminated
a pointless argument to checkAll.
The code to construct them is also a bit clearer, in my opinion.
Change-Id: I1bdd8ee4a2739d00806e66b17a4e76b46e71231a
Reviewed-on: https://go-review.googlesource.com/c/go/+/230210
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
		qi.SetBounded(true)
		qi.SetType(t.Elem())
		return eq(pi, qi)
	}

	iterations := nelem / unroll
	iterateTo := iterations * unroll
	// If a loop is iterated only once, there shouldn't be any loop at all.
	if iterations == 1 {
		iterateTo = 0
	}

	if iterateTo > 0 {
		// Generate an unrolled for loop.
		// for i := 0; i < nelem/unroll*unroll; i += unroll
		i := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
		init := ir.NewAssignStmt(base.Pos, i, ir.NewInt(base.Pos, 0))
		cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(base.Pos, iterateTo))
		loop := ir.NewForStmt(base.Pos, nil, cond, nil, nil, false)
		loop.PtrInit().Append(init)

		// if eq(p[i+0], q[i+0]) && eq(p[i+1], q[i+1]) && ... && eq(p[i+unroll-1], q[i+unroll-1]) {
		// } else {
		//   goto neq
		// }
		for j := int64(0); j < unroll; j++ {
			// if check {} else { goto neq }
			nif := ir.NewIfStmt(base.Pos, checkIdx(i), nil, nil)
			nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
			loop.Body.Append(nif)
			post := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(base.Pos, 1)))
			loop.Body.Append(post)
		}

		fn.Body.Append(loop)

		if nelem == iterateTo {
			if last {
				fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(base.Pos, true)))
			}
			return
		}
	}

	// Generate remaining checks, if nelem is not a multiple of unroll.
	if last {
		// Do last comparison in a different manner.
		nelem--
	}
	// if eq(p[iterateTo+0], q[iterateTo+0]) && eq(p[iterateTo+1], q[iterateTo+1]) && ... {
	// } else {
	//   goto neq
	// }
	for j := iterateTo; j < nelem; j++ {
		// if check {} else { goto neq }
		nif := ir.NewIfStmt(base.Pos, checkIdx(ir.NewInt(base.Pos, j)), nil, nil)
		nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
		fn.Body.Append(nif)
	}
	if last {
		fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, checkIdx(ir.NewInt(base.Pos, nelem))))
	}
}
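
A hedged illustration of the shape this helper emits (placeholder names, not literal compiler output): for a ten-element array with unroll = 4 and last = true, checkAll produces one unrolled loop over the first eight elements, one spelled-out check for element eight, and a final check whose result is assigned straight to the result variable. In Go-source terms, with eq standing in for the supplied comparison and the neq label provided by the surrounding generated function:

	type T = string

	func eq(a, b T) bool { return a == b } // placeholder comparison

	func eqT(p, q *[10]T) (r bool) {
		for i := 0; i < 8; {
			if eq(p[i], q[i]) {
			} else {
				goto neq
			}
			i++
			// In the real output this if/i++ pair appears four times
			// per loop iteration, once per unrolled element.
		}
		// Remainder checks: nelem is not a multiple of unroll.
		if eq(p[8], q[8]) {
		} else {
			goto neq
		}
		// last == true: the final comparison assigns directly to r.
		r = eq(p[9], q[9])
		return
	neq:
		r = false
		return
	}
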

switch t.Elem().Kind() {
case types.TSTRING:
	// Do two loops. First, check that all the lengths match (cheap).
	// Second, check that all the contents match (expensive).
[dev.regabi] cmd/compile: replace *Node type with an interface Node [generated]
The plan is to introduce a Node interface that replaces the old *Node pointer-to-struct.
The previous CL defined an interface INode modeling a *Node.
This CL:
- Changes all references outside internal/ir to use INode,
along with many references inside internal/ir as well.
- Renames Node to node.
- Renames INode to Node
So now ir.Node is an interface implemented by *ir.node, which is otherwise inaccessible,
and the code outside package ir is now (clearly) using only the interface.
The usual rule is never to redefine an existing name with a new meaning,
so that old code that hasn't been updated gets an "unknown name" error
instead of more mysterious errors or silent misbehavior. That rule would
caution against replacing Node-the-struct with Node-the-interface,
as in this CL, because code that says *Node would now be using a pointer
to an interface. But this CL is being landed at the same time as another that
moves Node from gc to ir. So the net effect is to replace *gc.Node with ir.Node,
which does follow the rule: any lingering references to gc.Node will be told
it's gone, not silently start using pointers to interfaces. So the rule is followed
by the CL sequence, just not this specific CL.
Overall, the loss of inlining caused by using interfaces cuts the compiler speed
by about 6%, a not insignificant amount. However, as we convert the representation
to concrete structs that are not the giant Node over the next weeks, that speed
should come back as more of the compiler starts operating directly on concrete types
and the memory taken up by the graph of Nodes drops due to the more precise
structs. Honestly, I was expecting worse.
% benchstat bench.old bench.new
name old time/op new time/op delta
Template 168ms ± 4% 182ms ± 2% +8.34% (p=0.000 n=9+9)
Unicode 72.2ms ±10% 82.5ms ± 6% +14.38% (p=0.000 n=9+9)
GoTypes 563ms ± 8% 598ms ± 2% +6.14% (p=0.006 n=9+9)
Compiler 2.89s ± 4% 3.04s ± 2% +5.37% (p=0.000 n=10+9)
SSA 6.45s ± 4% 7.25s ± 5% +12.41% (p=0.000 n=9+10)
Flate 105ms ± 2% 115ms ± 1% +9.66% (p=0.000 n=10+8)
GoParser 144ms ±10% 152ms ± 2% +5.79% (p=0.011 n=9+8)
Reflect 345ms ± 9% 370ms ± 4% +7.28% (p=0.001 n=10+9)
Tar 149ms ± 9% 161ms ± 5% +8.05% (p=0.001 n=10+9)
XML 190ms ± 3% 209ms ± 2% +9.54% (p=0.000 n=9+8)
LinkCompiler 327ms ± 2% 325ms ± 2% ~ (p=0.382 n=8+8)
ExternalLinkCompiler 1.77s ± 4% 1.73s ± 6% ~ (p=0.113 n=9+10)
LinkWithoutDebugCompiler 214ms ± 4% 211ms ± 2% ~ (p=0.360 n=10+8)
StdCmd 14.8s ± 3% 15.9s ± 1% +6.98% (p=0.000 n=10+9)
[Geo mean] 480ms 510ms +6.31%
name old user-time/op new user-time/op delta
Template 223ms ± 3% 237ms ± 3% +6.16% (p=0.000 n=9+10)
Unicode 103ms ± 6% 113ms ± 3% +9.53% (p=0.000 n=9+9)
GoTypes 758ms ± 8% 800ms ± 2% +5.55% (p=0.003 n=10+9)
Compiler 3.95s ± 2% 4.12s ± 2% +4.34% (p=0.000 n=10+9)
SSA 9.43s ± 1% 9.74s ± 4% +3.25% (p=0.000 n=8+10)
Flate 132ms ± 2% 141ms ± 2% +6.89% (p=0.000 n=9+9)
GoParser 177ms ± 9% 183ms ± 4% ~ (p=0.050 n=9+9)
Reflect 467ms ±10% 495ms ± 7% +6.17% (p=0.029 n=10+10)
Tar 183ms ± 9% 197ms ± 5% +7.92% (p=0.001 n=10+10)
XML 249ms ± 5% 268ms ± 4% +7.82% (p=0.000 n=10+9)
LinkCompiler 544ms ± 5% 544ms ± 6% ~ (p=0.863 n=9+9)
ExternalLinkCompiler 1.79s ± 4% 1.75s ± 6% ~ (p=0.075 n=10+10)
LinkWithoutDebugCompiler 248ms ± 6% 246ms ± 2% ~ (p=0.965 n=10+8)
[Geo mean] 483ms 504ms +4.41%
[git-generate]
cd src/cmd/compile/internal/ir
: # We need to do the conversion in multiple steps, so we introduce
: # a temporary type alias that will start out meaning the pointer-to-struct
: # and then change to mean the interface.
rf '
mv Node OldNode
add node.go \
type Node = *OldNode
'
: # It should work to do this ex in ir, but it misses test files, due to a bug in rf.
: # Run the command in gc to handle gc's tests, and then again in ssa for ssa's tests.
cd ../gc
rf '
ex . ../arm ../riscv64 ../arm64 ../mips64 ../ppc64 ../mips ../wasm {
import "cmd/compile/internal/ir"
*ir.OldNode -> ir.Node
}
'
cd ../ssa
rf '
ex {
import "cmd/compile/internal/ir"
*ir.OldNode -> ir.Node
}
'
: # Back in ir, finish conversion clumsily with sed,
: # because type checking and circular aliases do not mix.
cd ../ir
sed -i '' '
/type Node = \*OldNode/d
s/\*OldNode/Node/g
s/^func (n Node)/func (n *OldNode)/
s/OldNode/node/g
s/type INode interface/type Node interface/
s/var _ INode = (Node)(nil)/var _ Node = (*node)(nil)/
' *.go
gofmt -w *.go
sed -i '' '
s/{Func{}, 136, 248}/{Func{}, 152, 280}/
s/{Name{}, 32, 56}/{Name{}, 44, 80}/
s/{Param{}, 24, 48}/{Param{}, 44, 88}/
s/{node{}, 76, 128}/{node{}, 88, 152}/
' sizeof_test.go
cd ../ssa
sed -i '' '
s/{LocalSlot{}, 28, 40}/{LocalSlot{}, 32, 48}/
' sizeof_test.go
cd ../gc
sed -i '' 's/\*ir.Node/ir.Node/' mkbuiltin.go
cd ../../../..
go install std cmd
cd cmd/compile
go test -u || go test -u
Change-Id: I196bbe3b648e4701662e4a2bada40bf155e2a553
Reviewed-on: https://go-review.googlesource.com/c/go/+/272935
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
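
The alias trick the script relies on, in miniature (a sketch under the names the message uses, not the actual ir source):

	package ir

	type Op uint8

	// Stage 1: node is the old concrete struct, and Node is a type
	// alias for *node, so callers can be rewritten to say ir.Node
	// without changing what the type means.
	type node struct{ op Op }

	type Node = *node

	func (n *node) Op() Op { return n.op }

	// Stage 2 (applied once every caller says Node): the alias is
	// deleted and replaced by an interface with the same method set,
	// leaving *node as an ordinary, unexported implementation:
	//
	//	type Node interface{ Op() Op }
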
	checkAll(3, false, func(pi, qi ir.Node) ir.Node {
		// Compare lengths.
		eqlen, _ := compare.EqString(pi, qi)
		return eqlen
	})
	checkAll(1, true, func(pi, qi ir.Node) ir.Node {
		// Compare contents.
		_, eqmem := compare.EqString(pi, qi)
		return eqmem
	})
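
Putting the two passes together, a hedged sketch of the code shape this yields for type T [3]string. Each check is emitted as its own if statement, and p[i] == q[i] stands in for the runtime memory-equality call on the string data:

	func eqT(p, q *[3]string) (r bool) {
		// Pass 1, unroll 3: with only three elements there is no loop;
		// the three length checks are spelled out.
		if len(p[0]) == len(q[0]) {
		} else {
			goto neq
		}
		if len(p[1]) == len(q[1]) {
		} else {
			goto neq
		}
		if len(p[2]) == len(q[2]) {
		} else {
			goto neq
		}
		// Pass 2, unroll 1: a plain loop over the contents.
		for i := 0; i < 3; i++ {
			if p[i] == q[i] {
			} else {
				goto neq
			}
		}
		r = true
		return
	neq:
		r = false
		return
	}
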
case types.TFLOAT32, types.TFLOAT64:
	checkAll(2, true, func(pi, qi ir.Node) ir.Node {
		// p[i] == q[i]
		return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
cmd/compile: eliminate some array equality alg loops
type T [3]string
Prior to this change, we generated this equality alg for T:
func eqT(p, q *T) (r bool) {
for i := range *p {
if len(p[i]) == len(q[i]) {
} else {
return
}
}
for j := range *p {
if runtime.memeq(p[j].ptr, q[j].ptr, len(p[j])) {
} else {
return
}
}
return true
}
That first loop can be profitably eliminated;
it's cheaper to spell out 3 length equality checks.
We now generate:
func eqT(p, q *T) (r bool) {
if len(p[0]) == len(q[0]) &&
len(p[1]) == len(q[1]) &&
len(p[2]) == len(q[2]) {
} else {
return
}
for i := 0; i < len(p); i++ {
if runtime.memeq(p[j].ptr, q[j].ptr, len(p[j])) {
} else {
return
}
}
return true
}
We now also eliminate loops for small float arrays as well,
and for any array of size 1.
These cutoffs were selected to minimize code size on amd64
at this moment, for lack of a more compelling methodology.
Any smallish number would do.
The switch from range loops to plain for loops allowed me
to use a temp instead of a named var, which eliminated
a pointless argument to checkAll.
The code to construct them is also a bit clearer, in my opinion.
Change-Id: I1bdd8ee4a2739d00806e66b17a4e76b46e71231a
Reviewed-on: https://go-review.googlesource.com/c/go/+/230210
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
2020-04-25 18:14:42 -07:00
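For concreteness, a sketch of the size-1 case this CL also covers, in the same pseudocode style as the examples above (T1 and eqT1 are hypothetical names, not symbols from the CL). With a single element there is no loop worth keeping, so the body reduces to one length check and one memory comparison:

type T1 [1]string

func eqT1(p, q *T1) (r bool) {
	if len(p[0]) == len(q[0]) {
	} else {
		return
	}
	if runtime.memeq(p[0].ptr, q[0].ptr, len(p[0])) {
	} else {
		return
	}
	return true
}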
|
|
|
})
|
2023-05-08 21:28:43 +00:00
|
|
|
case types.TSTRUCT:
|
|
|
|
|
isCall := func(n ir.Node) bool {
|
|
|
|
|
return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
|
|
|
|
|
}
|
|
|
|
|
var expr ir.Node
|
|
|
|
|
var hasCallExprs bool
|
|
|
|
|
allCallExprs := true
|
|
|
|
|
and := func(cond ir.Node) {
|
|
|
|
|
if expr == nil {
|
|
|
|
|
expr = cond
|
|
|
|
|
} else {
|
|
|
|
|
expr = ir.NewLogicalExpr(base.Pos, ir.OANDAND, expr, cond)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
var tmpPos src.XPos
|
|
|
|
|
pi := ir.NewIndexExpr(tmpPos, np, ir.NewInt(tmpPos, 0))
|
|
|
|
|
pi.SetBounded(true)
|
|
|
|
|
pi.SetType(t.Elem())
|
|
|
|
|
qi := ir.NewIndexExpr(tmpPos, nq, ir.NewInt(tmpPos, 0))
|
|
|
|
|
qi.SetBounded(true)
|
|
|
|
|
qi.SetType(t.Elem())
|
|
|
|
|
flatConds, canPanic := compare.EqStruct(t.Elem(), pi, qi)
|
|
|
|
|
for _, c := range flatConds {
|
|
|
|
|
if isCall(c) {
|
|
|
|
|
hasCallExprs = true
|
|
|
|
|
} else {
|
|
|
|
|
allCallExprs = false
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if !hasCallExprs || allCallExprs || canPanic {
|
|
|
|
|
checkAll(1, true, func(pi, qi ir.Node) ir.Node {
|
|
|
|
|
// p[i] == q[i]
|
|
|
|
|
return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
|
|
|
|
|
})
|
|
|
|
|
} else {
|
|
|
|
|
checkAll(4, false, func(pi, qi ir.Node) ir.Node {
|
|
|
|
|
expr = nil
|
|
|
|
|
flatConds, _ := compare.EqStruct(t.Elem(), pi, qi)
|
|
|
|
|
if len(flatConds) == 0 {
|
|
|
|
|
return ir.NewBool(base.Pos, true)
|
|
|
|
|
}
|
|
|
|
|
for _, c := range flatConds {
|
|
|
|
|
if !isCall(c) {
|
|
|
|
|
and(c)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return expr
|
|
|
|
|
})
|
|
|
|
|
checkAll(2, true, func(pi, qi ir.Node) ir.Node {
|
|
|
|
|
expr = nil
|
|
|
|
|
flatConds, _ := compare.EqStruct(t.Elem(), pi, qi)
|
|
|
|
|
for _, c := range flatConds {
|
|
|
|
|
if isCall(c) {
|
|
|
|
|
and(c)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return expr
|
|
|
|
|
})
|
|
|
|
|
}
|
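Putting the two branches above together: when a struct element type mixes cheap inline conditions with runtime-call conditions and cannot panic on compare, the first checkAll pass (unroll factor 4) emits only the non-call conditions, and the second pass (unroll factor 2) emits only the calls. A sketch of the resulting shape, in the pseudocode style of the commit messages in this file (S, T, and eqT are hypothetical, and the unrolling is elided):

type S struct {
	a uint64 // compared inline
	b string // comparison needs a runtime call
}
type T [8]S

func eqT(p, q *T) (r bool) {
	// pass 1: checkAll(4, false, ...), non-call conditions only
	for i := 0; i < len(p); i++ {
		if p[i].a == q[i].a {
		} else {
			return
		}
	}
	// pass 2: checkAll(2, true, ...), call conditions only
	for i := 0; i < len(p); i++ {
		if runtime.memeq(p[i].b.ptr, q[i].b.ptr, len(p[i].b)) {
		} else {
			return
		}
	}
	return true
}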
2020-04-24 15:59:17 -07:00
|
|
|
default:
|
[dev.regabi] cmd/compile: replace *Node type with an interface Node [generated]
The plan is to introduce a Node interface that replaces the old *Node pointer-to-struct.
The previous CL defined an interface INode modeling a *Node.
This CL:
- Changes all references outside internal/ir to use INode,
along with many references inside internal/ir as well.
- Renames Node to node.
- Renames INode to Node
So now ir.Node is an interface implemented by *ir.node, which is otherwise inaccessible,
and the code outside package ir is now (clearly) using only the interface.
The usual rule is never to redefine an existing name with a new meaning,
so that old code that hasn't been updated gets an "unknown name" error
instead of more mysterious errors or silent misbehavior. That rule would
caution against replacing Node-the-struct with Node-the-interface,
as in this CL, because code that says *Node would now be using a pointer
to an interface. But this CL is being landed at the same time as another that
moves Node from gc to ir. So the net effect is to replace *gc.Node with ir.Node,
which does follow the rule: any lingering references to gc.Node will be told
it's gone, not silently start using pointers to interfaces. So the rule is followed
by the CL sequence, just not this specific CL.
Overall, the loss of inlining caused by using interfaces cuts the compiler speed
by about 6%, a not insignificant amount. However, as we convert the representation
to concrete structs that are not the giant Node over the next weeks, that speed
should come back as more of the compiler starts operating directly on concrete types
and the memory taken up by the graph of Nodes drops due to the more precise
structs. Honestly, I was expecting worse.
% benchstat bench.old bench.new
name old time/op new time/op delta
Template 168ms ± 4% 182ms ± 2% +8.34% (p=0.000 n=9+9)
Unicode 72.2ms ±10% 82.5ms ± 6% +14.38% (p=0.000 n=9+9)
GoTypes 563ms ± 8% 598ms ± 2% +6.14% (p=0.006 n=9+9)
Compiler 2.89s ± 4% 3.04s ± 2% +5.37% (p=0.000 n=10+9)
SSA 6.45s ± 4% 7.25s ± 5% +12.41% (p=0.000 n=9+10)
Flate 105ms ± 2% 115ms ± 1% +9.66% (p=0.000 n=10+8)
GoParser 144ms ±10% 152ms ± 2% +5.79% (p=0.011 n=9+8)
Reflect 345ms ± 9% 370ms ± 4% +7.28% (p=0.001 n=10+9)
Tar 149ms ± 9% 161ms ± 5% +8.05% (p=0.001 n=10+9)
XML 190ms ± 3% 209ms ± 2% +9.54% (p=0.000 n=9+8)
LinkCompiler 327ms ± 2% 325ms ± 2% ~ (p=0.382 n=8+8)
ExternalLinkCompiler 1.77s ± 4% 1.73s ± 6% ~ (p=0.113 n=9+10)
LinkWithoutDebugCompiler 214ms ± 4% 211ms ± 2% ~ (p=0.360 n=10+8)
StdCmd 14.8s ± 3% 15.9s ± 1% +6.98% (p=0.000 n=10+9)
[Geo mean] 480ms 510ms +6.31%
name old user-time/op new user-time/op delta
Template 223ms ± 3% 237ms ± 3% +6.16% (p=0.000 n=9+10)
Unicode 103ms ± 6% 113ms ± 3% +9.53% (p=0.000 n=9+9)
GoTypes 758ms ± 8% 800ms ± 2% +5.55% (p=0.003 n=10+9)
Compiler 3.95s ± 2% 4.12s ± 2% +4.34% (p=0.000 n=10+9)
SSA 9.43s ± 1% 9.74s ± 4% +3.25% (p=0.000 n=8+10)
Flate 132ms ± 2% 141ms ± 2% +6.89% (p=0.000 n=9+9)
GoParser 177ms ± 9% 183ms ± 4% ~ (p=0.050 n=9+9)
Reflect 467ms ±10% 495ms ± 7% +6.17% (p=0.029 n=10+10)
Tar 183ms ± 9% 197ms ± 5% +7.92% (p=0.001 n=10+10)
XML 249ms ± 5% 268ms ± 4% +7.82% (p=0.000 n=10+9)
LinkCompiler 544ms ± 5% 544ms ± 6% ~ (p=0.863 n=9+9)
ExternalLinkCompiler 1.79s ± 4% 1.75s ± 6% ~ (p=0.075 n=10+10)
LinkWithoutDebugCompiler 248ms ± 6% 246ms ± 2% ~ (p=0.965 n=10+8)
[Geo mean] 483ms 504ms +4.41%
[git-generate]
cd src/cmd/compile/internal/ir
: # We need to do the conversion in multiple steps, so we introduce
: # a temporary type alias that will start out meaning the pointer-to-struct
: # and then change to mean the interface.
rf '
mv Node OldNode
add node.go \
type Node = *OldNode
'
: # It should work to do this ex in ir, but it misses test files, due to a bug in rf.
: # Run the command in gc to handle gc's tests, and then again in ssa for ssa's tests.
cd ../gc
rf '
ex . ../arm ../riscv64 ../arm64 ../mips64 ../ppc64 ../mips ../wasm {
import "cmd/compile/internal/ir"
*ir.OldNode -> ir.Node
}
'
cd ../ssa
rf '
ex {
import "cmd/compile/internal/ir"
*ir.OldNode -> ir.Node
}
'
: # Back in ir, finish conversion clumsily with sed,
: # because type checking and circular aliases do not mix.
cd ../ir
sed -i '' '
/type Node = \*OldNode/d
s/\*OldNode/Node/g
s/^func (n Node)/func (n *OldNode)/
s/OldNode/node/g
s/type INode interface/type Node interface/
s/var _ INode = (Node)(nil)/var _ Node = (*node)(nil)/
' *.go
gofmt -w *.go
sed -i '' '
s/{Func{}, 136, 248}/{Func{}, 152, 280}/
s/{Name{}, 32, 56}/{Name{}, 44, 80}/
s/{Param{}, 24, 48}/{Param{}, 44, 88}/
s/{node{}, 76, 128}/{node{}, 88, 152}/
' sizeof_test.go
cd ../ssa
sed -i '' '
s/{LocalSlot{}, 28, 40}/{LocalSlot{}, 32, 48}/
' sizeof_test.go
cd ../gc
sed -i '' 's/\*ir.Node/ir.Node/' mkbuiltin.go
cd ../../../..
go install std cmd
cd cmd/compile
go test -u || go test -u
Change-Id: I196bbe3b648e4701662e4a2bada40bf155e2a553
Reviewed-on: https://go-review.googlesource.com/c/go/+/272935
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-11-25 01:11:56 -05:00
|
|
|
checkAll(1, true, func(pi, qi ir.Node) ir.Node {
|
2020-04-24 15:59:17 -07:00
|
|
|
// p[i] == q[i]
|
[dev.regabi] cmd/compile: remove ir.Nod [generated]
Rewrite all uses of ir.Nod and friends to call the IR constructors directly.
This gives the results a more specific type and will play nicely with
introduction of more specific types throughout the code in a followup CL.
Passes buildall w/ toolstash -cmp.
[git-generate]
cd src/cmd/compile/internal/gc
rf '
ex . ../ir {
import "cmd/compile/internal/ir"
import "cmd/compile/internal/types"
import "cmd/compile/internal/syntax"
import "cmd/internal/src"
var p *noder
var orig syntax.Node
var op ir.Op
var l, r ir.Node
var sym *types.Sym
p.nod(orig, op, l, r) -> ir.NodAt(p.pos(orig), op, l, r)
p.nodSym(orig, op, l, sym) -> nodlSym(p.pos(orig), op, l, sym)
var xpos src.XPos
var ns ir.Nodes
npos(xpos, nodSym(op, l, sym)) -> nodlSym(xpos, op, l, sym)
npos(xpos, liststmt(ns)) -> ir.NewBlockStmt(xpos, ns)
}
ex . ../ir {
import "cmd/compile/internal/base"
import "cmd/compile/internal/ir"
import "cmd/compile/internal/types"
var op ir.Op
var l, r ir.Node
ir.Nod(op, l, r) -> ir.NodAt(base.Pos, op, l, r)
var sym *types.Sym
nodSym(op, l, sym) -> nodlSym(base.Pos, op, l, sym)
}
ex . ../ir {
import "cmd/compile/internal/ir"
import "cmd/internal/src"
# rf overlapping match handling is not quite good enough
# for certain nested rewrites, so handle these two - which often contain other ir.NodAt calls - early.
var l, r ir.Node
var xpos src.XPos
ir.NodAt(xpos, ir.OAS, l, r) -> ir.NewAssignStmt(xpos, l, r)
ir.NodAt(xpos, ir.OIF, l, nil) -> ir.NewIfStmt(xpos, l, nil, nil)
}
ex . ../ir {
import "cmd/compile/internal/ir"
import "cmd/compile/internal/types"
import "cmd/internal/src"
var l, r ir.Node
var sym *types.Sym
var xpos src.XPos
nodlSym(xpos, ir.ODOT, l, sym) -> ir.NewSelectorExpr(xpos, ir.ODOT, l, sym)
nodlSym(xpos, ir.OXDOT, l, sym) -> ir.NewSelectorExpr(xpos, ir.OXDOT, l, sym)
nodlSym(xpos, ir.ODOTPTR, l, sym) -> ir.NewSelectorExpr(xpos, ir.ODOTPTR, l, sym)
nodlSym(xpos, ir.OGOTO, nil, sym) -> ir.NewBranchStmt(xpos, ir.OGOTO, sym)
nodlSym(xpos, ir.ORETJMP, nil, sym) -> ir.NewBranchStmt(xpos, ir.ORETJMP, sym)
nodlSym(xpos, ir.OLABEL, nil, sym) -> ir.NewLabelStmt(xpos, sym)
nodlSym(xpos, ir.OSTRUCTKEY, l, sym) -> ir.NewStructKeyExpr(xpos, sym, l)
ir.NodAt(xpos, ir.OADD, l, r) -> ir.NewBinaryExpr(xpos, ir.OADD, l, r)
ir.NodAt(xpos, ir.OAND, l, r) -> ir.NewBinaryExpr(xpos, ir.OAND, l, r)
ir.NodAt(xpos, ir.OANDNOT, l, r) -> ir.NewBinaryExpr(xpos, ir.OANDNOT, l, r)
ir.NodAt(xpos, ir.ODIV, l, r) -> ir.NewBinaryExpr(xpos, ir.ODIV, l, r)
ir.NodAt(xpos, ir.OEQ, l, r) -> ir.NewBinaryExpr(xpos, ir.OEQ, l, r)
ir.NodAt(xpos, ir.OGE, l, r) -> ir.NewBinaryExpr(xpos, ir.OGE, l, r)
ir.NodAt(xpos, ir.OGT, l, r) -> ir.NewBinaryExpr(xpos, ir.OGT, l, r)
ir.NodAt(xpos, ir.OLE, l, r) -> ir.NewBinaryExpr(xpos, ir.OLE, l, r)
ir.NodAt(xpos, ir.OLSH, l, r) -> ir.NewBinaryExpr(xpos, ir.OLSH, l, r)
ir.NodAt(xpos, ir.OLT, l, r) -> ir.NewBinaryExpr(xpos, ir.OLT, l, r)
ir.NodAt(xpos, ir.OMOD, l, r) -> ir.NewBinaryExpr(xpos, ir.OMOD, l, r)
ir.NodAt(xpos, ir.OMUL, l, r) -> ir.NewBinaryExpr(xpos, ir.OMUL, l, r)
ir.NodAt(xpos, ir.ONE, l, r) -> ir.NewBinaryExpr(xpos, ir.ONE, l, r)
ir.NodAt(xpos, ir.OOR, l, r) -> ir.NewBinaryExpr(xpos, ir.OOR, l, r)
ir.NodAt(xpos, ir.ORSH, l, r) -> ir.NewBinaryExpr(xpos, ir.ORSH, l, r)
ir.NodAt(xpos, ir.OSUB, l, r) -> ir.NewBinaryExpr(xpos, ir.OSUB, l, r)
ir.NodAt(xpos, ir.OXOR, l, r) -> ir.NewBinaryExpr(xpos, ir.OXOR, l, r)
ir.NodAt(xpos, ir.OCOPY, l, r) -> ir.NewBinaryExpr(xpos, ir.OCOPY, l, r)
ir.NodAt(xpos, ir.OCOMPLEX, l, r) -> ir.NewBinaryExpr(xpos, ir.OCOMPLEX, l, r)
ir.NodAt(xpos, ir.OEFACE, l, r) -> ir.NewBinaryExpr(xpos, ir.OEFACE, l, r)
ir.NodAt(xpos, ir.OADDR, l, nil) -> ir.NewAddrExpr(xpos, l)
ir.NodAt(xpos, ir.OADDSTR, nil, nil) -> ir.NewAddStringExpr(xpos, nil)
ir.NodAt(xpos, ir.OANDAND, l, r) -> ir.NewLogicalExpr(xpos, ir.OANDAND, l, r)
ir.NodAt(xpos, ir.OOROR, l, r) -> ir.NewLogicalExpr(xpos, ir.OOROR, l, r)
ir.NodAt(xpos, ir.OARRAYLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OARRAYLIT, nil, nil)
ir.NodAt(xpos, ir.OCOMPLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OCOMPLIT, nil, nil)
ir.NodAt(xpos, ir.OMAPLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OMAPLIT, nil, nil)
ir.NodAt(xpos, ir.OSTRUCTLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OSTRUCTLIT, nil, nil)
ir.NodAt(xpos, ir.OSLICELIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OSLICELIT, nil, nil)
ir.NodAt(xpos, ir.OARRAYLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OARRAYLIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OCOMPLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OCOMPLIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OMAPLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OMAPLIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OSTRUCTLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OSTRUCTLIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OSLICELIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OSLICELIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OAS2, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2, nil, nil)
ir.NodAt(xpos, ir.OAS2DOTTYPE, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2DOTTYPE, nil, nil)
ir.NodAt(xpos, ir.OAS2FUNC, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2FUNC, nil, nil)
ir.NodAt(xpos, ir.OAS2MAPR, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2MAPR, nil, nil)
ir.NodAt(xpos, ir.OAS2RECV, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2RECV, nil, nil)
ir.NodAt(xpos, ir.OSELRECV2, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OSELRECV2, nil, nil)
ir.NodAt(xpos, ir.OASOP, l, r) -> ir.NewAssignOpStmt(xpos, ir.OXXX, l, r)
ir.NodAt(xpos, ir.OBITNOT, l, nil) -> ir.NewUnaryExpr(xpos, ir.OBITNOT, l)
ir.NodAt(xpos, ir.ONEG, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONEG, l)
ir.NodAt(xpos, ir.ONOT, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONOT, l)
ir.NodAt(xpos, ir.OPLUS, l, nil) -> ir.NewUnaryExpr(xpos, ir.OPLUS, l)
ir.NodAt(xpos, ir.ORECV, l, nil) -> ir.NewUnaryExpr(xpos, ir.ORECV, l)
ir.NodAt(xpos, ir.OALIGNOF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OALIGNOF, l)
ir.NodAt(xpos, ir.OCAP, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCAP, l)
ir.NodAt(xpos, ir.OCLOSE, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCLOSE, l)
ir.NodAt(xpos, ir.OIMAG, l, nil) -> ir.NewUnaryExpr(xpos, ir.OIMAG, l)
ir.NodAt(xpos, ir.OLEN, l, nil) -> ir.NewUnaryExpr(xpos, ir.OLEN, l)
ir.NodAt(xpos, ir.ONEW, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONEW, l)
ir.NodAt(xpos, ir.ONEWOBJ, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONEWOBJ, l)
ir.NodAt(xpos, ir.OOFFSETOF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OOFFSETOF, l)
ir.NodAt(xpos, ir.OPANIC, l, nil) -> ir.NewUnaryExpr(xpos, ir.OPANIC, l)
ir.NodAt(xpos, ir.OREAL, l, nil) -> ir.NewUnaryExpr(xpos, ir.OREAL, l)
ir.NodAt(xpos, ir.OSIZEOF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OSIZEOF, l)
ir.NodAt(xpos, ir.OCHECKNIL, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCHECKNIL, l)
ir.NodAt(xpos, ir.OCFUNC, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCFUNC, l)
ir.NodAt(xpos, ir.OIDATA, l, nil) -> ir.NewUnaryExpr(xpos, ir.OIDATA, l)
ir.NodAt(xpos, ir.OITAB, l, nil) -> ir.NewUnaryExpr(xpos, ir.OITAB, l)
ir.NodAt(xpos, ir.OSPTR, l, nil) -> ir.NewUnaryExpr(xpos, ir.OSPTR, l)
ir.NodAt(xpos, ir.OVARDEF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OVARDEF, l)
ir.NodAt(xpos, ir.OVARKILL, l, nil) -> ir.NewUnaryExpr(xpos, ir.OVARKILL, l)
ir.NodAt(xpos, ir.OVARLIVE, l, nil) -> ir.NewUnaryExpr(xpos, ir.OVARLIVE, l)
ir.NodAt(xpos, ir.OBLOCK, nil, nil) -> ir.NewBlockStmt(xpos, nil)
ir.NodAt(xpos, ir.OBREAK, nil, nil) -> ir.NewBranchStmt(xpos, ir.OBREAK, nil)
ir.NodAt(xpos, ir.OCONTINUE, nil, nil) -> ir.NewBranchStmt(xpos, ir.OCONTINUE, nil)
ir.NodAt(xpos, ir.OFALL, nil, nil) -> ir.NewBranchStmt(xpos, ir.OFALL, nil)
ir.NodAt(xpos, ir.OGOTO, nil, nil) -> ir.NewBranchStmt(xpos, ir.OGOTO, nil)
ir.NodAt(xpos, ir.ORETJMP, nil, nil) -> ir.NewBranchStmt(xpos, ir.ORETJMP, nil)
ir.NodAt(xpos, ir.OCALL, l, nil) -> ir.NewCallExpr(xpos, ir.OCALL, l, nil)
ir.NodAt(xpos, ir.OCALLFUNC, l, nil) -> ir.NewCallExpr(xpos, ir.OCALLFUNC, l, nil)
ir.NodAt(xpos, ir.OCALLINTER, l, nil) -> ir.NewCallExpr(xpos, ir.OCALLINTER, l, nil)
ir.NodAt(xpos, ir.OCALLMETH, l, nil) -> ir.NewCallExpr(xpos, ir.OCALLMETH, l, nil)
ir.NodAt(xpos, ir.OAPPEND, l, nil) -> ir.NewCallExpr(xpos, ir.OAPPEND, l, nil)
ir.NodAt(xpos, ir.ODELETE, l, nil) -> ir.NewCallExpr(xpos, ir.ODELETE, l, nil)
ir.NodAt(xpos, ir.OGETG, l, nil) -> ir.NewCallExpr(xpos, ir.OGETG, l, nil)
ir.NodAt(xpos, ir.OMAKE, l, nil) -> ir.NewCallExpr(xpos, ir.OMAKE, l, nil)
ir.NodAt(xpos, ir.OPRINT, l, nil) -> ir.NewCallExpr(xpos, ir.OPRINT, l, nil)
ir.NodAt(xpos, ir.OPRINTN, l, nil) -> ir.NewCallExpr(xpos, ir.OPRINTN, l, nil)
ir.NodAt(xpos, ir.ORECOVER, l, nil) -> ir.NewCallExpr(xpos, ir.ORECOVER, l, nil)
ir.NodAt(xpos, ir.OCASE, nil, nil) -> ir.NewCaseStmt(xpos, nil, nil)
ir.NodAt(xpos, ir.OCONV, l, nil) -> ir.NewConvExpr(xpos, ir.OCONV, nil, l)
ir.NodAt(xpos, ir.OCONVIFACE, l, nil) -> ir.NewConvExpr(xpos, ir.OCONVIFACE, nil, l)
ir.NodAt(xpos, ir.OCONVNOP, l, nil) -> ir.NewConvExpr(xpos, ir.OCONVNOP, nil, l)
ir.NodAt(xpos, ir.ORUNESTR, l, nil) -> ir.NewConvExpr(xpos, ir.ORUNESTR, nil, l)
ir.NodAt(xpos, ir.ODCL, l, nil) -> ir.NewDecl(xpos, ir.ODCL, l)
ir.NodAt(xpos, ir.ODCLCONST, l, nil) -> ir.NewDecl(xpos, ir.ODCLCONST, l)
ir.NodAt(xpos, ir.ODCLTYPE, l, nil) -> ir.NewDecl(xpos, ir.ODCLTYPE, l)
ir.NodAt(xpos, ir.ODCLFUNC, nil, nil) -> ir.NewFunc(xpos)
ir.NodAt(xpos, ir.ODEFER, l, nil) -> ir.NewGoDeferStmt(xpos, ir.ODEFER, l)
ir.NodAt(xpos, ir.OGO, l, nil) -> ir.NewGoDeferStmt(xpos, ir.OGO, l)
ir.NodAt(xpos, ir.ODEREF, l, nil) -> ir.NewStarExpr(xpos, l)
ir.NodAt(xpos, ir.ODOT, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOT, l, nil)
ir.NodAt(xpos, ir.ODOTPTR, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTPTR, l, nil)
ir.NodAt(xpos, ir.ODOTMETH, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTMETH, l, nil)
ir.NodAt(xpos, ir.ODOTINTER, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTINTER, l, nil)
ir.NodAt(xpos, ir.OXDOT, l, nil) -> ir.NewSelectorExpr(xpos, ir.OXDOT, l, nil)
ir.NodAt(xpos, ir.ODOTTYPE, l, nil) -> ir.NewTypeAssertExpr(xpos, l, nil)
ir.NodAt(xpos, ir.ODOTTYPE, l, r) -> ir.NewTypeAssertExpr(xpos, l, r.(ir.Ntype))
ir.NodAt(xpos, ir.OFOR, l, r) -> ir.NewForStmt(xpos, nil, l, r, nil)
ir.NodAt(xpos, ir.OINDEX, l, r) -> ir.NewIndexExpr(xpos, l, r)
ir.NodAt(xpos, ir.OINLMARK, nil, nil) -> ir.NewInlineMarkStmt(xpos, types.BADWIDTH)
ir.NodAt(xpos, ir.OKEY, l, r) -> ir.NewKeyExpr(xpos, l, r)
ir.NodAt(xpos, ir.OLABEL, nil, nil) -> ir.NewLabelStmt(xpos, nil)
ir.NodAt(xpos, ir.OMAKECHAN, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKECHAN, l, r)
ir.NodAt(xpos, ir.OMAKEMAP, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKEMAP, l, r)
ir.NodAt(xpos, ir.OMAKESLICE, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKESLICE, l, r)
ir.NodAt(xpos, ir.OMAKESLICECOPY, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKESLICECOPY, l, r)
ir.NodAt(xpos, ir.ONIL, nil, nil) -> ir.NewNilExpr(xpos)
ir.NodAt(xpos, ir.OPACK, nil, nil) -> ir.NewPkgName(xpos, nil, nil)
ir.NodAt(xpos, ir.OPAREN, l, nil) -> ir.NewParenExpr(xpos, l)
ir.NodAt(xpos, ir.ORANGE, nil, r) -> ir.NewRangeStmt(xpos, nil, r, nil)
ir.NodAt(xpos, ir.ORESULT, nil, nil) -> ir.NewResultExpr(xpos, nil, types.BADWIDTH)
ir.NodAt(xpos, ir.ORETURN, nil, nil) -> ir.NewReturnStmt(xpos, nil)
ir.NodAt(xpos, ir.OSELECT, nil, nil) -> ir.NewSelectStmt(xpos, nil)
ir.NodAt(xpos, ir.OSEND, l, r) -> ir.NewSendStmt(xpos, l, r)
ir.NodAt(xpos, ir.OSLICE, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE, l)
ir.NodAt(xpos, ir.OSLICEARR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICEARR, l)
ir.NodAt(xpos, ir.OSLICESTR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICESTR, l)
ir.NodAt(xpos, ir.OSLICE3, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE3, l)
ir.NodAt(xpos, ir.OSLICE3ARR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE3ARR, l)
ir.NodAt(xpos, ir.OSLICEHEADER, l, nil) -> ir.NewSliceHeaderExpr(xpos, nil, l, nil, nil)
ir.NodAt(xpos, ir.OSWITCH, l, nil) -> ir.NewSwitchStmt(xpos, l, nil)
ir.NodAt(xpos, ir.OINLCALL, nil, nil) -> ir.NewInlinedCallExpr(xpos, nil, nil)
}
rm noder.nod noder.nodSym nodSym nodlSym ir.NodAt ir.Nod
'
Change-Id: Ibf1eb708de8463ae74ccc47d7966cc263a18295e
Reviewed-on: https://go-review.googlesource.com/c/go/+/277933
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-12-22 23:55:29 -05:00
|
|
|
return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
|
2020-04-24 15:59:17 -07:00
|
|
|
})
|
|
|
|
|
}
|
2016-02-26 14:56:31 -08:00
|
|
|
|
[dev.regabi] cmd/compile: introduce cmd/compile/internal/ir [generated]
If we want to break up package gc at all, we will need to move
the compiler IR it defines into a separate package that can be
imported by packages that gc itself imports. This CL does that.
It also removes the TINT8 etc aliases so that all code is clear
about which package things are coming from.
This CL is automatically generated by the script below.
See the comments in the script for details about the changes.
[git-generate]
cd src/cmd/compile/internal/gc
rf '
# These names were never fully qualified
# when the types package was added.
# Do it now, to avoid confusion about where they live.
inline -rm \
Txxx \
TINT8 \
TUINT8 \
TINT16 \
TUINT16 \
TINT32 \
TUINT32 \
TINT64 \
TUINT64 \
TINT \
TUINT \
TUINTPTR \
TCOMPLEX64 \
TCOMPLEX128 \
TFLOAT32 \
TFLOAT64 \
TBOOL \
TPTR \
TFUNC \
TSLICE \
TARRAY \
TSTRUCT \
TCHAN \
TMAP \
TINTER \
TFORW \
TANY \
TSTRING \
TUNSAFEPTR \
TIDEAL \
TNIL \
TBLANK \
TFUNCARGS \
TCHANARGS \
NTYPE \
BADWIDTH
# esc.go and escape.go do not need to be split.
# Append esc.go onto the end of escape.go.
mv esc.go escape.go
# Pull out the type format installation from func Main,
# so it can be carried into package ir.
mv Main:/Sconv.=/-0,/TypeLinkSym/-1 InstallTypeFormats
# Names that need to be exported for use by code left in gc.
mv Isconst IsConst
mv asNode AsNode
mv asNodes AsNodes
mv asTypesNode AsTypesNode
mv basicnames BasicTypeNames
mv builtinpkg BuiltinPkg
mv consttype ConstType
mv dumplist DumpList
mv fdumplist FDumpList
mv fmtMode FmtMode
mv goopnames OpNames
mv inspect Inspect
mv inspectList InspectList
mv localpkg LocalPkg
mv nblank BlankNode
mv numImport NumImport
mv opprec OpPrec
mv origSym OrigSym
mv stmtwithinit StmtWithInit
mv dump DumpAny
mv fdump FDumpAny
mv nod Nod
mv nodl NodAt
mv newname NewName
mv newnamel NewNameAt
mv assertRepresents AssertValidTypeForConst
mv represents ValidTypeForConst
mv nodlit NewLiteral
# Types and fields that need to be exported for use by gc.
mv nowritebarrierrecCallSym SymAndPos
mv SymAndPos.lineno SymAndPos.Pos
mv SymAndPos.target SymAndPos.Sym
mv Func.lsym Func.LSym
mv Func.setWBPos Func.SetWBPos
mv Func.numReturns Func.NumReturns
mv Func.numDefers Func.NumDefers
mv Func.nwbrCalls Func.NWBRCalls
# initLSym is an algorithm left behind in gc,
# not an operation on Func itself.
mv Func.initLSym initLSym
mv nodeQueue NodeQueue
mv NodeQueue.empty NodeQueue.Empty
mv NodeQueue.popLeft NodeQueue.PopLeft
mv NodeQueue.pushRight NodeQueue.PushRight
# Many methods on Node are actually algorithms that
# would apply to any node implementation.
# Those become plain functions.
mv Node.funcname FuncName
mv Node.isBlank IsBlank
mv Node.isGoConst isGoConst
mv Node.isNil IsNil
mv Node.isParamHeapCopy isParamHeapCopy
mv Node.isParamStackCopy isParamStackCopy
mv Node.isSimpleName isSimpleName
mv Node.mayBeShared MayBeShared
mv Node.pkgFuncName PkgFuncName
mv Node.backingArrayPtrLen backingArrayPtrLen
mv Node.isterminating isTermNode
mv Node.labeledControl labeledControl
mv Nodes.isterminating isTermNodes
mv Nodes.sigerr fmtSignature
mv Node.MethodName methodExprName
mv Node.MethodFunc methodExprFunc
mv Node.IsMethod IsMethod
# Every node will need to implement RawCopy;
# Copy and SepCopy algorithms will use it.
mv Node.rawcopy Node.RawCopy
mv Node.copy Copy
mv Node.sepcopy SepCopy
# Extract Node.Format method body into func FmtNode,
# but leave method wrapper behind.
mv Node.Format:0,$ FmtNode
# Formatting helpers that will apply to all node implementations.
mv Node.Line Line
mv Node.exprfmt exprFmt
mv Node.jconv jconvFmt
mv Node.modeString modeString
mv Node.nconv nconvFmt
mv Node.nodedump nodeDumpFmt
mv Node.nodefmt nodeFmt
mv Node.stmtfmt stmtFmt
# Constant support needed for code moving to ir.
mv okforconst OKForConst
mv vconv FmtConst
mv int64Val Int64Val
mv float64Val Float64Val
mv Node.ValueInterface ConstValue
# Organize code into files.
mv LocalPkg BuiltinPkg ir.go
mv NumImport InstallTypeFormats Line fmt.go
mv syntax.go Nod NodAt NewNameAt Class Pxxx PragmaFlag Nointerface SymAndPos \
AsNode AsTypesNode BlankNode OrigSym \
Node.SliceBounds Node.SetSliceBounds Op.IsSlice3 \
IsConst Node.Int64Val Node.CanInt64 Node.Uint64Val Node.BoolVal Node.StringVal \
Node.RawCopy SepCopy Copy \
IsNil IsBlank IsMethod \
Node.Typ Node.StorageClass node.go
mv ConstType ConstValue Int64Val Float64Val AssertValidTypeForConst ValidTypeForConst NewLiteral idealType OKForConst val.go
# Move files to new ir package.
mv bitset.go class_string.go dump.go fmt.go \
ir.go node.go op_string.go val.go \
sizeof_test.go cmd/compile/internal/ir
'
: # fix mkbuiltin.go to generate the changes made to builtin.go during rf
sed -i '' '
s/\[T/[types.T/g
s/\*Node/*ir.Node/g
/internal\/types/c \
fmt.Fprintln(&b, `import (`) \
fmt.Fprintln(&b, ` "cmd/compile/internal/ir"`) \
fmt.Fprintln(&b, ` "cmd/compile/internal/types"`) \
fmt.Fprintln(&b, `)`)
' mkbuiltin.go
gofmt -w mkbuiltin.go
: # update cmd/dist to add internal/ir
cd ../../../dist
sed -i '' '/compile.internal.gc/a\
"cmd/compile/internal/ir",
' buildtool.go
gofmt -w buildtool.go
: # update cmd/compile TestFormats
cd ../..
go install std cmd
cd cmd/compile
go test -u || go test # first one updates but fails; second passes
Change-Id: I5f7caf6b20629b51970279e81231a3574d5b51db
Reviewed-on: https://go-review.googlesource.com/c/go/+/273008
Trust: Russ Cox <rsc@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-11-19 21:09:22 -05:00
|
|
|
case types.TSTRUCT:
|
2023-05-08 21:28:43 +00:00
|
|
|
flatConds, _ := compare.EqStruct(t, np, nq)
|
2020-06-15 09:17:18 -07:00
|
|
|
if len(flatConds) == 0 {
|
2023-02-28 13:27:51 -08:00
|
|
|
fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(base.Pos, true)))
|
cmd/compile: make runtime calls last in eq algs
type T struct {
f float64
a [64]uint64
g float64
}
Prior to this change, the generated equality algorithm for T was:
func eqT(p, q *T) bool {
return p.f == q.f && runtime.memequal(p.a, q.a, 512) && p.g == q.g
}
In handwritten code, we would normally put the cheapest checks first.
This change takes a step in that direction. We now generate:
func eqT(p, q *T) bool {
return p.f == q.f && p.g == q.g && runtime.memequal(p.a, q.a, 512)
}
For most types, this also generates considerably shorter code. Examples:
runtime
.eq."".mstats 406 -> 391 (-3.69%)
.eq.""._func 114 -> 101 (-11.40%)
.eq."".itab 115 -> 102 (-11.30%)
.eq."".scase 125 -> 116 (-7.20%)
.eq."".traceStack 119 -> 102 (-14.29%)
.eq."".gcControllerState 169 -> 161 (-4.73%)
.eq."".sweepdata 121 -> 112 (-7.44%)
However, for types in which we make unwise choices about inlining
memory-only comparisons (#38494), this generates longer code.
Example:
cmd/internal/obj
.eq."".objWriter 211 -> 214 (+1.42%)
.eq."".Addr 185 -> 187 (+1.08%)
Fortunately, such cases are not common.
Change-Id: I47a27da93c1f88ec71fa350c192f36b29548a217
Reviewed-on: https://go-review.googlesource.com/c/go/+/230203
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2020-04-24 09:49:35 -07:00
|
|
|
} else {
|
2020-09-16 10:13:37 -07:00
|
|
|
for _, c := range flatConds[:len(flatConds)-1] {
|
|
|
|
|
// if cond {} else { goto neq }
|
[dev.regabi] cmd/compile: remove ir.Nod [generated]
Rewrite all uses of ir.Nod and friends to call the IR constructors directly.
This gives the results a more specific type and will play nicely with
introduction of more specific types throughout the code in a followup CL.
Passes buildall w/ toolstash -cmp.
[git-generate]
cd src/cmd/compile/internal/gc
rf '
ex . ../ir {
import "cmd/compile/internal/ir"
import "cmd/compile/internal/types"
import "cmd/compile/internal/syntax"
import "cmd/internal/src"
var p *noder
var orig syntax.Node
var op ir.Op
var l, r ir.Node
var sym *types.Sym
p.nod(orig, op, l, r) -> ir.NodAt(p.pos(orig), op, l, r)
p.nodSym(orig, op, l, sym) -> nodlSym(p.pos(orig), op, l, sym)
var xpos src.XPos
var ns ir.Nodes
npos(xpos, nodSym(op, l, sym)) -> nodlSym(xpos, op, l, sym)
npos(xpos, liststmt(ns)) -> ir.NewBlockStmt(xpos, ns)
}
ex . ../ir {
import "cmd/compile/internal/base"
import "cmd/compile/internal/ir"
import "cmd/compile/internal/types"
var op ir.Op
var l, r ir.Node
ir.Nod(op, l, r) -> ir.NodAt(base.Pos, op, l, r)
var sym *types.Sym
nodSym(op, l, sym) -> nodlSym(base.Pos, op, l, sym)
}
ex . ../ir {
import "cmd/compile/internal/ir"
import "cmd/internal/src"
# rf overlapping match handling is not quite good enough
# for certain nested rewrites, so handle these two - which often contain other ir.NodAt calls - early.
var l, r ir.Node
var xpos src.XPos
ir.NodAt(xpos, ir.OAS, l, r) -> ir.NewAssignStmt(xpos, l, r)
ir.NodAt(xpos, ir.OIF, l, nil) -> ir.NewIfStmt(xpos, l, nil, nil)
}
ex . ../ir {
import "cmd/compile/internal/ir"
import "cmd/compile/internal/types"
import "cmd/internal/src"
var l, r ir.Node
var sym *types.Sym
var xpos src.XPos
nodlSym(xpos, ir.ODOT, l, sym) -> ir.NewSelectorExpr(xpos, ir.ODOT, l, sym)
nodlSym(xpos, ir.OXDOT, l, sym) -> ir.NewSelectorExpr(xpos, ir.OXDOT, l, sym)
nodlSym(xpos, ir.ODOTPTR, l, sym) -> ir.NewSelectorExpr(xpos, ir.ODOTPTR, l, sym)
nodlSym(xpos, ir.OGOTO, nil, sym) -> ir.NewBranchStmt(xpos, ir.OGOTO, sym)
nodlSym(xpos, ir.ORETJMP, nil, sym) -> ir.NewBranchStmt(xpos, ir.ORETJMP, sym)
nodlSym(xpos, ir.OLABEL, nil, sym) -> ir.NewLabelStmt(xpos, sym)
nodlSym(xpos, ir.OSTRUCTKEY, l, sym) -> ir.NewStructKeyExpr(xpos, sym, l)
ir.NodAt(xpos, ir.OADD, l, r) -> ir.NewBinaryExpr(xpos, ir.OADD, l, r)
ir.NodAt(xpos, ir.OAND, l, r) -> ir.NewBinaryExpr(xpos, ir.OAND, l, r)
ir.NodAt(xpos, ir.OANDNOT, l, r) -> ir.NewBinaryExpr(xpos, ir.OANDNOT, l, r)
ir.NodAt(xpos, ir.ODIV, l, r) -> ir.NewBinaryExpr(xpos, ir.ODIV, l, r)
ir.NodAt(xpos, ir.OEQ, l, r) -> ir.NewBinaryExpr(xpos, ir.OEQ, l, r)
ir.NodAt(xpos, ir.OGE, l, r) -> ir.NewBinaryExpr(xpos, ir.OGE, l, r)
ir.NodAt(xpos, ir.OGT, l, r) -> ir.NewBinaryExpr(xpos, ir.OGT, l, r)
ir.NodAt(xpos, ir.OLE, l, r) -> ir.NewBinaryExpr(xpos, ir.OLE, l, r)
ir.NodAt(xpos, ir.OLSH, l, r) -> ir.NewBinaryExpr(xpos, ir.OLSH, l, r)
ir.NodAt(xpos, ir.OLT, l, r) -> ir.NewBinaryExpr(xpos, ir.OLT, l, r)
ir.NodAt(xpos, ir.OMOD, l, r) -> ir.NewBinaryExpr(xpos, ir.OMOD, l, r)
ir.NodAt(xpos, ir.OMUL, l, r) -> ir.NewBinaryExpr(xpos, ir.OMUL, l, r)
ir.NodAt(xpos, ir.ONE, l, r) -> ir.NewBinaryExpr(xpos, ir.ONE, l, r)
ir.NodAt(xpos, ir.OOR, l, r) -> ir.NewBinaryExpr(xpos, ir.OOR, l, r)
ir.NodAt(xpos, ir.ORSH, l, r) -> ir.NewBinaryExpr(xpos, ir.ORSH, l, r)
ir.NodAt(xpos, ir.OSUB, l, r) -> ir.NewBinaryExpr(xpos, ir.OSUB, l, r)
ir.NodAt(xpos, ir.OXOR, l, r) -> ir.NewBinaryExpr(xpos, ir.OXOR, l, r)
ir.NodAt(xpos, ir.OCOPY, l, r) -> ir.NewBinaryExpr(xpos, ir.OCOPY, l, r)
ir.NodAt(xpos, ir.OCOMPLEX, l, r) -> ir.NewBinaryExpr(xpos, ir.OCOMPLEX, l, r)
ir.NodAt(xpos, ir.OEFACE, l, r) -> ir.NewBinaryExpr(xpos, ir.OEFACE, l, r)
ir.NodAt(xpos, ir.OADDR, l, nil) -> ir.NewAddrExpr(xpos, l)
ir.NodAt(xpos, ir.OADDSTR, nil, nil) -> ir.NewAddStringExpr(xpos, nil)
ir.NodAt(xpos, ir.OANDAND, l, r) -> ir.NewLogicalExpr(xpos, ir.OANDAND, l, r)
ir.NodAt(xpos, ir.OOROR, l, r) -> ir.NewLogicalExpr(xpos, ir.OOROR, l, r)
ir.NodAt(xpos, ir.OARRAYLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OARRAYLIT, nil, nil)
ir.NodAt(xpos, ir.OCOMPLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OCOMPLIT, nil, nil)
ir.NodAt(xpos, ir.OMAPLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OMAPLIT, nil, nil)
ir.NodAt(xpos, ir.OSTRUCTLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OSTRUCTLIT, nil, nil)
ir.NodAt(xpos, ir.OSLICELIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OSLICELIT, nil, nil)
ir.NodAt(xpos, ir.OARRAYLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OARRAYLIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OCOMPLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OCOMPLIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OMAPLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OMAPLIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OSTRUCTLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OSTRUCTLIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OSLICELIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OSLICELIT, r.(ir.Ntype), nil)
ir.NodAt(xpos, ir.OAS2, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2, nil, nil)
ir.NodAt(xpos, ir.OAS2DOTTYPE, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2DOTTYPE, nil, nil)
ir.NodAt(xpos, ir.OAS2FUNC, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2FUNC, nil, nil)
ir.NodAt(xpos, ir.OAS2MAPR, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2MAPR, nil, nil)
ir.NodAt(xpos, ir.OAS2RECV, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2RECV, nil, nil)
ir.NodAt(xpos, ir.OSELRECV2, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OSELRECV2, nil, nil)
ir.NodAt(xpos, ir.OASOP, l, r) -> ir.NewAssignOpStmt(xpos, ir.OXXX, l, r)
ir.NodAt(xpos, ir.OBITNOT, l, nil) -> ir.NewUnaryExpr(xpos, ir.OBITNOT, l)
ir.NodAt(xpos, ir.ONEG, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONEG, l)
ir.NodAt(xpos, ir.ONOT, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONOT, l)
ir.NodAt(xpos, ir.OPLUS, l, nil) -> ir.NewUnaryExpr(xpos, ir.OPLUS, l)
ir.NodAt(xpos, ir.ORECV, l, nil) -> ir.NewUnaryExpr(xpos, ir.ORECV, l)
ir.NodAt(xpos, ir.OALIGNOF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OALIGNOF, l)
ir.NodAt(xpos, ir.OCAP, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCAP, l)
ir.NodAt(xpos, ir.OCLOSE, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCLOSE, l)
ir.NodAt(xpos, ir.OIMAG, l, nil) -> ir.NewUnaryExpr(xpos, ir.OIMAG, l)
ir.NodAt(xpos, ir.OLEN, l, nil) -> ir.NewUnaryExpr(xpos, ir.OLEN, l)
ir.NodAt(xpos, ir.ONEW, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONEW, l)
ir.NodAt(xpos, ir.ONEWOBJ, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONEWOBJ, l)
ir.NodAt(xpos, ir.OOFFSETOF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OOFFSETOF, l)
ir.NodAt(xpos, ir.OPANIC, l, nil) -> ir.NewUnaryExpr(xpos, ir.OPANIC, l)
ir.NodAt(xpos, ir.OREAL, l, nil) -> ir.NewUnaryExpr(xpos, ir.OREAL, l)
ir.NodAt(xpos, ir.OSIZEOF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OSIZEOF, l)
ir.NodAt(xpos, ir.OCHECKNIL, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCHECKNIL, l)
ir.NodAt(xpos, ir.OCFUNC, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCFUNC, l)
ir.NodAt(xpos, ir.OIDATA, l, nil) -> ir.NewUnaryExpr(xpos, ir.OIDATA, l)
ir.NodAt(xpos, ir.OITAB, l, nil) -> ir.NewUnaryExpr(xpos, ir.OITAB, l)
ir.NodAt(xpos, ir.OSPTR, l, nil) -> ir.NewUnaryExpr(xpos, ir.OSPTR, l)
ir.NodAt(xpos, ir.OVARDEF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OVARDEF, l)
ir.NodAt(xpos, ir.OVARKILL, l, nil) -> ir.NewUnaryExpr(xpos, ir.OVARKILL, l)
ir.NodAt(xpos, ir.OVARLIVE, l, nil) -> ir.NewUnaryExpr(xpos, ir.OVARLIVE, l)
ir.NodAt(xpos, ir.OBLOCK, nil, nil) -> ir.NewBlockStmt(xpos, nil)
ir.NodAt(xpos, ir.OBREAK, nil, nil) -> ir.NewBranchStmt(xpos, ir.OBREAK, nil)
ir.NodAt(xpos, ir.OCONTINUE, nil, nil) -> ir.NewBranchStmt(xpos, ir.OCONTINUE, nil)
ir.NodAt(xpos, ir.OFALL, nil, nil) -> ir.NewBranchStmt(xpos, ir.OFALL, nil)
ir.NodAt(xpos, ir.OGOTO, nil, nil) -> ir.NewBranchStmt(xpos, ir.OGOTO, nil)
ir.NodAt(xpos, ir.ORETJMP, nil, nil) -> ir.NewBranchStmt(xpos, ir.ORETJMP, nil)
ir.NodAt(xpos, ir.OCALL, l, nil) -> ir.NewCallExpr(xpos, ir.OCALL, l, nil)
ir.NodAt(xpos, ir.OCALLFUNC, l, nil) -> ir.NewCallExpr(xpos, ir.OCALLFUNC, l, nil)
ir.NodAt(xpos, ir.OCALLINTER, l, nil) -> ir.NewCallExpr(xpos, ir.OCALLINTER, l, nil)
ir.NodAt(xpos, ir.OCALLMETH, l, nil) -> ir.NewCallExpr(xpos, ir.OCALLMETH, l, nil)
ir.NodAt(xpos, ir.OAPPEND, l, nil) -> ir.NewCallExpr(xpos, ir.OAPPEND, l, nil)
ir.NodAt(xpos, ir.ODELETE, l, nil) -> ir.NewCallExpr(xpos, ir.ODELETE, l, nil)
ir.NodAt(xpos, ir.OGETG, l, nil) -> ir.NewCallExpr(xpos, ir.OGETG, l, nil)
ir.NodAt(xpos, ir.OMAKE, l, nil) -> ir.NewCallExpr(xpos, ir.OMAKE, l, nil)
ir.NodAt(xpos, ir.OPRINT, l, nil) -> ir.NewCallExpr(xpos, ir.OPRINT, l, nil)
ir.NodAt(xpos, ir.OPRINTN, l, nil) -> ir.NewCallExpr(xpos, ir.OPRINTN, l, nil)
ir.NodAt(xpos, ir.ORECOVER, l, nil) -> ir.NewCallExpr(xpos, ir.ORECOVER, l, nil)
ir.NodAt(xpos, ir.OCASE, nil, nil) -> ir.NewCaseStmt(xpos, nil, nil)
ir.NodAt(xpos, ir.OCONV, l, nil) -> ir.NewConvExpr(xpos, ir.OCONV, nil, l)
ir.NodAt(xpos, ir.OCONVIFACE, l, nil) -> ir.NewConvExpr(xpos, ir.OCONVIFACE, nil, l)
ir.NodAt(xpos, ir.OCONVNOP, l, nil) -> ir.NewConvExpr(xpos, ir.OCONVNOP, nil, l)
ir.NodAt(xpos, ir.ORUNESTR, l, nil) -> ir.NewConvExpr(xpos, ir.ORUNESTR, nil, l)
ir.NodAt(xpos, ir.ODCL, l, nil) -> ir.NewDecl(xpos, ir.ODCL, l)
ir.NodAt(xpos, ir.ODCLCONST, l, nil) -> ir.NewDecl(xpos, ir.ODCLCONST, l)
ir.NodAt(xpos, ir.ODCLTYPE, l, nil) -> ir.NewDecl(xpos, ir.ODCLTYPE, l)
ir.NodAt(xpos, ir.ODCLFUNC, nil, nil) -> ir.NewFunc(xpos)
ir.NodAt(xpos, ir.ODEFER, l, nil) -> ir.NewGoDeferStmt(xpos, ir.ODEFER, l)
ir.NodAt(xpos, ir.OGO, l, nil) -> ir.NewGoDeferStmt(xpos, ir.OGO, l)
ir.NodAt(xpos, ir.ODEREF, l, nil) -> ir.NewStarExpr(xpos, l)
ir.NodAt(xpos, ir.ODOT, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOT, l, nil)
ir.NodAt(xpos, ir.ODOTPTR, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTPTR, l, nil)
ir.NodAt(xpos, ir.ODOTMETH, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTMETH, l, nil)
ir.NodAt(xpos, ir.ODOTINTER, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTINTER, l, nil)
ir.NodAt(xpos, ir.OXDOT, l, nil) -> ir.NewSelectorExpr(xpos, ir.OXDOT, l, nil)
ir.NodAt(xpos, ir.ODOTTYPE, l, nil) -> ir.NewTypeAssertExpr(xpos, l, nil)
ir.NodAt(xpos, ir.ODOTTYPE, l, r) -> ir.NewTypeAssertExpr(xpos, l, r.(ir.Ntype))
ir.NodAt(xpos, ir.OFOR, l, r) -> ir.NewForStmt(xpos, nil, l, r, nil)
ir.NodAt(xpos, ir.OINDEX, l, r) -> ir.NewIndexExpr(xpos, l, r)
ir.NodAt(xpos, ir.OINLMARK, nil, nil) -> ir.NewInlineMarkStmt(xpos, types.BADWIDTH)
ir.NodAt(xpos, ir.OKEY, l, r) -> ir.NewKeyExpr(xpos, l, r)
ir.NodAt(xpos, ir.OLABEL, nil, nil) -> ir.NewLabelStmt(xpos, nil)
ir.NodAt(xpos, ir.OMAKECHAN, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKECHAN, l, r)
ir.NodAt(xpos, ir.OMAKEMAP, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKEMAP, l, r)
ir.NodAt(xpos, ir.OMAKESLICE, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKESLICE, l, r)
ir.NodAt(xpos, ir.OMAKESLICECOPY, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKESLICECOPY, l, r)
ir.NodAt(xpos, ir.ONIL, nil, nil) -> ir.NewNilExpr(xpos)
ir.NodAt(xpos, ir.OPACK, nil, nil) -> ir.NewPkgName(xpos, nil, nil)
ir.NodAt(xpos, ir.OPAREN, l, nil) -> ir.NewParenExpr(xpos, l)
ir.NodAt(xpos, ir.ORANGE, nil, r) -> ir.NewRangeStmt(xpos, nil, r, nil)
ir.NodAt(xpos, ir.ORESULT, nil, nil) -> ir.NewResultExpr(xpos, nil, types.BADWIDTH)
ir.NodAt(xpos, ir.ORETURN, nil, nil) -> ir.NewReturnStmt(xpos, nil)
ir.NodAt(xpos, ir.OSELECT, nil, nil) -> ir.NewSelectStmt(xpos, nil)
ir.NodAt(xpos, ir.OSEND, l, r) -> ir.NewSendStmt(xpos, l, r)
ir.NodAt(xpos, ir.OSLICE, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE, l)
ir.NodAt(xpos, ir.OSLICEARR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICEARR, l)
ir.NodAt(xpos, ir.OSLICESTR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICESTR, l)
ir.NodAt(xpos, ir.OSLICE3, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE3, l)
ir.NodAt(xpos, ir.OSLICE3ARR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE3ARR, l)
ir.NodAt(xpos, ir.OSLICEHEADER, l, nil) -> ir.NewSliceHeaderExpr(xpos, nil, l, nil, nil)
ir.NodAt(xpos, ir.OSWITCH, l, nil) -> ir.NewSwitchStmt(xpos, l, nil)
ir.NodAt(xpos, ir.OINLCALL, nil, nil) -> ir.NewInlinedCallExpr(xpos, nil, nil)
}
rm noder.nod noder.nodSym nodSym nodlSym ir.NodAt ir.Nod
'
Change-Id: Ibf1eb708de8463ae74ccc47d7966cc263a18295e
Reviewed-on: https://go-review.googlesource.com/c/go/+/277933
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-12-22 23:55:29 -05:00
|
|
|
n := ir.NewIfStmt(base.Pos, c, nil, nil)
|
2020-12-23 00:02:08 -05:00
|
|
|
n.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
|
|
|
|
|
fn.Body.Append(n)
|
cmd/compile: make runtime calls last in eq algs
type T struct {
f float64
a [64]uint64
g float64
}
Prior to this change, the generated equality algorithm for T was:
func eqT(p, q *T) bool {
return p.f == q.f && runtime.memequal(p.a, q.a, 512) && p.g == q.g
}
In handwritten code, we would normally put the cheapest checks first.
This change takes a step in that direction. We now generate:
func eqT(p, q *T) bool {
return p.f == q.f && p.g == q.g && runtime.memequal(p.a, q.a, 512)
}
For most types, this also generates considerably shorter code. Examples:
runtime
.eq."".mstats 406 -> 391 (-3.69%)
.eq.""._func 114 -> 101 (-11.40%)
.eq."".itab 115 -> 102 (-11.30%)
.eq."".scase 125 -> 116 (-7.20%)
.eq."".traceStack 119 -> 102 (-14.29%)
.eq."".gcControllerState 169 -> 161 (-4.73%)
.eq."".sweepdata 121 -> 112 (-7.44%)
However, for types in which we make unwise choices about inlining
memory-only comparisons (#38494), this generates longer code.
Example:
cmd/internal/obj
.eq."".objWriter 211 -> 214 (+1.42%)
.eq."".Addr 185 -> 187 (+1.08%)
Fortunately, such cases are not common.
Change-Id: I47a27da93c1f88ec71fa350c192f36b29548a217
Reviewed-on: https://go-review.googlesource.com/c/go/+/230203
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2020-04-24 09:49:35 -07:00
|
|
|
}
|
2020-12-23 00:02:08 -05:00
|
|
|
fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, flatConds[len(flatConds)-1]))
|
2016-02-26 14:56:31 -08:00
|
|
|
}
|
2020-09-16 10:13:37 -07:00
|
|
|
}
|
2016-02-26 14:56:31 -08:00
|
|
|
|
2020-09-16 10:13:37 -07:00
|
|
|
// ret:
|
|
|
|
|
// return
|
[dev.regabi] cmd/compile: split out package typecheck [generated]
This commit splits the typechecking logic into its own package,
the first of a sequence of CLs to break package gc into more
manageable units.
[git-generate]
cd src/cmd/compile/internal/gc
rf '
# The binary import/export has to be part of typechecking,
# because we load inlined function bodies lazily, but "exporter"
# should not be. Move that out of bexport.go.
mv exporter exporter.markObject exporter.markType export.go
# Use the typechecking helpers, so that the calls left behind
# in package gc do not need access to ctxExpr etc.
ex {
import "cmd/compile/internal/ir"
# TODO(rsc): Should not be necessary.
avoid TypecheckExpr
avoid TypecheckStmt
avoid TypecheckExprs
avoid TypecheckStmts
avoid TypecheckAssignExpr
avoid TypecheckCallee
var n ir.Node
var ns []ir.Node
typecheck(n, ctxExpr) -> TypecheckExpr(n)
typecheck(n, ctxStmt) -> TypecheckStmt(n)
typecheckslice(ns, ctxExpr) -> TypecheckExprs(ns)
typecheckslice(ns, ctxStmt) -> TypecheckStmts(ns)
typecheck(n, ctxExpr|ctxAssign) -> TypecheckAssignExpr(n)
typecheck(n, ctxExpr|ctxCallee) -> TypecheckCallee(n)
}
# Move some typechecking API to typecheck.
mv syslook LookupRuntime
mv substArgTypes SubstArgTypes
mv LookupRuntime SubstArgTypes syms.go
mv conv Conv
mv convnop ConvNop
mv Conv ConvNop typecheck.go
mv colasdefn AssignDefn
mv colasname assignableName
mv Target target.go
mv initname autoexport exportsym dcl.go
mv exportsym Export
# Export API to be called from outside typecheck.
# The ones with "Typecheck" prefixes will be renamed later to drop the prefix.
mv adddot AddImplicitDots
mv assignconv AssignConv
mv expandmeth CalcMethods
mv capturevarscomplete CaptureVarsComplete
mv checkMapKeys CheckMapKeys
mv checkreturn CheckReturn
mv dclcontext DeclContext
mv dclfunc DeclFunc
mv declare Declare
mv dotImportRefs DotImportRefs
mv declImporter DeclImporter
mv variter DeclVars
mv defaultlit DefaultLit
mv evalConst EvalConst
mv expandInline ImportBody
mv finishUniverse declareUniverse
mv funcbody FinishFuncBody
mv funchdr StartFuncBody
mv indexconst IndexConst
mv initTodo InitTodoFunc
mv lookup Lookup
mv resolve Resolve
mv lookupN LookupNum
mv nodAddr NodAddr
mv nodAddrAt NodAddrAt
mv nodnil NodNil
mv origBoolConst OrigBool
mv origConst OrigConst
mv origIntConst OrigInt
mv redeclare Redeclared
mv tostruct NewStructType
mv functype NewFuncType
mv methodfunc NewMethodType
mv structargs NewFuncParams
mv temp Temp
mv tempAt TempAt
mv typecheckok TypecheckAllowed
mv typecheck _typecheck # make room for typecheck pkg
mv typecheckinl TypecheckImportedBody
mv typecheckFunc TypecheckFunc
mv iimport ReadImports
mv iexport WriteExports
mv sysfunc LookupRuntimeFunc
mv sysvar LookupRuntimeVar
# Move function constructors to typecheck.
mv mkdotargslice MakeDotArgs
mv fixVariadicCall FixVariadicCall
mv closureType ClosureType
mv partialCallType PartialCallType
mv capturevars CaptureVars
mv MakeDotArgs FixVariadicCall ClosureType PartialCallType CaptureVars typecheckclosure func.go
mv autolabel AutoLabel
mv AutoLabel syms.go
mv Dlist dlist
mv Symlink symlink
mv \
AssignDefn assignableName \
AssignConv \
CaptureVarsComplete \
DeclContext \
DeclFunc \
DeclImporter \
DeclVars \
Declare \
DotImportRefs \
Export \
InitTodoFunc \
Lookup \
LookupNum \
LookupRuntimeFunc \
LookupRuntimeVar \
NewFuncParams \
NewName \
NodAddr \
NodAddrAt \
NodNil \
Redeclared \
StartFuncBody \
FinishFuncBody \
TypecheckImportedBody \
AddImplicitDots \
CalcMethods \
CheckFuncStack \
NewFuncType \
NewMethodType \
NewStructType \
TypecheckAllowed \
Temp \
TempAt \
adddot1 \
dotlist \
addmethod \
assignconvfn \
assignop \
autotmpname \
autoexport \
bexport.go \
checkdupfields \
checkembeddedtype \
closurename \
convertop \
declare_typegen \
decldepth \
dlist \
dotpath \
expand0 \
expand1 \
expandDecl \
fakeRecvField \
fnpkg \
funcStack \
funcStackEnt \
funcarg \
funcarg2 \
funcargs \
funcargs2 \
globClosgen \
ifacelookdot \
implements \
importalias \
importconst \
importfunc \
importobj \
importsym \
importtype \
importvar \
inimport \
initname \
isptrto \
loadsys \
lookdot0 \
lookdot1 \
makepartialcall \
okfor \
okforlen \
operandType \
slist \
symlink \
tointerface \
typeSet \
typeSet.add \
typeSetEntry \
typecheckExprSwitch \
typecheckTypeSwitch \
typecheckpartialcall \
typecheckrange \
typecheckrangeExpr \
typecheckselect \
typecheckswitch \
vargen \
builtin.go \
builtin_test.go \
const.go \
func.go \
iexport.go \
iimport.go \
mapfile_mmap.go \
syms.go \
target.go \
typecheck.go \
unsafe.go \
universe.go \
cmd/compile/internal/typecheck
'
rm gen.go types.go types_acc.go
sed -i '' 's/package gc/package typecheck/' mapfile_read.go mkbuiltin.go
mv mapfile_read.go ../typecheck # not part of default build
mv mkbuiltin.go ../typecheck # package main helper
mv builtin ../typecheck
cd ../typecheck
mv dcl.go dcl1.go
mv typecheck.go typecheck1.go
mv universe.go universe1.go
rf '
# Sweep some small files into larger ones.
# "mv sym... file1.go file.go" (after the mv file1.go file.go above)
# lets us insert sym... at the top of file.go.
mv okfor okforeq universe1.go universe.go
mv DeclContext vargen dcl1.go Temp TempAt autotmpname NewMethodType dcl.go
mv InitTodoFunc inimport decldepth TypecheckAllowed typecheck1.go typecheck.go
mv inl.go closure.go func.go
mv range.go select.go swt.go stmt.go
mv Lookup loadsys LookupRuntimeFunc LookupRuntimeVar syms.go
mv unsafe.go const.go
mv TypecheckAssignExpr AssignExpr
mv TypecheckExpr Expr
mv TypecheckStmt Stmt
mv TypecheckExprs Exprs
mv TypecheckStmts Stmts
mv TypecheckCall Call
mv TypecheckCallee Callee
mv _typecheck check
mv TypecheckFunc Func
mv TypecheckFuncBody FuncBody
mv TypecheckImports AllImportedBodies
mv TypecheckImportedBody ImportedBody
mv TypecheckInit Init
mv TypecheckPackage Package
'
rm gen.go go.go init.go main.go reflect.go
Change-Id: Iea6a7aaf6407d690670ec58aeb36cc0b280f80b0
Reviewed-on: https://go-review.googlesource.com/c/go/+/279236
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-12-23 00:41:49 -05:00
|
|
|
ret := typecheck.AutoLabel(".ret")
|
2020-12-23 00:02:08 -05:00
|
|
|
fn.Body.Append(ir.NewLabelStmt(base.Pos, ret))
|
|
|
|
|
fn.Body.Append(ir.NewReturnStmt(base.Pos, nil))
|
2020-09-16 10:13:37 -07:00
|
|
|
|
|
|
|
|
// neq:
|
|
|
|
|
// r = false
|
|
|
|
|
// return (or goto ret)
|
2020-12-23 00:02:08 -05:00
|
|
|
fn.Body.Append(ir.NewLabelStmt(base.Pos, neq))
|
2023-02-28 13:27:51 -08:00
|
|
|
fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(base.Pos, false)))
|
2022-04-21 23:26:16 +00:00
|
|
|
if compare.EqCanPanic(t) || anyCall(fn) {
|
2020-09-16 10:13:37 -07:00
|
|
|
// Epilogue is large, so share it with the equal case.
|
2020-12-23 00:02:08 -05:00
|
|
|
fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, ret))
|
2020-09-16 10:13:37 -07:00
|
|
|
} else {
|
|
|
|
|
// Epilogue is small, so don't bother sharing.
|
2020-12-23 00:02:08 -05:00
|
|
|
fn.Body.Append(ir.NewReturnStmt(base.Pos, nil))
|
2016-02-26 14:56:31 -08:00
|
|
|
}
|
2020-09-16 10:13:37 -07:00
|
|
|
// TODO(khr): the epilogue size detection condition above isn't perfect.
|
|
|
|
|
// We should really do a generic CL that shares epilogues across
|
|
|
|
|
// the board. See #24936.
|
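Summarizing the epilogue built above in the same pseudocode style (a sketch; the labels follow the // ret: and // neq: comments in the code):

ret:
	return
neq:
	r = false
	goto ret   // epilogue shared when compare.EqCanPanic(t) or anyCall(fn)
	           // otherwise a plain return is emitted here, since the epilogue is small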
2016-02-26 14:56:31 -08:00
|
|
|
|
2020-11-19 20:49:23 -05:00
|
|
|
if base.Flag.LowerR != 0 {
|
2020-12-23 00:02:08 -05:00
|
|
|
ir.DumpList("geneq body", fn.Body)
|
2016-02-26 14:56:31 -08:00
|
|
|
}
|
|
|
|
|
|
[dev.regabi] cmd/compile: split out package typecheck [generated]
This commit splits the typechecking logic into its own package,
the first of a sequence of CLs to break package gc into more
manageable units.
[git-generate]
cd src/cmd/compile/internal/gc
rf '
# The binary import/export has to be part of typechecking,
# because we load inlined function bodies lazily, but "exporter"
# should not be. Move that out of bexport.go.
mv exporter exporter.markObject exporter.markType export.go
# Use the typechecking helpers, so that the calls left behind
# in package gc do not need access to ctxExpr etc.
ex {
import "cmd/compile/internal/ir"
# TODO(rsc): Should not be necessary.
avoid TypecheckExpr
avoid TypecheckStmt
avoid TypecheckExprs
avoid TypecheckStmts
avoid TypecheckAssignExpr
avoid TypecheckCallee
var n ir.Node
var ns []ir.Node
typecheck(n, ctxExpr) -> TypecheckExpr(n)
typecheck(n, ctxStmt) -> TypecheckStmt(n)
typecheckslice(ns, ctxExpr) -> TypecheckExprs(ns)
typecheckslice(ns, ctxStmt) -> TypecheckStmts(ns)
typecheck(n, ctxExpr|ctxAssign) -> TypecheckAssignExpr(n)
typecheck(n, ctxExpr|ctxCallee) -> TypecheckCallee(n)
}
# Move some typechecking API to typecheck.
mv syslook LookupRuntime
mv substArgTypes SubstArgTypes
mv LookupRuntime SubstArgTypes syms.go
mv conv Conv
mv convnop ConvNop
mv Conv ConvNop typecheck.go
mv colasdefn AssignDefn
mv colasname assignableName
mv Target target.go
mv initname autoexport exportsym dcl.go
mv exportsym Export
# Export API to be called from outside typecheck.
# The ones with "Typecheck" prefixes will be renamed later to drop the prefix.
mv adddot AddImplicitDots
mv assignconv AssignConv
mv expandmeth CalcMethods
mv capturevarscomplete CaptureVarsComplete
mv checkMapKeys CheckMapKeys
mv checkreturn CheckReturn
mv dclcontext DeclContext
mv dclfunc DeclFunc
mv declare Declare
mv dotImportRefs DotImportRefs
mv declImporter DeclImporter
mv variter DeclVars
mv defaultlit DefaultLit
mv evalConst EvalConst
mv expandInline ImportBody
mv finishUniverse declareUniverse
mv funcbody FinishFuncBody
mv funchdr StartFuncBody
mv indexconst IndexConst
mv initTodo InitTodoFunc
mv lookup Lookup
mv resolve Resolve
mv lookupN LookupNum
mv nodAddr NodAddr
mv nodAddrAt NodAddrAt
mv nodnil NodNil
mv origBoolConst OrigBool
mv origConst OrigConst
mv origIntConst OrigInt
mv redeclare Redeclared
mv tostruct NewStructType
mv functype NewFuncType
mv methodfunc NewMethodType
mv structargs NewFuncParams
mv temp Temp
mv tempAt TempAt
mv typecheckok TypecheckAllowed
mv typecheck _typecheck # make room for typecheck pkg
mv typecheckinl TypecheckImportedBody
mv typecheckFunc TypecheckFunc
mv iimport ReadImports
mv iexport WriteExports
mv sysfunc LookupRuntimeFunc
mv sysvar LookupRuntimeVar
# Move function constructors to typecheck.
mv mkdotargslice MakeDotArgs
mv fixVariadicCall FixVariadicCall
mv closureType ClosureType
mv partialCallType PartialCallType
mv capturevars CaptureVars
mv MakeDotArgs FixVariadicCall ClosureType PartialCallType CaptureVars typecheckclosure func.go
mv autolabel AutoLabel
mv AutoLabel syms.go
mv Dlist dlist
mv Symlink symlink
mv \
AssignDefn assignableName \
AssignConv \
CaptureVarsComplete \
DeclContext \
DeclFunc \
DeclImporter \
DeclVars \
Declare \
DotImportRefs \
Export \
InitTodoFunc \
Lookup \
LookupNum \
LookupRuntimeFunc \
LookupRuntimeVar \
NewFuncParams \
NewName \
NodAddr \
NodAddrAt \
NodNil \
Redeclared \
StartFuncBody \
FinishFuncBody \
TypecheckImportedBody \
AddImplicitDots \
CalcMethods \
CheckFuncStack \
NewFuncType \
NewMethodType \
NewStructType \
TypecheckAllowed \
Temp \
TempAt \
adddot1 \
dotlist \
addmethod \
assignconvfn \
assignop \
autotmpname \
autoexport \
bexport.go \
checkdupfields \
checkembeddedtype \
closurename \
convertop \
declare_typegen \
decldepth \
dlist \
dotpath \
expand0 \
expand1 \
expandDecl \
fakeRecvField \
fnpkg \
funcStack \
funcStackEnt \
funcarg \
funcarg2 \
funcargs \
funcargs2 \
globClosgen \
ifacelookdot \
implements \
importalias \
importconst \
importfunc \
importobj \
importsym \
importtype \
importvar \
inimport \
initname \
isptrto \
loadsys \
lookdot0 \
lookdot1 \
makepartialcall \
okfor \
okforlen \
operandType \
slist \
symlink \
tointerface \
typeSet \
typeSet.add \
typeSetEntry \
typecheckExprSwitch \
typecheckTypeSwitch \
typecheckpartialcall \
typecheckrange \
typecheckrangeExpr \
typecheckselect \
typecheckswitch \
vargen \
builtin.go \
builtin_test.go \
const.go \
func.go \
iexport.go \
iimport.go \
mapfile_mmap.go \
syms.go \
target.go \
typecheck.go \
unsafe.go \
universe.go \
cmd/compile/internal/typecheck
'
rm gen.go types.go types_acc.go
sed -i '' 's/package gc/package typecheck/' mapfile_read.go mkbuiltin.go
mv mapfile_read.go ../typecheck # not part of default build
mv mkbuiltin.go ../typecheck # package main helper
mv builtin ../typecheck
cd ../typecheck
mv dcl.go dcl1.go
mv typecheck.go typecheck1.go
mv universe.go universe1.go
rf '
# Sweep some small files into larger ones.
# "mv sym... file1.go file.go" (after the mv file1.go file.go above)
# lets us insert sym... at the top of file.go.
mv okfor okforeq universe1.go universe.go
mv DeclContext vargen dcl1.go Temp TempAt autotmpname NewMethodType dcl.go
mv InitTodoFunc inimport decldepth TypecheckAllowed typecheck1.go typecheck.go
mv inl.go closure.go func.go
mv range.go select.go swt.go stmt.go
mv Lookup loadsys LookupRuntimeFunc LookupRuntimeVar syms.go
mv unsafe.go const.go
mv TypecheckAssignExpr AssignExpr
mv TypecheckExpr Expr
mv TypecheckStmt Stmt
mv TypecheckExprs Exprs
mv TypecheckStmts Stmts
mv TypecheckCall Call
mv TypecheckCallee Callee
mv _typecheck check
mv TypecheckFunc Func
mv TypecheckFuncBody FuncBody
mv TypecheckImports AllImportedBodies
mv TypecheckImportedBody ImportedBody
mv TypecheckInit Init
mv TypecheckPackage Package
'
rm gen.go go.go init.go main.go reflect.go
Change-Id: Iea6a7aaf6407d690670ec58aeb36cc0b280f80b0
Reviewed-on: https://go-review.googlesource.com/c/go/+/279236
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-12-23 00:41:49 -05:00
|
|
|
typecheck.FinishFuncBody()
|
2018-04-18 23:22:26 -07:00
|
|
|
|
2020-11-28 07:31:18 -05:00
|
|
|
fn.SetDupok(true)
|
2018-04-18 23:22:26 -07:00
|
|
|
|
2022-09-28 12:30:46 +07:00
|
|
|
ir.WithFunc(fn, func() {
|
|
|
|
|
typecheck.Stmts(fn.Body)
|
|
|
|
|
})
|
2018-04-18 23:22:26 -07:00
|
|
|
|
2016-02-26 14:56:31 -08:00
|
|
|
// Disable checknils while compiling this code.
|
|
|
|
|
// We are comparing a struct or an array,
|
|
|
|
|
// neither of which can be nil, and our comparisons
|
|
|
|
|
// are shallow.
|
2020-11-28 07:31:18 -05:00
|
|
|
fn.SetNilCheckDisabled(true)
|
2022-09-28 10:20:25 +07:00
|
|
|
return fn
|
2016-02-26 14:56:31 -08:00
|
|
|
}
|
|
|
|
|
|
2022-09-28 12:30:46 +07:00
|
|
|
// EqFor returns an ONAME node representing type t's equal function, and a boolean
|
|
|
|
|
// that indicates whether a length needs to be passed when calling the function.
|
|
|
|
|
func EqFor(t *types.Type) (ir.Node, bool) {
|
|
|
|
|
switch a, _ := types.AlgType(t); a {
|
|
|
|
|
case types.AMEM:
|
2023-08-20 16:14:50 -07:00
|
|
|
return typecheck.LookupRuntime("memequal", t, t), true
|
2022-09-28 12:30:46 +07:00
|
|
|
case types.ASPECIAL:
|
|
|
|
|
fn := eqFunc(t)
|
|
|
|
|
return fn.Nname, false
|
|
|
|
|
}
|
|
|
|
|
base.Fatalf("EqFor %v", t)
|
|
|
|
|
return nil, false
|
|
|
|
|
}
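A sketch of the calling convention EqFor implies (np, nq, and the call construction are hypothetical; the code above only fixes the contract that the AMEM helper takes an extra byte-count argument):

fn, needsLength := EqFor(t)
call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
call.Args.Append(np, nq) // pointers to the two values being compared
if needsLength {
	// memequal variants compare a fixed number of bytes.
	call.Args.Append(ir.NewInt(base.Pos, t.Size()))
}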
|
|
|
|
|
|
[dev.regabi] cmd/compile: rename ir.Find to ir.Any and update uses
ir.Find is called "any" in C#, Dart, Haskell, Python, R, Ruby, and Rust,
and "any_of" in C++, "anyMatch" in Java, "some" in JavaScript,
"exists in OCaml, and "existsb" in Coq.
(Thanks to Matthew Dempsky for the research.)
This CL changes Find to Any to use the mostly standard name.
It also updates wrapper helpers to use the any terminology:
hasCall -> anyCall
hasCallOrChan -> anyCallOrChan
hasSideEffects -> anySideEffects
Unchanged are "hasNamedResults", "hasUniquePos", and "hasDefaultCase",
which are all about a single node, not any node in the IR tree.
I also renamed hasFall to endsInFallthrough, since its semantics are
neither that of "any" nor that of the remaining "has" functions.
So the new terminology helps separate different kinds of predicates nicely.
Change-Id: I9bb3c9ebf060a30447224be09a5c34ad5244ea0d
Reviewed-on: https://go-review.googlesource.com/c/go/+/278912
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-12-16 10:53:20 -05:00
|
|
|
func anyCall(fn *ir.Func) bool {
|
|
|
|
|
return ir.Any(fn, func(n ir.Node) bool {
|
2020-12-12 18:50:21 -05:00
|
|
|
// TODO(rsc): No methods?
|
|
|
|
|
op := n.Op()
|
|
|
|
|
return op == ir.OCALL || op == ir.OCALLFUNC
|
2020-12-02 20:18:47 -05:00
|
|
|
})
|
2020-09-16 10:13:37 -07:00
|
|
|
}
|
|
|
|
|
|
2020-12-23 00:55:38 -05:00
|
|
|
func hashmem(t *types.Type) ir.Node {
|
2023-08-20 16:14:50 -07:00
|
|
|
return typecheck.LookupRuntime("memhash", t)
|
2020-12-23 00:55:38 -05:00
|
|
|
}
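hashmem's result is consumed the same way; a hypothetical sketch, matching runtime.memhash's (pointer, seed, size) parameter order (ptr, seed, and size are placeholders supplied by the caller):

call := ir.NewCallExpr(base.Pos, ir.OCALL, hashmem(t), nil)
call.Args.Append(ptr, seed, ir.NewInt(base.Pos, size))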
|