[dev.typeparams] merge dev.regabi 41f3af9d04 into dev.typeparams

This brings in the new ir.Node interface, replacing *gc.Node.

Change-Id: I82c623655eee08d77d623babf22ec4d91f9aa3cd
Russ Cox 2020-11-25 12:44:11 -05:00
commit 5c2e14872c
151 changed files with 15197 additions and 15289 deletions
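
As background for reading the diffs below: the heart of this merge is that compiler passes now manipulate nodes through the ir.Node interface and its accessor methods (n.Op(), n.Left(), n.Body(), n.SetType(...)) instead of reaching into *gc.Node struct fields (n.Op, n.Left, n.Nbody, n.Type). A minimal, self-contained sketch of the pattern; the real interface in cmd/compile/internal/ir is far larger, and the method set here is illustrative only:

package main

import "fmt"

// Op is a stand-in for ir.Op, the node operation code.
type Op int

// Node is a tiny stand-in for the ir.Node interface: callers go through
// accessor methods rather than concrete struct fields.
type Node interface {
	Op() Op
	Left() Node
	SetLeft(Node)
}

// node is one concrete implementation, much as the unexported *ir.node is
// in the compiler (see the knownFormats entry "*cmd/compile/internal/ir.node").
type node struct {
	op   Op
	left Node
}

func (n *node) Op() Op         { return n.op }
func (n *node) Left() Node     { return n.left }
func (n *node) SetLeft(x Node) { n.left = x }

func main() {
	n := &node{op: 1}
	n.SetLeft(&node{op: 2})
	fmt.Println(n.Op(), n.Left().Op()) // 1 2
}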


@@ -455,7 +455,7 @@ environmental variable is set accordingly.</p>
each collection, summarizing the amount of memory collected
and the length of the pause.</li>
<li>GODEBUG=inittrace=1 prints a summary of execution time and memory allocation
information for completed package initilization work.</li>
information for completed package initialization work.</li>
<li>GODEBUG=schedtrace=X prints scheduling events every X milliseconds.</li>
</ul>


@@ -501,6 +501,10 @@ Do not send CLs removing the interior tags from such phrases.
<p><!-- CL 261917 -->
<a href="/pkg/syscall/#SysProcAttr"><code>SysProcAttr</code></a> on Windows has a new NoInheritHandles field that disables inheriting handles when creating a new process.
</p>
<p><!-- CL 269761, golang.org/issue/42584 -->
<a href="/pkg/syscall/#DLLError"><code>DLLError</code></a> on Windows now has an Unwrap function for unwrapping its underlying error.
</p>
</dd>
</dl><!-- syscall -->
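
A hedged sketch of what the new DLLError Unwrap method enables (Windows-only; which Errno a missing DLL surfaces is system-dependent, so the concrete error code below is an assumption, not a guarantee):

//go:build windows

package main

import (
	"errors"
	"fmt"
	"syscall"
)

func main() {
	// LoadDLL fails with a *syscall.DLLError when the library is absent.
	_, err := syscall.LoadDLL("no-such-library.dll")

	var dllErr *syscall.DLLError
	fmt.Println(errors.As(err, &dllErr)) // true

	// With the new Unwrap method, errors.Is can see through DLLError
	// to the underlying Errno (assuming the system reports this code).
	fmt.Println(errors.Is(err, syscall.ERROR_FILE_NOT_FOUND))
}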


@@ -186,7 +186,7 @@ func (p *Package) writeDefs() {
panic(fmt.Errorf("invalid var kind %q", n.Kind))
}
if *gccgo {
fmt.Fprintf(fc, `extern void *%s __asm__("%s.%s");`, n.Mangle, gccgoSymbolPrefix, n.Mangle)
fmt.Fprintf(fc, `extern void *%s __asm__("%s.%s");`, n.Mangle, gccgoSymbolPrefix, gccgoToSymbol(n.Mangle))
fmt.Fprintf(&gccgoInit, "\t%s = &%s;\n", n.Mangle, n.C)
fmt.Fprintf(fc, "\n")
}
@@ -1148,7 +1148,7 @@ func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) {
// will not be able to link against it from the C
// code.
goName := "Cgoexp_" + exp.ExpName
fmt.Fprintf(fgcc, `extern %s %s %s __asm__("%s.%s");`, cRet, goName, cParams, gccgoSymbolPrefix, goName)
fmt.Fprintf(fgcc, `extern %s %s %s __asm__("%s.%s");`, cRet, goName, cParams, gccgoSymbolPrefix, gccgoToSymbol(goName))
fmt.Fprint(fgcc, "\n")
fmt.Fprint(fgcc, "\nCGO_NO_SANITIZE_THREAD\n")
@@ -1182,7 +1182,7 @@ func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) {
fmt.Fprint(fgcc, "}\n")
// Dummy declaration for _cgo_main.c
fmt.Fprintf(fm, `char %s[1] __asm__("%s.%s");`, goName, gccgoSymbolPrefix, goName)
fmt.Fprintf(fm, `char %s[1] __asm__("%s.%s");`, goName, gccgoSymbolPrefix, gccgoToSymbol(goName))
fmt.Fprint(fm, "\n")
// For gccgo we use a wrapper function in Go, in order
@@ -1266,9 +1266,8 @@ func (p *Package) writeExportHeader(fgcch io.Writer) {
fmt.Fprintf(fgcch, "%s\n", p.gccExportHeaderProlog())
}
// gccgoPkgpathToSymbol converts a package path to a mangled packagepath
// symbol.
func gccgoPkgpathToSymbol(ppath string) string {
// gccgoToSymbol converts a name to a mangled symbol for gccgo.
func gccgoToSymbol(ppath string) string {
if gccgoMangler == nil {
var err error
cmd := os.Getenv("GCCGO")
@@ -1293,12 +1292,12 @@ func (p *Package) gccgoSymbolPrefix() string {
}
if *gccgopkgpath != "" {
return gccgoPkgpathToSymbol(*gccgopkgpath)
return gccgoToSymbol(*gccgopkgpath)
}
if *gccgoprefix == "" && p.PackageName == "main" {
return "main"
}
prefix := gccgoPkgpathToSymbol(*gccgoprefix)
prefix := gccgoToSymbol(*gccgoprefix)
if prefix == "" {
prefix = "go"
}
@@ -1687,8 +1686,12 @@ void _cgoPREFIX_Cfunc__Cmalloc(void *v) {
`
func (p *Package) cPrologGccgo() string {
return strings.Replace(strings.Replace(cPrologGccgo, "PREFIX", cPrefix, -1),
"GCCGOSYMBOLPREF", p.gccgoSymbolPrefix(), -1)
r := strings.NewReplacer(
"PREFIX", cPrefix,
"GCCGOSYMBOLPREF", p.gccgoSymbolPrefix(),
"_cgoCheckPointer", gccgoToSymbol("_cgoCheckPointer"),
"_cgoCheckResult", gccgoToSymbol("_cgoCheckResult"))
return r.Replace(cPrologGccgo)
}
const cPrologGccgo = `
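
The cPrologGccgo change above swaps two chained strings.Replace calls for a single strings.NewReplacer that also routes _cgoCheckPointer and _cgoCheckResult through gccgoToSymbol. A toy model of that step (the mangle function and its "go_0" prefix are invented for illustration; the real gccgoToSymbol shells out to the gccgo symbol mangler):

package main

import (
	"fmt"
	"strings"
)

// mangle stands in for gccgoToSymbol; this is not the real mangling scheme.
func mangle(name string) string { return "go_0" + name }

func main() {
	const prolog = "void PREFIX_check(void *p) { _cgoCheckPointer(p); }"
	r := strings.NewReplacer(
		"PREFIX", "_cgo_1a2b",
		"_cgoCheckPointer", mangle("_cgoCheckPointer"),
	)
	fmt.Println(r.Replace(prolog))
	// void _cgo_1a2b_check(void *p) { go_0_cgoCheckPointer(p); }
}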


@@ -22,16 +22,7 @@ package main_test
var knownFormats = map[string]string{
"*bytes.Buffer %s": "",
"*cmd/compile/internal/gc.EscLocation %v": "",
"*cmd/compile/internal/gc.Mpflt %v": "",
"*cmd/compile/internal/gc.Mpint %v": "",
"*cmd/compile/internal/gc.Node %#v": "",
"*cmd/compile/internal/gc.Node %+S": "",
"*cmd/compile/internal/gc.Node %+v": "",
"*cmd/compile/internal/gc.Node %L": "",
"*cmd/compile/internal/gc.Node %S": "",
"*cmd/compile/internal/gc.Node %j": "",
"*cmd/compile/internal/gc.Node %p": "",
"*cmd/compile/internal/gc.Node %v": "",
"*cmd/compile/internal/ir.node %v": "",
"*cmd/compile/internal/ssa.Block %s": "",
"*cmd/compile/internal/ssa.Block %v": "",
"*cmd/compile/internal/ssa.Func %s": "",
@@ -54,7 +45,6 @@ var knownFormats = map[string]string{
"*cmd/compile/internal/types.Sym %v": "",
"*cmd/compile/internal/types.Type %#L": "",
"*cmd/compile/internal/types.Type %#v": "",
"*cmd/compile/internal/types.Type %+v": "",
"*cmd/compile/internal/types.Type %-S": "",
"*cmd/compile/internal/types.Type %0S": "",
"*cmd/compile/internal/types.Type %L": "",
@@ -84,9 +74,7 @@ var knownFormats = map[string]string{
"*cmd/internal/obj.Addr %v": "",
"*cmd/internal/obj.LSym %v": "",
"*math/big.Float %f": "",
"*math/big.Int %#x": "",
"*math/big.Int %s": "",
"*math/big.Int %v": "",
"[16]byte %x": "",
"[]*cmd/compile/internal/ssa.Block %v": "",
"[]*cmd/compile/internal/ssa.Value %v": "",
@@ -110,27 +98,28 @@ var knownFormats = map[string]string{
"byte %q": "",
"byte %v": "",
"cmd/compile/internal/arm.shift %d": "",
"cmd/compile/internal/gc.Class %d": "",
"cmd/compile/internal/gc.Class %s": "",
"cmd/compile/internal/gc.Class %v": "",
"cmd/compile/internal/gc.Ctype %d": "",
"cmd/compile/internal/gc.Ctype %v": "",
"cmd/compile/internal/gc.Nodes %#v": "",
"cmd/compile/internal/gc.Nodes %+v": "",
"cmd/compile/internal/gc.Nodes %.v": "",
"cmd/compile/internal/gc.Nodes %v": "",
"cmd/compile/internal/gc.Op %#v": "",
"cmd/compile/internal/gc.Op %v": "",
"cmd/compile/internal/gc.Val %#v": "",
"cmd/compile/internal/gc.Val %T": "",
"cmd/compile/internal/gc.Val %v": "",
"cmd/compile/internal/gc.fmtMode %d": "",
"cmd/compile/internal/gc.initKind %d": "",
"cmd/compile/internal/gc.itag %v": "",
"cmd/compile/internal/importer.itag %v": "",
"cmd/compile/internal/ir.Class %d": "",
"cmd/compile/internal/ir.Class %v": "",
"cmd/compile/internal/ir.FmtMode %d": "",
"cmd/compile/internal/ir.Node %#v": "",
"cmd/compile/internal/ir.Node %+S": "",
"cmd/compile/internal/ir.Node %+v": "",
"cmd/compile/internal/ir.Node %L": "",
"cmd/compile/internal/ir.Node %S": "",
"cmd/compile/internal/ir.Node %j": "",
"cmd/compile/internal/ir.Node %p": "",
"cmd/compile/internal/ir.Node %v": "",
"cmd/compile/internal/ir.Nodes %#v": "",
"cmd/compile/internal/ir.Nodes %+v": "",
"cmd/compile/internal/ir.Nodes %.v": "",
"cmd/compile/internal/ir.Nodes %v": "",
"cmd/compile/internal/ir.Op %#v": "",
"cmd/compile/internal/ir.Op %v": "",
"cmd/compile/internal/ssa.BranchPrediction %d": "",
"cmd/compile/internal/ssa.Edge %v": "",
"cmd/compile/internal/ssa.GCNode %v": "",
"cmd/compile/internal/ssa.ID %d": "",
"cmd/compile/internal/ssa.ID %v": "",
"cmd/compile/internal/ssa.LocalSlot %s": "",
@@ -179,9 +168,11 @@ var knownFormats = map[string]string{
"error %v": "",
"float64 %.2f": "",
"float64 %.3f": "",
"float64 %.6g": "",
"float64 %g": "",
"go/constant.Kind %v": "",
"go/constant.Value %#v": "",
"go/constant.Value %s": "",
"go/constant.Value %v": "",
"int %#x": "",
"int %-12d": "",
"int %-6d": "",
@@ -199,7 +190,6 @@ var knownFormats = map[string]string{
"int32 %v": "",
"int32 %x": "",
"int64 %#x": "",
"int64 %+d": "",
"int64 %-10d": "",
"int64 %.5d": "",
"int64 %d": "",
@@ -214,13 +204,14 @@ var knownFormats = map[string]string{
"interface{} %q": "",
"interface{} %s": "",
"interface{} %v": "",
"map[*cmd/compile/internal/gc.Node]*cmd/compile/internal/ssa.Value %v": "",
"map[*cmd/compile/internal/gc.Node][]*cmd/compile/internal/gc.Node %v": "",
"map[*cmd/compile/internal/types2.TypeParam]cmd/compile/internal/types2.Type %s": "",
"map[cmd/compile/internal/ir.Node]*cmd/compile/internal/ssa.Value %v": "",
"map[cmd/compile/internal/ir.Node][]cmd/compile/internal/ir.Node %v": "",
"map[cmd/compile/internal/ssa.ID]uint32 %v": "",
"map[int64]uint32 %v": "",
"math/big.Accuracy %s": "",
"reflect.Type %s": "",
"reflect.Type %v": "",
"rune %#U": "",
"rune %c": "",
"rune %q": "",


@@ -5,6 +5,7 @@
package amd64
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/x86"
@@ -64,7 +65,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Pr
if cnt%int64(gc.Widthreg) != 0 {
// should only happen with nacl
if cnt%int64(gc.Widthptr) != 0 {
gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
}
if *state&ax == 0 {
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)


@@ -8,6 +8,7 @@ import (
"fmt"
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
@@ -975,7 +976,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg()
// See the comments in cmd/internal/obj/x86/obj6.go
// near CanUse1InsnTLS for a detailed explanation of these instructions.
if x86.CanUse1InsnTLS(gc.Ctxt) {
if x86.CanUse1InsnTLS(base.Ctxt) {
// MOVQ (TLS), r
p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_MEM
@@ -1017,7 +1018,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
p := s.Prog(mov)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
p.From.Offset = -base.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -1164,8 +1165,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpAMD64MOVBatomicload, ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
p := s.Prog(v.Op.Asm())


@@ -9,7 +9,9 @@ import (
"math"
"math/bits"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
@@ -544,7 +546,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
case *gc.Node:
case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
@@ -741,8 +743,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpARMLoweredZero:
// MOVW.P Rarg2, 4(R1)
@@ -849,7 +851,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = -gc.Ctxt.FixedFrameSize()
p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()


@@ -7,7 +7,9 @@ package arm64
import (
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
@@ -394,7 +396,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
case *gc.Node:
case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
@@ -1038,8 +1040,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpARM64Equal,
ssa.OpARM64NotEqual,
@@ -1068,7 +1070,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm64.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = -gc.Ctxt.FixedFrameSize()
p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()


@@ -0,0 +1,28 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package base
import (
"os"
"cmd/internal/obj"
)
var Ctxt *obj.Link
var atExitFuncs []func()
func AtExit(f func()) {
atExitFuncs = append(atExitFuncs, f)
}
func Exit(code int) {
for i := len(atExitFuncs) - 1; i >= 0; i-- {
f := atExitFuncs[i]
atExitFuncs = atExitFuncs[:i]
f()
}
os.Exit(code)
}
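
A note on Exit above: handlers run in LIFO order, and each is sliced off atExitFuncs before it is called, so a handler that itself triggers Exit cannot re-run handlers that already fired. A runnable model, with os.Exit replaced by a plain return so the demo prints:

package main

import "fmt"

var atExitFuncs []func()

func AtExit(f func()) { atExitFuncs = append(atExitFuncs, f) }

// runAtExit mirrors base.Exit minus the final os.Exit call.
func runAtExit() {
	for i := len(atExitFuncs) - 1; i >= 0; i-- {
		f := atExitFuncs[i]
		atExitFuncs = atExitFuncs[:i] // remove before running
		f()
	}
}

func main() {
	AtExit(func() { fmt.Println("registered first, runs last") })
	AtExit(func() { fmt.Println("registered last, runs first") })
	runAtExit()
}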


@@ -0,0 +1,194 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Debug arguments, set by -d flag.
package base
import (
"fmt"
"log"
"os"
"reflect"
"strconv"
"strings"
"cmd/internal/objabi"
)
// Debug holds the parsed debugging configuration values.
var Debug = DebugFlags{
Fieldtrack: &objabi.Fieldtrack_enabled,
}
// DebugFlags defines the debugging configuration values (see var Debug).
// Each struct field is a different value, named for the lower-case of the field name.
// Each field must be an int or string and must have a `help` struct tag.
//
// The -d option takes a comma-separated list of settings.
// Each setting is name=value; for ints, name is short for name=1.
type DebugFlags struct {
Append int `help:"print information about append compilation"`
Checkptr int `help:"instrument unsafe pointer conversions"`
Closure int `help:"print information about closure compilation"`
CompileLater int `help:"compile functions as late as possible"`
DclStack int `help:"run internal dclstack check"`
Defer int `help:"print information about defer compilation"`
DisableNil int `help:"disable nil checks"`
DumpPtrs int `help:"show Node pointers values in dump output"`
DwarfInl int `help:"print information about DWARF inlined function creation"`
Export int `help:"print export data"`
Fieldtrack *int `help:"enable field tracking"`
GCProg int `help:"print dump of GC programs"`
Libfuzzer int `help:"enable coverage instrumentation for libfuzzer"`
LocationLists int `help:"print information about DWARF location list creation"`
Nil int `help:"print information about nil checks"`
PCTab string `help:"print named pc-value table"`
Panic int `help:"show all compiler panics"`
Slice int `help:"print information about slice compilation"`
SoftFloat int `help:"force compiler to emit soft-float code"`
TypeAssert int `help:"print information about type assertion inlining"`
TypecheckInl int `help:"eager typechecking of inline function bodies"`
WB int `help:"print information about write barriers"`
any bool // set when any of the values have been set
}
// Any reports whether any of the debug flags have been set.
func (d *DebugFlags) Any() bool { return d.any }
type debugField struct {
name string
help string
val interface{} // *int or *string
}
var debugTab []debugField
func init() {
v := reflect.ValueOf(&Debug).Elem()
t := v.Type()
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Name == "any" {
continue
}
name := strings.ToLower(f.Name)
help := f.Tag.Get("help")
if help == "" {
panic(fmt.Sprintf("base.Debug.%s is missing help text", f.Name))
}
ptr := v.Field(i).Addr().Interface()
switch ptr.(type) {
default:
panic(fmt.Sprintf("base.Debug.%s has invalid type %v (must be int or string)", f.Name, f.Type))
case *int, *string:
// ok
case **int:
ptr = *ptr.(**int) // record the *int itself
}
debugTab = append(debugTab, debugField{name, help, ptr})
}
}
// DebugSSA is called to set a -d ssa/... option.
// If nil, those options are reported as invalid options.
// If DebugSSA returns a non-empty string, that text is reported as a compiler error.
var DebugSSA func(phase, flag string, val int, valString string) string
// parseDebug parses the -d debug string argument.
func parseDebug(debugstr string) {
// parse -d argument
if debugstr == "" {
return
}
Debug.any = true
Split:
for _, name := range strings.Split(debugstr, ",") {
if name == "" {
continue
}
// display help about the -d option itself and quit
if name == "help" {
fmt.Print(debugHelpHeader)
maxLen := len("ssa/help")
for _, t := range debugTab {
if len(t.name) > maxLen {
maxLen = len(t.name)
}
}
for _, t := range debugTab {
fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help)
}
// ssa options have their own help
fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging")
fmt.Print(debugHelpFooter)
os.Exit(0)
}
val, valstring, haveInt := 1, "", true
if i := strings.IndexAny(name, "=:"); i >= 0 {
var err error
name, valstring = name[:i], name[i+1:]
val, err = strconv.Atoi(valstring)
if err != nil {
val, haveInt = 1, false
}
}
for _, t := range debugTab {
if t.name != name {
continue
}
switch vp := t.val.(type) {
case nil:
// Ignore
case *string:
*vp = valstring
case *int:
if !haveInt {
log.Fatalf("invalid debug value %v", name)
}
*vp = val
default:
panic("bad debugtab type")
}
continue Split
}
// special case for ssa for now
if DebugSSA != nil && strings.HasPrefix(name, "ssa/") {
// expect form ssa/phase/flag
// e.g. -d=ssa/generic_cse/time
// _ in phase name also matches space
phase := name[4:]
flag := "debug" // default flag is debug
if i := strings.Index(phase, "/"); i >= 0 {
flag = phase[i+1:]
phase = phase[:i]
}
err := DebugSSA(phase, flag, val, valstring)
if err != "" {
log.Fatalf(err)
}
continue Split
}
log.Fatalf("unknown debug key -d %s\n", name)
}
}
const debugHelpHeader = `usage: -d arg[,arg]* and arg is <key>[=<value>]
<key> is one of:
`
const debugHelpFooter = `
<value> is key-specific.
Key "checkptr" supports values:
"0": instrumentation disabled
"1": conversions involving unsafe.Pointer are instrumented
"2": conversions to unsafe.Pointer force heap allocation
Key "pctab" supports values:
"pctospadj", "pctofile", "pctoline", "pctoinline", "pctopcdata"
`
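
To make the -d grammar above concrete, here is a simplified model of parseDebug: settings are comma-separated, each is name, name=value, or name:value, and a bare name means 1. The ssa/... settings take a separate path through DebugSSA and are omitted here:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parse models the non-ssa half of parseDebug.
func parse(s string) map[string]int {
	out := map[string]int{}
	for _, name := range strings.Split(s, ",") {
		if name == "" {
			continue
		}
		val := 1
		if i := strings.IndexAny(name, "=:"); i >= 0 {
			// The real code rejects non-integer values for int settings.
			val, _ = strconv.Atoi(name[i+1:])
			name = name[:i]
		}
		out[name] = val
	}
	return out
}

func main() {
	fmt.Println(parse("checkptr=2,nil,dclstack"))
	// map[checkptr:2 dclstack:1 nil:1]
}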


@@ -0,0 +1,454 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package base
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"reflect"
"runtime"
"strings"
"cmd/internal/objabi"
"cmd/internal/sys"
)
func usage() {
fmt.Fprintf(os.Stderr, "usage: compile [options] file.go...\n")
objabi.Flagprint(os.Stderr)
Exit(2)
}
// Flag holds the parsed command-line flags.
// See ParseFlag for non-zero defaults.
var Flag CmdFlags
// A CountFlag is a counting integer flag.
// It accepts -name=value to set the value directly,
// but it also accepts -name with no =value to increment the count.
type CountFlag int
// CmdFlags defines the command-line flags (see var Flag).
// Each struct field is a different flag, by default named for the lower-case of the field name.
// If the flag name is a single letter, the default flag name is left upper-case.
// If the flag name is "Lower" followed by a single letter, the default flag name is the lower-case of the last letter.
//
// If this default flag name can't be made right, the `flag` struct tag can be used to replace it,
// but this should be done only in exceptional circumstances: it helps everyone if the flag name
// is obvious from the field name when the flag is used elsewhere in the compiler sources.
// The `flag:"-"` struct tag makes a field invisible to the flag logic and should also be used sparingly.
//
// Each field must have a `help` struct tag giving the flag help message.
//
// The allowed field types are bool, int, string, pointers to those (for values stored elsewhere),
// CountFlag (for a counting flag), and func(string) (for a flag that uses special code for parsing).
type CmdFlags struct {
// Single letters
B CountFlag "help:\"disable bounds checking\""
C CountFlag "help:\"disable printing of columns in error messages\""
D string "help:\"set relative `path` for local imports\""
E CountFlag "help:\"debug symbol export\""
G CountFlag "help:\"accept generic code\""
I func(string) "help:\"add `directory` to import search path\""
K CountFlag "help:\"debug missing line numbers\""
L CountFlag "help:\"show full file names in error messages\""
N CountFlag "help:\"disable optimizations\""
S CountFlag "help:\"print assembly listing\""
// V is added by objabi.AddVersionFlag
W CountFlag "help:\"debug parse tree after type checking\""
LowerC int "help:\"concurrency during compilation (1 means no concurrency)\""
LowerD func(string) "help:\"enable debugging settings; try -d help\""
LowerE CountFlag "help:\"no limit on number of errors reported\""
LowerH CountFlag "help:\"halt on error\""
LowerJ CountFlag "help:\"debug runtime-initialized variables\""
LowerL CountFlag "help:\"disable inlining\""
LowerM CountFlag "help:\"print optimization decisions\""
LowerO string "help:\"write output to `file`\""
LowerP *string "help:\"set expected package import `path`\"" // &Ctxt.Pkgpath, set below
LowerR CountFlag "help:\"debug generated wrappers\""
LowerT bool "help:\"enable tracing for debugging the compiler\""
LowerW CountFlag "help:\"debug type checking\""
LowerV *bool "help:\"increase debug verbosity\""
// Special characters
Percent int "flag:\"%\" help:\"debug non-static initializers\""
CompilingRuntime bool "flag:\"+\" help:\"compiling runtime\""
// Longer names
AsmHdr string "help:\"write assembly header to `file`\""
Bench string "help:\"append benchmark times to `file`\""
BlockProfile string "help:\"write block profile to `file`\""
BuildID string "help:\"record `id` as the build id in the export metadata\""
CPUProfile string "help:\"write cpu profile to `file`\""
Complete bool "help:\"compiling complete package (no C or assembly)\""
Dwarf bool "help:\"generate DWARF symbols\""
DwarfBASEntries *bool "help:\"use base address selection entries in DWARF\"" // &Ctxt.UseBASEntries, set below
DwarfLocationLists *bool "help:\"add location lists to DWARF in optimized mode\"" // &Ctxt.Flag_locationlists, set below
Dynlink *bool "help:\"support references to Go symbols defined in other shared libraries\"" // &Ctxt.Flag_dynlink, set below
EmbedCfg func(string) "help:\"read go:embed configuration from `file`\""
GenDwarfInl int "help:\"generate DWARF inline info records\"" // 0=disabled, 1=funcs, 2=funcs+formals/locals
GoVersion string "help:\"required version of the runtime\""
ImportCfg func(string) "help:\"read import configuration from `file`\""
ImportMap func(string) "help:\"add `definition` of the form source=actual to import map\""
InstallSuffix string "help:\"set pkg directory `suffix`\""
JSON string "help:\"version,file for JSON compiler/optimizer detail output\""
Lang string "help:\"Go language version source code expects\""
LinkObj string "help:\"write linker-specific object to `file`\""
LinkShared *bool "help:\"generate code that will be linked against Go shared libraries\"" // &Ctxt.Flag_linkshared, set below
Live CountFlag "help:\"debug liveness analysis\""
MSan bool "help:\"build code compatible with C/C++ memory sanitizer\""
MemProfile string "help:\"write memory profile to `file`\""
MemProfileRate int64 "help:\"set runtime.MemProfileRate to `rate`\""
MutexProfile string "help:\"write mutex profile to `file`\""
NoLocalImports bool "help:\"reject local (relative) imports\""
Pack bool "help:\"write to file.a instead of file.o\""
Race bool "help:\"enable race detector\""
Shared *bool "help:\"generate code that can be linked into a shared library\"" // &Ctxt.Flag_shared, set below
SmallFrames bool "help:\"reduce the size limit for stack allocated objects\"" // small stacks, to diagnose GC latency; see golang.org/issue/27732
Spectre string "help:\"enable spectre mitigations in `list` (all, index, ret)\""
Std bool "help:\"compiling standard library\""
SymABIs string "help:\"read symbol ABIs from `file`\""
TraceProfile string "help:\"write an execution trace to `file`\""
TrimPath string "help:\"remove `prefix` from recorded source file paths\""
WB bool "help:\"enable write barrier\"" // TODO: remove
// Configuration derived from flags; not a flag itself.
Cfg struct {
Embed struct { // set by -embedcfg
Patterns map[string][]string
Files map[string]string
}
ImportDirs []string // appended to by -I
ImportMap map[string]string // set by -importmap OR -importcfg
PackageFile map[string]string // set by -importcfg; nil means not in use
SpectreIndex bool // set by -spectre=index or -spectre=all
}
}
// ParseFlags parses the command-line flags into Flag.
func ParseFlags() {
Flag.I = addImportDir
Flag.LowerC = 1
Flag.LowerD = parseDebug
Flag.LowerP = &Ctxt.Pkgpath
Flag.LowerV = &Ctxt.Debugvlog
Flag.Dwarf = objabi.GOARCH != "wasm"
Flag.DwarfBASEntries = &Ctxt.UseBASEntries
Flag.DwarfLocationLists = &Ctxt.Flag_locationlists
*Flag.DwarfLocationLists = true
Flag.Dynlink = &Ctxt.Flag_dynlink
Flag.EmbedCfg = readEmbedCfg
Flag.GenDwarfInl = 2
Flag.ImportCfg = readImportCfg
Flag.ImportMap = addImportMap
Flag.LinkShared = &Ctxt.Flag_linkshared
Flag.Shared = &Ctxt.Flag_shared
Flag.WB = true
Flag.Cfg.ImportMap = make(map[string]string)
objabi.AddVersionFlag() // -V
registerFlags()
objabi.Flagparse(usage)
if Flag.MSan && !sys.MSanSupported(objabi.GOOS, objabi.GOARCH) {
log.Fatalf("%s/%s does not support -msan", objabi.GOOS, objabi.GOARCH)
}
if Flag.Race && !sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) {
log.Fatalf("%s/%s does not support -race", objabi.GOOS, objabi.GOARCH)
}
if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X) {
log.Fatalf("%s/%s does not support -shared", objabi.GOOS, objabi.GOARCH)
}
parseSpectre(Flag.Spectre) // left as string for recordFlags
Ctxt.Flag_shared = Ctxt.Flag_dynlink || Ctxt.Flag_shared
Ctxt.Flag_optimize = Flag.N == 0
Ctxt.Debugasm = int(Flag.S)
if flag.NArg() < 1 {
usage()
}
if Flag.GoVersion != "" && Flag.GoVersion != runtime.Version() {
fmt.Printf("compile: version %q does not match go tool version %q\n", runtime.Version(), Flag.GoVersion)
Exit(2)
}
if Flag.LowerO == "" {
p := flag.Arg(0)
if i := strings.LastIndex(p, "/"); i >= 0 {
p = p[i+1:]
}
if runtime.GOOS == "windows" {
if i := strings.LastIndex(p, `\`); i >= 0 {
p = p[i+1:]
}
}
if i := strings.LastIndex(p, "."); i >= 0 {
p = p[:i]
}
suffix := ".o"
if Flag.Pack {
suffix = ".a"
}
Flag.LowerO = p + suffix
}
if Flag.Race && Flag.MSan {
log.Fatal("cannot use both -race and -msan")
}
if Flag.Race || Flag.MSan {
// -race and -msan imply -d=checkptr for now.
Debug.Checkptr = 1
}
if Flag.CompilingRuntime && Flag.N != 0 {
log.Fatal("cannot disable optimizations while compiling runtime")
}
if Flag.LowerC < 1 {
log.Fatalf("-c must be at least 1, got %d", Flag.LowerC)
}
if Flag.LowerC > 1 && !concurrentBackendAllowed() {
log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args)
}
if Flag.CompilingRuntime {
// Runtime can't use -d=checkptr, at least not yet.
Debug.Checkptr = 0
// Fuzzing the runtime isn't interesting either.
Debug.Libfuzzer = 0
}
// set via a -d flag
Ctxt.Debugpcln = Debug.PCTab
}
// registerFlags adds flag registrations for all the fields in Flag.
// See the comment on type CmdFlags for the rules.
func registerFlags() {
var (
boolType = reflect.TypeOf(bool(false))
intType = reflect.TypeOf(int(0))
stringType = reflect.TypeOf(string(""))
ptrBoolType = reflect.TypeOf(new(bool))
ptrIntType = reflect.TypeOf(new(int))
ptrStringType = reflect.TypeOf(new(string))
countType = reflect.TypeOf(CountFlag(0))
funcType = reflect.TypeOf((func(string))(nil))
)
v := reflect.ValueOf(&Flag).Elem()
t := v.Type()
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Name == "Cfg" {
continue
}
var name string
if len(f.Name) == 1 {
name = f.Name
} else if len(f.Name) == 6 && f.Name[:5] == "Lower" && 'A' <= f.Name[5] && f.Name[5] <= 'Z' {
name = string(rune(f.Name[5] + 'a' - 'A'))
} else {
name = strings.ToLower(f.Name)
}
if tag := f.Tag.Get("flag"); tag != "" {
name = tag
}
help := f.Tag.Get("help")
if help == "" {
panic(fmt.Sprintf("base.Flag.%s is missing help text", f.Name))
}
if k := f.Type.Kind(); (k == reflect.Ptr || k == reflect.Func) && v.Field(i).IsNil() {
panic(fmt.Sprintf("base.Flag.%s is uninitialized %v", f.Name, f.Type))
}
switch f.Type {
case boolType:
p := v.Field(i).Addr().Interface().(*bool)
flag.BoolVar(p, name, *p, help)
case intType:
p := v.Field(i).Addr().Interface().(*int)
flag.IntVar(p, name, *p, help)
case stringType:
p := v.Field(i).Addr().Interface().(*string)
flag.StringVar(p, name, *p, help)
case ptrBoolType:
p := v.Field(i).Interface().(*bool)
flag.BoolVar(p, name, *p, help)
case ptrIntType:
p := v.Field(i).Interface().(*int)
flag.IntVar(p, name, *p, help)
case ptrStringType:
p := v.Field(i).Interface().(*string)
flag.StringVar(p, name, *p, help)
case countType:
p := (*int)(v.Field(i).Addr().Interface().(*CountFlag))
objabi.Flagcount(name, help, p)
case funcType:
f := v.Field(i).Interface().(func(string))
objabi.Flagfn1(name, help, f)
}
}
}
// concurrentFlagOk reports whether the current compiler flags
// are compatible with concurrent compilation.
func concurrentFlagOk() bool {
// TODO(rsc): Many of these are fine. Remove them.
return Flag.Percent == 0 &&
Flag.E == 0 &&
Flag.K == 0 &&
Flag.L == 0 &&
Flag.LowerH == 0 &&
Flag.LowerJ == 0 &&
Flag.LowerM == 0 &&
Flag.LowerR == 0
}
func concurrentBackendAllowed() bool {
if !concurrentFlagOk() {
return false
}
// Debug.S by itself is ok, because all printing occurs
// while writing the object file, and that is non-concurrent.
// Adding Debug_vlog, however, causes Debug.S to also print
// while flushing the plist, which happens concurrently.
if Ctxt.Debugvlog || Debug.Any() || Flag.Live > 0 {
return false
}
// TODO: Test and delete this condition.
if objabi.Fieldtrack_enabled != 0 {
return false
}
// TODO: fix races and enable the following flags
if Ctxt.Flag_shared || Ctxt.Flag_dynlink || Flag.Race {
return false
}
return true
}
func addImportDir(dir string) {
if dir != "" {
Flag.Cfg.ImportDirs = append(Flag.Cfg.ImportDirs, dir)
}
}
func addImportMap(s string) {
if Flag.Cfg.ImportMap == nil {
Flag.Cfg.ImportMap = make(map[string]string)
}
if strings.Count(s, "=") != 1 {
log.Fatal("-importmap argument must be of the form source=actual")
}
i := strings.Index(s, "=")
source, actual := s[:i], s[i+1:]
if source == "" || actual == "" {
log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty")
}
Flag.Cfg.ImportMap[source] = actual
}
func readImportCfg(file string) {
if Flag.Cfg.ImportMap == nil {
Flag.Cfg.ImportMap = make(map[string]string)
}
Flag.Cfg.PackageFile = map[string]string{}
data, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("-importcfg: %v", err)
}
for lineNum, line := range strings.Split(string(data), "\n") {
lineNum++ // 1-based
line = strings.TrimSpace(line)
if line == "" || strings.HasPrefix(line, "#") {
continue
}
var verb, args string
if i := strings.Index(line, " "); i < 0 {
verb = line
} else {
verb, args = line[:i], strings.TrimSpace(line[i+1:])
}
var before, after string
if i := strings.Index(args, "="); i >= 0 {
before, after = args[:i], args[i+1:]
}
switch verb {
default:
log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb)
case "importmap":
if before == "" || after == "" {
log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum)
}
Flag.Cfg.ImportMap[before] = after
case "packagefile":
if before == "" || after == "" {
log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
}
Flag.Cfg.PackageFile[before] = after
}
}
}
func readEmbedCfg(file string) {
data, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("-embedcfg: %v", err)
}
if err := json.Unmarshal(data, &Flag.Cfg.Embed); err != nil {
log.Fatalf("%s: %v", file, err)
}
if Flag.Cfg.Embed.Patterns == nil {
log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
}
if Flag.Cfg.Embed.Files == nil {
log.Fatalf("%s: invalid embedcfg: missing Files", file)
}
}
// parseSpectre parses the spectre configuration from the string s.
func parseSpectre(s string) {
for _, f := range strings.Split(s, ",") {
f = strings.TrimSpace(f)
switch f {
default:
log.Fatalf("unknown setting -spectre=%s", f)
case "":
// nothing
case "all":
Flag.Cfg.SpectreIndex = true
Ctxt.Retpoline = true
case "index":
Flag.Cfg.SpectreIndex = true
case "ret":
Ctxt.Retpoline = true
}
}
if Flag.Cfg.SpectreIndex {
switch objabi.GOARCH {
case "amd64":
// ok
default:
log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH)
}
}
}
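
A small sketch of the flag-name derivation described in the CmdFlags comment and implemented in registerFlags: single-letter fields keep their case, LowerX becomes x, everything else is lower-cased, and a `flag` struct tag (like Percent's "%") overrides the default:

package main

import (
	"fmt"
	"strings"
)

// flagName mirrors the naming logic in registerFlags above.
func flagName(field string) string {
	if len(field) == 1 {
		return field
	}
	if len(field) == 6 && field[:5] == "Lower" && 'A' <= field[5] && field[5] <= 'Z' {
		return string(rune(field[5] + 'a' - 'A'))
	}
	return strings.ToLower(field)
}

func main() {
	for _, f := range []string{"N", "LowerO", "SymABIs", "Std"} {
		fmt.Printf("%-8s -> -%s\n", f, flagName(f))
	}
	// N -> -N, LowerO -> -o, SymABIs -> -symabis, Std -> -std
}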


@@ -0,0 +1,260 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package base
import (
"fmt"
"os"
"runtime/debug"
"sort"
"strings"
"cmd/internal/objabi"
"cmd/internal/src"
)
// An errorMsg is a queued error message, waiting to be printed.
type errorMsg struct {
pos src.XPos
msg string
}
// Pos is the current source position being processed,
// printed by Errorf, ErrorfLang, Fatalf, and Warnf.
var Pos src.XPos
var (
errorMsgs []errorMsg
numErrors int // number of entries in errorMsgs that are errors (as opposed to warnings)
numSyntaxErrors int
)
// Errors returns the number of errors reported.
func Errors() int {
return numErrors
}
// SyntaxErrors returns the number of syntax errors reported.
func SyntaxErrors() int {
return numSyntaxErrors
}
// addErrorMsg adds a new errorMsg (which may be a warning) to errorMsgs.
func addErrorMsg(pos src.XPos, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
// Only add the position if we know the position.
// See issue golang.org/issue/11361.
if pos.IsKnown() {
msg = fmt.Sprintf("%v: %s", FmtPos(pos), msg)
}
errorMsgs = append(errorMsgs, errorMsg{
pos: pos,
msg: msg + "\n",
})
}
// FmtPos formats pos as a file:line string.
func FmtPos(pos src.XPos) string {
if Ctxt == nil {
return "???"
}
return Ctxt.OutermostPos(pos).Format(Flag.C == 0, Flag.L == 1)
}
// byPos sorts errors by source position.
type byPos []errorMsg
func (x byPos) Len() int { return len(x) }
func (x byPos) Less(i, j int) bool { return x[i].pos.Before(x[j].pos) }
func (x byPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
// FlushErrors sorts errors seen so far by line number, prints them to stdout,
// and empties the errors array.
func FlushErrors() {
Ctxt.Bso.Flush()
if len(errorMsgs) == 0 {
return
}
sort.Stable(byPos(errorMsgs))
for i, err := range errorMsgs {
if i == 0 || err.msg != errorMsgs[i-1].msg {
fmt.Printf("%s", err.msg)
}
}
errorMsgs = errorMsgs[:0]
}
// lasterror keeps track of the most recently issued error,
// to avoid printing multiple error messages on the same line.
var lasterror struct {
syntax src.XPos // source position of last syntax error
other src.XPos // source position of last non-syntax error
msg string // error message of last non-syntax error
}
// sameline reports whether two positions a, b are on the same line.
func sameline(a, b src.XPos) bool {
p := Ctxt.PosTable.Pos(a)
q := Ctxt.PosTable.Pos(b)
return p.Base() == q.Base() && p.Line() == q.Line()
}
// Errorf reports a formatted error at the current line.
func Errorf(format string, args ...interface{}) {
ErrorfAt(Pos, format, args...)
}
// ErrorfAt reports a formatted error message at pos.
func ErrorfAt(pos src.XPos, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if strings.HasPrefix(msg, "syntax error") {
numSyntaxErrors++
// only one syntax error per line, no matter what error
if sameline(lasterror.syntax, pos) {
return
}
lasterror.syntax = pos
} else {
// only one of multiple equal non-syntax errors per line
// (flusherrors shows only one of them, so we filter them
// here as best as we can (they may not appear in order)
// so that we don't count them here and exit early, and
// then have nothing to show for.)
if sameline(lasterror.other, pos) && lasterror.msg == msg {
return
}
lasterror.other = pos
lasterror.msg = msg
}
addErrorMsg(pos, "%s", msg)
numErrors++
hcrash()
if numErrors >= 10 && Flag.LowerE == 0 {
FlushErrors()
fmt.Printf("%v: too many errors\n", FmtPos(pos))
ErrorExit()
}
}
// ErrorfVers reports that a language feature (format, args) requires a later version of Go.
func ErrorfVers(lang string, format string, args ...interface{}) {
Errorf("%s requires %s or later (-lang was set to %s; check go.mod)", fmt.Sprintf(format, args...), lang, Flag.Lang)
}
// UpdateErrorDot is a clumsy hack that rewrites the last error,
// if it was "LINE: undefined: NAME", to be "LINE: undefined: NAME in EXPR".
// It is used to give better error messages for dot (selector) expressions.
func UpdateErrorDot(line string, name, expr string) {
if len(errorMsgs) == 0 {
return
}
e := &errorMsgs[len(errorMsgs)-1]
if strings.HasPrefix(e.msg, line) && e.msg == fmt.Sprintf("%v: undefined: %v\n", line, name) {
e.msg = fmt.Sprintf("%v: undefined: %v in %v\n", line, name, expr)
}
}
// Warn reports a formatted warning at the current line.
// In general the Go compiler does NOT generate warnings,
// so this should be used only when the user has opted in
// to additional output by setting a particular flag.
func Warn(format string, args ...interface{}) {
WarnfAt(Pos, format, args...)
}
// WarnfAt reports a formatted warning at pos.
// In general the Go compiler does NOT generate warnings,
// so this should be used only when the user has opted in
// to additional output by setting a particular flag.
func WarnfAt(pos src.XPos, format string, args ...interface{}) {
addErrorMsg(pos, format, args...)
if Flag.LowerM != 0 {
FlushErrors()
}
}
// Fatalf reports a fatal error - an internal problem - at the current line and exits.
// If other errors have already been printed, then Fatalf just quietly exits.
// (The internal problem may have been caused by incomplete information
// after the already-reported errors, so best to let users fix those and
// try again without being bothered about a spurious internal error.)
//
// But if no errors have been printed, or if -d panic has been specified,
// Fatalf prints the error as an "internal compiler error". In a released build,
// it prints an error asking to file a bug report. In development builds, it
// prints a stack trace.
//
// If -h has been specified, Fatalf panics to force the usual runtime info dump.
func Fatalf(format string, args ...interface{}) {
FatalfAt(Pos, format, args...)
}
// FatalfAt reports a fatal error - an internal problem - at pos and exits.
// If other errors have already been printed, then FatalfAt just quietly exits.
// (The internal problem may have been caused by incomplete information
// after the already-reported errors, so best to let users fix those and
// try again without being bothered about a spurious internal error.)
//
// But if no errors have been printed, or if -d panic has been specified,
// FatalfAt prints the error as an "internal compiler error". In a released build,
// it prints an error asking to file a bug report. In development builds, it
// prints a stack trace.
//
// If -h has been specified, FatalfAt panics to force the usual runtime info dump.
func FatalfAt(pos src.XPos, format string, args ...interface{}) {
FlushErrors()
if Debug.Panic != 0 || numErrors == 0 {
fmt.Printf("%v: internal compiler error: ", FmtPos(pos))
fmt.Printf(format, args...)
fmt.Printf("\n")
// If this is a released compiler version, ask for a bug report.
if strings.HasPrefix(objabi.Version, "go") {
fmt.Printf("\n")
fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
fmt.Printf("https://golang.org/issue/new\n")
} else {
// Not a release; dump a stack trace, too.
fmt.Println()
os.Stdout.Write(debug.Stack())
fmt.Println()
}
}
hcrash()
ErrorExit()
}
// hcrash crashes the compiler when -h is set, to find out where a message is generated.
func hcrash() {
if Flag.LowerH != 0 {
FlushErrors()
if Flag.LowerO != "" {
os.Remove(Flag.LowerO)
}
panic("-h")
}
}
// ErrorExit handles an error-status exit.
// It flushes any pending errors, removes the output file, and exits.
func ErrorExit() {
FlushErrors()
if Flag.LowerO != "" {
os.Remove(Flag.LowerO)
}
os.Exit(2)
}
// ExitIfErrors calls ErrorExit if any errors have been reported.
func ExitIfErrors() {
if Errors() > 0 {
ErrorExit()
}
}
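
The queue, sort, and dedup flow in FlushErrors is easy to miss in the diff; here is a compact model, with positions reduced to line numbers since a real src.XPos needs a position table:

package main

import (
	"fmt"
	"sort"
)

type errorMsg struct {
	line int // stand-in for src.XPos
	msg  string
}

// flush models FlushErrors: stable sort by position, then print each
// message only if it differs from the one immediately before it.
func flush(msgs []errorMsg) {
	sort.SliceStable(msgs, func(i, j int) bool { return msgs[i].line < msgs[j].line })
	for i, e := range msgs {
		if i == 0 || e.msg != msgs[i-1].msg {
			fmt.Printf("line %d: %s\n", e.line, e.msg)
		}
	}
}

func main() {
	flush([]errorMsg{
		{10, "undefined: x"},
		{3, "syntax error"},
		{10, "undefined: x"}, // exact duplicate: printed once
	})
}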


@@ -5,6 +5,8 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"fmt"
@@ -69,11 +71,11 @@ func EqCanPanic(t *types.Type) bool {
switch t.Etype {
default:
return false
case TINTER:
case types.TINTER:
return true
case TARRAY:
case types.TARRAY:
return EqCanPanic(t.Elem())
case TSTRUCT:
case types.TSTRUCT:
for _, f := range t.FieldSlice() {
if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
return true
@@ -119,45 +121,45 @@ func algtype1(t *types.Type) (AlgKind, *types.Type) {
}
switch t.Etype {
case TANY, TFORW:
case types.TANY, types.TFORW:
// will be defined later.
return ANOEQ, t
case TINT8, TUINT8, TINT16, TUINT16,
TINT32, TUINT32, TINT64, TUINT64,
TINT, TUINT, TUINTPTR,
TBOOL, TPTR,
TCHAN, TUNSAFEPTR:
case types.TINT8, types.TUINT8, types.TINT16, types.TUINT16,
types.TINT32, types.TUINT32, types.TINT64, types.TUINT64,
types.TINT, types.TUINT, types.TUINTPTR,
types.TBOOL, types.TPTR,
types.TCHAN, types.TUNSAFEPTR:
return AMEM, nil
case TFUNC, TMAP:
case types.TFUNC, types.TMAP:
return ANOEQ, t
case TFLOAT32:
case types.TFLOAT32:
return AFLOAT32, nil
case TFLOAT64:
case types.TFLOAT64:
return AFLOAT64, nil
case TCOMPLEX64:
case types.TCOMPLEX64:
return ACPLX64, nil
case TCOMPLEX128:
case types.TCOMPLEX128:
return ACPLX128, nil
case TSTRING:
case types.TSTRING:
return ASTRING, nil
case TINTER:
case types.TINTER:
if t.IsEmptyInterface() {
return ANILINTER, nil
}
return AINTER, nil
case TSLICE:
case types.TSLICE:
return ANOEQ, t
case TARRAY:
case types.TARRAY:
a, bad := algtype1(t.Elem())
switch a {
case AMEM:
@@ -177,7 +179,7 @@ func algtype1(t *types.Type) (AlgKind, *types.Type) {
return ASPECIAL, nil
case TSTRUCT:
case types.TSTRUCT:
fields := t.FieldSlice()
// One-field struct is same as that one field alone.
@@ -203,7 +205,7 @@ func algtype1(t *types.Type) (AlgKind, *types.Type) {
return ret, nil
}
Fatalf("algtype1: unexpected type %v", t)
base.Fatalf("algtype1: unexpected type %v", t)
return 0, nil
}
@@ -214,7 +216,7 @@ func genhash(t *types.Type) *obj.LSym {
switch algtype(t) {
default:
// genhash is only called for types that have equality
Fatalf("genhash %v", t)
base.Fatalf("genhash %v", t)
case AMEM0:
return sysClosure("memhash0")
case AMEM8:
@@ -282,24 +284,24 @@ func genhash(t *types.Type) *obj.LSym {
}
sym := typesymprefix(".hash", t)
if Debug.r != 0 {
if base.Flag.LowerR != 0 {
fmt.Printf("genhash %v %v %v\n", closure, sym, t)
}
lineno = autogeneratedPos // less confusing than end of input
dclcontext = PEXTERN
base.Pos = autogeneratedPos // less confusing than end of input
dclcontext = ir.PEXTERN
// func sym(p *T, h uintptr) uintptr
tfn := nod(OTFUNC, nil, nil)
tfn.List.Set2(
tfn := ir.Nod(ir.OTFUNC, nil, nil)
tfn.PtrList().Set2(
namedfield("p", types.NewPtr(t)),
namedfield("h", types.Types[TUINTPTR]),
namedfield("h", types.Types[types.TUINTPTR]),
)
tfn.Rlist.Set1(anonfield(types.Types[TUINTPTR]))
tfn.PtrRlist().Set1(anonfield(types.Types[types.TUINTPTR]))
fn := dclfunc(sym, tfn)
np := asNode(tfn.Type.Params().Field(0).Nname)
nh := asNode(tfn.Type.Params().Field(1).Nname)
np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
nh := ir.AsNode(tfn.Type().Params().Field(1).Nname)
switch t.Etype {
case types.TARRAY:
@@ -308,25 +310,25 @@ func genhash(t *types.Type) *obj.LSym {
// pure memory.
hashel := hashfor(t.Elem())
n := nod(ORANGE, nil, nod(ODEREF, np, nil))
ni := newname(lookup("i"))
ni.Type = types.Types[TINT]
n.List.Set1(ni)
n := ir.Nod(ir.ORANGE, nil, ir.Nod(ir.ODEREF, np, nil))
ni := NewName(lookup("i"))
ni.SetType(types.Types[types.TINT])
n.PtrList().Set1(ni)
n.SetColas(true)
colasdefn(n.List.Slice(), n)
ni = n.List.First()
colasdefn(n.List().Slice(), n)
ni = n.List().First()
// h = hashel(&p[i], h)
call := nod(OCALL, hashel, nil)
call := ir.Nod(ir.OCALL, hashel, nil)
nx := nod(OINDEX, np, ni)
nx := ir.Nod(ir.OINDEX, np, ni)
nx.SetBounded(true)
na := nod(OADDR, nx, nil)
call.List.Append(na)
call.List.Append(nh)
n.Nbody.Append(nod(OAS, nh, call))
na := ir.Nod(ir.OADDR, nx, nil)
call.PtrList().Append(na)
call.PtrList().Append(nh)
n.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
fn.Nbody.Append(n)
fn.PtrBody().Append(n)
case types.TSTRUCT:
// Walk the struct using memhash for runs of AMEM
@@ -343,12 +345,12 @@ func genhash(t *types.Type) *obj.LSym {
// Hash non-memory fields with appropriate hash function.
if !IsRegularMemory(f.Type) {
hashel := hashfor(f.Type)
call := nod(OCALL, hashel, nil)
nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
na := nod(OADDR, nx, nil)
call.List.Append(na)
call.List.Append(nh)
fn.Nbody.Append(nod(OAS, nh, call))
call := ir.Nod(ir.OCALL, hashel, nil)
nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
na := ir.Nod(ir.OADDR, nx, nil)
call.PtrList().Append(na)
call.PtrList().Append(nh)
fn.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
i++
continue
}
@@ -358,40 +360,40 @@ func genhash(t *types.Type) *obj.LSym {
// h = hashel(&p.first, size, h)
hashel := hashmem(f.Type)
call := nod(OCALL, hashel, nil)
nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
na := nod(OADDR, nx, nil)
call.List.Append(na)
call.List.Append(nh)
call.List.Append(nodintconst(size))
fn.Nbody.Append(nod(OAS, nh, call))
call := ir.Nod(ir.OCALL, hashel, nil)
nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
na := ir.Nod(ir.OADDR, nx, nil)
call.PtrList().Append(na)
call.PtrList().Append(nh)
call.PtrList().Append(nodintconst(size))
fn.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
i = next
}
}
r := nod(ORETURN, nil, nil)
r.List.Append(nh)
fn.Nbody.Append(r)
r := ir.Nod(ir.ORETURN, nil, nil)
r.PtrList().Append(nh)
fn.PtrBody().Append(r)
if Debug.r != 0 {
dumplist("genhash body", fn.Nbody)
if base.Flag.LowerR != 0 {
ir.DumpList("genhash body", fn.Body())
}
funcbody()
fn.Func.SetDupok(true)
fn.Func().SetDupok(true)
fn = typecheck(fn, ctxStmt)
Curfn = fn
typecheckslice(fn.Nbody.Slice(), ctxStmt)
typecheckslice(fn.Body().Slice(), ctxStmt)
Curfn = nil
if debug_dclstack != 0 {
if base.Debug.DclStack != 0 {
testdclstack()
}
fn.Func.SetNilCheckDisabled(true)
fn.Func().SetNilCheckDisabled(true)
xtop = append(xtop, fn)
// Build closure. It doesn't close over any variables, so
@@ -402,12 +404,12 @@ func genhash(t *types.Type) *obj.LSym {
return closure
}
func hashfor(t *types.Type) *Node {
func hashfor(t *types.Type) ir.Node {
var sym *types.Sym
switch a, _ := algtype1(t); a {
case AMEM:
Fatalf("hashfor with AMEM type")
base.Fatalf("hashfor with AMEM type")
case AINTER:
sym = Runtimepkg.Lookup("interhash")
case ANILINTER:
@@ -428,14 +430,14 @@ func hashfor(t *types.Type) *Node {
sym = typesymprefix(".hash", t)
}
n := newname(sym)
n := NewName(sym)
setNodeNameFunc(n)
n.Type = functype(nil, []*Node{
n.SetType(functype(nil, []ir.Node{
anonfield(types.NewPtr(t)),
anonfield(types.Types[TUINTPTR]),
}, []*Node{
anonfield(types.Types[TUINTPTR]),
})
anonfield(types.Types[types.TUINTPTR]),
}, []ir.Node{
anonfield(types.Types[types.TUINTPTR]),
}))
return n
}
@@ -509,27 +511,27 @@ func geneq(t *types.Type) *obj.LSym {
return closure
}
sym := typesymprefix(".eq", t)
if Debug.r != 0 {
if base.Flag.LowerR != 0 {
fmt.Printf("geneq %v\n", t)
}
// Autogenerate code for equality of structs and arrays.
lineno = autogeneratedPos // less confusing than end of input
dclcontext = PEXTERN
base.Pos = autogeneratedPos // less confusing than end of input
dclcontext = ir.PEXTERN
// func sym(p, q *T) bool
tfn := nod(OTFUNC, nil, nil)
tfn.List.Set2(
tfn := ir.Nod(ir.OTFUNC, nil, nil)
tfn.PtrList().Set2(
namedfield("p", types.NewPtr(t)),
namedfield("q", types.NewPtr(t)),
)
tfn.Rlist.Set1(namedfield("r", types.Types[TBOOL]))
tfn.PtrRlist().Set1(namedfield("r", types.Types[types.TBOOL]))
fn := dclfunc(sym, tfn)
np := asNode(tfn.Type.Params().Field(0).Nname)
nq := asNode(tfn.Type.Params().Field(1).Nname)
nr := asNode(tfn.Type.Results().Field(0).Nname)
np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
nq := ir.AsNode(tfn.Type().Params().Field(1).Nname)
nr := ir.AsNode(tfn.Type().Results().Field(0).Nname)
// Label to jump to if an equality test fails.
neq := autolabel(".neq")
@@ -539,9 +541,9 @@ func geneq(t *types.Type) *obj.LSym {
// so t must be either an array or a struct.
switch t.Etype {
default:
Fatalf("geneq %v", t)
base.Fatalf("geneq %v", t)
case TARRAY:
case types.TARRAY:
nelem := t.NumElem()
// checkAll generates code to check the equality of all array elements.
@@ -565,17 +567,17 @@ func geneq(t *types.Type) *obj.LSym {
//
// TODO(josharian): consider doing some loop unrolling
// for larger nelem as well, processing a few elements at a time in a loop.
checkAll := func(unroll int64, last bool, eq func(pi, qi *Node) *Node) {
checkAll := func(unroll int64, last bool, eq func(pi, qi ir.Node) ir.Node) {
// checkIdx generates a node to check for equality at index i.
checkIdx := func(i *Node) *Node {
checkIdx := func(i ir.Node) ir.Node {
// pi := p[i]
pi := nod(OINDEX, np, i)
pi := ir.Nod(ir.OINDEX, np, i)
pi.SetBounded(true)
pi.Type = t.Elem()
pi.SetType(t.Elem())
// qi := q[i]
qi := nod(OINDEX, nq, i)
qi := ir.Nod(ir.OINDEX, nq, i)
qi.SetBounded(true)
qi.Type = t.Elem()
qi.SetType(t.Elem())
return eq(pi, qi)
}
@@ -587,68 +589,68 @@ func geneq(t *types.Type) *obj.LSym {
// Generate a series of checks.
for i := int64(0); i < nelem; i++ {
// if check {} else { goto neq }
nif := nod(OIF, checkIdx(nodintconst(i)), nil)
nif.Rlist.Append(nodSym(OGOTO, nil, neq))
fn.Nbody.Append(nif)
nif := ir.Nod(ir.OIF, checkIdx(nodintconst(i)), nil)
nif.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq))
fn.PtrBody().Append(nif)
}
if last {
fn.Nbody.Append(nod(OAS, nr, checkIdx(nodintconst(nelem))))
fn.PtrBody().Append(ir.Nod(ir.OAS, nr, checkIdx(nodintconst(nelem))))
}
} else {
// Generate a for loop.
// for i := 0; i < nelem; i++
i := temp(types.Types[TINT])
init := nod(OAS, i, nodintconst(0))
cond := nod(OLT, i, nodintconst(nelem))
post := nod(OAS, i, nod(OADD, i, nodintconst(1)))
loop := nod(OFOR, cond, post)
loop.Ninit.Append(init)
i := temp(types.Types[types.TINT])
init := ir.Nod(ir.OAS, i, nodintconst(0))
cond := ir.Nod(ir.OLT, i, nodintconst(nelem))
post := ir.Nod(ir.OAS, i, ir.Nod(ir.OADD, i, nodintconst(1)))
loop := ir.Nod(ir.OFOR, cond, post)
loop.PtrInit().Append(init)
// if eq(pi, qi) {} else { goto neq }
nif := nod(OIF, checkIdx(i), nil)
nif.Rlist.Append(nodSym(OGOTO, nil, neq))
loop.Nbody.Append(nif)
fn.Nbody.Append(loop)
nif := ir.Nod(ir.OIF, checkIdx(i), nil)
nif.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq))
loop.PtrBody().Append(nif)
fn.PtrBody().Append(loop)
if last {
fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(true)))
}
}
}
switch t.Elem().Etype {
case TSTRING:
case types.TSTRING:
// Do two loops. First, check that all the lengths match (cheap).
// Second, check that all the contents match (expensive).
// TODO: when the array size is small, unroll the length match checks.
checkAll(3, false, func(pi, qi *Node) *Node {
checkAll(3, false, func(pi, qi ir.Node) ir.Node {
// Compare lengths.
eqlen, _ := eqstring(pi, qi)
return eqlen
})
checkAll(1, true, func(pi, qi *Node) *Node {
checkAll(1, true, func(pi, qi ir.Node) ir.Node {
// Compare contents.
_, eqmem := eqstring(pi, qi)
return eqmem
})
case TFLOAT32, TFLOAT64:
checkAll(2, true, func(pi, qi *Node) *Node {
case types.TFLOAT32, types.TFLOAT64:
checkAll(2, true, func(pi, qi ir.Node) ir.Node {
// p[i] == q[i]
return nod(OEQ, pi, qi)
return ir.Nod(ir.OEQ, pi, qi)
})
// TODO: pick apart structs, do them piecemeal too
default:
checkAll(1, true, func(pi, qi *Node) *Node {
checkAll(1, true, func(pi, qi ir.Node) ir.Node {
// p[i] == q[i]
return nod(OEQ, pi, qi)
return ir.Nod(ir.OEQ, pi, qi)
})
}
case TSTRUCT:
case types.TSTRUCT:
// Build a list of conditions to satisfy.
// The conditions are a list-of-lists. Conditions are reorderable
// within each inner list. The outer lists must be evaluated in order.
var conds [][]*Node
conds = append(conds, []*Node{})
and := func(n *Node) {
var conds [][]ir.Node
conds = append(conds, []ir.Node{})
and := func(n ir.Node) {
i := len(conds) - 1
conds[i] = append(conds[i], n)
}
@@ -668,21 +670,21 @@ func geneq(t *types.Type) *obj.LSym {
if !IsRegularMemory(f.Type) {
if EqCanPanic(f.Type) {
// Enforce ordering by starting a new set of reorderable conditions.
conds = append(conds, []*Node{})
conds = append(conds, []ir.Node{})
}
p := nodSym(OXDOT, np, f.Sym)
q := nodSym(OXDOT, nq, f.Sym)
p := nodSym(ir.OXDOT, np, f.Sym)
q := nodSym(ir.OXDOT, nq, f.Sym)
switch {
case f.Type.IsString():
eqlen, eqmem := eqstring(p, q)
and(eqlen)
and(eqmem)
default:
and(nod(OEQ, p, q))
and(ir.Nod(ir.OEQ, p, q))
}
if EqCanPanic(f.Type) {
// Also enforce ordering after something that can panic.
conds = append(conds, []*Node{})
conds = append(conds, []ir.Node{})
}
i++
continue
@@ -707,10 +709,10 @@ func geneq(t *types.Type) *obj.LSym {
// Sort conditions to put runtime calls last.
// Preserve the rest of the ordering.
var flatConds []*Node
var flatConds []ir.Node
for _, c := range conds {
isCall := func(n *Node) bool {
return n.Op == OCALL || n.Op == OCALLFUNC
isCall := func(n ir.Node) bool {
return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
}
sort.SliceStable(c, func(i, j int) bool {
return !isCall(c[i]) && isCall(c[j])
@@ -719,54 +721,54 @@ func geneq(t *types.Type) *obj.LSym {
}
if len(flatConds) == 0 {
fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(true)))
} else {
for _, c := range flatConds[:len(flatConds)-1] {
// if cond {} else { goto neq }
n := nod(OIF, c, nil)
n.Rlist.Append(nodSym(OGOTO, nil, neq))
fn.Nbody.Append(n)
n := ir.Nod(ir.OIF, c, nil)
n.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq))
fn.PtrBody().Append(n)
}
fn.Nbody.Append(nod(OAS, nr, flatConds[len(flatConds)-1]))
fn.PtrBody().Append(ir.Nod(ir.OAS, nr, flatConds[len(flatConds)-1]))
}
}
// ret:
// return
ret := autolabel(".ret")
fn.Nbody.Append(nodSym(OLABEL, nil, ret))
fn.Nbody.Append(nod(ORETURN, nil, nil))
fn.PtrBody().Append(nodSym(ir.OLABEL, nil, ret))
fn.PtrBody().Append(ir.Nod(ir.ORETURN, nil, nil))
// neq:
// r = false
// return (or goto ret)
fn.Nbody.Append(nodSym(OLABEL, nil, neq))
fn.Nbody.Append(nod(OAS, nr, nodbool(false)))
fn.PtrBody().Append(nodSym(ir.OLABEL, nil, neq))
fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(false)))
if EqCanPanic(t) || hasCall(fn) {
// Epilogue is large, so share it with the equal case.
fn.Nbody.Append(nodSym(OGOTO, nil, ret))
fn.PtrBody().Append(nodSym(ir.OGOTO, nil, ret))
} else {
// Epilogue is small, so don't bother sharing.
fn.Nbody.Append(nod(ORETURN, nil, nil))
fn.PtrBody().Append(ir.Nod(ir.ORETURN, nil, nil))
}
// TODO(khr): the epilogue size detection condition above isn't perfect.
// We should really do a generic CL that shares epilogues across
// the board. See #24936.
if Debug.r != 0 {
dumplist("geneq body", fn.Nbody)
if base.Flag.LowerR != 0 {
ir.DumpList("geneq body", fn.Body())
}
funcbody()
fn.Func.SetDupok(true)
fn.Func().SetDupok(true)
fn = typecheck(fn, ctxStmt)
Curfn = fn
typecheckslice(fn.Nbody.Slice(), ctxStmt)
typecheckslice(fn.Body().Slice(), ctxStmt)
Curfn = nil
if debug_dclstack != 0 {
if base.Debug.DclStack != 0 {
testdclstack()
}
@@ -774,7 +776,7 @@ func geneq(t *types.Type) *obj.LSym {
// We are comparing a struct or an array,
// neither of which can be nil, and our comparisons
// are shallow.
fn.Func.SetNilCheckDisabled(true)
fn.Func().SetNilCheckDisabled(true)
xtop = append(xtop, fn)
// Generate a closure which points at the function we just generated.
@@ -783,32 +785,32 @@ func geneq(t *types.Type) *obj.LSym {
return closure
}
func hasCall(n *Node) bool {
if n.Op == OCALL || n.Op == OCALLFUNC {
func hasCall(n ir.Node) bool {
if n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC {
return true
}
if n.Left != nil && hasCall(n.Left) {
if n.Left() != nil && hasCall(n.Left()) {
return true
}
if n.Right != nil && hasCall(n.Right) {
if n.Right() != nil && hasCall(n.Right()) {
return true
}
for _, x := range n.Ninit.Slice() {
for _, x := range n.Init().Slice() {
if hasCall(x) {
return true
}
}
for _, x := range n.Nbody.Slice() {
for _, x := range n.Body().Slice() {
if hasCall(x) {
return true
}
}
for _, x := range n.List.Slice() {
for _, x := range n.List().Slice() {
if hasCall(x) {
return true
}
}
for _, x := range n.Rlist.Slice() {
for _, x := range n.Rlist().Slice() {
if hasCall(x) {
return true
}
@ -818,10 +820,10 @@ func hasCall(n *Node) bool {
// eqfield returns the node
// p.field == q.field
func eqfield(p *Node, q *Node, field *types.Sym) *Node {
nx := nodSym(OXDOT, p, field)
ny := nodSym(OXDOT, q, field)
ne := nod(OEQ, nx, ny)
func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
nx := nodSym(ir.OXDOT, p, field)
ny := nodSym(ir.OXDOT, q, field)
ne := ir.Nod(ir.OEQ, nx, ny)
return ne
}
@ -831,23 +833,23 @@ func eqfield(p *Node, q *Node, field *types.Sym) *Node {
// memequal(s.ptr, t.ptr, len(s))
// which can be used to construct string equality comparison.
// eqlen must be evaluated before eqmem, and shortcircuiting is required.
func eqstring(s, t *Node) (eqlen, eqmem *Node) {
s = conv(s, types.Types[TSTRING])
t = conv(t, types.Types[TSTRING])
sptr := nod(OSPTR, s, nil)
tptr := nod(OSPTR, t, nil)
slen := conv(nod(OLEN, s, nil), types.Types[TUINTPTR])
tlen := conv(nod(OLEN, t, nil), types.Types[TUINTPTR])
func eqstring(s, t ir.Node) (eqlen, eqmem ir.Node) {
s = conv(s, types.Types[types.TSTRING])
t = conv(t, types.Types[types.TSTRING])
sptr := ir.Nod(ir.OSPTR, s, nil)
tptr := ir.Nod(ir.OSPTR, t, nil)
slen := conv(ir.Nod(ir.OLEN, s, nil), types.Types[types.TUINTPTR])
tlen := conv(ir.Nod(ir.OLEN, t, nil), types.Types[types.TUINTPTR])
fn := syslook("memequal")
fn = substArgTypes(fn, types.Types[TUINT8], types.Types[TUINT8])
call := nod(OCALL, fn, nil)
call.List.Append(sptr, tptr, slen.copy())
fn = substArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
call := ir.Nod(ir.OCALL, fn, nil)
call.PtrList().Append(sptr, tptr, ir.Copy(slen))
call = typecheck(call, ctxExpr|ctxMultiOK)
cmp := nod(OEQ, slen, tlen)
cmp := ir.Nod(ir.OEQ, slen, tlen)
cmp = typecheck(cmp, ctxExpr)
cmp.Type = types.Types[TBOOL]
cmp.SetType(types.Types[types.TBOOL])
return cmp, call
}
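Spelled out as ordinary Go, the eqlen/eqmem pair combines into a short-circuit comparison of this shape (an illustrative sketch; the byte loop stands in for the runtime memequal call):

package sketch

// stringsEqual mirrors the contract stated above: eqlen (the
// length comparison) is evaluated first and short-circuits, so
// eqmem (the byte comparison, a memequal call in the compiled
// form) never reads past a shorter string.
func stringsEqual(s, t string) bool {
	if len(s) != len(t) { // eqlen
		return false
	}
	for i := 0; i < len(s); i++ { // eqmem, spelled as a loop
		if s[i] != t[i] {
			return false
		}
	}
	return true
}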
@ -857,58 +859,58 @@ func eqstring(s, t *Node) (eqlen, eqmem *Node) {
// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
// which can be used to construct interface equality comparison.
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
func eqinterface(s, t *Node) (eqtab, eqdata *Node) {
if !types.Identical(s.Type, t.Type) {
Fatalf("eqinterface %v %v", s.Type, t.Type)
func eqinterface(s, t ir.Node) (eqtab, eqdata ir.Node) {
if !types.Identical(s.Type(), t.Type()) {
base.Fatalf("eqinterface %v %v", s.Type(), t.Type())
}
// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
var fn *Node
if s.Type.IsEmptyInterface() {
var fn ir.Node
if s.Type().IsEmptyInterface() {
fn = syslook("efaceeq")
} else {
fn = syslook("ifaceeq")
}
stab := nod(OITAB, s, nil)
ttab := nod(OITAB, t, nil)
sdata := nod(OIDATA, s, nil)
tdata := nod(OIDATA, t, nil)
sdata.Type = types.Types[TUNSAFEPTR]
tdata.Type = types.Types[TUNSAFEPTR]
stab := ir.Nod(ir.OITAB, s, nil)
ttab := ir.Nod(ir.OITAB, t, nil)
sdata := ir.Nod(ir.OIDATA, s, nil)
tdata := ir.Nod(ir.OIDATA, t, nil)
sdata.SetType(types.Types[types.TUNSAFEPTR])
tdata.SetType(types.Types[types.TUNSAFEPTR])
sdata.SetTypecheck(1)
tdata.SetTypecheck(1)
call := nod(OCALL, fn, nil)
call.List.Append(stab, sdata, tdata)
call := ir.Nod(ir.OCALL, fn, nil)
call.PtrList().Append(stab, sdata, tdata)
call = typecheck(call, ctxExpr|ctxMultiOK)
cmp := nod(OEQ, stab, ttab)
cmp := ir.Nod(ir.OEQ, stab, ttab)
cmp = typecheck(cmp, ctxExpr)
cmp.Type = types.Types[TBOOL]
cmp.SetType(types.Types[types.TBOOL])
return cmp, call
}
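The eqtab/eqdata pair has an analogous hand-written shape (illustrative only; reflect.TypeOf stands in for the itab comparison, and the plain interface comparison stands in for the ifaceeq/efaceeq call, which compares data words and can panic on uncomparable types):

package sketch

import "reflect"

// ifaceEqual evaluates the eqtab check first and short-circuits,
// so the data comparison only runs on identical dynamic types.
func ifaceEqual(s, t interface{}) bool {
	if reflect.TypeOf(s) != reflect.TypeOf(t) { // eqtab: s.tab == t.tab
		return false
	}
	return s == t // eqdata: the ifaceeq/efaceeq call on the data words
}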
// eqmem returns the node
// memequal(&p.field, &q.field [, size])
func eqmem(p *Node, q *Node, field *types.Sym, size int64) *Node {
nx := nod(OADDR, nodSym(OXDOT, p, field), nil)
ny := nod(OADDR, nodSym(OXDOT, q, field), nil)
func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
nx := ir.Nod(ir.OADDR, nodSym(ir.OXDOT, p, field), nil)
ny := ir.Nod(ir.OADDR, nodSym(ir.OXDOT, q, field), nil)
nx = typecheck(nx, ctxExpr)
ny = typecheck(ny, ctxExpr)
fn, needsize := eqmemfunc(size, nx.Type.Elem())
call := nod(OCALL, fn, nil)
call.List.Append(nx)
call.List.Append(ny)
fn, needsize := eqmemfunc(size, nx.Type().Elem())
call := ir.Nod(ir.OCALL, fn, nil)
call.PtrList().Append(nx)
call.PtrList().Append(ny)
if needsize {
call.List.Append(nodintconst(size))
call.PtrList().Append(nodintconst(size))
}
return call
}
func eqmemfunc(size int64, t *types.Type) (fn *Node, needsize bool) {
func eqmemfunc(size int64, t *types.Type) (fn ir.Node, needsize bool) {
switch size {
default:
fn = syslook("memequal")
@ -949,7 +951,7 @@ func memrun(t *types.Type, start int) (size int64, next int) {
// by padding.
func ispaddedfield(t *types.Type, i int) bool {
if !t.IsStruct() {
Fatalf("ispaddedfield called non-struct %v", t)
base.Fatalf("ispaddedfield called non-struct %v", t)
}
end := t.Width
if i+1 < t.NumFields() {

View file

@ -6,6 +6,8 @@ package gc
import (
"bytes"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"fmt"
"sort"
@ -21,7 +23,7 @@ var defercalc int
func Rnd(o int64, r int64) int64 {
if r < 1 || r > 8 || r&(r-1) != 0 {
Fatalf("rnd %d", r)
base.Fatalf("rnd %d", r)
}
return (o + r - 1) &^ (r - 1)
}
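As a quick standalone illustration of the rounding identity (not compiler code): adding r-1 and then clearing the low bits with &^ is exactly "round up to a multiple of r" when r is a power of two.

package main

import "fmt"

// rnd mirrors Rnd above: round o up to the next multiple of r,
// where r is a power of two between 1 and 8.
func rnd(o, r int64) int64 { return (o + r - 1) &^ (r - 1) }

func main() {
	fmt.Println(rnd(13, 8), rnd(16, 8), rnd(1, 4)) // prints: 16 16 4
}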
@ -39,7 +41,7 @@ func expandiface(t *types.Type) {
case langSupported(1, 14, t.Pkg()) && !explicit && types.Identical(m.Type, prev.Type):
return
default:
yyerrorl(m.Pos, "duplicate method %s", m.Sym.Name)
base.ErrorfAt(m.Pos, "duplicate method %s", m.Sym.Name)
}
methods = append(methods, m)
}
@ -59,7 +61,7 @@ func expandiface(t *types.Type) {
}
if !m.Type.IsInterface() {
yyerrorl(m.Pos, "interface contains embedded non-interface %v", m.Type)
base.ErrorfAt(m.Pos, "interface contains embedded non-interface %v", m.Type)
m.SetBroke(true)
t.SetBroke(true)
// Add to fields so that error messages
@ -74,11 +76,8 @@ func expandiface(t *types.Type) {
// (including broken ones, if any) and add to t's
// method set.
for _, t1 := range m.Type.Fields().Slice() {
f := types.NewField()
f.Pos = m.Pos // preserve embedding position
f.Sym = t1.Sym
f.Type = t1.Type
f.SetBroke(t1.Broke())
// Use m.Pos rather than t1.Pos to preserve embedding position.
f := types.NewField(m.Pos, t1.Sym, t1.Type)
addMethod(f, false)
}
}
@ -86,7 +85,7 @@ func expandiface(t *types.Type) {
sort.Sort(methcmp(methods))
if int64(len(methods)) >= thearch.MAXWIDTH/int64(Widthptr) {
yyerrorl(typePos(t), "interface too large")
base.ErrorfAt(typePos(t), "interface too large")
}
for i, m := range methods {
m.Offset = int64(i) * int64(Widthptr)
@ -119,7 +118,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
o = Rnd(o, int64(f.Type.Align))
}
f.Offset = o
if n := asNode(f.Nname); n != nil {
if n := ir.AsNode(f.Nname); n != nil {
// addrescapes has similar code to update these offsets.
// Usually addrescapes runs after widstruct,
// in which case we could drop this,
@ -127,17 +126,17 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
// NOTE(rsc): This comment may be stale.
// It's possible the ordering has changed and this is
// now the common case. I'm not sure.
if n.Name.Param.Stackcopy != nil {
n.Name.Param.Stackcopy.Xoffset = o
n.Xoffset = 0
if n.Name().Param.Stackcopy != nil {
n.Name().Param.Stackcopy.SetOffset(o)
n.SetOffset(0)
} else {
n.Xoffset = o
n.SetOffset(o)
}
}
w := f.Type.Width
if w < 0 {
Fatalf("invalid width %d", f.Type.Width)
base.Fatalf("invalid width %d", f.Type.Width)
}
if w == 0 {
lastzero = o
@ -150,7 +149,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
maxwidth = 1<<31 - 1
}
if o >= maxwidth {
yyerrorl(typePos(errtype), "type %L too large", errtype)
base.ErrorfAt(typePos(errtype), "type %L too large", errtype)
o = 8 // small but nonzero
}
}
@ -199,7 +198,7 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool {
}
*path = append(*path, t)
if p := asNode(t.Nod).Name.Param; p != nil && findTypeLoop(p.Ntype.Type, path) {
if p := ir.AsNode(t.Nod).Name().Param; p != nil && findTypeLoop(p.Ntype.Type(), path) {
return true
}
*path = (*path)[:len(*path)-1]
@ -207,17 +206,17 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool {
// Anonymous type. Recurse on contained types.
switch t.Etype {
case TARRAY:
case types.TARRAY:
if findTypeLoop(t.Elem(), path) {
return true
}
case TSTRUCT:
case types.TSTRUCT:
for _, f := range t.Fields().Slice() {
if findTypeLoop(f.Type, path) {
return true
}
}
case TINTER:
case types.TINTER:
for _, m := range t.Methods().Slice() {
if m.Type.IsInterface() { // embedded interface
if findTypeLoop(m.Type, path) {
@ -238,7 +237,7 @@ func reportTypeLoop(t *types.Type) {
var l []*types.Type
if !findTypeLoop(t, &l) {
Fatalf("failed to find type loop for: %v", t)
base.Fatalf("failed to find type loop for: %v", t)
}
// Rotate loop so that the earliest type declaration is first.
@ -253,11 +252,11 @@ func reportTypeLoop(t *types.Type) {
var msg bytes.Buffer
fmt.Fprintf(&msg, "invalid recursive type %v\n", l[0])
for _, t := range l {
fmt.Fprintf(&msg, "\t%v: %v refers to\n", linestr(typePos(t)), t)
fmt.Fprintf(&msg, "\t%v: %v refers to\n", base.FmtPos(typePos(t)), t)
t.SetBroke(true)
}
fmt.Fprintf(&msg, "\t%v: %v", linestr(typePos(l[0])), l[0])
yyerrorl(typePos(l[0]), msg.String())
fmt.Fprintf(&msg, "\t%v: %v", base.FmtPos(typePos(l[0])), l[0])
base.ErrorfAt(typePos(l[0]), msg.String())
}
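A minimal reproducer for the loop being reported; the error shape below is approximated from the Fprintf calls above:

package sketch

// Valid: the pointer gives List a finite width, so no loop.
type List struct{ next *List }

// Invalid: a value-typed cycle has no finite width, so dowidth
// hits the TFORW case again and reportTypeLoop prints roughly:
//
//	invalid recursive type Bad
//		x.go:9: Bad refers to
//		x.go:9: Bad
//
// type Bad struct{ next Bad }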
// dowidth calculates and stores the size and alignment for t.
@ -271,7 +270,7 @@ func dowidth(t *types.Type) {
return
}
if Widthptr == 0 {
Fatalf("dowidth without betypeinit")
base.Fatalf("dowidth without betypeinit")
}
if t == nil {
@ -295,7 +294,7 @@ func dowidth(t *types.Type) {
return
}
t.SetBroke(true)
Fatalf("width not calculated: %v", t)
base.Fatalf("width not calculated: %v", t)
}
// break infinite recursion if the broken recursive type
@ -307,9 +306,9 @@ func dowidth(t *types.Type) {
// defer checkwidth calls until after we're done
defercheckwidth()
lno := lineno
if asNode(t.Nod) != nil {
lineno = asNode(t.Nod).Pos
lno := base.Pos
if ir.AsNode(t.Nod) != nil {
base.Pos = ir.AsNode(t.Nod).Pos()
}
t.Width = -2
@ -317,7 +316,7 @@ func dowidth(t *types.Type) {
et := t.Etype
switch et {
case TFUNC, TCHAN, TMAP, TSTRING:
case types.TFUNC, types.TCHAN, types.TMAP, types.TSTRING:
break
// simtype == 0 during bootstrap
@ -330,44 +329,44 @@ func dowidth(t *types.Type) {
var w int64
switch et {
default:
Fatalf("dowidth: unknown type: %v", t)
base.Fatalf("dowidth: unknown type: %v", t)
// compiler-specific stuff
case TINT8, TUINT8, TBOOL:
case types.TINT8, types.TUINT8, types.TBOOL:
// bool is int8
w = 1
case TINT16, TUINT16:
case types.TINT16, types.TUINT16:
w = 2
case TINT32, TUINT32, TFLOAT32:
case types.TINT32, types.TUINT32, types.TFLOAT32:
w = 4
case TINT64, TUINT64, TFLOAT64:
case types.TINT64, types.TUINT64, types.TFLOAT64:
w = 8
t.Align = uint8(Widthreg)
case TCOMPLEX64:
case types.TCOMPLEX64:
w = 8
t.Align = 4
case TCOMPLEX128:
case types.TCOMPLEX128:
w = 16
t.Align = uint8(Widthreg)
case TPTR:
case types.TPTR:
w = int64(Widthptr)
checkwidth(t.Elem())
case TUNSAFEPTR:
case types.TUNSAFEPTR:
w = int64(Widthptr)
case TINTER: // implemented as 2 pointers
case types.TINTER: // implemented as 2 pointers
w = 2 * int64(Widthptr)
t.Align = uint8(Widthptr)
expandiface(t)
case TCHAN: // implemented as pointer
case types.TCHAN: // implemented as pointer
w = int64(Widthptr)
checkwidth(t.Elem())
@ -377,35 +376,35 @@ func dowidth(t *types.Type) {
t1 := types.NewChanArgs(t)
checkwidth(t1)
case TCHANARGS:
case types.TCHANARGS:
t1 := t.ChanArgs()
dowidth(t1) // just in case
if t1.Elem().Width >= 1<<16 {
yyerrorl(typePos(t1), "channel element type too large (>64kB)")
base.ErrorfAt(typePos(t1), "channel element type too large (>64kB)")
}
w = 1 // anything will do
case TMAP: // implemented as pointer
case types.TMAP: // implemented as pointer
w = int64(Widthptr)
checkwidth(t.Elem())
checkwidth(t.Key())
case TFORW: // should have been filled in
case types.TFORW: // should have been filled in
reportTypeLoop(t)
w = 1 // anything will do
case TANY:
// dummy type; should be replaced before use.
Fatalf("dowidth any")
case types.TANY:
// not a real type; should be replaced before use.
base.Fatalf("dowidth any")
case TSTRING:
case types.TSTRING:
if sizeofString == 0 {
Fatalf("early dowidth string")
base.Fatalf("early dowidth string")
}
w = sizeofString
t.Align = uint8(Widthptr)
case TARRAY:
case types.TARRAY:
if t.Elem() == nil {
break
}
@ -414,13 +413,13 @@ func dowidth(t *types.Type) {
if t.Elem().Width != 0 {
cap := (uint64(thearch.MAXWIDTH) - 1) / uint64(t.Elem().Width)
if uint64(t.NumElem()) > cap {
yyerrorl(typePos(t), "type %L larger than address space", t)
base.ErrorfAt(typePos(t), "type %L larger than address space", t)
}
}
w = t.NumElem() * t.Elem().Width
t.Align = t.Elem().Align
case TSLICE:
case types.TSLICE:
if t.Elem() == nil {
break
}
@ -428,46 +427,46 @@ func dowidth(t *types.Type) {
checkwidth(t.Elem())
t.Align = uint8(Widthptr)
case TSTRUCT:
case types.TSTRUCT:
if t.IsFuncArgStruct() {
Fatalf("dowidth fn struct %v", t)
base.Fatalf("dowidth fn struct %v", t)
}
w = widstruct(t, t, 0, 1)
// make fake type to check later to
// trigger function argument computation.
case TFUNC:
case types.TFUNC:
t1 := types.NewFuncArgs(t)
checkwidth(t1)
w = int64(Widthptr) // width of func type is pointer
// function is 3 concatenated structures;
// compute their widths as side-effect.
case TFUNCARGS:
case types.TFUNCARGS:
t1 := t.FuncArgs()
w = widstruct(t1, t1.Recvs(), 0, 0)
w = widstruct(t1, t1.Params(), w, Widthreg)
w = widstruct(t1, t1.Results(), w, Widthreg)
t1.Extra.(*types.Func).Argwid = w
if w%int64(Widthreg) != 0 {
Warn("bad type %v %d\n", t1, w)
base.Warn("bad type %v %d\n", t1, w)
}
t.Align = 1
}
if Widthptr == 4 && w != int64(int32(w)) {
yyerrorl(typePos(t), "type %v too large", t)
base.ErrorfAt(typePos(t), "type %v too large", t)
}
t.Width = w
if t.Align == 0 {
if w == 0 || w > 8 || w&(w-1) != 0 {
Fatalf("invalid alignment for %v", t)
base.Fatalf("invalid alignment for %v", t)
}
t.Align = uint8(w)
}
lineno = lno
base.Pos = lno
resumecheckwidth()
}
@ -498,7 +497,7 @@ func checkwidth(t *types.Type) {
// function arg structs should not be checked
// outside of the enclosing function.
if t.IsFuncArgStruct() {
Fatalf("checkwidth %v", t)
base.Fatalf("checkwidth %v", t)
}
if defercalc == 0 {

View file

@ -5,6 +5,7 @@
package gc
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
)
@ -12,6 +13,15 @@ type exporter struct {
marked map[*types.Type]bool // types already seen by markType
}
// markObject visits a reachable object.
func (p *exporter) markObject(n ir.Node) {
if n.Op() == ir.ONAME && n.Class() == ir.PFUNC {
inlFlood(n)
}
p.markType(n.Type())
}
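Why the flooding matters, in one hedged example (the package and method names are made up):

package a

type Point struct{ X, Y int }

// Norm2 is small enough to inline. When Point is reachable from
// package a's exports, markObject floods Norm2's body (and the
// bodies of anything it calls) into the export data, so that an
// importing package can inline the call.
func (p Point) Norm2() int { return p.X*p.X + p.Y*p.Y }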
// markType recursively visits types reachable from t to identify
// functions whose inline bodies may be needed.
func (p *exporter) markType(t *types.Type) {
@ -25,10 +35,10 @@ func (p *exporter) markType(t *types.Type) {
// only their unexpanded method set (i.e., exclusive of
// interface embeddings), and the switch statement below
// handles their full method set.
if t.Sym != nil && t.Etype != TINTER {
if t.Sym != nil && t.Etype != types.TINTER {
for _, m := range t.Methods().Slice() {
if types.IsExported(m.Sym.Name) {
p.markType(m.Type)
p.markObject(ir.AsNode(m.Nname))
}
}
}
@ -43,36 +53,31 @@ func (p *exporter) markType(t *types.Type) {
// the user already needs some way to construct values of
// those types.
switch t.Etype {
case TPTR, TARRAY, TSLICE:
case types.TPTR, types.TARRAY, types.TSLICE:
p.markType(t.Elem())
case TCHAN:
case types.TCHAN:
if t.ChanDir().CanRecv() {
p.markType(t.Elem())
}
case TMAP:
case types.TMAP:
p.markType(t.Key())
p.markType(t.Elem())
case TSTRUCT:
case types.TSTRUCT:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
p.markType(f.Type)
}
}
case TFUNC:
// If t is the type of a function or method, then
// t.Nname() is its ONAME. Mark its inline body and
// any recursively called functions for export.
inlFlood(asNode(t.Nname()))
case types.TFUNC:
for _, f := range t.Results().FieldSlice() {
p.markType(f.Type)
}
case TINTER:
case types.TINTER:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) {
p.markType(f.Type)
@ -129,23 +134,23 @@ func predeclared() []*types.Type {
// elements have been initialized before
predecl = []*types.Type{
// basic types
types.Types[TBOOL],
types.Types[TINT],
types.Types[TINT8],
types.Types[TINT16],
types.Types[TINT32],
types.Types[TINT64],
types.Types[TUINT],
types.Types[TUINT8],
types.Types[TUINT16],
types.Types[TUINT32],
types.Types[TUINT64],
types.Types[TUINTPTR],
types.Types[TFLOAT32],
types.Types[TFLOAT64],
types.Types[TCOMPLEX64],
types.Types[TCOMPLEX128],
types.Types[TSTRING],
types.Types[types.TBOOL],
types.Types[types.TINT],
types.Types[types.TINT8],
types.Types[types.TINT16],
types.Types[types.TINT32],
types.Types[types.TINT64],
types.Types[types.TUINT],
types.Types[types.TUINT8],
types.Types[types.TUINT16],
types.Types[types.TUINT32],
types.Types[types.TUINT64],
types.Types[types.TUINTPTR],
types.Types[types.TFLOAT32],
types.Types[types.TFLOAT64],
types.Types[types.TCOMPLEX64],
types.Types[types.TCOMPLEX128],
types.Types[types.TSTRING],
// basic type aliases
types.Bytetype,
@ -161,16 +166,16 @@ func predeclared() []*types.Type {
types.UntypedFloat,
types.UntypedComplex,
types.UntypedString,
types.Types[TNIL],
types.Types[types.TNIL],
// package unsafe
types.Types[TUNSAFEPTR],
types.Types[types.TUNSAFEPTR],
// invalid type (package contains errors)
types.Types[Txxx],
types.Types[types.Txxx],
// any type, for builtin export data
types.Types[TANY],
types.Types[types.TANY],
}
}
return predecl

View file

@ -5,20 +5,15 @@
package gc
import (
"cmd/compile/internal/ir"
"cmd/internal/src"
)
// numImport tracks how often a package with a given name is imported.
// It is used to provide a better error message (by using the package
// path to disambiguate) if a package that appears multiple times with
// the same name appears in an error message.
var numImport = make(map[string]int)
func npos(pos src.XPos, n *Node) *Node {
n.Pos = pos
func npos(pos src.XPos, n ir.Node) ir.Node {
n.SetPos(pos)
return n
}
func builtinCall(op Op) *Node {
return nod(OCALL, mkname(builtinpkg.Lookup(goopnames[op])), nil)
func builtinCall(op ir.Op) ir.Node {
return ir.Nod(ir.OCALL, mkname(ir.BuiltinPkg.Lookup(ir.OpNames[op])), nil)
}

View file

@ -6,8 +6,11 @@
package gc
import "runtime"
import (
"cmd/compile/internal/base"
"runtime"
)
func startMutexProfiling() {
Fatalf("mutex profiling unavailable in version %v", runtime.Version())
base.Fatalf("mutex profiling unavailable in version %v", runtime.Version())
}

View file

@ -2,7 +2,10 @@
package gc
import "cmd/compile/internal/types"
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
)
var runtimeDecls = [...]struct {
name string
@ -205,134 +208,134 @@ func runtimeTypes() []*types.Type {
var typs [131]*types.Type
typs[0] = types.Bytetype
typs[1] = types.NewPtr(typs[0])
typs[2] = types.Types[TANY]
typs[2] = types.Types[types.TANY]
typs[3] = types.NewPtr(typs[2])
typs[4] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[3])})
typs[5] = types.Types[TUINTPTR]
typs[6] = types.Types[TBOOL]
typs[7] = types.Types[TUNSAFEPTR]
typs[8] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*Node{anonfield(typs[7])})
typs[4] = functype(nil, []ir.Node{anonfield(typs[1])}, []ir.Node{anonfield(typs[3])})
typs[5] = types.Types[types.TUINTPTR]
typs[6] = types.Types[types.TBOOL]
typs[7] = types.Types[types.TUNSAFEPTR]
typs[8] = functype(nil, []ir.Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []ir.Node{anonfield(typs[7])})
typs[9] = functype(nil, nil, nil)
typs[10] = types.Types[TINTER]
typs[11] = functype(nil, []*Node{anonfield(typs[10])}, nil)
typs[12] = types.Types[TINT32]
typs[10] = types.Types[types.TINTER]
typs[11] = functype(nil, []ir.Node{anonfield(typs[10])}, nil)
typs[12] = types.Types[types.TINT32]
typs[13] = types.NewPtr(typs[12])
typs[14] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[10])})
typs[15] = types.Types[TINT]
typs[16] = functype(nil, []*Node{anonfield(typs[15]), anonfield(typs[15])}, nil)
typs[17] = types.Types[TUINT]
typs[18] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[15])}, nil)
typs[19] = functype(nil, []*Node{anonfield(typs[6])}, nil)
typs[20] = types.Types[TFLOAT64]
typs[21] = functype(nil, []*Node{anonfield(typs[20])}, nil)
typs[22] = types.Types[TINT64]
typs[23] = functype(nil, []*Node{anonfield(typs[22])}, nil)
typs[24] = types.Types[TUINT64]
typs[25] = functype(nil, []*Node{anonfield(typs[24])}, nil)
typs[26] = types.Types[TCOMPLEX128]
typs[27] = functype(nil, []*Node{anonfield(typs[26])}, nil)
typs[28] = types.Types[TSTRING]
typs[29] = functype(nil, []*Node{anonfield(typs[28])}, nil)
typs[30] = functype(nil, []*Node{anonfield(typs[2])}, nil)
typs[31] = functype(nil, []*Node{anonfield(typs[5])}, nil)
typs[14] = functype(nil, []ir.Node{anonfield(typs[13])}, []ir.Node{anonfield(typs[10])})
typs[15] = types.Types[types.TINT]
typs[16] = functype(nil, []ir.Node{anonfield(typs[15]), anonfield(typs[15])}, nil)
typs[17] = types.Types[types.TUINT]
typs[18] = functype(nil, []ir.Node{anonfield(typs[17]), anonfield(typs[15])}, nil)
typs[19] = functype(nil, []ir.Node{anonfield(typs[6])}, nil)
typs[20] = types.Types[types.TFLOAT64]
typs[21] = functype(nil, []ir.Node{anonfield(typs[20])}, nil)
typs[22] = types.Types[types.TINT64]
typs[23] = functype(nil, []ir.Node{anonfield(typs[22])}, nil)
typs[24] = types.Types[types.TUINT64]
typs[25] = functype(nil, []ir.Node{anonfield(typs[24])}, nil)
typs[26] = types.Types[types.TCOMPLEX128]
typs[27] = functype(nil, []ir.Node{anonfield(typs[26])}, nil)
typs[28] = types.Types[types.TSTRING]
typs[29] = functype(nil, []ir.Node{anonfield(typs[28])}, nil)
typs[30] = functype(nil, []ir.Node{anonfield(typs[2])}, nil)
typs[31] = functype(nil, []ir.Node{anonfield(typs[5])}, nil)
typs[32] = types.NewArray(typs[0], 32)
typs[33] = types.NewPtr(typs[32])
typs[34] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[35] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[36] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[37] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[34] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])})
typs[35] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])})
typs[36] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])})
typs[37] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])})
typs[38] = types.NewSlice(typs[28])
typs[39] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[38])}, []*Node{anonfield(typs[28])})
typs[40] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
typs[39] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[38])}, []ir.Node{anonfield(typs[28])})
typs[40] = functype(nil, []ir.Node{anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[15])})
typs[41] = types.NewArray(typs[0], 4)
typs[42] = types.NewPtr(typs[41])
typs[43] = functype(nil, []*Node{anonfield(typs[42]), anonfield(typs[22])}, []*Node{anonfield(typs[28])})
typs[44] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
typs[45] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
typs[43] = functype(nil, []ir.Node{anonfield(typs[42]), anonfield(typs[22])}, []ir.Node{anonfield(typs[28])})
typs[44] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[28])})
typs[45] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[28])})
typs[46] = types.Runetype
typs[47] = types.NewSlice(typs[46])
typs[48] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[47])}, []*Node{anonfield(typs[28])})
typs[48] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[47])}, []ir.Node{anonfield(typs[28])})
typs[49] = types.NewSlice(typs[0])
typs[50] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28])}, []*Node{anonfield(typs[49])})
typs[50] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28])}, []ir.Node{anonfield(typs[49])})
typs[51] = types.NewArray(typs[46], 32)
typs[52] = types.NewPtr(typs[51])
typs[53] = functype(nil, []*Node{anonfield(typs[52]), anonfield(typs[28])}, []*Node{anonfield(typs[47])})
typs[54] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
typs[55] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[46]), anonfield(typs[15])})
typs[56] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
typs[57] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
typs[58] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
typs[62] = functype(nil, []*Node{anonfield(typs[1])}, nil)
typs[53] = functype(nil, []ir.Node{anonfield(typs[52]), anonfield(typs[28])}, []ir.Node{anonfield(typs[47])})
typs[54] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []ir.Node{anonfield(typs[15])})
typs[55] = functype(nil, []ir.Node{anonfield(typs[28]), anonfield(typs[15])}, []ir.Node{anonfield(typs[46]), anonfield(typs[15])})
typs[56] = functype(nil, []ir.Node{anonfield(typs[28])}, []ir.Node{anonfield(typs[15])})
typs[57] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []ir.Node{anonfield(typs[2])})
typs[58] = functype(nil, []ir.Node{anonfield(typs[2])}, []ir.Node{anonfield(typs[7])})
typs[59] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3])}, []ir.Node{anonfield(typs[2])})
typs[60] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []ir.Node{anonfield(typs[2]), anonfield(typs[6])})
typs[61] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
typs[62] = functype(nil, []ir.Node{anonfield(typs[1])}, nil)
typs[63] = types.NewPtr(typs[5])
typs[64] = functype(nil, []*Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
typs[65] = types.Types[TUINT32]
typs[66] = functype(nil, nil, []*Node{anonfield(typs[65])})
typs[64] = functype(nil, []ir.Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []ir.Node{anonfield(typs[6])})
typs[65] = types.Types[types.TUINT32]
typs[66] = functype(nil, nil, []ir.Node{anonfield(typs[65])})
typs[67] = types.NewMap(typs[2], typs[2])
typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
typs[70] = functype(nil, nil, []*Node{anonfield(typs[67])})
typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
typs[79] = functype(nil, []*Node{anonfield(typs[3])}, nil)
typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
typs[68] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []ir.Node{anonfield(typs[67])})
typs[69] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []ir.Node{anonfield(typs[67])})
typs[70] = functype(nil, nil, []ir.Node{anonfield(typs[67])})
typs[71] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []ir.Node{anonfield(typs[3])})
typs[72] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []ir.Node{anonfield(typs[3])})
typs[73] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []ir.Node{anonfield(typs[3])})
typs[74] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])})
typs[75] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])})
typs[76] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])})
typs[77] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
typs[78] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
typs[79] = functype(nil, []ir.Node{anonfield(typs[3])}, nil)
typs[80] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
typs[81] = types.NewChan(typs[2], types.Cboth)
typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[81])})
typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[81])})
typs[82] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22])}, []ir.Node{anonfield(typs[81])})
typs[83] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[81])})
typs[84] = types.NewChan(typs[2], types.Crecv)
typs[85] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
typs[86] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[85] = functype(nil, []ir.Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
typs[86] = functype(nil, []ir.Node{anonfield(typs[84]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])})
typs[87] = types.NewChan(typs[2], types.Csend)
typs[88] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
typs[88] = functype(nil, []ir.Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
typs[89] = types.NewArray(typs[0], 3)
typs[90] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
typs[94] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
typs[90] = tostruct([]ir.Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
typs[91] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
typs[92] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
typs[93] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []ir.Node{anonfield(typs[15])})
typs[94] = functype(nil, []ir.Node{anonfield(typs[87]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])})
typs[95] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[84])}, []ir.Node{anonfield(typs[6])})
typs[96] = types.NewPtr(typs[6])
typs[97] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
typs[98] = functype(nil, []*Node{anonfield(typs[63])}, nil)
typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
typs[102] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
typs[97] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []ir.Node{anonfield(typs[6])})
typs[98] = functype(nil, []ir.Node{anonfield(typs[63])}, nil)
typs[99] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []ir.Node{anonfield(typs[15]), anonfield(typs[6])})
typs[100] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []ir.Node{anonfield(typs[7])})
typs[101] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []ir.Node{anonfield(typs[7])})
typs[102] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []ir.Node{anonfield(typs[7])})
typs[103] = types.NewSlice(typs[2])
typs[104] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*Node{anonfield(typs[103])})
typs[105] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
typs[106] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])})
typs[108] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
typs[111] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
typs[112] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])})
typs[113] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
typs[116] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[65])})
typs[117] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
typs[118] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
typs[119] = functype(nil, []*Node{anonfield(typs[65])}, []*Node{anonfield(typs[20])})
typs[120] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
typs[121] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
typs[122] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
typs[104] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []ir.Node{anonfield(typs[103])})
typs[105] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
typs[106] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
typs[107] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []ir.Node{anonfield(typs[6])})
typs[108] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])})
typs[109] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[7])}, []ir.Node{anonfield(typs[6])})
typs[110] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []ir.Node{anonfield(typs[5])})
typs[111] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5])}, []ir.Node{anonfield(typs[5])})
typs[112] = functype(nil, []ir.Node{anonfield(typs[22]), anonfield(typs[22])}, []ir.Node{anonfield(typs[22])})
typs[113] = functype(nil, []ir.Node{anonfield(typs[24]), anonfield(typs[24])}, []ir.Node{anonfield(typs[24])})
typs[114] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[22])})
typs[115] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[24])})
typs[116] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[65])})
typs[117] = functype(nil, []ir.Node{anonfield(typs[22])}, []ir.Node{anonfield(typs[20])})
typs[118] = functype(nil, []ir.Node{anonfield(typs[24])}, []ir.Node{anonfield(typs[20])})
typs[119] = functype(nil, []ir.Node{anonfield(typs[65])}, []ir.Node{anonfield(typs[20])})
typs[120] = functype(nil, []ir.Node{anonfield(typs[26]), anonfield(typs[26])}, []ir.Node{anonfield(typs[26])})
typs[121] = functype(nil, []ir.Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
typs[122] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
typs[123] = types.NewSlice(typs[7])
typs[124] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[123])}, nil)
typs[125] = types.Types[TUINT8]
typs[126] = functype(nil, []*Node{anonfield(typs[125]), anonfield(typs[125])}, nil)
typs[127] = types.Types[TUINT16]
typs[128] = functype(nil, []*Node{anonfield(typs[127]), anonfield(typs[127])}, nil)
typs[129] = functype(nil, []*Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
typs[130] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
typs[124] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[123])}, nil)
typs[125] = types.Types[types.TUINT8]
typs[126] = functype(nil, []ir.Node{anonfield(typs[125]), anonfield(typs[125])}, nil)
typs[127] = types.Types[types.TUINT16]
typs[128] = functype(nil, []ir.Node{anonfield(typs[127]), anonfield(typs[127])}, nil)
typs[129] = functype(nil, []ir.Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
typs[130] = functype(nil, []ir.Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
return typs[:]
}
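To read the table: each functype(nil, ins, outs) entry encodes a runtime function signature. Hand-decoding typs[8] above, for instance (the pairing of indices to named runtime functions lives in the runtimeDecls table, of which only the header appears above):

package sketch

import "unsafe"

// typs[5] = uintptr, typs[1] = *byte, typs[6] = bool, and
// typs[7] = unsafe.Pointer, so typs[8] describes this shape,
// which matches that of runtime.mallocgc(size, typ, needzero):
var _ func(uintptr, *byte, bool) unsafe.Pointer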

View file

@ -6,6 +6,8 @@ package gc
import (
"math/bits"
"cmd/compile/internal/base"
)
const (
@ -35,7 +37,7 @@ func bvbulkalloc(nbit int32, count int32) bulkBvec {
nword := (nbit + wordBits - 1) / wordBits
size := int64(nword) * int64(count)
if int64(int32(size*4)) != size*4 {
Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
base.Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
}
return bulkBvec{
words: make([]uint32, size),
@ -52,7 +54,7 @@ func (b *bulkBvec) next() bvec {
func (bv1 bvec) Eq(bv2 bvec) bool {
if bv1.n != bv2.n {
Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
}
for i, x := range bv1.b {
if x != bv2.b[i] {
@ -68,7 +70,7 @@ func (dst bvec) Copy(src bvec) {
func (bv bvec) Get(i int32) bool {
if i < 0 || i >= bv.n {
Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
return bv.b[i>>wordShift]&mask != 0
@ -76,7 +78,7 @@ func (bv bvec) Get(i int32) bool {
func (bv bvec) Set(i int32) {
if i < 0 || i >= bv.n {
Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
bv.b[i/wordBits] |= mask
@ -84,7 +86,7 @@ func (bv bvec) Set(i int32) {
func (bv bvec) Unset(i int32) {
if i < 0 || i >= bv.n {
Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n)
base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
bv.b[i/wordBits] &^= mask
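These bvec methods are standard packed-bitset operations; a freestanding equivalent over 32-bit words, bounds checks omitted (a sketch, not the compiler's type):

package sketch

const wordBits = 32

type bitset struct {
	n int32    // number of bits
	b []uint32 // (n+31)/32 words of packed storage
}

func (bv bitset) get(i int32) bool { return bv.b[i/wordBits]&(1<<uint(i%wordBits)) != 0 }
func (bv bitset) set(i int32)      { bv.b[i/wordBits] |= 1 << uint(i%wordBits) }
func (bv bitset) unset(i int32)    { bv.b[i/wordBits] &^= 1 << uint(i%wordBits) }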

View file

@ -5,37 +5,40 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
)
func (p *noder) funcLit(expr *syntax.FuncLit) *Node {
func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
xtype := p.typeExpr(expr.Type)
ntype := p.typeExpr(expr.Type)
xfunc := p.nod(expr, ODCLFUNC, nil, nil)
xfunc.Func.SetIsHiddenClosure(Curfn != nil)
xfunc.Func.Nname = newfuncnamel(p.pos(expr), nblank.Sym) // filled in by typecheckclosure
xfunc.Func.Nname.Name.Param.Ntype = xtype
xfunc.Func.Nname.Name.Defn = xfunc
dcl := p.nod(expr, ir.ODCLFUNC, nil, nil)
fn := dcl.Func()
fn.SetIsHiddenClosure(Curfn != nil)
fn.Nname = newfuncnamel(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure
fn.Nname.Name().Param.Ntype = xtype
fn.Nname.Name().Defn = dcl
clo := p.nod(expr, OCLOSURE, nil, nil)
clo.Func.Ntype = ntype
clo := p.nod(expr, ir.OCLOSURE, nil, nil)
clo.SetFunc(fn)
fn.ClosureType = ntype
fn.OClosure = clo
xfunc.Func.Closure = clo
clo.Func.Closure = xfunc
p.funcBody(xfunc, expr.Body)
p.funcBody(dcl, expr.Body)
// closure-specific variables are hanging off the
// ordinary ones in the symbol table; see oldname.
// unhook them.
// make the list of pointers for the closure call.
for _, v := range xfunc.Func.Cvars.Slice() {
for _, v := range fn.ClosureVars.Slice() {
// Unlink from v1; see comment in syntax.go type Param for these fields.
v1 := v.Name.Defn
v1.Name.Param.Innermost = v.Name.Param.Outer
v1 := v.Name().Defn
v1.Name().Param.Innermost = v.Name().Param.Outer
// If the closure usage of v is not dense,
// we need to make it dense; now that we're out
@ -65,7 +68,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) *Node {
// obtains f3's v, creating it if necessary (as it is in the example).
//
// capturevars will decide whether to use v directly or &v.
v.Name.Param.Outer = oldname(v.Sym)
v.Name().Param.Outer = oldname(v.Sym())
}
return clo
@ -75,60 +78,61 @@ func (p *noder) funcLit(expr *syntax.FuncLit) *Node {
// function associated with the closure.
// TODO: This creation of the named function should probably really be done in a
// separate pass from type-checking.
func typecheckclosure(clo *Node, top int) {
xfunc := clo.Func.Closure
func typecheckclosure(clo ir.Node, top int) {
fn := clo.Func()
dcl := fn.Decl
// Set current associated iota value, so iota can be used inside
// function in ConstSpec, see issue #22344
if x := getIotaValue(); x >= 0 {
xfunc.SetIota(x)
dcl.SetIota(x)
}
clo.Func.Ntype = typecheck(clo.Func.Ntype, ctxType)
clo.Type = clo.Func.Ntype.Type
clo.Func.Top = top
fn.ClosureType = typecheck(fn.ClosureType, ctxType)
clo.SetType(fn.ClosureType.Type())
fn.ClosureCalled = top&ctxCallee != 0
// Do not typecheck xfunc twice, otherwise we will end up pushing
// xfunc to xtop multiple times, causing initLSym to be called twice.
// Do not typecheck dcl twice, otherwise we will end up pushing
// dcl to xtop multiple times, causing initLSym to be called twice.
// See #30709
if xfunc.Typecheck() == 1 {
if dcl.Typecheck() == 1 {
return
}
for _, ln := range xfunc.Func.Cvars.Slice() {
n := ln.Name.Defn
if !n.Name.Captured() {
n.Name.SetCaptured(true)
if n.Name.Decldepth == 0 {
Fatalf("typecheckclosure: var %S does not have decldepth assigned", n)
for _, ln := range fn.ClosureVars.Slice() {
n := ln.Name().Defn
if !n.Name().Captured() {
n.Name().SetCaptured(true)
if n.Name().Decldepth == 0 {
base.Fatalf("typecheckclosure: var %S does not have decldepth assigned", n)
}
// Ignore assignments to the variable in straightline code
// preceding the first capturing by a closure.
if n.Name.Decldepth == decldepth {
n.Name.SetAssigned(false)
if n.Name().Decldepth == decldepth {
n.Name().SetAssigned(false)
}
}
}
xfunc.Func.Nname.Sym = closurename(Curfn)
setNodeNameFunc(xfunc.Func.Nname)
xfunc = typecheck(xfunc, ctxStmt)
fn.Nname.SetSym(closurename(Curfn))
setNodeNameFunc(fn.Nname)
dcl = typecheck(dcl, ctxStmt)
// Type check the body now, but only if we're inside a function.
// At top level (in a variable initialization: curfn==nil) we're not
// ready to type check code yet; we'll check it later, because the
// underlying closure function we create is added to xtop.
if Curfn != nil && clo.Type != nil {
if Curfn != nil && clo.Type() != nil {
oldfn := Curfn
Curfn = xfunc
Curfn = dcl
olddd := decldepth
decldepth = 1
typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
typecheckslice(dcl.Body().Slice(), ctxStmt)
decldepth = olddd
Curfn = oldfn
}
xtop = append(xtop, xfunc)
xtop = append(xtop, dcl)
}
// globClosgen is like Func.Closgen, but for the global scope.
@ -136,23 +140,23 @@ var globClosgen int
// closurename generates a new unique name for a closure within
// outerfunc.
func closurename(outerfunc *Node) *types.Sym {
func closurename(outerfunc ir.Node) *types.Sym {
outer := "glob."
prefix := "func"
gen := &globClosgen
if outerfunc != nil {
if outerfunc.Func.Closure != nil {
if outerfunc.Func().OClosure != nil {
prefix = ""
}
outer = outerfunc.funcname()
outer = ir.FuncName(outerfunc)
// There may be multiple functions named "_". In those
// cases, we can't use their individual Closgens as it
// would lead to name clashes.
if !outerfunc.Func.Nname.isBlank() {
gen = &outerfunc.Func.Closgen
if !ir.IsBlank(outerfunc.Func().Nname) {
gen = &outerfunc.Func().Closgen
}
}
@ -168,15 +172,14 @@ var capturevarscomplete bool
// by value or by reference.
// We use value capturing for values <= 128 bytes that are never reassigned
// after capturing (effectively constant).
func capturevars(xfunc *Node) {
lno := lineno
lineno = xfunc.Pos
clo := xfunc.Func.Closure
cvars := xfunc.Func.Cvars.Slice()
func capturevars(dcl ir.Node) {
lno := base.Pos
base.Pos = dcl.Pos()
fn := dcl.Func()
cvars := fn.ClosureVars.Slice()
out := cvars[:0]
for _, v := range cvars {
if v.Type == nil {
if v.Type() == nil {
// If v.Type is nil, it means v looked like it
// was going to be used in the closure, but
// isn't. This happens in struct literals like
@ -189,47 +192,47 @@ func capturevars(xfunc *Node) {
// type check the & of closed variables outside the closure,
// so that the outer frame also grabs them and knows they escape.
dowidth(v.Type)
dowidth(v.Type())
outer := v.Name.Param.Outer
outermost := v.Name.Defn
outer := v.Name().Param.Outer
outermost := v.Name().Defn
// out parameters will be assigned to implicitly upon return.
if outermost.Class() != PPARAMOUT && !outermost.Name.Addrtaken() && !outermost.Name.Assigned() && v.Type.Width <= 128 {
v.Name.SetByval(true)
if outermost.Class() != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 {
v.Name().SetByval(true)
} else {
outermost.Name.SetAddrtaken(true)
outer = nod(OADDR, outer, nil)
outermost.Name().SetAddrtaken(true)
outer = ir.Nod(ir.OADDR, outer, nil)
}
if Debug.m > 1 {
if base.Flag.LowerM > 1 {
var name *types.Sym
if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil {
name = v.Name.Curfn.Func.Nname.Sym
if v.Name().Curfn != nil && v.Name().Curfn.Func().Nname != nil {
name = v.Name().Curfn.Func().Nname.Sym()
}
how := "ref"
if v.Name.Byval() {
if v.Name().Byval() {
how = "value"
}
Warnl(v.Pos, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Name.Addrtaken(), outermost.Name.Assigned(), int32(v.Type.Width))
base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym(), outermost.Name().Addrtaken(), outermost.Name().Assigned(), int32(v.Type().Width))
}
outer = typecheck(outer, ctxExpr)
clo.Func.Enter.Append(outer)
fn.ClosureEnter.Append(outer)
}
xfunc.Func.Cvars.Set(out)
lineno = lno
fn.ClosureVars.Set(out)
base.Pos = lno
}
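Concretely, the byval/byref decision above plays out on source like this (an illustrative example, not compiler code):

package sketch

// small is under 128 bytes and never reassigned after capture,
// so closures take it by value; limit is assigned inside bump,
// so both closures take it by reference (&limit).
func counters() (peek func() int, bump func()) {
	small := 1
	limit := 10
	peek = func() int { return small + limit }
	bump = func() { limit++ }
	return
}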
// transformclosure is called in a separate phase after escape analysis.
// It transforms closure bodies to properly reference captured variables.
func transformclosure(xfunc *Node) {
lno := lineno
lineno = xfunc.Pos
clo := xfunc.Func.Closure
func transformclosure(dcl ir.Node) {
lno := base.Pos
base.Pos = dcl.Pos()
fn := dcl.Func()
if clo.Func.Top&ctxCallee != 0 {
if fn.ClosureCalled {
// If the closure is directly called, we transform it to a plain function call
// with variables passed as args. This avoids allocation of a closure object.
// Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
@ -246,116 +249,112 @@ func transformclosure(xfunc *Node) {
// }(byval, &byref, 42)
// f is ONAME of the actual function.
f := xfunc.Func.Nname
f := fn.Nname
// We are going to insert captured variables before input args.
var params []*types.Field
var decls []*Node
for _, v := range xfunc.Func.Cvars.Slice() {
if !v.Name.Byval() {
var decls []ir.Node
for _, v := range fn.ClosureVars.Slice() {
if !v.Name().Byval() {
// If v of type T is captured by reference,
// we introduce function param &v *T
// and v remains PAUTOHEAP with &v heapaddr
// (accesses will implicitly deref &v).
addr := newname(lookup("&" + v.Sym.Name))
addr.Type = types.NewPtr(v.Type)
v.Name.Param.Heapaddr = addr
addr := NewName(lookup("&" + v.Sym().Name))
addr.SetType(types.NewPtr(v.Type()))
v.Name().Param.Heapaddr = addr
v = addr
}
v.SetClass(PPARAM)
v.SetClass(ir.PPARAM)
decls = append(decls, v)
fld := types.NewField()
fld.Nname = asTypesNode(v)
fld.Type = v.Type
fld.Sym = v.Sym
fld := types.NewField(src.NoXPos, v.Sym(), v.Type())
fld.Nname = v
params = append(params, fld)
}
if len(params) > 0 {
// Prepend params and decls.
f.Type.Params().SetFields(append(params, f.Type.Params().FieldSlice()...))
xfunc.Func.Dcl = append(decls, xfunc.Func.Dcl...)
f.Type().Params().SetFields(append(params, f.Type().Params().FieldSlice()...))
fn.Dcl = append(decls, fn.Dcl...)
}
dowidth(f.Type)
xfunc.Type = f.Type // update type of ODCLFUNC
dowidth(f.Type())
dcl.SetType(f.Type()) // update type of ODCLFUNC
} else {
// The closure is not called, so it is going to stay as closure.
var body []*Node
var body []ir.Node
offset := int64(Widthptr)
for _, v := range xfunc.Func.Cvars.Slice() {
for _, v := range fn.ClosureVars.Slice() {
// cv refers to the field inside of closure OSTRUCTLIT.
cv := nod(OCLOSUREVAR, nil, nil)
cv := ir.Nod(ir.OCLOSUREVAR, nil, nil)
cv.Type = v.Type
if !v.Name.Byval() {
cv.Type = types.NewPtr(v.Type)
cv.SetType(v.Type())
if !v.Name().Byval() {
cv.SetType(types.NewPtr(v.Type()))
}
offset = Rnd(offset, int64(cv.Type.Align))
cv.Xoffset = offset
offset += cv.Type.Width
offset = Rnd(offset, int64(cv.Type().Align))
cv.SetOffset(offset)
offset += cv.Type().Width
if v.Name.Byval() && v.Type.Width <= int64(2*Widthptr) {
if v.Name().Byval() && v.Type().Width <= int64(2*Widthptr) {
// If it is a small variable captured by value, downgrade it to PAUTO.
v.SetClass(PAUTO)
xfunc.Func.Dcl = append(xfunc.Func.Dcl, v)
body = append(body, nod(OAS, v, cv))
v.SetClass(ir.PAUTO)
fn.Dcl = append(fn.Dcl, v)
body = append(body, ir.Nod(ir.OAS, v, cv))
} else {
// Declare variable holding addresses taken from closure
// and initialize in entry prologue.
addr := newname(lookup("&" + v.Sym.Name))
addr.Type = types.NewPtr(v.Type)
addr.SetClass(PAUTO)
addr.Name.SetUsed(true)
addr.Name.Curfn = xfunc
xfunc.Func.Dcl = append(xfunc.Func.Dcl, addr)
v.Name.Param.Heapaddr = addr
if v.Name.Byval() {
cv = nod(OADDR, cv, nil)
addr := NewName(lookup("&" + v.Sym().Name))
addr.SetType(types.NewPtr(v.Type()))
addr.SetClass(ir.PAUTO)
addr.Name().SetUsed(true)
addr.Name().Curfn = dcl
fn.Dcl = append(fn.Dcl, addr)
v.Name().Param.Heapaddr = addr
if v.Name().Byval() {
cv = ir.Nod(ir.OADDR, cv, nil)
}
body = append(body, nod(OAS, addr, cv))
body = append(body, ir.Nod(ir.OAS, addr, cv))
}
}
if len(body) > 0 {
typecheckslice(body, ctxStmt)
xfunc.Func.Enter.Set(body)
xfunc.Func.SetNeedctxt(true)
fn.Enter.Set(body)
fn.SetNeedctxt(true)
}
}
lineno = lno
base.Pos = lno
}
// hasemptycvars reports whether closure clo has an
// empty list of captured vars.
func hasemptycvars(clo *Node) bool {
xfunc := clo.Func.Closure
return xfunc.Func.Cvars.Len() == 0
func hasemptycvars(clo ir.Node) bool {
return clo.Func().ClosureVars.Len() == 0
}
// closuredebugruntimecheck applies boilerplate checks for debug flags
// and compiling runtime
func closuredebugruntimecheck(clo *Node) {
if Debug_closure > 0 {
xfunc := clo.Func.Closure
if clo.Esc == EscHeap {
Warnl(clo.Pos, "heap closure, captured vars = %v", xfunc.Func.Cvars)
func closuredebugruntimecheck(clo ir.Node) {
if base.Debug.Closure > 0 {
if clo.Esc() == EscHeap {
base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func().ClosureVars)
} else {
Warnl(clo.Pos, "stack closure, captured vars = %v", xfunc.Func.Cvars)
base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func().ClosureVars)
}
}
if compiling_runtime && clo.Esc == EscHeap {
yyerrorl(clo.Pos, "heap-allocated closure, not allowed in runtime")
if base.Flag.CompilingRuntime && clo.Esc() == EscHeap {
base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime")
}
}
// closureType returns the struct type used to hold all the information
// needed in the closure for clo (clo must be a OCLOSURE node).
// The address of a variable of the returned type can be cast to a func.
func closureType(clo *Node) *types.Type {
func closureType(clo ir.Node) *types.Type {
// Create closure in the form of a composite literal.
// supposing the closure captures an int i and a string s
// and has one float64 argument and no results,
@ -369,94 +368,95 @@ func closureType(clo *Node) *types.Type {
// The information appears in the binary in the form of type descriptors;
// the struct is unnamed so that closures in multiple packages with the
// same struct type can share the descriptor.
fields := []*Node{
namedfield(".F", types.Types[TUINTPTR]),
fields := []ir.Node{
namedfield(".F", types.Types[types.TUINTPTR]),
}
for _, v := range clo.Func.Closure.Func.Cvars.Slice() {
typ := v.Type
if !v.Name.Byval() {
for _, v := range clo.Func().ClosureVars.Slice() {
typ := v.Type()
if !v.Name().Byval() {
typ = types.NewPtr(typ)
}
fields = append(fields, symfield(v.Sym, typ))
fields = append(fields, symfield(v.Sym(), typ))
}
typ := tostruct(fields)
typ.SetNoalg(true)
return typ
}
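For a closure capturing an int i by reference and a string s by value, the unnamed struct built here has roughly this shape (hand-written; the compiler spells the first field .F, which is not legal in ordinary source):

package sketch

// closureHeader approximates the composite-literal type above.
type closureHeader struct {
	F uintptr // code pointer, always the first field
	i *int    // byref capture: pointer to the variable
	s string  // byval capture: a copy of the value
}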
func walkclosure(clo *Node, init *Nodes) *Node {
xfunc := clo.Func.Closure
func walkclosure(clo ir.Node, init *ir.Nodes) ir.Node {
fn := clo.Func()
// If no closure vars, don't bother wrapping.
if hasemptycvars(clo) {
if Debug_closure > 0 {
Warnl(clo.Pos, "closure converted to global")
if base.Debug.Closure > 0 {
base.WarnfAt(clo.Pos(), "closure converted to global")
}
return xfunc.Func.Nname
return fn.Nname
}
closuredebugruntimecheck(clo)
typ := closureType(clo)
clos := nod(OCOMPLIT, nil, typenod(typ))
clos.Esc = clo.Esc
clos.List.Set(append([]*Node{nod(OCFUNC, xfunc.Func.Nname, nil)}, clo.Func.Enter.Slice()...))
clos := ir.Nod(ir.OCOMPLIT, nil, typenod(typ))
clos.SetEsc(clo.Esc())
clos.PtrList().Set(append([]ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...))
clos = nod(OADDR, clos, nil)
clos.Esc = clo.Esc
clos = ir.Nod(ir.OADDR, clos, nil)
clos.SetEsc(clo.Esc())
// Force type conversion from *struct to the func type.
clos = convnop(clos, clo.Type)
clos = convnop(clos, clo.Type())
// non-escaping temp to use, if any.
if x := prealloc[clo]; x != nil {
if !types.Identical(typ, x.Type) {
if !types.Identical(typ, x.Type()) {
panic("closure type does not match order's assigned type")
}
clos.Left.Right = x
clos.Left().SetRight(x)
delete(prealloc, clo)
}
return walkexpr(clos, init)
}
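// Illustrative sketch, not part of this diff: the two paths walkclosure takes.
// f captures nothing, so it can be rewritten to a global func value
// (hasemptycvars); g captures n, so it needs a real closure environment.
// The -d=closure debug flag spelling is an assumption based on the
// base.Debug.Closure check above.
package main

func main() {
	f := func() int { return 1 } // no captured vars: "closure converted to global"
	n := 2
	g := func() int { return n } // captured var: composite-literal closure
	println(f() + g())
}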
func typecheckpartialcall(fn *Node, sym *types.Sym) {
switch fn.Op {
case ODOTINTER, ODOTMETH:
func typecheckpartialcall(dot ir.Node, sym *types.Sym) {
switch dot.Op() {
case ir.ODOTINTER, ir.ODOTMETH:
break
default:
Fatalf("invalid typecheckpartialcall")
base.Fatalf("invalid typecheckpartialcall")
}
// Create top-level function.
xfunc := makepartialcall(fn, fn.Type, sym)
fn.Func = xfunc.Func
fn.Func.SetWrapper(true)
fn.Right = newname(sym)
fn.Op = OCALLPART
fn.Type = xfunc.Type
dcl := makepartialcall(dot, dot.Type(), sym)
dcl.Func().SetWrapper(true)
dot.SetOp(ir.OCALLPART)
dot.SetRight(NewName(sym))
dot.SetType(dcl.Type())
dot.SetFunc(dcl.Func())
dot.SetOpt(nil) // clear types.Field from ODOTMETH
}
// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
// for partial calls.
func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
rcvrtype := fn.Left.Type
func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) ir.Node {
rcvrtype := dot.Left().Type()
sym := methodSymSuffix(rcvrtype, meth, "-fm")
if sym.Uniq() {
return asNode(sym.Def)
return ir.AsNode(sym.Def)
}
sym.SetUniq(true)
savecurfn := Curfn
saveLineNo := lineno
saveLineNo := base.Pos
Curfn = nil
// Set line number equal to the line number where the method is declared.
var m *types.Field
if lookdot0(meth, rcvrtype, &m, false) == 1 && m.Pos.IsKnown() {
lineno = m.Pos
base.Pos = m.Pos
}
// Note: !m.Pos.IsKnown() happens for method expressions where
// the method is implicitly declared. The Error method of the
@ -464,73 +464,74 @@ func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
// number at the use of the method expression in this
// case. See issue 29389.
tfn := nod(OTFUNC, nil, nil)
tfn.List.Set(structargs(t0.Params(), true))
tfn.Rlist.Set(structargs(t0.Results(), false))
tfn := ir.Nod(ir.OTFUNC, nil, nil)
tfn.PtrList().Set(structargs(t0.Params(), true))
tfn.PtrRlist().Set(structargs(t0.Results(), false))
xfunc := dclfunc(sym, tfn)
xfunc.Func.SetDupok(true)
xfunc.Func.SetNeedctxt(true)
dcl := dclfunc(sym, tfn)
fn := dcl.Func()
fn.SetDupok(true)
fn.SetNeedctxt(true)
tfn.Type.SetPkg(t0.Pkg())
tfn.Type().SetPkg(t0.Pkg())
// Declare and initialize variable holding receiver.
cv := nod(OCLOSUREVAR, nil, nil)
cv.Type = rcvrtype
cv.Xoffset = Rnd(int64(Widthptr), int64(cv.Type.Align))
cv := ir.Nod(ir.OCLOSUREVAR, nil, nil)
cv.SetType(rcvrtype)
cv.SetOffset(Rnd(int64(Widthptr), int64(cv.Type().Align)))
ptr := newname(lookup(".this"))
declare(ptr, PAUTO)
ptr.Name.SetUsed(true)
var body []*Node
ptr := NewName(lookup(".this"))
declare(ptr, ir.PAUTO)
ptr.Name().SetUsed(true)
var body []ir.Node
if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
ptr.Type = rcvrtype
body = append(body, nod(OAS, ptr, cv))
ptr.SetType(rcvrtype)
body = append(body, ir.Nod(ir.OAS, ptr, cv))
} else {
ptr.Type = types.NewPtr(rcvrtype)
body = append(body, nod(OAS, ptr, nod(OADDR, cv, nil)))
ptr.SetType(types.NewPtr(rcvrtype))
body = append(body, ir.Nod(ir.OAS, ptr, ir.Nod(ir.OADDR, cv, nil)))
}
call := nod(OCALL, nodSym(OXDOT, ptr, meth), nil)
call.List.Set(paramNnames(tfn.Type))
call.SetIsDDD(tfn.Type.IsVariadic())
call := ir.Nod(ir.OCALL, nodSym(ir.OXDOT, ptr, meth), nil)
call.PtrList().Set(paramNnames(tfn.Type()))
call.SetIsDDD(tfn.Type().IsVariadic())
if t0.NumResults() != 0 {
n := nod(ORETURN, nil, nil)
n.List.Set1(call)
n := ir.Nod(ir.ORETURN, nil, nil)
n.PtrList().Set1(call)
call = n
}
body = append(body, call)
xfunc.Nbody.Set(body)
dcl.PtrBody().Set(body)
funcbody()
xfunc = typecheck(xfunc, ctxStmt)
dcl = typecheck(dcl, ctxStmt)
// Need to typecheck the body of the just-generated wrapper.
// typecheckslice() requires that Curfn is set when processing an ORETURN.
Curfn = xfunc
typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
sym.Def = asTypesNode(xfunc)
xtop = append(xtop, xfunc)
Curfn = dcl
typecheckslice(dcl.Body().Slice(), ctxStmt)
sym.Def = dcl
xtop = append(xtop, dcl)
Curfn = savecurfn
lineno = saveLineNo
base.Pos = saveLineNo
return xfunc
return dcl
}
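// Illustrative sketch, not part of this diff: at the language level, the
// "-fm" wrapper built by makepartialcall is why a method value binds its
// receiver when it is evaluated, not when it is called.
package main

import "fmt"

type counter struct{ n int }

func (c counter) get() int { return c.n }

func main() {
	c := counter{n: 1}
	get := c.get // receiver copied into the wrapper's environment here
	c.n = 2
	fmt.Println(get())   // 1: the copy bound at evaluation time
	fmt.Println(c.get()) // 2: a fresh call
}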
// partialCallType returns the struct type used to hold all the information
// needed in the closure for n (n must be a OCALLPART node).
// The address of a variable of the returned type can be cast to a func.
func partialCallType(n *Node) *types.Type {
t := tostruct([]*Node{
namedfield("F", types.Types[TUINTPTR]),
namedfield("R", n.Left.Type),
func partialCallType(n ir.Node) *types.Type {
t := tostruct([]ir.Node{
namedfield("F", types.Types[types.TUINTPTR]),
namedfield("R", n.Left().Type()),
})
t.SetNoalg(true)
return t
}
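// Illustrative sketch, not part of this diff: a hand-written analogue of the
// two-field struct partialCallType describes for a method value x.M with
// receiver type T. F and R are the names used in the namedfield calls above;
// the rest is assumed for illustration.
package main

import "fmt"

type T struct{ v int }

type methodValueEnv struct {
	F uintptr // wrapper code pointer
	R T       // the bound receiver
}

func main() {
	fmt.Println(methodValueEnv{R: T{v: 7}}.R.v) // 7
}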
func walkpartialcall(n *Node, init *Nodes) *Node {
func walkpartialcall(n ir.Node, init *ir.Nodes) ir.Node {
// Create closure in the form of a composite literal.
// For x.M with receiver (x) type T, the generated code looks like:
//
@ -538,38 +539,38 @@ func walkpartialcall(n *Node, init *Nodes) *Node {
//
// Like walkclosure above.
if n.Left.Type.IsInterface() {
if n.Left().Type().IsInterface() {
// Trigger panic for method on nil interface now.
// Otherwise it happens in the wrapper and is confusing.
n.Left = cheapexpr(n.Left, init)
n.Left = walkexpr(n.Left, nil)
n.SetLeft(cheapexpr(n.Left(), init))
n.SetLeft(walkexpr(n.Left(), nil))
tab := nod(OITAB, n.Left, nil)
tab := ir.Nod(ir.OITAB, n.Left(), nil)
tab = typecheck(tab, ctxExpr)
c := nod(OCHECKNIL, tab, nil)
c := ir.Nod(ir.OCHECKNIL, tab, nil)
c.SetTypecheck(1)
init.Append(c)
}
typ := partialCallType(n)
clos := nod(OCOMPLIT, nil, typenod(typ))
clos.Esc = n.Esc
clos.List.Set2(nod(OCFUNC, n.Func.Nname, nil), n.Left)
clos := ir.Nod(ir.OCOMPLIT, nil, typenod(typ))
clos.SetEsc(n.Esc())
clos.PtrList().Set2(ir.Nod(ir.OCFUNC, n.Func().Nname, nil), n.Left())
clos = nod(OADDR, clos, nil)
clos.Esc = n.Esc
clos = ir.Nod(ir.OADDR, clos, nil)
clos.SetEsc(n.Esc())
// Force type conversion from *struct to the func type.
clos = convnop(clos, n.Type)
clos = convnop(clos, n.Type())
// non-escaping temp to use, if any.
if x := prealloc[n]; x != nil {
if !types.Identical(typ, x.Type) {
if !types.Identical(typ, x.Type()) {
panic("partial call type does not match order's assigned type")
}
clos.Left.Right = x
clos.Left().SetRight(x)
delete(prealloc, n)
}
@ -578,16 +579,16 @@ func walkpartialcall(n *Node, init *Nodes) *Node {
// callpartMethod returns the *types.Field representing the method
// referenced by method value n.
func callpartMethod(n *Node) *types.Field {
if n.Op != OCALLPART {
Fatalf("expected OCALLPART, got %v", n)
func callpartMethod(n ir.Node) *types.Field {
if n.Op() != ir.OCALLPART {
base.Fatalf("expected OCALLPART, got %v", n)
}
// TODO(mdempsky): Optimize this. If necessary,
// makepartialcall could save m for us somewhere.
var m *types.Field
if lookdot0(n.Right.Sym, n.Left.Type, &m, false) != 1 {
Fatalf("failed to find field for OCALLPART")
if lookdot0(n.Right().Sym(), n.Left().Type(), &m, false) != 1 {
base.Fatalf("failed to find field for OCALLPART")
}
return m

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -18,7 +18,7 @@ func TestDeps(t *testing.T) {
}
for _, dep := range strings.Fields(strings.Trim(string(out), "[]")) {
switch dep {
case "go/build", "go/token":
case "go/build", "go/scanner":
// cmd/compile/internal/importer introduces a dependency
// on go/build and go/token; cmd/compile/internal/ uses
// go/constant which uses go/token in its API. Once we


@ -5,6 +5,7 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/src"
@ -26,8 +27,8 @@ type varPos struct {
func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
var inlcalls dwarf.InlCalls
if Debug_gendwarfinl != 0 {
Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
if base.Debug.DwarfInl != 0 {
base.Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
}
// This maps inline index (from Ctxt.InlTree) to index in inlcalls.Calls
@ -106,7 +107,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
}
m = makePreinlineDclMap(fnsym)
} else {
ifnlsym := Ctxt.InlTree.InlinedFunction(int(ii - 1))
ifnlsym := base.Ctxt.InlTree.InlinedFunction(int(ii - 1))
m = makePreinlineDclMap(ifnlsym)
}
@ -181,7 +182,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
}
// Debugging
if Debug_gendwarfinl != 0 {
if base.Debug.DwarfInl != 0 {
dumpInlCalls(inlcalls)
dumpInlVars(dwVars)
}
@ -205,15 +206,15 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
// abstract function DIE for an inlined routine imported from a
// previously compiled package.
func genAbstractFunc(fn *obj.LSym) {
ifn := Ctxt.DwFixups.GetPrecursorFunc(fn)
ifn := base.Ctxt.DwFixups.GetPrecursorFunc(fn)
if ifn == nil {
Ctxt.Diag("failed to locate precursor fn for %v", fn)
base.Ctxt.Diag("failed to locate precursor fn for %v", fn)
return
}
if Debug_gendwarfinl != 0 {
Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
if base.Debug.DwarfInl != 0 {
base.Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
}
Ctxt.DwarfAbstractFunc(ifn, fn, myimportpath)
base.Ctxt.DwarfAbstractFunc(ifn, fn, base.Ctxt.Pkgpath)
}
// Undo any versioning performed when a name was written
@ -235,15 +236,15 @@ func makePreinlineDclMap(fnsym *obj.LSym) map[varPos]int {
dcl := preInliningDcls(fnsym)
m := make(map[varPos]int)
for i, n := range dcl {
pos := Ctxt.InnermostPos(n.Pos)
pos := base.Ctxt.InnermostPos(n.Pos())
vp := varPos{
DeclName: unversion(n.Sym.Name),
DeclName: unversion(n.Sym().Name),
DeclFile: pos.RelFilename(),
DeclLine: pos.RelLine(),
DeclCol: pos.Col(),
}
if _, found := m[vp]; found {
Fatalf("child dcl collision on symbol %s within %v\n", n.Sym.Name, fnsym.Name)
base.Fatalf("child dcl collision on symbol %s within %v\n", n.Sym().Name, fnsym.Name)
}
m[vp] = i
}
@ -260,17 +261,17 @@ func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int {
// is one. We do this first so that parents appear before their
// children in the resulting table.
parCallIdx := -1
parInlIdx := Ctxt.InlTree.Parent(inlIdx)
parInlIdx := base.Ctxt.InlTree.Parent(inlIdx)
if parInlIdx >= 0 {
parCallIdx = insertInlCall(dwcalls, parInlIdx, imap)
}
// Create new entry for this inline
inlinedFn := Ctxt.InlTree.InlinedFunction(inlIdx)
callXPos := Ctxt.InlTree.CallPos(inlIdx)
absFnSym := Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
pb := Ctxt.PosTable.Pos(callXPos).Base()
callFileSym := Ctxt.Lookup(pb.SymFilename())
inlinedFn := base.Ctxt.InlTree.InlinedFunction(inlIdx)
callXPos := base.Ctxt.InlTree.CallPos(inlIdx)
absFnSym := base.Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
pb := base.Ctxt.PosTable.Pos(callXPos).Base()
callFileSym := base.Ctxt.Lookup(pb.SymFilename())
ic := dwarf.InlCall{
InlIndex: inlIdx,
CallFile: callFileSym,
@ -298,7 +299,7 @@ func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int {
// the index for a node from the inlined body of D will refer to the
// call to D from C. Whew.
func posInlIndex(xpos src.XPos) int {
pos := Ctxt.PosTable.Pos(xpos)
pos := base.Ctxt.PosTable.Pos(xpos)
if b := pos.Base(); b != nil {
ii := b.InliningIndex()
if ii >= 0 {
@ -324,7 +325,7 @@ func addRange(calls []dwarf.InlCall, start, end int64, ii int, imap map[int]int)
// Append range to correct inlined call
callIdx, found := imap[ii]
if !found {
Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
base.Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
}
call := &calls[callIdx]
call.Ranges = append(call.Ranges, dwarf.Range{Start: start, End: end})
@ -332,23 +333,23 @@ func addRange(calls []dwarf.InlCall, start, end int64, ii int, imap map[int]int)
func dumpInlCall(inlcalls dwarf.InlCalls, idx, ilevel int) {
for i := 0; i < ilevel; i++ {
Ctxt.Logf(" ")
base.Ctxt.Logf(" ")
}
ic := inlcalls.Calls[idx]
callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex)
Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex)
base.Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
for _, f := range ic.InlVars {
Ctxt.Logf(" %v", f.Name)
base.Ctxt.Logf(" %v", f.Name)
}
Ctxt.Logf(" ) C: (")
base.Ctxt.Logf(" ) C: (")
for _, k := range ic.Children {
Ctxt.Logf(" %v", k)
base.Ctxt.Logf(" %v", k)
}
Ctxt.Logf(" ) R:")
base.Ctxt.Logf(" ) R:")
for _, r := range ic.Ranges {
Ctxt.Logf(" [%d,%d)", r.Start, r.End)
base.Ctxt.Logf(" [%d,%d)", r.Start, r.End)
}
Ctxt.Logf("\n")
base.Ctxt.Logf("\n")
for _, k := range ic.Children {
dumpInlCall(inlcalls, k, ilevel+1)
}
@ -373,7 +374,7 @@ func dumpInlVars(dwvars []*dwarf.Var) {
if dwv.IsInAbstract {
ia = 1
}
Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
base.Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
}
}
@ -410,7 +411,7 @@ func checkInlCall(funcName string, inlCalls dwarf.InlCalls, funcSize int64, idx,
// Callee
ic := inlCalls.Calls[idx]
callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
calleeRanges := ic.Ranges
// Caller
@ -418,14 +419,14 @@ func checkInlCall(funcName string, inlCalls dwarf.InlCalls, funcSize int64, idx,
parentRanges := []dwarf.Range{dwarf.Range{Start: int64(0), End: funcSize}}
if parentIdx != -1 {
pic := inlCalls.Calls[parentIdx]
caller = Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
caller = base.Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
parentRanges = pic.Ranges
}
// Callee ranges contained in caller ranges?
c, m := rangesContainsAll(parentRanges, calleeRanges)
if !c {
Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
base.Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
}
// Now visit kids


@ -5,40 +5,19 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types"
"cmd/internal/obj"
"encoding/json"
"io/ioutil"
"log"
"path"
"sort"
"strconv"
"strings"
)
var embedlist []*Node
var embedCfg struct {
Patterns map[string][]string
Files map[string]string
}
func readEmbedCfg(file string) {
data, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("-embedcfg: %v", err)
}
if err := json.Unmarshal(data, &embedCfg); err != nil {
log.Fatalf("%s: %v", file, err)
}
if embedCfg.Patterns == nil {
log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
}
if embedCfg.Files == nil {
log.Fatalf("%s: invalid embedcfg: missing Files", file)
}
}
var embedlist []ir.Node
const (
embedUnknown = iota
@ -49,7 +28,7 @@ const (
var numLocalEmbed int
func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []PragmaEmbed) (newExprs []*Node) {
func varEmbed(p *noder, names []ir.Node, typ ir.Node, exprs []ir.Node, embeds []PragmaEmbed) (newExprs []ir.Node) {
haveEmbed := false
for _, decl := range p.file.DeclList {
imp, ok := decl.(*syntax.ImportDecl)
@ -66,30 +45,30 @@ func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []Pragma
pos := embeds[0].Pos
if !haveEmbed {
p.yyerrorpos(pos, "invalid go:embed: missing import \"embed\"")
p.errorAt(pos, "invalid go:embed: missing import \"embed\"")
return exprs
}
if embedCfg.Patterns == nil {
p.yyerrorpos(pos, "invalid go:embed: build system did not supply embed configuration")
if base.Flag.Cfg.Embed.Patterns == nil {
p.errorAt(pos, "invalid go:embed: build system did not supply embed configuration")
return exprs
}
if len(names) > 1 {
p.yyerrorpos(pos, "go:embed cannot apply to multiple vars")
p.errorAt(pos, "go:embed cannot apply to multiple vars")
return exprs
}
if len(exprs) > 0 {
p.yyerrorpos(pos, "go:embed cannot apply to var with initializer")
p.errorAt(pos, "go:embed cannot apply to var with initializer")
return exprs
}
if typ == nil {
// Should not happen, since len(exprs) == 0 now.
p.yyerrorpos(pos, "go:embed cannot apply to var without type")
p.errorAt(pos, "go:embed cannot apply to var without type")
return exprs
}
kind := embedKindApprox(typ)
if kind == embedUnknown {
p.yyerrorpos(pos, "go:embed cannot apply to var of type %v", typ)
p.errorAt(pos, "go:embed cannot apply to var of type %v", typ)
return exprs
}
@ -98,13 +77,13 @@ func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []Pragma
var list []string
for _, e := range embeds {
for _, pattern := range e.Patterns {
files, ok := embedCfg.Patterns[pattern]
files, ok := base.Flag.Cfg.Embed.Patterns[pattern]
if !ok {
p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
p.errorAt(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
}
for _, file := range files {
if embedCfg.Files[file] == "" {
p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map file: %s", file)
if base.Flag.Cfg.Embed.Files[file] == "" {
p.errorAt(e.Pos, "invalid go:embed: build system did not map file: %s", file)
continue
}
if !have[file] {
@ -126,23 +105,23 @@ func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []Pragma
if kind == embedString || kind == embedBytes {
if len(list) > 1 {
p.yyerrorpos(pos, "invalid go:embed: multiple files for type %v", typ)
p.errorAt(pos, "invalid go:embed: multiple files for type %v", typ)
return exprs
}
}
v := names[0]
if dclcontext != PEXTERN {
if dclcontext != ir.PEXTERN {
numLocalEmbed++
v = newnamel(v.Pos, lookupN("embed.", numLocalEmbed))
v.Sym.Def = asTypesNode(v)
v.Name.Param.Ntype = typ
v.SetClass(PEXTERN)
v = ir.NewNameAt(v.Pos(), lookupN("embed.", numLocalEmbed))
v.Sym().Def = v
v.Name().Param.Ntype = typ
v.SetClass(ir.PEXTERN)
externdcl = append(externdcl, v)
exprs = []*Node{v}
exprs = []ir.Node{v}
}
v.Name.Param.SetEmbedFiles(list)
v.Name().Param.SetEmbedFiles(list)
embedlist = append(embedlist, v)
return exprs
}
@ -151,18 +130,18 @@ func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []Pragma
// The match is approximate because we haven't done scope resolution yet and
// can't tell whether "string" and "byte" really mean "string" and "byte".
// The result must be confirmed later, after type checking, using embedKind.
func embedKindApprox(typ *Node) int {
if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
func embedKindApprox(typ ir.Node) int {
if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
return embedFiles
}
// These are not guaranteed to match only string and []byte -
// maybe the local package has redefined one of those words.
// But it's the best we can do now during the noder.
// The stricter check happens later, in initEmbed calling embedKind.
if typ.Sym != nil && typ.Sym.Name == "string" && typ.Sym.Pkg == localpkg {
if typ.Sym() != nil && typ.Sym().Name == "string" && typ.Sym().Pkg == ir.LocalPkg {
return embedString
}
if typ.Op == OTARRAY && typ.Left == nil && typ.Right.Sym != nil && typ.Right.Sym.Name == "byte" && typ.Right.Sym.Pkg == localpkg {
if typ.Op() == ir.OTARRAY && typ.Left() == nil && typ.Right().Sym() != nil && typ.Right().Sym().Name == "byte" && typ.Right().Sym().Pkg == ir.LocalPkg {
return embedBytes
}
return embedUnknown
@ -170,10 +149,10 @@ func embedKindApprox(typ *Node) int {
// embedKind determines the kind of embedding variable.
func embedKind(typ *types.Type) int {
if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
return embedFiles
}
if typ == types.Types[TSTRING] {
if typ == types.Types[types.TSTRING] {
return embedString
}
if typ.Sym == nil && typ.IsSlice() && typ.Elem() == types.Bytetype {
@ -213,19 +192,19 @@ func dumpembeds() {
// initEmbed emits the init data for a //go:embed variable,
// which is either a string, a []byte, or an embed.FS.
func initEmbed(v *Node) {
files := v.Name.Param.EmbedFiles()
switch kind := embedKind(v.Type); kind {
func initEmbed(v ir.Node) {
files := v.Name().Param.EmbedFiles()
switch kind := embedKind(v.Type()); kind {
case embedUnknown:
yyerrorl(v.Pos, "go:embed cannot apply to var of type %v", v.Type)
base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type())
case embedString, embedBytes:
file := files[0]
fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], kind == embedString, nil)
fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], kind == embedString, nil)
if err != nil {
yyerrorl(v.Pos, "embed %s: %v", file, err)
base.ErrorfAt(v.Pos(), "embed %s: %v", file, err)
}
sym := v.Sym.Linksym()
sym := v.Sym().Linksym()
off := 0
off = dsymptr(sym, off, fsym, 0) // data string
off = duintptr(sym, off, uint64(size)) // len
@ -234,7 +213,7 @@ func initEmbed(v *Node) {
}
case embedFiles:
slicedata := Ctxt.Lookup(`"".` + v.Sym.Name + `.files`)
slicedata := base.Ctxt.Lookup(`"".` + v.Sym().Name + `.files`)
off := 0
// []files pointed at by Files
off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice
@ -249,7 +228,7 @@ func initEmbed(v *Node) {
const hashSize = 16
hash := make([]byte, hashSize)
for _, file := range files {
off = dsymptr(slicedata, off, stringsym(v.Pos, file), 0) // file string
off = dsymptr(slicedata, off, stringsym(v.Pos(), file), 0) // file string
off = duintptr(slicedata, off, uint64(len(file)))
if strings.HasSuffix(file, "/") {
// entry for directory - no data
@ -257,17 +236,17 @@ func initEmbed(v *Node) {
off = duintptr(slicedata, off, 0)
off += hashSize
} else {
fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], true, hash)
fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], true, hash)
if err != nil {
yyerrorl(v.Pos, "embed %s: %v", file, err)
base.ErrorfAt(v.Pos(), "embed %s: %v", file, err)
}
off = dsymptr(slicedata, off, fsym, 0) // data string
off = duintptr(slicedata, off, uint64(size))
off = int(slicedata.WriteBytes(Ctxt, int64(off), hash))
off = int(slicedata.WriteBytes(base.Ctxt, int64(off), hash))
}
}
ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
sym := v.Sym.Linksym()
sym := v.Sym().Linksym()
dsymptr(sym, 0, slicedata, 0)
}
}
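// Illustrative sketch, not part of this diff: the three embedding kinds
// initEmbed emits data for, at the source level. hello.txt and static/ are
// assumed to exist beside the package.
package main

import (
	"embed"
	"fmt"
)

//go:embed hello.txt
var asString string // embedString: one file, stored as a string

//go:embed hello.txt
var asBytes []byte // embedBytes: one file, stored as a []byte

//go:embed static
var asFS embed.FS // embedFiles: a whole tree behind an embed.FS

func main() {
	fmt.Println(len(asString), len(asBytes), asFS)
}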


@ -1,472 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
"fmt"
)
func escapes(all []*Node) {
visitBottomUp(all, escapeFuncs)
}
const (
EscFuncUnknown = 0 + iota
EscFuncPlanned
EscFuncStarted
EscFuncTagged
)
func min8(a, b int8) int8 {
if a < b {
return a
}
return b
}
func max8(a, b int8) int8 {
if a > b {
return a
}
return b
}
const (
EscUnknown = iota
EscNone // Does not escape to heap, result, or parameters.
EscHeap // Reachable from the heap
EscNever // By construction will not escape.
)
// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
func funcSym(fn *Node) *types.Sym {
if fn == nil || fn.Func.Nname == nil {
return nil
}
return fn.Func.Nname.Sym
}
// Mark labels that have no backjumps to them as not increasing e.loopdepth.
// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat
// and set it to one of the following two. Then in esc we'll clear it again.
var (
looping Node
nonlooping Node
)
func isSliceSelfAssign(dst, src *Node) bool {
// Detect the following special case.
//
// func (b *Buffer) Foo() {
// n, m := ...
// b.buf = b.buf[n:m]
// }
//
// This assignment is a no-op for escape analysis,
// it does not store any new pointers into b that were not already there.
// However, without this special case b will escape, because we assign through ODEREF/ODOTPTR.
// Here we assume that the statement will not contain calls,
// that is, that order will move any calls to init.
// Otherwise base ONAME value could change between the moments
// when we evaluate it for dst and for src.
// dst is ONAME dereference.
if dst.Op != ODEREF && dst.Op != ODOTPTR || dst.Left.Op != ONAME {
return false
}
// src is a slice operation.
switch src.Op {
case OSLICE, OSLICE3, OSLICESTR:
// OK.
case OSLICEARR, OSLICE3ARR:
// Since arrays are embedded into containing object,
// slice of non-pointer array will introduce a new pointer into b that was not already there
// (pointer to b itself). After such assignment, if b contents escape,
// b escapes as well. If we ignore such OSLICEARR, we will conclude
// that b does not escape when b contents do.
//
// Pointer to an array is OK since it's not stored inside b directly.
// For slicing an array (not pointer to array), there is an implicit OADDR.
// We check that to determine non-pointer array slicing.
if src.Left.Op == OADDR {
return false
}
default:
return false
}
// slice is applied to ONAME dereference.
if src.Left.Op != ODEREF && src.Left.Op != ODOTPTR || src.Left.Left.Op != ONAME {
return false
}
// dst and src reference the same base ONAME.
return dst.Left == src.Left.Left
}
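// Illustrative sketch, not part of this diff: the exact pattern the special
// case above whitelists. The statement stores no pointer into *b that was
// not already there, so escape analysis may ignore it.
package main

type Buffer struct{ buf []byte }

func (b *Buffer) shrink(n, m int) {
	b.buf = b.buf[n:m] // self-assignment: no new pointers flow into *b
}

func main() {
	b := &Buffer{buf: make([]byte, 8)}
	b.shrink(2, 6)
	println(len(b.buf)) // 4
}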
// isSelfAssign reports whether assignment from src to dst can
// be ignored by the escape analysis as it's effectively a self-assignment.
func isSelfAssign(dst, src *Node) bool {
if isSliceSelfAssign(dst, src) {
return true
}
// Detect trivial assignments that assign back to the same object.
//
// It covers these cases:
// val.x = val.y
// val.x[i] = val.y[j]
// val.x1.x2 = val.x1.y2
// ... etc
//
// These assignments do not change assigned object lifetime.
if dst == nil || src == nil || dst.Op != src.Op {
return false
}
switch dst.Op {
case ODOT, ODOTPTR:
// Safe trailing accessors that are permitted to differ.
case OINDEX:
if mayAffectMemory(dst.Right) || mayAffectMemory(src.Right) {
return false
}
default:
return false
}
// The expression prefix must be both "safe" and identical.
return samesafeexpr(dst.Left, src.Left)
}
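// Illustrative sketch, not part of this diff: assignment shapes isSelfAssign
// accepts or rejects. Index expressions must be memory-safe per
// mayAffectMemory below, or the base object could change mid-statement.
package main

type val struct{ x, y [4]int }

func ignorable(v *val, i, j int) {
	v.x[i] = v.y[j] // same base object, memory-safe index expressions: ignored
}

func notIgnorable(v *val, f func() int) {
	v.x[f()] = v.y[0] // f() may affect memory state: cannot be ignored
}

func main() {
	v := &val{}
	ignorable(v, 0, 1)
	notIgnorable(v, func() int { return 2 })
}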
// mayAffectMemory reports whether evaluation of n may affect the program's
// memory state. If the expression can't affect memory state, then it can be
// safely ignored by the escape analysis.
func mayAffectMemory(n *Node) bool {
// We may want to use a list of "memory safe" ops instead of generally
// "side-effect free", which would include all calls and other ops that can
// allocate or change global state. For now, it's safer to start with the latter.
//
// We're ignoring things like division by zero, index out of range,
// and nil pointer dereference here.
switch n.Op {
case ONAME, OCLOSUREVAR, OLITERAL:
return false
// Left+Right group.
case OINDEX, OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
return mayAffectMemory(n.Left) || mayAffectMemory(n.Right)
// Left group.
case ODOT, ODOTPTR, ODEREF, OCONVNOP, OCONV, OLEN, OCAP,
ONOT, OBITNOT, OPLUS, ONEG, OALIGNOF, OOFFSETOF, OSIZEOF:
return mayAffectMemory(n.Left)
default:
return true
}
}
// heapAllocReason returns the reason the given Node must be heap
// allocated, or the empty string if it does not need to be.
func heapAllocReason(n *Node) string {
if n.Type == nil {
return ""
}
// Parameters are always passed via the stack.
if n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) {
return ""
}
if n.Type.Width > maxStackVarSize {
return "too large for stack"
}
if (n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize {
return "too large for stack"
}
if n.Op == OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
return "too large for stack"
}
if n.Op == OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
return "too large for stack"
}
if n.Op == OMAKESLICE {
r := n.Right
if r == nil {
r = n.Left
}
if !smallintconst(r) {
return "non-constant size"
}
if t := n.Type; t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width {
return "too large for stack"
}
}
return ""
}
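// Illustrative sketch, not part of this diff: allocations the checks above
// force to the heap. The exact thresholds (maxStackVarSize,
// maxImplicitStackVarSize) are compiler defaults and may change, so the
// sizes here are assumptions chosen to be comfortably over the limits.
package main

func tooBig() byte {
	var x [64 << 20]byte // "too large for stack" even though it never escapes
	x[0] = 1
	return x[0]
}

func nonConst(n int) byte {
	s := make([]byte, n) // "non-constant size": backing array goes to the heap
	return s[0]
}

func main() {
	println(tooBig(), nonConst(8))
}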
// addrescapes tags node n as having had its address taken
// by "increasing" the "value" of n.Esc to EscHeap.
// Storage is allocated as necessary to allow the address
// to be taken.
func addrescapes(n *Node) {
switch n.Op {
default:
// Unexpected Op, probably due to a previous type error. Ignore.
case ODEREF, ODOTPTR:
// Nothing to do.
case ONAME:
if n == nodfp {
break
}
// if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
// on PPARAM it means something different.
if n.Class() == PAUTO && n.Esc == EscNever {
break
}
// If a closure reference escapes, mark the outer variable as escaping.
if n.Name.IsClosureVar() {
addrescapes(n.Name.Defn)
break
}
if n.Class() != PPARAM && n.Class() != PPARAMOUT && n.Class() != PAUTO {
break
}
// This is a plain parameter or local variable that needs to move to the heap,
// but possibly for the function outside the one we're compiling.
// That is, if we have:
//
// func f(x int) {
// func() {
// global = &x
// }
// }
//
// then we're analyzing the inner closure but we need to move x to the
// heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
oldfn := Curfn
Curfn = n.Name.Curfn
if Curfn.Func.Closure != nil && Curfn.Op == OCLOSURE {
Curfn = Curfn.Func.Closure
}
ln := lineno
lineno = Curfn.Pos
moveToHeap(n)
Curfn = oldfn
lineno = ln
// ODOTPTR has already been introduced,
// so these are the non-pointer ODOT and OINDEX.
// In &x[0], if x is a slice, then x does not
// escape--the pointer inside x does, but that
// is always a heap pointer anyway.
case ODOT, OINDEX, OPAREN, OCONVNOP:
if !n.Left.Type.IsSlice() {
addrescapes(n.Left)
}
}
}
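// Illustrative sketch, not part of this diff: the situation described in the
// comment above. The address is taken inside the inner closure, but x must
// move to the heap in f, the function that owns it.
package main

var global *int

func f(x int) {
	func() {
		global = &x // forces the parameter x of f to the heap
	}()
}

func main() {
	f(1)
	println(*global) // 1
}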
// moveToHeap records the parameter or local variable n as moved to the heap.
func moveToHeap(n *Node) {
if Debug.r != 0 {
Dump("MOVE", n)
}
if compiling_runtime {
yyerror("%v escapes to heap, not allowed in runtime", n)
}
if n.Class() == PAUTOHEAP {
Dump("n", n)
Fatalf("double move to heap")
}
// Allocate a local stack variable to hold the pointer to the heap copy.
// temp will add it to the function declaration list automatically.
heapaddr := temp(types.NewPtr(n.Type))
heapaddr.Sym = lookup("&" + n.Sym.Name)
heapaddr.Orig.Sym = heapaddr.Sym
heapaddr.Pos = n.Pos
// Unset AutoTemp to persist the &foo variable name through SSA to
// liveness analysis.
// TODO(mdempsky/drchase): Cleaner solution?
heapaddr.Name.SetAutoTemp(false)
// Parameters have a local stack copy used at function start/end
// in addition to the copy in the heap that may live longer than
// the function.
if n.Class() == PPARAM || n.Class() == PPARAMOUT {
if n.Xoffset == BADWIDTH {
Fatalf("addrescapes before param assignment")
}
// We rewrite n below to be a heap variable (indirection of heapaddr).
// Preserve a copy so we can still write code referring to the original,
// and substitute that copy into the function declaration list
// so that analyses of the local (on-stack) variables use it.
stackcopy := newname(n.Sym)
stackcopy.Type = n.Type
stackcopy.Xoffset = n.Xoffset
stackcopy.SetClass(n.Class())
stackcopy.Name.Param.Heapaddr = heapaddr
if n.Class() == PPARAMOUT {
// Make sure the pointer to the heap copy is kept live throughout the function.
// The function could panic at any point, and then a defer could recover.
// Thus, we need the pointer to the heap copy always available so the
// post-deferreturn code can copy the return value back to the stack.
// See issue 16095.
heapaddr.Name.SetIsOutputParamHeapAddr(true)
}
n.Name.Param.Stackcopy = stackcopy
// Substitute the stackcopy into the function variable list so that
// liveness and other analyses use the underlying stack slot
// and not the now-pseudo-variable n.
found := false
for i, d := range Curfn.Func.Dcl {
if d == n {
Curfn.Func.Dcl[i] = stackcopy
found = true
break
}
// Parameters are before locals, so can stop early.
// This limits the search even in functions with many local variables.
if d.Class() == PAUTO {
break
}
}
if !found {
Fatalf("cannot find %v in local variable list", n)
}
Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
}
// Modify n in place so that uses of n now mean indirection of the heapaddr.
n.SetClass(PAUTOHEAP)
n.Xoffset = 0
n.Name.Param.Heapaddr = heapaddr
n.Esc = EscHeap
if Debug.m != 0 {
Warnl(n.Pos, "moved to heap: %v", n)
}
}
// This special tag is applied to uintptr variables
// that we believe may hold unsafe.Pointers for
// calls into assembly functions.
const unsafeUintptrTag = "unsafe-uintptr"
// This special tag is applied to uintptr parameters of functions
// marked go:uintptrescapes.
const uintptrEscapesTag = "uintptr-escapes"
func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
name := func() string {
if f.Sym != nil {
return f.Sym.Name
}
return fmt.Sprintf("arg#%d", narg)
}
if fn.Nbody.Len() == 0 {
// Assume that uintptr arguments must be held live across the call.
// This is most important for syscall.Syscall.
// See golang.org/issue/13372.
// This really doesn't have much to do with escape analysis per se,
// but we are reusing the ability to annotate an individual function
// argument and pass those annotations along to importing code.
if f.Type.IsUintptr() {
if Debug.m != 0 {
Warnl(f.Pos, "assuming %v is unsafe uintptr", name())
}
return unsafeUintptrTag
}
if !f.Type.HasPointers() { // don't bother tagging for scalars
return ""
}
var esc EscLeaks
// External functions are assumed unsafe, unless
// //go:noescape is given before the declaration.
if fn.Func.Pragma&Noescape != 0 {
if Debug.m != 0 && f.Sym != nil {
Warnl(f.Pos, "%v does not escape", name())
}
} else {
if Debug.m != 0 && f.Sym != nil {
Warnl(f.Pos, "leaking param: %v", name())
}
esc.AddHeap(0)
}
return esc.Encode()
}
if fn.Func.Pragma&UintptrEscapes != 0 {
if f.Type.IsUintptr() {
if Debug.m != 0 {
Warnl(f.Pos, "marking %v as escaping uintptr", name())
}
return uintptrEscapesTag
}
if f.IsDDD() && f.Type.Elem().IsUintptr() {
// final argument is ...uintptr.
if Debug.m != 0 {
Warnl(f.Pos, "marking %v as escaping ...uintptr", name())
}
return uintptrEscapesTag
}
}
if !f.Type.HasPointers() { // don't bother tagging for scalars
return ""
}
// Unnamed parameters are unused and therefore do not escape.
if f.Sym == nil || f.Sym.IsBlank() {
var esc EscLeaks
return esc.Encode()
}
n := asNode(f.Nname)
loc := e.oldLoc(n)
esc := loc.paramEsc
esc.Optimize()
if Debug.m != 0 && !loc.escapes {
if esc.Empty() {
Warnl(f.Pos, "%v does not escape", name())
}
if x := esc.Heap(); x >= 0 {
if x == 0 {
Warnl(f.Pos, "leaking param: %v", name())
} else {
// TODO(mdempsky): Mention level=x like below?
Warnl(f.Pos, "leaking param content: %v", name())
}
}
for i := 0; i < numEscResults; i++ {
if x := esc.Result(i); x >= 0 {
res := fn.Type.Results().Field(i).Sym
Warnl(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
}
}
}
return esc.Encode()
}
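// Illustrative sketch, not part of this diff: the caller-side contract behind
// the unsafe-uintptr tag. Go guarantees only that a pointer converted to
// uintptr in the argument list of a call such as syscall.Syscall stays live
// across that call. Trap number 0 is a placeholder, not a meaningful syscall.
package main

import (
	"syscall"
	"unsafe"
)

func main() {
	p := new(int)
	// OK: the conversion appears in the argument expression itself.
	syscall.Syscall(0, uintptr(unsafe.Pointer(p)), 0, 0)

	// Subtly wrong: u is a plain integer, so p is not necessarily
	// considered live by the time the call runs.
	u := uintptr(unsafe.Pointer(p))
	syscall.Syscall(0, u, 0, 0)
}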

File diff suppressed because it is too large


@ -5,34 +5,33 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/bio"
"cmd/internal/src"
"fmt"
)
var (
Debug_export int // if set, print debugging information about export data
"go/constant"
)
func exportf(bout *bio.Writer, format string, args ...interface{}) {
fmt.Fprintf(bout, format, args...)
if Debug_export != 0 {
if base.Debug.Export != 0 {
fmt.Printf(format, args...)
}
}
var asmlist []*Node
var asmlist []ir.Node
// exportsym marks n for export (or reexport).
func exportsym(n *Node) {
if n.Sym.OnExportList() {
func exportsym(n ir.Node) {
if n.Sym().OnExportList() {
return
}
n.Sym.SetOnExportList(true)
n.Sym().SetOnExportList(true)
if Debug.E != 0 {
fmt.Printf("export symbol %v\n", n.Sym)
if base.Flag.E != 0 {
fmt.Printf("export symbol %v\n", n.Sym())
}
exportlist = append(exportlist, n)
@ -42,22 +41,22 @@ func initname(s string) bool {
return s == "init"
}
func autoexport(n *Node, ctxt Class) {
if n.Sym.Pkg != localpkg {
func autoexport(n ir.Node, ctxt ir.Class) {
if n.Sym().Pkg != ir.LocalPkg {
return
}
if (ctxt != PEXTERN && ctxt != PFUNC) || dclcontext != PEXTERN {
if (ctxt != ir.PEXTERN && ctxt != ir.PFUNC) || dclcontext != ir.PEXTERN {
return
}
if n.Type != nil && n.Type.IsKind(TFUNC) && n.IsMethod() {
if n.Type() != nil && n.Type().IsKind(types.TFUNC) && ir.IsMethod(n) {
return
}
if types.IsExported(n.Sym.Name) || initname(n.Sym.Name) {
if types.IsExported(n.Sym().Name) || initname(n.Sym().Name) {
exportsym(n)
}
if asmhdr != "" && !n.Sym.Asm() {
n.Sym.SetAsm(true)
if base.Flag.AsmHdr != "" && !n.Sym().Asm() {
n.Sym().SetAsm(true)
asmlist = append(asmlist, n)
}
}
@ -70,28 +69,28 @@ func dumpexport(bout *bio.Writer) {
size := bout.Offset() - off
exportf(bout, "\n$$\n")
if Debug_export != 0 {
fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", myimportpath, size)
if base.Debug.Export != 0 {
fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, size)
}
}
func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node {
n := asNode(s.PkgDef())
func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) ir.Node {
n := ir.AsNode(s.PkgDef())
if n == nil {
// iimport should have created a stub ONONAME
// declaration for all imported symbols. The exception
// is declarations for Runtimepkg, which are populated
// by loadsys instead.
if s.Pkg != Runtimepkg {
Fatalf("missing ONONAME for %v\n", s)
base.Fatalf("missing ONONAME for %v\n", s)
}
n = dclname(s)
s.SetPkgDef(asTypesNode(n))
s.SetPkgDef(n)
s.Importdef = ipkg
}
if n.Op != ONONAME && n.Op != op {
redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
if n.Op() != ir.ONONAME && n.Op() != op {
redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path))
}
return n
}
@ -100,57 +99,57 @@ func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node {
// If no such type has been declared yet, a forward declaration is returned.
// ipkg is the package being imported
func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type {
n := importsym(ipkg, s, OTYPE)
if n.Op != OTYPE {
t := types.New(TFORW)
n := importsym(ipkg, s, ir.OTYPE)
if n.Op() != ir.OTYPE {
t := types.New(types.TFORW)
t.Sym = s
t.Nod = asTypesNode(n)
t.Nod = n
n.Op = OTYPE
n.Pos = pos
n.Type = t
n.SetClass(PEXTERN)
n.SetOp(ir.OTYPE)
n.SetPos(pos)
n.SetType(t)
n.SetClass(ir.PEXTERN)
}
t := n.Type
t := n.Type()
if t == nil {
Fatalf("importtype %v", s)
base.Fatalf("importtype %v", s)
}
return t
}
// importobj declares symbol s as an imported object representable by op.
// ipkg is the package being imported
func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op Op, ctxt Class, t *types.Type) *Node {
func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) ir.Node {
n := importsym(ipkg, s, op)
if n.Op != ONONAME {
if n.Op == op && (n.Class() != ctxt || !types.Identical(n.Type, t)) {
redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
if n.Op() != ir.ONONAME {
if n.Op() == op && (n.Class() != ctxt || !types.Identical(n.Type(), t)) {
redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path))
}
return nil
}
n.Op = op
n.Pos = pos
n.SetOp(op)
n.SetPos(pos)
n.SetClass(ctxt)
if ctxt == PFUNC {
n.Sym.SetFunc(true)
if ctxt == ir.PFUNC {
n.Sym().SetFunc(true)
}
n.Type = t
n.SetType(t)
return n
}
// importconst declares symbol s as an imported constant with type t and value val.
// ipkg is the package being imported
func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val Val) {
n := importobj(ipkg, pos, s, OLITERAL, PEXTERN, t)
func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) {
n := importobj(ipkg, pos, s, ir.OLITERAL, ir.PEXTERN, t)
if n == nil { // TODO: Check that value matches.
return
}
n.SetVal(val)
if Debug.E != 0 {
if base.Flag.E != 0 {
fmt.Printf("import const %v %L = %v\n", s, t, val)
}
}
@ -158,15 +157,14 @@ func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val
// importfunc declares symbol s as an imported function with type t.
// ipkg is the package being imported
func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
n := importobj(ipkg, pos, s, ONAME, PFUNC, t)
n := importobj(ipkg, pos, s, ir.ONAME, ir.PFUNC, t)
if n == nil {
return
}
n.Func = new(Func)
t.SetNname(asTypesNode(n))
n.SetFunc(new(ir.Func))
if Debug.E != 0 {
if base.Flag.E != 0 {
fmt.Printf("import func %v%S\n", s, t)
}
}
@ -174,12 +172,12 @@ func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
// importvar declares symbol s as an imported variable with type t.
// ipkg is the package being imported
func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
n := importobj(ipkg, pos, s, ONAME, PEXTERN, t)
n := importobj(ipkg, pos, s, ir.ONAME, ir.PEXTERN, t)
if n == nil {
return
}
if Debug.E != 0 {
if base.Flag.E != 0 {
fmt.Printf("import var %v %L\n", s, t)
}
}
@ -187,43 +185,43 @@ func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
// importalias declares symbol s as an imported type alias with type t.
// ipkg is the package being imported
func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
n := importobj(ipkg, pos, s, OTYPE, PEXTERN, t)
n := importobj(ipkg, pos, s, ir.OTYPE, ir.PEXTERN, t)
if n == nil {
return
}
if Debug.E != 0 {
if base.Flag.E != 0 {
fmt.Printf("import type %v = %L\n", s, t)
}
}
func dumpasmhdr() {
b, err := bio.Create(asmhdr)
b, err := bio.Create(base.Flag.AsmHdr)
if err != nil {
Fatalf("%v", err)
base.Fatalf("%v", err)
}
fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", localpkg.Name)
fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", ir.LocalPkg.Name)
for _, n := range asmlist {
if n.Sym.IsBlank() {
if n.Sym().IsBlank() {
continue
}
switch n.Op {
case OLITERAL:
t := n.Val().Ctype()
if t == CTFLT || t == CTCPLX {
switch n.Op() {
case ir.OLITERAL:
t := n.Val().Kind()
if t == constant.Float || t == constant.Complex {
break
}
fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym.Name, n.Val())
fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym().Name, n.Val())
case OTYPE:
t := n.Type
case ir.OTYPE:
t := n.Type()
if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() {
break
}
fmt.Fprintf(b, "#define %s__size %d\n", n.Sym.Name, int(t.Width))
fmt.Fprintf(b, "#define %s__size %d\n", n.Sym().Name, int(t.Width))
for _, f := range t.Fields().Slice() {
if !f.Sym.IsBlank() {
fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, f.Sym.Name, int(f.Offset))
fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym().Name, f.Sym.Name, int(f.Offset))
}
}
}


@ -5,6 +5,8 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
@ -28,14 +30,14 @@ func sysvar(name string) *obj.LSym {
// isParamStackCopy reports whether this is the on-stack copy of a
// function parameter that moved to the heap.
func (n *Node) isParamStackCopy() bool {
return n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Name.Param.Heapaddr != nil
func isParamStackCopy(n ir.Node) bool {
return n.Op() == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Name().Param.Heapaddr != nil
}
// isParamHeapCopy reports whether this is the on-heap copy of
// a function parameter that moved to the heap.
func (n *Node) isParamHeapCopy() bool {
return n.Op == ONAME && n.Class() == PAUTOHEAP && n.Name.Param.Stackcopy != nil
func isParamHeapCopy(n ir.Node) bool {
return n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP && n.Name().Param.Stackcopy != nil
}
// autotmpname returns the name for an autotmp variable numbered n.
@ -50,37 +52,37 @@ func autotmpname(n int) string {
}
// make a new Node off the books
func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node {
func tempAt(pos src.XPos, curfn ir.Node, t *types.Type) ir.Node {
if curfn == nil {
Fatalf("no curfn for tempAt")
base.Fatalf("no curfn for tempAt")
}
if curfn.Func.Closure != nil && curfn.Op == OCLOSURE {
Dump("tempAt", curfn)
Fatalf("adding tempAt to wrong closure function")
if curfn.Op() == ir.OCLOSURE {
ir.Dump("tempAt", curfn)
base.Fatalf("adding tempAt to wrong closure function")
}
if t == nil {
Fatalf("tempAt called with nil type")
base.Fatalf("tempAt called with nil type")
}
s := &types.Sym{
Name: autotmpname(len(curfn.Func.Dcl)),
Pkg: localpkg,
Name: autotmpname(len(curfn.Func().Dcl)),
Pkg: ir.LocalPkg,
}
n := newnamel(pos, s)
s.Def = asTypesNode(n)
n.Type = t
n.SetClass(PAUTO)
n.Esc = EscNever
n.Name.Curfn = curfn
n.Name.SetUsed(true)
n.Name.SetAutoTemp(true)
curfn.Func.Dcl = append(curfn.Func.Dcl, n)
n := ir.NewNameAt(pos, s)
s.Def = n
n.SetType(t)
n.SetClass(ir.PAUTO)
n.SetEsc(EscNever)
n.Name().Curfn = curfn
n.Name().SetUsed(true)
n.Name().SetAutoTemp(true)
curfn.Func().Dcl = append(curfn.Func().Dcl, n)
dowidth(t)
return n.Orig
return n.Orig()
}
func temp(t *types.Type) *Node {
return tempAt(lineno, Curfn, t)
func temp(t *types.Type) ir.Node {
return tempAt(base.Pos, Curfn, t)
}


@ -5,6 +5,8 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
@ -12,10 +14,6 @@ import (
"sync"
)
const (
BADWIDTH = types.BADWIDTH
)
var (
// maximum size variable which we will allocate on the stack.
// This limit is for explicit variable declarations like "var x T" or "x := ...".
@ -39,7 +37,7 @@ var (
// isRuntimePkg reports whether p is package runtime.
func isRuntimePkg(p *types.Pkg) bool {
if compiling_runtime && p == localpkg {
if base.Flag.CompilingRuntime && p == ir.LocalPkg {
return true
}
return p.Path == "runtime"
@ -47,31 +45,12 @@ func isRuntimePkg(p *types.Pkg) bool {
// isReflectPkg reports whether p is package reflect.
func isReflectPkg(p *types.Pkg) bool {
if p == localpkg {
return myimportpath == "reflect"
if p == ir.LocalPkg {
return base.Ctxt.Pkgpath == "reflect"
}
return p.Path == "reflect"
}
// The Class of a variable/function describes the "storage class"
// of a variable or function. During parsing, storage classes are
// called declaration contexts.
type Class uint8
//go:generate stringer -type=Class
const (
Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables
PEXTERN // global variables
PAUTO // local variables
PAUTOHEAP // local variables or parameters moved to heap
PPARAM // input arguments
PPARAMOUT // output results
PFUNC // global functions
// Careful: Class is stored in three bits in Node.flags.
_ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
)
// Slices in the runtime are represented by three components:
//
// type slice struct {
@ -99,40 +78,10 @@ var (
var pragcgobuf [][]string
var outfile string
var linkobj string
// nerrors is the number of compiler errors reported
// since the last call to saveerrors.
var nerrors int
// nsavederrors is the total number of compiler errors
// reported before the last call to saveerrors.
var nsavederrors int
var nsyntaxerrors int
var decldepth int32
var nolocalimports bool
// gc debug flags
type DebugFlags struct {
P, B, C, E, G,
K, L, N, S,
W, e, h, j,
l, m, r, w int
}
var Debug DebugFlags
var debugstr string
var Debug_checknil int
var Debug_typeassert int
var localpkg *types.Pkg // package being compiled
var inimport bool // set during import
var itabpkg *types.Pkg // fake pkg for itab entries
@ -155,87 +104,53 @@ var gopkg *types.Pkg // pseudo-package for method symbols on anonymous receiver
var zerosize int64
var myimportpath string
var localimport string
var asmhdr string
var simtype [NTYPE]types.EType
var simtype [types.NTYPE]types.EType
var (
isInt [NTYPE]bool
isFloat [NTYPE]bool
isComplex [NTYPE]bool
issimple [NTYPE]bool
isInt [types.NTYPE]bool
isFloat [types.NTYPE]bool
isComplex [types.NTYPE]bool
issimple [types.NTYPE]bool
)
var (
okforeq [NTYPE]bool
okforadd [NTYPE]bool
okforand [NTYPE]bool
okfornone [NTYPE]bool
okforcmp [NTYPE]bool
okforbool [NTYPE]bool
okforcap [NTYPE]bool
okforlen [NTYPE]bool
okforarith [NTYPE]bool
okforconst [NTYPE]bool
okforeq [types.NTYPE]bool
okforadd [types.NTYPE]bool
okforand [types.NTYPE]bool
okfornone [types.NTYPE]bool
okforcmp [types.NTYPE]bool
okforbool [types.NTYPE]bool
okforcap [types.NTYPE]bool
okforlen [types.NTYPE]bool
okforarith [types.NTYPE]bool
)
var (
okfor [OEND][]bool
iscmp [OEND]bool
okfor [ir.OEND][]bool
iscmp [ir.OEND]bool
)
var minintval [NTYPE]*Mpint
var xtop []ir.Node
var maxintval [NTYPE]*Mpint
var exportlist []ir.Node
var minfltval [NTYPE]*Mpflt
var maxfltval [NTYPE]*Mpflt
var xtop []*Node
var exportlist []*Node
var importlist []*Node // imported functions and methods with inlinable bodies
var importlist []ir.Node // imported functions and methods with inlinable bodies
var (
funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym)
funcsyms []*types.Sym
)
var dclcontext Class // PEXTERN/PAUTO
var dclcontext ir.Class // PEXTERN/PAUTO
var Curfn *Node
var Curfn ir.Node
var Widthptr int
var Widthreg int
var nblank *Node
var typecheckok bool
var compiling_runtime bool
// Compiling the standard library
var compiling_std bool
var use_writebarrier bool
var pure_go bool
var flag_installsuffix string
var flag_race bool
var flag_msan bool
var flagDWARF bool
// Whether we are adding any sort of code instrumentation, such as
// when the race detector is enabled.
var instrumenting bool
@ -243,20 +158,7 @@ var instrumenting bool
// Whether we are tracking lexical scopes for DWARF.
var trackScopes bool
// Controls generation of DWARF inlined instance records. Zero
// disables, 1 emits inlined routines but suppresses var info,
// and 2 emits inlined routines with tracking of formals/locals.
var genDwarfInline int
var debuglive int
var Ctxt *obj.Link
var writearchive bool
var nodfp *Node
var disable_checknil int
var nodfp ir.Node
var autogeneratedPos src.XPos
@ -293,7 +195,7 @@ var thearch Arch
var (
staticuint64s,
zerobase *Node
zerobase ir.Node
assertE2I,
assertE2I2,


@ -31,6 +31,8 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/internal/obj"
"cmd/internal/objabi"
@ -45,7 +47,7 @@ type Progs struct {
next *obj.Prog // next Prog
pc int64 // virtual PC; count of Progs
pos src.XPos // position to use for new Progs
curfn *Node // fn these Progs are for
curfn ir.Node // fn these Progs are for
progcache []obj.Prog // local progcache
cacheidx int // first free element of progcache
@ -55,10 +57,10 @@ type Progs struct {
// newProgs returns a new Progs for fn.
// worker indicates which of the backend workers will use the Progs.
func newProgs(fn *Node, worker int) *Progs {
func newProgs(fn ir.Node, worker int) *Progs {
pp := new(Progs)
if Ctxt.CanReuseProgs() {
sz := len(sharedProgArray) / nBackendWorkers
if base.Ctxt.CanReuseProgs() {
sz := len(sharedProgArray) / base.Flag.LowerC
pp.progcache = sharedProgArray[sz*worker : sz*(worker+1)]
}
pp.curfn = fn
@ -67,7 +69,7 @@ func newProgs(fn *Node, worker int) *Progs {
pp.next = pp.NewProg()
pp.clearp(pp.next)
pp.pos = fn.Pos
pp.pos = fn.Pos()
pp.settext(fn)
// PCDATA tables implicitly start with index -1.
pp.prevLive = LivenessIndex{-1, false}
@ -83,19 +85,19 @@ func (pp *Progs) NewProg() *obj.Prog {
} else {
p = new(obj.Prog)
}
p.Ctxt = Ctxt
p.Ctxt = base.Ctxt
return p
}
// Flush converts from pp to machine code.
func (pp *Progs) Flush() {
plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.curfn}
obj.Flushplist(Ctxt, plist, pp.NewProg, myimportpath)
obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath)
}
// Free clears pp and any associated resources.
func (pp *Progs) Free() {
if Ctxt.CanReuseProgs() {
if base.Ctxt.CanReuseProgs() {
// Clear progs to enable GC and avoid abuse.
s := pp.progcache[:pp.cacheidx]
for i := range s {
@ -133,8 +135,8 @@ func (pp *Progs) Prog(as obj.As) *obj.Prog {
pp.clearp(pp.next)
p.Link = pp.next
if !pp.pos.IsKnown() && Debug.K != 0 {
Warn("prog: unknown position (line 0)")
if !pp.pos.IsKnown() && base.Flag.K != 0 {
base.Warn("prog: unknown position (line 0)")
}
p.As = as
@ -172,17 +174,17 @@ func (pp *Progs) Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16
return q
}
func (pp *Progs) settext(fn *Node) {
func (pp *Progs) settext(fn ir.Node) {
if pp.Text != nil {
Fatalf("Progs.settext called twice")
base.Fatalf("Progs.settext called twice")
}
ptxt := pp.Prog(obj.ATEXT)
pp.Text = ptxt
fn.Func.lsym.Func().Text = ptxt
fn.Func().LSym.Func().Text = ptxt
ptxt.From.Type = obj.TYPE_MEM
ptxt.From.Name = obj.NAME_EXTERN
ptxt.From.Sym = fn.Func.lsym
ptxt.From.Sym = fn.Func().LSym
}
// initLSym defines f's obj.LSym and initializes it based on the
@ -191,36 +193,36 @@ func (pp *Progs) settext(fn *Node) {
//
// initLSym must be called exactly once per function and must be
// called for both functions with bodies and functions without bodies.
func (f *Func) initLSym(hasBody bool) {
if f.lsym != nil {
Fatalf("Func.initLSym called twice")
func initLSym(f *ir.Func, hasBody bool) {
if f.LSym != nil {
base.Fatalf("Func.initLSym called twice")
}
if nam := f.Nname; !nam.isBlank() {
f.lsym = nam.Sym.Linksym()
if f.Pragma&Systemstack != 0 {
f.lsym.Set(obj.AttrCFunc, true)
if nam := f.Nname; !ir.IsBlank(nam) {
f.LSym = nam.Sym().Linksym()
if f.Pragma&ir.Systemstack != 0 {
f.LSym.Set(obj.AttrCFunc, true)
}
var aliasABI obj.ABI
needABIAlias := false
defABI, hasDefABI := symabiDefs[f.lsym.Name]
defABI, hasDefABI := symabiDefs[f.LSym.Name]
if hasDefABI && defABI == obj.ABI0 {
// Symbol is defined as ABI0. Create an
// Internal -> ABI0 wrapper.
f.lsym.SetABI(obj.ABI0)
f.LSym.SetABI(obj.ABI0)
needABIAlias, aliasABI = true, obj.ABIInternal
} else {
// No ABI override. Check that the symbol is
// using the expected ABI.
want := obj.ABIInternal
if f.lsym.ABI() != want {
Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.lsym.Name, f.lsym.ABI(), want)
if f.LSym.ABI() != want {
base.Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.LSym.Name, f.LSym.ABI(), want)
}
}
isLinknameExported := nam.Sym.Linkname != "" && (hasBody || hasDefABI)
if abi, ok := symabiRefs[f.lsym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
isLinknameExported := nam.Sym().Linkname != "" && (hasBody || hasDefABI)
if abi, ok := symabiRefs[f.LSym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
// Either 1) this symbol is definitely
// referenced as ABI0 from this package; or 2)
// this symbol is defined in this package but
@ -232,7 +234,7 @@ func (f *Func) initLSym(hasBody bool) {
// since other packages may "pull" symbols
// using linkname and we don't want to create
// duplicate ABI wrappers.
if f.lsym.ABI() != obj.ABI0 {
if f.LSym.ABI() != obj.ABI0 {
needABIAlias, aliasABI = true, obj.ABI0
}
}
@ -243,13 +245,13 @@ func (f *Func) initLSym(hasBody bool) {
// rather than looking them up. The uniqueness
// of f.lsym ensures uniqueness of asym.
asym := &obj.LSym{
Name: f.lsym.Name,
Name: f.LSym.Name,
Type: objabi.SABIALIAS,
R: []obj.Reloc{{Sym: f.lsym}}, // 0 size, so "informational"
R: []obj.Reloc{{Sym: f.LSym}}, // 0 size, so "informational"
}
asym.SetABI(aliasABI)
asym.Set(obj.AttrDuplicateOK, true)
Ctxt.ABIAliases = append(Ctxt.ABIAliases, asym)
base.Ctxt.ABIAliases = append(base.Ctxt.ABIAliases, asym)
}
}
@ -268,7 +270,7 @@ func (f *Func) initLSym(hasBody bool) {
if f.Needctxt() {
flag |= obj.NEEDCTXT
}
if f.Pragma&Nosplit != 0 {
if f.Pragma&ir.Nosplit != 0 {
flag |= obj.NOSPLIT
}
if f.ReflectMethod() {
@ -278,31 +280,31 @@ func (f *Func) initLSym(hasBody bool) {
// Clumsy but important.
// See test/recover.go for test cases and src/reflect/value.go
// for the actual functions being considered.
if myimportpath == "reflect" {
switch f.Nname.Sym.Name {
if base.Ctxt.Pkgpath == "reflect" {
switch f.Nname.Sym().Name {
case "callReflect", "callMethod":
flag |= obj.WRAPPER
}
}
Ctxt.InitTextSym(f.lsym, flag)
base.Ctxt.InitTextSym(f.LSym, flag)
}
func ggloblnod(nam *Node) {
s := nam.Sym.Linksym()
func ggloblnod(nam ir.Node) {
s := nam.Sym().Linksym()
s.Gotype = ngotype(nam).Linksym()
flags := 0
if nam.Name.Readonly() {
if nam.Name().Readonly() {
flags = obj.RODATA
}
if nam.Type != nil && !nam.Type.HasPointers() {
if nam.Type() != nil && !nam.Type().HasPointers() {
flags |= obj.NOPTR
}
Ctxt.Globl(s, nam.Type.Width, flags)
if nam.Name.LibfuzzerExtraCounter() {
base.Ctxt.Globl(s, nam.Type().Width, flags)
if nam.Name().LibfuzzerExtraCounter() {
s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
}
if nam.Sym.Linkname != "" {
if nam.Sym().Linkname != "" {
// Make sure linkname'd symbol is non-package. When a symbol is
// both imported and linkname'd, s.Pkg may not set to "_" in
// types.Sym.Linksym because LSym already exists. Set it here.
@@ -315,7 +317,7 @@ func ggloblsym(s *obj.LSym, width int32, flags int16) {
s.Set(obj.AttrLocal, true)
flags &^= obj.LOCAL
}
Ctxt.Globl(s, int64(width), int(flags))
base.Ctxt.Globl(s, int64(width), int(flags))
}
func Addrconst(a *obj.Addr, v int64) {
@@ -326,7 +328,7 @@ func Addrconst(a *obj.Addr, v int64) {
func Patch(p *obj.Prog, to *obj.Prog) {
if p.To.Type != obj.TYPE_BRANCH {
Fatalf("patch: not a branch")
base.Fatalf("patch: not a branch")
}
p.To.SetTarget(to)
p.To.Offset = to.Pc

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@@ -5,6 +5,8 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
)
@@ -15,8 +17,9 @@ import (
// the name, normally "pkg.init", is altered to "pkg.init.0".
var renameinitgen int
// Dummy function for autotmps generated during typechecking.
var dummyInitFn = nod(ODCLFUNC, nil, nil)
// Function collecting autotmps generated during typechecking,
// to be included in the package-level init function.
var initTodo = ir.Nod(ir.ODCLFUNC, nil, nil)
func renameinit() *types.Sym {
s := lookupN("init.", renameinitgen)
@@ -30,7 +33,7 @@ func renameinit() *types.Sym {
// 1) Initialize all of the packages the current package depends on.
// 2) Initialize all the variables that have initializers.
// 3) Run any init functions.
func fninit(n []*Node) {
func fninit(n []ir.Node) {
nf := initOrder(n)
var deps []*obj.LSym // initTask records for packages the current package depends on
@@ -43,16 +46,16 @@ func fninit(n []*Node) {
// Make a function that contains all the initialization statements.
if len(nf) > 0 {
lineno = nf[0].Pos // prolog/epilog gets line number of first init stmt
base.Pos = nf[0].Pos() // prolog/epilog gets line number of first init stmt
initializers := lookup("init")
fn := dclfunc(initializers, nod(OTFUNC, nil, nil))
for _, dcl := range dummyInitFn.Func.Dcl {
dcl.Name.Curfn = fn
fn := dclfunc(initializers, ir.Nod(ir.OTFUNC, nil, nil))
for _, dcl := range initTodo.Func().Dcl {
dcl.Name().Curfn = fn
}
fn.Func.Dcl = append(fn.Func.Dcl, dummyInitFn.Func.Dcl...)
dummyInitFn.Func.Dcl = nil
fn.Func().Dcl = append(fn.Func().Dcl, initTodo.Func().Dcl...)
initTodo.Func().Dcl = nil
fn.Nbody.Set(nf)
fn.PtrBody().Set(nf)
funcbody()
fn = typecheck(fn, ctxStmt)
@@ -62,35 +65,35 @@ func fninit(n []*Node) {
xtop = append(xtop, fn)
fns = append(fns, initializers.Linksym())
}
if dummyInitFn.Func.Dcl != nil {
// We only generate temps using dummyInitFn if there
if initTodo.Func().Dcl != nil {
// We only generate temps using initTodo if there
// are package-scope initialization statements, so
// something's weird if we get here.
Fatalf("dummyInitFn still has declarations")
base.Fatalf("initTodo still has declarations")
}
dummyInitFn = nil
initTodo = nil
// Record user init functions.
for i := 0; i < renameinitgen; i++ {
s := lookupN("init.", i)
fn := asNode(s.Def).Name.Defn
fn := ir.AsNode(s.Def).Name().Defn
// Skip init functions with empty bodies.
if fn.Nbody.Len() == 1 && fn.Nbody.First().Op == OEMPTY {
if fn.Body().Len() == 1 && fn.Body().First().Op() == ir.OEMPTY {
continue
}
fns = append(fns, s.Linksym())
}
if len(deps) == 0 && len(fns) == 0 && localpkg.Name != "main" && localpkg.Name != "runtime" {
if len(deps) == 0 && len(fns) == 0 && ir.LocalPkg.Name != "main" && ir.LocalPkg.Name != "runtime" {
return // nothing to initialize
}
// Make an .inittask structure.
sym := lookup(".inittask")
nn := newname(sym)
nn.Type = types.Types[TUINT8] // dummy type
nn.SetClass(PEXTERN)
sym.Def = asTypesNode(nn)
nn := NewName(sym)
nn.SetType(types.Types[types.TUINT8]) // fake type
nn.SetClass(ir.PEXTERN)
sym.Def = nn
exportsym(nn)
lsym := sym.Linksym()
ot := 0
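
// For orientation: the lsym/ot code above is laying out the ".inittask"
// record. The runtime-side shape that layout must match looks roughly like
// this (a paraphrase of runtime.initTask as of this change; after the three
// header words the emitter writes ndeps pointers to dependee tasks, then
// nfns init-function PCs):
//
//	type initTask struct {
//		state uintptr // 0 = uninitialized, 1 = in progress, 2 = done
//		ndeps uintptr // number of *initTask records that follow
//		nfns  uintptr // number of init-function PCs that follow
//	}
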


@@ -8,6 +8,10 @@ import (
"bytes"
"container/heap"
"fmt"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
)
// Package initialization
@@ -60,7 +64,7 @@ const (
type InitOrder struct {
// blocking maps initialization assignments to the assignments
// that depend on it.
blocking map[*Node][]*Node
blocking map[ir.Node][]ir.Node
// ready is the queue of Pending initialization assignments
// that are ready for initialization.
@@ -71,45 +75,43 @@ type InitOrder struct {
// package-level declarations (in declaration order) and outputs the
// corresponding list of statements to include in the init() function
// body.
func initOrder(l []*Node) []*Node {
func initOrder(l []ir.Node) []ir.Node {
s := InitSchedule{
initplans: make(map[*Node]*InitPlan),
inittemps: make(map[*Node]*Node),
initplans: make(map[ir.Node]*InitPlan),
inittemps: make(map[ir.Node]ir.Node),
}
o := InitOrder{
blocking: make(map[*Node][]*Node),
blocking: make(map[ir.Node][]ir.Node),
}
// Process all package-level assignments in declaration order.
for _, n := range l {
switch n.Op {
case OAS, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
switch n.Op() {
case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
o.processAssign(n)
o.flushReady(s.staticInit)
case ODCLCONST, ODCLFUNC, ODCLTYPE:
case ir.ODCLCONST, ir.ODCLFUNC, ir.ODCLTYPE:
// nop
default:
Fatalf("unexpected package-level statement: %v", n)
base.Fatalf("unexpected package-level statement: %v", n)
}
}
// Check that all assignments are now Done; if not, there must
// have been a dependency cycle.
for _, n := range l {
switch n.Op {
case OAS, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
switch n.Op() {
case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
if n.Initorder() != InitDone {
// If there have already been errors
// printed, those errors may have
// confused us and there might not be
// a loop. Let the user fix those
// first.
if nerrors > 0 {
errorexit()
}
base.ExitIfErrors()
findInitLoopAndExit(firstLHS(n), new([]*Node))
Fatalf("initialization unfinished, but failed to identify loop")
findInitLoopAndExit(firstLHS(n), new([]ir.Node))
base.Fatalf("initialization unfinished, but failed to identify loop")
}
}
}
@@ -117,34 +119,34 @@ func initOrder(l []*Node) []*Node {
// Invariant consistency check. If this is non-zero, then we
// should have found a cycle above.
if len(o.blocking) != 0 {
Fatalf("expected empty map: %v", o.blocking)
base.Fatalf("expected empty map: %v", o.blocking)
}
return s.out
}
func (o *InitOrder) processAssign(n *Node) {
if n.Initorder() != InitNotStarted || n.Xoffset != BADWIDTH {
Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset)
func (o *InitOrder) processAssign(n ir.Node) {
if n.Initorder() != InitNotStarted || n.Offset() != types.BADWIDTH {
base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset())
}
n.SetInitorder(InitPending)
n.Xoffset = 0
n.SetOffset(0)
// Compute number of variable dependencies and build the
// inverse dependency ("blocking") graph.
for dep := range collectDeps(n, true) {
defn := dep.Name.Defn
defn := dep.Name().Defn
// Skip dependencies on functions (PFUNC) and
// variables already initialized (InitDone).
if dep.Class() != PEXTERN || defn.Initorder() == InitDone {
if dep.Class() != ir.PEXTERN || defn.Initorder() == InitDone {
continue
}
n.Xoffset++
n.SetOffset(n.Offset() + 1)
o.blocking[defn] = append(o.blocking[defn], n)
}
if n.Xoffset == 0 {
if n.Offset() == 0 {
heap.Push(&o.ready, n)
}
}
@@ -152,23 +154,23 @@ func (o *InitOrder) processAssign(n *Node) {
// flushReady repeatedly applies initialize to the earliest (in
// declaration order) assignment ready for initialization and updates
// the inverse dependency ("blocking") graph.
func (o *InitOrder) flushReady(initialize func(*Node)) {
func (o *InitOrder) flushReady(initialize func(ir.Node)) {
for o.ready.Len() != 0 {
n := heap.Pop(&o.ready).(*Node)
if n.Initorder() != InitPending || n.Xoffset != 0 {
Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset)
n := heap.Pop(&o.ready).(ir.Node)
if n.Initorder() != InitPending || n.Offset() != 0 {
base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset())
}
initialize(n)
n.SetInitorder(InitDone)
n.Xoffset = BADWIDTH
n.SetOffset(types.BADWIDTH)
blocked := o.blocking[n]
delete(o.blocking, n)
for _, m := range blocked {
m.Xoffset--
if m.Xoffset == 0 {
m.SetOffset(m.Offset() - 1)
if m.Offset() == 0 {
heap.Push(&o.ready, m)
}
}
@@ -181,7 +183,7 @@ func (o *InitOrder) flushReady(initialize func(*Node)) {
// path points to a slice used for tracking the sequence of
// variables/functions visited. Using a pointer to a slice allows the
// slice capacity to grow and limit reallocations.
func findInitLoopAndExit(n *Node, path *[]*Node) {
func findInitLoopAndExit(n ir.Node, path *[]ir.Node) {
// We implement a simple DFS loop-finding algorithm. This
// could be faster, but initialization cycles are rare.
@@ -194,14 +196,14 @@ func findInitLoopAndExit(n *Node, path *[]*Node) {
// There might be multiple loops involving n; by sorting
// references, we deterministically pick the one reported.
refers := collectDeps(n.Name.Defn, false).Sorted(func(ni, nj *Node) bool {
return ni.Pos.Before(nj.Pos)
refers := collectDeps(n.Name().Defn, false).Sorted(func(ni, nj ir.Node) bool {
return ni.Pos().Before(nj.Pos())
})
*path = append(*path, n)
for _, ref := range refers {
// Short-circuit variables that were initialized.
if ref.Class() == PEXTERN && ref.Name.Defn.Initorder() == InitDone {
if ref.Class() == ir.PEXTERN && ref.Name().Defn.Initorder() == InitDone {
continue
}
@@ -213,12 +215,12 @@ func findInitLoopAndExit(n *Node, path *[]*Node) {
// reportInitLoopAndExit reports an initialization loop as an error
// and exits. However, if l is not actually an initialization loop, it
// simply returns instead.
func reportInitLoopAndExit(l []*Node) {
func reportInitLoopAndExit(l []ir.Node) {
// Rotate loop so that the earliest variable declaration is at
// the start.
i := -1
for j, n := range l {
if n.Class() == PEXTERN && (i == -1 || n.Pos.Before(l[i].Pos)) {
if n.Class() == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) {
i = j
}
}
@@ -236,61 +238,60 @@ func reportInitLoopAndExit(l []*Node) {
var msg bytes.Buffer
fmt.Fprintf(&msg, "initialization loop:\n")
for _, n := range l {
fmt.Fprintf(&msg, "\t%v: %v refers to\n", n.Line(), n)
fmt.Fprintf(&msg, "\t%v: %v refers to\n", ir.Line(n), n)
}
fmt.Fprintf(&msg, "\t%v: %v", l[0].Line(), l[0])
fmt.Fprintf(&msg, "\t%v: %v", ir.Line(l[0]), l[0])
yyerrorl(l[0].Pos, msg.String())
errorexit()
base.ErrorfAt(l[0].Pos(), msg.String())
base.ErrorExit()
}
// collectDeps returns all of the package-level functions and
// variables that declaration n depends on. If transitive is true,
// then it also includes the transitive dependencies of any depended
// upon functions (but not variables).
func collectDeps(n *Node, transitive bool) NodeSet {
func collectDeps(n ir.Node, transitive bool) ir.NodeSet {
d := initDeps{transitive: transitive}
switch n.Op {
case OAS:
d.inspect(n.Right)
case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
d.inspect(n.Right)
case ODCLFUNC:
d.inspectList(n.Nbody)
switch n.Op() {
case ir.OAS:
d.inspect(n.Right())
case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
d.inspect(n.Right())
case ir.ODCLFUNC:
d.inspectList(n.Body())
default:
Fatalf("unexpected Op: %v", n.Op)
base.Fatalf("unexpected Op: %v", n.Op())
}
return d.seen
}
type initDeps struct {
transitive bool
seen NodeSet
seen ir.NodeSet
}
func (d *initDeps) inspect(n *Node) { inspect(n, d.visit) }
func (d *initDeps) inspectList(l Nodes) { inspectList(l, d.visit) }
func (d *initDeps) inspect(n ir.Node) { ir.Inspect(n, d.visit) }
func (d *initDeps) inspectList(l ir.Nodes) { ir.InspectList(l, d.visit) }
// visit calls foundDep on any package-level functions or variables
// referenced by n, if any.
func (d *initDeps) visit(n *Node) bool {
switch n.Op {
case ONAME:
if n.isMethodExpression() {
d.foundDep(asNode(n.Type.FuncType().Nname))
func (d *initDeps) visit(n ir.Node) bool {
switch n.Op() {
case ir.OMETHEXPR:
d.foundDep(methodExprName(n))
return false
}
case ir.ONAME:
switch n.Class() {
case PEXTERN, PFUNC:
case ir.PEXTERN, ir.PFUNC:
d.foundDep(n)
}
case OCLOSURE:
d.inspectList(n.Func.Closure.Nbody)
case ir.OCLOSURE:
d.inspectList(n.Func().Decl.Body())
case ODOTMETH, OCALLPART:
d.foundDep(asNode(n.Type.FuncType().Nname))
case ir.ODOTMETH, ir.OCALLPART:
d.foundDep(methodExprName(n))
}
return true
@@ -298,7 +299,7 @@ func (d *initDeps) visit(n *Node) bool {
// foundDep records that we've found a dependency on n by adding it to
// seen.
func (d *initDeps) foundDep(n *Node) {
func (d *initDeps) foundDep(n ir.Node) {
// Can happen with method expressions involving interface
// types; e.g., fixedbugs/issue4495.go.
if n == nil {
@@ -307,7 +308,7 @@ func (d *initDeps) foundDep(n *Node) {
// Names without definitions aren't interesting as far as
// initialization ordering goes.
if n.Name.Defn == nil {
if n.Name().Defn == nil {
return
}
@@ -315,8 +316,8 @@ func (d *initDeps) foundDep(n *Node) {
return
}
d.seen.Add(n)
if d.transitive && n.Class() == PFUNC {
d.inspectList(n.Name.Defn.Nbody)
if d.transitive && n.Class() == ir.PFUNC {
d.inspectList(n.Name().Defn.Body())
}
}
@@ -327,13 +328,15 @@ func (d *initDeps) foundDep(n *Node) {
// an OAS node's Pos may not be unique. For example, given the
// declaration "var a, b = f(), g()", "a" must be ordered before "b",
// but both OAS nodes use the "=" token's position as their Pos.
type declOrder []*Node
type declOrder []ir.Node
func (s declOrder) Len() int { return len(s) }
func (s declOrder) Less(i, j int) bool { return firstLHS(s[i]).Pos.Before(firstLHS(s[j]).Pos) }
func (s declOrder) Less(i, j int) bool {
return firstLHS(s[i]).Pos().Before(firstLHS(s[j]).Pos())
}
func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(*Node)) }
func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(ir.Node)) }
func (s *declOrder) Pop() interface{} {
n := (*s)[len(*s)-1]
*s = (*s)[:len(*s)-1]
@@ -342,14 +345,14 @@ func (s *declOrder) Pop() interface{} {
// firstLHS returns the first expression on the left-hand side of
// assignment n.
func firstLHS(n *Node) *Node {
switch n.Op {
case OAS:
return n.Left
case OAS2DOTTYPE, OAS2FUNC, OAS2RECV, OAS2MAPR:
return n.List.First()
func firstLHS(n ir.Node) ir.Node {
switch n.Op() {
case ir.OAS:
return n.Left()
case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2RECV, ir.OAS2MAPR:
return n.List().First()
}
Fatalf("unexpected Op: %v", n.Op)
base.Fatalf("unexpected Op: %v", n.Op())
return nil
}
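
// initOrder above is a dependency-counted topological sort: each assignment
// carries a count of unresolved dependencies (stashed in Offset), the
// "blocking" map records the reverse edges, and a heap keyed by declaration
// position keeps the output deterministic. A toy standalone version, with an
// invented assign type, modeling "var a = b + c; var b = 1; var c = b":
package main

import (
	"container/heap"
	"fmt"
)

type assign struct {
	name    string
	pos     int       // declaration order; the heap's sort key
	ndeps   int       // counterpart of the Offset() counter above
	blocked []*assign // assignments waiting on this one (the "blocking" map)
}

type readyQueue []*assign

func (q readyQueue) Len() int            { return len(q) }
func (q readyQueue) Less(i, j int) bool  { return q[i].pos < q[j].pos }
func (q readyQueue) Swap(i, j int)       { q[i], q[j] = q[j], q[i] }
func (q *readyQueue) Push(x interface{}) { *q = append(*q, x.(*assign)) }
func (q *readyQueue) Pop() interface{} {
	old := *q
	n := old[len(old)-1]
	*q = old[:len(old)-1]
	return n
}

func main() {
	a := &assign{name: "a", pos: 0, ndeps: 2}
	b := &assign{name: "b", pos: 1}
	c := &assign{name: "c", pos: 2, ndeps: 1}
	b.blocked = []*assign{a, c}
	c.blocked = []*assign{a}

	q := &readyQueue{b} // only b starts with zero dependencies
	heap.Init(q)
	for q.Len() > 0 {
		n := heap.Pop(q).(*assign)
		fmt.Println("initialize", n.name) // prints b, then c, then a
		for _, m := range n.blocked {
			m.ndeps--
			if m.ndeps == 0 {
				heap.Push(q, m)
			}
		}
	}
}
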

File diff suppressed because it is too large.

@@ -5,6 +5,8 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/internal/objabi"
"cmd/internal/src"
@@ -12,12 +14,8 @@ import (
"strings"
)
// lineno is the source position at the start of the most recently lexed token.
// TODO(gri) rename and eventually remove
var lineno src.XPos
func makePos(base *src.PosBase, line, col uint) src.XPos {
return Ctxt.PosTable.XPos(src.MakePos(base, line, col))
func makePos(b *src.PosBase, line, col uint) src.XPos {
return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col))
}
func isSpace(c rune) bool {
@@ -28,78 +26,51 @@ func isQuoted(s string) bool {
return len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"'
}
type PragmaFlag int16
const (
// Func pragmas.
Nointerface PragmaFlag = 1 << iota
Noescape // func parameters don't escape
Norace // func must not have race detector annotations
Nosplit // func should not execute on separate stack
Noinline // func should not be inlined
NoCheckPtr // func should not be instrumented by checkptr
CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
UintptrEscapes // pointers converted to uintptr escape
FuncPragmas = ir.Nointerface |
ir.Noescape |
ir.Norace |
ir.Nosplit |
ir.Noinline |
ir.NoCheckPtr |
ir.CgoUnsafeArgs |
ir.UintptrEscapes |
ir.Systemstack |
ir.Nowritebarrier |
ir.Nowritebarrierrec |
ir.Yeswritebarrierrec
// Runtime-only func pragmas.
// See ../../../../runtime/README.md for detailed descriptions.
Systemstack // func must run on system stack
Nowritebarrier // emit compiler error instead of write barrier
Nowritebarrierrec // error on write barrier in this or recursive callees
Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees
// Runtime and cgo type pragmas
NotInHeap // values of this type must not be heap allocated
// Go command pragmas
GoBuildPragma
TypePragmas = ir.NotInHeap
)
const (
FuncPragmas = Nointerface |
Noescape |
Norace |
Nosplit |
Noinline |
NoCheckPtr |
CgoUnsafeArgs |
UintptrEscapes |
Systemstack |
Nowritebarrier |
Nowritebarrierrec |
Yeswritebarrierrec
TypePragmas = NotInHeap
)
func pragmaFlag(verb string) PragmaFlag {
func pragmaFlag(verb string) ir.PragmaFlag {
switch verb {
case "go:build":
return GoBuildPragma
return ir.GoBuildPragma
case "go:nointerface":
if objabi.Fieldtrack_enabled != 0 {
return Nointerface
return ir.Nointerface
}
case "go:noescape":
return Noescape
return ir.Noescape
case "go:norace":
return Norace
return ir.Norace
case "go:nosplit":
return Nosplit | NoCheckPtr // implies NoCheckPtr (see #34972)
return ir.Nosplit | ir.NoCheckPtr // implies NoCheckPtr (see #34972)
case "go:noinline":
return Noinline
return ir.Noinline
case "go:nocheckptr":
return NoCheckPtr
return ir.NoCheckPtr
case "go:systemstack":
return Systemstack
return ir.Systemstack
case "go:nowritebarrier":
return Nowritebarrier
return ir.Nowritebarrier
case "go:nowritebarrierrec":
return Nowritebarrierrec | Nowritebarrier // implies Nowritebarrier
return ir.Nowritebarrierrec | ir.Nowritebarrier // implies Nowritebarrier
case "go:yeswritebarrierrec":
return Yeswritebarrierrec
return ir.Yeswritebarrierrec
case "go:cgo_unsafe_args":
return CgoUnsafeArgs | NoCheckPtr // implies NoCheckPtr (see #34968)
return ir.CgoUnsafeArgs | ir.NoCheckPtr // implies NoCheckPtr (see #34968)
case "go:uintptrescapes":
// For the next function declared in the file
// any uintptr arguments may be pointer values
@@ -112,9 +83,9 @@ func pragmaFlag(verb string) PragmaFlag {
// call. The conversion to uintptr must appear
// in the argument list.
// Used in syscall/dll_windows.go.
return UintptrEscapes
return ir.UintptrEscapes
case "go:notinheap":
return NotInHeap
return ir.NotInHeap
}
return 0
}

File diff suppressed because it is too large.

@@ -35,7 +35,10 @@ func main() {
fmt.Fprintln(&b)
fmt.Fprintln(&b, "package gc")
fmt.Fprintln(&b)
fmt.Fprintln(&b, `import "cmd/compile/internal/types"`)
fmt.Fprintln(&b, `import (`)
fmt.Fprintln(&b, ` "cmd/compile/internal/ir"`)
fmt.Fprintln(&b, ` "cmd/compile/internal/types"`)
fmt.Fprintln(&b, `)`)
mkbuiltin(&b, "runtime")
@@ -144,12 +147,12 @@ func (i *typeInterner) mktype(t ast.Expr) string {
case "rune":
return "types.Runetype"
}
return fmt.Sprintf("types.Types[T%s]", strings.ToUpper(t.Name))
return fmt.Sprintf("types.Types[types.T%s]", strings.ToUpper(t.Name))
case *ast.SelectorExpr:
if t.X.(*ast.Ident).Name != "unsafe" || t.Sel.Name != "Pointer" {
log.Fatalf("unhandled type: %#v", t)
}
return "types.Types[TUNSAFEPTR]"
return "types.Types[types.TUNSAFEPTR]"
case *ast.ArrayType:
if t.Len == nil {
@@ -171,7 +174,7 @@ func (i *typeInterner) mktype(t ast.Expr) string {
if len(t.Methods.List) != 0 {
log.Fatal("non-empty interfaces unsupported")
}
return "types.Types[TINTER]"
return "types.Types[types.TINTER]"
case *ast.MapType:
return fmt.Sprintf("types.NewMap(%s, %s)", i.subtype(t.Key), i.subtype(t.Value))
case *ast.StarExpr:
@@ -204,7 +207,7 @@ func (i *typeInterner) fields(fl *ast.FieldList, keepNames bool) string {
}
}
}
return fmt.Sprintf("[]*Node{%s}", strings.Join(res, ", "))
return fmt.Sprintf("[]ir.Node{%s}", strings.Join(res, ", "))
}
func intconst(e ast.Expr) int64 {


@@ -1,357 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"fmt"
"math"
"math/big"
)
// implements float arithmetic
const (
// Maximum size in bits for Mpints before signalling
// overflow and also mantissa precision for Mpflts.
Mpprec = 512
// Turn on for constant arithmetic debugging output.
Mpdebug = false
)
// Mpflt represents a floating-point constant.
type Mpflt struct {
Val big.Float
}
// Mpcplx represents a complex constant.
type Mpcplx struct {
Real Mpflt
Imag Mpflt
}
// Use newMpflt (not new(Mpflt)!) to get the correct default precision.
func newMpflt() *Mpflt {
var a Mpflt
a.Val.SetPrec(Mpprec)
return &a
}
// Use newMpcmplx (not new(Mpcplx)!) to get the correct default precision.
func newMpcmplx() *Mpcplx {
var a Mpcplx
a.Real = *newMpflt()
a.Imag = *newMpflt()
return &a
}
func (a *Mpflt) SetInt(b *Mpint) {
if b.checkOverflow(0) {
// sign doesn't really matter but copy anyway
a.Val.SetInf(b.Val.Sign() < 0)
return
}
a.Val.SetInt(&b.Val)
}
func (a *Mpflt) Set(b *Mpflt) {
a.Val.Set(&b.Val)
}
func (a *Mpflt) Add(b *Mpflt) {
if Mpdebug {
fmt.Printf("\n%v + %v", a, b)
}
a.Val.Add(&a.Val, &b.Val)
if Mpdebug {
fmt.Printf(" = %v\n\n", a)
}
}
func (a *Mpflt) AddFloat64(c float64) {
var b Mpflt
b.SetFloat64(c)
a.Add(&b)
}
func (a *Mpflt) Sub(b *Mpflt) {
if Mpdebug {
fmt.Printf("\n%v - %v", a, b)
}
a.Val.Sub(&a.Val, &b.Val)
if Mpdebug {
fmt.Printf(" = %v\n\n", a)
}
}
func (a *Mpflt) Mul(b *Mpflt) {
if Mpdebug {
fmt.Printf("%v\n * %v\n", a, b)
}
a.Val.Mul(&a.Val, &b.Val)
if Mpdebug {
fmt.Printf(" = %v\n\n", a)
}
}
func (a *Mpflt) MulFloat64(c float64) {
var b Mpflt
b.SetFloat64(c)
a.Mul(&b)
}
func (a *Mpflt) Quo(b *Mpflt) {
if Mpdebug {
fmt.Printf("%v\n / %v\n", a, b)
}
a.Val.Quo(&a.Val, &b.Val)
if Mpdebug {
fmt.Printf(" = %v\n\n", a)
}
}
func (a *Mpflt) Cmp(b *Mpflt) int {
return a.Val.Cmp(&b.Val)
}
func (a *Mpflt) CmpFloat64(c float64) int {
if c == 0 {
return a.Val.Sign() // common case shortcut
}
return a.Val.Cmp(big.NewFloat(c))
}
func (a *Mpflt) Float64() float64 {
x, _ := a.Val.Float64()
// check for overflow
if math.IsInf(x, 0) && nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpflt Float64")
}
return x + 0 // avoid -0 (should not be needed, but be conservative)
}
func (a *Mpflt) Float32() float64 {
x32, _ := a.Val.Float32()
x := float64(x32)
// check for overflow
if math.IsInf(x, 0) && nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpflt Float32")
}
return x + 0 // avoid -0 (should not be needed, but be conservative)
}
func (a *Mpflt) SetFloat64(c float64) {
if Mpdebug {
fmt.Printf("\nconst %g", c)
}
// convert -0 to 0
if c == 0 {
c = 0
}
a.Val.SetFloat64(c)
if Mpdebug {
fmt.Printf(" = %v\n", a)
}
}
func (a *Mpflt) Neg() {
// avoid -0
if a.Val.Sign() != 0 {
a.Val.Neg(&a.Val)
}
}
func (a *Mpflt) SetString(as string) {
f, _, err := a.Val.Parse(as, 0)
if err != nil {
yyerror("malformed constant: %s (%v)", as, err)
a.Val.SetFloat64(0)
return
}
if f.IsInf() {
yyerror("constant too large: %s", as)
a.Val.SetFloat64(0)
return
}
// -0 becomes 0
if f.Sign() == 0 && f.Signbit() {
a.Val.SetFloat64(0)
}
}
func (f *Mpflt) String() string {
return f.Val.Text('b', 0)
}
func (fvp *Mpflt) GoString() string {
// determine sign
sign := ""
f := &fvp.Val
if f.Sign() < 0 {
sign = "-"
f = new(big.Float).Abs(f)
}
// Don't try to convert infinities (will not terminate).
if f.IsInf() {
return sign + "Inf"
}
// Use exact fmt formatting if in float64 range (common case):
// proceed if f doesn't underflow to 0 or overflow to inf.
if x, _ := f.Float64(); f.Sign() == 0 == (x == 0) && !math.IsInf(x, 0) {
return fmt.Sprintf("%s%.6g", sign, x)
}
// Out of float64 range. Do an approximate manual conversion to
// decimal to avoid precise but possibly slow Float formatting.
// f = mant * 2**exp
var mant big.Float
exp := f.MantExp(&mant) // 0.5 <= mant < 1.0
// approximate float64 mantissa m and decimal exponent d
// f ~ m * 10**d
m, _ := mant.Float64() // 0.5 <= m < 1.0
d := float64(exp) * (math.Ln2 / math.Ln10) // log_10(2)
// adjust m for truncated (integer) decimal exponent e
e := int64(d)
m *= math.Pow(10, d-float64(e))
// ensure 1 <= m < 10
switch {
case m < 1-0.5e-6:
// The %.6g format below rounds m to 5 digits after the
// decimal point. Make sure that m*10 < 10 even after
// rounding up: m*10 + 0.5e-5 < 10 => m < 1 - 0.5e-6.
m *= 10
e--
case m >= 10:
m /= 10
e++
}
return fmt.Sprintf("%s%.6ge%+d", sign, m, e)
}
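
// A standalone check of the approximation above for a value outside float64
// range, f = 2^1200. MantExp gives mant = 0.5 and exp = 1201, so
// d = 1201*log10(2) ≈ 361.537, e = 361, and m ≈ 1.7219, matching
// 2^1200 ≈ 1.7219e361. (The full code also renormalizes m into [1, 10)
// before printing; this input doesn't need it.)
package main

import (
	"fmt"
	"math"
	"math/big"
)

func main() {
	f := new(big.Float).SetMantExp(big.NewFloat(1), 1200) // f = 2^1200
	var mant big.Float
	exp := f.MantExp(&mant) // f = mant * 2**exp, 0.5 <= mant < 1.0
	m, _ := mant.Float64()
	d := float64(exp) * (math.Ln2 / math.Ln10) // decimal exponent, fractional
	e := int64(d)
	m *= math.Pow(10, d-float64(e)) // fold the fractional part back into m
	fmt.Printf("%.6ge%+d\n", m, e)  // prints roughly 1.72187e+361
}
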
// complex multiply v *= rv
// (a, b) * (c, d) = (a*c - b*d, b*c + a*d)
func (v *Mpcplx) Mul(rv *Mpcplx) {
var ac, ad, bc, bd Mpflt
ac.Set(&v.Real)
ac.Mul(&rv.Real) // ac
bd.Set(&v.Imag)
bd.Mul(&rv.Imag) // bd
bc.Set(&v.Imag)
bc.Mul(&rv.Real) // bc
ad.Set(&v.Real)
ad.Mul(&rv.Imag) // ad
v.Real.Set(&ac)
v.Real.Sub(&bd) // ac-bd
v.Imag.Set(&bc)
v.Imag.Add(&ad) // bc+ad
}
// complex divide v /= rv
// (a, b) / (c, d) = ((a*c + b*d), (b*c - a*d))/(c*c + d*d)
func (v *Mpcplx) Div(rv *Mpcplx) bool {
if rv.Real.CmpFloat64(0) == 0 && rv.Imag.CmpFloat64(0) == 0 {
return false
}
var ac, ad, bc, bd, cc_plus_dd Mpflt
cc_plus_dd.Set(&rv.Real)
cc_plus_dd.Mul(&rv.Real) // cc
ac.Set(&rv.Imag)
ac.Mul(&rv.Imag) // dd
cc_plus_dd.Add(&ac) // cc+dd
// We already checked that c and d are not both zero, but we can't
// assume that c²+d² != 0 follows, because for tiny values of c
// and/or d, c²+d² can underflow to zero. Check that c²+d² is
// nonzero; return if it's not.
if cc_plus_dd.CmpFloat64(0) == 0 {
return false
}
ac.Set(&v.Real)
ac.Mul(&rv.Real) // ac
bd.Set(&v.Imag)
bd.Mul(&rv.Imag) // bd
bc.Set(&v.Imag)
bc.Mul(&rv.Real) // bc
ad.Set(&v.Real)
ad.Mul(&rv.Imag) // ad
v.Real.Set(&ac)
v.Real.Add(&bd) // ac+bd
v.Real.Quo(&cc_plus_dd) // (ac+bd)/(cc+dd)
v.Imag.Set(&bc)
v.Imag.Sub(&ad) // bc-ad
v.Imag.Quo(&cc_plus_dd) // (bc-ad)/(cc+dd)
return true
}
func (v *Mpcplx) String() string {
return fmt.Sprintf("(%s+%si)", v.Real.String(), v.Imag.String())
}
func (v *Mpcplx) GoString() string {
var re string
sre := v.Real.CmpFloat64(0)
if sre != 0 {
re = v.Real.GoString()
}
var im string
sim := v.Imag.CmpFloat64(0)
if sim != 0 {
im = v.Imag.GoString()
}
switch {
case sre == 0 && sim == 0:
return "0"
case sre == 0:
return im + "i"
case sim == 0:
return re
case sim < 0:
return fmt.Sprintf("(%s%si)", re, im)
default:
return fmt.Sprintf("(%s+%si)", re, im)
}
}
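
// The formulas implemented above can be sanity-checked against Go's built-in
// complex128 arithmetic. A standalone check with (a,b) = 3+4i and
// (c,d) = 1+2i:
package main

import "fmt"

func main() {
	a, b, c, d := 3.0, 4.0, 1.0, 2.0
	cc := c*c + d*d // c²+d²

	mulRe, mulIm := a*c-b*d, b*c+a*d           // (a,b)*(c,d)
	divRe, divIm := (a*c+b*d)/cc, (b*c-a*d)/cc // (a,b)/(c,d)

	fmt.Println(complex(mulRe, mulIm), complex(a, b)*complex(c, d)) // (-5+10i) (-5+10i)
	fmt.Println(complex(divRe, divIm), complex(a, b)/complex(c, d)) // (2.2-0.4i) (2.2-0.4i)
}
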


@@ -1,304 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"fmt"
"math/big"
)
// implements integer arithmetic
// Mpint represents an integer constant.
type Mpint struct {
Val big.Int
Ovf bool // set if Val overflowed compiler limit (sticky)
Rune bool // set if syntax indicates default type rune
}
func (a *Mpint) SetOverflow() {
a.Val.SetUint64(1) // avoid spurious div-zero errors
a.Ovf = true
}
func (a *Mpint) checkOverflow(extra int) bool {
// We don't need to be precise here; any reasonable upper limit would do.
// For now, use existing limit so we pass all the tests unchanged.
if a.Val.BitLen()+extra > Mpprec {
a.SetOverflow()
}
return a.Ovf
}
func (a *Mpint) Set(b *Mpint) {
a.Val.Set(&b.Val)
}
func (a *Mpint) SetFloat(b *Mpflt) bool {
// avoid converting huge floating-point numbers to integers
// (2*Mpprec is large enough to permit all tests to pass)
if b.Val.MantExp(nil) > 2*Mpprec {
a.SetOverflow()
return false
}
if _, acc := b.Val.Int(&a.Val); acc == big.Exact {
return true
}
const delta = 16 // a reasonably small number of bits > 0
var t big.Float
t.SetPrec(Mpprec - delta)
// try rounding down a little
t.SetMode(big.ToZero)
t.Set(&b.Val)
if _, acc := t.Int(&a.Val); acc == big.Exact {
return true
}
// try rounding up a little
t.SetMode(big.AwayFromZero)
t.Set(&b.Val)
if _, acc := t.Int(&a.Val); acc == big.Exact {
return true
}
a.Ovf = false
return false
}
func (a *Mpint) Add(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Add")
}
a.SetOverflow()
return
}
a.Val.Add(&a.Val, &b.Val)
if a.checkOverflow(0) {
yyerror("constant addition overflow")
}
}
func (a *Mpint) Sub(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Sub")
}
a.SetOverflow()
return
}
a.Val.Sub(&a.Val, &b.Val)
if a.checkOverflow(0) {
yyerror("constant subtraction overflow")
}
}
func (a *Mpint) Mul(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Mul")
}
a.SetOverflow()
return
}
a.Val.Mul(&a.Val, &b.Val)
if a.checkOverflow(0) {
yyerror("constant multiplication overflow")
}
}
func (a *Mpint) Quo(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Quo")
}
a.SetOverflow()
return
}
a.Val.Quo(&a.Val, &b.Val)
if a.checkOverflow(0) {
// can only happen for div-0 which should be checked elsewhere
yyerror("constant division overflow")
}
}
func (a *Mpint) Rem(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Rem")
}
a.SetOverflow()
return
}
a.Val.Rem(&a.Val, &b.Val)
if a.checkOverflow(0) {
// should never happen
yyerror("constant modulo overflow")
}
}
func (a *Mpint) Or(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Or")
}
a.SetOverflow()
return
}
a.Val.Or(&a.Val, &b.Val)
}
func (a *Mpint) And(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint And")
}
a.SetOverflow()
return
}
a.Val.And(&a.Val, &b.Val)
}
func (a *Mpint) AndNot(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint AndNot")
}
a.SetOverflow()
return
}
a.Val.AndNot(&a.Val, &b.Val)
}
func (a *Mpint) Xor(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Xor")
}
a.SetOverflow()
return
}
a.Val.Xor(&a.Val, &b.Val)
}
func (a *Mpint) Lsh(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Lsh")
}
a.SetOverflow()
return
}
s := b.Int64()
if s < 0 || s >= Mpprec {
msg := "shift count too large"
if s < 0 {
msg = "invalid negative shift count"
}
yyerror("%s: %d", msg, s)
a.SetInt64(0)
return
}
if a.checkOverflow(int(s)) {
yyerror("constant shift overflow")
return
}
a.Val.Lsh(&a.Val, uint(s))
}
func (a *Mpint) Rsh(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Rsh")
}
a.SetOverflow()
return
}
s := b.Int64()
if s < 0 {
yyerror("invalid negative shift count: %d", s)
if a.Val.Sign() < 0 {
a.SetInt64(-1)
} else {
a.SetInt64(0)
}
return
}
a.Val.Rsh(&a.Val, uint(s))
}
func (a *Mpint) Cmp(b *Mpint) int {
return a.Val.Cmp(&b.Val)
}
func (a *Mpint) CmpInt64(c int64) int {
if c == 0 {
return a.Val.Sign() // common case shortcut
}
return a.Val.Cmp(big.NewInt(c))
}
func (a *Mpint) Neg() {
a.Val.Neg(&a.Val)
}
func (a *Mpint) Int64() int64 {
if a.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("constant overflow")
}
return 0
}
return a.Val.Int64()
}
func (a *Mpint) SetInt64(c int64) {
a.Val.SetInt64(c)
}
func (a *Mpint) SetString(as string) {
_, ok := a.Val.SetString(as, 0)
if !ok {
// The lexer checks for correct syntax of the literal
// and reports detailed errors. Thus SetString should
// never fail (in theory it might run out of memory,
// but that wouldn't be reported as an error here).
Fatalf("malformed integer constant: %s", as)
return
}
if a.checkOverflow(0) {
yyerror("constant too large: %s", as)
}
}
func (a *Mpint) GoString() string {
return a.Val.String()
}
func (a *Mpint) String() string {
return fmt.Sprintf("%#x", &a.Val)
}
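
// The interesting part of the removed code is SetFloat's three-step
// conversion: try an exact truncation first, then retry at slightly reduced
// precision rounding toward zero, then away from zero. A minimal standalone
// sketch of that strategy (floatToInt and prec are invented names):
package main

import (
	"fmt"
	"math/big"
)

const prec = 512 // stand-in for Mpprec

func floatToInt(f *big.Float) (*big.Int, bool) {
	z := new(big.Int)
	if _, acc := f.Int(z); acc == big.Exact {
		return z, true
	}
	const delta = 16 // a reasonably small number of bits > 0
	var t big.Float
	t.SetPrec(prec - delta)
	for _, mode := range []big.RoundingMode{big.ToZero, big.AwayFromZero} {
		t.SetMode(mode)
		t.Set(f) // rounds f to the reduced precision
		if _, acc := t.Int(z); acc == big.Exact {
			return z, true
		}
	}
	return nil, false
}

func main() {
	f, _, _ := big.ParseFloat("1e40", 10, prec, big.ToNearestEven)
	i, ok := floatToInt(f)
	fmt.Println(i, ok) // 1e40 as an exact integer, true
	f.SetFloat64(1.5)
	i, ok = floatToInt(f)
	fmt.Println(i, ok) // <nil> false: 1.5 has no exact integer value
}
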

File diff suppressed because it is too large.

@@ -5,6 +5,8 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/bio"
"cmd/internal/obj"
@@ -13,6 +15,7 @@ import (
"crypto/sha256"
"encoding/json"
"fmt"
"go/constant"
"io"
"io/ioutil"
"os"
@@ -46,20 +49,20 @@ const (
)
func dumpobj() {
if linkobj == "" {
dumpobj1(outfile, modeCompilerObj|modeLinkerObj)
if base.Flag.LinkObj == "" {
dumpobj1(base.Flag.LowerO, modeCompilerObj|modeLinkerObj)
return
}
dumpobj1(outfile, modeCompilerObj)
dumpobj1(linkobj, modeLinkerObj)
dumpobj1(base.Flag.LowerO, modeCompilerObj)
dumpobj1(base.Flag.LinkObj, modeLinkerObj)
}
func dumpobj1(outfile string, mode int) {
bout, err := bio.Create(outfile)
if err != nil {
flusherrors()
base.FlushErrors()
fmt.Printf("can't create %s: %v\n", outfile, err)
errorexit()
base.ErrorExit()
}
defer bout.Close()
bout.WriteString("!<arch>\n")
@@ -78,10 +81,10 @@ func dumpobj1(outfile string, mode int) {
func printObjHeader(bout *bio.Writer) {
fmt.Fprintf(bout, "go object %s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring())
if buildid != "" {
fmt.Fprintf(bout, "build id %q\n", buildid)
if base.Flag.BuildID != "" {
fmt.Fprintf(bout, "build id %q\n", base.Flag.BuildID)
}
if localpkg.Name == "main" {
if ir.LocalPkg.Name == "main" {
fmt.Fprintf(bout, "main\n")
}
fmt.Fprintf(bout, "\n") // header ends with blank line
@@ -139,7 +142,7 @@ func dumpdata() {
for {
for i := xtops; i < len(xtop); i++ {
n := xtop[i]
if n.Op == ODCLFUNC {
if n.Op() == ir.ODCLFUNC {
funccompile(n)
}
}
@@ -168,13 +171,13 @@ func dumpdata() {
addGCLocals()
if exportlistLen != len(exportlist) {
Fatalf("exportlist changed after compile functions loop")
base.Fatalf("exportlist changed after compile functions loop")
}
if ptabsLen != len(ptabs) {
Fatalf("ptabs changed after compile functions loop")
base.Fatalf("ptabs changed after compile functions loop")
}
if itabsLen != len(itabs) {
Fatalf("itabs changed after compile functions loop")
base.Fatalf("itabs changed after compile functions loop")
}
}
@@ -186,27 +189,27 @@ func dumpLinkerObj(bout *bio.Writer) {
fmt.Fprintf(bout, "\n$$\n\n$$\n\n")
fmt.Fprintf(bout, "\n$$ // cgo\n")
if err := json.NewEncoder(bout).Encode(pragcgobuf); err != nil {
Fatalf("serializing pragcgobuf: %v", err)
base.Fatalf("serializing pragcgobuf: %v", err)
}
fmt.Fprintf(bout, "\n$$\n\n")
}
fmt.Fprintf(bout, "\n!\n")
obj.WriteObjFile(Ctxt, bout)
obj.WriteObjFile(base.Ctxt, bout)
}
func addptabs() {
if !Ctxt.Flag_dynlink || localpkg.Name != "main" {
if !base.Ctxt.Flag_dynlink || ir.LocalPkg.Name != "main" {
return
}
for _, exportn := range exportlist {
s := exportn.Sym
n := asNode(s.Def)
s := exportn.Sym()
n := ir.AsNode(s.Def)
if n == nil {
continue
}
if n.Op != ONAME {
if n.Op() != ir.ONAME {
continue
}
if !types.IsExported(s.Name) {
@@ -215,76 +218,61 @@ func addptabs() {
if s.Pkg.Name != "main" {
continue
}
if n.Type.Etype == TFUNC && n.Class() == PFUNC {
if n.Type().Etype == types.TFUNC && n.Class() == ir.PFUNC {
// function
ptabs = append(ptabs, ptabEntry{s: s, t: asNode(s.Def).Type})
ptabs = append(ptabs, ptabEntry{s: s, t: ir.AsNode(s.Def).Type()})
} else {
// variable
ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(asNode(s.Def).Type)})
ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(ir.AsNode(s.Def).Type())})
}
}
}
func dumpGlobal(n *Node) {
if n.Type == nil {
Fatalf("external %v nil type\n", n)
func dumpGlobal(n ir.Node) {
if n.Type() == nil {
base.Fatalf("external %v nil type\n", n)
}
if n.Class() == PFUNC {
if n.Class() == ir.PFUNC {
return
}
if n.Sym.Pkg != localpkg {
if n.Sym().Pkg != ir.LocalPkg {
return
}
dowidth(n.Type)
dowidth(n.Type())
ggloblnod(n)
}
func dumpGlobalConst(n *Node) {
func dumpGlobalConst(n ir.Node) {
// only export typed constants
t := n.Type
t := n.Type()
if t == nil {
return
}
if n.Sym.Pkg != localpkg {
if n.Sym().Pkg != ir.LocalPkg {
return
}
// only export integer constants for now
switch t.Etype {
case TINT8:
case TINT16:
case TINT32:
case TINT64:
case TINT:
case TUINT8:
case TUINT16:
case TUINT32:
case TUINT64:
case TUINT:
case TUINTPTR:
// ok
case TIDEAL:
if !Isconst(n, CTINT) {
if !t.IsInteger() {
return
}
x := n.Val().U.(*Mpint)
if x.Cmp(minintval[TINT]) < 0 || x.Cmp(maxintval[TINT]) > 0 {
v := n.Val()
if t.IsUntyped() {
// Export untyped integers as int (if they fit).
t = types.Types[types.TINT]
if doesoverflow(v, t) {
return
}
// Ideal integers we export as int (if they fit).
t = types.Types[TINT]
default:
return
}
Ctxt.DwarfIntConst(myimportpath, n.Sym.Name, typesymname(t), n.Int64Val())
base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, typesymname(t), ir.Int64Val(t, v))
}
func dumpglobls() {
// add globals
for _, n := range externdcl {
switch n.Op {
case ONAME:
switch n.Op() {
case ir.ONAME:
dumpGlobal(n)
case OLITERAL:
case ir.OLITERAL:
dumpGlobalConst(n)
}
}
@@ -307,7 +295,7 @@ func dumpglobls() {
// This is done during the sequential phase after compilation, since
// global symbols can't be declared during parallel compilation.
func addGCLocals() {
for _, s := range Ctxt.Text {
for _, s := range base.Ctxt.Text {
fn := s.Func()
if fn == nil {
continue
@@ -330,9 +318,9 @@ func addGCLocals() {
func duintxx(s *obj.LSym, off int, v uint64, wid int) int {
if off&(wid-1) != 0 {
Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
}
s.WriteInt(Ctxt, int64(off), wid, int64(v))
s.WriteInt(base.Ctxt, int64(off), wid, int64(v))
return off + wid
}
@@ -383,7 +371,7 @@ func stringsym(pos src.XPos, s string) (data *obj.LSym) {
symname = strconv.Quote(s)
}
symdata := Ctxt.Lookup(stringSymPrefix + symname)
symdata := base.Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
off := dstringdata(symdata, 0, s, pos, "string")
ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
@@ -426,7 +414,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.
if readonly {
sym = stringsym(pos, string(data))
} else {
sym = slicedata(pos, string(data)).Sym.Linksym()
sym = slicedata(pos, string(data)).Sym().Linksym()
}
if len(hash) > 0 {
sum := sha256.Sum256(data)
@@ -461,7 +449,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.
var symdata *obj.LSym
if readonly {
symname := fmt.Sprintf(stringSymPattern, size, sum)
symdata = Ctxt.Lookup(stringSymPrefix + symname)
symdata = base.Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
info := symdata.NewFileInfo()
info.Name = file
@@ -474,7 +462,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.
} else {
// Emit a zero-length data symbol
// and then fix up length and content to use file.
symdata = slicedata(pos, "").Sym.Linksym()
symdata = slicedata(pos, "").Sym().Linksym()
symdata.Size = size
symdata.Type = objabi.SNOPTRDATA
info := symdata.NewFileInfo()
@@ -487,12 +475,12 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.
var slicedataGen int
func slicedata(pos src.XPos, s string) *Node {
func slicedata(pos src.XPos, s string) ir.Node {
slicedataGen++
symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
sym := localpkg.Lookup(symname)
symnode := newname(sym)
sym.Def = asTypesNode(symnode)
sym := ir.LocalPkg.Lookup(symname)
symnode := NewName(sym)
sym.Def = symnode
lsym := sym.Linksym()
off := dstringdata(lsym, 0, s, pos, "slice")
@@ -501,11 +489,11 @@ func slicedata(pos src.XPos, s string) *Node {
return symnode
}
func slicebytes(nam *Node, s string) {
if nam.Op != ONAME {
Fatalf("slicebytes %v", nam)
func slicebytes(nam ir.Node, s string) {
if nam.Op() != ir.ONAME {
base.Fatalf("slicebytes %v", nam)
}
slicesym(nam, slicedata(nam.Pos, s), int64(len(s)))
slicesym(nam, slicedata(nam.Pos(), s), int64(len(s)))
}
func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
@@ -513,126 +501,133 @@ func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int
// causing a cryptic error message by the linker. Check for oversize objects here
// and provide a useful error message instead.
if int64(len(t)) > 2e9 {
yyerrorl(pos, "%v with length %v is too big", what, len(t))
base.ErrorfAt(pos, "%v with length %v is too big", what, len(t))
return 0
}
s.WriteString(Ctxt, int64(off), len(t), t)
s.WriteString(base.Ctxt, int64(off), len(t), t)
return off + len(t)
}
func dsymptr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
off = int(Rnd(int64(off), int64(Widthptr)))
s.WriteAddr(Ctxt, int64(off), Widthptr, x, int64(xoff))
s.WriteAddr(base.Ctxt, int64(off), Widthptr, x, int64(xoff))
off += Widthptr
return off
}
func dsymptrOff(s *obj.LSym, off int, x *obj.LSym) int {
s.WriteOff(Ctxt, int64(off), x, 0)
s.WriteOff(base.Ctxt, int64(off), x, 0)
off += 4
return off
}
func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
s.WriteWeakOff(Ctxt, int64(off), x, 0)
s.WriteWeakOff(base.Ctxt, int64(off), x, 0)
off += 4
return off
}
// slicesym writes a static slice symbol {&arr, lencap, lencap} to n.
// arr must be an ONAME. slicesym does not modify n.
func slicesym(n, arr *Node, lencap int64) {
s := n.Sym.Linksym()
base := n.Xoffset
if arr.Op != ONAME {
Fatalf("slicesym non-name arr %v", arr)
func slicesym(n, arr ir.Node, lencap int64) {
s := n.Sym().Linksym()
off := n.Offset()
if arr.Op() != ir.ONAME {
base.Fatalf("slicesym non-name arr %v", arr)
}
s.WriteAddr(Ctxt, base, Widthptr, arr.Sym.Linksym(), arr.Xoffset)
s.WriteInt(Ctxt, base+sliceLenOffset, Widthptr, lencap)
s.WriteInt(Ctxt, base+sliceCapOffset, Widthptr, lencap)
s.WriteAddr(base.Ctxt, off, Widthptr, arr.Sym().Linksym(), arr.Offset())
s.WriteInt(base.Ctxt, off+sliceLenOffset, Widthptr, lencap)
s.WriteInt(base.Ctxt, off+sliceCapOffset, Widthptr, lencap)
}
// addrsym writes the static address of a to n. a must be an ONAME.
// Neither n nor a is modified.
func addrsym(n, a *Node) {
if n.Op != ONAME {
Fatalf("addrsym n op %v", n.Op)
func addrsym(n, a ir.Node) {
if n.Op() != ir.ONAME {
base.Fatalf("addrsym n op %v", n.Op())
}
if n.Sym == nil {
Fatalf("addrsym nil n sym")
if n.Sym() == nil {
base.Fatalf("addrsym nil n sym")
}
if a.Op != ONAME {
Fatalf("addrsym a op %v", a.Op)
if a.Op() != ir.ONAME {
base.Fatalf("addrsym a op %v", a.Op())
}
s := n.Sym.Linksym()
s.WriteAddr(Ctxt, n.Xoffset, Widthptr, a.Sym.Linksym(), a.Xoffset)
s := n.Sym().Linksym()
s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, a.Sym().Linksym(), a.Offset())
}
// pfuncsym writes the static address of f to n. f must be a global function.
// Neither n nor f is modified.
func pfuncsym(n, f *Node) {
if n.Op != ONAME {
Fatalf("pfuncsym n op %v", n.Op)
func pfuncsym(n, f ir.Node) {
if n.Op() != ir.ONAME {
base.Fatalf("pfuncsym n op %v", n.Op())
}
if n.Sym == nil {
Fatalf("pfuncsym nil n sym")
if n.Sym() == nil {
base.Fatalf("pfuncsym nil n sym")
}
if f.Class() != PFUNC {
Fatalf("pfuncsym class not PFUNC %d", f.Class())
if f.Class() != ir.PFUNC {
base.Fatalf("pfuncsym class not PFUNC %d", f.Class())
}
s := n.Sym.Linksym()
s.WriteAddr(Ctxt, n.Xoffset, Widthptr, funcsym(f.Sym).Linksym(), f.Xoffset)
s := n.Sym().Linksym()
s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, funcsym(f.Sym()).Linksym(), f.Offset())
}
// litsym writes the static literal c to n.
// Neither n nor c is modified.
func litsym(n, c *Node, wid int) {
if n.Op != ONAME {
Fatalf("litsym n op %v", n.Op)
func litsym(n, c ir.Node, wid int) {
if n.Op() != ir.ONAME {
base.Fatalf("litsym n op %v", n.Op())
}
if c.Op != OLITERAL {
Fatalf("litsym c op %v", c.Op)
if n.Sym() == nil {
base.Fatalf("litsym nil n sym")
}
if n.Sym == nil {
Fatalf("litsym nil n sym")
if !types.Identical(n.Type(), c.Type()) {
base.Fatalf("litsym: type mismatch: %v has type %v, but %v has type %v", n, n.Type(), c, c.Type())
}
s := n.Sym.Linksym()
switch u := c.Val().U.(type) {
case bool:
i := int64(obj.Bool2int(u))
s.WriteInt(Ctxt, n.Xoffset, wid, i)
if c.Op() == ir.ONIL {
return
}
if c.Op() != ir.OLITERAL {
base.Fatalf("litsym c op %v", c.Op())
}
s := n.Sym().Linksym()
switch u := c.Val(); u.Kind() {
case constant.Bool:
i := int64(obj.Bool2int(constant.BoolVal(u)))
s.WriteInt(base.Ctxt, n.Offset(), wid, i)
case *Mpint:
s.WriteInt(Ctxt, n.Xoffset, wid, u.Int64())
case constant.Int:
s.WriteInt(base.Ctxt, n.Offset(), wid, ir.Int64Val(n.Type(), u))
case *Mpflt:
f := u.Float64()
switch n.Type.Etype {
case TFLOAT32:
s.WriteFloat32(Ctxt, n.Xoffset, float32(f))
case TFLOAT64:
s.WriteFloat64(Ctxt, n.Xoffset, f)
case constant.Float:
f, _ := constant.Float64Val(u)
switch n.Type().Etype {
case types.TFLOAT32:
s.WriteFloat32(base.Ctxt, n.Offset(), float32(f))
case types.TFLOAT64:
s.WriteFloat64(base.Ctxt, n.Offset(), f)
}
case *Mpcplx:
r := u.Real.Float64()
i := u.Imag.Float64()
switch n.Type.Etype {
case TCOMPLEX64:
s.WriteFloat32(Ctxt, n.Xoffset, float32(r))
s.WriteFloat32(Ctxt, n.Xoffset+4, float32(i))
case TCOMPLEX128:
s.WriteFloat64(Ctxt, n.Xoffset, r)
s.WriteFloat64(Ctxt, n.Xoffset+8, i)
case constant.Complex:
re, _ := constant.Float64Val(constant.Real(u))
im, _ := constant.Float64Val(constant.Imag(u))
switch n.Type().Etype {
case types.TCOMPLEX64:
s.WriteFloat32(base.Ctxt, n.Offset(), float32(re))
s.WriteFloat32(base.Ctxt, n.Offset()+4, float32(im))
case types.TCOMPLEX128:
s.WriteFloat64(base.Ctxt, n.Offset(), re)
s.WriteFloat64(base.Ctxt, n.Offset()+8, im)
}
case string:
symdata := stringsym(n.Pos, u)
s.WriteAddr(Ctxt, n.Xoffset, Widthptr, symdata, 0)
s.WriteInt(Ctxt, n.Xoffset+int64(Widthptr), Widthptr, int64(len(u)))
case constant.String:
i := constant.StringVal(u)
symdata := stringsym(n.Pos(), i)
s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, symdata, 0)
s.WriteInt(base.Ctxt, n.Offset()+int64(Widthptr), Widthptr, int64(len(i)))
default:
Fatalf("litsym unhandled OLITERAL %v", c)
base.Fatalf("litsym unhandled OLITERAL %v", c)
}
}
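
// litsym now drives off go/constant.Value kinds instead of the old
// Mpint/Mpflt wrappers. A minimal standalone illustration of that API,
// independent of the compiler:
package main

import (
	"fmt"
	"go/constant"
)

func main() {
	v := constant.MakeInt64(42)
	switch v.Kind() {
	case constant.Bool:
		fmt.Println(constant.BoolVal(v))
	case constant.Int:
		i, exact := constant.Int64Val(v)
		fmt.Println(i, exact) // 42 true
	case constant.Float:
		f, _ := constant.Float64Val(v)
		fmt.Println(f)
	case constant.Complex:
		re, _ := constant.Float64Val(constant.Real(v))
		im, _ := constant.Float64Val(constant.Imag(v))
		fmt.Println(complex(re, im))
	case constant.String:
		fmt.Println(constant.StringVal(v))
	}
}
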


@@ -1,175 +0,0 @@
// Code generated by "stringer -type=Op -trimprefix=O"; DO NOT EDIT.
package gc
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[OXXX-0]
_ = x[ONAME-1]
_ = x[ONONAME-2]
_ = x[OTYPE-3]
_ = x[OPACK-4]
_ = x[OLITERAL-5]
_ = x[OADD-6]
_ = x[OSUB-7]
_ = x[OOR-8]
_ = x[OXOR-9]
_ = x[OADDSTR-10]
_ = x[OADDR-11]
_ = x[OANDAND-12]
_ = x[OAPPEND-13]
_ = x[OBYTES2STR-14]
_ = x[OBYTES2STRTMP-15]
_ = x[ORUNES2STR-16]
_ = x[OSTR2BYTES-17]
_ = x[OSTR2BYTESTMP-18]
_ = x[OSTR2RUNES-19]
_ = x[OAS-20]
_ = x[OAS2-21]
_ = x[OAS2DOTTYPE-22]
_ = x[OAS2FUNC-23]
_ = x[OAS2MAPR-24]
_ = x[OAS2RECV-25]
_ = x[OASOP-26]
_ = x[OCALL-27]
_ = x[OCALLFUNC-28]
_ = x[OCALLMETH-29]
_ = x[OCALLINTER-30]
_ = x[OCALLPART-31]
_ = x[OCAP-32]
_ = x[OCLOSE-33]
_ = x[OCLOSURE-34]
_ = x[OCOMPLIT-35]
_ = x[OMAPLIT-36]
_ = x[OSTRUCTLIT-37]
_ = x[OARRAYLIT-38]
_ = x[OSLICELIT-39]
_ = x[OPTRLIT-40]
_ = x[OCONV-41]
_ = x[OCONVIFACE-42]
_ = x[OCONVNOP-43]
_ = x[OCOPY-44]
_ = x[ODCL-45]
_ = x[ODCLFUNC-46]
_ = x[ODCLFIELD-47]
_ = x[ODCLCONST-48]
_ = x[ODCLTYPE-49]
_ = x[ODELETE-50]
_ = x[ODOT-51]
_ = x[ODOTPTR-52]
_ = x[ODOTMETH-53]
_ = x[ODOTINTER-54]
_ = x[OXDOT-55]
_ = x[ODOTTYPE-56]
_ = x[ODOTTYPE2-57]
_ = x[OEQ-58]
_ = x[ONE-59]
_ = x[OLT-60]
_ = x[OLE-61]
_ = x[OGE-62]
_ = x[OGT-63]
_ = x[ODEREF-64]
_ = x[OINDEX-65]
_ = x[OINDEXMAP-66]
_ = x[OKEY-67]
_ = x[OSTRUCTKEY-68]
_ = x[OLEN-69]
_ = x[OMAKE-70]
_ = x[OMAKECHAN-71]
_ = x[OMAKEMAP-72]
_ = x[OMAKESLICE-73]
_ = x[OMAKESLICECOPY-74]
_ = x[OMUL-75]
_ = x[ODIV-76]
_ = x[OMOD-77]
_ = x[OLSH-78]
_ = x[ORSH-79]
_ = x[OAND-80]
_ = x[OANDNOT-81]
_ = x[ONEW-82]
_ = x[ONEWOBJ-83]
_ = x[ONOT-84]
_ = x[OBITNOT-85]
_ = x[OPLUS-86]
_ = x[ONEG-87]
_ = x[OOROR-88]
_ = x[OPANIC-89]
_ = x[OPRINT-90]
_ = x[OPRINTN-91]
_ = x[OPAREN-92]
_ = x[OSEND-93]
_ = x[OSLICE-94]
_ = x[OSLICEARR-95]
_ = x[OSLICESTR-96]
_ = x[OSLICE3-97]
_ = x[OSLICE3ARR-98]
_ = x[OSLICEHEADER-99]
_ = x[ORECOVER-100]
_ = x[ORECV-101]
_ = x[ORUNESTR-102]
_ = x[OSELRECV-103]
_ = x[OSELRECV2-104]
_ = x[OIOTA-105]
_ = x[OREAL-106]
_ = x[OIMAG-107]
_ = x[OCOMPLEX-108]
_ = x[OALIGNOF-109]
_ = x[OOFFSETOF-110]
_ = x[OSIZEOF-111]
_ = x[OBLOCK-112]
_ = x[OBREAK-113]
_ = x[OCASE-114]
_ = x[OCONTINUE-115]
_ = x[ODEFER-116]
_ = x[OEMPTY-117]
_ = x[OFALL-118]
_ = x[OFOR-119]
_ = x[OFORUNTIL-120]
_ = x[OGOTO-121]
_ = x[OIF-122]
_ = x[OLABEL-123]
_ = x[OGO-124]
_ = x[ORANGE-125]
_ = x[ORETURN-126]
_ = x[OSELECT-127]
_ = x[OSWITCH-128]
_ = x[OTYPESW-129]
_ = x[OTCHAN-130]
_ = x[OTMAP-131]
_ = x[OTSTRUCT-132]
_ = x[OTINTER-133]
_ = x[OTFUNC-134]
_ = x[OTARRAY-135]
_ = x[ODDD-136]
_ = x[OINLCALL-137]
_ = x[OEFACE-138]
_ = x[OITAB-139]
_ = x[OIDATA-140]
_ = x[OSPTR-141]
_ = x[OCLOSUREVAR-142]
_ = x[OCFUNC-143]
_ = x[OCHECKNIL-144]
_ = x[OVARDEF-145]
_ = x[OVARKILL-146]
_ = x[OVARLIVE-147]
_ = x[ORESULT-148]
_ = x[OINLMARK-149]
_ = x[ORETJMP-150]
_ = x[OGETG-151]
_ = x[OEND-152]
}
const _Op_name = "XXXNAMENONAMETYPEPACKLITERALADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND"
var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 36, 39, 45, 49, 55, 61, 70, 82, 91, 100, 112, 121, 123, 126, 136, 143, 150, 157, 161, 165, 173, 181, 190, 198, 201, 206, 213, 220, 226, 235, 243, 251, 257, 261, 270, 277, 281, 284, 291, 299, 307, 314, 320, 323, 329, 336, 344, 348, 355, 363, 365, 367, 369, 371, 373, 375, 380, 385, 393, 396, 405, 408, 412, 420, 427, 436, 449, 452, 455, 458, 461, 464, 467, 473, 476, 482, 485, 491, 495, 498, 502, 507, 512, 518, 523, 527, 532, 540, 548, 554, 563, 574, 581, 585, 592, 599, 607, 611, 615, 619, 626, 633, 641, 647, 652, 657, 661, 669, 674, 679, 683, 686, 694, 698, 700, 705, 707, 712, 718, 724, 730, 736, 741, 745, 752, 758, 763, 769, 772, 779, 784, 788, 793, 797, 807, 812, 820, 826, 833, 840, 846, 853, 859, 863, 866}
func (i Op) String() string {
if i >= Op(len(_Op_index)-1) {
return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Op_name[_Op_index[i]:_Op_index[i+1]]
}
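
// The generated table above packs every name into one string (_Op_name) and
// records each name's start offset in _Op_index, so String is a substring
// slice rather than a []string lookup. A tiny hand-written equivalent of the
// same stringer technique:
package main

import (
	"fmt"
	"strconv"
)

type Color int

const (
	Red Color = iota
	Green
	Blue
)

const _Color_name = "RedGreenBlue"

var _Color_index = [...]uint8{0, 3, 8, 12}

func (c Color) String() string {
	if c < 0 || int(c) >= len(_Color_index)-1 {
		return "Color(" + strconv.FormatInt(int64(c), 10) + ")"
	}
	return _Color_name[_Color_index[c]:_Color_index[c+1]]
}

func main() {
	fmt.Println(Green, Color(7)) // Green Color(7)
}
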

File diff suppressed because it is too large.

@@ -5,6 +5,8 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/dwarf"
@@ -22,35 +24,34 @@ import (
// "Portable" code generation.
var (
nBackendWorkers int // number of concurrent backend workers, set by a compiler flag
compilequeue []*Node // functions waiting to be compiled
compilequeue []ir.Node // functions waiting to be compiled
)
func emitptrargsmap(fn *Node) {
if fn.funcname() == "_" || fn.Func.Nname.Sym.Linkname != "" {
func emitptrargsmap(fn ir.Node) {
if ir.FuncName(fn) == "_" || fn.Func().Nname.Sym().Linkname != "" {
return
}
lsym := Ctxt.Lookup(fn.Func.lsym.Name + ".args_stackmap")
lsym := base.Ctxt.Lookup(fn.Func().LSym.Name + ".args_stackmap")
nptr := int(fn.Type.ArgWidth() / int64(Widthptr))
nptr := int(fn.Type().ArgWidth() / int64(Widthptr))
bv := bvalloc(int32(nptr) * 2)
nbitmap := 1
if fn.Type.NumResults() > 0 {
if fn.Type().NumResults() > 0 {
nbitmap = 2
}
off := duint32(lsym, 0, uint32(nbitmap))
off = duint32(lsym, off, uint32(bv.n))
if fn.IsMethod() {
onebitwalktype1(fn.Type.Recvs(), 0, bv)
if ir.IsMethod(fn) {
onebitwalktype1(fn.Type().Recvs(), 0, bv)
}
if fn.Type.NumParams() > 0 {
onebitwalktype1(fn.Type.Params(), 0, bv)
if fn.Type().NumParams() > 0 {
onebitwalktype1(fn.Type().Params(), 0, bv)
}
off = dbvec(lsym, off, bv)
if fn.Type.NumResults() > 0 {
onebitwalktype1(fn.Type.Results(), 0, bv)
if fn.Type().NumResults() > 0 {
onebitwalktype1(fn.Type().Results(), 0, bv)
off = dbvec(lsym, off, bv)
}
@@ -67,40 +68,40 @@ func emitptrargsmap(fn *Node) {
// really means, in memory, things with pointers needing zeroing at
// the top of the stack and increasing in size.
// Non-autos sort on offset.
func cmpstackvarlt(a, b *Node) bool {
if (a.Class() == PAUTO) != (b.Class() == PAUTO) {
return b.Class() == PAUTO
func cmpstackvarlt(a, b ir.Node) bool {
if (a.Class() == ir.PAUTO) != (b.Class() == ir.PAUTO) {
return b.Class() == ir.PAUTO
}
if a.Class() != PAUTO {
return a.Xoffset < b.Xoffset
if a.Class() != ir.PAUTO {
return a.Offset() < b.Offset()
}
if a.Name.Used() != b.Name.Used() {
return a.Name.Used()
if a.Name().Used() != b.Name().Used() {
return a.Name().Used()
}
ap := a.Type.HasPointers()
bp := b.Type.HasPointers()
ap := a.Type().HasPointers()
bp := b.Type().HasPointers()
if ap != bp {
return ap
}
ap = a.Name.Needzero()
bp = b.Name.Needzero()
ap = a.Name().Needzero()
bp = b.Name().Needzero()
if ap != bp {
return ap
}
if a.Type.Width != b.Type.Width {
return a.Type.Width > b.Type.Width
if a.Type().Width != b.Type().Width {
return a.Type().Width > b.Type().Width
}
return a.Sym.Name < b.Sym.Name
return a.Sym().Name < b.Sym().Name
}
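
// cmpstackvarlt is a chain of tie-breakers; each key either decides the order
// or falls through to the next. The same shape in a standalone toy (stackVar
// and its fields are invented; the real function also keys on frame offset
// for non-autos and on needzero):
package main

import (
	"fmt"
	"sort"
)

type stackVar struct {
	auto   bool // autos sort after all non-autos
	used   bool
	hasPtr bool
	width  int64
	name   string
}

func less(a, b stackVar) bool {
	if a.auto != b.auto {
		return b.auto // non-autos first
	}
	if a.used != b.used {
		return a.used // used before unused
	}
	if a.hasPtr != b.hasPtr {
		return a.hasPtr // pointer-containing before pointer-free
	}
	if a.width != b.width {
		return a.width > b.width // wider first
	}
	return a.name < b.name // name as the deterministic final tie-break
}

func main() {
	vars := []stackVar{
		{auto: true, used: true, width: 8, name: "x"},
		{auto: true, used: true, hasPtr: true, width: 8, name: "p"},
		{auto: true, width: 16, name: "dead"},
	}
	sort.Slice(vars, func(i, j int) bool { return less(vars[i], vars[j]) })
	fmt.Println(vars) // p (has pointers), then x, then the unused "dead"
}
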
// byStackVar implements sort.Interface for []*Node using cmpstackvarlt.
type byStackVar []*Node
type byStackVar []ir.Node
func (s byStackVar) Len() int { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
@@ -109,33 +110,33 @@ func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s *ssafn) AllocFrame(f *ssa.Func) {
s.stksize = 0
s.stkptrsize = 0
fn := s.curfn.Func
fn := s.curfn.Func()
// Mark the PAUTO's unused.
for _, ln := range fn.Dcl {
if ln.Class() == PAUTO {
ln.Name.SetUsed(false)
if ln.Class() == ir.PAUTO {
ln.Name().SetUsed(false)
}
}
for _, l := range f.RegAlloc {
if ls, ok := l.(ssa.LocalSlot); ok {
ls.N.(*Node).Name.SetUsed(true)
ls.N.Name().SetUsed(true)
}
}
scratchUsed := false
for _, b := range f.Blocks {
for _, v := range b.Values {
if n, ok := v.Aux.(*Node); ok {
if n, ok := v.Aux.(ir.Node); ok {
switch n.Class() {
case PPARAM, PPARAMOUT:
case ir.PPARAM, ir.PPARAMOUT:
// Don't modify nodfp; it is a global.
if n != nodfp {
n.Name.SetUsed(true)
n.Name().SetUsed(true)
}
case PAUTO:
n.Name.SetUsed(true)
case ir.PAUTO:
n.Name().SetUsed(true)
}
}
if !scratchUsed {
@@ -146,7 +147,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
}
if f.Config.NeedsFpScratch && scratchUsed {
s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[TUINT64])
s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[types.TUINT64])
}
sort.Sort(byStackVar(fn.Dcl))
@@ -154,18 +155,18 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
// Reassign stack offsets of the locals that are used.
lastHasPtr := false
for i, n := range fn.Dcl {
if n.Op != ONAME || n.Class() != PAUTO {
if n.Op() != ir.ONAME || n.Class() != ir.PAUTO {
continue
}
if !n.Name.Used() {
if !n.Name().Used() {
fn.Dcl = fn.Dcl[:i]
break
}
dowidth(n.Type)
w := n.Type.Width
dowidth(n.Type())
w := n.Type().Width
if w >= thearch.MAXWIDTH || w < 0 {
Fatalf("bad width")
base.Fatalf("bad width")
}
if w == 0 && lastHasPtr {
// Pad between a pointer-containing object and a zero-sized object.
@@ -175,8 +176,8 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
w = 1
}
s.stksize += w
s.stksize = Rnd(s.stksize, int64(n.Type.Align))
if n.Type.HasPointers() {
s.stksize = Rnd(s.stksize, int64(n.Type().Align))
if n.Type().HasPointers() {
s.stkptrsize = s.stksize
lastHasPtr = true
} else {
@@ -185,59 +186,58 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
s.stksize = Rnd(s.stksize, int64(Widthptr))
}
n.Xoffset = -s.stksize
n.SetOffset(-s.stksize)
}
s.stksize = Rnd(s.stksize, int64(Widthreg))
s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
}
func funccompile(fn *Node) {
func funccompile(fn ir.Node) {
if Curfn != nil {
Fatalf("funccompile %v inside %v", fn.Func.Nname.Sym, Curfn.Func.Nname.Sym)
base.Fatalf("funccompile %v inside %v", fn.Func().Nname.Sym(), Curfn.Func().Nname.Sym())
}
if fn.Type == nil {
if nerrors == 0 {
Fatalf("funccompile missing type")
if fn.Type() == nil {
if base.Errors() == 0 {
base.Fatalf("funccompile missing type")
}
return
}
// assign parameter offsets
dowidth(fn.Type)
dowidth(fn.Type())
if fn.Nbody.Len() == 0 {
if fn.Body().Len() == 0 {
// Initialize ABI wrappers if necessary.
fn.Func.initLSym(false)
initLSym(fn.Func(), false)
emitptrargsmap(fn)
return
}
dclcontext = PAUTO
dclcontext = ir.PAUTO
Curfn = fn
compile(fn)
Curfn = nil
dclcontext = PEXTERN
dclcontext = ir.PEXTERN
}
func compile(fn *Node) {
saveerrors()
func compile(fn ir.Node) {
errorsBefore := base.Errors()
order(fn)
if nerrors != 0 {
if base.Errors() > errorsBefore {
return
}
// Set up the function's LSym early to avoid data races with the assemblers.
// Do this before walk, as walk needs the LSym to set attributes/relocations
// (e.g. in markTypeUsedInInterface).
fn.Func.initLSym(true)
initLSym(fn.Func(), true)
walk(fn)
if nerrors != 0 {
if base.Errors() > errorsBefore {
return
}
if instrumenting {
@ -247,7 +247,7 @@ func compile(fn *Node) {
// From this point, there should be no uses of Curfn. Enforce that.
Curfn = nil
if fn.funcname() == "_" {
if ir.FuncName(fn) == "_" {
// We don't need to generate code for this function, just report errors in its body.
// At this point we've generated any errors needed.
// (Beyond here we generate only non-spec errors, like "stack frame too large".)
@ -259,15 +259,15 @@ func compile(fn *Node) {
// be types of stack objects. We need to do this here
// because symbols must be allocated before the parallel
// phase of the compiler.
for _, n := range fn.Func.Dcl {
for _, n := range fn.Func().Dcl {
switch n.Class() {
case PPARAM, PPARAMOUT, PAUTO:
if livenessShouldTrack(n) && n.Name.Addrtaken() {
dtypesym(n.Type)
case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO:
if livenessShouldTrack(n) && n.Name().Addrtaken() {
dtypesym(n.Type())
// Also make sure we allocate a linker symbol
// for the stack object data, for the same reason.
if fn.Func.lsym.Func().StackObjects == nil {
fn.Func.lsym.Func().StackObjects = Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj")
if fn.Func().LSym.Func().StackObjects == nil {
fn.Func().LSym.Func().StackObjects = base.Ctxt.Lookup(fn.Func().LSym.Name + ".stkobj")
}
}
}
@ -284,29 +284,29 @@ func compile(fn *Node) {
// If functions are not compiled immediately,
// they are enqueued in compilequeue,
// which is drained by compileFunctions.
func compilenow(fn *Node) bool {
func compilenow(fn ir.Node) bool {
// Issue 38068: if this function is a method AND an inline
// candidate AND was not inlined (yet), put it onto the compile
// queue instead of compiling it immediately. This is in case we
// wind up inlining it into a method wrapper that is generated by
// compiling a function later on in the xtop list.
if fn.IsMethod() && isInlinableButNotInlined(fn) {
if ir.IsMethod(fn) && isInlinableButNotInlined(fn) {
return false
}
return nBackendWorkers == 1 && Debug_compilelater == 0
return base.Flag.LowerC == 1 && base.Debug.CompileLater == 0
}
// isInlinableButNotInlined returns true if 'fn' was marked as an
// inline candidate but then never inlined (presumably because we
// found no call sites).
func isInlinableButNotInlined(fn *Node) bool {
if fn.Func.Nname.Func.Inl == nil {
func isInlinableButNotInlined(fn ir.Node) bool {
if fn.Func().Nname.Func().Inl == nil {
return false
}
if fn.Sym == nil {
if fn.Sym() == nil {
return true
}
return !fn.Sym.Linksym().WasInlined()
return !fn.Sym().Linksym().WasInlined()
}
const maxStackSize = 1 << 30
@ -315,12 +315,12 @@ const maxStackSize = 1 << 30
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
func compileSSA(fn *Node, worker int) {
func compileSSA(fn ir.Node, worker int) {
f := buildssa(fn, worker)
// Note: check arg size to fix issue 25507.
if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type.ArgWidth() >= maxStackSize {
if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize {
largeStackFramesMu.Lock()
largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type.ArgWidth(), pos: fn.Pos})
largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type().ArgWidth(), pos: fn.Pos()})
largeStackFramesMu.Unlock()
return
}
@ -336,14 +336,14 @@ func compileSSA(fn *Node, worker int) {
if pp.Text.To.Offset >= maxStackSize {
largeStackFramesMu.Lock()
locals := f.Frontend().(*ssafn).stksize
largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos})
largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type().ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()})
largeStackFramesMu.Unlock()
return
}
pp.Flush() // assemble, fill in boilerplate, etc.
// fieldtrack must be called after pp.Flush. See issue 20014.
fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack)
fieldtrack(pp.Text.From.Sym, fn.Func().FieldTrack)
}
func init() {
@ -360,7 +360,7 @@ func compileFunctions() {
sizeCalculationDisabled = true // not safe to calculate sizes concurrently
if race.Enabled {
// Randomize compilation order to try to shake out races.
tmp := make([]*Node, len(compilequeue))
tmp := make([]ir.Node, len(compilequeue))
perm := rand.Perm(len(compilequeue))
for i, v := range perm {
tmp[v] = compilequeue[i]
@ -371,13 +371,13 @@ func compileFunctions() {
// since they're most likely to be the slowest.
// This helps avoid stragglers.
sort.Slice(compilequeue, func(i, j int) bool {
return compilequeue[i].Nbody.Len() > compilequeue[j].Nbody.Len()
return compilequeue[i].Body().Len() > compilequeue[j].Body().Len()
})
}
var wg sync.WaitGroup
Ctxt.InParallel = true
c := make(chan *Node, nBackendWorkers)
for i := 0; i < nBackendWorkers; i++ {
base.Ctxt.InParallel = true
c := make(chan ir.Node, base.Flag.LowerC)
for i := 0; i < base.Flag.LowerC; i++ {
wg.Add(1)
go func(worker int) {
for fn := range c {
@ -392,46 +392,75 @@ func compileFunctions() {
close(c)
compilequeue = nil
wg.Wait()
Ctxt.InParallel = false
base.Ctxt.InParallel = false
sizeCalculationDisabled = false
}
}
func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
fn := curfn.(*Node)
if fn.Func.Nname != nil {
if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
fn := curfn.(ir.Node)
if fn.Func().Nname != nil {
if expect := fn.Func().Nname.Sym().Linksym(); fnsym != expect {
base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
}
}
var apdecls []*Node
// Back when there were two different *Funcs for a function, this code
// was not consistent about whether a particular *Node being processed
// was an ODCLFUNC or ONAME node. Partly this is because inlined function
// bodies have no ODCLFUNC node, which was its own inconsistency.
// In any event, the handling of the two different nodes for DWARF purposes
// was subtly different, likely in unintended ways. CL 272253 merged the
// two nodes' Func fields, so that code sees the same *Func whether it is
// holding the ODCLFUNC or the ONAME. This resulted in changes in the
// DWARF output. To preserve the existing DWARF output and leave an
// intentional change for a future CL, this code does the following when
// fn.Op == ONAME:
//
// 1. Disallow use of createComplexVars in createDwarfVars.
// It was not possible to reach that code for an ONAME before,
// because the DebugInfo was set only on the ODCLFUNC Func.
// Calling into it in the ONAME case causes an index out of bounds panic.
//
// 2. Do not populate apdecls. fn.Func.Dcl was in the ODCLFUNC Func,
// not the ONAME Func. Populating apdecls for the ONAME case results
// in selected being populated after createSimpleVars is called in
// createDwarfVars, and then that causes the loop to skip all the entries
// in dcl, meaning that the RecordAutoType calls don't happen.
//
// These two adjustments keep toolstash -cmp working for now.
// Deciding the right answer is, as they say, future work.
isODCLFUNC := fn.Op() == ir.ODCLFUNC
var apdecls []ir.Node
// Populate decls for fn.
for _, n := range fn.Func.Dcl {
if n.Op != ONAME { // might be OTYPE or OLITERAL
if isODCLFUNC {
for _, n := range fn.Func().Dcl {
if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL
continue
}
switch n.Class() {
case PAUTO:
if !n.Name.Used() {
case ir.PAUTO:
if !n.Name().Used() {
// Text == nil -> generating abstract function
if fnsym.Func().Text != nil {
Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
base.Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
}
continue
}
case PPARAM, PPARAMOUT:
case ir.PPARAM, ir.PPARAMOUT:
default:
continue
}
apdecls = append(apdecls, n)
fnsym.Func().RecordAutoType(ngotype(n).Linksym())
}
}
decls, dwarfVars := createDwarfVars(fnsym, fn.Func, apdecls)
decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn.Func(), apdecls)
// For each type referenced by the function's auto vars but not
// already referenced by a dwarf var, attach a dummy relocation to
// already referenced by a dwarf var, attach an R_USETYPE relocation to
// the function symbol to ensure that the type is included in DWARF
// processing during linking.
typesyms := []*obj.LSym{}
@ -446,22 +475,22 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S
}
fnsym.Func().Autot = nil
var varScopes []ScopeID
var varScopes []ir.ScopeID
for _, decl := range decls {
pos := declPos(decl)
varScopes = append(varScopes, findScope(fn.Func.Marks, pos))
varScopes = append(varScopes, findScope(fn.Func().Marks, pos))
}
scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
var inlcalls dwarf.InlCalls
if genDwarfInline > 0 {
if base.Flag.GenDwarfInl > 0 {
inlcalls = assembleInlines(fnsym, dwarfVars)
}
return scopes, inlcalls
}
func declPos(decl *Node) src.XPos {
if decl.Name.Defn != nil && (decl.Name.Captured() || decl.Name.Byval()) {
func declPos(decl ir.Node) src.XPos {
if decl.Name().Defn != nil && (decl.Name().Captured() || decl.Name().Byval()) {
// It's not clear which position is correct for captured variables here:
// * decl.Pos is the wrong position for captured variables, in the inner
// function, but it is the right position in the outer function.
@ -476,19 +505,19 @@ func declPos(decl *Node) src.XPos {
// case statement.
// This code is probably wrong for type switch variables that are also
// captured.
return decl.Name.Defn.Pos
return decl.Name().Defn.Pos()
}
return decl.Pos
return decl.Pos()
}
// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
func createSimpleVars(fnsym *obj.LSym, apDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
func createSimpleVars(fnsym *obj.LSym, apDecls []ir.Node) ([]ir.Node, []*dwarf.Var, map[ir.Node]bool) {
var vars []*dwarf.Var
var decls []*Node
selected := make(map[*Node]bool)
var decls []ir.Node
selected := make(map[ir.Node]bool)
for _, n := range apDecls {
if n.IsAutoTmp() {
if ir.IsAutoTmp(n) {
continue
}
@ -499,14 +528,14 @@ func createSimpleVars(fnsym *obj.LSym, apDecls []*Node) ([]*Node, []*dwarf.Var,
return decls, vars, selected
}
func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
func createSimpleVar(fnsym *obj.LSym, n ir.Node) *dwarf.Var {
var abbrev int
offs := n.Xoffset
offs := n.Offset()
switch n.Class() {
case PAUTO:
case ir.PAUTO:
abbrev = dwarf.DW_ABRV_AUTO
if Ctxt.FixedFrameSize() == 0 {
if base.Ctxt.FixedFrameSize() == 0 {
offs -= int64(Widthptr)
}
if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
@ -514,32 +543,32 @@ func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
offs -= int64(Widthptr)
}
case PPARAM, PPARAMOUT:
case ir.PPARAM, ir.PPARAMOUT:
abbrev = dwarf.DW_ABRV_PARAM
offs += Ctxt.FixedFrameSize()
offs += base.Ctxt.FixedFrameSize()
default:
Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
}
typename := dwarf.InfoPrefix + typesymname(n.Type)
typename := dwarf.InfoPrefix + typesymname(n.Type())
delete(fnsym.Func().Autot, ngotype(n).Linksym())
inlIndex := 0
if genDwarfInline > 1 {
if n.Name.InlFormal() || n.Name.InlLocal() {
inlIndex = posInlIndex(n.Pos) + 1
if n.Name.InlFormal() {
if base.Flag.GenDwarfInl > 1 {
if n.Name().InlFormal() || n.Name().InlLocal() {
inlIndex = posInlIndex(n.Pos()) + 1
if n.Name().InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM
}
}
}
declpos := Ctxt.InnermostPos(declPos(n))
declpos := base.Ctxt.InnermostPos(declPos(n))
return &dwarf.Var{
Name: n.Sym.Name,
IsReturnValue: n.Class() == PPARAMOUT,
IsInlFormal: n.Name.InlFormal(),
Name: n.Sym().Name,
IsReturnValue: n.Class() == ir.PPARAMOUT,
IsInlFormal: n.Name().InlFormal(),
Abbrev: abbrev,
StackOffset: int32(offs),
Type: Ctxt.Lookup(typename),
Type: base.Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
@ -550,19 +579,19 @@ func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
// createComplexVars creates recomposed DWARF vars with location lists,
// suitable for describing optimized code.
func createComplexVars(fnsym *obj.LSym, fn *Func) ([]*Node, []*dwarf.Var, map[*Node]bool) {
debugInfo := fn.DebugInfo
func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]ir.Node, []*dwarf.Var, map[ir.Node]bool) {
debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
// Produce a DWARF variable entry for each user variable.
var decls []*Node
var decls []ir.Node
var vars []*dwarf.Var
ssaVars := make(map[*Node]bool)
ssaVars := make(map[ir.Node]bool)
for varID, dvar := range debugInfo.Vars {
n := dvar.(*Node)
n := dvar
ssaVars[n] = true
for _, slot := range debugInfo.VarSlots[varID] {
ssaVars[debugInfo.Slots[slot].N.(*Node)] = true
ssaVars[debugInfo.Slots[slot].N] = true
}
if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
@ -576,12 +605,12 @@ func createComplexVars(fnsym *obj.LSym, fn *Func) ([]*Node, []*dwarf.Var, map[*N
// createDwarfVars process fn, returning a list of DWARF variables and the
// Nodes they represent.
func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dwarf.Var) {
func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []ir.Node) ([]ir.Node, []*dwarf.Var) {
// Collect a raw list of DWARF vars.
var vars []*dwarf.Var
var decls []*Node
var selected map[*Node]bool
if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && fn.DebugInfo != nil {
var decls []ir.Node
var selected map[ir.Node]bool
if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
decls, vars, selected = createComplexVars(fnsym, fn)
} else {
decls, vars, selected = createSimpleVars(fnsym, apDecls)
@ -608,11 +637,11 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
if _, found := selected[n]; found {
continue
}
c := n.Sym.Name[0]
if c == '.' || n.Type.IsUntyped() {
c := n.Sym().Name[0]
if c == '.' || n.Type().IsUntyped() {
continue
}
if n.Class() == PPARAM && !canSSAType(n.Type) {
if n.Class() == ir.PPARAM && !canSSAType(n.Type()) {
// SSA-able args get location lists, and may move in and
// out of registers, so those are handled elsewhere.
// Autos and named output params seem to get handled
@ -624,13 +653,13 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
decls = append(decls, n)
continue
}
typename := dwarf.InfoPrefix + typesymname(n.Type)
typename := dwarf.InfoPrefix + typesymname(n.Type())
decls = append(decls, n)
abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
isReturnValue := (n.Class() == PPARAMOUT)
if n.Class() == PPARAM || n.Class() == PPARAMOUT {
isReturnValue := (n.Class() == ir.PPARAMOUT)
if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
} else if n.Class() == PAUTOHEAP {
} else if n.Class() == ir.PAUTOHEAP {
// If dcl in question has been promoted to heap, do a bit
// of extra work to recover original class (auto or param);
// see issue 30908. This ensures that we get the proper
@ -638,28 +667,28 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
// misleading location for the param (we want pointer-to-heap
// and not stack).
// TODO(thanm): generate a better location expression
stackcopy := n.Name.Param.Stackcopy
if stackcopy != nil && (stackcopy.Class() == PPARAM || stackcopy.Class() == PPARAMOUT) {
stackcopy := n.Name().Param.Stackcopy
if stackcopy != nil && (stackcopy.Class() == ir.PPARAM || stackcopy.Class() == ir.PPARAMOUT) {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
isReturnValue = (stackcopy.Class() == PPARAMOUT)
isReturnValue = (stackcopy.Class() == ir.PPARAMOUT)
}
}
inlIndex := 0
if genDwarfInline > 1 {
if n.Name.InlFormal() || n.Name.InlLocal() {
inlIndex = posInlIndex(n.Pos) + 1
if n.Name.InlFormal() {
if base.Flag.GenDwarfInl > 1 {
if n.Name().InlFormal() || n.Name().InlLocal() {
inlIndex = posInlIndex(n.Pos()) + 1
if n.Name().InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
}
declpos := Ctxt.InnermostPos(n.Pos)
declpos := base.Ctxt.InnermostPos(n.Pos())
vars = append(vars, &dwarf.Var{
Name: n.Sym.Name,
Name: n.Sym().Name,
IsReturnValue: isReturnValue,
Abbrev: abbrev,
StackOffset: int32(n.Xoffset),
Type: Ctxt.Lookup(typename),
StackOffset: int32(n.Offset()),
Type: base.Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
@ -679,14 +708,14 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
// function that is not local to the package being compiled, then the
// names of the variables may have been "versioned" to avoid conflicts
// with local vars; disregard this versioning when sorting.
func preInliningDcls(fnsym *obj.LSym) []*Node {
fn := Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*Node)
var rdcl []*Node
for _, n := range fn.Func.Inl.Dcl {
c := n.Sym.Name[0]
func preInliningDcls(fnsym *obj.LSym) []ir.Node {
fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(ir.Node)
var rdcl []ir.Node
for _, n := range fn.Func().Inl.Dcl {
c := n.Sym().Name[0]
// Avoid reporting "_" parameters, since if there are more than
// one, it can result in a collision later on, as in #23179.
if unversion(n.Sym.Name) == "_" || c == '.' || n.Type.IsUntyped() {
if unversion(n.Sym().Name) == "_" || c == '.' || n.Type().IsUntyped() {
continue
}
rdcl = append(rdcl, n)
@ -698,33 +727,33 @@ func preInliningDcls(fnsym *obj.LSym) []*Node {
// stack pointer, suitable for use in a DWARF location entry. This has nothing
// to do with its offset in the user variable.
func stackOffset(slot ssa.LocalSlot) int32 {
n := slot.N.(*Node)
var base int64
n := slot.N
var off int64
switch n.Class() {
case PAUTO:
if Ctxt.FixedFrameSize() == 0 {
base -= int64(Widthptr)
case ir.PAUTO:
if base.Ctxt.FixedFrameSize() == 0 {
off -= int64(Widthptr)
}
if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
// There is a word space for FP on ARM64 even if the frame pointer is disabled
base -= int64(Widthptr)
off -= int64(Widthptr)
}
case PPARAM, PPARAMOUT:
base += Ctxt.FixedFrameSize()
case ir.PPARAM, ir.PPARAMOUT:
off += base.Ctxt.FixedFrameSize()
}
return int32(base + n.Xoffset + slot.Off)
return int32(off + n.Offset() + slot.Off)
}
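
// Editor's note: a minimal sketch of the computation above, assuming a
// 64-bit target (Widthptr = 8), Ctxt.FixedFrameSize() == 0, and the frame
// pointer enabled. The names here are hypothetical, for illustration only.
func exampleStackOffset(varOffset, slotOff int64) int32 {
	const widthPtr = 8
	off := int64(0)
	off -= widthPtr // PAUTO: skip the return-address word when FixedFrameSize is 0
	off -= widthPtr // skip the saved frame-pointer word
	return int32(off + varOffset + slotOff)
}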
// createComplexVar builds a single DWARF variable entry and location list.
func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
debug := fn.DebugInfo
n := debug.Vars[varID].(*Node)
func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var {
debug := fn.DebugInfo.(*ssa.FuncDebug)
n := debug.Vars[varID]
var abbrev int
switch n.Class() {
case PAUTO:
case ir.PAUTO:
abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
case PPARAM, PPARAMOUT:
case ir.PPARAM, ir.PPARAMOUT:
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
default:
return nil
@ -734,21 +763,21 @@ func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
delete(fnsym.Func().Autot, gotype)
typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
inlIndex := 0
if genDwarfInline > 1 {
if n.Name.InlFormal() || n.Name.InlLocal() {
inlIndex = posInlIndex(n.Pos) + 1
if n.Name.InlFormal() {
if base.Flag.GenDwarfInl > 1 {
if n.Name().InlFormal() || n.Name().InlLocal() {
inlIndex = posInlIndex(n.Pos()) + 1
if n.Name().InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
}
declpos := Ctxt.InnermostPos(n.Pos)
declpos := base.Ctxt.InnermostPos(n.Pos())
dvar := &dwarf.Var{
Name: n.Sym.Name,
IsReturnValue: n.Class() == PPARAMOUT,
IsInlFormal: n.Name.InlFormal(),
Name: n.Sym().Name,
IsReturnValue: n.Class() == ir.PPARAMOUT,
IsInlFormal: n.Name().InlFormal(),
Abbrev: abbrev,
Type: Ctxt.Lookup(typename),
Type: base.Ctxt.Lookup(typename),
// The stack offset is used as a sorting key, so for decomposed
// variables just give it the first one. It's not used otherwise.
// This won't work well if the first slot hasn't been assigned a stack
@ -763,7 +792,7 @@ func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
list := debug.LocationLists[varID]
if len(list) != 0 {
dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
debug.PutLocationList(list, Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
debug.PutLocationList(list, base.Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
}
}
return dvar

View file

@ -5,6 +5,7 @@
package gc
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"reflect"
"sort"
@ -12,129 +13,133 @@ import (
)
func typeWithoutPointers() *types.Type {
t := types.New(TSTRUCT)
f := &types.Field{Type: types.New(TINT)}
t := types.New(types.TSTRUCT)
f := &types.Field{Type: types.New(types.TINT)}
t.SetFields([]*types.Field{f})
return t
}
func typeWithPointers() *types.Type {
t := types.New(TSTRUCT)
f := &types.Field{Type: types.NewPtr(types.New(TINT))}
t := types.New(types.TSTRUCT)
f := &types.Field{Type: types.NewPtr(types.New(types.TINT))}
t.SetFields([]*types.Field{f})
return t
}
func markUsed(n *Node) *Node {
n.Name.SetUsed(true)
func markUsed(n ir.Node) ir.Node {
n.Name().SetUsed(true)
return n
}
func markNeedZero(n *Node) *Node {
n.Name.SetNeedzero(true)
func markNeedZero(n ir.Node) ir.Node {
n.Name().SetNeedzero(true)
return n
}
func nodeWithClass(n Node, c Class) *Node {
n.SetClass(c)
n.Name = new(Name)
return &n
}
// Test all code paths for cmpstackvarlt.
func TestCmpstackvar(t *testing.T) {
nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) ir.Node {
if s == nil {
s = &types.Sym{Name: "."}
}
n := NewName(s)
n.SetType(t)
n.SetOffset(xoffset)
n.SetClass(cl)
return n
}
testdata := []struct {
a, b *Node
a, b ir.Node
lt bool
}{
{
nodeWithClass(Node{}, PAUTO),
nodeWithClass(Node{}, PFUNC),
nod(0, nil, nil, ir.PAUTO),
nod(0, nil, nil, ir.PFUNC),
false,
},
{
nodeWithClass(Node{}, PFUNC),
nodeWithClass(Node{}, PAUTO),
nod(0, nil, nil, ir.PFUNC),
nod(0, nil, nil, ir.PAUTO),
true,
},
{
nodeWithClass(Node{Xoffset: 0}, PFUNC),
nodeWithClass(Node{Xoffset: 10}, PFUNC),
nod(0, nil, nil, ir.PFUNC),
nod(10, nil, nil, ir.PFUNC),
true,
},
{
nodeWithClass(Node{Xoffset: 20}, PFUNC),
nodeWithClass(Node{Xoffset: 10}, PFUNC),
nod(20, nil, nil, ir.PFUNC),
nod(10, nil, nil, ir.PFUNC),
false,
},
{
nodeWithClass(Node{Xoffset: 10}, PFUNC),
nodeWithClass(Node{Xoffset: 10}, PFUNC),
nod(10, nil, nil, ir.PFUNC),
nod(10, nil, nil, ir.PFUNC),
false,
},
{
nodeWithClass(Node{Xoffset: 10}, PPARAM),
nodeWithClass(Node{Xoffset: 20}, PPARAMOUT),
nod(10, nil, nil, ir.PPARAM),
nod(20, nil, nil, ir.PPARAMOUT),
true,
},
{
nodeWithClass(Node{Xoffset: 10}, PPARAMOUT),
nodeWithClass(Node{Xoffset: 20}, PPARAM),
nod(10, nil, nil, ir.PPARAMOUT),
nod(20, nil, nil, ir.PPARAM),
true,
},
{
markUsed(nodeWithClass(Node{}, PAUTO)),
nodeWithClass(Node{}, PAUTO),
markUsed(nod(0, nil, nil, ir.PAUTO)),
nod(0, nil, nil, ir.PAUTO),
true,
},
{
nodeWithClass(Node{}, PAUTO),
markUsed(nodeWithClass(Node{}, PAUTO)),
nod(0, nil, nil, ir.PAUTO),
markUsed(nod(0, nil, nil, ir.PAUTO)),
false,
},
{
nodeWithClass(Node{Type: typeWithoutPointers()}, PAUTO),
nodeWithClass(Node{Type: typeWithPointers()}, PAUTO),
nod(0, typeWithoutPointers(), nil, ir.PAUTO),
nod(0, typeWithPointers(), nil, ir.PAUTO),
false,
},
{
nodeWithClass(Node{Type: typeWithPointers()}, PAUTO),
nodeWithClass(Node{Type: typeWithoutPointers()}, PAUTO),
nod(0, typeWithPointers(), nil, ir.PAUTO),
nod(0, typeWithoutPointers(), nil, ir.PAUTO),
true,
},
{
markNeedZero(nodeWithClass(Node{Type: &types.Type{}}, PAUTO)),
nodeWithClass(Node{Type: &types.Type{}, Name: &Name{}}, PAUTO),
markNeedZero(nod(0, &types.Type{}, nil, ir.PAUTO)),
nod(0, &types.Type{}, nil, ir.PAUTO),
true,
},
{
nodeWithClass(Node{Type: &types.Type{}, Name: &Name{}}, PAUTO),
markNeedZero(nodeWithClass(Node{Type: &types.Type{}}, PAUTO)),
nod(0, &types.Type{}, nil, ir.PAUTO),
markNeedZero(nod(0, &types.Type{}, nil, ir.PAUTO)),
false,
},
{
nodeWithClass(Node{Type: &types.Type{Width: 1}, Name: &Name{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{Width: 2}, Name: &Name{}}, PAUTO),
nod(0, &types.Type{Width: 1}, nil, ir.PAUTO),
nod(0, &types.Type{Width: 2}, nil, ir.PAUTO),
false,
},
{
nodeWithClass(Node{Type: &types.Type{Width: 2}, Name: &Name{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{Width: 1}, Name: &Name{}}, PAUTO),
nod(0, &types.Type{Width: 2}, nil, ir.PAUTO),
nod(0, &types.Type{Width: 1}, nil, ir.PAUTO),
true,
},
{
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
true,
},
{
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
false,
},
{
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
false,
},
}
@ -151,35 +156,42 @@ func TestCmpstackvar(t *testing.T) {
}
func TestStackvarSort(t *testing.T) {
inp := []*Node{
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
nodeWithClass(Node{Xoffset: 0, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
nodeWithClass(Node{Xoffset: 10, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
nodeWithClass(Node{Xoffset: 20, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
markUsed(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
markNeedZero(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
nodeWithClass(Node{Type: &types.Type{Width: 1}, Sym: &types.Sym{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{Width: 2}, Sym: &types.Sym{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) ir.Node {
n := NewName(s)
n.SetType(t)
n.SetOffset(xoffset)
n.SetClass(cl)
return n
}
want := []*Node{
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
nodeWithClass(Node{Xoffset: 0, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
nodeWithClass(Node{Xoffset: 10, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
nodeWithClass(Node{Xoffset: 20, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
markUsed(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
markNeedZero(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
nodeWithClass(Node{Type: &types.Type{Width: 2}, Sym: &types.Sym{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{Width: 1}, Sym: &types.Sym{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO),
inp := []ir.Node{
nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
nod(10, &types.Type{}, &types.Sym{}, ir.PFUNC),
nod(20, &types.Type{}, &types.Sym{}, ir.PFUNC),
markUsed(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)),
nod(0, typeWithoutPointers(), &types.Sym{}, ir.PAUTO),
nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
markNeedZero(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)),
nod(0, &types.Type{Width: 1}, &types.Sym{}, ir.PAUTO),
nod(0, &types.Type{Width: 2}, &types.Sym{}, ir.PAUTO),
nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
}
want := []ir.Node{
nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
nod(10, &types.Type{}, &types.Sym{}, ir.PFUNC),
nod(20, &types.Type{}, &types.Sym{}, ir.PFUNC),
markUsed(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)),
markNeedZero(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)),
nod(0, &types.Type{Width: 2}, &types.Sym{}, ir.PAUTO),
nod(0, &types.Type{Width: 1}, &types.Sym{}, ir.PAUTO),
nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
nod(0, typeWithoutPointers(), &types.Sym{}, ir.PAUTO),
}
sort.Sort(byStackVar(inp))
if !reflect.DeepEqual(want, inp) {

View file

@ -5,6 +5,7 @@
package gc
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/src"
@ -42,9 +43,9 @@ func (s *state) insertPhis() {
type phiState struct {
s *state // SSA state
f *ssa.Func // function to work on
defvars []map[*Node]*ssa.Value // defined variables at end of each block
defvars []map[ir.Node]*ssa.Value // defined variables at end of each block
varnum map[*Node]int32 // variable numbering
varnum map[ir.Node]int32 // variable numbering
// properties of the dominator tree
idom []*ssa.Block // dominator parents
@ -59,7 +60,7 @@ type phiState struct {
hasDef *sparseSet // has a write of the variable we're processing
// miscellaneous
placeholder *ssa.Value // dummy value to use as a "not set yet" placeholder.
placeholder *ssa.Value // value to use as a "not set yet" placeholder.
}
func (s *phiState) insertPhis() {
@ -70,15 +71,15 @@ func (s *phiState) insertPhis() {
// Find all the variables for which we need to match up reads & writes.
// This step prunes any basic-block-only variables from consideration.
// Generate a numbering for these variables.
s.varnum = map[*Node]int32{}
var vars []*Node
s.varnum = map[ir.Node]int32{}
var vars []ir.Node
var vartypes []*types.Type
for _, b := range s.f.Blocks {
for _, v := range b.Values {
if v.Op != ssa.OpFwdRef {
continue
}
var_ := v.Aux.(*Node)
var_ := v.Aux.(ir.Node)
// Optimization: look back 1 block for the definition.
if len(b.Preds) == 1 {
@ -183,7 +184,7 @@ levels:
}
}
func (s *phiState) insertVarPhis(n int, var_ *Node, defs []*ssa.Block, typ *types.Type) {
func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *types.Type) {
priq := &s.priq
q := s.q
queued := s.queued
@ -318,7 +319,7 @@ func (s *phiState) resolveFwdRefs() {
if v.Op != ssa.OpFwdRef {
continue
}
n := s.varnum[v.Aux.(*Node)]
n := s.varnum[v.Aux.(ir.Node)]
v.Op = ssa.OpCopy
v.Aux = nil
v.AddArg(values[n])
@ -435,7 +436,7 @@ type simplePhiState struct {
s *state // SSA state
f *ssa.Func // function to work on
fwdrefs []*ssa.Value // list of FwdRefs to be processed
defvars []map[*Node]*ssa.Value // defined variables at end of each block
defvars []map[ir.Node]*ssa.Value // defined variables at end of each block
reachable []bool // which blocks are reachable
}
@ -449,7 +450,7 @@ func (s *simplePhiState) insertPhis() {
continue
}
s.fwdrefs = append(s.fwdrefs, v)
var_ := v.Aux.(*Node)
var_ := v.Aux.(ir.Node)
if _, ok := s.defvars[b.ID][var_]; !ok {
s.defvars[b.ID][var_] = v // treat FwdDefs as definitions.
}
@ -463,7 +464,7 @@ loop:
v := s.fwdrefs[len(s.fwdrefs)-1]
s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1]
b := v.Block
var_ := v.Aux.(*Node)
var_ := v.Aux.(ir.Node)
if b == s.f.Entry {
// No variable should be live at entry.
s.s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, var_, v)
@ -511,7 +512,7 @@ loop:
}
// lookupVarOutgoing finds the variable's value at the end of block b.
func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ *Node, line src.XPos) *ssa.Value {
func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir.Node, line src.XPos) *ssa.Value {
for {
if v := s.defvars[b.ID][var_]; v != nil {
return v

View file

@ -15,6 +15,8 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
@ -99,10 +101,10 @@ type BlockEffects struct {
// A collection of global state used by liveness analysis.
type Liveness struct {
fn *Node
fn ir.Node
f *ssa.Func
vars []*Node
idx map[*Node]int32
vars []ir.Node
idx map[ir.Node]int32
stkptrsize int64
be []BlockEffects
@ -204,20 +206,20 @@ type progeffectscache struct {
// nor do we care about non-local variables,
// nor do we care about empty structs (handled by the pointer check),
// nor do we care about the fake PAUTOHEAP variables.
func livenessShouldTrack(n *Node) bool {
return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Type.HasPointers()
func livenessShouldTrack(n ir.Node) bool {
return n.Op() == ir.ONAME && (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type().HasPointers()
}
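
// Editor's note: a hedged example of what this predicate admits. In
//
//	func f(p *int, n int) (q *byte) { var b []byte; _ = b }
//
// liveness tracks p, q, and b (ONAME nodes of class PPARAM/PPARAMOUT/PAUTO
// with pointer-containing types), but not n, whose int type has no pointers.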
// getvariables returns the list of on-stack variables that we need to track
// and a map for looking up indices by *Node.
func getvariables(fn *Node) ([]*Node, map[*Node]int32) {
var vars []*Node
for _, n := range fn.Func.Dcl {
func getvariables(fn ir.Node) ([]ir.Node, map[ir.Node]int32) {
var vars []ir.Node
for _, n := range fn.Func().Dcl {
if livenessShouldTrack(n) {
vars = append(vars, n)
}
}
idx := make(map[*Node]int32, len(vars))
idx := make(map[ir.Node]int32, len(vars))
for i, n := range vars {
idx[n] = int32(i)
}
@ -226,14 +228,14 @@ func getvariables(fn *Node) ([]*Node, map[*Node]int32) {
func (lv *Liveness) initcache() {
if lv.cache.initialized {
Fatalf("liveness cache initialized twice")
base.Fatalf("liveness cache initialized twice")
return
}
lv.cache.initialized = true
for i, node := range lv.vars {
switch node.Class() {
case PPARAM:
case ir.PPARAM:
// A return instruction with a p.to is a tail return, which brings
// the stack pointer back up (if it ever went down) and then jumps
// to a new function entirely. That form of instruction must read
@ -242,7 +244,7 @@ func (lv *Liveness) initcache() {
// function runs.
lv.cache.tailuevar = append(lv.cache.tailuevar, int32(i))
case PPARAMOUT:
case ir.PPARAMOUT:
// All results are live at every return point.
// Note that this point is after escaping return values
// are copied back to the stack using their PAUTOHEAP references.
@ -270,7 +272,7 @@ const (
// If v does not affect any tracked variables, it returns -1, 0.
func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
n, e := affectedNode(v)
if e == 0 || n == nil || n.Op != ONAME { // cheapest checks first
if e == 0 || n == nil || n.Op() != ir.ONAME { // cheapest checks first
return -1, 0
}
@ -280,7 +282,7 @@ func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
// variable" ICEs (issue 19632).
switch v.Op {
case ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive:
if !n.Name.Used() {
if !n.Name().Used() {
return -1, 0
}
}
@ -295,7 +297,7 @@ func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
if e&(ssa.SymRead|ssa.SymAddr) != 0 {
effect |= uevar
}
if e&ssa.SymWrite != 0 && (!isfat(n.Type) || v.Op == ssa.OpVarDef) {
if e&ssa.SymWrite != 0 && (!isfat(n.Type()) || v.Op == ssa.OpVarDef) {
effect |= varkill
}
@ -310,7 +312,7 @@ func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
}
// affectedNode returns the *Node affected by v
func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) {
func affectedNode(v *ssa.Value) (ir.Node, ssa.SymEffect) {
// Special cases.
switch v.Op {
case ssa.OpLoadReg:
@ -321,9 +323,9 @@ func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) {
return n, ssa.SymWrite
case ssa.OpVarLive:
return v.Aux.(*Node), ssa.SymRead
return v.Aux.(ir.Node), ssa.SymRead
case ssa.OpVarDef, ssa.OpVarKill:
return v.Aux.(*Node), ssa.SymWrite
return v.Aux.(ir.Node), ssa.SymWrite
case ssa.OpKeepAlive:
n, _ := AutoVar(v.Args[0])
return n, ssa.SymRead
@ -338,10 +340,10 @@ func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) {
case nil, *obj.LSym:
// ok, but no node
return nil, e
case *Node:
case ir.Node:
return a, e
default:
Fatalf("weird aux: %s", v.LongString())
base.Fatalf("weird aux: %s", v.LongString())
return nil, e
}
}
@ -354,7 +356,7 @@ type livenessFuncCache struct {
// Constructs a new liveness structure used to hold the global state of the
// liveness computation. The cfg argument is a slice of *BasicBlocks and the
// vars argument is a slice of *Nodes.
func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkptrsize int64) *Liveness {
func newliveness(fn ir.Node, f *ssa.Func, vars []ir.Node, idx map[ir.Node]int32, stkptrsize int64) *Liveness {
lv := &Liveness{
fn: fn,
f: f,
@ -406,7 +408,7 @@ func (lv *Liveness) blockEffects(b *ssa.Block) *BlockEffects {
// on future calls with the same type t.
func onebitwalktype1(t *types.Type, off int64, bv bvec) {
if t.Align > 0 && off&int64(t.Align-1) != 0 {
Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
base.Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
}
if !t.HasPointers() {
// Note: this case ensures that pointers to go:notinheap types
@ -415,25 +417,25 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
}
switch t.Etype {
case TPTR, TUNSAFEPTR, TFUNC, TCHAN, TMAP:
case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP:
if off&int64(Widthptr-1) != 0 {
Fatalf("onebitwalktype1: invalid alignment, %v", t)
base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
bv.Set(int32(off / int64(Widthptr))) // pointer
case TSTRING:
case types.TSTRING:
// struct { byte *str; intgo len; }
if off&int64(Widthptr-1) != 0 {
Fatalf("onebitwalktype1: invalid alignment, %v", t)
base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
bv.Set(int32(off / int64(Widthptr))) // pointer in first slot
case TINTER:
case types.TINTER:
// struct { Itab *tab; void *data; }
// or, when isnilinter(t)==true:
// struct { Type *type; void *data; }
if off&int64(Widthptr-1) != 0 {
Fatalf("onebitwalktype1: invalid alignment, %v", t)
base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
// The first word of an interface is a pointer, but we don't
// treat it as such.
@ -449,14 +451,14 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
// well as scan itabs to update their itab._type fields).
bv.Set(int32(off/int64(Widthptr) + 1)) // pointer in second slot
case TSLICE:
case types.TSLICE:
// struct { byte *array; uintgo len; uintgo cap; }
if off&int64(Widthptr-1) != 0 {
Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t)
base.Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t)
}
bv.Set(int32(off / int64(Widthptr))) // pointer in first slot (BitsPointer)
case TARRAY:
case types.TARRAY:
elt := t.Elem()
if elt.Width == 0 {
// Short-circuit for #20739.
@ -467,20 +469,20 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
off += elt.Width
}
case TSTRUCT:
case types.TSTRUCT:
for _, f := range t.Fields().Slice() {
onebitwalktype1(f.Type, off+f.Offset, bv)
}
default:
Fatalf("onebitwalktype1: unexpected type, %v", t)
base.Fatalf("onebitwalktype1: unexpected type, %v", t)
}
}
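
// Editor's note: a worked example, not part of the CL. On a 64-bit target
// (Widthptr = 8), onebitwalktype1 applied to
//
//	type T struct {
//		p *int   // offset 0:  pointer      -> bit 0
//		n int    // offset 8:  scalar       -> no bit
//		s string // offset 16: data pointer -> bit 2 (the len word gets no bit)
//	}
//
// sets bits 0 and 2 of the bitmap: one bit per pointer-sized word that may
// hold a pointer.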
// Generates live pointer value maps for arguments and local variables. The
// this argument and the in arguments are always assumed live. The vars
// argument is a slice of *Nodes.
func (lv *Liveness) pointerMap(liveout bvec, vars []*Node, args, locals bvec) {
func (lv *Liveness) pointerMap(liveout bvec, vars []ir.Node, args, locals bvec) {
for i := int32(0); ; i++ {
i = liveout.Next(i)
if i < 0 {
@ -488,11 +490,11 @@ func (lv *Liveness) pointerMap(liveout bvec, vars []*Node, args, locals bvec) {
}
node := vars[i]
switch node.Class() {
case PAUTO:
onebitwalktype1(node.Type, node.Xoffset+lv.stkptrsize, locals)
case ir.PAUTO:
onebitwalktype1(node.Type(), node.Offset()+lv.stkptrsize, locals)
case PPARAM, PPARAMOUT:
onebitwalktype1(node.Type, node.Xoffset, args)
case ir.PPARAM, ir.PPARAMOUT:
onebitwalktype1(node.Type(), node.Offset(), args)
}
}
}
@ -509,7 +511,7 @@ func allUnsafe(f *ssa.Func) bool {
// go:nosplit functions are similar. Since safe points used to
// be coupled with stack checks, go:nosplit often actually
// means "no safe points in this function".
return compiling_runtime || f.NoSplit
return base.Flag.CompilingRuntime || f.NoSplit
}
// markUnsafePoints finds unsafe points and computes lv.unsafePoints.
@ -786,14 +788,14 @@ func (lv *Liveness) epilogue() {
// pointers to copy values back to the stack).
// TODO: if the output parameter is heap-allocated, then we
// don't need to keep the stack copy live?
if lv.fn.Func.HasDefer() {
if lv.fn.Func().HasDefer() {
for i, n := range lv.vars {
if n.Class() == PPARAMOUT {
if n.Name.IsOutputParamHeapAddr() {
if n.Class() == ir.PPARAMOUT {
if n.Name().IsOutputParamHeapAddr() {
// Just to be paranoid. Heap addresses are PAUTOs.
Fatalf("variable %v both output param and heap output param", n)
base.Fatalf("variable %v both output param and heap output param", n)
}
if n.Name.Param.Heapaddr != nil {
if n.Name().Param.Heapaddr != nil {
// If this variable moved to the heap, then
// its stack copy is not live.
continue
@ -801,22 +803,22 @@ func (lv *Liveness) epilogue() {
// Note: zeroing is handled by zeroResults in walk.go.
livedefer.Set(int32(i))
}
if n.Name.IsOutputParamHeapAddr() {
if n.Name().IsOutputParamHeapAddr() {
// This variable will be overwritten early in the function
// prologue (from the result of a mallocgc) but we need to
// zero it in case that malloc causes a stack scan.
n.Name.SetNeedzero(true)
n.Name().SetNeedzero(true)
livedefer.Set(int32(i))
}
if n.Name.OpenDeferSlot() {
if n.Name().OpenDeferSlot() {
// Open-coded defer args slots must be live
// everywhere in a function, since a panic can
// occur (almost) anywhere. Because it is live
// everywhere, it must be zeroed on entry.
livedefer.Set(int32(i))
// It was already marked as Needzero when created.
if !n.Name.Needzero() {
Fatalf("all pointer-containing defer arg slots should have Needzero set")
if !n.Name().Needzero() {
base.Fatalf("all pointer-containing defer arg slots should have Needzero set")
}
}
}
@ -878,7 +880,7 @@ func (lv *Liveness) epilogue() {
if b == lv.f.Entry {
if index != 0 {
Fatalf("bad index for entry point: %v", index)
base.Fatalf("bad index for entry point: %v", index)
}
// Check to make sure only input variables are live.
@ -886,10 +888,10 @@ func (lv *Liveness) epilogue() {
if !liveout.Get(int32(i)) {
continue
}
if n.Class() == PPARAM {
if n.Class() == ir.PPARAM {
continue // ok
}
Fatalf("bad live variable at entry of %v: %L", lv.fn.Func.Nname, n)
base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Func().Nname, n)
}
// Record live variables.
@ -902,7 +904,7 @@ func (lv *Liveness) epilogue() {
}
// If we have an open-coded deferreturn call, make a liveness map for it.
if lv.fn.Func.OpenCodedDeferDisallowed() {
if lv.fn.Func().OpenCodedDeferDisallowed() {
lv.livenessMap.deferreturn = LivenessDontCare
} else {
lv.livenessMap.deferreturn = LivenessIndex{
@ -919,8 +921,8 @@ func (lv *Liveness) epilogue() {
// the only things that can possibly be live are the
// input parameters.
for j, n := range lv.vars {
if n.Class() != PPARAM && lv.stackMaps[0].Get(int32(j)) {
lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Func.Nname, n)
if n.Class() != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) {
lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Func().Nname, n)
}
}
}
@ -966,7 +968,7 @@ func (lv *Liveness) compact(b *ssa.Block) {
}
func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
if debuglive == 0 || lv.fn.funcname() == "init" || strings.HasPrefix(lv.fn.funcname(), ".") {
if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") {
return
}
if !(v == nil || v.Op.IsCall()) {
@ -978,14 +980,14 @@ func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
return
}
pos := lv.fn.Func.Nname.Pos
pos := lv.fn.Func().Nname.Pos()
if v != nil {
pos = v.Pos
}
s := "live at "
if v == nil {
s += fmt.Sprintf("entry to %s:", lv.fn.funcname())
s += fmt.Sprintf("entry to %s:", ir.FuncName(lv.fn))
} else if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
fn := sym.Fn.Name
if pos := strings.Index(fn, "."); pos >= 0 {
@ -1002,7 +1004,7 @@ func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
}
}
Warnl(pos, s)
base.WarnfAt(pos, s)
}
func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool {
@ -1022,7 +1024,7 @@ func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool {
if !live.Get(int32(i)) {
continue
}
fmt.Printf("%s%s", comma, n.Sym.Name)
fmt.Printf("%s%s", comma, n.Sym().Name)
comma = ","
}
return true
@ -1040,7 +1042,7 @@ func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bo
}
fmt.Printf("%s=", name)
if x {
fmt.Printf("%s", lv.vars[pos].Sym.Name)
fmt.Printf("%s", lv.vars[pos].Sym().Name)
}
return true
@ -1050,7 +1052,7 @@ func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bo
// This format synthesizes the information used during the multiple passes
// into a single presentation.
func (lv *Liveness) printDebug() {
fmt.Printf("liveness: %s\n", lv.fn.funcname())
fmt.Printf("liveness: %s\n", ir.FuncName(lv.fn))
for i, b := range lv.f.Blocks {
if i > 0 {
@ -1088,7 +1090,7 @@ func (lv *Liveness) printDebug() {
if b == lv.f.Entry {
live := lv.stackMaps[0]
fmt.Printf("(%s) function entry\n", linestr(lv.fn.Func.Nname.Pos))
fmt.Printf("(%s) function entry\n", base.FmtPos(lv.fn.Func().Nname.Pos()))
fmt.Printf("\tlive=")
printed = false
for j, n := range lv.vars {
@ -1105,7 +1107,7 @@ func (lv *Liveness) printDebug() {
}
for _, v := range b.Values {
fmt.Printf("(%s) %v\n", linestr(v.Pos), v.LongString())
fmt.Printf("(%s) %v\n", base.FmtPos(v.Pos), v.LongString())
pcdata := lv.livenessMap.Get(v)
@ -1162,11 +1164,11 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
// Size args bitmaps to be just large enough to hold the largest pointer.
// First, find the largest Xoffset node we care about.
// (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.)
var maxArgNode *Node
var maxArgNode ir.Node
for _, n := range lv.vars {
switch n.Class() {
case PPARAM, PPARAMOUT:
if maxArgNode == nil || n.Xoffset > maxArgNode.Xoffset {
case ir.PPARAM, ir.PPARAMOUT:
if maxArgNode == nil || n.Offset() > maxArgNode.Offset() {
maxArgNode = n
}
}
@ -1174,7 +1176,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
// Next, find the offset of the largest pointer in the largest node.
var maxArgs int64
if maxArgNode != nil {
maxArgs = maxArgNode.Xoffset + typeptrdata(maxArgNode.Type)
maxArgs = maxArgNode.Offset() + typeptrdata(maxArgNode.Type())
}
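
// Editor's note: a hedged example of the sizing above. If the largest
// tracked argument is a *int parameter at offset 16 on a 64-bit target,
// typeptrdata returns 8 (the length of the pointer-containing prefix of
// its type), so maxArgs = 16 + 8 = 24 bytes: an args bitmap of three words.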
// Size locals bitmaps to be stkptrsize sized.
@ -1214,7 +1216,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
// These symbols will be added to Ctxt.Data by addGCLocals
// after parallel compilation is done.
makeSym := func(tmpSym *obj.LSym) *obj.LSym {
return Ctxt.LookupInit(fmt.Sprintf("gclocals·%x", md5.Sum(tmpSym.P)), func(lsym *obj.LSym) {
return base.Ctxt.LookupInit(fmt.Sprintf("gclocals·%x", md5.Sum(tmpSym.P)), func(lsym *obj.LSym) {
lsym.P = tmpSym.P
lsym.Set(obj.AttrContentAddressable, true)
})
@ -1235,7 +1237,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
lv.prologue()
lv.solve()
lv.epilogue()
if debuglive > 0 {
if base.Flag.Live > 0 {
lv.showlive(nil, lv.stackMaps[0])
for _, b := range f.Blocks {
for _, val := range b.Values {
@ -1245,7 +1247,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
}
}
}
if debuglive >= 2 {
if base.Flag.Live >= 2 {
lv.printDebug()
}
@ -1264,7 +1266,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
}
// Emit the live pointer map data structures
ls := e.curfn.Func.lsym
ls := e.curfn.Func().LSym
fninfo := ls.Func()
fninfo.GCArgs, fninfo.GCLocals = lv.emit()
@ -1299,16 +1301,16 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
func isfat(t *types.Type) bool {
if t != nil {
switch t.Etype {
case TSLICE, TSTRING,
TINTER: // maybe remove later
case types.TSLICE, types.TSTRING,
types.TINTER: // maybe remove later
return true
case TARRAY:
case types.TARRAY:
// Array of 1 element, check if element is fat
if t.NumElem() == 1 {
return isfat(t.Elem())
}
return true
case TSTRUCT:
case types.TSTRUCT:
// Struct with 1 field, check if field is fat
if t.NumFields() == 1 {
return isfat(t.Field(0).Type)

View file

@ -5,6 +5,8 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
"cmd/internal/sys"
@ -47,9 +49,9 @@ var omit_pkgs = []string{
var norace_inst_pkgs = []string{"sync", "sync/atomic"}
func ispkgin(pkgs []string) bool {
if myimportpath != "" {
if base.Ctxt.Pkgpath != "" {
for _, p := range pkgs {
if myimportpath == p {
if base.Ctxt.Pkgpath == p {
return true
}
}
@ -58,22 +60,22 @@ func ispkgin(pkgs []string) bool {
return false
}
func instrument(fn *Node) {
if fn.Func.Pragma&Norace != 0 {
func instrument(fn ir.Node) {
if fn.Func().Pragma&ir.Norace != 0 {
return
}
if !flag_race || !ispkgin(norace_inst_pkgs) {
fn.Func.SetInstrumentBody(true)
if !base.Flag.Race || !ispkgin(norace_inst_pkgs) {
fn.Func().SetInstrumentBody(true)
}
if flag_race {
lno := lineno
lineno = src.NoXPos
if base.Flag.Race {
lno := base.Pos
base.Pos = src.NoXPos
if thearch.LinkArch.Arch.Family != sys.AMD64 {
fn.Func.Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil))
fn.Func().Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
fn.Func().Exit.Append(mkcall("racefuncexit", nil, nil))
} else {
// nodpc is the PC of the caller as extracted by
@ -81,13 +83,13 @@ func instrument(fn *Node) {
// This only works for amd64. This will not
// work on arm or others that might support
// race in the future.
nodpc := nodfp.copy()
nodpc.Type = types.Types[TUINTPTR]
nodpc.Xoffset = int64(-Widthptr)
fn.Func.Dcl = append(fn.Func.Dcl, nodpc)
fn.Func.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil))
nodpc := ir.Copy(nodfp)
nodpc.SetType(types.Types[types.TUINTPTR])
nodpc.SetOffset(int64(-Widthptr))
fn.Func().Dcl = append(fn.Func().Dcl, nodpc)
fn.Func().Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
fn.Func().Exit.Append(mkcall("racefuncexit", nil, nil))
}
lineno = lno
base.Pos = lno
}
}
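
// Editor's note: an illustrative sketch, not part of the CL. After
// instrumentation on amd64, a function body is bracketed roughly as
//
//	func f() {
//		racefuncenter(nodpc) // nodpc: the caller's PC, read at a fixed frame offset
//		// ... original body, with reads and writes instrumented ...
//		racefuncexit()
//	}
//
// On the other architectures the entry call is racefuncenterfp instead,
// which recovers the caller's PC from the frame pointer.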

View file

@ -5,13 +5,15 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/sys"
"unicode/utf8"
)
// range
func typecheckrange(n *Node) {
func typecheckrange(n ir.Node) {
// Typechecking order is important here:
// 0. first typecheck range expression (slice/map/chan),
// it is evaluated only once and so logically it is not part of the loop.
@ -25,7 +27,7 @@ func typecheckrange(n *Node) {
// second half of dance, the first half being typecheckrangeExpr
n.SetTypecheck(1)
ls := n.List.Slice()
ls := n.List().Slice()
for i1, n1 := range ls {
if n1.Typecheck() == 0 {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
@ -33,21 +35,21 @@ func typecheckrange(n *Node) {
}
decldepth++
typecheckslice(n.Nbody.Slice(), ctxStmt)
typecheckslice(n.Body().Slice(), ctxStmt)
decldepth--
}
func typecheckrangeExpr(n *Node) {
n.Right = typecheck(n.Right, ctxExpr)
func typecheckrangeExpr(n ir.Node) {
n.SetRight(typecheck(n.Right(), ctxExpr))
t := n.Right.Type
t := n.Right().Type()
if t == nil {
return
}
// delicate little dance. see typecheckas2
ls := n.List.Slice()
ls := n.List().Slice()
for i1, n1 := range ls {
if n1.Name == nil || n1.Name.Defn != n {
if n1.Name() == nil || n1.Name().Defn != n {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
}
}
@ -55,80 +57,80 @@ func typecheckrangeExpr(n *Node) {
if t.IsPtr() && t.Elem().IsArray() {
t = t.Elem()
}
n.Type = t
n.SetType(t)
var t1, t2 *types.Type
toomany := false
switch t.Etype {
default:
yyerrorl(n.Pos, "cannot range over %L", n.Right)
base.ErrorfAt(n.Pos(), "cannot range over %L", n.Right())
return
case TARRAY, TSLICE:
t1 = types.Types[TINT]
case types.TARRAY, types.TSLICE:
t1 = types.Types[types.TINT]
t2 = t.Elem()
case TMAP:
case types.TMAP:
t1 = t.Key()
t2 = t.Elem()
case TCHAN:
case types.TCHAN:
if !t.ChanDir().CanRecv() {
yyerrorl(n.Pos, "invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type)
base.ErrorfAt(n.Pos(), "invalid operation: range %v (receive from send-only type %v)", n.Right(), n.Right().Type())
return
}
t1 = t.Elem()
t2 = nil
if n.List.Len() == 2 {
if n.List().Len() == 2 {
toomany = true
}
case TSTRING:
t1 = types.Types[TINT]
case types.TSTRING:
t1 = types.Types[types.TINT]
t2 = types.Runetype
}
if n.List.Len() > 2 || toomany {
yyerrorl(n.Pos, "too many variables in range")
if n.List().Len() > 2 || toomany {
base.ErrorfAt(n.Pos(), "too many variables in range")
}
var v1, v2 *Node
if n.List.Len() != 0 {
v1 = n.List.First()
var v1, v2 ir.Node
if n.List().Len() != 0 {
v1 = n.List().First()
}
if n.List.Len() > 1 {
v2 = n.List.Second()
if n.List().Len() > 1 {
v2 = n.List().Second()
}
// this is not only an optimization but also a requirement in the spec.
// "if the second iteration variable is the blank identifier, the range
// clause is equivalent to the same clause with only the first variable
// present."
if v2.isBlank() {
if ir.IsBlank(v2) {
if v1 != nil {
n.List.Set1(v1)
n.PtrList().Set1(v1)
}
v2 = nil
}
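
// Editor's note: a hedged example of the spec rule quoted above. These
// two loops are handled identically once the blank v2 is dropped:
//
//	for i, _ := range s { use(i) }
//	for i := range s { use(i) }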
if v1 != nil {
if v1.Name != nil && v1.Name.Defn == n {
v1.Type = t1
} else if v1.Type != nil {
if op, why := assignop(t1, v1.Type); op == OXXX {
yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why)
if v1.Name() != nil && v1.Name().Defn == n {
v1.SetType(t1)
} else if v1.Type() != nil {
if op, why := assignop(t1, v1.Type()); op == ir.OXXX {
base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t1, v1, why)
}
}
checkassign(n, v1)
}
if v2 != nil {
if v2.Name != nil && v2.Name.Defn == n {
v2.Type = t2
} else if v2.Type != nil {
if op, why := assignop(t2, v2.Type); op == OXXX {
yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why)
if v2.Name() != nil && v2.Name().Defn == n {
v2.SetType(t2)
} else if v2.Type() != nil {
if op, why := assignop(t2, v2.Type()); op == ir.OXXX {
base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t2, v2, why)
}
}
checkassign(n, v2)
@ -155,12 +157,12 @@ func cheapComputableIndex(width int64) bool {
// simpler forms. The result must be assigned back to n.
// Node n may also be modified in place, and may also be
// the returned node.
func walkrange(n *Node) *Node {
func walkrange(n ir.Node) ir.Node {
if isMapClear(n) {
m := n.Right
m := n.Right()
lno := setlineno(m)
n = mapClear(m)
lineno = lno
base.Pos = lno
return n
}
@ -171,65 +173,65 @@ func walkrange(n *Node) *Node {
// hb: hidden bool
// a, v1, v2: not hidden aggregate, val 1, 2
t := n.Type
t := n.Type()
a := n.Right
a := n.Right()
lno := setlineno(a)
n.Right = nil
n.SetRight(nil)
var v1, v2 *Node
l := n.List.Len()
var v1, v2 ir.Node
l := n.List().Len()
if l > 0 {
v1 = n.List.First()
v1 = n.List().First()
}
if l > 1 {
v2 = n.List.Second()
v2 = n.List().Second()
}
if v2.isBlank() {
if ir.IsBlank(v2) {
v2 = nil
}
if v1.isBlank() && v2 == nil {
if ir.IsBlank(v1) && v2 == nil {
v1 = nil
}
if v1 == nil && v2 != nil {
Fatalf("walkrange: v2 != nil while v1 == nil")
base.Fatalf("walkrange: v2 != nil while v1 == nil")
}
// n.List has no meaning anymore, clear it
// to avoid erroneous processing by racewalk.
n.List.Set(nil)
n.PtrList().Set(nil)
var ifGuard *Node
var ifGuard ir.Node
translatedLoopOp := OFOR
translatedLoopOp := ir.OFOR
var body []*Node
var init []*Node
var body []ir.Node
var init []ir.Node
switch t.Etype {
default:
Fatalf("walkrange")
base.Fatalf("walkrange")
case TARRAY, TSLICE:
case types.TARRAY, types.TSLICE:
if arrayClear(n, v1, v2, a) {
lineno = lno
base.Pos = lno
return n
}
// order.stmt arranged for a copy of the array/slice variable if needed.
ha := a
hv1 := temp(types.Types[TINT])
hn := temp(types.Types[TINT])
hv1 := temp(types.Types[types.TINT])
hn := temp(types.Types[types.TINT])
init = append(init, nod(OAS, hv1, nil))
init = append(init, nod(OAS, hn, nod(OLEN, ha, nil)))
init = append(init, ir.Nod(ir.OAS, hv1, nil))
init = append(init, ir.Nod(ir.OAS, hn, ir.Nod(ir.OLEN, ha, nil)))
n.Left = nod(OLT, hv1, hn)
n.Right = nod(OAS, hv1, nod(OADD, hv1, nodintconst(1)))
n.SetLeft(ir.Nod(ir.OLT, hv1, hn))
n.SetRight(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1))))
// for range ha { body }
if v1 == nil {
@ -238,21 +240,21 @@ func walkrange(n *Node) *Node {
// for v1 := range ha { body }
if v2 == nil {
body = []*Node{nod(OAS, v1, hv1)}
body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)}
break
}
// for v1, v2 := range ha { body }
if cheapComputableIndex(n.Type.Elem().Width) {
if cheapComputableIndex(n.Type().Elem().Width) {
// v1, v2 = hv1, ha[hv1]
tmp := nod(OINDEX, ha, hv1)
tmp := ir.Nod(ir.OINDEX, ha, hv1)
tmp.SetBounded(true)
// Use OAS2 to correctly handle assignments
// of the form "v1, a[v1] := range".
a := nod(OAS2, nil, nil)
a.List.Set2(v1, v2)
a.Rlist.Set2(hv1, tmp)
body = []*Node{a}
a := ir.Nod(ir.OAS2, nil, nil)
a.PtrList().Set2(v1, v2)
a.PtrRlist().Set2(hv1, tmp)
body = []ir.Node{a}
break
}
@ -268,20 +270,20 @@ func walkrange(n *Node) *Node {
// TODO(austin): OFORUNTIL inhibits bounds-check
// elimination on the index variable (see #20711).
// Enhance the prove pass to understand this.
ifGuard = nod(OIF, nil, nil)
ifGuard.Left = nod(OLT, hv1, hn)
translatedLoopOp = OFORUNTIL
ifGuard = ir.Nod(ir.OIF, nil, nil)
ifGuard.SetLeft(ir.Nod(ir.OLT, hv1, hn))
translatedLoopOp = ir.OFORUNTIL
hp := temp(types.NewPtr(n.Type.Elem()))
tmp := nod(OINDEX, ha, nodintconst(0))
hp := temp(types.NewPtr(n.Type().Elem()))
tmp := ir.Nod(ir.OINDEX, ha, nodintconst(0))
tmp.SetBounded(true)
init = append(init, nod(OAS, hp, nod(OADDR, tmp, nil)))
init = append(init, ir.Nod(ir.OAS, hp, ir.Nod(ir.OADDR, tmp, nil)))
// Use OAS2 to correctly handle assignments
// of the form "v1, a[v1] := range".
a := nod(OAS2, nil, nil)
a.List.Set2(v1, v2)
a.Rlist.Set2(hv1, nod(ODEREF, hp, nil))
a := ir.Nod(ir.OAS2, nil, nil)
a.PtrList().Set2(v1, v2)
a.PtrRlist().Set2(hv1, ir.Nod(ir.ODEREF, hp, nil))
body = append(body, a)
// Advance pointer as part of the late increment.
@ -289,76 +291,76 @@ func walkrange(n *Node) *Node {
// This runs *after* the condition check, so we know
// advancing the pointer is safe and won't go past the
// end of the allocation.
a = nod(OAS, hp, addptr(hp, t.Elem().Width))
a = ir.Nod(ir.OAS, hp, addptr(hp, t.Elem().Width))
a = typecheck(a, ctxStmt)
n.List.Set1(a)
n.PtrList().Set1(a)
case TMAP:
case types.TMAP:
// order.stmt allocated the iterator for us.
// we only use a once, so no copy needed.
ha := a
hit := prealloc[n]
th := hit.Type
n.Left = nil
th := hit.Type()
n.SetLeft(nil)
keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter
elemsym := th.Field(1).Sym // ditto
fn := syslook("mapiterinit")
fn = substArgTypes(fn, t.Key(), t.Elem(), th)
init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nod(OADDR, hit, nil)))
n.Left = nod(ONE, nodSym(ODOT, hit, keysym), nodnil())
init = append(init, mkcall1(fn, nil, nil, typename(t), ha, ir.Nod(ir.OADDR, hit, nil)))
n.SetLeft(ir.Nod(ir.ONE, nodSym(ir.ODOT, hit, keysym), nodnil()))
fn = syslook("mapiternext")
fn = substArgTypes(fn, th)
n.Right = mkcall1(fn, nil, nil, nod(OADDR, hit, nil))
n.SetRight(mkcall1(fn, nil, nil, ir.Nod(ir.OADDR, hit, nil)))
key := nodSym(ODOT, hit, keysym)
key = nod(ODEREF, key, nil)
key := nodSym(ir.ODOT, hit, keysym)
key = ir.Nod(ir.ODEREF, key, nil)
if v1 == nil {
body = nil
} else if v2 == nil {
body = []*Node{nod(OAS, v1, key)}
body = []ir.Node{ir.Nod(ir.OAS, v1, key)}
} else {
elem := nodSym(ODOT, hit, elemsym)
elem = nod(ODEREF, elem, nil)
a := nod(OAS2, nil, nil)
a.List.Set2(v1, v2)
a.Rlist.Set2(key, elem)
body = []*Node{a}
elem := nodSym(ir.ODOT, hit, elemsym)
elem = ir.Nod(ir.ODEREF, elem, nil)
a := ir.Nod(ir.OAS2, nil, nil)
a.PtrList().Set2(v1, v2)
a.PtrRlist().Set2(key, elem)
body = []ir.Node{a}
}
case TCHAN:
case types.TCHAN:
// order.stmt arranged for a copy of the channel variable.
ha := a
n.Left = nil
n.SetLeft(nil)
hv1 := temp(t.Elem())
hv1.SetTypecheck(1)
if t.Elem().HasPointers() {
init = append(init, nod(OAS, hv1, nil))
init = append(init, ir.Nod(ir.OAS, hv1, nil))
}
hb := temp(types.Types[TBOOL])
hb := temp(types.Types[types.TBOOL])
n.Left = nod(ONE, hb, nodbool(false))
a := nod(OAS2RECV, nil, nil)
n.SetLeft(ir.Nod(ir.ONE, hb, nodbool(false)))
a := ir.Nod(ir.OAS2RECV, nil, nil)
a.SetTypecheck(1)
a.List.Set2(hv1, hb)
a.Right = nod(ORECV, ha, nil)
n.Left.Ninit.Set1(a)
a.PtrList().Set2(hv1, hb)
a.SetRight(ir.Nod(ir.ORECV, ha, nil))
n.Left().PtrInit().Set1(a)
if v1 == nil {
body = nil
} else {
body = []*Node{nod(OAS, v1, hv1)}
body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)}
}
// Zero hv1. This prevents hv1 from being the sole, inaccessible
// reference to an otherwise GC-able value during the next channel receive.
// See issue 15281.
body = append(body, nod(OAS, hv1, nil))
body = append(body, ir.Nod(ir.OAS, hv1, nil))
case TSTRING:
case types.TSTRING:
// Transform string range statements like "for v1, v2 = range a" into
//
// ha := a
@ -377,84 +379,84 @@ func walkrange(n *Node) *Node {
// order.stmt arranged for a copy of the string variable.
ha := a
hv1 := temp(types.Types[TINT])
hv1t := temp(types.Types[TINT])
hv1 := temp(types.Types[types.TINT])
hv1t := temp(types.Types[types.TINT])
hv2 := temp(types.Runetype)
// hv1 := 0
init = append(init, nod(OAS, hv1, nil))
init = append(init, ir.Nod(ir.OAS, hv1, nil))
// hv1 < len(ha)
n.Left = nod(OLT, hv1, nod(OLEN, ha, nil))
n.SetLeft(ir.Nod(ir.OLT, hv1, ir.Nod(ir.OLEN, ha, nil)))
if v1 != nil {
// hv1t = hv1
body = append(body, nod(OAS, hv1t, hv1))
body = append(body, ir.Nod(ir.OAS, hv1t, hv1))
}
// hv2 := rune(ha[hv1])
nind := nod(OINDEX, ha, hv1)
nind := ir.Nod(ir.OINDEX, ha, hv1)
nind.SetBounded(true)
body = append(body, nod(OAS, hv2, conv(nind, types.Runetype)))
body = append(body, ir.Nod(ir.OAS, hv2, conv(nind, types.Runetype)))
// if hv2 < utf8.RuneSelf
nif := nod(OIF, nil, nil)
nif.Left = nod(OLT, hv2, nodintconst(utf8.RuneSelf))
nif := ir.Nod(ir.OIF, nil, nil)
nif.SetLeft(ir.Nod(ir.OLT, hv2, nodintconst(utf8.RuneSelf)))
// hv1++
nif.Nbody.Set1(nod(OAS, hv1, nod(OADD, hv1, nodintconst(1))))
nif.PtrBody().Set1(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1))))
// } else {
eif := nod(OAS2, nil, nil)
nif.Rlist.Set1(eif)
eif := ir.Nod(ir.OAS2, nil, nil)
nif.PtrRlist().Set1(eif)
// hv2, hv1 = decoderune(ha, hv1)
eif.List.Set2(hv2, hv1)
eif.PtrList().Set2(hv2, hv1)
fn := syslook("decoderune")
eif.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, ha, hv1))
eif.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, ha, hv1))
body = append(body, nif)
if v1 != nil {
if v2 != nil {
// v1, v2 = hv1t, hv2
a := nod(OAS2, nil, nil)
a.List.Set2(v1, v2)
a.Rlist.Set2(hv1t, hv2)
a := ir.Nod(ir.OAS2, nil, nil)
a.PtrList().Set2(v1, v2)
a.PtrRlist().Set2(hv1t, hv2)
body = append(body, a)
} else {
// v1 = hv1t
body = append(body, nod(OAS, v1, hv1t))
body = append(body, ir.Nod(ir.OAS, v1, hv1t))
}
}
}
n.Op = translatedLoopOp
n.SetOp(translatedLoopOp)
typecheckslice(init, ctxStmt)
if ifGuard != nil {
ifGuard.Ninit.Append(init...)
ifGuard.PtrInit().Append(init...)
ifGuard = typecheck(ifGuard, ctxStmt)
} else {
n.Ninit.Append(init...)
n.PtrInit().Append(init...)
}
typecheckslice(n.Left.Ninit.Slice(), ctxStmt)
typecheckslice(n.Left().Init().Slice(), ctxStmt)
n.Left = typecheck(n.Left, ctxExpr)
n.Left = defaultlit(n.Left, nil)
n.Right = typecheck(n.Right, ctxStmt)
n.SetLeft(typecheck(n.Left(), ctxExpr))
n.SetLeft(defaultlit(n.Left(), nil))
n.SetRight(typecheck(n.Right(), ctxStmt))
typecheckslice(body, ctxStmt)
n.Nbody.Prepend(body...)
n.PtrBody().Prepend(body...)
if ifGuard != nil {
ifGuard.Nbody.Set1(n)
ifGuard.PtrBody().Set1(n)
n = ifGuard
}
n = walkstmt(n)
lineno = lno
base.Pos = lno
return n
}
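
For orientation, here is a small runnable sketch (plain Go, outside the compiler) of the rewrite the TARRAY/TSLICE arm performs; ha, hv1 and hn stand in for the hidden temporaries named in the code above:

package main

import "fmt"

// rangeLowered mirrors what walkrange emits for "for v1, v2 := range a"
// over a slice in the cheapComputableIndex case: a copy ha arranged by
// order.stmt, a hidden index hv1 and length hn, and a single OAS2-style
// assignment so forms like "for i, a[i] = range a" remain correct.
func rangeLowered(a []string) {
	ha := a
	hv1, hn := 0, len(ha)
	for hv1 < hn {
		v1, v2 := hv1, ha[hv1] // one two-value assignment, as in the OAS2
		fmt.Println(v1, v2)    // stands in for the original loop body
		hv1 = hv1 + 1          // the loop's post statement
	}
}

func main() { rangeLowered([]string{"x", "y"}) }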
@ -465,41 +467,41 @@ func walkrange(n *Node) *Node {
// }
//
// where == for keys of map m is reflexive.
func isMapClear(n *Node) bool {
if Debug.N != 0 || instrumenting {
func isMapClear(n ir.Node) bool {
if base.Flag.N != 0 || instrumenting {
return false
}
if n.Op != ORANGE || n.Type.Etype != TMAP || n.List.Len() != 1 {
if n.Op() != ir.ORANGE || n.Type().Etype != types.TMAP || n.List().Len() != 1 {
return false
}
k := n.List.First()
if k == nil || k.isBlank() {
k := n.List().First()
if k == nil || ir.IsBlank(k) {
return false
}
// Require k to be a new variable name.
if k.Name == nil || k.Name.Defn != n {
if k.Name() == nil || k.Name().Defn != n {
return false
}
if n.Nbody.Len() != 1 {
if n.Body().Len() != 1 {
return false
}
stmt := n.Nbody.First() // only stmt in body
if stmt == nil || stmt.Op != ODELETE {
stmt := n.Body().First() // only stmt in body
if stmt == nil || stmt.Op() != ir.ODELETE {
return false
}
m := n.Right
if !samesafeexpr(stmt.List.First(), m) || !samesafeexpr(stmt.List.Second(), k) {
m := n.Right()
if !samesafeexpr(stmt.List().First(), m) || !samesafeexpr(stmt.List().Second(), k) {
return false
}
// Keys where equality is not reflexive cannot be deleted from maps.
if !isreflexive(m.Type.Key()) {
if !isreflexive(m.Type().Key()) {
return false
}
@ -507,8 +509,8 @@ func isMapClear(n *Node) bool {
}
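
A hedged illustration in plain Go of the exact loop shape accepted here; any deviation (a second body statement, a reused key variable, a key type whose equality is not reflexive) makes the function return false:

// clearAll is the pattern isMapClear matches: a range over m whose
// single body statement deletes the current key. walkrange replaces
// the entire loop with one runtime.mapclear call.
func clearAll(m map[string]int) {
	for k := range m {
		delete(m, k)
	}
}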
// mapClear constructs a call to runtime.mapclear for the map m.
func mapClear(m *Node) *Node {
t := m.Type
func mapClear(m ir.Node) ir.Node {
t := m.Type()
// instantiate mapclear(typ *type, hmap map[any]any)
fn := syslook("mapclear")
@ -532,8 +534,8 @@ func mapClear(m *Node) *Node {
// in which the evaluation of a is side-effect-free.
//
// Parameters are as in walkrange: "for v1, v2 = range a".
func arrayClear(n, v1, v2, a *Node) bool {
if Debug.N != 0 || instrumenting {
func arrayClear(n, v1, v2, a ir.Node) bool {
if base.Flag.N != 0 || instrumenting {
return false
}
@ -541,21 +543,21 @@ func arrayClear(n, v1, v2, a *Node) bool {
return false
}
if n.Nbody.Len() != 1 || n.Nbody.First() == nil {
if n.Body().Len() != 1 || n.Body().First() == nil {
return false
}
stmt := n.Nbody.First() // only stmt in body
if stmt.Op != OAS || stmt.Left.Op != OINDEX {
stmt := n.Body().First() // only stmt in body
if stmt.Op() != ir.OAS || stmt.Left().Op() != ir.OINDEX {
return false
}
if !samesafeexpr(stmt.Left.Left, a) || !samesafeexpr(stmt.Left.Right, v1) {
if !samesafeexpr(stmt.Left().Left(), a) || !samesafeexpr(stmt.Left().Right(), v1) {
return false
}
elemsize := n.Type.Elem().Width
if elemsize <= 0 || !isZero(stmt.Right) {
elemsize := n.Type().Elem().Width
if elemsize <= 0 || !isZero(stmt.Right()) {
return false
}
@ -566,63 +568,63 @@ func arrayClear(n, v1, v2, a *Node) bool {
// memclr{NoHeap,Has}Pointers(hp, hn)
// i = len(a) - 1
// }
n.Op = OIF
n.SetOp(ir.OIF)
n.Nbody.Set(nil)
n.Left = nod(ONE, nod(OLEN, a, nil), nodintconst(0))
n.PtrBody().Set(nil)
n.SetLeft(ir.Nod(ir.ONE, ir.Nod(ir.OLEN, a, nil), nodintconst(0)))
// hp = &a[0]
hp := temp(types.Types[TUNSAFEPTR])
hp := temp(types.Types[types.TUNSAFEPTR])
tmp := nod(OINDEX, a, nodintconst(0))
tmp := ir.Nod(ir.OINDEX, a, nodintconst(0))
tmp.SetBounded(true)
tmp = nod(OADDR, tmp, nil)
tmp = convnop(tmp, types.Types[TUNSAFEPTR])
n.Nbody.Append(nod(OAS, hp, tmp))
tmp = ir.Nod(ir.OADDR, tmp, nil)
tmp = convnop(tmp, types.Types[types.TUNSAFEPTR])
n.PtrBody().Append(ir.Nod(ir.OAS, hp, tmp))
// hn = len(a) * sizeof(elem(a))
hn := temp(types.Types[TUINTPTR])
hn := temp(types.Types[types.TUINTPTR])
tmp = nod(OLEN, a, nil)
tmp = nod(OMUL, tmp, nodintconst(elemsize))
tmp = conv(tmp, types.Types[TUINTPTR])
n.Nbody.Append(nod(OAS, hn, tmp))
tmp = ir.Nod(ir.OLEN, a, nil)
tmp = ir.Nod(ir.OMUL, tmp, nodintconst(elemsize))
tmp = conv(tmp, types.Types[types.TUINTPTR])
n.PtrBody().Append(ir.Nod(ir.OAS, hn, tmp))
var fn *Node
if a.Type.Elem().HasPointers() {
var fn ir.Node
if a.Type().Elem().HasPointers() {
// memclrHasPointers(hp, hn)
Curfn.Func.setWBPos(stmt.Pos)
Curfn.Func().SetWBPos(stmt.Pos())
fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
} else {
// memclrNoHeapPointers(hp, hn)
fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn)
}
n.Nbody.Append(fn)
n.PtrBody().Append(fn)
// i = len(a) - 1
v1 = nod(OAS, v1, nod(OSUB, nod(OLEN, a, nil), nodintconst(1)))
v1 = ir.Nod(ir.OAS, v1, ir.Nod(ir.OSUB, ir.Nod(ir.OLEN, a, nil), nodintconst(1)))
n.Nbody.Append(v1)
n.PtrBody().Append(v1)
n.Left = typecheck(n.Left, ctxExpr)
n.Left = defaultlit(n.Left, nil)
typecheckslice(n.Nbody.Slice(), ctxStmt)
n.SetLeft(typecheck(n.Left(), ctxExpr))
n.SetLeft(defaultlit(n.Left(), nil))
typecheckslice(n.Body().Slice(), ctxStmt)
n = walkstmt(n)
return true
}
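
For comparison, the source-level idiom this function rewrites (a sketch; the stored value may be any zero value of the element type):

// zeroAll is the shape arrayClear recognizes. Instead of storing the
// zero element by element, the compiled code tests len(a) != 0 once
// and issues a single memclrNoHeapPointers or memclrHasPointers call
// over the whole backing store, then sets i to len(a)-1.
func zeroAll(a []int) {
	for i := range a {
		a[i] = 0
	}
}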
// addptr returns (*T)(uintptr(p) + n).
func addptr(p *Node, n int64) *Node {
t := p.Type
func addptr(p ir.Node, n int64) ir.Node {
t := p.Type()
p = nod(OCONVNOP, p, nil)
p.Type = types.Types[TUINTPTR]
p = ir.Nod(ir.OCONVNOP, p, nil)
p.SetType(types.Types[types.TUINTPTR])
p = nod(OADD, p, nodintconst(n))
p = ir.Nod(ir.OADD, p, nodintconst(n))
p = nod(OCONVNOP, p, nil)
p.Type = t
p = ir.Nod(ir.OCONVNOP, p, nil)
p.SetType(t)
return p
}
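
A user-level analogue of the expression addptr builds, assuming p points into a live allocation; the compiler emits the same uintptr round-trip as typed IR, where it is safe by construction:

import "unsafe"

// addPtr mimics addptr's (*T)(uintptr(p) + n): pointer arithmetic done
// as a single-expression uintptr conversion, the sanctioned unsafe
// pattern for advancing within an allocation.
func addPtr(p unsafe.Pointer, n int64) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + uintptr(n))
}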

File diff suppressed because it is too large

View file

@ -4,6 +4,8 @@
package gc
import "cmd/compile/internal/ir"
// Strongly connected components.
//
// Run analysis on minimal sets of mutually recursive functions
@ -30,10 +32,10 @@ package gc
// when analyzing a set of mutually recursive functions.
type bottomUpVisitor struct {
analyze func([]*Node, bool)
analyze func([]ir.Node, bool)
visitgen uint32
nodeID map[*Node]uint32
stack []*Node
nodeID map[ir.Node]uint32
stack []ir.Node
}
// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
@ -49,18 +51,18 @@ type bottomUpVisitor struct {
// If recursive is false, the list consists of only a single function and its closures.
// If recursive is true, the list may still contain only a single function,
// if that function is itself recursive.
func visitBottomUp(list []*Node, analyze func(list []*Node, recursive bool)) {
func visitBottomUp(list []ir.Node, analyze func(list []ir.Node, recursive bool)) {
var v bottomUpVisitor
v.analyze = analyze
v.nodeID = make(map[*Node]uint32)
v.nodeID = make(map[ir.Node]uint32)
for _, n := range list {
if n.Op == ODCLFUNC && !n.Func.IsHiddenClosure() {
if n.Op() == ir.ODCLFUNC && !n.Func().IsHiddenClosure() {
v.visit(n)
}
}
}
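
A hypothetical caller inside package gc, shaped like the escape-analysis driver (xtop is the package's existing list of top-level declarations; the closure body is invented for illustration):

// Sketch: analyze is handed each strongly connected component in
// bottom-up order; for a non-recursive component, list holds one node.
visitBottomUp(xtop, func(list []ir.Node, recursive bool) {
	for _, fn := range list {
		_ = fn // fn.Op() == ir.ODCLFUNC; analyze the batch together
	}
})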
func (v *bottomUpVisitor) visit(n *Node) uint32 {
func (v *bottomUpVisitor) visit(n ir.Node) uint32 {
if id := v.nodeID[n]; id > 0 {
// already visited
return id
@ -73,42 +75,46 @@ func (v *bottomUpVisitor) visit(n *Node) uint32 {
min := v.visitgen
v.stack = append(v.stack, n)
inspectList(n.Nbody, func(n *Node) bool {
switch n.Op {
case ONAME:
if n.Class() == PFUNC {
if n.isMethodExpression() {
n = asNode(n.Type.Nname())
}
if n != nil && n.Name.Defn != nil {
if m := v.visit(n.Name.Defn); m < min {
ir.InspectList(n.Body(), func(n ir.Node) bool {
switch n.Op() {
case ir.ONAME:
if n.Class() == ir.PFUNC {
if n != nil && n.Name().Defn != nil {
if m := v.visit(n.Name().Defn); m < min {
min = m
}
}
}
case ODOTMETH:
fn := asNode(n.Type.Nname())
if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
if m := v.visit(fn.Name.Defn); m < min {
case ir.OMETHEXPR:
fn := methodExprName(n)
if fn != nil && fn.Name().Defn != nil {
if m := v.visit(fn.Name().Defn); m < min {
min = m
}
}
case OCALLPART:
fn := asNode(callpartMethod(n).Type.Nname())
if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
if m := v.visit(fn.Name.Defn); m < min {
case ir.ODOTMETH:
fn := methodExprName(n)
if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name().Defn != nil {
if m := v.visit(fn.Name().Defn); m < min {
min = m
}
}
case OCLOSURE:
if m := v.visit(n.Func.Closure); m < min {
case ir.OCALLPART:
fn := ir.AsNode(callpartMethod(n).Nname)
if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name().Defn != nil {
if m := v.visit(fn.Name().Defn); m < min {
min = m
}
}
case ir.OCLOSURE:
if m := v.visit(n.Func().Decl); m < min {
min = m
}
}
return true
})
if (min == id || min == id+1) && !n.Func.IsHiddenClosure() {
if (min == id || min == id+1) && !n.Func().IsHiddenClosure() {
// This node is the root of a strongly connected component.
// The original min passed to visitcodelist was v.nodeID[n]+1.

View file

@ -5,6 +5,8 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/src"
@ -13,10 +15,10 @@ import (
// See golang.org/issue/20390.
func xposBefore(p, q src.XPos) bool {
return Ctxt.PosTable.Pos(p).Before(Ctxt.PosTable.Pos(q))
return base.Ctxt.PosTable.Pos(p).Before(base.Ctxt.PosTable.Pos(q))
}
func findScope(marks []Mark, pos src.XPos) ScopeID {
func findScope(marks []ir.Mark, pos src.XPos) ir.ScopeID {
i := sort.Search(len(marks), func(i int) bool {
return xposBefore(pos, marks[i].Pos)
})
@ -26,20 +28,20 @@ func findScope(marks []Mark, pos src.XPos) ScopeID {
return marks[i-1].Scope
}
func assembleScopes(fnsym *obj.LSym, fn *Node, dwarfVars []*dwarf.Var, varScopes []ScopeID) []dwarf.Scope {
func assembleScopes(fnsym *obj.LSym, fn ir.Node, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope {
// Initialize the DWARF scope tree based on lexical scopes.
dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func.Parents))
for i, parent := range fn.Func.Parents {
dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func().Parents))
for i, parent := range fn.Func().Parents {
dwarfScopes[i+1].Parent = int32(parent)
}
scopeVariables(dwarfVars, varScopes, dwarfScopes)
scopePCs(fnsym, fn.Func.Marks, dwarfScopes)
scopePCs(fnsym, fn.Func().Marks, dwarfScopes)
return compactScopes(dwarfScopes)
}
// scopeVariables assigns DWARF variable records to their scopes.
func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ScopeID, dwarfScopes []dwarf.Scope) {
func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ir.ScopeID, dwarfScopes []dwarf.Scope) {
sort.Stable(varsByScopeAndOffset{dwarfVars, varScopes})
i0 := 0
@ -56,7 +58,7 @@ func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ScopeID, dwarfScopes []d
}
// scopePCs assigns PC ranges to their scopes.
func scopePCs(fnsym *obj.LSym, marks []Mark, dwarfScopes []dwarf.Scope) {
func scopePCs(fnsym *obj.LSym, marks []ir.Mark, dwarfScopes []dwarf.Scope) {
// If there aren't any child scopes (in particular, when scope
// tracking is disabled), we can skip a whole lot of work.
if len(marks) == 0 {
@ -89,7 +91,7 @@ func compactScopes(dwarfScopes []dwarf.Scope) []dwarf.Scope {
type varsByScopeAndOffset struct {
vars []*dwarf.Var
scopes []ScopeID
scopes []ir.ScopeID
}
func (v varsByScopeAndOffset) Len() int {

View file

@ -4,152 +4,156 @@
package gc
import "cmd/compile/internal/types"
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
)
// select
func typecheckselect(sel *Node) {
var def *Node
func typecheckselect(sel ir.Node) {
var def ir.Node
lno := setlineno(sel)
typecheckslice(sel.Ninit.Slice(), ctxStmt)
for _, ncase := range sel.List.Slice() {
if ncase.Op != OCASE {
typecheckslice(sel.Init().Slice(), ctxStmt)
for _, ncase := range sel.List().Slice() {
if ncase.Op() != ir.OCASE {
setlineno(ncase)
Fatalf("typecheckselect %v", ncase.Op)
base.Fatalf("typecheckselect %v", ncase.Op())
}
if ncase.List.Len() == 0 {
if ncase.List().Len() == 0 {
// default
if def != nil {
yyerrorl(ncase.Pos, "multiple defaults in select (first at %v)", def.Line())
base.ErrorfAt(ncase.Pos(), "multiple defaults in select (first at %v)", ir.Line(def))
} else {
def = ncase
}
} else if ncase.List.Len() > 1 {
yyerrorl(ncase.Pos, "select cases cannot be lists")
} else if ncase.List().Len() > 1 {
base.ErrorfAt(ncase.Pos(), "select cases cannot be lists")
} else {
ncase.List.SetFirst(typecheck(ncase.List.First(), ctxStmt))
n := ncase.List.First()
ncase.Left = n
ncase.List.Set(nil)
switch n.Op {
ncase.List().SetFirst(typecheck(ncase.List().First(), ctxStmt))
n := ncase.List().First()
ncase.SetLeft(n)
ncase.PtrList().Set(nil)
switch n.Op() {
default:
pos := n.Pos
if n.Op == ONAME {
pos := n.Pos()
if n.Op() == ir.ONAME {
// We don't have the right position for ONAME nodes (see #15459 and
// others). Using ncase.Pos for now as it will provide the correct
// line number (assuming the expression follows the "case" keyword
// on the same line). This matches the approach before 1.10.
pos = ncase.Pos
pos = ncase.Pos()
}
yyerrorl(pos, "select case must be receive, send or assign recv")
base.ErrorfAt(pos, "select case must be receive, send or assign recv")
// convert x = <-c into OSELRECV(x, <-c).
// remove implicit conversions; the eventual assignment
// will reintroduce them.
case OAS:
if (n.Right.Op == OCONVNOP || n.Right.Op == OCONVIFACE) && n.Right.Implicit() {
n.Right = n.Right.Left
case ir.OAS:
if (n.Right().Op() == ir.OCONVNOP || n.Right().Op() == ir.OCONVIFACE) && n.Right().Implicit() {
n.SetRight(n.Right().Left())
}
if n.Right.Op != ORECV {
yyerrorl(n.Pos, "select assignment must have receive on right hand side")
if n.Right().Op() != ir.ORECV {
base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
break
}
n.Op = OSELRECV
n.SetOp(ir.OSELRECV)
// convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok
case OAS2RECV:
if n.Right.Op != ORECV {
yyerrorl(n.Pos, "select assignment must have receive on right hand side")
case ir.OAS2RECV:
if n.Right().Op() != ir.ORECV {
base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
break
}
n.Op = OSELRECV2
n.Left = n.List.First()
n.List.Set1(n.List.Second())
n.SetOp(ir.OSELRECV2)
n.SetLeft(n.List().First())
n.PtrList().Set1(n.List().Second())
// convert <-c into OSELRECV(N, <-c)
case ORECV:
n = nodl(n.Pos, OSELRECV, nil, n)
case ir.ORECV:
n = ir.NodAt(n.Pos(), ir.OSELRECV, nil, n)
n.SetTypecheck(1)
ncase.Left = n
ncase.SetLeft(n)
case OSEND:
case ir.OSEND:
break
}
}
typecheckslice(ncase.Nbody.Slice(), ctxStmt)
typecheckslice(ncase.Body().Slice(), ctxStmt)
}
lineno = lno
base.Pos = lno
}
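
As a reference, the case shapes this loop normalizes look like this in source (a sketch; the comments name the IR ops assigned above):

func selectForms(c chan int, v int) {
	var x int
	var ok bool
	select {
	case x = <-c: // OAS over ORECV: becomes OSELRECV(x, <-c)
		_ = x
	case x, ok = <-c: // OAS2RECV: becomes OSELRECV2, ok kept in List
		_, _ = x, ok
	case <-c: // bare ORECV: wrapped as OSELRECV(nil, <-c)
	case c <- v: // OSEND: already in the shape walkselect expects
	}
}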
func walkselect(sel *Node) {
func walkselect(sel ir.Node) {
lno := setlineno(sel)
if sel.Nbody.Len() != 0 {
Fatalf("double walkselect")
if sel.Body().Len() != 0 {
base.Fatalf("double walkselect")
}
init := sel.Ninit.Slice()
sel.Ninit.Set(nil)
init := sel.Init().Slice()
sel.PtrInit().Set(nil)
init = append(init, walkselectcases(&sel.List)...)
sel.List.Set(nil)
init = append(init, walkselectcases(sel.PtrList())...)
sel.PtrList().Set(nil)
sel.Nbody.Set(init)
walkstmtlist(sel.Nbody.Slice())
sel.PtrBody().Set(init)
walkstmtlist(sel.Body().Slice())
lineno = lno
base.Pos = lno
}
func walkselectcases(cases *Nodes) []*Node {
func walkselectcases(cases *ir.Nodes) []ir.Node {
ncas := cases.Len()
sellineno := lineno
sellineno := base.Pos
// optimization: zero-case select
if ncas == 0 {
return []*Node{mkcall("block", nil, nil)}
return []ir.Node{mkcall("block", nil, nil)}
}
// optimization: one-case select: single op.
if ncas == 1 {
cas := cases.First()
setlineno(cas)
l := cas.Ninit.Slice()
if cas.Left != nil { // not default:
n := cas.Left
l = append(l, n.Ninit.Slice()...)
n.Ninit.Set(nil)
switch n.Op {
l := cas.Init().Slice()
if cas.Left() != nil { // not default:
n := cas.Left()
l = append(l, n.Init().Slice()...)
n.PtrInit().Set(nil)
switch n.Op() {
default:
Fatalf("select %v", n.Op)
base.Fatalf("select %v", n.Op())
case OSEND:
case ir.OSEND:
// already ok
case OSELRECV, OSELRECV2:
if n.Op == OSELRECV || n.List.Len() == 0 {
if n.Left == nil {
n = n.Right
case ir.OSELRECV, ir.OSELRECV2:
if n.Op() == ir.OSELRECV || n.List().Len() == 0 {
if n.Left() == nil {
n = n.Right()
} else {
n.Op = OAS
n.SetOp(ir.OAS)
}
break
}
if n.Left == nil {
nblank = typecheck(nblank, ctxExpr|ctxAssign)
n.Left = nblank
if n.Left() == nil {
ir.BlankNode = typecheck(ir.BlankNode, ctxExpr|ctxAssign)
n.SetLeft(ir.BlankNode)
}
n.Op = OAS2
n.List.Prepend(n.Left)
n.Rlist.Set1(n.Right)
n.Right = nil
n.Left = nil
n.SetOp(ir.OAS2)
n.PtrList().Prepend(n.Left())
n.PtrRlist().Set1(n.Right())
n.SetRight(nil)
n.SetLeft(nil)
n.SetTypecheck(0)
n = typecheck(n, ctxStmt)
}
@ -157,34 +161,34 @@ func walkselectcases(cases *Nodes) []*Node {
l = append(l, n)
}
l = append(l, cas.Nbody.Slice()...)
l = append(l, nod(OBREAK, nil, nil))
l = append(l, cas.Body().Slice()...)
l = append(l, ir.Nod(ir.OBREAK, nil, nil))
return l
}
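
Concretely, the one-case path means a blocking single-case select compiles like the bare statement it wraps (a sketch):

// oneCase never reaches selectgo: walkselectcases rewrites the select
// into the plain receive plus the case body, followed by an OBREAK.
func oneCase(c chan int) int {
	select {
	case v := <-c:
		return v
	}
}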
// convert case value arguments to addresses.
// this rewrite is used by both the general code and the next optimization.
var dflt *Node
var dflt ir.Node
for _, cas := range cases.Slice() {
setlineno(cas)
n := cas.Left
n := cas.Left()
if n == nil {
dflt = cas
continue
}
switch n.Op {
case OSEND:
n.Right = nod(OADDR, n.Right, nil)
n.Right = typecheck(n.Right, ctxExpr)
switch n.Op() {
case ir.OSEND:
n.SetRight(ir.Nod(ir.OADDR, n.Right(), nil))
n.SetRight(typecheck(n.Right(), ctxExpr))
case OSELRECV, OSELRECV2:
if n.Op == OSELRECV2 && n.List.Len() == 0 {
n.Op = OSELRECV
case ir.OSELRECV, ir.OSELRECV2:
if n.Op() == ir.OSELRECV2 && n.List().Len() == 0 {
n.SetOp(ir.OSELRECV)
}
if n.Left != nil {
n.Left = nod(OADDR, n.Left, nil)
n.Left = typecheck(n.Left, ctxExpr)
if n.Left() != nil {
n.SetLeft(ir.Nod(ir.OADDR, n.Left(), nil))
n.SetLeft(typecheck(n.Left(), ctxExpr))
}
}
}
@ -196,68 +200,68 @@ func walkselectcases(cases *Nodes) []*Node {
cas = cases.Second()
}
n := cas.Left
n := cas.Left()
setlineno(n)
r := nod(OIF, nil, nil)
r.Ninit.Set(cas.Ninit.Slice())
switch n.Op {
r := ir.Nod(ir.OIF, nil, nil)
r.PtrInit().Set(cas.Init().Slice())
switch n.Op() {
default:
Fatalf("select %v", n.Op)
base.Fatalf("select %v", n.Op())
case OSEND:
case ir.OSEND:
// if selectnbsend(c, v) { body } else { default body }
ch := n.Left
r.Left = mkcall1(chanfn("selectnbsend", 2, ch.Type), types.Types[TBOOL], &r.Ninit, ch, n.Right)
ch := n.Left()
r.SetLeft(mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Right()))
case OSELRECV:
case ir.OSELRECV:
// if selectnbrecv(&v, c) { body } else { default body }
ch := n.Right.Left
elem := n.Left
ch := n.Right().Left()
elem := n.Left()
if elem == nil {
elem = nodnil()
}
r.Left = mkcall1(chanfn("selectnbrecv", 2, ch.Type), types.Types[TBOOL], &r.Ninit, elem, ch)
r.SetLeft(mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch))
case OSELRECV2:
case ir.OSELRECV2:
// if selectnbrecv2(&v, &received, c) { body } else { default body }
ch := n.Right.Left
elem := n.Left
ch := n.Right().Left()
elem := n.Left()
if elem == nil {
elem = nodnil()
}
receivedp := nod(OADDR, n.List.First(), nil)
receivedp := ir.Nod(ir.OADDR, n.List().First(), nil)
receivedp = typecheck(receivedp, ctxExpr)
r.Left = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), types.Types[TBOOL], &r.Ninit, elem, receivedp, ch)
r.SetLeft(mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch))
}
r.Left = typecheck(r.Left, ctxExpr)
r.Nbody.Set(cas.Nbody.Slice())
r.Rlist.Set(append(dflt.Ninit.Slice(), dflt.Nbody.Slice()...))
return []*Node{r, nod(OBREAK, nil, nil)}
r.SetLeft(typecheck(r.Left(), ctxExpr))
r.PtrBody().Set(cas.Body().Slice())
r.PtrRlist().Set(append(dflt.Init().Slice(), dflt.Body().Slice()...))
return []ir.Node{r, ir.Nod(ir.OBREAK, nil, nil)}
}
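
To make the two-case rewrite concrete, here is a stand-in with the same semantics as the runtime helper (the real selectnbsend lives in package runtime and is reached via mkcall1), plus the branch shape generated above:

// selectnbsend mimics the runtime helper: try a non-blocking send of
// *v on c and report whether it succeeded.
func selectnbsend(c chan int, v *int) bool {
	select {
	case c <- *v:
		return true
	default:
		return false
	}
}

// lowered is what "select { case c <- v: onSent(); default: onFull() }"
// turns into after this rewrite.
func lowered(c chan int, v int, onSent, onFull func()) {
	if selectnbsend(c, &v) {
		onSent()
	} else {
		onFull()
	}
}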
if dflt != nil {
ncas--
}
casorder := make([]*Node, ncas)
casorder := make([]ir.Node, ncas)
nsends, nrecvs := 0, 0
var init []*Node
var init []ir.Node
// generate sel-struct
lineno = sellineno
base.Pos = sellineno
selv := temp(types.NewArray(scasetype(), int64(ncas)))
r := nod(OAS, selv, nil)
r := ir.Nod(ir.OAS, selv, nil)
r = typecheck(r, ctxStmt)
init = append(init, r)
// No initialization for order; runtime.selectgo is responsible for that.
order := temp(types.NewArray(types.Types[TUINT16], 2*int64(ncas)))
order := temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
var pc0, pcs *Node
if flag_race {
pcs = temp(types.NewArray(types.Types[TUINTPTR], int64(ncas)))
pc0 = typecheck(nod(OADDR, nod(OINDEX, pcs, nodintconst(0)), nil), ctxExpr)
var pc0, pcs ir.Node
if base.Flag.Race {
pcs = temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
pc0 = typecheck(ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, pcs, nodintconst(0)), nil), ctxExpr)
} else {
pc0 = nodnil()
}
@ -266,109 +270,109 @@ func walkselectcases(cases *Nodes) []*Node {
for _, cas := range cases.Slice() {
setlineno(cas)
init = append(init, cas.Ninit.Slice()...)
cas.Ninit.Set(nil)
init = append(init, cas.Init().Slice()...)
cas.PtrInit().Set(nil)
n := cas.Left
n := cas.Left()
if n == nil { // default:
continue
}
var i int
var c, elem *Node
switch n.Op {
var c, elem ir.Node
switch n.Op() {
default:
Fatalf("select %v", n.Op)
case OSEND:
base.Fatalf("select %v", n.Op())
case ir.OSEND:
i = nsends
nsends++
c = n.Left
elem = n.Right
case OSELRECV, OSELRECV2:
c = n.Left()
elem = n.Right()
case ir.OSELRECV, ir.OSELRECV2:
nrecvs++
i = ncas - nrecvs
c = n.Right.Left
elem = n.Left
c = n.Right().Left()
elem = n.Left()
}
casorder[i] = cas
setField := func(f string, val *Node) {
r := nod(OAS, nodSym(ODOT, nod(OINDEX, selv, nodintconst(int64(i))), lookup(f)), val)
setField := func(f string, val ir.Node) {
r := ir.Nod(ir.OAS, nodSym(ir.ODOT, ir.Nod(ir.OINDEX, selv, nodintconst(int64(i))), lookup(f)), val)
r = typecheck(r, ctxStmt)
init = append(init, r)
}
c = convnop(c, types.Types[TUNSAFEPTR])
c = convnop(c, types.Types[types.TUNSAFEPTR])
setField("c", c)
if elem != nil {
elem = convnop(elem, types.Types[TUNSAFEPTR])
elem = convnop(elem, types.Types[types.TUNSAFEPTR])
setField("elem", elem)
}
// TODO(mdempsky): There should be a cleaner way to
// handle this.
if flag_race {
r = mkcall("selectsetpc", nil, nil, nod(OADDR, nod(OINDEX, pcs, nodintconst(int64(i))), nil))
if base.Flag.Race {
r = mkcall("selectsetpc", nil, nil, ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, pcs, nodintconst(int64(i))), nil))
init = append(init, r)
}
}
if nsends+nrecvs != ncas {
Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
base.Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
}
// run the select
lineno = sellineno
chosen := temp(types.Types[TINT])
recvOK := temp(types.Types[TBOOL])
r = nod(OAS2, nil, nil)
r.List.Set2(chosen, recvOK)
base.Pos = sellineno
chosen := temp(types.Types[types.TINT])
recvOK := temp(types.Types[types.TBOOL])
r = ir.Nod(ir.OAS2, nil, nil)
r.PtrList().Set2(chosen, recvOK)
fn := syslook("selectgo")
r.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil)))
r.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil)))
r = typecheck(r, ctxStmt)
init = append(init, r)
// selv and order are no longer alive after selectgo.
init = append(init, nod(OVARKILL, selv, nil))
init = append(init, nod(OVARKILL, order, nil))
if flag_race {
init = append(init, nod(OVARKILL, pcs, nil))
init = append(init, ir.Nod(ir.OVARKILL, selv, nil))
init = append(init, ir.Nod(ir.OVARKILL, order, nil))
if base.Flag.Race {
init = append(init, ir.Nod(ir.OVARKILL, pcs, nil))
}
// dispatch cases
dispatch := func(cond, cas *Node) {
dispatch := func(cond, cas ir.Node) {
cond = typecheck(cond, ctxExpr)
cond = defaultlit(cond, nil)
r := nod(OIF, cond, nil)
r := ir.Nod(ir.OIF, cond, nil)
if n := cas.Left; n != nil && n.Op == OSELRECV2 {
x := nod(OAS, n.List.First(), recvOK)
if n := cas.Left(); n != nil && n.Op() == ir.OSELRECV2 {
x := ir.Nod(ir.OAS, n.List().First(), recvOK)
x = typecheck(x, ctxStmt)
r.Nbody.Append(x)
r.PtrBody().Append(x)
}
r.Nbody.AppendNodes(&cas.Nbody)
r.Nbody.Append(nod(OBREAK, nil, nil))
r.PtrBody().AppendNodes(cas.PtrBody())
r.PtrBody().Append(ir.Nod(ir.OBREAK, nil, nil))
init = append(init, r)
}
if dflt != nil {
setlineno(dflt)
dispatch(nod(OLT, chosen, nodintconst(0)), dflt)
dispatch(ir.Nod(ir.OLT, chosen, nodintconst(0)), dflt)
}
for i, cas := range casorder {
setlineno(cas)
dispatch(nod(OEQ, chosen, nodintconst(int64(i))), cas)
dispatch(ir.Nod(ir.OEQ, chosen, nodintconst(int64(i))), cas)
}
return init
}
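
In outline, the general path assembled above behaves like the following pseudocode (Go syntax; selv, order, chosen and recvOK are the hidden temporaries, and the final argument tells selectgo whether to block):

//	var selv [ncas]scase       // c/elem filled per case, sends first
//	var order [2 * ncas]uint16 // selectgo's scratch space
//	chosen, recvOK := selectgo(&selv[0], &order[0], pc0, nsends, nrecvs, dflt == nil)
//	// selv, order (and pcs under -race) are dead here: OVARKILL
//	if chosen < 0 { /* default body */ }
//	if chosen == 0 { /* case 0 body; recvOK carries the comma-ok */ }
//	// ...one OIF per remaining case, each ending in OBREAK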
// bytePtrToIndex returns a Node representing "(*byte)(&n[i])".
func bytePtrToIndex(n *Node, i int64) *Node {
s := nod(OADDR, nod(OINDEX, n, nodintconst(i)), nil)
t := types.NewPtr(types.Types[TUINT8])
func bytePtrToIndex(n ir.Node, i int64) ir.Node {
s := ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, n, nodintconst(i)), nil)
t := types.NewPtr(types.Types[types.TUINT8])
return convnop(s, t)
}
@ -377,9 +381,9 @@ var scase *types.Type
// Keep in sync with src/runtime/select.go.
func scasetype() *types.Type {
if scase == nil {
scase = tostruct([]*Node{
namedfield("c", types.Types[TUNSAFEPTR]),
namedfield("elem", types.Types[TUNSAFEPTR]),
scase = tostruct([]ir.Node{
namedfield("c", types.Types[types.TUNSAFEPTR]),
namedfield("elem", types.Types[types.TUNSAFEPTR]),
})
scase.SetNoalg(true)
}
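
For reference, the runtime-side definition this mirrors as of this change (src/runtime/select.go):

type scase struct {
	c    *hchan         // chan
	elem unsafe.Pointer // data element
}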

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -5,43 +5,47 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
"go/constant"
"go/token"
"sort"
)
// typecheckswitch typechecks a switch statement.
func typecheckswitch(n *Node) {
typecheckslice(n.Ninit.Slice(), ctxStmt)
if n.Left != nil && n.Left.Op == OTYPESW {
func typecheckswitch(n ir.Node) {
typecheckslice(n.Init().Slice(), ctxStmt)
if n.Left() != nil && n.Left().Op() == ir.OTYPESW {
typecheckTypeSwitch(n)
} else {
typecheckExprSwitch(n)
}
}
func typecheckTypeSwitch(n *Node) {
n.Left.Right = typecheck(n.Left.Right, ctxExpr)
t := n.Left.Right.Type
func typecheckTypeSwitch(n ir.Node) {
n.Left().SetRight(typecheck(n.Left().Right(), ctxExpr))
t := n.Left().Right().Type()
if t != nil && !t.IsInterface() {
yyerrorl(n.Pos, "cannot type switch on non-interface value %L", n.Left.Right)
base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", n.Left().Right())
t = nil
}
// We don't actually declare the type switch's guarded
// declaration itself. So if there are no cases, we won't
// notice that it went unused.
if v := n.Left.Left; v != nil && !v.isBlank() && n.List.Len() == 0 {
yyerrorl(v.Pos, "%v declared but not used", v.Sym)
if v := n.Left().Left(); v != nil && !ir.IsBlank(v) && n.List().Len() == 0 {
base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym())
}
var defCase, nilCase *Node
var defCase, nilCase ir.Node
var ts typeSet
for _, ncase := range n.List.Slice() {
ls := ncase.List.Slice()
for _, ncase := range n.List().Slice() {
ls := ncase.List().Slice()
if len(ls) == 0 { // default:
if defCase != nil {
yyerrorl(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line())
base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
} else {
defCase = ncase
}
@ -50,65 +54,65 @@ func typecheckTypeSwitch(n *Node) {
for i := range ls {
ls[i] = typecheck(ls[i], ctxExpr|ctxType)
n1 := ls[i]
if t == nil || n1.Type == nil {
if t == nil || n1.Type() == nil {
continue
}
var missing, have *types.Field
var ptr int
switch {
case n1.isNil(): // case nil:
case ir.IsNil(n1): // case nil:
if nilCase != nil {
yyerrorl(ncase.Pos, "multiple nil cases in type switch (first at %v)", nilCase.Line())
base.ErrorfAt(ncase.Pos(), "multiple nil cases in type switch (first at %v)", ir.Line(nilCase))
} else {
nilCase = ncase
}
case n1.Op != OTYPE:
yyerrorl(ncase.Pos, "%L is not a type", n1)
case !n1.Type.IsInterface() && !implements(n1.Type, t, &missing, &have, &ptr) && !missing.Broke():
case n1.Op() != ir.OTYPE:
base.ErrorfAt(ncase.Pos(), "%L is not a type", n1)
case !n1.Type().IsInterface() && !implements(n1.Type(), t, &missing, &have, &ptr) && !missing.Broke():
if have != nil && !have.Broke() {
yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
" (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left.Right, n1.Type, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
" (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left().Right(), n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
} else if ptr != 0 {
yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
" (%v method has pointer receiver)", n.Left.Right, n1.Type, missing.Sym)
base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
" (%v method has pointer receiver)", n.Left().Right(), n1.Type(), missing.Sym)
} else {
yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
" (missing %v method)", n.Left.Right, n1.Type, missing.Sym)
base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
" (missing %v method)", n.Left().Right(), n1.Type(), missing.Sym)
}
}
if n1.Op == OTYPE {
ts.add(ncase.Pos, n1.Type)
if n1.Op() == ir.OTYPE {
ts.add(ncase.Pos(), n1.Type())
}
}
if ncase.Rlist.Len() != 0 {
if ncase.Rlist().Len() != 0 {
// Assign the clause variable's type.
vt := t
if len(ls) == 1 {
if ls[0].Op == OTYPE {
vt = ls[0].Type
} else if ls[0].Op != OLITERAL { // TODO(mdempsky): Should be !ls[0].isNil()
if ls[0].Op() == ir.OTYPE {
vt = ls[0].Type()
} else if !ir.IsNil(ls[0]) {
// Invalid single-type case;
// mark variable as broken.
vt = nil
}
}
// TODO(mdempsky): It should be possible to
// still typecheck the case body.
if vt == nil {
continue
}
nvar := ncase.Rlist.First()
nvar.Type = vt
nvar := ncase.Rlist().First()
nvar.SetType(vt)
if vt != nil {
nvar = typecheck(nvar, ctxExpr|ctxAssign)
ncase.Rlist.SetFirst(nvar)
} else {
// Clause variable is broken; prevent typechecking.
nvar.SetTypecheck(1)
nvar.SetWalkdef(1)
}
ncase.Rlist().SetFirst(nvar)
}
typecheckslice(ncase.Nbody.Slice(), ctxStmt)
typecheckslice(ncase.Body().Slice(), ctxStmt)
}
}
@ -133,19 +137,19 @@ func (s *typeSet) add(pos src.XPos, typ *types.Type) {
prevs := s.m[ls]
for _, prev := range prevs {
if types.Identical(typ, prev.typ) {
yyerrorl(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, linestr(prev.pos))
base.ErrorfAt(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev.pos))
return
}
}
s.m[ls] = append(prevs, typeSetEntry{pos, typ})
}
func typecheckExprSwitch(n *Node) {
t := types.Types[TBOOL]
if n.Left != nil {
n.Left = typecheck(n.Left, ctxExpr)
n.Left = defaultlit(n.Left, nil)
t = n.Left.Type
func typecheckExprSwitch(n ir.Node) {
t := types.Types[types.TBOOL]
if n.Left() != nil {
n.SetLeft(typecheck(n.Left(), ctxExpr))
n.SetLeft(defaultlit(n.Left(), nil))
t = n.Left().Type()
}
var nilonly string
@ -153,28 +157,28 @@ func typecheckExprSwitch(n *Node) {
switch {
case t.IsMap():
nilonly = "map"
case t.Etype == TFUNC:
case t.Etype == types.TFUNC:
nilonly = "func"
case t.IsSlice():
nilonly = "slice"
case !IsComparable(t):
if t.IsStruct() {
yyerrorl(n.Pos, "cannot switch on %L (struct containing %v cannot be compared)", n.Left, IncomparableField(t).Type)
base.ErrorfAt(n.Pos(), "cannot switch on %L (struct containing %v cannot be compared)", n.Left(), IncomparableField(t).Type)
} else {
yyerrorl(n.Pos, "cannot switch on %L", n.Left)
base.ErrorfAt(n.Pos(), "cannot switch on %L", n.Left())
}
t = nil
}
}
var defCase *Node
var defCase ir.Node
var cs constSet
for _, ncase := range n.List.Slice() {
ls := ncase.List.Slice()
for _, ncase := range n.List().Slice() {
ls := ncase.List().Slice()
if len(ls) == 0 { // default:
if defCase != nil {
yyerrorl(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line())
base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
} else {
defCase = ncase
}
@ -185,22 +189,22 @@ func typecheckExprSwitch(n *Node) {
ls[i] = typecheck(ls[i], ctxExpr)
ls[i] = defaultlit(ls[i], t)
n1 := ls[i]
if t == nil || n1.Type == nil {
if t == nil || n1.Type() == nil {
continue
}
if nilonly != "" && !n1.isNil() {
yyerrorl(ncase.Pos, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left)
} else if t.IsInterface() && !n1.Type.IsInterface() && !IsComparable(n1.Type) {
yyerrorl(ncase.Pos, "invalid case %L in switch (incomparable type)", n1)
if nilonly != "" && !ir.IsNil(n1) {
base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left())
} else if t.IsInterface() && !n1.Type().IsInterface() && !IsComparable(n1.Type()) {
base.ErrorfAt(ncase.Pos(), "invalid case %L in switch (incomparable type)", n1)
} else {
op1, _ := assignop(n1.Type, t)
op2, _ := assignop(t, n1.Type)
if op1 == OXXX && op2 == OXXX {
if n.Left != nil {
yyerrorl(ncase.Pos, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t)
op1, _ := assignop(n1.Type(), t)
op2, _ := assignop(t, n1.Type())
if op1 == ir.OXXX && op2 == ir.OXXX {
if n.Left() != nil {
base.ErrorfAt(ncase.Pos(), "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left(), n1.Type(), t)
} else {
yyerrorl(ncase.Pos, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type)
base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type())
}
}
}
@ -211,23 +215,23 @@ func typecheckExprSwitch(n *Node) {
// case GOARCH == "arm" && GOARM == "5":
// case GOARCH == "arm":
// which would both evaluate to false for non-ARM compiles.
if !n1.Type.IsBoolean() {
cs.add(ncase.Pos, n1, "case", "switch")
if !n1.Type().IsBoolean() {
cs.add(ncase.Pos(), n1, "case", "switch")
}
}
typecheckslice(ncase.Nbody.Slice(), ctxStmt)
typecheckslice(ncase.Body().Slice(), ctxStmt)
}
}
// walkswitch walks a switch statement.
func walkswitch(sw *Node) {
func walkswitch(sw ir.Node) {
// Guard against double walk, see #25776.
if sw.List.Len() == 0 && sw.Nbody.Len() > 0 {
if sw.List().Len() == 0 && sw.Body().Len() > 0 {
return // Was fatal, but eliminating every possible source of double-walking is hard
}
if sw.Left != nil && sw.Left.Op == OTYPESW {
if sw.Left() != nil && sw.Left().Op() == ir.OTYPESW {
walkTypeSwitch(sw)
} else {
walkExprSwitch(sw)
@ -236,11 +240,11 @@ func walkswitch(sw *Node) {
// walkExprSwitch generates an AST implementing sw. sw is an
// expression switch.
func walkExprSwitch(sw *Node) {
func walkExprSwitch(sw ir.Node) {
lno := setlineno(sw)
cond := sw.Left
sw.Left = nil
cond := sw.Left()
sw.SetLeft(nil)
// convert switch {...} to switch true {...}
if cond == nil {
@ -256,79 +260,79 @@ func walkExprSwitch(sw *Node) {
// because walkexpr will lower the string
// conversion into a runtime call.
// See issue 24937 for more discussion.
if cond.Op == OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
cond.Op = OBYTES2STRTMP
if cond.Op() == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
cond.SetOp(ir.OBYTES2STRTMP)
}
cond = walkexpr(cond, &sw.Ninit)
if cond.Op != OLITERAL {
cond = copyexpr(cond, cond.Type, &sw.Nbody)
cond = walkexpr(cond, sw.PtrInit())
if cond.Op() != ir.OLITERAL && cond.Op() != ir.ONIL {
cond = copyexpr(cond, cond.Type(), sw.PtrBody())
}
lineno = lno
base.Pos = lno
s := exprSwitch{
exprname: cond,
}
var defaultGoto *Node
var body Nodes
for _, ncase := range sw.List.Slice() {
var defaultGoto ir.Node
var body ir.Nodes
for _, ncase := range sw.List().Slice() {
label := autolabel(".s")
jmp := npos(ncase.Pos, nodSym(OGOTO, nil, label))
jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label))
// Process case dispatch.
if ncase.List.Len() == 0 {
if ncase.List().Len() == 0 {
if defaultGoto != nil {
Fatalf("duplicate default case not detected during typechecking")
base.Fatalf("duplicate default case not detected during typechecking")
}
defaultGoto = jmp
}
for _, n1 := range ncase.List.Slice() {
s.Add(ncase.Pos, n1, jmp)
for _, n1 := range ncase.List().Slice() {
s.Add(ncase.Pos(), n1, jmp)
}
// Process body.
body.Append(npos(ncase.Pos, nodSym(OLABEL, nil, label)))
body.Append(ncase.Nbody.Slice()...)
if fall, pos := hasFall(ncase.Nbody.Slice()); !fall {
br := nod(OBREAK, nil, nil)
br.Pos = pos
body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label)))
body.Append(ncase.Body().Slice()...)
if fall, pos := hasFall(ncase.Body().Slice()); !fall {
br := ir.Nod(ir.OBREAK, nil, nil)
br.SetPos(pos)
body.Append(br)
}
}
sw.List.Set(nil)
sw.PtrList().Set(nil)
if defaultGoto == nil {
br := nod(OBREAK, nil, nil)
br.Pos = br.Pos.WithNotStmt()
br := ir.Nod(ir.OBREAK, nil, nil)
br.SetPos(br.Pos().WithNotStmt())
defaultGoto = br
}
s.Emit(&sw.Nbody)
sw.Nbody.Append(defaultGoto)
sw.Nbody.AppendNodes(&body)
walkstmtlist(sw.Nbody.Slice())
s.Emit(sw.PtrBody())
sw.PtrBody().Append(defaultGoto)
sw.PtrBody().AppendNodes(&body)
walkstmtlist(sw.Body().Slice())
}
// An exprSwitch walks an expression switch.
type exprSwitch struct {
exprname *Node // value being switched on
exprname ir.Node // value being switched on
done Nodes
done ir.Nodes
clauses []exprClause
}
type exprClause struct {
pos src.XPos
lo, hi *Node
jmp *Node
lo, hi ir.Node
jmp ir.Node
}
func (s *exprSwitch) Add(pos src.XPos, expr, jmp *Node) {
func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) {
c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp}
if okforcmp[s.exprname.Type.Etype] && expr.Op == OLITERAL {
if okforcmp[s.exprname.Type().Etype] && expr.Op() == ir.OLITERAL {
s.clauses = append(s.clauses, c)
return
}
@ -338,7 +342,7 @@ func (s *exprSwitch) Add(pos src.XPos, expr, jmp *Node) {
s.flush()
}
func (s *exprSwitch) Emit(out *Nodes) {
func (s *exprSwitch) Emit(out *ir.Nodes) {
s.flush()
out.AppendNodes(&s.done)
}
@ -355,7 +359,7 @@ func (s *exprSwitch) flush() {
// (e.g., sort.Slice doesn't need to invoke the less function
// when there's only a single slice element).
if s.exprname.Type.IsString() && len(cc) >= 2 {
if s.exprname.Type().IsString() && len(cc) >= 2 {
// Sort strings by length and then by value. It is
// much cheaper to compare lengths than values, and
// all we need here is consistency. We respect this
@ -385,26 +389,25 @@ func (s *exprSwitch) flush() {
runs = append(runs, cc[start:])
// Perform two-level binary search.
nlen := nod(OLEN, s.exprname, nil)
binarySearch(len(runs), &s.done,
func(i int) *Node {
return nod(OLE, nlen, nodintconst(runLen(runs[i-1])))
func(i int) ir.Node {
return ir.Nod(ir.OLE, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(runs[i-1])))
},
func(i int, nif *Node) {
func(i int, nif ir.Node) {
run := runs[i]
nif.Left = nod(OEQ, nlen, nodintconst(runLen(run)))
s.search(run, &nif.Nbody)
nif.SetLeft(ir.Nod(ir.OEQ, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(run))))
s.search(run, nif.PtrBody())
},
)
return
}
sort.Slice(cc, func(i, j int) bool {
return compareOp(cc[i].lo.Val(), OLT, cc[j].lo.Val())
return constant.Compare(cc[i].lo.Val(), token.LSS, cc[j].lo.Val())
})
// Merge consecutive integer cases.
if s.exprname.Type.IsInteger() {
if s.exprname.Type().IsInteger() {
merged := cc[:1]
for _, c := range cc[1:] {
last := &merged[len(merged)-1]
@ -420,40 +423,40 @@ func (s *exprSwitch) flush() {
s.search(cc, &s.done)
}
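
A Go-flavored sketch of the dispatch this produces for a string switch; cases are grouped by length so the cheap len(s) comparison resolves first and value comparisons only run within one run:

//	switch s {
//	case "ab":  ... // lab1
//	case "cd":  ... // lab2
//	case "xyz": ... // lab3
//	}
//
// compiles roughly to:
//
//	if len(s) == 2 {
//		if s == "ab" { goto lab1 }
//		if s == "cd" { goto lab2 }
//	} else if len(s) == 3 {
//		if s == "xyz" { goto lab3 }
//	}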
func (s *exprSwitch) search(cc []exprClause, out *Nodes) {
func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) {
binarySearch(len(cc), out,
func(i int) *Node {
return nod(OLE, s.exprname, cc[i-1].hi)
func(i int) ir.Node {
return ir.Nod(ir.OLE, s.exprname, cc[i-1].hi)
},
func(i int, nif *Node) {
func(i int, nif ir.Node) {
c := &cc[i]
nif.Left = c.test(s.exprname)
nif.Nbody.Set1(c.jmp)
nif.SetLeft(c.test(s.exprname))
nif.PtrBody().Set1(c.jmp)
},
)
}
func (c *exprClause) test(exprname *Node) *Node {
func (c *exprClause) test(exprname ir.Node) ir.Node {
// Integer range.
if c.hi != c.lo {
low := nodl(c.pos, OGE, exprname, c.lo)
high := nodl(c.pos, OLE, exprname, c.hi)
return nodl(c.pos, OANDAND, low, high)
low := ir.NodAt(c.pos, ir.OGE, exprname, c.lo)
high := ir.NodAt(c.pos, ir.OLE, exprname, c.hi)
return ir.NodAt(c.pos, ir.OANDAND, low, high)
}
// Optimize "switch true { ...}" and "switch false { ... }".
if Isconst(exprname, CTBOOL) && !c.lo.Type.IsInterface() {
if ir.IsConst(exprname, constant.Bool) && !c.lo.Type().IsInterface() {
if exprname.BoolVal() {
return c.lo
} else {
return nodl(c.pos, ONOT, c.lo, nil)
return ir.NodAt(c.pos, ir.ONOT, c.lo, nil)
}
}
return nodl(c.pos, OEQ, exprname, c.lo)
return ir.NodAt(c.pos, ir.OEQ, exprname, c.lo)
}
func allCaseExprsAreSideEffectFree(sw *Node) bool {
func allCaseExprsAreSideEffectFree(sw ir.Node) bool {
// In theory, we could be more aggressive, allowing any
// side-effect-free expressions in cases, but it's a bit
// tricky because some of that information is unavailable due
@ -461,12 +464,12 @@ func allCaseExprsAreSideEffectFree(sw *Node) bool {
// Restricting to constants is simple and probably powerful
// enough.
for _, ncase := range sw.List.Slice() {
if ncase.Op != OCASE {
Fatalf("switch string(byteslice) bad op: %v", ncase.Op)
for _, ncase := range sw.List().Slice() {
if ncase.Op() != ir.OCASE {
base.Fatalf("switch string(byteslice) bad op: %v", ncase.Op())
}
for _, v := range ncase.List.Slice() {
if v.Op != OLITERAL {
for _, v := range ncase.List().Slice() {
if v.Op() != ir.OLITERAL {
return false
}
}
@ -475,7 +478,7 @@ func allCaseExprsAreSideEffectFree(sw *Node) bool {
}
// hasFall reports whether stmts ends with a "fallthrough" statement.
func hasFall(stmts []*Node) (bool, src.XPos) {
func hasFall(stmts []ir.Node) (bool, src.XPos) {
// Search backwards for the index of the fallthrough
// statement. Do not assume it'll be in the last
// position, since in some cases (e.g. when the statement
@ -483,30 +486,30 @@ func hasFall(stmts []*Node) (bool, src.XPos) {
// nodes will be at the end of the list.
i := len(stmts) - 1
for i >= 0 && stmts[i].Op == OVARKILL {
for i >= 0 && stmts[i].Op() == ir.OVARKILL {
i--
}
if i < 0 {
return false, src.NoXPos
}
return stmts[i].Op == OFALL, stmts[i].Pos
return stmts[i].Op() == ir.OFALL, stmts[i].Pos()
}
// walkTypeSwitch generates an AST that implements sw, where sw is a
// type switch.
func walkTypeSwitch(sw *Node) {
func walkTypeSwitch(sw ir.Node) {
var s typeSwitch
s.facename = sw.Left.Right
sw.Left = nil
s.facename = sw.Left().Right()
sw.SetLeft(nil)
s.facename = walkexpr(s.facename, &sw.Ninit)
s.facename = copyexpr(s.facename, s.facename.Type, &sw.Nbody)
s.okname = temp(types.Types[TBOOL])
s.facename = walkexpr(s.facename, sw.PtrInit())
s.facename = copyexpr(s.facename, s.facename.Type(), sw.PtrBody())
s.okname = temp(types.Types[types.TBOOL])
// Get interface descriptor word.
// For empty interfaces this will be the type.
// For non-empty interfaces this will be the itab.
itab := nod(OITAB, s.facename, nil)
itab := ir.Nod(ir.OITAB, s.facename, nil)
// For empty interfaces, do:
// if e._type == nil {
@ -514,92 +517,92 @@ func walkTypeSwitch(sw *Node) {
// }
// h := e._type.hash
// Use a similar strategy for non-empty interfaces.
ifNil := nod(OIF, nil, nil)
ifNil.Left = nod(OEQ, itab, nodnil())
lineno = lineno.WithNotStmt() // disable statement marks after the first check.
ifNil.Left = typecheck(ifNil.Left, ctxExpr)
ifNil.Left = defaultlit(ifNil.Left, nil)
ifNil := ir.Nod(ir.OIF, nil, nil)
ifNil.SetLeft(ir.Nod(ir.OEQ, itab, nodnil()))
base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check.
ifNil.SetLeft(typecheck(ifNil.Left(), ctxExpr))
ifNil.SetLeft(defaultlit(ifNil.Left(), nil))
// ifNil.Nbody assigned at end.
sw.Nbody.Append(ifNil)
sw.PtrBody().Append(ifNil)
// Load hash from type or itab.
dotHash := nodSym(ODOTPTR, itab, nil)
dotHash.Type = types.Types[TUINT32]
dotHash := nodSym(ir.ODOTPTR, itab, nil)
dotHash.SetType(types.Types[types.TUINT32])
dotHash.SetTypecheck(1)
if s.facename.Type.IsEmptyInterface() {
dotHash.Xoffset = int64(2 * Widthptr) // offset of hash in runtime._type
if s.facename.Type().IsEmptyInterface() {
dotHash.SetOffset(int64(2 * Widthptr)) // offset of hash in runtime._type
} else {
dotHash.Xoffset = int64(2 * Widthptr) // offset of hash in runtime.itab
dotHash.SetOffset(int64(2 * Widthptr)) // offset of hash in runtime.itab
}
dotHash.SetBounded(true) // guaranteed not to fault
s.hashname = copyexpr(dotHash, dotHash.Type, &sw.Nbody)
s.hashname = copyexpr(dotHash, dotHash.Type(), sw.PtrBody())
br := nod(OBREAK, nil, nil)
var defaultGoto, nilGoto *Node
var body Nodes
for _, ncase := range sw.List.Slice() {
var caseVar *Node
if ncase.Rlist.Len() != 0 {
caseVar = ncase.Rlist.First()
br := ir.Nod(ir.OBREAK, nil, nil)
var defaultGoto, nilGoto ir.Node
var body ir.Nodes
for _, ncase := range sw.List().Slice() {
var caseVar ir.Node
if ncase.Rlist().Len() != 0 {
caseVar = ncase.Rlist().First()
}
// For single-type cases with an interface type,
// we initialize the case variable as part of the type assertion.
// In other cases, we initialize it in the body.
var singleType *types.Type
if ncase.List.Len() == 1 && ncase.List.First().Op == OTYPE {
singleType = ncase.List.First().Type
if ncase.List().Len() == 1 && ncase.List().First().Op() == ir.OTYPE {
singleType = ncase.List().First().Type()
}
caseVarInitialized := false
label := autolabel(".s")
jmp := npos(ncase.Pos, nodSym(OGOTO, nil, label))
jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label))
if ncase.List.Len() == 0 { // default:
if ncase.List().Len() == 0 { // default:
if defaultGoto != nil {
Fatalf("duplicate default case not detected during typechecking")
base.Fatalf("duplicate default case not detected during typechecking")
}
defaultGoto = jmp
}
for _, n1 := range ncase.List.Slice() {
if n1.isNil() { // case nil:
for _, n1 := range ncase.List().Slice() {
if ir.IsNil(n1) { // case nil:
if nilGoto != nil {
Fatalf("duplicate nil case not detected during typechecking")
base.Fatalf("duplicate nil case not detected during typechecking")
}
nilGoto = jmp
continue
}
if singleType != nil && singleType.IsInterface() {
s.Add(ncase.Pos, n1.Type, caseVar, jmp)
s.Add(ncase.Pos(), n1.Type(), caseVar, jmp)
caseVarInitialized = true
} else {
s.Add(ncase.Pos, n1.Type, nil, jmp)
s.Add(ncase.Pos(), n1.Type(), nil, jmp)
}
}
body.Append(npos(ncase.Pos, nodSym(OLABEL, nil, label)))
body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label)))
if caseVar != nil && !caseVarInitialized {
val := s.facename
if singleType != nil {
// We have a single concrete type. Extract the data.
if singleType.IsInterface() {
Fatalf("singleType interface should have been handled in Add")
base.Fatalf("singleType interface should have been handled in Add")
}
val = ifaceData(ncase.Pos, s.facename, singleType)
val = ifaceData(ncase.Pos(), s.facename, singleType)
}
l := []*Node{
nodl(ncase.Pos, ODCL, caseVar, nil),
nodl(ncase.Pos, OAS, caseVar, val),
l := []ir.Node{
ir.NodAt(ncase.Pos(), ir.ODCL, caseVar, nil),
ir.NodAt(ncase.Pos(), ir.OAS, caseVar, val),
}
typecheckslice(l, ctxStmt)
body.Append(l...)
}
body.Append(ncase.Nbody.Slice()...)
body.Append(ncase.Body().Slice()...)
body.Append(br)
}
sw.List.Set(nil)
sw.PtrList().Set(nil)
if defaultGoto == nil {
defaultGoto = br
@ -607,58 +610,58 @@ func walkTypeSwitch(sw *Node) {
if nilGoto == nil {
nilGoto = defaultGoto
}
ifNil.Nbody.Set1(nilGoto)
ifNil.PtrBody().Set1(nilGoto)
s.Emit(&sw.Nbody)
sw.Nbody.Append(defaultGoto)
sw.Nbody.AppendNodes(&body)
s.Emit(sw.PtrBody())
sw.PtrBody().Append(defaultGoto)
sw.PtrBody().AppendNodes(&body)
walkstmtlist(sw.Nbody.Slice())
walkstmtlist(sw.Body().Slice())
}
// A typeSwitch walks a type switch.
type typeSwitch struct {
// Temporary variables (i.e., ONAMEs) used by type switch dispatch logic:
facename *Node // value being type-switched on
hashname *Node // type hash of the value being type-switched on
okname *Node // boolean used for comma-ok type assertions
facename ir.Node // value being type-switched on
hashname ir.Node // type hash of the value being type-switched on
okname ir.Node // boolean used for comma-ok type assertions
done Nodes
done ir.Nodes
clauses []typeClause
}
type typeClause struct {
hash uint32
body Nodes
body ir.Nodes
}
func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp *Node) {
var body Nodes
func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) {
var body ir.Nodes
if caseVar != nil {
l := []*Node{
nodl(pos, ODCL, caseVar, nil),
nodl(pos, OAS, caseVar, nil),
l := []ir.Node{
ir.NodAt(pos, ir.ODCL, caseVar, nil),
ir.NodAt(pos, ir.OAS, caseVar, nil),
}
typecheckslice(l, ctxStmt)
body.Append(l...)
} else {
caseVar = nblank
caseVar = ir.BlankNode
}
// cv, ok = iface.(type)
as := nodl(pos, OAS2, nil, nil)
as.List.Set2(caseVar, s.okname) // cv, ok =
dot := nodl(pos, ODOTTYPE, s.facename, nil)
dot.Type = typ // iface.(type)
as.Rlist.Set1(dot)
as := ir.NodAt(pos, ir.OAS2, nil, nil)
as.PtrList().Set2(caseVar, s.okname) // cv, ok =
dot := ir.NodAt(pos, ir.ODOTTYPE, s.facename, nil)
dot.SetType(typ) // iface.(type)
as.PtrRlist().Set1(dot)
as = typecheck(as, ctxStmt)
as = walkexpr(as, &body)
body.Append(as)
// if ok { goto label }
nif := nodl(pos, OIF, nil, nil)
nif.Left = s.okname
nif.Nbody.Set1(jmp)
nif := ir.NodAt(pos, ir.OIF, nil, nil)
nif.SetLeft(s.okname)
nif.PtrBody().Set1(jmp)
body.Append(nif)
if !typ.IsInterface() {
@ -673,7 +676,7 @@ func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp *Node) {
s.done.AppendNodes(&body)
}
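
A Go-flavored sketch of what Add appends for a concrete "case T:"; cv, ok and h are the caseVar, okname and hashname temporaries, and the surrounding hash test is attached later by flush:

//	if h == hashOfT {      // flush's binary-search leaf
//		cv, ok = iface.(T) // the OAS2/ODOTTYPE pair built here
//		if ok {
//			goto caseLabel
//		}
//	}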
func (s *typeSwitch) Emit(out *Nodes) {
func (s *typeSwitch) Emit(out *ir.Nodes) {
s.flush()
out.AppendNodes(&s.done)
}
@ -700,15 +703,15 @@ func (s *typeSwitch) flush() {
cc = merged
binarySearch(len(cc), &s.done,
func(i int) *Node {
return nod(OLE, s.hashname, nodintconst(int64(cc[i-1].hash)))
func(i int) ir.Node {
return ir.Nod(ir.OLE, s.hashname, nodintconst(int64(cc[i-1].hash)))
},
func(i int, nif *Node) {
func(i int, nif ir.Node) {
// TODO(mdempsky): Omit hash equality check if
// there's only one type.
c := cc[i]
nif.Left = nod(OEQ, s.hashname, nodintconst(int64(c.hash)))
nif.Nbody.AppendNodes(&c.body)
nif.SetLeft(ir.Nod(ir.OEQ, s.hashname, nodintconst(int64(c.hash))))
nif.PtrBody().AppendNodes(&c.body)
},
)
}
@ -720,35 +723,35 @@ func (s *typeSwitch) flush() {
// less(i) should return a boolean expression. If it evaluates true,
// then cases before i will be tested; otherwise, cases i and later.
//
// base(i, nif) should setup nif (an OIF node) to test case i. In
// leaf(i, nif) should setup nif (an OIF node) to test case i. In
// particular, it should set nif.Left and nif.Nbody.
func binarySearch(n int, out *Nodes, less func(i int) *Node, base func(i int, nif *Node)) {
func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif ir.Node)) {
const binarySearchMin = 4 // minimum number of cases for binary search
var do func(lo, hi int, out *Nodes)
do = func(lo, hi int, out *Nodes) {
var do func(lo, hi int, out *ir.Nodes)
do = func(lo, hi int, out *ir.Nodes) {
n := hi - lo
if n < binarySearchMin {
for i := lo; i < hi; i++ {
nif := nod(OIF, nil, nil)
base(i, nif)
lineno = lineno.WithNotStmt()
nif.Left = typecheck(nif.Left, ctxExpr)
nif.Left = defaultlit(nif.Left, nil)
nif := ir.Nod(ir.OIF, nil, nil)
leaf(i, nif)
base.Pos = base.Pos.WithNotStmt()
nif.SetLeft(typecheck(nif.Left(), ctxExpr))
nif.SetLeft(defaultlit(nif.Left(), nil))
out.Append(nif)
out = &nif.Rlist
out = nif.PtrRlist()
}
return
}
half := lo + n/2
nif := nod(OIF, nil, nil)
nif.Left = less(half)
lineno = lineno.WithNotStmt()
nif.Left = typecheck(nif.Left, ctxExpr)
nif.Left = defaultlit(nif.Left, nil)
do(lo, half, &nif.Nbody)
do(half, hi, &nif.Rlist)
nif := ir.Nod(ir.OIF, nil, nil)
nif.SetLeft(less(half))
base.Pos = base.Pos.WithNotStmt()
nif.SetLeft(typecheck(nif.Left(), ctxExpr))
nif.SetLeft(defaultlit(nif.Left(), nil))
do(lo, half, nif.PtrBody())
do(half, hi, nif.PtrRlist())
out.Append(nif)
}
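// A runnable sketch of the decision shape binarySearch emits, using
// plain values instead of AST nodes: linear equality tests below
// binarySearchMin, otherwise a split on the middle case's boundary,
// mirroring the less/leaf callbacks above. findCase and the sample
// hashes are made up for illustration.
package main

import "fmt"

func findCase(hashes []uint32, h uint32) int {
	const binarySearchMin = 4
	lo, hi := 0, len(hashes)
	for hi-lo >= binarySearchMin {
		half := lo + (hi-lo)/2
		if h <= hashes[half-1] { // the less(i) test: OLE hashname, cc[half-1].hash
			hi = half
		} else {
			lo = half
		}
	}
	for i := lo; i < hi; i++ { // the leaf(i, nif) tests: one OEQ per remaining case
		if h == hashes[i] {
			return i
		}
	}
	return -1
}

func main() {
	hashes := []uint32{7, 19, 23, 42, 97, 128} // sorted, as flush sorts cc
	fmt.Println(findCase(hashes, 42)) // 3
	fmt.Println(findCase(hashes, 5))  // -1
}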


@ -9,6 +9,8 @@ package gc
import (
"os"
tracepkg "runtime/trace"
"cmd/compile/internal/base"
)
func init() {
@ -18,10 +20,10 @@ func init() {
func traceHandlerGo17(traceprofile string) {
f, err := os.Create(traceprofile)
if err != nil {
Fatalf("%v", err)
base.Fatalf("%v", err)
}
if err := tracepkg.Start(f); err != nil {
Fatalf("%v", err)
base.Fatalf("%v", err)
}
atExit(tracepkg.Stop)
base.AtExit(tracepkg.Stop)
}

File diff suppressed because it is too large


@ -3,56 +3,3 @@
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
)
// convenience constants
const (
Txxx = types.Txxx
TINT8 = types.TINT8
TUINT8 = types.TUINT8
TINT16 = types.TINT16
TUINT16 = types.TUINT16
TINT32 = types.TINT32
TUINT32 = types.TUINT32
TINT64 = types.TINT64
TUINT64 = types.TUINT64
TINT = types.TINT
TUINT = types.TUINT
TUINTPTR = types.TUINTPTR
TCOMPLEX64 = types.TCOMPLEX64
TCOMPLEX128 = types.TCOMPLEX128
TFLOAT32 = types.TFLOAT32
TFLOAT64 = types.TFLOAT64
TBOOL = types.TBOOL
TPTR = types.TPTR
TFUNC = types.TFUNC
TSLICE = types.TSLICE
TARRAY = types.TARRAY
TSTRUCT = types.TSTRUCT
TCHAN = types.TCHAN
TMAP = types.TMAP
TINTER = types.TINTER
TFORW = types.TFORW
TANY = types.TANY
TSTRING = types.TSTRING
TUNSAFEPTR = types.TUNSAFEPTR
// pseudo-types for literals
TIDEAL = types.TIDEAL
TNIL = types.TNIL
TBLANK = types.TBLANK
// pseudo-types for frame layout
TFUNCARGS = types.TFUNCARGS
TCHANARGS = types.TCHANARGS
NTYPE = types.NTYPE
)


@ -6,11 +6,3 @@
// TODO(gri) try to eliminate these soon
package gc
import (
"cmd/compile/internal/types"
"unsafe"
)
func asNode(n *types.Node) *Node { return (*Node)(unsafe.Pointer(n)) }
func asTypesNode(n *Node) *types.Node { return (*types.Node)(unsafe.Pointer(n)) }


@ -6,29 +6,31 @@
package gc
import "cmd/compile/internal/types"
// builtinpkg is a fake package that declares the universe block.
var builtinpkg *types.Pkg
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
)
var basicTypes = [...]struct {
name string
etype types.EType
}{
{"int8", TINT8},
{"int16", TINT16},
{"int32", TINT32},
{"int64", TINT64},
{"uint8", TUINT8},
{"uint16", TUINT16},
{"uint32", TUINT32},
{"uint64", TUINT64},
{"float32", TFLOAT32},
{"float64", TFLOAT64},
{"complex64", TCOMPLEX64},
{"complex128", TCOMPLEX128},
{"bool", TBOOL},
{"string", TSTRING},
{"int8", types.TINT8},
{"int16", types.TINT16},
{"int32", types.TINT32},
{"int64", types.TINT64},
{"uint8", types.TUINT8},
{"uint16", types.TUINT16},
{"uint32", types.TUINT32},
{"uint64", types.TUINT64},
{"float32", types.TFLOAT32},
{"float64", types.TFLOAT64},
{"complex64", types.TCOMPLEX64},
{"complex128", types.TCOMPLEX128},
{"bool", types.TBOOL},
{"string", types.TSTRING},
}
var typedefs = [...]struct {
@ -37,30 +39,30 @@ var typedefs = [...]struct {
sameas32 types.EType
sameas64 types.EType
}{
{"int", TINT, TINT32, TINT64},
{"uint", TUINT, TUINT32, TUINT64},
{"uintptr", TUINTPTR, TUINT32, TUINT64},
{"int", types.TINT, types.TINT32, types.TINT64},
{"uint", types.TUINT, types.TUINT32, types.TUINT64},
{"uintptr", types.TUINTPTR, types.TUINT32, types.TUINT64},
}
var builtinFuncs = [...]struct {
name string
op Op
op ir.Op
}{
{"append", OAPPEND},
{"cap", OCAP},
{"close", OCLOSE},
{"complex", OCOMPLEX},
{"copy", OCOPY},
{"delete", ODELETE},
{"imag", OIMAG},
{"len", OLEN},
{"make", OMAKE},
{"new", ONEW},
{"panic", OPANIC},
{"print", OPRINT},
{"println", OPRINTN},
{"real", OREAL},
{"recover", ORECOVER},
{"append", ir.OAPPEND},
{"cap", ir.OCAP},
{"close", ir.OCLOSE},
{"complex", ir.OCOMPLEX},
{"copy", ir.OCOPY},
{"delete", ir.ODELETE},
{"imag", ir.OIMAG},
{"len", ir.OLEN},
{"make", ir.OMAKE},
{"new", ir.ONEW},
{"panic", ir.OPANIC},
{"print", ir.OPRINT},
{"println", ir.OPRINTN},
{"real", ir.OREAL},
{"recover", ir.ORECOVER},
}
// isBuiltinFuncName reports whether name matches a builtin function
@ -76,11 +78,11 @@ func isBuiltinFuncName(name string) bool {
var unsafeFuncs = [...]struct {
name string
op Op
op ir.Op
}{
{"Alignof", OALIGNOF},
{"Offsetof", OOFFSETOF},
{"Sizeof", OSIZEOF},
{"Alignof", ir.OALIGNOF},
{"Offsetof", ir.OOFFSETOF},
{"Sizeof", ir.OSIZEOF},
}
// initUniverse initializes the universe block.
@ -95,121 +97,117 @@ func lexinit() {
for _, s := range &basicTypes {
etype := s.etype
if int(etype) >= len(types.Types) {
Fatalf("lexinit: %s bad etype", s.name)
base.Fatalf("lexinit: %s bad etype", s.name)
}
s2 := builtinpkg.Lookup(s.name)
s2 := ir.BuiltinPkg.Lookup(s.name)
t := types.Types[etype]
if t == nil {
t = types.New(etype)
t.Sym = s2
if etype != TANY && etype != TSTRING {
if etype != types.TANY && etype != types.TSTRING {
dowidth(t)
}
types.Types[etype] = t
}
s2.Def = asTypesNode(typenod(t))
asNode(s2.Def).Name = new(Name)
s2.Def = typenod(t)
ir.AsNode(s2.Def).SetName(new(ir.Name))
}
for _, s := range &builtinFuncs {
s2 := builtinpkg.Lookup(s.name)
s2.Def = asTypesNode(newname(s2))
asNode(s2.Def).SetSubOp(s.op)
s2 := ir.BuiltinPkg.Lookup(s.name)
s2.Def = NewName(s2)
ir.AsNode(s2.Def).SetSubOp(s.op)
}
for _, s := range &unsafeFuncs {
s2 := unsafepkg.Lookup(s.name)
s2.Def = asTypesNode(newname(s2))
asNode(s2.Def).SetSubOp(s.op)
s2.Def = NewName(s2)
ir.AsNode(s2.Def).SetSubOp(s.op)
}
types.UntypedString = types.New(TSTRING)
types.UntypedBool = types.New(TBOOL)
types.Types[TANY] = types.New(TANY)
types.UntypedString = types.New(types.TSTRING)
types.UntypedBool = types.New(types.TBOOL)
types.Types[types.TANY] = types.New(types.TANY)
s := builtinpkg.Lookup("true")
s.Def = asTypesNode(nodbool(true))
asNode(s.Def).Sym = lookup("true")
asNode(s.Def).Name = new(Name)
asNode(s.Def).Type = types.UntypedBool
s := ir.BuiltinPkg.Lookup("true")
s.Def = nodbool(true)
ir.AsNode(s.Def).SetSym(lookup("true"))
ir.AsNode(s.Def).SetName(new(ir.Name))
ir.AsNode(s.Def).SetType(types.UntypedBool)
s = builtinpkg.Lookup("false")
s.Def = asTypesNode(nodbool(false))
asNode(s.Def).Sym = lookup("false")
asNode(s.Def).Name = new(Name)
asNode(s.Def).Type = types.UntypedBool
s = ir.BuiltinPkg.Lookup("false")
s.Def = nodbool(false)
ir.AsNode(s.Def).SetSym(lookup("false"))
ir.AsNode(s.Def).SetName(new(ir.Name))
ir.AsNode(s.Def).SetType(types.UntypedBool)
s = lookup("_")
s.Block = -100
s.Def = asTypesNode(newname(s))
types.Types[TBLANK] = types.New(TBLANK)
asNode(s.Def).Type = types.Types[TBLANK]
nblank = asNode(s.Def)
s.Def = NewName(s)
types.Types[types.TBLANK] = types.New(types.TBLANK)
ir.AsNode(s.Def).SetType(types.Types[types.TBLANK])
ir.BlankNode = ir.AsNode(s.Def)
s = builtinpkg.Lookup("_")
s = ir.BuiltinPkg.Lookup("_")
s.Block = -100
s.Def = asTypesNode(newname(s))
types.Types[TBLANK] = types.New(TBLANK)
asNode(s.Def).Type = types.Types[TBLANK]
s.Def = NewName(s)
types.Types[types.TBLANK] = types.New(types.TBLANK)
ir.AsNode(s.Def).SetType(types.Types[types.TBLANK])
types.Types[TNIL] = types.New(TNIL)
s = builtinpkg.Lookup("nil")
var v Val
v.U = new(NilVal)
s.Def = asTypesNode(nodlit(v))
asNode(s.Def).Sym = s
asNode(s.Def).Name = new(Name)
types.Types[types.TNIL] = types.New(types.TNIL)
s = ir.BuiltinPkg.Lookup("nil")
s.Def = nodnil()
ir.AsNode(s.Def).SetSym(s)
ir.AsNode(s.Def).SetName(new(ir.Name))
s = builtinpkg.Lookup("iota")
s.Def = asTypesNode(nod(OIOTA, nil, nil))
asNode(s.Def).Sym = s
asNode(s.Def).Name = new(Name)
s = ir.BuiltinPkg.Lookup("iota")
s.Def = ir.Nod(ir.OIOTA, nil, nil)
ir.AsNode(s.Def).SetSym(s)
ir.AsNode(s.Def).SetName(new(ir.Name))
}
func typeinit() {
if Widthptr == 0 {
Fatalf("typeinit before betypeinit")
base.Fatalf("typeinit before betypeinit")
}
for et := types.EType(0); et < NTYPE; et++ {
for et := types.EType(0); et < types.NTYPE; et++ {
simtype[et] = et
}
types.Types[TPTR] = types.New(TPTR)
dowidth(types.Types[TPTR])
types.Types[types.TPTR] = types.New(types.TPTR)
dowidth(types.Types[types.TPTR])
t := types.New(TUNSAFEPTR)
types.Types[TUNSAFEPTR] = t
t := types.New(types.TUNSAFEPTR)
types.Types[types.TUNSAFEPTR] = t
t.Sym = unsafepkg.Lookup("Pointer")
t.Sym.Def = asTypesNode(typenod(t))
asNode(t.Sym.Def).Name = new(Name)
dowidth(types.Types[TUNSAFEPTR])
t.Sym.Def = typenod(t)
ir.AsNode(t.Sym.Def).SetName(new(ir.Name))
dowidth(types.Types[types.TUNSAFEPTR])
for et := TINT8; et <= TUINT64; et++ {
for et := types.TINT8; et <= types.TUINT64; et++ {
isInt[et] = true
}
isInt[TINT] = true
isInt[TUINT] = true
isInt[TUINTPTR] = true
isInt[types.TINT] = true
isInt[types.TUINT] = true
isInt[types.TUINTPTR] = true
isFloat[TFLOAT32] = true
isFloat[TFLOAT64] = true
isFloat[types.TFLOAT32] = true
isFloat[types.TFLOAT64] = true
isComplex[TCOMPLEX64] = true
isComplex[TCOMPLEX128] = true
isComplex[types.TCOMPLEX64] = true
isComplex[types.TCOMPLEX128] = true
// initialize okfor
for et := types.EType(0); et < NTYPE; et++ {
if isInt[et] || et == TIDEAL {
for et := types.EType(0); et < types.NTYPE; et++ {
if isInt[et] || et == types.TIDEAL {
okforeq[et] = true
okforcmp[et] = true
okforarith[et] = true
okforadd[et] = true
okforand[et] = true
okforconst[et] = true
ir.OKForConst[et] = true
issimple[et] = true
minintval[et] = new(Mpint)
maxintval[et] = new(Mpint)
}
if isFloat[et] {
@ -217,53 +215,51 @@ func typeinit() {
okforcmp[et] = true
okforadd[et] = true
okforarith[et] = true
okforconst[et] = true
ir.OKForConst[et] = true
issimple[et] = true
minfltval[et] = newMpflt()
maxfltval[et] = newMpflt()
}
if isComplex[et] {
okforeq[et] = true
okforadd[et] = true
okforarith[et] = true
okforconst[et] = true
ir.OKForConst[et] = true
issimple[et] = true
}
}
issimple[TBOOL] = true
issimple[types.TBOOL] = true
okforadd[TSTRING] = true
okforadd[types.TSTRING] = true
okforbool[TBOOL] = true
okforbool[types.TBOOL] = true
okforcap[TARRAY] = true
okforcap[TCHAN] = true
okforcap[TSLICE] = true
okforcap[types.TARRAY] = true
okforcap[types.TCHAN] = true
okforcap[types.TSLICE] = true
okforconst[TBOOL] = true
okforconst[TSTRING] = true
ir.OKForConst[types.TBOOL] = true
ir.OKForConst[types.TSTRING] = true
okforlen[TARRAY] = true
okforlen[TCHAN] = true
okforlen[TMAP] = true
okforlen[TSLICE] = true
okforlen[TSTRING] = true
okforlen[types.TARRAY] = true
okforlen[types.TCHAN] = true
okforlen[types.TMAP] = true
okforlen[types.TSLICE] = true
okforlen[types.TSTRING] = true
okforeq[TPTR] = true
okforeq[TUNSAFEPTR] = true
okforeq[TINTER] = true
okforeq[TCHAN] = true
okforeq[TSTRING] = true
okforeq[TBOOL] = true
okforeq[TMAP] = true // nil only; refined in typecheck
okforeq[TFUNC] = true // nil only; refined in typecheck
okforeq[TSLICE] = true // nil only; refined in typecheck
okforeq[TARRAY] = true // only if element type is comparable; refined in typecheck
okforeq[TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck
okforeq[types.TPTR] = true
okforeq[types.TUNSAFEPTR] = true
okforeq[types.TINTER] = true
okforeq[types.TCHAN] = true
okforeq[types.TSTRING] = true
okforeq[types.TBOOL] = true
okforeq[types.TMAP] = true // nil only; refined in typecheck
okforeq[types.TFUNC] = true // nil only; refined in typecheck
okforeq[types.TSLICE] = true // nil only; refined in typecheck
okforeq[types.TARRAY] = true // only if element type is comparable; refined in typecheck
okforeq[types.TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck
okforcmp[TSTRING] = true
okforcmp[types.TSTRING] = true
var i int
for i = 0; i < len(okfor); i++ {
@ -271,76 +267,51 @@ func typeinit() {
}
// binary
okfor[OADD] = okforadd[:]
okfor[OAND] = okforand[:]
okfor[OANDAND] = okforbool[:]
okfor[OANDNOT] = okforand[:]
okfor[ODIV] = okforarith[:]
okfor[OEQ] = okforeq[:]
okfor[OGE] = okforcmp[:]
okfor[OGT] = okforcmp[:]
okfor[OLE] = okforcmp[:]
okfor[OLT] = okforcmp[:]
okfor[OMOD] = okforand[:]
okfor[OMUL] = okforarith[:]
okfor[ONE] = okforeq[:]
okfor[OOR] = okforand[:]
okfor[OOROR] = okforbool[:]
okfor[OSUB] = okforarith[:]
okfor[OXOR] = okforand[:]
okfor[OLSH] = okforand[:]
okfor[ORSH] = okforand[:]
okfor[ir.OADD] = okforadd[:]
okfor[ir.OAND] = okforand[:]
okfor[ir.OANDAND] = okforbool[:]
okfor[ir.OANDNOT] = okforand[:]
okfor[ir.ODIV] = okforarith[:]
okfor[ir.OEQ] = okforeq[:]
okfor[ir.OGE] = okforcmp[:]
okfor[ir.OGT] = okforcmp[:]
okfor[ir.OLE] = okforcmp[:]
okfor[ir.OLT] = okforcmp[:]
okfor[ir.OMOD] = okforand[:]
okfor[ir.OMUL] = okforarith[:]
okfor[ir.ONE] = okforeq[:]
okfor[ir.OOR] = okforand[:]
okfor[ir.OOROR] = okforbool[:]
okfor[ir.OSUB] = okforarith[:]
okfor[ir.OXOR] = okforand[:]
okfor[ir.OLSH] = okforand[:]
okfor[ir.ORSH] = okforand[:]
// unary
okfor[OBITNOT] = okforand[:]
okfor[ONEG] = okforarith[:]
okfor[ONOT] = okforbool[:]
okfor[OPLUS] = okforarith[:]
okfor[ir.OBITNOT] = okforand[:]
okfor[ir.ONEG] = okforarith[:]
okfor[ir.ONOT] = okforbool[:]
okfor[ir.OPLUS] = okforarith[:]
// special
okfor[OCAP] = okforcap[:]
okfor[OLEN] = okforlen[:]
okfor[ir.OCAP] = okforcap[:]
okfor[ir.OLEN] = okforlen[:]
// comparison
iscmp[OLT] = true
iscmp[OGT] = true
iscmp[OGE] = true
iscmp[OLE] = true
iscmp[OEQ] = true
iscmp[ONE] = true
iscmp[ir.OLT] = true
iscmp[ir.OGT] = true
iscmp[ir.OGE] = true
iscmp[ir.OLE] = true
iscmp[ir.OEQ] = true
iscmp[ir.ONE] = true
maxintval[TINT8].SetString("0x7f")
minintval[TINT8].SetString("-0x80")
maxintval[TINT16].SetString("0x7fff")
minintval[TINT16].SetString("-0x8000")
maxintval[TINT32].SetString("0x7fffffff")
minintval[TINT32].SetString("-0x80000000")
maxintval[TINT64].SetString("0x7fffffffffffffff")
minintval[TINT64].SetString("-0x8000000000000000")
maxintval[TUINT8].SetString("0xff")
maxintval[TUINT16].SetString("0xffff")
maxintval[TUINT32].SetString("0xffffffff")
maxintval[TUINT64].SetString("0xffffffffffffffff")
// f is valid float if min < f < max. (min and max are not themselves valid.)
maxfltval[TFLOAT32].SetString("33554431p103") // 2^24-1 p (127-23) + 1/2 ulp
minfltval[TFLOAT32].SetString("-33554431p103")
maxfltval[TFLOAT64].SetString("18014398509481983p970") // 2^53-1 p (1023-52) + 1/2 ulp
minfltval[TFLOAT64].SetString("-18014398509481983p970")
maxfltval[TCOMPLEX64] = maxfltval[TFLOAT32]
minfltval[TCOMPLEX64] = minfltval[TFLOAT32]
maxfltval[TCOMPLEX128] = maxfltval[TFLOAT64]
minfltval[TCOMPLEX128] = minfltval[TFLOAT64]
types.Types[TINTER] = types.New(TINTER) // empty interface
types.Types[types.TINTER] = types.New(types.TINTER) // empty interface
// simple aliases
simtype[TMAP] = TPTR
simtype[TCHAN] = TPTR
simtype[TFUNC] = TPTR
simtype[TUNSAFEPTR] = TPTR
simtype[types.TMAP] = types.TPTR
simtype[types.TCHAN] = types.TPTR
simtype[types.TFUNC] = types.TPTR
simtype[types.TUNSAFEPTR] = types.TPTR
slicePtrOffset = 0
sliceLenOffset = Rnd(slicePtrOffset+int64(Widthptr), int64(Widthptr))
@ -350,31 +321,29 @@ func typeinit() {
// string is same as slice w/o the cap
sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr))
dowidth(types.Types[TSTRING])
dowidth(types.Types[types.TSTRING])
dowidth(types.UntypedString)
}
func makeErrorInterface() *types.Type {
field := types.NewField()
field.Type = types.Types[TSTRING]
f := functypefield(fakeRecvField(), nil, []*types.Field{field})
sig := functypefield(fakeRecvField(), nil, []*types.Field{
types.NewField(src.NoXPos, nil, types.Types[types.TSTRING]),
})
field = types.NewField()
field.Sym = lookup("Error")
field.Type = f
method := types.NewField(src.NoXPos, lookup("Error"), sig)
t := types.New(TINTER)
t.SetInterface([]*types.Field{field})
t := types.New(types.TINTER)
t.SetInterface([]*types.Field{method})
return t
}
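// The interface makeErrorInterface constructs is exactly the
// predeclared error type: a single Error() string method on a fake
// receiver. A standalone illustration (myErr is hypothetical):
package main

import "fmt"

type myErr struct{ msg string }

func (e myErr) Error() string { return e.msg }

func main() {
	var err error = myErr{"boom"} // satisfies the constructed interface
	fmt.Println(err)              // boom
}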
func lexinit1() {
// error type
s := builtinpkg.Lookup("error")
s := ir.BuiltinPkg.Lookup("error")
types.Errortype = makeErrorInterface()
types.Errortype.Sym = s
types.Errortype.Orig = makeErrorInterface()
s.Def = asTypesNode(typenod(types.Errortype))
s.Def = typenod(types.Errortype)
dowidth(types.Errortype)
// We create separate byte and rune types for better error messages
@ -386,24 +355,24 @@ func lexinit1() {
// type aliases, albeit at the cost of having to deal with it everywhere).
// byte alias
s = builtinpkg.Lookup("byte")
types.Bytetype = types.New(TUINT8)
s = ir.BuiltinPkg.Lookup("byte")
types.Bytetype = types.New(types.TUINT8)
types.Bytetype.Sym = s
s.Def = asTypesNode(typenod(types.Bytetype))
asNode(s.Def).Name = new(Name)
s.Def = typenod(types.Bytetype)
ir.AsNode(s.Def).SetName(new(ir.Name))
dowidth(types.Bytetype)
// rune alias
s = builtinpkg.Lookup("rune")
types.Runetype = types.New(TINT32)
s = ir.BuiltinPkg.Lookup("rune")
types.Runetype = types.New(types.TINT32)
types.Runetype.Sym = s
s.Def = asTypesNode(typenod(types.Runetype))
asNode(s.Def).Name = new(Name)
s.Def = typenod(types.Runetype)
ir.AsNode(s.Def).SetName(new(ir.Name))
dowidth(types.Runetype)
// backend-dependent builtin types (e.g. int).
for _, s := range &typedefs {
s1 := builtinpkg.Lookup(s.name)
s1 := ir.BuiltinPkg.Lookup(s.name)
sameas := s.sameas32
if Widthptr == 8 {
@ -411,17 +380,13 @@ func lexinit1() {
}
simtype[s.etype] = sameas
minfltval[s.etype] = minfltval[sameas]
maxfltval[s.etype] = maxfltval[sameas]
minintval[s.etype] = minintval[sameas]
maxintval[s.etype] = maxintval[sameas]
t := types.New(s.etype)
t.Sym = s1
types.Types[s.etype] = t
s1.Def = asTypesNode(typenod(t))
asNode(s1.Def).Name = new(Name)
s1.Origpkg = builtinpkg
s1.Def = typenod(t)
ir.AsNode(s1.Def).SetName(new(ir.Name))
s1.Origpkg = ir.BuiltinPkg
dowidth(t)
}
@ -433,7 +398,7 @@ func finishUniverse() {
// that we silently skip symbols that are already declared in the
// package block rather than emitting a redeclared symbol error.
for _, s := range builtinpkg.Syms {
for _, s := range ir.BuiltinPkg.Syms {
if s.Def == nil {
continue
}
@ -446,8 +411,8 @@ func finishUniverse() {
s1.Block = s.Block
}
nodfp = newname(lookup(".fp"))
nodfp.Type = types.Types[TINT32]
nodfp.SetClass(PPARAM)
nodfp.Name.SetUsed(true)
nodfp = NewName(lookup(".fp"))
nodfp.SetType(types.Types[types.TINT32])
nodfp.SetClass(ir.PPARAM)
nodfp.Name().SetUsed(true)
}


@ -4,73 +4,78 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
)
// evalunsafe evaluates a package unsafe operation and returns the result.
func evalunsafe(n *Node) int64 {
switch n.Op {
case OALIGNOF, OSIZEOF:
n.Left = typecheck(n.Left, ctxExpr)
n.Left = defaultlit(n.Left, nil)
tr := n.Left.Type
func evalunsafe(n ir.Node) int64 {
switch n.Op() {
case ir.OALIGNOF, ir.OSIZEOF:
n.SetLeft(typecheck(n.Left(), ctxExpr))
n.SetLeft(defaultlit(n.Left(), nil))
tr := n.Left().Type()
if tr == nil {
return 0
}
dowidth(tr)
if n.Op == OALIGNOF {
if n.Op() == ir.OALIGNOF {
return int64(tr.Align)
}
return tr.Width
case OOFFSETOF:
case ir.OOFFSETOF:
// must be a selector.
if n.Left.Op != OXDOT {
yyerror("invalid expression %v", n)
if n.Left().Op() != ir.OXDOT {
base.Errorf("invalid expression %v", n)
return 0
}
// Remember base of selector to find it back after dot insertion.
// Since r->left may be mutated by typechecking, check it explicitly
// first to track it correctly.
n.Left.Left = typecheck(n.Left.Left, ctxExpr)
base := n.Left.Left
n.Left().SetLeft(typecheck(n.Left().Left(), ctxExpr))
sbase := n.Left().Left()
n.Left = typecheck(n.Left, ctxExpr)
if n.Left.Type == nil {
n.SetLeft(typecheck(n.Left(), ctxExpr))
if n.Left().Type() == nil {
return 0
}
switch n.Left.Op {
case ODOT, ODOTPTR:
switch n.Left().Op() {
case ir.ODOT, ir.ODOTPTR:
break
case OCALLPART:
yyerror("invalid expression %v: argument is a method value", n)
case ir.OCALLPART:
base.Errorf("invalid expression %v: argument is a method value", n)
return 0
default:
yyerror("invalid expression %v", n)
base.Errorf("invalid expression %v", n)
return 0
}
// Sum offsets for dots until we reach base.
// Sum offsets for dots until we reach sbase.
var v int64
for r := n.Left; r != base; r = r.Left {
switch r.Op {
case ODOTPTR:
for r := n.Left(); r != sbase; r = r.Left() {
switch r.Op() {
case ir.ODOTPTR:
// For Offsetof(s.f), s may itself be a pointer,
// but accessing f must not otherwise involve
// indirection via embedded pointer types.
if r.Left != base {
yyerror("invalid expression %v: selector implies indirection of embedded %v", n, r.Left)
if r.Left() != sbase {
base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.Left())
return 0
}
fallthrough
case ODOT:
v += r.Xoffset
case ir.ODOT:
v += r.Offset()
default:
Dump("unsafenmagic", n.Left)
Fatalf("impossible %#v node after dot insertion", r.Op)
ir.Dump("unsafenmagic", n.Left())
base.Fatalf("impossible %#v node after dot insertion", r.Op())
}
}
return v
}
Fatalf("unexpected op %v", n.Op)
base.Fatalf("unexpected op %v", n.Op())
return 0
}
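// A small runnable illustration of the three operations evalunsafe
// constant-folds; Inner and Outer are hypothetical, and the printed
// values assume a typical 64-bit layout. Note that embedding *Inner
// instead of Inner would make unsafe.Offsetof(o.B) invalid — the
// "selector implies indirection of embedded" error rejected above.
package main

import (
	"fmt"
	"unsafe"
)

type Inner struct{ A, B int32 }

type Outer struct {
	X     int64
	Inner // embedded by value, so reaching o.B needs no indirection
}

func main() {
	var o Outer
	fmt.Println(unsafe.Sizeof(o.X))   // 8
	fmt.Println(unsafe.Alignof(o))    // 8 on common 64-bit targets
	fmt.Println(unsafe.Offsetof(o.B)) // 12: X (8) + A (4), summed dot by dot
}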


@ -8,59 +8,35 @@ import (
"os"
"runtime"
"runtime/pprof"
"cmd/compile/internal/base"
)
// Line returns n's position as a string. If n has been inlined,
// it uses the outermost position where n has been inlined.
func (n *Node) Line() string {
return linestr(n.Pos)
}
var atExitFuncs []func()
func atExit(f func()) {
atExitFuncs = append(atExitFuncs, f)
}
func Exit(code int) {
for i := len(atExitFuncs) - 1; i >= 0; i-- {
f := atExitFuncs[i]
atExitFuncs = atExitFuncs[:i]
f()
}
os.Exit(code)
}
var (
blockprofile string
cpuprofile string
memprofile string
memprofilerate int64
traceprofile string
traceHandler func(string)
mutexprofile string
)
func startProfile() {
if cpuprofile != "" {
f, err := os.Create(cpuprofile)
if base.Flag.CPUProfile != "" {
f, err := os.Create(base.Flag.CPUProfile)
if err != nil {
Fatalf("%v", err)
base.Fatalf("%v", err)
}
if err := pprof.StartCPUProfile(f); err != nil {
Fatalf("%v", err)
base.Fatalf("%v", err)
}
atExit(pprof.StopCPUProfile)
base.AtExit(pprof.StopCPUProfile)
}
if memprofile != "" {
if base.Flag.MemProfile != "" {
if memprofilerate != 0 {
runtime.MemProfileRate = int(memprofilerate)
}
f, err := os.Create(memprofile)
f, err := os.Create(base.Flag.MemProfile)
if err != nil {
Fatalf("%v", err)
base.Fatalf("%v", err)
}
atExit(func() {
base.AtExit(func() {
// Profile all outstanding allocations.
runtime.GC()
// compilebench parses the memory profile to extract memstats,
@ -68,36 +44,36 @@ func startProfile() {
// See golang.org/issue/18641 and runtime/pprof/pprof.go:writeHeap.
const writeLegacyFormat = 1
if err := pprof.Lookup("heap").WriteTo(f, writeLegacyFormat); err != nil {
Fatalf("%v", err)
base.Fatalf("%v", err)
}
})
} else {
// Not doing memory profiling; disable it entirely.
runtime.MemProfileRate = 0
}
if blockprofile != "" {
f, err := os.Create(blockprofile)
if base.Flag.BlockProfile != "" {
f, err := os.Create(base.Flag.BlockProfile)
if err != nil {
Fatalf("%v", err)
base.Fatalf("%v", err)
}
runtime.SetBlockProfileRate(1)
atExit(func() {
base.AtExit(func() {
pprof.Lookup("block").WriteTo(f, 0)
f.Close()
})
}
if mutexprofile != "" {
f, err := os.Create(mutexprofile)
if base.Flag.MutexProfile != "" {
f, err := os.Create(base.Flag.MutexProfile)
if err != nil {
Fatalf("%v", err)
base.Fatalf("%v", err)
}
startMutexProfiling()
atExit(func() {
base.AtExit(func() {
pprof.Lookup("mutex").WriteTo(f, 0)
f.Close()
})
}
if traceprofile != "" && traceHandler != nil {
traceHandler(traceprofile)
if base.Flag.TraceProfile != "" && traceHandler != nil {
traceHandler(base.Flag.TraceProfile)
}
}
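// The same public runtime/pprof calls in a minimal standalone form.
// startProfile registers the stop via base.AtExit (see above); a
// short-lived program can simply defer it. "cpu.prof" is a
// hypothetical output path.
package main

import (
	"log"
	"os"
	"runtime/pprof"
)

func main() {
	f, err := os.Create("cpu.prof")
	if err != nil {
		log.Fatal(err)
	}
	if err := pprof.StartCPUProfile(f); err != nil {
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()

	// ... workload to profile ...
}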

File diff suppressed because it is too large


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package ir
type bitset8 uint8


@ -1,6 +1,6 @@
// Code generated by "stringer -type=Class"; DO NOT EDIT.
package gc
package ir
import "strconv"


@ -6,21 +6,23 @@
// for debugging purposes. The code is customized for Node graphs
// and may be used for an alternative view of the node structure.
package gc
package ir
import (
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
"io"
"os"
"reflect"
"regexp"
"cmd/compile/internal/base"
"cmd/compile/internal/types"
"cmd/internal/src"
)
// DumpAny is like FDumpAny but prints to stderr.
func dump(root interface{}, filter string, depth int) {
fdump(os.Stderr, root, filter, depth)
func DumpAny(root interface{}, filter string, depth int) {
FDumpAny(os.Stderr, root, filter, depth)
}
// FDumpAny prints the structure of a rooted data structure
@ -40,7 +42,7 @@ func dump(root interface{}, filter string, depth int) {
// rather than their type; struct fields with zero values or
// non-matching field names are omitted, and "…" means recursion
// depth has been reached or struct fields have been omitted.
func fdump(w io.Writer, root interface{}, filter string, depth int) {
func FDumpAny(w io.Writer, root interface{}, filter string, depth int) {
if root == nil {
fmt.Fprintln(w, "nil")
return
@ -146,11 +148,8 @@ func (p *dumper) dump(x reflect.Value, depth int) {
x = reflect.ValueOf(v.Slice())
case src.XPos:
p.printf("%s", linestr(v))
p.printf("%s", base.FmtPos(v))
return
case *types.Node:
x = reflect.ValueOf(asNode(v))
}
switch x.Kind() {
@ -201,9 +200,9 @@ func (p *dumper) dump(x reflect.Value, depth int) {
typ := x.Type()
isNode := false
if n, ok := x.Interface().(Node); ok {
if n, ok := x.Interface().(node); ok {
isNode = true
p.printf("%s %s {", n.Op.String(), p.addr(x))
p.printf("%s %s {", n.op.String(), p.addr(x))
} else {
p.printf("%s {", typ)
}


@ -0,0 +1,12 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
import "cmd/compile/internal/types"
var LocalPkg *types.Pkg // package being compiled
// BuiltinPkg is a fake package that declares the universe block.
var BuiltinPkg *types.Pkg


@ -0,0 +1,177 @@
// Code generated by "stringer -type=Op -trimprefix=O"; DO NOT EDIT.
package ir
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[OXXX-0]
_ = x[ONAME-1]
_ = x[ONONAME-2]
_ = x[OTYPE-3]
_ = x[OPACK-4]
_ = x[OLITERAL-5]
_ = x[ONIL-6]
_ = x[OADD-7]
_ = x[OSUB-8]
_ = x[OOR-9]
_ = x[OXOR-10]
_ = x[OADDSTR-11]
_ = x[OADDR-12]
_ = x[OANDAND-13]
_ = x[OAPPEND-14]
_ = x[OBYTES2STR-15]
_ = x[OBYTES2STRTMP-16]
_ = x[ORUNES2STR-17]
_ = x[OSTR2BYTES-18]
_ = x[OSTR2BYTESTMP-19]
_ = x[OSTR2RUNES-20]
_ = x[OAS-21]
_ = x[OAS2-22]
_ = x[OAS2DOTTYPE-23]
_ = x[OAS2FUNC-24]
_ = x[OAS2MAPR-25]
_ = x[OAS2RECV-26]
_ = x[OASOP-27]
_ = x[OCALL-28]
_ = x[OCALLFUNC-29]
_ = x[OCALLMETH-30]
_ = x[OCALLINTER-31]
_ = x[OCALLPART-32]
_ = x[OCAP-33]
_ = x[OCLOSE-34]
_ = x[OCLOSURE-35]
_ = x[OCOMPLIT-36]
_ = x[OMAPLIT-37]
_ = x[OSTRUCTLIT-38]
_ = x[OARRAYLIT-39]
_ = x[OSLICELIT-40]
_ = x[OPTRLIT-41]
_ = x[OCONV-42]
_ = x[OCONVIFACE-43]
_ = x[OCONVNOP-44]
_ = x[OCOPY-45]
_ = x[ODCL-46]
_ = x[ODCLFUNC-47]
_ = x[ODCLFIELD-48]
_ = x[ODCLCONST-49]
_ = x[ODCLTYPE-50]
_ = x[ODELETE-51]
_ = x[ODOT-52]
_ = x[ODOTPTR-53]
_ = x[ODOTMETH-54]
_ = x[ODOTINTER-55]
_ = x[OXDOT-56]
_ = x[ODOTTYPE-57]
_ = x[ODOTTYPE2-58]
_ = x[OEQ-59]
_ = x[ONE-60]
_ = x[OLT-61]
_ = x[OLE-62]
_ = x[OGE-63]
_ = x[OGT-64]
_ = x[ODEREF-65]
_ = x[OINDEX-66]
_ = x[OINDEXMAP-67]
_ = x[OKEY-68]
_ = x[OSTRUCTKEY-69]
_ = x[OLEN-70]
_ = x[OMAKE-71]
_ = x[OMAKECHAN-72]
_ = x[OMAKEMAP-73]
_ = x[OMAKESLICE-74]
_ = x[OMAKESLICECOPY-75]
_ = x[OMUL-76]
_ = x[ODIV-77]
_ = x[OMOD-78]
_ = x[OLSH-79]
_ = x[ORSH-80]
_ = x[OAND-81]
_ = x[OANDNOT-82]
_ = x[ONEW-83]
_ = x[ONEWOBJ-84]
_ = x[ONOT-85]
_ = x[OBITNOT-86]
_ = x[OPLUS-87]
_ = x[ONEG-88]
_ = x[OOROR-89]
_ = x[OPANIC-90]
_ = x[OPRINT-91]
_ = x[OPRINTN-92]
_ = x[OPAREN-93]
_ = x[OSEND-94]
_ = x[OSLICE-95]
_ = x[OSLICEARR-96]
_ = x[OSLICESTR-97]
_ = x[OSLICE3-98]
_ = x[OSLICE3ARR-99]
_ = x[OSLICEHEADER-100]
_ = x[ORECOVER-101]
_ = x[ORECV-102]
_ = x[ORUNESTR-103]
_ = x[OSELRECV-104]
_ = x[OSELRECV2-105]
_ = x[OIOTA-106]
_ = x[OREAL-107]
_ = x[OIMAG-108]
_ = x[OCOMPLEX-109]
_ = x[OALIGNOF-110]
_ = x[OOFFSETOF-111]
_ = x[OSIZEOF-112]
_ = x[OMETHEXPR-113]
_ = x[OBLOCK-114]
_ = x[OBREAK-115]
_ = x[OCASE-116]
_ = x[OCONTINUE-117]
_ = x[ODEFER-118]
_ = x[OEMPTY-119]
_ = x[OFALL-120]
_ = x[OFOR-121]
_ = x[OFORUNTIL-122]
_ = x[OGOTO-123]
_ = x[OIF-124]
_ = x[OLABEL-125]
_ = x[OGO-126]
_ = x[ORANGE-127]
_ = x[ORETURN-128]
_ = x[OSELECT-129]
_ = x[OSWITCH-130]
_ = x[OTYPESW-131]
_ = x[OTCHAN-132]
_ = x[OTMAP-133]
_ = x[OTSTRUCT-134]
_ = x[OTINTER-135]
_ = x[OTFUNC-136]
_ = x[OTARRAY-137]
_ = x[ODDD-138]
_ = x[OINLCALL-139]
_ = x[OEFACE-140]
_ = x[OITAB-141]
_ = x[OIDATA-142]
_ = x[OSPTR-143]
_ = x[OCLOSUREVAR-144]
_ = x[OCFUNC-145]
_ = x[OCHECKNIL-146]
_ = x[OVARDEF-147]
_ = x[OVARKILL-148]
_ = x[OVARLIVE-149]
_ = x[ORESULT-150]
_ = x[OINLMARK-151]
_ = x[ORETJMP-152]
_ = x[OGETG-153]
_ = x[OEND-154]
}
const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND"
var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 310, 317, 323, 326, 332, 339, 347, 351, 358, 366, 368, 370, 372, 374, 376, 378, 383, 388, 396, 399, 408, 411, 415, 423, 430, 439, 452, 455, 458, 461, 464, 467, 470, 476, 479, 485, 488, 494, 498, 501, 505, 510, 515, 521, 526, 530, 535, 543, 551, 557, 566, 577, 584, 588, 595, 602, 610, 614, 618, 622, 629, 636, 644, 650, 658, 663, 668, 672, 680, 685, 690, 694, 697, 705, 709, 711, 716, 718, 723, 729, 735, 741, 747, 752, 756, 763, 769, 774, 780, 783, 790, 795, 799, 804, 808, 818, 823, 831, 837, 844, 851, 857, 864, 870, 874, 877}
func (i Op) String() string {
if i >= Op(len(_Op_index)-1) {
return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Op_name[_Op_index[i]:_Op_index[i+1]]
}
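// A scaled-down, self-contained copy of the stringer pattern above:
// one packed name string plus an index table, sliced by the Op's
// numeric value. The three-op table is made up; the mechanism is the
// same as in the generated code.
package main

import (
	"fmt"
	"strconv"
)

type Op int

const (
	OXXX Op = iota
	ONAME
	ONONAME
)

const _Op_name = "XXXNAMENONAME"

var _Op_index = [...]uint16{0, 3, 7, 13}

func (i Op) String() string {
	if i < 0 || i >= Op(len(_Op_index)-1) {
		return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _Op_name[_Op_index[i]:_Op_index[i+1]]
}

func main() {
	fmt.Println(ONAME)  // NAME
	fmt.Println(Op(99)) // Op(99)
}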


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package ir
import (
"reflect"
@ -20,10 +20,10 @@ func TestSizeof(t *testing.T) {
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
{Func{}, 124, 224},
{Name{}, 32, 56},
{Param{}, 24, 48},
{Node{}, 76, 128},
{Func{}, 152, 280},
{Name{}, 44, 80},
{Param{}, 44, 88},
{node{}, 88, 152},
}
for _, tt := range tests {


@ -0,0 +1,120 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
import (
"go/constant"
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/types"
)
func ConstType(n Node) constant.Kind {
if n == nil || n.Op() != OLITERAL {
return constant.Unknown
}
return n.Val().Kind()
}
// ConstValue returns the constant value stored in n as an interface{}.
// It returns int64s for ints and runes, float64s for floats,
// and complex128s for complex values.
func ConstValue(n Node) interface{} {
switch v := n.Val(); v.Kind() {
default:
base.Fatalf("unexpected constant: %v", v)
panic("unreachable")
case constant.Bool:
return constant.BoolVal(v)
case constant.String:
return constant.StringVal(v)
case constant.Int:
return Int64Val(n.Type(), v)
case constant.Float:
return Float64Val(v)
case constant.Complex:
return complex(Float64Val(constant.Real(v)), Float64Val(constant.Imag(v)))
}
}
// Int64Val returns v converted to int64.
// Note: if t is uint64, very large values will be converted to negative int64.
func Int64Val(t *types.Type, v constant.Value) int64 {
if t.IsUnsigned() {
if x, ok := constant.Uint64Val(v); ok {
return int64(x)
}
} else {
if x, ok := constant.Int64Val(v); ok {
return x
}
}
base.Fatalf("%v out of range for %v", v, t)
panic("unreachable")
}
func Float64Val(v constant.Value) float64 {
if x, _ := constant.Float64Val(v); !math.IsInf(x, 0) {
return x + 0 // avoid -0 (should not be needed, but be conservative)
}
base.Fatalf("bad float64 value: %v", v)
panic("unreachable")
}
func AssertValidTypeForConst(t *types.Type, v constant.Value) {
if !ValidTypeForConst(t, v) {
base.Fatalf("%v does not represent %v", t, v)
}
}
func ValidTypeForConst(t *types.Type, v constant.Value) bool {
switch v.Kind() {
case constant.Unknown:
return OKForConst[t.Etype]
case constant.Bool:
return t.IsBoolean()
case constant.String:
return t.IsString()
case constant.Int:
return t.IsInteger()
case constant.Float:
return t.IsFloat()
case constant.Complex:
return t.IsComplex()
}
base.Fatalf("unexpected constant kind: %v", v)
panic("unreachable")
}
// NewLiteral returns a new untyped constant with value v.
func NewLiteral(v constant.Value) Node {
n := Nod(OLITERAL, nil, nil)
if k := v.Kind(); k != constant.Unknown {
n.SetType(idealType(k))
n.SetVal(v)
}
return n
}
func idealType(ct constant.Kind) *types.Type {
switch ct {
case constant.String:
return types.UntypedString
case constant.Bool:
return types.UntypedBool
case constant.Int:
return types.UntypedInt
case constant.Float:
return types.UntypedFloat
case constant.Complex:
return types.UntypedComplex
}
base.Fatalf("unexpected Ctype: %v", ct)
return nil
}
var OKForConst [types.NTYPE]bool
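// The wrap-around noted in Int64Val's comment, shown with the public
// go/constant package that val.go builds on (a standalone sketch, not
// compiler code).
package main

import (
	"fmt"
	"go/constant"
)

func main() {
	v := constant.MakeUint64(^uint64(0)) // 2^64 - 1

	if _, exact := constant.Int64Val(v); !exact {
		fmt.Println("does not fit in int64") // printed: value is out of range
	}
	if x, exact := constant.Uint64Val(v); exact {
		fmt.Println(int64(x)) // -1: the negative int64 Int64Val yields for uint64 types
	}
}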


@ -5,6 +5,7 @@
package mips
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/mips"
@ -18,7 +19,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, gc.Ctxt.FixedFrameSize()+off+i)
p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i)
}
} else {
//fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi)
@ -28,7 +29,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
// MOVW R0, (Widthptr)r1
// ADD $Widthptr, r1
// BNE r1, r2, loop
p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
p.Reg = mips.REGSP
p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
p.Reg = mips.REGRT1


@ -7,7 +7,9 @@ package mips
import (
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
@ -287,7 +289,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
case *gc.Node:
case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
@ -766,8 +768,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpMIPSFPFlagTrue,
ssa.OpMIPSFPFlagFalse:
@ -796,7 +798,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = -gc.Ctxt.FixedFrameSize()
p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()


@ -7,7 +7,9 @@ package mips64
import (
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
@ -261,7 +263,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
case *gc.Node:
case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
@ -724,8 +726,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpMIPS64FPFlagTrue,
ssa.OpMIPS64FPFlagFalse:
@ -757,7 +759,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = -gc.Ctxt.FixedFrameSize()
p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()


@ -5,6 +5,7 @@
package ppc64
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
@ -16,17 +17,17 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, gc.Ctxt.FixedFrameSize()+off+i)
p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i)
}
} else if cnt <= int64(128*gc.Widthptr) {
p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
} else {
p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
@ -66,7 +67,7 @@ func ginsnopdefer(pp *gc.Progs) *obj.Prog {
// on ppc64 in both shared and non-shared modes.
ginsnop(pp)
if gc.Ctxt.Flag_shared {
if base.Ctxt.Flag_shared {
p := pp.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_MEM
p.From.Offset = 24


@ -5,7 +5,9 @@
package ppc64
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
@ -473,7 +475,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = -gc.Ctxt.FixedFrameSize()
p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@ -750,7 +752,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
}
case *obj.LSym, *gc.Node:
case *obj.LSym, ir.Node:
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Reg = v.Args[0].Reg()
@ -1784,7 +1786,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// Insert a hint this is not a subroutine return.
pp.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: 1})
if gc.Ctxt.Flag_shared {
if base.Ctxt.Flag_shared {
// When compiling Go into PIC, the function we just
// called via pointer might have been implemented in
// a separate module and so overwritten the TOC
@ -1852,8 +1854,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
base.WarnfAt(v.Pos, "generated nil check")
}
// These should be resolved by rules and not make it here.


@ -5,6 +5,7 @@
package riscv64
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/riscv"
@ -16,7 +17,7 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
}
// Adjust the frame to account for LR.
off += gc.Ctxt.FixedFrameSize()
off += base.Ctxt.FixedFrameSize()
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {


@ -5,7 +5,9 @@
package riscv64
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
@ -91,7 +93,7 @@ func loadByType(t *types.Type) obj.As {
case 8:
return riscv.AMOVD
default:
gc.Fatalf("unknown float width for load %d in type %v", width, t)
base.Fatalf("unknown float width for load %d in type %v", width, t)
return 0
}
}
@ -118,7 +120,7 @@ func loadByType(t *types.Type) obj.As {
case 8:
return riscv.AMOV
default:
gc.Fatalf("unknown width for load %d in type %v", width, t)
base.Fatalf("unknown width for load %d in type %v", width, t)
return 0
}
}
@ -134,7 +136,7 @@ func storeByType(t *types.Type) obj.As {
case 8:
return riscv.AMOVD
default:
gc.Fatalf("unknown float width for store %d in type %v", width, t)
base.Fatalf("unknown float width for store %d in type %v", width, t)
return 0
}
}
@ -149,7 +151,7 @@ func storeByType(t *types.Type) obj.As {
case 8:
return riscv.AMOV
default:
gc.Fatalf("unknown width for store %d in type %v", width, t)
base.Fatalf("unknown width for store %d in type %v", width, t)
return 0
}
}
@ -322,7 +324,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
case *gc.Node:
case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
@ -586,8 +588,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = riscv.REG_ZERO
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers
base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpRISCV64LoweredGetClosurePtr:
@ -598,7 +600,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(riscv.AMOV)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = -gc.Ctxt.FixedFrameSize()
p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()


@ -5,6 +5,7 @@
package s390x
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/s390x"
@ -23,7 +24,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
}
// Adjust the frame to account for LR.
off += gc.Ctxt.FixedFrameSize()
off += base.Ctxt.FixedFrameSize()
reg := int16(s390x.REGSP)
// If the off cannot fit in a 12-bit unsigned displacement then we


@ -7,6 +7,7 @@ package s390x
import (
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
@ -573,7 +574,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(s390x.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = -gc.Ctxt.FixedFrameSize()
p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@ -642,8 +643,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpS390XMVC:
vo := v.AuxValAndOff()


@ -5,6 +5,7 @@
package ssa
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
@ -138,7 +139,7 @@ type Frontend interface {
// Auto returns a Node for an auto variable of the given type.
// The SSA compiler uses this function to allocate space for spills.
Auto(src.XPos, *types.Type) GCNode
Auto(src.XPos, *types.Type) ir.Node
// Given the name for a compound type, returns the name we should use
// for the parts of that compound type.
@ -178,24 +179,6 @@ type Frontend interface {
MyImportPath() string
}
// interface used to hold a *gc.Node (a stack variable).
// We'd use *gc.Node directly but that would lead to an import cycle.
type GCNode interface {
Typ() *types.Type
String() string
IsSynthetic() bool
IsAutoTmp() bool
StorageClass() StorageClass
}
type StorageClass uint8
const (
ClassAuto StorageClass = iota // local stack variable
ClassParam // argument
ClassParamOut // return value
)
const go116lateCallExpansion = true
// LateCallExpansionEnabledWithin returns true if late call expansion should be tested


@ -5,6 +5,7 @@
package ssa
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
)
@ -136,9 +137,9 @@ func dse(f *Func) {
// reaches stores then we delete all the stores. The other operations will then
// be eliminated by the dead code elimination pass.
func elimDeadAutosGeneric(f *Func) {
addr := make(map[*Value]GCNode) // values that the address of the auto reaches
elim := make(map[*Value]GCNode) // values that could be eliminated if the auto is
used := make(map[GCNode]bool) // used autos that must be kept
addr := make(map[*Value]ir.Node) // values that the address of the auto reaches
elim := make(map[*Value]ir.Node) // values that could be eliminated if the auto is
used := make(map[ir.Node]bool) // used autos that must be kept
// visit the value and report whether any of the maps are updated
visit := func(v *Value) (changed bool) {
@ -146,8 +147,8 @@ func elimDeadAutosGeneric(f *Func) {
switch v.Op {
case OpAddr, OpLocalAddr:
// Propagate the address if it points to an auto.
n, ok := v.Aux.(GCNode)
if !ok || n.StorageClass() != ClassAuto {
n, ok := v.Aux.(ir.Node)
if !ok || n.Class() != ir.PAUTO {
return
}
if addr[v] == nil {
@ -157,8 +158,8 @@ func elimDeadAutosGeneric(f *Func) {
return
case OpVarDef, OpVarKill:
// v should be eliminated if we eliminate the auto.
n, ok := v.Aux.(GCNode)
if !ok || n.StorageClass() != ClassAuto {
n, ok := v.Aux.(ir.Node)
if !ok || n.Class() != ir.PAUTO {
return
}
if elim[v] == nil {
@ -173,8 +174,8 @@ func elimDeadAutosGeneric(f *Func) {
// for open-coded defers from being removed (since they
// may not be used by the inline code, but will be used by
// panic processing).
n, ok := v.Aux.(GCNode)
if !ok || n.StorageClass() != ClassAuto {
n, ok := v.Aux.(ir.Node)
if !ok || n.Class() != ir.PAUTO {
return
}
if !used[n] {
@ -221,7 +222,7 @@ func elimDeadAutosGeneric(f *Func) {
}
// Propagate any auto addresses through v.
node := GCNode(nil)
var node ir.Node
for _, a := range args {
if n, ok := addr[a]; ok && !used[n] {
if node == nil {
@ -298,15 +299,15 @@ func elimUnreadAutos(f *Func) {
// Loop over all ops that affect autos taking note of which
// autos we need and also stores that we might be able to
// eliminate.
seen := make(map[GCNode]bool)
seen := make(map[ir.Node]bool)
var stores []*Value
for _, b := range f.Blocks {
for _, v := range b.Values {
n, ok := v.Aux.(GCNode)
n, ok := v.Aux.(ir.Node)
if !ok {
continue
}
if n.StorageClass() != ClassAuto {
if n.Class() != ir.PAUTO {
continue
}
@ -334,7 +335,7 @@ func elimUnreadAutos(f *Func) {
// Eliminate stores to unread autos.
for _, store := range stores {
n, _ := store.Aux.(GCNode)
n, _ := store.Aux.(ir.Node)
if seen[n] {
continue
}
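// A hedged example of the code shape these passes target: a stack
// auto whose address never escapes and whose stores are never read
// back, so the stores (and their VarDef) can be dropped. Whether the
// pass fires on this exact function depends on earlier lowering; g is
// hypothetical.
package main

import "fmt"

func g(xs []int) int {
	var scratch [4]int // stack auto: candidate for elimUnreadAutos
	sum := 0
	for i, x := range xs {
		scratch[i&3] = x // dead stores: no load ever reads scratch
		sum += x
	}
	return sum
}

func main() {
	fmt.Println(g([]int{1, 2, 3})) // 6
}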


@ -5,6 +5,7 @@
package ssa
import (
"cmd/compile/internal/ir"
"cmd/internal/dwarf"
"cmd/internal/obj"
"encoding/hex"
@ -24,7 +25,7 @@ type FuncDebug struct {
// Slots is all the slots used in the debug info, indexed by their SlotID.
Slots []LocalSlot
// The user variables, indexed by VarID.
Vars []GCNode
Vars []ir.Node
// The slots that make up each variable, indexed by VarID.
VarSlots [][]SlotID
// The location list data, indexed by VarID. Must be processed by PutLocationList.
@ -165,7 +166,7 @@ func (s *debugState) logf(msg string, args ...interface{}) {
type debugState struct {
// See FuncDebug.
slots []LocalSlot
vars []GCNode
vars []ir.Node
varSlots [][]SlotID
lists [][]byte
@ -189,7 +190,7 @@ type debugState struct {
// The pending location list entry for each user variable, indexed by VarID.
pendingEntries []pendingEntry
varParts map[GCNode][]SlotID
varParts map[ir.Node][]SlotID
blockDebug []BlockDebug
pendingSlotLocs []VarLoc
liveSlots []liveSlot
@ -346,7 +347,7 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
}
if state.varParts == nil {
state.varParts = make(map[GCNode][]SlotID)
state.varParts = make(map[ir.Node][]SlotID)
} else {
for n := range state.varParts {
delete(state.varParts, n)
@ -360,7 +361,7 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
state.vars = state.vars[:0]
for i, slot := range f.Names {
state.slots = append(state.slots, slot)
if slot.N.IsSynthetic() {
if ir.IsSynthetic(slot.N) {
continue
}
@ -379,8 +380,8 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
for _, b := range f.Blocks {
for _, v := range b.Values {
if v.Op == OpVarDef || v.Op == OpVarKill {
n := v.Aux.(GCNode)
if n.IsSynthetic() {
n := v.Aux.(ir.Node)
if ir.IsSynthetic(n) {
continue
}
@ -425,7 +426,7 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
state.initializeCache(f, len(state.varParts), len(state.slots))
for i, slot := range f.Names {
if slot.N.IsSynthetic() {
if ir.IsSynthetic(slot.N) {
continue
}
for _, value := range f.NamedValues[slot] {
@ -717,8 +718,8 @@ func (state *debugState) processValue(v *Value, vSlots []SlotID, vReg *Register)
switch {
case v.Op == OpVarDef, v.Op == OpVarKill:
n := v.Aux.(GCNode)
if n.IsSynthetic() {
n := v.Aux.(ir.Node)
if ir.IsSynthetic(n) {
break
}


@ -247,7 +247,7 @@ func expandCalls(f *Func) {
// i.e., the struct select is generated and remains in because it is not applied to an actual structure.
// The OpLoad was created to load the single field of the IData
// This case removes that StructSelect.
if leafType != selector.Type {
if leafType != selector.Type && !selector.Type.IsEmptyInterface() { // empty interface for #42727
f.Fatalf("Unexpected Load as selector, leaf=%s, selector=%s\n", leaf.LongString(), selector.LongString())
}
leaf.copyOf(selector)


@ -5,6 +5,7 @@
package ssa
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
@ -36,10 +37,10 @@ func testConfigArch(tb testing.TB, arch string) *Conf {
tb.Fatalf("unknown arch %s", arch)
}
if ctxt.Arch.PtrSize != 8 {
tb.Fatal("dummyTypes is 64-bit only")
tb.Fatal("testTypes is 64-bit only")
}
c := &Conf{
config: NewConfig(arch, dummyTypes, ctxt, true),
config: NewConfig(arch, testTypes, ctxt, true),
tb: tb,
}
return c
@ -53,108 +54,85 @@ type Conf struct {
func (c *Conf) Frontend() Frontend {
if c.fe == nil {
c.fe = DummyFrontend{t: c.tb, ctxt: c.config.ctxt}
c.fe = TestFrontend{t: c.tb, ctxt: c.config.ctxt}
}
return c.fe
}
// DummyFrontend is a test-only frontend.
// TestFrontend is a test-only frontend.
// It assumes 64 bit integers and pointers.
type DummyFrontend struct {
type TestFrontend struct {
t testing.TB
ctxt *obj.Link
}
type DummyAuto struct {
t *types.Type
s string
}
func (d *DummyAuto) Typ() *types.Type {
return d.t
}
func (d *DummyAuto) String() string {
return d.s
}
func (d *DummyAuto) StorageClass() StorageClass {
return ClassAuto
}
func (d *DummyAuto) IsSynthetic() bool {
return false
}
func (d *DummyAuto) IsAutoTmp() bool {
return true
}
func (DummyFrontend) StringData(s string) *obj.LSym {
func (TestFrontend) StringData(s string) *obj.LSym {
return nil
}
func (DummyFrontend) Auto(pos src.XPos, t *types.Type) GCNode {
return &DummyAuto{t: t, s: "aDummyAuto"}
func (TestFrontend) Auto(pos src.XPos, t *types.Type) ir.Node {
n := ir.NewNameAt(pos, &types.Sym{Name: "aFakeAuto"})
n.SetClass(ir.PAUTO)
return n
}
func (d DummyFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) {
return LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 8}
func (d TestFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) {
return LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 8}
}
func (d DummyFrontend) SplitInterface(s LocalSlot) (LocalSlot, LocalSlot) {
return LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off + 8}
func (d TestFrontend) SplitInterface(s LocalSlot) (LocalSlot, LocalSlot) {
return LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off + 8}
}
func (d DummyFrontend) SplitSlice(s LocalSlot) (LocalSlot, LocalSlot, LocalSlot) {
func (d TestFrontend) SplitSlice(s LocalSlot) (LocalSlot, LocalSlot, LocalSlot) {
return LocalSlot{N: s.N, Type: s.Type.Elem().PtrTo(), Off: s.Off},
LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 8},
LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 16}
LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 8},
LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 16}
}
func (d DummyFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) {
func (d TestFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) {
if s.Type.Size() == 16 {
return LocalSlot{N: s.N, Type: dummyTypes.Float64, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Float64, Off: s.Off + 8}
return LocalSlot{N: s.N, Type: testTypes.Float64, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Float64, Off: s.Off + 8}
}
return LocalSlot{N: s.N, Type: dummyTypes.Float32, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Float32, Off: s.Off + 4}
return LocalSlot{N: s.N, Type: testTypes.Float32, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Float32, Off: s.Off + 4}
}
func (d DummyFrontend) SplitInt64(s LocalSlot) (LocalSlot, LocalSlot) {
func (d TestFrontend) SplitInt64(s LocalSlot) (LocalSlot, LocalSlot) {
if s.Type.IsSigned() {
return LocalSlot{N: s.N, Type: dummyTypes.Int32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off}
return LocalSlot{N: s.N, Type: testTypes.Int32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off}
}
return LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off}
return LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off}
}
func (d DummyFrontend) SplitStruct(s LocalSlot, i int) LocalSlot {
func (d TestFrontend) SplitStruct(s LocalSlot, i int) LocalSlot {
return LocalSlot{N: s.N, Type: s.Type.FieldType(i), Off: s.Off + s.Type.FieldOff(i)}
}
func (d DummyFrontend) SplitArray(s LocalSlot) LocalSlot {
func (d TestFrontend) SplitArray(s LocalSlot) LocalSlot {
return LocalSlot{N: s.N, Type: s.Type.Elem(), Off: s.Off}
}
func (d DummyFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
func (d TestFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
return LocalSlot{N: parent.N, Type: t, Off: offset}
}
func (DummyFrontend) Line(_ src.XPos) string {
func (TestFrontend) Line(_ src.XPos) string {
return "unknown.go:0"
}
func (DummyFrontend) AllocFrame(f *Func) {
func (TestFrontend) AllocFrame(f *Func) {
}
func (d DummyFrontend) Syslook(s string) *obj.LSym {
func (d TestFrontend) Syslook(s string) *obj.LSym {
return d.ctxt.Lookup(s)
}
func (DummyFrontend) UseWriteBarrier() bool {
func (TestFrontend) UseWriteBarrier() bool {
return true // only writebarrier_test cares
}
func (DummyFrontend) SetWBPos(pos src.XPos) {
func (TestFrontend) SetWBPos(pos src.XPos) {
}
func (d DummyFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) }
func (d DummyFrontend) Log() bool { return true }
func (d TestFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) }
func (d TestFrontend) Log() bool { return true }
func (d DummyFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) }
func (d DummyFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) }
func (d DummyFrontend) Debug_checknil() bool { return false }
func (d TestFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) }
func (d TestFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) }
func (d TestFrontend) Debug_checknil() bool { return false }
func (d DummyFrontend) MyImportPath() string {
func (d TestFrontend) MyImportPath() string {
return "my/import/path"
}
var dummyTypes Types
var testTypes Types
func init() {
// Initialize just enough of the universe and the types package to make our tests function.
@ -198,12 +176,12 @@ func init() {
t.Align = uint8(typ.width)
types.Types[typ.et] = t
}
dummyTypes.SetTypPtrs()
testTypes.SetTypPtrs()
}
func (d DummyFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil }
func (d TestFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil }
func (d DummyFrontend) CanSSA(t *types.Type) bool {
// There are no un-SSAable types in dummy land.
func (d TestFrontend) CanSSA(t *types.Type) bool {
// There are no un-SSAable types in test land.
return true
}
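
For orientation, a minimal sketch (inside package ssa) of how a test would pick up the renamed frontend; testConfig is assumed to be the existing amd64 wrapper around testConfigArch, and the assertion is illustrative only:

package ssa

import "testing"

// TestFrontendSketch obtains the lazily-built TestFrontend from a Conf
// and checks one of its fixed answers.
func TestFrontendSketch(t *testing.T) {
	c := testConfig(t) // assumed helper: testConfigArch(t, "amd64")
	fe := c.Frontend() // first call constructs the TestFrontend
	if !fe.UseWriteBarrier() {
		t.Fatal("TestFrontend is expected to report write barriers enabled")
	}
}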

View file

@ -5,6 +5,7 @@
package ssa
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"fmt"
)
@ -59,7 +60,7 @@ func (r *Register) GCNum() int16 {
// { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8}
// parent = &{N: s, Type: string}
type LocalSlot struct {
N GCNode // an ONAME *gc.Node representing a stack location.
N ir.Node // an ONAME ir.Node representing a stack location.
Type *types.Type // type of slot
Off int64 // offset of slot in N
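
To make the N/Type/Off bookkeeping concrete, a standalone sketch of how a string slot splits into its pointer and length halves, following the SplitString layout shown above (the slot struct is a simplified stand-in for LocalSlot, with strings standing in for ir.Node and *types.Type):

package main

import "fmt"

// slot is a simplified LocalSlot: the backing variable's name stands
// in for ir.Node, a type name for *types.Type.
type slot struct {
	n   string
	typ string
	off int64
}

// splitString mirrors SplitString: both halves keep the same backing
// node; the pointer sits at off, the length at off+8 (64-bit layout).
func splitString(s slot) (ptr, length slot) {
	return slot{s.n, "*byte", s.off}, slot{s.n, "int", s.off + 8}
}

func main() {
	p, l := splitString(slot{"s", "string", 0})
	fmt.Println(p, l) // {s *byte 0} {s int 8}
}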

View file

@ -5,6 +5,7 @@
package ssa
import (
"cmd/compile/internal/ir"
"cmd/internal/objabi"
"cmd/internal/src"
)
@ -235,7 +236,7 @@ func nilcheckelim2(f *Func) {
continue
}
if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(GCNode).Typ().HasPointers()) {
if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(ir.Node).Type().HasPointers()) {
// These ops don't really change memory.
continue
// Note: OpVarDef requires that the defined variable not have pointers.

View file

@ -136,13 +136,13 @@ type posetNode struct {
// Most internal data structures are pre-allocated and flat, so for instance adding a
// new relation does not cause any allocation. For performance reasons,
// each node has only up to two outgoing edges (like a binary tree), so intermediate
// "dummy" nodes are required to represent more than two relations. For instance,
// "extra" nodes are required to represent more than two relations. For instance,
// to record that A<I, A<J, A<K (with no known relation between I,J,K), we create the
// following DAG:
//
// A
// / \
// I dummy
// I extra
// / \
// J K
//
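
The same shape as a standalone sketch (node and extra are illustrative names, not the poset package's own types):

package main

import "fmt"

// Each node carries at most two outgoing edges, so a third relation
// out of A is routed through an unlabeled extra node, as in the DAG above.
type node struct {
	val         string // "" marks an extra, internal-only node
	left, right *node
}

func main() {
	i, j, k := &node{val: "I"}, &node{val: "J"}, &node{val: "K"}
	extra := &node{left: j, right: k} // carries A's overflow edges
	a := &node{val: "A", left: i, right: extra}
	fmt.Println(a.left.val, a.right.left.val, a.right.right.val) // I J K
}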
@ -223,7 +223,7 @@ func (po *poset) addchild(i1, i2 uint32, strict bool) {
po.setchr(i1, e2)
po.upush(undoSetChr, i1, 0)
} else {
// If n1 already has two children, add an intermediate dummy
// If n1 already has two children, add an intermediate extra
// node to record the relation correctly (without relating
// n2 to other existing nodes). Use a non-deterministic value
// to decide whether to append on the left or the right, to avoid
@ -231,27 +231,27 @@ func (po *poset) addchild(i1, i2 uint32, strict bool) {
//
// n1
// / \
// i1l dummy
// i1l extra
// / \
// i1r n2
//
dummy := po.newnode(nil)
extra := po.newnode(nil)
if (i1^i2)&1 != 0 { // non-deterministic
po.setchl(dummy, i1r)
po.setchr(dummy, e2)
po.setchr(i1, newedge(dummy, false))
po.setchl(extra, i1r)
po.setchr(extra, e2)
po.setchr(i1, newedge(extra, false))
po.upush(undoSetChr, i1, i1r)
} else {
po.setchl(dummy, i1l)
po.setchr(dummy, e2)
po.setchl(i1, newedge(dummy, false))
po.setchl(extra, i1l)
po.setchr(extra, e2)
po.setchl(i1, newedge(extra, false))
po.upush(undoSetChl, i1, i1l)
}
}
}
// newnode allocates a new node bound to SSA value n.
// If n is nil, this is a dummy node (= only used internally).
// If n is nil, this is an extra node (= only used internally).
func (po *poset) newnode(n *Value) uint32 {
i := po.lastidx + 1
po.lastidx++
@ -380,9 +380,9 @@ func (po *poset) newconst(n *Value) {
case higherptr != 0:
// Higher bound only. To record n < higher, we need
// a dummy root:
// an extra root:
//
// dummy
// extra
// / \
// root \
// / n
@ -395,11 +395,11 @@ func (po *poset) newconst(n *Value) {
if r2 != po.roots[0] { // all constants should be in root #0
panic("constant not in root #0")
}
dummy := po.newnode(nil)
po.changeroot(r2, dummy)
po.upush(undoChangeRoot, dummy, newedge(r2, false))
po.addchild(dummy, r2, false)
po.addchild(dummy, i, false)
extra := po.newnode(nil)
po.changeroot(r2, extra)
po.upush(undoChangeRoot, extra, newedge(r2, false))
po.addchild(extra, r2, false)
po.addchild(extra, i, false)
po.addchild(i, i2, true)
}
@ -612,7 +612,7 @@ func (po *poset) findroot(i uint32) uint32 {
panic("findroot didn't find any root")
}
// mergeroot merges two DAGs into one DAG by creating a new dummy root
// mergeroot merges two DAGs into one DAG by creating a new extra root
func (po *poset) mergeroot(r1, r2 uint32) uint32 {
// Root #0 is special as it contains all constants. Since mergeroot
// discards r2 as root and keeps r1, make sure that r2 is not root #0,
@ -1004,7 +1004,7 @@ func (po *poset) setOrder(n1, n2 *Value, strict bool) bool {
case !f1 && f2:
// n1 is not in any DAG but n2 is. If n2 is a root, we can put
// n1 in its place as a root; otherwise, we need to create a new
// dummy root to record the relation.
// extra root to record the relation.
i1 = po.newnode(n1)
if po.isroot(i2) {
@ -1020,17 +1020,17 @@ func (po *poset) setOrder(n1, n2 *Value, strict bool) bool {
// Re-parent as follows:
//
// dummy
// extra
// r / \
// \ ===> r i1
// i2 \ /
// i2
//
dummy := po.newnode(nil)
po.changeroot(r, dummy)
po.upush(undoChangeRoot, dummy, newedge(r, false))
po.addchild(dummy, r, false)
po.addchild(dummy, i1, false)
extra := po.newnode(nil)
po.changeroot(r, extra)
po.upush(undoChangeRoot, extra, newedge(r, false))
po.addchild(extra, r, false)
po.addchild(extra, i1, false)
po.addchild(i1, i2, strict)
case f1 && f2:

View file

@ -104,7 +104,7 @@
// If b3 is the primary predecessor of b2, then we use x3 in b2 and
// add an x4:CX->BX copy at the end of b4.
// But the definition of x3 doesn't dominate b2. We should really
// insert a dummy phi at the start of b2 (x5=phi(x3,x4):BX) to keep
// insert an extra phi at the start of b2 (x5=phi(x3,x4):BX) to keep
// SSA form. For now, we ignore this problem as remaining in strict
// SSA form isn't needed after regalloc. We'll just leave the use
// of x3 not dominated by the definition of x3, and the CX->BX copy
@ -114,6 +114,7 @@
package ssa
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/objabi"
"cmd/internal/src"
@ -1248,7 +1249,7 @@ func (s *regAllocState) regalloc(f *Func) {
// This forces later liveness analysis to make the
// value live at this point.
v.SetArg(0, s.makeSpill(a, b))
} else if _, ok := a.Aux.(GCNode); ok && vi.rematerializeable {
} else if _, ok := a.Aux.(ir.Node); ok && vi.rematerializeable {
// Rematerializeable value with an ir.Node. This is the address of
// a stack object (e.g. an LEAQ). Keep the object live.
// Change it to VarLive, which is what plive expects for locals.

View file

@ -7,6 +7,7 @@
package ssa
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
@ -156,7 +157,7 @@ func (s *stackAllocState) stackalloc() {
if v.Aux == nil {
f.Fatalf("%s has nil Aux\n", v.LongString())
}
loc := LocalSlot{N: v.Aux.(GCNode), Type: v.Type, Off: v.AuxInt}
loc := LocalSlot{N: v.Aux.(ir.Node), Type: v.Type, Off: v.AuxInt}
if f.pass.debug > stackDebug {
fmt.Printf("stackalloc %s to %s\n", v, loc)
}
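
The assertion pattern in isolation, as a standalone sketch (the node interface is a stand-in for ir.Node, and aux for the interface{}-typed Value.Aux):

package main

import "fmt"

type node interface{ Name() string }

type autoVar struct{ s string }

func (v autoVar) Name() string { return v.s }

func main() {
	var aux interface{} = autoVar{"x"}
	// The comma-ok form mirrors recovering the frontend node stored in
	// Value.Aux before building a LocalSlot from it.
	if n, ok := aux.(node); ok {
		fmt.Println("slot backed by", n.Name())
	}
}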

View file

@ -13,7 +13,7 @@ func TestDump(t *testing.T) {
t.Skip("skipping test in short mode")
}
// provide a dummy error handler so parsing doesn't stop after first error
// provide a no-op error handler so parsing doesn't stop after first error
ast, err := ParseFile(*src_, func(error) {}, nil, CheckBranches)
if err != nil {
t.Error(err)
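
The handler pattern in isolation (cmd/compile/internal/syntax is compiler-internal, so this sketch builds only inside the Go tree; the filename is a placeholder):

package main

import (
	"fmt"

	"cmd/compile/internal/syntax"
)

func main() {
	// A non-nil handler makes the parser report an error and continue,
	// so all syntax errors in the file can be collected in one pass.
	var errs []error
	errh := func(err error) { errs = append(errs, err) }
	ast, _ := syntax.ParseFile("example.go", errh, nil, syntax.CheckBranches)
	fmt.Println(ast != nil, len(errs))
}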

View file

@ -116,7 +116,7 @@ func (*decl) aDecl() {}
// All declarations belonging to the same group point to the same Group node.
type Group struct {
dummy int // not empty so we are guaranteed different Group instances
_ int // not empty so we are guaranteed different Group instances
}
// ----------------------------------------------------------------------------
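
Why the blank field matters: the Go spec allows two distinct zero-size variables to share an address, so an empty Group could not serve as a unique identity. A standalone sketch:

package main

import "fmt"

type empty struct{}
type group struct{ _ int } // non-zero size, like syntax.Group

func main() {
	e1, e2 := &empty{}, &empty{}
	g1, g2 := &group{}, &group{}
	// e1 == e2 may be true: zero-size allocations can be coalesced.
	// g1 == g2 is reliably false: each group occupies real memory.
	fmt.Println(e1 == e2, g1 == g2)
}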

View file

@ -18,7 +18,7 @@ func TestPrint(t *testing.T) {
t.Skip("skipping test in short mode")
}
// provide a dummy error handler so parsing doesn't stop after first error
// provide a no-op error handler so parsing doesn't stop after first error
ast, err := ParseFile(*src_, func(error) {}, nil, 0)
if err != nil {
t.Error(err)

View file

@ -15,7 +15,7 @@ var Block int32 // current block number
// restored once the block scope ends.
type dsym struct {
sym *Sym // sym == nil indicates stack mark
def *Node
def IRNode
block int32
lastlineno src.XPos // last declaration for diagnostic
}
@ -79,16 +79,16 @@ func IsDclstackValid() bool {
}
// PkgDef returns the definition associated with s at package scope.
func (s *Sym) PkgDef() *Node {
func (s *Sym) PkgDef() IRNode {
return *s.pkgDefPtr()
}
// SetPkgDef sets the definition associated with s at package scope.
func (s *Sym) SetPkgDef(n *Node) {
func (s *Sym) SetPkgDef(n IRNode) {
*s.pkgDefPtr() = n
}
func (s *Sym) pkgDefPtr() **Node {
func (s *Sym) pkgDefPtr() *IRNode {
// Look for outermost saved declaration, which must be the
// package scope definition, if present.
for _, d := range dclstack {

View file

@ -20,11 +20,11 @@ func TestSizeof(t *testing.T) {
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
{Sym{}, 52, 88},
{Type{}, 52, 88},
{Sym{}, 60, 104},
{Type{}, 56, 96},
{Map{}, 20, 40},
{Forward{}, 20, 32},
{Func{}, 32, 56},
{Func{}, 28, 48},
{Struct{}, 16, 32},
{Interface{}, 8, 16},
{Chan{}, 8, 16},
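
The bumps in Sym and Type presumably track the move from *Node pointers to two-word interface values such as IRNode. The test compares unsafe.Sizeof against the table; the technique in miniature (probe is illustrative):

package main

import (
	"fmt"
	"unsafe"
)

type probe struct {
	a uint32
	b uintptr
}

func main() {
	// Prints 8 on 32-bit platforms and 16 on 64-bit: uintptr is
	// word-sized and forces word alignment, mirroring the test's
	// _32bit/_64bit columns.
	fmt.Println(unsafe.Sizeof(probe{}))
}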

Some files were not shown because too many files have changed in this diff.