all: merge branch dev.regabi (d3cd4830ad) into master

This CL merges the dev.regabi branch into the master branch.

In the dev.regabi branch we have refactored the compiler and laid
some preliminary groundwork for enabling a register-based ABI
(issue #40724), including improved late call/return lowering,
improved ABI wrapper generation, preparing reflect calls for the new
ABI, and reserving special registers in the internal ABI. The actual
register-based ABI has not yet been enabled. The ABI-related changes
are behind GOEXPERIMENT=regabi and are currently off by default.

Updates #40724, #44222.
Fixes #44224.

Change-Id: Id5de9f734d14099267ab717167aaaeef31fdba70
Author: Cherry Zhang
Date: 2021-02-16 16:55:30 -05:00
commit 84825599dc
506 changed files with 51224 additions and 45066 deletions


@@ -109,6 +109,10 @@ func archX86(linkArch *obj.LinkArch) *Arch {
register["SB"] = RSB
register["FP"] = RFP
register["PC"] = RPC
if linkArch == &x86.Linkamd64 {
// Alias g to R14
register["g"] = x86.REGG
}
// Register prefix not used on this architecture.
instructions := make(map[string]obj.As)


@@ -259,6 +259,7 @@ var amd64OperandTests = []operandTest{
{"R15", "R15"},
{"R8", "R8"},
{"R9", "R9"},
{"g", "R14"},
{"SI", "SI"},
{"SP", "SP"},
{"X0", "X0"},


@@ -305,7 +305,7 @@ func (p *Parser) pseudo(word string, operands [][]lex.Token) bool {
// references and writes symabis information to w.
//
// The symabis format is documented at
// cmd/compile/internal/gc.readSymABIs.
// cmd/compile/internal/ssagen.ReadSymABIs.
func (p *Parser) symDefRef(w io.Writer, word string, operands [][]lex.Token) {
switch word {
case "TEXT":


@@ -1,599 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements TestFormats; a test that verifies
// format strings in the compiler (this directory and all
// subdirectories, recursively).
//
// TestFormats finds potential (Printf, etc.) format strings.
// If they are used in a call, the format verbs are verified
// based on the matching argument type against a precomputed
// map of valid formats (knownFormats). This map can be used to
// automatically rewrite format strings across all compiler
// files with the -r flag.
//
// The format map needs to be updated whenever a new (type,
// format) combination is found and the format verb is not
// 'v' or 'T' (as in "%v" or "%T"). To update the map auto-
// matically from the compiler source's use of format strings,
// use the -u flag. (Whether formats are valid for the values
// to be formatted must be verified manually, of course.)
//
// The -v flag prints out the names of all functions called
// with a format string, the names of files that were not
// processed, and any format rewrites made (with -r).
//
// Run as: go test -run Formats [-r][-u][-v]
//
// Known shortcomings:
// - indexed format strings ("%[2]s", etc.) are not supported
// (the test will fail)
// - format strings that are not simple string literals cannot
// be updated automatically
// (the test will fail with respective warnings)
// - format strings in _test packages outside the current
// package are not processed
// (the test will report those files)
//
package main_test
import (
"bytes"
"flag"
"fmt"
"go/ast"
"go/build"
"go/constant"
"go/format"
"go/importer"
"go/parser"
"go/token"
"go/types"
"internal/testenv"
"io"
"io/fs"
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"testing"
"unicode/utf8"
)
var (
rewrite = flag.Bool("r", false, "rewrite format strings")
update = flag.Bool("u", false, "update known formats")
)
// The following variables collect information across all processed files.
var (
fset = token.NewFileSet()
formatStrings = make(map[*ast.BasicLit]bool) // set of all potential format strings found
foundFormats = make(map[string]bool) // set of all formats found
callSites = make(map[*ast.CallExpr]*callSite) // map of all calls
)
// A File is a corresponding (filename, ast) pair.
type File struct {
name string
ast *ast.File
}
func TestFormats(t *testing.T) {
if testing.Short() && testenv.Builder() == "" {
t.Skip("Skipping in short mode")
}
testenv.MustHaveGoBuild(t) // more restrictive than necessary, but that's ok
// process all directories
filepath.WalkDir(".", func(path string, info fs.DirEntry, err error) error {
if info.IsDir() {
if info.Name() == "testdata" {
return filepath.SkipDir
}
importPath := filepath.Join("cmd/compile", path)
if ignoredPackages[filepath.ToSlash(importPath)] {
return filepath.SkipDir
}
pkg, err := build.Import(importPath, path, 0)
if err != nil {
if _, ok := err.(*build.NoGoError); ok {
return nil // nothing to do here
}
t.Fatal(err)
}
collectPkgFormats(t, pkg)
}
return nil
})
// test and rewrite formats
updatedFiles := make(map[string]File) // files that were rewritten
for _, p := range callSites {
// test current format literal and determine updated one
out := formatReplace(p.str, func(index int, in string) string {
if in == "*" {
return in // cannot rewrite '*' (as in "%*d")
}
// in != '*'
typ := p.types[index]
format := typ + " " + in // e.g., "*Node %n"
// check if format is known
out, known := knownFormats[format]
// record format if not yet found
_, found := foundFormats[format]
if !found {
foundFormats[format] = true
}
// report an error if the format is unknown and this is the first
// time we see it; ignore "%v" and "%T" which are always valid
if !known && !found && in != "%v" && in != "%T" {
t.Errorf("%s: unknown format %q for %s argument", posString(p.arg), in, typ)
}
if out == "" {
out = in
}
return out
})
// replace existing format literal if it changed
if out != p.str {
// we cannot replace the argument if it's not a string literal for now
// (e.g., it may be "foo" + "bar")
lit, ok := p.arg.(*ast.BasicLit)
if !ok {
delete(callSites, p.call) // treat as if we hadn't found this site
continue
}
if testing.Verbose() {
fmt.Printf("%s:\n\t- %q\n\t+ %q\n", posString(p.arg), p.str, out)
}
// find argument index of format argument
index := -1
for i, arg := range p.call.Args {
if p.arg == arg {
index = i
break
}
}
if index < 0 {
// we may have processed the same call site twice,
// but that shouldn't happen
panic("internal error: matching argument not found")
}
// replace literal
new := *lit // make a copy
new.Value = strconv.Quote(out) // this may introduce "-quotes where there were `-quotes
p.call.Args[index] = &new
updatedFiles[p.file.name] = p.file
}
}
// write dirty files back
var filesUpdated bool
if len(updatedFiles) > 0 && *rewrite {
for _, file := range updatedFiles {
var buf bytes.Buffer
if err := format.Node(&buf, fset, file.ast); err != nil {
t.Errorf("WARNING: gofmt %s failed: %v", file.name, err)
continue
}
if err := ioutil.WriteFile(file.name, buf.Bytes(), 0644); err != nil {
t.Errorf("WARNING: writing %s failed: %v", file.name, err)
continue
}
fmt.Printf("updated %s\n", file.name)
filesUpdated = true
}
}
// report the names of all functions called with a format string
if len(callSites) > 0 && testing.Verbose() {
set := make(map[string]bool)
for _, p := range callSites {
set[nodeString(p.call.Fun)] = true
}
var list []string
for s := range set {
list = append(list, s)
}
fmt.Println("\nFunctions called with a format string")
writeList(os.Stdout, list)
}
// update formats
if len(foundFormats) > 0 && *update {
var list []string
for s := range foundFormats {
list = append(list, fmt.Sprintf("%q: \"\",", s))
}
var buf bytes.Buffer
buf.WriteString(knownFormatsHeader)
writeList(&buf, list)
buf.WriteString("}\n")
out, err := format.Source(buf.Bytes())
const outfile = "fmtmap_test.go"
if err != nil {
t.Errorf("WARNING: gofmt %s failed: %v", outfile, err)
out = buf.Bytes() // continue with unformatted source
}
if err = ioutil.WriteFile(outfile, out, 0644); err != nil {
t.Errorf("WARNING: updating format map failed: %v", err)
}
}
// check that knownFormats is up to date
if !*rewrite && !*update {
var mismatch bool
for s := range foundFormats {
if _, ok := knownFormats[s]; !ok {
mismatch = true
break
}
}
if !mismatch {
for s := range knownFormats {
if _, ok := foundFormats[s]; !ok {
mismatch = true
break
}
}
}
if mismatch {
t.Errorf("format map is out of date; run 'go test -u' to update and manually verify correctness of change'")
}
}
// all format strings of calls must be in the formatStrings set (self-verification)
for _, p := range callSites {
if lit, ok := p.arg.(*ast.BasicLit); ok && lit.Kind == token.STRING {
if formatStrings[lit] {
// ok
delete(formatStrings, lit)
} else {
// this should never happen
panic(fmt.Sprintf("internal error: format string not found (%s)", posString(lit)))
}
}
}
// if we have any strings left, we may need to update them manually
if len(formatStrings) > 0 && filesUpdated {
var list []string
for lit := range formatStrings {
list = append(list, fmt.Sprintf("%s: %s", posString(lit), nodeString(lit)))
}
fmt.Println("\nWARNING: Potentially missed format strings")
writeList(os.Stdout, list)
t.Fail()
}
fmt.Println()
}
// A callSite describes a function call that appears to contain
// a format string.
type callSite struct {
file File
call *ast.CallExpr // call containing the format string
arg ast.Expr // format argument (string literal or constant)
str string // unquoted format string
types []string // argument types
}
func collectPkgFormats(t *testing.T, pkg *build.Package) {
// collect all files
var filenames []string
filenames = append(filenames, pkg.GoFiles...)
filenames = append(filenames, pkg.CgoFiles...)
filenames = append(filenames, pkg.TestGoFiles...)
// TODO(gri) verify _test files outside package
for _, name := range pkg.XTestGoFiles {
// don't process this test itself
if name != "fmt_test.go" && testing.Verbose() {
fmt.Printf("WARNING: %s not processed\n", filepath.Join(pkg.Dir, name))
}
}
// make filenames relative to .
for i, name := range filenames {
filenames[i] = filepath.Join(pkg.Dir, name)
}
// parse all files
files := make([]*ast.File, len(filenames))
for i, filename := range filenames {
f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)
if err != nil {
t.Fatal(err)
}
files[i] = f
}
// typecheck package
conf := types.Config{Importer: importer.Default()}
etypes := make(map[ast.Expr]types.TypeAndValue)
if _, err := conf.Check(pkg.ImportPath, fset, files, &types.Info{Types: etypes}); err != nil {
t.Fatal(err)
}
// collect all potential format strings (for extra verification later)
for _, file := range files {
ast.Inspect(file, func(n ast.Node) bool {
if s, ok := stringLit(n); ok && isFormat(s) {
formatStrings[n.(*ast.BasicLit)] = true
}
return true
})
}
// collect all formats/arguments of calls with format strings
for index, file := range files {
ast.Inspect(file, func(n ast.Node) bool {
if call, ok := n.(*ast.CallExpr); ok {
if ignoredFunctions[nodeString(call.Fun)] {
return true
}
// look for an argument that might be a format string
for i, arg := range call.Args {
if s, ok := stringVal(etypes[arg]); ok && isFormat(s) {
// make sure we have enough arguments
n := numFormatArgs(s)
if i+1+n > len(call.Args) {
t.Errorf("%s: not enough format args (ignore %s?)", posString(call), nodeString(call.Fun))
break // ignore this call
}
// assume last n arguments are to be formatted;
// determine their types
argTypes := make([]string, n)
for i, arg := range call.Args[len(call.Args)-n:] {
if tv, ok := etypes[arg]; ok {
argTypes[i] = typeString(tv.Type)
}
}
// collect call site
if callSites[call] != nil {
panic("internal error: file processed twice?")
}
callSites[call] = &callSite{
file: File{filenames[index], file},
call: call,
arg: arg,
str: s,
types: argTypes,
}
break // at most one format per argument list
}
}
}
return true
})
}
}
// writeList writes list in sorted order to w.
func writeList(w io.Writer, list []string) {
sort.Strings(list)
for _, s := range list {
fmt.Fprintln(w, "\t", s)
}
}
// posString returns a string representation of n's position
// in the form filename:line:col: .
func posString(n ast.Node) string {
if n == nil {
return ""
}
return fset.Position(n.Pos()).String()
}
// nodeString returns a string representation of n.
func nodeString(n ast.Node) string {
var buf bytes.Buffer
if err := format.Node(&buf, fset, n); err != nil {
log.Fatal(err) // should always succeed
}
return buf.String()
}
// typeString returns a string representation of n.
func typeString(typ types.Type) string {
return filepath.ToSlash(typ.String())
}
// stringLit returns the unquoted string value and true if
// n represents a string literal; otherwise it returns ""
// and false.
func stringLit(n ast.Node) (string, bool) {
if lit, ok := n.(*ast.BasicLit); ok && lit.Kind == token.STRING {
s, err := strconv.Unquote(lit.Value)
if err != nil {
log.Fatal(err) // should not happen with correct ASTs
}
return s, true
}
return "", false
}
// stringVal returns the (unquoted) string value and true if
// tv is a string constant; otherwise it returns "" and false.
func stringVal(tv types.TypeAndValue) (string, bool) {
if tv.IsValue() && tv.Value != nil && tv.Value.Kind() == constant.String {
return constant.StringVal(tv.Value), true
}
return "", false
}
// formatIter iterates through the string s in increasing
// index order and calls f for each format specifier '%..v'.
// The arguments for f describe the specifier's index range.
// If a format specifier contains a "*", f is called with
// the index range for "*" alone, before being called for
// the entire specifier. The result of f is the index of
// the rune at which iteration continues.
func formatIter(s string, f func(i, j int) int) {
i := 0 // index after current rune
var r rune // current rune
next := func() {
r1, w := utf8.DecodeRuneInString(s[i:])
if w == 0 {
r1 = -1 // signal end-of-string
}
r = r1
i += w
}
flags := func() {
for r == ' ' || r == '#' || r == '+' || r == '-' || r == '0' {
next()
}
}
index := func() {
if r == '[' {
log.Fatalf("cannot handle indexed arguments: %s", s)
}
}
digits := func() {
index()
if r == '*' {
i = f(i-1, i)
next()
return
}
for '0' <= r && r <= '9' {
next()
}
}
for next(); r >= 0; next() {
if r == '%' {
i0 := i
next()
flags()
digits()
if r == '.' {
next()
digits()
}
index()
// accept any letter (a-z, A-Z) as format verb;
// ignore anything else
if 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' {
i = f(i0-1, i)
}
}
}
}
// isFormat reports whether s contains format specifiers.
func isFormat(s string) (yes bool) {
formatIter(s, func(i, j int) int {
yes = true
return len(s) // stop iteration
})
return
}
// oneFormat reports whether s is exactly one format specifier.
func oneFormat(s string) (yes bool) {
formatIter(s, func(i, j int) int {
yes = i == 0 && j == len(s)
return j
})
return
}
// numFormatArgs returns the number of format specifiers in s.
func numFormatArgs(s string) int {
count := 0
formatIter(s, func(i, j int) int {
count++
return j
})
return count
}
// formatReplace replaces the i'th format specifier s in the incoming
// string in with the result of f(i, s) and returns the new string.
func formatReplace(in string, f func(i int, s string) string) string {
var buf []byte
i0 := 0
index := 0
formatIter(in, func(i, j int) int {
if sub := in[i:j]; sub != "*" { // ignore calls for "*" width/length specifiers
buf = append(buf, in[i0:i]...)
buf = append(buf, f(index, sub)...)
i0 = j
}
index++
return j
})
return string(append(buf, in[i0:]...))
}
// ignoredPackages is the set of packages which can
// be ignored.
var ignoredPackages = map[string]bool{}
// ignoredFunctions is the set of functions which may have
// format-like arguments but which don't do any formatting and
// thus may be ignored.
var ignoredFunctions = map[string]bool{}
func init() {
// verify that knownFormats entries are correctly formatted
for key, val := range knownFormats {
// key must be "typename format", and format starts with a '%'
// (formats containing '*' alone are not collected in this map)
i := strings.Index(key, "%")
if i < 0 || !oneFormat(key[i:]) {
log.Fatalf("incorrect knownFormats key: %q", key)
}
// val must be "format" or ""
if val != "" && !oneFormat(val) {
log.Fatalf("incorrect knownFormats value: %q (key = %q)", val, key)
}
}
}
const knownFormatsHeader = `// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements the knownFormats map which records the valid
// formats for a given type. The valid formats must correspond to
// supported compiler formats implemented in fmt.go, or whatever
// other format verbs are implemented for the given type. The map may
// also be used to change the use of a format verb across all compiler
// sources automatically (for instance, if the implementation of fmt.go
// changes), by using the -r option together with the new formats in the
// map. To generate this file automatically from the existing source,
// run: go test -run Formats -u.
//
// See the package comment in fmt_test.go for additional information.
package main_test
// knownFormats entries are of the form "typename format" -> "newformat".
// An absent entry means that the format is not recognized as valid.
// An empty new format means that the format should remain unchanged.
var knownFormats = map[string]string{
`


@@ -1,211 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements the knownFormats map which records the valid
// formats for a given type. The valid formats must correspond to
// supported compiler formats implemented in fmt.go, or whatever
// other format verbs are implemented for the given type. The map may
// also be used to change the use of a format verb across all compiler
// sources automatically (for instance, if the implementation of fmt.go
// changes), by using the -r option together with the new formats in the
// map. To generate this file automatically from the existing source,
// run: go test -run Formats -u.
//
// See the package comment in fmt_test.go for additional information.
package main_test
// knownFormats entries are of the form "typename format" -> "newformat".
// An absent entry means that the format is not recognized as valid.
// An empty new format means that the format should remain unchanged.
var knownFormats = map[string]string{
"*bytes.Buffer %s": "",
"*cmd/compile/internal/gc.EscLocation %v": "",
"*cmd/compile/internal/gc.Mpflt %v": "",
"*cmd/compile/internal/gc.Mpint %v": "",
"*cmd/compile/internal/gc.Node %#v": "",
"*cmd/compile/internal/gc.Node %+S": "",
"*cmd/compile/internal/gc.Node %+v": "",
"*cmd/compile/internal/gc.Node %L": "",
"*cmd/compile/internal/gc.Node %S": "",
"*cmd/compile/internal/gc.Node %j": "",
"*cmd/compile/internal/gc.Node %p": "",
"*cmd/compile/internal/gc.Node %v": "",
"*cmd/compile/internal/ssa.Block %s": "",
"*cmd/compile/internal/ssa.Block %v": "",
"*cmd/compile/internal/ssa.Func %s": "",
"*cmd/compile/internal/ssa.Func %v": "",
"*cmd/compile/internal/ssa.Register %s": "",
"*cmd/compile/internal/ssa.Register %v": "",
"*cmd/compile/internal/ssa.SparseTreeNode %v": "",
"*cmd/compile/internal/ssa.Value %s": "",
"*cmd/compile/internal/ssa.Value %v": "",
"*cmd/compile/internal/ssa.sparseTreeMapEntry %v": "",
"*cmd/compile/internal/types.Field %p": "",
"*cmd/compile/internal/types.Field %v": "",
"*cmd/compile/internal/types.Sym %0S": "",
"*cmd/compile/internal/types.Sym %S": "",
"*cmd/compile/internal/types.Sym %p": "",
"*cmd/compile/internal/types.Sym %v": "",
"*cmd/compile/internal/types.Type %#L": "",
"*cmd/compile/internal/types.Type %#v": "",
"*cmd/compile/internal/types.Type %+v": "",
"*cmd/compile/internal/types.Type %-S": "",
"*cmd/compile/internal/types.Type %0S": "",
"*cmd/compile/internal/types.Type %L": "",
"*cmd/compile/internal/types.Type %S": "",
"*cmd/compile/internal/types.Type %p": "",
"*cmd/compile/internal/types.Type %s": "",
"*cmd/compile/internal/types.Type %v": "",
"*cmd/internal/obj.Addr %v": "",
"*cmd/internal/obj.LSym %v": "",
"*math/big.Float %f": "",
"*math/big.Int %#x": "",
"*math/big.Int %s": "",
"*math/big.Int %v": "",
"[16]byte %x": "",
"[]*cmd/compile/internal/ssa.Block %v": "",
"[]*cmd/compile/internal/ssa.Value %v": "",
"[][]string %q": "",
"[]byte %s": "",
"[]byte %x": "",
"[]cmd/compile/internal/ssa.Edge %v": "",
"[]cmd/compile/internal/ssa.ID %v": "",
"[]cmd/compile/internal/ssa.posetNode %v": "",
"[]cmd/compile/internal/ssa.posetUndo %v": "",
"[]cmd/compile/internal/syntax.token %s": "",
"[]string %v": "",
"[]uint32 %v": "",
"bool %v": "",
"byte %08b": "",
"byte %c": "",
"byte %q": "",
"byte %v": "",
"cmd/compile/internal/arm.shift %d": "",
"cmd/compile/internal/gc.Class %d": "",
"cmd/compile/internal/gc.Class %s": "",
"cmd/compile/internal/gc.Class %v": "",
"cmd/compile/internal/gc.Ctype %d": "",
"cmd/compile/internal/gc.Ctype %v": "",
"cmd/compile/internal/gc.Nodes %#v": "",
"cmd/compile/internal/gc.Nodes %+v": "",
"cmd/compile/internal/gc.Nodes %.v": "",
"cmd/compile/internal/gc.Nodes %v": "",
"cmd/compile/internal/gc.Op %#v": "",
"cmd/compile/internal/gc.Op %v": "",
"cmd/compile/internal/gc.Val %#v": "",
"cmd/compile/internal/gc.Val %T": "",
"cmd/compile/internal/gc.Val %v": "",
"cmd/compile/internal/gc.fmtMode %d": "",
"cmd/compile/internal/gc.initKind %d": "",
"cmd/compile/internal/gc.itag %v": "",
"cmd/compile/internal/ssa.BranchPrediction %d": "",
"cmd/compile/internal/ssa.Edge %v": "",
"cmd/compile/internal/ssa.GCNode %v": "",
"cmd/compile/internal/ssa.ID %d": "",
"cmd/compile/internal/ssa.ID %v": "",
"cmd/compile/internal/ssa.LocalSlot %s": "",
"cmd/compile/internal/ssa.LocalSlot %v": "",
"cmd/compile/internal/ssa.Location %s": "",
"cmd/compile/internal/ssa.Op %s": "",
"cmd/compile/internal/ssa.Op %v": "",
"cmd/compile/internal/ssa.Sym %v": "",
"cmd/compile/internal/ssa.ValAndOff %s": "",
"cmd/compile/internal/ssa.domain %v": "",
"cmd/compile/internal/ssa.flagConstant %s": "",
"cmd/compile/internal/ssa.posetNode %v": "",
"cmd/compile/internal/ssa.posetTestOp %v": "",
"cmd/compile/internal/ssa.rbrank %d": "",
"cmd/compile/internal/ssa.regMask %d": "",
"cmd/compile/internal/ssa.register %d": "",
"cmd/compile/internal/ssa.relation %s": "",
"cmd/compile/internal/syntax.Error %q": "",
"cmd/compile/internal/syntax.Expr %#v": "",
"cmd/compile/internal/syntax.LitKind %d": "",
"cmd/compile/internal/syntax.Node %T": "",
"cmd/compile/internal/syntax.Operator %s": "",
"cmd/compile/internal/syntax.Pos %s": "",
"cmd/compile/internal/syntax.Pos %v": "",
"cmd/compile/internal/syntax.position %s": "",
"cmd/compile/internal/syntax.token %q": "",
"cmd/compile/internal/syntax.token %s": "",
"cmd/compile/internal/types.EType %d": "",
"cmd/compile/internal/types.EType %s": "",
"cmd/compile/internal/types.EType %v": "",
"cmd/internal/obj.ABI %v": "",
"error %v": "",
"float64 %.2f": "",
"float64 %.3f": "",
"float64 %.6g": "",
"float64 %g": "",
"int %#x": "",
"int %-12d": "",
"int %-6d": "",
"int %-8o": "",
"int %02d": "",
"int %6d": "",
"int %c": "",
"int %d": "",
"int %v": "",
"int %x": "",
"int16 %d": "",
"int16 %x": "",
"int32 %#x": "",
"int32 %d": "",
"int32 %v": "",
"int32 %x": "",
"int64 %#x": "",
"int64 %+d": "",
"int64 %-10d": "",
"int64 %.5d": "",
"int64 %d": "",
"int64 %v": "",
"int64 %x": "",
"int8 %d": "",
"int8 %v": "",
"int8 %x": "",
"interface{} %#v": "",
"interface{} %T": "",
"interface{} %p": "",
"interface{} %q": "",
"interface{} %s": "",
"interface{} %v": "",
"map[*cmd/compile/internal/gc.Node]*cmd/compile/internal/ssa.Value %v": "",
"map[*cmd/compile/internal/gc.Node][]*cmd/compile/internal/gc.Node %v": "",
"map[cmd/compile/internal/ssa.ID]uint32 %v": "",
"map[int64]uint32 %v": "",
"math/big.Accuracy %s": "",
"reflect.Type %s": "",
"rune %#U": "",
"rune %c": "",
"rune %q": "",
"string %-*s": "",
"string %-16s": "",
"string %-6s": "",
"string %q": "",
"string %s": "",
"string %v": "",
"time.Duration %d": "",
"time.Duration %v": "",
"uint %04x": "",
"uint %5d": "",
"uint %d": "",
"uint %x": "",
"uint16 %d": "",
"uint16 %x": "",
"uint32 %#U": "",
"uint32 %#x": "",
"uint32 %d": "",
"uint32 %v": "",
"uint32 %x": "",
"uint64 %08x": "",
"uint64 %b": "",
"uint64 %d": "",
"uint64 %x": "",
"uint8 %#x": "",
"uint8 %d": "",
"uint8 %v": "",
"uint8 %x": "",
"uintptr %d": "",
}


@@ -0,0 +1,628 @@
# Go internal ABI specification
This document describes Go's internal application binary interface
(ABI), known as ABIInternal.
Go's ABI defines the layout of data in memory and the conventions for
calling between Go functions.
This ABI is *unstable* and will change between Go versions.
If you're writing assembly code, please instead refer to Go's
[assembly documentation](/doc/asm.html), which describes Go's stable
ABI, known as ABI0.
All functions defined in Go source follow ABIInternal.
However, ABIInternal and ABI0 functions are able to call each other
through transparent *ABI wrappers*, described in the [internal calling
convention proposal](https://golang.org/design/27539-internal-abi).
Go uses a common ABI design across all architectures.
We first describe the common ABI, and then cover per-architecture
specifics.
*Rationale*: For the reasoning behind using a common ABI across
architectures instead of the platform ABI, see the [register-based Go
calling convention proposal](https://golang.org/design/40724-register-calling).
## Memory layout
Go's built-in types have the following sizes and alignments.
Many, though not all, of these sizes are guaranteed by the [language
specification](/doc/go_spec.html#Size_and_alignment_guarantees).
Those that aren't guaranteed may change in future versions of Go (for
example, we've considered changing the alignment of int64 on 32-bit).
| Type | 64-bit | | 32-bit | |
| --- | --- | --- | --- | --- |
| | Size | Align | Size | Align |
| bool, uint8, int8 | 1 | 1 | 1 | 1 |
| uint16, int16 | 2 | 2 | 2 | 2 |
| uint32, int32 | 4 | 4 | 4 | 4 |
| uint64, int64 | 8 | 8 | 8 | 4 |
| int, uint | 8 | 8 | 4 | 4 |
| float32 | 4 | 4 | 4 | 4 |
| float64 | 8 | 8 | 8 | 4 |
| complex64 | 8 | 4 | 8 | 4 |
| complex128 | 16 | 8 | 16 | 4 |
| uintptr, *T, unsafe.Pointer | 8 | 8 | 4 | 4 |
The types `byte` and `rune` are aliases for `uint8` and `int32`,
respectively, and hence have the same size and alignment as these
types.
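These figures can be checked directly with the `unsafe` package.
Below is a minimal sketch (illustrative only, not part of the
specification); on a 64-bit platform the printed values match the
64-bit columns of the table above.
```
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Each line corresponds to a row of the table above (64-bit platform).
	fmt.Println(unsafe.Sizeof(int64(0)), unsafe.Alignof(int64(0)))         // 8 8
	fmt.Println(unsafe.Sizeof(complex64(0)), unsafe.Alignof(complex64(0))) // 8 4
	var p unsafe.Pointer
	fmt.Println(unsafe.Sizeof(p), unsafe.Alignof(p)) // 8 8
}
```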
The layout of `map`, `chan`, and `func` types is equivalent to *T.
To describe the layout of the remaining composite types, we first
define the layout of a *sequence* S of N fields with types
t<sub>1</sub>, t<sub>2</sub>, ..., t<sub>N</sub>.
We define the byte offset at which each field begins relative to a
base address of 0, as well as the size and alignment of the sequence
as follows:
```
offset(S, i) = 0 if i = 1
= align(offset(S, i-1) + sizeof(t_(i-1)), alignof(t_i))
alignof(S) = 1 if N = 0
= max(alignof(t_i) | 1 <= i <= N)
sizeof(S) = 0 if N = 0
= align(offset(S, N) + sizeof(t_N), alignof(S))
```
Where sizeof(T) and alignof(T) are the size and alignment of type T,
respectively, and align(x, y) rounds x up to a multiple of y.
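For illustration, here is the same computation as a Go sketch; the
`sizes` and `aligns` slices stand in for sizeof(t_i) and alignof(t_i)
and are not part of any real compiler API.
```
package layout

// align rounds x up to a multiple of y; y must be a power of 2.
func align(x, y int64) int64 {
	return (x + y - 1) &^ (y - 1)
}

// sequenceLayout returns offset(S, i) for each field, alignof(S), and
// sizeof(S) for a sequence S described by its field sizes and alignments.
func sequenceLayout(sizes, aligns []int64) (offsets []int64, alignment, size int64) {
	alignment = 1 // alignof(S) = 1 if N = 0
	var off int64
	for i := range sizes {
		off = align(off, aligns[i]) // offset(S, i)
		offsets = append(offsets, off)
		off += sizes[i]
		if aligns[i] > alignment {
			alignment = aligns[i] // alignof(S) = max alignof(t_i)
		}
	}
	size = align(off, alignment) // sizeof(S)
	return
}
```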
The `interface{}` type is a sequence of 1. a pointer to the runtime type
description for the interface's dynamic type and 2. an `unsafe.Pointer`
data field.
Any other interface type (besides the empty interface) is a sequence
of 1. a pointer to the runtime "itab" that gives the method pointers and
the type of the data field and 2. an `unsafe.Pointer` data field.
An interface can be "direct" or "indirect" depending on the dynamic
type: a direct interface stores the value directly in the data field,
and an indirect interface stores a pointer to the value in the data
field.
An interface can only be direct if the value consists of a single
pointer word.
An array type `[N]T` is a sequence of N fields of type T.
The slice type `[]T` is a sequence of a `*[cap]T` pointer to the slice
backing store, an `int` giving the `len` of the slice, and an `int`
giving the `cap` of the slice.
The `string` type is a sequence of a `*[len]byte` pointer to the
string backing store, and an `int` giving the `len` of the string.
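In Go terms, slice and string values are therefore shaped like the
following descriptive header structs (a sketch only; `reflect.SliceHeader`
and `reflect.StringHeader` describe the same layout with `uintptr` data
fields, and the runtime manages the real headers):
```
package layout

import "unsafe"

type sliceHeader struct {
	data unsafe.Pointer // the *[cap]T backing store
	len  int            // len of the slice
	cap  int            // cap of the slice
}

type stringHeader struct {
	data unsafe.Pointer // the *[len]byte backing store
	len  int            // len of the string
}
```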
A struct type `struct { f1 t1; ...; fM tM }` is laid out as the
sequence t1, ..., tM, tP, where tP is either:
- Type `byte` if sizeof(tM) = 0 and any of sizeof(t*i*) ≠ 0.
- Empty (size 0 and align 1) otherwise.
The padding byte prevents creating a past-the-end pointer by taking
the address of the final, empty fM field.
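For example (an empirical check, not part of the specification), a
trailing zero-size field forces the padding byte, which alignment then
rounds up:
```
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	type a struct{ x int64 }
	type b struct {
		x int64
		y struct{} // zero-size final field forces a padding byte
	}
	fmt.Println(unsafe.Sizeof(a{}), unsafe.Sizeof(b{})) // 8 16 on 64-bit
}
```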
Note that user-written assembly code should generally not depend on Go
type layout and should instead use the constants defined in
[`go_asm.h`](/doc/asm.html#data-offsets).
## Function call argument and result passing
Function calls pass arguments and results using a combination of the
stack and machine registers.
Each argument or result is passed either entirely in registers or
entirely on the stack.
Because access to registers is generally faster than access to the
stack, arguments and results are preferentially passed in registers.
However, any argument or result that contains a non-trivial array or
does not fit entirely in the remaining available registers is passed
on the stack.
Each architecture defines a sequence of integer registers and a
sequence of floating-point registers.
At a high level, arguments and results are recursively broken down
into values of base types and these base values are assigned to
registers from these sequences.
Arguments and results can share the same registers, but do not share
the same stack space.
Beyond the arguments and results passed on the stack, the caller also
reserves spill space on the stack for all register-based arguments
(but does not populate this space).
The receiver, arguments, and results of function or method F are
assigned to registers or the stack using the following algorithm:
1. Let NI and NFP be the length of integer and floating-point register
sequences defined by the architecture.
Let I and FP be 0; these are the indexes of the next integer and
floating-pointer register.
Let S, the type sequence defining the stack frame, be empty.
1. If F is a method, assign F's receiver.
1. For each argument A of F, assign A.
1. Add a pointer-alignment field to S. This has size 0 and the same
alignment as `uintptr`.
1. Reset I and FP to 0.
1. For each result R of F, assign R.
1. Add a pointer-alignment field to S.
1. For each register-assigned receiver and argument of F, let T be its
type and add T to the stack sequence S.
This is the argument's (or receiver's) spill space and will be
uninitialized at the call.
1. Add a pointer-alignment field to S.
Assigning a receiver, argument, or result V of underlying type T works
as follows:
1. Remember I and FP.
1. Try to register-assign V.
1. If step 2 failed, reset I and FP to the values from step 1, add T
to the stack sequence S, and assign V to this field in S.
Register-assignment of a value V of underlying type T works as follows:
1. If T is a boolean or integral type that fits in an integer
register, assign V to register I and increment I.
1. If T is an integral type that fits in two integer registers, assign
the least significant and most significant halves of V to registers
I and I+1, respectively, and increment I by 2.
1. If T is a floating-point type and can be represented without loss
of precision in a floating-point register, assign V to register FP
and increment FP.
1. If T is a complex type, recursively register-assign its real and
imaginary parts.
1. If T is a pointer type, map type, channel type, or function type,
assign V to register I and increment I.
1. If T is a string type, interface type, or slice type, recursively
register-assign V's components (2 for strings and interfaces, 3 for
slices).
1. If T is a struct type, recursively register-assign each field of V.
1. If T is an array type of length 0, do nothing.
1. If T is an array type of length 1, recursively register-assign its
one element.
1. If T is an array type of length > 1, fail.
1. If I > NI or FP > NFP, fail.
1. If any recursive assignment above fails, fail.
The above algorithm produces an assignment of each receiver, argument,
and result to registers or to a field in the stack sequence.
The final stack sequence looks like: stack-assigned receiver,
stack-assigned arguments, pointer-alignment, stack-assigned results,
pointer-alignment, spill space for each register-assigned argument,
pointer-alignment.
The following diagram shows what this stack frame looks like on the
stack, using the typical convention where address 0 is at the bottom:
```
+------------------------------+
| . . . |
| 2nd reg argument spill space |
| 1st reg argument spill space |
| <pointer-sized alignment> |
| . . . |
| 2nd stack-assigned result |
| 1st stack-assigned result |
| <pointer-sized alignment> |
| . . . |
| 2nd stack-assigned argument |
| 1st stack-assigned argument |
| stack-assigned receiver |
+------------------------------+ ↓ lower addresses
```
To perform a call, the caller reserves space starting at the lowest
address in its stack frame for the call stack frame, stores arguments
in the registers and argument stack fields determined by the above
algorithm, and performs the call.
At the time of a call, spill space, result stack fields, and result
registers are left uninitialized.
Upon return, the callee must have stored results to all result
registers and result stack fields determined by the above algorithm.
There are no callee-save registers, so a call may overwrite any
register that doesn't have a fixed meaning, including argument
registers.
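The register-assignment step above can be sketched compactly in Go.
The type descriptors below are simplified stand-ins for illustration
only (complex numbers and two-register integers are omitted for
brevity); the real implementation lives in `cmd/compile/internal/abi`.
```
package sketch

// kind is a simplified type descriptor; not a real compiler API.
type kind int

const (
	kInt    kind = iota // boolean or integer that fits in one register
	kFloat              // floating-point value that fits in one register
	kPtr                // pointer, map, channel, or function
	kString             // string or interface: 2 pointer-sized words
	kSlice              // slice: 3 pointer-sized words
	kStruct             // struct: register-assign each field
	kArray              // array: only lengths 0 and 1 are assignable
)

type typ struct {
	kind   kind
	elems  []*typ // struct fields, or the one array element type
	length int    // array length
}

// state tracks the next integer (i) and floating-point (fp) register,
// bounded by the architecture's register counts ni and nfp.
type state struct{ i, fp, ni, nfp int }

// regAssign reports whether t can be register-assigned with the
// registers remaining in s, advancing s.i and s.fp as it goes.
// A caller remembers s beforehand and, on failure, restores it and
// stack-assigns the value instead (step 3 of the assignment algorithm).
func regAssign(s *state, t *typ) bool {
	switch t.kind {
	case kInt, kPtr:
		s.i++
	case kFloat:
		s.fp++
	case kString:
		s.i += 2
	case kSlice:
		s.i += 3
	case kStruct:
		for _, f := range t.elems {
			if !regAssign(s, f) {
				return false
			}
		}
	case kArray:
		if t.length > 1 {
			return false // non-trivial arrays always go on the stack
		}
		if t.length == 1 && !regAssign(s, t.elems[0]) {
			return false
		}
	}
	return s.i <= s.ni && s.fp <= s.nfp
}
```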
### Example
Consider the function `func f(a1 uint8, a2 [2]uintptr, a3 uint8) (r1
struct { x uintptr; y [2]uintptr }, r2 string)` on a 64-bit
architecture with hypothetical integer registers R0–R9.
On entry, `a1` is assigned to `R0`, `a3` is assigned to `R1` and the
stack frame is laid out in the following sequence:
```
a2 [2]uintptr
r1.x uintptr
r1.y [2]uintptr
a1Spill uint8
a3Spill uint8
_ [6]uint8 // alignment padding
```
In the stack frame, only the `a2` field is initialized on entry; the
rest of the frame is left uninitialized.
On exit, `r2.base` is assigned to `R0`, `r2.len` is assigned to `R1`,
and `r1.x` and `r1.y` are initialized in the stack frame.
There are several things to note in this example.
First, `a2` and `r1` are stack-assigned because they contain arrays.
The other arguments and results are register-assigned.
Result `r2` is decomposed into its components, which are individually
register-assigned.
On the stack, the stack-assigned arguments appear at lower addresses
than the stack-assigned results, which appear at lower addresses than
the argument spill area.
Only arguments, not results, are assigned a spill area on the stack.
### Rationale
Each base value is assigned to its own register to optimize
construction and access.
An alternative would be to pack multiple sub-word values into
registers, or to simply map an argument's in-memory layout to
registers (this is common in C ABIs), but this typically adds cost to
pack and unpack these values.
Modern architectures have more than enough registers to pass all
arguments and results this way for nearly all functions (see the
appendix), so there's little downside to spreading base values across
registers.
Arguments that can't be fully assigned to registers are passed
entirely on the stack in case the callee takes the address of that
argument.
If an argument could be split across the stack and registers and the
callee took its address, it would need to be reconstructed in memory,
a process that would be proportional to the size of the argument.
Non-trivial arrays are always passed on the stack because indexing
into an array typically requires a computed offset, which generally
isn't possible with registers.
Arrays in general are rare in function signatures (only 0.7% of
functions in the Go 1.15 standard library and 0.2% in kubelet).
We considered allowing array fields to be passed on the stack while
the rest of an argument's fields are passed in registers, but this
creates the same problems as other large structs if the callee takes
the address of an argument, and would benefit <0.1% of functions in
kubelet (and even these very little).
We make exceptions for 0 and 1-element arrays because these don't
require computed offsets, and 1-element arrays are already decomposed
in the compiler's SSA representation.
The ABI assignment algorithm above is equivalent to Go's stack-based
ABI0 calling convention if there are zero architecture registers.
This is intended to ease the transition to the register-based internal
ABI and make it easy for the compiler to generate either calling
convention.
An architecture may still define register meanings that aren't
compatible with ABI0, but these differences should be easy to account
for in the compiler.
The algorithm reserves spill space for arguments in the caller's frame
so that the compiler can generate a stack growth path that spills into
this reserved space.
If the callee has to grow the stack, it may not be able to reserve
enough additional stack space in its own frame to spill these, which
is why it's important that the caller do so.
These slots also act as the home location if these arguments need to
be spilled for any other reason, which simplifies traceback printing.
There are several options for how to lay out the argument spill space.
We chose to lay out each argument according to its type's usual memory
layout but to separate the spill space from the regular argument
space.
Using the usual memory layout simplifies the compiler because it
already understands this layout.
Also, if a function takes the address of a register-assigned argument,
the compiler must spill that argument to memory in its usual memory
layout and it's more convenient to use the argument spill space for
this purpose.
Alternatively, the spill space could be structured around argument
registers.
In this approach, the stack growth spill path would spill each
argument register to a register-sized stack word.
However, if the function takes the address of a register-assigned
argument, the compiler would have to reconstruct it in memory layout
elsewhere on the stack.
The spill space could also be interleaved with the stack-assigned
arguments so the arguments appear in order whether they are register-
or stack-assigned.
This would be close to ABI0, except that register-assigned arguments
would be uninitialized on the stack and there's no need to reserve
stack space for register-assigned results.
We expect separating the spill space to perform better because of
memory locality.
Separating the space is also potentially simpler for `reflect` calls
because this allows `reflect` to summarize the spill space as a single
number.
Finally, the long-term intent is to remove reserved spill slots
entirely (allowing most functions to be called without any stack
setup and easing the introduction of callee-save registers), and
separating the spill space makes that transition easier.
## Closures
A func value (e.g., `var x func()`) is a pointer to a closure object.
A closure object begins with a pointer-sized program counter
representing the entry point of the function, followed by zero or more
bytes containing the closed-over environment.
Closure calls follow the same conventions as static function and
method calls, with one addition. Each architecture specifies a
*closure context pointer* register and calls to closures store the
address of the closure object in the closure context pointer register
prior to the call.
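For illustration (a conceptual sketch only; real closure layouts are
chosen by the compiler and never exposed as Go types), a closure that
captures one variable by reference corresponds to an object shaped like:
```
package sketch

type closureObj struct {
	pc uintptr // entry point of the function's code
	x  *int    // closed-over environment: the captured variable
}
```
A call through such a func value loads the entry point from `pc` and
passes the object's address in the closure context pointer register
(RDX on amd64; see the architecture sections below).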
## Software floating-point mode
In "softfloat" mode, the ABI simply treats the hardware as having zero
floating-point registers.
As a result, any arguments containing floating-point values will be
passed on the stack.
*Rationale*: Softfloat mode is about compatibility over performance
and is not commonly used.
Hence, we keep the ABI as simple as possible in this case, rather than
adding additional rules for passing floating-point values in integer
registers.
## Architecture specifics
This section describes per-architecture register mappings, as well as
other per-architecture special cases.
### amd64 architecture
The amd64 architecture uses the following sequence of 9 registers for
integer arguments and results:
RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11
It uses X0–X14 for floating-point arguments and results.
*Rationale*: These sequences are chosen from the available registers
to be relatively easy to remember.
Registers R12 and R13 are permanent scratch registers.
R15 is a scratch register except in dynamically linked binaries.
*Rationale*: Some operations such as stack growth and reflection calls
need dedicated scratch registers in order to manipulate call frames
without corrupting arguments or results.
Special-purpose registers are as follows:
| Register | Call meaning | Body meaning |
| --- | --- | --- |
| RSP | Stack pointer | Fixed |
| RBP | Frame pointer | Fixed |
| RDX | Closure context pointer | Scratch |
| R12 | None | Scratch |
| R13 | None | Scratch |
| R14 | Current goroutine | Scratch |
| R15 | GOT reference temporary | Fixed if dynlink |
| X15 | Zero value | Fixed |
TODO: We may start with the existing TLS-based g and move to R14
later.
*Rationale*: These register meanings are compatible with Go's
stack-based calling convention except for R14 and X15, which will have
to be restored on transitions from ABI0 code to ABIInternal code.
In ABI0, these are undefined, so transitions from ABIInternal to ABI0
can ignore these registers.
*Rationale*: For the current goroutine pointer, we chose a register
that requires an additional REX byte.
While this adds one byte to every function prologue, it is hardly ever
accessed outside the function prologue and we expect making more
single-byte registers available to be a net win.
*Rationale*: We designate X15 as a fixed zero register because
functions often have to bulk zero their stack frames, and this is more
efficient with a designated zero register.
#### Stack layout
The stack pointer, RSP, grows down and is always aligned to 8 bytes.
The amd64 architecture does not use a link register.
A function's stack frame is laid out as follows:
```
+------------------------------+
| return PC |
| RBP on entry |
| ... locals ... |
| ... outgoing arguments ... |
+------------------------------+ ↓ lower addresses
```
The "return PC" is pushed as part of the standard amd64 `CALL`
operation.
On entry, a function subtracts from RSP to open its stack frame and
saves the value of RBP directly below the return PC.
A leaf function that does not require any stack space may omit the
saved RBP.
The Go ABI's use of RBP as a frame pointer register is compatible with
amd64 platform conventions so that Go can inter-operate with platform
debuggers and profilers.
#### Flags
The direction flag (D) is always cleared (set to the “forward”
direction) at a call.
The arithmetic status flags are treated like scratch registers and not
preserved across calls.
All other bits in RFLAGS are system flags.
The CPU is always in MMX technology state (not x87 mode).
*Rationale*: Go on amd64 uses the XMM registers and never uses the x87
registers, so it makes sense to assume the CPU is in MMX mode.
Otherwise, any function that used the XMM registers would have to
execute an EMMS instruction before calling another function or
returning (this is the case in the SysV ABI).
At calls, the MXCSR control bits are always set as follows:
| Flag | Bit | Value | Meaning |
| --- | --- | --- | --- |
| FZ | 15 | 0 | Do not flush to zero |
| RC | 14/13 | 0 (RN) | Round to nearest |
| PM | 12 | 1 | Precision masked |
| UM | 11 | 1 | Underflow masked |
| OM | 10 | 1 | Overflow masked |
| ZM | 9 | 1 | Divide-by-zero masked |
| DM | 8 | 1 | Denormal operations masked |
| IM | 7 | 1 | Invalid operations masked |
| DAZ | 6 | 0 | Do not zero de-normals |
The MXCSR status bits are callee-save.
*Rationale*: Having a fixed MXCSR control configuration allows Go
functions to use SSE operations without modifying or saving the MXCSR.
Functions are allowed to modify it between calls (as long as they
restore it), but as of this writing Go code never does.
The above fixed configuration matches the process initialization
control bits specified by the ELF AMD64 ABI.
The x87 floating-point control word is not used by Go on amd64.
## Future directions
### Spill path improvements
The ABI currently reserves spill space for argument registers so the
compiler can statically generate an argument spill path before calling
into `runtime.morestack` to grow the stack.
This ensures there will be sufficient spill space even when the stack
is nearly exhausted and keeps stack growth and stack scanning
essentially unchanged from ABI0.
However, this wastes stack space (the median wastage is 16 bytes per
call), resulting in larger stacks and increased cache footprint.
A better approach would be to reserve stack space only when spilling.
One way to ensure enough space is available to spill would be for
every function to ensure there is enough space for the function's own
frame *as well as* the spill space of all functions it calls.
For most functions, this would change the threshold for the prologue
stack growth check.
For `nosplit` functions, this would change the threshold used in the
linker's static stack size check.
Allocating spill space in the callee rather than the caller may also
allow for faster reflection calls in the common case where a function
takes only register arguments, since it would allow reflection to make
these calls directly without allocating any frame.
The statically-generated spill path also increases code size.
It is possible to instead have a generic spill path in the runtime, as
part of `morestack`.
However, this complicates reserving the spill space, since spilling
all possible register arguments would, in most cases, take
significantly more space than spilling only those used by a particular
function.
Some options are to spill to a temporary space and copy back only the
registers used by the function, or to grow the stack if necessary
before spilling to it (using a temporary space if necessary), or to
use a heap-allocated space if insufficient stack space is available.
These options all add enough complexity that we will have to make this
decision based on the actual code size growth caused by the static
spill paths.
### Clobber sets
As defined, the ABI does not use callee-save registers.
This significantly simplifies the garbage collector and the compiler's
register allocator, but at some performance cost.
A potentially better balance for Go code would be to use *clobber
sets*: for each function, the compiler records the set of registers it
clobbers (including those clobbered by functions it calls) and any
register not clobbered by function F can remain live across calls to
F.
This is generally a good fit for Go because Go's package DAG allows
function metadata like the clobber set to flow up the call graph, even
across package boundaries.
Clobber sets would require relatively little change to the garbage
collector, unlike general callee-save registers.
One disadvantage of clobber sets over callee-save registers is that
they don't help with indirect function calls or interface method
calls, since static information isn't available in these cases.
### Large aggregates
Go encourages passing composite values by value, and this simplifies
reasoning about mutation and races.
However, this comes at a performance cost for large composite values.
It may be possible to instead transparently pass large composite
values by reference and delay copying until it is actually necessary.
## Appendix: Register usage analysis
In order to understand the impacts of the above design on register
usage, we
[analyzed](https://github.com/aclements/go-misc/tree/master/abi) the
impact of the above ABI on a large code base: cmd/kubelet from
[Kubernetes](https://github.com/kubernetes/kubernetes) at tag v1.18.8.
The following table shows the impact of different numbers of available
integer and floating-point registers on argument assignment:
```
| | | | stack args | spills | stack total |
| ints | floats | % fit | p50 | p95 | p99 | p50 | p95 | p99 | p50 | p95 | p99 |
| 0 | 0 | 6.3% | 32 | 152 | 256 | 0 | 0 | 0 | 32 | 152 | 256 |
| 0 | 8 | 6.4% | 32 | 152 | 256 | 0 | 0 | 0 | 32 | 152 | 256 |
| 1 | 8 | 21.3% | 24 | 144 | 248 | 8 | 8 | 8 | 32 | 152 | 256 |
| 2 | 8 | 38.9% | 16 | 128 | 224 | 8 | 16 | 16 | 24 | 136 | 240 |
| 3 | 8 | 57.0% | 0 | 120 | 224 | 16 | 24 | 24 | 24 | 136 | 240 |
| 4 | 8 | 73.0% | 0 | 120 | 216 | 16 | 32 | 32 | 24 | 136 | 232 |
| 5 | 8 | 83.3% | 0 | 112 | 216 | 16 | 40 | 40 | 24 | 136 | 232 |
| 6 | 8 | 87.5% | 0 | 112 | 208 | 16 | 48 | 48 | 24 | 136 | 232 |
| 7 | 8 | 89.8% | 0 | 112 | 208 | 16 | 48 | 56 | 24 | 136 | 232 |
| 8 | 8 | 91.3% | 0 | 112 | 200 | 16 | 56 | 64 | 24 | 136 | 232 |
| 9 | 8 | 92.1% | 0 | 112 | 192 | 16 | 56 | 72 | 24 | 136 | 232 |
| 10 | 8 | 92.6% | 0 | 104 | 192 | 16 | 56 | 72 | 24 | 136 | 232 |
| 11 | 8 | 93.1% | 0 | 104 | 184 | 16 | 56 | 80 | 24 | 128 | 232 |
| 12 | 8 | 93.4% | 0 | 104 | 176 | 16 | 56 | 88 | 24 | 128 | 232 |
| 13 | 8 | 94.0% | 0 | 88 | 176 | 16 | 56 | 96 | 24 | 128 | 232 |
| 14 | 8 | 94.4% | 0 | 80 | 152 | 16 | 64 | 104 | 24 | 128 | 232 |
| 15 | 8 | 94.6% | 0 | 80 | 152 | 16 | 64 | 112 | 24 | 128 | 232 |
| 16 | 8 | 94.9% | 0 | 16 | 152 | 16 | 64 | 112 | 24 | 128 | 232 |
| ∞ | 8 | 99.8% | 0 | 0 | 0 | 24 | 112 | 216 | 24 | 120 | 216 |
```
The first two columns show the number of available integer and
floating-point registers.
The first row shows the results for 0 integer and 0 floating-point
registers, which is equivalent to ABI0.
We found that any reasonable number of floating-point registers has
the same effect, so we fixed it at 8 for all other rows.
The “% fit” column gives the fraction of functions where all arguments
and results are register-assigned and no arguments are passed on the
stack.
The three “stack args” columns give the median, 95th and 99th
percentile number of bytes of stack arguments.
The “spills” columns likewise summarize the number of bytes in
on-stack spill space.
And “stack total” summarizes the sum of stack arguments and on-stack
spill slots.
Note that these are three different distributions; for example,
there's no single function that takes 0 stack argument bytes, 16 spill
bytes, and 24 total stack bytes.
From this, we can see that the fraction of functions that fit entirely
in registers grows very slowly once it reaches about 90%, though
curiously there is a small minority of functions that could benefit
from a huge number of registers.
Making 9 integer registers available on amd64 puts it in this realm.
We also see that the stack space required for most functions is fairly
small.
While the increasing space required for spills largely balances out
the decreasing space required for stack arguments as the number of
available registers increases, there is a general reduction in the
total stack space required with more available registers.
This does, however, suggest that eliminating spill slots in the future
would noticeably reduce stack requirements.


@@ -0,0 +1,461 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package abi
import (
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
"sync"
)
//......................................................................
//
// Public/exported bits of the ABI utilities.
//
// ABIParamResultInfo stores the results of processing a given
// function type to compute stack layout and register assignments. For
// each input and output parameter we capture whether the param was
// register-assigned (and to which register(s)) or the stack offset
// for the param if it is not going to be passed in registers according
// to the rules in the Go internal ABI specification (1.17).
type ABIParamResultInfo struct {
inparams []ABIParamAssignment // Includes receiver for method calls. Does NOT include hidden closure pointer.
outparams []ABIParamAssignment
offsetToSpillArea int64
spillAreaSize int64
config *ABIConfig // to enable String() method
}
func (a *ABIParamResultInfo) InParams() []ABIParamAssignment {
return a.inparams
}
func (a *ABIParamResultInfo) OutParams() []ABIParamAssignment {
return a.outparams
}
func (a *ABIParamResultInfo) InParam(i int) ABIParamAssignment {
return a.inparams[i]
}
func (a *ABIParamResultInfo) OutParam(i int) ABIParamAssignment {
return a.outparams[i]
}
func (a *ABIParamResultInfo) SpillAreaOffset() int64 {
return a.offsetToSpillArea
}
func (a *ABIParamResultInfo) SpillAreaSize() int64 {
return a.spillAreaSize
}
// RegIndex stores the index into the set of machine registers used by
// the ABI on a specific architecture for parameter passing. RegIndex
// values 0 through N-1 (where N is the number of integer registers
// used for param passing according to the ABI rules) describe integer
// registers; values N through N+M-1 (where M is the number of floating
// point registers used) describe floating point registers. Thus if the
// ABI says there are 5 integer registers and 7 floating point registers,
// then a RegIndex value of 4 indicates the 5th integer register, and a
// RegIndex value of 11
// indicates the 7th floating point register.
type RegIndex uint8
// ABIParamAssignment holds information about how a specific param or
// result will be passed: in registers (in which case 'Registers' is
// populated) or on the stack (in which case 'Offset' is set to a
// non-negative stack offset). The values in 'Registers' are indices (as
// described above), not architected registers.
type ABIParamAssignment struct {
Type *types.Type
Registers []RegIndex
offset int32
}
// Offset returns the stack offset for addressing the parameter that "a" describes.
// This will panic if "a" describes a register-allocated parameter.
func (a *ABIParamAssignment) Offset() int32 {
if len(a.Registers) > 0 {
panic("Register allocated parameters have no offset")
}
return a.offset
}
// SpillOffset returns the offset *within the spill area* for the parameter that "a" describes.
// Registers will be spilled here; if a memory home is needed (for a pointer method e.g.)
// then that will be the address.
// This will panic if "a" describes a stack-allocated parameter.
func (a *ABIParamAssignment) SpillOffset() int32 {
if len(a.Registers) == 0 {
panic("Stack-allocated parameters have no spill offset")
}
return a.offset
}
// RegAmounts holds a specified number of integer/float registers.
type RegAmounts struct {
intRegs int
floatRegs int
}
// ABIConfig captures the number of registers made available
// by the ABI rules for parameter passing and result returning.
type ABIConfig struct {
// Do we need anything more than this?
regAmounts RegAmounts
regsForTypeCache map[*types.Type]int
}
// NewABIConfig returns a new ABI configuration for an architecture with
// iRegsCount integer/pointer registers and fRegsCount floating point registers.
func NewABIConfig(iRegsCount, fRegsCount int) *ABIConfig {
return &ABIConfig{regAmounts: RegAmounts{iRegsCount, fRegsCount}, regsForTypeCache: make(map[*types.Type]int)}
}
// NumParamRegs returns the number of parameter registers used for a given type,
// without regard for the number available.
func (a *ABIConfig) NumParamRegs(t *types.Type) int {
if n, ok := a.regsForTypeCache[t]; ok {
return n
}
if t.IsScalar() || t.IsPtrShaped() {
var n int
if t.IsComplex() {
n = 2
} else {
n = (int(t.Size()) + types.RegSize - 1) / types.RegSize
}
a.regsForTypeCache[t] = n
return n
}
typ := t.Kind()
n := 0
switch typ {
case types.TARRAY:
n = a.NumParamRegs(t.Elem()) * int(t.NumElem())
case types.TSTRUCT:
for _, f := range t.FieldSlice() {
n += a.NumParamRegs(f.Type)
}
case types.TSLICE:
n = a.NumParamRegs(synthSlice)
case types.TSTRING:
n = a.NumParamRegs(synthString)
case types.TINTER:
n = a.NumParamRegs(synthIface)
}
a.regsForTypeCache[t] = n
return n
}
// ABIAnalyze takes a function type 't' and an ABI rules description
// 'config' and analyzes the function to determine how its parameters
// and results will be passed (in registers or on the stack), returning
// an ABIParamResultInfo object that holds the results of the analysis.
func (config *ABIConfig) ABIAnalyze(t *types.Type) ABIParamResultInfo {
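// setup initializes the synthetic types (synthSlice, synthString, synthIface) once, on first use.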
setup()
s := assignState{
rTotal: config.regAmounts,
}
result := ABIParamResultInfo{config: config}
// Receiver
ft := t.FuncType()
if t.NumRecvs() != 0 {
rfsl := ft.Receiver.FieldSlice()
result.inparams = append(result.inparams,
s.assignParamOrReturn(rfsl[0].Type, false))
}
// Inputs
ifsl := ft.Params.FieldSlice()
for _, f := range ifsl {
result.inparams = append(result.inparams,
s.assignParamOrReturn(f.Type, false))
}
s.stackOffset = types.Rnd(s.stackOffset, int64(types.RegSize))
// Outputs
s.rUsed = RegAmounts{}
ofsl := ft.Results.FieldSlice()
for _, f := range ofsl {
result.outparams = append(result.outparams, s.assignParamOrReturn(f.Type, true))
}
// The spill area is at a register-aligned offset and its size is rounded up to a register alignment.
// TODO in theory could align offset only to minimum required by spilled data types.
result.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
result.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
return result
}
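// Worked example (assuming 9 integer and 15 floating-point registers on
// a 64-bit target): for func(x int, y float64) (string, error), x is
// assigned I0 with spill offset 0 and y is assigned F0 with spill offset 8.
// Result registers are counted from zero again, so the string result gets
// I0,I1 and the error interface I2,I3; offsetToSpillArea is 0 (there are
// no stack-resident args) and spillAreaSize is 16.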
//......................................................................
//
// Non-public portions.
// regString produces a human-readable version of a RegIndex.
func (c *RegAmounts) regString(r RegIndex) string {
if int(r) < c.intRegs {
return fmt.Sprintf("I%d", int(r))
} else if int(r) < c.intRegs+c.floatRegs {
return fmt.Sprintf("F%d", int(r)-c.intRegs)
}
return fmt.Sprintf("<?>%d", r)
}
// toString renders an ABIParamAssignment in human-readable
// form, suitable for debugging or unit testing.
func (ri *ABIParamAssignment) toString(config *ABIConfig) string {
regs := "R{"
offname := "spilloffset" // offset is for spill for register(s)
if len(ri.Registers) == 0 {
offname = "offset" // offset is for memory arg
}
for _, r := range ri.Registers {
regs += " " + config.regAmounts.regString(r)
}
return fmt.Sprintf("%s } %s: %d typ: %v", regs, offname, ri.offset, ri.Type)
}
// String renders an ABIParamResultInfo in human-readable
// form, suitable for debugging or unit testing.
func (ri *ABIParamResultInfo) String() string {
res := ""
for k, p := range ri.inparams {
res += fmt.Sprintf("IN %d: %s\n", k, p.toString(ri.config))
}
for k, r := range ri.outparams {
res += fmt.Sprintf("OUT %d: %s\n", k, r.toString(ri.config))
}
res += fmt.Sprintf("offsetToSpillArea: %d spillAreaSize: %d",
ri.offsetToSpillArea, ri.spillAreaSize)
return res
}
// assignState holds intermediate state during the register assigning process
// for a given function signature.
type assignState struct {
rTotal RegAmounts // total reg amounts from ABI rules
rUsed RegAmounts // regs used by params completely assigned so far
pUsed RegAmounts // regs used by the current param (or pieces therein)
stackOffset int64 // current stack offset
spillOffset int64 // current spill offset
}
// align returns 'a' rounded up to t's alignment
func align(a int64, t *types.Type) int64 {
return alignTo(a, int(t.Align))
}
// alignTo returns 'a' rounded up to t, where t must be 0 or a power of 2.
func alignTo(a int64, t int) int64 {
if t == 0 {
return a
}
return types.Rnd(a, int64(t))
}
// stackSlot returns a stack offset for a param or result of the
// specified type.
func (state *assignState) stackSlot(t *types.Type) int64 {
rv := align(state.stackOffset, t)
state.stackOffset = rv + t.Width
return rv
}
// allocateRegs returns a set of register indices for a parameter or result
// that we've just determined to be register-assignable. The number of registers
// needed is assumed to be stored in state.pUsed.
func (state *assignState) allocateRegs() []RegIndex {
regs := []RegIndex{}
// integer
for r := state.rUsed.intRegs; r < state.rUsed.intRegs+state.pUsed.intRegs; r++ {
regs = append(regs, RegIndex(r))
}
state.rUsed.intRegs += state.pUsed.intRegs
// floating
for r := state.rUsed.floatRegs; r < state.rUsed.floatRegs+state.pUsed.floatRegs; r++ {
regs = append(regs, RegIndex(r+state.rTotal.intRegs))
}
state.rUsed.floatRegs += state.pUsed.floatRegs
return regs
}
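// For example, with rUsed = {2 int, 0 float}, pUsed = {1 int, 1 float}
// and 9 total integer registers, this returns [2 9], i.e. I2 and F0
// (floating-point indices are offset by the integer register count).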
// regAllocate creates a register ABIParamAssignment object for a param
// or result with the specified type, as a final step (this assumes
// that all of the safety/suitability analysis is complete).
func (state *assignState) regAllocate(t *types.Type, isReturn bool) ABIParamAssignment {
spillLoc := int64(-1)
if !isReturn {
// Spill for register-resident t must be aligned for storage of a t.
spillLoc = align(state.spillOffset, t)
state.spillOffset = spillLoc + t.Size()
}
return ABIParamAssignment{
Type: t,
Registers: state.allocateRegs(),
offset: int32(spillLoc),
}
}
// stackAllocate creates a stack memory ABIParamAssignment object for
// a param or result with the specified type, as a final step (this
// assumes that all of the safety/suitability analysis is complete).
func (state *assignState) stackAllocate(t *types.Type) ABIParamAssignment {
return ABIParamAssignment{
Type: t,
offset: int32(state.stackSlot(t)),
}
}
// intUsed returns the number of integer registers consumed
// at a given point within an assignment stage.
func (state *assignState) intUsed() int {
return state.rUsed.intRegs + state.pUsed.intRegs
}
// floatUsed returns the number of floating point registers consumed at
// a given point within an assignment stage.
func (state *assignState) floatUsed() int {
return state.rUsed.floatRegs + state.pUsed.floatRegs
}
// regassignIntegral examines a param/result of integral type 't' to
// determines whether it can be register-assigned. Returns TRUE if we
// can register allocate, FALSE otherwise (and updates state
// accordingly).
func (state *assignState) regassignIntegral(t *types.Type) bool {
regsNeeded := int(types.Rnd(t.Width, int64(types.PtrSize)) / int64(types.PtrSize))
if t.IsComplex() {
regsNeeded = 2
}
// Floating point and complex.
if t.IsFloat() || t.IsComplex() {
if regsNeeded+state.floatUsed() > state.rTotal.floatRegs {
// not enough regs
return false
}
state.pUsed.floatRegs += regsNeeded
return true
}
// Non-floating point
if regsNeeded+state.intUsed() > state.rTotal.intRegs {
// not enough regs
return false
}
state.pUsed.intRegs += regsNeeded
return true
}
// regassignArray processes an array type (or array component within some
// other enclosing type) to determine if it can be register assigned.
// Returns TRUE if we can register allocate, FALSE otherwise.
func (state *assignState) regassignArray(t *types.Type) bool {
nel := t.NumElem()
if nel == 0 {
return true
}
if nel > 1 {
// Not an array of length 1: stack assign
return false
}
// Visit element
return state.regassign(t.Elem())
}
// regassignStruct processes a struct type (or struct component within
// some other enclosing type) to determine if it can be register
// assigned. Returns TRUE if we can register allocate, FALSE otherwise.
func (state *assignState) regassignStruct(t *types.Type) bool {
for _, field := range t.FieldSlice() {
if !state.regassign(field.Type) {
return false
}
}
return true
}
// synthOnce ensures that we only create the synth* fake types once.
var synthOnce sync.Once
// synthSlice, synthString, and synthIface are synthesized struct types
// meant to capture the underlying implementations of string/slice/interface.
var synthSlice *types.Type
var synthString *types.Type
var synthIface *types.Type
// setup performs setup for the register assignment utilities, manufacturing
// a small set of synthesized types that we'll need along the way.
func setup() {
synthOnce.Do(func() {
fname := types.BuiltinPkg.Lookup
nxp := src.NoXPos
unsp := types.Types[types.TUNSAFEPTR]
ui := types.Types[types.TUINTPTR]
synthSlice = types.NewStruct(types.NoPkg, []*types.Field{
types.NewField(nxp, fname("ptr"), unsp),
types.NewField(nxp, fname("len"), ui),
types.NewField(nxp, fname("cap"), ui),
})
synthString = types.NewStruct(types.NoPkg, []*types.Field{
types.NewField(nxp, fname("data"), unsp),
types.NewField(nxp, fname("len"), ui),
})
synthIface = types.NewStruct(types.NoPkg, []*types.Field{
types.NewField(nxp, fname("f1"), unsp),
types.NewField(nxp, fname("f2"), unsp),
})
})
}
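// These mirror the runtime's header layouts: a slice is (ptr, len, cap),
// a string is (data, len), and an interface value is two words, which is
// why unsafe.Sizeof("") is 2*PtrSize and unsafe.Sizeof([]byte(nil)) is
// 3*PtrSize.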
// regassign examines a given param type (or component within some
// composite) to determine if it can be register assigned. Returns
// TRUE if we can register allocate, FALSE otherwise.
func (state *assignState) regassign(pt *types.Type) bool {
typ := pt.Kind()
if pt.IsScalar() || pt.IsPtrShaped() {
return state.regassignIntegral(pt)
}
switch typ {
case types.TARRAY:
return state.regassignArray(pt)
case types.TSTRUCT:
return state.regassignStruct(pt)
case types.TSLICE:
return state.regassignStruct(synthSlice)
case types.TSTRING:
return state.regassignStruct(synthString)
case types.TINTER:
return state.regassignStruct(synthIface)
default:
panic("not expected")
}
}
// assignParamOrReturn processes a given receiver, param, or result
// of type 'pt' to determine whether it can be register assigned.
// It returns an ABIParamAssignment describing the placement, and updates
// the register and stack bookkeeping in 'state' as a side effect.
func (state *assignState) assignParamOrReturn(pt *types.Type, isReturn bool) ABIParamAssignment {
state.pUsed = RegAmounts{}
if pt.Width == types.BADWIDTH {
panic("should never happen")
} else if pt.Width == 0 {
return state.stackAllocate(pt)
} else if state.regassign(pt) {
return state.regAllocate(pt, isReturn)
} else {
return state.stackAllocate(pt)
}
}
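To make the assignment strategy concrete, here is a self-contained sketch of the same greedy scheme (not part of the commit; the register counts, the 8-byte register size, and the toy param descriptor are assumptions for illustration): each parameter takes contiguous registers of its class when enough remain, and otherwise falls back to a stack slot.

package main

import "fmt"

// param is a toy descriptor: size in bytes, and whether it is floating point.
type param struct {
	size    int64
	isFloat bool
}

// assign mimics regassignIntegral plus stackSlot: a parameter takes
// ceil(size/8) registers of its class, or a stack slot if the class is
// exhausted. Later parameters may still get registers after an earlier
// one has been assigned to the stack.
func assign(params []param, intRegs, floatRegs int) []string {
	var out []string
	var stackOff int64
	usedInt, usedFloat := 0, 0
	for i, p := range params {
		need := int((p.size + 7) / 8)
		switch {
		case p.isFloat && usedFloat+need <= floatRegs:
			out = append(out, fmt.Sprintf("arg%d: F%d", i, usedFloat))
			usedFloat += need
		case !p.isFloat && usedInt+need <= intRegs:
			out = append(out, fmt.Sprintf("arg%d: I%d", i, usedInt))
			usedInt += need
		default:
			out = append(out, fmt.Sprintf("arg%d: stack+%d", i, stackOff))
			stackOff += p.size // the real code also aligns this offset
		}
	}
	return out
}

func main() {
	// With only 2 integer registers, the 16-byte arg2 does not fit and
	// goes to the stack, while arg3 still gets the remaining I1.
	fmt.Println(assign([]param{{8, false}, {8, true}, {16, false}, {8, false}}, 2, 15))
	// Output: [arg0: I0 arg1: F0 arg2: stack+0 arg3: I1]
}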

View file

@@ -5,13 +5,13 @@
package amd64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/x86"
)
var leaptr = x86.ALEAQ
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &x86.Linkamd64
arch.REGSP = x86.REGSP
arch.MAXWIDTH = 1 << 50

View file

@@ -5,7 +5,10 @@
package amd64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
"cmd/internal/objabi"
@@ -19,8 +22,8 @@ var isPlan9 = objabi.GOOS == "plan9"
const (
dzBlocks = 16 // number of MOV/ADD blocks
dzBlockLen = 4 // number of clears per block
dzBlockSize = 19 // size of instructions in a single block
dzMovSize = 4 // size of single MOV instruction w/ offset
dzBlockSize = 23 // size of instructions in a single block
dzMovSize = 5 // size of single MOV instruction w/ offset
dzLeaqSize = 4 // size of single LEAQ instruction
dzClearStep = 16 // number of bytes cleared by each MOV instruction
@@ -51,7 +54,7 @@ func dzDI(b int64) int64 {
return -dzClearStep * (dzBlockLen - tailSteps)
}
func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
const (
ax = 1 << iota
x0
@@ -61,67 +64,67 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Pr
return p
}
if cnt%int64(gc.Widthreg) != 0 {
if cnt%int64(types.RegSize) != 0 {
// should only happen with nacl
if cnt%int64(gc.Widthptr) != 0 {
gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
if cnt%int64(types.PtrSize) != 0 {
base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
}
if *state&ax == 0 {
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*state |= ax
}
p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
off += int64(gc.Widthptr)
cnt -= int64(gc.Widthptr)
p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
off += int64(types.PtrSize)
cnt -= int64(types.PtrSize)
}
if cnt == 8 {
if *state&ax == 0 {
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*state |= ax
}
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
} else if !isPlan9 && cnt <= int64(8*gc.Widthreg) {
p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
} else if !isPlan9 && cnt <= int64(8*types.RegSize) {
if *state&x0 == 0 {
p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
*state |= x0
}
for i := int64(0); i < cnt/16; i++ {
p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
}
if cnt%16 != 0 {
p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
}
} else if !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
} else if !isPlan9 && (cnt <= int64(128*types.RegSize)) {
if *state&x0 == 0 {
p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
*state |= x0
}
p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
p.To.Sym = gc.Duffzero
p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
p.To.Sym = ir.Syms.Duffzero
if cnt%16 != 0 {
p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
}
} else {
if *state&ax == 0 {
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*state |= ax
}
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
p = pp.Appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
p = pp.Append(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
}
return p
}
func ginsnop(pp *gc.Progs) *obj.Prog {
func ginsnop(pp *objw.Progs) *obj.Prog {
// This is a hardware nop (1-byte 0x90) instruction,
// even though we describe it as an explicit XCHGL here.
// Particularly, this does not zero the high 32 bits

View file

@@ -8,16 +8,18 @@ import (
"fmt"
"math"
"cmd/compile/internal/gc"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
)
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
flive := b.FlagsLiveAtEnd
for _, c := range b.ControlValues() {
flive = c.Type.IsFlags() || flive
@@ -110,7 +112,7 @@ func moveByType(t *types.Type) obj.As {
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
@@ -164,7 +166,35 @@ func duff(size int64) (int64, int64) {
return off, adj
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func getgFromTLS(s *ssagen.State, r int16) {
// See the comments in cmd/internal/obj/x86/obj6.go
// near CanUse1InsnTLS for a detailed explanation of these instructions.
if x86.CanUse1InsnTLS(base.Ctxt) {
// MOVQ (TLS), r
p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG
p.To.Reg = r
} else {
// MOVQ TLS, r
// MOVQ (r)(TLS*1), r
p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG
p.To.Reg = r
q := s.Prog(x86.AMOVQ)
q.From.Type = obj.TYPE_MEM
q.From.Reg = r
q.From.Index = x86.REG_TLS
q.From.Scale = 1
q.To.Type = obj.TYPE_REG
q.To.Reg = r
}
}
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpAMD64VFMADD231SD:
p := s.Prog(v.Op.Asm())
@@ -630,12 +660,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = o
}
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL, ssa.OpAMD64LEAW:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
@@ -671,7 +701,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[1].Reg()
case ssa.OpAMD64CMPQconstload, ssa.OpAMD64CMPLconstload, ssa.OpAMD64CMPWconstload, ssa.OpAMD64CMPBconstload:
@@ -679,20 +709,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux2(&p.From, v, sc.Off())
ssagen.AddAux2(&p.From, v, sc.Off())
p.To.Type = obj.TYPE_CONST
p.To.Offset = sc.Val()
case ssa.OpAMD64CMPQloadidx8, ssa.OpAMD64CMPQloadidx1, ssa.OpAMD64CMPLloadidx4, ssa.OpAMD64CMPLloadidx1, ssa.OpAMD64CMPWloadidx2, ssa.OpAMD64CMPWloadidx1, ssa.OpAMD64CMPBloadidx1:
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[2].Reg()
case ssa.OpAMD64CMPQconstloadidx8, ssa.OpAMD64CMPQconstloadidx1, ssa.OpAMD64CMPLconstloadidx4, ssa.OpAMD64CMPLconstloadidx1, ssa.OpAMD64CMPWconstloadidx2, ssa.OpAMD64CMPWconstloadidx1, ssa.OpAMD64CMPBconstloadidx1:
sc := v.AuxValAndOff()
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
gc.AddAux2(&p.From, v, sc.Off())
ssagen.AddAux2(&p.From, v, sc.Off())
p.To.Type = obj.TYPE_CONST
p.To.Offset = sc.Val()
case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
@@ -732,14 +762,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1,
ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2:
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore,
@@ -751,7 +781,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1,
ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2,
ssa.OpAMD64ADDLmodifyidx1, ssa.OpAMD64ADDLmodifyidx4, ssa.OpAMD64ADDLmodifyidx8, ssa.OpAMD64ADDQmodifyidx1, ssa.OpAMD64ADDQmodifyidx8,
@@ -763,7 +793,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
memIdx(&p.To, v)
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify:
sc := v.AuxValAndOff()
off := sc.Off()
@@ -786,7 +816,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(asm)
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, off)
ssagen.AddAux2(&p.To, v, off)
break
}
fallthrough
@@ -801,7 +831,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = val
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, off)
ssagen.AddAux2(&p.To, v, off)
case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
p := s.Prog(v.Op.Asm())
@@ -810,7 +840,21 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, sc.Off())
ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVOstorezero:
if s.ABI != obj.ABIInternal {
v.Fatalf("MOVOstorezero can be only used in ABIInternal functions")
}
if !base.Flag.ABIWrap {
// zeroing X15 manually if wrappers are not used
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_X15
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.To, v)
case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1,
ssa.OpAMD64ADDLconstmodifyidx1, ssa.OpAMD64ADDLconstmodifyidx4, ssa.OpAMD64ADDLconstmodifyidx8, ssa.OpAMD64ADDQconstmodifyidx1, ssa.OpAMD64ADDQconstmodifyidx8,
ssa.OpAMD64ANDLconstmodifyidx1, ssa.OpAMD64ANDLconstmodifyidx4, ssa.OpAMD64ANDLconstmodifyidx8, ssa.OpAMD64ANDQconstmodifyidx1, ssa.OpAMD64ANDQconstmodifyidx8,
@@ -835,7 +879,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_NONE
}
memIdx(&p.To, v)
gc.AddAux2(&p.To, v, sc.Off())
ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
@@ -865,7 +909,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if v.Reg() != v.Args[0].Reg() {
@@ -891,13 +935,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = r
p.From.Index = i
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if v.Reg() != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
case ssa.OpAMD64DUFFZERO:
if s.ABI != obj.ABIInternal {
v.Fatalf("MOVOconst can be only used in ABIInternal functions")
}
if !base.Flag.ABIWrap {
// zeroing X15 manually if wrappers are not used
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
off := duffStart(v.AuxInt)
adj := duffAdj(v.AuxInt)
var p *obj.Prog
@@ -911,18 +962,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
p = s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Duffzero
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = off
case ssa.OpAMD64MOVOconst:
if v.AuxInt != 0 {
v.Fatalf("MOVOconst can only do constant=0")
}
r := v.Reg()
opregreg(s, x86.AXORPS, r, r)
case ssa.OpAMD64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Duffcopy
p.To.Sym = ir.Syms.Duffcopy
if v.AuxInt%16 != 0 {
v.Fatalf("bad DUFFCOPY AuxInt %v", v.AuxInt)
}
@@ -949,7 +994,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -961,44 +1006,37 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpAMD64LoweredHasCPUFeature:
p := s.Prog(x86.AMOVBQZX)
p.From.Type = obj.TYPE_MEM
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64LoweredGetClosurePtr:
// Closure pointer is DX.
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpAMD64LoweredGetG:
r := v.Reg()
// See the comments in cmd/internal/obj/x86/obj6.go
// near CanUse1InsnTLS for a detailed explanation of these instructions.
if x86.CanUse1InsnTLS(gc.Ctxt) {
// MOVQ (TLS), r
p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG
p.To.Reg = r
} else {
// MOVQ TLS, r
// MOVQ (r)(TLS*1), r
p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG
p.To.Reg = r
q := s.Prog(x86.AMOVQ)
q.From.Type = obj.TYPE_MEM
q.From.Reg = r
q.From.Index = x86.REG_TLS
q.From.Scale = 1
q.To.Type = obj.TYPE_REG
q.To.Reg = r
if base.Flag.ABIWrap {
v.Fatalf("LoweredGetG should not appear in new ABI")
}
case ssa.OpAMD64CALLstatic, ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
r := v.Reg()
getgFromTLS(s, r)
case ssa.OpAMD64CALLstatic:
if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
// zeroing X15 when entering ABIInternal from ABI0
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
s.Call(v)
if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
// zeroing X15 when entering ABIInternal from ABI0
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
case ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
s.Call(v)
case ssa.OpAMD64LoweredGetCallerPC:
@@ -1012,12 +1050,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpAMD64LoweredGetCallerSP:
// caller's SP is the address of the first arg
mov := x86.AMOVQ
if gc.Widthptr == 4 {
if types.PtrSize == 4 {
mov = x86.AMOVL
}
p := s.Prog(mov)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
p.From.Offset = -base.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -1027,14 +1065,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
// arg0 is in DI. Set sym to match where regalloc put arg1.
p.To.Sym = gc.GCWriteBarrierReg[v.Args[1].Reg()]
p.To.Sym = ssagen.GCWriteBarrierReg[v.Args[1].Reg()]
case ssa.OpAMD64LoweredPanicBoundsA, ssa.OpAMD64LoweredPanicBoundsB, ssa.OpAMD64LoweredPanicBoundsC:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
s.UseArgs(int64(2 * gc.Widthptr)) // space used in callee args area by assembly stubs
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(int64(2 * types.PtrSize)) // space used in callee args area by assembly stubs
case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
@@ -1115,7 +1153,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpAMD64SETNEF:
p := s.Prog(v.Op.Asm())
@@ -1164,14 +1202,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpAMD64MOVBatomicload, ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpAMD64XCHGB, ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ:
@@ -1184,7 +1222,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = r
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[1].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock:
r := v.Reg0()
if r != v.Args[0].Reg() {
@@ -1196,7 +1234,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = r
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[1].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpAMD64CMPXCHGLlock, ssa.OpAMD64CMPXCHGQlock:
if v.Args[1].Reg() != x86.REG_AX {
v.Fatalf("input[1] not in AX %s", v.LongString())
@@ -1207,7 +1245,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
p = s.Prog(x86.ASETEQ)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
@@ -1218,20 +1256,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpClobber:
p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0xdeaddead
p.To.Type = obj.TYPE_MEM
p.To.Reg = x86.REG_SP
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
p = s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0xdeaddead
p.To.Type = obj.TYPE_MEM
p.To.Reg = x86.REG_SP
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
p.To.Offset += 4
default:
v.Fatalf("genValue not implemented: %s", v.LongString())
@@ -1257,22 +1295,22 @@ var blockJump = [...]struct {
ssa.BlockAMD64NAN: {x86.AJPS, x86.AJPC},
}
var eqfJumps = [2][2]gc.IndexJump{
var eqfJumps = [2][2]ssagen.IndexJump{
{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
}
var nefJumps = [2][2]gc.IndexJump{
var nefJumps = [2][2]ssagen.IndexJump{
{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
// defer returns in rax:
@@ -1285,16 +1323,22 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.To.Reg = x86.REG_AX
p = s.Prog(x86.AJNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
if s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
// zeroing X15 when entering ABIInternal from ABI0
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN

View file

@@ -5,13 +5,13 @@
package arm
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/arm"
"cmd/internal/objabi"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &arm.Linkarm
arch.REGSP = arm.REGSP
arch.MAXWIDTH = (1 << 32) - 1
@@ -20,7 +20,7 @@ func Init(arch *gc.Arch) {
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}

View file

@@ -5,49 +5,51 @@
package arm
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm"
)
func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if *r0 == 0 {
p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
p = pp.Append(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
*r0 = 1
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
}
} else if cnt <= int64(128*gc.Widthptr) {
p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
} else if cnt <= int64(128*types.PtrSize) {
p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
p.Reg = arm.REGSP
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
} else {
p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
p.Reg = arm.REGSP
p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
p.Reg = arm.REG_R1
p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
p1 := p
p.Scond |= arm.C_PBIT
p = pp.Appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
p = pp.Append(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
p.Reg = arm.REG_R2
p = pp.Appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
gc.Patch(p, p1)
p = pp.Append(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
p.To.SetTarget(p1)
}
return p
}
func ginsnop(pp *gc.Progs) *obj.Prog {
func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(arm.AAND)
p.From.Type = obj.TYPE_REG
p.From.Reg = arm.REG_R0

View file

@@ -9,9 +9,11 @@ import (
"math"
"math/bits"
"cmd/compile/internal/gc"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm"
@@ -91,7 +93,7 @@ func makeshift(reg int16, typ int64, s int64) shift {
}
// genshift generates a Prog for r = r0 op (r1 shifted by n)
func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(makeshift(r1, typ, n))
@@ -109,7 +111,7 @@ func makeregshift(r1 int16, typ int64, r2 int16) shift {
}
// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
func genregshift(s *ssagen.State, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(makeregshift(r1, typ, r2))
@@ -143,7 +145,7 @@ func getBFC(v uint32) (uint32, uint32) {
return 0xffffffff, 0
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpARMMOVWreg:
if v.Type.IsMemory() {
@@ -181,7 +183,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
@@ -192,7 +194,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpARMADD,
ssa.OpARMADC,
ssa.OpARMSUB,
@@ -543,10 +545,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
case *gc.Node:
ssagen.AddAux(&p.From, v)
case *ir.Name:
wantreg = "SP"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case nil:
// No sym, just MOVW $off(SP), R
wantreg = "SP"
@@ -566,7 +568,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARMMOVBstore,
@@ -579,7 +581,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx:
// this is just shift 0 bits
fallthrough
@@ -700,7 +702,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Udiv
p.To.Sym = ir.Syms.Udiv
case ssa.OpARMLoweredWB:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
@@ -710,39 +712,39 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(8) // space used in callee args area by assembly stubs
case ssa.OpARMLoweredPanicExtendA, ssa.OpARMLoweredPanicExtendB, ssa.OpARMLoweredPanicExtendC:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
s.UseArgs(12) // space used in callee args area by assembly stubs
case ssa.OpARMDUFFZERO:
p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpARMDUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffcopy
p.To.Sym = ir.Syms.Duffcopy
p.To.Offset = v.AuxInt
case ssa.OpARMLoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(arm.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = arm.REGTMP
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpARMLoweredZero:
// MOVW.P Rarg2, 4(R1)
@@ -777,7 +779,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p2.Reg = arm.REG_R1
p3 := s.Prog(arm.ABLE)
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
p3.To.SetTarget(p)
case ssa.OpARMLoweredMove:
// MOVW.P 4(R1), Rtmp
// MOVW.P Rtmp, 4(R2)
@@ -818,7 +820,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.Reg = arm.REG_R1
p4 := s.Prog(arm.ABLE)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
p4.To.SetTarget(p)
case ssa.OpARMEqual,
ssa.OpARMNotEqual,
ssa.OpARMLessThan,
@@ -844,12 +846,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpARMLoweredGetClosurePtr:
// Closure pointer is R7 (arm.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpARMLoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = -gc.Ctxt.FixedFrameSize()
p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -899,24 +901,24 @@ var blockJump = map[ssa.BlockKind]struct {
}
// To model a 'LEnoov' ('<=' without overflow checking) branching
var leJumps = [2][2]gc.IndexJump{
var leJumps = [2][2]ssagen.IndexJump{
{{Jump: arm.ABEQ, Index: 0}, {Jump: arm.ABPL, Index: 1}}, // next == b.Succs[0]
{{Jump: arm.ABMI, Index: 0}, {Jump: arm.ABEQ, Index: 0}}, // next == b.Succs[1]
}
// To model a 'GTnoov' ('>' without overflow checking) branching
var gtJumps = [2][2]gc.IndexJump{
var gtJumps = [2][2]ssagen.IndexJump{
{{Jump: arm.ABMI, Index: 1}, {Jump: arm.ABEQ, Index: 1}}, // next == b.Succs[0]
{{Jump: arm.ABEQ, Index: 1}, {Jump: arm.ABPL, Index: 0}}, // next == b.Succs[1]
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
@@ -929,11 +931,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.Reg = arm.REG_R0
p = s.Prog(arm.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:

View file

@@ -5,12 +5,12 @@
package arm64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/arm64"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &arm64.Linkarm64
arch.REGSP = arm64.REGSP
arch.MAXWIDTH = 1 << 50
@@ -20,7 +20,7 @@ func Init(arch *gc.Arch) {
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}

View file

@@ -5,7 +5,9 @@
package arm64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
"cmd/internal/objabi"
@@ -22,52 +24,52 @@ func padframe(frame int64) int64 {
return frame
}
func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
}
} else if cnt <= int64(128*gc.Widthptr) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
if cnt%(2*int64(gc.Widthptr)) != 0 {
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
off += int64(gc.Widthptr)
cnt -= int64(gc.Widthptr)
} else if cnt <= int64(128*types.PtrSize) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
if cnt%(2*int64(types.PtrSize)) != 0 {
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
off += int64(types.PtrSize)
cnt -= int64(types.PtrSize)
}
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
p = pp.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
p = pp.Append(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
p.Reg = arm64.REG_R20
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Offset = 4 * (64 - cnt/(2*int64(gc.Widthptr)))
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 4 * (64 - cnt/(2*int64(types.PtrSize)))
} else {
// Not using REGTMP, so this is async preemptible (async preemption clobbers REGTMP).
// We are at the function entry, where no register is live, so it is okay to clobber
// other registers
const rtmp = arm64.REG_R20
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p.Reg = arm64.REGRT1
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
p.Reg = arm64.REGRT1
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(gc.Widthptr))
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize))
p.Scond = arm64.C_XPRE
p1 := p
p = pp.Appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
p = pp.Append(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
p.Reg = arm64.REGRT2
p = pp.Appendpp(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
gc.Patch(p, p1)
p = pp.Append(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
p.To.SetTarget(p1)
}
return p
}
func ginsnop(pp *gc.Progs) *obj.Prog {
func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(arm64.AHINT)
p.From.Type = obj.TYPE_CONST
return p

View file

@@ -7,9 +7,11 @@ package arm64
import (
"math"
"cmd/compile/internal/gc"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
@@ -81,7 +83,7 @@ func makeshift(reg int16, typ int64, s int64) int64 {
}
// genshift generates a Prog for r = r0 op (r1 shifted by n)
func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = makeshift(r1, typ, n)
@@ -110,7 +112,7 @@ func genIndexedOperand(v *ssa.Value) obj.Addr {
return mop
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpARM64MOVDreg:
if v.Type.IsMemory() {
@@ -148,7 +150,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
@@ -159,7 +161,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpARM64ADD,
ssa.OpARM64SUB,
ssa.OpARM64AND,
@@ -393,10 +395,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
case *gc.Node:
ssagen.AddAux(&p.From, v)
case *ir.Name:
wantreg = "SP"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case nil:
// No sym, just MOVD $off(SP), R
wantreg = "SP"
@@ -417,7 +419,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARM64MOVBloadidx,
@@ -444,7 +446,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpARM64MOVBstore,
@@ -461,7 +463,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpARM64MOVBstoreidx,
ssa.OpARM64MOVHstoreidx,
ssa.OpARM64MOVWstoreidx,
@@ -482,7 +484,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = int64(v.Args[2].Reg())
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpARM64MOVBstorezero,
ssa.OpARM64MOVHstorezero,
ssa.OpARM64MOVWstorezero,
@@ -492,7 +494,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = arm64.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpARM64MOVBstorezeroidx,
ssa.OpARM64MOVHstorezeroidx,
ssa.OpARM64MOVWstorezeroidx,
@@ -511,7 +513,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = int64(arm64.REGZERO)
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpARM64BFI,
ssa.OpARM64BFXIL:
r := v.Reg()
@@ -580,7 +582,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm64.REGTMP
p2.To.Type = obj.TYPE_BRANCH
gc.Patch(p2, p)
p2.To.SetTarget(p)
case ssa.OpARM64LoweredAtomicExchange64Variant,
ssa.OpARM64LoweredAtomicExchange32Variant:
swap := arm64.ASWPALD
@@ -634,7 +636,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
p3.To.SetTarget(p)
case ssa.OpARM64LoweredAtomicAdd64Variant,
ssa.OpARM64LoweredAtomicAdd32Variant:
// LDADDAL Rarg1, (Rarg0), Rout
@@ -698,13 +700,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p4.From.Type = obj.TYPE_REG
p4.From.Reg = arm64.REGTMP
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
p4.To.SetTarget(p)
p5 := s.Prog(arm64.ACSET)
p5.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p5.From.Reg = arm64.COND_EQ
p5.To.Type = obj.TYPE_REG
p5.To.Reg = out
gc.Patch(p2, p5)
p2.To.SetTarget(p5)
case ssa.OpARM64LoweredAtomicCas64Variant,
ssa.OpARM64LoweredAtomicCas32Variant:
// Rarg0: ptr
@@ -792,7 +794,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
p3.To.SetTarget(p)
case ssa.OpARM64LoweredAtomicAnd8Variant,
ssa.OpARM64LoweredAtomicAnd32Variant:
atomic_clear := arm64.ALDCLRALW
@@ -959,7 +961,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpARM64LoweredZero:
// STP.P (ZR,ZR), 16(R16)
@@ -980,12 +982,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p2.Reg = arm64.REG_R16
p3 := s.Prog(arm64.ABLE)
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
p3.To.SetTarget(p)
case ssa.OpARM64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffcopy
p.To.Sym = ir.Syms.Duffcopy
p.To.Offset = v.AuxInt
case ssa.OpARM64LoweredMove:
// MOVD.P 8(R16), Rtmp
@@ -1013,7 +1015,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.Reg = arm64.REG_R16
p4 := s.Prog(arm64.ABLE)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
p4.To.SetTarget(p)
case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
s.Call(v)
case ssa.OpARM64LoweredWB:
@@ -1025,21 +1027,21 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpARM64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(arm64.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpARM64Equal,
ssa.OpARM64NotEqual,
@@ -1067,12 +1069,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpARM64LoweredGetClosurePtr:
// Closure pointer is R26 (arm64.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpARM64LoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm64.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = -gc.Ctxt.FixedFrameSize()
p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -1142,24 +1144,24 @@ var blockJump = map[ssa.BlockKind]struct {
}
// To model a 'LEnoov' ('<=' without overflow checking) branching
var leJumps = [2][2]gc.IndexJump{
var leJumps = [2][2]ssagen.IndexJump{
{{Jump: arm64.ABEQ, Index: 0}, {Jump: arm64.ABPL, Index: 1}}, // next == b.Succs[0]
{{Jump: arm64.ABMI, Index: 0}, {Jump: arm64.ABEQ, Index: 0}}, // next == b.Succs[1]
}
// To model a 'GTnoov' ('>' without overflow checking) branching
var gtJumps = [2][2]gc.IndexJump{
var gtJumps = [2][2]ssagen.IndexJump{
{{Jump: arm64.ABMI, Index: 1}, {Jump: arm64.ABEQ, Index: 1}}, // next == b.Succs[0]
{{Jump: arm64.ABEQ, Index: 1}, {Jump: arm64.ABPL, Index: 0}}, // next == b.Succs[1]
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
@@ -1172,11 +1174,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.Reg = arm64.REG_R0
p = s.Prog(arm64.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:

View file

@@ -1,15 +1,43 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package base
import (
"cmd/compile/internal/types"
"cmd/internal/src"
"cmd/internal/sys"
"os"
)
var atExitFuncs []func()
func AtExit(f func()) {
atExitFuncs = append(atExitFuncs, f)
}
func Exit(code int) {
for i := len(atExitFuncs) - 1; i >= 0; i-- {
f := atExitFuncs[i]
atExitFuncs = atExitFuncs[:i]
f()
}
os.Exit(code)
}
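// Note that handlers run in LIFO order: after AtExit(a) and AtExit(b),
// Exit(0) calls b, then a, then os.Exit(0).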
// To enable tracing support (-t flag), set EnableTrace to true.
const EnableTrace = false
func Compiling(pkgs []string) bool {
if Ctxt.Pkgpath != "" {
for _, p := range pkgs {
if Ctxt.Pkgpath == p {
return true
}
}
}
return false
}
// The racewalk pass is currently handled in three parts.
//
// First, for flag_race, it inserts calls to racefuncenter and
@@ -32,7 +60,7 @@ import (
// Do not instrument the following packages at all,
// at best instrumentation would cause infinite recursion.
var omit_pkgs = []string{
var NoInstrumentPkgs = []string{
"runtime/internal/atomic",
"runtime/internal/sys",
"runtime/internal/math",
@@ -44,50 +72,4 @@ var omit_pkgs = []string{
// Don't insert racefuncenterfp/racefuncexit into the following packages.
// Memory accesses in the packages are either uninteresting or will cause false positives.
var norace_inst_pkgs = []string{"sync", "sync/atomic"}
func ispkgin(pkgs []string) bool {
if myimportpath != "" {
for _, p := range pkgs {
if myimportpath == p {
return true
}
}
}
return false
}
func instrument(fn *Node) {
if fn.Func.Pragma&Norace != 0 {
return
}
if !flag_race || !ispkgin(norace_inst_pkgs) {
fn.Func.SetInstrumentBody(true)
}
if flag_race {
lno := lineno
lineno = src.NoXPos
if thearch.LinkArch.Arch.Family != sys.AMD64 {
fn.Func.Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil))
} else {
// nodpc is the PC of the caller as extracted by
// getcallerpc. We use -widthptr(FP) for x86.
// This only works for amd64. This will not
// work on arm or others that might support
// race in the future.
nodpc := nodfp.copy()
nodpc.Type = types.Types[TUINTPTR]
nodpc.Xoffset = int64(-Widthptr)
fn.Func.Dcl = append(fn.Func.Dcl, nodpc)
fn.Func.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil))
}
lineno = lno
}
}
var NoRacePkgs = []string{"sync", "sync/atomic"}

View file

@ -0,0 +1,194 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Debug arguments, set by -d flag.
package base
import (
"fmt"
"log"
"os"
"reflect"
"strconv"
"strings"
"cmd/internal/objabi"
)
// Debug holds the parsed debugging configuration values.
var Debug = DebugFlags{
Fieldtrack: &objabi.Fieldtrack_enabled,
}
// DebugFlags defines the debugging configuration values (see var Debug).
// Each struct field is a different value, named for the lower-case of the field name.
// Each field must be an int or string and must have a `help` struct tag.
//
// The -d option takes a comma-separated list of settings.
// Each setting is name=value; for ints, name is short for name=1.
type DebugFlags struct {
Append int `help:"print information about append compilation"`
Checkptr int `help:"instrument unsafe pointer conversions"`
Closure int `help:"print information about closure compilation"`
DclStack int `help:"run internal dclstack check"`
Defer int `help:"print information about defer compilation"`
DisableNil int `help:"disable nil checks"`
DumpPtrs int `help:"show Node pointers values in dump output"`
DwarfInl int `help:"print information about DWARF inlined function creation"`
Export int `help:"print export data"`
Fieldtrack *int `help:"enable field tracking"`
GCProg int `help:"print dump of GC programs"`
Libfuzzer int `help:"enable coverage instrumentation for libfuzzer"`
LocationLists int `help:"print information about DWARF location list creation"`
Nil int `help:"print information about nil checks"`
PCTab string `help:"print named pc-value table"`
Panic int `help:"show all compiler panics"`
Slice int `help:"print information about slice compilation"`
SoftFloat int `help:"force compiler to emit soft-float code"`
TypeAssert int `help:"print information about type assertion inlining"`
TypecheckInl int `help:"eager typechecking of inline function bodies"`
WB int `help:"print information about write barriers"`
ABIWrap int `help:"print information about ABI wrapper generation"`
any bool // set when any of the values have been set
}
// Any reports whether any of the debug flags have been set.
func (d *DebugFlags) Any() bool { return d.any }
type debugField struct {
name string
help string
val interface{} // *int or *string
}
var debugTab []debugField
func init() {
v := reflect.ValueOf(&Debug).Elem()
t := v.Type()
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Name == "any" {
continue
}
name := strings.ToLower(f.Name)
help := f.Tag.Get("help")
if help == "" {
panic(fmt.Sprintf("base.Debug.%s is missing help text", f.Name))
}
ptr := v.Field(i).Addr().Interface()
switch ptr.(type) {
default:
panic(fmt.Sprintf("base.Debug.%s has invalid type %v (must be int or string)", f.Name, f.Type))
case *int, *string:
// ok
case **int:
ptr = *ptr.(**int) // record the *int itself
}
debugTab = append(debugTab, debugField{name, help, ptr})
}
}
// DebugSSA is called to set a -d ssa/... option.
// If nil, those options are reported as invalid options.
// If DebugSSA returns a non-empty string, that text is reported as a compiler error.
var DebugSSA func(phase, flag string, val int, valString string) string
// parseDebug parses the -d debug string argument.
func parseDebug(debugstr string) {
// parse -d argument
if debugstr == "" {
return
}
Debug.any = true
Split:
for _, name := range strings.Split(debugstr, ",") {
if name == "" {
continue
}
// display help about the -d option itself and quit
if name == "help" {
fmt.Print(debugHelpHeader)
maxLen := len("ssa/help")
for _, t := range debugTab {
if len(t.name) > maxLen {
maxLen = len(t.name)
}
}
for _, t := range debugTab {
fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help)
}
// ssa options have their own help
fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging")
fmt.Print(debugHelpFooter)
os.Exit(0)
}
val, valstring, haveInt := 1, "", true
if i := strings.IndexAny(name, "=:"); i >= 0 {
var err error
name, valstring = name[:i], name[i+1:]
val, err = strconv.Atoi(valstring)
if err != nil {
val, haveInt = 1, false
}
}
for _, t := range debugTab {
if t.name != name {
continue
}
switch vp := t.val.(type) {
case nil:
// Ignore
case *string:
*vp = valstring
case *int:
if !haveInt {
log.Fatalf("invalid debug value %v", name)
}
*vp = val
default:
panic("bad debugtab type")
}
continue Split
}
// special case for ssa for now
if DebugSSA != nil && strings.HasPrefix(name, "ssa/") {
// expect form ssa/phase/flag
// e.g. -d=ssa/generic_cse/time
// _ in phase name also matches space
phase := name[4:]
flag := "debug" // default flag is debug
if i := strings.Index(phase, "/"); i >= 0 {
flag = phase[i+1:]
phase = phase[:i]
}
err := DebugSSA(phase, flag, val, valstring)
if err != "" {
log.Fatalf("%s", err)
}
continue Split
}
log.Fatalf("unknown debug key -d %s\n", name)
}
}
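
Worked examples of -d strings as handled by parseDebug above (flag values illustrative):

-d=checkptr             // bare int key is shorthand for =1: Debug.Checkptr = 1
-d=checkptr=2,nil       // Debug.Checkptr = 2 and Debug.Nil = 1
-d=pctab=pctoline       // string-valued key: Debug.PCTab = "pctoline"
-d=ssa/prove/debug=2    // forwarded to DebugSSA("prove", "debug", 2, "2")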
const debugHelpHeader = `usage: -d arg[,arg]* and arg is <key>[=<value>]
<key> is one of:
`
const debugHelpFooter = `
<value> is key-specific.
Key "checkptr" supports values:
"0": instrumentation disabled
"1": conversions involving unsafe.Pointer are instrumented
"2": conversions to unsafe.Pointer force heap allocation
Key "pctab" supports values:
"pctospadj", "pctofile", "pctoline", "pctoinline", "pctopcdata"
`

View file

@ -0,0 +1,459 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package base
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"reflect"
"runtime"
"strings"
"cmd/internal/objabi"
"cmd/internal/sys"
)
func usage() {
fmt.Fprintf(os.Stderr, "usage: compile [options] file.go...\n")
objabi.Flagprint(os.Stderr)
Exit(2)
}
// Flag holds the parsed command-line flags.
// See ParseFlag for non-zero defaults.
var Flag CmdFlags
// A CountFlag is a counting integer flag.
// It accepts -name=value to set the value directly,
// but it also accepts -name with no =value to increment the count.
type CountFlag int
// CmdFlags defines the command-line flags (see var Flag).
// Each struct field is a different flag, by default named for the lower-case of the field name.
// If the flag name is a single letter, the default flag name is left upper-case.
// If the flag name is "Lower" followed by a single letter, the default flag name is the lower-case of the last letter.
//
// If this default flag name can't be made right, the `flag` struct tag can be used to replace it,
// but this should be done only in exceptional circumstances: it helps everyone if the flag name
// is obvious from the field name when the flag is used elsewhere in the compiler sources.
// The `flag:"-"` struct tag makes a field invisible to the flag logic and should also be used sparingly.
//
// Each field must have a `help` struct tag giving the flag help message.
//
// The allowed field types are bool, int, string, pointers to those (for values stored elsewhere),
// CountFlag (for a counting flag), and func(string) (for a flag that uses special code for parsing).
type CmdFlags struct {
// Single letters
B CountFlag "help:\"disable bounds checking\""
C CountFlag "help:\"disable printing of columns in error messages\""
D string "help:\"set relative `path` for local imports\""
E CountFlag "help:\"debug symbol export\""
I func(string) "help:\"add `directory` to import search path\""
K CountFlag "help:\"debug missing line numbers\""
L CountFlag "help:\"show full file names in error messages\""
N CountFlag "help:\"disable optimizations\""
S CountFlag "help:\"print assembly listing\""
// V is added by objabi.AddVersionFlag
W CountFlag "help:\"debug parse tree after type checking\""
LowerC int "help:\"concurrency during compilation (1 means no concurrency)\""
LowerD func(string) "help:\"enable debugging settings; try -d help\""
LowerE CountFlag "help:\"no limit on number of errors reported\""
LowerH CountFlag "help:\"halt on error\""
LowerJ CountFlag "help:\"debug runtime-initialized variables\""
LowerL CountFlag "help:\"disable inlining\""
LowerM CountFlag "help:\"print optimization decisions\""
LowerO string "help:\"write output to `file`\""
LowerP *string "help:\"set expected package import `path`\"" // &Ctxt.Pkgpath, set below
LowerR CountFlag "help:\"debug generated wrappers\""
LowerT bool "help:\"enable tracing for debugging the compiler\""
LowerW CountFlag "help:\"debug type checking\""
LowerV *bool "help:\"increase debug verbosity\""
// Special characters
Percent int "flag:\"%\" help:\"debug non-static initializers\""
CompilingRuntime bool "flag:\"+\" help:\"compiling runtime\""
// Longer names
ABIWrap bool "help:\"enable generation of ABI wrappers\""
ABIWrapLimit int "help:\"emit at most N ABI wrappers (for debugging)\""
AsmHdr string "help:\"write assembly header to `file`\""
Bench string "help:\"append benchmark times to `file`\""
BlockProfile string "help:\"write block profile to `file`\""
BuildID string "help:\"record `id` as the build id in the export metadata\""
CPUProfile string "help:\"write cpu profile to `file`\""
Complete bool "help:\"compiling complete package (no C or assembly)\""
Dwarf bool "help:\"generate DWARF symbols\""
DwarfBASEntries *bool "help:\"use base address selection entries in DWARF\"" // &Ctxt.UseBASEntries, set below
DwarfLocationLists *bool "help:\"add location lists to DWARF in optimized mode\"" // &Ctxt.Flag_locationlists, set below
Dynlink *bool "help:\"support references to Go symbols defined in other shared libraries\"" // &Ctxt.Flag_dynlink, set below
EmbedCfg func(string) "help:\"read go:embed configuration from `file`\""
GenDwarfInl int "help:\"generate DWARF inline info records\"" // 0=disabled, 1=funcs, 2=funcs+formals/locals
GoVersion string "help:\"required version of the runtime\""
ImportCfg func(string) "help:\"read import configuration from `file`\""
ImportMap func(string) "help:\"add `definition` of the form source=actual to import map\""
InstallSuffix string "help:\"set pkg directory `suffix`\""
JSON string "help:\"version,file for JSON compiler/optimizer detail output\""
Lang string "help:\"Go language version source code expects\""
LinkObj string "help:\"write linker-specific object to `file`\""
LinkShared *bool "help:\"generate code that will be linked against Go shared libraries\"" // &Ctxt.Flag_linkshared, set below
Live CountFlag "help:\"debug liveness analysis\""
MSan bool "help:\"build code compatible with C/C++ memory sanitizer\""
MemProfile string "help:\"write memory profile to `file`\""
MemProfileRate int64 "help:\"set runtime.MemProfileRate to `rate`\""
MutexProfile string "help:\"write mutex profile to `file`\""
NoLocalImports bool "help:\"reject local (relative) imports\""
Pack bool "help:\"write to file.a instead of file.o\""
Race bool "help:\"enable race detector\""
Shared *bool "help:\"generate code that can be linked into a shared library\"" // &Ctxt.Flag_shared, set below
SmallFrames bool "help:\"reduce the size limit for stack allocated objects\"" // small stacks, to diagnose GC latency; see golang.org/issue/27732
Spectre string "help:\"enable spectre mitigations in `list` (all, index, ret)\""
Std bool "help:\"compiling standard library\""
SymABIs string "help:\"read symbol ABIs from `file`\""
TraceProfile string "help:\"write an execution trace to `file`\""
TrimPath string "help:\"remove `prefix` from recorded source file paths\""
WB bool "help:\"enable write barrier\"" // TODO: remove
// Configuration derived from flags; not a flag itself.
Cfg struct {
Embed struct { // set by -embedcfg
Patterns map[string][]string
Files map[string]string
}
ImportDirs []string // appended to by -I
ImportMap map[string]string // set by -importmap OR -importcfg
PackageFile map[string]string // set by -importcfg; nil means not in use
SpectreIndex bool // set by -spectre=index or -spectre=all
// Whether we are adding any sort of code instrumentation, such as
// when the race detector is enabled.
Instrumenting bool
}
}
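
To make the naming rules above concrete, the default flag names derived by registerFlags (below) from these fields are:

N        -> -N        // a single letter keeps its case
LowerC   -> -c        // "Lower" plus a letter becomes that lower-case letter
AsmHdr   -> -asmhdr   // longer names are simply lower-cased
Percent  -> -%        // a `flag:"%"` struct tag overrides the default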
// ParseFlags parses the command-line flags into Flag.
func ParseFlags() {
Flag.I = addImportDir
Flag.LowerC = 1
Flag.LowerD = parseDebug
Flag.LowerP = &Ctxt.Pkgpath
Flag.LowerV = &Ctxt.Debugvlog
Flag.ABIWrap = objabi.Regabi_enabled != 0
Flag.Dwarf = objabi.GOARCH != "wasm"
Flag.DwarfBASEntries = &Ctxt.UseBASEntries
Flag.DwarfLocationLists = &Ctxt.Flag_locationlists
*Flag.DwarfLocationLists = true
Flag.Dynlink = &Ctxt.Flag_dynlink
Flag.EmbedCfg = readEmbedCfg
Flag.GenDwarfInl = 2
Flag.ImportCfg = readImportCfg
Flag.ImportMap = addImportMap
Flag.LinkShared = &Ctxt.Flag_linkshared
Flag.Shared = &Ctxt.Flag_shared
Flag.WB = true
Flag.Cfg.ImportMap = make(map[string]string)
objabi.AddVersionFlag() // -V
registerFlags()
objabi.Flagparse(usage)
if Flag.MSan && !sys.MSanSupported(objabi.GOOS, objabi.GOARCH) {
log.Fatalf("%s/%s does not support -msan", objabi.GOOS, objabi.GOARCH)
}
if Flag.Race && !sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) {
log.Fatalf("%s/%s does not support -race", objabi.GOOS, objabi.GOARCH)
}
if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X) {
log.Fatalf("%s/%s does not support -shared", objabi.GOOS, objabi.GOARCH)
}
parseSpectre(Flag.Spectre) // left as string for RecordFlags
Ctxt.Flag_shared = Ctxt.Flag_dynlink || Ctxt.Flag_shared
Ctxt.Flag_optimize = Flag.N == 0
Ctxt.Debugasm = int(Flag.S)
if flag.NArg() < 1 {
usage()
}
if Flag.GoVersion != "" && Flag.GoVersion != runtime.Version() {
fmt.Printf("compile: version %q does not match go tool version %q\n", runtime.Version(), Flag.GoVersion)
Exit(2)
}
if Flag.LowerO == "" {
p := flag.Arg(0)
if i := strings.LastIndex(p, "/"); i >= 0 {
p = p[i+1:]
}
if runtime.GOOS == "windows" {
if i := strings.LastIndex(p, `\`); i >= 0 {
p = p[i+1:]
}
}
if i := strings.LastIndex(p, "."); i >= 0 {
p = p[:i]
}
suffix := ".o"
if Flag.Pack {
suffix = ".a"
}
Flag.LowerO = p + suffix
}
if Flag.Race && Flag.MSan {
log.Fatal("cannot use both -race and -msan")
}
if Flag.Race || Flag.MSan {
// -race and -msan imply -d=checkptr for now.
Debug.Checkptr = 1
}
if Flag.CompilingRuntime && Flag.N != 0 {
log.Fatal("cannot disable optimizations while compiling runtime")
}
if Flag.LowerC < 1 {
log.Fatalf("-c must be at least 1, got %d", Flag.LowerC)
}
if Flag.LowerC > 1 && !concurrentBackendAllowed() {
log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args)
}
if Flag.CompilingRuntime {
// Runtime can't use -d=checkptr, at least not yet.
Debug.Checkptr = 0
// Fuzzing the runtime isn't interesting either.
Debug.Libfuzzer = 0
}
// set via a -d flag
Ctxt.Debugpcln = Debug.PCTab
}
// registerFlags adds flag registrations for all the fields in Flag.
// See the comment on type CmdFlags for the rules.
func registerFlags() {
var (
boolType = reflect.TypeOf(bool(false))
intType = reflect.TypeOf(int(0))
stringType = reflect.TypeOf(string(""))
ptrBoolType = reflect.TypeOf(new(bool))
ptrIntType = reflect.TypeOf(new(int))
ptrStringType = reflect.TypeOf(new(string))
countType = reflect.TypeOf(CountFlag(0))
funcType = reflect.TypeOf((func(string))(nil))
)
v := reflect.ValueOf(&Flag).Elem()
t := v.Type()
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Name == "Cfg" {
continue
}
var name string
if len(f.Name) == 1 {
name = f.Name
} else if len(f.Name) == 6 && f.Name[:5] == "Lower" && 'A' <= f.Name[5] && f.Name[5] <= 'Z' {
name = string(rune(f.Name[5] + 'a' - 'A'))
} else {
name = strings.ToLower(f.Name)
}
if tag := f.Tag.Get("flag"); tag != "" {
name = tag
}
help := f.Tag.Get("help")
if help == "" {
panic(fmt.Sprintf("base.Flag.%s is missing help text", f.Name))
}
if k := f.Type.Kind(); (k == reflect.Ptr || k == reflect.Func) && v.Field(i).IsNil() {
panic(fmt.Sprintf("base.Flag.%s is uninitialized %v", f.Name, f.Type))
}
switch f.Type {
case boolType:
p := v.Field(i).Addr().Interface().(*bool)
flag.BoolVar(p, name, *p, help)
case intType:
p := v.Field(i).Addr().Interface().(*int)
flag.IntVar(p, name, *p, help)
case stringType:
p := v.Field(i).Addr().Interface().(*string)
flag.StringVar(p, name, *p, help)
case ptrBoolType:
p := v.Field(i).Interface().(*bool)
flag.BoolVar(p, name, *p, help)
case ptrIntType:
p := v.Field(i).Interface().(*int)
flag.IntVar(p, name, *p, help)
case ptrStringType:
p := v.Field(i).Interface().(*string)
flag.StringVar(p, name, *p, help)
case countType:
p := (*int)(v.Field(i).Addr().Interface().(*CountFlag))
objabi.Flagcount(name, help, p)
case funcType:
f := v.Field(i).Interface().(func(string))
objabi.Flagfn1(name, help, f)
}
}
}
// concurrentFlagOk reports whether the current compiler flags
// are compatible with concurrent compilation.
func concurrentFlagOk() bool {
// TODO(rsc): Many of these are fine. Remove them.
return Flag.Percent == 0 &&
Flag.E == 0 &&
Flag.K == 0 &&
Flag.L == 0 &&
Flag.LowerH == 0 &&
Flag.LowerJ == 0 &&
Flag.LowerM == 0 &&
Flag.LowerR == 0
}
func concurrentBackendAllowed() bool {
if !concurrentFlagOk() {
return false
}
// Flag.S by itself is ok, because all printing occurs
// while writing the object file, and that is non-concurrent.
// Adding Ctxt.Debugvlog, however, causes Flag.S to also print
// while flushing the plist, which happens concurrently.
if Ctxt.Debugvlog || Debug.Any() || Flag.Live > 0 {
return false
}
// TODO: Test and delete this condition.
if objabi.Fieldtrack_enabled != 0 {
return false
}
// TODO: fix races and enable the following flags
if Ctxt.Flag_shared || Ctxt.Flag_dynlink || Flag.Race {
return false
}
return true
}
func addImportDir(dir string) {
if dir != "" {
Flag.Cfg.ImportDirs = append(Flag.Cfg.ImportDirs, dir)
}
}
func addImportMap(s string) {
if Flag.Cfg.ImportMap == nil {
Flag.Cfg.ImportMap = make(map[string]string)
}
if strings.Count(s, "=") != 1 {
log.Fatal("-importmap argument must be of the form source=actual")
}
i := strings.Index(s, "=")
source, actual := s[:i], s[i+1:]
if source == "" || actual == "" {
log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty")
}
Flag.Cfg.ImportMap[source] = actual
}
func readImportCfg(file string) {
if Flag.Cfg.ImportMap == nil {
Flag.Cfg.ImportMap = make(map[string]string)
}
Flag.Cfg.PackageFile = map[string]string{}
data, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("-importcfg: %v", err)
}
for lineNum, line := range strings.Split(string(data), "\n") {
lineNum++ // 1-based
line = strings.TrimSpace(line)
if line == "" || strings.HasPrefix(line, "#") {
continue
}
var verb, args string
if i := strings.Index(line, " "); i < 0 {
verb = line
} else {
verb, args = line[:i], strings.TrimSpace(line[i+1:])
}
var before, after string
if i := strings.Index(args, "="); i >= 0 {
before, after = args[:i], args[i+1:]
}
switch verb {
default:
log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb)
case "importmap":
if before == "" || after == "" {
log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum)
}
Flag.Cfg.ImportMap[before] = after
case "packagefile":
if before == "" || after == "" {
log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
}
Flag.Cfg.PackageFile[before] = after
}
}
}
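
An illustrative importcfg accepted by this parser (package paths and file names hypothetical):

# comment lines and blank lines are ignored
importmap old.example/pkg=new.example/pkg
packagefile fmt=/home/user/.cache/go-build/ab/cdef0123.a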
func readEmbedCfg(file string) {
data, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("-embedcfg: %v", err)
}
if err := json.Unmarshal(data, &Flag.Cfg.Embed); err != nil {
log.Fatalf("%s: %v", file, err)
}
if Flag.Cfg.Embed.Patterns == nil {
log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
}
if Flag.Cfg.Embed.Files == nil {
log.Fatalf("%s: invalid embedcfg: missing Files", file)
}
}
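
The embedcfg JSON decodes into Flag.Cfg.Embed; an illustrative file (names hypothetical):

{
	"Patterns": {
		"*.tmpl": ["page.tmpl"]
	},
	"Files": {
		"page.tmpl": "/src/proj/page.tmpl"
	}
}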
// parseSpectre parses the spectre configuration from the string s.
func parseSpectre(s string) {
for _, f := range strings.Split(s, ",") {
f = strings.TrimSpace(f)
switch f {
default:
log.Fatalf("unknown setting -spectre=%s", f)
case "":
// nothing
case "all":
Flag.Cfg.SpectreIndex = true
Ctxt.Retpoline = true
case "index":
Flag.Cfg.SpectreIndex = true
case "ret":
Ctxt.Retpoline = true
}
}
if Flag.Cfg.SpectreIndex {
switch objabi.GOARCH {
case "amd64":
// ok
default:
log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH)
}
}
}
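
For reference, the accepted -spectre settings compose as follows:

-spectre=index      // Flag.Cfg.SpectreIndex = true (amd64 only)
-spectre=ret        // Ctxt.Retpoline = true
-spectre=all        // both of the above
-spectre=index,ret  // equivalent to all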

View file

@ -0,0 +1,36 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package base
import (
"cmd/internal/obj"
)
var Ctxt *obj.Link
// TODO(mdempsky): These should probably be obj.Link methods.
// PkgLinksym returns the linker symbol for name within the given
// package prefix. For user packages, prefix should be the package
// path encoded with objabi.PathToPrefix.
func PkgLinksym(prefix, name string, abi obj.ABI) *obj.LSym {
if name == "_" {
// TODO(mdempsky): Cleanup callers and Fatalf instead.
return linksym(prefix, "_", abi)
}
return linksym(prefix, prefix+"."+name, abi)
}
// Linkname returns the linker symbol for the given name as it might
// appear within a //go:linkname directive.
func Linkname(name string, abi obj.ABI) *obj.LSym {
return linksym("_", name, abi)
}
// linksym is an internal helper function for implementing the above
// exported APIs.
func linksym(pkg, name string, abi obj.ABI) *obj.LSym {
return Ctxt.LookupABIInit(name, abi, func(r *obj.LSym) { r.Pkg = pkg })
}
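
Illustrative results, assuming a user package with import path "example.com/p" (names hypothetical):

PkgLinksym("example.com/p", "F", obj.ABI0)   // LSym named "example.com/p.F"
PkgLinksym("example.com/p", "_", obj.ABI0)   // LSym named "_" (blank is special-cased)
Linkname("runtime.throw", obj.ABIInternal)   // LSym named "runtime.throw"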

View file

@ -0,0 +1,264 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package base
import (
"fmt"
"os"
"runtime/debug"
"sort"
"strings"
"cmd/internal/objabi"
"cmd/internal/src"
)
// An errorMsg is a queued error message, waiting to be printed.
type errorMsg struct {
pos src.XPos
msg string
}
// Pos is the current source position being processed,
// printed by Errorf, ErrorfLang, Fatalf, and Warnf.
var Pos src.XPos
var (
errorMsgs []errorMsg
numErrors int // number of entries in errorMsgs that are errors (as opposed to warnings)
numSyntaxErrors int
)
// Errors returns the number of errors reported.
func Errors() int {
return numErrors
}
// SyntaxErrors returns the number of syntax errors reported.
func SyntaxErrors() int {
return numSyntaxErrors
}
// addErrorMsg adds a new errorMsg (which may be a warning) to errorMsgs.
func addErrorMsg(pos src.XPos, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
// Only add the position if we know the position.
// See issue golang.org/issue/11361.
if pos.IsKnown() {
msg = fmt.Sprintf("%v: %s", FmtPos(pos), msg)
}
errorMsgs = append(errorMsgs, errorMsg{
pos: pos,
msg: msg + "\n",
})
}
// FmtPos formats pos as a file:line string.
func FmtPos(pos src.XPos) string {
if Ctxt == nil {
return "???"
}
return Ctxt.OutermostPos(pos).Format(Flag.C == 0, Flag.L == 1)
}
// byPos sorts errors by source position.
type byPos []errorMsg
func (x byPos) Len() int { return len(x) }
func (x byPos) Less(i, j int) bool { return x[i].pos.Before(x[j].pos) }
func (x byPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
// FlushErrors sorts errors seen so far by line number, prints them to stdout,
// and empties the errors array.
func FlushErrors() {
if Ctxt != nil && Ctxt.Bso != nil {
Ctxt.Bso.Flush()
}
if len(errorMsgs) == 0 {
return
}
sort.Stable(byPos(errorMsgs))
for i, err := range errorMsgs {
if i == 0 || err.msg != errorMsgs[i-1].msg {
fmt.Printf("%s", err.msg)
}
}
errorMsgs = errorMsgs[:0]
}
// lasterror keeps track of the most recently issued error,
// to avoid printing multiple error messages on the same line.
var lasterror struct {
syntax src.XPos // source position of last syntax error
other src.XPos // source position of last non-syntax error
msg string // error message of last non-syntax error
}
// sameline reports whether two positions a, b are on the same line.
func sameline(a, b src.XPos) bool {
p := Ctxt.PosTable.Pos(a)
q := Ctxt.PosTable.Pos(b)
return p.Base() == q.Base() && p.Line() == q.Line()
}
// Errorf reports a formatted error at the current line.
func Errorf(format string, args ...interface{}) {
ErrorfAt(Pos, format, args...)
}
// ErrorfAt reports a formatted error message at pos.
func ErrorfAt(pos src.XPos, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if strings.HasPrefix(msg, "syntax error") {
numSyntaxErrors++
// only one syntax error per line, no matter what error
if sameline(lasterror.syntax, pos) {
return
}
lasterror.syntax = pos
} else {
// only one of multiple equal non-syntax errors per line
// (FlushErrors shows only one of them, so we filter them
// here as best we can (they may not appear in order)
// so that we don't count duplicates, hit the error limit
// early, and then have nothing to show for it.)
if sameline(lasterror.other, pos) && lasterror.msg == msg {
return
}
lasterror.other = pos
lasterror.msg = msg
}
addErrorMsg(pos, "%s", msg)
numErrors++
hcrash()
if numErrors >= 10 && Flag.LowerE == 0 {
FlushErrors()
fmt.Printf("%v: too many errors\n", FmtPos(pos))
ErrorExit()
}
}
// ErrorfVers reports that a language feature (format, args) requires a later version of Go.
func ErrorfVers(lang string, format string, args ...interface{}) {
Errorf("%s requires %s or later (-lang was set to %s; check go.mod)", fmt.Sprintf(format, args...), lang, Flag.Lang)
}
// UpdateErrorDot is a clumsy hack that rewrites the last error,
// if it was "LINE: undefined: NAME", to be "LINE: undefined: NAME in EXPR".
// It is used to give better error messages for dot (selector) expressions.
func UpdateErrorDot(line string, name, expr string) {
if len(errorMsgs) == 0 {
return
}
e := &errorMsgs[len(errorMsgs)-1]
if strings.HasPrefix(e.msg, line) && e.msg == fmt.Sprintf("%v: undefined: %v\n", line, name) {
e.msg = fmt.Sprintf("%v: undefined: %v in %v\n", line, name, expr)
}
}
// Warn reports a formatted warning at the current line.
// In general the Go compiler does NOT generate warnings,
// so this should be used only when the user has opted in
// to additional output by setting a particular flag.
func Warn(format string, args ...interface{}) {
WarnfAt(Pos, format, args...)
}
// WarnfAt reports a formatted warning at pos.
// In general the Go compiler does NOT generate warnings,
// so this should be used only when the user has opted in
// to additional output by setting a particular flag.
func WarnfAt(pos src.XPos, format string, args ...interface{}) {
addErrorMsg(pos, format, args...)
if Flag.LowerM != 0 {
FlushErrors()
}
}
// Fatalf reports a fatal error - an internal problem - at the current line and exits.
// If other errors have already been printed, then Fatalf just quietly exits.
// (The internal problem may have been caused by incomplete information
// after the already-reported errors, so best to let users fix those and
// try again without being bothered about a spurious internal error.)
//
// But if no errors have been printed, or if -d panic has been specified,
// Fatalf prints the error as an "internal compiler error". In a released build,
// it prints an error asking to file a bug report. In development builds, it
// prints a stack trace.
//
// If -h has been specified, Fatalf panics to force the usual runtime info dump.
func Fatalf(format string, args ...interface{}) {
FatalfAt(Pos, format, args...)
}
// FatalfAt reports a fatal error - an internal problem - at pos and exits.
// If other errors have already been printed, then FatalfAt just quietly exits.
// (The internal problem may have been caused by incomplete information
// after the already-reported errors, so best to let users fix those and
// try again without being bothered about a spurious internal error.)
//
// But if no errors have been printed, or if -d panic has been specified,
// FatalfAt prints the error as an "internal compiler error". In a released build,
// it prints an error asking to file a bug report. In development builds, it
// prints a stack trace.
//
// If -h has been specified, FatalfAt panics to force the usual runtime info dump.
func FatalfAt(pos src.XPos, format string, args ...interface{}) {
FlushErrors()
if Debug.Panic != 0 || numErrors == 0 {
fmt.Printf("%v: internal compiler error: ", FmtPos(pos))
fmt.Printf(format, args...)
fmt.Printf("\n")
// If this is a released compiler version, ask for a bug report.
if strings.HasPrefix(objabi.Version, "go") {
fmt.Printf("\n")
fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
fmt.Printf("https://golang.org/issue/new\n")
} else {
// Not a release; dump a stack trace, too.
fmt.Println()
os.Stdout.Write(debug.Stack())
fmt.Println()
}
}
hcrash()
ErrorExit()
}
// hcrash crashes the compiler when -h is set, to find out where a message is generated.
func hcrash() {
if Flag.LowerH != 0 {
FlushErrors()
if Flag.LowerO != "" {
os.Remove(Flag.LowerO)
}
panic("-h")
}
}
// ErrorExit handles an error-status exit.
// It flushes any pending errors, removes the output file, and exits.
func ErrorExit() {
FlushErrors()
if Flag.LowerO != "" {
os.Remove(Flag.LowerO)
}
os.Exit(2)
}
// ExitIfErrors calls ErrorExit if any errors have been reported.
func ExitIfErrors() {
if Errors() > 0 {
ErrorExit()
}
}
var AutogeneratedPos src.XPos

View file

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package base
import (
"fmt"
@ -11,6 +11,8 @@ import (
"time"
)
var Timer Timings
// Timings collects the execution times of labeled phases
// which are added through a sequence of Start/Stop calls.
// Events may be associated with each phase via AddEvent.

View file

@ -0,0 +1,190 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bitvec
import (
"math/bits"
"cmd/compile/internal/base"
)
const (
wordBits = 32
wordMask = wordBits - 1
wordShift = 5
)
// A BitVec is a bit vector.
type BitVec struct {
N int32 // number of bits in vector
B []uint32 // words holding bits
}
// New returns a zeroed bit vector capable of holding n bits.
func New(n int32) BitVec {
nword := (n + wordBits - 1) / wordBits
return BitVec{n, make([]uint32, nword)}
}
// A Bulk is a bulk allocator for BitVecs of equal length:
// a single backing slice is carved into vectors by Next.
type Bulk struct {
words []uint32
nbit int32
nword int32
}
func NewBulk(nbit int32, count int32) Bulk {
nword := (nbit + wordBits - 1) / wordBits
size := int64(nword) * int64(count)
if int64(int32(size*4)) != size*4 {
base.Fatalf("NewBulk too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
}
return Bulk{
words: make([]uint32, size),
nbit: nbit,
nword: nword,
}
}
func (b *Bulk) Next() BitVec {
out := BitVec{b.nbit, b.words[:b.nword]}
b.words = b.words[b.nword:]
return out
}
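
A sketch of the intended Bulk usage: one allocation backs many equal-length vectors, each carved off by Next (hypothetical demo; note that cmd/compile/internal packages are not importable outside cmd):

package main

import (
	"fmt"

	"cmd/compile/internal/bitvec"
)

func main() {
	bulk := bitvec.NewBulk(32, 4) // room for four 32-bit vectors
	bv0, bv1 := bulk.Next(), bulk.Next()
	bv0.Set(3)
	fmt.Println(bv0.Get(3), bv1.Get(3)) // true false: the vectors do not alias
}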
func (bv1 BitVec) Eq(bv2 BitVec) bool {
if bv1.N != bv2.N {
base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.N, bv2.N)
}
for i, x := range bv1.B {
if x != bv2.B[i] {
return false
}
}
return true
}
func (dst BitVec) Copy(src BitVec) {
copy(dst.B, src.B)
}
func (bv BitVec) Get(i int32) bool {
if i < 0 || i >= bv.N {
base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.N)
}
mask := uint32(1 << uint(i%wordBits))
return bv.B[i>>wordShift]&mask != 0
}
func (bv BitVec) Set(i int32) {
if i < 0 || i >= bv.N {
base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.N)
}
mask := uint32(1 << uint(i%wordBits))
bv.B[i/wordBits] |= mask
}
func (bv BitVec) Unset(i int32) {
if i < 0 || i >= bv.N {
base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.N)
}
mask := uint32(1 << uint(i%wordBits))
bv.B[i/wordBits] &^= mask
}
// Next returns the smallest index >= i for which bv.Get(i) is true.
// If there is no such index, Next returns -1.
func (bv BitVec) Next(i int32) int32 {
if i >= bv.N {
return -1
}
// Jump i ahead to next word with bits.
if bv.B[i>>wordShift]>>uint(i&wordMask) == 0 {
i &^= wordMask
i += wordBits
for i < bv.N && bv.B[i>>wordShift] == 0 {
i += wordBits
}
}
if i >= bv.N {
return -1
}
// Find 1 bit.
w := bv.B[i>>wordShift] >> uint(i&wordMask)
i += int32(bits.TrailingZeros32(w))
return i
}
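
The usual iteration idiom over set bits, given the Next semantics above (sketch):

for i := bv.Next(0); i >= 0; i = bv.Next(i + 1) {
	// i is the index of a set bit
}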
func (bv BitVec) IsEmpty() bool {
for _, x := range bv.B {
if x != 0 {
return false
}
}
return true
}
func (bv BitVec) Not() {
for i, x := range bv.B {
bv.B[i] = ^x
}
}
// union
func (dst BitVec) Or(src1, src2 BitVec) {
if len(src1.B) == 0 {
return
}
_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
for i, x := range src1.B {
dst.B[i] = x | src2.B[i]
}
}
// intersection
func (dst BitVec) And(src1, src2 BitVec) {
if len(src1.B) == 0 {
return
}
_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
for i, x := range src1.B {
dst.B[i] = x & src2.B[i]
}
}
// difference
func (dst BitVec) AndNot(src1, src2 BitVec) {
if len(src1.B) == 0 {
return
}
_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
for i, x := range src1.B {
dst.B[i] = x &^ src2.B[i]
}
}
func (bv BitVec) String() string {
s := make([]byte, 2+bv.N)
copy(s, "#*")
for i := int32(0); i < bv.N; i++ {
ch := byte('0')
if bv.Get(i) {
ch = '1'
}
s[2+i] = ch
}
return string(s)
}
func (bv BitVec) Clear() {
for i := range bv.B {
bv.B[i] = 0
}
}

View file

@ -0,0 +1,152 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package deadcode
import (
"go/constant"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
)
// Func performs dead-code elimination on fn's body,
// collapsing a body that consists only of provably
// dead constructs into a single empty block.
func Func(fn *ir.Func) {
stmts(&fn.Body)
if len(fn.Body) == 0 {
return
}
for _, n := range fn.Body {
if len(n.Init()) > 0 {
return
}
switch n.Op() {
case ir.OIF:
n := n.(*ir.IfStmt)
if !ir.IsConst(n.Cond, constant.Bool) || len(n.Body) > 0 || len(n.Else) > 0 {
return
}
case ir.OFOR:
n := n.(*ir.ForStmt)
if !ir.IsConst(n.Cond, constant.Bool) || ir.BoolVal(n.Cond) {
return
}
default:
return
}
}
fn.Body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)}
}
func stmts(nn *ir.Nodes) {
var lastLabel = -1
for i, n := range *nn {
if n != nil && n.Op() == ir.OLABEL {
lastLabel = i
}
}
for i, n := range *nn {
// cut is set to true when all nodes after the i'th position
// should be removed. In other words, it marks the whole
// slice "tail" as dead.
cut := false
if n == nil {
continue
}
if n.Op() == ir.OIF {
n := n.(*ir.IfStmt)
n.Cond = expr(n.Cond)
if ir.IsConst(n.Cond, constant.Bool) {
var body ir.Nodes
if ir.BoolVal(n.Cond) {
n.Else = ir.Nodes{}
body = n.Body
} else {
n.Body = ir.Nodes{}
body = n.Else
}
// If "then" or "else" branch ends with panic or return statement,
// it is safe to remove all statements after this node.
// isterminating is not used to avoid goto-related complications.
// We must be careful not to deadcode-remove labels, as they
// might be the target of a goto. See issue 28616.
if len(body) != 0 {
switch body[len(body)-1].Op() {
case ir.ORETURN, ir.OTAILCALL, ir.OPANIC:
if i > lastLabel {
cut = true
}
}
}
}
}
if len(n.Init()) != 0 {
stmts(n.(ir.InitNode).PtrInit())
}
switch n.Op() {
case ir.OBLOCK:
n := n.(*ir.BlockStmt)
stmts(&n.List)
case ir.OFOR:
n := n.(*ir.ForStmt)
stmts(&n.Body)
case ir.OIF:
n := n.(*ir.IfStmt)
stmts(&n.Body)
stmts(&n.Else)
case ir.ORANGE:
n := n.(*ir.RangeStmt)
stmts(&n.Body)
case ir.OSELECT:
n := n.(*ir.SelectStmt)
for _, cas := range n.Cases {
stmts(&cas.Body)
}
case ir.OSWITCH:
n := n.(*ir.SwitchStmt)
for _, cas := range n.Cases {
stmts(&cas.Body)
}
}
if cut {
*nn = (*nn)[:i+1]
break
}
}
}
func expr(n ir.Node) ir.Node {
// Perform dead-code elimination on short-circuited boolean
// expressions involving constants with the intent of
// producing a constant 'if' condition.
switch n.Op() {
case ir.OANDAND:
n := n.(*ir.LogicalExpr)
n.X = expr(n.X)
n.Y = expr(n.Y)
if ir.IsConst(n.X, constant.Bool) {
if ir.BoolVal(n.X) {
return n.Y // true && x => x
} else {
return n.X // false && x => false
}
}
case ir.OOROR:
n := n.(*ir.LogicalExpr)
n.X = expr(n.X)
n.Y = expr(n.Y)
if ir.IsConst(n.X, constant.Bool) {
if ir.BoolVal(n.X) {
return n.X // true || x => true
} else {
return n.Y // false || x => x
}
}
}
return n
}
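
Illustrative of what the pass folds, on a hypothetical input function (trace and cleanup are placeholders):

const debug = false

func f() {
	if debug { // constant-false condition: the then-branch is dropped
		trace()
	} else {
		return // kept; and because it ends in a return...
	}
	cleanup() // ...this statement is dead and is cut by stmts
}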

View file

@ -0,0 +1,85 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package devirtualize implements a simple "devirtualization"
// optimization pass, which replaces interface method calls with
// direct concrete-type method calls where possible.
package devirtualize
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
)
// Func devirtualizes calls within fn where possible.
func Func(fn *ir.Func) {
ir.CurFunc = fn
ir.VisitList(fn.Body, func(n ir.Node) {
if call, ok := n.(*ir.CallExpr); ok {
Call(call)
}
})
}
// Call devirtualizes the given call if possible.
func Call(call *ir.CallExpr) {
if call.Op() != ir.OCALLINTER {
return
}
sel := call.X.(*ir.SelectorExpr)
r := ir.StaticValue(sel.X)
if r.Op() != ir.OCONVIFACE {
return
}
recv := r.(*ir.ConvExpr)
typ := recv.X.Type()
if typ.IsInterface() {
return
}
dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil)
dt.SetType(typ)
x := typecheck.Callee(ir.NewSelectorExpr(sel.Pos(), ir.OXDOT, dt, sel.Sel))
switch x.Op() {
case ir.ODOTMETH:
x := x.(*ir.SelectorExpr)
if base.Flag.LowerM != 0 {
base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ)
}
call.SetOp(ir.OCALLMETH)
call.X = x
case ir.ODOTINTER:
// Promoted method from embedded interface-typed field (#42279).
x := x.(*ir.SelectorExpr)
if base.Flag.LowerM != 0 {
base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ)
}
call.SetOp(ir.OCALLINTER)
call.X = x
default:
// TODO(mdempsky): Turn back into Fatalf after more testing.
if base.Flag.LowerM != 0 {
base.WarnfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op())
}
return
}
// Duplicated logic from typecheck for function call return
// value types.
//
// Receiver parameter size may have changed; need to update
// call.Type to get correct stack offsets for result
// parameters.
types.CheckSize(x.Type())
switch ft := x.Type(); ft.NumResults() {
case 0:
case 1:
call.SetType(ft.Results().Field(0).Type)
default:
call.SetType(ft.Results())
}
}
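
The kind of call the pass rewrites (illustrative):

var b bytes.Buffer
var w io.Writer = &b  // the static value of w is the concrete *bytes.Buffer
w.Write([]byte("hi")) // OCALLINTER rewritten to the direct call
                      // (&b).Write(...), i.e. OCALLMETH on *bytes.Buffer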

View file

@ -0,0 +1,458 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dwarfgen
import (
"bytes"
"flag"
"fmt"
"sort"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
)
// Info assembles the DWARF scope tree and inlined-call records
// for curfn (an *ir.Func), identified by its symbol fnsym.
func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
fn := curfn.(*ir.Func)
if fn.Nname != nil {
expect := fn.Linksym()
if fnsym.ABI() == obj.ABI0 {
expect = fn.LinksymABI(obj.ABI0)
}
if fnsym != expect {
base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
}
}
// Back when there were two different *Funcs for a function, this code
// was not consistent about whether a particular *Node being processed
// was an ODCLFUNC or ONAME node. Partly this is because inlined function
// bodies have no ODCLFUNC node, which was its own inconsistency.
// In any event, the handling of the two different nodes for DWARF purposes
// was subtly different, likely in unintended ways. CL 272253 merged the
// two nodes' Func fields, so that code sees the same *Func whether it is
// holding the ODCLFUNC or the ONAME. This resulted in changes in the
// DWARF output. To preserve the existing DWARF output and leave an
// intentional change for a future CL, this code does the following when
// fn.Op == ONAME:
//
// 1. Disallow use of createComplexVars in createDwarfVars.
// It was not possible to reach that code for an ONAME before,
// because the DebugInfo was set only on the ODCLFUNC Func.
// Calling into it in the ONAME case causes an index out of bounds panic.
//
// 2. Do not populate apdecls. fn.Func.Dcl was in the ODCLFUNC Func,
// not the ONAME Func. Populating apdecls for the ONAME case results
// in selected being populated after createSimpleVars is called in
// createDwarfVars, and then that causes the loop to skip all the entries
// in dcl, meaning that the RecordAutoType calls don't happen.
//
// These two adjustments keep toolstash -cmp working for now.
// Deciding the right answer is, as they say, future work.
//
// We can tell the difference between the old ODCLFUNC and ONAME
// cases by looking at the infosym.Name. If it's empty, DebugInfo is
// being called from (*obj.Link).populateDWARF, which used to use
// the ODCLFUNC. If it's non-empty (the name will end in $abstract),
// DebugInfo is being called from (*obj.Link).DwarfAbstractFunc,
// which used to use the ONAME form.
isODCLFUNC := infosym.Name == ""
var apdecls []*ir.Name
// Populate decls for fn.
if isODCLFUNC {
for _, n := range fn.Dcl {
if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL
continue
}
switch n.Class {
case ir.PAUTO:
if !n.Used() {
// Text == nil -> generating abstract function
if fnsym.Func().Text != nil {
base.Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
}
continue
}
case ir.PPARAM, ir.PPARAMOUT:
default:
continue
}
apdecls = append(apdecls, n)
fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
}
}
decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn, apdecls)
// For each type referenced by the function's auto vars but not
// already referenced by a DWARF var, attach an R_USETYPE relocation to
// the function symbol to ensure that the type is included in DWARF
// processing during linking.
typesyms := []*obj.LSym{}
for t := range fnsym.Func().Autot {
typesyms = append(typesyms, t)
}
sort.Sort(obj.BySymName(typesyms))
for _, sym := range typesyms {
r := obj.Addrel(infosym)
r.Sym = sym
r.Type = objabi.R_USETYPE
}
fnsym.Func().Autot = nil
var varScopes []ir.ScopeID
for _, decl := range decls {
pos := declPos(decl)
varScopes = append(varScopes, findScope(fn.Marks, pos))
}
scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
var inlcalls dwarf.InlCalls
if base.Flag.GenDwarfInl > 0 {
inlcalls = assembleInlines(fnsym, dwarfVars)
}
return scopes, inlcalls
}
func declPos(decl *ir.Name) src.XPos {
return decl.Canonical().Pos()
}
// createDwarfVars processes fn, returning a list of DWARF variables and the
// Nodes they represent.
func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var) {
// Collect a raw list of DWARF vars.
var vars []*dwarf.Var
var decls []*ir.Name
var selected ir.NameSet
if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
decls, vars, selected = createComplexVars(fnsym, fn)
} else {
decls, vars, selected = createSimpleVars(fnsym, apDecls)
}
dcl := apDecls
if fnsym.WasInlined() {
dcl = preInliningDcls(fnsym)
}
// If optimization is enabled, the list above will typically be
// missing some of the original pre-optimization variables in the
// function (they may have been promoted to registers, folded into
// constants, dead-coded away, etc). Input arguments not eligible
// for SSA optimization are also missing. Here we add back in entries
// for selected missing vars. Note that the recipe below creates a
// conservative location. The idea here is that we want to
// communicate to the user that "yes, there is a variable named X
// in this function, but no, I don't have enough information to
// reliably report its contents."
// For non-SSA-able arguments, however, the correct information
// is known -- they have a single home on the stack.
for _, n := range dcl {
if selected.Has(n) {
continue
}
c := n.Sym().Name[0]
if c == '.' || n.Type().IsUntyped() {
continue
}
if n.Class == ir.PPARAM && !ssagen.TypeOK(n.Type()) {
// SSA-able args get location lists, and may move in and
// out of registers, so those are handled elsewhere.
// Autos and named output params seem to get handled
// with VARDEF, which creates location lists.
// Args not of SSA-able type are treated here; they
// are homed on the stack in a single place for the
// entire call.
vars = append(vars, createSimpleVar(fnsym, n))
decls = append(decls, n)
continue
}
typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
decls = append(decls, n)
abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
isReturnValue := (n.Class == ir.PPARAMOUT)
if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
if n.Esc() == ir.EscHeap {
// The variable in question has been promoted to the heap.
// Its address is in n.Heapaddr.
// TODO(thanm): generate a better location expression
}
inlIndex := 0
if base.Flag.GenDwarfInl > 1 {
if n.InlFormal() || n.InlLocal() {
inlIndex = posInlIndex(n.Pos()) + 1
if n.InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
}
declpos := base.Ctxt.InnermostPos(n.Pos())
vars = append(vars, &dwarf.Var{
Name: n.Sym().Name,
IsReturnValue: isReturnValue,
Abbrev: abbrev,
StackOffset: int32(n.FrameOffset()),
Type: base.Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
})
// Record the Go type to ensure that it gets emitted by the linker.
fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
}
return decls, vars
}
// Given a function that was inlined at some point during the
// compilation, return a sorted list of nodes corresponding to the
// autos/locals in that function prior to inlining. If this is a
// function that is not local to the package being compiled, then the
// names of the variables may have been "versioned" to avoid conflicts
// with local vars; disregard this versioning when sorting.
func preInliningDcls(fnsym *obj.LSym) []*ir.Name {
fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Func)
var rdcl []*ir.Name
for _, n := range fn.Inl.Dcl {
c := n.Sym().Name[0]
// Avoid reporting "_" parameters, since if there are more than
// one, it can result in a collision later on, as in #23179.
if unversion(n.Sym().Name) == "_" || c == '.' || n.Type().IsUntyped() {
continue
}
rdcl = append(rdcl, n)
}
return rdcl
}
// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
var vars []*dwarf.Var
var decls []*ir.Name
var selected ir.NameSet
for _, n := range apDecls {
if ir.IsAutoTmp(n) {
continue
}
decls = append(decls, n)
vars = append(vars, createSimpleVar(fnsym, n))
selected.Add(n)
}
return decls, vars, selected
}
func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
var abbrev int
var offs int64
switch n.Class {
case ir.PAUTO:
offs = n.FrameOffset()
abbrev = dwarf.DW_ABRV_AUTO
if base.Ctxt.FixedFrameSize() == 0 {
offs -= int64(types.PtrSize)
}
if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
// There is a word space for FP on ARM64 even if the frame pointer is disabled
offs -= int64(types.PtrSize)
}
case ir.PPARAM, ir.PPARAMOUT:
abbrev = dwarf.DW_ABRV_PARAM
offs = n.FrameOffset() + base.Ctxt.FixedFrameSize()
default:
base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class, n)
}
typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
delete(fnsym.Func().Autot, reflectdata.TypeLinksym(n.Type()))
inlIndex := 0
if base.Flag.GenDwarfInl > 1 {
if n.InlFormal() || n.InlLocal() {
inlIndex = posInlIndex(n.Pos()) + 1
if n.InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM
}
}
}
declpos := base.Ctxt.InnermostPos(declPos(n))
return &dwarf.Var{
Name: n.Sym().Name,
IsReturnValue: n.Class == ir.PPARAMOUT,
IsInlFormal: n.InlFormal(),
Abbrev: abbrev,
StackOffset: int32(offs),
Type: base.Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
}
}
// createComplexVars creates recomposed DWARF vars with location lists,
// suitable for describing optimized code.
func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
// Produce a DWARF variable entry for each user variable.
var decls []*ir.Name
var vars []*dwarf.Var
var ssaVars ir.NameSet
for varID, dvar := range debugInfo.Vars {
n := dvar
ssaVars.Add(n)
for _, slot := range debugInfo.VarSlots[varID] {
ssaVars.Add(debugInfo.Slots[slot].N)
}
if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
decls = append(decls, n)
vars = append(vars, dvar)
}
}
return decls, vars, ssaVars
}
// createComplexVar builds a single DWARF variable entry and location list.
func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var {
debug := fn.DebugInfo.(*ssa.FuncDebug)
n := debug.Vars[varID]
var abbrev int
switch n.Class {
case ir.PAUTO:
abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
case ir.PPARAM, ir.PPARAMOUT:
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
default:
return nil
}
gotype := reflectdata.TypeLinksym(n.Type())
delete(fnsym.Func().Autot, gotype)
typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
inlIndex := 0
if base.Flag.GenDwarfInl > 1 {
if n.InlFormal() || n.InlLocal() {
inlIndex = posInlIndex(n.Pos()) + 1
if n.InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
}
declpos := base.Ctxt.InnermostPos(n.Pos())
dvar := &dwarf.Var{
Name: n.Sym().Name,
IsReturnValue: n.Class == ir.PPARAMOUT,
IsInlFormal: n.InlFormal(),
Abbrev: abbrev,
Type: base.Ctxt.Lookup(typename),
// The stack offset is used as a sorting key, so for decomposed
// variables just give it the first one. It's not used otherwise.
// This won't work well if the first slot hasn't been assigned a stack
// location, but it's not obvious how to do better.
StackOffset: ssagen.StackOffset(debug.Slots[debug.VarSlots[varID][0]]),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
}
list := debug.LocationLists[varID]
if len(list) != 0 {
dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
debug.PutLocationList(list, base.Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
}
}
return dvar
}
// RecordFlags records the specified command-line flags to be placed
// in the DWARF info.
func RecordFlags(flags ...string) {
if base.Ctxt.Pkgpath == "" {
// We can't record the flags if we don't know what the
// package name is.
return
}
type BoolFlag interface {
IsBoolFlag() bool
}
type CountFlag interface {
IsCountFlag() bool
}
var cmd bytes.Buffer
for _, name := range flags {
f := flag.Lookup(name)
if f == nil {
continue
}
getter := f.Value.(flag.Getter)
if getter.String() == f.DefValue {
// Flag has default value, so omit it.
continue
}
if bf, ok := f.Value.(BoolFlag); ok && bf.IsBoolFlag() {
val, ok := getter.Get().(bool)
if ok && val {
fmt.Fprintf(&cmd, " -%s", f.Name)
continue
}
}
if cf, ok := f.Value.(CountFlag); ok && cf.IsCountFlag() {
val, ok := getter.Get().(int)
if ok && val == 1 {
fmt.Fprintf(&cmd, " -%s", f.Name)
continue
}
}
fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get())
}
if cmd.Len() == 0 {
return
}
s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + base.Ctxt.Pkgpath)
s.Type = objabi.SDWARFCUINFO
// Sometimes (for example when building tests) we can link
// together two package main archives. So allow dups.
s.Set(obj.AttrDuplicateOK, true)
base.Ctxt.Data = append(base.Ctxt.Data, s)
s.P = cmd.Bytes()[1:]
}
// RecordPackageName records the name of the package being
// compiled, so that the linker can save it in the compile unit's DIE.
func RecordPackageName() {
s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + base.Ctxt.Pkgpath)
s.Type = objabi.SDWARFCUINFO
// Sometimes (for example when building tests) we can link
// together two package main archives. So allow dups.
s.Set(obj.AttrDuplicateOK, true)
base.Ctxt.Data = append(base.Ctxt.Data, s)
s.P = []byte(types.LocalPkg.Name)
}

View file

@ -2,14 +2,17 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package dwarfgen
import (
"fmt"
"strings"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/src"
"fmt"
"strings"
)
// To identify variables by original source position.
@ -26,8 +29,8 @@ type varPos struct {
func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
var inlcalls dwarf.InlCalls
if Debug_gendwarfinl != 0 {
Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
if base.Debug.DwarfInl != 0 {
base.Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
}
// This maps inline index (from Ctxt.InlTree) to index in inlcalls.Calls
@ -106,7 +109,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
}
m = makePreinlineDclMap(fnsym)
} else {
ifnlsym := Ctxt.InlTree.InlinedFunction(int(ii - 1))
ifnlsym := base.Ctxt.InlTree.InlinedFunction(int(ii - 1))
m = makePreinlineDclMap(ifnlsym)
}
@ -181,7 +184,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
}
// Debugging
if Debug_gendwarfinl != 0 {
if base.Debug.DwarfInl != 0 {
dumpInlCalls(inlcalls)
dumpInlVars(dwVars)
}
@ -204,16 +207,17 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
// late in the compilation when it is determined that we need an
// abstract function DIE for an inlined routine imported from a
// previously compiled package.
func genAbstractFunc(fn *obj.LSym) {
ifn := Ctxt.DwFixups.GetPrecursorFunc(fn)
func AbstractFunc(fn *obj.LSym) {
ifn := base.Ctxt.DwFixups.GetPrecursorFunc(fn)
if ifn == nil {
Ctxt.Diag("failed to locate precursor fn for %v", fn)
base.Ctxt.Diag("failed to locate precursor fn for %v", fn)
return
}
if Debug_gendwarfinl != 0 {
Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
_ = ifn.(*ir.Func)
if base.Debug.DwarfInl != 0 {
base.Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
}
Ctxt.DwarfAbstractFunc(ifn, fn, myimportpath)
base.Ctxt.DwarfAbstractFunc(ifn, fn, base.Ctxt.Pkgpath)
}
// Undo any versioning performed when a name was written
@ -235,15 +239,15 @@ func makePreinlineDclMap(fnsym *obj.LSym) map[varPos]int {
dcl := preInliningDcls(fnsym)
m := make(map[varPos]int)
for i, n := range dcl {
pos := Ctxt.InnermostPos(n.Pos)
pos := base.Ctxt.InnermostPos(n.Pos())
vp := varPos{
DeclName: unversion(n.Sym.Name),
DeclName: unversion(n.Sym().Name),
DeclFile: pos.RelFilename(),
DeclLine: pos.RelLine(),
DeclCol: pos.Col(),
}
if _, found := m[vp]; found {
Fatalf("child dcl collision on symbol %s within %v\n", n.Sym.Name, fnsym.Name)
base.Fatalf("child dcl collision on symbol %s within %v\n", n.Sym().Name, fnsym.Name)
}
m[vp] = i
}
@ -260,17 +264,17 @@ func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int {
// is one. We do this first so that parents appear before their
// children in the resulting table.
parCallIdx := -1
parInlIdx := Ctxt.InlTree.Parent(inlIdx)
parInlIdx := base.Ctxt.InlTree.Parent(inlIdx)
if parInlIdx >= 0 {
parCallIdx = insertInlCall(dwcalls, parInlIdx, imap)
}
// Create new entry for this inline
inlinedFn := Ctxt.InlTree.InlinedFunction(inlIdx)
callXPos := Ctxt.InlTree.CallPos(inlIdx)
absFnSym := Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
pb := Ctxt.PosTable.Pos(callXPos).Base()
callFileSym := Ctxt.Lookup(pb.SymFilename())
inlinedFn := base.Ctxt.InlTree.InlinedFunction(inlIdx)
callXPos := base.Ctxt.InlTree.CallPos(inlIdx)
absFnSym := base.Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
pb := base.Ctxt.PosTable.Pos(callXPos).Base()
callFileSym := base.Ctxt.Lookup(pb.SymFilename())
ic := dwarf.InlCall{
InlIndex: inlIdx,
CallFile: callFileSym,
@ -298,7 +302,7 @@ func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int {
// the index for a node from the inlined body of D will refer to the
// call to D from C. Whew.
func posInlIndex(xpos src.XPos) int {
pos := Ctxt.PosTable.Pos(xpos)
pos := base.Ctxt.PosTable.Pos(xpos)
if b := pos.Base(); b != nil {
ii := b.InliningIndex()
if ii >= 0 {
@ -324,7 +328,7 @@ func addRange(calls []dwarf.InlCall, start, end int64, ii int, imap map[int]int)
// Append range to correct inlined call
callIdx, found := imap[ii]
if !found {
Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
base.Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
}
call := &calls[callIdx]
call.Ranges = append(call.Ranges, dwarf.Range{Start: start, End: end})
@ -332,23 +336,23 @@ func addRange(calls []dwarf.InlCall, start, end int64, ii int, imap map[int]int)
func dumpInlCall(inlcalls dwarf.InlCalls, idx, ilevel int) {
for i := 0; i < ilevel; i++ {
Ctxt.Logf(" ")
base.Ctxt.Logf(" ")
}
ic := inlcalls.Calls[idx]
callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex)
Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex)
base.Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
for _, f := range ic.InlVars {
Ctxt.Logf(" %v", f.Name)
base.Ctxt.Logf(" %v", f.Name)
}
Ctxt.Logf(" ) C: (")
base.Ctxt.Logf(" ) C: (")
for _, k := range ic.Children {
Ctxt.Logf(" %v", k)
base.Ctxt.Logf(" %v", k)
}
Ctxt.Logf(" ) R:")
base.Ctxt.Logf(" ) R:")
for _, r := range ic.Ranges {
Ctxt.Logf(" [%d,%d)", r.Start, r.End)
base.Ctxt.Logf(" [%d,%d)", r.Start, r.End)
}
Ctxt.Logf("\n")
base.Ctxt.Logf("\n")
for _, k := range ic.Children {
dumpInlCall(inlcalls, k, ilevel+1)
}
@ -373,7 +377,7 @@ func dumpInlVars(dwvars []*dwarf.Var) {
if dwv.IsInAbstract {
ia = 1
}
Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
base.Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
}
}
@ -410,7 +414,7 @@ func checkInlCall(funcName string, inlCalls dwarf.InlCalls, funcSize int64, idx,
// Callee
ic := inlCalls.Calls[idx]
callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
calleeRanges := ic.Ranges
// Caller
@ -418,14 +422,14 @@ func checkInlCall(funcName string, inlCalls dwarf.InlCalls, funcSize int64, idx,
parentRanges := []dwarf.Range{dwarf.Range{Start: int64(0), End: funcSize}}
if parentIdx != -1 {
pic := inlCalls.Calls[parentIdx]
caller = Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
caller = base.Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
parentRanges = pic.Ranges
}
// Callee ranges contained in caller ranges?
c, m := rangesContainsAll(parentRanges, calleeRanges)
if !c {
Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
base.Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
}
// Now visit kids

View file

@ -0,0 +1,94 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dwarfgen
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/internal/src"
)
// A ScopeMarker tracks scope nesting and boundaries for later use
// during DWARF generation.
type ScopeMarker struct {
parents []ir.ScopeID
marks []ir.Mark
}
// checkPos validates the given position and returns the current scope.
func (m *ScopeMarker) checkPos(pos src.XPos) ir.ScopeID {
if !pos.IsKnown() {
base.Fatalf("unknown scope position")
}
if len(m.marks) == 0 {
return 0
}
last := &m.marks[len(m.marks)-1]
if xposBefore(pos, last.Pos) {
base.FatalfAt(pos, "non-monotonic scope positions\n\t%v: previous scope position", base.FmtPos(last.Pos))
}
return last.Scope
}
// Push records a transition to a new child scope of the current scope.
func (m *ScopeMarker) Push(pos src.XPos) {
current := m.checkPos(pos)
m.parents = append(m.parents, current)
child := ir.ScopeID(len(m.parents))
m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: child})
}
// Pop records a transition back to the current scope's parent.
func (m *ScopeMarker) Pop(pos src.XPos) {
current := m.checkPos(pos)
parent := m.parents[current-1]
m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: parent})
}
// Unpush removes the current scope, which must be empty.
func (m *ScopeMarker) Unpush() {
i := len(m.marks) - 1
current := m.marks[i].Scope
if current != ir.ScopeID(len(m.parents)) {
base.FatalfAt(m.marks[i].Pos, "current scope is not empty")
}
m.parents = m.parents[:current-1]
m.marks = m.marks[:i]
}
// WriteTo writes the recorded scope marks to the given function,
// and resets the marker for reuse.
func (m *ScopeMarker) WriteTo(fn *ir.Func) {
m.compactMarks()
fn.Parents = make([]ir.ScopeID, len(m.parents))
copy(fn.Parents, m.parents)
m.parents = m.parents[:0]
fn.Marks = make([]ir.Mark, len(m.marks))
copy(fn.Marks, m.marks)
m.marks = m.marks[:0]
}
func (m *ScopeMarker) compactMarks() {
n := 0
for _, next := range m.marks {
if n > 0 && next.Pos == m.marks[n-1].Pos {
m.marks[n-1].Scope = next.Scope
continue
}
m.marks[n] = next
n++
}
m.marks = m.marks[:n]
}
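
Push assigns scope IDs in visit order, records each new scope's parent, and appends a boundary mark; Pop appends a mark that restores the parent. The self-contained toy below mirrors that bookkeeping (hypothetical names, with int positions standing in for src.XPos):

    package main

    import "fmt"

    type scopeID int32

    type mark struct {
        pos   int     // stands in for src.XPos
        scope scopeID // scope in effect from pos onward
    }

    type marker struct {
        parents []scopeID
        marks   []mark
    }

    func (m *marker) current() scopeID {
        if len(m.marks) == 0 {
            return 0
        }
        return m.marks[len(m.marks)-1].scope
    }

    // push opens a child of the current scope, as ScopeMarker.Push does.
    func (m *marker) push(pos int) {
        m.parents = append(m.parents, m.current())
        m.marks = append(m.marks, mark{pos, scopeID(len(m.parents))})
    }

    // pop returns to the current scope's parent, as ScopeMarker.Pop does.
    func (m *marker) pop(pos int) {
        m.marks = append(m.marks, mark{pos, m.parents[m.current()-1]})
    }

    func main() {
        var m marker
        m.push(1) // { outer block: scope 1
        m.push(2) // { inner block: scope 2
        m.pop(3)  // } back to scope 1
        m.pop(4)  // } back to scope 0
        fmt.Println(m.parents, m.marks) // [0 1] [{1 1} {2 2} {3 1} {4 0}]
    }

WriteTo then hands exactly these two slices to ir.Func, after compactMarks merges marks that share a position.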

View file

@ -2,21 +2,24 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package dwarfgen
import (
"sort"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/src"
"sort"
)
// See golang.org/issue/20390.
func xposBefore(p, q src.XPos) bool {
return Ctxt.PosTable.Pos(p).Before(Ctxt.PosTable.Pos(q))
return base.Ctxt.PosTable.Pos(p).Before(base.Ctxt.PosTable.Pos(q))
}
func findScope(marks []Mark, pos src.XPos) ScopeID {
func findScope(marks []ir.Mark, pos src.XPos) ir.ScopeID {
i := sort.Search(len(marks), func(i int) bool {
return xposBefore(pos, marks[i].Pos)
})
@ -26,20 +29,20 @@ func findScope(marks []Mark, pos src.XPos) ScopeID {
return marks[i-1].Scope
}
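
findScope leans on sort.Search: it locates the first mark strictly after pos, so the mark just before that index (when there is one) carries the scope in effect at pos. The same idiom with plain ints, as a hedged sketch:

    package main

    import (
        "fmt"
        "sort"
    )

    type mark struct{ pos, scope int }

    // scopeAt mirrors findScope: marks must be sorted by pos.
    func scopeAt(marks []mark, pos int) int {
        i := sort.Search(len(marks), func(i int) bool {
            return pos < marks[i].pos // plays the role of xposBefore
        })
        if i == 0 {
            return 0 // before the first mark: the outermost scope
        }
        return marks[i-1].scope
    }

    func main() {
        marks := []mark{{1, 1}, {2, 2}, {3, 1}, {4, 0}}
        fmt.Println(scopeAt(marks, 0), scopeAt(marks, 2), scopeAt(marks, 9)) // 0 2 0
    }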
func assembleScopes(fnsym *obj.LSym, fn *Node, dwarfVars []*dwarf.Var, varScopes []ScopeID) []dwarf.Scope {
func assembleScopes(fnsym *obj.LSym, fn *ir.Func, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope {
// Initialize the DWARF scope tree based on lexical scopes.
dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func.Parents))
for i, parent := range fn.Func.Parents {
dwarfScopes := make([]dwarf.Scope, 1+len(fn.Parents))
for i, parent := range fn.Parents {
dwarfScopes[i+1].Parent = int32(parent)
}
scopeVariables(dwarfVars, varScopes, dwarfScopes)
scopePCs(fnsym, fn.Func.Marks, dwarfScopes)
scopePCs(fnsym, fn.Marks, dwarfScopes)
return compactScopes(dwarfScopes)
}
// scopeVariables assigns DWARF variable records to their scopes.
func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ScopeID, dwarfScopes []dwarf.Scope) {
func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ir.ScopeID, dwarfScopes []dwarf.Scope) {
sort.Stable(varsByScopeAndOffset{dwarfVars, varScopes})
i0 := 0
@ -56,7 +59,7 @@ func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ScopeID, dwarfScopes []d
}
// scopePCs assigns PC ranges to their scopes.
func scopePCs(fnsym *obj.LSym, marks []Mark, dwarfScopes []dwarf.Scope) {
func scopePCs(fnsym *obj.LSym, marks []ir.Mark, dwarfScopes []dwarf.Scope) {
// If there aren't any child scopes (in particular, when scope
// tracking is disabled), we can skip a whole lot of work.
if len(marks) == 0 {
@ -89,7 +92,7 @@ func compactScopes(dwarfScopes []dwarf.Scope) []dwarf.Scope {
type varsByScopeAndOffset struct {
vars []*dwarf.Var
scopes []ScopeID
scopes []ir.ScopeID
}
func (v varsByScopeAndOffset) Len() int {

View file

@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc_test
package dwarfgen
import (
"cmd/internal/objfile"
"debug/dwarf"
"fmt"
"internal/testenv"
@ -18,6 +17,8 @@ import (
"strconv"
"strings"
"testing"
"cmd/internal/objfile"
)
type testline struct {

File diff suppressed because it is too large.

View file

@ -1,959 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"fmt"
"sort"
)
// AlgKind describes the kind of algorithms used for comparing and
// hashing a Type.
type AlgKind int
//go:generate stringer -type AlgKind -trimprefix A
const (
// These values are known by runtime.
ANOEQ AlgKind = iota
AMEM0
AMEM8
AMEM16
AMEM32
AMEM64
AMEM128
ASTRING
AINTER
ANILINTER
AFLOAT32
AFLOAT64
ACPLX64
ACPLX128
// Type can be compared/hashed as regular memory.
AMEM AlgKind = 100
// Type needs special comparison/hashing functions.
ASPECIAL AlgKind = -1
)
// IsComparable reports whether t is a comparable type.
func IsComparable(t *types.Type) bool {
a, _ := algtype1(t)
return a != ANOEQ
}
// IsRegularMemory reports whether t can be compared/hashed as regular memory.
func IsRegularMemory(t *types.Type) bool {
a, _ := algtype1(t)
return a == AMEM
}
// IncomparableField returns an incomparable Field of struct Type t, if any.
func IncomparableField(t *types.Type) *types.Field {
for _, f := range t.FieldSlice() {
if !IsComparable(f.Type) {
return f
}
}
return nil
}
// EqCanPanic reports whether == on type t could panic (has an interface somewhere).
// t must be comparable.
func EqCanPanic(t *types.Type) bool {
switch t.Etype {
default:
return false
case TINTER:
return true
case TARRAY:
return EqCanPanic(t.Elem())
case TSTRUCT:
for _, f := range t.FieldSlice() {
if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
return true
}
}
return false
}
}
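
The hazard EqCanPanic guards against is visible from ordinary Go: interface equality always compiles, but panics at run time when the dynamic type is incomparable, which is why geneq must keep such comparisons in source order. A minimal demonstration:

    package main

    import "fmt"

    func main() {
        defer func() { fmt.Println("recovered:", recover()) }()
        var x, y interface{} = []int{1}, []int{1}
        fmt.Println(x == y) // panics: comparing uncomparable type []int
    }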
// algtype is like algtype1, except it returns the fixed-width AMEMxx variants
// instead of the general AMEM kind when possible.
func algtype(t *types.Type) AlgKind {
a, _ := algtype1(t)
if a == AMEM {
switch t.Width {
case 0:
return AMEM0
case 1:
return AMEM8
case 2:
return AMEM16
case 4:
return AMEM32
case 8:
return AMEM64
case 16:
return AMEM128
}
}
return a
}
// algtype1 returns the AlgKind used for comparing and hashing Type t.
// If it returns ANOEQ, it also returns the component type of t that
// makes it incomparable.
func algtype1(t *types.Type) (AlgKind, *types.Type) {
if t.Broke() {
return AMEM, nil
}
if t.Noalg() {
return ANOEQ, t
}
switch t.Etype {
case TANY, TFORW:
// will be defined later.
return ANOEQ, t
case TINT8, TUINT8, TINT16, TUINT16,
TINT32, TUINT32, TINT64, TUINT64,
TINT, TUINT, TUINTPTR,
TBOOL, TPTR,
TCHAN, TUNSAFEPTR:
return AMEM, nil
case TFUNC, TMAP:
return ANOEQ, t
case TFLOAT32:
return AFLOAT32, nil
case TFLOAT64:
return AFLOAT64, nil
case TCOMPLEX64:
return ACPLX64, nil
case TCOMPLEX128:
return ACPLX128, nil
case TSTRING:
return ASTRING, nil
case TINTER:
if t.IsEmptyInterface() {
return ANILINTER, nil
}
return AINTER, nil
case TSLICE:
return ANOEQ, t
case TARRAY:
a, bad := algtype1(t.Elem())
switch a {
case AMEM:
return AMEM, nil
case ANOEQ:
return ANOEQ, bad
}
switch t.NumElem() {
case 0:
// We checked above that the element type is comparable.
return AMEM, nil
case 1:
// Single-element array is same as its lone element.
return a, nil
}
return ASPECIAL, nil
case TSTRUCT:
fields := t.FieldSlice()
// One-field struct is same as that one field alone.
if len(fields) == 1 && !fields[0].Sym.IsBlank() {
return algtype1(fields[0].Type)
}
ret := AMEM
for i, f := range fields {
// All fields must be comparable.
a, bad := algtype1(f.Type)
if a == ANOEQ {
return ANOEQ, bad
}
// Blank fields, padded fields, fields with non-memory
// equality need special compare.
if a != AMEM || f.Sym.IsBlank() || ispaddedfield(t, i) {
ret = ASPECIAL
}
}
return ret, nil
}
Fatalf("algtype1: unexpected type %v", t)
return 0, nil
}
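
Two of algtype1's reductions can be checked from user code: a one-field struct compares exactly like its lone field, and a zero-length array of a comparable element type is trivially plain memory. A short sketch:

    package main

    import "fmt"

    func main() {
        // TSTRUCT special case: box compares like its single string field.
        type box struct{ s string }
        fmt.Println(box{"x"} == box{"x"}) // true

        // TARRAY with NumElem() == 0: nothing to compare, so AMEM.
        var a, b [0]int
        fmt.Println(a == b) // true
    }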
// genhash returns a symbol which is the closure used to compute
// the hash of a value of type t.
// Note: the generated function must match runtime.typehash exactly.
func genhash(t *types.Type) *obj.LSym {
switch algtype(t) {
default:
// genhash is only called for types that have equality
Fatalf("genhash %v", t)
case AMEM0:
return sysClosure("memhash0")
case AMEM8:
return sysClosure("memhash8")
case AMEM16:
return sysClosure("memhash16")
case AMEM32:
return sysClosure("memhash32")
case AMEM64:
return sysClosure("memhash64")
case AMEM128:
return sysClosure("memhash128")
case ASTRING:
return sysClosure("strhash")
case AINTER:
return sysClosure("interhash")
case ANILINTER:
return sysClosure("nilinterhash")
case AFLOAT32:
return sysClosure("f32hash")
case AFLOAT64:
return sysClosure("f64hash")
case ACPLX64:
return sysClosure("c64hash")
case ACPLX128:
return sysClosure("c128hash")
case AMEM:
// For other sizes of plain memory, we build a closure
// that calls memhash_varlen. The size of the memory is
// encoded in the first slot of the closure.
closure := typeLookup(fmt.Sprintf(".hashfunc%d", t.Width)).Linksym()
if len(closure.P) > 0 { // already generated
return closure
}
if memhashvarlen == nil {
memhashvarlen = sysfunc("memhash_varlen")
}
ot := 0
ot = dsymptr(closure, ot, memhashvarlen, 0)
ot = duintptr(closure, ot, uint64(t.Width)) // size encoded in closure
ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
return closure
case ASPECIAL:
break
}
closure := typesymprefix(".hashfunc", t).Linksym()
if len(closure.P) > 0 { // already generated
return closure
}
// Generate hash functions for subtypes.
// There are cases where we might not use these hashes,
// but in that case they will get dead-code eliminated.
// (And the closure generated by genhash will also get
// dead-code eliminated, as we call the subtype hashers
// directly.)
switch t.Etype {
case types.TARRAY:
genhash(t.Elem())
case types.TSTRUCT:
for _, f := range t.FieldSlice() {
genhash(f.Type)
}
}
sym := typesymprefix(".hash", t)
if Debug.r != 0 {
fmt.Printf("genhash %v %v %v\n", closure, sym, t)
}
lineno = autogeneratedPos // less confusing than end of input
dclcontext = PEXTERN
// func sym(p *T, h uintptr) uintptr
tfn := nod(OTFUNC, nil, nil)
tfn.List.Set2(
namedfield("p", types.NewPtr(t)),
namedfield("h", types.Types[TUINTPTR]),
)
tfn.Rlist.Set1(anonfield(types.Types[TUINTPTR]))
fn := dclfunc(sym, tfn)
np := asNode(tfn.Type.Params().Field(0).Nname)
nh := asNode(tfn.Type.Params().Field(1).Nname)
switch t.Etype {
case types.TARRAY:
// An array of pure memory would be handled by the
// standard algorithm, so the element type must not be
// pure memory.
hashel := hashfor(t.Elem())
n := nod(ORANGE, nil, nod(ODEREF, np, nil))
ni := newname(lookup("i"))
ni.Type = types.Types[TINT]
n.List.Set1(ni)
n.SetColas(true)
colasdefn(n.List.Slice(), n)
ni = n.List.First()
// h = hashel(&p[i], h)
call := nod(OCALL, hashel, nil)
nx := nod(OINDEX, np, ni)
nx.SetBounded(true)
na := nod(OADDR, nx, nil)
call.List.Append(na)
call.List.Append(nh)
n.Nbody.Append(nod(OAS, nh, call))
fn.Nbody.Append(n)
case types.TSTRUCT:
// Walk the struct using memhash for runs of AMEM
// and calling specific hash functions for the others.
for i, fields := 0, t.FieldSlice(); i < len(fields); {
f := fields[i]
// Skip blank fields.
if f.Sym.IsBlank() {
i++
continue
}
// Hash non-memory fields with appropriate hash function.
if !IsRegularMemory(f.Type) {
hashel := hashfor(f.Type)
call := nod(OCALL, hashel, nil)
nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
na := nod(OADDR, nx, nil)
call.List.Append(na)
call.List.Append(nh)
fn.Nbody.Append(nod(OAS, nh, call))
i++
continue
}
// Otherwise, hash a maximal length run of raw memory.
size, next := memrun(t, i)
// h = hashel(&p.first, size, h)
hashel := hashmem(f.Type)
call := nod(OCALL, hashel, nil)
nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
na := nod(OADDR, nx, nil)
call.List.Append(na)
call.List.Append(nh)
call.List.Append(nodintconst(size))
fn.Nbody.Append(nod(OAS, nh, call))
i = next
}
}
r := nod(ORETURN, nil, nil)
r.List.Append(nh)
fn.Nbody.Append(r)
if Debug.r != 0 {
dumplist("genhash body", fn.Nbody)
}
funcbody()
fn.Func.SetDupok(true)
fn = typecheck(fn, ctxStmt)
Curfn = fn
typecheckslice(fn.Nbody.Slice(), ctxStmt)
Curfn = nil
if debug_dclstack != 0 {
testdclstack()
}
fn.Func.SetNilCheckDisabled(true)
xtop = append(xtop, fn)
// Build closure. It doesn't close over any variables, so
// it contains just the function pointer.
dsymptr(closure, 0, sym.Linksym(), 0)
ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
return closure
}
func hashfor(t *types.Type) *Node {
var sym *types.Sym
switch a, _ := algtype1(t); a {
case AMEM:
Fatalf("hashfor with AMEM type")
case AINTER:
sym = Runtimepkg.Lookup("interhash")
case ANILINTER:
sym = Runtimepkg.Lookup("nilinterhash")
case ASTRING:
sym = Runtimepkg.Lookup("strhash")
case AFLOAT32:
sym = Runtimepkg.Lookup("f32hash")
case AFLOAT64:
sym = Runtimepkg.Lookup("f64hash")
case ACPLX64:
sym = Runtimepkg.Lookup("c64hash")
case ACPLX128:
sym = Runtimepkg.Lookup("c128hash")
default:
// Note: the caller of hashfor ensured that this symbol
// exists and has a body by calling genhash for t.
sym = typesymprefix(".hash", t)
}
n := newname(sym)
setNodeNameFunc(n)
n.Type = functype(nil, []*Node{
anonfield(types.NewPtr(t)),
anonfield(types.Types[TUINTPTR]),
}, []*Node{
anonfield(types.Types[TUINTPTR]),
})
return n
}
// sysClosure returns a closure which will call the
// given runtime function (with no closed-over variables).
func sysClosure(name string) *obj.LSym {
s := sysvar(name + "·f")
if len(s.P) == 0 {
f := sysfunc(name)
dsymptr(s, 0, f, 0)
ggloblsym(s, int32(Widthptr), obj.DUPOK|obj.RODATA)
}
return s
}
// geneq returns a symbol which is the closure used to compute
// equality for two objects of type t.
func geneq(t *types.Type) *obj.LSym {
switch algtype(t) {
case ANOEQ:
// The runtime will panic if it tries to compare
// a type with a nil equality function.
return nil
case AMEM0:
return sysClosure("memequal0")
case AMEM8:
return sysClosure("memequal8")
case AMEM16:
return sysClosure("memequal16")
case AMEM32:
return sysClosure("memequal32")
case AMEM64:
return sysClosure("memequal64")
case AMEM128:
return sysClosure("memequal128")
case ASTRING:
return sysClosure("strequal")
case AINTER:
return sysClosure("interequal")
case ANILINTER:
return sysClosure("nilinterequal")
case AFLOAT32:
return sysClosure("f32equal")
case AFLOAT64:
return sysClosure("f64equal")
case ACPLX64:
return sysClosure("c64equal")
case ACPLX128:
return sysClosure("c128equal")
case AMEM:
// make equality closure. The size of the type
// is encoded in the closure.
closure := typeLookup(fmt.Sprintf(".eqfunc%d", t.Width)).Linksym()
if len(closure.P) != 0 {
return closure
}
if memequalvarlen == nil {
memequalvarlen = sysvar("memequal_varlen") // asm func
}
ot := 0
ot = dsymptr(closure, ot, memequalvarlen, 0)
ot = duintptr(closure, ot, uint64(t.Width))
ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
return closure
case ASPECIAL:
break
}
closure := typesymprefix(".eqfunc", t).Linksym()
if len(closure.P) > 0 { // already generated
return closure
}
sym := typesymprefix(".eq", t)
if Debug.r != 0 {
fmt.Printf("geneq %v\n", t)
}
// Autogenerate code for equality of structs and arrays.
lineno = autogeneratedPos // less confusing than end of input
dclcontext = PEXTERN
// func sym(p, q *T) bool
tfn := nod(OTFUNC, nil, nil)
tfn.List.Set2(
namedfield("p", types.NewPtr(t)),
namedfield("q", types.NewPtr(t)),
)
tfn.Rlist.Set1(namedfield("r", types.Types[TBOOL]))
fn := dclfunc(sym, tfn)
np := asNode(tfn.Type.Params().Field(0).Nname)
nq := asNode(tfn.Type.Params().Field(1).Nname)
nr := asNode(tfn.Type.Results().Field(0).Nname)
// Label to jump to if an equality test fails.
neq := autolabel(".neq")
// We reach here only for types that have equality but
// cannot be handled by the standard algorithms,
// so t must be either an array or a struct.
switch t.Etype {
default:
Fatalf("geneq %v", t)
case TARRAY:
nelem := t.NumElem()
// checkAll generates code to check the equality of all array elements.
// If unroll is greater than nelem, checkAll generates:
//
// if eq(p[0], q[0]) && eq(p[1], q[1]) && ... {
// } else {
// return
// }
//
// And so on.
//
// Otherwise it generates:
//
// for i := 0; i < nelem; i++ {
// if eq(p[i], q[i]) {
// } else {
// goto neq
// }
// }
//
// TODO(josharian): consider doing some loop unrolling
// for larger nelem as well, processing a few elements at a time in a loop.
checkAll := func(unroll int64, last bool, eq func(pi, qi *Node) *Node) {
// checkIdx generates a node to check for equality at index i.
checkIdx := func(i *Node) *Node {
// pi := p[i]
pi := nod(OINDEX, np, i)
pi.SetBounded(true)
pi.Type = t.Elem()
// qi := q[i]
qi := nod(OINDEX, nq, i)
qi.SetBounded(true)
qi.Type = t.Elem()
return eq(pi, qi)
}
if nelem <= unroll {
if last {
// Do last comparison in a different manner.
nelem--
}
// Generate a series of checks.
for i := int64(0); i < nelem; i++ {
// if check {} else { goto neq }
nif := nod(OIF, checkIdx(nodintconst(i)), nil)
nif.Rlist.Append(nodSym(OGOTO, nil, neq))
fn.Nbody.Append(nif)
}
if last {
fn.Nbody.Append(nod(OAS, nr, checkIdx(nodintconst(nelem))))
}
} else {
// Generate a for loop.
// for i := 0; i < nelem; i++
i := temp(types.Types[TINT])
init := nod(OAS, i, nodintconst(0))
cond := nod(OLT, i, nodintconst(nelem))
post := nod(OAS, i, nod(OADD, i, nodintconst(1)))
loop := nod(OFOR, cond, post)
loop.Ninit.Append(init)
// if eq(pi, qi) {} else { goto neq }
nif := nod(OIF, checkIdx(i), nil)
nif.Rlist.Append(nodSym(OGOTO, nil, neq))
loop.Nbody.Append(nif)
fn.Nbody.Append(loop)
if last {
fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
}
}
}
switch t.Elem().Etype {
case TSTRING:
// Do two loops. First, check that all the lengths match (cheap).
// Second, check that all the contents match (expensive).
// TODO: when the array size is small, unroll the length match checks.
checkAll(3, false, func(pi, qi *Node) *Node {
// Compare lengths.
eqlen, _ := eqstring(pi, qi)
return eqlen
})
checkAll(1, true, func(pi, qi *Node) *Node {
// Compare contents.
_, eqmem := eqstring(pi, qi)
return eqmem
})
case TFLOAT32, TFLOAT64:
checkAll(2, true, func(pi, qi *Node) *Node {
// p[i] == q[i]
return nod(OEQ, pi, qi)
})
// TODO: pick apart structs, do them piecemeal too
default:
checkAll(1, true, func(pi, qi *Node) *Node {
// p[i] == q[i]
return nod(OEQ, pi, qi)
})
}
case TSTRUCT:
// Build a list of conditions to satisfy.
// The conditions are a list-of-lists. Conditions are reorderable
// within each inner list. The outer lists must be evaluated in order.
var conds [][]*Node
conds = append(conds, []*Node{})
and := func(n *Node) {
i := len(conds) - 1
conds[i] = append(conds[i], n)
}
// Walk the struct using memequal for runs of AMEM
// and calling specific equality tests for the others.
for i, fields := 0, t.FieldSlice(); i < len(fields); {
f := fields[i]
// Skip blank-named fields.
if f.Sym.IsBlank() {
i++
continue
}
// Compare non-memory fields with field equality.
if !IsRegularMemory(f.Type) {
if EqCanPanic(f.Type) {
// Enforce ordering by starting a new set of reorderable conditions.
conds = append(conds, []*Node{})
}
p := nodSym(OXDOT, np, f.Sym)
q := nodSym(OXDOT, nq, f.Sym)
switch {
case f.Type.IsString():
eqlen, eqmem := eqstring(p, q)
and(eqlen)
and(eqmem)
default:
and(nod(OEQ, p, q))
}
if EqCanPanic(f.Type) {
// Also enforce ordering after something that can panic.
conds = append(conds, []*Node{})
}
i++
continue
}
// Find maximal length run of memory-only fields.
size, next := memrun(t, i)
// TODO(rsc): All the calls to newname are wrong for
// cross-package unexported fields.
if s := fields[i:next]; len(s) <= 2 {
// Two or fewer fields: use plain field equality.
for _, f := range s {
and(eqfield(np, nq, f.Sym))
}
} else {
// More than two fields: use memequal.
and(eqmem(np, nq, f.Sym, size))
}
i = next
}
// Sort conditions to put runtime calls last.
// Preserve the rest of the ordering.
var flatConds []*Node
for _, c := range conds {
isCall := func(n *Node) bool {
return n.Op == OCALL || n.Op == OCALLFUNC
}
sort.SliceStable(c, func(i, j int) bool {
return !isCall(c[i]) && isCall(c[j])
})
flatConds = append(flatConds, c...)
}
if len(flatConds) == 0 {
fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
} else {
for _, c := range flatConds[:len(flatConds)-1] {
// if cond {} else { goto neq }
n := nod(OIF, c, nil)
n.Rlist.Append(nodSym(OGOTO, nil, neq))
fn.Nbody.Append(n)
}
fn.Nbody.Append(nod(OAS, nr, flatConds[len(flatConds)-1]))
}
}
// ret:
// return
ret := autolabel(".ret")
fn.Nbody.Append(nodSym(OLABEL, nil, ret))
fn.Nbody.Append(nod(ORETURN, nil, nil))
// neq:
// r = false
// return (or goto ret)
fn.Nbody.Append(nodSym(OLABEL, nil, neq))
fn.Nbody.Append(nod(OAS, nr, nodbool(false)))
if EqCanPanic(t) || hasCall(fn) {
// Epilogue is large, so share it with the equal case.
fn.Nbody.Append(nodSym(OGOTO, nil, ret))
} else {
// Epilogue is small, so don't bother sharing.
fn.Nbody.Append(nod(ORETURN, nil, nil))
}
// TODO(khr): the epilogue size detection condition above isn't perfect.
// We should really do a generic CL that shares epilogues across
// the board. See #24936.
if Debug.r != 0 {
dumplist("geneq body", fn.Nbody)
}
funcbody()
fn.Func.SetDupok(true)
fn = typecheck(fn, ctxStmt)
Curfn = fn
typecheckslice(fn.Nbody.Slice(), ctxStmt)
Curfn = nil
if debug_dclstack != 0 {
testdclstack()
}
// Disable checknils while compiling this code.
// We are comparing a struct or an array,
// neither of which can be nil, and our comparisons
// are shallow.
fn.Func.SetNilCheckDisabled(true)
xtop = append(xtop, fn)
// Generate a closure which points at the function we just generated.
dsymptr(closure, 0, sym.Linksym(), 0)
ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
return closure
}
func hasCall(n *Node) bool {
if n.Op == OCALL || n.Op == OCALLFUNC {
return true
}
if n.Left != nil && hasCall(n.Left) {
return true
}
if n.Right != nil && hasCall(n.Right) {
return true
}
for _, x := range n.Ninit.Slice() {
if hasCall(x) {
return true
}
}
for _, x := range n.Nbody.Slice() {
if hasCall(x) {
return true
}
}
for _, x := range n.List.Slice() {
if hasCall(x) {
return true
}
}
for _, x := range n.Rlist.Slice() {
if hasCall(x) {
return true
}
}
return false
}
// eqfield returns the node
// p.field == q.field
func eqfield(p *Node, q *Node, field *types.Sym) *Node {
nx := nodSym(OXDOT, p, field)
ny := nodSym(OXDOT, q, field)
ne := nod(OEQ, nx, ny)
return ne
}
// eqstring returns the nodes
// len(s) == len(t)
// and
// memequal(s.ptr, t.ptr, len(s))
// which can be used to construct string equality comparison.
// eqlen must be evaluated before eqmem, and shortcircuiting is required.
func eqstring(s, t *Node) (eqlen, eqmem *Node) {
s = conv(s, types.Types[TSTRING])
t = conv(t, types.Types[TSTRING])
sptr := nod(OSPTR, s, nil)
tptr := nod(OSPTR, t, nil)
slen := conv(nod(OLEN, s, nil), types.Types[TUINTPTR])
tlen := conv(nod(OLEN, t, nil), types.Types[TUINTPTR])
fn := syslook("memequal")
fn = substArgTypes(fn, types.Types[TUINT8], types.Types[TUINT8])
call := nod(OCALL, fn, nil)
call.List.Append(sptr, tptr, slen.copy())
call = typecheck(call, ctxExpr|ctxMultiOK)
cmp := nod(OEQ, slen, tlen)
cmp = typecheck(cmp, ctxExpr)
cmp.Type = types.Types[TBOOL]
return cmp, call
}
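
Written out by hand, the two nodes compute len(s) == len(t) and memequal(s.ptr, t.ptr, len(s)), with the length check first so memequal never runs on strings of different lengths. A plain-Go analogue of the generated logic (not the generated code itself):

    package main

    import "fmt"

    func strEq(s, t string) bool {
        if len(s) != len(t) { // eqlen, evaluated first
            return false
        }
        for i := 0; i < len(s); i++ { // stands in for the memequal call
            if s[i] != t[i] {
                return false
            }
        }
        return true
    }

    func main() {
        fmt.Println(strEq("go", "go"), strEq("go", "gc")) // true false
    }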
// eqinterface returns the nodes
// s.tab == t.tab (or s.typ == t.typ, as appropriate)
// and
// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
// which can be used to construct interface equality comparison.
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
func eqinterface(s, t *Node) (eqtab, eqdata *Node) {
if !types.Identical(s.Type, t.Type) {
Fatalf("eqinterface %v %v", s.Type, t.Type)
}
// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
var fn *Node
if s.Type.IsEmptyInterface() {
fn = syslook("efaceeq")
} else {
fn = syslook("ifaceeq")
}
stab := nod(OITAB, s, nil)
ttab := nod(OITAB, t, nil)
sdata := nod(OIDATA, s, nil)
tdata := nod(OIDATA, t, nil)
sdata.Type = types.Types[TUNSAFEPTR]
tdata.Type = types.Types[TUNSAFEPTR]
sdata.SetTypecheck(1)
tdata.SetTypecheck(1)
call := nod(OCALL, fn, nil)
call.List.Append(stab, sdata, tdata)
call = typecheck(call, ctxExpr|ctxMultiOK)
cmp := nod(OEQ, stab, ttab)
cmp = typecheck(cmp, ctxExpr)
cmp.Type = types.Types[TBOOL]
return cmp, call
}
// eqmem returns the node
// memequal(&p.field, &q.field [, size])
func eqmem(p *Node, q *Node, field *types.Sym, size int64) *Node {
nx := nod(OADDR, nodSym(OXDOT, p, field), nil)
ny := nod(OADDR, nodSym(OXDOT, q, field), nil)
nx = typecheck(nx, ctxExpr)
ny = typecheck(ny, ctxExpr)
fn, needsize := eqmemfunc(size, nx.Type.Elem())
call := nod(OCALL, fn, nil)
call.List.Append(nx)
call.List.Append(ny)
if needsize {
call.List.Append(nodintconst(size))
}
return call
}
func eqmemfunc(size int64, t *types.Type) (fn *Node, needsize bool) {
switch size {
default:
fn = syslook("memequal")
needsize = true
case 1, 2, 4, 8, 16:
buf := fmt.Sprintf("memequal%d", int(size)*8)
fn = syslook(buf)
}
fn = substArgTypes(fn, t, t)
return fn, needsize
}
// memrun finds runs of struct fields for which memory-only algs are appropriate.
// t is the parent struct type, and start is the field index at which to start the run.
// size is the length in bytes of the memory included in the run.
// next is the index just after the end of the memory run.
func memrun(t *types.Type, start int) (size int64, next int) {
next = start
for {
next++
if next == t.NumFields() {
break
}
// Stop run after a padded field.
if ispaddedfield(t, next-1) {
break
}
// Also, stop before a blank or non-memory field.
if f := t.Field(next); f.Sym.IsBlank() || !IsRegularMemory(f.Type) {
break
}
}
return t.Field(next-1).End() - t.Field(start).Offset, next
}
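
For a struct like the hypothetical T below, memrun(t, 0) reports size=8 and next=2: fields a and b form one padding-free run handled by a single memequal call, and the string field then gets its own comparison. The unsafe arithmetic mirrors the offset math:

    package main

    import (
        "fmt"
        "unsafe"
    )

    type T struct {
        a, b int32  // contiguous plain memory: one run
        c    string // non-memory field: stops the run
    }

    func main() {
        var t T
        runSize := unsafe.Offsetof(t.c) - unsafe.Offsetof(t.a)
        fmt.Println(runSize) // 8: a and b compared together
    }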
// ispaddedfield reports whether the i'th field of struct type t is followed
// by padding.
func ispaddedfield(t *types.Type, i int) bool {
if !t.IsStruct() {
Fatalf("ispaddedfield called non-struct %v", t)
}
end := t.Width
if i+1 < t.NumFields() {
end = t.Field(i + 1).Offset
}
return t.Field(i).End() != end
}

View file

@ -1,177 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
)
type exporter struct {
marked map[*types.Type]bool // types already seen by markType
}
// markType recursively visits types reachable from t to identify
// functions whose inline bodies may be needed.
func (p *exporter) markType(t *types.Type) {
if p.marked[t] {
return
}
p.marked[t] = true
// If this is a named type, mark all of its associated
// methods. Skip interface types because t.Methods contains
// only their unexpanded method set (i.e., exclusive of
// interface embeddings), and the switch statement below
// handles their full method set.
if t.Sym != nil && t.Etype != TINTER {
for _, m := range t.Methods().Slice() {
if types.IsExported(m.Sym.Name) {
p.markType(m.Type)
}
}
}
// Recursively mark any types that can be produced given a
// value of type t: dereferencing a pointer; indexing or
// iterating over an array, slice, or map; receiving from a
// channel; accessing a struct field or interface method; or
// calling a function.
//
// Notably, we don't mark function parameter types, because
// the user already needs some way to construct values of
// those types.
switch t.Etype {
case TPTR, TARRAY, TSLICE:
p.markType(t.Elem())
case TCHAN:
if t.ChanDir().CanRecv() {
p.markType(t.Elem())
}
case TMAP:
p.markType(t.Key())
p.markType(t.Elem())
case TSTRUCT:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
p.markType(f.Type)
}
}
case TFUNC:
// If t is the type of a function or method, then
// t.Nname() is its ONAME. Mark its inline body and
// any recursively called functions for export.
inlFlood(asNode(t.Nname()))
for _, f := range t.Results().FieldSlice() {
p.markType(f.Type)
}
case TINTER:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) {
p.markType(f.Type)
}
}
}
}
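
A hedged illustration of the walk, with hypothetical declarations that are not part of this CL:

    // Given
    //
    //	type Conn struct{ buf []byte }
    //	func (c *Conn) Read() (n int, err error) { ... }
    //
    // markType(Conn) marks the exported method Read (Conn is a named
    // non-interface type), floods Read's inline body via inlFlood, and
    // recurses into the result types; Read's parameter types are skipped
    // on purpose, since callers must already be able to construct them.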
// ----------------------------------------------------------------------------
// Export format
// Tags. Must be < 0.
const (
// Objects
packageTag = -(iota + 1)
constTag
typeTag
varTag
funcTag
endTag
// Types
namedTag
arrayTag
sliceTag
dddTag
structTag
pointerTag
signatureTag
interfaceTag
mapTag
chanTag
// Values
falseTag
trueTag
int64Tag
floatTag
fractionTag // not used by gc
complexTag
stringTag
nilTag
unknownTag // not used by gc (only appears in packages with errors)
// Type aliases
aliasTag
)
var predecl []*types.Type // initialized lazily
func predeclared() []*types.Type {
if predecl == nil {
// Initialize lazily so that all elements have
// themselves been initialized before this list is built.
predecl = []*types.Type{
// basic types
types.Types[TBOOL],
types.Types[TINT],
types.Types[TINT8],
types.Types[TINT16],
types.Types[TINT32],
types.Types[TINT64],
types.Types[TUINT],
types.Types[TUINT8],
types.Types[TUINT16],
types.Types[TUINT32],
types.Types[TUINT64],
types.Types[TUINTPTR],
types.Types[TFLOAT32],
types.Types[TFLOAT64],
types.Types[TCOMPLEX64],
types.Types[TCOMPLEX128],
types.Types[TSTRING],
// basic type aliases
types.Bytetype,
types.Runetype,
// error
types.Errortype,
// untyped types
types.UntypedBool,
types.UntypedInt,
types.UntypedRune,
types.UntypedFloat,
types.UntypedComplex,
types.UntypedString,
types.Types[TNIL],
// package unsafe
types.Types[TUNSAFEPTR],
// invalid type (package contains errors)
types.Types[Txxx],
// any type, for builtin export data
types.Types[TANY],
}
}
return predecl
}

View file

@ -1,24 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/internal/src"
)
// numImport tracks how often a package with a given name is imported.
// It is used to provide a better error message (by using the package
// path to disambiguate) when a package name that is shared by several
// imported packages appears in an error message.
var numImport = make(map[string]int)
func npos(pos src.XPos, n *Node) *Node {
n.Pos = pos
return n
}
func builtinCall(op Op) *Node {
return nod(OCALL, mkname(builtinpkg.Lookup(goopnames[op])), nil)
}

View file

@ -6,8 +6,11 @@
package gc
import "runtime"
import (
"cmd/compile/internal/base"
"runtime"
)
func startMutexProfiling() {
Fatalf("mutex profiling unavailable in version %v", runtime.Version())
base.Fatalf("mutex profiling unavailable in version %v", runtime.Version())
}

View file

@ -1,340 +0,0 @@
// Code generated by mkbuiltin.go. DO NOT EDIT.
package gc
import "cmd/compile/internal/types"
var runtimeDecls = [...]struct {
name string
tag int
typ int
}{
{"newobject", funcTag, 4},
{"mallocgc", funcTag, 8},
{"panicdivide", funcTag, 9},
{"panicshift", funcTag, 9},
{"panicmakeslicelen", funcTag, 9},
{"panicmakeslicecap", funcTag, 9},
{"throwinit", funcTag, 9},
{"panicwrap", funcTag, 9},
{"gopanic", funcTag, 11},
{"gorecover", funcTag, 14},
{"goschedguarded", funcTag, 9},
{"goPanicIndex", funcTag, 16},
{"goPanicIndexU", funcTag, 18},
{"goPanicSliceAlen", funcTag, 16},
{"goPanicSliceAlenU", funcTag, 18},
{"goPanicSliceAcap", funcTag, 16},
{"goPanicSliceAcapU", funcTag, 18},
{"goPanicSliceB", funcTag, 16},
{"goPanicSliceBU", funcTag, 18},
{"goPanicSlice3Alen", funcTag, 16},
{"goPanicSlice3AlenU", funcTag, 18},
{"goPanicSlice3Acap", funcTag, 16},
{"goPanicSlice3AcapU", funcTag, 18},
{"goPanicSlice3B", funcTag, 16},
{"goPanicSlice3BU", funcTag, 18},
{"goPanicSlice3C", funcTag, 16},
{"goPanicSlice3CU", funcTag, 18},
{"printbool", funcTag, 19},
{"printfloat", funcTag, 21},
{"printint", funcTag, 23},
{"printhex", funcTag, 25},
{"printuint", funcTag, 25},
{"printcomplex", funcTag, 27},
{"printstring", funcTag, 29},
{"printpointer", funcTag, 30},
{"printuintptr", funcTag, 31},
{"printiface", funcTag, 30},
{"printeface", funcTag, 30},
{"printslice", funcTag, 30},
{"printnl", funcTag, 9},
{"printsp", funcTag, 9},
{"printlock", funcTag, 9},
{"printunlock", funcTag, 9},
{"concatstring2", funcTag, 34},
{"concatstring3", funcTag, 35},
{"concatstring4", funcTag, 36},
{"concatstring5", funcTag, 37},
{"concatstrings", funcTag, 39},
{"cmpstring", funcTag, 40},
{"intstring", funcTag, 43},
{"slicebytetostring", funcTag, 44},
{"slicebytetostringtmp", funcTag, 45},
{"slicerunetostring", funcTag, 48},
{"stringtoslicebyte", funcTag, 50},
{"stringtoslicerune", funcTag, 53},
{"slicecopy", funcTag, 54},
{"decoderune", funcTag, 55},
{"countrunes", funcTag, 56},
{"convI2I", funcTag, 57},
{"convT16", funcTag, 58},
{"convT32", funcTag, 58},
{"convT64", funcTag, 58},
{"convTstring", funcTag, 58},
{"convTslice", funcTag, 58},
{"convT2E", funcTag, 59},
{"convT2Enoptr", funcTag, 59},
{"convT2I", funcTag, 59},
{"convT2Inoptr", funcTag, 59},
{"assertE2I", funcTag, 57},
{"assertE2I2", funcTag, 60},
{"assertI2I", funcTag, 57},
{"assertI2I2", funcTag, 60},
{"panicdottypeE", funcTag, 61},
{"panicdottypeI", funcTag, 61},
{"panicnildottype", funcTag, 62},
{"ifaceeq", funcTag, 64},
{"efaceeq", funcTag, 64},
{"fastrand", funcTag, 66},
{"makemap64", funcTag, 68},
{"makemap", funcTag, 69},
{"makemap_small", funcTag, 70},
{"mapaccess1", funcTag, 71},
{"mapaccess1_fast32", funcTag, 72},
{"mapaccess1_fast64", funcTag, 72},
{"mapaccess1_faststr", funcTag, 72},
{"mapaccess1_fat", funcTag, 73},
{"mapaccess2", funcTag, 74},
{"mapaccess2_fast32", funcTag, 75},
{"mapaccess2_fast64", funcTag, 75},
{"mapaccess2_faststr", funcTag, 75},
{"mapaccess2_fat", funcTag, 76},
{"mapassign", funcTag, 71},
{"mapassign_fast32", funcTag, 72},
{"mapassign_fast32ptr", funcTag, 72},
{"mapassign_fast64", funcTag, 72},
{"mapassign_fast64ptr", funcTag, 72},
{"mapassign_faststr", funcTag, 72},
{"mapiterinit", funcTag, 77},
{"mapdelete", funcTag, 77},
{"mapdelete_fast32", funcTag, 78},
{"mapdelete_fast64", funcTag, 78},
{"mapdelete_faststr", funcTag, 78},
{"mapiternext", funcTag, 79},
{"mapclear", funcTag, 80},
{"makechan64", funcTag, 82},
{"makechan", funcTag, 83},
{"chanrecv1", funcTag, 85},
{"chanrecv2", funcTag, 86},
{"chansend1", funcTag, 88},
{"closechan", funcTag, 30},
{"writeBarrier", varTag, 90},
{"typedmemmove", funcTag, 91},
{"typedmemclr", funcTag, 92},
{"typedslicecopy", funcTag, 93},
{"selectnbsend", funcTag, 94},
{"selectnbrecv", funcTag, 95},
{"selectnbrecv2", funcTag, 97},
{"selectsetpc", funcTag, 98},
{"selectgo", funcTag, 99},
{"block", funcTag, 9},
{"makeslice", funcTag, 100},
{"makeslice64", funcTag, 101},
{"makeslicecopy", funcTag, 102},
{"growslice", funcTag, 104},
{"memmove", funcTag, 105},
{"memclrNoHeapPointers", funcTag, 106},
{"memclrHasPointers", funcTag, 106},
{"memequal", funcTag, 107},
{"memequal0", funcTag, 108},
{"memequal8", funcTag, 108},
{"memequal16", funcTag, 108},
{"memequal32", funcTag, 108},
{"memequal64", funcTag, 108},
{"memequal128", funcTag, 108},
{"f32equal", funcTag, 109},
{"f64equal", funcTag, 109},
{"c64equal", funcTag, 109},
{"c128equal", funcTag, 109},
{"strequal", funcTag, 109},
{"interequal", funcTag, 109},
{"nilinterequal", funcTag, 109},
{"memhash", funcTag, 110},
{"memhash0", funcTag, 111},
{"memhash8", funcTag, 111},
{"memhash16", funcTag, 111},
{"memhash32", funcTag, 111},
{"memhash64", funcTag, 111},
{"memhash128", funcTag, 111},
{"f32hash", funcTag, 111},
{"f64hash", funcTag, 111},
{"c64hash", funcTag, 111},
{"c128hash", funcTag, 111},
{"strhash", funcTag, 111},
{"interhash", funcTag, 111},
{"nilinterhash", funcTag, 111},
{"int64div", funcTag, 112},
{"uint64div", funcTag, 113},
{"int64mod", funcTag, 112},
{"uint64mod", funcTag, 113},
{"float64toint64", funcTag, 114},
{"float64touint64", funcTag, 115},
{"float64touint32", funcTag, 116},
{"int64tofloat64", funcTag, 117},
{"uint64tofloat64", funcTag, 118},
{"uint32tofloat64", funcTag, 119},
{"complex128div", funcTag, 120},
{"racefuncenter", funcTag, 31},
{"racefuncenterfp", funcTag, 9},
{"racefuncexit", funcTag, 9},
{"raceread", funcTag, 31},
{"racewrite", funcTag, 31},
{"racereadrange", funcTag, 121},
{"racewriterange", funcTag, 121},
{"msanread", funcTag, 121},
{"msanwrite", funcTag, 121},
{"msanmove", funcTag, 122},
{"checkptrAlignment", funcTag, 123},
{"checkptrArithmetic", funcTag, 125},
{"libfuzzerTraceCmp1", funcTag, 127},
{"libfuzzerTraceCmp2", funcTag, 129},
{"libfuzzerTraceCmp4", funcTag, 130},
{"libfuzzerTraceCmp8", funcTag, 131},
{"libfuzzerTraceConstCmp1", funcTag, 127},
{"libfuzzerTraceConstCmp2", funcTag, 129},
{"libfuzzerTraceConstCmp4", funcTag, 130},
{"libfuzzerTraceConstCmp8", funcTag, 131},
{"x86HasPOPCNT", varTag, 6},
{"x86HasSSE41", varTag, 6},
{"x86HasFMA", varTag, 6},
{"armHasVFPv4", varTag, 6},
{"arm64HasATOMICS", varTag, 6},
}
func runtimeTypes() []*types.Type {
var typs [132]*types.Type
typs[0] = types.Bytetype
typs[1] = types.NewPtr(typs[0])
typs[2] = types.Types[TANY]
typs[3] = types.NewPtr(typs[2])
typs[4] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[3])})
typs[5] = types.Types[TUINTPTR]
typs[6] = types.Types[TBOOL]
typs[7] = types.Types[TUNSAFEPTR]
typs[8] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*Node{anonfield(typs[7])})
typs[9] = functype(nil, nil, nil)
typs[10] = types.Types[TINTER]
typs[11] = functype(nil, []*Node{anonfield(typs[10])}, nil)
typs[12] = types.Types[TINT32]
typs[13] = types.NewPtr(typs[12])
typs[14] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[10])})
typs[15] = types.Types[TINT]
typs[16] = functype(nil, []*Node{anonfield(typs[15]), anonfield(typs[15])}, nil)
typs[17] = types.Types[TUINT]
typs[18] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[15])}, nil)
typs[19] = functype(nil, []*Node{anonfield(typs[6])}, nil)
typs[20] = types.Types[TFLOAT64]
typs[21] = functype(nil, []*Node{anonfield(typs[20])}, nil)
typs[22] = types.Types[TINT64]
typs[23] = functype(nil, []*Node{anonfield(typs[22])}, nil)
typs[24] = types.Types[TUINT64]
typs[25] = functype(nil, []*Node{anonfield(typs[24])}, nil)
typs[26] = types.Types[TCOMPLEX128]
typs[27] = functype(nil, []*Node{anonfield(typs[26])}, nil)
typs[28] = types.Types[TSTRING]
typs[29] = functype(nil, []*Node{anonfield(typs[28])}, nil)
typs[30] = functype(nil, []*Node{anonfield(typs[2])}, nil)
typs[31] = functype(nil, []*Node{anonfield(typs[5])}, nil)
typs[32] = types.NewArray(typs[0], 32)
typs[33] = types.NewPtr(typs[32])
typs[34] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[35] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[36] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[37] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[38] = types.NewSlice(typs[28])
typs[39] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[38])}, []*Node{anonfield(typs[28])})
typs[40] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
typs[41] = types.NewArray(typs[0], 4)
typs[42] = types.NewPtr(typs[41])
typs[43] = functype(nil, []*Node{anonfield(typs[42]), anonfield(typs[22])}, []*Node{anonfield(typs[28])})
typs[44] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
typs[45] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
typs[46] = types.Runetype
typs[47] = types.NewSlice(typs[46])
typs[48] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[47])}, []*Node{anonfield(typs[28])})
typs[49] = types.NewSlice(typs[0])
typs[50] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28])}, []*Node{anonfield(typs[49])})
typs[51] = types.NewArray(typs[46], 32)
typs[52] = types.NewPtr(typs[51])
typs[53] = functype(nil, []*Node{anonfield(typs[52]), anonfield(typs[28])}, []*Node{anonfield(typs[47])})
typs[54] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
typs[55] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[46]), anonfield(typs[15])})
typs[56] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
typs[57] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
typs[58] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
typs[62] = functype(nil, []*Node{anonfield(typs[1])}, nil)
typs[63] = types.NewPtr(typs[5])
typs[64] = functype(nil, []*Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
typs[65] = types.Types[TUINT32]
typs[66] = functype(nil, nil, []*Node{anonfield(typs[65])})
typs[67] = types.NewMap(typs[2], typs[2])
typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
typs[70] = functype(nil, nil, []*Node{anonfield(typs[67])})
typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
typs[79] = functype(nil, []*Node{anonfield(typs[3])}, nil)
typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
typs[81] = types.NewChan(typs[2], types.Cboth)
typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[81])})
typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[81])})
typs[84] = types.NewChan(typs[2], types.Crecv)
typs[85] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
typs[86] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[87] = types.NewChan(typs[2], types.Csend)
typs[88] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
typs[89] = types.NewArray(typs[0], 3)
typs[90] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
typs[94] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
typs[96] = types.NewPtr(typs[6])
typs[97] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
typs[98] = functype(nil, []*Node{anonfield(typs[63])}, nil)
typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
typs[102] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
typs[103] = types.NewSlice(typs[2])
typs[104] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*Node{anonfield(typs[103])})
typs[105] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
typs[106] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])})
typs[108] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
typs[111] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
typs[112] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])})
typs[113] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
typs[116] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[65])})
typs[117] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
typs[118] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
typs[119] = functype(nil, []*Node{anonfield(typs[65])}, []*Node{anonfield(typs[20])})
typs[120] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
typs[121] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
typs[122] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5]), anonfield(typs[5])}, nil)
typs[123] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
typs[124] = types.NewSlice(typs[7])
typs[125] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[124])}, nil)
typs[126] = types.Types[TUINT8]
typs[127] = functype(nil, []*Node{anonfield(typs[126]), anonfield(typs[126])}, nil)
typs[128] = types.Types[TUINT16]
typs[129] = functype(nil, []*Node{anonfield(typs[128]), anonfield(typs[128])}, nil)
typs[130] = functype(nil, []*Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
typs[131] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
return typs[:]
}

View file

@ -1,278 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"math/bits"
)
const (
wordBits = 32
wordMask = wordBits - 1
wordShift = 5
)
// A bvec is a bit vector.
type bvec struct {
n int32 // number of bits in vector
b []uint32 // words holding bits
}
func bvalloc(n int32) bvec {
nword := (n + wordBits - 1) / wordBits
return bvec{n, make([]uint32, nword)}
}
type bulkBvec struct {
words []uint32
nbit int32
nword int32
}
func bvbulkalloc(nbit int32, count int32) bulkBvec {
nword := (nbit + wordBits - 1) / wordBits
size := int64(nword) * int64(count)
if int64(int32(size*4)) != size*4 {
Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
}
return bulkBvec{
words: make([]uint32, size),
nbit: nbit,
nword: nword,
}
}
func (b *bulkBvec) next() bvec {
out := bvec{b.nbit, b.words[:b.nword]}
b.words = b.words[b.nword:]
return out
}
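
bvbulkalloc amortizes allocation: one backing []uint32 is carved into count fixed-size vectors by successive next calls, so liveness analysis avoids a per-bitmap allocation. The pattern in miniature:

    package main

    import "fmt"

    func main() {
        const nbit, count = 40, 3
        nword := (nbit + 31) / 32            // 2 words per vector
        words := make([]uint32, nword*count) // single allocation
        for i := 0; i < count; i++ {
            bv := words[:nword] // what next() hands out
            words = words[nword:]
            fmt.Println(i, len(bv)) // each vector sees its own 2 words
        }
    }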
func (bv1 bvec) Eq(bv2 bvec) bool {
if bv1.n != bv2.n {
Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
}
for i, x := range bv1.b {
if x != bv2.b[i] {
return false
}
}
return true
}
func (dst bvec) Copy(src bvec) {
copy(dst.b, src.b)
}
func (bv bvec) Get(i int32) bool {
if i < 0 || i >= bv.n {
Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
return bv.b[i>>wordShift]&mask != 0
}
func (bv bvec) Set(i int32) {
if i < 0 || i >= bv.n {
Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
bv.b[i/wordBits] |= mask
}
func (bv bvec) Unset(i int32) {
if i < 0 || i >= bv.n {
Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
bv.b[i/wordBits] &^= mask
}
// Next returns the smallest index >= i for which bv.Get(i) is true.
// If there is no such index, Next returns -1.
func (bv bvec) Next(i int32) int32 {
if i >= bv.n {
return -1
}
// Jump i ahead to next word with bits.
if bv.b[i>>wordShift]>>uint(i&wordMask) == 0 {
i &^= wordMask
i += wordBits
for i < bv.n && bv.b[i>>wordShift] == 0 {
i += wordBits
}
}
if i >= bv.n {
return -1
}
// Find 1 bit.
w := bv.b[i>>wordShift] >> uint(i&wordMask)
i += int32(bits.TrailingZeros32(w))
return i
}
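
The heart of Next is the shift-and-count trick: discard the bits below i, then let TrailingZeros32 jump straight to the next set bit. On a single word:

    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        w := uint32(0b10010100) // bits 2, 4, and 7 are set
        for i := int32(0); ; i++ {
            rest := w >> uint(i)
            if rest == 0 {
                break
            }
            i += int32(bits.TrailingZeros32(rest))
            fmt.Println(i) // prints 2, then 4, then 7
        }
    }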
func (bv bvec) IsEmpty() bool {
for _, x := range bv.b {
if x != 0 {
return false
}
}
return true
}
func (bv bvec) Not() {
for i, x := range bv.b {
bv.b[i] = ^x
}
}
// union
func (dst bvec) Or(src1, src2 bvec) {
if len(src1.b) == 0 {
return
}
_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
for i, x := range src1.b {
dst.b[i] = x | src2.b[i]
}
}
// intersection
func (dst bvec) And(src1, src2 bvec) {
if len(src1.b) == 0 {
return
}
_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
for i, x := range src1.b {
dst.b[i] = x & src2.b[i]
}
}
// difference
func (dst bvec) AndNot(src1, src2 bvec) {
if len(src1.b) == 0 {
return
}
_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
for i, x := range src1.b {
dst.b[i] = x &^ src2.b[i]
}
}
func (bv bvec) String() string {
s := make([]byte, 2+bv.n)
copy(s, "#*")
for i := int32(0); i < bv.n; i++ {
ch := byte('0')
if bv.Get(i) {
ch = '1'
}
s[2+i] = ch
}
return string(s)
}
func (bv bvec) Clear() {
for i := range bv.b {
bv.b[i] = 0
}
}
// FNV-1 hash function constants.
const (
H0 = 2166136261
Hp = 16777619
)
func hashbitmap(h uint32, bv bvec) uint32 {
n := int((bv.n + 31) / 32)
for i := 0; i < n; i++ {
w := bv.b[i]
h = (h * Hp) ^ (w & 0xff)
h = (h * Hp) ^ ((w >> 8) & 0xff)
h = (h * Hp) ^ ((w >> 16) & 0xff)
h = (h * Hp) ^ ((w >> 24) & 0xff)
}
return h
}
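
hashbitmap is FNV-1 applied to each 32-bit word one byte at a time, least significant byte first; over a flat byte slice the loop collapses to the textbook form with the same constants (a sketch, not compiler code):

    package main

    import "fmt"

    const (
        h0 = 2166136261 // FNV-1 offset basis
        hp = 16777619   // FNV-1 prime
    )

    func fnv1(h uint32, data []byte) uint32 {
        for _, b := range data {
            h = (h * hp) ^ uint32(b) // multiply, then xor: FNV-1
        }
        return h
    }

    func main() {
        fmt.Printf("%#x\n", fnv1(h0, []byte("liveness bitmap")))
    }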
// bvecSet is a set of bvecs, in initial insertion order.
type bvecSet struct {
index []int // hash -> uniq index. -1 indicates empty slot.
uniq []bvec // unique bvecs, in insertion order
}
func (m *bvecSet) grow() {
// Allocate new index.
n := len(m.index) * 2
if n == 0 {
n = 32
}
newIndex := make([]int, n)
for i := range newIndex {
newIndex[i] = -1
}
// Rehash into newIndex.
for i, bv := range m.uniq {
h := hashbitmap(H0, bv) % uint32(len(newIndex))
for {
j := newIndex[h]
if j < 0 {
newIndex[h] = i
break
}
h++
if h == uint32(len(newIndex)) {
h = 0
}
}
}
m.index = newIndex
}
// add adds bv to the set and returns its index in the slice returned
// by extractUniqe. The caller must not modify bv after this.
func (m *bvecSet) add(bv bvec) int {
if len(m.uniq)*4 >= len(m.index) {
m.grow()
}
index := m.index
h := hashbitmap(H0, bv) % uint32(len(index))
for {
j := index[h]
if j < 0 {
// New bvec.
index[h] = len(m.uniq)
m.uniq = append(m.uniq, bv)
return len(m.uniq) - 1
}
jlive := m.uniq[j]
if bv.Eq(jlive) {
// Existing bvec.
return j
}
h++
if h == uint32(len(index)) {
h = 0
}
}
}
// extractUniqe returns the slice of unique bit vectors in m, as
// indexed by the result of bvecSet.add.
func (m *bvecSet) extractUniqe() []bvec {
return m.uniq
}
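
Taken together, add and extractUniqe implement interning: equal bit vectors collapse to one stable small index, and the unique values come back in first-insertion order. The behavior in miniature, with strings standing in for bvecs:

    package main

    import "fmt"

    func main() {
        var uniq []string
        index := map[string]int{} // plays the role of the open-addressed table
        add := func(s string) int {
            if j, ok := index[s]; ok {
                return j // existing value: reuse its index
            }
            index[s] = len(uniq)
            uniq = append(uniq, s)
            return len(uniq) - 1
        }
        fmt.Println(add("a"), add("b"), add("a")) // 0 1 0
        fmt.Println(uniq)                         // [a b]
    }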

View file

@ -1,594 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/syntax"
"cmd/compile/internal/types"
"fmt"
)
func (p *noder) funcLit(expr *syntax.FuncLit) *Node {
xtype := p.typeExpr(expr.Type)
ntype := p.typeExpr(expr.Type)
xfunc := p.nod(expr, ODCLFUNC, nil, nil)
xfunc.Func.SetIsHiddenClosure(Curfn != nil)
xfunc.Func.Nname = newfuncnamel(p.pos(expr), nblank.Sym) // filled in by typecheckclosure
xfunc.Func.Nname.Name.Param.Ntype = xtype
xfunc.Func.Nname.Name.Defn = xfunc
clo := p.nod(expr, OCLOSURE, nil, nil)
clo.Func.Ntype = ntype
xfunc.Func.Closure = clo
clo.Func.Closure = xfunc
p.funcBody(xfunc, expr.Body)
// closure-specific variables are hanging off the
// ordinary ones in the symbol table; see oldname.
// unhook them.
// make the list of pointers for the closure call.
for _, v := range xfunc.Func.Cvars.Slice() {
// Unlink from v1; see comment in syntax.go type Param for these fields.
v1 := v.Name.Defn
v1.Name.Param.Innermost = v.Name.Param.Outer
// If the closure usage of v is not dense,
// we need to make it dense; now that we're out
// of the function in which v appeared,
// look up v.Sym in the enclosing function
// and keep it around for use in the compiled code.
//
// That is, suppose we just finished parsing the innermost
// closure f4 in this code:
//
// func f() {
// v := 1
// func() { // f2
// use(v)
// func() { // f3
// func() { // f4
// use(v)
// }()
// }()
// }()
// }
//
// At this point v.Outer is f2's v; there is no f3's v.
// To construct the closure f4 from within f3,
// we need to use f3's v and in this case we need to create f3's v.
// We are now in the context of f3, so calling oldname(v.Sym)
// obtains f3's v, creating it if necessary (as it is in the example).
//
// capturevars will decide whether to use v directly or &v.
v.Name.Param.Outer = oldname(v.Sym)
}
return clo
}
// typecheckclosure typechecks an OCLOSURE node. It also creates the named
// function associated with the closure.
// TODO: This creation of the named function should probably really be done in a
// separate pass from type-checking.
func typecheckclosure(clo *Node, top int) {
xfunc := clo.Func.Closure
// Set current associated iota value, so iota can be used inside
// function in ConstSpec, see issue #22344
if x := getIotaValue(); x >= 0 {
xfunc.SetIota(x)
}
clo.Func.Ntype = typecheck(clo.Func.Ntype, ctxType)
clo.Type = clo.Func.Ntype.Type
clo.Func.Top = top
// Do not typecheck xfunc twice, otherwise, we will end up pushing
// xfunc to xtop multiple times, causing initLSym called twice.
// See #30709
if xfunc.Typecheck() == 1 {
return
}
for _, ln := range xfunc.Func.Cvars.Slice() {
n := ln.Name.Defn
if !n.Name.Captured() {
n.Name.SetCaptured(true)
if n.Name.Decldepth == 0 {
Fatalf("typecheckclosure: var %S does not have decldepth assigned", n)
}
// Ignore assignments to the variable in straightline code
// preceding the first capturing by a closure.
if n.Name.Decldepth == decldepth {
n.Name.SetAssigned(false)
}
}
}
xfunc.Func.Nname.Sym = closurename(Curfn)
setNodeNameFunc(xfunc.Func.Nname)
xfunc = typecheck(xfunc, ctxStmt)
// Type check the body now, but only if we're inside a function.
// At top level (in a variable initialization: curfn==nil) we're not
// ready to type check code yet; we'll check it later, because the
// underlying closure function we create is added to xtop.
if Curfn != nil && clo.Type != nil {
oldfn := Curfn
Curfn = xfunc
olddd := decldepth
decldepth = 1
typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
decldepth = olddd
Curfn = oldfn
}
xtop = append(xtop, xfunc)
}
// globClosgen is like Func.Closgen, but for the global scope.
var globClosgen int
// closurename generates a new unique name for a closure within
// outerfunc.
func closurename(outerfunc *Node) *types.Sym {
outer := "glob."
prefix := "func"
gen := &globClosgen
if outerfunc != nil {
if outerfunc.Func.Closure != nil {
prefix = ""
}
outer = outerfunc.funcname()
// There may be multiple functions named "_". In those
// cases, we can't use their individual Closgens as it
// would lead to name clashes.
if !outerfunc.Func.Nname.isBlank() {
gen = &outerfunc.Func.Closgen
}
}
*gen++
return lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
}
// capturevarscomplete is set to true when the capturevars phase is done.
var capturevarscomplete bool
// capturevars is called in a separate phase after all typechecking is done.
// It decides whether each variable captured by a closure should be captured
// by value or by reference.
// We use value capturing for values <= 128 bytes that are never reassigned
// after capturing (effectively constant).
func capturevars(xfunc *Node) {
lno := lineno
lineno = xfunc.Pos
clo := xfunc.Func.Closure
cvars := xfunc.Func.Cvars.Slice()
out := cvars[:0]
for _, v := range cvars {
if v.Type == nil {
// If v.Type is nil, it means v looked like it
// was going to be used in the closure, but
// isn't. This happens in struct literals like
// s{f: x} where we can't distinguish whether
// f is a field identifier or expression until
// resolving s.
continue
}
out = append(out, v)
// type check the & of closed variables outside the closure,
// so that the outer frame also grabs them and knows they escape.
dowidth(v.Type)
outer := v.Name.Param.Outer
outermost := v.Name.Defn
// out parameters will be assigned to implicitly upon return.
if outermost.Class() != PPARAMOUT && !outermost.Name.Addrtaken() && !outermost.Name.Assigned() && v.Type.Width <= 128 {
v.Name.SetByval(true)
} else {
outermost.Name.SetAddrtaken(true)
outer = nod(OADDR, outer, nil)
}
if Debug.m > 1 {
var name *types.Sym
if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil {
name = v.Name.Curfn.Func.Nname.Sym
}
how := "ref"
if v.Name.Byval() {
how = "value"
}
Warnl(v.Pos, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Name.Addrtaken(), outermost.Name.Assigned(), int32(v.Type.Width))
}
outer = typecheck(outer, ctxExpr)
clo.Func.Enter.Append(outer)
}
xfunc.Func.Cvars.Set(out)
lineno = lno
}
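// Concretely: a captured variable at most 128 bytes wide that is never
// reassigned and never has its address taken is copied into the closure;
// everything else is captured by reference. An illustrative example (the
// annotations restate the rule, they are not compiler output):
package main
func main() {
	byval := 1       // small and never reassigned after capture: by value
	byref := 0       // reassigned inside the closure: by reference
	big := [64]int{} // 512 bytes > 128: by reference
	f := func() int {
		byref++
		return byval + byref + big[0]
	}
	println(f())
}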
// transformclosure is called in a separate phase after escape analysis.
// It transforms closure bodies to properly reference captured variables.
func transformclosure(xfunc *Node) {
lno := lineno
lineno = xfunc.Pos
clo := xfunc.Func.Closure
if clo.Func.Top&ctxCallee != 0 {
// If the closure is directly called, we transform it to a plain function call
// with variables passed as args. This avoids allocation of a closure object.
// Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
// will complete the transformation later.
// For illustration, the following closure:
// func(a int) {
// println(byval)
// byref++
// }(42)
// becomes:
// func(byval int, &byref *int, a int) {
// println(byval)
// (*&byref)++
// }(byval, &byref, 42)
// f is ONAME of the actual function.
f := xfunc.Func.Nname
// We are going to insert captured variables before input args.
var params []*types.Field
var decls []*Node
for _, v := range xfunc.Func.Cvars.Slice() {
if !v.Name.Byval() {
// If v of type T is captured by reference,
// we introduce function param &v *T
// and v remains PAUTOHEAP with &v heapaddr
// (accesses will implicitly deref &v).
addr := newname(lookup("&" + v.Sym.Name))
addr.Type = types.NewPtr(v.Type)
v.Name.Param.Heapaddr = addr
v = addr
}
v.SetClass(PPARAM)
decls = append(decls, v)
fld := types.NewField()
fld.Nname = asTypesNode(v)
fld.Type = v.Type
fld.Sym = v.Sym
params = append(params, fld)
}
if len(params) > 0 {
// Prepend params and decls.
f.Type.Params().SetFields(append(params, f.Type.Params().FieldSlice()...))
xfunc.Func.Dcl = append(decls, xfunc.Func.Dcl...)
}
dowidth(f.Type)
xfunc.Type = f.Type // update type of ODCLFUNC
} else {
// The closure is not called, so it is going to stay as closure.
var body []*Node
offset := int64(Widthptr)
for _, v := range xfunc.Func.Cvars.Slice() {
// cv refers to the field inside of closure OSTRUCTLIT.
cv := nod(OCLOSUREVAR, nil, nil)
cv.Type = v.Type
if !v.Name.Byval() {
cv.Type = types.NewPtr(v.Type)
}
offset = Rnd(offset, int64(cv.Type.Align))
cv.Xoffset = offset
offset += cv.Type.Width
if v.Name.Byval() && v.Type.Width <= int64(2*Widthptr) {
// If it is a small variable captured by value, downgrade it to PAUTO.
v.SetClass(PAUTO)
xfunc.Func.Dcl = append(xfunc.Func.Dcl, v)
body = append(body, nod(OAS, v, cv))
} else {
// Declare variable holding addresses taken from closure
// and initialize in entry prologue.
addr := newname(lookup("&" + v.Sym.Name))
addr.Type = types.NewPtr(v.Type)
addr.SetClass(PAUTO)
addr.Name.SetUsed(true)
addr.Name.Curfn = xfunc
xfunc.Func.Dcl = append(xfunc.Func.Dcl, addr)
v.Name.Param.Heapaddr = addr
if v.Name.Byval() {
cv = nod(OADDR, cv, nil)
}
body = append(body, nod(OAS, addr, cv))
}
}
if len(body) > 0 {
typecheckslice(body, ctxStmt)
xfunc.Func.Enter.Set(body)
xfunc.Func.SetNeedctxt(true)
}
}
lineno = lno
}
// hasemptycvars reports whether closure clo has an
// empty list of captured vars.
func hasemptycvars(clo *Node) bool {
xfunc := clo.Func.Closure
return xfunc.Func.Cvars.Len() == 0
}
// closuredebugruntimecheck applies boilerplate checks for debug flags
// and compiling runtime
func closuredebugruntimecheck(clo *Node) {
if Debug_closure > 0 {
xfunc := clo.Func.Closure
if clo.Esc == EscHeap {
Warnl(clo.Pos, "heap closure, captured vars = %v", xfunc.Func.Cvars)
} else {
Warnl(clo.Pos, "stack closure, captured vars = %v", xfunc.Func.Cvars)
}
}
if compiling_runtime && clo.Esc == EscHeap {
yyerrorl(clo.Pos, "heap-allocated closure, not allowed in runtime")
}
}
// closureType returns the struct type used to hold all the information
// needed in the closure for clo (clo must be an OCLOSURE node).
// The address of a variable of the returned type can be cast to a func.
func closureType(clo *Node) *types.Type {
// Create closure in the form of a composite literal.
// supposing the closure captures an int i and a string s
// and has one float64 argument and no results,
// the generated code looks like:
//
// clos = &struct{.F uintptr; i *int; s *string}{func.1, &i, &s}
//
// The use of the struct provides type information to the garbage
// collector so that it can walk the closure. We could use (in this case)
// [3]unsafe.Pointer instead, but that would leave the gc in the dark.
// The information appears in the binary in the form of type descriptors;
// the struct is unnamed so that closures in multiple packages with the
// same struct type can share the descriptor.
fields := []*Node{
namedfield(".F", types.Types[TUINTPTR]),
}
for _, v := range clo.Func.Closure.Func.Cvars.Slice() {
typ := v.Type
if !v.Name.Byval() {
typ = types.NewPtr(typ)
}
fields = append(fields, symfield(v.Sym, typ))
}
typ := tostruct(fields)
typ.SetNoalg(true)
return typ
}
func walkclosure(clo *Node, init *Nodes) *Node {
xfunc := clo.Func.Closure
// If no closure vars, don't bother wrapping.
if hasemptycvars(clo) {
if Debug_closure > 0 {
Warnl(clo.Pos, "closure converted to global")
}
return xfunc.Func.Nname
}
closuredebugruntimecheck(clo)
typ := closureType(clo)
clos := nod(OCOMPLIT, nil, typenod(typ))
clos.Esc = clo.Esc
clos.List.Set(append([]*Node{nod(OCFUNC, xfunc.Func.Nname, nil)}, clo.Func.Enter.Slice()...))
clos = nod(OADDR, clos, nil)
clos.Esc = clo.Esc
// Force type conversion from *struct to the func type.
clos = convnop(clos, clo.Type)
// non-escaping temp to use, if any.
if x := prealloc[clo]; x != nil {
if !types.Identical(typ, x.Type) {
panic("closure type does not match order's assigned type")
}
clos.Left.Right = x
delete(prealloc, clo)
}
return walkexpr(clos, init)
}
func typecheckpartialcall(fn *Node, sym *types.Sym) {
switch fn.Op {
case ODOTINTER, ODOTMETH:
break
default:
Fatalf("invalid typecheckpartialcall")
}
// Create top-level function.
xfunc := makepartialcall(fn, fn.Type, sym)
fn.Func = xfunc.Func
fn.Func.SetWrapper(true)
fn.Right = newname(sym)
fn.Op = OCALLPART
fn.Type = xfunc.Type
}
// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
// for partial calls.
func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
rcvrtype := fn.Left.Type
sym := methodSymSuffix(rcvrtype, meth, "-fm")
if sym.Uniq() {
return asNode(sym.Def)
}
sym.SetUniq(true)
savecurfn := Curfn
saveLineNo := lineno
Curfn = nil
// Set line number equal to the line number where the method is declared.
var m *types.Field
if lookdot0(meth, rcvrtype, &m, false) == 1 && m.Pos.IsKnown() {
lineno = m.Pos
}
// Note: !m.Pos.IsKnown() happens for method expressions where
// the method is implicitly declared. The Error method of the
// built-in error type is one such method. We leave the line
// number at the use of the method expression in this
// case. See issue 29389.
tfn := nod(OTFUNC, nil, nil)
tfn.List.Set(structargs(t0.Params(), true))
tfn.Rlist.Set(structargs(t0.Results(), false))
xfunc := dclfunc(sym, tfn)
xfunc.Func.SetDupok(true)
xfunc.Func.SetNeedctxt(true)
tfn.Type.SetPkg(t0.Pkg())
// Declare and initialize variable holding receiver.
cv := nod(OCLOSUREVAR, nil, nil)
cv.Type = rcvrtype
cv.Xoffset = Rnd(int64(Widthptr), int64(cv.Type.Align))
ptr := newname(lookup(".this"))
declare(ptr, PAUTO)
ptr.Name.SetUsed(true)
var body []*Node
if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
ptr.Type = rcvrtype
body = append(body, nod(OAS, ptr, cv))
} else {
ptr.Type = types.NewPtr(rcvrtype)
body = append(body, nod(OAS, ptr, nod(OADDR, cv, nil)))
}
call := nod(OCALL, nodSym(OXDOT, ptr, meth), nil)
call.List.Set(paramNnames(tfn.Type))
call.SetIsDDD(tfn.Type.IsVariadic())
if t0.NumResults() != 0 {
n := nod(ORETURN, nil, nil)
n.List.Set1(call)
call = n
}
body = append(body, call)
xfunc.Nbody.Set(body)
funcbody()
xfunc = typecheck(xfunc, ctxStmt)
// Need to typecheck the body of the just-generated wrapper.
// typecheckslice() requires that Curfn is set when processing an ORETURN.
Curfn = xfunc
typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
sym.Def = asTypesNode(xfunc)
xtop = append(xtop, xfunc)
Curfn = savecurfn
lineno = saveLineNo
return xfunc
}
// partialCallType returns the struct type used to hold all the information
// needed in the closure for n (n must be an OCALLPART node).
// The address of a variable of the returned type can be cast to a func.
func partialCallType(n *Node) *types.Type {
t := tostruct([]*Node{
namedfield("F", types.Types[TUINTPTR]),
namedfield("R", n.Left.Type),
})
t.SetNoalg(true)
return t
}
func walkpartialcall(n *Node, init *Nodes) *Node {
// Create closure in the form of a composite literal.
// For x.M with receiver (x) type T, the generated code looks like:
//
// clos = &struct{F uintptr; R T}{T.M·f, x}
//
// Like walkclosure above.
if n.Left.Type.IsInterface() {
// Trigger panic for method on nil interface now.
// Otherwise it happens in the wrapper and is confusing.
n.Left = cheapexpr(n.Left, init)
n.Left = walkexpr(n.Left, nil)
tab := nod(OITAB, n.Left, nil)
tab = typecheck(tab, ctxExpr)
c := nod(OCHECKNIL, tab, nil)
c.SetTypecheck(1)
init.Append(c)
}
typ := partialCallType(n)
clos := nod(OCOMPLIT, nil, typenod(typ))
clos.Esc = n.Esc
clos.List.Set2(nod(OCFUNC, n.Func.Nname, nil), n.Left)
clos = nod(OADDR, clos, nil)
clos.Esc = n.Esc
// Force type conversion from *struct to the func type.
clos = convnop(clos, n.Type)
// non-escaping temp to use, if any.
if x := prealloc[n]; x != nil {
if !types.Identical(typ, x.Type) {
panic("partial call type does not match order's assigned type")
}
clos.Left.Right = x
delete(prealloc, n)
}
return walkexpr(clos, init)
}
// callpartMethod returns the *types.Field representing the method
// referenced by method value n.
func callpartMethod(n *Node) *types.Field {
if n.Op != OCALLPART {
Fatalf("expected OCALLPART, got %v", n)
}
// TODO(mdempsky): Optimize this. If necessary,
// makepartialcall could save m for us somewhere.
var m *types.Field
if lookdot0(n.Right.Sym, n.Left.Type, &m, false) != 1 {
Fatalf("failed to find field for OCALLPART")
}
return m
}
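// For reference, the construct all of this serves is the method value:
// evaluating x.M as a value snapshots the receiver into a "-fm" wrapper
// closure. An illustrative example:
package main
import "fmt"
type T struct{ n int }
func (t T) M() int { return t.n }
func main() {
	x := T{41}
	f := x.M // method value: a T.M-fm wrapper closing over a copy of x
	x.n = 0  // f already holds its own receiver copy
	fmt.Println(f() + 1) // 42
}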

View file

@ -0,0 +1,147 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"internal/race"
"math/rand"
"sort"
"sync"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/liveness"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/compile/internal/walk"
)
// "Portable" code generation.
var (
compilequeue []*ir.Func // functions waiting to be compiled
)
func enqueueFunc(fn *ir.Func) {
if ir.CurFunc != nil {
base.FatalfAt(fn.Pos(), "enqueueFunc %v inside %v", fn, ir.CurFunc)
}
if ir.FuncName(fn) == "_" {
// Skip compiling blank functions.
// Frontend already reported any spec-mandated errors (#29870).
return
}
if clo := fn.OClosure; clo != nil && !ir.IsTrivialClosure(clo) {
return // we'll get this as part of its enclosing function
}
if len(fn.Body) == 0 {
// Initialize ABI wrappers if necessary.
ssagen.InitLSym(fn, false)
liveness.WriteFuncMap(fn)
return
}
errorsBefore := base.Errors()
todo := []*ir.Func{fn}
for len(todo) > 0 {
next := todo[len(todo)-1]
todo = todo[:len(todo)-1]
prepareFunc(next)
todo = append(todo, next.Closures...)
}
if base.Errors() > errorsBefore {
return
}
// Enqueue just fn itself. compileFunctions will handle
// scheduling compilation of its closures after it's done.
compilequeue = append(compilequeue, fn)
}
// prepareFunc handles any remaining frontend compilation tasks that
// aren't yet safe to perform concurrently.
func prepareFunc(fn *ir.Func) {
// Set up the function's LSym early to avoid data races with the assemblers.
// Do this before walk, as walk needs the LSym to set attributes/relocations
// (e.g. in MarkTypeUsedInInterface).
ssagen.InitLSym(fn, true)
// Calculate parameter offsets.
types.CalcSize(fn.Type())
typecheck.DeclContext = ir.PAUTO
ir.CurFunc = fn
walk.Walk(fn)
ir.CurFunc = nil // enforce no further uses of CurFunc
typecheck.DeclContext = ir.PEXTERN
}
// compileFunctions compiles all functions in compilequeue.
// It spawns up to base.Flag.LowerC workers to do the work
// and waits for them to complete.
func compileFunctions() {
if len(compilequeue) == 0 {
return
}
if race.Enabled {
// Randomize compilation order to try to shake out races.
tmp := make([]*ir.Func, len(compilequeue))
perm := rand.Perm(len(compilequeue))
for i, v := range perm {
tmp[v] = compilequeue[i]
}
copy(compilequeue, tmp)
} else {
// Compile the longest functions first,
// since they're most likely to be the slowest.
// This helps avoid stragglers.
sort.Slice(compilequeue, func(i, j int) bool {
return len(compilequeue[i].Body) > len(compilequeue[j].Body)
})
}
// We queue up a goroutine per function that needs to be
// compiled, but require each to grab an available worker ID
// before doing any substantial work, to limit parallelism.
workerIDs := make(chan int, base.Flag.LowerC)
for i := 0; i < base.Flag.LowerC; i++ {
workerIDs <- i
}
var wg sync.WaitGroup
var asyncCompile func(*ir.Func)
asyncCompile = func(fn *ir.Func) {
wg.Add(1)
go func() {
worker := <-workerIDs
ssagen.Compile(fn, worker)
workerIDs <- worker
// Done compiling fn. Schedule its closures for compilation.
for _, closure := range fn.Closures {
asyncCompile(closure)
}
wg.Done()
}()
}
types.CalcSizeDisabled = true // not safe to calculate sizes concurrently
base.Ctxt.InParallel = true
for _, fn := range compilequeue {
asyncCompile(fn)
}
compilequeue = nil
wg.Wait()
base.Ctxt.InParallel = false
types.CalcSizeDisabled = false
}
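// The worker-ID channel is a generic bounded-parallelism idiom: goroutines
// are cheap to queue, but each must claim one of c slots before doing real
// work. A standalone sketch of the same pattern; c stands in for
// base.Flag.LowerC and the job loop is illustrative.
package main
import (
	"fmt"
	"sync"
)
func main() {
	const c = 4
	ids := make(chan int, c)
	for i := 0; i < c; i++ {
		ids <- i
	}
	var wg sync.WaitGroup
	for job := 0; job < 10; job++ {
		wg.Add(1)
		go func(job int) {
			defer wg.Done()
			id := <-ids // block until a worker slot is free
			fmt.Println("worker", id, "job", job)
			ids <- id // return the slot
		}(job)
	}
	wg.Wait()
}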

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -1,256 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/syntax"
"cmd/compile/internal/types"
"cmd/internal/obj"
"encoding/json"
"io/ioutil"
"log"
"path"
"sort"
"strconv"
"strings"
)
var embedlist []*Node
var embedCfg struct {
Patterns map[string][]string
Files map[string]string
}
func readEmbedCfg(file string) {
data, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("-embedcfg: %v", err)
}
if err := json.Unmarshal(data, &embedCfg); err != nil {
log.Fatalf("%s: %v", file, err)
}
if embedCfg.Patterns == nil {
log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
}
if embedCfg.Files == nil {
log.Fatalf("%s: invalid embedcfg: missing Files", file)
}
}
const (
embedUnknown = iota
embedBytes
embedString
embedFiles
)
func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []PragmaEmbed) {
haveEmbed := false
for _, decl := range p.file.DeclList {
imp, ok := decl.(*syntax.ImportDecl)
if !ok {
// imports always come first
break
}
path, _ := strconv.Unquote(imp.Path.Value)
if path == "embed" {
haveEmbed = true
break
}
}
pos := embeds[0].Pos
if !haveEmbed {
p.yyerrorpos(pos, "invalid go:embed: missing import \"embed\"")
return
}
if len(names) > 1 {
p.yyerrorpos(pos, "go:embed cannot apply to multiple vars")
return
}
if len(exprs) > 0 {
p.yyerrorpos(pos, "go:embed cannot apply to var with initializer")
return
}
if typ == nil {
// Should not happen, since len(exprs) == 0 now.
p.yyerrorpos(pos, "go:embed cannot apply to var without type")
return
}
if dclcontext != PEXTERN {
p.yyerrorpos(pos, "go:embed cannot apply to var inside func")
return
}
var list []irEmbed
for _, e := range embeds {
list = append(list, irEmbed{Pos: p.makeXPos(e.Pos), Patterns: e.Patterns})
}
v := names[0]
v.Name.Param.SetEmbedList(list)
embedlist = append(embedlist, v)
}
func embedFileList(v *Node, kind int) []string {
// Build list of files to store.
have := make(map[string]bool)
var list []string
for _, e := range v.Name.Param.EmbedList() {
for _, pattern := range e.Patterns {
files, ok := embedCfg.Patterns[pattern]
if !ok {
yyerrorl(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
}
for _, file := range files {
if embedCfg.Files[file] == "" {
yyerrorl(e.Pos, "invalid go:embed: build system did not map file: %s", file)
continue
}
if !have[file] {
have[file] = true
list = append(list, file)
}
if kind == embedFiles {
for dir := path.Dir(file); dir != "." && !have[dir]; dir = path.Dir(dir) {
have[dir] = true
list = append(list, dir+"/")
}
}
}
}
}
sort.Slice(list, func(i, j int) bool {
return embedFileLess(list[i], list[j])
})
if kind == embedString || kind == embedBytes {
if len(list) > 1 {
yyerrorl(v.Pos, "invalid go:embed: multiple files for type %v", v.Type)
return nil
}
}
return list
}
// embedKind determines the kind of embedding variable.
func embedKind(typ *types.Type) int {
if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
return embedFiles
}
if typ.Etype == types.TSTRING {
return embedString
}
if typ.Etype == types.TSLICE && typ.Elem().Etype == types.TUINT8 {
return embedBytes
}
return embedUnknown
}
func embedFileNameSplit(name string) (dir, elem string, isDir bool) {
if name[len(name)-1] == '/' {
isDir = true
name = name[:len(name)-1]
}
i := len(name) - 1
for i >= 0 && name[i] != '/' {
i--
}
if i < 0 {
return ".", name, isDir
}
return name[:i], name[i+1:], isDir
}
// embedFileLess implements the sort order for a list of embedded files.
// See the comment inside ../../../../embed/embed.go's Files struct for rationale.
func embedFileLess(x, y string) bool {
xdir, xelem, _ := embedFileNameSplit(x)
ydir, yelem, _ := embedFileNameSplit(y)
return xdir < ydir || xdir == ydir && xelem < yelem
}
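// The effect is a directory-major order: names sort by (dir, elem) rather
// than by plain string comparison, so a directory's entries stay grouped.
// A runnable check; split mirrors embedFileNameSplit minus the dir flag.
package main
import (
	"fmt"
	"sort"
)
func split(name string) (dir, elem string) {
	if name[len(name)-1] == '/' {
		name = name[:len(name)-1]
	}
	i := len(name) - 1
	for i >= 0 && name[i] != '/' {
		i--
	}
	if i < 0 {
		return ".", name
	}
	return name[:i], name[i+1:]
}
func main() {
	list := []string{"a/b2", "a/b/c", "a.txt"}
	sort.Slice(list, func(i, j int) bool {
		xd, xe := split(list[i])
		yd, ye := split(list[j])
		return xd < yd || xd == yd && xe < ye
	})
	fmt.Println(list) // [a.txt a/b2 a/b/c], not plain order [a.txt a/b/c a/b2]
}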
func dumpembeds() {
for _, v := range embedlist {
initEmbed(v)
}
}
// initEmbed emits the init data for a //go:embed variable,
// which is either a string, a []byte, or an embed.FS.
func initEmbed(v *Node) {
commentPos := v.Name.Param.EmbedList()[0].Pos
if !langSupported(1, 16, localpkg) {
lno := lineno
lineno = commentPos
yyerrorv("go1.16", "go:embed")
lineno = lno
return
}
if embedCfg.Patterns == nil {
yyerrorl(commentPos, "invalid go:embed: build system did not supply embed configuration")
return
}
kind := embedKind(v.Type)
if kind == embedUnknown {
yyerrorl(v.Pos, "go:embed cannot apply to var of type %v", v.Type)
return
}
files := embedFileList(v, kind)
switch kind {
case embedString, embedBytes:
file := files[0]
fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], kind == embedString, nil)
if err != nil {
yyerrorl(v.Pos, "embed %s: %v", file, err)
}
sym := v.Sym.Linksym()
off := 0
off = dsymptr(sym, off, fsym, 0) // data string
off = duintptr(sym, off, uint64(size)) // len
if kind == embedBytes {
duintptr(sym, off, uint64(size)) // cap for slice
}
case embedFiles:
slicedata := Ctxt.Lookup(`"".` + v.Sym.Name + `.files`)
off := 0
// []files pointed at by Files
off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice
off = duintptr(slicedata, off, uint64(len(files)))
off = duintptr(slicedata, off, uint64(len(files)))
// embed/embed.go type file is:
// name string
// data string
// hash [16]byte
// Emit one of these per file in the set.
const hashSize = 16
hash := make([]byte, hashSize)
for _, file := range files {
off = dsymptr(slicedata, off, stringsym(v.Pos, file), 0) // file string
off = duintptr(slicedata, off, uint64(len(file)))
if strings.HasSuffix(file, "/") {
// entry for directory - no data
off = duintptr(slicedata, off, 0)
off = duintptr(slicedata, off, 0)
off += hashSize
} else {
fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], true, hash)
if err != nil {
yyerrorl(v.Pos, "embed %s: %v", file, err)
}
off = dsymptr(slicedata, off, fsym, 0) // data string
off = duintptr(slicedata, off, uint64(size))
off = int(slicedata.WriteBytes(Ctxt, int64(off), hash))
}
}
ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
sym := v.Sym.Linksym()
dsymptr(sym, 0, slicedata, 0)
}
}
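// The three kinds handled above correspond to these declarations
// (illustrative: hello.txt and static are assumed to exist beside the
// source, and the pattern-to-file mapping comes from the build system's
// -embedcfg):
package p
import "embed"
//go:embed hello.txt
var s string // embedString: data pointer + length
//go:embed hello.txt
var b []byte // embedBytes: data + len + cap
//go:embed static
var fsys embed.FS // embedFiles: sorted (name, data, hash) entries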

View file

@ -1,472 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
"fmt"
)
func escapes(all []*Node) {
visitBottomUp(all, escapeFuncs)
}
const (
EscFuncUnknown = 0 + iota
EscFuncPlanned
EscFuncStarted
EscFuncTagged
)
func min8(a, b int8) int8 {
if a < b {
return a
}
return b
}
func max8(a, b int8) int8 {
if a > b {
return a
}
return b
}
const (
EscUnknown = iota
EscNone // Does not escape to heap, result, or parameters.
EscHeap // Reachable from the heap
EscNever // By construction will not escape.
)
// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
func funcSym(fn *Node) *types.Sym {
if fn == nil || fn.Func.Nname == nil {
return nil
}
return fn.Func.Nname.Sym
}
// Mark labels that have no backjumps to them as not increasing e.loopdepth.
// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat
// and set it to one of the following two. Then in esc we'll clear it again.
var (
looping Node
nonlooping Node
)
func isSliceSelfAssign(dst, src *Node) bool {
// Detect the following special case.
//
// func (b *Buffer) Foo() {
// n, m := ...
// b.buf = b.buf[n:m]
// }
//
// This assignment is a no-op for escape analysis;
// it does not store any new pointers into b that were not already there.
// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
// Here we assume that the statement will not contain calls,
// that is, that order will move any calls to init.
// Otherwise base ONAME value could change between the moments
// when we evaluate it for dst and for src.
// dst is ONAME dereference.
if dst.Op != ODEREF && dst.Op != ODOTPTR || dst.Left.Op != ONAME {
return false
}
// src is a slice operation.
switch src.Op {
case OSLICE, OSLICE3, OSLICESTR:
// OK.
case OSLICEARR, OSLICE3ARR:
// Since arrays are embedded into containing object,
// slice of non-pointer array will introduce a new pointer into b that was not already there
// (pointer to b itself). After such assignment, if b contents escape,
// b escapes as well. If we ignore such OSLICEARR, we will conclude
// that b does not escape when b contents do.
//
// Pointer to an array is OK since it's not stored inside b directly.
// For slicing an array (not pointer to array), there is an implicit OADDR.
// We check that to determine non-pointer array slicing.
if src.Left.Op == OADDR {
return false
}
default:
return false
}
// slice is applied to ONAME dereference.
if src.Left.Op != ODEREF && src.Left.Op != ODOTPTR || src.Left.Left.Op != ONAME {
return false
}
// dst and src reference the same base ONAME.
return dst.Left == src.Left.Left
}
// isSelfAssign reports whether assignment from src to dst can
// be ignored by the escape analysis as it's effectively a self-assignment.
func isSelfAssign(dst, src *Node) bool {
if isSliceSelfAssign(dst, src) {
return true
}
// Detect trivial assignments that assign back to the same object.
//
// It covers these cases:
// val.x = val.y
// val.x[i] = val.y[j]
// val.x1.x2 = val.x1.y2
// ... etc
//
// These assignments do not change assigned object lifetime.
if dst == nil || src == nil || dst.Op != src.Op {
return false
}
switch dst.Op {
case ODOT, ODOTPTR:
// Safe trailing accessors that are permitted to differ.
case OINDEX:
if mayAffectMemory(dst.Right) || mayAffectMemory(src.Right) {
return false
}
default:
return false
}
// The expression prefix must be both "safe" and identical.
return samesafeexpr(dst.Left, src.Left)
}
// mayAffectMemory reports whether evaluation of n may affect the program's
// memory state. If the expression can't affect memory state, then it can be
// safely ignored by the escape analysis.
func mayAffectMemory(n *Node) bool {
// We may want to use a list of "memory safe" ops instead of generally
// "side-effect free", which would include all calls and other ops that can
// allocate or change global state. For now, it's safer to start with the latter.
//
// We're ignoring things like division by zero, index out of range,
// and nil pointer dereference here.
switch n.Op {
case ONAME, OCLOSUREVAR, OLITERAL:
return false
// Left+Right group.
case OINDEX, OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
return mayAffectMemory(n.Left) || mayAffectMemory(n.Right)
// Left group.
case ODOT, ODOTPTR, ODEREF, OCONVNOP, OCONV, OLEN, OCAP,
ONOT, OBITNOT, OPLUS, ONEG, OALIGNOF, OOFFSETOF, OSIZEOF:
return mayAffectMemory(n.Left)
default:
return true
}
}
// heapAllocReason returns the reason the given Node must be heap
// allocated, or the empty string if it does not need to be.
func heapAllocReason(n *Node) string {
if n.Type == nil {
return ""
}
// Parameters are always passed via the stack.
if n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) {
return ""
}
if n.Type.Width > maxStackVarSize {
return "too large for stack"
}
if (n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize {
return "too large for stack"
}
if n.Op == OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
return "too large for stack"
}
if n.Op == OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
return "too large for stack"
}
if n.Op == OMAKESLICE {
r := n.Right
if r == nil {
r = n.Left
}
if !smallintconst(r) {
return "non-constant size"
}
if t := n.Type; t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width {
return "too large for stack"
}
}
return ""
}
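// Against the default limits declared elsewhere in this package (10MB for
// explicit variables, 64KB for implicit allocations), these are
// representative boundary cases; the annotations restate heapAllocReason's
// rules, not verbatim diagnostics.
package main
func main() {
	var big [11 << 20]byte   // over 10MB: "too large for stack"
	p := new([65 << 10]byte) // implicit allocation over 64KB: "too large for stack"
	n := 10
	s := make([]int, n) // length is not a constant: "non-constant size"
	_, _, _ = big[0], p, s
}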
// addrescapes tags node n as having had its address taken
// by "increasing" the "value" of n.Esc to EscHeap.
// Storage is allocated as necessary to allow the address
// to be taken.
func addrescapes(n *Node) {
switch n.Op {
default:
// Unexpected Op, probably due to a previous type error. Ignore.
case ODEREF, ODOTPTR:
// Nothing to do.
case ONAME:
if n == nodfp {
break
}
// if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
// on PPARAM it means something different.
if n.Class() == PAUTO && n.Esc == EscNever {
break
}
// If a closure reference escapes, mark the outer variable as escaping.
if n.Name.IsClosureVar() {
addrescapes(n.Name.Defn)
break
}
if n.Class() != PPARAM && n.Class() != PPARAMOUT && n.Class() != PAUTO {
break
}
// This is a plain parameter or local variable that needs to move to the heap,
// but possibly for the function outside the one we're compiling.
// That is, if we have:
//
// func f(x int) {
// func() {
// global = &x
// }
// }
//
// then we're analyzing the inner closure but we need to move x to the
// heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
oldfn := Curfn
Curfn = n.Name.Curfn
if Curfn.Func.Closure != nil && Curfn.Op == OCLOSURE {
Curfn = Curfn.Func.Closure
}
ln := lineno
lineno = Curfn.Pos
moveToHeap(n)
Curfn = oldfn
lineno = ln
// ODOTPTR has already been introduced,
// so these are the non-pointer ODOT and OINDEX.
// In &x[0], if x is a slice, then x does not
// escape--the pointer inside x does, but that
// is always a heap pointer anyway.
case ODOT, OINDEX, OPAREN, OCONVNOP:
if !n.Left.Type.IsSlice() {
addrescapes(n.Left)
}
}
}
// moveToHeap records the parameter or local variable n as moved to the heap.
func moveToHeap(n *Node) {
if Debug.r != 0 {
Dump("MOVE", n)
}
if compiling_runtime {
yyerror("%v escapes to heap, not allowed in runtime", n)
}
if n.Class() == PAUTOHEAP {
Dump("n", n)
Fatalf("double move to heap")
}
// Allocate a local stack variable to hold the pointer to the heap copy.
// temp will add it to the function declaration list automatically.
heapaddr := temp(types.NewPtr(n.Type))
heapaddr.Sym = lookup("&" + n.Sym.Name)
heapaddr.Orig.Sym = heapaddr.Sym
heapaddr.Pos = n.Pos
// Unset AutoTemp to persist the &foo variable name through SSA to
// liveness analysis.
// TODO(mdempsky/drchase): Cleaner solution?
heapaddr.Name.SetAutoTemp(false)
// Parameters have a local stack copy used at function start/end
// in addition to the copy in the heap that may live longer than
// the function.
if n.Class() == PPARAM || n.Class() == PPARAMOUT {
if n.Xoffset == BADWIDTH {
Fatalf("addrescapes before param assignment")
}
// We rewrite n below to be a heap variable (indirection of heapaddr).
// Preserve a copy so we can still write code referring to the original,
// and substitute that copy into the function declaration list
// so that analyses of the local (on-stack) variables use it.
stackcopy := newname(n.Sym)
stackcopy.Type = n.Type
stackcopy.Xoffset = n.Xoffset
stackcopy.SetClass(n.Class())
stackcopy.Name.Param.Heapaddr = heapaddr
if n.Class() == PPARAMOUT {
// Make sure the pointer to the heap copy is kept live throughout the function.
// The function could panic at any point, and then a defer could recover.
// Thus, we need the pointer to the heap copy always available so the
// post-deferreturn code can copy the return value back to the stack.
// See issue 16095.
heapaddr.Name.SetIsOutputParamHeapAddr(true)
}
n.Name.Param.Stackcopy = stackcopy
// Substitute the stackcopy into the function variable list so that
// liveness and other analyses use the underlying stack slot
// and not the now-pseudo-variable n.
found := false
for i, d := range Curfn.Func.Dcl {
if d == n {
Curfn.Func.Dcl[i] = stackcopy
found = true
break
}
// Parameters are before locals, so can stop early.
// This limits the search even in functions with many local variables.
if d.Class() == PAUTO {
break
}
}
if !found {
Fatalf("cannot find %v in local variable list", n)
}
Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
}
// Modify n in place so that uses of n now mean indirection of the heapaddr.
n.SetClass(PAUTOHEAP)
n.Xoffset = 0
n.Name.Param.Heapaddr = heapaddr
n.Esc = EscHeap
if Debug.m != 0 {
Warnl(n.Pos, "moved to heap: %v", n)
}
}
// This special tag is applied to uintptr variables
// that we believe may hold unsafe.Pointers for
// calls into assembly functions.
const unsafeUintptrTag = "unsafe-uintptr"
// This special tag is applied to uintptr parameters of functions
// marked go:uintptrescapes.
const uintptrEscapesTag = "uintptr-escapes"
func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
name := func() string {
if f.Sym != nil {
return f.Sym.Name
}
return fmt.Sprintf("arg#%d", narg)
}
if fn.Nbody.Len() == 0 {
// Assume that uintptr arguments must be held live across the call.
// This is most important for syscall.Syscall.
// See golang.org/issue/13372.
// This really doesn't have much to do with escape analysis per se,
// but we are reusing the ability to annotate an individual function
// argument and pass those annotations along to importing code.
if f.Type.IsUintptr() {
if Debug.m != 0 {
Warnl(f.Pos, "assuming %v is unsafe uintptr", name())
}
return unsafeUintptrTag
}
if !f.Type.HasPointers() { // don't bother tagging for scalars
return ""
}
var esc EscLeaks
// External functions are assumed unsafe, unless
// //go:noescape is given before the declaration.
if fn.Func.Pragma&Noescape != 0 {
if Debug.m != 0 && f.Sym != nil {
Warnl(f.Pos, "%v does not escape", name())
}
} else {
if Debug.m != 0 && f.Sym != nil {
Warnl(f.Pos, "leaking param: %v", name())
}
esc.AddHeap(0)
}
return esc.Encode()
}
if fn.Func.Pragma&UintptrEscapes != 0 {
if f.Type.IsUintptr() {
if Debug.m != 0 {
Warnl(f.Pos, "marking %v as escaping uintptr", name())
}
return uintptrEscapesTag
}
if f.IsDDD() && f.Type.Elem().IsUintptr() {
// final argument is ...uintptr.
if Debug.m != 0 {
Warnl(f.Pos, "marking %v as escaping ...uintptr", name())
}
return uintptrEscapesTag
}
}
if !f.Type.HasPointers() { // don't bother tagging for scalars
return ""
}
// Unnamed parameters are unused and therefore do not escape.
if f.Sym == nil || f.Sym.IsBlank() {
var esc EscLeaks
return esc.Encode()
}
n := asNode(f.Nname)
loc := e.oldLoc(n)
esc := loc.paramEsc
esc.Optimize()
if Debug.m != 0 && !loc.escapes {
if esc.Empty() {
Warnl(f.Pos, "%v does not escape", name())
}
if x := esc.Heap(); x >= 0 {
if x == 0 {
Warnl(f.Pos, "leaking param: %v", name())
} else {
// TODO(mdempsky): Mention level=x like below?
Warnl(f.Pos, "leaking param content: %v", name())
}
}
for i := 0; i < numEscResults; i++ {
if x := esc.Result(i); x >= 0 {
res := fn.Type.Results().Field(i).Sym
Warnl(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
}
}
}
return esc.Encode()
}
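// The uintptr-escapes tag corresponds to the //go:uintptrescapes pragma;
// the unsafe-uintptr tag is applied automatically to uintptr parameters of
// body-less (assembly-backed) functions. An illustrative use of the pragma:
package p
import "unsafe"
// keepAlive's uintptr arguments are treated as escaping pointers, so the
// objects they refer to stay live for the duration of the call.
//go:uintptrescapes
func keepAlive(args ...uintptr) {}
func use(x *int) {
	keepAlive(uintptr(unsafe.Pointer(x)))
}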

File diff suppressed because it is too large

View file

@ -5,225 +5,68 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/inline"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/bio"
"cmd/internal/src"
"fmt"
)
var (
Debug_export int // if set, print debugging information about export data
"go/constant"
)
func exportf(bout *bio.Writer, format string, args ...interface{}) {
fmt.Fprintf(bout, format, args...)
if Debug_export != 0 {
if base.Debug.Export != 0 {
fmt.Printf(format, args...)
}
}
var asmlist []*Node
// exportsym marks n for export (or reexport).
func exportsym(n *Node) {
if n.Sym.OnExportList() {
return
}
n.Sym.SetOnExportList(true)
if Debug.E != 0 {
fmt.Printf("export symbol %v\n", n.Sym)
}
exportlist = append(exportlist, n)
}
func initname(s string) bool {
return s == "init"
}
func autoexport(n *Node, ctxt Class) {
if n.Sym.Pkg != localpkg {
return
}
if (ctxt != PEXTERN && ctxt != PFUNC) || dclcontext != PEXTERN {
return
}
if n.Type != nil && n.Type.IsKind(TFUNC) && n.IsMethod() {
return
}
if types.IsExported(n.Sym.Name) || initname(n.Sym.Name) {
exportsym(n)
}
if asmhdr != "" && !n.Sym.Asm() {
n.Sym.SetAsm(true)
asmlist = append(asmlist, n)
}
}
func dumpexport(bout *bio.Writer) {
p := &exporter{marked: make(map[*types.Type]bool)}
for _, n := range typecheck.Target.Exports {
p.markObject(n)
}
// The linker also looks for the $$ marker - use char after $$ to distinguish format.
exportf(bout, "\n$$B\n") // indicate binary export format
off := bout.Offset()
iexport(bout.Writer)
typecheck.WriteExports(bout.Writer)
size := bout.Offset() - off
exportf(bout, "\n$$\n")
if Debug_export != 0 {
fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", myimportpath, size)
}
}
func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node {
n := asNode(s.PkgDef())
if n == nil {
// iimport should have created a stub ONONAME
// declaration for all imported symbols. The exception
// is declarations for Runtimepkg, which are populated
// by loadsys instead.
if s.Pkg != Runtimepkg {
Fatalf("missing ONONAME for %v\n", s)
}
n = dclname(s)
s.SetPkgDef(asTypesNode(n))
s.Importdef = ipkg
}
if n.Op != ONONAME && n.Op != op {
redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
}
return n
}
// importtype returns the named type declared by symbol s.
// If no such type has been declared yet, a forward declaration is returned.
// ipkg is the package being imported
func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type {
n := importsym(ipkg, s, OTYPE)
if n.Op != OTYPE {
t := types.New(TFORW)
t.Sym = s
t.Nod = asTypesNode(n)
n.Op = OTYPE
n.Pos = pos
n.Type = t
n.SetClass(PEXTERN)
}
t := n.Type
if t == nil {
Fatalf("importtype %v", s)
}
return t
}
// importobj declares symbol s as an imported object representable by op.
// ipkg is the package being imported
func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op Op, ctxt Class, t *types.Type) *Node {
n := importsym(ipkg, s, op)
if n.Op != ONONAME {
if n.Op == op && (n.Class() != ctxt || !types.Identical(n.Type, t)) {
redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
}
return nil
}
n.Op = op
n.Pos = pos
n.SetClass(ctxt)
if ctxt == PFUNC {
n.Sym.SetFunc(true)
}
n.Type = t
return n
}
// importconst declares symbol s as an imported constant with type t and value val.
// ipkg is the package being imported
func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val Val) {
n := importobj(ipkg, pos, s, OLITERAL, PEXTERN, t)
if n == nil { // TODO: Check that value matches.
return
}
n.SetVal(val)
if Debug.E != 0 {
fmt.Printf("import const %v %L = %v\n", s, t, val)
}
}
// importfunc declares symbol s as an imported function with type t.
// ipkg is the package being imported
func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
n := importobj(ipkg, pos, s, ONAME, PFUNC, t)
if n == nil {
return
}
n.Func = new(Func)
t.SetNname(asTypesNode(n))
if Debug.E != 0 {
fmt.Printf("import func %v%S\n", s, t)
}
}
// importvar declares symbol s as an imported variable with type t.
// ipkg is the package being imported
func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
n := importobj(ipkg, pos, s, ONAME, PEXTERN, t)
if n == nil {
return
}
if Debug.E != 0 {
fmt.Printf("import var %v %L\n", s, t)
}
}
// importalias declares symbol s as an imported type alias with type t.
// ipkg is the package being imported
func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
n := importobj(ipkg, pos, s, OTYPE, PEXTERN, t)
if n == nil {
return
}
if Debug.E != 0 {
fmt.Printf("import type %v = %L\n", s, t)
if base.Debug.Export != 0 {
fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, size)
}
}
func dumpasmhdr() {
b, err := bio.Create(asmhdr)
b, err := bio.Create(base.Flag.AsmHdr)
if err != nil {
Fatalf("%v", err)
base.Fatalf("%v", err)
}
fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", localpkg.Name)
for _, n := range asmlist {
if n.Sym.IsBlank() {
fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", types.LocalPkg.Name)
for _, n := range typecheck.Target.Asms {
if n.Sym().IsBlank() {
continue
}
switch n.Op {
case OLITERAL:
t := n.Val().Ctype()
if t == CTFLT || t == CTCPLX {
switch n.Op() {
case ir.OLITERAL:
t := n.Val().Kind()
if t == constant.Float || t == constant.Complex {
break
}
fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym.Name, n.Val())
fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym().Name, n.Val())
case OTYPE:
t := n.Type
case ir.OTYPE:
t := n.Type()
if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() {
break
}
fmt.Fprintf(b, "#define %s__size %d\n", n.Sym.Name, int(t.Width))
fmt.Fprintf(b, "#define %s__size %d\n", n.Sym().Name, int(t.Width))
for _, f := range t.Fields().Slice() {
if !f.Sym.IsBlank() {
fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, f.Sym.Name, int(f.Offset))
fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym().Name, f.Sym.Name, int(f.Offset))
}
}
}
@ -231,3 +74,83 @@ func dumpasmhdr() {
b.Close()
}
type exporter struct {
marked map[*types.Type]bool // types already seen by markType
}
// markObject visits a reachable object.
func (p *exporter) markObject(n ir.Node) {
if n.Op() == ir.ONAME {
n := n.(*ir.Name)
if n.Class == ir.PFUNC {
inline.Inline_Flood(n, typecheck.Export)
}
}
p.markType(n.Type())
}
// markType recursively visits types reachable from t to identify
// functions whose inline bodies may be needed.
func (p *exporter) markType(t *types.Type) {
if p.marked[t] {
return
}
p.marked[t] = true
// If this is a named type, mark all of its associated
// methods. Skip interface types because t.Methods contains
// only their unexpanded method set (i.e., exclusive of
// interface embeddings), and the switch statement below
// handles their full method set.
if t.Sym() != nil && t.Kind() != types.TINTER {
for _, m := range t.Methods().Slice() {
if types.IsExported(m.Sym.Name) {
p.markObject(ir.AsNode(m.Nname))
}
}
}
// Recursively mark any types that can be produced given a
// value of type t: dereferencing a pointer; indexing or
// iterating over an array, slice, or map; receiving from a
// channel; accessing a struct field or interface method; or
// calling a function.
//
// Notably, we don't mark function parameter types, because
// the user already needs some way to construct values of
// those types.
switch t.Kind() {
case types.TPTR, types.TARRAY, types.TSLICE:
p.markType(t.Elem())
case types.TCHAN:
if t.ChanDir().CanRecv() {
p.markType(t.Elem())
}
case types.TMAP:
p.markType(t.Key())
p.markType(t.Elem())
case types.TSTRUCT:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
p.markType(f.Type)
}
}
case types.TFUNC:
for _, f := range t.Results().FieldSlice() {
p.markType(f.Type)
}
case types.TINTER:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) {
p.markType(f.Type)
}
}
}
}

File diff suppressed because it is too large

View file

@ -1,86 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"strconv"
)
// sysfunc looks up a Go function name in package runtime. This function
// must follow the internal calling convention.
func sysfunc(name string) *obj.LSym {
s := Runtimepkg.Lookup(name)
s.SetFunc(true)
return s.Linksym()
}
// sysvar looks up a variable (or assembly function) name in package
// runtime. If this is a function, it may have a special calling
// convention.
func sysvar(name string) *obj.LSym {
return Runtimepkg.Lookup(name).Linksym()
}
// isParamStackCopy reports whether this is the on-stack copy of a
// function parameter that moved to the heap.
func (n *Node) isParamStackCopy() bool {
return n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Name.Param.Heapaddr != nil
}
// isParamHeapCopy reports whether this is the on-heap copy of
// a function parameter that moved to the heap.
func (n *Node) isParamHeapCopy() bool {
return n.Op == ONAME && n.Class() == PAUTOHEAP && n.Name.Param.Stackcopy != nil
}
// autotmpname returns the name for an autotmp variable numbered n.
func autotmpname(n int) string {
// Give each tmp a different name so that they can be registerized.
// Add a preceding . to avoid clashing with legal names.
const prefix = ".autotmp_"
// Start with a buffer big enough to hold a large n.
b := []byte(prefix + " ")[:len(prefix)]
b = strconv.AppendInt(b, int64(n), 10)
return types.InternString(b)
}
// make a new Node off the books
func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node {
if curfn == nil {
Fatalf("no curfn for tempAt")
}
if curfn.Func.Closure != nil && curfn.Op == OCLOSURE {
Dump("tempAt", curfn)
Fatalf("adding tempAt to wrong closure function")
}
if t == nil {
Fatalf("tempAt called with nil type")
}
s := &types.Sym{
Name: autotmpname(len(curfn.Func.Dcl)),
Pkg: localpkg,
}
n := newnamel(pos, s)
s.Def = asTypesNode(n)
n.Type = t
n.SetClass(PAUTO)
n.Esc = EscNever
n.Name.Curfn = curfn
n.Name.SetUsed(true)
n.Name.SetAutoTemp(true)
curfn.Func.Dcl = append(curfn.Func.Dcl, n)
dowidth(t)
return n.Orig
}
func temp(t *types.Type) *Node {
return tempAt(lineno, Curfn, t)
}

View file

@ -1,349 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"sync"
)
const (
BADWIDTH = types.BADWIDTH
)
var (
// maximum size variable which we will allocate on the stack.
// This limit is for explicit variable declarations like "var x T" or "x := ...".
// Note: the flag smallframes can update this value.
maxStackVarSize = int64(10 * 1024 * 1024)
// maximum size of implicit variables that we will allocate on the stack.
// p := new(T) allocating T on the stack
// p := &T{} allocating T on the stack
// s := make([]T, n) allocating [n]T on the stack
// s := []byte("...") allocating [n]byte on the stack
// Note: the flag smallframes can update this value.
maxImplicitStackVarSize = int64(64 * 1024)
// smallArrayBytes is the maximum size of an array which is considered small.
// Small arrays will be initialized directly with a sequence of constant stores.
// Large arrays will be initialized by copying from a static temp.
// 256 bytes was chosen to minimize generated code + statictmp size.
smallArrayBytes = int64(256)
)
// isRuntimePkg reports whether p is package runtime.
func isRuntimePkg(p *types.Pkg) bool {
if compiling_runtime && p == localpkg {
return true
}
return p.Path == "runtime"
}
// isReflectPkg reports whether p is package reflect.
func isReflectPkg(p *types.Pkg) bool {
if p == localpkg {
return myimportpath == "reflect"
}
return p.Path == "reflect"
}
// The Class of a variable/function describes the "storage class"
// of a variable or function. During parsing, storage classes are
// called declaration contexts.
type Class uint8
//go:generate stringer -type=Class
const (
Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables
PEXTERN // global variables
PAUTO // local variables
PAUTOHEAP // local variables or parameters moved to heap
PPARAM // input arguments
PPARAMOUT // output results
PFUNC // global functions
// Careful: Class is stored in three bits in Node.flags.
_ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
)
// Slices in the runtime are represented by three components:
//
// type slice struct {
// ptr unsafe.Pointer
// len int
// cap int
// }
//
// Strings in the runtime are represented by two components:
//
// type string struct {
// ptr unsafe.Pointer
// len int
// }
//
// These variables are the offsets of fields and sizes of these structs.
var (
slicePtrOffset int64
sliceLenOffset int64
sliceCapOffset int64
sizeofSlice int64
sizeofString int64
)
var pragcgobuf [][]string
var outfile string
var linkobj string
// nerrors is the number of compiler errors reported
// since the last call to saveerrors.
var nerrors int
// nsavederrors is the total number of compiler errors
// reported before the last call to saveerrors.
var nsavederrors int
var nsyntaxerrors int
var decldepth int32
var nolocalimports bool
// gc debug flags
type DebugFlags struct {
P, B, C, E,
K, L, N, S,
W, e, h, j,
l, m, r, w int
}
var Debug DebugFlags
var debugstr string
var Debug_checknil int
var Debug_typeassert int
var localpkg *types.Pkg // package being compiled
var inimport bool // set during import
var itabpkg *types.Pkg // fake pkg for itab entries
var itablinkpkg *types.Pkg // fake package for runtime itab entries
var Runtimepkg *types.Pkg // fake package runtime
var racepkg *types.Pkg // package runtime/race
var msanpkg *types.Pkg // package runtime/msan
var unsafepkg *types.Pkg // package unsafe
var trackpkg *types.Pkg // fake package for field tracking
var mappkg *types.Pkg // fake package for map zero value
var gopkg *types.Pkg // pseudo-package for method symbols on anonymous receiver types
var zerosize int64
var myimportpath string
var localimport string
var asmhdr string
var simtype [NTYPE]types.EType
var (
isInt [NTYPE]bool
isFloat [NTYPE]bool
isComplex [NTYPE]bool
issimple [NTYPE]bool
)
var (
okforeq [NTYPE]bool
okforadd [NTYPE]bool
okforand [NTYPE]bool
okfornone [NTYPE]bool
okforcmp [NTYPE]bool
okforbool [NTYPE]bool
okforcap [NTYPE]bool
okforlen [NTYPE]bool
okforarith [NTYPE]bool
okforconst [NTYPE]bool
)
var (
okfor [OEND][]bool
iscmp [OEND]bool
)
var minintval [NTYPE]*Mpint
var maxintval [NTYPE]*Mpint
var minfltval [NTYPE]*Mpflt
var maxfltval [NTYPE]*Mpflt
var xtop []*Node
var exportlist []*Node
var importlist []*Node // imported functions and methods with inlinable bodies
var (
funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym)
funcsyms []*types.Sym
)
var dclcontext Class // PEXTERN/PAUTO
var Curfn *Node
var Widthptr int
var Widthreg int
var nblank *Node
var typecheckok bool
var compiling_runtime bool
// Compiling the standard library
var compiling_std bool
var use_writebarrier bool
var pure_go bool
var flag_installsuffix string
var flag_race bool
var flag_msan bool
var flagDWARF bool
// Whether we are adding any sort of code instrumentation, such as
// when the race detector is enabled.
var instrumenting bool
// Whether we are tracking lexical scopes for DWARF.
var trackScopes bool
// Controls generation of DWARF inlined instance records. Zero
// disables, 1 emits inlined routines but suppresses var info,
// and 2 emits inlined routines with tracking of formals/locals.
var genDwarfInline int
var debuglive int
var Ctxt *obj.Link
var writearchive bool
var nodfp *Node
var disable_checknil int
var autogeneratedPos src.XPos
// interface to back end
type Arch struct {
LinkArch *obj.LinkArch
REGSP int
MAXWIDTH int64
SoftFloat bool
PadFrame func(int64) int64
// ZeroRange zeroes a range of memory on stack. It is only inserted
// at function entry, and it is ok to clobber registers.
ZeroRange func(*Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
Ginsnop func(*Progs) *obj.Prog
Ginsnopdefer func(*Progs) *obj.Prog // special ginsnop for deferreturn
// SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
SSAMarkMoves func(*SSAGenState, *ssa.Block)
// SSAGenValue emits Prog(s) for the Value.
SSAGenValue func(*SSAGenState, *ssa.Value)
// SSAGenBlock emits end-of-block Progs. SSAGenValue should be called
// for all values in the block before SSAGenBlock.
SSAGenBlock func(s *SSAGenState, b, next *ssa.Block)
}
var thearch Arch
var (
staticuint64s,
zerobase *Node
assertE2I,
assertE2I2,
assertI2I,
assertI2I2,
deferproc,
deferprocStack,
Deferreturn,
Duffcopy,
Duffzero,
gcWriteBarrier,
goschedguarded,
growslice,
msanread,
msanwrite,
msanmove,
newobject,
newproc,
panicdivide,
panicshift,
panicdottypeE,
panicdottypeI,
panicnildottype,
panicoverflow,
raceread,
racereadrange,
racewrite,
racewriterange,
x86HasPOPCNT,
x86HasSSE41,
x86HasFMA,
armHasVFPv4,
arm64HasATOMICS,
typedmemclr,
typedmemmove,
Udiv,
writeBarrier,
zerobaseSym *obj.LSym
BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
// Wasm
WasmMove,
WasmZero,
WasmDiv,
WasmTruncS,
WasmTruncU,
SigPanic *obj.LSym
)
// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
var GCWriteBarrierReg map[int16]*obj.LSym

View file

@ -1,333 +0,0 @@
// Derived from Inferno utils/6c/txt.c
// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package gc
import (
"cmd/compile/internal/ssa"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
)
var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839
// Progs accumulates Progs for a function and converts them into machine code.
type Progs struct {
Text *obj.Prog // ATEXT Prog for this function
next *obj.Prog // next Prog
pc int64 // virtual PC; count of Progs
pos src.XPos // position to use for new Progs
curfn *Node // fn these Progs are for
progcache []obj.Prog // local progcache
cacheidx int // first free element of progcache
nextLive LivenessIndex // liveness index for the next Prog
prevLive LivenessIndex // last emitted liveness index
}
// newProgs returns a new Progs for fn.
// worker indicates which of the backend workers will use the Progs.
func newProgs(fn *Node, worker int) *Progs {
pp := new(Progs)
if Ctxt.CanReuseProgs() {
sz := len(sharedProgArray) / nBackendWorkers
pp.progcache = sharedProgArray[sz*worker : sz*(worker+1)]
}
pp.curfn = fn
// prime the pump
pp.next = pp.NewProg()
pp.clearp(pp.next)
pp.pos = fn.Pos
pp.settext(fn)
// PCDATA tables implicitly start with index -1.
pp.prevLive = LivenessIndex{-1, false}
pp.nextLive = pp.prevLive
return pp
}
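(For example, with nBackendWorkers == 4 each Progs gets a disjoint 2500-entry window of sharedProgArray; worker 1 reuses sharedProgArray[2500:5000], so concurrent backend workers never contend for cached Progs.)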
func (pp *Progs) NewProg() *obj.Prog {
var p *obj.Prog
if pp.cacheidx < len(pp.progcache) {
p = &pp.progcache[pp.cacheidx]
pp.cacheidx++
} else {
p = new(obj.Prog)
}
p.Ctxt = Ctxt
return p
}
// Flush converts from pp to machine code.
func (pp *Progs) Flush() {
plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.curfn}
obj.Flushplist(Ctxt, plist, pp.NewProg, myimportpath)
}
// Free clears pp and any associated resources.
func (pp *Progs) Free() {
if Ctxt.CanReuseProgs() {
// Clear progs to enable GC and avoid abuse.
s := pp.progcache[:pp.cacheidx]
for i := range s {
s[i] = obj.Prog{}
}
}
// Clear pp to avoid abuse.
*pp = Progs{}
}
// Prog adds a Prog with instruction As to pp.
func (pp *Progs) Prog(as obj.As) *obj.Prog {
if pp.nextLive.StackMapValid() && pp.nextLive.stackMapIndex != pp.prevLive.stackMapIndex {
// Emit stack map index change.
idx := pp.nextLive.stackMapIndex
pp.prevLive.stackMapIndex = idx
p := pp.Prog(obj.APCDATA)
Addrconst(&p.From, objabi.PCDATA_StackMapIndex)
Addrconst(&p.To, int64(idx))
}
if pp.nextLive.isUnsafePoint != pp.prevLive.isUnsafePoint {
// Emit unsafe-point marker.
pp.prevLive.isUnsafePoint = pp.nextLive.isUnsafePoint
p := pp.Prog(obj.APCDATA)
Addrconst(&p.From, objabi.PCDATA_UnsafePoint)
if pp.nextLive.isUnsafePoint {
Addrconst(&p.To, objabi.PCDATA_UnsafePointUnsafe)
} else {
Addrconst(&p.To, objabi.PCDATA_UnsafePointSafe)
}
}
p := pp.next
pp.next = pp.NewProg()
pp.clearp(pp.next)
p.Link = pp.next
if !pp.pos.IsKnown() && Debug.K != 0 {
Warn("prog: unknown position (line 0)")
}
p.As = as
p.Pos = pp.pos
if pp.pos.IsStmt() == src.PosIsStmt {
// Clear IsStmt for later Progs at this pos provided that as can be marked as a stmt
if ssa.LosesStmtMark(as) {
return p
}
pp.pos = pp.pos.WithNotStmt()
}
return p
}
func (pp *Progs) clearp(p *obj.Prog) {
obj.Nopout(p)
p.As = obj.AEND
p.Pc = pp.pc
pp.pc++
}
func (pp *Progs) Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
q := pp.NewProg()
pp.clearp(q)
q.As = as
q.Pos = p.Pos
q.From.Type = ftype
q.From.Reg = freg
q.From.Offset = foffset
q.To.Type = ttype
q.To.Reg = treg
q.To.Offset = toffset
q.Link = p.Link
p.Link = q
return q
}
func (pp *Progs) settext(fn *Node) {
if pp.Text != nil {
Fatalf("Progs.settext called twice")
}
ptxt := pp.Prog(obj.ATEXT)
pp.Text = ptxt
fn.Func.lsym.Func().Text = ptxt
ptxt.From.Type = obj.TYPE_MEM
ptxt.From.Name = obj.NAME_EXTERN
ptxt.From.Sym = fn.Func.lsym
}
// initLSym defines f's obj.LSym and initializes it based on the
// properties of f. This includes setting the symbol flags and ABI and
// creating and initializing related DWARF symbols.
//
// initLSym must be called exactly once per function and must be
// called for both functions with bodies and functions without bodies.
func (f *Func) initLSym(hasBody bool) {
if f.lsym != nil {
Fatalf("Func.initLSym called twice")
}
if nam := f.Nname; !nam.isBlank() {
f.lsym = nam.Sym.Linksym()
if f.Pragma&Systemstack != 0 {
f.lsym.Set(obj.AttrCFunc, true)
}
var aliasABI obj.ABI
needABIAlias := false
defABI, hasDefABI := symabiDefs[f.lsym.Name]
if hasDefABI && defABI == obj.ABI0 {
// Symbol is defined as ABI0. Create an
// Internal -> ABI0 wrapper.
f.lsym.SetABI(obj.ABI0)
needABIAlias, aliasABI = true, obj.ABIInternal
} else {
// No ABI override. Check that the symbol is
// using the expected ABI.
want := obj.ABIInternal
if f.lsym.ABI() != want {
Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.lsym.Name, f.lsym.ABI(), want)
}
}
isLinknameExported := nam.Sym.Linkname != "" && (hasBody || hasDefABI)
if abi, ok := symabiRefs[f.lsym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
// Either 1) this symbol is definitely
// referenced as ABI0 from this package; or 2)
// this symbol is defined in this package but
// given a linkname, indicating that it may be
// referenced from another package. Create an
// ABI0 -> Internal wrapper so it can be
// called as ABI0. In case 2, it's important
// that we know it's defined in this package
// since other packages may "pull" symbols
// using linkname and we don't want to create
// duplicate ABI wrappers.
if f.lsym.ABI() != obj.ABI0 {
needABIAlias, aliasABI = true, obj.ABI0
}
}
if needABIAlias {
// These LSyms have the same name as the
// native function, so we create them directly
// rather than looking them up. The uniqueness
// of f.lsym ensures uniqueness of asym.
asym := &obj.LSym{
Name: f.lsym.Name,
Type: objabi.SABIALIAS,
R: []obj.Reloc{{Sym: f.lsym}}, // 0 size, so "informational"
}
asym.SetABI(aliasABI)
asym.Set(obj.AttrDuplicateOK, true)
Ctxt.ABIAliases = append(Ctxt.ABIAliases, asym)
}
}
if !hasBody {
// For body-less functions, we only create the LSym.
return
}
var flag int
if f.Dupok() {
flag |= obj.DUPOK
}
if f.Wrapper() {
flag |= obj.WRAPPER
}
if f.Needctxt() {
flag |= obj.NEEDCTXT
}
if f.Pragma&Nosplit != 0 {
flag |= obj.NOSPLIT
}
if f.ReflectMethod() {
flag |= obj.REFLECTMETHOD
}
// Clumsy but important.
// See test/recover.go for test cases and src/reflect/value.go
// for the actual functions being considered.
if myimportpath == "reflect" {
switch f.Nname.Sym.Name {
case "callReflect", "callMethod":
flag |= obj.WRAPPER
}
}
Ctxt.InitTextSym(f.lsym, flag)
}
func ggloblnod(nam *Node) {
s := nam.Sym.Linksym()
s.Gotype = ngotype(nam).Linksym()
flags := 0
if nam.Name.Readonly() {
flags = obj.RODATA
}
if nam.Type != nil && !nam.Type.HasPointers() {
flags |= obj.NOPTR
}
Ctxt.Globl(s, nam.Type.Width, flags)
if nam.Name.LibfuzzerExtraCounter() {
s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
}
if nam.Sym.Linkname != "" {
// Make sure linkname'd symbol is non-package. When a symbol is
// both imported and linkname'd, s.Pkg may not be set to "_" in
// types.Sym.Linksym because the LSym already exists. Set it here.
s.Pkg = "_"
}
}
func ggloblsym(s *obj.LSym, width int32, flags int16) {
if flags&obj.LOCAL != 0 {
s.Set(obj.AttrLocal, true)
flags &^= obj.LOCAL
}
Ctxt.Globl(s, int64(width), int(flags))
}
func Addrconst(a *obj.Addr, v int64) {
a.Sym = nil
a.Type = obj.TYPE_CONST
a.Offset = v
}
func Patch(p *obj.Prog, to *obj.Prog) {
if p.To.Type != obj.TYPE_BRANCH {
Fatalf("patch: not a branch")
}
p.To.SetTarget(to)
p.To.Offset = to.Pc
}

File diff suppressed because it is too large

View file

@ -1,109 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
"cmd/internal/obj"
)
// A function named init is a special case.
// It is called by the initialization before main is run.
// To make it unique within a package and also uncallable,
// the name, normally "pkg.init", is altered to "pkg.init.0".
var renameinitgen int
// Dummy function for autotmps generated during typechecking.
var dummyInitFn = nod(ODCLFUNC, nil, nil)
func renameinit() *types.Sym {
s := lookupN("init.", renameinitgen)
renameinitgen++
return s
}
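(So a package containing three init functions gets pkg.init.0, pkg.init.1, and pkg.init.2, a hand-worked consequence of the counter above.)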
// fninit makes an initialization record for the package.
// See runtime/proc.go:initTask for its layout.
// The 3 tasks for initialization are:
// 1) Initialize all of the packages the current package depends on.
// 2) Initialize all the variables that have initializers.
// 3) Run any init functions.
func fninit(n []*Node) {
nf := initOrder(n)
var deps []*obj.LSym // initTask records for packages the current package depends on
var fns []*obj.LSym // functions to call for package initialization
// Find imported packages with init tasks.
for _, s := range types.InitSyms {
deps = append(deps, s.Linksym())
}
// Make a function that contains all the initialization statements.
if len(nf) > 0 {
lineno = nf[0].Pos // prolog/epilog gets line number of first init stmt
initializers := lookup("init")
fn := dclfunc(initializers, nod(OTFUNC, nil, nil))
for _, dcl := range dummyInitFn.Func.Dcl {
dcl.Name.Curfn = fn
}
fn.Func.Dcl = append(fn.Func.Dcl, dummyInitFn.Func.Dcl...)
dummyInitFn.Func.Dcl = nil
fn.Nbody.Set(nf)
funcbody()
fn = typecheck(fn, ctxStmt)
Curfn = fn
typecheckslice(nf, ctxStmt)
Curfn = nil
xtop = append(xtop, fn)
fns = append(fns, initializers.Linksym())
}
if dummyInitFn.Func.Dcl != nil {
// We only generate temps using dummyInitFn if there
// are package-scope initialization statements, so
// something's weird if we get here.
Fatalf("dummyInitFn still has declarations")
}
dummyInitFn = nil
// Record user init functions.
for i := 0; i < renameinitgen; i++ {
s := lookupN("init.", i)
fn := asNode(s.Def).Name.Defn
// Skip init functions with empty bodies.
if fn.Nbody.Len() == 1 && fn.Nbody.First().Op == OEMPTY {
continue
}
fns = append(fns, s.Linksym())
}
if len(deps) == 0 && len(fns) == 0 && localpkg.Name != "main" && localpkg.Name != "runtime" {
return // nothing to initialize
}
// Make an .inittask structure.
sym := lookup(".inittask")
nn := newname(sym)
nn.Type = types.Types[TUINT8] // dummy type
nn.SetClass(PEXTERN)
sym.Def = asTypesNode(nn)
exportsym(nn)
lsym := sym.Linksym()
ot := 0
ot = duintptr(lsym, ot, 0) // state: not initialized yet
ot = duintptr(lsym, ot, uint64(len(deps)))
ot = duintptr(lsym, ot, uint64(len(fns)))
for _, d := range deps {
ot = dsymptr(lsym, ot, d, 0)
}
for _, f := range fns {
ot = dsymptr(lsym, ot, f, 0)
}
// An initTask has pointers, but none into the Go heap.
// It's not quite read-only: the state field must be modifiable.
ggloblsym(lsym, int32(ot), obj.NOPTR)
}
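The record laid out above mirrors runtime's initTask; a sketch of that layout, paraphrased from runtime/proc.go of the same vintage:
type initTask struct {
	state uintptr // 0 = uninitialized, 1 = in progress, 2 = done
	ndeps uintptr
	nfns  uintptr
	// followed by ndeps pointers to the initTasks of imported packages,
	// then nfns PCs, one per init function to run
}
The three duintptr calls write state, ndeps, and nfns; the two dsymptr loops emit the trailing arrays.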

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -1,357 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"fmt"
"math"
"math/big"
)
// implements float arithmetic
const (
// Maximum size in bits for Mpints before signalling
// overflow and also mantissa precision for Mpflts.
Mpprec = 512
// Turn on for constant arithmetic debugging output.
Mpdebug = false
)
// Mpflt represents a floating-point constant.
type Mpflt struct {
Val big.Float
}
// Mpcplx represents a complex constant.
type Mpcplx struct {
Real Mpflt
Imag Mpflt
}
// Use newMpflt (not new(Mpflt)!) to get the correct default precision.
func newMpflt() *Mpflt {
var a Mpflt
a.Val.SetPrec(Mpprec)
return &a
}
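The pitfall the comment warns about, as a standalone sketch using only math/big (values chosen for illustration):
var bad big.Float                      // zero value: precision 0
bad.SetFloat64(1.0 / 3)                // precision becomes float64's 53 bits
good := new(big.Float).SetPrec(Mpprec) // what newMpflt arranges
good.SetFloat64(1.0 / 3)               // later arithmetic keeps 512-bit precision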
// Use newMpcmplx (not new(Mpcplx)!) to get the correct default precision.
func newMpcmplx() *Mpcplx {
var a Mpcplx
a.Real = *newMpflt()
a.Imag = *newMpflt()
return &a
}
func (a *Mpflt) SetInt(b *Mpint) {
if b.checkOverflow(0) {
// sign doesn't really matter but copy anyway
a.Val.SetInf(b.Val.Sign() < 0)
return
}
a.Val.SetInt(&b.Val)
}
func (a *Mpflt) Set(b *Mpflt) {
a.Val.Set(&b.Val)
}
func (a *Mpflt) Add(b *Mpflt) {
if Mpdebug {
fmt.Printf("\n%v + %v", a, b)
}
a.Val.Add(&a.Val, &b.Val)
if Mpdebug {
fmt.Printf(" = %v\n\n", a)
}
}
func (a *Mpflt) AddFloat64(c float64) {
var b Mpflt
b.SetFloat64(c)
a.Add(&b)
}
func (a *Mpflt) Sub(b *Mpflt) {
if Mpdebug {
fmt.Printf("\n%v - %v", a, b)
}
a.Val.Sub(&a.Val, &b.Val)
if Mpdebug {
fmt.Printf(" = %v\n\n", a)
}
}
func (a *Mpflt) Mul(b *Mpflt) {
if Mpdebug {
fmt.Printf("%v\n * %v\n", a, b)
}
a.Val.Mul(&a.Val, &b.Val)
if Mpdebug {
fmt.Printf(" = %v\n\n", a)
}
}
func (a *Mpflt) MulFloat64(c float64) {
var b Mpflt
b.SetFloat64(c)
a.Mul(&b)
}
func (a *Mpflt) Quo(b *Mpflt) {
if Mpdebug {
fmt.Printf("%v\n / %v\n", a, b)
}
a.Val.Quo(&a.Val, &b.Val)
if Mpdebug {
fmt.Printf(" = %v\n\n", a)
}
}
func (a *Mpflt) Cmp(b *Mpflt) int {
return a.Val.Cmp(&b.Val)
}
func (a *Mpflt) CmpFloat64(c float64) int {
if c == 0 {
return a.Val.Sign() // common case shortcut
}
return a.Val.Cmp(big.NewFloat(c))
}
func (a *Mpflt) Float64() float64 {
x, _ := a.Val.Float64()
// check for overflow
if math.IsInf(x, 0) && nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpflt Float64")
}
return x + 0 // avoid -0 (should not be needed, but be conservative)
}
func (a *Mpflt) Float32() float64 {
x32, _ := a.Val.Float32()
x := float64(x32)
// check for overflow
if math.IsInf(x, 0) && nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpflt Float32")
}
return x + 0 // avoid -0 (should not be needed, but be conservative)
}
func (a *Mpflt) SetFloat64(c float64) {
if Mpdebug {
fmt.Printf("\nconst %g", c)
}
// convert -0 to 0
if c == 0 {
c = 0
}
a.Val.SetFloat64(c)
if Mpdebug {
fmt.Printf(" = %v\n", a)
}
}
func (a *Mpflt) Neg() {
// avoid -0
if a.Val.Sign() != 0 {
a.Val.Neg(&a.Val)
}
}
func (a *Mpflt) SetString(as string) {
f, _, err := a.Val.Parse(as, 0)
if err != nil {
yyerror("malformed constant: %s (%v)", as, err)
a.Val.SetFloat64(0)
return
}
if f.IsInf() {
yyerror("constant too large: %s", as)
a.Val.SetFloat64(0)
return
}
// -0 becomes 0
if f.Sign() == 0 && f.Signbit() {
a.Val.SetFloat64(0)
}
}
func (f *Mpflt) String() string {
return f.Val.Text('b', 0)
}
func (fvp *Mpflt) GoString() string {
// determine sign
sign := ""
f := &fvp.Val
if f.Sign() < 0 {
sign = "-"
f = new(big.Float).Abs(f)
}
// Don't try to convert infinities (will not terminate).
if f.IsInf() {
return sign + "Inf"
}
// Use exact fmt formatting if in float64 range (common case):
// proceed if f doesn't underflow to 0 or overflow to inf.
if x, _ := f.Float64(); f.Sign() == 0 == (x == 0) && !math.IsInf(x, 0) {
return fmt.Sprintf("%s%.6g", sign, x)
}
// Out of float64 range. Do approximate manual to decimal
// conversion to avoid precise but possibly slow Float
// formatting.
// f = mant * 2**exp
var mant big.Float
exp := f.MantExp(&mant) // 0.5 <= mant < 1.0
// approximate float64 mantissa m and decimal exponent d
// f ~ m * 10**d
m, _ := mant.Float64() // 0.5 <= m < 1.0
d := float64(exp) * (math.Ln2 / math.Ln10) // log_10(2)
// adjust m for truncated (integer) decimal exponent e
e := int64(d)
m *= math.Pow(10, d-float64(e))
// ensure 1 <= m < 10
switch {
case m < 1-0.5e-6:
// The %.6g format below rounds m to 5 digits after the
// decimal point. Make sure that m*10 < 10 even after
// rounding up: m*10 + 0.5e-5 < 10 => m < 1 - 0.5e-6.
m *= 10
e--
case m >= 10:
m /= 10
e++
}
return fmt.Sprintf("%s%.6ge%+d", sign, m, e)
}
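Working the out-of-float64-range branch by hand for f = 0.75·2²⁰⁰⁰: mant = 0.75 and exp = 2000, so d = 2000·log₁₀2 ≈ 602.06, e = 602, and m ≈ 0.75·10^0.06 ≈ 0.861; since m < 1-0.5e-6, the switch rescales to m ≈ 8.611 and e = 601, and the value prints as roughly 8.61105e+601.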
// complex multiply v *= rv
// (a, b) * (c, d) = (a*c - b*d, b*c + a*d)
func (v *Mpcplx) Mul(rv *Mpcplx) {
var ac, ad, bc, bd Mpflt
ac.Set(&v.Real)
ac.Mul(&rv.Real) // ac
bd.Set(&v.Imag)
bd.Mul(&rv.Imag) // bd
bc.Set(&v.Imag)
bc.Mul(&rv.Real) // bc
ad.Set(&v.Real)
ad.Mul(&rv.Imag) // ad
v.Real.Set(&ac)
v.Real.Sub(&bd) // ac-bd
v.Imag.Set(&bc)
v.Imag.Add(&ad) // bc+ad
}
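(A hand-worked check of the identity: (1+2i)·(3+4i) gives ac-bd = 3-8 = -5 and bc+ad = 6+4 = 10, i.e. -5+10i, matching direct expansion.)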
// complex divide v /= rv
// (a, b) / (c, d) = ((a*c + b*d), (b*c - a*d))/(c*c + d*d)
func (v *Mpcplx) Div(rv *Mpcplx) bool {
if rv.Real.CmpFloat64(0) == 0 && rv.Imag.CmpFloat64(0) == 0 {
return false
}
var ac, ad, bc, bd, cc_plus_dd Mpflt
cc_plus_dd.Set(&rv.Real)
cc_plus_dd.Mul(&rv.Real) // cc
ac.Set(&rv.Imag)
ac.Mul(&rv.Imag) // dd
cc_plus_dd.Add(&ac) // cc+dd
// We already checked that c and d are not both zero, but we can't
// assume that c²+d² != 0 follows, because for tiny values of c
// and/or d, c²+d² can underflow to zero. Check that c²+d² is
// nonzero and return if it's not.
if cc_plus_dd.CmpFloat64(0) == 0 {
return false
}
ac.Set(&v.Real)
ac.Mul(&rv.Real) // ac
bd.Set(&v.Imag)
bd.Mul(&rv.Imag) // bd
bc.Set(&v.Imag)
bc.Mul(&rv.Real) // bc
ad.Set(&v.Real)
ad.Mul(&rv.Imag) // ad
v.Real.Set(&ac)
v.Real.Add(&bd) // ac+bd
v.Real.Quo(&cc_plus_dd) // (ac+bd)/(cc+dd)
v.Imag.Set(&bc)
v.Imag.Sub(&ad) // bc-ad
v.Imag.Quo(&cc_plus_dd) // (bc-ad)/(cc+dd)
return true
}
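(A hand-worked example: (1+2i)/(3+4i) has c²+d² = 25, ac+bd = 3+8 = 11, and bc-ad = 6-4 = 2, so the quotient is (11+2i)/25 = 0.44+0.08i.)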
func (v *Mpcplx) String() string {
return fmt.Sprintf("(%s+%si)", v.Real.String(), v.Imag.String())
}
func (v *Mpcplx) GoString() string {
var re string
sre := v.Real.CmpFloat64(0)
if sre != 0 {
re = v.Real.GoString()
}
var im string
sim := v.Imag.CmpFloat64(0)
if sim != 0 {
im = v.Imag.GoString()
}
switch {
case sre == 0 && sim == 0:
return "0"
case sre == 0:
return im + "i"
case sim == 0:
return re
case sim < 0:
return fmt.Sprintf("(%s%si)", re, im)
default:
return fmt.Sprintf("(%s+%si)", re, im)
}
}

View file

@ -1,304 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"fmt"
"math/big"
)
// implements integer arithmetic
// Mpint represents an integer constant.
type Mpint struct {
Val big.Int
Ovf bool // set if Val overflowed compiler limit (sticky)
Rune bool // set if syntax indicates default type rune
}
func (a *Mpint) SetOverflow() {
a.Val.SetUint64(1) // avoid spurious div-zero errors
a.Ovf = true
}
func (a *Mpint) checkOverflow(extra int) bool {
// We don't need to be precise here; any reasonable upper limit would do.
// For now, use existing limit so we pass all the tests unchanged.
if a.Val.BitLen()+extra > Mpprec {
a.SetOverflow()
}
return a.Ovf
}
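(A hand-worked example: a 500-bit constant passes checkOverflow(0), but shifting it left by 20 makes Lsh below call checkOverflow(20), and 500+20 > Mpprec trips the sticky overflow flag.)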
func (a *Mpint) Set(b *Mpint) {
a.Val.Set(&b.Val)
}
func (a *Mpint) SetFloat(b *Mpflt) bool {
// avoid converting huge floating-point numbers to integers
// (2*Mpprec is large enough to permit all tests to pass)
if b.Val.MantExp(nil) > 2*Mpprec {
a.SetOverflow()
return false
}
if _, acc := b.Val.Int(&a.Val); acc == big.Exact {
return true
}
const delta = 16 // a reasonably small number of bits > 0
var t big.Float
t.SetPrec(Mpprec - delta)
// try rounding down a little
t.SetMode(big.ToZero)
t.Set(&b.Val)
if _, acc := t.Int(&a.Val); acc == big.Exact {
return true
}
// try rounding up a little
t.SetMode(big.AwayFromZero)
t.Set(&b.Val)
if _, acc := t.Int(&a.Val); acc == big.Exact {
return true
}
a.Ovf = false
return false
}
func (a *Mpint) Add(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Add")
}
a.SetOverflow()
return
}
a.Val.Add(&a.Val, &b.Val)
if a.checkOverflow(0) {
yyerror("constant addition overflow")
}
}
func (a *Mpint) Sub(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Sub")
}
a.SetOverflow()
return
}
a.Val.Sub(&a.Val, &b.Val)
if a.checkOverflow(0) {
yyerror("constant subtraction overflow")
}
}
func (a *Mpint) Mul(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Mul")
}
a.SetOverflow()
return
}
a.Val.Mul(&a.Val, &b.Val)
if a.checkOverflow(0) {
yyerror("constant multiplication overflow")
}
}
func (a *Mpint) Quo(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Quo")
}
a.SetOverflow()
return
}
a.Val.Quo(&a.Val, &b.Val)
if a.checkOverflow(0) {
// can only happen for div-0 which should be checked elsewhere
yyerror("constant division overflow")
}
}
func (a *Mpint) Rem(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Rem")
}
a.SetOverflow()
return
}
a.Val.Rem(&a.Val, &b.Val)
if a.checkOverflow(0) {
// should never happen
yyerror("constant modulo overflow")
}
}
func (a *Mpint) Or(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Or")
}
a.SetOverflow()
return
}
a.Val.Or(&a.Val, &b.Val)
}
func (a *Mpint) And(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint And")
}
a.SetOverflow()
return
}
a.Val.And(&a.Val, &b.Val)
}
func (a *Mpint) AndNot(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint AndNot")
}
a.SetOverflow()
return
}
a.Val.AndNot(&a.Val, &b.Val)
}
func (a *Mpint) Xor(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Xor")
}
a.SetOverflow()
return
}
a.Val.Xor(&a.Val, &b.Val)
}
func (a *Mpint) Lsh(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Lsh")
}
a.SetOverflow()
return
}
s := b.Int64()
if s < 0 || s >= Mpprec {
msg := "shift count too large"
if s < 0 {
msg = "invalid negative shift count"
}
yyerror("%s: %d", msg, s)
a.SetInt64(0)
return
}
if a.checkOverflow(int(s)) {
yyerror("constant shift overflow")
return
}
a.Val.Lsh(&a.Val, uint(s))
}
func (a *Mpint) Rsh(b *Mpint) {
if a.Ovf || b.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("ovf in Mpint Rsh")
}
a.SetOverflow()
return
}
s := b.Int64()
if s < 0 {
yyerror("invalid negative shift count: %d", s)
if a.Val.Sign() < 0 {
a.SetInt64(-1)
} else {
a.SetInt64(0)
}
return
}
a.Val.Rsh(&a.Val, uint(s))
}
func (a *Mpint) Cmp(b *Mpint) int {
return a.Val.Cmp(&b.Val)
}
func (a *Mpint) CmpInt64(c int64) int {
if c == 0 {
return a.Val.Sign() // common case shortcut
}
return a.Val.Cmp(big.NewInt(c))
}
func (a *Mpint) Neg() {
a.Val.Neg(&a.Val)
}
func (a *Mpint) Int64() int64 {
if a.Ovf {
if nsavederrors+nerrors == 0 {
Fatalf("constant overflow")
}
return 0
}
return a.Val.Int64()
}
func (a *Mpint) SetInt64(c int64) {
a.Val.SetInt64(c)
}
func (a *Mpint) SetString(as string) {
_, ok := a.Val.SetString(as, 0)
if !ok {
// The lexer checks for correct syntax of the literal
// and reports detailed errors. Thus SetString should
// never fail (in theory it might run out of memory,
// but that wouldn't be reported as an error here).
Fatalf("malformed integer constant: %s", as)
return
}
if a.checkOverflow(0) {
yyerror("constant too large: %s", as)
}
}
func (a *Mpint) GoString() string {
return a.Val.String()
}
func (a *Mpint) String() string {
return fmt.Sprintf("%#x", &a.Val)
}

File diff suppressed because it is too large

View file

@ -5,28 +5,21 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/staticdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/archive"
"cmd/internal/bio"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
"crypto/sha256"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"sort"
"strconv"
)
// architecture-independent object file output
const ArhdrSize = 60
func formathdr(arhdr []byte, name string, size int64) {
copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size))
}
// These modes say which kind of object file to generate.
// The default use of the toolchain is to set both bits,
// generating a combined compiler+linker object, one that
@ -46,20 +39,20 @@ const (
)
func dumpobj() {
if linkobj == "" {
dumpobj1(outfile, modeCompilerObj|modeLinkerObj)
if base.Flag.LinkObj == "" {
dumpobj1(base.Flag.LowerO, modeCompilerObj|modeLinkerObj)
return
}
dumpobj1(outfile, modeCompilerObj)
dumpobj1(linkobj, modeLinkerObj)
dumpobj1(base.Flag.LowerO, modeCompilerObj)
dumpobj1(base.Flag.LinkObj, modeLinkerObj)
}
func dumpobj1(outfile string, mode int) {
bout, err := bio.Create(outfile)
if err != nil {
flusherrors()
base.FlushErrors()
fmt.Printf("can't create %s: %v\n", outfile, err)
errorexit()
base.ErrorExit()
}
defer bout.Close()
bout.WriteString("!<arch>\n")
@ -78,17 +71,17 @@ func dumpobj1(outfile string, mode int) {
func printObjHeader(bout *bio.Writer) {
fmt.Fprintf(bout, "go object %s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring())
if buildid != "" {
fmt.Fprintf(bout, "build id %q\n", buildid)
if base.Flag.BuildID != "" {
fmt.Fprintf(bout, "build id %q\n", base.Flag.BuildID)
}
if localpkg.Name == "main" {
if types.LocalPkg.Name == "main" {
fmt.Fprintf(bout, "main\n")
}
fmt.Fprintf(bout, "\n") // header ends with blank line
}
func startArchiveEntry(bout *bio.Writer) int64 {
var arhdr [ArhdrSize]byte
var arhdr [archive.HeaderSize]byte
bout.Write(arhdr[:])
return bout.Offset()
}
@ -99,10 +92,10 @@ func finishArchiveEntry(bout *bio.Writer, start int64, name string) {
if size&1 != 0 {
bout.WriteByte(0)
}
bout.MustSeek(start-ArhdrSize, 0)
bout.MustSeek(start-archive.HeaderSize, 0)
var arhdr [ArhdrSize]byte
formathdr(arhdr[:], name, size)
var arhdr [archive.HeaderSize]byte
archive.FormatHeader(arhdr[:], name, size)
bout.Write(arhdr[:])
bout.Flush()
bout.MustSeek(start+size+(size&1), 0)
@ -114,22 +107,21 @@ func dumpCompilerObj(bout *bio.Writer) {
}
func dumpdata() {
externs := len(externdcl)
xtops := len(xtop)
numExterns := len(typecheck.Target.Externs)
numDecls := len(typecheck.Target.Decls)
dumpglobls()
addptabs()
exportlistLen := len(exportlist)
addsignats(externdcl)
dumpsignats()
dumptabs()
ptabsLen := len(ptabs)
itabsLen := len(itabs)
dumpimportstrings()
dumpbasictypes()
dumpglobls(typecheck.Target.Externs)
reflectdata.CollectPTabs()
numExports := len(typecheck.Target.Exports)
addsignats(typecheck.Target.Externs)
reflectdata.WriteRuntimeTypes()
reflectdata.WriteTabs()
numPTabs, numITabs := reflectdata.CountTabs()
reflectdata.WriteImportStrings()
reflectdata.WriteBasicTypes()
dumpembeds()
// Calls to dumpsignats can generate functions,
// Calls to WriteRuntimeTypes can generate functions,
// like method wrappers and hash and equality routines.
// Compile any generated functions, process any new resulting types, repeat.
// This can't loop forever, because there is no way to generate an infinite
@ -137,169 +129,108 @@ func dumpdata() {
// In the typical case, we loop 0 or 1 times.
// It was not until issue 24761 that we found any code that required a loop at all.
for {
for i := xtops; i < len(xtop); i++ {
n := xtop[i]
if n.Op == ODCLFUNC {
funccompile(n)
for i := numDecls; i < len(typecheck.Target.Decls); i++ {
if n, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
enqueueFunc(n)
}
}
xtops = len(xtop)
numDecls = len(typecheck.Target.Decls)
compileFunctions()
dumpsignats()
if xtops == len(xtop) {
reflectdata.WriteRuntimeTypes()
if numDecls == len(typecheck.Target.Decls) {
break
}
}
// Dump extra globals.
tmp := externdcl
dumpglobls(typecheck.Target.Externs[numExterns:])
if externdcl != nil {
externdcl = externdcl[externs:]
}
dumpglobls()
externdcl = tmp
if zerosize > 0 {
zero := mappkg.Lookup("zero")
ggloblsym(zero.Linksym(), int32(zerosize), obj.DUPOK|obj.RODATA)
if reflectdata.ZeroSize > 0 {
zero := base.PkgLinksym("go.map", "zero", obj.ABI0)
objw.Global(zero, int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA)
}
staticdata.WriteFuncSyms()
addGCLocals()
if exportlistLen != len(exportlist) {
Fatalf("exportlist changed after compile functions loop")
if numExports != len(typecheck.Target.Exports) {
base.Fatalf("Target.Exports changed after compile functions loop")
}
if ptabsLen != len(ptabs) {
Fatalf("ptabs changed after compile functions loop")
newNumPTabs, newNumITabs := reflectdata.CountTabs()
if newNumPTabs != numPTabs {
base.Fatalf("ptabs changed after compile functions loop")
}
if itabsLen != len(itabs) {
Fatalf("itabs changed after compile functions loop")
if newNumITabs != numITabs {
base.Fatalf("itabs changed after compile functions loop")
}
}
func dumpLinkerObj(bout *bio.Writer) {
printObjHeader(bout)
if len(pragcgobuf) != 0 {
if len(typecheck.Target.CgoPragmas) != 0 {
// write empty export section; must be before cgo section
fmt.Fprintf(bout, "\n$$\n\n$$\n\n")
fmt.Fprintf(bout, "\n$$ // cgo\n")
if err := json.NewEncoder(bout).Encode(pragcgobuf); err != nil {
Fatalf("serializing pragcgobuf: %v", err)
if err := json.NewEncoder(bout).Encode(typecheck.Target.CgoPragmas); err != nil {
base.Fatalf("serializing pragcgobuf: %v", err)
}
fmt.Fprintf(bout, "\n$$\n\n")
}
fmt.Fprintf(bout, "\n!\n")
obj.WriteObjFile(Ctxt, bout)
obj.WriteObjFile(base.Ctxt, bout)
}
func addptabs() {
if !Ctxt.Flag_dynlink || localpkg.Name != "main" {
func dumpGlobal(n *ir.Name) {
if n.Type() == nil {
base.Fatalf("external %v nil type\n", n)
}
if n.Class == ir.PFUNC {
return
}
for _, exportn := range exportlist {
s := exportn.Sym
n := asNode(s.Def)
if n == nil {
continue
}
if n.Op != ONAME {
continue
}
if !types.IsExported(s.Name) {
continue
}
if s.Pkg.Name != "main" {
continue
}
if n.Type.Etype == TFUNC && n.Class() == PFUNC {
// function
ptabs = append(ptabs, ptabEntry{s: s, t: asNode(s.Def).Type})
} else {
// variable
ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(asNode(s.Def).Type)})
}
}
}
func dumpGlobal(n *Node) {
if n.Type == nil {
Fatalf("external %v nil type\n", n)
}
if n.Class() == PFUNC {
if n.Sym().Pkg != types.LocalPkg {
return
}
if n.Sym.Pkg != localpkg {
return
}
dowidth(n.Type)
types.CalcSize(n.Type())
ggloblnod(n)
}
func dumpGlobalConst(n *Node) {
func dumpGlobalConst(n ir.Node) {
// only export typed constants
t := n.Type
t := n.Type()
if t == nil {
return
}
if n.Sym.Pkg != localpkg {
if n.Sym().Pkg != types.LocalPkg {
return
}
// only export integer constants for now
switch t.Etype {
case TINT8:
case TINT16:
case TINT32:
case TINT64:
case TINT:
case TUINT8:
case TUINT16:
case TUINT32:
case TUINT64:
case TUINT:
case TUINTPTR:
// ok
case TIDEAL:
if !Isconst(n, CTINT) {
return
}
x := n.Val().U.(*Mpint)
if x.Cmp(minintval[TINT]) < 0 || x.Cmp(maxintval[TINT]) > 0 {
return
}
// Ideal integers we export as int (if they fit).
t = types.Types[TINT]
default:
if !t.IsInteger() {
return
}
Ctxt.DwarfIntConst(myimportpath, n.Sym.Name, typesymname(t), n.Int64Val())
v := n.Val()
if t.IsUntyped() {
// Export untyped integers as int (if they fit).
t = types.Types[types.TINT]
if ir.ConstOverflow(v, t) {
return
}
}
base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, types.TypeSymName(t), ir.IntVal(t, v))
}
func dumpglobls() {
func dumpglobls(externs []ir.Node) {
// add globals
for _, n := range externdcl {
switch n.Op {
case ONAME:
dumpGlobal(n)
case OLITERAL:
for _, n := range externs {
switch n.Op() {
case ir.ONAME:
dumpGlobal(n.(*ir.Name))
case ir.OLITERAL:
dumpGlobalConst(n)
}
}
sort.Slice(funcsyms, func(i, j int) bool {
return funcsyms[i].LinksymName() < funcsyms[j].LinksymName()
})
for _, s := range funcsyms {
sf := s.Pkg.Lookup(funcsymname(s)).Linksym()
dsymptr(sf, 0, s.Linksym(), 0)
ggloblsym(sf, int32(Widthptr), obj.DUPOK|obj.RODATA)
}
// Do not reprocess funcsyms on next dumpglobls call.
funcsyms = nil
}
// addGCLocals adds gcargs, gclocals, gcregs, and stack object symbols to Ctxt.Data.
@ -307,332 +238,60 @@ func dumpglobls() {
// This is done during the sequential phase after compilation, since
// global symbols can't be declared during parallel compilation.
func addGCLocals() {
for _, s := range Ctxt.Text {
for _, s := range base.Ctxt.Text {
fn := s.Func()
if fn == nil {
continue
}
for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals} {
if gcsym != nil && !gcsym.OnList() {
ggloblsym(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
objw.Global(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
}
}
if x := fn.StackObjects; x != nil {
attr := int16(obj.RODATA)
ggloblsym(x, int32(len(x.P)), attr)
objw.Global(x, int32(len(x.P)), attr)
x.Set(obj.AttrStatic, true)
}
if x := fn.OpenCodedDeferInfo; x != nil {
ggloblsym(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
}
}
}
func duintxx(s *obj.LSym, off int, v uint64, wid int) int {
if off&(wid-1) != 0 {
Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
func ggloblnod(nam *ir.Name) {
s := nam.Linksym()
s.Gotype = reflectdata.TypeLinksym(nam.Type())
flags := 0
if nam.Readonly() {
flags = obj.RODATA
}
if nam.Type() != nil && !nam.Type().HasPointers() {
flags |= obj.NOPTR
}
base.Ctxt.Globl(s, nam.Type().Width, flags)
if nam.LibfuzzerExtraCounter() {
s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
}
if nam.Sym().Linkname != "" {
// Make sure linkname'd symbol is non-package. When a symbol is
// both imported and linkname'd, s.Pkg may not be set to "_" in
// types.Sym.Linksym because the LSym already exists. Set it here.
s.Pkg = "_"
}
s.WriteInt(Ctxt, int64(off), wid, int64(v))
return off + wid
}
func duint8(s *obj.LSym, off int, v uint8) int {
return duintxx(s, off, uint64(v), 1)
func dumpembeds() {
for _, v := range typecheck.Target.Embeds {
staticdata.WriteEmbed(v)
}
}
func duint16(s *obj.LSym, off int, v uint16) int {
return duintxx(s, off, uint64(v), 2)
}
func duint32(s *obj.LSym, off int, v uint32) int {
return duintxx(s, off, uint64(v), 4)
}
func duintptr(s *obj.LSym, off int, v uint64) int {
return duintxx(s, off, v, Widthptr)
}
func dbvec(s *obj.LSym, off int, bv bvec) int {
// Runtime reads the bitmaps as byte arrays. Oblige.
for j := 0; int32(j) < bv.n; j += 8 {
word := bv.b[j/32]
off = duint8(s, off, uint8(word>>(uint(j)%32)))
}
return off
}
const (
stringSymPrefix = "go.string."
stringSymPattern = ".gostring.%d.%x"
)
// stringsym returns a symbol containing the string s.
// The symbol contains the string data, not a string header.
func stringsym(pos src.XPos, s string) (data *obj.LSym) {
var symname string
if len(s) > 100 {
// Huge strings are hashed to avoid long names in object files.
// Indulge in some paranoia by writing the length of s, too,
// as protection against length extension attacks.
// Same pattern is known to fileStringSym below.
h := sha256.New()
io.WriteString(h, s)
symname = fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil))
} else {
// Small strings get named directly by their contents.
symname = strconv.Quote(s)
}
symdata := Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
off := dstringdata(symdata, 0, s, pos, "string")
ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
symdata.Set(obj.AttrContentAddressable, true)
}
return symdata
}
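(Working the naming scheme by hand: the literal "hello" lands in the symbol go.string."hello", while a 200-byte string s becomes go.string..gostring.200.<hex of sha256(s)>, keeping object-file symbol names short; the digest here is a placeholder.)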
// fileStringSym returns a symbol for the contents and the size of file.
// If readonly is true, the symbol shares storage with any literal string
// or other file with the same content and is placed in a read-only section.
// If readonly is false, the symbol is a read-write copy separate from any other,
// for use as the backing store of a []byte.
// The content hash of file is copied into hash. (If hash is nil, nothing is copied.)
// The returned symbol contains the data itself, not a string header.
func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) {
f, err := os.Open(file)
if err != nil {
return nil, 0, err
}
defer f.Close()
info, err := f.Stat()
if err != nil {
return nil, 0, err
}
if !info.Mode().IsRegular() {
return nil, 0, fmt.Errorf("not a regular file")
}
size := info.Size()
if size <= 1*1024 {
data, err := ioutil.ReadAll(f)
if err != nil {
return nil, 0, err
func addsignats(dcls []ir.Node) {
// copy types from dcl list to signatset
for _, n := range dcls {
if n.Op() == ir.OTYPE {
reflectdata.NeedRuntimeType(n.Type())
}
if int64(len(data)) != size {
return nil, 0, fmt.Errorf("file changed between reads")
}
var sym *obj.LSym
if readonly {
sym = stringsym(pos, string(data))
} else {
sym = slicedata(pos, string(data)).Sym.Linksym()
}
if len(hash) > 0 {
sum := sha256.Sum256(data)
copy(hash, sum[:])
}
return sym, size, nil
}
if size > 2e9 {
// ggloblsym takes an int32,
// and probably the rest of the toolchain
// can't handle such big symbols either.
// See golang.org/issue/9862.
return nil, 0, fmt.Errorf("file too large")
}
// File is too big to read and keep in memory.
// Compute hash if needed for read-only content hashing or if the caller wants it.
var sum []byte
if readonly || len(hash) > 0 {
h := sha256.New()
n, err := io.Copy(h, f)
if err != nil {
return nil, 0, err
}
if n != size {
return nil, 0, fmt.Errorf("file changed between reads")
}
sum = h.Sum(nil)
copy(hash, sum)
}
var symdata *obj.LSym
if readonly {
symname := fmt.Sprintf(stringSymPattern, size, sum)
symdata = Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
info := symdata.NewFileInfo()
info.Name = file
info.Size = size
ggloblsym(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
// Note: AttrContentAddressable cannot be set here,
// because the content-addressable-handling code
// does not know about file symbols.
}
} else {
// Emit a zero-length data symbol
// and then fix up length and content to use file.
symdata = slicedata(pos, "").Sym.Linksym()
symdata.Size = size
symdata.Type = objabi.SNOPTRDATA
info := symdata.NewFileInfo()
info.Name = file
info.Size = size
}
return symdata, size, nil
}
var slicedataGen int
func slicedata(pos src.XPos, s string) *Node {
slicedataGen++
symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
sym := localpkg.Lookup(symname)
symnode := newname(sym)
sym.Def = asTypesNode(symnode)
lsym := sym.Linksym()
off := dstringdata(lsym, 0, s, pos, "slice")
ggloblsym(lsym, int32(off), obj.NOPTR|obj.LOCAL)
return symnode
}
func slicebytes(nam *Node, s string) {
if nam.Op != ONAME {
Fatalf("slicebytes %v", nam)
}
slicesym(nam, slicedata(nam.Pos, s), int64(len(s)))
}
func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
// Objects that are too large will cause the data section to overflow right away,
// causing a cryptic error message by the linker. Check for oversize objects here
// and provide a useful error message instead.
if int64(len(t)) > 2e9 {
yyerrorl(pos, "%v with length %v is too big", what, len(t))
return 0
}
s.WriteString(Ctxt, int64(off), len(t), t)
return off + len(t)
}
func dsymptr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
off = int(Rnd(int64(off), int64(Widthptr)))
s.WriteAddr(Ctxt, int64(off), Widthptr, x, int64(xoff))
off += Widthptr
return off
}
func dsymptrOff(s *obj.LSym, off int, x *obj.LSym) int {
s.WriteOff(Ctxt, int64(off), x, 0)
off += 4
return off
}
func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
s.WriteWeakOff(Ctxt, int64(off), x, 0)
off += 4
return off
}
// slicesym writes a static slice symbol {&arr, lencap, lencap} to n.
// arr must be an ONAME. slicesym does not modify n.
func slicesym(n, arr *Node, lencap int64) {
s := n.Sym.Linksym()
base := n.Xoffset
if arr.Op != ONAME {
Fatalf("slicesym non-name arr %v", arr)
}
s.WriteAddr(Ctxt, base, Widthptr, arr.Sym.Linksym(), arr.Xoffset)
s.WriteInt(Ctxt, base+sliceLenOffset, Widthptr, lencap)
s.WriteInt(Ctxt, base+sliceCapOffset, Widthptr, lencap)
}
// addrsym writes the static address of a to n. a must be an ONAME.
// Neither n nor a is modified.
func addrsym(n, a *Node) {
if n.Op != ONAME {
Fatalf("addrsym n op %v", n.Op)
}
if n.Sym == nil {
Fatalf("addrsym nil n sym")
}
if a.Op != ONAME {
Fatalf("addrsym a op %v", a.Op)
}
s := n.Sym.Linksym()
s.WriteAddr(Ctxt, n.Xoffset, Widthptr, a.Sym.Linksym(), a.Xoffset)
}
// pfuncsym writes the static address of f to n. f must be a global function.
// Neither n nor f is modified.
func pfuncsym(n, f *Node) {
if n.Op != ONAME {
Fatalf("pfuncsym n op %v", n.Op)
}
if n.Sym == nil {
Fatalf("pfuncsym nil n sym")
}
if f.Class() != PFUNC {
Fatalf("pfuncsym class not PFUNC %d", f.Class())
}
s := n.Sym.Linksym()
s.WriteAddr(Ctxt, n.Xoffset, Widthptr, funcsym(f.Sym).Linksym(), f.Xoffset)
}
// litsym writes the static literal c to n.
// Neither n nor c is modified.
func litsym(n, c *Node, wid int) {
if n.Op != ONAME {
Fatalf("litsym n op %v", n.Op)
}
if c.Op != OLITERAL {
Fatalf("litsym c op %v", c.Op)
}
if n.Sym == nil {
Fatalf("litsym nil n sym")
}
s := n.Sym.Linksym()
switch u := c.Val().U.(type) {
case bool:
i := int64(obj.Bool2int(u))
s.WriteInt(Ctxt, n.Xoffset, wid, i)
case *Mpint:
s.WriteInt(Ctxt, n.Xoffset, wid, u.Int64())
case *Mpflt:
f := u.Float64()
switch n.Type.Etype {
case TFLOAT32:
s.WriteFloat32(Ctxt, n.Xoffset, float32(f))
case TFLOAT64:
s.WriteFloat64(Ctxt, n.Xoffset, f)
}
case *Mpcplx:
r := u.Real.Float64()
i := u.Imag.Float64()
switch n.Type.Etype {
case TCOMPLEX64:
s.WriteFloat32(Ctxt, n.Xoffset, float32(r))
s.WriteFloat32(Ctxt, n.Xoffset+4, float32(i))
case TCOMPLEX128:
s.WriteFloat64(Ctxt, n.Xoffset, r)
s.WriteFloat64(Ctxt, n.Xoffset+8, i)
}
case string:
symdata := stringsym(n.Pos, u)
s.WriteAddr(Ctxt, n.Xoffset, Widthptr, symdata, 0)
s.WriteInt(Ctxt, n.Xoffset+int64(Widthptr), Widthptr, int64(len(u)))
default:
Fatalf("litsym unhandled OLITERAL %v", c)
}
}

View file

@ -1,175 +0,0 @@
// Code generated by "stringer -type=Op -trimprefix=O"; DO NOT EDIT.
package gc
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[OXXX-0]
_ = x[ONAME-1]
_ = x[ONONAME-2]
_ = x[OTYPE-3]
_ = x[OPACK-4]
_ = x[OLITERAL-5]
_ = x[OADD-6]
_ = x[OSUB-7]
_ = x[OOR-8]
_ = x[OXOR-9]
_ = x[OADDSTR-10]
_ = x[OADDR-11]
_ = x[OANDAND-12]
_ = x[OAPPEND-13]
_ = x[OBYTES2STR-14]
_ = x[OBYTES2STRTMP-15]
_ = x[ORUNES2STR-16]
_ = x[OSTR2BYTES-17]
_ = x[OSTR2BYTESTMP-18]
_ = x[OSTR2RUNES-19]
_ = x[OAS-20]
_ = x[OAS2-21]
_ = x[OAS2DOTTYPE-22]
_ = x[OAS2FUNC-23]
_ = x[OAS2MAPR-24]
_ = x[OAS2RECV-25]
_ = x[OASOP-26]
_ = x[OCALL-27]
_ = x[OCALLFUNC-28]
_ = x[OCALLMETH-29]
_ = x[OCALLINTER-30]
_ = x[OCALLPART-31]
_ = x[OCAP-32]
_ = x[OCLOSE-33]
_ = x[OCLOSURE-34]
_ = x[OCOMPLIT-35]
_ = x[OMAPLIT-36]
_ = x[OSTRUCTLIT-37]
_ = x[OARRAYLIT-38]
_ = x[OSLICELIT-39]
_ = x[OPTRLIT-40]
_ = x[OCONV-41]
_ = x[OCONVIFACE-42]
_ = x[OCONVNOP-43]
_ = x[OCOPY-44]
_ = x[ODCL-45]
_ = x[ODCLFUNC-46]
_ = x[ODCLFIELD-47]
_ = x[ODCLCONST-48]
_ = x[ODCLTYPE-49]
_ = x[ODELETE-50]
_ = x[ODOT-51]
_ = x[ODOTPTR-52]
_ = x[ODOTMETH-53]
_ = x[ODOTINTER-54]
_ = x[OXDOT-55]
_ = x[ODOTTYPE-56]
_ = x[ODOTTYPE2-57]
_ = x[OEQ-58]
_ = x[ONE-59]
_ = x[OLT-60]
_ = x[OLE-61]
_ = x[OGE-62]
_ = x[OGT-63]
_ = x[ODEREF-64]
_ = x[OINDEX-65]
_ = x[OINDEXMAP-66]
_ = x[OKEY-67]
_ = x[OSTRUCTKEY-68]
_ = x[OLEN-69]
_ = x[OMAKE-70]
_ = x[OMAKECHAN-71]
_ = x[OMAKEMAP-72]
_ = x[OMAKESLICE-73]
_ = x[OMAKESLICECOPY-74]
_ = x[OMUL-75]
_ = x[ODIV-76]
_ = x[OMOD-77]
_ = x[OLSH-78]
_ = x[ORSH-79]
_ = x[OAND-80]
_ = x[OANDNOT-81]
_ = x[ONEW-82]
_ = x[ONEWOBJ-83]
_ = x[ONOT-84]
_ = x[OBITNOT-85]
_ = x[OPLUS-86]
_ = x[ONEG-87]
_ = x[OOROR-88]
_ = x[OPANIC-89]
_ = x[OPRINT-90]
_ = x[OPRINTN-91]
_ = x[OPAREN-92]
_ = x[OSEND-93]
_ = x[OSLICE-94]
_ = x[OSLICEARR-95]
_ = x[OSLICESTR-96]
_ = x[OSLICE3-97]
_ = x[OSLICE3ARR-98]
_ = x[OSLICEHEADER-99]
_ = x[ORECOVER-100]
_ = x[ORECV-101]
_ = x[ORUNESTR-102]
_ = x[OSELRECV-103]
_ = x[OSELRECV2-104]
_ = x[OIOTA-105]
_ = x[OREAL-106]
_ = x[OIMAG-107]
_ = x[OCOMPLEX-108]
_ = x[OALIGNOF-109]
_ = x[OOFFSETOF-110]
_ = x[OSIZEOF-111]
_ = x[OBLOCK-112]
_ = x[OBREAK-113]
_ = x[OCASE-114]
_ = x[OCONTINUE-115]
_ = x[ODEFER-116]
_ = x[OEMPTY-117]
_ = x[OFALL-118]
_ = x[OFOR-119]
_ = x[OFORUNTIL-120]
_ = x[OGOTO-121]
_ = x[OIF-122]
_ = x[OLABEL-123]
_ = x[OGO-124]
_ = x[ORANGE-125]
_ = x[ORETURN-126]
_ = x[OSELECT-127]
_ = x[OSWITCH-128]
_ = x[OTYPESW-129]
_ = x[OTCHAN-130]
_ = x[OTMAP-131]
_ = x[OTSTRUCT-132]
_ = x[OTINTER-133]
_ = x[OTFUNC-134]
_ = x[OTARRAY-135]
_ = x[ODDD-136]
_ = x[OINLCALL-137]
_ = x[OEFACE-138]
_ = x[OITAB-139]
_ = x[OIDATA-140]
_ = x[OSPTR-141]
_ = x[OCLOSUREVAR-142]
_ = x[OCFUNC-143]
_ = x[OCHECKNIL-144]
_ = x[OVARDEF-145]
_ = x[OVARKILL-146]
_ = x[OVARLIVE-147]
_ = x[ORESULT-148]
_ = x[OINLMARK-149]
_ = x[ORETJMP-150]
_ = x[OGETG-151]
_ = x[OEND-152]
}
const _Op_name = "XXXNAMENONAMETYPEPACKLITERALADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND"
var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 36, 39, 45, 49, 55, 61, 70, 82, 91, 100, 112, 121, 123, 126, 136, 143, 150, 157, 161, 165, 173, 181, 190, 198, 201, 206, 213, 220, 226, 235, 243, 251, 257, 261, 270, 277, 281, 284, 291, 299, 307, 314, 320, 323, 329, 336, 344, 348, 355, 363, 365, 367, 369, 371, 373, 375, 380, 385, 393, 396, 405, 408, 412, 420, 427, 436, 449, 452, 455, 458, 461, 464, 467, 473, 476, 482, 485, 491, 495, 498, 502, 507, 512, 518, 523, 527, 532, 540, 548, 554, 563, 574, 581, 585, 592, 599, 607, 611, 615, 619, 626, 633, 641, 647, 652, 657, 661, 669, 674, 679, 683, 686, 694, 698, 700, 705, 707, 712, 718, 724, 730, 736, 741, 745, 752, 758, 763, 769, 772, 779, 784, 788, 793, 797, 807, 812, 820, 826, 833, 840, 846, 853, 859, 863, 866}
func (i Op) String() string {
if i >= Op(len(_Op_index)-1) {
return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Op_name[_Op_index[i]:_Op_index[i+1]]
}

File diff suppressed because it is too large

View file

@ -1,798 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
"cmd/internal/sys"
"internal/race"
"math/rand"
"sort"
"sync"
"time"
)
// "Portable" code generation.
var (
nBackendWorkers int // number of concurrent backend workers, set by a compiler flag
compilequeue []*Node // functions waiting to be compiled
)
func emitptrargsmap(fn *Node) {
if fn.funcname() == "_" || fn.Func.Nname.Sym.Linkname != "" {
return
}
lsym := Ctxt.Lookup(fn.Func.lsym.Name + ".args_stackmap")
nptr := int(fn.Type.ArgWidth() / int64(Widthptr))
bv := bvalloc(int32(nptr) * 2)
nbitmap := 1
if fn.Type.NumResults() > 0 {
nbitmap = 2
}
off := duint32(lsym, 0, uint32(nbitmap))
off = duint32(lsym, off, uint32(bv.n))
if fn.IsMethod() {
onebitwalktype1(fn.Type.Recvs(), 0, bv)
}
if fn.Type.NumParams() > 0 {
onebitwalktype1(fn.Type.Params(), 0, bv)
}
off = dbvec(lsym, off, bv)
if fn.Type.NumResults() > 0 {
onebitwalktype1(fn.Type.Results(), 0, bv)
off = dbvec(lsym, off, bv)
}
ggloblsym(lsym, int32(off), obj.RODATA|obj.LOCAL)
}
// cmpstackvarlt reports whether the stack variable a sorts before b.
//
// Sort the list of stack variables: autos after anything else;
// within autos, unused after used; within used, things with
// pointers first, zeroed things first, and then decreasing size.
// Because autos are laid out at decreasing addresses on the stack,
// "pointers first, zeroed things first, decreasing size" means that,
// in memory, pointer-containing locals that need zeroing sit at the
// top of the stack, with sizes increasing toward the top of the frame.
// Non-autos sort on offset.
func cmpstackvarlt(a, b *Node) bool {
if (a.Class() == PAUTO) != (b.Class() == PAUTO) {
return b.Class() == PAUTO
}
if a.Class() != PAUTO {
return a.Xoffset < b.Xoffset
}
if a.Name.Used() != b.Name.Used() {
return a.Name.Used()
}
ap := a.Type.HasPointers()
bp := b.Type.HasPointers()
if ap != bp {
return ap
}
ap = a.Name.Needzero()
bp = b.Name.Needzero()
if ap != bp {
return ap
}
if a.Type.Width != b.Type.Width {
return a.Type.Width > b.Type.Width
}
return a.Sym.Name < b.Sym.Name
}
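(An illustrative case, not from the source: between two used autos, an 8-byte pointer-containing local that needs zeroing sorts before a 64-byte pointer-free one, so when AllocFrame below assigns offsets in sorted order the pointerful slot lands at the top of the frame, keeping the s.stkptrsize prefix and the to-be-zeroed region contiguous.)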
// byStackvar implements sort.Interface for []*Node using cmpstackvarlt.
type byStackVar []*Node
func (s byStackVar) Len() int { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s *ssafn) AllocFrame(f *ssa.Func) {
s.stksize = 0
s.stkptrsize = 0
fn := s.curfn.Func
// Mark the PAUTO's unused.
for _, ln := range fn.Dcl {
if ln.Class() == PAUTO {
ln.Name.SetUsed(false)
}
}
for _, l := range f.RegAlloc {
if ls, ok := l.(ssa.LocalSlot); ok {
ls.N.(*Node).Name.SetUsed(true)
}
}
scratchUsed := false
for _, b := range f.Blocks {
for _, v := range b.Values {
if n, ok := v.Aux.(*Node); ok {
switch n.Class() {
case PPARAM, PPARAMOUT:
// Don't modify nodfp; it is a global.
if n != nodfp {
n.Name.SetUsed(true)
}
case PAUTO:
n.Name.SetUsed(true)
}
}
if !scratchUsed {
scratchUsed = v.Op.UsesScratch()
}
}
}
if f.Config.NeedsFpScratch && scratchUsed {
s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[TUINT64])
}
sort.Sort(byStackVar(fn.Dcl))
// Reassign stack offsets of the locals that are used.
lastHasPtr := false
for i, n := range fn.Dcl {
if n.Op != ONAME || n.Class() != PAUTO {
continue
}
if !n.Name.Used() {
fn.Dcl = fn.Dcl[:i]
break
}
dowidth(n.Type)
w := n.Type.Width
if w >= thearch.MAXWIDTH || w < 0 {
Fatalf("bad width")
}
if w == 0 && lastHasPtr {
// Pad between a pointer-containing object and a zero-sized object.
// This prevents a pointer to the zero-sized object from being interpreted
// as a pointer to the pointer-containing object (and causing it
// to be scanned when it shouldn't be). See issue 24993.
w = 1
}
s.stksize += w
s.stksize = Rnd(s.stksize, int64(n.Type.Align))
if n.Type.HasPointers() {
s.stkptrsize = s.stksize
lastHasPtr = true
} else {
lastHasPtr = false
}
if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
s.stksize = Rnd(s.stksize, int64(Widthptr))
}
n.Xoffset = -s.stksize
}
s.stksize = Rnd(s.stksize, int64(Widthreg))
s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
}
func funccompile(fn *Node) {
if Curfn != nil {
Fatalf("funccompile %v inside %v", fn.Func.Nname.Sym, Curfn.Func.Nname.Sym)
}
if fn.Type == nil {
if nerrors == 0 {
Fatalf("funccompile missing type")
}
return
}
// assign parameter offsets
dowidth(fn.Type)
if fn.Nbody.Len() == 0 {
// Initialize ABI wrappers if necessary.
fn.Func.initLSym(false)
emitptrargsmap(fn)
return
}
dclcontext = PAUTO
Curfn = fn
compile(fn)
Curfn = nil
dclcontext = PEXTERN
}
func compile(fn *Node) {
saveerrors()
order(fn)
if nerrors != 0 {
return
}
// Set up the function's LSym early to avoid data races with the assemblers.
// Do this before walk, as walk needs the LSym to set attributes/relocations
// (e.g. in markTypeUsedInInterface).
fn.Func.initLSym(true)
walk(fn)
if nerrors != 0 {
return
}
if instrumenting {
instrument(fn)
}
// From this point, there should be no uses of Curfn. Enforce that.
Curfn = nil
if fn.funcname() == "_" {
// We don't need to generate code for this function, just report errors in its body.
// At this point we've generated any errors needed.
// (Beyond here we generate only non-spec errors, like "stack frame too large".)
// See issue 29870.
return
}
// Make sure type syms are declared for all types that might
// be types of stack objects. We need to do this here
// because symbols must be allocated before the parallel
// phase of the compiler.
for _, n := range fn.Func.Dcl {
switch n.Class() {
case PPARAM, PPARAMOUT, PAUTO:
if livenessShouldTrack(n) && n.Name.Addrtaken() {
dtypesym(n.Type)
// Also make sure we allocate a linker symbol
// for the stack object data, for the same reason.
if fn.Func.lsym.Func().StackObjects == nil {
fn.Func.lsym.Func().StackObjects = Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj")
}
}
}
}
if compilenow(fn) {
compileSSA(fn, 0)
} else {
compilequeue = append(compilequeue, fn)
}
}
// compilenow reports whether to compile immediately.
// If functions are not compiled immediately,
// they are enqueued in compilequeue,
// which is drained by compileFunctions.
func compilenow(fn *Node) bool {
// Issue 38068: if this function is a method AND an inline
// candidate AND was not inlined (yet), put it onto the compile
// queue instead of compiling it immediately. This is in case we
// wind up inlining it into a method wrapper that is generated by
// compiling a function later on in the xtop list.
if fn.IsMethod() && isInlinableButNotInlined(fn) {
return false
}
return nBackendWorkers == 1 && Debug_compilelater == 0
}
// isInlinableButNotInlined returns true if 'fn' was marked as an
// inline candidate but then never inlined (presumably because we
// found no call sites).
func isInlinableButNotInlined(fn *Node) bool {
if fn.Func.Nname.Func.Inl == nil {
return false
}
if fn.Sym == nil {
return true
}
return !fn.Sym.Linksym().WasInlined()
}
const maxStackSize = 1 << 30
// compileSSA builds an SSA backend function,
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
func compileSSA(fn *Node, worker int) {
f := buildssa(fn, worker)
// Note: check arg size to fix issue 25507.
if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type.ArgWidth() >= maxStackSize {
largeStackFramesMu.Lock()
largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type.ArgWidth(), pos: fn.Pos})
largeStackFramesMu.Unlock()
return
}
pp := newProgs(fn, worker)
defer pp.Free()
genssa(f, pp)
// Check frame size again.
// The check above included only the space needed for local variables.
// After genssa, the space needed includes local variables and the callee arg region.
// We must do this check prior to calling pp.Flush.
// If there are any oversized stack frames,
// the assembler may emit inscrutable complaints about invalid instructions.
if pp.Text.To.Offset >= maxStackSize {
largeStackFramesMu.Lock()
locals := f.Frontend().(*ssafn).stksize
largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos})
largeStackFramesMu.Unlock()
return
}
pp.Flush() // assemble, fill in boilerplate, etc.
// fieldtrack must be called after pp.Flush. See issue 20014.
fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack)
}
func init() {
if race.Enabled {
rand.Seed(time.Now().UnixNano())
}
}
// compileFunctions compiles all functions in compilequeue.
// It fans out nBackendWorkers to do the work
// and waits for them to complete.
func compileFunctions() {
if len(compilequeue) != 0 {
sizeCalculationDisabled = true // not safe to calculate sizes concurrently
if race.Enabled {
// Randomize compilation order to try to shake out races.
tmp := make([]*Node, len(compilequeue))
perm := rand.Perm(len(compilequeue))
for i, v := range perm {
tmp[v] = compilequeue[i]
}
copy(compilequeue, tmp)
} else {
// Compile the longest functions first,
// since they're most likely to be the slowest.
// This helps avoid stragglers.
sort.Slice(compilequeue, func(i, j int) bool {
return compilequeue[i].Nbody.Len() > compilequeue[j].Nbody.Len()
})
}
var wg sync.WaitGroup
Ctxt.InParallel = true
c := make(chan *Node, nBackendWorkers)
for i := 0; i < nBackendWorkers; i++ {
wg.Add(1)
go func(worker int) {
for fn := range c {
compileSSA(fn, worker)
}
wg.Done()
}(i)
}
for _, fn := range compilequeue {
c <- fn
}
close(c)
compilequeue = nil
wg.Wait()
Ctxt.InParallel = false
sizeCalculationDisabled = false
}
}
func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
fn := curfn.(*Node)
if fn.Func.Nname != nil {
if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
}
}
var apdecls []*Node
// Populate decls for fn.
for _, n := range fn.Func.Dcl {
if n.Op != ONAME { // might be OTYPE or OLITERAL
continue
}
switch n.Class() {
case PAUTO:
if !n.Name.Used() {
// Text == nil -> generating abstract function
if fnsym.Func().Text != nil {
Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
}
continue
}
case PPARAM, PPARAMOUT:
default:
continue
}
apdecls = append(apdecls, n)
fnsym.Func().RecordAutoType(ngotype(n).Linksym())
}
decls, dwarfVars := createDwarfVars(fnsym, fn.Func, apdecls)
// For each type referenced by the function's auto vars but not
// already referenced by a dwarf var, attach a dummy relocation to
// the function symbol to ensure that the type is included in DWARF
// processing during linking.
typesyms := []*obj.LSym{}
for t := range fnsym.Func().Autot {
typesyms = append(typesyms, t)
}
sort.Sort(obj.BySymName(typesyms))
for _, sym := range typesyms {
r := obj.Addrel(infosym)
r.Sym = sym
r.Type = objabi.R_USETYPE
}
fnsym.Func().Autot = nil
var varScopes []ScopeID
for _, decl := range decls {
pos := declPos(decl)
varScopes = append(varScopes, findScope(fn.Func.Marks, pos))
}
scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
var inlcalls dwarf.InlCalls
if genDwarfInline > 0 {
inlcalls = assembleInlines(fnsym, dwarfVars)
}
return scopes, inlcalls
}
func declPos(decl *Node) src.XPos {
if decl.Name.Defn != nil && (decl.Name.Captured() || decl.Name.Byval()) {
// It's not clear which position is correct for captured variables here:
// * decl.Pos is the wrong position for captured variables, in the inner
// function, but it is the right position in the outer function.
// * decl.Name.Defn is nil for captured variables that were arguments
// on the outer function, however the decl.Pos for those seems to be
// correct.
// * decl.Name.Defn is the "wrong" thing for variables declared in the
// header of a type switch, it's their position in the header, rather
// than the position of the case statement. In principle this is the
// right thing, but here we prefer the latter because it makes each
// instance of the header variable local to the lexical block of its
// case statement.
// This code is probably wrong for type switch variables that are also
// captured.
return decl.Name.Defn.Pos
}
return decl.Pos
}
// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
func createSimpleVars(fnsym *obj.LSym, apDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
var vars []*dwarf.Var
var decls []*Node
selected := make(map[*Node]bool)
for _, n := range apDecls {
if n.IsAutoTmp() {
continue
}
decls = append(decls, n)
vars = append(vars, createSimpleVar(fnsym, n))
selected[n] = true
}
return decls, vars, selected
}
func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
var abbrev int
offs := n.Xoffset
switch n.Class() {
case PAUTO:
abbrev = dwarf.DW_ABRV_AUTO
if Ctxt.FixedFrameSize() == 0 {
offs -= int64(Widthptr)
}
if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
// There is a word space for FP on ARM64 even if the frame pointer is disabled
offs -= int64(Widthptr)
}
case PPARAM, PPARAMOUT:
abbrev = dwarf.DW_ABRV_PARAM
offs += Ctxt.FixedFrameSize()
default:
Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
}
typename := dwarf.InfoPrefix + typesymname(n.Type)
delete(fnsym.Func().Autot, ngotype(n).Linksym())
inlIndex := 0
if genDwarfInline > 1 {
if n.Name.InlFormal() || n.Name.InlLocal() {
inlIndex = posInlIndex(n.Pos) + 1
if n.Name.InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM
}
}
}
declpos := Ctxt.InnermostPos(declPos(n))
return &dwarf.Var{
Name: n.Sym.Name,
IsReturnValue: n.Class() == PPARAMOUT,
IsInlFormal: n.Name.InlFormal(),
Abbrev: abbrev,
StackOffset: int32(offs),
Type: Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
}
}
// createComplexVars creates recomposed DWARF vars with location lists,
// suitable for describing optimized code.
func createComplexVars(fnsym *obj.LSym, fn *Func) ([]*Node, []*dwarf.Var, map[*Node]bool) {
debugInfo := fn.DebugInfo
// Produce a DWARF variable entry for each user variable.
var decls []*Node
var vars []*dwarf.Var
ssaVars := make(map[*Node]bool)
for varID, dvar := range debugInfo.Vars {
n := dvar.(*Node)
ssaVars[n] = true
for _, slot := range debugInfo.VarSlots[varID] {
ssaVars[debugInfo.Slots[slot].N.(*Node)] = true
}
if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
decls = append(decls, n)
vars = append(vars, dvar)
}
}
return decls, vars, ssaVars
}
// createDwarfVars processes fn, returning a list of DWARF variables and the
// Nodes they represent.
func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dwarf.Var) {
// Collect a raw list of DWARF vars.
var vars []*dwarf.Var
var decls []*Node
var selected map[*Node]bool
if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && fn.DebugInfo != nil {
decls, vars, selected = createComplexVars(fnsym, fn)
} else {
decls, vars, selected = createSimpleVars(fnsym, apDecls)
}
dcl := apDecls
if fnsym.WasInlined() {
dcl = preInliningDcls(fnsym)
}
// If optimization is enabled, the list above will typically be
// missing some of the original pre-optimization variables in the
// function (they may have been promoted to registers, folded into
// constants, dead-coded away, etc). Input arguments not eligible
// for SSA optimization are also missing. Here we add back in entries
// for selected missing vars. Note that the recipe below creates a
// conservative location. The idea here is that we want to
// communicate to the user that "yes, there is a variable named X
// in this function, but no, I don't have enough information to
// reliably report its contents."
// For non-SSA-able arguments, however, the correct information
// is known -- they have a single home on the stack.
for _, n := range dcl {
if _, found := selected[n]; found {
continue
}
c := n.Sym.Name[0]
if c == '.' || n.Type.IsUntyped() {
continue
}
if n.Class() == PPARAM && !canSSAType(n.Type) {
// SSA-able args get location lists, and may move in and
// out of registers, so those are handled elsewhere.
// Autos and named output params seem to get handled
// with VARDEF, which creates location lists.
// Args not of SSA-able type are treated here; they
// are homed on the stack in a single place for the
// entire call.
vars = append(vars, createSimpleVar(fnsym, n))
decls = append(decls, n)
continue
}
typename := dwarf.InfoPrefix + typesymname(n.Type)
decls = append(decls, n)
abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
isReturnValue := (n.Class() == PPARAMOUT)
if n.Class() == PPARAM || n.Class() == PPARAMOUT {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
} else if n.Class() == PAUTOHEAP {
// If dcl in question has been promoted to heap, do a bit
// of extra work to recover original class (auto or param);
// see issue 30908. This ensures that we get the proper
// signature in the abstract function DIE, but leaves a
// misleading location for the param (we want pointer-to-heap
// and not stack).
// TODO(thanm): generate a better location expression
stackcopy := n.Name.Param.Stackcopy
if stackcopy != nil && (stackcopy.Class() == PPARAM || stackcopy.Class() == PPARAMOUT) {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
isReturnValue = (stackcopy.Class() == PPARAMOUT)
}
}
inlIndex := 0
if genDwarfInline > 1 {
if n.Name.InlFormal() || n.Name.InlLocal() {
inlIndex = posInlIndex(n.Pos) + 1
if n.Name.InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
}
declpos := Ctxt.InnermostPos(n.Pos)
vars = append(vars, &dwarf.Var{
Name: n.Sym.Name,
IsReturnValue: isReturnValue,
Abbrev: abbrev,
StackOffset: int32(n.Xoffset),
Type: Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
})
// Record the go type of n to ensure that it gets emitted by the linker.
fnsym.Func().RecordAutoType(ngotype(n).Linksym())
}
return decls, vars
}
// Given a function that was inlined at some point during the
// compilation, return a sorted list of nodes corresponding to the
// autos/locals in that function prior to inlining. If this is a
// function that is not local to the package being compiled, then the
// names of the variables may have been "versioned" to avoid conflicts
// with local vars; disregard this versioning when sorting.
func preInliningDcls(fnsym *obj.LSym) []*Node {
fn := Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*Node)
var rdcl []*Node
for _, n := range fn.Func.Inl.Dcl {
c := n.Sym.Name[0]
// Avoid reporting "_" parameters, since if there is more than
// one, they can collide later on, as in #23179.
if unversion(n.Sym.Name) == "_" || c == '.' || n.Type.IsUntyped() {
continue
}
rdcl = append(rdcl, n)
}
return rdcl
}
// stackOffset returns the stack location of a LocalSlot relative to the
// stack pointer, suitable for use in a DWARF location entry. This has nothing
// to do with its offset in the user variable.
func stackOffset(slot ssa.LocalSlot) int32 {
n := slot.N.(*Node)
var base int64
switch n.Class() {
case PAUTO:
if Ctxt.FixedFrameSize() == 0 {
base -= int64(Widthptr)
}
if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
// There is a word space for FP on ARM64 even if the frame pointer is disabled
base -= int64(Widthptr)
}
case PPARAM, PPARAMOUT:
base += Ctxt.FixedFrameSize()
}
return int32(base + n.Xoffset + slot.Off)
}
// createComplexVar builds a single DWARF variable entry and location list.
func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
debug := fn.DebugInfo
n := debug.Vars[varID].(*Node)
var abbrev int
switch n.Class() {
case PAUTO:
abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
case PPARAM, PPARAMOUT:
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
default:
return nil
}
gotype := ngotype(n).Linksym()
delete(fnsym.Func().Autot, gotype)
typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
inlIndex := 0
if genDwarfInline > 1 {
if n.Name.InlFormal() || n.Name.InlLocal() {
inlIndex = posInlIndex(n.Pos) + 1
if n.Name.InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
}
declpos := Ctxt.InnermostPos(n.Pos)
dvar := &dwarf.Var{
Name: n.Sym.Name,
IsReturnValue: n.Class() == PPARAMOUT,
IsInlFormal: n.Name.InlFormal(),
Abbrev: abbrev,
Type: Ctxt.Lookup(typename),
// The stack offset is used as a sorting key, so for decomposed
// variables just give it the first one. It's not used otherwise.
// This won't work well if the first slot hasn't been assigned a stack
// location, but it's not obvious how to do better.
StackOffset: stackOffset(debug.Slots[debug.VarSlots[varID][0]]),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
}
list := debug.LocationLists[varID]
if len(list) != 0 {
dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
debug.PutLocationList(list, Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
}
}
return dvar
}
// fieldtrack adds R_USEFIELD relocations to fnsym to record any
// struct fields that it used.
func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
if fnsym == nil {
return
}
if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
return
}
trackSyms := make([]*types.Sym, 0, len(tracked))
for sym := range tracked {
trackSyms = append(trackSyms, sym)
}
sort.Sort(symByName(trackSyms))
for _, sym := range trackSyms {
r := obj.Addrel(fnsym)
r.Sym = sym.Linksym()
r.Type = objabi.R_USEFIELD
}
}
type symByName []*types.Sym
func (a symByName) Len() int { return len(a) }
func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func (a symByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }


@ -1,196 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
"reflect"
"sort"
"testing"
)
func typeWithoutPointers() *types.Type {
t := types.New(TSTRUCT)
f := &types.Field{Type: types.New(TINT)}
t.SetFields([]*types.Field{f})
return t
}
func typeWithPointers() *types.Type {
t := types.New(TSTRUCT)
f := &types.Field{Type: types.NewPtr(types.New(TINT))}
t.SetFields([]*types.Field{f})
return t
}
func markUsed(n *Node) *Node {
n.Name.SetUsed(true)
return n
}
func markNeedZero(n *Node) *Node {
n.Name.SetNeedzero(true)
return n
}
func nodeWithClass(n Node, c Class) *Node {
n.SetClass(c)
n.Name = new(Name)
return &n
}
// Test all code paths for cmpstackvarlt.
func TestCmpstackvar(t *testing.T) {
testdata := []struct {
a, b *Node
lt bool
}{
{
nodeWithClass(Node{}, PAUTO),
nodeWithClass(Node{}, PFUNC),
false,
},
{
nodeWithClass(Node{}, PFUNC),
nodeWithClass(Node{}, PAUTO),
true,
},
{
nodeWithClass(Node{Xoffset: 0}, PFUNC),
nodeWithClass(Node{Xoffset: 10}, PFUNC),
true,
},
{
nodeWithClass(Node{Xoffset: 20}, PFUNC),
nodeWithClass(Node{Xoffset: 10}, PFUNC),
false,
},
{
nodeWithClass(Node{Xoffset: 10}, PFUNC),
nodeWithClass(Node{Xoffset: 10}, PFUNC),
false,
},
{
nodeWithClass(Node{Xoffset: 10}, PPARAM),
nodeWithClass(Node{Xoffset: 20}, PPARAMOUT),
true,
},
{
nodeWithClass(Node{Xoffset: 10}, PPARAMOUT),
nodeWithClass(Node{Xoffset: 20}, PPARAM),
true,
},
{
markUsed(nodeWithClass(Node{}, PAUTO)),
nodeWithClass(Node{}, PAUTO),
true,
},
{
nodeWithClass(Node{}, PAUTO),
markUsed(nodeWithClass(Node{}, PAUTO)),
false,
},
{
nodeWithClass(Node{Type: typeWithoutPointers()}, PAUTO),
nodeWithClass(Node{Type: typeWithPointers()}, PAUTO),
false,
},
{
nodeWithClass(Node{Type: typeWithPointers()}, PAUTO),
nodeWithClass(Node{Type: typeWithoutPointers()}, PAUTO),
true,
},
{
markNeedZero(nodeWithClass(Node{Type: &types.Type{}}, PAUTO)),
nodeWithClass(Node{Type: &types.Type{}, Name: &Name{}}, PAUTO),
true,
},
{
nodeWithClass(Node{Type: &types.Type{}, Name: &Name{}}, PAUTO),
markNeedZero(nodeWithClass(Node{Type: &types.Type{}}, PAUTO)),
false,
},
{
nodeWithClass(Node{Type: &types.Type{Width: 1}, Name: &Name{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{Width: 2}, Name: &Name{}}, PAUTO),
false,
},
{
nodeWithClass(Node{Type: &types.Type{Width: 2}, Name: &Name{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{Width: 1}, Name: &Name{}}, PAUTO),
true,
},
{
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
true,
},
{
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
false,
},
{
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
false,
},
}
for _, d := range testdata {
got := cmpstackvarlt(d.a, d.b)
if got != d.lt {
t.Errorf("want %#v < %#v", d.a, d.b)
}
// If we expect a < b to be true, check that b < a is false.
if d.lt && cmpstackvarlt(d.b, d.a) {
t.Errorf("unexpected %#v < %#v", d.b, d.a)
}
}
}
func TestStackvarSort(t *testing.T) {
inp := []*Node{
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
nodeWithClass(Node{Xoffset: 0, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
nodeWithClass(Node{Xoffset: 10, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
nodeWithClass(Node{Xoffset: 20, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
markUsed(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
markNeedZero(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
nodeWithClass(Node{Type: &types.Type{Width: 1}, Sym: &types.Sym{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{Width: 2}, Sym: &types.Sym{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
}
want := []*Node{
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
nodeWithClass(Node{Xoffset: 0, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
nodeWithClass(Node{Xoffset: 10, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
nodeWithClass(Node{Xoffset: 20, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
markUsed(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
markNeedZero(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
nodeWithClass(Node{Type: &types.Type{Width: 2}, Sym: &types.Sym{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{Width: 1}, Sym: &types.Sym{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO),
}
sort.Sort(byStackVar(inp))
if !reflect.DeepEqual(want, inp) {
t.Error("sort failed")
for i := range inp {
g := inp[i]
w := want[i]
eq := reflect.DeepEqual(w, g)
if !eq {
t.Log(i, w, g)
}
}
}
}


@ -1,628 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
"cmd/internal/sys"
"unicode/utf8"
)
// range
func typecheckrange(n *Node) {
// Typechecking order is important here:
// 0. first typecheck range expression (slice/map/chan),
// it is evaluated only once and so logically it is not part of the loop.
// 1. typecheck produced values,
// this part can declare new vars and so it must be typechecked before body,
// because body can contain a closure that captures the vars.
// 2. decldepth++ to denote loop body.
// 3. typecheck body.
// 4. decldepth--.
typecheckrangeExpr(n)
// second half of dance, the first half being typecheckrangeExpr
n.SetTypecheck(1)
ls := n.List.Slice()
for i1, n1 := range ls {
if n1.Typecheck() == 0 {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
}
}
decldepth++
typecheckslice(n.Nbody.Slice(), ctxStmt)
decldepth--
}
func typecheckrangeExpr(n *Node) {
n.Right = typecheck(n.Right, ctxExpr)
t := n.Right.Type
if t == nil {
return
}
// delicate little dance. see typecheckas2
ls := n.List.Slice()
for i1, n1 := range ls {
if n1.Name == nil || n1.Name.Defn != n {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
}
}
if t.IsPtr() && t.Elem().IsArray() {
t = t.Elem()
}
n.Type = t
var t1, t2 *types.Type
toomany := false
switch t.Etype {
default:
yyerrorl(n.Pos, "cannot range over %L", n.Right)
return
case TARRAY, TSLICE:
t1 = types.Types[TINT]
t2 = t.Elem()
case TMAP:
t1 = t.Key()
t2 = t.Elem()
case TCHAN:
if !t.ChanDir().CanRecv() {
yyerrorl(n.Pos, "invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type)
return
}
t1 = t.Elem()
t2 = nil
if n.List.Len() == 2 {
toomany = true
}
case TSTRING:
t1 = types.Types[TINT]
t2 = types.Runetype
}
if n.List.Len() > 2 || toomany {
yyerrorl(n.Pos, "too many variables in range")
}
var v1, v2 *Node
if n.List.Len() != 0 {
v1 = n.List.First()
}
if n.List.Len() > 1 {
v2 = n.List.Second()
}
// this is not only an optimization but also a requirement in the spec.
// "if the second iteration variable is the blank identifier, the range
// clause is equivalent to the same clause with only the first variable
// present."
if v2.isBlank() {
if v1 != nil {
n.List.Set1(v1)
}
v2 = nil
}
if v1 != nil {
if v1.Name != nil && v1.Name.Defn == n {
v1.Type = t1
} else if v1.Type != nil {
if op, why := assignop(t1, v1.Type); op == OXXX {
yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why)
}
}
checkassign(n, v1)
}
if v2 != nil {
if v2.Name != nil && v2.Name.Defn == n {
v2.Type = t2
} else if v2.Type != nil {
if op, why := assignop(t2, v2.Type); op == OXXX {
yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why)
}
}
checkassign(n, v2)
}
}
func cheapComputableIndex(width int64) bool {
switch thearch.LinkArch.Family {
// MIPS does not have R+R addressing
// Arm64 may lack ability to generate this code in our assembler,
// but the architecture supports it.
case sys.PPC64, sys.S390X:
return width == 1
case sys.AMD64, sys.I386, sys.ARM64, sys.ARM:
switch width {
case 1, 2, 4, 8:
return true
}
}
return false
}
// walkrange transforms various forms of ORANGE into
// simpler forms. The result must be assigned back to n.
// Node n may also be modified in place, and may also be
// the returned node.
func walkrange(n *Node) *Node {
if isMapClear(n) {
m := n.Right
lno := setlineno(m)
n = mapClear(m)
lineno = lno
return n
}
// variable name conventions:
// ohv1, hv1, hv2: hidden (old) val 1, 2
// ha, hit: hidden aggregate, iterator
// hn, hp: hidden len, pointer
// hb: hidden bool
// a, v1, v2: not hidden aggregate, val 1, 2
t := n.Type
a := n.Right
lno := setlineno(a)
n.Right = nil
var v1, v2 *Node
l := n.List.Len()
if l > 0 {
v1 = n.List.First()
}
if l > 1 {
v2 = n.List.Second()
}
if v2.isBlank() {
v2 = nil
}
if v1.isBlank() && v2 == nil {
v1 = nil
}
if v1 == nil && v2 != nil {
Fatalf("walkrange: v2 != nil while v1 == nil")
}
// n.List has no meaning anymore, clear it
// to avoid erroneous processing by racewalk.
n.List.Set(nil)
var ifGuard *Node
translatedLoopOp := OFOR
var body []*Node
var init []*Node
switch t.Etype {
default:
Fatalf("walkrange")
case TARRAY, TSLICE:
if arrayClear(n, v1, v2, a) {
lineno = lno
return n
}
// order.stmt arranged for a copy of the array/slice variable if needed.
ha := a
hv1 := temp(types.Types[TINT])
hn := temp(types.Types[TINT])
init = append(init, nod(OAS, hv1, nil))
init = append(init, nod(OAS, hn, nod(OLEN, ha, nil)))
n.Left = nod(OLT, hv1, hn)
n.Right = nod(OAS, hv1, nod(OADD, hv1, nodintconst(1)))
// for range ha { body }
if v1 == nil {
break
}
// for v1 := range ha { body }
if v2 == nil {
body = []*Node{nod(OAS, v1, hv1)}
break
}
// for v1, v2 := range ha { body }
if cheapComputableIndex(n.Type.Elem().Width) {
// v1, v2 = hv1, ha[hv1]
tmp := nod(OINDEX, ha, hv1)
tmp.SetBounded(true)
// Use OAS2 to correctly handle assignments
// of the form "v1, a[v1] := range".
a := nod(OAS2, nil, nil)
a.List.Set2(v1, v2)
a.Rlist.Set2(hv1, tmp)
body = []*Node{a}
break
}
// TODO(austin): OFORUNTIL is a strange beast, but is
// necessary for expressing the control flow we need
// while also making "break" and "continue" work. It
// would be nice to just lower ORANGE during SSA, but
// racewalk needs to see many of the operations
// involved in ORANGE's implementation. If racewalk
// moves into SSA, consider moving ORANGE into SSA and
// eliminating OFORUNTIL.
// TODO(austin): OFORUNTIL inhibits bounds-check
// elimination on the index variable (see #20711).
// Enhance the prove pass to understand this.
ifGuard = nod(OIF, nil, nil)
ifGuard.Left = nod(OLT, hv1, hn)
translatedLoopOp = OFORUNTIL
hp := temp(types.NewPtr(n.Type.Elem()))
tmp := nod(OINDEX, ha, nodintconst(0))
tmp.SetBounded(true)
init = append(init, nod(OAS, hp, nod(OADDR, tmp, nil)))
// Use OAS2 to correctly handle assignments
// of the form "v1, a[v1] := range".
a := nod(OAS2, nil, nil)
a.List.Set2(v1, v2)
a.Rlist.Set2(hv1, nod(ODEREF, hp, nil))
body = append(body, a)
// Advance pointer as part of the late increment.
//
// This runs *after* the condition check, so we know
// advancing the pointer is safe and won't go past the
// end of the allocation.
a = nod(OAS, hp, addptr(hp, t.Elem().Width))
a = typecheck(a, ctxStmt)
n.List.Set1(a)
case TMAP:
// order.stmt allocated the iterator for us.
// we only use a once, so no copy needed.
ha := a
hit := prealloc[n]
th := hit.Type
n.Left = nil
keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter
elemsym := th.Field(1).Sym // ditto
fn := syslook("mapiterinit")
fn = substArgTypes(fn, t.Key(), t.Elem(), th)
init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nod(OADDR, hit, nil)))
n.Left = nod(ONE, nodSym(ODOT, hit, keysym), nodnil())
fn = syslook("mapiternext")
fn = substArgTypes(fn, th)
n.Right = mkcall1(fn, nil, nil, nod(OADDR, hit, nil))
key := nodSym(ODOT, hit, keysym)
key = nod(ODEREF, key, nil)
if v1 == nil {
body = nil
} else if v2 == nil {
body = []*Node{nod(OAS, v1, key)}
} else {
elem := nodSym(ODOT, hit, elemsym)
elem = nod(ODEREF, elem, nil)
a := nod(OAS2, nil, nil)
a.List.Set2(v1, v2)
a.Rlist.Set2(key, elem)
body = []*Node{a}
}
case TCHAN:
// order.stmt arranged for a copy of the channel variable.
ha := a
n.Left = nil
hv1 := temp(t.Elem())
hv1.SetTypecheck(1)
if t.Elem().HasPointers() {
init = append(init, nod(OAS, hv1, nil))
}
hb := temp(types.Types[TBOOL])
n.Left = nod(ONE, hb, nodbool(false))
a := nod(OAS2RECV, nil, nil)
a.SetTypecheck(1)
a.List.Set2(hv1, hb)
a.Right = nod(ORECV, ha, nil)
n.Left.Ninit.Set1(a)
if v1 == nil {
body = nil
} else {
body = []*Node{nod(OAS, v1, hv1)}
}
// Zero hv1. This prevents hv1 from being the sole, inaccessible
// reference to an otherwise GC-able value during the next channel receive.
// See issue 15281.
body = append(body, nod(OAS, hv1, nil))
case TSTRING:
// Transform string range statements like "for v1, v2 = range a" into
//
// ha := a
// for hv1 := 0; hv1 < len(ha); {
// hv1t := hv1
// hv2 := rune(ha[hv1])
// if hv2 < utf8.RuneSelf {
// hv1++
// } else {
// hv2, hv1 = decoderune(ha, hv1)
// }
// v1, v2 = hv1t, hv2
// // original body
// }
// order.stmt arranged for a copy of the string variable.
ha := a
hv1 := temp(types.Types[TINT])
hv1t := temp(types.Types[TINT])
hv2 := temp(types.Runetype)
// hv1 := 0
init = append(init, nod(OAS, hv1, nil))
// hv1 < len(ha)
n.Left = nod(OLT, hv1, nod(OLEN, ha, nil))
if v1 != nil {
// hv1t = hv1
body = append(body, nod(OAS, hv1t, hv1))
}
// hv2 := rune(ha[hv1])
nind := nod(OINDEX, ha, hv1)
nind.SetBounded(true)
body = append(body, nod(OAS, hv2, conv(nind, types.Runetype)))
// if hv2 < utf8.RuneSelf
nif := nod(OIF, nil, nil)
nif.Left = nod(OLT, hv2, nodintconst(utf8.RuneSelf))
// hv1++
nif.Nbody.Set1(nod(OAS, hv1, nod(OADD, hv1, nodintconst(1))))
// } else {
eif := nod(OAS2, nil, nil)
nif.Rlist.Set1(eif)
// hv2, hv1 = decoderune(ha, hv1)
eif.List.Set2(hv2, hv1)
fn := syslook("decoderune")
eif.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, ha, hv1))
body = append(body, nif)
if v1 != nil {
if v2 != nil {
// v1, v2 = hv1t, hv2
a := nod(OAS2, nil, nil)
a.List.Set2(v1, v2)
a.Rlist.Set2(hv1t, hv2)
body = append(body, a)
} else {
// v1 = hv1t
body = append(body, nod(OAS, v1, hv1t))
}
}
}
n.Op = translatedLoopOp
typecheckslice(init, ctxStmt)
if ifGuard != nil {
ifGuard.Ninit.Append(init...)
ifGuard = typecheck(ifGuard, ctxStmt)
} else {
n.Ninit.Append(init...)
}
typecheckslice(n.Left.Ninit.Slice(), ctxStmt)
n.Left = typecheck(n.Left, ctxExpr)
n.Left = defaultlit(n.Left, nil)
n.Right = typecheck(n.Right, ctxStmt)
typecheckslice(body, ctxStmt)
n.Nbody.Prepend(body...)
if ifGuard != nil {
ifGuard.Nbody.Set1(n)
n = ifGuard
}
n = walkstmt(n)
lineno = lno
return n
}
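// As a rough sketch (a restatement, not additional source), the
// TARRAY/TSLICE case above rewrites
//
//	for v1, v2 := range a { body }
//
// for cheaply indexable element types into approximately
//
//	ha := a                  // order.stmt's copy of the range expression
//	hv1, hn := 0, len(ha)
//	for ; hv1 < hn; hv1 = hv1 + 1 {
//		v1, v2 = hv1, ha[hv1]
//		body
//	}
//
// while large element types go through the OFORUNTIL form built above,
// advancing a hidden pointer hp after each iteration of the body.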
// isMapClear checks if n is of the form:
//
// for k := range m {
// delete(m, k)
// }
//
// where == for keys of map m is reflexive.
func isMapClear(n *Node) bool {
if Debug.N != 0 || instrumenting {
return false
}
if n.Op != ORANGE || n.Type.Etype != TMAP || n.List.Len() != 1 {
return false
}
k := n.List.First()
if k == nil || k.isBlank() {
return false
}
// Require k to be a new variable name.
if k.Name == nil || k.Name.Defn != n {
return false
}
if n.Nbody.Len() != 1 {
return false
}
stmt := n.Nbody.First() // only stmt in body
if stmt == nil || stmt.Op != ODELETE {
return false
}
m := n.Right
if !samesafeexpr(stmt.List.First(), m) || !samesafeexpr(stmt.List.Second(), k) {
return false
}
// Keys where equality is not reflexive cannot be deleted from maps.
if !isreflexive(m.Type.Key()) {
return false
}
return true
}
// mapClear constructs a call to runtime.mapclear for the map m.
func mapClear(m *Node) *Node {
t := m.Type
// instantiate mapclear(typ *type, hmap map[any]any)
fn := syslook("mapclear")
fn = substArgTypes(fn, t.Key(), t.Elem())
n := mkcall1(fn, nil, nil, typename(t), m)
n = typecheck(n, ctxStmt)
n = walkstmt(n)
return n
}
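// As a sketch of the rewrite performed by isMapClear and mapClear above:
//
//	for k := range m {
//		delete(m, k)
//	}
//
// becomes, in effect, a single runtime call
//
//	mapclear(typename(t), m)
//
// instead of one delete per key.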
// Lower n into runtime·memclr if possible, for
// fast zeroing of slices and arrays (issue 5373).
// Look for instances of
//
// for i := range a {
// a[i] = zero
// }
//
// in which the evaluation of a is side-effect-free.
//
// Parameters are as in walkrange: "for v1, v2 = range a".
func arrayClear(n, v1, v2, a *Node) bool {
if Debug.N != 0 || instrumenting {
return false
}
if v1 == nil || v2 != nil {
return false
}
if n.Nbody.Len() != 1 || n.Nbody.First() == nil {
return false
}
stmt := n.Nbody.First() // only stmt in body
if stmt.Op != OAS || stmt.Left.Op != OINDEX {
return false
}
if !samesafeexpr(stmt.Left.Left, a) || !samesafeexpr(stmt.Left.Right, v1) {
return false
}
elemsize := n.Type.Elem().Width
if elemsize <= 0 || !isZero(stmt.Right) {
return false
}
// Convert to
// if len(a) != 0 {
// hp = &a[0]
// hn = len(a)*sizeof(elem(a))
// memclr{NoHeap,Has}Pointers(hp, hn)
// i = len(a) - 1
// }
n.Op = OIF
n.Nbody.Set(nil)
n.Left = nod(ONE, nod(OLEN, a, nil), nodintconst(0))
// hp = &a[0]
hp := temp(types.Types[TUNSAFEPTR])
tmp := nod(OINDEX, a, nodintconst(0))
tmp.SetBounded(true)
tmp = nod(OADDR, tmp, nil)
tmp = convnop(tmp, types.Types[TUNSAFEPTR])
n.Nbody.Append(nod(OAS, hp, tmp))
// hn = len(a) * sizeof(elem(a))
hn := temp(types.Types[TUINTPTR])
tmp = nod(OLEN, a, nil)
tmp = nod(OMUL, tmp, nodintconst(elemsize))
tmp = conv(tmp, types.Types[TUINTPTR])
n.Nbody.Append(nod(OAS, hn, tmp))
var fn *Node
if a.Type.Elem().HasPointers() {
// memclrHasPointers(hp, hn)
Curfn.Func.setWBPos(stmt.Pos)
fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
} else {
// memclrNoHeapPointers(hp, hn)
fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn)
}
n.Nbody.Append(fn)
// i = len(a) - 1
v1 = nod(OAS, v1, nod(OSUB, nod(OLEN, a, nil), nodintconst(1)))
n.Nbody.Append(v1)
n.Left = typecheck(n.Left, ctxExpr)
n.Left = defaultlit(n.Left, nil)
typecheckslice(n.Nbody.Slice(), ctxStmt)
n = walkstmt(n)
return true
}
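// As a sketch of the rewrite performed by arrayClear above:
//
//	for i := range a {
//		a[i] = zero
//	}
//
// becomes the guarded memclr shown in the "Convert to" comment:
//
//	if len(a) != 0 {
//		memclrNoHeapPointers(&a[0], uintptr(len(a))*elemsize) // or memclrHasPointers
//		i = len(a) - 1
//	}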
// addptr returns (*T)(uintptr(p) + n).
func addptr(p *Node, n int64) *Node {
t := p.Type
p = nod(OCONVNOP, p, nil)
p.Type = types.Types[TUINTPTR]
p = nod(OADD, p, nodintconst(n))
p = nod(OCONVNOP, p, nil)
p.Type = t
return p
}
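// For comparison (an illustration, not additional source), the same
// arithmetic written with package unsafe in ordinary Go keeps the
// conversion in a single expression, the documented pattern that keeps
// the pointer visible to the garbage collector:
//
//	func addptr(p unsafe.Pointer, n uintptr) unsafe.Pointer {
//		return unsafe.Pointer(uintptr(p) + n)
//	}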


@ -1,387 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import "cmd/compile/internal/types"
// select
func typecheckselect(sel *Node) {
var def *Node
lno := setlineno(sel)
typecheckslice(sel.Ninit.Slice(), ctxStmt)
for _, ncase := range sel.List.Slice() {
if ncase.Op != OCASE {
setlineno(ncase)
Fatalf("typecheckselect %v", ncase.Op)
}
if ncase.List.Len() == 0 {
// default
if def != nil {
yyerrorl(ncase.Pos, "multiple defaults in select (first at %v)", def.Line())
} else {
def = ncase
}
} else if ncase.List.Len() > 1 {
yyerrorl(ncase.Pos, "select cases cannot be lists")
} else {
ncase.List.SetFirst(typecheck(ncase.List.First(), ctxStmt))
n := ncase.List.First()
ncase.Left = n
ncase.List.Set(nil)
switch n.Op {
default:
pos := n.Pos
if n.Op == ONAME {
// We don't have the right position for ONAME nodes (see #15459 and
// others). Using ncase.Pos for now as it will provide the correct
// line number (assuming the expression follows the "case" keyword
// on the same line). This matches the approach before 1.10.
pos = ncase.Pos
}
yyerrorl(pos, "select case must be receive, send or assign recv")
// convert x = <-c into OSELRECV(x, <-c).
// remove implicit conversions; the eventual assignment
// will reintroduce them.
case OAS:
if (n.Right.Op == OCONVNOP || n.Right.Op == OCONVIFACE) && n.Right.Implicit() {
n.Right = n.Right.Left
}
if n.Right.Op != ORECV {
yyerrorl(n.Pos, "select assignment must have receive on right hand side")
break
}
n.Op = OSELRECV
// convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok
case OAS2RECV:
if n.Right.Op != ORECV {
yyerrorl(n.Pos, "select assignment must have receive on right hand side")
break
}
n.Op = OSELRECV2
n.Left = n.List.First()
n.List.Set1(n.List.Second())
// convert <-c into OSELRECV(N, <-c)
case ORECV:
n = nodl(n.Pos, OSELRECV, nil, n)
n.SetTypecheck(1)
ncase.Left = n
case OSEND:
break
}
}
typecheckslice(ncase.Nbody.Slice(), ctxStmt)
}
lineno = lno
}
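// As a summary sketch (restating the rewrites above), the case shapes
// accepted by typecheckselect and the ops they become:
//
//	select {
//	case c <- v:      // OSEND, left as is
//	case x = <-c:     // OAS      -> OSELRECV
//	case x, ok = <-c: // OAS2RECV -> OSELRECV2, with ntest=ok
//	case <-c:         // ORECV    -> OSELRECV(N, <-c)
//	default:          // recorded in def
//	}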
func walkselect(sel *Node) {
lno := setlineno(sel)
if sel.Nbody.Len() != 0 {
Fatalf("double walkselect")
}
init := sel.Ninit.Slice()
sel.Ninit.Set(nil)
init = append(init, walkselectcases(&sel.List)...)
sel.List.Set(nil)
sel.Nbody.Set(init)
walkstmtlist(sel.Nbody.Slice())
lineno = lno
}
func walkselectcases(cases *Nodes) []*Node {
ncas := cases.Len()
sellineno := lineno
// optimization: zero-case select
if ncas == 0 {
return []*Node{mkcall("block", nil, nil)}
}
// optimization: one-case select: single op.
if ncas == 1 {
cas := cases.First()
setlineno(cas)
l := cas.Ninit.Slice()
if cas.Left != nil { // not default:
n := cas.Left
l = append(l, n.Ninit.Slice()...)
n.Ninit.Set(nil)
switch n.Op {
default:
Fatalf("select %v", n.Op)
case OSEND:
// already ok
case OSELRECV, OSELRECV2:
if n.Op == OSELRECV || n.List.Len() == 0 {
if n.Left == nil {
n = n.Right
} else {
n.Op = OAS
}
break
}
if n.Left == nil {
nblank = typecheck(nblank, ctxExpr|ctxAssign)
n.Left = nblank
}
n.Op = OAS2
n.List.Prepend(n.Left)
n.Rlist.Set1(n.Right)
n.Right = nil
n.Left = nil
n.SetTypecheck(0)
n = typecheck(n, ctxStmt)
}
l = append(l, n)
}
l = append(l, cas.Nbody.Slice()...)
l = append(l, nod(OBREAK, nil, nil))
return l
}
// convert case value arguments to addresses.
// this rewrite is used by both the general code and the next optimization.
var dflt *Node
for _, cas := range cases.Slice() {
setlineno(cas)
n := cas.Left
if n == nil {
dflt = cas
continue
}
switch n.Op {
case OSEND:
n.Right = nod(OADDR, n.Right, nil)
n.Right = typecheck(n.Right, ctxExpr)
case OSELRECV, OSELRECV2:
if n.Op == OSELRECV2 && n.List.Len() == 0 {
n.Op = OSELRECV
}
if n.Left != nil {
n.Left = nod(OADDR, n.Left, nil)
n.Left = typecheck(n.Left, ctxExpr)
}
}
}
// optimization: two-case select but one is default: single non-blocking op.
if ncas == 2 && dflt != nil {
cas := cases.First()
if cas == dflt {
cas = cases.Second()
}
n := cas.Left
setlineno(n)
r := nod(OIF, nil, nil)
r.Ninit.Set(cas.Ninit.Slice())
switch n.Op {
default:
Fatalf("select %v", n.Op)
case OSEND:
// if selectnbsend(c, v) { body } else { default body }
ch := n.Left
r.Left = mkcall1(chanfn("selectnbsend", 2, ch.Type), types.Types[TBOOL], &r.Ninit, ch, n.Right)
case OSELRECV:
// if selectnbrecv(&v, c) { body } else { default body }
ch := n.Right.Left
elem := n.Left
if elem == nil {
elem = nodnil()
}
r.Left = mkcall1(chanfn("selectnbrecv", 2, ch.Type), types.Types[TBOOL], &r.Ninit, elem, ch)
case OSELRECV2:
// if selectnbrecv2(&v, &received, c) { body } else { default body }
ch := n.Right.Left
elem := n.Left
if elem == nil {
elem = nodnil()
}
receivedp := nod(OADDR, n.List.First(), nil)
receivedp = typecheck(receivedp, ctxExpr)
r.Left = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), types.Types[TBOOL], &r.Ninit, elem, receivedp, ch)
}
r.Left = typecheck(r.Left, ctxExpr)
r.Nbody.Set(cas.Nbody.Slice())
r.Rlist.Set(append(dflt.Ninit.Slice(), dflt.Nbody.Slice()...))
return []*Node{r, nod(OBREAK, nil, nil)}
}
if dflt != nil {
ncas--
}
casorder := make([]*Node, ncas)
nsends, nrecvs := 0, 0
var init []*Node
// generate sel-struct
lineno = sellineno
selv := temp(types.NewArray(scasetype(), int64(ncas)))
r := nod(OAS, selv, nil)
r = typecheck(r, ctxStmt)
init = append(init, r)
// No initialization for order; runtime.selectgo is responsible for that.
order := temp(types.NewArray(types.Types[TUINT16], 2*int64(ncas)))
var pc0, pcs *Node
if flag_race {
pcs = temp(types.NewArray(types.Types[TUINTPTR], int64(ncas)))
pc0 = typecheck(nod(OADDR, nod(OINDEX, pcs, nodintconst(0)), nil), ctxExpr)
} else {
pc0 = nodnil()
}
// register cases
for _, cas := range cases.Slice() {
setlineno(cas)
init = append(init, cas.Ninit.Slice()...)
cas.Ninit.Set(nil)
n := cas.Left
if n == nil { // default:
continue
}
var i int
var c, elem *Node
switch n.Op {
default:
Fatalf("select %v", n.Op)
case OSEND:
i = nsends
nsends++
c = n.Left
elem = n.Right
case OSELRECV, OSELRECV2:
nrecvs++
i = ncas - nrecvs
c = n.Right.Left
elem = n.Left
}
casorder[i] = cas
setField := func(f string, val *Node) {
r := nod(OAS, nodSym(ODOT, nod(OINDEX, selv, nodintconst(int64(i))), lookup(f)), val)
r = typecheck(r, ctxStmt)
init = append(init, r)
}
c = convnop(c, types.Types[TUNSAFEPTR])
setField("c", c)
if elem != nil {
elem = convnop(elem, types.Types[TUNSAFEPTR])
setField("elem", elem)
}
// TODO(mdempsky): There should be a cleaner way to
// handle this.
if flag_race {
r = mkcall("selectsetpc", nil, nil, nod(OADDR, nod(OINDEX, pcs, nodintconst(int64(i))), nil))
init = append(init, r)
}
}
if nsends+nrecvs != ncas {
Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
}
// run the select
lineno = sellineno
chosen := temp(types.Types[TINT])
recvOK := temp(types.Types[TBOOL])
r = nod(OAS2, nil, nil)
r.List.Set2(chosen, recvOK)
fn := syslook("selectgo")
r.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil)))
r = typecheck(r, ctxStmt)
init = append(init, r)
// selv and order are no longer alive after selectgo.
init = append(init, nod(OVARKILL, selv, nil))
init = append(init, nod(OVARKILL, order, nil))
if flag_race {
init = append(init, nod(OVARKILL, pcs, nil))
}
// dispatch cases
dispatch := func(cond, cas *Node) {
cond = typecheck(cond, ctxExpr)
cond = defaultlit(cond, nil)
r := nod(OIF, cond, nil)
if n := cas.Left; n != nil && n.Op == OSELRECV2 {
x := nod(OAS, n.List.First(), recvOK)
x = typecheck(x, ctxStmt)
r.Nbody.Append(x)
}
r.Nbody.AppendNodes(&cas.Nbody)
r.Nbody.Append(nod(OBREAK, nil, nil))
init = append(init, r)
}
if dflt != nil {
setlineno(dflt)
dispatch(nod(OLT, chosen, nodintconst(0)), dflt)
}
for i, cas := range casorder {
setlineno(cas)
dispatch(nod(OEQ, chosen, nodintconst(int64(i))), cas)
}
return init
}
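// As a sketch (restating the two-case-plus-default optimization above),
//
//	select {
//	case c <- v:
//		body
//	default:
//		dflt
//	}
//
// compiles to a single non-blocking runtime call:
//
//	if selectnbsend(c, v) {
//		body
//	} else {
//		dflt
//	}
//
// with selectnbrecv/selectnbrecv2 playing the same role for receives.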
// bytePtrToIndex returns a Node representing "(*byte)(&n[i])".
func bytePtrToIndex(n *Node, i int64) *Node {
s := nod(OADDR, nod(OINDEX, n, nodintconst(i)), nil)
t := types.NewPtr(types.Types[TUINT8])
return convnop(s, t)
}
var scase *types.Type
// Keep in sync with src/runtime/select.go.
func scasetype() *types.Type {
if scase == nil {
scase = tostruct([]*Node{
namedfield("c", types.Types[TUNSAFEPTR]),
namedfield("elem", types.Types[TUNSAFEPTR]),
})
scase.SetNoalg(true)
}
return scase
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,756 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
"cmd/internal/src"
"sort"
)
// typecheckswitch typechecks a switch statement.
func typecheckswitch(n *Node) {
typecheckslice(n.Ninit.Slice(), ctxStmt)
if n.Left != nil && n.Left.Op == OTYPESW {
typecheckTypeSwitch(n)
} else {
typecheckExprSwitch(n)
}
}
func typecheckTypeSwitch(n *Node) {
n.Left.Right = typecheck(n.Left.Right, ctxExpr)
t := n.Left.Right.Type
if t != nil && !t.IsInterface() {
yyerrorl(n.Pos, "cannot type switch on non-interface value %L", n.Left.Right)
t = nil
}
// We don't actually declare the type switch's guarded
// declaration itself. So if there are no cases, we won't
// notice that it went unused.
if v := n.Left.Left; v != nil && !v.isBlank() && n.List.Len() == 0 {
yyerrorl(v.Pos, "%v declared but not used", v.Sym)
}
var defCase, nilCase *Node
var ts typeSet
for _, ncase := range n.List.Slice() {
ls := ncase.List.Slice()
if len(ls) == 0 { // default:
if defCase != nil {
yyerrorl(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line())
} else {
defCase = ncase
}
}
for i := range ls {
ls[i] = typecheck(ls[i], ctxExpr|ctxType)
n1 := ls[i]
if t == nil || n1.Type == nil {
continue
}
var missing, have *types.Field
var ptr int
switch {
case n1.isNil(): // case nil:
if nilCase != nil {
yyerrorl(ncase.Pos, "multiple nil cases in type switch (first at %v)", nilCase.Line())
} else {
nilCase = ncase
}
case n1.Op != OTYPE:
yyerrorl(ncase.Pos, "%L is not a type", n1)
case !n1.Type.IsInterface() && !implements(n1.Type, t, &missing, &have, &ptr) && !missing.Broke():
if have != nil && !have.Broke() {
yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
" (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left.Right, n1.Type, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
} else if ptr != 0 {
yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
" (%v method has pointer receiver)", n.Left.Right, n1.Type, missing.Sym)
} else {
yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
" (missing %v method)", n.Left.Right, n1.Type, missing.Sym)
}
}
if n1.Op == OTYPE {
ts.add(ncase.Pos, n1.Type)
}
}
if ncase.Rlist.Len() != 0 {
// Assign the clause variable's type.
vt := t
if len(ls) == 1 {
if ls[0].Op == OTYPE {
vt = ls[0].Type
} else if ls[0].Op != OLITERAL { // TODO(mdempsky): Should be !ls[0].isNil()
// Invalid single-type case;
// mark variable as broken.
vt = nil
}
}
// TODO(mdempsky): It should be possible to
// still typecheck the case body.
if vt == nil {
continue
}
nvar := ncase.Rlist.First()
nvar.Type = vt
nvar = typecheck(nvar, ctxExpr|ctxAssign)
ncase.Rlist.SetFirst(nvar)
}
typecheckslice(ncase.Nbody.Slice(), ctxStmt)
}
}
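// As a source-level sketch (an illustration, not additional source), the
// impossible-case checks above reject concrete case types that cannot
// satisfy the switched interface:
//
//	type I interface{ M() }
//	type T struct{} // no method M
//
//	switch i.(type) { // i has type I
//	case T: // impossible type switch case: T is missing the M method
//	}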
type typeSet struct {
m map[string][]typeSetEntry
}
type typeSetEntry struct {
pos src.XPos
typ *types.Type
}
func (s *typeSet) add(pos src.XPos, typ *types.Type) {
if s.m == nil {
s.m = make(map[string][]typeSetEntry)
}
// LongString does not uniquely identify types, so we need to
// disambiguate collisions with types.Identical.
// TODO(mdempsky): Add a method that *is* unique.
ls := typ.LongString()
prevs := s.m[ls]
for _, prev := range prevs {
if types.Identical(typ, prev.typ) {
yyerrorl(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, linestr(prev.pos))
return
}
}
s.m[ls] = append(prevs, typeSetEntry{pos, typ})
}
func typecheckExprSwitch(n *Node) {
t := types.Types[TBOOL]
if n.Left != nil {
n.Left = typecheck(n.Left, ctxExpr)
n.Left = defaultlit(n.Left, nil)
t = n.Left.Type
}
var nilonly string
if t != nil {
switch {
case t.IsMap():
nilonly = "map"
case t.Etype == TFUNC:
nilonly = "func"
case t.IsSlice():
nilonly = "slice"
case !IsComparable(t):
if t.IsStruct() {
yyerrorl(n.Pos, "cannot switch on %L (struct containing %v cannot be compared)", n.Left, IncomparableField(t).Type)
} else {
yyerrorl(n.Pos, "cannot switch on %L", n.Left)
}
t = nil
}
}
var defCase *Node
var cs constSet
for _, ncase := range n.List.Slice() {
ls := ncase.List.Slice()
if len(ls) == 0 { // default:
if defCase != nil {
yyerrorl(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line())
} else {
defCase = ncase
}
}
for i := range ls {
setlineno(ncase)
ls[i] = typecheck(ls[i], ctxExpr)
ls[i] = defaultlit(ls[i], t)
n1 := ls[i]
if t == nil || n1.Type == nil {
continue
}
if nilonly != "" && !n1.isNil() {
yyerrorl(ncase.Pos, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left)
} else if t.IsInterface() && !n1.Type.IsInterface() && !IsComparable(n1.Type) {
yyerrorl(ncase.Pos, "invalid case %L in switch (incomparable type)", n1)
} else {
op1, _ := assignop(n1.Type, t)
op2, _ := assignop(t, n1.Type)
if op1 == OXXX && op2 == OXXX {
if n.Left != nil {
yyerrorl(ncase.Pos, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t)
} else {
yyerrorl(ncase.Pos, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type)
}
}
}
// Don't check for duplicate bools. Although the spec allows it,
// (1) the compiler hasn't checked it in the past, so compatibility mandates it, and
// (2) it would disallow useful things like
// case GOARCH == "arm" && GOARM == "5":
// case GOARCH == "arm":
// which would both evaluate to false for non-ARM compiles.
if !n1.Type.IsBoolean() {
cs.add(ncase.Pos, n1, "case", "switch")
}
}
typecheckslice(ncase.Nbody.Slice(), ctxStmt)
}
}
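// As a source-level sketch of the nilonly restriction above: maps, funcs,
// and slices are not comparable, so they may only be switched against nil.
//
//	var m map[string]int
//	switch m {
//	case nil: // ok
//	default:
//	}
//
// Any non-nil case value for m is rejected with
// "can only compare map m to nil".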
// walkswitch walks a switch statement.
func walkswitch(sw *Node) {
// Guard against double walk, see #25776.
if sw.List.Len() == 0 && sw.Nbody.Len() > 0 {
return // Was fatal, but eliminating every possible source of double-walking is hard
}
if sw.Left != nil && sw.Left.Op == OTYPESW {
walkTypeSwitch(sw)
} else {
walkExprSwitch(sw)
}
}
// walkExprSwitch generates an AST implementing sw. sw is an
// expression switch.
func walkExprSwitch(sw *Node) {
lno := setlineno(sw)
cond := sw.Left
sw.Left = nil
// convert switch {...} to switch true {...}
if cond == nil {
cond = nodbool(true)
cond = typecheck(cond, ctxExpr)
cond = defaultlit(cond, nil)
}
// Given "switch string(byteslice)",
// with all cases being side-effect free,
// use a zero-cost alias of the byte slice.
// Do this before calling walkexpr on cond,
// because walkexpr will lower the string
// conversion into a runtime call.
// See issue 24937 for more discussion.
if cond.Op == OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
cond.Op = OBYTES2STRTMP
}
cond = walkexpr(cond, &sw.Ninit)
if cond.Op != OLITERAL {
cond = copyexpr(cond, cond.Type, &sw.Nbody)
}
lineno = lno
s := exprSwitch{
exprname: cond,
}
var defaultGoto *Node
var body Nodes
for _, ncase := range sw.List.Slice() {
label := autolabel(".s")
jmp := npos(ncase.Pos, nodSym(OGOTO, nil, label))
// Process case dispatch.
if ncase.List.Len() == 0 {
if defaultGoto != nil {
Fatalf("duplicate default case not detected during typechecking")
}
defaultGoto = jmp
}
for _, n1 := range ncase.List.Slice() {
s.Add(ncase.Pos, n1, jmp)
}
// Process body.
body.Append(npos(ncase.Pos, nodSym(OLABEL, nil, label)))
body.Append(ncase.Nbody.Slice()...)
if fall, pos := hasFall(ncase.Nbody.Slice()); !fall {
br := nod(OBREAK, nil, nil)
br.Pos = pos
body.Append(br)
}
}
sw.List.Set(nil)
if defaultGoto == nil {
br := nod(OBREAK, nil, nil)
br.Pos = br.Pos.WithNotStmt()
defaultGoto = br
}
s.Emit(&sw.Nbody)
sw.Nbody.Append(defaultGoto)
sw.Nbody.AppendNodes(&body)
walkstmtlist(sw.Nbody.Slice())
}
// An exprSwitch walks an expression switch.
type exprSwitch struct {
exprname *Node // value being switched on
done Nodes
clauses []exprClause
}
type exprClause struct {
pos src.XPos
lo, hi *Node
jmp *Node
}
func (s *exprSwitch) Add(pos src.XPos, expr, jmp *Node) {
c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp}
if okforcmp[s.exprname.Type.Etype] && expr.Op == OLITERAL {
s.clauses = append(s.clauses, c)
return
}
s.flush()
s.clauses = append(s.clauses, c)
s.flush()
}
func (s *exprSwitch) Emit(out *Nodes) {
s.flush()
out.AppendNodes(&s.done)
}
func (s *exprSwitch) flush() {
cc := s.clauses
s.clauses = nil
if len(cc) == 0 {
return
}
// Caution: If len(cc) == 1, then cc[0] might not be an OLITERAL.
// The code below is structured to implicitly handle this case
// (e.g., sort.Slice doesn't need to invoke the less function
// when there's only a single slice element).
if s.exprname.Type.IsString() && len(cc) >= 2 {
// Sort strings by length and then by value. It is
// much cheaper to compare lengths than values, and
// all we need here is consistency. We respect this
// sorting below.
sort.Slice(cc, func(i, j int) bool {
si := cc[i].lo.StringVal()
sj := cc[j].lo.StringVal()
if len(si) != len(sj) {
return len(si) < len(sj)
}
return si < sj
})
// runLen returns the string length associated with a
// particular run of exprClauses.
runLen := func(run []exprClause) int64 { return int64(len(run[0].lo.StringVal())) }
// Collapse runs of consecutive strings with the same length.
var runs [][]exprClause
start := 0
for i := 1; i < len(cc); i++ {
if runLen(cc[start:]) != runLen(cc[i:]) {
runs = append(runs, cc[start:i])
start = i
}
}
runs = append(runs, cc[start:])
// Perform two-level binary search.
nlen := nod(OLEN, s.exprname, nil)
binarySearch(len(runs), &s.done,
func(i int) *Node {
return nod(OLE, nlen, nodintconst(runLen(runs[i-1])))
},
func(i int, nif *Node) {
run := runs[i]
nif.Left = nod(OEQ, nlen, nodintconst(runLen(run)))
s.search(run, &nif.Nbody)
},
)
return
}
sort.Slice(cc, func(i, j int) bool {
return compareOp(cc[i].lo.Val(), OLT, cc[j].lo.Val())
})
// Merge consecutive integer cases.
if s.exprname.Type.IsInteger() {
merged := cc[:1]
for _, c := range cc[1:] {
last := &merged[len(merged)-1]
if last.jmp == c.jmp && last.hi.Int64Val()+1 == c.lo.Int64Val() {
last.hi = c.lo
} else {
merged = append(merged, c)
}
}
cc = merged
}
s.search(cc, &s.done)
}
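// As a rough sketch of the two-level dispatch built above for
//
//	switch s {
//	case "ab", "xy", "abc":
//	}
//
// the search first dispatches on len(s), then compares values within each
// run of equal-length strings, roughly:
//
//	if len(s) == 2 {
//		if s == "ab" { ... }
//		if s == "xy" { ... }
//	} else if len(s) == 3 {
//		if s == "abc" { ... }
//	}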
func (s *exprSwitch) search(cc []exprClause, out *Nodes) {
binarySearch(len(cc), out,
func(i int) *Node {
return nod(OLE, s.exprname, cc[i-1].hi)
},
func(i int, nif *Node) {
c := &cc[i]
nif.Left = c.test(s.exprname)
nif.Nbody.Set1(c.jmp)
},
)
}
func (c *exprClause) test(exprname *Node) *Node {
// Integer range.
if c.hi != c.lo {
low := nodl(c.pos, OGE, exprname, c.lo)
high := nodl(c.pos, OLE, exprname, c.hi)
return nodl(c.pos, OANDAND, low, high)
}
// Optimize "switch true { ...}" and "switch false { ... }".
if Isconst(exprname, CTBOOL) && !c.lo.Type.IsInterface() {
if exprname.BoolVal() {
return c.lo
} else {
return nodl(c.pos, ONOT, c.lo, nil)
}
}
return nodl(c.pos, OEQ, exprname, c.lo)
}
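// As a sketch (restating the merge in flush plus the range test here),
//
//	case 1, 2, 3: // one merged clause with lo=1, hi=3
//
// compiles to
//
//	if 1 <= x && x <= 3 { goto caseBody }
//
// where x stands for exprname and caseBody for the clause's jump target.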
func allCaseExprsAreSideEffectFree(sw *Node) bool {
// In theory, we could be more aggressive, allowing any
// side-effect-free expressions in cases, but it's a bit
// tricky because some of that information is unavailable due
// to the introduction of temporaries during order.
// Restricting to constants is simple and probably powerful
// enough.
for _, ncase := range sw.List.Slice() {
if ncase.Op != OCASE {
Fatalf("switch string(byteslice) bad op: %v", ncase.Op)
}
for _, v := range ncase.List.Slice() {
if v.Op != OLITERAL {
return false
}
}
}
return true
}
// hasFall reports whether stmts ends with a "fallthrough" statement.
func hasFall(stmts []*Node) (bool, src.XPos) {
// Search backwards for the index of the fallthrough
// statement. Do not assume it'll be in the last
// position, since in some cases (e.g. when the statement
// list contains autotmp_ variables), one or more OVARKILL
// nodes will be at the end of the list.
i := len(stmts) - 1
for i >= 0 && stmts[i].Op == OVARKILL {
i--
}
if i < 0 {
return false, src.NoXPos
}
return stmts[i].Op == OFALL, stmts[i].Pos
}
// walkTypeSwitch generates an AST that implements sw, where sw is a
// type switch.
func walkTypeSwitch(sw *Node) {
var s typeSwitch
s.facename = sw.Left.Right
sw.Left = nil
s.facename = walkexpr(s.facename, &sw.Ninit)
s.facename = copyexpr(s.facename, s.facename.Type, &sw.Nbody)
s.okname = temp(types.Types[TBOOL])
// Get interface descriptor word.
// For empty interfaces this will be the type.
// For non-empty interfaces this will be the itab.
itab := nod(OITAB, s.facename, nil)
// For empty interfaces, do:
// if e._type == nil {
// do nil case if it exists, otherwise default
// }
// h := e._type.hash
// Use a similar strategy for non-empty interfaces.
ifNil := nod(OIF, nil, nil)
ifNil.Left = nod(OEQ, itab, nodnil())
lineno = lineno.WithNotStmt() // disable statement marks after the first check.
ifNil.Left = typecheck(ifNil.Left, ctxExpr)
ifNil.Left = defaultlit(ifNil.Left, nil)
// ifNil.Nbody assigned at end.
sw.Nbody.Append(ifNil)
// Load hash from type or itab.
dotHash := nodSym(ODOTPTR, itab, nil)
dotHash.Type = types.Types[TUINT32]
dotHash.SetTypecheck(1)
if s.facename.Type.IsEmptyInterface() {
dotHash.Xoffset = int64(2 * Widthptr) // offset of hash in runtime._type
} else {
dotHash.Xoffset = int64(2 * Widthptr) // offset of hash in runtime.itab
}
dotHash.SetBounded(true) // guaranteed not to fault
s.hashname = copyexpr(dotHash, dotHash.Type, &sw.Nbody)
br := nod(OBREAK, nil, nil)
var defaultGoto, nilGoto *Node
var body Nodes
for _, ncase := range sw.List.Slice() {
var caseVar *Node
if ncase.Rlist.Len() != 0 {
caseVar = ncase.Rlist.First()
}
// For single-type cases with an interface type,
// we initialize the case variable as part of the type assertion.
// In other cases, we initialize it in the body.
var singleType *types.Type
if ncase.List.Len() == 1 && ncase.List.First().Op == OTYPE {
singleType = ncase.List.First().Type
}
caseVarInitialized := false
label := autolabel(".s")
jmp := npos(ncase.Pos, nodSym(OGOTO, nil, label))
if ncase.List.Len() == 0 { // default:
if defaultGoto != nil {
Fatalf("duplicate default case not detected during typechecking")
}
defaultGoto = jmp
}
for _, n1 := range ncase.List.Slice() {
if n1.isNil() { // case nil:
if nilGoto != nil {
Fatalf("duplicate nil case not detected during typechecking")
}
nilGoto = jmp
continue
}
if singleType != nil && singleType.IsInterface() {
s.Add(ncase.Pos, n1.Type, caseVar, jmp)
caseVarInitialized = true
} else {
s.Add(ncase.Pos, n1.Type, nil, jmp)
}
}
body.Append(npos(ncase.Pos, nodSym(OLABEL, nil, label)))
if caseVar != nil && !caseVarInitialized {
val := s.facename
if singleType != nil {
// We have a single concrete type. Extract the data.
if singleType.IsInterface() {
Fatalf("singleType interface should have been handled in Add")
}
val = ifaceData(ncase.Pos, s.facename, singleType)
}
l := []*Node{
nodl(ncase.Pos, ODCL, caseVar, nil),
nodl(ncase.Pos, OAS, caseVar, val),
}
typecheckslice(l, ctxStmt)
body.Append(l...)
}
body.Append(ncase.Nbody.Slice()...)
body.Append(br)
}
sw.List.Set(nil)
if defaultGoto == nil {
defaultGoto = br
}
if nilGoto == nil {
nilGoto = defaultGoto
}
ifNil.Nbody.Set1(nilGoto)
s.Emit(&sw.Nbody)
sw.Nbody.Append(defaultGoto)
sw.Nbody.AppendNodes(&body)
walkstmtlist(sw.Nbody.Slice())
}
// A typeSwitch walks a type switch.
type typeSwitch struct {
// Temporary variables (i.e., ONAMEs) used by type switch dispatch logic:
facename *Node // value being type-switched on
hashname *Node // type hash of the value being type-switched on
okname *Node // boolean used for comma-ok type assertions
done Nodes
clauses []typeClause
}
type typeClause struct {
hash uint32
body Nodes
}
func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp *Node) {
var body Nodes
if caseVar != nil {
l := []*Node{
nodl(pos, ODCL, caseVar, nil),
nodl(pos, OAS, caseVar, nil),
}
typecheckslice(l, ctxStmt)
body.Append(l...)
} else {
caseVar = nblank
}
// cv, ok = iface.(type)
as := nodl(pos, OAS2, nil, nil)
as.List.Set2(caseVar, s.okname) // cv, ok =
dot := nodl(pos, ODOTTYPE, s.facename, nil)
dot.Type = typ // iface.(type)
as.Rlist.Set1(dot)
as = typecheck(as, ctxStmt)
as = walkexpr(as, &body)
body.Append(as)
// if ok { goto label }
nif := nodl(pos, OIF, nil, nil)
nif.Left = s.okname
nif.Nbody.Set1(jmp)
body.Append(nif)
if !typ.IsInterface() {
s.clauses = append(s.clauses, typeClause{
hash: typehash(typ),
body: body,
})
return
}
s.flush()
s.done.AppendNodes(&body)
}
func (s *typeSwitch) Emit(out *Nodes) {
s.flush()
out.AppendNodes(&s.done)
}
func (s *typeSwitch) flush() {
cc := s.clauses
s.clauses = nil
if len(cc) == 0 {
return
}
sort.Slice(cc, func(i, j int) bool { return cc[i].hash < cc[j].hash })
// Combine adjacent cases with the same hash.
merged := cc[:1]
for _, c := range cc[1:] {
last := &merged[len(merged)-1]
if last.hash == c.hash {
last.body.AppendNodes(&c.body)
} else {
merged = append(merged, c)
}
}
cc = merged
binarySearch(len(cc), &s.done,
func(i int) *Node {
return nod(OLE, s.hashname, nodintconst(int64(cc[i-1].hash)))
},
func(i int, nif *Node) {
// TODO(mdempsky): Omit hash equality check if
// there's only one type.
c := cc[i]
nif.Left = nod(OEQ, s.hashname, nodintconst(int64(c.hash)))
nif.Nbody.AppendNodes(&c.body)
},
)
}
// binarySearch constructs a binary search tree for handling n cases,
// and appends it to out. It's used for efficiently implementing
// switch statements.
//
// less(i) should return a boolean expression. If it evaluates true,
// then cases before i will be tested; otherwise, cases i and later.
//
// base(i, nif) should set up nif (an OIF node) to test case i. In
// particular, it should set nif.Left and nif.Nbody.
func binarySearch(n int, out *Nodes, less func(i int) *Node, base func(i int, nif *Node)) {
const binarySearchMin = 4 // minimum number of cases for binary search
var do func(lo, hi int, out *Nodes)
do = func(lo, hi int, out *Nodes) {
n := hi - lo
if n < binarySearchMin {
for i := lo; i < hi; i++ {
nif := nod(OIF, nil, nil)
base(i, nif)
lineno = lineno.WithNotStmt()
nif.Left = typecheck(nif.Left, ctxExpr)
nif.Left = defaultlit(nif.Left, nil)
out.Append(nif)
out = &nif.Rlist
}
return
}
half := lo + n/2
nif := nod(OIF, nil, nil)
nif.Left = less(half)
lineno = lineno.WithNotStmt()
nif.Left = typecheck(nif.Left, ctxExpr)
nif.Left = defaultlit(nif.Left, nil)
do(lo, half, &nif.Nbody)
do(half, hi, &nif.Rlist)
out.Append(nif)
}
do(0, n, out)
}
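// As a sketch (an illustration with placeholder names), for eight integer
// cases and binarySearchMin = 4, binarySearch emits roughly
//
//	if x <= 4 { // less(half)
//		if x == 1 { goto case1 } // base(i, nif): linear below the minimum
//		if x == 2 { goto case2 }
//		if x == 3 { goto case3 }
//		if x == 4 { goto case4 }
//	} else {
//		if x == 5 { goto case5 }
//		...
//	}
//
// where x and the case labels stand in for exprname/hashname and the
// per-case jumps.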

File diff suppressed because it is too large


@ -9,6 +9,8 @@ package gc
import (
"os"
tracepkg "runtime/trace"
+
+ "cmd/compile/internal/base"
)
func init() {
@ -18,10 +20,10 @@ func init() {
func traceHandlerGo17(traceprofile string) {
f, err := os.Create(traceprofile)
if err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
if err := tracepkg.Start(f); err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
- atExit(tracepkg.Stop)
+ base.AtExit(tracepkg.Stop)
}

File diff suppressed because it is too large


@ -1,58 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
)
// convenience constants
const (
Txxx = types.Txxx
TINT8 = types.TINT8
TUINT8 = types.TUINT8
TINT16 = types.TINT16
TUINT16 = types.TUINT16
TINT32 = types.TINT32
TUINT32 = types.TUINT32
TINT64 = types.TINT64
TUINT64 = types.TUINT64
TINT = types.TINT
TUINT = types.TUINT
TUINTPTR = types.TUINTPTR
TCOMPLEX64 = types.TCOMPLEX64
TCOMPLEX128 = types.TCOMPLEX128
TFLOAT32 = types.TFLOAT32
TFLOAT64 = types.TFLOAT64
TBOOL = types.TBOOL
TPTR = types.TPTR
TFUNC = types.TFUNC
TSLICE = types.TSLICE
TARRAY = types.TARRAY
TSTRUCT = types.TSTRUCT
TCHAN = types.TCHAN
TMAP = types.TMAP
TINTER = types.TINTER
TFORW = types.TFORW
TANY = types.TANY
TSTRING = types.TSTRING
TUNSAFEPTR = types.TUNSAFEPTR
// pseudo-types for literals
TIDEAL = types.TIDEAL
TNIL = types.TNIL
TBLANK = types.TBLANK
// pseudo-types for frame layout
TFUNCARGS = types.TFUNCARGS
TCHANARGS = types.TCHANARGS
NTYPE = types.NTYPE
)


@@ -1,16 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements conversions between *types.Node and *Node.
// TODO(gri) try to eliminate these soon
package gc
import (
"cmd/compile/internal/types"
"unsafe"
)
func asNode(n *types.Node) *Node { return (*Node)(unsafe.Pointer(n)) }
func asTypesNode(n *Node) *types.Node { return (*types.Node)(unsafe.Pointer(n)) }


@@ -1,453 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// TODO(gri) This file should probably become part of package types.
package gc
import "cmd/compile/internal/types"
// builtinpkg is a fake package that declares the universe block.
var builtinpkg *types.Pkg
var basicTypes = [...]struct {
name string
etype types.EType
}{
{"int8", TINT8},
{"int16", TINT16},
{"int32", TINT32},
{"int64", TINT64},
{"uint8", TUINT8},
{"uint16", TUINT16},
{"uint32", TUINT32},
{"uint64", TUINT64},
{"float32", TFLOAT32},
{"float64", TFLOAT64},
{"complex64", TCOMPLEX64},
{"complex128", TCOMPLEX128},
{"bool", TBOOL},
{"string", TSTRING},
}
var typedefs = [...]struct {
name string
etype types.EType
sameas32 types.EType
sameas64 types.EType
}{
{"int", TINT, TINT32, TINT64},
{"uint", TUINT, TUINT32, TUINT64},
{"uintptr", TUINTPTR, TUINT32, TUINT64},
}
var builtinFuncs = [...]struct {
name string
op Op
}{
{"append", OAPPEND},
{"cap", OCAP},
{"close", OCLOSE},
{"complex", OCOMPLEX},
{"copy", OCOPY},
{"delete", ODELETE},
{"imag", OIMAG},
{"len", OLEN},
{"make", OMAKE},
{"new", ONEW},
{"panic", OPANIC},
{"print", OPRINT},
{"println", OPRINTN},
{"real", OREAL},
{"recover", ORECOVER},
}
// isBuiltinFuncName reports whether name matches a builtin function
// name.
func isBuiltinFuncName(name string) bool {
for _, fn := range &builtinFuncs {
if fn.name == name {
return true
}
}
return false
}
var unsafeFuncs = [...]struct {
name string
op Op
}{
{"Alignof", OALIGNOF},
{"Offsetof", OOFFSETOF},
{"Sizeof", OSIZEOF},
}
// initUniverse initializes the universe block.
func initUniverse() {
lexinit()
typeinit()
lexinit1()
}
// lexinit initializes known symbols and the basic types.
func lexinit() {
for _, s := range &basicTypes {
etype := s.etype
if int(etype) >= len(types.Types) {
Fatalf("lexinit: %s bad etype", s.name)
}
s2 := builtinpkg.Lookup(s.name)
t := types.Types[etype]
if t == nil {
t = types.New(etype)
t.Sym = s2
if etype != TANY && etype != TSTRING {
dowidth(t)
}
types.Types[etype] = t
}
s2.Def = asTypesNode(typenod(t))
asNode(s2.Def).Name = new(Name)
}
for _, s := range &builtinFuncs {
s2 := builtinpkg.Lookup(s.name)
s2.Def = asTypesNode(newname(s2))
asNode(s2.Def).SetSubOp(s.op)
}
for _, s := range &unsafeFuncs {
s2 := unsafepkg.Lookup(s.name)
s2.Def = asTypesNode(newname(s2))
asNode(s2.Def).SetSubOp(s.op)
}
types.UntypedString = types.New(TSTRING)
types.UntypedBool = types.New(TBOOL)
types.Types[TANY] = types.New(TANY)
s := builtinpkg.Lookup("true")
s.Def = asTypesNode(nodbool(true))
asNode(s.Def).Sym = lookup("true")
asNode(s.Def).Name = new(Name)
asNode(s.Def).Type = types.UntypedBool
s = builtinpkg.Lookup("false")
s.Def = asTypesNode(nodbool(false))
asNode(s.Def).Sym = lookup("false")
asNode(s.Def).Name = new(Name)
asNode(s.Def).Type = types.UntypedBool
s = lookup("_")
s.Block = -100
s.Def = asTypesNode(newname(s))
types.Types[TBLANK] = types.New(TBLANK)
asNode(s.Def).Type = types.Types[TBLANK]
nblank = asNode(s.Def)
s = builtinpkg.Lookup("_")
s.Block = -100
s.Def = asTypesNode(newname(s))
types.Types[TBLANK] = types.New(TBLANK)
asNode(s.Def).Type = types.Types[TBLANK]
types.Types[TNIL] = types.New(TNIL)
s = builtinpkg.Lookup("nil")
var v Val
v.U = new(NilVal)
s.Def = asTypesNode(nodlit(v))
asNode(s.Def).Sym = s
asNode(s.Def).Name = new(Name)
s = builtinpkg.Lookup("iota")
s.Def = asTypesNode(nod(OIOTA, nil, nil))
asNode(s.Def).Sym = s
asNode(s.Def).Name = new(Name)
}
func typeinit() {
if Widthptr == 0 {
Fatalf("typeinit before betypeinit")
}
for et := types.EType(0); et < NTYPE; et++ {
simtype[et] = et
}
types.Types[TPTR] = types.New(TPTR)
dowidth(types.Types[TPTR])
t := types.New(TUNSAFEPTR)
types.Types[TUNSAFEPTR] = t
t.Sym = unsafepkg.Lookup("Pointer")
t.Sym.Def = asTypesNode(typenod(t))
asNode(t.Sym.Def).Name = new(Name)
dowidth(types.Types[TUNSAFEPTR])
for et := TINT8; et <= TUINT64; et++ {
isInt[et] = true
}
isInt[TINT] = true
isInt[TUINT] = true
isInt[TUINTPTR] = true
isFloat[TFLOAT32] = true
isFloat[TFLOAT64] = true
isComplex[TCOMPLEX64] = true
isComplex[TCOMPLEX128] = true
// initialize okfor
for et := types.EType(0); et < NTYPE; et++ {
if isInt[et] || et == TIDEAL {
okforeq[et] = true
okforcmp[et] = true
okforarith[et] = true
okforadd[et] = true
okforand[et] = true
okforconst[et] = true
issimple[et] = true
minintval[et] = new(Mpint)
maxintval[et] = new(Mpint)
}
if isFloat[et] {
okforeq[et] = true
okforcmp[et] = true
okforadd[et] = true
okforarith[et] = true
okforconst[et] = true
issimple[et] = true
minfltval[et] = newMpflt()
maxfltval[et] = newMpflt()
}
if isComplex[et] {
okforeq[et] = true
okforadd[et] = true
okforarith[et] = true
okforconst[et] = true
issimple[et] = true
}
}
issimple[TBOOL] = true
okforadd[TSTRING] = true
okforbool[TBOOL] = true
okforcap[TARRAY] = true
okforcap[TCHAN] = true
okforcap[TSLICE] = true
okforconst[TBOOL] = true
okforconst[TSTRING] = true
okforlen[TARRAY] = true
okforlen[TCHAN] = true
okforlen[TMAP] = true
okforlen[TSLICE] = true
okforlen[TSTRING] = true
okforeq[TPTR] = true
okforeq[TUNSAFEPTR] = true
okforeq[TINTER] = true
okforeq[TCHAN] = true
okforeq[TSTRING] = true
okforeq[TBOOL] = true
okforeq[TMAP] = true // nil only; refined in typecheck
okforeq[TFUNC] = true // nil only; refined in typecheck
okforeq[TSLICE] = true // nil only; refined in typecheck
okforeq[TARRAY] = true // only if element type is comparable; refined in typecheck
okforeq[TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck
okforcmp[TSTRING] = true
var i int
for i = 0; i < len(okfor); i++ {
okfor[i] = okfornone[:]
}
// binary
okfor[OADD] = okforadd[:]
okfor[OAND] = okforand[:]
okfor[OANDAND] = okforbool[:]
okfor[OANDNOT] = okforand[:]
okfor[ODIV] = okforarith[:]
okfor[OEQ] = okforeq[:]
okfor[OGE] = okforcmp[:]
okfor[OGT] = okforcmp[:]
okfor[OLE] = okforcmp[:]
okfor[OLT] = okforcmp[:]
okfor[OMOD] = okforand[:]
okfor[OMUL] = okforarith[:]
okfor[ONE] = okforeq[:]
okfor[OOR] = okforand[:]
okfor[OOROR] = okforbool[:]
okfor[OSUB] = okforarith[:]
okfor[OXOR] = okforand[:]
okfor[OLSH] = okforand[:]
okfor[ORSH] = okforand[:]
// unary
okfor[OBITNOT] = okforand[:]
okfor[ONEG] = okforarith[:]
okfor[ONOT] = okforbool[:]
okfor[OPLUS] = okforarith[:]
// special
okfor[OCAP] = okforcap[:]
okfor[OLEN] = okforlen[:]
// comparison
iscmp[OLT] = true
iscmp[OGT] = true
iscmp[OGE] = true
iscmp[OLE] = true
iscmp[OEQ] = true
iscmp[ONE] = true
maxintval[TINT8].SetString("0x7f")
minintval[TINT8].SetString("-0x80")
maxintval[TINT16].SetString("0x7fff")
minintval[TINT16].SetString("-0x8000")
maxintval[TINT32].SetString("0x7fffffff")
minintval[TINT32].SetString("-0x80000000")
maxintval[TINT64].SetString("0x7fffffffffffffff")
minintval[TINT64].SetString("-0x8000000000000000")
maxintval[TUINT8].SetString("0xff")
maxintval[TUINT16].SetString("0xffff")
maxintval[TUINT32].SetString("0xffffffff")
maxintval[TUINT64].SetString("0xffffffffffffffff")
// f is a valid float if min < f < max. (min and max are not themselves valid.)
maxfltval[TFLOAT32].SetString("33554431p103") // 2^24-1 p (127-23) + 1/2 ulp
minfltval[TFLOAT32].SetString("-33554431p103")
maxfltval[TFLOAT64].SetString("18014398509481983p970") // 2^53-1 p (1023-52) + 1/2 ulp
minfltval[TFLOAT64].SetString("-18014398509481983p970")
maxfltval[TCOMPLEX64] = maxfltval[TFLOAT32]
minfltval[TCOMPLEX64] = minfltval[TFLOAT32]
maxfltval[TCOMPLEX128] = maxfltval[TFLOAT64]
minfltval[TCOMPLEX128] = minfltval[TFLOAT64]
types.Types[TINTER] = types.New(TINTER) // empty interface
// simple aliases
simtype[TMAP] = TPTR
simtype[TCHAN] = TPTR
simtype[TFUNC] = TPTR
simtype[TUNSAFEPTR] = TPTR
slicePtrOffset = 0
sliceLenOffset = Rnd(slicePtrOffset+int64(Widthptr), int64(Widthptr))
sliceCapOffset = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr))
sizeofSlice = Rnd(sliceCapOffset+int64(Widthptr), int64(Widthptr))
// string is the same as slice without the cap
sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr))
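// E.g., with Widthptr == 8 this gives offsets 0, 8, and 16, so
// sizeofSlice == 24 and sizeofString == 16.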
dowidth(types.Types[TSTRING])
dowidth(types.UntypedString)
}
func makeErrorInterface() *types.Type {
field := types.NewField()
field.Type = types.Types[TSTRING]
f := functypefield(fakeRecvField(), nil, []*types.Field{field})
field = types.NewField()
field.Sym = lookup("Error")
field.Type = f
t := types.New(TINTER)
t.SetInterface([]*types.Field{field})
return t
}
func lexinit1() {
// error type
s := builtinpkg.Lookup("error")
types.Errortype = makeErrorInterface()
types.Errortype.Sym = s
types.Errortype.Orig = makeErrorInterface()
s.Def = asTypesNode(typenod(types.Errortype))
dowidth(types.Errortype)
// We create separate byte and rune types for better error messages
// rather than just creating type alias *types.Sym's for the uint8 and
// int32 types. Hence, (bytetype|runtype).Sym.isAlias() is false.
// TODO(gri) Should we get rid of this special case (at the cost
// of less informative error messages involving bytes and runes)?
// (Alternatively, we could introduce an OTALIAS node representing
// type aliases, albeit at the cost of having to deal with it everywhere).
// byte alias
s = builtinpkg.Lookup("byte")
types.Bytetype = types.New(TUINT8)
types.Bytetype.Sym = s
s.Def = asTypesNode(typenod(types.Bytetype))
asNode(s.Def).Name = new(Name)
dowidth(types.Bytetype)
// rune alias
s = builtinpkg.Lookup("rune")
types.Runetype = types.New(TINT32)
types.Runetype.Sym = s
s.Def = asTypesNode(typenod(types.Runetype))
asNode(s.Def).Name = new(Name)
dowidth(types.Runetype)
// backend-dependent builtin types (e.g. int).
for _, s := range &typedefs {
s1 := builtinpkg.Lookup(s.name)
sameas := s.sameas32
if Widthptr == 8 {
sameas = s.sameas64
}
simtype[s.etype] = sameas
minfltval[s.etype] = minfltval[sameas]
maxfltval[s.etype] = maxfltval[sameas]
minintval[s.etype] = minintval[sameas]
maxintval[s.etype] = maxintval[sameas]
t := types.New(s.etype)
t.Sym = s1
types.Types[s.etype] = t
s1.Def = asTypesNode(typenod(t))
asNode(s1.Def).Name = new(Name)
s1.Origpkg = builtinpkg
dowidth(t)
}
}
// finishUniverse makes the universe block visible within the current package.
func finishUniverse() {
// Operationally, this is similar to a dot import of builtinpkg, except
// that we silently skip symbols that are already declared in the
// package block rather than emitting a redeclared symbol error.
for _, s := range builtinpkg.Syms {
if s.Def == nil {
continue
}
s1 := lookup(s.Name)
if s1.Def != nil {
continue
}
s1.Def = s.Def
s1.Block = s.Block
}
nodfp = newname(lookup(".fp"))
nodfp.Type = types.Types[TINT32]
nodfp.SetClass(PPARAM)
nodfp.Name.SetUsed(true)
}


@@ -1,76 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
// evalunsafe evaluates a package unsafe operation and returns the result.
func evalunsafe(n *Node) int64 {
switch n.Op {
case OALIGNOF, OSIZEOF:
n.Left = typecheck(n.Left, ctxExpr)
n.Left = defaultlit(n.Left, nil)
tr := n.Left.Type
if tr == nil {
return 0
}
dowidth(tr)
if n.Op == OALIGNOF {
return int64(tr.Align)
}
return tr.Width
case OOFFSETOF:
// must be a selector.
if n.Left.Op != OXDOT {
yyerror("invalid expression %v", n)
return 0
}
// Remember base of selector to find it back after dot insertion.
// Since n.Left.Left may be mutated by typechecking, check it explicitly
// first to track it correctly.
n.Left.Left = typecheck(n.Left.Left, ctxExpr)
base := n.Left.Left
n.Left = typecheck(n.Left, ctxExpr)
if n.Left.Type == nil {
return 0
}
switch n.Left.Op {
case ODOT, ODOTPTR:
break
case OCALLPART:
yyerror("invalid expression %v: argument is a method value", n)
return 0
default:
yyerror("invalid expression %v", n)
return 0
}
// Sum offsets for dots until we reach base.
var v int64
for r := n.Left; r != base; r = r.Left {
switch r.Op {
case ODOTPTR:
// For Offsetof(s.f), s may itself be a pointer,
// but accessing f must not otherwise involve
// indirection via embedded pointer types.
if r.Left != base {
yyerror("invalid expression %v: selector implies indirection of embedded %v", n, r.Left)
return 0
}
fallthrough
case ODOT:
v += r.Xoffset
default:
Dump("unsafenmagic", n.Left)
Fatalf("impossible %#v node after dot insertion", r.Op)
}
}
return v
}
Fatalf("unexpected op %v", n.Op)
return 0
}
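Aside: the ODOTPTR restriction is visible from ordinary Go code. A hedged sketch (hypothetical types) of what this function accepts and rejects for unsafe.Offsetof:

package main

import (
	"fmt"
	"unsafe"
)

type Inner struct{ F int64 }

type ByValue struct {
	X int32
	Inner // embedded by value: offsets sum statically
}

type ByPointer struct {
	X int32
	*Inner // embedded by pointer: reaching F needs an indirection
}

func main() {
	fmt.Println(unsafe.Offsetof(ByValue{}.F)) // 8: X(4) + padding(4), then F at 8
	// The following would be rejected at compile time, roughly:
	// "invalid expression ...: selector implies indirection of embedded *Inner"
	// _ = unsafe.Offsetof(ByPointer{}.F)
	_ = ByPointer{}
}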


@@ -8,59 +8,35 @@ import (
"os"
"runtime"
"runtime/pprof"
"cmd/compile/internal/base"
)
// Line returns n's position as a string. If n has been inlined,
// it uses the outermost position where n has been inlined.
func (n *Node) Line() string {
return linestr(n.Pos)
}
var atExitFuncs []func()
func atExit(f func()) {
atExitFuncs = append(atExitFuncs, f)
}
func Exit(code int) {
for i := len(atExitFuncs) - 1; i >= 0; i-- {
f := atExitFuncs[i]
atExitFuncs = atExitFuncs[:i]
f()
}
os.Exit(code)
}
var (
blockprofile string
cpuprofile string
memprofile string
memprofilerate int64
traceprofile string
traceHandler func(string)
mutexprofile string
)
func startProfile() {
if cpuprofile != "" {
f, err := os.Create(cpuprofile)
if base.Flag.CPUProfile != "" {
f, err := os.Create(base.Flag.CPUProfile)
if err != nil {
Fatalf("%v", err)
base.Fatalf("%v", err)
}
if err := pprof.StartCPUProfile(f); err != nil {
Fatalf("%v", err)
base.Fatalf("%v", err)
}
atExit(pprof.StopCPUProfile)
base.AtExit(pprof.StopCPUProfile)
}
if memprofile != "" {
if base.Flag.MemProfile != "" {
if memprofilerate != 0 {
runtime.MemProfileRate = int(memprofilerate)
}
f, err := os.Create(memprofile)
f, err := os.Create(base.Flag.MemProfile)
if err != nil {
Fatalf("%v", err)
base.Fatalf("%v", err)
}
atExit(func() {
base.AtExit(func() {
// Profile all outstanding allocations.
runtime.GC()
// compilebench parses the memory profile to extract memstats,
@@ -68,36 +44,36 @@ func startProfile() {
// See golang.org/issue/18641 and runtime/pprof/pprof.go:writeHeap.
const writeLegacyFormat = 1
if err := pprof.Lookup("heap").WriteTo(f, writeLegacyFormat); err != nil {
Fatalf("%v", err)
base.Fatalf("%v", err)
}
})
} else {
// Not doing memory profiling; disable it entirely.
runtime.MemProfileRate = 0
}
if blockprofile != "" {
f, err := os.Create(blockprofile)
if base.Flag.BlockProfile != "" {
f, err := os.Create(base.Flag.BlockProfile)
if err != nil {
Fatalf("%v", err)
base.Fatalf("%v", err)
}
runtime.SetBlockProfileRate(1)
atExit(func() {
base.AtExit(func() {
pprof.Lookup("block").WriteTo(f, 0)
f.Close()
})
}
if mutexprofile != "" {
f, err := os.Create(mutexprofile)
if base.Flag.MutexProfile != "" {
f, err := os.Create(base.Flag.MutexProfile)
if err != nil {
Fatalf("%v", err)
base.Fatalf("%v", err)
}
startMutexProfiling()
atExit(func() {
base.AtExit(func() {
pprof.Lookup("mutex").WriteTo(f, 0)
f.Close()
})
}
if traceprofile != "" && traceHandler != nil {
traceHandler(traceprofile)
if base.Flag.TraceProfile != "" && traceHandler != nil {
traceHandler(base.Flag.TraceProfile)
}
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package ir
type bitset8 uint8
@@ -14,6 +14,18 @@ func (f *bitset8) set(mask uint8, b bool) {
}
}
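// get2 returns the two-bit field in f at the given shift.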
func (f bitset8) get2(shift uint8) uint8 {
return uint8(f>>shift) & 3
}
// set2 sets two bits in f using the bottom two bits of b.
func (f *bitset8) set2(shift uint8, b uint8) {
// Clear old bits.
*(*uint8)(f) &^= 3 << shift
// Set new bits.
*(*uint8)(f) |= uint8(b&3) << shift
}
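Aside: a minimal, self-contained usage sketch (editorial; the shift constant mirrors miniTypecheckShift further below):

package main

import "fmt"

type bitset8 uint8

func (f bitset8) get2(shift uint8) uint8 { return uint8(f>>shift) & 3 }

func (f *bitset8) set2(shift uint8, b uint8) {
	*(*uint8)(f) &^= 3 << shift         // clear the old two bits
	*(*uint8)(f) |= uint8(b&3) << shift // set the new ones
}

func main() {
	var flags bitset8
	const typecheckShift = 2
	flags.set2(typecheckShift, 1)
	fmt.Println(flags.get2(typecheckShift)) // 1
	flags.set2(typecheckShift, 3)
	fmt.Println(flags.get2(typecheckShift)) // 3: old value fully replaced, not OR'd
}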
type bitset16 uint16
func (f *bitset16) set(mask uint16, b bool) {


@@ -0,0 +1,26 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
var (
// maximum size variable which we will allocate on the stack.
// This limit is for explicit variable declarations like "var x T" or "x := ...".
// Note: the flag smallframes can update this value.
MaxStackVarSize = int64(10 * 1024 * 1024)
// maximum size of implicit variables that we will allocate on the stack.
// p := new(T) allocating T on the stack
// p := &T{} allocating T on the stack
// s := make([]T, n) allocating [n]T on the stack
// s := []byte("...") allocating [n]byte on the stack
// Note: the flag smallframes can update this value.
MaxImplicitStackVarSize = int64(64 * 1024)
// MaxSmallArraySize is the maximum size of an array which is considered small.
// Small arrays will be initialized directly with a sequence of constant stores.
// Large arrays will be initialized by copying from a static temp.
// 256 bytes was chosen to minimize generated code + statictmp size.
MaxSmallArraySize = int64(256)
)
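Aside: these limits are observable from ordinary user code. In this hedged sketch, `go build -gcflags=-m` would typically report the second make escaping to the heap because its constant size exceeds MaxImplicitStackVarSize:

package main

func f() int {
	small := make([]byte, 4<<10) // 4 KiB: well under the limit, may stay on the stack
	large := make([]byte, 1<<20) // 1 MiB: over the 64 KiB limit, allocated on the heap
	return len(small) + len(large)
}

func main() { println(f()) }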


@@ -1,6 +1,6 @@
// Code generated by "stringer -type=Class"; DO NOT EDIT.
// Code generated by "stringer -type=Class name.go"; DO NOT EDIT.
package gc
package ir
import "strconv"


@@ -0,0 +1,99 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
import (
"go/constant"
"math"
"math/big"
"cmd/compile/internal/base"
"cmd/compile/internal/types"
)
func NewBool(b bool) Node {
return NewLiteral(constant.MakeBool(b))
}
func NewInt(v int64) Node {
return NewLiteral(constant.MakeInt64(v))
}
func NewString(s string) Node {
return NewLiteral(constant.MakeString(s))
}
const (
// Maximum size in bits for big.Ints before signalling
// overflow and also mantissa precision for big.Floats.
ConstPrec = 512
)
func BigFloat(v constant.Value) *big.Float {
f := new(big.Float)
f.SetPrec(ConstPrec)
switch u := constant.Val(v).(type) {
case int64:
f.SetInt64(u)
case *big.Int:
f.SetInt(u)
case *big.Float:
f.Set(u)
case *big.Rat:
f.SetRat(u)
default:
base.Fatalf("unexpected: %v", u)
}
return f
}
// ConstOverflow reports whether constant value v is too large
// to represent with type t.
func ConstOverflow(v constant.Value, t *types.Type) bool {
switch {
case t.IsInteger():
bits := uint(8 * t.Size())
if t.IsUnsigned() {
x, ok := constant.Uint64Val(v)
return !ok || x>>bits != 0
}
x, ok := constant.Int64Val(v)
if x < 0 {
x = ^x
}
return !ok || x>>(bits-1) != 0
case t.IsFloat():
switch t.Size() {
case 4:
f, _ := constant.Float32Val(v)
return math.IsInf(float64(f), 0)
case 8:
f, _ := constant.Float64Val(v)
return math.IsInf(f, 0)
}
case t.IsComplex():
ft := types.FloatForComplex(t)
return ConstOverflow(constant.Real(v), ft) || ConstOverflow(constant.Imag(v), ft)
}
base.Fatalf("ConstOverflow: %v, %v", v, t)
panic("unreachable")
}
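Aside: the signed branch relies on a complement trick: for negative x, ^x folds the lower-bound check into the same shift test as the positive case. A standalone sketch for a hypothetical 8-bit target:

package main

import (
	"fmt"
	"go/constant"
)

// fitsInt8 mirrors ConstOverflow's signed-integer case with bits == 8.
func fitsInt8(v constant.Value) bool {
	x, ok := constant.Int64Val(v)
	if x < 0 {
		x = ^x // -128 becomes 127, -129 becomes 128
	}
	return ok && x>>7 == 0
}

func main() {
	fmt.Println(fitsInt8(constant.MakeInt64(127)))  // true
	fmt.Println(fitsInt8(constant.MakeInt64(-128))) // true
	fmt.Println(fitsInt8(constant.MakeInt64(128)))  // false: overflows int8
}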
// IsConstNode reports whether n is a Go language constant (as opposed to a
// compile-time constant).
//
// Expressions derived from nil, like string([]byte(nil)), while they
// may be known at compile time, are not Go language constants.
func IsConstNode(n Node) bool {
return n.Op() == OLITERAL
}
func IsSmallIntConst(n Node) bool {
if n.Op() == OLITERAL {
v, ok := constant.Int64Val(n.Val())
return ok && int64(int32(v)) == v
}
return false
}


@@ -0,0 +1,102 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
import (
"cmd/compile/internal/base"
"cmd/internal/src"
)
// A Node may implement the Orig and SetOrig methods to
// maintain a pointer to the "unrewritten" form of a Node.
// If a Node does not implement OrigNode, it is its own Orig.
//
// Note that both SepCopy and Copy have definitions compatible
// with a Node that does not implement OrigNode: such a Node
// is its own Orig, and in that case, that's what both want to return
// anyway (SepCopy unconditionally, and Copy only when the input
// is its own Orig as well, but if the output does not implement
// OrigNode, then neither does the input, making the condition true).
type OrigNode interface {
Node
Orig() Node
SetOrig(Node)
}
// origNode may be embedded into a Node to make it implement OrigNode.
type origNode struct {
orig Node `mknode:"-"`
}
func (n *origNode) Orig() Node { return n.orig }
func (n *origNode) SetOrig(o Node) { n.orig = o }
// Orig returns the “original” node for n.
// If n implements OrigNode, Orig returns n.Orig().
// Otherwise Orig returns n itself.
func Orig(n Node) Node {
if n, ok := n.(OrigNode); ok {
o := n.Orig()
if o == nil {
Dump("Orig nil", n)
base.Fatalf("Orig returned nil")
}
return o
}
return n
}
// SepCopy returns a separate shallow copy of n,
// breaking any Orig link to any other nodes.
func SepCopy(n Node) Node {
n = n.copy()
if n, ok := n.(OrigNode); ok {
n.SetOrig(n)
}
return n
}
// Copy returns a shallow copy of n.
// If Orig(n) == n, then Orig(Copy(n)) == the copy.
// Otherwise the Orig link is preserved as well.
//
// The specific semantics surrounding Orig are subtle but right for most uses.
// See issues #26855 and #27765 for pitfalls.
func Copy(n Node) Node {
c := n.copy()
if n, ok := n.(OrigNode); ok && n.Orig() == n {
c.(OrigNode).SetOrig(c)
}
return c
}
// DeepCopy returns a “deep” copy of n, with its entire structure copied
// (except for shared nodes like ONAME, ONONAME, OLITERAL, and OTYPE).
// If pos.IsKnown(), it sets the source position of newly allocated Nodes to pos.
func DeepCopy(pos src.XPos, n Node) Node {
var edit func(Node) Node
edit = func(x Node) Node {
switch x.Op() {
case OPACK, ONAME, ONONAME, OLITERAL, ONIL, OTYPE:
return x
}
x = Copy(x)
if pos.IsKnown() {
x.SetPos(pos)
}
EditChildren(x, edit)
return x
}
return edit(n)
}
// DeepCopyList returns a list of deep copies (using DeepCopy) of the nodes in list.
func DeepCopyList(pos src.XPos, list []Node) []Node {
var out []Node
for _, n := range list {
out = append(out, DeepCopy(pos, n))
}
return out
}
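Aside: the Orig contract is subtle enough to warrant a toy model. This editorial sketch (a toy node type, not the real Node interface) shows why Copy re-points Orig only when the input was its own Orig:

package main

import "fmt"

type node struct {
	op   string
	orig *node // every node points at its Orig here
}

// copyNode mirrors Copy: a fresh copy of a pristine node becomes its own
// Orig; a node that was already rewritten keeps pointing at the original.
func copyNode(n *node) *node {
	c := *n
	if n.orig == n {
		c.orig = &c
	}
	return &c
}

// sepCopyNode mirrors SepCopy: the copy always severs the Orig link.
func sepCopyNode(n *node) *node {
	c := *n
	c.orig = &c
	return &c
}

func main() {
	n := &node{op: "OADD"}
	n.orig = n
	c := copyNode(n)
	fmt.Println(c.orig == c) // true: the copy is its own Orig

	n.orig = &node{op: "OLITERAL"} // pretend n was rewritten from a literal
	d := copyNode(n)
	fmt.Println(d.orig == n.orig)              // true: the Orig link is preserved
	fmt.Println(sepCopyNode(n).orig != n.orig) // true: SepCopy breaks it
}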


@@ -6,21 +6,23 @@
// for debugging purposes. The code is customized for Node graphs
// and may be used for an alternative view of the node structure.
package gc
package ir
import (
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
"io"
"os"
"reflect"
"regexp"
"cmd/compile/internal/base"
"cmd/compile/internal/types"
"cmd/internal/src"
)
// dump is like fdump but prints to stderr.
func dump(root interface{}, filter string, depth int) {
fdump(os.Stderr, root, filter, depth)
func DumpAny(root interface{}, filter string, depth int) {
FDumpAny(os.Stderr, root, filter, depth)
}
// fdump prints the structure of a rooted data structure
@@ -40,7 +42,7 @@ func dump(root interface{}, filter string, depth int) {
// rather than their type; struct fields with zero values or
// non-matching field names are omitted, and "…" means recursion
// depth has been reached or struct fields have been omitted.
func fdump(w io.Writer, root interface{}, filter string, depth int) {
func FDumpAny(w io.Writer, root interface{}, filter string, depth int) {
if root == nil {
fmt.Fprintln(w, "nil")
return
@@ -138,19 +140,9 @@ func (p *dumper) dump(x reflect.Value, depth int) {
return
}
// special cases
switch v := x.Interface().(type) {
case Nodes:
// unpack Nodes since reflect cannot look inside
// due to the unexported field in its struct
x = reflect.ValueOf(v.Slice())
case src.XPos:
p.printf("%s", linestr(v))
if pos, ok := x.Interface().(src.XPos); ok {
p.printf("%s", base.FmtPos(pos))
return
case *types.Node:
x = reflect.ValueOf(asNode(v))
}
switch x.Kind() {
@@ -203,7 +195,7 @@ func (p *dumper) dump(x reflect.Value, depth int) {
isNode := false
if n, ok := x.Interface().(Node); ok {
isNode = true
p.printf("%s %s {", n.Op.String(), p.addr(x))
p.printf("%s %s {", n.Op().String(), p.addr(x))
} else {
p.printf("%s {", typ)
}
@@ -230,7 +222,7 @@ func (p *dumper) dump(x reflect.Value, depth int) {
omitted = true
continue // exclude zero-valued fields
}
if n, ok := x.Interface().(Nodes); ok && n.Len() == 0 {
if n, ok := x.Interface().(Nodes); ok && len(n) == 0 {
omitted = true
continue // exclude empty Nodes slices
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,284 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
import (
"cmd/compile/internal/base"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
)
// A Func corresponds to a single function in a Go program
// (and vice versa: each function is denoted by exactly one *Func).
//
// There are multiple nodes that represent a Func in the IR.
//
// The ONAME node (Func.Nname) is used for plain references to it.
// The ODCLFUNC node (the Func itself) is used for its declaration code.
// The OCLOSURE node (Func.OClosure) is used for a reference to a
// function literal.
//
// An imported function will have an ONAME node which points to a Func
// with an empty body.
// A declared function or method has an ODCLFUNC (the Func itself) and an ONAME.
// A function literal is represented directly by an OCLOSURE, but it also
// has an ODCLFUNC (and a matching ONAME) representing the compiled
// underlying form of the closure, which accesses the captured variables
// using a special data structure passed in a register.
//
// A method declaration is represented like a function, except f.Sym
// will be the qualified method name (e.g., "T.m") and
// f.Func.Shortname is the bare method name (e.g., "m").
//
// A method expression (T.M) is represented as an OMETHEXPR node,
// in which n.Left and n.Right point to the type and method, respectively.
// Each distinct mention of a method expression in the source code
// constructs a fresh node.
//
// A method value (t.M) is represented by ODOTMETH/ODOTINTER
// when it is called directly and by OCALLPART otherwise.
// These are like method expressions, except that for ODOTMETH/ODOTINTER,
// the method name is stored in Sym instead of Right.
// Each OCALLPART ends up being implemented as a new
// function, a bit like a closure, with its own ODCLFUNC.
// The OCALLPART uses n.Func to record the linkage to
// the generated ODCLFUNC, but there is no
// pointer from the Func back to the OCALLPART.
type Func struct {
miniNode
Body Nodes
Iota int64
Nname *Name // ONAME node
OClosure *ClosureExpr // OCLOSURE node
Shortname *types.Sym
// Extra entry code for the function. For example, allocate and initialize
// memory for escaping parameters.
Enter Nodes
Exit Nodes
// ONAME nodes for all params/locals for this func/closure, does NOT
// include closurevars until transforming closures during walk.
// Names must be listed PPARAMs, PPARAMOUTs, then PAUTOs,
// with PPARAMs and PPARAMOUTs in order corresponding to the function signature.
// However, as anonymous or blank PPARAMs are not actually declared,
// they are omitted from Dcl.
// Anonymous and blank PPARAMOUTs are declared as ~rNN and ~bNN Names, respectively.
Dcl []*Name
// ClosureVars lists the free variables that are used within a
// function literal, but formally declared in an enclosing
// function. The variables in this slice are the closure function's
// own copy of the variables, which are used within its function
// body. They will also each have IsClosureVar set, and will have
// Byval set if they're captured by value.
ClosureVars []*Name
// Enclosed functions that need to be compiled.
// Populated during walk.
Closures []*Func
// Parents records the parent scope of each scope within a
// function. The root scope (0) has no parent, so the i'th
// scope's parent is stored at Parents[i-1].
Parents []ScopeID
// Marks records scope boundary changes.
Marks []Mark
FieldTrack map[*obj.LSym]struct{}
DebugInfo interface{}
LSym *obj.LSym
Inl *Inline
// Closgen tracks how many closures have been generated within
// this function. Used by closurename for creating unique
// function names.
Closgen int32
Label int32 // largest auto-generated label in this function
Endlineno src.XPos
WBPos src.XPos // position of first write barrier; see SetWBPos
Pragma PragmaFlag // go:xxx function annotations
flags bitset16
NumDefers int32 // number of defer calls in the function
NumReturns int32 // number of explicit returns in the function
// nwbrCalls records the LSyms of functions called by this
// function for go:nowritebarrierrec analysis. Only filled in
// if nowritebarrierrecCheck != nil.
NWBRCalls *[]SymAndPos
}
func NewFunc(pos src.XPos) *Func {
f := new(Func)
f.pos = pos
f.op = ODCLFUNC
f.Iota = -1
return f
}
func (f *Func) isStmt() {}
func (n *Func) copy() Node { panic(n.no("copy")) }
func (n *Func) doChildren(do func(Node) bool) bool { return doNodes(n.Body, do) }
func (n *Func) editChildren(edit func(Node) Node) { editNodes(n.Body, edit) }
func (f *Func) Type() *types.Type { return f.Nname.Type() }
func (f *Func) Sym() *types.Sym { return f.Nname.Sym() }
func (f *Func) Linksym() *obj.LSym { return f.Nname.Linksym() }
func (f *Func) LinksymABI(abi obj.ABI) *obj.LSym { return f.Nname.LinksymABI(abi) }
// An Inline holds fields used for function bodies that can be inlined.
type Inline struct {
Cost int32 // heuristic cost of inlining this function
// Copies of Func.Dcl and Nbody for use during inlining.
Dcl []*Name
Body []Node
}
// A Mark represents a scope boundary.
type Mark struct {
// Pos is the position of the token that marks the scope
// change.
Pos src.XPos
// Scope identifies the innermost scope to the right of Pos.
Scope ScopeID
}
// A ScopeID represents a lexical scope within a function.
type ScopeID int32
const (
funcDupok = 1 << iota // duplicate definitions ok
funcWrapper // is method wrapper
funcNeedctxt // function uses context register (has closure variables)
funcReflectMethod // function calls reflect.Type.Method or MethodByName
// true if closure inside a function; false if a simple function or a
// closure in a global variable initialization
funcIsHiddenClosure
funcHasDefer // contains a defer statement
funcNilCheckDisabled // disable nil checks when compiling this function
funcInlinabilityChecked // inliner has already determined whether the function is inlinable
funcExportInline // include inline body in export data
funcInstrumentBody // add race/msan instrumentation during SSA construction
funcOpenCodedDeferDisallowed // can't do open-coded defers
funcClosureCalled // closure is only immediately called
)
type SymAndPos struct {
Sym *obj.LSym // LSym of callee
Pos src.XPos // line of call
}
func (f *Func) Dupok() bool { return f.flags&funcDupok != 0 }
func (f *Func) Wrapper() bool { return f.flags&funcWrapper != 0 }
func (f *Func) Needctxt() bool { return f.flags&funcNeedctxt != 0 }
func (f *Func) ReflectMethod() bool { return f.flags&funcReflectMethod != 0 }
func (f *Func) IsHiddenClosure() bool { return f.flags&funcIsHiddenClosure != 0 }
func (f *Func) HasDefer() bool { return f.flags&funcHasDefer != 0 }
func (f *Func) NilCheckDisabled() bool { return f.flags&funcNilCheckDisabled != 0 }
func (f *Func) InlinabilityChecked() bool { return f.flags&funcInlinabilityChecked != 0 }
func (f *Func) ExportInline() bool { return f.flags&funcExportInline != 0 }
func (f *Func) InstrumentBody() bool { return f.flags&funcInstrumentBody != 0 }
func (f *Func) OpenCodedDeferDisallowed() bool { return f.flags&funcOpenCodedDeferDisallowed != 0 }
func (f *Func) ClosureCalled() bool { return f.flags&funcClosureCalled != 0 }
func (f *Func) SetDupok(b bool) { f.flags.set(funcDupok, b) }
func (f *Func) SetWrapper(b bool) { f.flags.set(funcWrapper, b) }
func (f *Func) SetNeedctxt(b bool) { f.flags.set(funcNeedctxt, b) }
func (f *Func) SetReflectMethod(b bool) { f.flags.set(funcReflectMethod, b) }
func (f *Func) SetIsHiddenClosure(b bool) { f.flags.set(funcIsHiddenClosure, b) }
func (f *Func) SetHasDefer(b bool) { f.flags.set(funcHasDefer, b) }
func (f *Func) SetNilCheckDisabled(b bool) { f.flags.set(funcNilCheckDisabled, b) }
func (f *Func) SetInlinabilityChecked(b bool) { f.flags.set(funcInlinabilityChecked, b) }
func (f *Func) SetExportInline(b bool) { f.flags.set(funcExportInline, b) }
func (f *Func) SetInstrumentBody(b bool) { f.flags.set(funcInstrumentBody, b) }
func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) }
func (f *Func) SetClosureCalled(b bool) { f.flags.set(funcClosureCalled, b) }
func (f *Func) SetWBPos(pos src.XPos) {
if base.Debug.WB != 0 {
base.WarnfAt(pos, "write barrier")
}
if !f.WBPos.IsKnown() {
f.WBPos = pos
}
}
// FuncName returns the name (without the package) of the function f.
func FuncName(f *Func) string {
if f == nil || f.Nname == nil {
return "<nil>"
}
return f.Sym().Name
}
// PkgFuncName returns the name of the function referenced by f, with the package prepended.
// This differs from the compiler's internal convention where local functions lack a package
// because the ultimate consumer of this is a human looking at an IDE; package is only empty
// if the compilation package is actually the empty string.
func PkgFuncName(f *Func) string {
if f == nil || f.Nname == nil {
return "<nil>"
}
s := f.Sym()
pkg := s.Pkg
p := base.Ctxt.Pkgpath
if pkg != nil && pkg.Path != "" {
p = pkg.Path
}
if p == "" {
return s.Name
}
return p + "." + s.Name
}
var CurFunc *Func
func FuncSymName(s *types.Sym) string {
return s.Name + "·f"
}
// MarkFunc marks a node as a function.
func MarkFunc(n *Name) {
if n.Op() != ONAME || n.Class != Pxxx {
base.Fatalf("expected ONAME/Pxxx node, got %v", n)
}
n.Class = PFUNC
n.Sym().SetFunc(true)
}
// ClosureDebugRuntimeCheck applies boilerplate checks for debug flags
// and compiling runtime
func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
if base.Debug.Closure > 0 {
if clo.Esc() == EscHeap {
base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func.ClosureVars)
} else {
base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars)
}
}
if base.Flag.CompilingRuntime && clo.Esc() == EscHeap {
base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime")
}
}
// IsTrivialClosure reports whether closure clo has an
// empty list of captured vars.
func IsTrivialClosure(clo *ClosureExpr) bool {
return len(clo.Func.ClosureVars) == 0
}


@@ -0,0 +1,5 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir


@@ -0,0 +1,92 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run -mod=mod mknode.go
package ir
import (
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
"go/constant"
)
// A miniNode is a minimal node implementation,
// meant to be embedded as the first field in a larger node implementation,
// at a cost of 8 bytes.
//
// A miniNode is NOT a valid Node by itself: the embedding struct
// must at the least provide:
//
// func (n *MyNode) String() string { return fmt.Sprint(n) }
// func (n *MyNode) rawCopy() Node { c := *n; return &c }
// func (n *MyNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
//
// The embedding struct should also fill in n.op in its constructor,
// for more useful panic messages when invalid methods are called,
// instead of implementing Op itself.
//
type miniNode struct {
pos src.XPos // uint32
op Op // uint8
bits bitset8
esc uint16
}
// posOr returns pos if known, or else n.pos.
// For use in DeepCopy.
func (n *miniNode) posOr(pos src.XPos) src.XPos {
if pos.IsKnown() {
return pos
}
return n.pos
}
// op can be read, but not written.
// An embedding implementation can provide a SetOp if desired.
// (The panicking SetOp is with the other panics below.)
func (n *miniNode) Op() Op { return n.op }
func (n *miniNode) Pos() src.XPos { return n.pos }
func (n *miniNode) SetPos(x src.XPos) { n.pos = x }
func (n *miniNode) Esc() uint16 { return n.esc }
func (n *miniNode) SetEsc(x uint16) { n.esc = x }
const (
miniWalkdefShift = 0 // TODO(mdempsky): Move to Name.flags.
miniTypecheckShift = 2
miniDiag = 1 << 4
miniWalked = 1 << 5 // to prevent/catch re-walking
)
func (n *miniNode) Typecheck() uint8 { return n.bits.get2(miniTypecheckShift) }
func (n *miniNode) SetTypecheck(x uint8) {
if x > 3 {
panic(fmt.Sprintf("cannot SetTypecheck %d", x))
}
n.bits.set2(miniTypecheckShift, x)
}
func (n *miniNode) Diag() bool { return n.bits&miniDiag != 0 }
func (n *miniNode) SetDiag(x bool) { n.bits.set(miniDiag, x) }
func (n *miniNode) Walked() bool { return n.bits&miniWalked != 0 }
func (n *miniNode) SetWalked(x bool) { n.bits.set(miniWalked, x) }
// Empty, immutable graph structure.
func (n *miniNode) Init() Nodes { return Nodes{} }
// Additional functionality unavailable.
func (n *miniNode) no(name string) string { return "cannot " + name + " on " + n.op.String() }
func (n *miniNode) Type() *types.Type { return nil }
func (n *miniNode) SetType(*types.Type) { panic(n.no("SetType")) }
func (n *miniNode) Name() *Name { return nil }
func (n *miniNode) Sym() *types.Sym { return nil }
func (n *miniNode) Val() constant.Value { panic(n.no("Val")) }
func (n *miniNode) SetVal(v constant.Value) { panic(n.no("SetVal")) }
func (n *miniNode) NonNil() bool { return false }
func (n *miniNode) MarkNonNil() { panic(n.no("MarkNonNil")) }


@@ -0,0 +1,228 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"bytes"
"fmt"
"go/format"
"go/types"
"io/ioutil"
"log"
"reflect"
"sort"
"strings"
"golang.org/x/tools/go/packages"
)
var irPkg *types.Package
var buf bytes.Buffer
func main() {
cfg := &packages.Config{
Mode: packages.NeedSyntax | packages.NeedTypes,
}
pkgs, err := packages.Load(cfg, "cmd/compile/internal/ir")
if err != nil {
log.Fatal(err)
}
irPkg = pkgs[0].Types
fmt.Fprintln(&buf, "// Code generated by mknode.go. DO NOT EDIT.")
fmt.Fprintln(&buf)
fmt.Fprintln(&buf, "package ir")
fmt.Fprintln(&buf)
fmt.Fprintln(&buf, `import "fmt"`)
scope := irPkg.Scope()
for _, name := range scope.Names() {
if strings.HasPrefix(name, "mini") {
continue
}
obj, ok := scope.Lookup(name).(*types.TypeName)
if !ok {
continue
}
typ := obj.Type().(*types.Named)
if !implementsNode(types.NewPointer(typ)) {
continue
}
fmt.Fprintf(&buf, "\n")
fmt.Fprintf(&buf, "func (n *%s) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }\n", name)
switch name {
case "Name", "Func":
// Too specialized to automate.
continue
}
forNodeFields(typ,
"func (n *%[1]s) copy() Node { c := *n\n",
"",
"c.%[1]s = copy%[2]s(c.%[1]s)",
"return &c }\n")
forNodeFields(typ,
"func (n *%[1]s) doChildren(do func(Node) bool) bool {\n",
"if n.%[1]s != nil && do(n.%[1]s) { return true }",
"if do%[2]s(n.%[1]s, do) { return true }",
"return false }\n")
forNodeFields(typ,
"func (n *%[1]s) editChildren(edit func(Node) Node) {\n",
"if n.%[1]s != nil { n.%[1]s = edit(n.%[1]s).(%[2]s) }",
"edit%[2]s(n.%[1]s, edit)",
"}\n")
}
makeHelpers()
out, err := format.Source(buf.Bytes())
if err != nil {
// write out mangled source so we can see the bug.
out = buf.Bytes()
}
err = ioutil.WriteFile("node_gen.go", out, 0666)
if err != nil {
log.Fatal(err)
}
}
// needHelper maps needed slice helpers from their base name to their
// respective slice-element type.
var needHelper = map[string]string{}
func makeHelpers() {
var names []string
for name := range needHelper {
names = append(names, name)
}
sort.Strings(names)
for _, name := range names {
fmt.Fprintf(&buf, sliceHelperTmpl, name, needHelper[name])
}
}
const sliceHelperTmpl = `
func copy%[1]s(list []%[2]s) []%[2]s {
if list == nil {
return nil
}
c := make([]%[2]s, len(list))
copy(c, list)
return c
}
func do%[1]s(list []%[2]s, do func(Node) bool) bool {
for _, x := range list {
if x != nil && do(x) {
return true
}
}
return false
}
func edit%[1]s(list []%[2]s, edit func(Node) Node) {
for i, x := range list {
if x != nil {
list[i] = edit(x).(%[2]s)
}
}
}
`
func forNodeFields(named *types.Named, prologue, singleTmpl, sliceTmpl, epilogue string) {
fmt.Fprintf(&buf, prologue, named.Obj().Name())
anyField(named.Underlying().(*types.Struct), func(f *types.Var) bool {
if f.Embedded() {
return false
}
name, typ := f.Name(), f.Type()
slice, _ := typ.Underlying().(*types.Slice)
if slice != nil {
typ = slice.Elem()
}
tmpl, what := singleTmpl, types.TypeString(typ, types.RelativeTo(irPkg))
if implementsNode(typ) {
if slice != nil {
helper := strings.TrimPrefix(what, "*") + "s"
needHelper[helper] = what
tmpl, what = sliceTmpl, helper
}
} else if what == "*Field" {
// Special case for *Field.
tmpl = sliceTmpl
if slice != nil {
what = "Fields"
} else {
what = "Field"
}
} else {
return false
}
if tmpl == "" {
return false
}
// Allow template to not use all arguments without
// upsetting fmt.Printf.
s := fmt.Sprintf(tmpl+"\x00 %[1]s %[2]s", name, what)
fmt.Fprintln(&buf, s[:strings.LastIndex(s, "\x00")])
return false
})
fmt.Fprintf(&buf, epilogue)
}
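Aside: the NUL-byte dance above is worth isolating. fmt.Sprintf appends a %!(EXTRA ...) complaint when operands go unused, so the generator tacks a throwaway "%[1]s %[2]s" tail after a NUL and truncates at the NUL afterwards. A standalone sketch (hypothetical helper name):

package main

import (
	"fmt"
	"strings"
)

func apply(tmpl, name, what string) string {
	s := fmt.Sprintf(tmpl+"\x00 %[1]s %[2]s", name, what)
	return s[:strings.LastIndex(s, "\x00")]
}

func main() {
	fmt.Println(apply("c.%[1]s = copy%[2]s(c.%[1]s)", "Body", "Nodes"))
	fmt.Println(apply("return false }", "Body", "Nodes")) // uses neither operand: still clean
}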
func implementsNode(typ types.Type) bool {
if _, ok := typ.Underlying().(*types.Interface); ok {
// TODO(mdempsky): Check the interface implements Node.
// Worst case, node_gen.go will fail to compile if we're wrong.
return true
}
if ptr, ok := typ.(*types.Pointer); ok {
if str, ok := ptr.Elem().Underlying().(*types.Struct); ok {
return anyField(str, func(f *types.Var) bool {
return f.Embedded() && f.Name() == "miniNode"
})
}
}
return false
}
func anyField(typ *types.Struct, pred func(f *types.Var) bool) bool {
for i, n := 0, typ.NumFields(); i < n; i++ {
if value, ok := reflect.StructTag(typ.Tag(i)).Lookup("mknode"); ok {
if value != "-" {
panic(fmt.Sprintf("unexpected tag value: %q", value))
}
continue
}
f := typ.Field(i)
if pred(f) {
return true
}
if f.Embedded() {
if typ, ok := f.Type().Underlying().(*types.Struct); ok {
if anyField(typ, pred) {
return true
}
}
}
}
return false
}


@@ -0,0 +1,512 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
import (
"cmd/compile/internal/base"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
"fmt"
"go/constant"
)
// An Ident is an identifier, possibly qualified.
type Ident struct {
miniExpr
sym *types.Sym
}
func NewIdent(pos src.XPos, sym *types.Sym) *Ident {
n := new(Ident)
n.op = ONONAME
n.pos = pos
n.sym = sym
return n
}
func (n *Ident) Sym() *types.Sym { return n.sym }
func (*Ident) CanBeNtype() {}
// Name holds Node fields used only by named nodes (ONAME, OTYPE, some OLITERAL).
type Name struct {
miniExpr
BuiltinOp Op // uint8
Class Class // uint8
pragma PragmaFlag // int16
flags bitset16
sym *types.Sym
Func *Func
Offset_ int64
val constant.Value
Opt interface{} // for use by escape analysis
Embed *[]Embed // list of embedded files, for ONAME var
PkgName *PkgName // real package for import . names
// For a local variable (not param) or extern, the initializing assignment (OAS or OAS2).
// For a closure var, the ONAME node of the outer captured variable
Defn Node
// The function, method, or closure in which local variable or param is declared.
Curfn *Func
Ntype Ntype
Heapaddr *Name // temp holding heap address of param
// ONAME closure linkage
// Consider:
//
// func f() {
// x := 1 // x1
// func() {
// use(x) // x2
// func() {
// use(x) // x3
// --- parser is here ---
// }()
// }()
// }
//
// There is an original declaration of x and then a chain of mentions of x
// leading into the current function. Each time x is mentioned in a new closure,
// we create a variable representing x for use in that specific closure,
// since the way you get to x is different in each closure.
//
// Let's number the specific variables as shown in the code:
// x1 is the original x, x2 is when mentioned in the closure,
// and x3 is when mentioned in the closure in the closure.
//
// We keep these linked (assume N > 1):
//
// - x1.Defn = original declaration statement for x (like most variables)
// - x1.Innermost = current innermost closure x (in this case x3), or nil for none
// - x1.IsClosureVar() = false
//
// - xN.Defn = x1, N > 1
// - xN.IsClosureVar() = true, N > 1
// - x2.Outer = nil
// - xN.Outer = x(N-1), N > 2
//
//
// When we look up x in the symbol table, we always get x1.
// Then we can use x1.Innermost (if not nil) to get the x
// for the innermost known closure function,
// but the first reference in a closure will find either no x1.Innermost
// or an x1.Innermost with .Funcdepth < Funcdepth.
// In that case, a new xN must be created, linked in with:
//
// xN.Defn = x1
// xN.Outer = x1.Innermost
// x1.Innermost = xN
//
// When we finish the function, we'll process its closure variables
// and find xN and pop it off the list using:
//
// x1 := xN.Defn
// x1.Innermost = xN.Outer
//
// We leave x1.Innermost set so that we can still get to the original
// variable quickly. Not shown here, but once we're
// done parsing a function and no longer need xN.Outer for the
// lexical x reference links as described above, funcLit
// recomputes xN.Outer as the semantic x reference link tree,
// even filling in x in intermediate closures that might not
// have mentioned it along the way to inner closures that did.
// See funcLit for details.
//
// During the eventual compilation, then, for closure variables we have:
//
// xN.Defn = original variable
// xN.Outer = variable captured in next outward scope
// to make closure where xN appears
//
// Because of the sharding of pieces of the node, x.Defn means x.Name.Defn
// and x.Innermost/Outer means x.Name.Param.Innermost/Outer.
Innermost *Name
Outer *Name
}
func (n *Name) isExpr() {}
func (n *Name) copy() Node { panic(n.no("copy")) }
func (n *Name) doChildren(do func(Node) bool) bool { return false }
func (n *Name) editChildren(edit func(Node) Node) {}
// TypeDefn returns the type definition for a named OTYPE.
// That is, given "type T Defn", it returns Defn.
// It is used by package types.
func (n *Name) TypeDefn() *types.Type {
return n.Ntype.Type()
}
// RecordFrameOffset records the frame offset for the name.
// It is used by package types when laying out function arguments.
func (n *Name) RecordFrameOffset(offset int64) {
n.SetFrameOffset(offset)
}
// NewNameAt returns a new ONAME Node associated with symbol s at position pos.
// The caller is responsible for setting Curfn.
func NewNameAt(pos src.XPos, sym *types.Sym) *Name {
if sym == nil {
base.Fatalf("NewNameAt nil")
}
return newNameAt(pos, ONAME, sym)
}
// NewIota returns a new OIOTA Node.
func NewIota(pos src.XPos, sym *types.Sym) *Name {
if sym == nil {
base.Fatalf("NewIota nil")
}
return newNameAt(pos, OIOTA, sym)
}
// NewDeclNameAt returns a new Name associated with symbol s at position pos.
// The caller is responsible for setting Curfn.
func NewDeclNameAt(pos src.XPos, op Op, sym *types.Sym) *Name {
if sym == nil {
base.Fatalf("NewDeclNameAt nil")
}
switch op {
case ONAME, OTYPE, OLITERAL:
// ok
default:
base.Fatalf("NewDeclNameAt op %v", op)
}
return newNameAt(pos, op, sym)
}
// NewConstAt returns a new OLITERAL Node associated with symbol s at position pos.
func NewConstAt(pos src.XPos, sym *types.Sym, typ *types.Type, val constant.Value) *Name {
if sym == nil {
base.Fatalf("NewConstAt nil")
}
n := newNameAt(pos, OLITERAL, sym)
n.SetType(typ)
n.SetVal(val)
return n
}
// newNameAt is like NewNameAt but allows sym == nil.
func newNameAt(pos src.XPos, op Op, sym *types.Sym) *Name {
n := new(Name)
n.op = op
n.pos = pos
n.sym = sym
return n
}
func (n *Name) Name() *Name { return n }
func (n *Name) Sym() *types.Sym { return n.sym }
func (n *Name) SetSym(x *types.Sym) { n.sym = x }
func (n *Name) SubOp() Op { return n.BuiltinOp }
func (n *Name) SetSubOp(x Op) { n.BuiltinOp = x }
func (n *Name) SetFunc(x *Func) { n.Func = x }
func (n *Name) Offset() int64 { panic("Name.Offset") }
func (n *Name) SetOffset(x int64) {
if x != 0 {
panic("Name.SetOffset")
}
}
func (n *Name) FrameOffset() int64 { return n.Offset_ }
func (n *Name) SetFrameOffset(x int64) { n.Offset_ = x }
func (n *Name) Iota() int64 { return n.Offset_ }
func (n *Name) SetIota(x int64) { n.Offset_ = x }
func (n *Name) Walkdef() uint8 { return n.bits.get2(miniWalkdefShift) }
func (n *Name) SetWalkdef(x uint8) {
if x > 3 {
panic(fmt.Sprintf("cannot SetWalkdef %d", x))
}
n.bits.set2(miniWalkdefShift, x)
}
func (n *Name) Linksym() *obj.LSym { return n.sym.Linksym() }
func (n *Name) LinksymABI(abi obj.ABI) *obj.LSym { return n.sym.LinksymABI(abi) }
func (*Name) CanBeNtype() {}
func (*Name) CanBeAnSSASym() {}
func (*Name) CanBeAnSSAAux() {}
// Pragma returns the PragmaFlag for p, which must be for an OTYPE.
func (n *Name) Pragma() PragmaFlag { return n.pragma }
// SetPragma sets the PragmaFlag for p, which must be for an OTYPE.
func (n *Name) SetPragma(flag PragmaFlag) { n.pragma = flag }
// Alias reports whether p, which must be for an OTYPE, is a type alias.
func (n *Name) Alias() bool { return n.flags&nameAlias != 0 }
// SetAlias sets whether p, which must be for an OTYPE, is a type alias.
func (n *Name) SetAlias(alias bool) { n.flags.set(nameAlias, alias) }
const (
nameReadonly = 1 << iota
nameByval // is the variable captured by value or by reference
nameNeedzero // if it contains pointers, needs to be zeroed on function entry
nameAutoTemp // is the variable a temporary (implies no dwarf info. reset if escapes to heap)
nameUsed // for variable declared and not used error
nameIsClosureVar // PAUTOHEAP closure pseudo-variable; original (if any) at n.Defn
nameIsOutputParamHeapAddr // pointer to a result parameter's heap copy
nameAddrtaken // address taken, even if not moved to heap
nameInlFormal // PAUTO created by inliner, derived from callee formal
nameInlLocal // PAUTO created by inliner, derived from callee local
nameOpenDeferSlot // if temporary var storing info for open-coded defers
nameLibfuzzerExtraCounter // if PEXTERN should be assigned to __libfuzzer_extra_counters section
nameAlias // is type name an alias
)
func (n *Name) Readonly() bool { return n.flags&nameReadonly != 0 }
func (n *Name) Needzero() bool { return n.flags&nameNeedzero != 0 }
func (n *Name) AutoTemp() bool { return n.flags&nameAutoTemp != 0 }
func (n *Name) Used() bool { return n.flags&nameUsed != 0 }
func (n *Name) IsClosureVar() bool { return n.flags&nameIsClosureVar != 0 }
func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 }
func (n *Name) Addrtaken() bool { return n.flags&nameAddrtaken != 0 }
func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 }
func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 }
func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 }
func (n *Name) LibfuzzerExtraCounter() bool { return n.flags&nameLibfuzzerExtraCounter != 0 }
func (n *Name) setReadonly(b bool) { n.flags.set(nameReadonly, b) }
func (n *Name) SetNeedzero(b bool) { n.flags.set(nameNeedzero, b) }
func (n *Name) SetAutoTemp(b bool) { n.flags.set(nameAutoTemp, b) }
func (n *Name) SetUsed(b bool) { n.flags.set(nameUsed, b) }
func (n *Name) SetIsClosureVar(b bool) { n.flags.set(nameIsClosureVar, b) }
func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) }
func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) }
func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) }
func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) }
func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) }
func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) }
// OnStack reports whether variable n may reside on the stack.
func (n *Name) OnStack() bool {
if n.Op() == ONAME {
switch n.Class {
case PPARAM, PPARAMOUT, PAUTO:
return n.Esc() != EscHeap
case PEXTERN, PAUTOHEAP:
return false
}
}
// Note: fmt.go:dumpNodeHeader calls all "func() bool"-typed
// methods, but it can only recover from panics, not Fatalf.
panic(fmt.Sprintf("%v: not a variable: %v", base.FmtPos(n.Pos()), n))
}
// MarkReadonly indicates that n is an ONAME with readonly contents.
func (n *Name) MarkReadonly() {
if n.Op() != ONAME {
base.Fatalf("Node.MarkReadonly %v", n.Op())
}
n.setReadonly(true)
// Mark the linksym as readonly immediately
// so that the SSA backend can use this information.
// It will be overridden later during dumpglobls.
n.Linksym().Type = objabi.SRODATA
}
// Val returns the constant.Value for the node.
func (n *Name) Val() constant.Value {
if n.val == nil {
return constant.MakeUnknown()
}
return n.val
}
// SetVal sets the constant.Value for the node.
func (n *Name) SetVal(v constant.Value) {
if n.op != OLITERAL {
panic(n.no("SetVal"))
}
AssertValidTypeForConst(n.Type(), v)
n.val = v
}
// Canonical returns the logical declaration that n represents. If n
// is a closure variable, then Canonical returns the original Name as
// it appears in the function that immediately contains the
// declaration. Otherwise, Canonical simply returns n itself.
func (n *Name) Canonical() *Name {
if n.IsClosureVar() && n.Defn != nil {
n = n.Defn.(*Name)
}
return n
}
func (n *Name) SetByval(b bool) {
if n.Canonical() != n {
base.Fatalf("SetByval called on non-canonical variable: %v", n)
}
n.flags.set(nameByval, b)
}
func (n *Name) Byval() bool {
// We require byval to be set on the canonical variable, but we
// allow it to be accessed from any instance.
return n.Canonical().flags&nameByval != 0
}
// CaptureName returns a Name suitable for referring to n from within function
// fn or from the package block if fn is nil. If n is a free variable declared
// within a function that encloses fn, then CaptureName returns a closure
// variable that refers to n and adds it to fn.ClosureVars. Otherwise, it simply
// returns n.
func CaptureName(pos src.XPos, fn *Func, n *Name) *Name {
if n.IsClosureVar() {
base.FatalfAt(pos, "misuse of CaptureName on closure variable: %v", n)
}
if n.Op() != ONAME || n.Curfn == nil || n.Curfn == fn {
return n // okay to use directly
}
if fn == nil {
base.FatalfAt(pos, "package-block reference to %v, declared in %v", n, n.Curfn)
}
c := n.Innermost
if c != nil && c.Curfn == fn {
return c
}
// Do not have a closure var for the active closure yet; make one.
c = NewNameAt(pos, n.Sym())
c.Curfn = fn
c.Class = PAUTOHEAP
c.SetIsClosureVar(true)
c.Defn = n
// Link into list of active closure variables.
// Popped from list in FinishCaptureNames.
c.Outer = n.Innermost
n.Innermost = c
fn.ClosureVars = append(fn.ClosureVars, c)
return c
}
// FinishCaptureNames handles any work leftover from calling CaptureName
// earlier. outerfn should be the function that immediately encloses fn.
func FinishCaptureNames(pos src.XPos, outerfn, fn *Func) {
// closure-specific variables are hanging off the
// ordinary ones; see CaptureName above.
// unhook them.
// make the list of pointers for the closure call.
for _, cv := range fn.ClosureVars {
// Unlink from n; see comment in syntax.go type Param for these fields.
n := cv.Defn.(*Name)
n.Innermost = cv.Outer
// If the closure usage of n is not dense, we need to make it
// dense by recapturing n within the enclosing function.
//
// That is, suppose we just finished parsing the innermost
// closure f4 in this code:
//
// func f() {
// n := 1
// func() { // f2
// use(n)
// func() { // f3
// func() { // f4
// use(n)
// }()
// }()
// }()
// }
//
// At this point cv.Outer is f2's n; there is no n for f3. To
// construct the closure f4 from within f3, we need to use f3's
// n and in this case we need to create f3's n with CaptureName.
//
// We'll decide later in walk whether to use v directly or &v.
cv.Outer = CaptureName(pos, outerfn, n)
}
}
// SameSource reports whether two nodes refer to the same source
// element.
//
// It exists to help incrementally migrate the compiler towards
// allowing the introduction of IdentExpr (#42990). Once we have
// IdentExpr, it will no longer be safe to directly compare Node
// values to tell if they refer to the same Name. Instead, code will
// need to explicitly get references to the underlying Name object(s),
// and compare those instead.
//
// It will still be safe to compare Nodes directly for checking if two
// nodes are syntactically the same. The SameSource function exists to
// indicate code that intentionally compares Nodes for syntactic
// equality as opposed to code that has yet to be updated in
// preparation for IdentExpr.
func SameSource(n1, n2 Node) bool {
return n1 == n2
}
// Uses reports whether expression x is a (direct) use of the given
// variable.
func Uses(x Node, v *Name) bool {
if v == nil || v.Op() != ONAME {
base.Fatalf("RefersTo bad Name: %v", v)
}
return x.Op() == ONAME && x.Name() == v
}
// DeclaredBy reports whether expression x refers (directly) to a
// variable that was declared by the given statement.
func DeclaredBy(x, stmt Node) bool {
if stmt == nil {
base.Fatalf("DeclaredBy nil")
}
return x.Op() == ONAME && SameSource(x.Name().Defn, stmt)
}
// The Class of a variable/function describes the "storage class"
// of a variable or function. During parsing, storage classes are
// called declaration contexts.
type Class uint8
//go:generate stringer -type=Class name.go
const (
Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables
PEXTERN // global variables
PAUTO // local variables
PAUTOHEAP // local variables or parameters moved to heap
PPARAM // input arguments
PPARAMOUT // output results
PFUNC // global functions
// Careful: Class is stored in three bits in Node.flags.
_ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
)
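// The blank constant above is a compile-time assertion. A minimal
// standalone sketch of the same idiom, with hypothetical constants and
// an assumed 2-bit limit for illustration:

package main

const (
	c0 = iota
	c1
	c2
	// If a fifth constant is added, iota here becomes 5, (1<<2)-iota
	// goes negative, and the conversion to uint fails to compile.
	_ = uint((1 << 2) - iota)
)

func main() {}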
// An Embed holds the position and patterns of a //go:embed directive.
type Embed struct {
Pos src.XPos
Patterns []string
}
// A PkgName is an identifier referring to an imported package.
type PkgName struct {
miniNode
sym *types.Sym
Pkg *types.Pkg
Used bool
}
func (p *PkgName) Sym() *types.Sym { return p.sym }
func (*PkgName) CanBeNtype() {}
func NewPkgName(pos src.XPos, sym *types.Sym, pkg *types.Pkg) *PkgName {
p := &PkgName{sym: sym, Pkg: pkg}
p.op = OPACK
p.pos = pos
return p
}
var RegFP *Name


@@ -0,0 +1,591 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// “Abstract” syntax representation.
package ir
import (
"fmt"
"go/constant"
"sort"
"cmd/compile/internal/base"
"cmd/compile/internal/types"
"cmd/internal/src"
)
// A Node is the abstract interface to an IR node.
type Node interface {
// Formatting
Format(s fmt.State, verb rune)
// Source position.
Pos() src.XPos
SetPos(x src.XPos)
// For making copies. For Copy and SepCopy.
copy() Node
doChildren(func(Node) bool) bool
editChildren(func(Node) Node)
// Abstract graph structure, for generic traversals.
Op() Op
Init() Nodes
// Fields specific to certain Ops only.
Type() *types.Type
SetType(t *types.Type)
Name() *Name
Sym() *types.Sym
Val() constant.Value
SetVal(v constant.Value)
// Storage for analysis passes.
Esc() uint16
SetEsc(x uint16)
Diag() bool
SetDiag(x bool)
Typecheck() uint8
SetTypecheck(x uint8)
NonNil() bool
MarkNonNil()
}
// Line returns n's position as a string. If n has been inlined,
// it uses the outermost position where n has been inlined.
func Line(n Node) string {
return base.FmtPos(n.Pos())
}
func IsSynthetic(n Node) bool {
name := n.Sym().Name
return name[0] == '.' || name[0] == '~'
}
// IsAutoTmp indicates if n was created by the compiler as a temporary,
// based on the setting of the .AutoTemp flag in n's Name.
func IsAutoTmp(n Node) bool {
if n == nil || n.Op() != ONAME {
return false
}
return n.Name().AutoTemp()
}
// MayBeShared reports whether n may occur in multiple places in the AST.
// Extra care must be taken when mutating such a node.
func MayBeShared(n Node) bool {
switch n.Op() {
case ONAME, OLITERAL, ONIL, OTYPE:
return true
}
return false
}
type InitNode interface {
Node
PtrInit() *Nodes
SetInit(x Nodes)
}
func TakeInit(n Node) Nodes {
init := n.Init()
if len(init) != 0 {
n.(InitNode).SetInit(nil)
}
return init
}
//go:generate stringer -type=Op -trimprefix=O node.go
type Op uint8
// Node ops.
const (
OXXX Op = iota
// names
ONAME // var or func name
// Unnamed arg or return value: f(int, string) (int, error) { etc }
// Also used for a qualified package identifier that hasn't been resolved yet.
ONONAME
OTYPE // type name
OPACK // import
OLITERAL // literal
ONIL // nil
// expressions
OADD // Left + Right
OSUB // Left - Right
OOR // Left | Right
OXOR // Left ^ Right
OADDSTR // +{List} (string addition, list elements are strings)
OADDR // &Left
OANDAND // Left && Right
OAPPEND // append(List); after walk, Left may contain elem type descriptor
OBYTES2STR // Type(Left) (Type is string, Left is a []byte)
OBYTES2STRTMP // Type(Left) (Type is string, Left is a []byte, ephemeral)
ORUNES2STR // Type(Left) (Type is string, Left is a []rune)
OSTR2BYTES // Type(Left) (Type is []byte, Left is a string)
OSTR2BYTESTMP // Type(Left) (Type is []byte, Left is a string, ephemeral)
OSTR2RUNES // Type(Left) (Type is []rune, Left is a string)
// Left = Right or (if Colas=true) Left := Right
// If Colas, then Ninit includes a DCL node for Left.
OAS
// List = Rlist (x, y, z = a, b, c) or (if Colas=true) List := Rlist
// If Colas, then Ninit includes DCL nodes for List
OAS2
OAS2DOTTYPE // List = Right (x, ok = I.(int))
OAS2FUNC // List = Right (x, y = f())
OAS2MAPR // List = Right (x, ok = m["foo"])
OAS2RECV // List = Right (x, ok = <-c)
OASOP // Left Etype= Right (x += y)
OCALL // Left(List) (function call, method call or type conversion)
// OCALLFUNC, OCALLMETH, and OCALLINTER have the same structure.
// Prior to walk, they are: Left(List), where List is all regular arguments.
// After walk, List is a series of assignments to temporaries,
// and Rlist is an updated set of arguments.
// Nbody is all OVARLIVE nodes that are attached to OCALLxxx.
// TODO(josharian/khr): Use Ninit instead of List for the assignments to temporaries. See CL 114797.
OCALLFUNC // Left(List/Rlist) (function call f(args))
OCALLMETH // Left(List/Rlist) (direct method call x.Method(args))
OCALLINTER // Left(List/Rlist) (interface method call x.Method(args))
OCALLPART // Left.Right (method expression x.Method, not called)
OCAP // cap(Left)
OCLOSE // close(Left)
OCLOSURE // func Type { Func.Closure.Nbody } (func literal)
OCOMPLIT // Right{List} (composite literal, not yet lowered to specific form)
OMAPLIT // Type{List} (composite literal, Type is map)
OSTRUCTLIT // Type{List} (composite literal, Type is struct)
OARRAYLIT // Type{List} (composite literal, Type is array)
OSLICELIT // Type{List} (composite literal, Type is slice) Right.Int64() = slice length.
OPTRLIT // &Left (left is composite literal)
OCONV // Type(Left) (type conversion)
OCONVIFACE // Type(Left) (type conversion, to interface)
OCONVNOP // Type(Left) (type conversion, no effect)
OCOPY // copy(Left, Right)
ODCL // var Left (declares Left of type Left.Type)
// Used during parsing but don't last.
ODCLFUNC // func f() or func (r) f()
ODCLCONST // const pi = 3.14
ODCLTYPE // type Int int or type Int = int
ODELETE // delete(List)
ODOT // Left.Sym (Left is of struct type)
ODOTPTR // Left.Sym (Left is of pointer to struct type)
ODOTMETH // Left.Sym (Left is non-interface, Right is method name)
ODOTINTER // Left.Sym (Left is interface, Right is method name)
OXDOT // Left.Sym (before rewrite to one of the preceding)
ODOTTYPE // Left.Right or Left.Type (.Right during parsing, .Type once resolved); after walk, .Right contains address of interface type descriptor and .Right.Right contains address of concrete type descriptor
ODOTTYPE2 // Left.Right or Left.Type (.Right during parsing, .Type once resolved; on rhs of OAS2DOTTYPE); after walk, .Right contains address of interface type descriptor
OEQ // Left == Right
ONE // Left != Right
OLT // Left < Right
OLE // Left <= Right
OGE // Left >= Right
OGT // Left > Right
ODEREF // *Left
OINDEX // Left[Right] (index of array or slice)
OINDEXMAP // Left[Right] (index of map)
OKEY // Left:Right (key:value in struct/array/map literal)
OSTRUCTKEY // Sym:Left (key:value in struct literal, after type checking)
OLEN // len(Left)
OMAKE // make(List) (before type checking converts to one of the following)
OMAKECHAN // make(Type, Left) (type is chan)
OMAKEMAP // make(Type, Left) (type is map)
OMAKESLICE // make(Type, Left, Right) (type is slice)
OMAKESLICECOPY // makeslicecopy(Type, Left, Right) (type is slice; Left is length and Right is the copied from slice)
// OMAKESLICECOPY is created by the order pass and corresponds to:
// s = make(Type, Left); copy(s, Right)
//
// Bounded can be set on the node when Left == len(Right) is known at compile time.
//
// This node is created so the walk pass can optimize this pattern which would
// otherwise be hard to detect after the order pass.
OMUL // Left * Right
ODIV // Left / Right
OMOD // Left % Right
OLSH // Left << Right
ORSH // Left >> Right
OAND // Left & Right
OANDNOT // Left &^ Right
ONEW // new(Left); corresponds to calls to new in source code
ONOT // !Left
OBITNOT // ^Left
OPLUS // +Left
ONEG // -Left
OOROR // Left || Right
OPANIC // panic(Left)
OPRINT // print(List)
OPRINTN // println(List)
OPAREN // (Left)
OSEND // Left <- Right
OSLICE // Left[List[0] : List[1]] (Left is untypechecked or slice)
OSLICEARR // Left[List[0] : List[1]] (Left is pointer to array)
OSLICESTR // Left[List[0] : List[1]] (Left is string)
OSLICE3 // Left[List[0] : List[1] : List[2]] (Left is untypechecked or slice)
OSLICE3ARR // Left[List[0] : List[1] : List[2]] (Left is pointer to array)
OSLICEHEADER // sliceheader{Left, List[0], List[1]} (Left is unsafe.Pointer, List[0] is length, List[1] is capacity)
ORECOVER // recover()
ORECV // <-Left
ORUNESTR // Type(Left) (Type is string, Left is rune)
OSELRECV2 // like OAS2: List = Rlist where len(List)=2, len(Rlist)=1, Rlist[0].Op = ORECV (appears as .Left of OCASE)
OIOTA // iota
OREAL // real(Left)
OIMAG // imag(Left)
OCOMPLEX // complex(Left, Right) or complex(List[0]) where List[0] is a 2-result function call
OALIGNOF // unsafe.Alignof(Left)
OOFFSETOF // unsafe.Offsetof(Left)
OSIZEOF // unsafe.Sizeof(Left)
OMETHEXPR // method expression
OSTMTEXPR // statement expression (Init; Left)
// statements
OBLOCK // { List } (block of code)
OBREAK // break [Sym]
// OCASE: case List: Nbody (List==nil means default)
// For OTYPESW, List is a OTYPE node for the specified type (or OLITERAL
// for nil), and, if a type-switch variable is specified, Rlist is an
// ONAME for the version of the type-switch variable with the specified
// type.
OCASE
OCONTINUE // continue [Sym]
ODEFER // defer Left (Left must be call)
OFALL // fallthrough
OFOR // for Ninit; Left; Right { Nbody }
// OFORUNTIL is like OFOR, but the test (Left) is applied after the body:
// Ninit
// top: { Nbody } // Execute the body at least once
// cont: Right
// if Left { // And then test the loop condition
// List // Before looping to top, execute List
// goto top
// }
// OFORUNTIL is created by walk. There's no way to write this in Go code.
OFORUNTIL
OGOTO // goto Sym
OIF // if Ninit; Left { Nbody } else { Rlist }
OLABEL // Sym:
OGO // go Left (Left must be call)
ORANGE // for List = range Right { Nbody }
ORETURN // return List
OSELECT // select { List } (List is list of OCASE)
OSWITCH // switch Ninit; Left { List } (List is a list of OCASE)
// OTYPESW: Left := Right.(type) (appears as .Left of OSWITCH)
// Left is nil if there is no type-switch variable
OTYPESW
// types
OTCHAN // chan int
OTMAP // map[string]int
OTSTRUCT // struct{}
OTINTER // interface{}
// OTFUNC: func() - Left is receiver field, List is list of param fields, Rlist is
// list of result fields.
OTFUNC
OTARRAY // [8]int or [...]int
OTSLICE // []int
// misc
// intermediate representation of an inlined call. Uses Init (assignments
// for the captured variables, parameters, retvars, & INLMARK op),
// Body (body of the inlined function), and ReturnVars (list of
// return values)
OINLCALL // intermediate representation of an inlined call.
OEFACE // itable and data words of an empty-interface value.
OITAB // itable word of an interface value.
OIDATA // data word of an interface value in Left
OSPTR // base pointer of a slice or string.
OCFUNC // reference to c function pointer (not go func value)
OCHECKNIL // emit code to ensure pointer/interface not nil
OVARDEF // variable is about to be fully initialized
OVARKILL // variable is dead
OVARLIVE // variable is alive
ORESULT // result of a function call; Xoffset is stack offset
OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
OLINKSYMOFFSET // offset within a name
// arch-specific opcodes
OTAILCALL // tail call to another function
OGETG // runtime.getg() (read g pointer)
OEND
)
// Nodes is a slice of Node.
type Nodes []Node
// Append appends entries to Nodes.
func (n *Nodes) Append(a ...Node) {
if len(a) == 0 {
return
}
*n = append(*n, a...)
}
// Prepend prepends entries to Nodes.
// If a slice is passed in, this will take ownership of it.
func (n *Nodes) Prepend(a ...Node) {
if len(a) == 0 {
return
}
*n = append(a, *n...)
}
// Take clears n, returning its former contents.
func (n *Nodes) Take() []Node {
ret := *n
*n = nil
return ret
}
// Copy returns a copy of the content of the slice.
func (n Nodes) Copy() Nodes {
if n == nil {
return nil
}
c := make(Nodes, len(n))
copy(c, n)
return c
}
// NameQueue is a FIFO queue of *Name. The zero value of NameQueue is
// a ready-to-use empty queue.
type NameQueue struct {
ring []*Name
head, tail int
}
// Empty reports whether q contains no Names.
func (q *NameQueue) Empty() bool {
return q.head == q.tail
}
// PushRight appends n to the right of the queue.
func (q *NameQueue) PushRight(n *Name) {
if len(q.ring) == 0 {
q.ring = make([]*Name, 16)
} else if q.head+len(q.ring) == q.tail {
// Grow the ring.
nring := make([]*Name, len(q.ring)*2)
// Copy the old elements.
part := q.ring[q.head%len(q.ring):]
if q.tail-q.head <= len(part) {
part = part[:q.tail-q.head]
copy(nring, part)
} else {
pos := copy(nring, part)
copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
}
q.ring, q.head, q.tail = nring, 0, q.tail-q.head
}
q.ring[q.tail%len(q.ring)] = n
q.tail++
}
// PopLeft pops a Name from the left of the queue. It panics if q is
// empty.
func (q *NameQueue) PopLeft() *Name {
if q.Empty() {
panic("dequeue empty")
}
n := q.ring[q.head%len(q.ring)]
q.head++
return n
}
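// A standalone sketch of the same growable ring buffer, using ints in
// place of *Name so it can run on its own. The growth and wrap-around
// logic mirrors PushRight/PopLeft above.

package main

import "fmt"

type queue struct {
	ring       []int
	head, tail int // only ever increase; ring index is taken modulo len(ring)
}

func (q *queue) empty() bool { return q.head == q.tail }

func (q *queue) pushRight(v int) {
	if len(q.ring) == 0 {
		q.ring = make([]int, 4)
	} else if q.head+len(q.ring) == q.tail {
		// Full: double the ring, moving the live elements to the front.
		nring := make([]int, len(q.ring)*2)
		part := q.ring[q.head%len(q.ring):]
		if q.tail-q.head <= len(part) {
			part = part[:q.tail-q.head]
			copy(nring, part)
		} else {
			pos := copy(nring, part)
			copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
		}
		q.ring, q.head, q.tail = nring, 0, q.tail-q.head
	}
	q.ring[q.tail%len(q.ring)] = v
	q.tail++
}

func (q *queue) popLeft() int {
	if q.empty() {
		panic("dequeue empty")
	}
	v := q.ring[q.head%len(q.ring)]
	q.head++
	return v
}

func main() {
	var q queue
	for i := 1; i <= 6; i++ {
		q.pushRight(i) // the fifth push triggers a growth from 4 to 8 slots
	}
	for !q.empty() {
		fmt.Print(q.popLeft(), " ") // 1 2 3 4 5 6
	}
	fmt.Println()
}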
// NameSet is a set of Names.
type NameSet map[*Name]struct{}
// Has reports whether s contains n.
func (s NameSet) Has(n *Name) bool {
_, isPresent := s[n]
return isPresent
}
// Add adds n to s.
func (s *NameSet) Add(n *Name) {
if *s == nil {
*s = make(map[*Name]struct{})
}
(*s)[n] = struct{}{}
}
// Sorted returns s sorted according to less.
func (s NameSet) Sorted(less func(*Name, *Name) bool) []*Name {
var res []*Name
for n := range s {
res = append(res, n)
}
sort.Slice(res, func(i, j int) bool { return less(res[i], res[j]) })
return res
}
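// A standalone sketch of the NameSet pattern with strings instead of
// *Name: the zero value is ready to use because add lazily allocates
// the map on a pointer receiver.

package main

import (
	"fmt"
	"sort"
)

type stringSet map[string]struct{}

func (s stringSet) has(k string) bool { _, ok := s[k]; return ok }

func (s *stringSet) add(k string) {
	if *s == nil {
		*s = make(map[string]struct{})
	}
	(*s)[k] = struct{}{}
}

func (s stringSet) sorted() []string {
	var res []string
	for k := range s {
		res = append(res, k)
	}
	sort.Strings(res)
	return res
}

func main() {
	var s stringSet // zero value, no make needed
	s.add("walk")
	s.add("escape")
	fmt.Println(s.has("walk"), s.sorted()) // true [escape walk]
}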
type PragmaFlag int16
const (
// Func pragmas.
Nointerface PragmaFlag = 1 << iota
Noescape // func parameters don't escape
Norace // func must not have race detector annotations
Nosplit // func should not execute on separate stack
Noinline // func should not be inlined
NoCheckPtr // func should not be instrumented by checkptr
CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
UintptrEscapes // pointers converted to uintptr escape
// Runtime-only func pragmas.
// See ../../../../runtime/README.md for detailed descriptions.
Systemstack // func must run on system stack
Nowritebarrier // emit compiler error instead of write barrier
Nowritebarrierrec // error on write barrier in this or recursive callees
Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees
// Runtime and cgo type pragmas
NotInHeap // values of this type must not be heap allocated
// Go command pragmas
GoBuildPragma
RegisterParams // TODO remove after register abi is working
)
func AsNode(n types.Object) Node {
if n == nil {
return nil
}
return n.(Node)
}
var BlankNode Node
func IsConst(n Node, ct constant.Kind) bool {
return ConstType(n) == ct
}
// IsNil reports whether n represents the universal untyped zero value "nil".
func IsNil(n Node) bool {
// Check n.Orig because constant propagation may produce typed nil constants,
// which don't exist in the Go spec.
return n != nil && Orig(n).Op() == ONIL
}
func IsBlank(n Node) bool {
if n == nil {
return false
}
return n.Sym().IsBlank()
}
// IsMethod reports whether n is a method.
// n must be a function or a method.
func IsMethod(n Node) bool {
return n.Type().Recv() != nil
}
func HasNamedResults(fn *Func) bool {
typ := fn.Type()
return typ.NumResults() > 0 && types.OrigSym(typ.Results().Field(0).Sym) != nil
}
// HasUniquePos reports whether n has a unique position that can be
// used for reporting error messages.
//
// It's primarily used to distinguish references to named objects,
// whose Pos will point back to their declaration position rather than
// their usage position.
func HasUniquePos(n Node) bool {
switch n.Op() {
case ONAME, OPACK:
return false
case OLITERAL, ONIL, OTYPE:
if n.Sym() != nil {
return false
}
}
if !n.Pos().IsKnown() {
if base.Flag.K != 0 {
base.Warn("setlineno: unknown position (line 0)")
}
return false
}
return true
}
func SetPos(n Node) src.XPos {
lno := base.Pos
if n != nil && HasUniquePos(n) {
base.Pos = n.Pos()
}
return lno
}
// The result of InitExpr MUST be assigned back to n, e.g.
// n.Left = InitExpr(init, n.Left)
func InitExpr(init []Node, expr Node) Node {
if len(init) == 0 {
return expr
}
n, ok := expr.(InitNode)
if !ok || MayBeShared(n) {
// Introduce OCONVNOP to hold init list.
n = NewConvExpr(base.Pos, OCONVNOP, nil, expr)
n.SetType(expr.Type())
n.SetTypecheck(1)
}
n.PtrInit().Prepend(init...)
return n
}
// OuterValue returns the outer value that a write to n affects:
// the containing struct or array.
func OuterValue(n Node) Node {
for {
switch nn := n; nn.Op() {
case OXDOT:
base.Fatalf("OXDOT in walk")
case ODOT:
nn := nn.(*SelectorExpr)
n = nn.X
continue
case OPAREN:
nn := nn.(*ParenExpr)
n = nn.X
continue
case OCONVNOP:
nn := nn.(*ConvExpr)
n = nn.X
continue
case OINDEX:
nn := nn.(*IndexExpr)
if nn.X.Type() == nil {
base.Fatalf("OuterValue needs type for %v", nn.X)
}
if nn.X.Type().IsArray() {
n = nn.X
continue
}
}
return n
}
}
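// A rough standalone analog of the unwrapping loop above, using the
// standard go/ast package rather than ir. Without type information it
// cannot distinguish array indexing (unwrap) from slice or map indexing
// (stop), so it unwraps every index expression; a sketch, not a substitute.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
)

func outerValue(e ast.Expr) ast.Expr {
	for {
		switch x := e.(type) {
		case *ast.ParenExpr:
			e = x.X
		case *ast.SelectorExpr:
			e = x.X
		case *ast.IndexExpr:
			e = x.X
		default:
			return e
		}
	}
}

func main() {
	e, _ := parser.ParseExpr("(s.field[2])")
	fmt.Printf("%T\n", outerValue(e)) // *ast.Ident (the s)
}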
const (
EscUnknown = iota
EscNone // Does not escape to heap, result, or parameters.
EscHeap // Reachable from the heap
EscNever // By construction will not escape.
)

File diff suppressed because it is too large.


@@ -0,0 +1,174 @@
// Code generated by "stringer -type=Op -trimprefix=O node.go"; DO NOT EDIT.
package ir
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[OXXX-0]
_ = x[ONAME-1]
_ = x[ONONAME-2]
_ = x[OTYPE-3]
_ = x[OPACK-4]
_ = x[OLITERAL-5]
_ = x[ONIL-6]
_ = x[OADD-7]
_ = x[OSUB-8]
_ = x[OOR-9]
_ = x[OXOR-10]
_ = x[OADDSTR-11]
_ = x[OADDR-12]
_ = x[OANDAND-13]
_ = x[OAPPEND-14]
_ = x[OBYTES2STR-15]
_ = x[OBYTES2STRTMP-16]
_ = x[ORUNES2STR-17]
_ = x[OSTR2BYTES-18]
_ = x[OSTR2BYTESTMP-19]
_ = x[OSTR2RUNES-20]
_ = x[OAS-21]
_ = x[OAS2-22]
_ = x[OAS2DOTTYPE-23]
_ = x[OAS2FUNC-24]
_ = x[OAS2MAPR-25]
_ = x[OAS2RECV-26]
_ = x[OASOP-27]
_ = x[OCALL-28]
_ = x[OCALLFUNC-29]
_ = x[OCALLMETH-30]
_ = x[OCALLINTER-31]
_ = x[OCALLPART-32]
_ = x[OCAP-33]
_ = x[OCLOSE-34]
_ = x[OCLOSURE-35]
_ = x[OCOMPLIT-36]
_ = x[OMAPLIT-37]
_ = x[OSTRUCTLIT-38]
_ = x[OARRAYLIT-39]
_ = x[OSLICELIT-40]
_ = x[OPTRLIT-41]
_ = x[OCONV-42]
_ = x[OCONVIFACE-43]
_ = x[OCONVNOP-44]
_ = x[OCOPY-45]
_ = x[ODCL-46]
_ = x[ODCLFUNC-47]
_ = x[ODCLCONST-48]
_ = x[ODCLTYPE-49]
_ = x[ODELETE-50]
_ = x[ODOT-51]
_ = x[ODOTPTR-52]
_ = x[ODOTMETH-53]
_ = x[ODOTINTER-54]
_ = x[OXDOT-55]
_ = x[ODOTTYPE-56]
_ = x[ODOTTYPE2-57]
_ = x[OEQ-58]
_ = x[ONE-59]
_ = x[OLT-60]
_ = x[OLE-61]
_ = x[OGE-62]
_ = x[OGT-63]
_ = x[ODEREF-64]
_ = x[OINDEX-65]
_ = x[OINDEXMAP-66]
_ = x[OKEY-67]
_ = x[OSTRUCTKEY-68]
_ = x[OLEN-69]
_ = x[OMAKE-70]
_ = x[OMAKECHAN-71]
_ = x[OMAKEMAP-72]
_ = x[OMAKESLICE-73]
_ = x[OMAKESLICECOPY-74]
_ = x[OMUL-75]
_ = x[ODIV-76]
_ = x[OMOD-77]
_ = x[OLSH-78]
_ = x[ORSH-79]
_ = x[OAND-80]
_ = x[OANDNOT-81]
_ = x[ONEW-82]
_ = x[ONOT-83]
_ = x[OBITNOT-84]
_ = x[OPLUS-85]
_ = x[ONEG-86]
_ = x[OOROR-87]
_ = x[OPANIC-88]
_ = x[OPRINT-89]
_ = x[OPRINTN-90]
_ = x[OPAREN-91]
_ = x[OSEND-92]
_ = x[OSLICE-93]
_ = x[OSLICEARR-94]
_ = x[OSLICESTR-95]
_ = x[OSLICE3-96]
_ = x[OSLICE3ARR-97]
_ = x[OSLICEHEADER-98]
_ = x[ORECOVER-99]
_ = x[ORECV-100]
_ = x[ORUNESTR-101]
_ = x[OSELRECV2-102]
_ = x[OIOTA-103]
_ = x[OREAL-104]
_ = x[OIMAG-105]
_ = x[OCOMPLEX-106]
_ = x[OALIGNOF-107]
_ = x[OOFFSETOF-108]
_ = x[OSIZEOF-109]
_ = x[OMETHEXPR-110]
_ = x[OSTMTEXPR-111]
_ = x[OBLOCK-112]
_ = x[OBREAK-113]
_ = x[OCASE-114]
_ = x[OCONTINUE-115]
_ = x[ODEFER-116]
_ = x[OFALL-117]
_ = x[OFOR-118]
_ = x[OFORUNTIL-119]
_ = x[OGOTO-120]
_ = x[OIF-121]
_ = x[OLABEL-122]
_ = x[OGO-123]
_ = x[ORANGE-124]
_ = x[ORETURN-125]
_ = x[OSELECT-126]
_ = x[OSWITCH-127]
_ = x[OTYPESW-128]
_ = x[OTCHAN-129]
_ = x[OTMAP-130]
_ = x[OTSTRUCT-131]
_ = x[OTINTER-132]
_ = x[OTFUNC-133]
_ = x[OTARRAY-134]
_ = x[OTSLICE-135]
_ = x[OINLCALL-136]
_ = x[OEFACE-137]
_ = x[OITAB-138]
_ = x[OIDATA-139]
_ = x[OSPTR-140]
_ = x[OCFUNC-141]
_ = x[OCHECKNIL-142]
_ = x[OVARDEF-143]
_ = x[OVARKILL-144]
_ = x[OVARLIVE-145]
_ = x[ORESULT-146]
_ = x[OINLMARK-147]
_ = x[OLINKSYMOFFSET-148]
_ = x[OTAILCALL-149]
_ = x[OGETG-150]
_ = x[OEND-151]
}
const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETTAILCALLGETGEND"
var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 474, 480, 484, 487, 491, 496, 501, 507, 512, 516, 521, 529, 537, 543, 552, 563, 570, 574, 581, 589, 593, 597, 601, 608, 615, 623, 629, 637, 645, 650, 655, 659, 667, 672, 676, 679, 687, 691, 693, 698, 700, 705, 711, 717, 723, 729, 734, 738, 745, 751, 756, 762, 768, 775, 780, 784, 789, 793, 798, 806, 812, 819, 826, 832, 839, 852, 860, 864, 867}
func (i Op) String() string {
if i >= Op(len(_Op_index)-1) {
return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Op_name[_Op_index[i]:_Op_index[i+1]]
}
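// A hand-written miniature of what stringer generates above: one
// concatenated name string plus an index table, so String does no
// allocation and the data lives in a single read-only blob. The Color
// type below is hypothetical, for illustration only.

package main

import "fmt"

type Color uint8

const (
	Red Color = iota
	Green
	Blue
)

const _Color_name = "RedGreenBlue"

var _Color_index = [...]uint8{0, 3, 8, 12}

func (c Color) String() string {
	if c >= Color(len(_Color_index)-1) {
		return fmt.Sprintf("Color(%d)", c)
	}
	return _Color_name[_Color_index[c]:_Color_index[c+1]]
}

func main() {
	fmt.Println(Red, Green, Blue, Color(7)) // Red Green Blue Color(7)
}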


@@ -0,0 +1,35 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
import "cmd/compile/internal/types"
// A Package holds information about the package being compiled.
type Package struct {
// Imports, listed in source order.
// See golang.org/issue/31636.
Imports []*types.Pkg
// Init functions, listed in source order.
Inits []*Func
// Top-level declarations.
Decls []Node
// Extern (package global) declarations.
Externs []Node
// Assembly function declarations.
Asms []*Name
// Cgo directives.
CgoPragmas [][]string
// Variables with //go:embed lines.
Embeds []*Name
// Exported (or re-exported) symbols.
Exports []*Name
}


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package ir
// Strongly connected components.
//
@@ -30,13 +30,13 @@ package gc
// when analyzing a set of mutually recursive functions.
type bottomUpVisitor struct {
analyze func([]*Node, bool)
analyze func([]*Func, bool)
visitgen uint32
nodeID map[*Node]uint32
stack []*Node
nodeID map[*Func]uint32
stack []*Func
}
// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
// VisitFuncsBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
// It calls analyze with successive groups of functions, working from
// the bottom of the call graph upward. Each time analyze is called with
// a list of functions, every function on that list only calls other functions
@@ -49,18 +49,21 @@ type bottomUpVisitor struct {
// If recursive is false, the list consists of only a single function and its closures.
// If recursive is true, the list may still contain only a single function,
// if that function is itself recursive.
func visitBottomUp(list []*Node, analyze func(list []*Node, recursive bool)) {
func VisitFuncsBottomUp(list []Node, analyze func(list []*Func, recursive bool)) {
var v bottomUpVisitor
v.analyze = analyze
v.nodeID = make(map[*Node]uint32)
v.nodeID = make(map[*Func]uint32)
for _, n := range list {
if n.Op == ODCLFUNC && !n.Func.IsHiddenClosure() {
v.visit(n)
if n.Op() == ODCLFUNC {
n := n.(*Func)
if !n.IsHiddenClosure() {
v.visit(n)
}
}
}
}
func (v *bottomUpVisitor) visit(n *Node) uint32 {
func (v *bottomUpVisitor) visit(n *Func) uint32 {
if id := v.nodeID[n]; id > 0 {
// already visited
return id
@@ -73,42 +76,31 @@ func (v *bottomUpVisitor) visit(n *Node) uint32 {
min := v.visitgen
v.stack = append(v.stack, n)
inspectList(n.Nbody, func(n *Node) bool {
switch n.Op {
case ONAME:
if n.Class() == PFUNC {
if n.isMethodExpression() {
n = asNode(n.Type.Nname())
}
if n != nil && n.Name.Defn != nil {
if m := v.visit(n.Name.Defn); m < min {
min = m
}
}
}
case ODOTMETH:
fn := asNode(n.Type.Nname())
if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
if m := v.visit(fn.Name.Defn); m < min {
min = m
}
}
case OCALLPART:
fn := asNode(callpartMethod(n).Type.Nname())
if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
if m := v.visit(fn.Name.Defn); m < min {
min = m
}
}
case OCLOSURE:
if m := v.visit(n.Func.Closure); m < min {
do := func(defn Node) {
if defn != nil {
if m := v.visit(defn.(*Func)); m < min {
min = m
}
}
return true
}
Visit(n, func(n Node) {
switch n.Op() {
case ONAME:
if n := n.(*Name); n.Class == PFUNC {
do(n.Defn)
}
case ODOTMETH, OCALLPART, OMETHEXPR:
if fn := MethodExprName(n); fn != nil {
do(fn.Defn)
}
case OCLOSURE:
n := n.(*ClosureExpr)
do(n.Func)
}
})
if (min == id || min == id+1) && !n.Func.IsHiddenClosure() {
if (min == id || min == id+1) && !n.IsHiddenClosure() {
// This node is the root of a strongly connected component.
// The original min passed to visitcodelist was v.nodeID[n]+1.


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package ir
import (
"reflect"
@@ -20,10 +20,8 @@ func TestSizeof(t *testing.T) {
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
{Func{}, 124, 224},
{Name{}, 32, 56},
{Param{}, 24, 48},
{Node{}, 76, 128},
{Func{}, 188, 328},
{Name{}, 112, 200},
}
for _, tt := range tests {


@@ -0,0 +1,414 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
import (
"cmd/compile/internal/base"
"cmd/compile/internal/types"
"cmd/internal/src"
)
// A Decl is a declaration of a const, type, or var. (A declared func is a Func.)
type Decl struct {
miniNode
X *Name // the thing being declared
}
func NewDecl(pos src.XPos, op Op, x *Name) *Decl {
n := &Decl{X: x}
n.pos = pos
switch op {
default:
panic("invalid Decl op " + op.String())
case ODCL, ODCLCONST, ODCLTYPE:
n.op = op
}
return n
}
func (*Decl) isStmt() {}
// A Stmt is a Node that can appear as a statement.
// This includes statement-like expressions such as f().
//
// (It's possible it should include <-c, but that would require
// splitting ORECV out of UnaryExpr, which hasn't yet been
// necessary. Maybe instead we will introduce ExprStmt at
// some point.)
type Stmt interface {
Node
isStmt()
}
// A miniStmt is a miniNode with extra fields common to statements.
type miniStmt struct {
miniNode
init Nodes
}
func (*miniStmt) isStmt() {}
func (n *miniStmt) Init() Nodes { return n.init }
func (n *miniStmt) SetInit(x Nodes) { n.init = x }
func (n *miniStmt) PtrInit() *Nodes { return &n.init }
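// A standalone sketch of this embedding pattern with toy types: shared
// statement state lives in an unexported base struct, and each concrete
// statement embeds it as its first field, inheriting the methods.

package main

import "fmt"

type miniStmt struct {
	init []string // pending init statements, standing in for ir's Nodes
}

func (n *miniStmt) Init() []string     { return n.init }
func (n *miniStmt) SetInit(x []string) { n.init = x }
func (n *miniStmt) PtrInit() *[]string { return &n.init }

type assignStmt struct {
	miniStmt
	lhs, rhs string
}

func main() {
	n := &assignStmt{lhs: "x", rhs: "f()"}
	n.SetInit([]string{"tmp := g()"}) // promoted method from miniStmt
	fmt.Println(n.Init(), n.lhs, "=", n.rhs)
}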
// An AssignListStmt is an assignment statement with
// more than one item on at least one side: Lhs = Rhs.
// If Def is true, the assignment is a :=.
type AssignListStmt struct {
miniStmt
Lhs Nodes
Def bool
Rhs Nodes
}
func NewAssignListStmt(pos src.XPos, op Op, lhs, rhs []Node) *AssignListStmt {
n := &AssignListStmt{}
n.pos = pos
n.SetOp(op)
n.Lhs = lhs
n.Rhs = rhs
return n
}
func (n *AssignListStmt) SetOp(op Op) {
switch op {
default:
panic(n.no("SetOp " + op.String()))
case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV, OSELRECV2:
n.op = op
}
}
// An AssignStmt is a simple assignment statement: X = Y.
// If Def is true, the assignment is a :=.
type AssignStmt struct {
miniStmt
X Node
Def bool
Y Node
}
func NewAssignStmt(pos src.XPos, x, y Node) *AssignStmt {
n := &AssignStmt{X: x, Y: y}
n.pos = pos
n.op = OAS
return n
}
func (n *AssignStmt) SetOp(op Op) {
switch op {
default:
panic(n.no("SetOp " + op.String()))
case OAS:
n.op = op
}
}
// An AssignOpStmt is an AsOp= assignment statement: X AsOp= Y.
type AssignOpStmt struct {
miniStmt
X Node
AsOp Op // OADD etc
Y Node
IncDec bool // actually ++ or --
}
func NewAssignOpStmt(pos src.XPos, asOp Op, x, y Node) *AssignOpStmt {
n := &AssignOpStmt{AsOp: asOp, X: x, Y: y}
n.pos = pos
n.op = OASOP
return n
}
// A BlockStmt is a block: { List }.
type BlockStmt struct {
miniStmt
List Nodes
}
func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt {
n := &BlockStmt{}
n.pos = pos
if !pos.IsKnown() {
n.pos = base.Pos
if len(list) > 0 {
n.pos = list[0].Pos()
}
}
n.op = OBLOCK
n.List = list
return n
}
// A BranchStmt is a break, continue, fallthrough, or goto statement.
type BranchStmt struct {
miniStmt
Label *types.Sym // label if present
}
func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt {
switch op {
case OBREAK, OCONTINUE, OFALL, OGOTO:
// ok
default:
panic("NewBranch " + op.String())
}
n := &BranchStmt{Label: label}
n.pos = pos
n.op = op
return n
}
func (n *BranchStmt) Sym() *types.Sym { return n.Label }
// A CaseClause is a case statement in a switch or select: case List: Body.
type CaseClause struct {
miniStmt
Var *Name // declared variable for this case in type switch
List Nodes // list of expressions for switch, early select
Body Nodes
}
func NewCaseStmt(pos src.XPos, list, body []Node) *CaseClause {
n := &CaseClause{List: list, Body: body}
n.pos = pos
n.op = OCASE
return n
}
// A CommClause is a case statement in a select: case Comm: Body.
type CommClause struct {
miniStmt
Comm Node // communication case
Body Nodes
}
func NewCommStmt(pos src.XPos, comm Node, body []Node) *CommClause {
n := &CommClause{Comm: comm, Body: body}
n.pos = pos
n.op = OCASE
return n
}
// A ForStmt is a non-range for loop: for Init; Cond; Post { Body }
// Op can be OFOR or OFORUNTIL (!Cond).
type ForStmt struct {
miniStmt
Label *types.Sym
Cond Node
Late Nodes
Post Node
Body Nodes
HasBreak bool
}
func NewForStmt(pos src.XPos, init Node, cond, post Node, body []Node) *ForStmt {
n := &ForStmt{Cond: cond, Post: post}
n.pos = pos
n.op = OFOR
if init != nil {
n.init = []Node{init}
}
n.Body = body
return n
}
func (n *ForStmt) SetOp(op Op) {
if op != OFOR && op != OFORUNTIL {
panic(n.no("SetOp " + op.String()))
}
n.op = op
}
// A GoDeferStmt is a go or defer statement: go Call / defer Call.
//
// The two opcodes use a single syntax because the implementations
// are very similar: both are concerned with saving Call and running it
// in a different context (a separate goroutine or a later time).
type GoDeferStmt struct {
miniStmt
Call Node
}
func NewGoDeferStmt(pos src.XPos, op Op, call Node) *GoDeferStmt {
n := &GoDeferStmt{Call: call}
n.pos = pos
switch op {
case ODEFER, OGO:
n.op = op
default:
panic("NewGoDeferStmt " + op.String())
}
return n
}
// An IfStmt is an if statement: if Init; Cond { Body } else { Else }.
type IfStmt struct {
miniStmt
Cond Node
Body Nodes
Else Nodes
Likely bool // code layout hint
}
func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt {
n := &IfStmt{Cond: cond}
n.pos = pos
n.op = OIF
n.Body = body
n.Else = els
return n
}
// An InlineMarkStmt is a marker placed just before an inlined body.
type InlineMarkStmt struct {
miniStmt
Index int64
}
func NewInlineMarkStmt(pos src.XPos, index int64) *InlineMarkStmt {
n := &InlineMarkStmt{Index: index}
n.pos = pos
n.op = OINLMARK
return n
}
func (n *InlineMarkStmt) Offset() int64 { return n.Index }
func (n *InlineMarkStmt) SetOffset(x int64) { n.Index = x }
// A LabelStmt is a label statement (just the label, not including the statement it labels).
type LabelStmt struct {
miniStmt
Label *types.Sym // "Label:"
}
func NewLabelStmt(pos src.XPos, label *types.Sym) *LabelStmt {
n := &LabelStmt{Label: label}
n.pos = pos
n.op = OLABEL
return n
}
func (n *LabelStmt) Sym() *types.Sym { return n.Label }
// A RangeStmt is a range loop: for Key, Value = range X { Body }
type RangeStmt struct {
miniStmt
Label *types.Sym
Def bool
X Node
Key Node
Value Node
Body Nodes
HasBreak bool
Prealloc *Name
}
func NewRangeStmt(pos src.XPos, key, value, x Node, body []Node) *RangeStmt {
n := &RangeStmt{X: x, Key: key, Value: value}
n.pos = pos
n.op = ORANGE
n.Body = body
return n
}
// A ReturnStmt is a return statement.
type ReturnStmt struct {
miniStmt
origNode // for typecheckargs rewrite
Results Nodes // return list
}
func NewReturnStmt(pos src.XPos, results []Node) *ReturnStmt {
n := &ReturnStmt{}
n.pos = pos
n.op = ORETURN
n.orig = n
n.Results = results
return n
}
// A SelectStmt is a select statement: select { Cases }.
type SelectStmt struct {
miniStmt
Label *types.Sym
Cases []*CommClause
HasBreak bool
// TODO(rsc): Instead of recording here, replace with a block?
Compiled Nodes // compiled form, after walkSelect
}
func NewSelectStmt(pos src.XPos, cases []*CommClause) *SelectStmt {
n := &SelectStmt{Cases: cases}
n.pos = pos
n.op = OSELECT
return n
}
// A SendStmt is a send statement: X <- Y.
type SendStmt struct {
miniStmt
Chan Node
Value Node
}
func NewSendStmt(pos src.XPos, ch, value Node) *SendStmt {
n := &SendStmt{Chan: ch, Value: value}
n.pos = pos
n.op = OSEND
return n
}
// A SwitchStmt is a switch statement: switch Init; Tag { Cases }.
type SwitchStmt struct {
miniStmt
Tag Node
Cases []*CaseClause
Label *types.Sym
HasBreak bool
// TODO(rsc): Instead of recording here, replace with a block?
Compiled Nodes // compiled form, after walkSwitch
}
func NewSwitchStmt(pos src.XPos, tag Node, cases []*CaseClause) *SwitchStmt {
n := &SwitchStmt{Tag: tag, Cases: cases}
n.pos = pos
n.op = OSWITCH
return n
}
// A TailCallStmt is a tail call statement, which is used for back-end
// code generation to jump directly to another function entirely.
type TailCallStmt struct {
miniStmt
Target *Name
}
func NewTailCallStmt(pos src.XPos, target *Name) *TailCallStmt {
if target.Op() != ONAME || target.Class != PFUNC {
base.FatalfAt(pos, "tail call to non-func %v", target)
}
n := &TailCallStmt{Target: target}
n.pos = pos
n.op = OTAILCALL
return n
}
// A TypeSwitchGuard is the [Name :=] X.(type) in a type switch.
type TypeSwitchGuard struct {
miniNode
Tag *Ident
X Node
Used bool
}
func NewTypeSwitchGuard(pos src.XPos, tag *Ident, x Node) *TypeSwitchGuard {
n := &TypeSwitchGuard{Tag: tag, X: x}
n.pos = pos
n.op = OTYPESW
return n
}


@@ -0,0 +1,72 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
import (
"cmd/compile/internal/types"
"cmd/internal/obj"
)
// Syms holds known symbols.
var Syms struct {
AssertE2I *obj.LSym
AssertE2I2 *obj.LSym
AssertI2I *obj.LSym
AssertI2I2 *obj.LSym
Deferproc *obj.LSym
DeferprocStack *obj.LSym
Deferreturn *obj.LSym
Duffcopy *obj.LSym
Duffzero *obj.LSym
GCWriteBarrier *obj.LSym
Goschedguarded *obj.LSym
Growslice *obj.LSym
Msanread *obj.LSym
Msanwrite *obj.LSym
Msanmove *obj.LSym
Newobject *obj.LSym
Newproc *obj.LSym
Panicdivide *obj.LSym
Panicshift *obj.LSym
PanicdottypeE *obj.LSym
PanicdottypeI *obj.LSym
Panicnildottype *obj.LSym
Panicoverflow *obj.LSym
Raceread *obj.LSym
Racereadrange *obj.LSym
Racewrite *obj.LSym
Racewriterange *obj.LSym
// Wasm
SigPanic *obj.LSym
Staticuint64s *obj.LSym
Typedmemclr *obj.LSym
Typedmemmove *obj.LSym
Udiv *obj.LSym
WriteBarrier *obj.LSym
Zerobase *obj.LSym
ARM64HasATOMICS *obj.LSym
ARMHasVFPv4 *obj.LSym
X86HasFMA *obj.LSym
X86HasPOPCNT *obj.LSym
X86HasSSE41 *obj.LSym
// Wasm
WasmDiv *obj.LSym
// Wasm
WasmMove *obj.LSym
// Wasm
WasmZero *obj.LSym
// Wasm
WasmTruncS *obj.LSym
// Wasm
WasmTruncU *obj.LSym
}
// Pkgs holds known packages.
var Pkgs struct {
Go *types.Pkg
Itab *types.Pkg
Runtime *types.Pkg
Unsafe *types.Pkg
}


@@ -0,0 +1,310 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
import (
"cmd/compile/internal/base"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
)
// Nodes that represent the syntax of a type before type-checking.
// After type-checking, they serve only as shells around a *types.Type.
// Calling TypeNode converts a *types.Type to a Node shell.
// An Ntype is a Node that syntactically looks like a type.
// It can be the raw syntax for a type before typechecking,
// or it can be an OTYPE with Type() set to a *types.Type.
// Note that syntax doesn't guarantee it's a type: an expression
// like *fmt is an Ntype (we don't know whether names are types yet),
// but at least 1+1 is not an Ntype.
type Ntype interface {
Node
CanBeNtype()
}
// A miniType is a minimal type syntax Node implementation,
// to be embedded as the first field in a larger node implementation.
type miniType struct {
miniNode
typ *types.Type
}
func (*miniType) CanBeNtype() {}
func (n *miniType) Type() *types.Type { return n.typ }
// setOTYPE changes n to be an OTYPE node returning t.
// Rewriting the node in place this way should not be strictly
// necessary (we should be able to update the uses with
// proper OTYPE nodes), but it's mostly harmless and easy
// to keep doing for now.
//
// setOTYPE also records t.Nod = self if t.Nod is not already set.
// (Some types are shared by multiple OTYPE nodes, so only
// the first such node is used as t.Nod.)
func (n *miniType) setOTYPE(t *types.Type, self Ntype) {
if n.typ != nil {
panic(n.op.String() + " SetType: type already set")
}
n.op = OTYPE
n.typ = t
t.SetNod(self)
}
func (n *miniType) Sym() *types.Sym { return nil } // for Format OTYPE
func (n *miniType) Implicit() bool { return false } // for Format OTYPE
// A ChanType represents a chan Elem syntax with the direction Dir.
type ChanType struct {
miniType
Elem Ntype
Dir types.ChanDir
}
func NewChanType(pos src.XPos, elem Ntype, dir types.ChanDir) *ChanType {
n := &ChanType{Elem: elem, Dir: dir}
n.op = OTCHAN
n.pos = pos
return n
}
func (n *ChanType) SetOTYPE(t *types.Type) {
n.setOTYPE(t, n)
n.Elem = nil
}
// A MapType represents a map[Key]Value type syntax.
type MapType struct {
miniType
Key Ntype
Elem Ntype
}
func NewMapType(pos src.XPos, key, elem Ntype) *MapType {
n := &MapType{Key: key, Elem: elem}
n.op = OTMAP
n.pos = pos
return n
}
func (n *MapType) SetOTYPE(t *types.Type) {
n.setOTYPE(t, n)
n.Key = nil
n.Elem = nil
}
// A StructType represents a struct { ... } type syntax.
type StructType struct {
miniType
Fields []*Field
}
func NewStructType(pos src.XPos, fields []*Field) *StructType {
n := &StructType{Fields: fields}
n.op = OTSTRUCT
n.pos = pos
return n
}
func (n *StructType) SetOTYPE(t *types.Type) {
n.setOTYPE(t, n)
n.Fields = nil
}
// An InterfaceType represents an interface { ... } type syntax.
type InterfaceType struct {
miniType
Methods []*Field
}
func NewInterfaceType(pos src.XPos, methods []*Field) *InterfaceType {
n := &InterfaceType{Methods: methods}
n.op = OTINTER
n.pos = pos
return n
}
func (n *InterfaceType) SetOTYPE(t *types.Type) {
n.setOTYPE(t, n)
n.Methods = nil
}
// A FuncType represents a func(Args) Results type syntax.
type FuncType struct {
miniType
Recv *Field
Params []*Field
Results []*Field
}
func NewFuncType(pos src.XPos, rcvr *Field, args, results []*Field) *FuncType {
n := &FuncType{Recv: rcvr, Params: args, Results: results}
n.op = OTFUNC
n.pos = pos
return n
}
func (n *FuncType) SetOTYPE(t *types.Type) {
n.setOTYPE(t, n)
n.Recv = nil
n.Params = nil
n.Results = nil
}
// A Field is a declared struct field, interface method, or function argument.
// It is not a Node.
type Field struct {
Pos src.XPos
Sym *types.Sym
Ntype Ntype
Type *types.Type
Embedded bool
IsDDD bool
Note string
Decl *Name
}
func NewField(pos src.XPos, sym *types.Sym, ntyp Ntype, typ *types.Type) *Field {
return &Field{Pos: pos, Sym: sym, Ntype: ntyp, Type: typ}
}
func (f *Field) String() string {
var typ string
if f.Type != nil {
typ = fmt.Sprint(f.Type)
} else {
typ = fmt.Sprint(f.Ntype)
}
if f.Sym != nil {
return fmt.Sprintf("%v %v", f.Sym, typ)
}
return typ
}
// TODO(mdempsky): Make Field a Node again so these can be generated?
// Fields are Nodes in go/ast and cmd/compile/internal/syntax.
func copyField(f *Field) *Field {
if f == nil {
return nil
}
c := *f
return &c
}
func doField(f *Field, do func(Node) bool) bool {
if f == nil {
return false
}
if f.Decl != nil && do(f.Decl) {
return true
}
if f.Ntype != nil && do(f.Ntype) {
return true
}
return false
}
func editField(f *Field, edit func(Node) Node) {
if f == nil {
return
}
if f.Decl != nil {
f.Decl = edit(f.Decl).(*Name)
}
if f.Ntype != nil {
f.Ntype = edit(f.Ntype).(Ntype)
}
}
func copyFields(list []*Field) []*Field {
out := make([]*Field, len(list))
for i, f := range list {
out[i] = copyField(f)
}
return out
}
func doFields(list []*Field, do func(Node) bool) bool {
for _, x := range list {
if doField(x, do) {
return true
}
}
return false
}
func editFields(list []*Field, edit func(Node) Node) {
for _, f := range list {
editField(f, edit)
}
}
// A SliceType represents a []Elem type syntax.
// If DDD is true, it's the ...Elem at the end of a function parameter list.
type SliceType struct {
miniType
Elem Ntype
DDD bool
}
func NewSliceType(pos src.XPos, elem Ntype) *SliceType {
n := &SliceType{Elem: elem}
n.op = OTSLICE
n.pos = pos
return n
}
func (n *SliceType) SetOTYPE(t *types.Type) {
n.setOTYPE(t, n)
n.Elem = nil
}
// An ArrayType represents a [Len]Elem type syntax.
// If Len is nil, the type is a [...]Elem in an array literal.
type ArrayType struct {
miniType
Len Node
Elem Ntype
}
func NewArrayType(pos src.XPos, len Node, elem Ntype) *ArrayType {
n := &ArrayType{Len: len, Elem: elem}
n.op = OTARRAY
n.pos = pos
return n
}
func (n *ArrayType) SetOTYPE(t *types.Type) {
n.setOTYPE(t, n)
n.Len = nil
n.Elem = nil
}
// A typeNode is a Node wrapper for type t.
type typeNode struct {
miniNode
typ *types.Type
}
func newTypeNode(pos src.XPos, typ *types.Type) *typeNode {
n := &typeNode{typ: typ}
n.pos = pos
n.op = OTYPE
return n
}
func (n *typeNode) Type() *types.Type { return n.typ }
func (n *typeNode) Sym() *types.Sym { return n.typ.Sym() }
func (n *typeNode) CanBeNtype() {}
// TypeNode returns the Node representing the type t.
func TypeNode(t *types.Type) Ntype {
if n := t.Obj(); n != nil {
if n.Type() != t {
base.Fatalf("type skew: %v has type %v, but expected %v", n, n.Type(), t)
}
return n.(Ntype)
}
return newTypeNode(src.NoXPos, t)
}


@@ -0,0 +1,171 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
import (
"go/constant"
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/types"
)
func ConstType(n Node) constant.Kind {
if n == nil || n.Op() != OLITERAL {
return constant.Unknown
}
return n.Val().Kind()
}
// ConstValue returns the constant value stored in n as an interface{}.
// It returns int64s for ints and runes, float64s for floats,
// and complex128s for complex values.
func ConstValue(n Node) interface{} {
switch v := n.Val(); v.Kind() {
default:
base.Fatalf("unexpected constant: %v", v)
panic("unreachable")
case constant.Bool:
return constant.BoolVal(v)
case constant.String:
return constant.StringVal(v)
case constant.Int:
return IntVal(n.Type(), v)
case constant.Float:
return Float64Val(v)
case constant.Complex:
return complex(Float64Val(constant.Real(v)), Float64Val(constant.Imag(v)))
}
}
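// The kind switch can be exercised outside the compiler with only the
// standard go/constant package; the sketch below mirrors ConstValue's
// shape, with error handling simplified to a panic.

package main

import (
	"fmt"
	"go/constant"
)

func constValue(v constant.Value) interface{} {
	switch v.Kind() {
	case constant.Bool:
		return constant.BoolVal(v)
	case constant.String:
		return constant.StringVal(v)
	case constant.Int:
		x, _ := constant.Int64Val(v)
		return x
	case constant.Float:
		x, _ := constant.Float64Val(v)
		return x
	case constant.Complex:
		r, _ := constant.Float64Val(constant.Real(v))
		i, _ := constant.Float64Val(constant.Imag(v))
		return complex(r, i)
	}
	panic("unexpected constant kind")
}

func main() {
	fmt.Println(constValue(constant.MakeInt64(42)))    // 42
	fmt.Println(constValue(constant.MakeString("hi"))) // hi
	fmt.Println(constValue(constant.MakeFloat64(2.5))) // 2.5
}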
// IntVal returns v converted to int64.
// Note: if t is uint64, very large values will be converted to negative int64.
func IntVal(t *types.Type, v constant.Value) int64 {
if t.IsUnsigned() {
if x, ok := constant.Uint64Val(v); ok {
return int64(x)
}
} else {
if x, ok := constant.Int64Val(v); ok {
return x
}
}
base.Fatalf("%v out of range for %v", v, t)
panic("unreachable")
}
func Float64Val(v constant.Value) float64 {
if x, _ := constant.Float64Val(v); !math.IsInf(x, 0) {
return x + 0 // avoid -0 (should not be needed, but be conservative)
}
base.Fatalf("bad float64 value: %v", v)
panic("unreachable")
}
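// The `x + 0` trick above relies on IEEE 754 addition: under the default
// round-to-nearest mode, (-0) + (+0) is +0, so adding zero normalizes a
// negative zero. A standalone demonstration:

package main

import (
	"fmt"
	"math"
)

func main() {
	x := math.Copysign(0, -1)        // -0
	fmt.Println(math.Signbit(x))     // true
	fmt.Println(math.Signbit(x + 0)) // false: -0 + 0 == +0
}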
func AssertValidTypeForConst(t *types.Type, v constant.Value) {
if !ValidTypeForConst(t, v) {
base.Fatalf("%v does not represent %v", t, v)
}
}
func ValidTypeForConst(t *types.Type, v constant.Value) bool {
switch v.Kind() {
case constant.Unknown:
return OKForConst[t.Kind()]
case constant.Bool:
return t.IsBoolean()
case constant.String:
return t.IsString()
case constant.Int:
return t.IsInteger()
case constant.Float:
return t.IsFloat()
case constant.Complex:
return t.IsComplex()
}
base.Fatalf("unexpected constant kind: %v", v)
panic("unreachable")
}
// NewLiteral returns a new untyped constant with value v.
func NewLiteral(v constant.Value) Node {
return NewBasicLit(base.Pos, v)
}
func idealType(ct constant.Kind) *types.Type {
switch ct {
case constant.String:
return types.UntypedString
case constant.Bool:
return types.UntypedBool
case constant.Int:
return types.UntypedInt
case constant.Float:
return types.UntypedFloat
case constant.Complex:
return types.UntypedComplex
}
base.Fatalf("unexpected Ctype: %v", ct)
return nil
}
var OKForConst [types.NTYPE]bool
// CanInt64 reports whether it is safe to call Int64Val() on n.
func CanInt64(n Node) bool {
if !IsConst(n, constant.Int) {
return false
}
// If the value inside n cannot be represented as an int64, the
// return value of Int64Val is undefined.
_, ok := constant.Int64Val(n.Val())
return ok
}
// Int64Val returns n as an int64.
// n must be an integer or rune constant.
func Int64Val(n Node) int64 {
if !IsConst(n, constant.Int) {
base.Fatalf("Int64Val(%v)", n)
}
x, ok := constant.Int64Val(n.Val())
if !ok {
base.Fatalf("Int64Val(%v)", n)
}
return x
}
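// The exactness checks above mirror the ok results of go/constant, which
// can be seen directly with the standard package:

package main

import (
	"fmt"
	"go/constant"
	"math"
)

func main() {
	small := constant.MakeInt64(42)
	huge := constant.MakeUint64(math.MaxUint64)

	_, ok := constant.Int64Val(small)
	fmt.Println(ok) // true: representable as int64
	_, ok = constant.Int64Val(huge)
	fmt.Println(ok) // false: does not fit in an int64
}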
// Uint64Val returns n as a uint64.
// n must be an integer or rune constant.
func Uint64Val(n Node) uint64 {
if !IsConst(n, constant.Int) {
base.Fatalf("Uint64Val(%v)", n)
}
x, ok := constant.Uint64Val(n.Val())
if !ok {
base.Fatalf("Uint64Val(%v)", n)
}
return x
}
// BoolVal returns n as a bool.
// n must be a boolean constant.
func BoolVal(n Node) bool {
if !IsConst(n, constant.Bool) {
base.Fatalf("BoolVal(%v)", n)
}
return constant.BoolVal(n.Val())
}
// StringVal returns the value of a literal string Node as a string.
// n must be a string constant.
func StringVal(n Node) string {
if !IsConst(n, constant.String) {
base.Fatalf("StringVal(%v)", n)
}
return constant.StringVal(n.Val())
}


@@ -0,0 +1,186 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// IR visitors for walking the IR tree.
//
// The lowest-level helpers are DoChildren and EditChildren, which
// nodes help implement and which provide control over whether and when
// recursion happens during the walk of the IR.
//
// Although these are both useful directly, two simpler patterns
// are fairly common and also provided: Visit and Any.
package ir
// DoChildren calls do(x) on each of n's non-nil child nodes x.
// If any call returns true, DoChildren stops and returns true.
// Otherwise, DoChildren returns false.
//
// Note that DoChildren(n, do) only calls do(x) for n's immediate children.
// If x's children should be processed, then do(x) must call DoChildren(x, do).
//
// DoChildren allows constructing general traversals of the IR graph
// that can stop early if needed. The most general usage is:
//
// var do func(ir.Node) bool
// do = func(x ir.Node) bool {
// ... processing BEFORE visiting children ...
// if ... should visit children ... {
// ir.DoChildren(x, do)
// ... processing AFTER visiting children ...
// }
// if ... should stop parent DoChildren call from visiting siblings ... {
// return true
// }
// return false
// }
// do(root)
//
// Since DoChildren does not return true itself, if the do function
// never wants to stop the traversal, it can assume that DoChildren
// itself will always return false, simplifying to:
//
// var do func(ir.Node) bool
// do = func(x ir.Node) bool {
// ... processing BEFORE visiting children ...
// if ... should visit children ... {
// ir.DoChildren(x, do)
// }
// ... processing AFTER visiting children ...
// return false
// }
// do(root)
//
// The Visit function illustrates a further simplification of the pattern,
// only processing before visiting children and never stopping:
//
// func Visit(n ir.Node, visit func(ir.Node)) {
// if n == nil {
// return
// }
// var do func(ir.Node) bool
// do = func(x ir.Node) bool {
// visit(x)
// return ir.DoChildren(x, do)
// }
// do(n)
// }
//
// The Any function illustrates a different simplification of the pattern,
// visiting each node and then its children, recursively, until finding
// a node x for which cond(x) returns true, at which point the entire
// traversal stops and returns true.
//
// func Any(n ir.Node, cond func(ir.Node) bool) bool {
// if n == nil {
// return false
// }
// var do func(ir.Node) bool
// do = func(x ir.Node) bool {
// return cond(x) || ir.DoChildren(x, do)
// }
// return do(n)
// }
//
// Visit and Any are presented above as examples of how to use
// DoChildren effectively, but of course, usage that fits within the
// simplifications captured by Visit or Any will be best served
// by directly calling the ones provided by this package.
func DoChildren(n Node, do func(Node) bool) bool {
if n == nil {
return false
}
return n.doChildren(do)
}
// Visit visits each non-nil node x in the IR tree rooted at n
// in a depth-first preorder traversal, calling visit on each node visited.
func Visit(n Node, visit func(Node)) {
if n == nil {
return
}
var do func(Node) bool
do = func(x Node) bool {
visit(x)
return DoChildren(x, do)
}
do(n)
}
// VisitList calls Visit(x, visit) for each node x in the list.
func VisitList(list Nodes, visit func(Node)) {
for _, x := range list {
Visit(x, visit)
}
}
// Any looks for a non-nil node x in the IR tree rooted at n
// for which cond(x) returns true.
// Any considers nodes in a depth-first, preorder traversal.
// When Any finds a node x such that cond(x) is true,
// Any ends the traversal and returns true immediately.
// Otherwise Any returns false after completing the entire traversal.
func Any(n Node, cond func(Node) bool) bool {
if n == nil {
return false
}
var do func(Node) bool
do = func(x Node) bool {
return cond(x) || DoChildren(x, do)
}
return do(n)
}
// AnyList calls Any(x, cond) for each node x in the list, in order.
// If any call returns true, AnyList stops and returns true.
// Otherwise, AnyList returns false after calling Any(x, cond)
// for every x in the list.
func AnyList(list Nodes, cond func(Node) bool) bool {
for _, x := range list {
if Any(x, cond) {
return true
}
}
return false
}
// EditChildren edits the child nodes of n, replacing each child x with edit(x).
//
// Note that EditChildren(n, edit) only calls edit(x) for n's immediate children.
// If x's children should be processed, then edit(x) must call EditChildren(x, edit).
//
// EditChildren allows constructing general editing passes of the IR graph.
// The most general usage is:
//
// var edit func(ir.Node) ir.Node
// edit = func(x ir.Node) ir.Node {
// ... processing BEFORE editing children ...
// if ... should edit children ... {
// EditChildren(x, edit)
// ... processing AFTER editing children ...
// }
// ... return x ...
// }
// n = edit(n)
//
// EditChildren edits the node in place. To edit a copy, call Copy first.
// As an example, a simple deep copy implementation would be:
//
// func deepCopy(n ir.Node) ir.Node {
// var edit func(ir.Node) ir.Node
// edit = func(x ir.Node) ir.Node {
// x = ir.Copy(x)
// ir.EditChildren(x, edit)
// return x
// }
// return edit(n)
// }
//
// Of course, in this case it is better to call ir.DeepCopy than to build one anew.
func EditChildren(n Node, edit func(Node) Node) {
if n == nil {
return
}
n.editChildren(edit)
}
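// The helpers in this file can be mirrored on any tree shape. A
// standalone toy version over a binary expression tree (the names are
// illustrative, not the compiler's):

package main

import "fmt"

type node struct {
	op          string
	left, right *node
}

// doChildren calls do on each non-nil child, stopping early on true,
// like the doChildren methods that ir nodes implement.
func doChildren(n *node, do func(*node) bool) bool {
	if n.left != nil && do(n.left) {
		return true
	}
	if n.right != nil && do(n.right) {
		return true
	}
	return false
}

// visit is a depth-first preorder traversal, like ir.Visit.
func visit(n *node, f func(*node)) {
	var do func(*node) bool
	do = func(x *node) bool {
		f(x)
		return doChildren(x, do)
	}
	do(n)
}

// any stops at the first node satisfying cond, like ir.Any.
func any(n *node, cond func(*node) bool) bool {
	var do func(*node) bool
	do = func(x *node) bool {
		return cond(x) || doChildren(x, do)
	}
	return do(n)
}

func main() {
	tree := &node{op: "+",
		left:  &node{op: "x"},
		right: &node{op: "*", left: &node{op: "y"}, right: &node{op: "z"}},
	}
	visit(tree, func(n *node) { fmt.Print(n.op, " ") }) // + x * y z
	fmt.Println(any(tree, func(n *node) bool { return n.op == "y" })) // true
}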

Some files were not shown because too many files have changed in this diff.