runtime: add specialized malloc functions for sizes up to 512 bytes
This CL adds a generator in runtime/_mkmalloc to generate specialized mallocgc functions for sizes up through 512 bytes. (That's the limit where it's possible to end up in the no-header case when there are scan bits, and where the benefits of the specialized functions diminish significantly according to microbenchmarks.) If the SizeSpecializedMalloc GOEXPERIMENT is turned on, mallocgc will call one of these functions in the no-header case.

malloc_generated.go is the generated file containing the specialized malloc functions. malloc_stubs.go contains the templates that are stamped out to create the specialized malloc functions. malloc_tables_generated.go contains the tables that mallocgc uses to select the specialized function to call.

I've had to update the two stdlib_test.go files to account for the new submodule mkmalloc is in. mprof_test accounts for the changes in the stacks, since different functions can be called in some cases. I still need to investigate heapsampling.go.

Change-Id: Ia0f68dccdf1c6a200554ae88657cf4d686ace819
Reviewed-on: https://go-review.googlesource.com/c/go/+/665835
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Michael Matloob <matloob@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Parent: d7a38adf4c
Commit: 411c250d64
15 changed files with 10860 additions and 59 deletions
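The table-based dispatch described in the commit message can be illustrated with a minimal, self-contained Go sketch. The names here (allocFn, allocTable, allocSmall, maxSmall) are illustrative assumptions, not the runtime's identifiers; the real tables live in the generated malloc_tables_generated.go and are indexed by the allocation size in the same way.

package main

import "fmt"

// maxSmall mirrors the 512-byte cutoff described in the commit message.
const maxSmall = 512

// allocFn stands in for a size-specialized allocation function.
type allocFn func(size int) string

// allocTable has one entry per byte size from 0 to maxSmall; in the runtime
// the entries are the generated size-class-specialized mallocgc variants.
var allocTable [maxSmall + 1]allocFn

func init() {
	for s := 0; s <= maxSmall; s++ {
		slot := s
		allocTable[slot] = func(size int) string {
			return fmt.Sprintf("specialized path for size %d (table slot %d)", size, slot)
		}
	}
}

// allocSmall dispatches through the table for qualifying sizes and falls
// back to a generic path otherwise, mirroring the shape of the new branch
// in mallocgc.
func allocSmall(size int) string {
	if size <= maxSmall {
		return allocTable[size](size)
	}
	return "generic mallocgc path"
}

func main() {
	fmt.Println(allocSmall(24))
	fmt.Println(allocSmall(4096))
}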
@ -360,6 +360,7 @@ func TestStdKen(t *testing.T) {
|
|||
var excluded = map[string]bool{
|
||||
"builtin": true,
|
||||
"cmd/compile/internal/ssa/_gen": true,
|
||||
"runtime/_mkmalloc": true,
|
||||
}
|
||||
|
||||
// printPackageMu synchronizes the printing of type-checked package files in
|
||||
|
|
|
|||
|
|
@ -362,6 +362,7 @@ func TestStdKen(t *testing.T) {
|
|||
var excluded = map[string]bool{
|
||||
"builtin": true,
|
||||
"cmd/compile/internal/ssa/_gen": true,
|
||||
"runtime/_mkmalloc": true,
|
||||
}
|
||||
|
||||
// printPackageMu synchronizes the printing of type-checked package files in
|
||||
|
|
|
|||
|
|
@ -91,6 +91,8 @@ const (
|
|||
PageShift = 13
|
||||
MaxObjsPerSpan = 1024
|
||||
MaxSizeClassNPages = 10
|
||||
TinySize = 16
|
||||
TinySizeClass = 2
|
||||
)
|
||||
|
||||
var SizeClassToSize = [NumSizeClasses]uint16{0, 8, 16, 24, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256, 288, 320, 352, 384, 416, 448, 480, 512, 576, 640, 704, 768, 896, 1024, 1152, 1280, 1408, 1536, 1792, 2048, 2304, 2688, 3072, 3200, 3456, 4096, 4864, 5376, 6144, 6528, 6784, 6912, 8192, 9472, 9728, 10240, 10880, 12288, 13568, 14336, 16384, 18432, 19072, 20480, 21760, 24576, 27264, 28672, 32768}
src/runtime/_mkmalloc/constants.go: 29 lines (new file)
|
|
@ -0,0 +1,29 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
const (
|
||||
// Constants that we use and will transfer to the runtime.
|
||||
minHeapAlign = 8
|
||||
maxSmallSize = 32 << 10
|
||||
smallSizeDiv = 8
|
||||
smallSizeMax = 1024
|
||||
largeSizeDiv = 128
|
||||
pageShift = 13
|
||||
tinySize = 16
|
||||
|
||||
// Derived constants.
|
||||
pageSize = 1 << pageShift
|
||||
)
|
||||
|
||||
const (
|
||||
maxPtrSize = max(4, 8)
|
||||
maxPtrBits = 8 * maxPtrSize
|
||||
|
||||
// Maximum size smallScanNoHeader would be called for, which is the
|
||||
// maximum value gc.MinSizeForMallocHeader can have on any platform.
|
||||
// gc.MinSizeForMallocHeader is defined as goarch.PtrSize * goarch.PtrBits.
|
||||
smallScanNoHeaderMax = maxPtrSize * maxPtrBits
|
||||
)
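For concreteness, these constants work out as follows: maxPtrSize = max(4, 8) = 8 and maxPtrBits = 8 * 8 = 64, so smallScanNoHeaderMax = 8 * 64 = 512 bytes, which is exactly the 512-byte cutoff mentioned in the commit message.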
src/runtime/_mkmalloc/go.mod: 5 lines (new file)
|
|
@ -0,0 +1,5 @@
|
|||
module runtime/_mkmalloc
|
||||
|
||||
go 1.24
|
||||
|
||||
require golang.org/x/tools v0.33.0
src/runtime/_mkmalloc/go.sum: 2 lines (new file)
|
|
@ -0,0 +1,2 @@
|
|||
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
|
||||
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
src/runtime/_mkmalloc/mkmalloc.go: 605 lines (new file)
|
|
@ -0,0 +1,605 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/ast/astutil"
|
||||
|
||||
internalastutil "runtime/_mkmalloc/astutil"
|
||||
)
|
||||
|
||||
var stdout = flag.Bool("stdout", false, "write sizeclasses source to stdout instead of sizeclasses.go")
|
||||
|
||||
func makeSizeToSizeClass(classes []class) []uint8 {
|
||||
sc := uint8(0)
|
||||
ret := make([]uint8, smallScanNoHeaderMax+1)
|
||||
for i := range ret {
|
||||
if i > classes[sc].size {
|
||||
sc++
|
||||
}
|
||||
ret[i] = sc
|
||||
}
|
||||
return ret
|
||||
}
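As a worked example of this mapping: with the standard size classes (8, 16, 24, 32, ...), makeSizeToSizeClass sends each size to the smallest class that can hold it, so sizes 9 through 16 map to class 2 (16 bytes), size 17 maps to class 3 (24 bytes), and size 512 maps to class 26 (512 bytes), the largest class the specialized functions cover.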
|
||||
|
||||
func main() {
|
||||
log.SetFlags(0)
|
||||
log.SetPrefix("mkmalloc: ")
|
||||
|
||||
classes := makeClasses()
|
||||
sizeToSizeClass := makeSizeToSizeClass(classes)
|
||||
|
||||
if *stdout {
|
||||
if _, err := os.Stdout.Write(mustFormat(generateSizeClasses(classes))); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
sizeclasesesfile := "../../internal/runtime/gc/sizeclasses.go"
|
||||
if err := os.WriteFile(sizeclasesesfile, mustFormat(generateSizeClasses(classes)), 0666); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
outfile := "../malloc_generated.go"
|
||||
if err := os.WriteFile(outfile, mustFormat(inline(specializedMallocConfig(classes, sizeToSizeClass))), 0666); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
tablefile := "../malloc_tables_generated.go"
|
||||
if err := os.WriteFile(tablefile, mustFormat(generateTable(sizeToSizeClass)), 0666); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// withLineNumbers returns b with line numbers added to help debugging.
|
||||
func withLineNumbers(b []byte) []byte {
|
||||
var buf bytes.Buffer
|
||||
i := 1
|
||||
for line := range bytes.Lines(b) {
|
||||
fmt.Fprintf(&buf, "%d: %s", i, line)
|
||||
i++
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// mustFormat formats the input source, or exits if there's an error.
|
||||
func mustFormat(b []byte) []byte {
|
||||
formatted, err := format.Source(b)
|
||||
if err != nil {
|
||||
log.Fatalf("error formatting source: %v\nsource:\n%s\n", err, withLineNumbers(b))
|
||||
}
|
||||
return formatted
|
||||
}
|
||||
|
||||
// generatorConfig is the configuration for the generator. It uses the given file to find
|
||||
// its templates, and generates each of the functions specified by specs.
|
||||
type generatorConfig struct {
|
||||
file string
|
||||
specs []spec
|
||||
}
|
||||
|
||||
// spec is the specification for a function for the inliner to produce. The function gets
|
||||
// the given name, and is produced by starting with the function with the name given by
|
||||
// templateFunc and applying each of the ops.
|
||||
type spec struct {
|
||||
name string
|
||||
templateFunc string
|
||||
ops []op
|
||||
}
|
||||
|
||||
// replacementKind specifies the operation to be done by an op.
|
||||
type replacementKind int
|
||||
|
||||
const (
|
||||
inlineFunc = replacementKind(iota)
|
||||
subBasicLit
|
||||
)
|
||||
|
||||
// op is a single inlining operation for the inliner. Any calls to the function
|
||||
// from are replaced with the inlined body of to. For non-functions, uses of from are
|
||||
// replaced with the basic literal expression given by to.
|
||||
type op struct {
|
||||
kind replacementKind
|
||||
from string
|
||||
to string
|
||||
}
|
||||
|
||||
func smallScanNoHeaderSCFuncName(sc, scMax uint8) string {
|
||||
if sc == 0 || sc > scMax {
|
||||
return "mallocPanic"
|
||||
}
|
||||
return fmt.Sprintf("mallocgcSmallScanNoHeaderSC%d", sc)
|
||||
}
|
||||
|
||||
func tinyFuncName(size uintptr) string {
|
||||
if size == 0 || size > smallScanNoHeaderMax {
|
||||
return "mallocPanic"
|
||||
}
|
||||
return fmt.Sprintf("mallocTiny%d", size)
|
||||
}
|
||||
|
||||
func smallNoScanSCFuncName(sc, scMax uint8) string {
|
||||
if sc < 2 || sc > scMax {
|
||||
return "mallocPanic"
|
||||
}
|
||||
return fmt.Sprintf("mallocgcSmallNoScanSC%d", sc)
|
||||
}
|
||||
|
||||
// specializedMallocConfig produces an inlining config to stamp out the definitions of the size-specialized
|
||||
// malloc functions to be written by mkmalloc.
|
||||
func specializedMallocConfig(classes []class, sizeToSizeClass []uint8) generatorConfig {
|
||||
config := generatorConfig{file: "../malloc_stubs.go"}
|
||||
|
||||
// Only generate specialized functions for sizes that don't have
|
||||
// a header on 64-bit platforms. (They may have a header on 32-bit, but
|
||||
// we will fall back to the non-specialized versions in that case)
|
||||
scMax := sizeToSizeClass[smallScanNoHeaderMax]
|
||||
|
||||
str := fmt.Sprint
|
||||
|
||||
// allocations with pointer bits
|
||||
{
|
||||
const noscan = 0
|
||||
for sc := uint8(0); sc <= scMax; sc++ {
|
||||
if sc == 0 {
|
||||
continue
|
||||
}
|
||||
name := smallScanNoHeaderSCFuncName(sc, scMax)
|
||||
elemsize := classes[sc].size
|
||||
config.specs = append(config.specs, spec{
|
||||
templateFunc: "mallocStub",
|
||||
name: name,
|
||||
ops: []op{
|
||||
{inlineFunc, "inlinedMalloc", "smallScanNoHeaderStub"},
|
||||
{inlineFunc, "heapSetTypeNoHeaderStub", "heapSetTypeNoHeaderStub"},
|
||||
{inlineFunc, "nextFreeFastStub", "nextFreeFastStub"},
|
||||
{inlineFunc, "writeHeapBitsSmallStub", "writeHeapBitsSmallStub"},
|
||||
{subBasicLit, "elemsize_", str(elemsize)},
|
||||
{subBasicLit, "sizeclass_", str(sc)},
|
||||
{subBasicLit, "noscanint_", str(noscan)},
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// allocations without pointer bits
|
||||
{
|
||||
const noscan = 1
|
||||
|
||||
// tiny
|
||||
tinySizeClass := sizeToSizeClass[tinySize]
|
||||
for s := range uintptr(16) {
|
||||
if s == 0 {
|
||||
continue
|
||||
}
|
||||
name := tinyFuncName(s)
|
||||
elemsize := classes[tinySizeClass].size
|
||||
config.specs = append(config.specs, spec{
|
||||
templateFunc: "mallocStub",
|
||||
name: name,
|
||||
ops: []op{
|
||||
{inlineFunc, "inlinedMalloc", "tinyStub"},
|
||||
{inlineFunc, "nextFreeFastTiny", "nextFreeFastTiny"},
|
||||
{subBasicLit, "elemsize_", str(elemsize)},
|
||||
{subBasicLit, "sizeclass_", str(tinySizeClass)},
|
||||
{subBasicLit, "size_", str(s)},
|
||||
{subBasicLit, "noscanint_", str(noscan)},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// non-tiny
|
||||
for sc := uint8(tinySizeClass); sc <= scMax; sc++ {
|
||||
name := smallNoScanSCFuncName(sc, scMax)
|
||||
elemsize := classes[sc].size
|
||||
config.specs = append(config.specs, spec{
|
||||
templateFunc: "mallocStub",
|
||||
name: name,
|
||||
ops: []op{
|
||||
{inlineFunc, "inlinedMalloc", "smallNoScanStub"},
|
||||
{inlineFunc, "nextFreeFastStub", "nextFreeFastStub"},
|
||||
{subBasicLit, "elemsize_", str(elemsize)},
|
||||
{subBasicLit, "sizeclass_", str(sc)},
|
||||
{subBasicLit, "noscanint_", str(noscan)},
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return config
|
||||
}
|
||||
|
||||
// inline applies the inlining operations given by the config.
|
||||
func inline(config generatorConfig) []byte {
|
||||
var out bytes.Buffer
|
||||
|
||||
// Read the template file in.
|
||||
fset := token.NewFileSet()
|
||||
f, err := parser.ParseFile(fset, config.file, nil, 0)
|
||||
if err != nil {
|
||||
log.Fatalf("parsing %s: %v", config.file, err)
|
||||
}
|
||||
|
||||
// Collect the function and import declarations. The function
|
||||
// declarations in the template file provide both the templates
|
||||
// that will be stamped out, and the functions that will be inlined
|
||||
// into them. The imports from the template file will be copied
|
||||
// straight to the output.
|
||||
funcDecls := map[string]*ast.FuncDecl{}
|
||||
importDecls := []*ast.GenDecl{}
|
||||
for _, decl := range f.Decls {
|
||||
switch decl := decl.(type) {
|
||||
case *ast.FuncDecl:
|
||||
funcDecls[decl.Name.Name] = decl
|
||||
case *ast.GenDecl:
|
||||
if decl.Tok.String() == "import" {
|
||||
importDecls = append(importDecls, decl)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Write out the package and import declarations.
|
||||
out.WriteString("// Code generated by mkmalloc.go; DO NOT EDIT.\n\n")
|
||||
out.WriteString("package " + f.Name.Name + "\n\n")
|
||||
for _, importDecl := range importDecls {
|
||||
out.Write(mustFormatNode(fset, importDecl))
|
||||
out.WriteString("\n\n")
|
||||
}
|
||||
|
||||
// Produce each of the inlined functions specified by specs.
|
||||
for _, spec := range config.specs {
|
||||
// Start with a renamed copy of the template function.
|
||||
containingFuncCopy := internalastutil.CloneNode(funcDecls[spec.templateFunc])
|
||||
if containingFuncCopy == nil {
|
||||
log.Fatal("did not find", spec.templateFunc)
|
||||
}
|
||||
containingFuncCopy.Name.Name = spec.name
|
||||
|
||||
// Apply each of the ops given by the specs
|
||||
stamped := ast.Node(containingFuncCopy)
|
||||
for _, repl := range spec.ops {
|
||||
if toDecl, ok := funcDecls[repl.to]; ok {
|
||||
stamped = inlineFunction(stamped, repl.from, toDecl)
|
||||
} else {
|
||||
stamped = substituteWithBasicLit(stamped, repl.from, repl.to)
|
||||
}
|
||||
}
|
||||
|
||||
out.Write(mustFormatNode(fset, stamped))
|
||||
out.WriteString("\n\n")
|
||||
}
|
||||
|
||||
return out.Bytes()
|
||||
}
|
||||
|
||||
// substituteWithBasicLit recursively renames identifiers in the provided AST
|
||||
// according to 'from' and 'to'.
|
||||
func substituteWithBasicLit(node ast.Node, from, to string) ast.Node {
|
||||
// The op is a substitution of an identifier with a basic literal.
|
||||
toExpr, err := parser.ParseExpr(to)
|
||||
if err != nil {
|
||||
log.Fatalf("parsing expr %q: %v", to, err)
|
||||
}
|
||||
if _, ok := toExpr.(*ast.BasicLit); !ok {
|
||||
log.Fatalf("op 'to' expr %q is not a basic literal", to)
|
||||
}
|
||||
return astutil.Apply(node, func(cursor *astutil.Cursor) bool {
|
||||
if isIdentWithName(cursor.Node(), from) {
|
||||
cursor.Replace(toExpr)
|
||||
}
|
||||
return true
|
||||
}, nil)
|
||||
}
|
||||
|
||||
// inlineFunction recursively replaces calls to the function 'from' with the body of the function
|
||||
// 'toDecl'. All calls to 'from' must appear in assignment statements.
|
||||
// The replacement is very simple: it doesn't substitute the arguments for the parameters, so the
|
||||
// arguments to the function call must be the same identifier as the parameters to the function
|
||||
// declared by 'toDecl'. If there are any calls to from where that's not the case there will be a fatal error.
|
||||
func inlineFunction(node ast.Node, from string, toDecl *ast.FuncDecl) ast.Node {
|
||||
return astutil.Apply(node, func(cursor *astutil.Cursor) bool {
|
||||
switch node := cursor.Node().(type) {
|
||||
case *ast.AssignStmt:
|
||||
// TODO(matloob) CHECK function args have same name
|
||||
// as parameters (or parameter is "_").
|
||||
if len(node.Rhs) == 1 && isCallTo(node.Rhs[0], from) {
|
||||
args := node.Rhs[0].(*ast.CallExpr).Args
|
||||
if !argsMatchParameters(args, toDecl.Type.Params) {
|
||||
log.Fatalf("applying op: arguments to %v don't match parameter names of %v: %v", from, toDecl.Name, debugPrint(args...))
|
||||
}
|
||||
replaceAssignment(cursor, node, toDecl)
|
||||
}
|
||||
return false
|
||||
case *ast.CallExpr:
|
||||
// double check that all calls to from appear within an assignment
|
||||
if isCallTo(node, from) {
|
||||
if _, ok := cursor.Parent().(*ast.AssignStmt); !ok {
|
||||
log.Fatalf("applying op: all calls to function %q being replaced must appear in an assignment statement, appears in %T", from, cursor.Parent())
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}, nil)
|
||||
}
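To make the constraint concrete, here is a hypothetical template pair (not taken from malloc_stubs.go) of the kind inlineFunction could handle: the call sits on the right-hand side of an assignment and passes an argument spelled exactly like the callee's parameter, so no argument-to-parameter substitution is needed.

package sketch

type spanInfo struct{ freeIndex uintptr }

// calleeStub ends in a terminating return, as replaceAssignment requires.
func calleeStub(span *spanInfo) uintptr {
	v := span.freeIndex
	return v
}

// templateStub is the kind of function the generator clones: the inliner
// can splice calleeStub's body in place of the call below because the
// argument name matches the parameter name ("span").
func templateStub(span *spanInfo) uintptr {
	v := calleeStub(span)
	return v
}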
|
||||
|
||||
// argsMatchParameters reports whether the arguments given by args are all identifiers
|
||||
// whose names are the same as the corresponding parameters in params.
|
||||
func argsMatchParameters(args []ast.Expr, params *ast.FieldList) bool {
|
||||
var paramIdents []*ast.Ident
|
||||
for _, f := range params.List {
|
||||
paramIdents = append(paramIdents, f.Names...)
|
||||
}
|
||||
|
||||
if len(args) != len(paramIdents) {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := range args {
|
||||
if !isIdentWithName(args[i], paramIdents[i].Name) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// isIdentWithName reports whether the expression is an identifier with the given name.
|
||||
func isIdentWithName(expr ast.Node, name string) bool {
|
||||
ident, ok := expr.(*ast.Ident)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return ident.Name == name
|
||||
}
|
||||
|
||||
// isCallTo reports whether the expression is a call expression to the function with the given name.
|
||||
func isCallTo(expr ast.Expr, name string) bool {
|
||||
callexpr, ok := expr.(*ast.CallExpr)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return isIdentWithName(callexpr.Fun, name)
|
||||
}
|
||||
|
||||
// replaceAssignment replaces an assignment statement where the right hand side is a function call
|
||||
// whose arguments have the same names as the parameters to funcdecl with the body of funcdecl.
|
||||
// It sets the left hand side of the assignment to the return values of the function.
|
||||
func replaceAssignment(cursor *astutil.Cursor, assign *ast.AssignStmt, funcdecl *ast.FuncDecl) {
|
||||
if !hasTerminatingReturn(funcdecl.Body) {
|
||||
log.Fatal("function being inlined must have a return at the end")
|
||||
}
|
||||
|
||||
body := internalastutil.CloneNode(funcdecl.Body)
|
||||
if hasTerminatingAndNonterminatingReturn(funcdecl.Body) {
|
||||
// The function has multiple return points. Add the code that we'd continue with in the caller
|
||||
// after each of the return points. The calling function must have a terminating return
|
||||
// so we don't continue execution in the replaced function after we finish executing the
|
||||
// continue block that we add.
|
||||
body = addContinues(cursor, assign, body, everythingFollowingInParent(cursor)).(*ast.BlockStmt)
|
||||
}
|
||||
|
||||
if len(body.List) < 1 {
|
||||
log.Fatal("replacing with empty bodied function")
|
||||
}
|
||||
|
||||
// The op happens in two steps: first we insert the body of the function being inlined (except for
|
||||
// the final return) before the assignment, and then we change the assignment statement to replace the function call
|
||||
// with the expressions being returned.
|
||||
|
||||
// Determine the expressions being returned.
|
||||
beforeReturn, ret := body.List[:len(body.List)-1], body.List[len(body.List)-1]
|
||||
returnStmt, ok := ret.(*ast.ReturnStmt)
|
||||
if !ok {
|
||||
log.Fatal("last stmt in function we're replacing with should be a return")
|
||||
}
|
||||
results := returnStmt.Results
|
||||
|
||||
// Insert the body up to the final return.
|
||||
for _, stmt := range beforeReturn {
|
||||
cursor.InsertBefore(stmt)
|
||||
}
|
||||
|
||||
// Rewrite the assignment statement.
|
||||
replaceWithAssignment(cursor, assign.Lhs, results, assign.Tok)
|
||||
}
|
||||
|
||||
// hasTerminatingReturn reports whether the block ends in a return statement.
|
||||
func hasTerminatingReturn(block *ast.BlockStmt) bool {
|
||||
_, ok := block.List[len(block.List)-1].(*ast.ReturnStmt)
|
||||
return ok
|
||||
}
|
||||
|
||||
// hasTerminatingAndNonterminatingReturn reports whether the block ends in a return
|
||||
// statement, and also has a return elsewhere in it.
|
||||
func hasTerminatingAndNonterminatingReturn(block *ast.BlockStmt) bool {
|
||||
if !hasTerminatingReturn(block) {
|
||||
return false
|
||||
}
|
||||
var ret bool
|
||||
for i := range block.List[:len(block.List)-1] {
|
||||
ast.Inspect(block.List[i], func(node ast.Node) bool {
|
||||
_, ok := node.(*ast.ReturnStmt)
|
||||
if ok {
|
||||
ret = true
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// everythingFollowingInParent returns a block with everything in the parent block node of the cursor after
|
||||
// the cursor itself. The cursor must point to an element in a block node's list.
|
||||
func everythingFollowingInParent(cursor *astutil.Cursor) *ast.BlockStmt {
|
||||
parent := cursor.Parent()
|
||||
block, ok := parent.(*ast.BlockStmt)
|
||||
if !ok {
|
||||
log.Fatal("internal error: in everythingFollowingInParent, cursor doesn't point to element in block list")
|
||||
}
|
||||
|
||||
blockcopy := internalastutil.CloneNode(block) // get a clean copy
|
||||
blockcopy.List = blockcopy.List[cursor.Index()+1:] // and remove everything before and including stmt
|
||||
|
||||
if _, ok := blockcopy.List[len(blockcopy.List)-1].(*ast.ReturnStmt); !ok {
|
||||
log.Printf("%s", mustFormatNode(token.NewFileSet(), blockcopy))
|
||||
log.Fatal("internal error: parent doesn't end in a return")
|
||||
}
|
||||
return blockcopy
|
||||
}
|
||||
|
||||
// in the case that there's a return in the body being inlined (toBlock), addContinues
|
||||
// replaces those returns that are not at the end of the function with the code in the
|
||||
// caller after the function call that execution would continue with after the return.
|
||||
// The block being added must end in a return.
|
||||
func addContinues(cursor *astutil.Cursor, assignNode *ast.AssignStmt, toBlock *ast.BlockStmt, continueBlock *ast.BlockStmt) ast.Node {
|
||||
if !hasTerminatingReturn(continueBlock) {
|
||||
log.Fatal("the block being continued to in addContinues must end in a return")
|
||||
}
|
||||
applyFunc := func(cursor *astutil.Cursor) bool {
|
||||
ret, ok := cursor.Node().(*ast.ReturnStmt)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
|
||||
if cursor.Parent() == toBlock && cursor.Index() == len(toBlock.List)-1 {
|
||||
return false
|
||||
}
|
||||
|
||||
// This is the opposite of replacing a function call with the body. First
|
||||
// we replace the return statement with the assignment from the caller, and
|
||||
// then add the code we continue with.
|
||||
replaceWithAssignment(cursor, assignNode.Lhs, ret.Results, assignNode.Tok)
|
||||
cursor.InsertAfter(internalastutil.CloneNode(continueBlock))
|
||||
|
||||
return false
|
||||
}
|
||||
return astutil.Apply(toBlock, applyFunc, nil)
|
||||
}
|
||||
|
||||
// debugPrint prints out the expressions given by nodes for debugging.
|
||||
func debugPrint(nodes ...ast.Expr) string {
|
||||
var b strings.Builder
|
||||
for i, node := range nodes {
|
||||
b.Write(mustFormatNode(token.NewFileSet(), node))
|
||||
if i != len(nodes)-1 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// mustFormatNode produces the formatted Go code for the given node.
|
||||
func mustFormatNode(fset *token.FileSet, node any) []byte {
|
||||
var buf bytes.Buffer
|
||||
format.Node(&buf, fset, node)
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// mustMatchExprs makes sure that the expression lists have the same length,
|
||||
// and returns the lists of the expressions on the lhs and rhs where the
|
||||
// identifiers are not the same. These are used to produce assignment statements
|
||||
// where the expressions on the right are assigned to the identifiers on the left.
|
||||
func mustMatchExprs(lhs []ast.Expr, rhs []ast.Expr) ([]ast.Expr, []ast.Expr) {
|
||||
if len(lhs) != len(rhs) {
|
||||
log.Fatal("exprs don't match", debugPrint(lhs...), debugPrint(rhs...))
|
||||
}
|
||||
|
||||
var newLhs, newRhs []ast.Expr
|
||||
for i := range lhs {
|
||||
lhsIdent, ok1 := lhs[i].(*ast.Ident)
|
||||
rhsIdent, ok2 := rhs[i].(*ast.Ident)
|
||||
if ok1 && ok2 && lhsIdent.Name == rhsIdent.Name {
|
||||
continue
|
||||
}
|
||||
newLhs = append(newLhs, lhs[i])
|
||||
newRhs = append(newRhs, rhs[i])
|
||||
}
|
||||
|
||||
return newLhs, newRhs
|
||||
}
|
||||
|
||||
// replaceWithAssignment replaces the node pointed to by the cursor with an assignment of the
|
||||
// left hand side to the righthand side, removing any redundant assignments of a variable to itself,
|
||||
// and replacing an assignment to a single basic literal with a constant declaration.
|
||||
func replaceWithAssignment(cursor *astutil.Cursor, lhs, rhs []ast.Expr, tok token.Token) {
|
||||
newLhs, newRhs := mustMatchExprs(lhs, rhs)
|
||||
if len(newLhs) == 0 {
|
||||
cursor.Delete()
|
||||
return
|
||||
}
|
||||
if len(newRhs) == 1 {
|
||||
if lit, ok := newRhs[0].(*ast.BasicLit); ok {
|
||||
constDecl := &ast.DeclStmt{
|
||||
Decl: &ast.GenDecl{
|
||||
Tok: token.CONST,
|
||||
Specs: []ast.Spec{
|
||||
&ast.ValueSpec{
|
||||
Names: []*ast.Ident{newLhs[0].(*ast.Ident)},
|
||||
Values: []ast.Expr{lit},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
cursor.Replace(constDecl)
|
||||
return
|
||||
}
|
||||
}
|
||||
newAssignment := &ast.AssignStmt{
|
||||
Lhs: newLhs,
|
||||
Rhs: newRhs,
|
||||
Tok: tok,
|
||||
}
|
||||
cursor.Replace(newAssignment)
|
||||
}
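A hedged before/after sketch of this rewrite, with illustrative values: suppose the template caller contains

x, elemsize := inlinedMalloc(size, typ, needzero)

and the inlined body ends in a return of x and elemsize_, where elemsize_ has already been substituted with a basic literal such as 512. The x-to-x pair is dropped as a redundant self-assignment, and the single remaining literal assignment is emitted as a constant declaration in the generated function:

const elemsize = 512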
|
||||
|
||||
// generateTable generates the file with the jump tables for the specialized malloc functions.
|
||||
func generateTable(sizeToSizeClass []uint8) []byte {
|
||||
scMax := sizeToSizeClass[smallScanNoHeaderMax]
|
||||
|
||||
var b bytes.Buffer
|
||||
fmt.Fprintln(&b, `// Code generated by mkmalloc.go; DO NOT EDIT.
|
||||
//go:build !plan9
|
||||
|
||||
package runtime
|
||||
|
||||
import "unsafe"
|
||||
|
||||
var mallocScanTable = [513]func(size uintptr, typ *_type, needzero bool) unsafe.Pointer{`)
|
||||
|
||||
for i := range uintptr(smallScanNoHeaderMax + 1) {
|
||||
fmt.Fprintf(&b, "%s,\n", smallScanNoHeaderSCFuncName(sizeToSizeClass[i], scMax))
|
||||
}
|
||||
|
||||
fmt.Fprintln(&b, `
|
||||
}
|
||||
|
||||
var mallocNoScanTable = [513]func(size uintptr, typ *_type, needzero bool) unsafe.Pointer{`)
|
||||
for i := range uintptr(smallScanNoHeaderMax + 1) {
|
||||
if i < 16 {
|
||||
fmt.Fprintf(&b, "%s,\n", tinyFuncName(i))
|
||||
} else {
|
||||
fmt.Fprintf(&b, "%s,\n", smallNoScanSCFuncName(sizeToSizeClass[i], scMax))
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintln(&b, `
|
||||
}`)
|
||||
|
||||
return b.Bytes()
|
||||
}
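Concretely, each generated table has 513 entries, one per byte size from 0 through 512. In mallocNoScanTable, index 0 maps to mallocPanic, indexes 1 through 15 map to mallocTiny1 through mallocTiny15, and indexes 16 through 512 map to mallocgcSmallNoScanSC2 through mallocgcSmallNoScanSC26, following the size class of each size; mallocScanTable is laid out the same way using the mallocgcSmallScanNoHeaderSC functions.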
src/runtime/_mkmalloc/mkmalloc_test.go: 36 lines (new file)
|
|
@ -0,0 +1,36 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNoChange(t *testing.T) {
|
||||
classes := makeClasses()
|
||||
sizeToSizeClass := makeSizeToSizeClass(classes)
|
||||
|
||||
outfile := "../malloc_generated.go"
|
||||
want, err := os.ReadFile(outfile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
got := mustFormat(inline(specializedMallocConfig(classes, sizeToSizeClass)))
|
||||
if !bytes.Equal(want, got) {
|
||||
t.Fatalf("want:\n%s\ngot:\n%s\n", withLineNumbers(want), withLineNumbers(got))
|
||||
}
|
||||
|
||||
tablefile := "../malloc_tables_generated.go"
|
||||
wanttable, err := os.ReadFile(tablefile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
gotTable := mustFormat(generateTable(sizeToSizeClass))
|
||||
if !bytes.Equal(wanttable, gotTable) {
|
||||
t.Fatalf("want:\n%s\ngot:\n%s\n", withLineNumbers(wanttable), withLineNumbers(gotTable))
|
||||
}
|
||||
}
|
||||
|
|
@ -31,19 +31,14 @@ import (
|
|||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"math/bits"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Generate internal/runtime/gc/msize.go
|
||||
|
||||
var stdout = flag.Bool("stdout", false, "write to stdout instead of sizeclasses.go")
|
||||
|
||||
func main() {
|
||||
func generateSizeClasses(classes []class) []byte {
|
||||
flag.Parse()
|
||||
|
||||
var b bytes.Buffer
|
||||
|
|
@ -51,39 +46,14 @@ func main() {
|
|||
fmt.Fprintln(&b, "//go:generate go -C ../../../runtime/_mkmalloc run mksizeclasses.go")
|
||||
fmt.Fprintln(&b)
|
||||
fmt.Fprintln(&b, "package gc")
|
||||
classes := makeClasses()
|
||||
|
||||
printComment(&b, classes)
|
||||
|
||||
printClasses(&b, classes)
|
||||
|
||||
out, err := format.Source(b.Bytes())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if *stdout {
|
||||
_, err = os.Stdout.Write(out)
|
||||
} else {
|
||||
err = os.WriteFile("../../internal/runtime/gc/sizeclasses.go", out, 0666)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
const (
|
||||
// Constants that we use and will transfer to the runtime.
|
||||
minHeapAlign = 8
|
||||
maxSmallSize = 32 << 10
|
||||
smallSizeDiv = 8
|
||||
smallSizeMax = 1024
|
||||
largeSizeDiv = 128
|
||||
pageShift = 13
|
||||
|
||||
// Derived constants.
|
||||
pageSize = 1 << pageShift
|
||||
)
|
||||
|
||||
type class struct {
|
||||
size int // max size
|
||||
npages int // number of pages
|
||||
|
|
@ -294,6 +264,15 @@ func maxNPages(classes []class) int {
|
|||
}
|
||||
|
||||
func printClasses(w io.Writer, classes []class) {
|
||||
sizeToSizeClass := func(size int) int {
|
||||
for j, c := range classes {
|
||||
if c.size >= size {
|
||||
return j
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
fmt.Fprintln(w, "const (")
|
||||
fmt.Fprintf(w, "MinHeapAlign = %d\n", minHeapAlign)
|
||||
fmt.Fprintf(w, "MaxSmallSize = %d\n", maxSmallSize)
|
||||
|
|
@ -304,6 +283,8 @@ func printClasses(w io.Writer, classes []class) {
|
|||
fmt.Fprintf(w, "PageShift = %d\n", pageShift)
|
||||
fmt.Fprintf(w, "MaxObjsPerSpan = %d\n", maxObjsPerSpan(classes))
|
||||
fmt.Fprintf(w, "MaxSizeClassNPages = %d\n", maxNPages(classes))
|
||||
fmt.Fprintf(w, "TinySize = %d\n", tinySize)
|
||||
fmt.Fprintf(w, "TinySizeClass = %d\n", sizeToSizeClass(tinySize))
|
||||
fmt.Fprintln(w, ")")
|
||||
|
||||
fmt.Fprint(w, "var SizeClassToSize = [NumSizeClasses]uint16 {")
|
||||
|
|
@ -332,12 +313,7 @@ func printClasses(w io.Writer, classes []class) {
|
|||
sc := make([]int, smallSizeMax/smallSizeDiv+1)
|
||||
for i := range sc {
|
||||
size := i * smallSizeDiv
|
||||
for j, c := range classes {
|
||||
if c.size >= size {
|
||||
sc[i] = j
|
||||
break
|
||||
}
|
||||
}
|
||||
sc[i] = sizeToSizeClass(size)
|
||||
}
|
||||
fmt.Fprint(w, "var SizeToSizeClass8 = [SmallSizeMax/SmallSizeDiv+1]uint8 {")
|
||||
for _, v := range sc {
|
||||
|
|
@ -349,12 +325,7 @@ func printClasses(w io.Writer, classes []class) {
|
|||
sc = make([]int, (maxSmallSize-smallSizeMax)/largeSizeDiv+1)
|
||||
for i := range sc {
|
||||
size := smallSizeMax + i*largeSizeDiv
|
||||
for j, c := range classes {
|
||||
if c.size >= size {
|
||||
sc[i] = j
|
||||
break
|
||||
}
|
||||
}
|
||||
sc[i] = sizeToSizeClass(size)
|
||||
}
|
||||
fmt.Fprint(w, "var SizeToSizeClass128 = [(MaxSmallSize-SmallSizeMax)/LargeSizeDiv+1]uint8 {")
|
||||
for _, v := range sc {
|
||||
|
|
|
|||
|
|
@ -127,8 +127,8 @@ const (
|
|||
_64bit = 1 << (^uintptr(0) >> 63) / 2
|
||||
|
||||
// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
|
||||
_TinySize = 16
|
||||
_TinySizeClass = int8(2)
|
||||
_TinySize = gc.TinySize
|
||||
_TinySizeClass = int8(gc.TinySizeClass)
|
||||
|
||||
_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc
|
||||
|
||||
|
|
@ -1080,6 +1080,12 @@ func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, checkGCTrigger
|
|||
// at scale.
|
||||
const doubleCheckMalloc = false
|
||||
|
||||
// sizeSpecializedMallocEnabled is the set of conditions where we enable the size-specialized
|
||||
// mallocgc implementation: the experiment must be enabled, and none of the sanitizers should
|
||||
// be enabled. The tables used to select the size-specialized malloc function do not compile
|
||||
// properly on plan9, so size-specialized malloc is also disabled on plan9.
|
||||
const sizeSpecializedMallocEnabled = goexperiment.SizeSpecializedMalloc && GOOS != "plan9" && !asanenabled && !raceenabled && !msanenabled && !valgrindenabled
|
||||
|
||||
// Allocate an object of size bytes.
|
||||
// Small objects are allocated from the per-P cache's free lists.
|
||||
// Large objects (> 32 kB) are allocated straight from the heap.
|
||||
|
|
@ -1110,6 +1116,17 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
|
|||
return unsafe.Pointer(&zerobase)
|
||||
}
|
||||
|
||||
if sizeSpecializedMallocEnabled && heapBitsInSpan(size) {
|
||||
if typ == nil || !typ.Pointers() {
|
||||
return mallocNoScanTable[size](size, typ, needzero)
|
||||
} else {
|
||||
if !needzero {
|
||||
throw("objects with pointers must be zeroed")
|
||||
}
|
||||
return mallocScanTable[size](size, typ, needzero)
|
||||
}
|
||||
}
|
||||
|
||||
// It's possible for any malloc to trigger sweeping, which may in
|
||||
// turn queue finalizers. Record this dynamic lock edge.
|
||||
// N.B. Compiled away if lockrank experiment is not enabled.
|
||||
|
|
@ -1138,25 +1155,41 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
|
|||
// Actually do the allocation.
|
||||
var x unsafe.Pointer
|
||||
var elemsize uintptr
|
||||
if size <= maxSmallSize-gc.MallocHeaderSize {
|
||||
if typ == nil || !typ.Pointers() {
|
||||
if size < maxTinySize {
|
||||
x, elemsize = mallocgcTiny(size, typ)
|
||||
} else {
|
||||
if sizeSpecializedMallocEnabled {
|
||||
// we know that heapBitsInSpan is true.
|
||||
if size <= maxSmallSize-gc.MallocHeaderSize {
|
||||
if typ == nil || !typ.Pointers() {
|
||||
x, elemsize = mallocgcSmallNoscan(size, typ, needzero)
|
||||
}
|
||||
} else {
|
||||
if !needzero {
|
||||
throw("objects with pointers must be zeroed")
|
||||
}
|
||||
if heapBitsInSpan(size) {
|
||||
x, elemsize = mallocgcSmallScanNoHeader(size, typ)
|
||||
} else {
|
||||
if !needzero {
|
||||
throw("objects with pointers must be zeroed")
|
||||
}
|
||||
x, elemsize = mallocgcSmallScanHeader(size, typ)
|
||||
}
|
||||
} else {
|
||||
x, elemsize = mallocgcLarge(size, typ, needzero)
|
||||
}
|
||||
} else {
|
||||
x, elemsize = mallocgcLarge(size, typ, needzero)
|
||||
if size <= maxSmallSize-gc.MallocHeaderSize {
|
||||
if typ == nil || !typ.Pointers() {
|
||||
if size < maxTinySize {
|
||||
x, elemsize = mallocgcTiny(size, typ)
|
||||
} else {
|
||||
x, elemsize = mallocgcSmallNoscan(size, typ, needzero)
|
||||
}
|
||||
} else {
|
||||
if !needzero {
|
||||
throw("objects with pointers must be zeroed")
|
||||
}
|
||||
if heapBitsInSpan(size) {
|
||||
x, elemsize = mallocgcSmallScanNoHeader(size, typ)
|
||||
} else {
|
||||
x, elemsize = mallocgcSmallScanHeader(size, typ)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
x, elemsize = mallocgcLarge(size, typ, needzero)
|
||||
}
|
||||
}
|
||||
|
||||
// Notify sanitizers, if enabled.
|
||||
|
src/runtime/malloc_generated.go: 8468 lines (new file; diff suppressed because it is too large)
src/runtime/malloc_stubs.go: 586 lines (new file)
|
|
@ -0,0 +1,586 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file contains stub functions that are not meant to be called directly,
|
||||
// but that will be assembled together using the inlining logic in runtime/_mkmalloc
|
||||
// to produce a full mallocgc function that's specialized for a span class
|
||||
// or specific size in the case of the tiny allocator.
|
||||
//
|
||||
// To assemble a mallocgc function, the mallocStub function is cloned, and the call to
|
||||
// inlinedMalloc is replaced with the inlined body of smallScanNoHeaderStub,
|
||||
// smallNoScanStub or tinyStub, depending on the parameters being specialized.
|
||||
//
|
||||
// The size_ (for the tiny case) and elemsize_, sizeclass_, and noscanint_ (for all three cases)
|
||||
// identifiers are replaced with the value of the parameter in the specialized case.
|
||||
// The nextFreeFastStub, nextFreeFastTiny, heapSetTypeNoHeaderStub, and writeHeapBitsSmallStub
|
||||
// functions are also inlined by _mkmalloc.
|
||||
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"internal/goarch"
|
||||
"internal/runtime/sys"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// These identifiers will all be replaced by the inliner. So their values don't
|
||||
// really matter: they just need to be set so that the stub functions, which
|
||||
// will never be used on their own, can compile. elemsize_ can't be set to
|
||||
// zero because we divide by it in nextFreeFastTiny, and the compiler would
|
||||
// complain about a division by zero. Its replaced value will always be greater
|
||||
// than zero.
|
||||
const elemsize_ = 8
|
||||
const sizeclass_ = 0
|
||||
const noscanint_ = 0
|
||||
const size_ = 0
|
||||
|
||||
func malloc0(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
|
||||
if doubleCheckMalloc {
|
||||
if gcphase == _GCmarktermination {
|
||||
throw("mallocgc called with gcphase == _GCmarktermination")
|
||||
}
|
||||
}
|
||||
|
||||
// Short-circuit zero-sized allocation requests.
|
||||
return unsafe.Pointer(&zerobase)
|
||||
}
|
||||
|
||||
func mallocPanic(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
|
||||
panic("not defined for sizeclass")
|
||||
}
|
||||
|
||||
func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
|
||||
if doubleCheckMalloc {
|
||||
if gcphase == _GCmarktermination {
|
||||
throw("mallocgc called with gcphase == _GCmarktermination")
|
||||
}
|
||||
}
|
||||
|
||||
// It's possible for any malloc to trigger sweeping, which may in
|
||||
// turn queue finalizers. Record this dynamic lock edge.
|
||||
// N.B. Compiled away if lockrank experiment is not enabled.
|
||||
lockRankMayQueueFinalizer()
|
||||
|
||||
// Pre-malloc debug hooks.
|
||||
if debug.malloc {
|
||||
if x := preMallocgcDebug(size, typ); x != nil {
|
||||
return x
|
||||
}
|
||||
}
|
||||
|
||||
// Assist the GC if needed.
|
||||
if gcBlackenEnabled != 0 {
|
||||
deductAssistCredit(size)
|
||||
}
|
||||
|
||||
// Actually do the allocation.
|
||||
x, elemsize := inlinedMalloc(size, typ, needzero)
|
||||
|
||||
// Adjust our GC assist debt to account for internal fragmentation.
|
||||
if gcBlackenEnabled != 0 && elemsize != 0 {
|
||||
if assistG := getg().m.curg; assistG != nil {
|
||||
assistG.gcAssistBytes -= int64(elemsize - size)
|
||||
}
|
||||
}
|
||||
|
||||
// Post-malloc debug hooks.
|
||||
if debug.malloc {
|
||||
postMallocgcDebug(x, elemsize, typ)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// inlinedMalloc will never be called. It is defined just so that the compiler can compile
|
||||
// the mallocStub function, which will also never be called, but instead used as a template
|
||||
// to generate a size-specialized malloc function. The call to inlinedMalloc in mallocStub
|
||||
// will be replaced with the inlined body of smallScanNoHeaderStub, smallNoScanStub, or tinyStub
|
||||
// when generating the size-specialized malloc function. See the comment at the top of this
|
||||
// file for more information.
|
||||
func inlinedMalloc(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
|
||||
return unsafe.Pointer(uintptr(0)), 0
|
||||
}
|
||||
|
||||
func doubleCheckSmallScanNoHeader(size uintptr, typ *_type, mp *m) {
|
||||
if mp.mallocing != 0 {
|
||||
throw("malloc deadlock")
|
||||
}
|
||||
if mp.gsignal == getg() {
|
||||
throw("malloc during signal")
|
||||
}
|
||||
if typ == nil || !typ.Pointers() {
|
||||
throw("noscan allocated in scan-only path")
|
||||
}
|
||||
if !heapBitsInSpan(size) {
|
||||
throw("heap bits in not in span for non-header-only path")
|
||||
}
|
||||
}
|
||||
|
||||
func smallScanNoHeaderStub(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
|
||||
const sizeclass = sizeclass_
|
||||
const elemsize = elemsize_
|
||||
|
||||
// Set mp.mallocing to keep from being preempted by GC.
|
||||
mp := acquirem()
|
||||
if doubleCheckMalloc {
|
||||
doubleCheckSmallScanNoHeader(size, typ, mp)
|
||||
}
|
||||
mp.mallocing = 1
|
||||
|
||||
checkGCTrigger := false
|
||||
c := getMCache(mp)
|
||||
const spc = spanClass(sizeclass<<1) | spanClass(noscanint_)
|
||||
span := c.alloc[spc]
|
||||
v := nextFreeFastStub(span)
|
||||
if v == 0 {
|
||||
v, span, checkGCTrigger = c.nextFree(spc)
|
||||
}
|
||||
x := unsafe.Pointer(v)
|
||||
if span.needzero != 0 {
|
||||
memclrNoHeapPointers(x, elemsize)
|
||||
}
|
||||
if goarch.PtrSize == 8 && sizeclass == 1 {
|
||||
// initHeapBits already set the pointer bits for the 8-byte sizeclass
|
||||
// on 64-bit platforms.
|
||||
c.scanAlloc += 8
|
||||
} else {
|
||||
dataSize := size // make the inliner happy
|
||||
x := uintptr(x)
|
||||
scanSize := heapSetTypeNoHeaderStub(x, dataSize, typ, span)
|
||||
c.scanAlloc += scanSize
|
||||
}
|
||||
|
||||
// Ensure that the stores above that initialize x to
|
||||
// type-safe memory and set the heap bits occur before
|
||||
// the caller can make x observable to the garbage
|
||||
// collector. Otherwise, on weakly ordered machines,
|
||||
// the garbage collector could follow a pointer to x,
|
||||
// but see uninitialized memory or stale heap bits.
|
||||
publicationBarrier()
|
||||
|
||||
if writeBarrier.enabled {
|
||||
// Allocate black during GC.
|
||||
// All slots hold nil so no scanning is needed.
|
||||
// This may be racing with GC so do it atomically if there can be
|
||||
// a race marking the bit.
|
||||
gcmarknewobject(span, uintptr(x))
|
||||
} else {
|
||||
// Track the last free index before the mark phase. This field
|
||||
// is only used by the garbage collector. During the mark phase
|
||||
// this is used by the conservative scanner to filter out objects
|
||||
// that are both free and recently-allocated. It's safe to do that
|
||||
// because we allocate-black if the GC is enabled. The conservative
|
||||
// scanner produces pointers out of thin air, so without additional
|
||||
// synchronization it might otherwise observe a partially-initialized
|
||||
// object, which could crash the program.
|
||||
span.freeIndexForScan = span.freeindex
|
||||
}
|
||||
|
||||
// Note cache c only valid while m acquired; see #47302
|
||||
//
|
||||
// N.B. Use the full size because that matches how the GC
|
||||
// will update the mem profile on the "free" side.
|
||||
//
|
||||
// TODO(mknyszek): We should really count the header as part
|
||||
// of gc_sys or something. The code below just pretends it is
|
||||
// internal fragmentation and matches the GC's accounting by
|
||||
// using the whole allocation slot.
|
||||
c.nextSample -= int64(elemsize)
|
||||
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
|
||||
profilealloc(mp, x, elemsize)
|
||||
}
|
||||
mp.mallocing = 0
|
||||
releasem(mp)
|
||||
|
||||
if checkGCTrigger {
|
||||
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
|
||||
gcStart(t)
|
||||
}
|
||||
}
|
||||
|
||||
return x, elemsize
|
||||
}
|
||||
|
||||
func doubleCheckSmallNoScan(typ *_type, mp *m) {
|
||||
if mp.mallocing != 0 {
|
||||
throw("malloc deadlock")
|
||||
}
|
||||
if mp.gsignal == getg() {
|
||||
throw("malloc during signal")
|
||||
}
|
||||
if typ != nil && typ.Pointers() {
|
||||
throw("expected noscan type for noscan alloc")
|
||||
}
|
||||
}
|
||||
|
||||
func smallNoScanStub(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
|
||||
// TODO(matloob): Add functionality to mkmalloc to allow us to inline a non-constant
|
||||
// sizeclass_ and elemsize_ value (instead just set to the expressions to look up the size class
|
||||
// and elemsize. We'd also need to teach mkmalloc that values that are touched by these (specifically
|
||||
// spc below) should turn into vars. This would allow us to generate mallocgcSmallNoScan itself,
|
||||
// so that its code could not diverge from the generated functions.
|
||||
const sizeclass = sizeclass_
|
||||
const elemsize = elemsize_
|
||||
|
||||
// Set mp.mallocing to keep from being preempted by GC.
|
||||
mp := acquirem()
|
||||
if doubleCheckMalloc {
|
||||
doubleCheckSmallNoScan(typ, mp)
|
||||
}
|
||||
mp.mallocing = 1
|
||||
|
||||
checkGCTrigger := false
|
||||
c := getMCache(mp)
|
||||
const spc = spanClass(sizeclass<<1) | spanClass(noscanint_)
|
||||
span := c.alloc[spc]
|
||||
v := nextFreeFastStub(span)
|
||||
if v == 0 {
|
||||
v, span, checkGCTrigger = c.nextFree(spc)
|
||||
}
|
||||
x := unsafe.Pointer(v)
|
||||
if needzero && span.needzero != 0 {
|
||||
memclrNoHeapPointers(x, elemsize)
|
||||
}
|
||||
|
||||
// Ensure that the stores above that initialize x to
|
||||
// type-safe memory and set the heap bits occur before
|
||||
// the caller can make x observable to the garbage
|
||||
// collector. Otherwise, on weakly ordered machines,
|
||||
// the garbage collector could follow a pointer to x,
|
||||
// but see uninitialized memory or stale heap bits.
|
||||
publicationBarrier()
|
||||
|
||||
if writeBarrier.enabled {
|
||||
// Allocate black during GC.
|
||||
// All slots hold nil so no scanning is needed.
|
||||
// This may be racing with GC so do it atomically if there can be
|
||||
// a race marking the bit.
|
||||
gcmarknewobject(span, uintptr(x))
|
||||
} else {
|
||||
// Track the last free index before the mark phase. This field
|
||||
// is only used by the garbage collector. During the mark phase
|
||||
// this is used by the conservative scanner to filter out objects
|
||||
// that are both free and recently-allocated. It's safe to do that
|
||||
// because we allocate-black if the GC is enabled. The conservative
|
||||
// scanner produces pointers out of thin air, so without additional
|
||||
// synchronization it might otherwise observe a partially-initialized
|
||||
// object, which could crash the program.
|
||||
span.freeIndexForScan = span.freeindex
|
||||
}
|
||||
|
||||
// Note cache c only valid while m acquired; see #47302
|
||||
//
|
||||
// N.B. Use the full size because that matches how the GC
|
||||
// will update the mem profile on the "free" side.
|
||||
//
|
||||
// TODO(mknyszek): We should really count the header as part
|
||||
// of gc_sys or something. The code below just pretends it is
|
||||
// internal fragmentation and matches the GC's accounting by
|
||||
// using the whole allocation slot.
|
||||
c.nextSample -= int64(elemsize)
|
||||
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
|
||||
profilealloc(mp, x, elemsize)
|
||||
}
|
||||
mp.mallocing = 0
|
||||
releasem(mp)
|
||||
|
||||
if checkGCTrigger {
|
||||
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
|
||||
gcStart(t)
|
||||
}
|
||||
}
|
||||
return x, elemsize
|
||||
}
|
||||
|
||||
func doubleCheckTiny(size uintptr, typ *_type, mp *m) {
|
||||
if mp.mallocing != 0 {
|
||||
throw("malloc deadlock")
|
||||
}
|
||||
if mp.gsignal == getg() {
|
||||
throw("malloc during signal")
|
||||
}
|
||||
if typ != nil && typ.Pointers() {
|
||||
throw("expected noscan for tiny alloc")
|
||||
}
|
||||
}
|
||||
|
||||
func tinyStub(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
|
||||
const constsize = size_
|
||||
const elemsize = elemsize_
|
||||
|
||||
// Set mp.mallocing to keep from being preempted by GC.
|
||||
mp := acquirem()
|
||||
if doubleCheckMalloc {
|
||||
doubleCheckTiny(constsize, typ, mp)
|
||||
}
|
||||
mp.mallocing = 1
|
||||
|
||||
// Tiny allocator.
|
||||
//
|
||||
// Tiny allocator combines several tiny allocation requests
|
||||
// into a single memory block. The resulting memory block
|
||||
// is freed when all subobjects are unreachable. The subobjects
|
||||
// must be noscan (don't have pointers), this ensures that
|
||||
// the amount of potentially wasted memory is bounded.
|
||||
//
|
||||
// Size of the memory block used for combining (maxTinySize) is tunable.
|
||||
// Current setting is 16 bytes, which relates to 2x worst case memory
|
||||
// wastage (when all but one subobjects are unreachable).
|
||||
// 8 bytes would result in no wastage at all, but provides less
|
||||
// opportunities for combining.
|
||||
// 32 bytes provides more opportunities for combining,
|
||||
// but can lead to 4x worst case wastage.
|
||||
// The best case winning is 8x regardless of block size.
|
||||
//
|
||||
// Objects obtained from tiny allocator must not be freed explicitly.
|
||||
// So when an object will be freed explicitly, we ensure that
|
||||
// its size >= maxTinySize.
|
||||
//
|
||||
// SetFinalizer has a special case for objects potentially coming
|
||||
// from the tiny allocator; in such a case it allows setting finalizers
|
||||
// for an inner byte of a memory block.
|
||||
//
|
||||
// The main targets of tiny allocator are small strings and
|
||||
// standalone escaping variables. On a json benchmark
|
||||
// the allocator reduces number of allocations by ~12% and
|
||||
// reduces heap size by ~20%.
|
||||
c := getMCache(mp)
|
||||
off := c.tinyoffset
|
||||
// Align tiny pointer for required (conservative) alignment.
|
||||
if constsize&7 == 0 {
|
||||
off = alignUp(off, 8)
|
||||
} else if goarch.PtrSize == 4 && constsize == 12 {
|
||||
// Conservatively align 12-byte objects to 8 bytes on 32-bit
|
||||
// systems so that objects whose first field is a 64-bit
|
||||
// value is aligned to 8 bytes and does not cause a fault on
|
||||
// atomic access. See issue 37262.
|
||||
// TODO(mknyszek): Remove this workaround if/when issue 36606
|
||||
// is resolved.
|
||||
off = alignUp(off, 8)
|
||||
} else if constsize&3 == 0 {
|
||||
off = alignUp(off, 4)
|
||||
} else if constsize&1 == 0 {
|
||||
off = alignUp(off, 2)
|
||||
}
|
||||
if off+constsize <= maxTinySize && c.tiny != 0 {
|
||||
// The object fits into existing tiny block.
|
||||
x := unsafe.Pointer(c.tiny + off)
|
||||
c.tinyoffset = off + constsize
|
||||
c.tinyAllocs++
|
||||
mp.mallocing = 0
|
||||
releasem(mp)
|
||||
return x, 0
|
||||
}
|
||||
// Allocate a new maxTinySize block.
|
||||
checkGCTrigger := false
|
||||
span := c.alloc[tinySpanClass]
|
||||
v := nextFreeFastTiny(span)
|
||||
if v == 0 {
|
||||
v, span, checkGCTrigger = c.nextFree(tinySpanClass)
|
||||
}
|
||||
x := unsafe.Pointer(v)
|
||||
(*[2]uint64)(x)[0] = 0 // Always zero
|
||||
(*[2]uint64)(x)[1] = 0
|
||||
// See if we need to replace the existing tiny block with the new one
|
||||
// based on amount of remaining free space.
|
||||
if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
|
||||
// Note: disabled when race detector is on, see comment near end of this function.
|
||||
c.tiny = uintptr(x)
|
||||
c.tinyoffset = constsize
|
||||
}
|
||||
|
||||
// Ensure that the stores above that initialize x to
|
||||
// type-safe memory and set the heap bits occur before
|
||||
// the caller can make x observable to the garbage
|
||||
// collector. Otherwise, on weakly ordered machines,
|
||||
// the garbage collector could follow a pointer to x,
|
||||
// but see uninitialized memory or stale heap bits.
|
||||
publicationBarrier()
|
||||
|
||||
if writeBarrier.enabled {
|
||||
// Allocate black during GC.
|
||||
// All slots hold nil so no scanning is needed.
|
||||
// This may be racing with GC so do it atomically if there can be
|
||||
// a race marking the bit.
|
||||
gcmarknewobject(span, uintptr(x))
|
||||
} else {
|
||||
// Track the last free index before the mark phase. This field
|
||||
// is only used by the garbage collector. During the mark phase
|
||||
// this is used by the conservative scanner to filter out objects
|
||||
// that are both free and recently-allocated. It's safe to do that
|
||||
// because we allocate-black if the GC is enabled. The conservative
|
||||
// scanner produces pointers out of thin air, so without additional
|
||||
// synchronization it might otherwise observe a partially-initialized
|
||||
// object, which could crash the program.
|
||||
span.freeIndexForScan = span.freeindex
|
||||
}
|
||||
|
||||
// Note cache c only valid while m acquired; see #47302
|
||||
//
|
||||
// N.B. Use the full size because that matches how the GC
|
||||
// will update the mem profile on the "free" side.
|
||||
//
|
||||
// TODO(mknyszek): We should really count the header as part
|
||||
// of gc_sys or something. The code below just pretends it is
|
||||
// internal fragmentation and matches the GC's accounting by
|
||||
// using the whole allocation slot.
|
||||
c.nextSample -= int64(elemsize)
|
||||
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
|
||||
profilealloc(mp, x, elemsize)
|
||||
}
|
||||
mp.mallocing = 0
|
||||
releasem(mp)
|
||||
|
||||
if checkGCTrigger {
|
||||
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
|
||||
gcStart(t)
|
||||
}
|
||||
}
|
||||
|
||||
if raceenabled {
|
||||
// Pad tinysize allocations so they are aligned with the end
|
||||
// of the tinyalloc region. This ensures that any arithmetic
|
||||
// that goes off the top end of the object will be detectable
|
||||
// by checkptr (issue 38872).
|
||||
// Note that we disable tinyalloc when raceenabled for this to work.
|
||||
// TODO: This padding is only performed when the race detector
|
||||
// is enabled. It would be nice to enable it if any package
|
||||
// was compiled with checkptr, but there's no easy way to
|
||||
// detect that (especially at compile time).
|
||||
// TODO: enable this padding for all allocations, not just
|
||||
// tinyalloc ones. It's tricky because of pointer maps.
|
||||
// Maybe just all noscan objects?
|
||||
x = add(x, elemsize-constsize)
|
||||
}
|
||||
return x, elemsize
|
||||
}
|
||||
|
||||
// TODO(matloob): Should we let the go compiler inline this instead of using mkmalloc?
|
||||
// We won't be able to use elemsize_ but that's probably ok.
|
||||
func nextFreeFastTiny(span *mspan) gclinkptr {
|
||||
const nbytes = 8192
|
||||
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / elemsize_)
|
||||
var nextFreeFastResult gclinkptr
|
||||
if span.allocCache != 0 {
|
||||
theBit := sys.TrailingZeros64(span.allocCache) // Is there a free object in the allocCache?
|
||||
result := span.freeindex + uint16(theBit)
|
||||
if result < nelems {
|
||||
freeidx := result + 1
|
||||
if !(freeidx%64 == 0 && freeidx != nelems) {
|
||||
span.allocCache >>= uint(theBit + 1)
|
||||
span.freeindex = freeidx
|
||||
span.allocCount++
|
||||
nextFreeFastResult = gclinkptr(uintptr(result)*elemsize_ + span.base())
|
||||
}
|
||||
}
|
||||
}
|
||||
return nextFreeFastResult
|
||||
}
|
||||
|
||||
func nextFreeFastStub(span *mspan) gclinkptr {
|
||||
var nextFreeFastResult gclinkptr
|
||||
if span.allocCache != 0 {
|
||||
theBit := sys.TrailingZeros64(span.allocCache) // Is there a free object in the allocCache?
|
||||
result := span.freeindex + uint16(theBit)
|
||||
if result < span.nelems {
|
||||
freeidx := result + 1
|
||||
if !(freeidx%64 == 0 && freeidx != span.nelems) {
|
||||
span.allocCache >>= uint(theBit + 1)
|
||||
span.freeindex = freeidx
|
||||
span.allocCount++
|
||||
nextFreeFastResult = gclinkptr(uintptr(result)*elemsize_ + span.base())
|
||||
}
|
||||
}
|
||||
}
|
||||
return nextFreeFastResult
|
||||
}
|
||||
|
||||
func heapSetTypeNoHeaderStub(x, dataSize uintptr, typ *_type, span *mspan) uintptr {
|
||||
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(elemsize_)) {
|
||||
throw("tried to write heap bits, but no heap bits in span")
|
||||
}
|
||||
scanSize := writeHeapBitsSmallStub(span, x, dataSize, typ)
|
||||
if doubleCheckHeapSetType {
|
||||
doubleCheckHeapType(x, dataSize, typ, nil, span)
|
||||
}
|
||||
return scanSize
|
||||
}
|
||||
|
||||
// writeHeapBitsSmallStub writes the heap bits for small objects whose ptr/scalar data is
|
||||
// stored as a bitmap at the end of the span.
|
||||
//
|
||||
// Assumes dataSize is <= ptrBits*goarch.PtrSize. x must be a pointer into the span.
|
||||
// heapBitsInSpan(dataSize) must be true. dataSize must be >= typ.Size_.
|
||||
//
|
||||
//go:nosplit
|
||||
func writeHeapBitsSmallStub(span *mspan, x, dataSize uintptr, typ *_type) uintptr {
|
||||
// The objects here are always really small, so a single load is sufficient.
|
||||
src0 := readUintptr(getGCMask(typ))
|
||||
|
||||
const elemsize = elemsize_
|
||||
|
||||
// Create repetitions of the bitmap if we have a small slice backing store.
|
||||
scanSize := typ.PtrBytes
|
||||
src := src0
|
||||
if typ.Size_ == goarch.PtrSize {
|
||||
src = (1 << (dataSize / goarch.PtrSize)) - 1
|
||||
} else {
|
||||
// N.B. We rely on dataSize being an exact multiple of the type size.
|
||||
// The alternative is to be defensive and mask out src to the length
|
||||
// of dataSize. The purpose is to save on one additional masking operation.
|
||||
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
|
||||
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
|
||||
}
|
||||
for i := typ.Size_; i < dataSize; i += typ.Size_ {
|
||||
src |= src0 << (i / goarch.PtrSize)
|
||||
scanSize += typ.Size_
|
||||
}
|
||||
}
|
||||
|
||||
// Since we're never writing more than one uintptr's worth of bits, we're either going
|
||||
// to do one or two writes.
|
||||
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
|
||||
dst := unsafe.Pointer(dstBase)
|
||||
o := (x - span.base()) / goarch.PtrSize
|
||||
i := o / ptrBits
|
||||
j := o % ptrBits
|
||||
const bits uintptr = elemsize / goarch.PtrSize
|
||||
// In the if statement below, we have to do two uintptr writes if the bits
|
||||
// we need to write straddle across two different memory locations. But if
|
||||
// the number of bits we're writing divides evenly into the number of bits
|
||||
// in the uintptr we're writing, this can never happen. Since bitsIsPowerOfTwo
|
||||
// is a compile-time constant in the generated code, in the case where the size is
|
||||
// a power of two less than or equal to ptrBits, the compiler can remove the
|
||||
// 'two writes' branch of the if statement and always do only one write without
|
||||
// the check.
|
||||
const bitsIsPowerOfTwo = bits&(bits-1) == 0
|
||||
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
|
||||
// Two writes.
|
||||
bits0 := ptrBits - j
|
||||
bits1 := bits - bits0
|
||||
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
|
||||
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
|
||||
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
|
||||
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
|
||||
} else {
|
||||
// One write.
|
||||
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
|
||||
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) // We're taking the min so this compiles on 32 bit platforms. But if bits > ptrbits we always take the other branch
|
||||
}
|
||||
|
||||
const doubleCheck = false
|
||||
if doubleCheck {
|
||||
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
|
||||
}
|
||||
return scanSize
|
||||
}
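To make the comment about bitsIsPowerOfTwo concrete, assuming a 64-bit platform (ptrBits = 64, goarch.PtrSize = 8): for a 64-byte class, bits = 64 / 8 = 8, a power of two that divides ptrBits, so j + bits can never straddle a word boundary and the generated function keeps only the single-write path; for a 48-byte class, bits = 48 / 8 = 6, which is not a power of two, so the two-write straddle check remains in the generated code.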
|
||||
|
||||
func writeHeapBitsDoubleCheck(span *mspan, x, dataSize, src, src0, i, j, bits uintptr, typ *_type) {
|
||||
srcRead := span.heapBitsSmallForAddr(x)
|
||||
if srcRead != src {
|
||||
print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n")
|
||||
print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n")
|
||||
print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n")
|
||||
throw("bad pointer bits written for small object")
|
||||
}
|
||||
}
src/runtime/malloc_tables_generated.go: 1038 lines (new file; diff suppressed because it is too large)
src/runtime/malloc_tables_plan9.go: 14 lines (new file)
|
|
@ -0,0 +1,14 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build plan9
|
||||
|
||||
package runtime
|
||||
|
||||
import "unsafe"
|
||||
|
||||
var (
|
||||
mallocScanTable []func(size uintptr, typ *_type, needzero bool) unsafe.Pointer
|
||||
mallocNoScanTable []func(size uintptr, typ *_type, needzero bool) unsafe.Pointer
|
||||
)
|
||||
|
|
@ -452,3 +452,13 @@ func BenchmarkGoroutineIdle(b *testing.B) {
|
|||
close(quit)
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
func TestMkmalloc(t *testing.T) {
|
||||
testenv.MustHaveGoRun(t)
|
||||
testenv.MustHaveExternalNetwork(t) // To download the golang.org/x/tools dependency.
|
||||
output, err := exec.Command("go", "-C", "_mkmalloc", "test").CombinedOutput()
|
||||
t.Logf("test output:\n%s", output)
|
||||
if err != nil {
|
||||
t.Errorf("_mkmalloc tests failed: %v", err)
|
||||
}
|
||||
}