runtime: support runtime.freegc in size-specialized mallocs for noscan objects

This CL is part of a set of CLs that attempt to reduce how much work the
GC must do. See the design in https://go.dev/design/74299-runtime-freegc

This CL updates smallNoScanStub in malloc_stubs.go to reuse
heap objects that have been freed by runtime.freegc calls, and generates
the corresponding size-specialized code in malloc_generated.go.

This CL adds support in the specialized mallocs only for noscan
heap objects (objects without pointers). A later CL handles objects
with pointers.
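
For context, noscan vs. pointer-bearing is a property of the element type;
a minimal illustration (hypothetical example types, not from this CL):

	// Hypothetical types for illustration only.
	type point struct{ x, y float64 } // no pointer fields: a noscan object, covered by this CL
	type node struct {
		val  int
		next *node // pointer field: a scannable object, handled in a later CL
	}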

While we are here, we leave a couple of breadcrumbs in mkmalloc.go on
how to do the generation.
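
(For reference: per the new breadcrumbs, regeneration amounts to running the
generator from within the runtime tree, roughly

	cd src/runtime/_mkmalloc && go run .

assuming the generator directory is runtime/_mkmalloc as the added comment
states; this rewrites malloc_generated.go.)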

Updates #74299

Change-Id: I2657622601a27211554ee862fce057e101767a70
Reviewed-on: https://go-review.googlesource.com/c/go/+/715761
Reviewed-by: Junyang Shao <shaojunyang@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Authored by thepudds 2025-11-09 09:24:22 -05:00, committed by thepudds
parent c3708350a4
commit 50128a2154
5 changed files with 693 additions and 10 deletions

mkmalloc.go

@@ -254,7 +254,8 @@ func inline(config generatorConfig) []byte {
 	}
 	// Write out the package and import declarations.
-	out.WriteString("// Code generated by mkmalloc.go; DO NOT EDIT.\n\n")
+	out.WriteString("// Code generated by mkmalloc.go; DO NOT EDIT.\n")
+	out.WriteString("// See overview in malloc_stubs.go.\n\n")
 	out.WriteString("package " + f.Name.Name + "\n\n")
 	for _, importDecl := range importDecls {
 		out.Write(mustFormatNode(fset, importDecl))

malloc.go

@@ -1094,6 +1094,8 @@ const sizeSpecializedMallocEnabled = goexperiment.SizeSpecializedMalloc && GOOS
 // implementation and the corresponding allocation-related changes: the experiment must be
 // enabled, and none of the memory sanitizers should be enabled. We allow the race detector,
 // in contrast to sizeSpecializedMallocEnabled.
+// TODO(thepudds): it would be nice to check Valgrind integration, though there are some hints
+// there might not be any canned tests in tree for Go's integration with Valgrind.
 const runtimeFreegcEnabled = goexperiment.RuntimeFreegc && !asanenabled && !msanenabled && !valgrindenabled

 // Allocate an object of size bytes.
@@ -1966,10 +1968,15 @@ const (
 // or roughly when the liveness analysis of the compiler
 // would otherwise have determined ptr's object is reclaimable by the GC.
 func freegc(ptr unsafe.Pointer, size uintptr, noscan bool) bool {
-	if !runtimeFreegcEnabled || sizeSpecializedMallocEnabled || !reusableSize(size) {
-		// TODO(thepudds): temporarily disable freegc with SizeSpecializedMalloc until we finish integrating.
+	if !runtimeFreegcEnabled || !reusableSize(size) {
 		return false
 	}
+	if sizeSpecializedMallocEnabled && !noscan {
+		// TODO(thepudds): temporarily disable freegc with SizeSpecializedMalloc for pointer types
+		// until we finish integrating.
+		return false
+	}
 	if ptr == nil {
 		throw("freegc nil")
 	}
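
The net effect of the new gating: with GOEXPERIMENT=sizespecializedmalloc
enabled, freegc now declines only pointer-bearing objects instead of declining
everything. An illustration (hypothetical arguments, not from the CL):

	// Assuming runtimeFreegcEnabled and sizeSpecializedMallocEnabled are both true,
	// and 24 is a reusable size:
	//   freegc(p, 24, true)  // noscan: proceeds, may record p for reuse
	//   freegc(p, 24, false) // has pointers: returns false until a later CL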

malloc_generated.go

@@ -1,4 +1,5 @@
 // Code generated by mkmalloc.go; DO NOT EDIT.
+// See overview in malloc_stubs.go.

 package runtime
@@ -6400,6 +6401,32 @@ func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
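
Note the assist-credit handling in this reuse epilogue: mallocStub has already
charged assist credit for the requested size via deductAssistCredit, so
subtracting elemsize - size from gcAssistBytes here brings the total charge up
to the full size-class slot. A worked example of the accounting (my reading of
the code; the numbers are hypothetical):

	// Suppose size = 24 and the size class rounds up to elemsize = 32.
	// deductAssistCredit(size) in mallocStub charges 24 bytes;
	// gcAssistBytes -= int64(elemsize - size) here charges the remaining 8;
	// total charged = 32 bytes, matching the memory actually consumed.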
@@ -6497,6 +6524,32 @@ func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -6594,6 +6647,32 @@ func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -6691,6 +6770,32 @@ func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -6788,6 +6893,32 @@ func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -6885,6 +7016,32 @@ func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -6982,6 +7139,32 @@ func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -7079,6 +7262,32 @@ func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -7176,6 +7385,32 @@ func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -7273,6 +7508,32 @@ func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -7370,6 +7631,32 @@ func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -7467,6 +7754,32 @@ func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -7564,6 +7877,32 @@ func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -7661,6 +8000,32 @@ func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -7758,6 +8123,32 @@ func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -7855,6 +8246,32 @@ func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -7952,6 +8369,32 @@ func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -8049,6 +8492,32 @@ func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -8146,6 +8615,32 @@ func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -8243,6 +8738,32 @@ func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -8340,6 +8861,32 @@ func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -8437,6 +8984,32 @@ func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -8534,6 +9107,32 @@ func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -8631,6 +9230,32 @@ func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)
@@ -8728,6 +9353,32 @@ func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	const spc = spanClass(sizeclass<<1) | spanClass(1)
 	span := c.alloc[spc]
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		x := v
+		{
+			if valgrindenabled {
+				valgrindMalloc(x, size)
+			}
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
 	var nextFreeFastResult gclinkptr
 	if span.allocCache != 0 {
 		theBit := sys.TrailingZeros64(span.allocCache)

malloc_stubs.go

@@ -7,6 +7,8 @@
 // to produce a full mallocgc function that's specialized for a span class
 // or specific size in the case of the tiny allocator.
 //
+// To generate the specialized mallocgc functions, do 'go run .' inside runtime/_mkmalloc.
+//
 // To assemble a mallocgc function, the mallocStub function is cloned, and the call to
 // inlinedMalloc is replaced with the inlined body of smallScanNoHeaderStub,
 // smallNoScanStub or tinyStub, depending on the parameters being specialized.
@@ -71,7 +73,8 @@ func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 		}
 	}

-	// Assist the GC if needed.
+	// Assist the GC if needed. (On the reuse path, we currently compensate for this;
+	// changes here might require changes there.)
 	if gcBlackenEnabled != 0 {
 		deductAssistCredit(size)
 	}
@@ -242,6 +245,23 @@ func smallNoScanStub(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
 	c := getMCache(mp)
 	const spc = spanClass(sizeclass<<1) | spanClass(noscanint_)
 	span := c.alloc[spc]
+	// First, check for a reusable object.
+	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
+		// We have a reusable object, use it.
+		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
+		mp.mallocing = 0
+		releasem(mp)
+		// TODO(thepudds): note that the generated return path is essentially duplicated
+		// by the generator. For example, see the two postMallocgcDebug calls and
+		// related duplicated code on the return path currently in the generated
+		// mallocgcSmallNoScanSC2 function. One set of those corresponds to this
+		// return here. We might be able to de-duplicate the generated return path
+		// by updating the generator, perhaps by jumping to a shared return or similar.
+		return v, elemsize
+	}
 	v := nextFreeFastStub(span)
 	if v == 0 {
 		v, span, checkGCTrigger = c.nextFree(spc)


@@ -349,8 +349,10 @@ func testFreegc[T comparable](noscan bool) func(*testing.T) {
 		t.Run("allocs-with-free", func(t *testing.T) {
 			// Same allocations, but now using explicit free so that
 			// no allocs get reported. (Again, not the desired long-term behavior).
-			if SizeSpecializedMallocEnabled {
-				t.Skip("temporarily skipping alloc tests for GOEXPERIMENT=sizespecializedmalloc")
+			if SizeSpecializedMallocEnabled && !noscan {
+				// TODO(thepudds): skip at this point in the stack for size-specialized malloc
+				// with !noscan. Additional integration with sizespecializedmalloc is in a later CL.
+				t.Skip("temporarily skipping alloc tests for GOEXPERIMENT=sizespecializedmalloc for pointer types")
 			}
 			if !RuntimeFreegcEnabled {
 				t.Skip("skipping alloc tests with runtime.freegc disabled")
@@ -370,8 +372,10 @@ func testFreegc[T comparable](noscan bool) func(*testing.T) {
 			// Multiple allocations outstanding before explicitly freeing,
 			// but still within the limit of our smallest free list size
 			// so that no allocs are reported. (Again, not long-term behavior).
-			if SizeSpecializedMallocEnabled {
-				t.Skip("temporarily skipping alloc tests for GOEXPERIMENT=sizespecializedmalloc")
+			if SizeSpecializedMallocEnabled && !noscan {
+				// TODO(thepudds): skip at this point in the stack for size-specialized malloc
+				// with !noscan. Additional integration with sizespecializedmalloc is in a later CL.
+				t.Skip("temporarily skipping alloc tests for GOEXPERIMENT=sizespecializedmalloc for pointer types")
 			}
 			if !RuntimeFreegcEnabled {
 				t.Skip("skipping alloc tests with runtime.freegc disabled")
@@ -514,10 +518,10 @@ func testFreegc[T comparable](noscan bool) func(*testing.T) {
 			// See https://go.dev/cl/717520 for some additional discussion,
 			// including how we can deliberately cause the test to fail currently
 			// if we purposefully introduce some assist credit bugs.
-			if SizeSpecializedMallocEnabled {
+			if SizeSpecializedMallocEnabled && !noscan {
 				// TODO(thepudds): skip this test at this point in the stack; later CL has
 				// integration with sizespecializedmalloc.
-				t.Skip("temporarily skip assist credit test for GOEXPERIMENT=sizespecializedmalloc")
+				t.Skip("temporarily skip assist credit tests for GOEXPERIMENT=sizespecializedmalloc for pointer types")
 			}
 			if !RuntimeFreegcEnabled {
 				t.Skip("skipping assist credit test with runtime.freegc disabled")