runtime: add scavenging code for new page allocator
This change adds a scavenger for the new page allocator along with
tests. The scavenger walks over the heap backwards once per GC,
looking for memory to scavenge. It walks across the heap without any
lock held, searching optimistically. If it finds what appears to be a
scavenging candidate, it acquires the heap lock and attempts to verify
it; upon verification, it scavenges the memory. Notably, unlike the old
scavenger, it doesn't show any preference for huge pages and instead
follows a stricter last-page-first policy.

Updates #35112.

Change-Id: I0621ef73c999a471843eab2d1307ae5679dd18d6
Reviewed-on: https://go-review.googlesource.com/c/go/+/195697
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
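The search-then-verify step is the heart of the design: because the scavenger reads heap metadata with no lock held, any candidate it finds may be stale by the time it can act on it. Below is a minimal sketch of that pattern, assuming a toy heap representation; heapLock, pages, and scavengePass are hypothetical stand-ins, not the runtime's actual identifiers.

package main

import "sync"

// page is a toy stand-in for the runtime's per-page metadata.
type page struct {
	free      bool // not currently handed out by the allocator
	scavenged bool // already returned to the OS
}

var (
	heapLock sync.Mutex
	pages    []page
)

// scavengePass walks the heap backwards with no lock held. Any
// apparent candidate is re-verified under the heap lock before being
// scavenged, since the allocator may have raced with the search.
func scavengePass() {
	for i := len(pages) - 1; i >= 0; i-- {
		// Optimistic, unlocked read; the result may be stale.
		if !pages[i].free || pages[i].scavenged {
			continue
		}
		heapLock.Lock()
		// Verify the candidate now that we hold the lock.
		if pages[i].free && !pages[i].scavenged {
			pages[i].scavenged = true // stand-in for e.g. madvise
		}
		heapLock.Unlock()
	}
}

func main() {
	pages = make([]page, 8)
	pages[3].free = true
	scavengePass()
}

The backwards iteration is what gives the last-page-first policy mentioned above: the highest-addressed free pages are the first to be returned to the OS.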
This commit is contained in:
parent 39e8cb0faa
commit 73317080e1

7 changed files with 880 additions and 21 deletions
src/runtime/mpagealloc_test.go

@@ -23,8 +23,12 @@ func checkPageAlloc(t *testing.T, want, got *PageAlloc) {
 	for i := gotStart; i < gotEnd; i++ {
 		// Check the bitmaps.
-		if !checkPallocBits(t, got.PallocBits(i), want.PallocBits(i)) {
-			t.Logf("in chunk %d", i)
+		gb, wb := got.PallocData(i), want.PallocData(i)
+		if !checkPallocBits(t, gb.PallocBits(), wb.PallocBits()) {
+			t.Logf("in chunk %d (mallocBits)", i)
+		}
+		if !checkPallocBits(t, gb.Scavenged(), wb.Scavenged()) {
+			t.Logf("in chunk %d (scavenged)", i)
 		}
 	}
 	// TODO(mknyszek): Verify summaries too?
 }
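The widened hunk reflects that each chunk now tracks a scavenged bitmap alongside its allocation bitmap, and checkPageAlloc compares both. Only the PallocData, PallocBits, and Scavenged names are visible above; the layout below is a sketch of one plausible shape for that pairing, not the runtime's actual definition.

package main

import "fmt"

// pallocBits is one bit per page in a chunk (width arbitrary here).
type pallocBits [8]uint64

// pallocData pairs the allocation bitmap with a parallel scavenged
// bitmap; this layout is assumed for illustration only.
type pallocData struct {
	pallocBits pallocBits // set bit = page is allocated
	scavenged  pallocBits // set bit = page was returned to the OS
}

func (d *pallocData) PallocBits() *pallocBits { return &d.pallocBits }
func (d *pallocData) Scavenged() *pallocBits  { return &d.scavenged }

func main() {
	var d pallocData
	d.scavenged[0] = 1 // mark page 0 scavenged
	fmt.Println(d.PallocBits()[0], d.Scavenged()[0])
}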
@@ -310,7 +314,7 @@ func TestPageAllocAlloc(t *testing.T) {
 	for name, v := range tests {
 		v := v
 		t.Run(name, func(t *testing.T) {
-			b := NewPageAlloc(v.before)
+			b := NewPageAlloc(v.before, nil)
 			defer FreePageAlloc(b)
 
 			for iter, i := range v.hits {
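Every remaining hunk in this file is the same mechanical change: NewPageAlloc gains a second argument so a test can seed scavenged state up front, and passing nil keeps the old behavior of no pages scavenged. A hypothetical use of the new argument is sketched below; the parameter's type is inferred from the bDesc/wantDesc call sites further down, not confirmed by the diff.

func TestPageAllocSeedScavenged(t *testing.T) {
	// Hypothetical test: seed the scavenged bitmap directly. The
	// second argument mirrors the first's chunk descriptors.
	scav := map[ChunkIdx][]BitRange{
		BaseChunkIdx: {{0, 64}}, // first 64 pages start out scavenged
	}
	b := NewPageAlloc(map[ChunkIdx][]BitRange{
		BaseChunkIdx: {},
	}, scav)
	defer FreePageAlloc(b)
}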
@@ -318,7 +322,7 @@ func TestPageAllocAlloc(t *testing.T) {
 					t.Fatalf("bad alloc #%d: want 0x%x, got 0x%x", iter+1, i.base, a)
 				}
 			}
-			want := NewPageAlloc(v.after)
+			want := NewPageAlloc(v.after, nil)
 			defer FreePageAlloc(want)
 
 			checkPageAlloc(t, want, b)
@@ -335,7 +339,7 @@ func TestPageAllocExhaust(t *testing.T) {
 			for i := ChunkIdx(0); i < 4; i++ {
 				bDesc[BaseChunkIdx+i] = []BitRange{}
 			}
-			b := NewPageAlloc(bDesc)
+			b := NewPageAlloc(bDesc, nil)
 			defer FreePageAlloc(b)
 
 			// Allocate into b with npages until we've exhausted the heap.
@@ -366,7 +370,7 @@ func TestPageAllocExhaust(t *testing.T) {
 					wantDesc[BaseChunkIdx+i] = []BitRange{}
 				}
 			}
-			want := NewPageAlloc(wantDesc)
+			want := NewPageAlloc(wantDesc, nil)
 			defer FreePageAlloc(want)
 
 			// Check to make sure the heap b matches what we want.
@@ -590,14 +594,15 @@ func TestPageAllocFree(t *testing.T) {
 	for name, v := range tests {
 		v := v
 		t.Run(name, func(t *testing.T) {
-			b := NewPageAlloc(v.before)
+			b := NewPageAlloc(v.before, nil)
 			defer FreePageAlloc(b)
 
 			for _, addr := range v.frees {
 				b.Free(addr, v.npages)
 			}
-			want := NewPageAlloc(v.after)
+
+			want := NewPageAlloc(v.after, nil)
 			defer FreePageAlloc(want)
 
 			checkPageAlloc(t, want, b)
 		})
@@ -641,7 +646,7 @@ func TestPageAllocAllocAndFree(t *testing.T) {
 	for name, v := range tests {
 		v := v
 		t.Run(name, func(t *testing.T) {
-			b := NewPageAlloc(v.init)
+			b := NewPageAlloc(v.init, nil)
 			defer FreePageAlloc(b)
 
 			for iter, i := range v.hits {