runtime: integrate new page allocator into runtime
This change integrates all the bits and pieces of the new page
allocator into the runtime, behind a global constant.

Updates #35112.

Change-Id: I6696bde7bab098a498ab37ed2a2caad2a05d30ec
Reviewed-on: https://go-review.googlesource.com/c/go/+/201764
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
parent 21445b091e
commit 689f6f77f0
6 changed files with 199 additions and 19 deletions
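The "global constant" in the commit message is a compile-time gate: because the condition is a constant rather than a runtime flag, the compiler resolves the branch at build time and the disabled allocator path is dropped. A minimal sketch of that pattern, with invented names and values, assuming the gate is a package-level boolean constant:

package main

import "fmt"

// oldPageAllocator stands in for a package-level compile-time gate.
// Because it is a constant (not a runtime variable), the branch below
// is decided at compile time and the dead path is eliminated.
// The name and value here are illustrative only.
const oldPageAllocator = false

func releasedOld() uint64 { return 0 }       // hypothetical old-allocator accounting
func releasedNew() uint64 { return 1 << 13 } // hypothetical new-allocator accounting

func heapReleased() uint64 {
	if oldPageAllocator {
		return releasedOld()
	}
	return releasedNew()
}

func main() {
	fmt.Println(heapReleased(), "bytes released")
}

Flipping such a constant and rebuilding switches every gated call site at once, which makes it practical to land a large change incrementally and enable it later.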
@@ -12,6 +12,8 @@ import (
 	"unsafe"
 )
 
+const OldPageAllocator = oldPageAllocator
+
 var Fadd64 = fadd64
 var Fsub64 = fsub64
 var Fmul64 = fmul64
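Re-exporting an unexported identifier under an exported alias, as the hunk above does with OldPageAllocator, is the usual Go idiom for exposing package internals to an external test package; the alias conventionally lives in an export_test.go file that is compiled only during tests. A sketch of the idiom across three hypothetical files (package foo and module path example.com/foo are invented):

// foo.go: the real package, with an unexported compile-time gate.
package foo

const oldImpl = false

// export_test.go: compiled only when testing package foo, so it can
// re-export internals without widening the public API.
package foo

const OldImpl = oldImpl

// foo_ext_test.go: the external test package sees the re-export.
package foo_test

import (
	"testing"

	"example.com/foo"
)

func TestGate(t *testing.T) {
	if foo.OldImpl {
		t.Skip("old implementation is gated on")
	}
}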
@@ -354,8 +356,15 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 			slow.BySize[i].Frees = bySize[i].Frees
 		}
 
-		for i := mheap_.free.start(0, 0); i.valid(); i = i.next() {
-			slow.HeapReleased += uint64(i.span().released())
+		if oldPageAllocator {
+			for i := mheap_.free.start(0, 0); i.valid(); i = i.next() {
+				slow.HeapReleased += uint64(i.span().released())
+			}
+		} else {
+			for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
+				pg := mheap_.pages.chunks[i].scavenged.popcntRange(0, pallocChunkPages)
+				slow.HeapReleased += uint64(pg) * pageSize
+			}
 		}
 
 		// Unused space in the current arena also counts as released space.
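The new accounting path above counts scavenged pages per chunk with a popcount over a range of the chunk's scavenged bitmap and multiplies by the page size. The following standalone sketch shows the same counting idea over a plain []uint64 bitmap; it is an illustration only, not the runtime's popcntRange, and the chunk and page sizes are assumptions made for the example:

package main

import "fmt"

// popcntRange counts the set bits in positions [i, i+n) of a bitmap
// stored as uint64 words (64 page bits per word). This mirrors the
// counting the diff relies on, but is not the runtime's implementation.
func popcntRange(bitmap []uint64, i, n uint) uint {
	count := uint(0)
	for j := i; j < i+n; j++ {
		if bitmap[j/64]&(1<<(j%64)) != 0 {
			count++
		}
	}
	return count
}

func main() {
	const pallocChunkPages = 512 // assumed pages per chunk, for the example only
	const pageSize = 8192        // assumed page size in bytes, for the example only

	scavenged := make([]uint64, pallocChunkPages/64)
	scavenged[0] = 0xFF   // pages 0 through 7 scavenged
	scavenged[1] = 1 << 3 // page 67 scavenged

	pg := popcntRange(scavenged, 0, pallocChunkPages)
	fmt.Println(uint64(pg)*pageSize, "bytes counted as released") // 73728
}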
@@ -974,3 +983,49 @@ var BaseChunkIdx = ChunkIdx(chunkIndex((0xc000*pageAlloc64Bit + 0x200*pageAlloc3
 func PageBase(c ChunkIdx, pageIdx uint) uintptr {
 	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
 }
+
+type BitsMismatch struct {
+	Base      uintptr
+	Got, Want uint64
+}
+
+func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
+	ok = true
+
+	// Run on the system stack to avoid stack growth allocation.
+	systemstack(func() {
+		getg().m.mallocing++
+
+		// Lock so that we can safely access the bitmap.
+		lock(&mheap_.lock)
+	chunkLoop:
+		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
+			chunk := &mheap_.pages.chunks[i]
+			for j := 0; j < pallocChunkPages/64; j++ {
+				// Run over each 64-bit bitmap section and ensure
+				// scavenged is being cleared properly on allocation.
+				// If a used bit and scavenged bit are both set, that's
+				// an error, and could indicate a larger problem, or
+				// an accounting problem.
+				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
+				got := chunk.scavenged[j]
+				if want != got {
+					ok = false
+					if n >= len(mismatches) {
+						break chunkLoop
+					}
+					mismatches[n] = BitsMismatch{
+						Base: chunkBase(i) + uintptr(j)*64*pageSize,
+						Got:  got,
+						Want: want,
+					}
+					n++
+				}
+			}
+		}
+		unlock(&mheap_.lock)
+
+		getg().m.mallocing--
+	})
+	return
+}
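CheckScavengedBitsCleared rests on a single invariant: a page that is allocated must not still be marked scavenged, so masking the allocation bits out of a scavenged word (scavenged &^ alloc) must leave the word unchanged. A small standalone sketch of the same bitwise check over plain slices, independent of the runtime's chunk structures:

package main

import "fmt"

// verifyScavengedCleared reports bitmap words in which an allocated page
// still has its scavenged bit set. It mirrors the want/got comparison in
// the diff, but over plain slices instead of the runtime's chunks.
func verifyScavengedCleared(alloc, scavenged []uint64) (bad []int, ok bool) {
	ok = true
	for j := range scavenged {
		want := scavenged[j] &^ alloc[j] // scavenged bits with allocated pages masked out
		if want != scavenged[j] {
			ok = false
			bad = append(bad, j)
		}
	}
	return bad, ok
}

func main() {
	alloc := []uint64{0b1111, 0}     // pages 0-3 of word 0 are allocated
	scavenged := []uint64{0b0100, 1} // page 2 is both allocated and scavenged: a bug
	bad, ok := verifyScavengedCleared(alloc, scavenged)
	fmt.Println(ok, bad) // false [0]
}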