runtime: remove old page allocator

This change removes the old page allocator from the runtime.

Updates #35112.

Change-Id: Ib20e1c030f869b6318cd6f4288a9befdbae1b771
Reviewed-on: https://go-review.googlesource.com/c/go/+/195700
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Michael Anthony Knyszek 2019-09-04 16:12:10 +00:00 committed by Michael Knyszek
parent e6135c2768
commit 33dfd3529b
8 changed files with 26 additions and 1605 deletions

src/runtime/export_test.go

@@ -12,8 +12,6 @@ import (
 	"unsafe"
 )
 
-const OldPageAllocator = oldPageAllocator
-
 var Fadd64 = fadd64
 var Fsub64 = fsub64
 var Fmul64 = fmul64
@@ -356,15 +354,9 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 		slow.BySize[i].Frees = bySize[i].Frees
 	}
 
-	if oldPageAllocator {
-		for i := mheap_.free.start(0, 0); i.valid(); i = i.next() {
-			slow.HeapReleased += uint64(i.span().released())
-		}
-	} else {
-		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
-			pg := mheap_.pages.chunks[i].scavenged.popcntRange(0, pallocChunkPages)
-			slow.HeapReleased += uint64(pg) * pageSize
-		}
+	for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
+		pg := mheap_.pages.chunks[i].scavenged.popcntRange(0, pallocChunkPages)
+		slow.HeapReleased += uint64(pg) * pageSize
 	}
 
 	// Unused space in the current arena also counts as released space.
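As context for the surviving branch above: the new page allocator tracks scavenged (OS-released) pages in per-chunk bitmaps, so HeapReleased is recomputed by popcounting each chunk and multiplying by the page size. The following self-contained sketch models that accounting; pageSize, chunkPages, and scavBitmap here are simplified stand-ins for illustration, not the runtime's real palloc types.

package main

import "fmt"

const (
	pageSize   = 8192 // assumed page size for the sketch
	chunkPages = 512  // pages tracked per chunk, in the spirit of pallocChunkPages
)

// scavBitmap records which pages of a chunk have been returned to the OS.
type scavBitmap [chunkPages / 64]uint64

// popcntRange counts the scavenged pages in the page range [i, i+n).
func (b *scavBitmap) popcntRange(i, n int) int {
	count := 0
	for p := i; p < i+n; p++ {
		if b[p/64]&(1<<(p%64)) != 0 {
			count++
		}
	}
	return count
}

func main() {
	var chunks [4]scavBitmap
	// Pretend the first 100 pages of chunk 0 were scavenged.
	for p := 0; p < 100; p++ {
		chunks[0][p/64] |= 1 << (p % 64)
	}
	// Mirror the shape of the surviving loop: per-chunk popcount times page size.
	var released uint64
	for i := range chunks {
		pg := chunks[i].popcntRange(0, chunkPages)
		released += uint64(pg) * pageSize
	}
	fmt.Println("heap released (bytes):", released) // 100 * 8192
}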
@@ -543,170 +535,6 @@ func MapTombstoneCheck(m map[int]int) {
 	}
 }
 
-// UnscavHugePagesSlow returns the value of mheap_.freeHugePages
-// and the number of unscavenged huge pages calculated by
-// scanning the heap.
-func UnscavHugePagesSlow() (uintptr, uintptr) {
-	var base, slow uintptr
-	// Run on the system stack to avoid deadlock from stack growth
-	// trying to acquire the heap lock.
-	systemstack(func() {
-		lock(&mheap_.lock)
-		base = mheap_.free.unscavHugePages
-		for _, s := range mheap_.allspans {
-			if s.state.get() == mSpanFree && !s.scavenged {
-				slow += s.hugePages()
-			}
-		}
-		unlock(&mheap_.lock)
-	})
-	return base, slow
-}
-
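// Illustration, not part of this diff: before its removal, an export like
// UnscavHugePagesSlow was only reachable from the runtime's own test package,
// where it could be exercised roughly as in this sketch. The test name and the
// single runtime.GC() call are assumptions, not the tree's actual test.
package runtime_test

import (
	"runtime"
	"testing"
)

func TestUnscavHugePagesSketch(t *testing.T) {
	// Let the heap settle so the cached counter and the slow scan agree.
	runtime.GC()
	base, slow := runtime.UnscavHugePagesSlow()
	if base != slow {
		t.Fatalf("cached unscavenged huge pages = %d, slow count = %d", base, slow)
	}
}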
-// Span is a safe wrapper around an mspan, whose memory
-// is managed manually.
-type Span struct {
-	*mspan
-}
-
-func AllocSpan(base, npages uintptr, scavenged bool) Span {
-	var s *mspan
-	systemstack(func() {
-		lock(&mheap_.lock)
-		s = (*mspan)(mheap_.spanalloc.alloc())
-		unlock(&mheap_.lock)
-	})
-	s.init(base, npages)
-	s.scavenged = scavenged
-	return Span{s}
-}
-
-func (s *Span) Free() {
-	systemstack(func() {
-		lock(&mheap_.lock)
-		mheap_.spanalloc.free(unsafe.Pointer(s.mspan))
-		unlock(&mheap_.lock)
-	})
-	s.mspan = nil
-}
-
-func (s Span) Base() uintptr {
-	return s.mspan.base()
-}
-
-func (s Span) Pages() uintptr {
-	return s.mspan.npages
-}
-
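// Illustration, not part of this diff: Span manages its mspan manually, so
// every AllocSpan needs a matching Free. The base address and page count below
// are arbitrary values chosen for the sketch.
package runtime_test

import "runtime"

func spanLifecycleSketch() (base, pages uintptr) {
	// Carve a fake 4-page span out of the heap's span fixalloc.
	s := runtime.AllocSpan(0x100000, 4, false)
	defer s.Free() // hand the mspan back to the fixalloc when done

	return s.Base(), s.Pages()
}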
-type TreapIterType treapIterType
-
-const (
-	TreapIterScav TreapIterType = TreapIterType(treapIterScav)
-	TreapIterHuge               = TreapIterType(treapIterHuge)
-	TreapIterBits               = treapIterBits
-)
-
-type TreapIterFilter treapIterFilter
-
-func TreapFilter(mask, match TreapIterType) TreapIterFilter {
-	return TreapIterFilter(treapFilter(treapIterType(mask), treapIterType(match)))
-}
-
-func (s Span) MatchesIter(mask, match TreapIterType) bool {
-	return treapFilter(treapIterType(mask), treapIterType(match)).matches(s.treapFilter())
-}
-
-type TreapIter struct {
-	treapIter
-}
-
-func (t TreapIter) Span() Span {
-	return Span{t.span()}
-}
-
-func (t TreapIter) Valid() bool {
-	return t.valid()
-}
-
-func (t TreapIter) Next() TreapIter {
-	return TreapIter{t.next()}
-}
-
-func (t TreapIter) Prev() TreapIter {
-	return TreapIter{t.prev()}
-}
-
-// Treap is a safe wrapper around mTreap for testing.
-//
-// It must never be heap-allocated because mTreap is
-// notinheap.
-//
-//go:notinheap
-type Treap struct {
-	mTreap
-}
-
-func (t *Treap) Start(mask, match TreapIterType) TreapIter {
-	return TreapIter{t.start(treapIterType(mask), treapIterType(match))}
-}
-
-func (t *Treap) End(mask, match TreapIterType) TreapIter {
-	return TreapIter{t.end(treapIterType(mask), treapIterType(match))}
-}
-
-func (t *Treap) Insert(s Span) {
-	// mTreap uses a fixalloc in mheap_ for treapNode
-	// allocation which requires the mheap_ lock to manipulate.
-	// Locking here is safe because the treap itself never allocs
-	// or otherwise ends up grabbing this lock.
-	systemstack(func() {
-		lock(&mheap_.lock)
-		t.insert(s.mspan)
-		unlock(&mheap_.lock)
-	})
-	t.CheckInvariants()
-}
-
-func (t *Treap) Find(npages uintptr) TreapIter {
-	return TreapIter{t.find(npages)}
-}
-
-func (t *Treap) Erase(i TreapIter) {
-	// mTreap uses a fixalloc in mheap_ for treapNode
-	// freeing which requires the mheap_ lock to manipulate.
-	// Locking here is safe because the treap itself never allocs
-	// or otherwise ends up grabbing this lock.
-	systemstack(func() {
-		lock(&mheap_.lock)
-		t.erase(i.treapIter)
-		unlock(&mheap_.lock)
-	})
-	t.CheckInvariants()
-}
-
-func (t *Treap) RemoveSpan(s Span) {
-	// See Erase about locking.
-	systemstack(func() {
-		lock(&mheap_.lock)
-		t.removeSpan(s.mspan)
-		unlock(&mheap_.lock)
-	})
-	t.CheckInvariants()
-}
-
-func (t *Treap) Size() int {
-	i := 0
-	t.mTreap.treap.walkTreap(func(t *treapNode) {
-		i++
-	})
-	return i
-}
-
-func (t *Treap) CheckInvariants() {
-	t.mTreap.treap.walkTreap(checkTreapNode)
-	t.mTreap.treap.validateInvariants()
-}
-
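// Illustration, not part of this diff: a rough sketch of how these Treap test
// hooks composed in a test. The package-level variable keeps the Treap out of
// the heap, as its doc comment requires; names and values are invented.
package runtime_test

import (
	"runtime"
	"testing"
)

// Treap embeds mTreap, which is notinheap, so keep the wrapper in static data.
var treapSketch runtime.Treap

func TestTreapSketch(t *testing.T) {
	// Two fake spans: a small clean one and a larger scavenged one.
	a := runtime.AllocSpan(0x100000, 2, false)
	b := runtime.AllocSpan(0x200000, 8, true)
	treapSketch.Insert(a)
	treapSketch.Insert(b)
	if got := treapSketch.Size(); got != 2 {
		t.Fatalf("treap size = %d, want 2", got)
	}

	// Best-fit lookup: find a span of at least 4 pages and erase it by iterator.
	if i := treapSketch.Find(4); i.Valid() {
		treapSketch.Erase(i)
	}

	// Walk whatever scavenged spans remain, using the same value as mask and match.
	for i := treapSketch.Start(runtime.TreapIterScav, runtime.TreapIterScav); i.Valid(); i = i.Next() {
		t.Logf("scavenged span: base=%#x pages=%d", i.Span().Base(), i.Span().Pages())
	}

	// Clean up: drop the remaining span from the treap and free both mspans.
	treapSketch.RemoveSpan(a)
	a.Free()
	b.Free()
}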
 func RunGetgThreadSwitchTest() {
 	// Test that getg works correctly with thread switch.
 	// With gccgo, if we generate getg inlined, the backend