mirror of
https://github.com/golang/go.git
synced 2025-12-08 06:10:04 +00:00
runtime: make the heap bitmap sparse
This splits the heap bitmap into separate chunks for every 64MB of the heap and introduces an index mapping from virtual address to metadata. It modifies the heapBits abstraction to use this two-level structure. Finally, it modifies heapBitsSetType to unroll the bitmap into the object itself and then copy it out if the bitmap would span discontiguous bitmap chunks. This is a step toward supporting general sparse heaps, which will eliminate address space conflict failures as well as the limit on the heap size. It's also advantageous for 32-bit. 32-bit already supports discontiguous heaps by always starting the arena at address 0. However, as a result, with a contiguous bitmap, if the kernel chooses a high address (near 2GB) for a heap mapping, the runtime is forced to map up to 128MB of heap bitmap. Now the runtime can map sections of the bitmap for just the parts of the address space used by the heap. Updates #10460. This slightly slows down the x/garbage and compilebench benchmarks. However, I think the slowdown is acceptably small. name old time/op new time/op delta Template 178ms ± 1% 180ms ± 1% +0.78% (p=0.029 n=10+10) Unicode 85.7ms ± 2% 86.5ms ± 2% ~ (p=0.089 n=10+10) GoTypes 594ms ± 0% 599ms ± 1% +0.70% (p=0.000 n=9+9) Compiler 2.86s ± 0% 2.87s ± 0% +0.40% (p=0.001 n=9+9) SSA 7.23s ± 2% 7.29s ± 2% +0.94% (p=0.029 n=10+10) Flate 116ms ± 1% 117ms ± 1% +0.99% (p=0.000 n=9+9) GoParser 146ms ± 1% 146ms ± 0% ~ (p=0.193 n=10+7) Reflect 399ms ± 0% 403ms ± 1% +0.89% (p=0.001 n=10+10) Tar 173ms ± 1% 174ms ± 1% +0.91% (p=0.013 n=10+9) XML 208ms ± 1% 210ms ± 1% +0.93% (p=0.000 n=10+10) [Geo mean] 368ms 371ms +0.79% name old time/op new time/op delta Garbage/benchmem-MB=64-12 2.17ms ± 1% 2.21ms ± 1% +2.15% (p=0.000 n=20+20) Change-Id: I037fd283221976f4f61249119d6b97b100bcbc66 Reviewed-on: https://go-review.googlesource.com/85883 Run-TryBot: Austin Clements <austin@google.com> TryBot-Result: Gobot Gobot <gobot@golang.org> Reviewed-by: Rick Hudson <rlh@golang.org>
This commit is contained in:
parent
f61057c497
commit
c0392d2e7f
3 changed files with 252 additions and 75 deletions
|
|
@ -114,9 +114,6 @@ type mheap struct {
|
|||
nsmallfree [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)
|
||||
|
||||
// range of addresses we might see in the heap
|
||||
bitmap_start uintptr // Points to first byte of bitmap
|
||||
bitmap_mapped uintptr
|
||||
bitmap_delta uintptr // Used to map heap address to bitmap address
|
||||
|
||||
// The arena_* fields indicate the addresses of the Go heap.
|
||||
//
|
||||
|
|
@ -143,6 +140,21 @@ type mheap struct {
|
|||
// here and *must* clobber it to use it.
|
||||
arena_reserved bool
|
||||
|
||||
// arenas is the heap arena index. arenas[va/heapArenaBytes]
|
||||
// points to the metadata for the heap arena containing va.
|
||||
//
|
||||
// For regions of the address space that are not backed by the
|
||||
// Go heap, the arena index contains nil.
|
||||
//
|
||||
// Modifications are protected by mheap_.lock. Reads can be
|
||||
// performed without locking; however, a given entry can
|
||||
// transition from nil to non-nil at any time when the lock
|
||||
// isn't held. (Entries never transition back to nil.)
|
||||
//
|
||||
// This structure is fully mapped by mallocinit, so it's safe
|
||||
// to probe any index.
|
||||
arenas *[memLimit / heapArenaBytes]*heapArena
|
||||
|
||||
//_ uint32 // ensure 64-bit alignment
|
||||
|
||||
// central free lists for small size classes.
|
||||
|
|
@ -167,6 +179,23 @@ type mheap struct {
|
|||
|
||||
var mheap_ mheap
|
||||
|
||||
// A heapArena stores metadata for a heap arena. heapArenas are stored
|
||||
// outside of the Go heap and accessed via the mheap_.arenas index.
|
||||
//
|
||||
// This gets allocated directly from the OS, so ideally it should be a
|
||||
// multiple of the system page size. For example, avoid adding small
|
||||
// fields.
|
||||
//
|
||||
//go:notinheap
|
||||
type heapArena struct {
|
||||
// bitmap stores the pointer/scalar bitmap for the words in
|
||||
// this arena. See mbitmap.go for a description. Use the
|
||||
// heapBits type to access this.
|
||||
bitmap [heapArenaBitmapBytes]byte
|
||||
|
||||
// TODO: Also store the spans map here.
|
||||
}
|
||||
|
||||
// An MSpan is a run of pages.
|
||||
//
|
||||
// When a MSpan is in the heap free list, state == MSpanFree
|
||||
|
|
@ -507,8 +536,21 @@ func (h *mheap) setArenaUsed(arena_used uintptr, racemap bool) {
|
|||
// avoids faults when other threads try to access these regions immediately
|
||||
// after observing the change to arena_used.
|
||||
|
||||
// Map the bitmap.
|
||||
h.mapBits(arena_used)
|
||||
// Allocate heap arena metadata.
|
||||
for ri := h.arena_used / heapArenaBytes; ri < (arena_used+heapArenaBytes-1)/heapArenaBytes; ri++ {
|
||||
if h.arenas[ri] != nil {
|
||||
continue
|
||||
}
|
||||
r := (*heapArena)(persistentalloc(unsafe.Sizeof(heapArena{}), sys.PtrSize, &memstats.gc_sys))
|
||||
if r == nil {
|
||||
throw("runtime: out of memory allocating heap arena metadata")
|
||||
}
|
||||
// Store atomically just in case an object from the
|
||||
// new heap arena becomes visible before the heap lock
|
||||
// is released (which shouldn't happen, but there's
|
||||
// little downside to this).
|
||||
atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri]), unsafe.Pointer(r))
|
||||
}
|
||||
|
||||
// Map spans array.
|
||||
h.mapSpans(arena_used)
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue