mirror of https://github.com/golang/go.git
runtime: fully initialize span in alloc_m
Currently, several important fields of a heap span are set by heapBits.initSpan, which happens after the span has already been published and returned from the locked region of alloc_m. In particular, allocBits is set very late, which makes mspan.isFree unsafe even if you were to lock the heap because it tries to access allocBits.

This CL fixes this by populating these fields in alloc_m. The next CL builds on this to only publish the span once it is fully initialized. Together, they'll make it safe to check allocBits even if there is a race with alloc_m.

For #10958, #24543, but a good fix in general.

Change-Id: I7fde90023af0f497e826b637efa4d19c32840c08
Reviewed-on: https://go-review.googlesource.com/c/go/+/203285
Run-TryBot: Austin Clements <austin@google.com>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
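As a rough illustration of the hazard described above (a minimal sketch, not the runtime's code: spanT and its isFree method are hypothetical stand-ins for mspan and mspan.isFree), any free-check has to dereference the span's allocation bitmap, so it is only safe once allocBits has been populated:

    package main

    import "fmt"

    // spanT is a hypothetical stand-in for mspan; allocBits is the
    // per-object allocation bitmap (one bit per element) that a
    // free-check like mspan.isFree has to consult.
    type spanT struct {
        nelems    uintptr
        allocBits []byte // nil until the span is fully initialized
    }

    // isFree reports whether object i in the span is unallocated. It
    // dereferences allocBits, so calling it on a span that was published
    // before allocBits was populated can fault or misread, which is the
    // race the commit message describes.
    func (s *spanT) isFree(i uintptr) bool {
        return s.allocBits[i/8]&(1<<(i%8)) == 0
    }

    func main() {
        s := &spanT{nelems: 64}
        // Pre-fix ordering: the span could already be visible to other
        // code here while allocBits is still nil. Post-fix, the bitmap is
        // allocated and assigned inside alloc_m before the span escapes.
        s.allocBits = make([]byte, s.nelems/8)
        fmt.Println(s.isFree(0)) // true: every object starts out free
    }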
This commit is contained in:
parent d5caea771e
commit a9b37ae026

2 changed files with 28 additions and 15 deletions
@@ -1012,6 +1012,23 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
 		h.reclaim(npage)
 	}
 
+	// Compute size information.
+	nbytes := npage << _PageShift
+	var elemSize, nelems uintptr
+	if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
+		elemSize = nbytes
+		nelems = 1
+	} else {
+		elemSize = uintptr(class_to_size[sizeclass])
+		nelems = nbytes / elemSize
+	}
+
+	// Allocate mark and allocation bits before we take the heap
+	// lock. We'll drop these on the floor if we fail to allocate
+	// the span, but in that case we'll panic soon.
+	gcmarkBits := newMarkBits(nelems)
+	allocBits := newAllocBits(nelems)
+
 	lock(&h.lock)
 	// transfer stats from cache to global
 	memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
@@ -1028,14 +1045,13 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
 		s.state = mSpanInUse
 		s.allocCount = 0
 		s.spanclass = spanclass
+		s.elemsize = elemSize
 		if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
-			s.elemsize = s.npages << _PageShift
 			s.divShift = 0
 			s.divMul = 0
 			s.divShift2 = 0
 			s.baseMask = 0
 		} else {
-			s.elemsize = uintptr(class_to_size[sizeclass])
 			m := &class_to_divmagic[sizeclass]
 			s.divShift = m.shift
 			s.divMul = m.mul
@@ -1043,6 +1059,13 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
 			s.baseMask = m.baseMask
 		}
 
+		// Initialize mark and allocation structures.
+		s.freeindex = 0
+		s.allocCache = ^uint64(0) // all 1s indicating all free.
+		s.nelems = nelems
+		s.gcmarkBits = gcmarkBits
+		s.allocBits = allocBits
+
 		// Mark in-use span in arena page bitmap.
 		arena, pageIdx, pageMask := pageIndexOf(s.base())
 		arena.pageInUse[pageIdx] |= pageMask