[dev.garbage] runtime: cleanup and optimize span.base()
Prior to this CL the base of a span was calculated in various places
using shifts or calls to base(). This CL now always calls base(), which
has been optimized to calculate the base of the span when the span is
initialized and to store that value in the span structure.

Change-Id: I661f2bfa21e3748a249cdf049ef9062db6e78100
Reviewed-on: https://go-review.googlesource.com/20703
Reviewed-by: Austin Clements <austin@google.com>
parent 8dda1c4c08
commit f8d0d4fd59

6 changed files with 17 additions and 14 deletions
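In essence, the CL trades a shift on every base() call for one extra word per span: the base address is computed once, when the span is initialized, and cached. The standalone sketch below illustrates the pattern with simplified placeholder types; pageShift, pageID, and this toy mspan are assumptions for illustration, not the runtime's actual definitions.

	package main

	import "fmt"

	const pageShift = 13 // illustrative page-size shift, not the runtime's platform-specific value

	type pageID uintptr

	// mspan is a toy stand-in for the runtime's span descriptor.
	type mspan struct {
		start     pageID  // starting page number (the old source of truth)
		startAddr uintptr // cached start << pageShift, i.e. what base() returns
		npages    uintptr // number of pages in the span
	}

	// init computes the base address once and caches it.
	func (s *mspan) init(start pageID, npages uintptr) {
		s.start = start
		s.startAddr = uintptr(start) << pageShift
		s.npages = npages
	}

	// base returns the cached address instead of recomputing start << pageShift.
	func (s *mspan) base() uintptr {
		return s.startAddr
	}

	func main() {
		var s mspan
		s.init(42, 1)
		fmt.Printf("base = %#x\n", s.base()) // 42 << 13 = 0x54000
	}

Callers that previously wrote uintptr(s.start << _PageShift) can simply call s.base(), which is exactly the substitution the hunks below make.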
src/runtime/malloc.go

@@ -711,7 +711,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 			s = largeAlloc(size, flags)
 		})
 		s.freeindex = 1
-		x = unsafe.Pointer(uintptr(s.start << pageShift))
+		x = unsafe.Pointer(s.base())
 		size = s.elemsize
 	}
 
@@ -833,7 +833,7 @@ func largeAlloc(size uintptr, flag uint32) *mspan {
 	if s == nil {
 		throw("out of memory")
 	}
-	s.limit = uintptr(s.start)<<_PageShift + size
+	s.limit = s.base() + size
 	heapBitsForSpan(s.base()).initSpan(s)
 	return s
 }
src/runtime/mbitmap.go

@@ -457,7 +457,7 @@ func heapBitsForObject(p, refBase, refOff uintptr) (base uintptr, hbits heapBits
 		} else {
 			print(" to unused region of span")
 		}
-		print("idx=", hex(idx), " span.start=", hex(s.start<<_PageShift), " span.limit=", hex(s.limit), " span.state=", s.state, "\n")
+		print("idx=", hex(idx), " span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", s.state, "\n")
 		if refBase != 0 {
 			print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
 			gcDumpObject("object", refBase, refOff)
src/runtime/mcentral.go

@@ -212,7 +212,7 @@ func (c *mcentral) grow() *mspan {
 		return nil
 	}
 
-	p := uintptr(s.start << _PageShift)
+	p := s.base()
 	s.limit = p + size*n
 
 	heapBitsForSpan(s.base()).initSpan(s)
src/runtime/mgcmark.go

@@ -287,7 +287,7 @@ func markrootSpans(gcw *gcWork, shard int) {
 			// retain everything it points to.
 			spf := (*specialfinalizer)(unsafe.Pointer(sp))
 			// A finalizer can be set for an inner byte of an object, find object beginning.
-			p := uintptr(s.start<<_PageShift) + uintptr(spf.special.offset)/s.elemsize*s.elemsize
+			p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
 
 			// Mark everything that can be reached from
 			// the object (but *not* the object itself or
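This hunk and the sweep hunks that follow all map a special record, which may be attached to an interior byte of an object, back to the beginning of that object: integer division by the element size truncates, so offset/elemsize*elemsize rounds the offset down to the start of the object's slot. A small worked example with made-up numbers:

	package main

	import "fmt"

	func main() {
		const elemsize = 48      // hypothetical object size for this span's size class
		base := uintptr(0x54000) // hypothetical span base address
		offset := uintptr(200)   // offset of the special's byte: 200 = 4*48 + 8

		// Truncating division rounds the offset down to a multiple of elemsize,
		// giving the offset of the object that contains the byte.
		p := base + offset/elemsize*elemsize
		fmt.Printf("object start = %#x (base + %d)\n", p, offset/elemsize*elemsize)
		// object start = 0x540c0 (base + 192)
	}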
src/runtime/mgcsweep.go

@@ -211,13 +211,13 @@ func (s *mspan) sweep(preserve bool) bool {
 	special := *specialp
 	for special != nil {
 		// A finalizer can be set for an inner byte of an object, find object beginning.
-		p := uintptr(s.start<<_PageShift) + uintptr(special.offset)/size*size
+		p := s.base() + uintptr(special.offset)/size*size
 		mbits := s.markBitsForAddr(p)
 		if !mbits.isMarked() {
 			// This object is not marked and has at least one special record.
 			// Pass 1: see if it has at least one finalizer.
 			hasFin := false
-			endOffset := p - uintptr(s.start<<_PageShift) + size
+			endOffset := p - s.base() + size
 			for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
 				if tmp.kind == _KindSpecialFinalizer {
 					// Stop freeing of object if it has a finalizer.
@@ -230,7 +230,7 @@ func (s *mspan) sweep(preserve bool) bool {
 			for special != nil && uintptr(special.offset) < endOffset {
 				// Find the exact byte for which the special was setup
 				// (as opposed to object beginning).
-				p := uintptr(s.start<<_PageShift) + uintptr(special.offset)
+				p := s.base() + uintptr(special.offset)
 				if special.kind == _KindSpecialFinalizer || !hasFin {
 					// Splice out special record.
 					y := special
@@ -311,7 +311,7 @@ func (s *mspan) sweep(preserve bool) bool {
 		// implement and then call some kind of MHeap_DeleteSpan.
 		if debug.efence > 0 {
 			s.limit = 0 // prevent mlookup from finding this span
-			sysFault(unsafe.Pointer(uintptr(s.start<<_PageShift)), size)
+			sysFault(unsafe.Pointer(s.base()), size)
 		} else {
 			mheap_.freeSpan(s, 1)
 		}
src/runtime/mheap.go

@@ -116,7 +116,8 @@ type mspan struct {
 	next *mspan     // next span in list, or nil if none
 	prev **mspan    // previous span's next field, or list head's first field if none
 	list *mSpanList // For debugging. TODO: Remove.
-
+	//TODO:(rlh) Eliminate start field and use startAddr >> PageShift instead.
+	startAddr     uintptr   // uintptr(s.start << _PageShift) aka s.base()
 	start         pageID    // starting page number
 	npages        uintptr   // number of pages in span
 	stackfreelist gclinkptr // list of free stacks, avoids overloading freelist
@@ -184,7 +185,7 @@ type mspan struct {
 }
 
 func (s *mspan) base() uintptr {
-	return uintptr(s.start << _PageShift)
+	return s.startAddr
 }
 
 func (s *mspan) layout() (size, n, total uintptr) {
@@ -300,7 +301,7 @@ func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
 		return 0
 	}
 
-	p := uintptr(s.start) << _PageShift
+	p := s.base()
 	if s.sizeclass == 0 {
 		// Large object.
 		if base != nil {
@@ -542,7 +543,7 @@ func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool)
 
 	if s != nil {
 		if needzero && s.needzero != 0 {
-			memclr(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
+			memclr(unsafe.Pointer(s.base()), s.npages<<_PageShift)
 		}
 		s.needzero = 0
 	}
@@ -610,7 +611,7 @@ HaveSpan:
 		throw("still in list")
 	}
 	if s.npreleased > 0 {
-		sysUsed(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
+		sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
 		memstats.heap_released -= uint64(s.npreleased << _PageShift)
 		s.npreleased = 0
 	}
@@ -826,6 +827,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 		t := h_spans[p-1]
 		if t != nil && t.state == _MSpanFree {
 			s.start = t.start
+			s.startAddr = uintptr(s.start << _PageShift)
 			s.npages += t.npages
 			s.npreleased = t.npreleased // absorb released pages
 			s.needzero |= t.needzero
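One cost of caching the address is that startAddr must be refreshed wherever s.start is rewritten, as this coalescing hunk and the span.init hunk below both do; a stale cache would make base() return the wrong address (the //TODO in the struct hunk above points at removing the duplication by dropping the start field entirely). A hypothetical setStart helper and consistency check, sketched against the toy types from the earlier example and not part of the runtime, might look like:

	// setStart is a hypothetical helper that keeps the cached base address
	// in sync when a span's starting page changes, e.g. when a freed span
	// absorbs its lower neighbor during coalescing.
	func (s *mspan) setStart(start pageID) {
		s.start = start
		s.startAddr = uintptr(start) << pageShift
	}

	// checkBase is an illustrative invariant check of the cached value.
	func (s *mspan) checkBase() {
		if s.startAddr != uintptr(s.start)<<pageShift {
			panic("mspan.startAddr out of sync with mspan.start")
		}
	}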
@@ -925,6 +927,7 @@ func (span *mspan) init(start pageID, npages uintptr) {
 	span.prev = nil
 	span.list = nil
 	span.start = start
+	span.startAddr = uintptr(start << _PageShift)
 	span.npages = npages
 	span.allocCount = 0
 	span.sizeclass = 0