Mirror of https://github.com/golang/go.git (synced 2025-12-08 06:10:04 +00:00)
[dev.garbage] runtime: cleanup and optimize span.base()
Prior to this CL the base of a span was calculated in various
places using shifts or calls to base(). This CL now always calls
base() which has been optimized to calculate the base of the span
when the span is initialized and store that value in the span
structure.

Change-Id: I661f2bfa21e3748a249cdf049ef9062db6e78100
Reviewed-on: https://go-review.googlesource.com/20703
Reviewed-by: Austin Clements <austin@google.com>
commit f8d0d4fd59
parent 8dda1c4c08

6 changed files with 17 additions and 14 deletions
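
The CL is a textbook instance of caching a derived value: the base address, previously recomputed with a shift at every call site, is computed once when the span is initialized and stored in the struct, so base() reduces to a plain field load. Below is a minimal, self-contained sketch of the before/after shape; the pageShift constant and the toy span type are simplified stand-ins for the runtime's _PageShift and mspan, not the real definitions.

package main

import "fmt"

// pageShift stands in for the runtime's _PageShift; 13 (8 KiB pages)
// is assumed here purely for illustration.
const pageShift = 13

type span struct {
	start     uintptr // starting page number, as in mspan.start
	startAddr uintptr // cached base address: start << pageShift
}

// init mirrors the CL: compute the base address once, when the span
// is initialized, and store it in the structure.
func (s *span) init(start uintptr) {
	s.start = start
	s.startAddr = start << pageShift
}

// base is now a plain field load; before the CL it would have
// recomputed s.start << pageShift on every call.
func (s *span) base() uintptr {
	return s.startAddr
}

func main() {
	var s span
	s.init(42)
	fmt.Printf("span base: %#x\n", s.base()) // prints 0x54000
}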

--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -116,7 +116,8 @@ type mspan struct {
 	next     *mspan     // next span in list, or nil if none
 	prev     **mspan    // previous span's next field, or list head's first field if none
 	list     *mSpanList // For debugging. TODO: Remove.
-
+	//TODO:(rlh) Eliminate start field and use startAddr >> PageShift instead.
+	startAddr uintptr   // uintptr(s.start << _PageShift) aka s.base()
 	start    pageID     // starting page number
 	npages   uintptr    // number of pages in span
 	stackfreelist gclinkptr // list of free stacks, avoids overloading freelist
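
The TODO added above points at the follow-up cleanup: once every caller goes through base(), the start field is redundant, because the page number can be derived from the cached address by the inverse shift. In terms of the toy sketch above (a hypothetical startPage helper, not part of this CL):

// startPage derives the starting page number from the cached base
// address, the direction the TODO suggests; with this in place, the
// separate start field could eventually be deleted.
func (s *span) startPage() uintptr {
	return s.startAddr >> pageShift
}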

@@ -184,7 +185,7 @@ type mspan struct {
 }
 
 func (s *mspan) base() uintptr {
-	return uintptr(s.start << _PageShift)
+	return s.startAddr
 }
 
 func (s *mspan) layout() (size, n, total uintptr) {

@@ -300,7 +301,7 @@ func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
 		return 0
 	}
 
-	p := uintptr(s.start) << _PageShift
+	p := s.base()
 	if s.sizeclass == 0 {
 		// Large object.
 		if base != nil {

@@ -542,7 +543,7 @@ func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool)
 
 	if s != nil {
 		if needzero && s.needzero != 0 {
-			memclr(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
+			memclr(unsafe.Pointer(s.base()), s.npages<<_PageShift)
 		}
 		s.needzero = 0
 	}

@@ -610,7 +611,7 @@ HaveSpan:
 		throw("still in list")
 	}
 	if s.npreleased > 0 {
-		sysUsed(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
+		sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
 		memstats.heap_released -= uint64(s.npreleased << _PageShift)
 		s.npreleased = 0
 	}

@@ -826,6 +827,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 		t := h_spans[p-1]
 		if t != nil && t.state == _MSpanFree {
 			s.start = t.start
+			s.startAddr = uintptr(s.start << _PageShift)
 			s.npages += t.npages
 			s.npreleased = t.npreleased // absorb released pages
 			s.needzero |= t.needzero
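
Caching comes with an invariant: startAddr must be recomputed whenever s.start changes. The hunk above covers the one place that happens after initialization, where freeSpanLocked coalesces a span with its free left neighbor and rewrites s.start. One way to make the invariant hard to break, sketched against the toy type above (a hypothetical setStart helper; the actual CL simply assigns both fields at each site):

// setStart updates the starting page number and re-derives the cached
// base address in one place, so no caller can update one field and
// forget the other.
func (s *span) setStart(start uintptr) {
	s.start = start
	s.startAddr = start << pageShift
}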

@@ -925,6 +927,7 @@ func (span *mspan) init(start pageID, npages uintptr) {
 	span.prev = nil
 	span.list = nil
 	span.start = start
+	span.startAddr = uintptr(start << _PageShift)
 	span.npages = npages
 	span.allocCount = 0
 	span.sizeclass = 0
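
Since the point of the CL is performance, the claim is checkable with a microbenchmark comparing the shift against the cached load. A sketch using the standard testing package, reusing the toy field layout from the first sketch (save as span_test.go and run go test -bench=.; the go:noinline directives keep the compiler from folding either body away, and no particular numbers are claimed here):

package span

import "testing"

const pageShift = 13

type span struct {
	start     uintptr
	startAddr uintptr
}

//go:noinline
func baseShift(s *span) uintptr { return s.start << pageShift }

//go:noinline
func baseCached(s *span) uintptr { return s.startAddr }

// sink defeats dead-code elimination of the benchmarked calls.
var sink uintptr

func BenchmarkBaseShift(b *testing.B) {
	s := &span{start: 42, startAddr: 42 << pageShift}
	for i := 0; i < b.N; i++ {
		sink = baseShift(s)
	}
}

func BenchmarkBaseCached(b *testing.B) {
	s := &span{start: 42, startAddr: 42 << pageShift}
	for i := 0; i < b.N; i++ {
		sink = baseCached(s)
	}
}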