runtime: use offAddr in more parts of the runtime

This change uses the new offAddr type in more parts of the runtime where
we've been implicitly switching from the default address space to a
contiguous view. The purpose of offAddr is to represent addresses in the
contiguous view of the address space, and to make direct computations
between real addresses and offset addresses impossible. This change thus
improves readability in the runtime.

Updates #35788.

Change-Id: I4e1c5fed3ed68aa12f49a42b82eb3f46aba82fc1
Reviewed-on: https://go-review.googlesource.com/c/go/+/230718
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Author:    Michael Anthony Knyszek
Date:      2020-04-28 21:30:57 +00:00
Committer: Michael Knyszek
Commit:    ea5f9b666c
Parent:    d69509ff99

4 changed files with 99 additions and 86 deletions
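For orientation, here is a minimal sketch of the offAddr pattern the diffs below rely on. The method names (add, lessThan, lessEqual, addr) are the ones the diff actually calls; the rest is simplified, and arenaBaseOffset is stubbed to 0 as on platforms with an unsegmented address space. The point is that wrapping the raw uintptr in a struct makes mixed arithmetic between raw and offset addresses a compile error, forcing an explicit addr() unwrap:

package main

// arenaBaseOffset is 0 here for illustration; on platforms with a
// segmented address space it is the offset that linearizes the heap.
const arenaBaseOffset uintptr = 0

// offAddr wraps a raw virtual address. Because Go will not compare
// or add a struct and a uintptr, every computation mixing the two
// views must go through a named method or an explicit addr() call.
type offAddr struct {
	a uintptr
}

// add offsets the address by bytes, staying in the offset view.
func (l offAddr) add(bytes uintptr) offAddr {
	return offAddr{l.a + bytes}
}

// lessThan compares two addresses in the contiguous (offset) view.
func (l1 offAddr) lessThan(l2 offAddr) bool {
	return l1.a+arenaBaseOffset < l2.a+arenaBaseOffset
}

// lessEqual is like lessThan, but inclusive.
func (l1 offAddr) lessEqual(l2 offAddr) bool {
	return l1.a+arenaBaseOffset <= l2.a+arenaBaseOffset
}

// addr unwraps back to a raw virtual address.
func (l offAddr) addr() uintptr {
	return l.a
}

func main() {
	base := offAddr{0x1000}
	limit := base.add(0x2000)
	println(base.lessThan(limit)) // true
	// base + 1         // compile error: mismatched types
	_ = base.addr() + 1 // explicit unwrap required
}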

src/runtime/mgcscavenge.go

@@ -452,8 +452,8 @@ func (s *pageAlloc) scavengeStartGen() {
 	s.inUse.cloneInto(&s.scav.inUse)
 
 	// Pick the new starting address for the scavenger cycle.
-	var startAddr uintptr
-	if s.scav.scavLWM < s.scav.freeHWM {
+	var startAddr offAddr
+	if s.scav.scavLWM.lessThan(s.scav.freeHWM) {
 		// The "free" high watermark exceeds the "scavenged" low watermark,
 		// so there are free scavengable pages in parts of the address space
 		// that the scavenger already searched, the high watermark being the
@@ -467,7 +467,7 @@ func (s *pageAlloc) scavengeStartGen() {
 		// scavenging from where we were.
 		startAddr = s.scav.scavLWM
 	}
-	s.scav.inUse.removeGreaterEqual(startAddr)
+	s.scav.inUse.removeGreaterEqual(startAddr.addr())
 
 	// reservationBytes may be zero if s.inUse.totalBytes is small, or if
 	// scavengeReservationShards is large. This case is fine as the scavenger
@@ -478,8 +478,8 @@ func (s *pageAlloc) scavengeStartGen() {
 	s.scav.reservationBytes = alignUp(s.inUse.totalBytes, pallocChunkBytes) / scavengeReservationShards
 	s.scav.gen++
 	s.scav.released = 0
-	s.scav.freeHWM = 0
-	s.scav.scavLWM = maxSearchAddr
+	s.scav.freeHWM = minOffAddr
+	s.scav.scavLWM = maxOffAddr
 }
 
 // scavengeReserve reserves a contiguous range of the address space
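The sentinel swap above is the usual running-min/max reset: each generation starts with the low watermark at the maximum representable offset address and the high watermark at the minimum, so the first real page freed or scavenged always replaces the sentinel. A toy sketch of the pattern on plain uintptrs (function and names hypothetical):

// watermarks tracks the lowest and highest address seen, starting
// from "impossible" sentinels so the first observation always wins;
// maxOffAddr and minOffAddr play these roles in the scavenger.
func watermarks(addrs []uintptr) (lwm, hwm uintptr) {
	lwm = ^uintptr(0) // analogue of maxOffAddr
	hwm = 0           // analogue of minOffAddr
	for _, a := range addrs {
		if a < lwm {
			lwm = a
		}
		if a > hwm {
			hwm = a
		}
	}
	return
}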
@@ -698,8 +698,8 @@ func (s *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) uintptr
 	addr := chunkBase(ci) + uintptr(base)*pageSize
 
 	// Update the scavenge low watermark.
-	if addr < s.scav.scavLWM {
-		s.scav.scavLWM = addr
+	if oAddr := (offAddr{addr}); oAddr.lessThan(s.scav.scavLWM) {
+		s.scav.scavLWM = oAddr
 	}
 
 	// Only perform the actual scavenging if we're not in a test.

src/runtime/mpagealloc.go

@@ -81,15 +81,14 @@ const (
 	// there should this change.
 	pallocChunksL2Bits  = heapAddrBits - logPallocChunkBytes - pallocChunksL1Bits
 	pallocChunksL1Shift = pallocChunksL2Bits
-
-	// Maximum searchAddr value, which indicates that the heap has no free space.
-	//
-	// We subtract arenaBaseOffset because we want this to represent the maximum
-	// value in the shifted address space, but searchAddr is stored as a regular
-	// memory address. See arenaBaseOffset for details.
-	maxSearchAddr = ^uintptr(0) - arenaBaseOffset
 )
 
+// Maximum searchAddr value, which indicates that the heap has no free space.
+//
+// We alias maxOffAddr just to make it clear that this is the maximum address
+// for the page allocator's search space. See maxOffAddr for details.
+var maxSearchAddr = maxOffAddr
+
 // Global chunk index.
 //
 // Represents an index into the leaf level of the radix tree.
@@ -134,6 +133,18 @@ func (i chunkIdx) l2() uint {
 	}
 }
 
+// offAddrToLevelIndex converts an address in the offset address space
+// to the index into summary[level] containing addr.
+func offAddrToLevelIndex(level int, addr offAddr) int {
+	return int((addr.a + arenaBaseOffset) >> levelShift[level])
+}
+
+// levelIndexToOffAddr converts an index into summary[level] into
+// the corresponding address in the offset address space.
+func levelIndexToOffAddr(level, idx int) offAddr {
+	return offAddr{(uintptr(idx) << levelShift[level]) - arenaBaseOffset}
+}
+
 // addrsToSummaryRange converts base and limit pointers into a range
 // of entries for the given summary level.
 //
@@ -232,7 +243,7 @@ type pageAlloc struct {
 	// Note that adding in arenaBaseOffset transforms addresses
 	// to a new address space with a linear view of the full address
 	// space on architectures with segmented address spaces.
-	searchAddr uintptr
+	searchAddr offAddr
 
 	// start and end represent the chunk indices
 	// which pageAlloc knows about. It assumes
@@ -271,13 +282,13 @@ type pageAlloc struct {
 	// released is the amount of memory released this generation.
 	released uintptr
 
-	// scavLWM is the lowest address that the scavenger reached this
+	// scavLWM is the lowest (offset) address that the scavenger reached this
 	// scavenge generation.
-	scavLWM uintptr
+	scavLWM offAddr
 
-	// freeHWM is the highest address of a page that was freed to
+	// freeHWM is the highest (offset) address of a page that was freed to
 	// the page allocator this scavenge generation.
-	freeHWM uintptr
+	freeHWM offAddr
 }
 
 // mheap_.lock. This level of indirection makes it possible
@@ -319,29 +330,6 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
 	s.scav.scavLWM = maxSearchAddr
 }
 
-// compareSearchAddrTo compares an address against s.searchAddr in a linearized
-// view of the address space on systems with discontinuous process address spaces.
-// This linearized view is the same one generated by chunkIndex and arenaIndex,
-// done by adding arenaBaseOffset.
-//
-// On systems without a discontinuous address space, it's just a normal comparison.
-//
-// Returns < 0 if addr is less than s.searchAddr in the linearized address space.
-// Returns > 0 if addr is greater than s.searchAddr in the linearized address space.
-// Returns 0 if addr and s.searchAddr are equal.
-func (s *pageAlloc) compareSearchAddrTo(addr uintptr) int {
-	// Compare with arenaBaseOffset added because it gives us a linear, contiguous view
-	// of the heap on architectures with signed address spaces.
-	lAddr := addr + arenaBaseOffset
-	lSearchAddr := s.searchAddr + arenaBaseOffset
-	if lAddr < lSearchAddr {
-		return -1
-	} else if lAddr > lSearchAddr {
-		return 1
-	}
-	return 0
-}
-
 // chunkOf returns the chunk at the given chunk index.
 func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
 	return &s.chunks[ci.l1()][ci.l2()]
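With compareSearchAddrTo gone, its call sites translate mechanically: the three-way compare in a manually linearized space becomes a wrap into offAddr followed by lessThan, as the grow and free hunks below show. Side by side (excerpted from this diff, not standalone code):

// Before: three-way comparison against s.searchAddr, linearized by
// hand with arenaBaseOffset inside the helper.
if s.compareSearchAddrTo(base) < 0 {
	s.searchAddr = base
}

// After: wrap the raw address once, then compare in the offset
// space; the type system rules out comparing it to a bare uintptr.
if b := (offAddr{base}); b.lessThan(s.searchAddr) {
	s.searchAddr = b
}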
@@ -378,10 +366,10 @@ func (s *pageAlloc) grow(base, size uintptr) {
 	s.inUse.add(makeAddrRange(base, limit))
 
 	// A grow operation is a lot like a free operation, so if our
-	// chunk ends up below the (linearized) s.searchAddr, update
-	// s.searchAddr to the new address, just like in free.
-	if s.compareSearchAddrTo(base) < 0 {
-		s.searchAddr = base
+	// chunk ends up below s.searchAddr, update s.searchAddr to the
+	// new address, just like in free.
+	if b := (offAddr{base}); b.lessThan(s.searchAddr) {
+		s.searchAddr = b
 	}
 
 	// Add entries into chunks, which is sparse, if needed. Then,
@@ -545,7 +533,7 @@ func (s *pageAlloc) allocRange(base, npages uintptr) uintptr {
 // searchAddr returned is invalid and must be ignored.
 //
 // s.mheapLock must be held.
-func (s *pageAlloc) find(npages uintptr) (uintptr, uintptr) {
+func (s *pageAlloc) find(npages uintptr) (uintptr, offAddr) {
 	// Search algorithm.
 	//
 	// This algorithm walks each level l of the radix tree from the root level
@@ -585,13 +573,13 @@ func (s *pageAlloc) find(npages uintptr) (uintptr, uintptr) {
 	// firstFree is updated by calling foundFree each time free space in the
 	// heap is discovered.
 	//
-	// At the end of the search, base-arenaBaseOffset is the best new
+	// At the end of the search, base.addr() is the best new
 	// searchAddr we could deduce in this search.
 	firstFree := struct {
-		base, bound uintptr
+		base, bound offAddr
 	}{
-		base:  0,
-		bound: (1<<heapAddrBits - 1),
+		base:  minOffAddr,
+		bound: maxOffAddr,
 	}
 	// foundFree takes the given address range [addr, addr+size) and
 	// updates firstFree if it is a narrower range. The input range must
@@ -602,17 +590,17 @@ func (s *pageAlloc) find(npages uintptr) (uintptr, uintptr) {
 	// pages on the root level and narrow that down if we descend into
 	// that summary. But as soon as we need to iterate beyond that summary
 	// in a level to find a large enough range, we'll stop narrowing.
-	foundFree := func(addr, size uintptr) {
-		if firstFree.base <= addr && addr+size-1 <= firstFree.bound {
+	foundFree := func(addr offAddr, size uintptr) {
+		if firstFree.base.lessEqual(addr) && addr.add(size-1).lessEqual(firstFree.bound) {
 			// This range fits within the current firstFree window, so narrow
 			// down the firstFree window to the base and bound of this range.
 			firstFree.base = addr
-			firstFree.bound = addr + size - 1
-		} else if !(addr+size-1 < firstFree.base || addr > firstFree.bound) {
+			firstFree.bound = addr.add(size - 1)
+		} else if !(addr.add(size-1).lessThan(firstFree.base) || firstFree.bound.lessThan(addr)) {
 			// This range only partially overlaps with the firstFree range,
 			// so throw.
-			print("runtime: addr = ", hex(addr), ", size = ", size, "\n")
-			print("runtime: base = ", hex(firstFree.base), ", bound = ", hex(firstFree.bound), "\n")
+			print("runtime: addr = ", hex(addr.addr()), ", size = ", size, "\n")
+			print("runtime: base = ", hex(firstFree.base.addr()), ", bound = ", hex(firstFree.bound.addr()), "\n")
 			throw("range partially overlaps")
 		}
 	}
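The invariant foundFree maintains is a three-way case split: a discovered free range either nests inside the current [base, bound] window (narrow the window), is disjoint from it (ignore it), or partially overlaps it, which means the summaries are corrupt and the runtime throws. A toy sketch of that split on plain integers (the window type and error return are hypothetical; the real code calls throw):

package main

import "fmt"

// window is a toy stand-in for firstFree: an inclusive address
// range that only ever narrows.
type window struct{ base, bound uintptr }

// narrow applies foundFree's case split to [addr, addr+size-1].
func (w *window) narrow(addr, size uintptr) error {
	last := addr + size - 1
	switch {
	case w.base <= addr && last <= w.bound:
		w.base, w.bound = addr, last // nested: narrow the window
	case last < w.base || addr > w.bound:
		// disjoint: leave the window alone
	default: // partial overlap: the summaries lied
		return fmt.Errorf("range [%#x, %#x] partially overlaps [%#x, %#x]",
			addr, last, w.base, w.bound)
	}
	return nil
}

func main() {
	w := window{base: 0, bound: ^uintptr(0)}
	fmt.Println(w.narrow(0x4000, 0x1000), w) // narrows to [0x4000, 0x4fff]
}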
@@ -642,7 +630,7 @@ nextLevel:
 		// searchAddr on the previous level or we're on the root level, in which
 		// case the searchAddr should be the same as i after levelShift.
 		j0 := 0
-		if searchIdx := int((s.searchAddr + arenaBaseOffset) >> levelShift[l]); searchIdx&^(entriesPerBlock-1) == i {
+		if searchIdx := offAddrToLevelIndex(l, s.searchAddr); searchIdx&^(entriesPerBlock-1) == i {
 			j0 = searchIdx & (entriesPerBlock - 1)
 		}
@@ -668,7 +656,7 @@ nextLevel:
 
 			// We've encountered a non-zero summary which means
 			// free memory, so update firstFree.
-			foundFree(uintptr((i+j)<<levelShift[l]), (uintptr(1)<<logMaxPages)*pageSize)
+			foundFree(levelIndexToOffAddr(l, i+j), (uintptr(1)<<logMaxPages)*pageSize)
 
 			s := sum.start()
 			if size+s >= uint(npages) {
@@ -706,8 +694,8 @@ nextLevel:
 		if size >= uint(npages) {
 			// We found a sufficiently large run of free pages straddling
 			// some boundary, so compute the address and return it.
-			addr := uintptr(i<<levelShift[l]) - arenaBaseOffset + uintptr(base)*pageSize
-			return addr, firstFree.base - arenaBaseOffset
+			addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
+			return addr, firstFree.base
 		}
 		if l == 0 {
 			// We're at level zero, so that means we've exhausted our search.
@@ -719,7 +707,7 @@ nextLevel:
 		// lied to us. In either case, dump some useful state and throw.
 		print("runtime: summary[", l-1, "][", lastSumIdx, "] = ", lastSum.start(), ", ", lastSum.max(), ", ", lastSum.end(), "\n")
 		print("runtime: level = ", l, ", npages = ", npages, ", j0 = ", j0, "\n")
-		print("runtime: s.searchAddr = ", hex(s.searchAddr), ", i = ", i, "\n")
+		print("runtime: s.searchAddr = ", hex(s.searchAddr.addr()), ", i = ", i, "\n")
 		print("runtime: levelShift[level] = ", levelShift[l], ", levelBits[level] = ", levelBits[l], "\n")
 		for j := 0; j < len(entries); j++ {
 			sum := entries[j]
@@ -752,8 +740,8 @@ nextLevel:
 	// Since we actually searched the chunk, we may have
 	// found an even narrower free window.
 	searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
-	foundFree(searchAddr+arenaBaseOffset, chunkBase(ci+1)-searchAddr)
-	return addr, firstFree.base - arenaBaseOffset
+	foundFree(offAddr{searchAddr}, chunkBase(ci+1)-searchAddr)
+	return addr, firstFree.base
 }
 
 // alloc allocates npages worth of memory from the page heap, returning the base
@@ -767,25 +755,25 @@ nextLevel:
 func (s *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
 	// If the searchAddr refers to a region which has a higher address than
 	// any known chunk, then we know we're out of memory.
-	if chunkIndex(s.searchAddr) >= s.end {
+	if chunkIndex(s.searchAddr.addr()) >= s.end {
 		return 0, 0
 	}
 
 	// If npages has a chance of fitting in the chunk where the searchAddr is,
 	// search it directly.
-	searchAddr := uintptr(0)
-	if pallocChunkPages-chunkPageIndex(s.searchAddr) >= uint(npages) {
+	searchAddr := minOffAddr
+	if pallocChunkPages-chunkPageIndex(s.searchAddr.addr()) >= uint(npages) {
 		// npages is guaranteed to be no greater than pallocChunkPages here.
-		i := chunkIndex(s.searchAddr)
+		i := chunkIndex(s.searchAddr.addr())
 		if max := s.summary[len(s.summary)-1][i].max(); max >= uint(npages) {
-			j, searchIdx := s.chunkOf(i).find(npages, chunkPageIndex(s.searchAddr))
+			j, searchIdx := s.chunkOf(i).find(npages, chunkPageIndex(s.searchAddr.addr()))
 			if j == ^uint(0) {
 				print("runtime: max = ", max, ", npages = ", npages, "\n")
-				print("runtime: searchIdx = ", chunkPageIndex(s.searchAddr), ", s.searchAddr = ", hex(s.searchAddr), "\n")
+				print("runtime: searchIdx = ", chunkPageIndex(s.searchAddr.addr()), ", s.searchAddr = ", hex(s.searchAddr.addr()), "\n")
 				throw("bad summary data")
 			}
 			addr = chunkBase(i) + uintptr(j)*pageSize
-			searchAddr = chunkBase(i) + uintptr(searchIdx)*pageSize
+			searchAddr = offAddr{chunkBase(i) + uintptr(searchIdx)*pageSize}
 			goto Found
 		}
 	}
@@ -807,10 +795,10 @@ Found:
 	// Go ahead and actually mark the bits now that we have an address.
 	scav = s.allocRange(addr, npages)
 
-	// If we found a higher (linearized) searchAddr, we know that all the
-	// heap memory before that searchAddr in a linear address space is
+	// If we found a higher searchAddr, we know that all the
+	// heap memory before that searchAddr in an offset address space is
 	// allocated, so bump s.searchAddr up to the new one.
-	if s.compareSearchAddrTo(searchAddr) > 0 {
+	if s.searchAddr.lessThan(searchAddr) {
 		s.searchAddr = searchAddr
 	}
 	return addr, scav
@@ -820,14 +808,14 @@ Found:
 //
 // s.mheapLock must be held.
 func (s *pageAlloc) free(base, npages uintptr) {
-	// If we're freeing pages below the (linearized) s.searchAddr, update searchAddr.
-	if s.compareSearchAddrTo(base) < 0 {
-		s.searchAddr = base
+	// If we're freeing pages below the s.searchAddr, update searchAddr.
+	if b := (offAddr{base}); b.lessThan(s.searchAddr) {
+		s.searchAddr = b
 	}
 	// Update the free high watermark for the scavenger.
 	limit := base + npages*pageSize - 1
-	if s.scav.freeHWM < limit {
-		s.scav.freeHWM = limit
+	if offLimit := (offAddr{limit}); s.scav.freeHWM.lessThan(offLimit) {
+		s.scav.freeHWM = offLimit
 	}
 	if npages == 1 {
 		// Fast path: we're clearing a single bit, and we know exactly

src/runtime/mpagecache.go

@@ -91,8 +91,8 @@ func (c *pageCache) flush(s *pageAlloc) {
 	}
 	// Since this is a lot like a free, we need to make sure
 	// we update the searchAddr just like free does.
-	if s.compareSearchAddrTo(c.base) < 0 {
-		s.searchAddr = c.base
+	if b := (offAddr{c.base}); b.lessThan(s.searchAddr) {
+		s.searchAddr = b
 	}
 	s.update(c.base, pageCachePages, false, false)
 	*c = pageCache{}
@@ -106,15 +106,15 @@ func (c *pageCache) flush(s *pageAlloc) {
 func (s *pageAlloc) allocToCache() pageCache {
 	// If the searchAddr refers to a region which has a higher address than
 	// any known chunk, then we know we're out of memory.
-	if chunkIndex(s.searchAddr) >= s.end {
+	if chunkIndex(s.searchAddr.addr()) >= s.end {
 		return pageCache{}
 	}
 	c := pageCache{}
-	ci := chunkIndex(s.searchAddr) // chunk index
+	ci := chunkIndex(s.searchAddr.addr()) // chunk index
 	if s.summary[len(s.summary)-1][ci] != 0 {
 		// Fast path: there's free pages at or near the searchAddr address.
 		chunk := s.chunkOf(ci)
-		j, _ := chunk.find(1, chunkPageIndex(s.searchAddr))
+		j, _ := chunk.find(1, chunkPageIndex(s.searchAddr.addr()))
 		if j == ^uint(0) {
 			throw("bad summary data")
 		}
@@ -156,6 +156,6 @@ func (s *pageAlloc) allocToCache() pageCache {
 	// However, s.searchAddr is not allowed to point into unmapped heap memory
 	// unless it is maxSearchAddr, so make it the last page as opposed to
 	// the page after.
-	s.searchAddr = c.base + pageSize*(pageCachePages-1)
+	s.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
 	return c
 }

src/runtime/mranges.go

@@ -69,6 +69,31 @@ func (a addrRange) subtract(b addrRange) addrRange {
 	return a
 }
 
+var (
+	// minOffAddr is the minimum address in the offset space, and
+	// it corresponds to the virtual address -arenaBaseOffset.
+	//
+	// We don't initialize this with offAddrFromRaw because allocation
+	// may happen during bootstrapping, and we rely on this value
+	// being initialized.
+	//
+	// As a result, creating this value in Go is tricky because of
+	// overflow not being allowed in constants. In order to get
+	// the value we want, we take arenaBaseOffset and do a manual
+	// two's complement negation, then mask that into what can fit
+	// into a uintptr.
+	minOffAddr = offAddr{((^arenaBaseOffset) + 1) & uintptrMask}
+
+	// maxOffAddr is the maximum address in the offset address
+	// space, and it corresponds to the virtual address
+	// ^uintptr(0) - arenaBaseOffset.
+	//
+	// We don't initialize this with offAddrFromRaw because allocation
+	// may happen during bootstrapping, and we rely on this value
+	// being initialized.
+	maxOffAddr = offAddr{^uintptr(0) - arenaBaseOffset}
+)
+
 // offAddr represents an address in a contiguous view
 // of the address space on systems where the address space is
 // segmented. On other systems, it's just a normal address.
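A toy recomputation of the trick described in the minOffAddr comment above, shrunk to a hypothetical 8-bit address space so the arithmetic is visible. The point: -arenaBaseOffset overflows when the constant is converted to uintptr, but ^x + 1 (manual two's complement negation) masked to the pointer width yields the same bit pattern without a negative constant ever reaching the conversion:

package main

import "fmt"

// Hypothetical 8-bit analogues of the runtime's constants.
const (
	arenaBaseOffset = 0xf0 // stand-in for the real 64-bit offset
	uintptrMask     = 0xff // all ones at the target pointer width
)

func main() {
	// const bad uintptr = -arenaBaseOffset // overflows uintptr
	const min = ((^arenaBaseOffset) + 1) & uintptrMask
	fmt.Printf("%#x\n", min) // 0x10, i.e. -0xf0 mod 0x100
}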
@@ -268,7 +293,7 @@ func (a *addrRanges) removeGreaterEqual(addr uintptr) {
 	}
 	if r := a.ranges[pivot-1]; r.contains(addr) {
 		removed += r.size()
-		r = r.subtract(makeAddrRange(addr, maxSearchAddr))
+		r = r.subtract(makeAddrRange(addr, maxOffAddr.addr()))
 		if r.size() == 0 {
 			pivot--
 		} else {