runtime: clean up power-of-two rounding code with align functions
This change renames the "round" function to the more appropriately named
"alignUp", which rounds an integer up to the next multiple of a power of two.
It also adds the alignDown function, which is like alignUp but rounds down to
the previous multiple of a power of two. With these two functions, manual
rounding code is replaced with them where possible.

Change-Id: Ie1487366280484dcb2662972b01b4f7135f72fec
Reviewed-on: https://go-review.googlesource.com/c/go/+/190618
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
parent 2566e21f24
commit 383b447e0d

10 changed files with 40 additions and 34 deletions
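For quick reference, here is a minimal, self-contained sketch of the two helpers this change introduces. The function bodies mirror the hunk below that renames round to alignUp and adds alignDown; the main wrapper and the 4096-byte alignment are illustrative values only, not part of the commit.

package main

import "fmt"

// alignUp rounds n up to a multiple of a. a must be a power of 2.
func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

// alignDown rounds n down to a multiple of a. a must be a power of 2.
func alignDown(n, a uintptr) uintptr {
	return n &^ (a - 1)
}

func main() {
	// With an assumed 4096-byte alignment: 5000 rounds up to 8192 and down to 4096.
	fmt.Println(alignUp(5000, 4096), alignDown(5000, 4096)) // 8192 4096
	// Already-aligned values are unchanged in both directions.
	fmt.Println(alignUp(8192, 4096), alignDown(8192, 4096)) // 8192 8192
}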
@@ -37,6 +37,8 @@ func TestIntendedInlining(t *testing.T) {
 "addb",
 "adjustpanics",
 "adjustpointer",
+"alignDown",
+"alignUp",
 "bucketMask",
 "bucketShift",
 "chanbuf",
@@ -56,7 +58,6 @@ func TestIntendedInlining(t *testing.T) {
 "readUnaligned32",
 "readUnaligned64",
 "releasem",
-"round",
 "roundupsize",
 "stackmapdata",
 "stringStructOf",
@@ -568,7 +568,7 @@ func mallocinit() {
 if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
 p = mheap_.heapArenaAlloc.end
 }
-p = round(p+(256<<10), heapArenaBytes)
+p = alignUp(p+(256<<10), heapArenaBytes)
 // Because we're worried about fragmentation on
 // 32-bit, we try to make a large initial reservation.
 arenaSizes := []uintptr{
@@ -601,7 +601,7 @@ func mallocinit() {
 //
 // h must be locked.
 func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
-n = round(n, heapArenaBytes)
+n = alignUp(n, heapArenaBytes)

 // First, try the arena pre-reservation.
 v = h.arena.alloc(n, heapArenaBytes, &memstats.heap_sys)
@@ -784,7 +784,7 @@ retry:
 // re-reserve the aligned sub-region. This may race,
 // so we may have to try again.
 sysFree(unsafe.Pointer(p), size+align, nil)
-p = round(p, align)
+p = alignUp(p, align)
 p2 := sysReserve(unsafe.Pointer(p), size)
 if p != uintptr(p2) {
 // Must have raced. Try again.
@@ -798,7 +798,7 @@ retry:
 return p2, size
 default:
 // Trim off the unaligned parts.
-pAligned := round(p, align)
+pAligned := alignUp(p, align)
 sysFree(unsafe.Pointer(p), pAligned-p, nil)
 end := pAligned + size
 endLen := (p + size + align) - end
@@ -976,11 +976,11 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 off := c.tinyoffset
 // Align tiny pointer for required (conservative) alignment.
 if size&7 == 0 {
-off = round(off, 8)
+off = alignUp(off, 8)
 } else if size&3 == 0 {
-off = round(off, 4)
+off = alignUp(off, 4)
 } else if size&1 == 0 {
-off = round(off, 2)
+off = alignUp(off, 2)
 }
 if off+size <= maxTinySize && c.tiny != 0 {
 // The object fits into existing tiny block.
@@ -1313,7 +1313,7 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
 lock(&globalAlloc.mutex)
 persistent = &globalAlloc.persistentAlloc
 }
-persistent.off = round(persistent.off, align)
+persistent.off = alignUp(persistent.off, align)
 if persistent.off+size > persistentChunkSize || persistent.base == nil {
 persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
 if persistent.base == nil {
@@ -1331,7 +1331,7 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
 break
 }
 }
-persistent.off = round(sys.PtrSize, align)
+persistent.off = alignUp(sys.PtrSize, align)
 }
 p := persistent.base.add(persistent.off)
 persistent.off += size
@@ -1377,12 +1377,12 @@ func (l *linearAlloc) init(base, size uintptr) {
 }

 func (l *linearAlloc) alloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
-p := round(l.next, align)
+p := alignUp(l.next, align)
 if p+size > l.end {
 return nil
 }
 l.next = p + size
-if pEnd := round(l.next-1, physPageSize); pEnd > l.mapped {
+if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
 // Transition from Reserved to Prepared to Ready.
 sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
 sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
@@ -70,11 +70,11 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 var head, tail uintptr
 if uintptr(v)&(physHugePageSize-1) != 0 {
 // Compute huge page containing v.
-head = uintptr(v) &^ (physHugePageSize - 1)
+head = alignDown(uintptr(v), physHugePageSize)
 }
 if (uintptr(v)+n)&(physHugePageSize-1) != 0 {
 // Compute huge page containing v+n-1.
-tail = (uintptr(v) + n - 1) &^ (physHugePageSize - 1)
+tail = alignDown(uintptr(v)+n-1, physHugePageSize)
 }

 // Note that madvise will return EINVAL if the flag is
@@ -131,9 +131,9 @@ func sysUsed(v unsafe.Pointer, n uintptr) {
 func sysHugePage(v unsafe.Pointer, n uintptr) {
 if physHugePageSize != 0 {
 // Round v up to a huge page boundary.
-beg := (uintptr(v) + (physHugePageSize - 1)) &^ (physHugePageSize - 1)
+beg := alignUp(uintptr(v), physHugePageSize)
 // Round v+n down to a huge page boundary.
-end := (uintptr(v) + n) &^ (physHugePageSize - 1)
+end := alignDown(uintptr(v)+n, physHugePageSize)

 if beg < end {
 madvise(unsafe.Pointer(beg), end-beg, _MADV_HUGEPAGE)
@@ -407,9 +407,9 @@ okarg:
 // compute size needed for return parameters
 nret := uintptr(0)
 for _, t := range ft.out() {
-nret = round(nret, uintptr(t.align)) + uintptr(t.size)
+nret = alignUp(nret, uintptr(t.align)) + uintptr(t.size)
 }
-nret = round(nret, sys.PtrSize)
+nret = alignUp(nret, sys.PtrSize)

 // make sure we have a finalizer goroutine
 createfing()
@@ -462,8 +462,8 @@ func (s *mspan) physPageBounds() (uintptr, uintptr) {
 end := start + s.npages<<_PageShift
 if physPageSize > _PageSize {
 // Round start and end in.
-start = (start + physPageSize - 1) &^ (physPageSize - 1)
-end &^= physPageSize - 1
+start = alignUp(start, physPageSize)
+end = alignDown(end, physPageSize)
 }
 return start, end
 }
@@ -529,9 +529,9 @@ func (h *mheap) coalesce(s *mspan) {
 // scavenged span.
 boundary := b.startAddr
 if a.scavenged {
-boundary &^= (physPageSize - 1)
+boundary = alignDown(boundary, physPageSize)
 } else {
-boundary = (boundary + physPageSize - 1) &^ (physPageSize - 1)
+boundary = alignUp(boundary, physPageSize)
 }
 a.npages = (boundary - a.startAddr) / pageSize
 b.npages = (b.startAddr + b.npages*pageSize - boundary) / pageSize
@@ -595,8 +595,8 @@ func (s *mspan) hugePages() uintptr {
 end := start + s.npages*pageSize
 if physHugePageSize > pageSize {
 // Round start and end in.
-start = (start + physHugePageSize - 1) &^ (physHugePageSize - 1)
-end &^= physHugePageSize - 1
+start = alignUp(start, physHugePageSize)
+end = alignDown(end, physHugePageSize)
 }
 if start < end {
 return (end - start) >> physHugePageShift
@@ -1307,7 +1307,7 @@ HaveSpan:
 func (h *mheap) grow(npage uintptr) bool {
 ask := npage << _PageShift

-nBase := round(h.curArena.base+ask, physPageSize)
+nBase := alignUp(h.curArena.base+ask, physPageSize)
 if nBase > h.curArena.end {
 // Not enough room in the current arena. Allocate more
 // arena space. This may not be contiguous with the
@@ -1347,7 +1347,7 @@ func (h *mheap) grow(npage uintptr) bool {
 memstats.heap_idle += uint64(asize)

 // Recalculate nBase
-nBase = round(h.curArena.base+ask, physPageSize)
+nBase = alignUp(h.curArena.base+ask, physPageSize)
 }

 // Grow into the current arena.
@@ -1492,11 +1492,11 @@ func (h *mheap) scavengeSplit(t treapIter, size uintptr) *mspan {
 if base <= start {
 return nil
 }
-if physHugePageSize > pageSize && base&^(physHugePageSize-1) >= start {
+if physHugePageSize > pageSize && alignDown(base, physHugePageSize) >= start {
 // We're in danger of breaking apart a huge page, so include the entire
 // huge page in the bound by rounding down to the huge page size.
 // base should still be aligned to pageSize.
-base &^= physHugePageSize - 1
+base = alignDown(base, physHugePageSize)
 }
 if base == start {
 // After all that we rounded base down to s.base(), so no need to split.
@@ -21,5 +21,5 @@ func roundupsize(size uintptr) uintptr {
 if size+_PageSize < size {
 return size
 }
-return round(size, _PageSize)
+return alignUp(size, _PageSize)
 }
@@ -385,7 +385,7 @@ func raceinit() (gctx, pctx uintptr) {
 if end < firstmoduledata.ebss {
 end = firstmoduledata.ebss
 }
-size := round(end-start, _PageSize)
+size := alignUp(end-start, _PageSize)
 racecall(&__tsan_map_shadow, start, size, 0, 0)
 racedatastart = start
 racedataend = start + size
@@ -337,7 +337,7 @@ func stackalloc(n uint32) stack {
 }

 if debug.efence != 0 || stackFromSystem != 0 {
-n = uint32(round(uintptr(n), physPageSize))
+n = uint32(alignUp(uintptr(n), physPageSize))
 v := sysAlloc(uintptr(n), &memstats.stacks_sys)
 if v == nil {
 throw("out of memory (stackalloc)")
@@ -290,11 +290,16 @@ func call1073741824(typ, fn, arg unsafe.Pointer, n, retoffset uint32)

 func systemstack_switch()

-// round n up to a multiple of a. a must be a power of 2.
-func round(n, a uintptr) uintptr {
+// alignUp rounds n up to a multiple of a. a must be a power of 2.
+func alignUp(n, a uintptr) uintptr {
 return (n + a - 1) &^ (a - 1)
 }

+// alignDown rounds n down to a multiple of a. a must be a power of 2.
+func alignDown(n, a uintptr) uintptr {
+return n &^ (a - 1)
+}
+
 // checkASM reports whether assembly runtime checks have passed.
 func checkASM() bool
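The hunk above carries the new definitions. As a hedged sanity sketch (not part of the commit), the snippet below checks that they agree with the manual bit-clearing expressions replaced elsewhere in this diff, such as in sysUnused, sysHugePage, and physPageBounds; the 2 MiB huge-page size is an assumed example value.

package main

import "fmt"

func alignUp(n, a uintptr) uintptr   { return (n + a - 1) &^ (a - 1) }
func alignDown(n, a uintptr) uintptr { return n &^ (a - 1) }

func main() {
	const physHugePageSize uintptr = 2 << 20 // assumed 2 MiB huge pages
	for _, v := range []uintptr{0, 1, physHugePageSize - 1, physHugePageSize, 3 << 20} {
		// Old form: v &^ (physHugePageSize - 1)                      -> alignDown(v, physHugePageSize)
		// Old form: (v + physHugePageSize - 1) &^ (physHugePageSize - 1) -> alignUp(v, physHugePageSize)
		down := v&^(physHugePageSize-1) == alignDown(v, physHugePageSize)
		up := (v+physHugePageSize-1)&^(physHugePageSize-1) == alignUp(v, physHugePageSize)
		fmt.Println(v, down, up) // both comparisons should print true
	}
}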
@@ -932,7 +932,7 @@ func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(u

 // alloc allocates n-byte block.
 func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
-n = round(n, sys.PtrSize)
+n = alignUp(n, sys.PtrSize)
 if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
 if n > uintptr(len(a.head.ptr().data)) {
 throw("trace: alloc too large")