mirror of
https://github.com/golang/go.git
synced 2026-02-06 09:50:02 +00:00
runtime: fix self-deadlock on sbrk platforms
The sbrk mem.go implementation doesn't enforce being called on the systemstack, but it can call back into itself if there's a stack growth. Because the sbrk implementation requires acquiring memlock, it can self-deadlock. For the most part the mem.go API is called on the system stack, but there are cases where we call sysAlloc on the regular Go stack. This is fine in general, except on sbrk platforms because of the aforementioned deadlock. This change, rather than adding a new invariant to mem.go, switches to the systemstack in the mem.go API implementation for sbrk platforms. Change-Id: Ie0f0ea80a8d7578cdeabc8252107e64a5e633856 Reviewed-on: https://go-review.googlesource.com/c/go/+/709775 Reviewed-by: Cherry Mui <cherryyz@google.com> LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
This commit is contained in:
parent
2e52060084
commit
11d5484190
1 changed file with 104 additions and 56 deletions
|
|
@ -48,6 +48,16 @@ type memHdrPtr uintptr
|
|||
func (p memHdrPtr) ptr() *memHdr { return (*memHdr)(unsafe.Pointer(p)) }
|
||||
func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) }
|
||||
|
||||
// memAlloc allocates n bytes from the brk reservation, or if it's full,
|
||||
// the system.
|
||||
//
|
||||
// memlock must be held.
|
||||
//
|
||||
// memAlloc must be called on the system stack, otherwise a stack growth
|
||||
// could cause us to call back into it. Since memlock is held, that could
|
||||
// lead to a self-deadlock.
|
||||
//
|
||||
//go:systemstack
|
||||
func memAlloc(n uintptr) unsafe.Pointer {
|
||||
if p := memAllocNoGrow(n); p != nil {
|
||||
return p
|
||||
|
|
@ -55,6 +65,15 @@ func memAlloc(n uintptr) unsafe.Pointer {
|
|||
return sbrk(n)
|
||||
}
|
||||
|
||||
// memAllocNoGrow attempts to allocate n bytes from the existing brk.
|
||||
//
|
||||
// memlock must be held.
|
||||
//
|
||||
// memAllocNoGrow must be called on the system stack, otherwise a stack growth
|
||||
// could cause us to call back into it. Since memlock is held, that could
|
||||
// lead to a self-deadlock.
|
||||
//
|
||||
//go:systemstack
|
||||
func memAllocNoGrow(n uintptr) unsafe.Pointer {
|
||||
n = memRound(n)
|
||||
var prevp *memHdr
|
||||
|
|
@ -78,6 +97,15 @@ func memAllocNoGrow(n uintptr) unsafe.Pointer {
|
|||
return nil
|
||||
}
|
||||
|
||||
// memFree makes [ap, ap+n) available for reallocation by memAlloc.
|
||||
//
|
||||
// memlock must be held.
|
||||
//
|
||||
// memFree must be called on the system stack, otherwise a stack growth
|
||||
// could cause us to call back into it. Since memlock is held, that could
|
||||
// lead to a self-deadlock.
|
||||
//
|
||||
//go:systemstack
|
||||
func memFree(ap unsafe.Pointer, n uintptr) {
|
||||
n = memRound(n)
|
||||
memclrNoHeapPointers(ap, n)
|
||||
|
|
@ -122,6 +150,15 @@ func memFree(ap unsafe.Pointer, n uintptr) {
|
|||
}
|
||||
}
|
||||
|
||||
// memCheck checks invariants around free list management.
|
||||
//
|
||||
// memlock must be held.
|
||||
//
|
||||
// memCheck must be called on the system stack, otherwise a stack growth
|
||||
// could cause us to call back into it. Since memlock is held, that could
|
||||
// lead to a self-deadlock.
|
||||
//
|
||||
//go:systemstack
|
||||
func memCheck() {
|
||||
if !memDebug {
|
||||
return
|
||||
|
|
@ -158,26 +195,31 @@ func initBloc() {
|
|||
}
|
||||
|
||||
func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
|
||||
lock(&memlock)
|
||||
p := memAlloc(n)
|
||||
memCheck()
|
||||
unlock(&memlock)
|
||||
return p
|
||||
var p uintptr
|
||||
systemstack(func() {
|
||||
lock(&memlock)
|
||||
p = uintptr(memAlloc(n))
|
||||
memCheck()
|
||||
unlock(&memlock)
|
||||
})
|
||||
return unsafe.Pointer(p)
|
||||
}
|
||||
|
||||
func sysFreeOS(v unsafe.Pointer, n uintptr) {
|
||||
lock(&memlock)
|
||||
if uintptr(v)+n == bloc {
|
||||
// Address range being freed is at the end of memory,
|
||||
// so record a new lower value for end of memory.
|
||||
// Can't actually shrink address space because segment is shared.
|
||||
memclrNoHeapPointers(v, n)
|
||||
bloc -= n
|
||||
} else {
|
||||
memFree(v, n)
|
||||
memCheck()
|
||||
}
|
||||
unlock(&memlock)
|
||||
systemstack(func() {
|
||||
lock(&memlock)
|
||||
if uintptr(v)+n == bloc {
|
||||
// Address range being freed is at the end of memory,
|
||||
// so record a new lower value for end of memory.
|
||||
// Can't actually shrink address space because segment is shared.
|
||||
memclrNoHeapPointers(v, n)
|
||||
bloc -= n
|
||||
} else {
|
||||
memFree(v, n)
|
||||
memCheck()
|
||||
}
|
||||
unlock(&memlock)
|
||||
})
|
||||
}
|
||||
|
||||
func sysUnusedOS(v unsafe.Pointer, n uintptr) {
|
||||
|
|
@ -202,49 +244,55 @@ func sysFaultOS(v unsafe.Pointer, n uintptr) {
|
|||
}
|
||||
|
||||
func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
|
||||
lock(&memlock)
|
||||
var p unsafe.Pointer
|
||||
if uintptr(v) == bloc {
|
||||
// Address hint is the current end of memory,
|
||||
// so try to extend the address space.
|
||||
p = sbrk(n)
|
||||
}
|
||||
if p == nil && v == nil {
|
||||
p = memAlloc(n)
|
||||
memCheck()
|
||||
}
|
||||
unlock(&memlock)
|
||||
return p
|
||||
var p uintptr
|
||||
systemstack(func() {
|
||||
lock(&memlock)
|
||||
if uintptr(v) == bloc {
|
||||
// Address hint is the current end of memory,
|
||||
// so try to extend the address space.
|
||||
p = uintptr(sbrk(n))
|
||||
}
|
||||
if p == 0 && v == nil {
|
||||
p = uintptr(memAlloc(n))
|
||||
memCheck()
|
||||
}
|
||||
unlock(&memlock)
|
||||
})
|
||||
return unsafe.Pointer(p)
|
||||
}
|
||||
|
||||
func sysReserveAlignedSbrk(size, align uintptr) (unsafe.Pointer, uintptr) {
|
||||
lock(&memlock)
|
||||
if p := memAllocNoGrow(size + align); p != nil {
|
||||
// We can satisfy the reservation from the free list.
|
||||
// Trim off the unaligned parts.
|
||||
pAligned := alignUp(uintptr(p), align)
|
||||
if startLen := pAligned - uintptr(p); startLen > 0 {
|
||||
memFree(p, startLen)
|
||||
var p uintptr
|
||||
systemstack(func() {
|
||||
lock(&memlock)
|
||||
if base := memAllocNoGrow(size + align); base != nil {
|
||||
// We can satisfy the reservation from the free list.
|
||||
// Trim off the unaligned parts.
|
||||
start := alignUp(uintptr(base), align)
|
||||
if startLen := start - uintptr(base); startLen > 0 {
|
||||
memFree(base, startLen)
|
||||
}
|
||||
end := start + size
|
||||
if endLen := (uintptr(base) + size + align) - end; endLen > 0 {
|
||||
memFree(unsafe.Pointer(end), endLen)
|
||||
}
|
||||
memCheck()
|
||||
unlock(&memlock)
|
||||
p = start
|
||||
return
|
||||
}
|
||||
end := pAligned + size
|
||||
if endLen := (uintptr(p) + size + align) - end; endLen > 0 {
|
||||
memFree(unsafe.Pointer(end), endLen)
|
||||
}
|
||||
memCheck()
|
||||
unlock(&memlock)
|
||||
return unsafe.Pointer(pAligned), size
|
||||
}
|
||||
|
||||
// Round up bloc to align, then allocate size.
|
||||
p := alignUp(bloc, align)
|
||||
r := sbrk(p + size - bloc)
|
||||
if r == nil {
|
||||
p, size = 0, 0
|
||||
} else if l := p - uintptr(r); l > 0 {
|
||||
// Free the area we skipped over for alignment.
|
||||
memFree(r, l)
|
||||
memCheck()
|
||||
}
|
||||
unlock(&memlock)
|
||||
// Round up bloc to align, then allocate size.
|
||||
p = alignUp(bloc, align)
|
||||
r := sbrk(p + size - bloc)
|
||||
if r == nil {
|
||||
p, size = 0, 0
|
||||
} else if l := p - uintptr(r); l > 0 {
|
||||
// Free the area we skipped over for alignment.
|
||||
memFree(r, l)
|
||||
memCheck()
|
||||
}
|
||||
unlock(&memlock)
|
||||
})
|
||||
return unsafe.Pointer(p), size
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue