runtime: remove scavAddr in favor of address ranges
This change removes the concept of s.scavAddr in favor of explicitly
reserving and unreserving address ranges. s.scavAddr has several
problems with raciness that can cause the scavenger to miss updates, or
move it back unnecessarily, forcing future scavenge calls to iterate
over searched address space unnecessarily.

This change achieves this by replacing scavAddr with a second
addrRanges which is cloned from s.inUse at the end of each sweep phase.
Ranges from this second addrRanges are then reserved by scavengers
(with the reservation size proportional to the heap size) who are then
able to safely iterate over those ranges without worry of another
scavenger coming in.

Fixes #35788.

Change-Id: Ief01ae170384174875118742f6c26b2a41cbb66d
Reviewed-on: https://go-review.googlesource.com/c/go/+/208378
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Austin Clements <austin@google.com>
commit 55ec5182d7
parent b1a48af7e8

8 changed files with 312 additions and 178 deletions
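The commit message describes the core mechanism: instead of a single, racy scavenge address, the scavenger works from a set of address ranges cloned from the in-use set each generation, and each scavenger reserves a piece of that set (sized in proportion to the heap) before walking it, returning whatever it did not finish. The Go sketch below is a minimal illustration of that reserve/unreserve pattern under stated assumptions: addrRange, scavengeRanges, reserve, and unreserve are simplified stand-ins, not the runtime's actual types or method signatures, and the fixed max argument stands in for the heap-proportional reservation size.

```go
package main

import (
	"fmt"
	"sync"
)

// addrRange is a half-open range of addresses [base, limit).
type addrRange struct {
	base, limit uintptr
}

func (r addrRange) size() uintptr { return r.limit - r.base }

// scavengeRanges holds the address space still to be visited in the current
// scavenge generation. Per the commit message, the real structure is an
// addrRanges cloned from s.inUse at the end of each sweep phase; this is a
// simplified stand-in.
type scavengeRanges struct {
	mu     sync.Mutex
	ranges []addrRange // sorted, non-overlapping
}

// reserve carves up to max bytes off the top of the remaining address space
// and hands it to the caller. Because the range is removed from the shared
// set while reserved, the caller can walk it without racing with other
// scavengers.
func (s *scavengeRanges) reserve(max uintptr) (addrRange, bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for len(s.ranges) > 0 {
		last := &s.ranges[len(s.ranges)-1]
		if last.size() == 0 {
			s.ranges = s.ranges[:len(s.ranges)-1]
			continue
		}
		r := *last
		if r.size() > max {
			// Split: take only the top max bytes, leave the rest behind.
			r.base = r.limit - max
			last.limit = r.base
		} else {
			s.ranges = s.ranges[:len(s.ranges)-1]
		}
		return r, true
	}
	return addrRange{}, false
}

// unreserve returns the unscavenged remainder of a reservation to the shared
// set so another scavenger (or a later call) can pick it up.
func (s *scavengeRanges) unreserve(r addrRange) {
	if r.size() == 0 {
		return
	}
	s.mu.Lock()
	s.ranges = append(s.ranges, r)
	s.mu.Unlock()
}

func main() {
	s := &scavengeRanges{ranges: []addrRange{{base: 0x1000, limit: 0x9000}}}

	// In the runtime the reservation size scales with the heap size; here we
	// simply ask for 0x2000 bytes. r covers [0x7000, 0x9000).
	r, ok := s.reserve(0x2000)
	fmt.Println(ok, r)

	// Suppose we only scavenged the top half of the reservation before being
	// asked to stop; give the untouched bottom half back.
	s.unreserve(addrRange{base: r.base, limit: r.base + 0x1000})
}
```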
@@ -735,9 +735,12 @@ func (p *PageAlloc) Free(base, npages uintptr) {
 func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
 	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
 }
-func (p *PageAlloc) Scavenge(nbytes uintptr, locked bool) (r uintptr) {
+func (p *PageAlloc) Scavenge(nbytes uintptr, mayUnlock bool) (r uintptr) {
+	pp := (*pageAlloc)(p)
 	systemstack(func() {
-		r = (*pageAlloc)(p).scavenge(nbytes, locked)
+		lock(pp.mheapLock)
+		r = pp.scavenge(nbytes, mayUnlock)
+		unlock(pp.mheapLock)
 	})
 	return
 }
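The hunk above renames the test wrapper's locked parameter to mayUnlock and has the wrapper take pp.mheapLock itself before calling scavenge. Read together with the commit message, the convention appears to be that the caller always holds the heap lock, and mayUnlock says whether scavenging is allowed to drop it around the slow release-to-OS step. The sketch below illustrates only that convention; scavengeUnderLock and releaseToOS are hypothetical stand-ins, not runtime functions.

```go
package main

import (
	"fmt"
	"sync"
)

// releaseToOS stands in for the slow path that actually returns pages to the
// operating system; it is a stub here, not the runtime's implementation.
func releaseToOS(base, npages uintptr) {
	fmt.Printf("released %d pages at %#x\n", npages, base)
}

// scavengeUnderLock must be called with mu held. If mayUnlock is true, it may
// temporarily drop mu around the slow release step so other users of the lock
// are not blocked, reacquiring it before returning.
func scavengeUnderLock(mu *sync.Mutex, base, npages uintptr, mayUnlock bool) {
	if mayUnlock {
		mu.Unlock()
	}
	releaseToOS(base, npages)
	if mayUnlock {
		mu.Lock()
	}
}

func main() {
	var mu sync.Mutex
	mu.Lock()
	scavengeUnderLock(&mu, 0x200000, 4, true)
	mu.Unlock()
}
```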
@@ -819,7 +822,6 @@ func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
 				}
 			}
 		}
-		p.resetScavengeAddr()
 
 		// Apply alloc state.
 		for _, s := range init {
@@ -833,6 +835,11 @@ func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
 		// Update heap metadata for the allocRange calls above.
 		p.update(addr, pallocChunkPages, false, false)
 	}
+	systemstack(func() {
+		lock(p.mheapLock)
+		p.scavengeStartGen()
+		unlock(p.mheapLock)
+	})
 	return (*PageAlloc)(p)
 }
 
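With resetScavengeAddr gone (removed in the previous hunk), the test constructor instead starts a scavenge generation under the heap lock. Per the commit message, starting a generation amounts to cloning the heap's in-use address ranges into the set the scavenger will work through. Below is a minimal sketch of that idea; the field names (scav.inUse, scav.gen) and the pageAllocSketch type are assumptions for illustration, not the runtime's actual layout.

```go
package main

import "fmt"

// addrRange is a half-open range of addresses [base, limit).
type addrRange struct{ base, limit uintptr }

// pageAllocSketch models just enough state to show what "starting a scavenge
// generation" means per the commit message. Field names are illustrative.
type pageAllocSketch struct {
	inUse []addrRange // address ranges currently backing the heap
	scav  struct {
		inUse []addrRange // ranges the scavenger has yet to visit this generation
		gen   uint32      // generation counter
	}
}

// scavengeStartGen begins a new generation: it snapshots the heap's in-use
// ranges as the scavenger's work set, so reservations in this generation are
// carved out of a stable copy rather than a moving target like the old
// scavAddr.
func (p *pageAllocSketch) scavengeStartGen() {
	p.scav.inUse = append([]addrRange(nil), p.inUse...)
	p.scav.gen++
}

func main() {
	p := &pageAllocSketch{inUse: []addrRange{{0x1000, 0x4000}, {0x8000, 0x9000}}}
	p.scavengeStartGen()
	fmt.Println(p.scav.gen, len(p.scav.inUse)) // 1 2
}
```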