runtime: randomize heap base address

During initialization, allow randomizing the heap base address by
generating a random uint64 and using its bits to randomize various
portions of the address.

We use the following method to randomize the base address:

* We first generate a random heapArenaBytes-aligned address that we use
  for generating the hints.
* On the first call to mheap.grow, we then generate a random
  pallocChunkBytes-aligned offset into the mmap'd heap region, which we
  use as the base for the heap region.
* We then mark a random number of pages within the page allocator as
  allocated.

Our final randomized "heap base address" becomes the first byte of
the first available page returned by the page allocator. This results
in an address with at least heapAddrBits-gc.PageShift-1 bits of
entropy.
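
As a rough, self-contained sketch of how these three sources of
randomness combine (the constants, the bit-slicing of the single seed,
and the flat addition below are illustrative assumptions, not the
runtime's actual values or code):

// Illustrative sketch only: constants and bit choices are assumptions,
// not the runtime's real configuration or implementation.
package main

import (
	"fmt"
	"math/rand/v2"
)

const (
	heapAddrBits     = 48       // assumed address-space width
	heapArenaBytes   = 64 << 20 // assumed 64 MiB heap arenas
	pallocChunkBytes = 4 << 20  // assumed 4 MiB page-allocator chunks
	pageSize         = 8 << 10  // assumed 8 KiB pages
)

func main() {
	r := rand.Uint64()

	// 1. A heapArenaBytes-aligned address used to generate the mapping hints.
	hintBase := (r % (1 << heapAddrBits)) &^ (heapArenaBytes - 1)

	// 2. A pallocChunkBytes-aligned offset into the mapped heap region,
	// applied on the first heap growth.
	chunkOffset := (r >> 32) % (heapArenaBytes / pallocChunkBytes) * pallocChunkBytes

	// 3. A random number of leading pages marked as allocated, so the first
	// free page lands at a random page boundary within the first chunk.
	paddingPages := (r >> 52) % (pallocChunkBytes / pageSize)

	// The effective "heap base address" is the first byte after the padding.
	base := hintBase + chunkOffset + paddingPages*pageSize
	fmt.Printf("hint %#x + offset %#x + %d padding pages -> base %#x\n",
		hintBase, chunkOffset, paddingPages, base)
}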

Fixes #27583

Change-Id: Ideb4450a5ff747a132f702d563d2a516dec91a88
Reviewed-on: https://go-review.googlesource.com/c/go/+/674835
Reviewed-by: Michael Knyszek <mknyszek@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Roland Shoemaker 2025-05-21 02:03:44 +00:00 committed by Roland Shoemaker
parent 26338a7f69
commit 6669aa3b14
8 changed files with 170 additions and 3 deletions


@@ -972,6 +972,45 @@ func (p *pageAlloc) free(base, npages uintptr) {
	p.update(base, npages, true, false)
}
// markRandomPaddingPages marks the range of memory [base, base+npages*pageSize)
// as both allocated and scavenged. This is used for randomizing the base heap
// address. Both the alloc and scav bits are set so that the pages are not used
// and so the memory accounting stats are correctly calculated.
//
// Similar to allocRange, it also updates the summaries to reflect the
// newly-updated bitmap.
//
// p.mheapLock must be held.
func (p *pageAlloc) markRandomPaddingPages(base uintptr, npages uintptr) {
	assertLockHeld(p.mheapLock)
	limit := base + npages*pageSize - 1
	sc, ec := chunkIndex(base), chunkIndex(limit)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit)
	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		chunk := p.chunkOf(sc)
		chunk.allocRange(si, ei+1-si)
		p.scav.index.alloc(sc, ei+1-si)
		chunk.scavenged.setRange(si, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		chunk := p.chunkOf(sc)
		chunk.allocRange(si, pallocChunkPages-si)
		p.scav.index.alloc(sc, pallocChunkPages-si)
		chunk.scavenged.setRange(si, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			chunk := p.chunkOf(c)
			chunk.allocAll()
			p.scav.index.alloc(c, pallocChunkPages)
			chunk.scavenged.setAll()
		}
		chunk = p.chunkOf(ec)
		chunk.allocRange(0, ei+1)
		p.scav.index.alloc(ec, ei+1)
		chunk.scavenged.setRange(0, ei+1)
	}
	p.update(base, npages, true, true)
}
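
For illustration, a hedged sketch of how a caller might drive this
helper when padding the start of a freshly mapped heap region;
padHeapStart and its seed parameter are hypothetical and not part of
this change:

// Hypothetical caller-side sketch (not in this CL): derive up to one
// chunk's worth of padding pages from a random seed and mark them so the
// page allocator never hands them out or counts them as in use.
func (h *mheap) padHeapStart(regionBase uintptr, seed uint64) uintptr {
	paddingPages := uintptr(seed) % pallocChunkPages
	if paddingPages == 0 {
		return regionBase
	}
	lock(&h.lock)
	// markRandomPaddingPages requires the heap lock to be held.
	h.pages.markRandomPaddingPages(regionBase, paddingPages)
	unlock(&h.lock)
	// The first page after the padding becomes the effective heap base.
	return regionBase + paddingPages*pageSize
}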
const (
	pallocSumBytes = unsafe.Sizeof(pallocSum(0))