Mirror of https://github.com/golang/go.git
runtime: use a smaller arena size on Wasm
On Wasm, some programs have very small heaps. Currently, we use a 4 MB arena size (as on all other 32-bit platforms). Even a very small program needs to allocate one heap arena, 4 MB in size, at a 4 MB-aligned address, so it needs up to 8 MB of linear memory while only a small portion is actually used. On Wasm, small programs are not uncommon (e.g. WASI plugins), and users are concerned about the memory usage.

This CL switches to a smaller arena size, along with a smaller page allocator chunk size (both are now 512 KB), so the heap grows at 512 KB granularity. A hello-world program now uses less than 3 MB of linear memory instead of 8 MB.

Change-Id: Ibd66c1fa6e794a12c00906cbacc8f2e410f196c4
Reviewed-on: https://go-review.googlesource.com/c/go/+/683296
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
This commit is contained in:
parent 3a5df9d2b2 · commit 53009b26dd
9 changed files with 378 additions and 326 deletions
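A quick illustration of the 8 MB figure from the message above (a sketch, not part of the commit; the 5 MB starting offset is made up): a heap arena must start at an address aligned to the arena size, so growing linear memory for the first 4 MB arena can cost almost 4 MB of alignment padding on top of the arena itself.

    package main

    import "fmt"

    func main() {
        const arenaSize = 4 << 20 // old arena size on Wasm
        end := uintptr(5 << 20)   // hypothetical current end of linear memory
        // Round up to the next arenaSize-aligned boundary.
        aligned := (end + arenaSize - 1) &^ (arenaSize - 1)
        fmt.Printf("padding %d + arena %d = %d bytes of growth\n",
            aligned-end, arenaSize, aligned-end+arenaSize)
    }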
@@ -233,19 +233,22 @@ const (
     // ios/arm64 40 4MB 1 256K (2MB)
     // */32-bit 32 4MB 1 1024 (4KB)
     // */mips(le) 31 4MB 1 512 (2KB)
+    // wasm 32 512KB 1 8192 (64KB)

     // heapArenaBytes is the size of a heap arena. The heap
     // consists of mappings of size heapArenaBytes, aligned to
     // heapArenaBytes. The initial heap mapping is one arena.
     //
-    // This is currently 64MB on 64-bit non-Windows and 4MB on
-    // 32-bit and on Windows. We use smaller arenas on Windows
-    // because all committed memory is charged to the process,
-    // even if it's not touched. Hence, for processes with small
-    // heaps, the mapped arena space needs to be commensurate.
-    // This is particularly important with the race detector,
-    // since it significantly amplifies the cost of committed
-    // memory.
+    // This is currently 64MB on 64-bit non-Windows, 4MB on
+    // 32-bit and on Windows, and 512KB on Wasm. We use smaller
+    // arenas on Windows because all committed memory is charged
+    // to the process, even if it's not touched. Hence, for
+    // processes with small heaps, the mapped arena space needs
+    // to be commensurate. This is particularly important with
+    // the race detector, since it significantly amplifies the
+    // cost of committed memory. We use smaller arenas on Wasm
+    // because some Wasm programs have very small heap, and
+    // everything in the Wasm linear memory is charged.
     heapArenaBytes = 1 << logHeapArenaBytes

     heapArenaWords = heapArenaBytes / goarch.PtrSize

@@ -253,7 +256,7 @@ const (
     // logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
     // prefer using heapArenaBytes where possible (we need the
     // constant to compute some other constants).
-    logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64
+    logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (9+10)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64

     // heapArenaBitmapWords is the size of each heap arena's bitmap in uintptrs.
     heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize)
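A sanity check of the exponent arithmetic above (not part of the commit): the goos/goarch flags are each 0 or 1, so exactly one term of logHeapArenaBytes is selected, and the new Wasm term gives 9+10 = 19, i.e. 512 KB.

    package main

    import "fmt"

    func main() {
        fmt.Println(1<<(6+20) == 64<<20)  // 64 MB on 64-bit non-Windows
        fmt.Println(1<<(2+20) == 4<<20)   // 4 MB on 32-bit and on Windows
        fmt.Println(1<<(9+10) == 512<<10) // 512 KB on Wasm (the new term)
    }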
@@ -50,7 +50,7 @@ const (
     //
     // Must be a multiple of the pageInUse bitmap element size and
     // must also evenly divide pagesPerArena.
-    pagesPerSpanRoot = 512
+    pagesPerSpanRoot = min(512, pagesPerArena)
 )

 // gcPrepareMarkRoots queues root scanning jobs (stacks, globals, and
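Note that min here is the language built-in (Go 1.21+), which is valid in constant expressions, so pagesPerSpanRoot remains an untyped constant. A minimal sketch with a made-up pagesPerArena value:

    package main

    const (
        pagesPerArena    = 64 // hypothetical value for a small-arena platform
        pagesPerSpanRoot = min(512, pagesPerArena)
    )

    func main() {
        println(pagesPerSpanRoot) // 64
    }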
@@ -121,13 +121,17 @@ func TestPallocDataFindScavengeCandidate(t *testing.T) {
             max:  PallocChunkPages,
             want: BitRange{41, 1},
         },
-        "MultiMin1": {
-            alloc: []BitRange{{0, 63}, {65, 20}, {87, PallocChunkPages - 87}},
     }
+    if PallocChunkPages >= 512 {
+        // avoid constant overflow when PallocChunkPages is small
+        var pallocChunkPages uint = PallocChunkPages
+        tests["MultiMin1"] = test{
+            alloc: []BitRange{{0, 63}, {65, 20}, {87, pallocChunkPages - 87}},
             scavenged: []BitRange{{86, 1}},
             min:       1,
             max:       PallocChunkPages,
             want:      BitRange{85, 1},
-        },
+        }
+    }
     // Try out different page minimums.
     for m := uintptr(1); m <= 64; m *= 2 {
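The `var pallocChunkPages uint = PallocChunkPages` shadow exists because untyped constant arithmetic is checked at compile time, even in branches the guard makes unreachable: with a small chunk size, `PallocChunkPages - 87` is a negative constant assigned to a uint field and the file would not compile. Copying the value into a variable defers the subtraction to run time, where `if PallocChunkPages >= 512` skips it. A standalone sketch (the constant 64 stands in for a Wasm-sized chunk):

    package main

    import "fmt"

    func main() {
        const pages = 64 // stand-in for a small PallocChunkPages
        // _ = uint(pages - 87) // does not compile: constant -23 overflows uint
        var p uint = pages // shadow trick: subtraction becomes a run-time operation
        if pages >= 512 {
            fmt.Println(p - 87) // never reached for small chunk sizes
        }
    }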
@@ -162,6 +166,7 @@ func TestPallocDataFindScavengeCandidate(t *testing.T) {
             max:  PallocChunkPages,
             want: BitRange{PallocChunkPages - uint(m), uint(m)},
         }
+        if PallocChunkPages >= 512 {
         tests["Straddle64"+suffix] = test{
             alloc: []BitRange{{0, 64 - uint(m)}, {64 + uint(m), PallocChunkPages - (64 + uint(m))}},
             min:   m,
@@ -182,6 +187,7 @@ func TestPallocDataFindScavengeCandidate(t *testing.T) {
             max:  3 * m,
             want: BitRange{128, 3 * uint(m)},
         }
+        }
         tests["Max0"+suffix] = test{
             scavenged: []BitRange{{0, PallocChunkPages - uint(m)}},
             min:       m,
@@ -204,12 +210,17 @@ func TestPallocDataFindScavengeCandidate(t *testing.T) {
             }
         }
         if m > 1 {
+            if PallocChunkPages >= m*2 {
             tests["MaxUnaligned"+suffix] = test{
                 scavenged: []BitRange{{0, PallocChunkPages - uint(m*2-1)}},
                 min:       m,
                 max:       m - 2,
                 want:      BitRange{PallocChunkPages - uint(m), uint(m)},
             }
+            }
+            if PallocChunkPages >= 512 {
+                // avoid constant overflow when PallocChunkPages is small
+                var PallocChunkPages uint = PallocChunkPages
             tests["SkipSmall"+suffix] = test{
                 alloc: []BitRange{{0, 64 - uint(m)}, {64, 5}, {70, 11}, {82, PallocChunkPages - 82}},
                 min:   m,
@@ -222,6 +233,7 @@ func TestPallocDataFindScavengeCandidate(t *testing.T) {
                 max:  m,
                 want: BitRange{64 - uint(m), uint(m)},
             }
+            }
             tests["MaxLessThan"+suffix] = test{
                 scavenged: []BitRange{{0, PallocChunkPages - uint(m)}},
                 min:       m,
@@ -641,7 +653,7 @@ func TestScavengeIndex(t *testing.T) {
         mark func(markFunc)
         find func(findFunc)
     }
-    for _, test := range []testCase{
+    tests := []testCase{
         {
             name: "Uninitialized",
             mark: func(_ markFunc) {},
@@ -692,26 +704,6 @@ func TestScavengeIndex(t *testing.T) {
                 find(BaseChunkIdx, PallocChunkPages-1)
             },
         },
-        {
-            name: "TwoChunks",
-            mark: func(mark markFunc) {
-                mark(PageBase(BaseChunkIdx, 128), PageBase(BaseChunkIdx+1, 128))
-            },
-            find: func(find findFunc) {
-                find(BaseChunkIdx+1, 127)
-                find(BaseChunkIdx, PallocChunkPages-1)
-            },
-        },
-        {
-            name: "TwoChunksOffset",
-            mark: func(mark markFunc) {
-                mark(PageBase(BaseChunkIdx+7, 128), PageBase(BaseChunkIdx+8, 129))
-            },
-            find: func(find findFunc) {
-                find(BaseChunkIdx+8, 128)
-                find(BaseChunkIdx+7, PallocChunkPages-1)
-            },
-        },
         {
             name: "SevenChunksOffset",
             mark: func(mark markFunc) {
@@ -793,7 +785,32 @@ func TestScavengeIndex(t *testing.T) {
                 }
             },
         },
-    } {
+    }
+    if PallocChunkPages >= 512 {
+        tests = append(tests,
+            testCase{
+                name: "TwoChunks",
+                mark: func(mark markFunc) {
+                    mark(PageBase(BaseChunkIdx, 128), PageBase(BaseChunkIdx+1, 128))
+                },
+                find: func(find findFunc) {
+                    find(BaseChunkIdx+1, 127)
+                    find(BaseChunkIdx, PallocChunkPages-1)
+                },
+            },
+            testCase{
+                name: "TwoChunksOffset",
+                mark: func(mark markFunc) {
+                    mark(PageBase(BaseChunkIdx+7, 128), PageBase(BaseChunkIdx+8, 129))
+                },
+                find: func(find findFunc) {
+                    find(BaseChunkIdx+8, 128)
+                    find(BaseChunkIdx+7, PallocChunkPages-1)
+                },
+            },
+        )
+    }
+    for _, test := range tests {
         test := test
         t.Run("Bg/"+test.name, func(t *testing.T) {
             mark, find, nextGen := setup(t, false)
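This restructuring follows the same pattern as the map-based tests: give the case list a name so that chunk-size-dependent cases can be appended conditionally before the loop runs. A reduced sketch (testCase is trimmed to a name field; the constant is a hypothetical Wasm-like value):

    package main

    import "fmt"

    type testCase struct{ name string }

    const pallocChunkPages = 64 // hypothetical Wasm-like chunk size

    func main() {
        tests := []testCase{{name: "Uninitialized"}}
        if pallocChunkPages >= 512 {
            tests = append(tests,
                testCase{name: "TwoChunks"},
                testCase{name: "TwoChunksOffset"},
            )
        }
        for _, test := range tests {
            fmt.Println(test.name) // only "Uninitialized" on small-chunk platforms
        }
    }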
@@ -830,9 +847,11 @@ func TestScavengeIndex(t *testing.T) {
 }

 func TestScavChunkDataPack(t *testing.T) {
+    if PallocChunkPages >= 512 {
     if !CheckPackScavChunkData(1918237402, 512, 512, 0b11) {
         t.Error("failed pack/unpack check for scavChunkData 1")
     }
+    }
     if !CheckPackScavChunkData(^uint32(0), 12, 0, 0b00) {
         t.Error("failed pack/unpack check for scavChunkData 2")
     }
@@ -47,7 +47,7 @@ const (
     //
     // Must be a multiple of the pageInUse bitmap element size and
     // must also evenly divide pagesPerArena.
-    pagesPerReclaimerChunk = 512
+    pagesPerReclaimerChunk = min(512, pagesPerArena)

     // physPageAlignedStacks indicates whether stack allocations must be
     // physical page aligned. This is a requirement for MAP_STACK on
@@ -48,6 +48,7 @@
 package runtime

 import (
+    "internal/goarch"
     "internal/runtime/atomic"
     "internal/runtime/gc"
     "unsafe"
@@ -55,10 +56,12 @@ import (

 const (
     // The size of a bitmap chunk, i.e. the amount of bits (that is, pages) to consider
-    // in the bitmap at once.
+    // in the bitmap at once. It is 4MB on most platforms, except on Wasm it is 512KB.
+    // We use a smaller chuck size on Wasm for the same reason as the smaller arena
+    // size (see heapArenaBytes).
     pallocChunkPages = 1 << logPallocChunkPages
     pallocChunkBytes = pallocChunkPages * pageSize
-    logPallocChunkPages = 9
+    logPallocChunkPages = 9*(1-goarch.IsWasm) + 6*goarch.IsWasm
     logPallocChunkBytes = logPallocChunkPages + gc.PageShift

     // The number of radix bits for each level.
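Checking the new constant against the runtime's 8 KB page size (gc.PageShift is 13; this check is not part of the commit): logPallocChunkPages evaluates to 9 off Wasm and 6 on Wasm, so a chunk is 512 pages (4 MB) or 64 pages (512 KB).

    package main

    import "fmt"

    func main() {
        const pageShift = 13                    // runtime page size: 8 KB
        fmt.Println(1<<9<<pageShift == 4<<20)   // 512 pages -> 4 MB chunk
        fmt.Println(1<<6<<pageShift == 512<<10) // 64 pages -> 512 KB chunk
    }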
@@ -220,6 +223,7 @@ type pageAlloc struct {
     // heapAddrBits | L1 Bits | L2 Bits | L2 Entry Size
     // ------------------------------------------------
     // 32           | 0       | 10      | 128 KiB
+    // 32 (wasm)    | 0       | 13      | 128 KiB
     // 33 (iOS)     | 0       | 11      | 256 KiB
     // 48           | 13      | 13      | 1 MiB
     //
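The new wasm row follows from the chunk size: with 32 address bits and 512 KB (2^19-byte) chunks, the summary structure must cover 2^(32-19) = 2^13 chunks, and with no L1 level all 13 bits land in L2. A quick arithmetic check (not from the commit):

    package main

    import "fmt"

    func main() {
        const heapAddrBits = 32        // wasm
        const logPallocChunkBytes = 19 // 512 KB chunks
        fmt.Println(heapAddrBits - logPallocChunkBytes) // 13 L2 bits
    }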
@@ -343,38 +343,6 @@ func TestPageAllocAlloc(t *testing.T) {
                 BaseChunkIdx: {{0, 25}},
             },
         },
-        "AllFree64": {
-            before: map[ChunkIdx][]BitRange{
-                BaseChunkIdx: {},
-            },
-            scav: map[ChunkIdx][]BitRange{
-                BaseChunkIdx: {{21, 1}, {63, 65}},
-            },
-            hits: []hit{
-                {64, PageBase(BaseChunkIdx, 0), 2 * PageSize},
-                {64, PageBase(BaseChunkIdx, 64), 64 * PageSize},
-                {64, PageBase(BaseChunkIdx, 128), 0},
-            },
-            after: map[ChunkIdx][]BitRange{
-                BaseChunkIdx: {{0, 192}},
-            },
-        },
-        "AllFree65": {
-            before: map[ChunkIdx][]BitRange{
-                BaseChunkIdx: {},
-            },
-            scav: map[ChunkIdx][]BitRange{
-                BaseChunkIdx: {{129, 1}},
-            },
-            hits: []hit{
-                {65, PageBase(BaseChunkIdx, 0), 0},
-                {65, PageBase(BaseChunkIdx, 65), PageSize},
-                {65, PageBase(BaseChunkIdx, 130), 0},
-            },
-            after: map[ChunkIdx][]BitRange{
-                BaseChunkIdx: {{0, 195}},
-            },
-        },
         "ExhaustPallocChunkPages-3": {
             before: map[ChunkIdx][]BitRange{
                 BaseChunkIdx: {},
@@ -410,25 +378,6 @@ func TestPageAllocAlloc(t *testing.T) {
                 BaseChunkIdx: {{0, PallocChunkPages}},
             },
         },
-        "StraddlePallocChunkPages": {
-            before: map[ChunkIdx][]BitRange{
-                BaseChunkIdx:     {{0, PallocChunkPages / 2}},
-                BaseChunkIdx + 1: {{PallocChunkPages / 2, PallocChunkPages / 2}},
-            },
-            scav: map[ChunkIdx][]BitRange{
-                BaseChunkIdx:     {},
-                BaseChunkIdx + 1: {{3, 100}},
-            },
-            hits: []hit{
-                {PallocChunkPages, PageBase(BaseChunkIdx, PallocChunkPages/2), 100 * PageSize},
-                {PallocChunkPages, 0, 0},
-                {1, 0, 0},
-            },
-            after: map[ChunkIdx][]BitRange{
-                BaseChunkIdx:     {{0, PallocChunkPages}},
-                BaseChunkIdx + 1: {{0, PallocChunkPages}},
-            },
-        },
         "StraddlePallocChunkPages+1": {
             before: map[ChunkIdx][]BitRange{
                 BaseChunkIdx: {{0, PallocChunkPages / 2}},
@@ -489,28 +438,6 @@ func TestPageAllocAlloc(t *testing.T) {
                 BaseChunkIdx + 0x41: {{0, PallocChunkPages}},
             },
         },
-        "StraddlePallocChunkPages*2": {
-            before: map[ChunkIdx][]BitRange{
-                BaseChunkIdx:     {{0, PallocChunkPages / 2}},
-                BaseChunkIdx + 1: {},
-                BaseChunkIdx + 2: {{PallocChunkPages / 2, PallocChunkPages / 2}},
-            },
-            scav: map[ChunkIdx][]BitRange{
-                BaseChunkIdx:     {{0, 7}},
-                BaseChunkIdx + 1: {{3, 5}, {121, 10}},
-                BaseChunkIdx + 2: {{PallocChunkPages/2 + 12, 2}},
-            },
-            hits: []hit{
-                {PallocChunkPages * 2, PageBase(BaseChunkIdx, PallocChunkPages/2), 15 * PageSize},
-                {PallocChunkPages * 2, 0, 0},
-                {1, 0, 0},
-            },
-            after: map[ChunkIdx][]BitRange{
-                BaseChunkIdx:     {{0, PallocChunkPages}},
-                BaseChunkIdx + 1: {{0, PallocChunkPages}},
-                BaseChunkIdx + 2: {{0, PallocChunkPages}},
-            },
-        },
         "StraddlePallocChunkPages*5/4": {
             before: map[ChunkIdx][]BitRange{
                 BaseChunkIdx: {{0, PallocChunkPages}},
@@ -536,7 +463,60 @@ func TestPageAllocAlloc(t *testing.T) {
                 BaseChunkIdx + 3: {{0, PallocChunkPages}},
             },
         },
-        "AllFreePallocChunkPages*7+5": {
+    }
+    if PallocChunkPages >= 512 {
+        tests["AllFree64"] = test{
+            before: map[ChunkIdx][]BitRange{
+                BaseChunkIdx: {},
+            },
+            scav: map[ChunkIdx][]BitRange{
+                BaseChunkIdx: {{21, 1}, {63, 65}},
+            },
+            hits: []hit{
+                {64, PageBase(BaseChunkIdx, 0), 2 * PageSize},
+                {64, PageBase(BaseChunkIdx, 64), 64 * PageSize},
+                {64, PageBase(BaseChunkIdx, 128), 0},
+            },
+            after: map[ChunkIdx][]BitRange{
+                BaseChunkIdx: {{0, 192}},
+            },
+        }
+        tests["AllFree65"] = test{
+            before: map[ChunkIdx][]BitRange{
+                BaseChunkIdx: {},
+            },
+            scav: map[ChunkIdx][]BitRange{
+                BaseChunkIdx: {{129, 1}},
+            },
+            hits: []hit{
+                {65, PageBase(BaseChunkIdx, 0), 0},
+                {65, PageBase(BaseChunkIdx, 65), PageSize},
+                {65, PageBase(BaseChunkIdx, 130), 0},
+            },
+            after: map[ChunkIdx][]BitRange{
+                BaseChunkIdx: {{0, 195}},
+            },
+        }
+        tests["StraddlePallocChunkPages"] = test{
+            before: map[ChunkIdx][]BitRange{
+                BaseChunkIdx:     {{0, PallocChunkPages / 2}},
+                BaseChunkIdx + 1: {{PallocChunkPages / 2, PallocChunkPages / 2}},
+            },
+            scav: map[ChunkIdx][]BitRange{
+                BaseChunkIdx:     {},
+                BaseChunkIdx + 1: {{3, 100}},
+            },
+            hits: []hit{
+                {PallocChunkPages, PageBase(BaseChunkIdx, PallocChunkPages/2), 100 * PageSize},
+                {PallocChunkPages, 0, 0},
+                {1, 0, 0},
+            },
+            after: map[ChunkIdx][]BitRange{
+                BaseChunkIdx:     {{0, PallocChunkPages}},
+                BaseChunkIdx + 1: {{0, PallocChunkPages}},
+            },
+        }
+        tests["AllFreePallocChunkPages*7+5"] = test{
             before: map[ChunkIdx][]BitRange{
                 BaseChunkIdx:     {},
                 BaseChunkIdx + 1: {},
@@ -572,7 +552,29 @@ func TestPageAllocAlloc(t *testing.T) {
                 BaseChunkIdx + 6: {{0, PallocChunkPages}},
                 BaseChunkIdx + 7: {{0, 6}},
             },
+        }
+        tests["StraddlePallocChunkPages*2"] = test{
+            before: map[ChunkIdx][]BitRange{
+                BaseChunkIdx:     {{0, PallocChunkPages / 2}},
+                BaseChunkIdx + 1: {},
+                BaseChunkIdx + 2: {{PallocChunkPages / 2, PallocChunkPages / 2}},
+            },
+            scav: map[ChunkIdx][]BitRange{
+                BaseChunkIdx:     {{0, 7}},
+                BaseChunkIdx + 1: {{3, 5}, {121, 10}},
+                BaseChunkIdx + 2: {{PallocChunkPages/2 + 12, 2}},
+            },
+            hits: []hit{
+                {PallocChunkPages * 2, PageBase(BaseChunkIdx, PallocChunkPages/2), 15 * PageSize},
+                {PallocChunkPages * 2, 0, 0},
+                {1, 0, 0},
+            },
+            after: map[ChunkIdx][]BitRange{
+                BaseChunkIdx:     {{0, PallocChunkPages}},
+                BaseChunkIdx + 1: {{0, PallocChunkPages}},
+                BaseChunkIdx + 2: {{0, PallocChunkPages}},
+            },
+        }
+    }
     // Disable these tests on iOS since we have a small address space.
     // See #46860.
@@ -754,12 +756,13 @@ func TestPageAllocFree(t *testing.T) {
     if GOOS == "openbsd" && testing.Short() {
         t.Skip("skipping because virtual memory is limited; see #36210")
     }
-    tests := map[string]struct {
+    type test struct {
         before map[ChunkIdx][]BitRange
         after  map[ChunkIdx][]BitRange
         npages uintptr
         frees  []uintptr
-    }{
+    }
+    tests := map[string]test{
         "Free1": {
             npages: 1,
             before: map[ChunkIdx][]BitRange{
@@ -840,34 +843,6 @@ func TestPageAllocFree(t *testing.T) {
                 BaseChunkIdx: {{25, PallocChunkPages - 25}},
             },
         },
-        "Free64": {
-            npages: 64,
-            before: map[ChunkIdx][]BitRange{
-                BaseChunkIdx: {{0, PallocChunkPages}},
-            },
-            frees: []uintptr{
-                PageBase(BaseChunkIdx, 0),
-                PageBase(BaseChunkIdx, 64),
-                PageBase(BaseChunkIdx, 128),
-            },
-            after: map[ChunkIdx][]BitRange{
-                BaseChunkIdx: {{192, PallocChunkPages - 192}},
-            },
-        },
-        "Free65": {
-            npages: 65,
-            before: map[ChunkIdx][]BitRange{
-                BaseChunkIdx: {{0, PallocChunkPages}},
-            },
-            frees: []uintptr{
-                PageBase(BaseChunkIdx, 0),
-                PageBase(BaseChunkIdx, 65),
-                PageBase(BaseChunkIdx, 130),
-            },
-            after: map[ChunkIdx][]BitRange{
-                BaseChunkIdx: {{195, PallocChunkPages - 195}},
-            },
-        },
         "FreePallocChunkPages": {
             npages: PallocChunkPages,
             before: map[ChunkIdx][]BitRange{
@@ -965,6 +940,38 @@ func TestPageAllocFree(t *testing.T) {
             },
         },
     }
+    if PallocChunkPages >= 512 {
+        // avoid constant overflow when PallocChunkPages is small
+        var PallocChunkPages uint = PallocChunkPages
+        tests["Free64"] = test{
+            npages: 64,
+            before: map[ChunkIdx][]BitRange{
+                BaseChunkIdx: {{0, PallocChunkPages}},
+            },
+            frees: []uintptr{
+                PageBase(BaseChunkIdx, 0),
+                PageBase(BaseChunkIdx, 64),
+                PageBase(BaseChunkIdx, 128),
+            },
+            after: map[ChunkIdx][]BitRange{
+                BaseChunkIdx: {{192, PallocChunkPages - 192}},
+            },
+        }
+        tests["Free65"] = test{
+            npages: 65,
+            before: map[ChunkIdx][]BitRange{
+                BaseChunkIdx: {{0, PallocChunkPages}},
+            },
+            frees: []uintptr{
+                PageBase(BaseChunkIdx, 0),
+                PageBase(BaseChunkIdx, 65),
+                PageBase(BaseChunkIdx, 130),
+            },
+            after: map[ChunkIdx][]BitRange{
+                BaseChunkIdx: {{195, PallocChunkPages - 195}},
+            },
+        }
+    }
     for name, v := range tests {
         v := v
         t.Run(name, func(t *testing.T) {
@@ -269,23 +269,6 @@ func TestPageAllocAllocToCache(t *testing.T) {
         afterScav   map[ChunkIdx][]BitRange
     }
     tests := map[string]test{
-        "AllFree": {
-            beforeAlloc: map[ChunkIdx][]BitRange{
-                BaseChunkIdx: {},
-            },
-            beforeScav: map[ChunkIdx][]BitRange{
-                BaseChunkIdx: {{1, 1}, {64, 64}},
-            },
-            hits: []PageCache{
-                NewPageCache(PageBase(BaseChunkIdx, 0), ^uint64(0), 0x2),
-                NewPageCache(PageBase(BaseChunkIdx, 64), ^uint64(0), ^uint64(0)),
-                NewPageCache(PageBase(BaseChunkIdx, 128), ^uint64(0), 0),
-                NewPageCache(PageBase(BaseChunkIdx, 192), ^uint64(0), 0),
-            },
-            afterAlloc: map[ChunkIdx][]BitRange{
-                BaseChunkIdx: {{0, 256}},
-            },
-        },
         "ManyArena": {
             beforeAlloc: map[ChunkIdx][]BitRange{
                 BaseChunkIdx: {{0, PallocChunkPages}},
@@ -306,42 +289,7 @@ func TestPageAllocAllocToCache(t *testing.T) {
                 BaseChunkIdx + 2: {{0, PallocChunkPages}},
             },
         },
-        "NotContiguous": {
-            beforeAlloc: map[ChunkIdx][]BitRange{
-                BaseChunkIdx:        {{0, PallocChunkPages}},
-                BaseChunkIdx + 0xff: {{0, 0}},
-            },
-            beforeScav: map[ChunkIdx][]BitRange{
-                BaseChunkIdx:        {{0, PallocChunkPages}},
-                BaseChunkIdx + 0xff: {{31, 67}},
-            },
-            hits: []PageCache{
-                NewPageCache(PageBase(BaseChunkIdx+0xff, 0), ^uint64(0), ((uint64(1)<<33)-1)<<31),
-            },
-            afterAlloc: map[ChunkIdx][]BitRange{
-                BaseChunkIdx:        {{0, PallocChunkPages}},
-                BaseChunkIdx + 0xff: {{0, 64}},
-            },
-            afterScav: map[ChunkIdx][]BitRange{
-                BaseChunkIdx:        {{0, PallocChunkPages}},
-                BaseChunkIdx + 0xff: {{64, 34}},
-            },
-        },
-        "First": {
-            beforeAlloc: map[ChunkIdx][]BitRange{
-                BaseChunkIdx: {{0, 32}, {33, 31}, {96, 32}},
-            },
-            beforeScav: map[ChunkIdx][]BitRange{
-                BaseChunkIdx: {{1, 4}, {31, 5}, {66, 2}},
-            },
-            hits: []PageCache{
-                NewPageCache(PageBase(BaseChunkIdx, 0), 1<<32, 1<<32),
-                NewPageCache(PageBase(BaseChunkIdx, 64), (uint64(1)<<32)-1, 0x3<<2),
-            },
-            afterAlloc: map[ChunkIdx][]BitRange{
-                BaseChunkIdx: {{0, 128}},
-            },
-        },

         "Fail": {
             beforeAlloc: map[ChunkIdx][]BitRange{
                 BaseChunkIdx: {{0, PallocChunkPages}},
@@ -373,6 +321,61 @@ func TestPageAllocAllocToCache(t *testing.T) {
             },
         },
     }
+    if PallocChunkPages >= 512 {
+        tests["AllFree"] = test{
+            beforeAlloc: map[ChunkIdx][]BitRange{
+                BaseChunkIdx: {},
+            },
+            beforeScav: map[ChunkIdx][]BitRange{
+                BaseChunkIdx: {{1, 1}, {64, 64}},
+            },
+            hits: []PageCache{
+                NewPageCache(PageBase(BaseChunkIdx, 0), ^uint64(0), 0x2),
+                NewPageCache(PageBase(BaseChunkIdx, 64), ^uint64(0), ^uint64(0)),
+                NewPageCache(PageBase(BaseChunkIdx, 128), ^uint64(0), 0),
+                NewPageCache(PageBase(BaseChunkIdx, 192), ^uint64(0), 0),
+            },
+            afterAlloc: map[ChunkIdx][]BitRange{
+                BaseChunkIdx: {{0, 256}},
+            },
+        }
+        tests["NotContiguous"] = test{
+            beforeAlloc: map[ChunkIdx][]BitRange{
+                BaseChunkIdx:        {{0, PallocChunkPages}},
+                BaseChunkIdx + 0xff: {{0, 0}},
+            },
+            beforeScav: map[ChunkIdx][]BitRange{
+                BaseChunkIdx:        {{0, PallocChunkPages}},
+                BaseChunkIdx + 0xff: {{31, 67}},
+            },
+            hits: []PageCache{
+                NewPageCache(PageBase(BaseChunkIdx+0xff, 0), ^uint64(0), ((uint64(1)<<33)-1)<<31),
+            },
+            afterAlloc: map[ChunkIdx][]BitRange{
+                BaseChunkIdx:        {{0, PallocChunkPages}},
+                BaseChunkIdx + 0xff: {{0, 64}},
+            },
+            afterScav: map[ChunkIdx][]BitRange{
+                BaseChunkIdx:        {{0, PallocChunkPages}},
+                BaseChunkIdx + 0xff: {{64, 34}},
+            },
+        }
+        tests["First"] = test{
+            beforeAlloc: map[ChunkIdx][]BitRange{
+                BaseChunkIdx: {{0, 32}, {33, 31}, {96, 32}},
+            },
+            beforeScav: map[ChunkIdx][]BitRange{
+                BaseChunkIdx: {{1, 4}, {31, 5}, {66, 2}},
+            },
+            hits: []PageCache{
+                NewPageCache(PageBase(BaseChunkIdx, 0), 1<<32, 1<<32),
+                NewPageCache(PageBase(BaseChunkIdx, 64), (uint64(1)<<32)-1, 0x3<<2),
+            },
+            afterAlloc: map[ChunkIdx][]BitRange{
+                BaseChunkIdx: {{0, 128}},
+            },
+        }
+    }
     // Disable these tests on iOS since we have a small address space.
     // See #46860.
     if PageAlloc64Bit != 0 && goos.IsIos == 0 {
@@ -54,28 +54,31 @@ func TestPallocBitsAllocRange(t *testing.T) {
         want[PallocChunkPages/64-1] = 1 << 63
         test(t, PallocChunkPages-1, 1, want)
     })
+    if PallocChunkPages >= 512 {
     t.Run("Inner", func(t *testing.T) {
         want := new(PallocBits)
-        want[2] = 0x3e
+        want[:][2] = 0x3e
         test(t, 129, 5, want)
     })
     t.Run("Aligned", func(t *testing.T) {
         want := new(PallocBits)
-        want[2] = ^uint64(0)
-        want[3] = ^uint64(0)
+        want[:][2] = ^uint64(0)
+        want[:][3] = ^uint64(0)
        test(t, 128, 128, want)
     })
     t.Run("Begin", func(t *testing.T) {
         want := new(PallocBits)
-        want[0] = ^uint64(0)
-        want[1] = ^uint64(0)
-        want[2] = ^uint64(0)
-        want[3] = ^uint64(0)
-        want[4] = ^uint64(0)
-        want[5] = 0x1
+        want[:][0] = ^uint64(0)
+        want[:][1] = ^uint64(0)
+        want[:][2] = ^uint64(0)
+        want[:][3] = ^uint64(0)
+        want[:][4] = ^uint64(0)
+        want[:][5] = 0x1
         test(t, 0, 321, want)
     })
     t.Run("End", func(t *testing.T) {
+        // avoid constant overflow when PallocChunkPages is small
+        var PallocChunkPages uint = PallocChunkPages
         want := new(PallocBits)
         want[PallocChunkPages/64-1] = ^uint64(0)
         want[PallocChunkPages/64-2] = ^uint64(0)
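The switch from `want[2] = ...` to `want[:][2] = ...` is the array-flavored version of the constant-overflow trick: indexing an array with a constant out-of-range index is a compile error even in code a guard makes unreachable, while indexing the slice `want[:]` is only checked at run time. A minimal sketch:

    package main

    func main() {
        var a [1]uint64 // stand-in for a small PallocBits
        // a[2] = 1     // does not compile: index 2 out of bounds [0:1]
        if len(a) > 2 {
            a[:][2] = 1 // slice indexing is checked at run time only
        }
        println(a[0])
    }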
@@ -83,6 +86,7 @@ func TestPallocBitsAllocRange(t *testing.T) {
         want[PallocChunkPages/64-4] = 1 << 63
         test(t, PallocChunkPages-(64*3+1), 64*3+1, want)
     })
+    }
     t.Run("All", func(t *testing.T) {
         want := new(PallocBits)
         for i := range want {
@@ -118,10 +122,11 @@ func TestMallocBitsPopcntRange(t *testing.T) {
         i, n uint // bit range to popcnt over.
         want uint // expected popcnt result on that range.
     }
-    tests := map[string]struct {
+    type testCase struct {
         init  []BitRange // bit ranges to set to 1 in the bitmap.
         tests []test     // a set of popcnt tests to run over the bitmap.
-    }{
+    }
+    tests := map[string]testCase{
         "None": {
             tests: []test{
                 {0, 1, 0},
@@ -157,7 +162,9 @@ func TestMallocBitsPopcntRange(t *testing.T) {
                 {0, PallocChunkPages, PallocChunkPages / 2},
             },
         },
-        "OddBound": {
+    }
+    if PallocChunkPages >= 512 {
+        tests["OddBound"] = testCase{
             init: []BitRange{{0, 111}},
             tests: []test{
                 {0, 1, 1},
@@ -172,8 +179,8 @@ func TestMallocBitsPopcntRange(t *testing.T) {
                 {PallocChunkPages / 2, PallocChunkPages / 2, 0},
                 {0, PallocChunkPages, 111},
             },
-        },
-        "Scattered": {
+        }
+        tests["Scattered"] = testCase{
             init: []BitRange{
                 {1, 3}, {5, 1}, {7, 1}, {10, 2}, {13, 1}, {15, 4},
                 {21, 1}, {23, 1}, {26, 2}, {30, 5}, {36, 2}, {40, 3},
@@ -190,7 +197,7 @@ func TestMallocBitsPopcntRange(t *testing.T) {
                 {1, 128, 74},
                 {0, PallocChunkPages, 75},
             },
-        },
+        }
     }
     for name, v := range tests {
         v := v
@@ -251,6 +258,7 @@ func TestPallocBitsSummarize(t *testing.T) {
             PackPallocSum(11, 23, 23),
         },
     }
+    if PallocChunkPages >= 512 {
         tests["StartMaxEnd"] = test{
             free: []BitRange{{0, 4}, {50, 100}, {PallocChunkPages - 4, 4}},
             hits: []PallocSum{
@@ -269,6 +277,7 @@ func TestPallocBitsSummarize(t *testing.T) {
             PackPallocSum(0, 5, 0),
         },
     }
+    }
     tests["One"] = test{
         free: []BitRange{{2, 1}},
         hits: []PallocSum{
@@ -329,12 +338,13 @@ func BenchmarkPallocBitsSummarize(b *testing.B) {

 // Ensures page allocation works.
 func TestPallocBitsAlloc(t *testing.T) {
-    tests := map[string]struct {
+    type test struct {
         before []BitRange
         after  []BitRange
         npages uintptr
         hits   []uint
-    }{
+    }
+    tests := map[string]test{
         "AllFree1": {
             npages: 1,
             hits:   []uint{0, 1, 2, 3, 4, 5},
@@ -350,22 +360,6 @@ func TestPallocBitsAlloc(t *testing.T) {
             hits:  []uint{0, 5, 10, 15, 20},
             after: []BitRange{{0, 25}},
         },
-        "AllFree64": {
-            npages: 64,
-            hits:   []uint{0, 64, 128},
-            after:  []BitRange{{0, 192}},
-        },
-        "AllFree65": {
-            npages: 65,
-            hits:   []uint{0, 65, 130},
-            after:  []BitRange{{0, 195}},
-        },
-        "SomeFree64": {
-            before: []BitRange{{0, 32}, {64, 32}, {100, PallocChunkPages - 100}},
-            npages: 64,
-            hits:   []uint{^uint(0)},
-            after:  []BitRange{{0, 32}, {64, 32}, {100, PallocChunkPages - 100}},
-        },
         "NoneFree1": {
             before: []BitRange{{0, PallocChunkPages}},
             npages: 1,
@@ -408,18 +402,38 @@ func TestPallocBitsAlloc(t *testing.T) {
             hits:  []uint{PallocChunkPages/2 - 3, ^uint(0)},
             after: []BitRange{{0, PallocChunkPages}},
         },
-        "ExactFit65": {
     }
+    if PallocChunkPages >= 512 {
+        // avoid constant overflow when PallocChunkPages is small
+        var PallocChunkPages uint = PallocChunkPages
+        tests["AllFree64"] = test{
+            npages: 64,
+            hits:   []uint{0, 64, 128},
+            after:  []BitRange{{0, 192}},
+        }
+        tests["AllFree65"] = test{
+            npages: 65,
+            hits:   []uint{0, 65, 130},
+            after:  []BitRange{{0, 195}},
+        }
+        tests["SomeFree64"] = test{
+            before: []BitRange{{0, 32}, {64, 32}, {100, PallocChunkPages - 100}},
+            npages: 64,
+            hits:   []uint{^uint(0)},
+            after:  []BitRange{{0, 32}, {64, 32}, {100, PallocChunkPages - 100}},
+        }
+        tests["ExactFit65"] = test{
             before: []BitRange{{0, PallocChunkPages/2 - 31}, {PallocChunkPages/2 + 34, PallocChunkPages/2 - 34}},
             npages: 65,
             hits:   []uint{PallocChunkPages/2 - 31, ^uint(0)},
             after:  []BitRange{{0, PallocChunkPages}},
-        },
-        "SomeFree161": {
+        }
+        tests["SomeFree161"] = test{
             before: []BitRange{{0, 185}, {331, 1}},
             npages: 161,
             hits:   []uint{332},
             after:  []BitRange{{0, 185}, {331, 162}},
         },
+        }
+    }
     for name, v := range tests {
         v := v
@ -442,18 +456,13 @@ func TestPallocBitsAlloc(t *testing.T) {
|
|||
|
||||
// Ensures page freeing works.
|
||||
func TestPallocBitsFree(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
type test struct {
|
||||
beforeInv []BitRange
|
||||
afterInv []BitRange
|
||||
frees []uint
|
||||
npages uintptr
|
||||
}{
|
||||
"SomeFree": {
|
||||
npages: 1,
|
||||
beforeInv: []BitRange{{0, 32}, {64, 32}, {100, 1}},
|
||||
frees: []uint{32},
|
||||
afterInv: []BitRange{{0, 33}, {64, 32}, {100, 1}},
|
||||
},
|
||||
}
|
||||
tests := map[string]test{
|
||||
"NoneFree1": {
|
||||
npages: 1,
|
||||
frees: []uint{0, 1, 2, 3, 4, 5},
|
||||
|
@@ -469,16 +478,24 @@ func TestPallocBitsFree(t *testing.T) {
             frees:    []uint{0, 5, 10, 15, 20},
             afterInv: []BitRange{{0, 25}},
         },
-        "NoneFree64": {
     }
+    if PallocChunkPages >= 512 {
+        tests["SomeFree"] = test{
+            npages:    1,
+            beforeInv: []BitRange{{0, 32}, {64, 32}, {100, 1}},
+            frees:     []uint{32},
+            afterInv:  []BitRange{{0, 33}, {64, 32}, {100, 1}},
+        }
+        tests["NoneFree64"] = test{
             npages:   64,
             frees:    []uint{0, 64, 128},
             afterInv: []BitRange{{0, 192}},
-        },
-        "NoneFree65": {
+        }
+        tests["NoneFree65"] = test{
             npages:   65,
             frees:    []uint{0, 65, 130},
             afterInv: []BitRange{{0, 195}},
-        },
+        }
+    }
     for name, v := range tests {
         v := v
@@ -9,11 +9,10 @@ import (
     "io"
 )

-// Expect 8 MB of memory usage for a small wasm program.
-// This reflects the current allocator. We test an exact
-// value here, but if the allocator changes, we can update
-// or relax this.
-const want = 8 << 20
+// Expect less than 3 MB of memory usage for a small wasm program.
+// This reflects the current allocator. If the allocator changes,
+// update this value.
+const want = 3 << 20

 var w = io.Discard

@@ -22,8 +21,8 @@ func main() {

     const pageSize = 64 * 1024
     sz := uintptr(currentMemory()) * pageSize
-    if sz != want {
-        fmt.Printf("FAIL: unexpected memory size %d, want %d\n", sz, want)
+    if sz > want {
+        fmt.Printf("FAIL: unexpected memory size %d, want <= %d\n", sz, want)
     }
 }
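For reference, the test sizes Wasm linear memory in fixed 64 KB pages and now checks an upper bound rather than an exact value. A standalone sketch of the same comparison (the 40-page figure is made up; currentMemory in the real test is a Wasm-specific helper, stubbed here):

    package main

    import "fmt"

    func main() {
        const pageSize = 64 * 1024 // Wasm linear-memory page size
        const want = 3 << 20       // upper bound from the updated test
        pages := 40                // hypothetical current_memory page count
        sz := uintptr(pages) * pageSize
        if sz > want {
            fmt.Printf("FAIL: unexpected memory size %d, want <= %d\n", sz, want)
        } else {
            fmt.Printf("ok: %d bytes within the %d-byte budget\n", sz, want)
        }
    }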