archiver: convert buffer pool to use sync.Pool

This commit is contained in:
Michael Eischer 2025-10-13 22:08:58 +02:00
parent dd6cb0dd8e
commit 7f6fdcc52c
2 changed files with 14 additions and 27 deletions

View file

@ -1,5 +1,7 @@
package archiver package archiver
import "sync"
// buffer is a reusable buffer. After the buffer has been used, Release should // buffer is a reusable buffer. After the buffer has been used, Release should
// be called so the underlying slice is put back into the pool. // be called so the underlying slice is put back into the pool.
type buffer struct { type buffer struct {
@ -14,41 +16,32 @@ func (b *buffer) Release() {
return return
} }
select { pool.pool.Put(b)
case pool.ch <- b:
default:
}
} }
// bufferPool implements a limited set of reusable buffers. // bufferPool implements a set of reusable buffers backed by sync.Pool.
type bufferPool struct { type bufferPool struct {
ch chan *buffer pool sync.Pool
defaultSize int defaultSize int
} }
// newBufferPool initializes a new buffer pool. The pool stores at most max // newBufferPool initializes a new buffer pool. New buffers are created with
// items. New buffers are created with defaultSize. Buffers that have grown // defaultSize. Buffers that have grown larger than defaultSize are not put
// larger are not put back. // back into the pool.
func newBufferPool(max int, defaultSize int) *bufferPool { func newBufferPool(defaultSize int) *bufferPool {
b := &bufferPool{ b := &bufferPool{
ch: make(chan *buffer, max),
defaultSize: defaultSize, defaultSize: defaultSize,
} }
b.pool = sync.Pool{New: func() any {
return &buffer{
Data: make([]byte, defaultSize),
pool: b,
}
}}
return b return b
} }
// Get returns a new buffer, either from the pool or newly allocated. // Get returns a new buffer, either from the pool or newly allocated.
func (pool *bufferPool) Get() *buffer { func (pool *bufferPool) Get() *buffer {
select { return pool.pool.Get().(*buffer)
case buf := <-pool.ch:
return buf
default:
}
b := &buffer{
Data: make([]byte, pool.defaultSize),
pool: pool,
}
return b
} }

View file

@ -4,7 +4,6 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"runtime"
"sync" "sync"
"github.com/restic/chunker" "github.com/restic/chunker"
@ -34,16 +33,11 @@ type fileSaver struct {
// started, it is stopped when ctx is cancelled. // started, it is stopped when ctx is cancelled.
func newFileSaver(ctx context.Context, wg *errgroup.Group, uploader restic.BlobSaverAsync, pol chunker.Pol, fileWorkers uint) *fileSaver { func newFileSaver(ctx context.Context, wg *errgroup.Group, uploader restic.BlobSaverAsync, pol chunker.Pol, fileWorkers uint) *fileSaver {
ch := make(chan saveFileJob) ch := make(chan saveFileJob)
debug.Log("new file saver with %v file workers", fileWorkers)
// TODO find a way to get rid of this parameter
blobWorkers := uint(runtime.GOMAXPROCS(0))
debug.Log("new file saver with %v file workers and %v blob workers", fileWorkers, blobWorkers)
poolSize := fileWorkers + blobWorkers
s := &fileSaver{ s := &fileSaver{
uploader: uploader, uploader: uploader,
saveFilePool: newBufferPool(int(poolSize), chunker.MaxSize), saveFilePool: newBufferPool(chunker.MaxSize),
pol: pol, pol: pol,
ch: ch, ch: ch,