internal/poll: remove bufs field from Windows' poll.operation

The bufs field is used to avoid allocating it every time it is needed.
We can do better by using a sync.Pool to reuse allocations across
operations and FDs instead of the field.

A side benefit is that FD is now 16 bytes smaller and operation is more
stateless.

Change-Id: I5d686d1526f6c63e7ca1ae84da1fbf2044b24703
Reviewed-on: https://go-review.googlesource.com/c/go/+/698798
Reviewed-by: Damien Neil <dneil@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
This commit is contained in:
qmuntal 2025-08-25 12:49:37 +02:00 committed by Quim Muntal
parent 801b74eb95
commit afc51ed007

View file

@ -78,7 +78,6 @@ type operation struct {
// fields used only by net package // fields used only by net package
buf syscall.WSABuf buf syscall.WSABuf
bufs []syscall.WSABuf
} }
func (o *operation) setEvent() { func (o *operation) setEvent() {
@ -113,34 +112,48 @@ func (o *operation) InitBuf(buf []byte) {
o.buf.Buf = unsafe.SliceData(buf) o.buf.Buf = unsafe.SliceData(buf)
} }
func (o *operation) InitBufs(buf *[][]byte) { var wsaBufsPool = sync.Pool{
if o.bufs == nil { New: func() any {
o.bufs = make([]syscall.WSABuf, 0, len(*buf)) buf := make([]syscall.WSABuf, 0, 16)
} else { return &buf
o.bufs = o.bufs[:0] },
} }
func newWSABufs(buf *[][]byte) *[]syscall.WSABuf {
bufsPtr := wsaBufsPool.Get().(*[]syscall.WSABuf)
*bufsPtr = (*bufsPtr)[:0]
for _, b := range *buf { for _, b := range *buf {
if len(b) == 0 { if len(b) == 0 {
o.bufs = append(o.bufs, syscall.WSABuf{}) *bufsPtr = append(*bufsPtr, syscall.WSABuf{})
continue continue
} }
for len(b) > maxRW { for len(b) > maxRW {
o.bufs = append(o.bufs, syscall.WSABuf{Len: maxRW, Buf: &b[0]}) *bufsPtr = append(*bufsPtr, syscall.WSABuf{Len: maxRW, Buf: &b[0]})
b = b[maxRW:] b = b[maxRW:]
} }
if len(b) > 0 { if len(b) > 0 {
o.bufs = append(o.bufs, syscall.WSABuf{Len: uint32(len(b)), Buf: &b[0]}) *bufsPtr = append(*bufsPtr, syscall.WSABuf{Len: uint32(len(b)), Buf: &b[0]})
} }
} }
return bufsPtr
} }
// ClearBufs clears all pointers to Buffers parameter captured func freeWSABufs(bufsPtr *[]syscall.WSABuf) {
// by InitBufs, so it can be released by garbage collector. // Clear pointers to buffers so they can be released by garbage collector.
func (o *operation) ClearBufs() { bufs := *bufsPtr
for i := range o.bufs { for i := range bufs {
o.bufs[i].Buf = nil bufs[i].Buf = nil
} }
o.bufs = o.bufs[:0] // Proper usage of a sync.Pool requires each entry to have approximately
// the same memory cost. To obtain this property when the stored type
// contains a variably-sized buffer, we add a hard limit on the maximum buffer
// to place back in the pool.
//
// See https://go.dev/issue/23199
if cap(*bufsPtr) > 128 {
*bufsPtr = nil
}
wsaBufsPool.Put(bufsPtr)
} }
// wsaMsgPool is a pool of WSAMsg structures that can only hold a single WSABuf. // wsaMsgPool is a pool of WSAMsg structures that can only hold a single WSABuf.
@ -939,13 +952,12 @@ func (fd *FD) Writev(buf *[][]byte) (int64, error) {
if race.Enabled { if race.Enabled {
race.ReleaseMerge(unsafe.Pointer(&ioSync)) race.ReleaseMerge(unsafe.Pointer(&ioSync))
} }
o := &fd.wop bufs := newWSABufs(buf)
o.InitBufs(buf) defer freeWSABufs(bufs)
n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) {
err = syscall.WSASend(fd.Sysfd, &o.bufs[0], uint32(len(o.bufs)), &qty, 0, &o.o, nil) err = syscall.WSASend(fd.Sysfd, &(*bufs)[0], uint32(len(*bufs)), &qty, 0, &o.o, nil)
return qty, err return qty, err
}) })
o.ClearBufs()
TestHookDidWritev(n) TestHookDidWritev(n)
consume(buf, int64(n)) consume(buf, int64(n))
return int64(n), err return int64(n), err