encoding/json/jsontext: preserve buffer capacity in Encoder.Reset

This does the equivalent of CL 681177 for the Encoder.
It preserves the internal buffer between resets.

Change-Id: I5e9353b6d7755e067d4f9a4d1ea3d8f056253027
Reviewed-on: https://go-review.googlesource.com/c/go/+/690375
Reviewed-by: Johan Brandhorst-Satzkorn <johan.brandhorst@gmail.com>
Auto-Submit: Joseph Tsai <joetsai@digital-static.net>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Damien Neil <dneil@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
This commit is contained in:
Joe Tsai 2025-06-24 18:56:26 -07:00 committed by Gopher Robot
parent 91c4f0ccd5
commit ebdbfccd98
4 changed files with 109 additions and 14 deletions

View file

@ -138,12 +138,10 @@ func (d *Decoder) Reset(r io.Reader, opts ...Options) {
case d.s.Flags.Get(jsonflags.WithinArshalCall):
panic("jsontext: cannot reset Decoder passed to json.UnmarshalerFrom")
}
// If the decoder was previously aliasing a bytes.Buffer,
// invalidate the alias to avoid writing into the bytes.Buffer's
// internal buffer.
// Reuse the buffer if it does not alias a previous [bytes.Buffer].
b := d.s.buf[:0]
if _, ok := d.s.rd.(*bytes.Buffer); ok {
b = nil // avoid reusing b since it aliases the previous bytes.Buffer.
b = nil
}
d.s.reset(b, r, opts...)
}

View file

@ -1269,14 +1269,14 @@ func TestPeekableDecoder(t *testing.T) {
// TestDecoderReset tests that the decoder preserves its internal
// buffer between Reset calls to avoid frequent allocations when reusing the decoder.
// It ensures that the buffer capacity is maintained while avoiding aliasing
// issues with bytes.Buffer.
// issues with [bytes.Buffer].
func TestDecoderReset(t *testing.T) {
// Create a decoder with a reasonably large JSON input to ensure buffer growth
// Create a decoder with a reasonably large JSON input to ensure buffer growth.
largeJSON := `{"key1":"value1","key2":"value2","key3":"value3","key4":"value4","key5":"value5"}`
dec := NewDecoder(strings.NewReader(largeJSON))
t.Run("Test capacity preservation", func(t *testing.T) {
// Read the first JSON value to grow the internal buffer
// Read the first JSON value to grow the internal buffer.
val1, err := dec.ReadValue()
if err != nil {
t.Fatalf("first ReadValue failed: %v", err)
@ -1285,22 +1285,22 @@ func TestDecoderReset(t *testing.T) {
t.Fatalf("first ReadValue = %q, want %q", val1, largeJSON)
}
// Get the buffer capacity after first use
// Get the buffer capacity after first use.
initialCapacity := cap(dec.s.buf)
if initialCapacity == 0 {
t.Fatalf("expected non-zero buffer capacity after first use")
}
// Reset with a new reader - this should preserve the buffer capacity
// Reset with a new reader - this should preserve the buffer capacity.
dec.Reset(strings.NewReader(largeJSON))
// Verify the buffer capacity is preserved (or at least not smaller)
// Verify the buffer capacity is preserved (or at least not smaller).
preservedCapacity := cap(dec.s.buf)
if preservedCapacity < initialCapacity {
t.Fatalf("buffer capacity reduced after Reset: got %d, want at least %d", preservedCapacity, initialCapacity)
}
// Read the second JSON value to ensure the decoder still works correctly
// Read the second JSON value to ensure the decoder still works correctly.
val2, err := dec.ReadValue()
if err != nil {
t.Fatalf("second ReadValue failed: %v", err)
@ -1317,7 +1317,7 @@ func TestDecoderReset(t *testing.T) {
dec.Reset(bb)
bbBuf = bb.Bytes()
// Read the third JSON value to ensure functionality with bytes.Buffer
// Read the third JSON value to ensure functionality with bytes.Buffer.
val3, err := dec.ReadValue()
if err != nil {
t.Fatalf("fourth ReadValue failed: %v", err)

View file

@ -107,12 +107,17 @@ func (e *Encoder) Reset(w io.Writer, opts ...Options) {
case e.s.Flags.Get(jsonflags.WithinArshalCall):
panic("jsontext: cannot reset Encoder passed to json.MarshalerTo")
}
e.s.reset(nil, w, opts...)
// Reuse the buffer if it does not alias a previous [bytes.Buffer].
b := e.s.Buf[:0]
if _, ok := e.s.wr.(*bytes.Buffer); ok {
b = nil
}
e.s.reset(b, w, opts...)
}
func (e *encoderState) reset(b []byte, w io.Writer, opts ...Options) {
e.state.reset()
e.encodeBuffer = encodeBuffer{Buf: b, wr: w, bufStats: e.bufStats}
e.encodeBuffer = encodeBuffer{Buf: b, wr: w, availBuffer: e.availBuffer, bufStats: e.bufStats}
if bb, ok := w.(*bytes.Buffer); ok && bb != nil {
e.Buf = bb.AvailableBuffer() // alias the unused buffer of bb
}

View file

@ -735,3 +735,95 @@ func testEncoderErrors(t *testing.T, where jsontest.CasePos, opts []Options, cal
t.Fatalf("%s: Encoder.OutputOffset = %v, want %v", where, gotOffset, wantOffset)
}
}
// TestEncoderReset tests that the encoder preserves its internal
// buffer between Reset calls to avoid frequent allocations when reusing the encoder.
// It ensures that the buffer capacity is maintained while avoiding aliasing
// issues with [bytes.Buffer].
func TestEncoderReset(t *testing.T) {
	// Create an encoder with a reasonably large JSON input to ensure buffer growth.
	largeJSON := `{"key1":"value1","key2":"value2","key3":"value3","key4":"value4","key5":"value5"}` + "\n"
	bb := new(bytes.Buffer)
	enc := NewEncoder(struct{ io.Writer }{bb}) // mask out underlying [bytes.Buffer]
	t.Run("Test capacity preservation", func(t *testing.T) {
		// Write the first JSON value to grow the internal buffer.
		err := enc.WriteValue(append(enc.AvailableBuffer(), largeJSON...))
		if err != nil {
			t.Fatalf("first WriteValue failed: %v", err)
		}
		if bb.String() != largeJSON {
			t.Fatalf("first WriteValue = %q, want %q", bb.String(), largeJSON)
		}
		// Get the buffer capacity after first use.
		initialCapacity := cap(enc.s.Buf)
		initialCacheCapacity := cap(enc.s.availBuffer)
		if initialCapacity == 0 {
			t.Fatalf("expected non-zero buffer capacity after first use")
		}
		if initialCacheCapacity == 0 {
			t.Fatalf("expected non-zero cache capacity after first use")
		}
		// Reset with a new writer - this should preserve the buffer capacity.
		bb.Reset()
		enc.Reset(struct{ io.Writer }{bb})
		// Verify the buffer capacity is preserved (or at least not smaller).
		preservedCapacity := cap(enc.s.Buf)
		if preservedCapacity < initialCapacity {
			t.Fatalf("buffer capacity reduced after Reset: got %d, want at least %d", preservedCapacity, initialCapacity)
		}
		// Report the cache capacities here, not the buffer capacities,
		// since this check is about enc.s.availBuffer.
		preservedCacheCapacity := cap(enc.s.availBuffer)
		if preservedCacheCapacity < initialCacheCapacity {
			t.Fatalf("cache capacity reduced after Reset: got %d, want at least %d", preservedCacheCapacity, initialCacheCapacity)
		}
		// Write the second JSON value to ensure the encoder still works correctly.
		err = enc.WriteValue(append(enc.AvailableBuffer(), largeJSON...))
		if err != nil {
			t.Fatalf("second WriteValue failed: %v", err)
		}
		if bb.String() != largeJSON {
			t.Fatalf("second WriteValue = %q, want %q", bb.String(), largeJSON)
		}
	})
	t.Run("Test aliasing with bytes.Buffer", func(t *testing.T) {
		// Test with bytes.Buffer to verify proper aliasing behavior.
		bb.Reset()
		enc.Reset(bb)
		// Write the third JSON value to ensure functionality with bytes.Buffer.
		err := enc.WriteValue([]byte(largeJSON))
		if err != nil {
			t.Fatalf("third WriteValue failed: %v", err)
		}
		if bb.String() != largeJSON {
			t.Fatalf("third WriteValue = %q, want %q", bb.String(), largeJSON)
		}
		// The encoder buffer should alias bytes.Buffer's internal buffer.
		if cap(enc.s.Buf) == 0 || cap(bb.AvailableBuffer()) == 0 || &enc.s.Buf[:1][0] != &bb.AvailableBuffer()[:1][0] {
			t.Fatalf("encoder buffer does not alias bytes.Buffer")
		}
	})
	t.Run("Test aliasing removed after Reset", func(t *testing.T) {
		// Reset with a new writer and verify the buffer is not aliased.
		bb.Reset()
		enc.Reset(struct{ io.Writer }{bb})
		err := enc.WriteValue([]byte(largeJSON))
		if err != nil {
			t.Fatalf("fourth WriteValue failed: %v", err)
		}
		if bb.String() != largeJSON {
			t.Fatalf("fourth WriteValue = %q, want %q", bb.String(), largeJSON)
		}
		// The encoder buffer should not alias the bytes.Buffer's internal buffer.
		if cap(enc.s.Buf) == 0 || cap(bb.AvailableBuffer()) == 0 || &enc.s.Buf[:1][0] == &bb.AvailableBuffer()[:1][0] {
			t.Fatalf("encoder buffer aliases bytes.Buffer")
		}
	})
}