mirror of
https://github.com/python/cpython.git
synced 2025-12-08 06:10:17 +00:00
gh-116738: Use PyMutex for bz2 module (gh-140555)
The methods are already wrapped with a lock, which makes them thread-safe in the free-threaded build. This replaces `PyThread_acquire_lock` with `PyMutex` and removes some macros and allocation-handling code. Also adds a free-threading test to ensure there are no data races and that the locking works correctly.
This commit is contained in:
parent
e8b5cb8f33
commit
9479a62a51
2 changed files with 65 additions and 37 deletions
53
Lib/test/test_free_threading/test_bz2.py
Normal file
53
Lib/test/test_free_threading/test_bz2.py
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
import unittest
|
||||
|
||||
from test.support import import_helper, threading_helper
|
||||
from test.support.threading_helper import run_concurrently
|
||||
|
||||
bz2 = import_helper.import_module("bz2")
|
||||
from bz2 import BZ2Compressor, BZ2Decompressor
|
||||
|
||||
from test.test_bz2 import ext_decompress, BaseTest
|
||||
|
||||
|
||||
NTHREADS = 10
|
||||
TEXT = BaseTest.TEXT
|
||||
|
||||
|
||||
@threading_helper.requires_working_threading()
|
||||
class TestBZ2(unittest.TestCase):
|
||||
def test_compressor(self):
|
||||
bz2c = BZ2Compressor()
|
||||
|
||||
def worker():
|
||||
# it should return empty bytes as it buffers data internally
|
||||
data = bz2c.compress(TEXT)
|
||||
self.assertEqual(data, b"")
|
||||
|
||||
run_concurrently(worker_func=worker, nthreads=NTHREADS)
|
||||
data = bz2c.flush()
|
||||
# The decompressed data should be TEXT repeated NTHREADS times
|
||||
decompressed = ext_decompress(data)
|
||||
self.assertEqual(decompressed, TEXT * NTHREADS)
|
||||
|
||||
def test_decompressor(self):
|
||||
chunk_size = 128
|
||||
chunks = [bytes([ord("a") + i]) * chunk_size for i in range(NTHREADS)]
|
||||
input_data = b"".join(chunks)
|
||||
compressed = bz2.compress(input_data)
|
||||
|
||||
bz2d = BZ2Decompressor()
|
||||
output = []
|
||||
|
||||
def worker():
|
||||
data = bz2d.decompress(compressed, chunk_size)
|
||||
self.assertEqual(len(data), chunk_size)
|
||||
output.append(data)
|
||||
|
||||
run_concurrently(worker_func=worker, nthreads=NTHREADS)
|
||||
self.assertEqual(len(output), NTHREADS)
|
||||
# Verify the expected chunks (order doesn't matter due to append race)
|
||||
self.assertEqual(set(output), set(chunks))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
Loading…
Add table
Add a link
Reference in a new issue