import json
from collections.abc import Generator
from dataclasses import dataclass
from pathlib import Path

import pytest

from warc2zim.utils import to_string


@dataclass
class EncodedForTest:
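    """A test string together with its encoded form in a given charset.

    `valid` is False when the content cannot be encoded with that charset.
    """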
    content: str
    encoding: str
    encoded: bytes
    valid: bool

    def __init__(self, content: str, encoding: str):
        self.content = content
        self.encoding = encoding
        try:
            self.encoded = content.encode(encoding)
            self.valid = True
        except ValueError:
            self.valid = False


@pytest.fixture(
    params=[
        "Simple ascii content",
        "A content with non ascii chars éœo€ð",
        "Latin1 contént",
        "Latin2 conteňt",
        "这是中文文本",  # "This is a chinese text" (in chinese)
    ]
)
def content(request):
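    """Sample contents: plain ASCII, accented Latin text and Chinese text."""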
    yield request.param


@pytest.fixture(
    params=[
        "ascii",
        "utf-8",
        "utf-16",
        "utf-32",
        "latin1",
        "latin2",
        "gb2312",
        "gbk",
    ]
)
def encoding(request):
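    """Charsets used to encode the sample contents."""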
    yield request.param


@pytest.fixture
def simple_encoded_content(content, encoding):
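    """Every (content, encoding) combination, encoded as raw bytes."""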
    return EncodedForTest(content, encoding)


def test_decode_http_header(simple_encoded_content):
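    """Bytes are decoded correctly when the charset comes from the HTTP header."""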
    if not simple_encoded_content.valid:
        # Nothing to test
        return
    assert (
        to_string(simple_encoded_content.encoded, simple_encoded_content.encoding, [])
        == simple_encoded_content.content
    )


@dataclass
class DeclaredHtmlEncodedForTest(EncodedForTest):
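    """Content wrapped in an HTML document declaring its charset in a meta tag."""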
    def __init__(self, content: str, encoding: str):
        html_content = f'<html><meta charset="{encoding}"><body>{content}</body></html>'
        super().__init__(html_content, encoding)


@pytest.fixture
def declared_html_encoded_content(content, encoding):
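    """Every (content, encoding) combination as HTML declaring its own charset."""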
    return DeclaredHtmlEncodedForTest(content, encoding)


def test_decode_html_header(declared_html_encoded_content):
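    """Bytes are decoded correctly when the charset is only declared in the HTML."""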
    test_case = declared_html_encoded_content
    if not test_case.valid:
        return
    assert to_string(test_case.encoded, None, []) == test_case.content


def test_decode_str(content, encoding):
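    """A value that is already a str is returned unchanged."""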
    result = to_string(content, encoding, [])
    assert result == content


def test_binary_content():
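    """Content that cannot really be decoded as UTF-8 still yields a value."""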
    content = "Hello, 你好".encode("utf-32")
    content = bytes([0xEF, 0xBB, 0xBF]) + content
    # [0xEF, 0xBB, 0xBF] is a BOM marker for utf-8.
    # It tricks chardet into being really confident the content is utf-8.
    # However, this cannot be properly decoded as utf-8; a value is still
    # returned, since the upstream server promised this is utf-8.
    assert to_string(content, "UTF-8", [])


def test_single_bad_character():
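    """A single invalid byte is replaced instead of failing the whole decoding."""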
    content = bytes([0xEF, 0xBB, 0xBF]) + b"prem" + bytes([0xC3]) + "ière".encode()
    # [0xEF, 0xBB, 0xBF] is a BOM marker for utf-8-sig
    # 0xC3 is a bad character (nothing in utf-8-sig at this position)
    result = to_string(content, "utf-8-sig", [])
    assert result == "prem�ière"


def test_decode_charset_to_try(simple_encoded_content):
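    """Bytes are decoded when the charset only appears in the charsets-to-try list."""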
    if not simple_encoded_content.valid:
        # Nothing to test
        return
    assert (
        to_string(
            simple_encoded_content.encoded, None, [simple_encoded_content.encoding]
        )
        == simple_encoded_content.content
    )


def test_decode_weird_encoding_not_declared_not_in_try_list():
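    """Decoding fails when the charset is neither declared nor in the try list."""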
    with pytest.raises(ValueError):
        to_string("Latin1 contént".encode("latin1"), None, ["UTF-8"])


def test_decode_weird_encoding_not_declared_in_try_list():
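    """Decoding succeeds when the undeclared charset is present in the try list."""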
    content = "Latin1 contént"
    assert to_string(content.encode("latin1"), None, ["UTF-8", "latin1"]) == content


@dataclass
class CharsetsTestData:
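    """Expected decoding outcome for one sample file in the encodings directory."""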
    filename: str
    probable_charset: str | None  # probable charset to use
    known_charset: str | None  # charset we know is being used (fake file typically)
    http_charset: (
        str | None
    )  # encoding to pass as http header because file is missing details and encoding
    # is not standard
    expected_strings: list[str]


def get_testdata() -> Generator[CharsetsTestData, None, None]:
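    """Yield one CharsetsTestData per file listed in encodings/definition.json."""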
    data = json.loads(
        (Path(__file__).parent / "encodings" / "definition.json").read_bytes()
    )
    for file in data["files"]:
        yield CharsetsTestData(
            filename=file["filename"],
            probable_charset=file.get("probable_charset", None),
            known_charset=file.get("known_charset", None),
            http_charset=file.get("http_charset", None),
            expected_strings=file.get("expected_strings", []),
        )


def get_testdata_id(test_data: CharsetsTestData) -> str:
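    """Use the filename as the pytest id for each test data entry."""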
    return test_data.filename


@pytest.mark.parametrize("testdata", get_testdata(), ids=get_testdata_id)
def test_decode_files(testdata: CharsetsTestData):
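    """Sample files decode to strings containing their expected substrings."""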
    result = to_string(
        (Path(__file__).parent / "encodings" / testdata.filename).read_bytes(),
        testdata.http_charset,
        ["UTF-8", "latin1"],
    )
    for expected_string in testdata.expected_strings:
        assert expected_string in result