Mirror of https://github.com/webrecorder/browsertrix-crawler.git (synced 2025-10-19 06:23:16 +00:00)

Fixes #674. This PR supersedes #505 and, instead of using js-wacz for optimized WACZ creation:
- generates an 'in-place' or 'streaming' WACZ in the crawler, without having to copy the data again
- streams WACZ contents to a remote upload (or to disk) from existing files on disk
- writes CDXJ indices per WARC to a 'warc-cdx' directory first, then merges them with the Linux 'sort' command and compresses them to ZipNum if there are >50K lines (or always, when using --generateCDX)
- writes and reads all data in the WARCs only once
- should result in significant speed / disk-usage improvements: previously, a WARC was written once, then read again (for CDXJ indexing), read again (for adding to the new WACZ ZIP), written to disk (into the new WACZ ZIP), and read again (if uploading to a remote endpoint). Now, WARCs are written once along with the per-WARC CDXJ; only the CDXJ is reread, sorted, and merged on disk, and all data is read once to either generate the WACZ on disk or upload it to a remote endpoint.

Co-authored-by: Tessa Walsh <tessa@bitarchivist.net>
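
For illustration only, a minimal sketch of the merge step described above — not the crawler's actual code. The `warc-cdx` input directory and 50K-line threshold come from the description; the `indexes/index.cdxj` output path and the exact `sort` invocation are assumptions:

```js
import { execSync } from "child_process";
import fs from "fs";
import path from "path";

// Hypothetical locations, per the description above.
const cdxDir = "warc-cdx";
const mergedPath = "indexes/index.cdxj";

// Collect the per-WARC CDXJ files written alongside each WARC.
const parts = fs
  .readdirSync(cdxDir)
  .filter((f) => f.endsWith(".cdxj"))
  .map((f) => path.join(cdxDir, f))
  .join(" ");

// Merge the per-WARC indices into one sorted CDXJ using the system `sort`.
execSync(`sort ${parts} > ${mergedPath}`);

// Per the description, a large merged index (>50K lines) would then be
// compressed to ZipNum rather than kept as a flat CDXJ.
const numLines = fs.readFileSync(mergedPath, "utf8").split("\n").length;
console.log(numLines > 50000 ? "compress to ZipNum" : "keep flat CDXJ");
```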
53 lines · 1.7 KiB · JavaScript
import child_process from "child_process";
import fs from "fs";

// Run the WACZ validation test only when invoked with a -validate argument.
const doValidate = process.argv.filter((x) => x.startsWith("-validate"))[0];

// Run or skip a test depending on a condition.
const testIf = (condition, ...args) =>
  condition ? test(...args) : test.skip(...args);
test("ensure multi url crawl run with docker run passes", async () => {
|
|
child_process.execSync(
|
|
'docker run -v $PWD/test-crawls:/crawls webrecorder/browsertrix-crawler crawl --url https://www.iana.org/ --url https://webrecorder.net/ --generateWACZ --text --collection advanced --combineWARC --rolloverSize 10000 --workers 2 --title "test title" --description "test description" --pages 2 --limit 2',
|
|
);
|
|
});
|
|
|
|
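
// Sketch (not part of the original test file): with --combineWARC and a
// 10 KB --rolloverSize, the crawl above should roll over into multiple
// combined WARCs. The output directory layout assumed here is a guess.
test("combined WARCs rolled over at the small rollover size", () => {
  const warcs = fs
    .readdirSync("test-crawls/collections/advanced")
    .filter((f) => f.endsWith(".warc.gz"));
  expect(warcs.length).toBeGreaterThan(1);
});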

testIf(doValidate, "validate multi url crawl wacz", () => {
  child_process.execSync(
    "wacz validate --file ./test-crawls/collections/advanced/advanced.wacz",
  );
});
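
// The `wacz` CLI invoked above comes from the py-wacz package
// (pip install wacz). Sketch (not in the original file): detect the CLI so
// its absence could also gate the validation test, e.g.
// testIf(doValidate && hasWaczCli, ...).
let hasWaczCli = true;
try {
  child_process.execSync("command -v wacz");
} catch {
  hasWaczCli = false;
}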

test("check that the favicon made it into the pages jsonl file", () => {
  expect(
    fs.existsSync("test-crawls/collections/advanced/pages/pages.jsonl"),
  ).toBe(true);

  // Line 0 of pages.jsonl is the header record; the two page entries follow.
  const lines = fs
    .readFileSync("test-crawls/collections/advanced/pages/pages.jsonl", "utf8")
    .split("\n");
  const data = [JSON.parse(lines[1]), JSON.parse(lines[2])];

  for (const d of data) {
    if (d.url === "https://webrecorder.net/") {
      expect(d.favIconUrl).toEqual(
        "https://webrecorder.net/assets/favicon.ico",
      );
    }
    // The crawl seeds https://www.iana.org/, so match that URL here (the
    // original compared against https://iana.org/, which never matches).
    if (d.url === "https://www.iana.org/") {
      expect(d.favIconUrl).toEqual(
        "https://www.iana.org/_img/bookmark_icon.ico",
      );
    }
  }
});
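
// Generic helper sketch (not in the original file): read every record in a
// pages.jsonl file instead of hard-coding line offsets. Each line is one
// JSON object; the header record is on line 0 and page entries follow.
function readPages(jsonlPath) {
  return fs
    .readFileSync(jsonlPath, "utf8")
    .split("\n")
    .filter((line) => line.trim() !== "")
    .map((line) => JSON.parse(line));
}

// Usage:
// const pages = readPages("test-crawls/collections/advanced/pages/pages.jsonl").slice(1);
// const page = pages.find((p) => p.url === "https://webrecorder.net/");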