browsertrix-crawler/tests/extra_hops_depth.test.js

import fs from "fs";
import util from "util";
import { exec as execCallback } from "child_process";

const exec = util.promisify(execCallback);

const extraHopsTimeout = 180000;
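
// Run a crawl of https://webrecorder.net/ with --extraHops 2 and a --limit of 7,
// then check that the seed URL is recorded in pages.jsonl while the pages reached
// via the extra hops are written separately to extraPages.jsonl.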
test(
  "check that URLs are crawled 2 extra hops beyond depth",
  async () => {
    try {
      await exec(
        "docker run -v $PWD/test-crawls:/crawls -v $PWD/tests/fixtures:/tests/fixtures webrecorder/browsertrix-crawler crawl --collection extra-hops-beyond --extraHops 2 --url https://webrecorder.net/ --limit 7",
      );
    } catch (error) {
      console.log(error);
    }
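
    // Both page lists are written under the collection's pages/ directory;
    // each .jsonl file starts with a header record followed by one JSON object per page.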
    const crawledPages = fs.readFileSync(
      "test-crawls/collections/extra-hops-beyond/pages/pages.jsonl",
      "utf8",
    );
    const crawledPagesArray = crawledPages.trim().split("\n");

    const crawledExtraPages = fs.readFileSync(
      "test-crawls/collections/extra-hops-beyond/pages/extraPages.jsonl",
      "utf8",
    );
    const crawledExtraPagesArray = crawledExtraPages.trim().split("\n");
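
    // With --limit 7, the crawl should capture the seed page plus six pages
    // discovered via extra hops (the top-level webrecorder.net site pages).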
    const expectedPages = [
      "https://webrecorder.net/",
    ];

    const expectedExtraPages = [
      "https://webrecorder.net/blog",
      "https://webrecorder.net/tools",
      "https://webrecorder.net/community",
      "https://webrecorder.net/about",
      "https://webrecorder.net/contact",
      "https://webrecorder.net/faq",
    ];
    // the first line of each file is the header, not a page, so subtract 1
    expect(crawledPagesArray.length - 1).toEqual(expectedPages.length);
    expect(crawledExtraPagesArray.length - 1).toEqual(expectedExtraPages.length);

    for (const page of crawledPagesArray) {
      const url = JSON.parse(page).url;
      if (!url) {
        continue;
      }
      expect(expectedPages.indexOf(url) >= 0).toBe(true);
    }

    for (const page of crawledExtraPagesArray) {
      const url = JSON.parse(page).url;
      if (!url) {
        continue;
      }
      expect(expectedExtraPages.indexOf(url) >= 0).toBe(true);
    }
  },
  extraHopsTimeout,
);