mirror of
https://github.com/webrecorder/browsertrix-crawler.git
synced 2025-10-19 14:33:17 +00:00

- support uploading WACZ to s3-compatible storage (via minio client) - config storage loaded from env vars, enabled when WACZ output is used. - support pinging either an http or a redis key-based webhook, - webhook: include 'completed' bool to indicate if fully completed crawl or partial (eg. interrupted via signal) - consolidate redis init to redis.js - support upload filename with custom variables: can interpolate current timestamp (@ts), hostname (@hostname) and user provided id (@crawlId) - README: add docs for s3 storage, remove unused args - update to pywb 2.6.2, browsertrix-behaviors 0.2.4 * fix to `limit` option, ensure limit check uses shared state * bump version to 0.5.0-beta.1
36 lines
1,006 B
JavaScript
36 lines
1,006 B
JavaScript
const child_process = require("child_process");
const fs = require("fs");

// Integration test: run browsertrix-crawler in Docker with a custom driver
// (tests/fixtures/driver-1.js) and verify that the JS resources selected by
// the driver's custom selector are recorded as pages in pages.jsonl.
test("ensure custom driver with custom selector crawls JS files as pages", async () => {
  jest.setTimeout(30000);

  // NOTE(review): a failing docker run is logged and ignored so the crawl
  // output can still be inspected below — presumably the crawler may exit
  // non-zero even after producing usable output; confirm.
  try {
    child_process.execSync("docker run -v $PWD/tests/fixtures:/tests/fixtures -v $PWD/test-crawls:/crawls webrecorder/browsertrix-crawler crawl --url https://www.iana.org/ --collection custom-driver-1 --driver /tests/fixtures/driver-1.js");
  } catch (error) {
    console.log(error);
  }

  // Gather the set of crawled URLs from the JSON-lines page log.
  const crawledPages = fs.readFileSync("test-crawls/collections/custom-driver-1/pages/pages.jsonl", "utf8");

  const pages = new Set();

  for (const line of crawledPages.trim().split("\n")) {
    const url = JSON.parse(line).url;
    if (!url) {
      continue;
    }
    pages.add(url);
  }

  console.log(pages);

  // The custom selector should have caused both JS files to be crawled as
  // pages, in addition to the seed URL.
  const expectedPages = new Set([
    "https://www.iana.org/",
    "https://www.iana.org/_js/jquery.js",
    "https://www.iana.org/_js/iana.js"
  ]);

  expect(pages).toEqual(expectedPages);
});