browsertrix-crawler/tests/config_file.test.js

import fs from "fs";
import yaml from "js-yaml";
import util from "util";
import { exec as execCallback } from "child_process";

const exec = util.promisify(execCallback);
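
// Run a crawl from a YAML config containing a seed list, then verify that
// every seed URL appears in pages.jsonl and that a WACZ archive was written.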
test("check yaml config file with seed list is used", async () => {
try{
await exec("docker run -v $PWD/test-crawls:/crawls -v $PWD/tests/fixtures:/tests/fixtures webrecorder/browsertrix-crawler crawl --config /tests/fixtures/crawl-1.yaml --depth 0");
}
catch (error) {
console.log(error);
}
const crawledPages = fs.readFileSync("test-crawls/collections/configtest/pages/pages.jsonl", "utf8");
const pages = new Set();
for (const line of crawledPages.trim().split("\n")) {
const url = JSON.parse(line).url;
if (url) {
pages.add(url);
}
}
const config = yaml.load(fs.readFileSync("tests/fixtures/crawl-1.yaml", "utf8"));
let foundAllSeeds = true;
for (const seed of config.seeds) {
const url = new URL(seed).href;
if (!pages.has(url)) {
foundAllSeeds = false;
}
}
expect(foundAllSeeds).toBe(true);
expect(fs.existsSync("test-crawls/collections/configtest/configtest.wacz")).toBe(true);
});
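
// Run the same config but override the collection name, seed URL, and timeout
// on the command line; the resulting crawl should contain only the overridden URL.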
test("check yaml config file will be overwritten by command line", async () => {
try{
await exec("docker run -v $PWD/test-crawls:/crawls -v $PWD/tests/fixtures:/tests/fixtures webrecorder/browsertrix-crawler crawl --collection configtest-2 --config /tests/fixtures/crawl-1.yaml --url https://www.example.com --timeout 20000");
}
catch (error) {
console.log(error);
}
const crawledPages = fs.readFileSync("test-crawls/collections/configtest-2/pages/pages.jsonl", "utf8");
const pages = new Set();
for (const line of crawledPages.trim().split("\n")) {
const url = JSON.parse(line).url;
if (url) {
pages.add(url);
}
}
expect(pages.has("https://www.example.com/")).toBe(true);
expect(pages.size).toBe(1);
});