/*
* Extract component parts of various MS XML files (e.g. MS Office 2003 XML Documents)
*
 *
* Copyright (C) 2013-2025 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
* Copyright (C) 2007-2013 Sourcefire, Inc.
*
* Authors: Kevin Lin
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 51
* Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#if HAVE_CONFIG_H
#include "clamav-config.h"
#endif
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "clamav.h"
#include "others.h"
#include "conv.h"
#include "scanners.h"
#include "json_api.h"
#include "msxml_parser.h"
#include <libxml/xmlreader.h>
#define MSXML_VERBIOSE 0
#if MSXML_VERBIOSE
#define cli_msxmlmsg(...) cli_dbgmsg(__VA_ARGS__)
#else
#define cli_msxmlmsg(...)
#endif
#define check_state(state) \
do { \
if (state == -1) { \
cli_warnmsg("check_state[msxml]: CL_EPARSE @ ln%d\n", __LINE__); \
return CL_EPARSE; \
} else if (state == 0) { \
cli_dbgmsg("check_state[msxml]: CL_BREAK @ ln%d\n", __LINE__); \
return CL_BREAK; \
} \
} while (0)
#define track_json(mxctx) (mxctx->ictx->flags & MSXML_FLAG_JSON)
struct msxml_ictx {
cli_ctx *ctx;
uint32_t flags;
const struct key_entry *keys;
size_t num_keys;
json_object *root;
int toval;
};
struct key_entry blank_key = {NULL, NULL, 0};
static const struct key_entry *msxml_check_key(struct msxml_ictx *ictx, const xmlChar *key, size_t keylen)
{
unsigned i;
if (keylen > MSXML_JSON_STRLEN_MAX - 1) {
cli_dbgmsg("msxml_check_key: key name too long\n");
return &blank_key;
}
for (i = 0; i < ictx->num_keys; ++i) {
if (keylen == strlen(ictx->keys[i].key) && !strncasecmp((char *)key, ictx->keys[i].key, keylen)) {
return &ictx->keys[i];
}
}
return &blank_key;
}
static void msxml_error_handler(void *arg, const char *msg, xmlParserSeverities severity, xmlTextReaderLocatorPtr locator)
{
int line = xmlTextReaderLocatorLineNumber(locator);
xmlChar *URI = xmlTextReaderLocatorBaseURI(locator);
2019-05-03 18:25:17 -04:00
UNUSEDPARAM(arg);
switch (severity) {
case XML_PARSER_SEVERITY_WARNING:
case XML_PARSER_SEVERITY_VALIDITY_WARNING:
cli_dbgmsg("%s:%d: parser warning : %s", (char *)URI, line, msg);
break;
case XML_PARSER_SEVERITY_ERROR:
case XML_PARSER_SEVERITY_VALIDITY_ERROR:
cli_dbgmsg("%s:%d: parser error : %s", (char *)URI, line, msg);
break;
default:
cli_dbgmsg("%s:%d: unknown severity : %s", (char *)URI, line, msg);
break;
}
2015-04-16 12:31:09 -04:00
free(URI);
}
static int msxml_is_int(const char *value, size_t len, int32_t *val)
{
long val2;
char *endptr = NULL;
val2 = strtol(value, &endptr, 10);
if (endptr != value + len) {
return 0;
}
*val = (int32_t)(val2 & 0x0000ffff);
return 1;
}
static int msxml_parse_value(json_object *wrkptr, const char *arrname, const xmlChar *node_value)
{
json_object *newobj, *arrobj;
int val;
if (!wrkptr)
return CL_ENULLARG;
arrobj = cli_jsonarray(wrkptr, arrname);
if (arrobj == NULL) {
return CL_EMEM;
}
if (msxml_is_int((const char *)node_value, xmlStrlen(node_value), &val)) {
newobj = json_object_new_int(val);
} else if (!xmlStrcmp(node_value, (const xmlChar *)"true")) {
newobj = json_object_new_boolean(1);
} else if (!xmlStrcmp(node_value, (const xmlChar *)"false")) {
newobj = json_object_new_boolean(0);
} else {
newobj = json_object_new_string((const char *)node_value);
}
if (NULL == newobj) {
cli_errmsg("msxml_parse_value: no memory for json value for [%s]\n", arrname);
return CL_EMEM;
}
json_object_array_add(arrobj, newobj);
return CL_SUCCESS;
}
#define MAX_ATTRIBS 20
static cl_error_t msxml_parse_element(struct msxml_ctx *mxctx, xmlTextReaderPtr reader, int rlvl, void *jptr)
{
const xmlChar *element_name = NULL;
const xmlChar *node_name = NULL, *node_value = NULL;
const struct key_entry *keyinfo;
struct attrib_entry attribs[MAX_ATTRIBS];
cl_error_t ret;
int state, node_type, endtag = 0, num_attribs = 0;
cli_ctx *ctx = mxctx->ictx->ctx;
json_object *root = mxctx->ictx->root;
json_object *parent = (json_object *)jptr;
json_object *thisjobj = NULL;
cli_msxmlmsg("in msxml_parse_element @ layer %d\n", rlvl);
/* check recursion level */
if (rlvl >= MSXML_RECLEVEL_MAX) {
cli_dbgmsg("msxml_parse_element: reached msxml json recursion limit\n");
if (track_json(mxctx)) {
cl_error_t tmp = cli_json_parse_error(root, "MSXML_RECURSIVE_LIMIT");
if (tmp != CL_SUCCESS)
return tmp;
}
/* skip it */
state = xmlTextReaderNext(reader);
check_state(state);
return CL_SUCCESS;
}
/* acquire element type */
node_type = xmlTextReaderNodeType(reader);
if (node_type == -1)
return CL_EPARSE;
node_name = xmlTextReaderConstLocalName(reader);
node_value = xmlTextReaderConstValue(reader);
/* branch on node type */
switch (node_type) {
case XML_READER_TYPE_ELEMENT:
cli_msxmlmsg("msxml_parse_element: ELEMENT %s [%d]: %s\n", node_name, node_type, node_value);
/* storing the element name for verification/collection */
element_name = node_name;
if (!element_name) {
cli_dbgmsg("msxml_parse_element: element tag node nameless\n");
if (track_json(mxctx)) {
cl_error_t tmp = cli_json_parse_error(root, "MSXML_NAMELESS_ELEMENT");
if (tmp != CL_SUCCESS)
return tmp;
}
return CL_EPARSE; /* no name, nameless */
}
/* determine if the element is interesting */
keyinfo = msxml_check_key(mxctx->ictx, element_name, xmlStrlen(element_name));
cli_msxmlmsg("key: %s\n", keyinfo->key);
cli_msxmlmsg("name: %s\n", keyinfo->name);
cli_msxmlmsg("type: 0x%x\n", keyinfo->type);
/* element and contents are ignored */
if (keyinfo->type & MSXML_IGNORE_ELEM) {
cli_msxmlmsg("msxml_parse_element: IGNORING ELEMENT %s\n", keyinfo->name);
state = xmlTextReaderNext(reader);
check_state(state);
return CL_SUCCESS;
}
if (track_json(mxctx) && (keyinfo->type & MSXML_JSON_TRACK)) {
if (keyinfo->type & MSXML_JSON_ROOT)
thisjobj = cli_jsonobj(root, keyinfo->name);
else if (keyinfo->type & MSXML_JSON_WRKPTR)
thisjobj = cli_jsonobj(parent, keyinfo->name);
if (!thisjobj) {
return CL_EMEM;
}
cli_msxmlmsg("msxml_parse_element: generated json object [%s]\n", keyinfo->name);
/* count this element */
if (thisjobj && (keyinfo->type & MSXML_JSON_COUNT)) {
json_object *counter = NULL;
if (!json_object_object_get_ex(thisjobj, "Count", &counter)) { /* object not found */
cli_jsonint(thisjobj, "Count", 1);
} else {
int value = json_object_get_int(counter);
cli_jsonint(thisjobj, "Count", value + 1);
}
cli_msxmlmsg("msxml_parse_element: retrieved json object [Count]\n");
}
/* check if multiple entries are allowed */
if (thisjobj && (keyinfo->type & MSXML_JSON_MULTI)) {
/* replace this object with an array entry object */
json_object *multi = cli_jsonarray(thisjobj, "Multi");
if (!multi) {
return CL_EMEM;
}
cli_msxmlmsg("msxml_parse_element: generated or retrieved json multi array\n");
thisjobj = cli_jsonobj(multi, NULL);
if (!thisjobj)
return CL_EMEM;
cli_msxmlmsg("msxml_parse_element: generated json multi entry object\n");
}
/* handle attributes */
if (thisjobj && (keyinfo->type & MSXML_JSON_ATTRIB)) {
state = xmlTextReaderHasAttributes(reader);
if (state == 1) {
json_object *attributes;
const xmlChar *name, *value;
attributes = cli_jsonobj(thisjobj, "Attributes");
if (!attributes) {
return CL_EPARSE;
}
cli_msxmlmsg("msxml_parse_element: retrieved json object [Attributes]\n");
while (xmlTextReaderMoveToNextAttribute(reader) == 1) {
name = xmlTextReaderConstLocalName(reader);
value = xmlTextReaderConstValue(reader);
cli_msxmlmsg("\t%s: %s\n", name, value);
cli_jsonstr(attributes, (char *)name, (const char *)value);
}
} else if (state == -1)
return CL_EPARSE;
}
}
/* populate attributes for scanning callback - BROKEN, probably from the fact the reader is pointed to the attribute from previously parsing attributes */
if ((keyinfo->type & MSXML_SCAN_CB) && mxctx->scan_cb) {
state = xmlTextReaderHasAttributes(reader);
if (state == 0) {
state = xmlTextReaderMoveToFirstAttribute(reader);
if (state == 1) {
/* read first attribute (current head) */
attribs[num_attribs].key = (const char *)xmlTextReaderConstLocalName(reader);
attribs[num_attribs].value = (const char *)xmlTextReaderConstValue(reader);
num_attribs++;
} else if (state == -1) {
return CL_EPARSE;
}
}
/* start reading attributes or read remainder of attributes */
if (state == 1) {
cli_msxmlmsg("msxml_parse_element: adding attributes to scanning context\n");
while ((num_attribs < MAX_ATTRIBS) && (xmlTextReaderMoveToNextAttribute(reader) == 1)) {
attribs[num_attribs].key = (const char *)xmlTextReaderConstLocalName(reader);
attribs[num_attribs].value = (const char *)xmlTextReaderConstValue(reader);
num_attribs++;
}
} else if (state == -1) {
return CL_EPARSE;
}
}
/* check self-containment */
state = xmlTextReaderMoveToElement(reader);
if (state == -1)
return CL_EPARSE;
state = xmlTextReaderIsEmptyElement(reader);
if (state == 1) {
cli_msxmlmsg("msxml_parse_element: SELF-CLOSING\n");
state = xmlTextReaderNext(reader);
check_state(state);
return CL_SUCCESS;
} else if (state == -1)
return CL_EPARSE;
/* advance to first content node */
state = xmlTextReaderRead(reader);
check_state(state);
while (!endtag) {
if (track_json(mxctx) && (cli_json_timeout_cycle_check(ctx, &(mxctx->ictx->toval)) != CL_SUCCESS))
return CL_ETIMEOUT;
node_type = xmlTextReaderNodeType(reader);
if (node_type == -1)
return CL_EPARSE;
switch (node_type) {
case XML_READER_TYPE_ELEMENT:
ret = msxml_parse_element(mxctx, reader, rlvl + 1, thisjobj ? thisjobj : parent);
if (ret != CL_SUCCESS) {
return ret;
}
break;
case XML_READER_TYPE_TEXT:
node_value = xmlTextReaderConstValue(reader);
cli_msxmlmsg("TEXT: %s\n", node_value);
if (thisjobj && (keyinfo->type & MSXML_JSON_VALUE)) {
ret = msxml_parse_value(thisjobj, "Value", node_value);
if (ret != CL_SUCCESS)
return ret;
cli_msxmlmsg("msxml_parse_element: added json value [%s: %s]\n", keyinfo->name, (const char *)node_value);
}
/* callback-based scanning mechanism for embedded objects (used by HWPML) */
if ((keyinfo->type & MSXML_SCAN_CB) && mxctx->scan_cb) {
char name[1024];
char *tempfile = name;
int of;
size_t vlen = strlen((const char *)node_value);
cli_msxmlmsg("BINARY CALLBACK DATA!\n");
libclamav: Add engine option to toggle temp directory recursion Temp directory recursion in ClamAV is when each layer of a scan gets its own temp directory in the parent layer's temp directory. In addition to temp directory recursion, ClamAV has been creating a new subdirectory for each file scan as a risk-adverse method to ensure no temporary file leaks fill up the disk. Creating a directory is relatively slow on Windows in particular if scanning a lot of very small files. This commit: 1. Separates the temp directory recursion feature from the leave-temps feature so that libclamav can leave temp files without making subdirectories for each file scanned. 2. Makes it so that when temp directory recursion is off, libclamav will just use the configure temp directory for all files. The new option to enable temp directory recursion is for libclamav-only at this time. It is off by default, and you can enable it like this: ```c cl_engine_set_num(engine, CL_ENGINE_TMPDIR_RECURSION, 1); ``` For the `clamscan` and `clamd` programs, temp directory recursion will be enabled when `--leave-temps` / `LeaveTemporaryFiles` is enabled. The difference is that when disabled, it will return to using the configured temp directory without making a subdirectory for each file scanned, so as to improve scan performance for small files, mostly on Windows. Under the hood, this commit also: 1. Cleans up how we keep track of tmpdirs for each layer. The goal here is to align how we keep track of layer-specific stuff using the scan_layer structure. 2. Cleans up how we record metadata JSON for embedded files. Note: Embedded files being different from Contained files, as they are extracted not with a parser, but by finding them with file type magic signatures. CLAM-1583
2025-06-09 20:42:31 -04:00
if ((ret = cli_gentempfd(ctx->this_layer_tmpdir, &tempfile, &of)) != CL_SUCCESS) {
cli_warnmsg("msxml_parse_element: failed to create temporary file %s\n", tempfile);
return ret;
}
if (cli_writen(of, (char *)node_value, vlen) != vlen) {
close(of);
if (!(ctx->engine->keeptmp))
cli_unlink(tempfile);
free(tempfile);
return CL_EWRITE;
}
cli_dbgmsg("msxml_parse_element: extracted binary data to %s\n", tempfile);
ret = mxctx->scan_cb(of, tempfile, ctx, num_attribs, attribs, mxctx->scan_data);
close(of);
if (!(ctx->engine->keeptmp)) {
cli_unlink(tempfile);
}
free(tempfile);
if (ret != CL_SUCCESS) {
return ret;
}
}
/* scanning protocol for embedded objects encoded in base64 (used by MSXML) */
if (keyinfo->type & MSXML_SCAN_B64) {
char name[1024];
char *decoded, *tempfile = name;
size_t decodedlen;
int of;
cli_msxmlmsg("BINARY DATA!\n");
decoded = (char *)cl_base64_decode((char *)node_value, strlen((const char *)node_value), NULL, &decodedlen, 0);
if (!decoded) {
cli_warnmsg("msxml_parse_element: failed to decode base64-encoded binary data\n");
state = xmlTextReaderRead(reader);
check_state(state);
break;
}
libclamav: Add engine option to toggle temp directory recursion Temp directory recursion in ClamAV is when each layer of a scan gets its own temp directory in the parent layer's temp directory. In addition to temp directory recursion, ClamAV has been creating a new subdirectory for each file scan as a risk-adverse method to ensure no temporary file leaks fill up the disk. Creating a directory is relatively slow on Windows in particular if scanning a lot of very small files. This commit: 1. Separates the temp directory recursion feature from the leave-temps feature so that libclamav can leave temp files without making subdirectories for each file scanned. 2. Makes it so that when temp directory recursion is off, libclamav will just use the configure temp directory for all files. The new option to enable temp directory recursion is for libclamav-only at this time. It is off by default, and you can enable it like this: ```c cl_engine_set_num(engine, CL_ENGINE_TMPDIR_RECURSION, 1); ``` For the `clamscan` and `clamd` programs, temp directory recursion will be enabled when `--leave-temps` / `LeaveTemporaryFiles` is enabled. The difference is that when disabled, it will return to using the configured temp directory without making a subdirectory for each file scanned, so as to improve scan performance for small files, mostly on Windows. Under the hood, this commit also: 1. Cleans up how we keep track of tmpdirs for each layer. The goal here is to align how we keep track of layer-specific stuff using the scan_layer structure. 2. Cleans up how we record metadata JSON for embedded files. Note: Embedded files being different from Contained files, as they are extracted not with a parser, but by finding them with file type magic signatures. CLAM-1583
2025-06-09 20:42:31 -04:00
if ((ret = cli_gentempfd(ctx->this_layer_tmpdir, &tempfile, &of)) != CL_SUCCESS) {
cli_warnmsg("msxml_parse_element: failed to create temporary file %s\n", tempfile);
free(decoded);
return ret;
}
if (cli_writen(of, decoded, decodedlen) != decodedlen) {
free(decoded);
close(of);
if (!(ctx->engine->keeptmp))
cli_unlink(tempfile);
free(tempfile);
return CL_EWRITE;
}
free(decoded);
cli_dbgmsg("msxml_parse_element: extracted binary data to %s\n", tempfile);
ret = cli_magic_scan_desc(of, tempfile, ctx, NULL, LAYER_ATTRIBUTES_NONE);
close(of);
if (!(ctx->engine->keeptmp))
cli_unlink(tempfile);
free(tempfile);
if (ret != CL_SUCCESS) {
return ret;
}
}
/* advance to next node */
state = xmlTextReaderRead(reader);
check_state(state);
break;
case XML_READER_TYPE_COMMENT:
node_value = xmlTextReaderConstValue(reader);
cli_msxmlmsg("COMMENT: %s\n", node_value);
/* callback-based scanning mechanism for comments (used by MHTML) */
if ((keyinfo->type & MSXML_COMMENT_CB) && mxctx->comment_cb) {
ret = mxctx->comment_cb((const char *)node_value, ctx, thisjobj, mxctx->comment_data);
if (ret != CL_SUCCESS) {
return ret;
}
}
/* advance to next node */
state = xmlTextReaderRead(reader);
check_state(state);
break;
case XML_READER_TYPE_SIGNIFICANT_WHITESPACE:
/* advance to next node */
state = xmlTextReaderRead(reader);
check_state(state);
break;
case XML_READER_TYPE_END_ELEMENT:
cli_msxmlmsg("in msxml_parse_element @ layer %d closed\n", rlvl);
node_name = xmlTextReaderConstLocalName(reader);
if (!node_name) {
cli_dbgmsg("msxml_parse_element: element end tag node nameless\n");
return CL_EPARSE; /* no name, nameless */
}
if (xmlStrcmp(element_name, node_name)) {
cli_dbgmsg("msxml_parse_element: element tag does not match end tag %s != %s\n", element_name, node_name);
return CL_EFORMAT;
}
/* advance to next element tag */
state = xmlTextReaderRead(reader);
check_state(state);
endtag = 1;
break;
default:
node_name = xmlTextReaderConstLocalName(reader);
node_value = xmlTextReaderConstValue(reader);
cli_dbgmsg("msxml_parse_element: unhandled xml secondary node %s [%d]: %s\n", node_name, node_type, node_value);
state = xmlTextReaderRead(reader);
check_state(state);
}
}
break;
case XML_READER_TYPE_PROCESSING_INSTRUCTION:
cli_msxmlmsg("msxml_parse_element: PROCESSING INSTRUCTION %s [%d]: %s\n", node_name, node_type, node_value);
break;
case XML_READER_TYPE_SIGNIFICANT_WHITESPACE:
cli_msxmlmsg("msxml_parse_element: SIGNIFICANT WHITESPACE %s [%d]: %s\n", node_name, node_type, node_value);
break;
case XML_READER_TYPE_END_ELEMENT:
cli_msxmlmsg("msxml_parse_element: END ELEMENT %s [%d]: %s\n", node_name, node_type, node_value);
return CL_SUCCESS;
default:
cli_dbgmsg("msxml_parse_element: unhandled xml primary node %s [%d]: %s\n", node_name, node_type, node_value);
}
return CL_SUCCESS;
}
/* reader initialization and closing handled by caller */
cl_error_t cli_msxml_parse_document(cli_ctx *ctx, xmlTextReaderPtr reader, const struct key_entry *keys, const size_t num_keys, uint32_t flags, struct msxml_ctx *mxctx)
{
struct msxml_ctx reserve;
struct msxml_ictx ictx;
int state;
cl_error_t ret = CL_SUCCESS;
if (!ctx) {
return CL_ENULLARG;
}
if (!mxctx) {
memset(&reserve, 0, sizeof(reserve));
mxctx = &reserve;
}
ictx.ctx = ctx;
ictx.flags = flags;
ictx.keys = keys;
ictx.num_keys = num_keys;
if (flags & MSXML_FLAG_JSON) {
ictx.root = ctx->this_layer_metadata_json;
2015-04-16 12:31:09 -04:00
/* JSON Sanity Check */
if (!ictx.root)
ictx.flags &= ~MSXML_FLAG_JSON;
ictx.toval = 0;
} else {
ictx.root = NULL;
}
mxctx->ictx = &ictx;
/* Error Handler (setting handler on tree walker causes segfault) */
if (!(flags & MSXML_FLAG_WALK))
// xmlTextReaderSetErrorHandler(reader, NULL, NULL); /* xml default handler */
xmlTextReaderSetErrorHandler(reader, msxml_error_handler, NULL);
/* Main Processing Loop */
while ((state = xmlTextReaderRead(reader)) == 1) {
if ((ictx.flags & MSXML_FLAG_JSON) && (cli_json_timeout_cycle_check(ictx.ctx, &(ictx.toval)) != CL_SUCCESS))
return CL_ETIMEOUT;
ret = msxml_parse_element(mxctx, reader, 0, ictx.root);
if (ret != CL_SUCCESS) {
if (ret == CL_VIRUS || ret == CL_ETIMEOUT || ret == CL_BREAK) {
cli_dbgmsg("cli_msxml_parse_document: encountered halt event in parsing xml document\n");
break;
} else {
cli_warnmsg("cli_msxml_parse_document: encountered issue in parsing xml document\n");
break;
}
}
}
if (state == -1) {
ret = CL_EPARSE;
}
/* Parse General Error Handler */
if (ictx.flags & MSXML_FLAG_JSON) {
cl_error_t tmp = CL_SUCCESS;
switch (ret) {
case CL_SUCCESS:
case CL_BREAK: /* OK */
break;
case CL_VIRUS:
tmp = cli_json_parse_error(ictx.root, "MSXML_INTR_VIRUS");
break;
case CL_ETIMEOUT:
tmp = cli_json_parse_error(ictx.root, "MSXML_INTR_TIMEOUT");
break;
case CL_EPARSE:
tmp = cli_json_parse_error(ictx.root, "MSXML_ERROR_XMLPARSER");
break;
case CL_EMEM:
tmp = cli_json_parse_error(ictx.root, "MSXML_ERROR_OUTOFMEM");
break;
case CL_EFORMAT:
tmp = cli_json_parse_error(ictx.root, "MSXML_ERROR_MALFORMED");
break;
default:
tmp = cli_json_parse_error(ictx.root, "MSXML_ERROR_OTHER");
break;
}
if (tmp) {
return tmp;
}
}
/* non-critical return suppression */
libclamav: Fix scan recursion tracking Scan recursion is the process of identifying files embedded in other files and then scanning them, recursively. Internally this process is more complex than it may sound because a file may have multiple layers of types before finding a new "file". At present we treat the recursion count in the scanning context as an index into both our fmap list AND our container list. These two lists are conceptually a part of the same thing and should be unified. But what's concerning is that the "recursion level" isn't actually incremented or decremented at the same time that we add a layer to the fmap or container lists but instead is more touchy-feely, increasing when we find a new "file". To account for this shadiness, the size of the fmap and container lists has always been a little longer than our "max scan recursion" limit so we don't accidentally overflow the fmap or container arrays (!). I've implemented a single recursion-stack as an array, similar to before, which includes a pointer to each fmap at each layer, along with the size and type. Push and pop functions add and remove layers whenever a new fmap is added. A boolean argument when pushing indicates if the new layer represents a new buffer or new file (descriptor). A new buffer will reset the "nested fmap level" (described below). This commit also provides a solution for an issue where we detect embedded files more than once during scan recursion. 
For illustration, imagine a tarball named foo.tar.gz with this structure: | description | type | rec level | nested fmap level | | ------------------------- | ----- | --------- | ----------------- | | foo.tar.gz | GZ | 0 | 0 | | └── foo.tar | TAR | 1 | 0 | | ├── bar.zip | ZIP | 2 | 1 | | │   └── hola.txt | ASCII | 3 | 0 | | └── baz.exe | PE | 2 | 1 | But suppose baz.exe embeds a ZIP archive and a 7Z archive, like this: | description | type | rec level | nested fmap level | | ------------------------- | ----- | --------- | ----------------- | | baz.exe | PE | 0 | 0 | | ├── sfx.zip | ZIP | 1 | 1 | | │   └── hello.txt | ASCII | 2 | 0 | | └── sfx.7z | 7Z | 1 | 1 | |    └── world.txt | ASCII | 2 | 0 | (A) If we scan for embedded files at any layer, we may detect: | description | type | rec level | nested fmap level | | ------------------------- | ----- | --------- | ----------------- | | foo.tar.gz | GZ | 0 | 0 | | ├── foo.tar | TAR | 1 | 0 | | │ ├── bar.zip | ZIP | 2 | 1 | | │ │   └── hola.txt | ASCII | 3 | 0 | | │ ├── baz.exe | PE | 2 | 1 | | │ │ ├── sfx.zip | ZIP | 3 | 1 | | │ │ │   └── hello.txt | ASCII | 4 | 0 | | │ │ └── sfx.7z | 7Z | 3 | 1 | | │ │    └── world.txt | ASCII | 4 | 0 | | │ ├── sfx.zip | ZIP | 2 | 1 | | │ │   └── hello.txt | ASCII | 3 | 0 | | │ └── sfx.7z | 7Z | 2 | 1 | | │   └── world.txt | ASCII | 3 | 0 | | ├── sfx.zip | ZIP | 1 | 1 | | └── sfx.7z | 7Z | 1 | 1 | (A) is bad because it scans content more than once. Note that for the GZ layer, it may detect the ZIP and 7Z if the signature hits on the compressed data, which it might, though extracting the ZIP and 7Z will likely fail. The reason the above doesn't happen now is that we restrict embedded type scans for a bunch of archive formats to include GZ and TAR. 
(B) If we scan for embedded files at the foo.tar layer, we may detect: | description | type | rec level | nested fmap level | | ------------------------- | ----- | --------- | ----------------- | | foo.tar.gz | GZ | 0 | 0 | | └── foo.tar | TAR | 1 | 0 | | ├── bar.zip | ZIP | 2 | 1 | | │   └── hola.txt | ASCII | 3 | 0 | | ├── baz.exe | PE | 2 | 1 | | ├── sfx.zip | ZIP | 2 | 1 | | │   └── hello.txt | ASCII | 3 | 0 | | └── sfx.7z | 7Z | 2 | 1 | |    └── world.txt | ASCII | 3 | 0 | (B) is almost right. But we can achieve it easily enough only scanning for embedded content in the current fmap when the "nested fmap level" is 0. The upside is that it should safely detect all embedded content, even if it may think the sfz.zip and sfx.7z are in foo.tar instead of in baz.exe. The biggest risk I can think of affects ZIPs. SFXZIP detection is identical to ZIP detection, which is why we don't allow SFXZIP to be detected if insize of a ZIP. If we only allow embedded type scanning at fmap-layer 0 in each buffer, this will fail to detect the embedded ZIP if the bar.exe was not compressed in foo.zip and if non-compressed files extracted from ZIPs aren't extracted as new buffers: | description | type | rec level | nested fmap level | | ------------------------- | ----- | --------- | ----------------- | | foo.zip | ZIP | 0 | 0 | | └── bar.exe | PE | 1 | 1 | | └── sfx.zip | ZIP | 2 | 2 | Provided that we ensure all files extracted from zips are scanned in new buffers, option (B) should be safe. (C) If we scan for embedded files at the baz.exe layer, we may detect: | description | type | rec level | nested fmap level | | ------------------------- | ----- | --------- | ----------------- | | foo.tar.gz | GZ | 0 | 0 | | └── foo.tar | TAR | 1 | 0 | | ├── bar.zip | ZIP | 2 | 1 | | │   └── hola.txt | ASCII | 3 | 0 | | └── baz.exe | PE | 2 | 1 | | ├── sfx.zip | ZIP | 3 | 1 | | │   └── hello.txt | ASCII | 4 | 0 | | └── sfx.7z | 7Z | 3 | 1 | |    └── world.txt | ASCII | 4 | 0 | (C) is right. 
But it's harder to achieve. For this example we can get it by restricting 7ZSFX and ZIPSFX detection only when scanning an executable. But that may mean losing detection of archives embedded elsewhere. And we'd have to identify allowable container types for each possible embedded type, which would be very difficult. So this commit aims to solve the issue the (B)-way. Note that in all situations, we still have to scan with file typing enabled to determine if we need to reassign the current file type, such as re-identifying a Bzip2 archive as a DMG that happens to be Bzip2- compressed. Detection of DMG and a handful of other types rely on finding data partway through or near the ned of a file before reassigning the entire file as the new type. Other fixes and considerations in this commit: - The utf16 HTML parser has weak error handling, particularly with respect to creating a nested fmap for scanning the ascii decoded file. This commit cleans up the error handling and wraps the nested scan with the recursion-stack push()/pop() for correct recursion tracking. Before this commit, each container layer had a flag to indicate if the container layer is valid. We need something similar so that the cli_recursion_stack_get_*() functions ignore normalized layers. Details... Imagine an LDB signature for HTML content that specifies a ZIP container. If the signature actually alerts on the normalized HTML and you don't ignore normalized layers for the container check, it will appear as though the alert is in an HTML container rather than a ZIP container. This commit accomplishes this with a boolean you set in the scan context before scanning a new layer. Then when the new fmap is created, it will use that flag to set similar flag for the layer. The context flag is reset those that anything after this doesn't have that flag. 
The flag allows the new recursion_stack_get() function to ignore normalized layers when iterating the stack to return a layer at a requested index, negative or positive. Scanning normalized extracted/normalized javascript and VBA should also use the 'layer is normalized' flag. - This commit also fixes Heuristic.Broken.Executable alert for ELF files to make sure that: A) these only alert if cli_append_virus() returns CL_VIRUS (aka it respects the FP check). B) all broken-executable alerts for ELF only happen if the SCAN_HEURISTIC_BROKEN option is enabled. - This commit also cleans up the error handling in cli_magic_scan_dir(). This was needed so we could correctly apply the layer-is-normalized-flag to all VBA macros extracted to a directory when scanning the directory. - Also fix an issue where exceeding scan maximums wouldn't cause embedded file detection scans to abort. Granted we don't actually want to abort if max filesize or max recursion depth are exceeded... only if max scansize, max files, and max scantime are exceeded. Add 'abort_scan' flag to scan context, to protect against depending on correct error propagation for fatal conditions. Instead, setting this flag in the scan context should guarantee that a fatal condition deep in scan recursion isn't lost which result in more stuff being scanned instead of aborting. This shouldn't be necessary, but some status codes like CL_ETIMEOUT never used to be fatal and it's easier to do this than to verify every parser only returns CL_ETIMEOUT and other "fatal status codes" in fatal conditions. - Remove duplicate is_tar() prototype from filestypes.c and include is_tar.h instead. - Presently we create the fmap hash when creating the fmap. This wastes a bit of CPU if the hash is never needed. Now that we're creating fmap's for all embedded files discovered with file type recognition scans, this is a much more frequent occurence and really slows things down. 
This commit fixes the issue by only creating fmap hashes as needed. This should not only resolve the perfomance impact of creating fmap's for all embedded files, but also should improve performance in general. - Add allmatch check to the zip parser after the central-header meta match. That way we don't multiple alerts with the same match except in allmatch mode. Clean up error handling in the zip parser a tiny bit. - Fixes to ensure that the scan limits such as scansize, filesize, recursion depth, # of embedded files, and scantime are always reported if AlertExceedsMax (--alert-exceeds-max) is enabled. - Fixed an issue where non-fatal alerts for exceeding scan maximums may mask signature matches later on. I changed it so these alerts use the "possibly unwanted" alert-type and thus only alert if no other alerts were found or if all-match or heuristic-precedence are enabled. - Added the "Heuristics.Limits.Exceeded.*" events to the JSON metadata when the --gen-json feature is enabled. These will show up once under "ParseErrors" the first time a limit is exceeded. In the present implementation, only one limits-exceeded events will be added, so as to prevent a malicious or malformed sample from filling the JSON buffer with millions of events and using a tonne of RAM.
2021-09-11 14:15:21 -07:00
if (ret == CL_BREAK)
ret = CL_SUCCESS;
/* important but non-critical suppression */
if (ret == CL_EPARSE) {
cli_dbgmsg("cli_msxml_parse_document: suppressing parsing error to continue scan\n");
ret = CL_SUCCESS;
}
return ret;
}