2010-01-05 19:07:23 +01:00
|
|
|
/*
|
2025-02-14 10:24:30 -05:00
|
|
|
* Copyright (C) 2013-2025 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
|
2019-01-25 10:15:50 -05:00
|
|
|
* Copyright (C) 2010-2013 Sourcefire, Inc.
|
2010-01-05 19:07:23 +01:00
|
|
|
*
|
2010-01-07 23:38:33 +01:00
|
|
|
* Authors: aCaB <acab@clamav.net>, Török Edvin <edwin@clamav.net>
|
2010-01-05 19:07:23 +01:00
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
|
|
|
* MA 02110-1301, USA.
|
|
|
|
*/
|
|
|
|
|
2010-01-05 18:15:59 +01:00
|
|
|
#if HAVE_CONFIG_H
|
|
|
|
#include "clamav-config.h"
|
|
|
|
#endif
|
|
|
|
|
2010-01-06 19:32:34 +02:00
|
|
|
#include <string.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <pthread.h>
|
|
|
|
#include <assert.h>
|
2010-01-05 18:15:59 +01:00
|
|
|
|
2009-08-11 12:23:14 +02:00
|
|
|
#include "mpool.h"
|
|
|
|
#include "clamav.h"
|
|
|
|
#include "cache.h"
|
2023-03-31 17:20:54 -04:00
|
|
|
#include "math.h"
|
2010-01-05 14:56:33 +01:00
|
|
|
#include "fmap.h"
|
2009-08-11 12:23:14 +02:00
|
|
|
|
2022-08-03 20:34:48 -07:00
|
|
|
#include "clamav_rust.h"
|
|
|
|
|
2023-03-31 17:20:54 -04:00
|
|
|
/* The chooser function
|
2010-01-15 03:00:15 +01:00
|
|
|
Each tree is protected by a mutex against concurrent access */
|
2023-03-31 17:20:54 -04:00
|
|
|
static inline unsigned int getkey(uint8_t *hash, size_t trees)
|
2018-12-03 12:40:13 -05:00
|
|
|
{
|
2022-01-25 13:34:56 -05:00
|
|
|
if (hash) {
|
2023-03-31 17:20:54 -04:00
|
|
|
// Take the first two bytes (16 bits) of the hash, which total to 65536 values,
|
|
|
|
// and modulus that by the number of trees desired.
|
|
|
|
// As long as trees < 65536, and the hash is uniformly distributed,
|
|
|
|
// the resulting key will be a good value to use a bucket identifier
|
|
|
|
// for evenly placing values.
|
|
|
|
return (hash[0] | (((unsigned int)hash[1]) << 8)) % trees;
|
2022-01-25 13:34:56 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2018-12-03 12:40:13 -05:00
|
|
|
}
|
2010-01-09 02:19:25 +01:00
|
|
|
|
2010-01-15 03:00:15 +01:00
|
|
|
/* SPLAY --------------------------------------------------------------------- */
|
|
|
|
struct node { /* a node */
|
2025-06-03 19:03:20 -04:00
|
|
|
int64_t digest[4];
|
2010-01-09 02:19:25 +01:00
|
|
|
struct node *left;
|
|
|
|
struct node *right;
|
2010-01-11 13:10:36 +01:00
|
|
|
struct node *up;
|
2010-01-13 00:03:30 +01:00
|
|
|
struct node *next;
|
|
|
|
struct node *prev;
|
2010-01-10 20:24:09 +01:00
|
|
|
uint32_t size;
|
2010-03-05 22:17:46 +01:00
|
|
|
uint32_t minrec;
|
2010-01-09 02:19:25 +01:00
|
|
|
};
|
|
|
|
|
2010-01-15 03:00:15 +01:00
|
|
|
struct cache_set { /* a tree */
|
2010-01-09 02:19:25 +01:00
|
|
|
struct node *data;
|
|
|
|
struct node *root;
|
2010-01-13 00:03:30 +01:00
|
|
|
struct node *first;
|
|
|
|
struct node *last;
|
2010-01-09 02:19:25 +01:00
|
|
|
};
|
|
|
|
|
2022-08-03 20:34:48 -07:00
|
|
|
struct CACHE {
|
|
|
|
struct cache_set cacheset;
|
2023-03-31 17:20:54 -04:00
|
|
|
uint32_t trees;
|
|
|
|
uint32_t nodes_per_tree;
|
2022-08-03 20:34:48 -07:00
|
|
|
#ifdef CL_THREAD_SAFE
|
|
|
|
pthread_mutex_t mutex;
|
|
|
|
#endif
|
|
|
|
};
|
|
|
|
|
2010-01-15 03:00:15 +01:00
|
|
|
/* Allocates all the nodes and sets up the replacement chain */
|
2023-03-31 17:20:54 -04:00
|
|
|
static int cacheset_init(struct cache_set *cs, mpool_t *mempool, uint32_t nodes_per_tree)
|
2018-12-03 12:40:13 -05:00
|
|
|
{
|
2010-01-13 00:03:30 +01:00
|
|
|
unsigned int i;
|
2019-05-07 16:52:29 -04:00
|
|
|
|
|
|
|
#ifndef USE_MPOOL
|
|
|
|
UNUSEDPARAM(mempool);
|
|
|
|
#endif
|
|
|
|
|
2023-03-31 17:20:54 -04:00
|
|
|
cs->data = MPOOL_CALLOC(mempool, nodes_per_tree, sizeof(*cs->data));
|
2010-01-13 00:03:30 +01:00
|
|
|
cs->root = NULL;
|
2010-01-09 02:19:25 +01:00
|
|
|
|
2018-12-03 12:40:13 -05:00
|
|
|
if (!cs->data)
|
|
|
|
return 1;
|
2010-01-13 00:03:30 +01:00
|
|
|
|
2023-03-31 17:20:54 -04:00
|
|
|
for (i = 1; i < nodes_per_tree; i++) {
|
2018-12-03 12:40:13 -05:00
|
|
|
cs->data[i - 1].next = &cs->data[i];
|
|
|
|
cs->data[i].prev = &cs->data[i - 1];
|
2010-01-13 00:03:30 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
cs->first = cs->data;
|
2023-03-31 17:20:54 -04:00
|
|
|
cs->last = &cs->data[nodes_per_tree - 1];
|
2010-01-13 00:03:30 +01:00
|
|
|
|
2010-01-09 02:19:25 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-01-15 03:00:15 +01:00
|
|
|
/* Frees all the nodes */
|
2018-12-03 12:40:13 -05:00
|
|
|
static inline void cacheset_destroy(struct cache_set *cs, mpool_t *mempool)
|
|
|
|
{
|
2019-05-07 16:52:29 -04:00
|
|
|
#ifndef USE_MPOOL
|
|
|
|
UNUSEDPARAM(mempool);
|
|
|
|
#endif
|
|
|
|
|
2019-05-03 18:16:03 -04:00
|
|
|
MPOOL_FREE(mempool, cs->data);
|
2010-01-14 18:54:53 +01:00
|
|
|
cs->data = NULL;
|
|
|
|
}
|
|
|
|
|
2024-01-19 09:08:36 -08:00
|
|
|
/* The left/right chooser for the splay tree */
|
2018-12-03 12:40:13 -05:00
|
|
|
static inline int cmp(int64_t *a, ssize_t sa, int64_t *b, ssize_t sb)
|
|
|
|
{
|
|
|
|
if (a[1] < b[1]) return -1;
|
|
|
|
if (a[1] > b[1]) return 1;
|
|
|
|
if (a[0] < b[0]) return -1;
|
|
|
|
if (a[0] > b[0]) return 1;
|
|
|
|
if (sa < sb) return -1;
|
|
|
|
if (sa > sb) return 1;
|
2010-01-15 03:00:15 +01:00
|
|
|
return 0;
|
2010-01-09 16:21:48 +01:00
|
|
|
}
|
2010-01-09 02:19:25 +01:00
|
|
|
|
2010-01-15 03:00:15 +01:00
|
|
|
/* #define PRINT_TREE */
|
2010-01-14 04:38:31 +01:00
|
|
|
#ifdef PRINT_TREE
|
|
|
|
#define ptree printf
|
|
|
|
#else
|
2010-01-15 03:00:15 +01:00
|
|
|
#define ptree(...)
|
2010-01-14 04:38:31 +01:00
|
|
|
#endif
|
|
|
|
|
2010-01-15 03:00:15 +01:00
|
|
|
/* Debug function to print the tree and check its consistency */
|
|
|
|
/* #define CHECK_TREE */
|
2010-01-14 04:38:31 +01:00
|
|
|
#ifdef CHECK_TREE
|
2018-12-03 12:40:13 -05:00
|
|
|
static int printtree(struct cache_set *cs, struct node *n, int d)
|
|
|
|
{
|
2010-01-14 04:38:31 +01:00
|
|
|
int i;
|
|
|
|
int ab = 0;
|
2013-04-15 17:58:26 -04:00
|
|
|
if ((n == NULL) || (cs == NULL) || (cs->data == NULL)) return 0;
|
2018-12-03 12:40:13 -05:00
|
|
|
if (n == cs->root) {
|
|
|
|
ptree("--------------------------\n");
|
|
|
|
}
|
|
|
|
ab |= printtree(cs, n->right, d + 1);
|
|
|
|
if (n->right) {
|
|
|
|
if (cmp(n->digest, n->size, n->right->digest, n->right->size) >= 0) {
|
|
|
|
for (i = 0; i < d; i++) ptree(" ");
|
|
|
|
ptree("^^^^ %lld >= %lld\n", n->digest[1], n->right->digest[1]);
|
|
|
|
ab = 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (i = 0; i < d; i++) ptree(" ");
|
|
|
|
ptree("%08x(%02u)\n", n->digest[1] >> 48, n - cs->data);
|
|
|
|
if (n->left) {
|
|
|
|
if (cmp(n->digest, n->size, n->left->digest, n->left->size) <= 0) {
|
|
|
|
for (i = 0; i < d; i++) ptree(" ");
|
|
|
|
ptree("vvvv %lld <= %lld\n", n->digest[1], n->left->digest[1]);
|
|
|
|
ab = 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (d) {
|
|
|
|
if (!n->up) {
|
|
|
|
printf("no parent, [node %02u]!\n", n - cs->data);
|
|
|
|
ab = 1;
|
|
|
|
} else {
|
|
|
|
if (n->up->left != n && n->up->right != n) {
|
|
|
|
printf("broken parent [node %02u, parent node %02u]\n", n - cs->data, n->up - cs->data);
|
|
|
|
ab = 1;
|
|
|
|
}
|
|
|
|
}
|
2010-01-14 04:38:31 +01:00
|
|
|
} else {
|
2018-12-03 12:40:13 -05:00
|
|
|
if (n->up) {
|
|
|
|
printf("root with a parent, [node %02u]!\n", n - cs->data);
|
|
|
|
ab = 1;
|
|
|
|
}
|
2010-01-14 04:38:31 +01:00
|
|
|
}
|
2018-12-03 12:40:13 -05:00
|
|
|
ab |= printtree(cs, n->left, d + 1);
|
2010-01-14 04:38:31 +01:00
|
|
|
return ab;
|
|
|
|
}
|
|
|
|
#else
|
2018-12-03 12:40:13 -05:00
|
|
|
#define printtree(a, b, c) (0)
|
2010-01-14 04:38:31 +01:00
|
|
|
#endif
|
|
|
|
|
2013-04-15 17:58:26 -04:00
|
|
|
/* For troubleshooting only; prints out one specific node */
|
|
|
|
/* #define PRINT_NODE */
|
|
|
|
#ifdef PRINT_NODE
|
2018-12-03 12:40:13 -05:00
|
|
|
static void printnode(const char *prefix, struct cache_set *cs, struct node *n)
|
|
|
|
{
|
2013-04-15 17:58:26 -04:00
|
|
|
if (!prefix || !cs || !cs->data) {
|
|
|
|
printf("bad args!\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (!n) {
|
|
|
|
printf("no node!\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
printf("%s node [%02u]:", prefix, n - cs->data);
|
2025-06-03 19:03:20 -04:00
|
|
|
printf(" size=%lu digest=%llx,%llx,%llx,%llx\n", (unsigned long)(n->size), n->digest[0], n->digest[1], n->digest[2], n->digest[3]);
|
2013-04-15 17:58:26 -04:00
|
|
|
printf("\tleft=");
|
2018-12-03 12:40:13 -05:00
|
|
|
if (n->left)
|
2013-04-15 17:58:26 -04:00
|
|
|
printf("%02u ", n->left - cs->data);
|
|
|
|
else
|
|
|
|
printf("NULL ");
|
|
|
|
printf("right=");
|
2018-12-03 12:40:13 -05:00
|
|
|
if (n->right)
|
2013-04-15 17:58:26 -04:00
|
|
|
printf("%02u ", n->right - cs->data);
|
|
|
|
else
|
|
|
|
printf("NULL ");
|
|
|
|
printf("up=");
|
2018-12-03 12:40:13 -05:00
|
|
|
if (n->up)
|
2013-04-15 17:58:26 -04:00
|
|
|
printf("%02u ", n->up - cs->data);
|
|
|
|
else
|
|
|
|
printf("NULL ");
|
|
|
|
|
|
|
|
printf("\tprev=");
|
2018-12-03 12:40:13 -05:00
|
|
|
if (n->prev)
|
2013-04-15 17:58:26 -04:00
|
|
|
printf("%02u ", n->prev - cs->data);
|
|
|
|
else
|
|
|
|
printf("NULL ");
|
|
|
|
printf("next=");
|
2018-12-03 12:40:13 -05:00
|
|
|
if (n->next)
|
2013-04-15 17:58:26 -04:00
|
|
|
printf("%02u\n", n->next - cs->data);
|
|
|
|
else
|
|
|
|
printf("NULL\n");
|
|
|
|
}
|
|
|
|
#else
|
libclamav: Fix scan recursion tracking
Scan recursion is the process of identifying files embedded in other
files and then scanning them, recursively.
Internally this process is more complex than it may sound because a file
may have multiple layers of types before finding a new "file".
At present we treat the recursion count in the scanning context as an
index into both our fmap list AND our container list. These two lists
are conceptually a part of the same thing and should be unified.
But what's concerning is that the "recursion level" isn't actually
incremented or decremented at the same time that we add a layer to the
fmap or container lists but instead is more touchy-feely, increasing
when we find a new "file".
To account for this shadiness, the size of the fmap and container lists
has always been a little longer than our "max scan recursion" limit so
we don't accidentally overflow the fmap or container arrays (!).
I've implemented a single recursion-stack as an array, similar to before,
which includes a pointer to each fmap at each layer, along with the size
and type. Push and pop functions add and remove layers whenever a new
fmap is added. A boolean argument when pushing indicates if the new layer
represents a new buffer or new file (descriptor). A new buffer will reset
the "nested fmap level" (described below).
This commit also provides a solution for an issue where we detect
embedded files more than once during scan recursion.
For illustration, imagine a tarball named foo.tar.gz with this structure:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.tar.gz | GZ | 0 | 0 |
| └── foo.tar | TAR | 1 | 0 |
| ├── bar.zip | ZIP | 2 | 1 |
| │ └── hola.txt | ASCII | 3 | 0 |
| └── baz.exe | PE | 2 | 1 |
But suppose baz.exe embeds a ZIP archive and a 7Z archive, like this:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| baz.exe | PE | 0 | 0 |
| ├── sfx.zip | ZIP | 1 | 1 |
| │ └── hello.txt | ASCII | 2 | 0 |
| └── sfx.7z | 7Z | 1 | 1 |
| └── world.txt | ASCII | 2 | 0 |
(A) If we scan for embedded files at any layer, we may detect:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.tar.gz | GZ | 0 | 0 |
| ├── foo.tar | TAR | 1 | 0 |
| │ ├── bar.zip | ZIP | 2 | 1 |
| │ │ └── hola.txt | ASCII | 3 | 0 |
| │ ├── baz.exe | PE | 2 | 1 |
| │ │ ├── sfx.zip | ZIP | 3 | 1 |
| │ │ │ └── hello.txt | ASCII | 4 | 0 |
| │ │ └── sfx.7z | 7Z | 3 | 1 |
| │ │ └── world.txt | ASCII | 4 | 0 |
| │ ├── sfx.zip | ZIP | 2 | 1 |
| │ │ └── hello.txt | ASCII | 3 | 0 |
| │ └── sfx.7z | 7Z | 2 | 1 |
| │ └── world.txt | ASCII | 3 | 0 |
| ├── sfx.zip | ZIP | 1 | 1 |
| └── sfx.7z | 7Z | 1 | 1 |
(A) is bad because it scans content more than once.
Note that for the GZ layer, it may detect the ZIP and 7Z if the
signature hits on the compressed data, which it might, though
extracting the ZIP and 7Z will likely fail.
The reason the above doesn't happen now is that we restrict embedded
type scans for a bunch of archive formats to include GZ and TAR.
(B) If we scan for embedded files at the foo.tar layer, we may detect:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.tar.gz | GZ | 0 | 0 |
| └── foo.tar | TAR | 1 | 0 |
| ├── bar.zip | ZIP | 2 | 1 |
| │ └── hola.txt | ASCII | 3 | 0 |
| ├── baz.exe | PE | 2 | 1 |
| ├── sfx.zip | ZIP | 2 | 1 |
| │ └── hello.txt | ASCII | 3 | 0 |
| └── sfx.7z | 7Z | 2 | 1 |
| └── world.txt | ASCII | 3 | 0 |
(B) is almost right. But we can achieve it easily enough only scanning for
embedded content in the current fmap when the "nested fmap level" is 0.
The upside is that it should safely detect all embedded content, even if
it may think the sfz.zip and sfx.7z are in foo.tar instead of in baz.exe.
The biggest risk I can think of affects ZIPs. SFXZIP detection
is identical to ZIP detection, which is why we don't allow SFXZIP to be
detected if insize of a ZIP. If we only allow embedded type scanning at
fmap-layer 0 in each buffer, this will fail to detect the embedded ZIP
if the bar.exe was not compressed in foo.zip and if non-compressed files
extracted from ZIPs aren't extracted as new buffers:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.zip | ZIP | 0 | 0 |
| └── bar.exe | PE | 1 | 1 |
| └── sfx.zip | ZIP | 2 | 2 |
Provided that we ensure all files extracted from zips are scanned in
new buffers, option (B) should be safe.
(C) If we scan for embedded files at the baz.exe layer, we may detect:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.tar.gz | GZ | 0 | 0 |
| └── foo.tar | TAR | 1 | 0 |
| ├── bar.zip | ZIP | 2 | 1 |
| │ └── hola.txt | ASCII | 3 | 0 |
| └── baz.exe | PE | 2 | 1 |
| ├── sfx.zip | ZIP | 3 | 1 |
| │ └── hello.txt | ASCII | 4 | 0 |
| └── sfx.7z | 7Z | 3 | 1 |
| └── world.txt | ASCII | 4 | 0 |
(C) is right. But it's harder to achieve. For this example we can get it by
restricting 7ZSFX and ZIPSFX detection only when scanning an executable.
But that may mean losing detection of archives embedded elsewhere.
And we'd have to identify allowable container types for each possible
embedded type, which would be very difficult.
So this commit aims to solve the issue the (B)-way.
Note that in all situations, we still have to scan with file typing
enabled to determine if we need to reassign the current file type, such
as re-identifying a Bzip2 archive as a DMG that happens to be Bzip2-
compressed. Detection of DMG and a handful of other types rely on
finding data partway through or near the ned of a file before
reassigning the entire file as the new type.
Other fixes and considerations in this commit:
- The utf16 HTML parser has weak error handling, particularly with respect
to creating a nested fmap for scanning the ascii decoded file.
This commit cleans up the error handling and wraps the nested scan with
the recursion-stack push()/pop() for correct recursion tracking.
Before this commit, each container layer had a flag to indicate if the
container layer is valid.
We need something similar so that the cli_recursion_stack_get_*()
functions ignore normalized layers. Details...
Imagine an LDB signature for HTML content that specifies a ZIP
container. If the signature actually alerts on the normalized HTML and
you don't ignore normalized layers for the container check, it will
appear as though the alert is in an HTML container rather than a ZIP
container.
This commit accomplishes this with a boolean you set in the scan context
before scanning a new layer. Then when the new fmap is created, it will
use that flag to set similar flag for the layer. The context flag is
reset those that anything after this doesn't have that flag.
The flag allows the new recursion_stack_get() function to ignore
normalized layers when iterating the stack to return a layer at a
requested index, negative or positive.
Scanning normalized extracted/normalized javascript and VBA should also
use the 'layer is normalized' flag.
- This commit also fixes Heuristic.Broken.Executable alert for ELF files
to make sure that:
A) these only alert if cli_append_virus() returns CL_VIRUS (aka it
respects the FP check).
B) all broken-executable alerts for ELF only happen if the
SCAN_HEURISTIC_BROKEN option is enabled.
- This commit also cleans up the error handling in cli_magic_scan_dir().
This was needed so we could correctly apply the layer-is-normalized-flag
to all VBA macros extracted to a directory when scanning the directory.
- Also fix an issue where exceeding scan maximums wouldn't cause embedded
file detection scans to abort. Granted we don't actually want to abort
if max filesize or max recursion depth are exceeded... only if max
scansize, max files, and max scantime are exceeded.
Add 'abort_scan' flag to scan context, to protect against depending on
correct error propagation for fatal conditions. Instead, setting this
flag in the scan context should guarantee that a fatal condition deep in
scan recursion isn't lost which result in more stuff being scanned
instead of aborting. This shouldn't be necessary, but some status codes
like CL_ETIMEOUT never used to be fatal and it's easier to do this than
to verify every parser only returns CL_ETIMEOUT and other "fatal
status codes" in fatal conditions.
- Remove duplicate is_tar() prototype from filestypes.c and include
is_tar.h instead.
- Presently we create the fmap hash when creating the fmap.
This wastes a bit of CPU if the hash is never needed.
Now that we're creating fmap's for all embedded files discovered with
file type recognition scans, this is a much more frequent occurence and
really slows things down.
This commit fixes the issue by only creating fmap hashes as needed.
This should not only resolve the perfomance impact of creating fmap's
for all embedded files, but also should improve performance in general.
- Add allmatch check to the zip parser after the central-header meta
match. That way we don't multiple alerts with the same match except in
allmatch mode. Clean up error handling in the zip parser a tiny bit.
- Fixes to ensure that the scan limits such as scansize, filesize,
recursion depth, # of embedded files, and scantime are always reported
if AlertExceedsMax (--alert-exceeds-max) is enabled.
- Fixed an issue where non-fatal alerts for exceeding scan maximums may
mask signature matches later on. I changed it so these alerts use the
"possibly unwanted" alert-type and thus only alert if no other alerts
were found or if all-match or heuristic-precedence are enabled.
- Added the "Heuristics.Limits.Exceeded.*" events to the JSON metadata
when the --gen-json feature is enabled. These will show up once under
"ParseErrors" the first time a limit is exceeded. In the present
implementation, only one limits-exceeded events will be added, so as to
prevent a malicious or malformed sample from filling the JSON buffer
with millions of events and using a tonne of RAM.
2021-09-11 14:15:21 -07:00
|
|
|
#define printnode(a, b, c)
|
2013-04-15 17:58:26 -04:00
|
|
|
#endif
|
|
|
|
|
|
|
|
/* #define PRINT_CHAINS */
|
|
|
|
#ifdef PRINT_CHAINS
|
|
|
|
/* For troubleshooting only, print the chain forwards and back */
|
2018-12-03 12:40:13 -05:00
|
|
|
static inline void printchain(const char *prefix, struct cache_set *cs)
|
|
|
|
{
|
2013-04-15 17:58:26 -04:00
|
|
|
if (!cs || !cs->data) return;
|
|
|
|
if (prefix) printf("%s: ", prefix);
|
|
|
|
printf("chain by next: ");
|
|
|
|
{
|
|
|
|
unsigned int i = 0;
|
|
|
|
struct node *x = cs->first;
|
2018-12-03 12:40:13 -05:00
|
|
|
while (x) {
|
2013-04-15 17:58:26 -04:00
|
|
|
printf("%02d,", x - cs->data);
|
2018-12-03 12:40:13 -05:00
|
|
|
x = x->next;
|
2013-04-15 17:58:26 -04:00
|
|
|
i++;
|
|
|
|
}
|
|
|
|
printf(" [count=%u]\nchain by prev: ", i);
|
2018-12-03 12:40:13 -05:00
|
|
|
x = cs->last;
|
|
|
|
i = 0;
|
|
|
|
while (x) {
|
2013-04-15 17:58:26 -04:00
|
|
|
printf("%02d,", x - cs->data);
|
2018-12-03 12:40:13 -05:00
|
|
|
x = x->prev;
|
2013-04-15 17:58:26 -04:00
|
|
|
i++;
|
|
|
|
}
|
|
|
|
printf(" [count=%u]\n", i);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#else
|
2022-01-24 21:03:20 -08:00
|
|
|
#define printchain(a, b)
|
2013-04-15 17:58:26 -04:00
|
|
|
#endif
|
|
|
|
|
2010-01-15 03:00:15 +01:00
|
|
|
/* Looks up a node and splays it up to the root of the tree */
|
2025-06-03 19:03:20 -04:00
|
|
|
static int splay(int64_t *sha2_256, size_t len, struct cache_set *cs)
|
2018-12-03 12:40:13 -05:00
|
|
|
{
|
2010-03-05 22:17:46 +01:00
|
|
|
struct node next = {{0, 0}, NULL, NULL, NULL, NULL, NULL, 0, 0}, *right = &next, *left = &next, *temp, *root = cs->root;
|
2010-01-14 04:38:31 +01:00
|
|
|
int comp, found = 0;
|
2010-01-11 13:10:36 +01:00
|
|
|
|
2018-12-03 12:40:13 -05:00
|
|
|
if (!root)
|
|
|
|
return 0;
|
2010-01-09 02:19:25 +01:00
|
|
|
|
2018-12-03 12:40:13 -05:00
|
|
|
while (1) {
|
2025-06-03 19:03:20 -04:00
|
|
|
comp = cmp(sha2_256, len, root->digest, root->size);
|
2018-12-03 12:40:13 -05:00
|
|
|
if (comp < 0) {
|
|
|
|
if (!root->left) break;
|
2025-06-03 19:03:20 -04:00
|
|
|
if (cmp(sha2_256, len, root->left->digest, root->left->size) < 0) {
|
2018-12-03 12:40:13 -05:00
|
|
|
temp = root->left;
|
2010-01-09 02:19:25 +01:00
|
|
|
root->left = temp->right;
|
2018-12-03 12:40:13 -05:00
|
|
|
if (temp->right) temp->right->up = root;
|
2010-01-09 02:19:25 +01:00
|
|
|
temp->right = root;
|
2018-12-03 12:40:13 -05:00
|
|
|
root->up = temp;
|
|
|
|
root = temp;
|
|
|
|
if (!root->left) break;
|
|
|
|
}
|
2010-01-09 02:19:25 +01:00
|
|
|
right->left = root;
|
2018-12-03 12:40:13 -05:00
|
|
|
root->up = right;
|
|
|
|
right = root;
|
|
|
|
root = root->left;
|
|
|
|
} else if (comp > 0) {
|
|
|
|
if (!root->right) break;
|
2025-06-03 19:03:20 -04:00
|
|
|
if (cmp(sha2_256, len, root->right->digest, root->right->size) > 0) {
|
2018-12-03 12:40:13 -05:00
|
|
|
temp = root->right;
|
2010-01-09 02:19:25 +01:00
|
|
|
root->right = temp->left;
|
2018-12-03 12:40:13 -05:00
|
|
|
if (temp->left) temp->left->up = root;
|
2010-01-09 02:19:25 +01:00
|
|
|
temp->left = root;
|
2018-12-03 12:40:13 -05:00
|
|
|
root->up = temp;
|
|
|
|
root = temp;
|
|
|
|
if (!root->right) break;
|
|
|
|
}
|
|
|
|
left->right = root;
|
|
|
|
root->up = left;
|
|
|
|
left = root;
|
|
|
|
root = root->right;
|
|
|
|
} else {
|
|
|
|
found = 1;
|
|
|
|
break;
|
|
|
|
}
|
2010-01-09 02:19:25 +01:00
|
|
|
}
|
2010-01-09 16:21:48 +01:00
|
|
|
|
2010-01-09 02:19:25 +01:00
|
|
|
left->right = root->left;
|
2018-12-03 12:40:13 -05:00
|
|
|
if (root->left) root->left->up = left;
|
2010-01-09 02:19:25 +01:00
|
|
|
right->left = root->right;
|
2018-12-03 12:40:13 -05:00
|
|
|
if (root->right) root->right->up = right;
|
2010-01-09 02:19:25 +01:00
|
|
|
root->left = next.right;
|
2018-12-03 12:40:13 -05:00
|
|
|
if (next.right) next.right->up = root;
|
2010-01-09 02:19:25 +01:00
|
|
|
root->right = next.left;
|
2018-12-03 12:40:13 -05:00
|
|
|
if (next.left) next.left->up = root;
|
2010-01-11 18:37:54 +01:00
|
|
|
root->up = NULL;
|
2010-01-09 02:19:25 +01:00
|
|
|
cs->root = root;
|
2010-01-14 04:38:31 +01:00
|
|
|
return found;
|
2010-01-09 02:19:25 +01:00
|
|
|
}
|
|
|
|
|
2010-01-15 03:00:15 +01:00
|
|
|
/* Looks up an hash in the tree and maintains the replacement chain */
|
2025-06-03 19:03:20 -04:00
|
|
|
static inline int cacheset_lookup(struct cache_set *cs, uint8_t *sha2_256, size_t size, uint32_t recursion_level)
|
2018-12-03 12:40:13 -05:00
|
|
|
{
|
2025-06-03 19:03:20 -04:00
|
|
|
int64_t hash[4];
|
2010-01-09 02:19:25 +01:00
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
memcpy(hash, sha2_256, 32);
|
2018-12-03 12:40:13 -05:00
|
|
|
if (splay(hash, size, cs)) {
|
|
|
|
struct node *o = cs->root->prev, *p = cs->root, *q = cs->root->next;
|
2010-01-14 04:38:31 +01:00
|
|
|
#ifdef PRINT_CHAINS
|
2018-12-03 12:40:13 -05:00
|
|
|
printf("promoting %02d\n", p - cs->data);
|
|
|
|
printchain("before", cs);
|
2010-01-14 04:38:31 +01:00
|
|
|
#endif
|
2018-12-03 12:40:13 -05:00
|
|
|
if (q) {
|
|
|
|
if (o)
|
|
|
|
o->next = q;
|
|
|
|
else
|
|
|
|
cs->first = q;
|
|
|
|
q->prev = o;
|
|
|
|
cs->last->next = p;
|
|
|
|
p->prev = cs->last;
|
|
|
|
p->next = NULL;
|
|
|
|
cs->last = p;
|
|
|
|
}
|
2010-01-14 04:38:31 +01:00
|
|
|
#ifdef PRINT_CHAINS
|
2018-12-03 12:40:13 -05:00
|
|
|
printchain("after", cs);
|
2010-01-14 04:38:31 +01:00
|
|
|
#endif
|
libclamav: Fix scan recursion tracking
Scan recursion is the process of identifying files embedded in other
files and then scanning them, recursively.
Internally this process is more complex than it may sound because a file
may have multiple layers of types before finding a new "file".
At present we treat the recursion count in the scanning context as an
index into both our fmap list AND our container list. These two lists
are conceptually a part of the same thing and should be unified.
But what's concerning is that the "recursion level" isn't actually
incremented or decremented at the same time that we add a layer to the
fmap or container lists but instead is more touchy-feely, increasing
when we find a new "file".
To account for this shadiness, the size of the fmap and container lists
has always been a little longer than our "max scan recursion" limit so
we don't accidentally overflow the fmap or container arrays (!).
I've implemented a single recursion-stack as an array, similar to before,
which includes a pointer to each fmap at each layer, along with the size
and type. Push and pop functions add and remove layers whenever a new
fmap is added. A boolean argument when pushing indicates if the new layer
represents a new buffer or new file (descriptor). A new buffer will reset
the "nested fmap level" (described below).
This commit also provides a solution for an issue where we detect
embedded files more than once during scan recursion.
For illustration, imagine a tarball named foo.tar.gz with this structure:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.tar.gz | GZ | 0 | 0 |
| └── foo.tar | TAR | 1 | 0 |
| ├── bar.zip | ZIP | 2 | 1 |
| │ └── hola.txt | ASCII | 3 | 0 |
| └── baz.exe | PE | 2 | 1 |
But suppose baz.exe embeds a ZIP archive and a 7Z archive, like this:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| baz.exe | PE | 0 | 0 |
| ├── sfx.zip | ZIP | 1 | 1 |
| │ └── hello.txt | ASCII | 2 | 0 |
| └── sfx.7z | 7Z | 1 | 1 |
| └── world.txt | ASCII | 2 | 0 |
(A) If we scan for embedded files at any layer, we may detect:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.tar.gz | GZ | 0 | 0 |
| ├── foo.tar | TAR | 1 | 0 |
| │ ├── bar.zip | ZIP | 2 | 1 |
| │ │ └── hola.txt | ASCII | 3 | 0 |
| │ ├── baz.exe | PE | 2 | 1 |
| │ │ ├── sfx.zip | ZIP | 3 | 1 |
| │ │ │ └── hello.txt | ASCII | 4 | 0 |
| │ │ └── sfx.7z | 7Z | 3 | 1 |
| │ │ └── world.txt | ASCII | 4 | 0 |
| │ ├── sfx.zip | ZIP | 2 | 1 |
| │ │ └── hello.txt | ASCII | 3 | 0 |
| │ └── sfx.7z | 7Z | 2 | 1 |
| │ └── world.txt | ASCII | 3 | 0 |
| ├── sfx.zip | ZIP | 1 | 1 |
| └── sfx.7z | 7Z | 1 | 1 |
(A) is bad because it scans content more than once.
Note that for the GZ layer, it may detect the ZIP and 7Z if the
signature hits on the compressed data, which it might, though
extracting the ZIP and 7Z will likely fail.
The reason the above doesn't happen now is that we restrict embedded
type scans for a bunch of archive formats to include GZ and TAR.
(B) If we scan for embedded files at the foo.tar layer, we may detect:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.tar.gz | GZ | 0 | 0 |
| └── foo.tar | TAR | 1 | 0 |
| ├── bar.zip | ZIP | 2 | 1 |
| │ └── hola.txt | ASCII | 3 | 0 |
| ├── baz.exe | PE | 2 | 1 |
| ├── sfx.zip | ZIP | 2 | 1 |
| │ └── hello.txt | ASCII | 3 | 0 |
| └── sfx.7z | 7Z | 2 | 1 |
| └── world.txt | ASCII | 3 | 0 |
(B) is almost right. But we can achieve it easily enough only scanning for
embedded content in the current fmap when the "nested fmap level" is 0.
The upside is that it should safely detect all embedded content, even if
it may think the sfz.zip and sfx.7z are in foo.tar instead of in baz.exe.
The biggest risk I can think of affects ZIPs. SFXZIP detection
is identical to ZIP detection, which is why we don't allow SFXZIP to be
detected if insize of a ZIP. If we only allow embedded type scanning at
fmap-layer 0 in each buffer, this will fail to detect the embedded ZIP
if the bar.exe was not compressed in foo.zip and if non-compressed files
extracted from ZIPs aren't extracted as new buffers:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.zip | ZIP | 0 | 0 |
| └── bar.exe | PE | 1 | 1 |
| └── sfx.zip | ZIP | 2 | 2 |
Provided that we ensure all files extracted from zips are scanned in
new buffers, option (B) should be safe.
(C) If we scan for embedded files at the baz.exe layer, we may detect:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.tar.gz | GZ | 0 | 0 |
| └── foo.tar | TAR | 1 | 0 |
| ├── bar.zip | ZIP | 2 | 1 |
| │ └── hola.txt | ASCII | 3 | 0 |
| └── baz.exe | PE | 2 | 1 |
| ├── sfx.zip | ZIP | 3 | 1 |
| │ └── hello.txt | ASCII | 4 | 0 |
| └── sfx.7z | 7Z | 3 | 1 |
| └── world.txt | ASCII | 4 | 0 |
(C) is right. But it's harder to achieve. For this example we can get it by
restricting 7ZSFX and ZIPSFX detection only when scanning an executable.
But that may mean losing detection of archives embedded elsewhere.
And we'd have to identify allowable container types for each possible
embedded type, which would be very difficult.
So this commit aims to solve the issue the (B)-way.
Note that in all situations, we still have to scan with file typing
enabled to determine if we need to reassign the current file type, such
as re-identifying a Bzip2 archive as a DMG that happens to be Bzip2-
compressed. Detection of DMG and a handful of other types rely on
finding data partway through or near the ned of a file before
reassigning the entire file as the new type.
Other fixes and considerations in this commit:
- The utf16 HTML parser has weak error handling, particularly with respect
to creating a nested fmap for scanning the ascii decoded file.
This commit cleans up the error handling and wraps the nested scan with
the recursion-stack push()/pop() for correct recursion tracking.
Before this commit, each container layer had a flag to indicate if the
container layer is valid.
We need something similar so that the cli_recursion_stack_get_*()
functions ignore normalized layers. Details...
Imagine an LDB signature for HTML content that specifies a ZIP
container. If the signature actually alerts on the normalized HTML and
you don't ignore normalized layers for the container check, it will
appear as though the alert is in an HTML container rather than a ZIP
container.
This commit accomplishes this with a boolean you set in the scan context
before scanning a new layer. Then when the new fmap is created, it will
use that flag to set similar flag for the layer. The context flag is
reset those that anything after this doesn't have that flag.
The flag allows the new recursion_stack_get() function to ignore
normalized layers when iterating the stack to return a layer at a
requested index, negative or positive.
Scanning normalized extracted/normalized javascript and VBA should also
use the 'layer is normalized' flag.
- This commit also fixes Heuristic.Broken.Executable alert for ELF files
to make sure that:
A) these only alert if cli_append_virus() returns CL_VIRUS (aka it
respects the FP check).
B) all broken-executable alerts for ELF only happen if the
SCAN_HEURISTIC_BROKEN option is enabled.
- This commit also cleans up the error handling in cli_magic_scan_dir().
This was needed so we could correctly apply the layer-is-normalized-flag
to all VBA macros extracted to a directory when scanning the directory.
- Also fix an issue where exceeding scan maximums wouldn't cause embedded
file detection scans to abort. Granted we don't actually want to abort
if max filesize or max recursion depth are exceeded... only if max
scansize, max files, and max scantime are exceeded.
Add 'abort_scan' flag to scan context, to protect against depending on
correct error propagation for fatal conditions. Instead, setting this
flag in the scan context should guarantee that a fatal condition deep in
scan recursion isn't lost which result in more stuff being scanned
instead of aborting. This shouldn't be necessary, but some status codes
like CL_ETIMEOUT never used to be fatal and it's easier to do this than
to verify every parser only returns CL_ETIMEOUT and other "fatal
status codes" in fatal conditions.
- Remove duplicate is_tar() prototype from filestypes.c and include
is_tar.h instead.
- Presently we create the fmap hash when creating the fmap.
This wastes a bit of CPU if the hash is never needed.
Now that we're creating fmap's for all embedded files discovered with
file type recognition scans, this is a much more frequent occurence and
really slows things down.
This commit fixes the issue by only creating fmap hashes as needed.
This should not only resolve the perfomance impact of creating fmap's
for all embedded files, but also should improve performance in general.
- Add allmatch check to the zip parser after the central-header meta
match. That way we don't multiple alerts with the same match except in
allmatch mode. Clean up error handling in the zip parser a tiny bit.
- Fixes to ensure that the scan limits such as scansize, filesize,
recursion depth, # of embedded files, and scantime are always reported
if AlertExceedsMax (--alert-exceeds-max) is enabled.
- Fixed an issue where non-fatal alerts for exceeding scan maximums may
mask signature matches later on. I changed it so these alerts use the
"possibly unwanted" alert-type and thus only alert if no other alerts
were found or if all-match or heuristic-precedence are enabled.
- Added the "Heuristics.Limits.Exceeded.*" events to the JSON metadata
when the --gen-json feature is enabled. These will show up once under
"ParseErrors" the first time a limit is exceeded. In the present
implementation, only one limits-exceeded events will be added, so as to
prevent a malicious or malformed sample from filling the JSON buffer
with millions of events and using a tonne of RAM.
2021-09-11 14:15:21 -07:00
|
|
|
|
|
|
|
// The recursion_level check here to prevent a "clean" result from exceeding max recursion from
|
|
|
|
// causing a false negative if the same file is scanned where the recursion depth is lower.
|
|
|
|
// e.g. if max-rec set to 4 and "file5" is malicious, a scan of file1 should not cause a scan of file3 to be "clean"
|
|
|
|
// root
|
|
|
|
// ├── file1 -> file2 -> file3 -> file4 -> file5
|
|
|
|
// └── file3 -> file4 -> file5
|
|
|
|
// See: https://bugzilla.clamav.net/show_bug.cgi?id=1856
|
|
|
|
if (recursion_level >= p->minrec)
|
2018-12-03 12:40:13 -05:00
|
|
|
return 1;
|
2010-01-14 04:38:31 +01:00
|
|
|
}
|
|
|
|
return 0;
|
2010-01-09 02:19:25 +01:00
|
|
|
}
|
|
|
|
|
2010-01-15 03:00:15 +01:00
|
|
|
/* If the hash is present nothing happens.
|
|
|
|
Otherwise a new node is created for the hash picking one from the begin of the chain.
|
|
|
|
Used nodes are moved to the end of the chain */
|
2025-06-03 19:03:20 -04:00
|
|
|
static inline const char *cacheset_add(struct cache_set *cs, uint8_t *sha2_256, size_t size, uint32_t recursion_level)
|
2018-12-03 12:40:13 -05:00
|
|
|
{
|
2010-01-09 02:19:25 +01:00
|
|
|
struct node *newnode;
|
2025-06-03 19:03:20 -04:00
|
|
|
int64_t hash[4];
|
2010-01-09 02:19:25 +01:00
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
memcpy(hash, sha2_256, 32);
|
2018-12-03 12:40:13 -05:00
|
|
|
if (splay(hash, size, cs)) {
|
libclamav: Fix scan recursion tracking
Scan recursion is the process of identifying files embedded in other
files and then scanning them, recursively.
Internally this process is more complex than it may sound because a file
may have multiple layers of types before finding a new "file".
At present we treat the recursion count in the scanning context as an
index into both our fmap list AND our container list. These two lists
are conceptually a part of the same thing and should be unified.
But what's concerning is that the "recursion level" isn't actually
incremented or decremented at the same time that we add a layer to the
fmap or container lists but instead is more touchy-feely, increasing
when we find a new "file".
To account for this shadiness, the size of the fmap and container lists
has always been a little longer than our "max scan recursion" limit so
we don't accidentally overflow the fmap or container arrays (!).
I've implemented a single recursion-stack as an array, similar to before,
which includes a pointer to each fmap at each layer, along with the size
and type. Push and pop functions add and remove layers whenever a new
fmap is added. A boolean argument when pushing indicates if the new layer
represents a new buffer or new file (descriptor). A new buffer will reset
the "nested fmap level" (described below).
This commit also provides a solution for an issue where we detect
embedded files more than once during scan recursion.
For illustration, imagine a tarball named foo.tar.gz with this structure:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.tar.gz | GZ | 0 | 0 |
| └── foo.tar | TAR | 1 | 0 |
| ├── bar.zip | ZIP | 2 | 1 |
| │ └── hola.txt | ASCII | 3 | 0 |
| └── baz.exe | PE | 2 | 1 |
But suppose baz.exe embeds a ZIP archive and a 7Z archive, like this:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| baz.exe | PE | 0 | 0 |
| ├── sfx.zip | ZIP | 1 | 1 |
| │ └── hello.txt | ASCII | 2 | 0 |
| └── sfx.7z | 7Z | 1 | 1 |
| └── world.txt | ASCII | 2 | 0 |
(A) If we scan for embedded files at any layer, we may detect:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.tar.gz | GZ | 0 | 0 |
| ├── foo.tar | TAR | 1 | 0 |
| │ ├── bar.zip | ZIP | 2 | 1 |
| │ │ └── hola.txt | ASCII | 3 | 0 |
| │ ├── baz.exe | PE | 2 | 1 |
| │ │ ├── sfx.zip | ZIP | 3 | 1 |
| │ │ │ └── hello.txt | ASCII | 4 | 0 |
| │ │ └── sfx.7z | 7Z | 3 | 1 |
| │ │ └── world.txt | ASCII | 4 | 0 |
| │ ├── sfx.zip | ZIP | 2 | 1 |
| │ │ └── hello.txt | ASCII | 3 | 0 |
| │ └── sfx.7z | 7Z | 2 | 1 |
| │ └── world.txt | ASCII | 3 | 0 |
| ├── sfx.zip | ZIP | 1 | 1 |
| └── sfx.7z | 7Z | 1 | 1 |
(A) is bad because it scans content more than once.
Note that for the GZ layer, it may detect the ZIP and 7Z if the
signature hits on the compressed data, which it might, though
extracting the ZIP and 7Z will likely fail.
The reason the above doesn't happen now is that we restrict embedded
type scans for a bunch of archive formats to include GZ and TAR.
(B) If we scan for embedded files at the foo.tar layer, we may detect:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.tar.gz | GZ | 0 | 0 |
| └── foo.tar | TAR | 1 | 0 |
| ├── bar.zip | ZIP | 2 | 1 |
| │ └── hola.txt | ASCII | 3 | 0 |
| ├── baz.exe | PE | 2 | 1 |
| ├── sfx.zip | ZIP | 2 | 1 |
| │ └── hello.txt | ASCII | 3 | 0 |
| └── sfx.7z | 7Z | 2 | 1 |
| └── world.txt | ASCII | 3 | 0 |
(B) is almost right. But we can achieve it easily enough only scanning for
embedded content in the current fmap when the "nested fmap level" is 0.
The upside is that it should safely detect all embedded content, even if
it may think the sfz.zip and sfx.7z are in foo.tar instead of in baz.exe.
The biggest risk I can think of affects ZIPs. SFXZIP detection
is identical to ZIP detection, which is why we don't allow SFXZIP to be
detected if insize of a ZIP. If we only allow embedded type scanning at
fmap-layer 0 in each buffer, this will fail to detect the embedded ZIP
if the bar.exe was not compressed in foo.zip and if non-compressed files
extracted from ZIPs aren't extracted as new buffers:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.zip | ZIP | 0 | 0 |
| └── bar.exe | PE | 1 | 1 |
| └── sfx.zip | ZIP | 2 | 2 |
Provided that we ensure all files extracted from zips are scanned in
new buffers, option (B) should be safe.
(C) If we scan for embedded files at the baz.exe layer, we may detect:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.tar.gz | GZ | 0 | 0 |
| └── foo.tar | TAR | 1 | 0 |
| ├── bar.zip | ZIP | 2 | 1 |
| │ └── hola.txt | ASCII | 3 | 0 |
| └── baz.exe | PE | 2 | 1 |
| ├── sfx.zip | ZIP | 3 | 1 |
| │ └── hello.txt | ASCII | 4 | 0 |
| └── sfx.7z | 7Z | 3 | 1 |
| └── world.txt | ASCII | 4 | 0 |
(C) is right. But it's harder to achieve. For this example we can get it by
restricting 7ZSFX and ZIPSFX detection only when scanning an executable.
But that may mean losing detection of archives embedded elsewhere.
And we'd have to identify allowable container types for each possible
embedded type, which would be very difficult.
So this commit aims to solve the issue the (B)-way.
Note that in all situations, we still have to scan with file typing
enabled to determine if we need to reassign the current file type, such
as re-identifying a Bzip2 archive as a DMG that happens to be Bzip2-
compressed. Detection of DMG and a handful of other types rely on
finding data partway through or near the ned of a file before
reassigning the entire file as the new type.
Other fixes and considerations in this commit:
- The utf16 HTML parser has weak error handling, particularly with respect
to creating a nested fmap for scanning the ascii decoded file.
This commit cleans up the error handling and wraps the nested scan with
the recursion-stack push()/pop() for correct recursion tracking.
Before this commit, each container layer had a flag to indicate if the
container layer is valid.
We need something similar so that the cli_recursion_stack_get_*()
functions ignore normalized layers. Details...
Imagine an LDB signature for HTML content that specifies a ZIP
container. If the signature actually alerts on the normalized HTML and
you don't ignore normalized layers for the container check, it will
appear as though the alert is in an HTML container rather than a ZIP
container.
This commit accomplishes this with a boolean you set in the scan context
before scanning a new layer. Then when the new fmap is created, it will
use that flag to set similar flag for the layer. The context flag is
reset those that anything after this doesn't have that flag.
The flag allows the new recursion_stack_get() function to ignore
normalized layers when iterating the stack to return a layer at a
requested index, negative or positive.
Scanning normalized extracted/normalized javascript and VBA should also
use the 'layer is normalized' flag.
- This commit also fixes Heuristic.Broken.Executable alert for ELF files
to make sure that:
A) these only alert if cli_append_virus() returns CL_VIRUS (aka it
respects the FP check).
B) all broken-executable alerts for ELF only happen if the
SCAN_HEURISTIC_BROKEN option is enabled.
- This commit also cleans up the error handling in cli_magic_scan_dir().
This was needed so we could correctly apply the layer-is-normalized-flag
to all VBA macros extracted to a directory when scanning the directory.
- Also fix an issue where exceeding scan maximums wouldn't cause embedded
file detection scans to abort. Granted we don't actually want to abort
if max filesize or max recursion depth are exceeded... only if max
scansize, max files, and max scantime are exceeded.
Add 'abort_scan' flag to scan context, to protect against depending on
correct error propagation for fatal conditions. Instead, setting this
flag in the scan context should guarantee that a fatal condition deep in
scan recursion isn't lost which result in more stuff being scanned
instead of aborting. This shouldn't be necessary, but some status codes
like CL_ETIMEOUT never used to be fatal and it's easier to do this than
to verify every parser only returns CL_ETIMEOUT and other "fatal
status codes" in fatal conditions.
- Remove duplicate is_tar() prototype from filestypes.c and include
is_tar.h instead.
- Presently we create the fmap hash when creating the fmap.
This wastes a bit of CPU if the hash is never needed.
Now that we're creating fmap's for all embedded files discovered with
file type recognition scans, this is a much more frequent occurence and
really slows things down.
This commit fixes the issue by only creating fmap hashes as needed.
This should not only resolve the perfomance impact of creating fmap's
for all embedded files, but also should improve performance in general.
- Add allmatch check to the zip parser after the central-header meta
match. That way we don't multiple alerts with the same match except in
allmatch mode. Clean up error handling in the zip parser a tiny bit.
- Fixes to ensure that the scan limits such as scansize, filesize,
recursion depth, # of embedded files, and scantime are always reported
if AlertExceedsMax (--alert-exceeds-max) is enabled.
- Fixed an issue where non-fatal alerts for exceeding scan maximums may
mask signature matches later on. I changed it so these alerts use the
"possibly unwanted" alert-type and thus only alert if no other alerts
were found or if all-match or heuristic-precedence are enabled.
- Added the "Heuristics.Limits.Exceeded.*" events to the JSON metadata
when the --gen-json feature is enabled. These will show up once under
"ParseErrors" the first time a limit is exceeded. In the present
implementation, only one limits-exceeded events will be added, so as to
prevent a malicious or malformed sample from filling the JSON buffer
with millions of events and using a tonne of RAM.
2021-09-11 14:15:21 -07:00
|
|
|
if (cs->root->minrec > recursion_level)
|
|
|
|
cs->root->minrec = recursion_level;
|
2022-01-27 10:08:59 -08:00
|
|
|
return NULL; /* Already there */
|
2010-03-05 22:17:46 +01:00
|
|
|
}
|
2010-01-13 00:03:30 +01:00
|
|
|
|
2010-01-14 04:38:31 +01:00
|
|
|
ptree("1:\n");
|
2018-12-03 12:40:13 -05:00
|
|
|
if (printtree(cs, cs->root, 0)) {
|
2022-01-27 10:08:59 -08:00
|
|
|
return "cacheset_add: inconsistent tree before choosing newnode, good luck";
|
2010-01-14 04:38:31 +01:00
|
|
|
}
|
|
|
|
|
2010-01-13 00:03:30 +01:00
|
|
|
newnode = cs->first;
|
2018-12-03 12:40:13 -05:00
|
|
|
while (newnode) {
|
|
|
|
if (!newnode->right && !newnode->left)
|
2013-02-27 11:34:13 -05:00
|
|
|
break;
|
2018-12-03 12:40:13 -05:00
|
|
|
if (newnode->next) {
|
|
|
|
if (newnode == newnode->next) {
|
2022-01-27 10:08:59 -08:00
|
|
|
return "cacheset_add: cache chain in a bad state";
|
2013-04-15 17:58:26 -04:00
|
|
|
}
|
|
|
|
newnode = newnode->next;
|
2018-12-03 12:40:13 -05:00
|
|
|
} else {
|
2022-01-27 10:08:59 -08:00
|
|
|
return "cacheset_add: end of chain reached";
|
2013-02-27 11:34:13 -05:00
|
|
|
}
|
2010-01-13 00:03:30 +01:00
|
|
|
}
|
2018-12-03 12:40:13 -05:00
|
|
|
if (!newnode) {
|
2022-01-27 10:08:59 -08:00
|
|
|
return "cacheset_add: tree has got no end nodes";
|
2018-12-03 12:40:13 -05:00
|
|
|
}
|
|
|
|
if (newnode->up) {
|
|
|
|
if (newnode->up->left == newnode)
|
|
|
|
newnode->up->left = NULL;
|
|
|
|
else
|
|
|
|
newnode->up->right = NULL;
|
|
|
|
}
|
|
|
|
if (newnode->prev)
|
|
|
|
newnode->prev->next = newnode->next;
|
|
|
|
if (newnode->next)
|
|
|
|
newnode->next->prev = newnode->prev;
|
|
|
|
if (cs->first == newnode)
|
|
|
|
cs->first = newnode->next;
|
|
|
|
|
|
|
|
newnode->prev = cs->last;
|
|
|
|
newnode->next = NULL;
|
2010-01-13 00:03:30 +01:00
|
|
|
cs->last->next = newnode;
|
2018-12-03 12:40:13 -05:00
|
|
|
cs->last = newnode;
|
2010-01-14 04:38:31 +01:00
|
|
|
|
|
|
|
ptree("2:\n");
|
2018-12-03 12:40:13 -05:00
|
|
|
if (printtree(cs, cs->root, 0)) {
|
2022-01-27 10:08:59 -08:00
|
|
|
return "cacheset_add: inconsistent tree before adding newnode, good luck";
|
2010-01-14 04:38:31 +01:00
|
|
|
}
|
2010-01-09 02:19:25 +01:00
|
|
|
|
2018-12-03 12:40:13 -05:00
|
|
|
if (!cs->root) {
|
|
|
|
newnode->left = NULL;
|
|
|
|
newnode->right = NULL;
|
2010-01-09 02:19:25 +01:00
|
|
|
} else {
|
2018-12-03 12:40:13 -05:00
|
|
|
if (cmp(hash, size, cs->root->digest, cs->root->size) < 0) {
|
|
|
|
newnode->left = cs->root->left;
|
|
|
|
newnode->right = cs->root;
|
|
|
|
cs->root->left = NULL;
|
|
|
|
} else {
|
|
|
|
newnode->right = cs->root->right;
|
|
|
|
newnode->left = cs->root;
|
|
|
|
cs->root->right = NULL;
|
|
|
|
}
|
|
|
|
if (newnode->left) newnode->left->up = newnode;
|
|
|
|
if (newnode->right) newnode->right->up = newnode;
|
2010-01-09 02:19:25 +01:00
|
|
|
}
|
|
|
|
newnode->digest[0] = hash[0];
|
|
|
|
newnode->digest[1] = hash[1];
|
2018-12-03 12:40:13 -05:00
|
|
|
newnode->up = NULL;
|
|
|
|
newnode->size = size;
|
libclamav: Fix scan recursion tracking
Scan recursion is the process of identifying files embedded in other
files and then scanning them, recursively.
Internally this process is more complex than it may sound because a file
may have multiple layers of types before finding a new "file".
At present we treat the recursion count in the scanning context as an
index into both our fmap list AND our container list. These two lists
are conceptually a part of the same thing and should be unified.
But what's concerning is that the "recursion level" isn't actually
incremented or decremented at the same time that we add a layer to the
fmap or container lists but instead is more touchy-feely, increasing
when we find a new "file".
To account for this shadiness, the size of the fmap and container lists
has always been a little longer than our "max scan recursion" limit so
we don't accidentally overflow the fmap or container arrays (!).
I've implemented a single recursion-stack as an array, similar to before,
which includes a pointer to each fmap at each layer, along with the size
and type. Push and pop functions add and remove layers whenever a new
fmap is added. A boolean argument when pushing indicates if the new layer
represents a new buffer or new file (descriptor). A new buffer will reset
the "nested fmap level" (described below).
This commit also provides a solution for an issue where we detect
embedded files more than once during scan recursion.
For illustration, imagine a tarball named foo.tar.gz with this structure:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.tar.gz | GZ | 0 | 0 |
| └── foo.tar | TAR | 1 | 0 |
| ├── bar.zip | ZIP | 2 | 1 |
| │ └── hola.txt | ASCII | 3 | 0 |
| └── baz.exe | PE | 2 | 1 |
But suppose baz.exe embeds a ZIP archive and a 7Z archive, like this:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| baz.exe | PE | 0 | 0 |
| ├── sfx.zip | ZIP | 1 | 1 |
| │ └── hello.txt | ASCII | 2 | 0 |
| └── sfx.7z | 7Z | 1 | 1 |
| └── world.txt | ASCII | 2 | 0 |
(A) If we scan for embedded files at any layer, we may detect:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.tar.gz | GZ | 0 | 0 |
| ├── foo.tar | TAR | 1 | 0 |
| │ ├── bar.zip | ZIP | 2 | 1 |
| │ │ └── hola.txt | ASCII | 3 | 0 |
| │ ├── baz.exe | PE | 2 | 1 |
| │ │ ├── sfx.zip | ZIP | 3 | 1 |
| │ │ │ └── hello.txt | ASCII | 4 | 0 |
| │ │ └── sfx.7z | 7Z | 3 | 1 |
| │ │ └── world.txt | ASCII | 4 | 0 |
| │ ├── sfx.zip | ZIP | 2 | 1 |
| │ │ └── hello.txt | ASCII | 3 | 0 |
| │ └── sfx.7z | 7Z | 2 | 1 |
| │ └── world.txt | ASCII | 3 | 0 |
| ├── sfx.zip | ZIP | 1 | 1 |
| └── sfx.7z | 7Z | 1 | 1 |
(A) is bad because it scans content more than once.
Note that for the GZ layer, it may detect the ZIP and 7Z if the
signature hits on the compressed data, which it might, though
extracting the ZIP and 7Z will likely fail.
The reason the above doesn't happen now is that we restrict embedded
type scans for a bunch of archive formats to include GZ and TAR.
(B) If we scan for embedded files at the foo.tar layer, we may detect:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.tar.gz | GZ | 0 | 0 |
| └── foo.tar | TAR | 1 | 0 |
| ├── bar.zip | ZIP | 2 | 1 |
| │ └── hola.txt | ASCII | 3 | 0 |
| ├── baz.exe | PE | 2 | 1 |
| ├── sfx.zip | ZIP | 2 | 1 |
| │ └── hello.txt | ASCII | 3 | 0 |
| └── sfx.7z | 7Z | 2 | 1 |
| └── world.txt | ASCII | 3 | 0 |
(B) is almost right. But we can achieve it easily enough only scanning for
embedded content in the current fmap when the "nested fmap level" is 0.
The upside is that it should safely detect all embedded content, even if
it may think the sfz.zip and sfx.7z are in foo.tar instead of in baz.exe.
The biggest risk I can think of affects ZIPs. SFXZIP detection
is identical to ZIP detection, which is why we don't allow SFXZIP to be
detected if insize of a ZIP. If we only allow embedded type scanning at
fmap-layer 0 in each buffer, this will fail to detect the embedded ZIP
if the bar.exe was not compressed in foo.zip and if non-compressed files
extracted from ZIPs aren't extracted as new buffers:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.zip | ZIP | 0 | 0 |
| └── bar.exe | PE | 1 | 1 |
| └── sfx.zip | ZIP | 2 | 2 |
Provided that we ensure all files extracted from zips are scanned in
new buffers, option (B) should be safe.
(C) If we scan for embedded files at the baz.exe layer, we may detect:
| description | type | rec level | nested fmap level |
| ------------------------- | ----- | --------- | ----------------- |
| foo.tar.gz | GZ | 0 | 0 |
| └── foo.tar | TAR | 1 | 0 |
| ├── bar.zip | ZIP | 2 | 1 |
| │ └── hola.txt | ASCII | 3 | 0 |
| └── baz.exe | PE | 2 | 1 |
| ├── sfx.zip | ZIP | 3 | 1 |
| │ └── hello.txt | ASCII | 4 | 0 |
| └── sfx.7z | 7Z | 3 | 1 |
| └── world.txt | ASCII | 4 | 0 |
(C) is right. But it's harder to achieve. For this example we can get it by
restricting 7ZSFX and ZIPSFX detection only when scanning an executable.
But that may mean losing detection of archives embedded elsewhere.
And we'd have to identify allowable container types for each possible
embedded type, which would be very difficult.
So this commit aims to solve the issue the (B)-way.
Note that in all situations, we still have to scan with file typing
enabled to determine if we need to reassign the current file type, such
as re-identifying a Bzip2 archive as a DMG that happens to be Bzip2-
compressed. Detection of DMG and a handful of other types rely on
finding data partway through or near the ned of a file before
reassigning the entire file as the new type.
Other fixes and considerations in this commit:
- The utf16 HTML parser has weak error handling, particularly with respect
to creating a nested fmap for scanning the ascii decoded file.
This commit cleans up the error handling and wraps the nested scan with
the recursion-stack push()/pop() for correct recursion tracking.
Before this commit, each container layer had a flag to indicate if the
container layer is valid.
We need something similar so that the cli_recursion_stack_get_*()
functions ignore normalized layers. Details...
Imagine an LDB signature for HTML content that specifies a ZIP
container. If the signature actually alerts on the normalized HTML and
you don't ignore normalized layers for the container check, it will
appear as though the alert is in an HTML container rather than a ZIP
container.
This commit accomplishes this with a boolean you set in the scan context
before scanning a new layer. Then when the new fmap is created, it will
use that flag to set similar flag for the layer. The context flag is
reset those that anything after this doesn't have that flag.
The flag allows the new recursion_stack_get() function to ignore
normalized layers when iterating the stack to return a layer at a
requested index, negative or positive.
Scanning normalized extracted/normalized javascript and VBA should also
use the 'layer is normalized' flag.
- This commit also fixes Heuristic.Broken.Executable alert for ELF files
to make sure that:
A) these only alert if cli_append_virus() returns CL_VIRUS (aka it
respects the FP check).
B) all broken-executable alerts for ELF only happen if the
SCAN_HEURISTIC_BROKEN option is enabled.
- This commit also cleans up the error handling in cli_magic_scan_dir().
This was needed so we could correctly apply the layer-is-normalized-flag
to all VBA macros extracted to a directory when scanning the directory.
- Also fix an issue where exceeding scan maximums wouldn't cause embedded
file detection scans to abort. Granted we don't actually want to abort
if max filesize or max recursion depth are exceeded... only if max
scansize, max files, and max scantime are exceeded.
Add 'abort_scan' flag to scan context, to protect against depending on
correct error propagation for fatal conditions. Instead, setting this
flag in the scan context should guarantee that a fatal condition deep in
scan recursion isn't lost which result in more stuff being scanned
instead of aborting. This shouldn't be necessary, but some status codes
like CL_ETIMEOUT never used to be fatal and it's easier to do this than
to verify every parser only returns CL_ETIMEOUT and other "fatal
status codes" in fatal conditions.
- Remove duplicate is_tar() prototype from filestypes.c and include
is_tar.h instead.
- Presently we create the fmap hash when creating the fmap.
This wastes a bit of CPU if the hash is never needed.
Now that we're creating fmap's for all embedded files discovered with
file type recognition scans, this is a much more frequent occurence and
really slows things down.
This commit fixes the issue by only creating fmap hashes as needed.
This should not only resolve the perfomance impact of creating fmap's
for all embedded files, but also should improve performance in general.
- Add allmatch check to the zip parser after the central-header meta
match. That way we don't multiple alerts with the same match except in
allmatch mode. Clean up error handling in the zip parser a tiny bit.
- Fixes to ensure that the scan limits such as scansize, filesize,
recursion depth, # of embedded files, and scantime are always reported
if AlertExceedsMax (--alert-exceeds-max) is enabled.
- Fixed an issue where non-fatal alerts for exceeding scan maximums may
mask signature matches later on. I changed it so these alerts use the
"possibly unwanted" alert-type and thus only alert if no other alerts
were found or if all-match or heuristic-precedence are enabled.
- Added the "Heuristics.Limits.Exceeded.*" events to the JSON metadata
when the --gen-json feature is enabled. These will show up once under
"ParseErrors" the first time a limit is exceeded. In the present
implementation, only one limits-exceeded events will be added, so as to
prevent a malicious or malformed sample from filling the JSON buffer
with millions of events and using a tonne of RAM.
2021-09-11 14:15:21 -07:00
|
|
|
newnode->minrec = recursion_level;
|
2018-12-03 12:40:13 -05:00
|
|
|
cs->root = newnode;
|
2010-01-14 04:38:31 +01:00
|
|
|
|
|
|
|
ptree("3: %lld\n", hash[1]);
|
2018-12-03 12:40:13 -05:00
|
|
|
if (printtree(cs, cs->root, 0)) {
|
2022-01-27 10:08:59 -08:00
|
|
|
return "cacheset_add: inconsistent tree after adding newnode, good luck";
|
2010-01-14 04:38:31 +01:00
|
|
|
}
|
2013-04-15 17:58:26 -04:00
|
|
|
printnode("newnode", cs, newnode);
|
2022-01-27 10:08:59 -08:00
|
|
|
return NULL;
|
2010-01-09 02:19:25 +01:00
|
|
|
}
|
2013-04-15 17:58:26 -04:00
|
|
|
|
2012-11-27 11:24:52 -05:00
|
|
|
/* If the hash is not present nothing happens other than splaying the tree.
|
2019-05-03 18:16:03 -04:00
|
|
|
Otherwise the identified node is removed from the tree and then placed back at
|
2012-11-27 11:24:52 -05:00
|
|
|
the front of the chain. */
|
2025-06-03 19:03:20 -04:00
|
|
|
static inline void cacheset_remove(struct cache_set *cs, uint8_t *sha2_256, size_t size)
|
2018-12-03 12:40:13 -05:00
|
|
|
{
|
2012-11-27 11:24:52 -05:00
|
|
|
struct node *targetnode;
|
|
|
|
struct node *reattachnode;
|
2025-06-03 19:03:20 -04:00
|
|
|
int64_t hash[4];
|
2012-11-27 11:24:52 -05:00
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
memcpy(hash, sha2_256, 32);
|
2018-12-03 12:40:13 -05:00
|
|
|
if (splay(hash, size, cs) != 1) {
|
|
|
|
cli_dbgmsg("cacheset_remove: node not found in tree\n");
|
|
|
|
return; /* No op */
|
2012-11-27 11:24:52 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
ptree("cacheset_remove: node found and splayed to root\n");
|
|
|
|
targetnode = cs->root;
|
2013-04-15 17:58:26 -04:00
|
|
|
printnode("targetnode", cs, targetnode);
|
2012-11-27 11:24:52 -05:00
|
|
|
|
|
|
|
/* First fix the tree */
|
2018-12-03 12:40:13 -05:00
|
|
|
if (targetnode->left == NULL) {
|
2012-11-27 11:24:52 -05:00
|
|
|
/* At left edge so prune */
|
|
|
|
cs->root = targetnode->right;
|
2018-12-03 12:40:13 -05:00
|
|
|
if (cs->root)
|
2012-11-27 11:24:52 -05:00
|
|
|
cs->root->up = NULL;
|
2018-12-03 12:40:13 -05:00
|
|
|
} else {
|
2012-11-27 11:24:52 -05:00
|
|
|
/* new root will come from leftside tree */
|
2018-12-03 12:40:13 -05:00
|
|
|
cs->root = targetnode->left;
|
2012-11-27 11:24:52 -05:00
|
|
|
cs->root->up = NULL;
|
|
|
|
/* splay tree, expecting not found, bringing rightmost member to root */
|
|
|
|
splay(hash, size, cs);
|
2013-04-15 17:58:26 -04:00
|
|
|
|
2012-11-27 11:24:52 -05:00
|
|
|
if (targetnode->right) {
|
|
|
|
/* reattach right tree to clean right-side attach point */
|
|
|
|
reattachnode = cs->root;
|
2018-12-03 12:40:13 -05:00
|
|
|
while (reattachnode->right)
|
2012-11-27 11:24:52 -05:00
|
|
|
reattachnode = reattachnode->right; /* shouldn't happen, but safer in case of dupe */
|
2018-12-03 12:40:13 -05:00
|
|
|
reattachnode->right = targetnode->right;
|
2013-04-15 17:58:26 -04:00
|
|
|
targetnode->right->up = reattachnode;
|
2012-11-27 11:24:52 -05:00
|
|
|
}
|
|
|
|
}
|
2018-12-03 12:40:13 -05:00
|
|
|
targetnode->size = (size_t)0;
|
2013-04-15 17:58:26 -04:00
|
|
|
targetnode->digest[0] = 0;
|
|
|
|
targetnode->digest[1] = 0;
|
2025-06-03 19:03:20 -04:00
|
|
|
targetnode->digest[2] = 0;
|
|
|
|
targetnode->digest[3] = 0;
|
2018-12-03 12:40:13 -05:00
|
|
|
targetnode->up = NULL;
|
|
|
|
targetnode->left = NULL;
|
|
|
|
targetnode->right = NULL;
|
2012-11-27 11:24:52 -05:00
|
|
|
|
|
|
|
/* Tree is fixed, so now fix chain around targetnode */
|
2018-12-03 12:40:13 -05:00
|
|
|
if (targetnode->prev)
|
2012-11-27 11:24:52 -05:00
|
|
|
targetnode->prev->next = targetnode->next;
|
2018-12-03 12:40:13 -05:00
|
|
|
if (targetnode->next)
|
2012-11-27 11:24:52 -05:00
|
|
|
targetnode->next->prev = targetnode->prev;
|
2018-12-03 12:40:13 -05:00
|
|
|
if (cs->last == targetnode)
|
2012-11-27 11:24:52 -05:00
|
|
|
cs->last = targetnode->prev;
|
|
|
|
|
2013-02-27 11:34:13 -05:00
|
|
|
/* Put targetnode at front of chain, if not there already */
|
2018-12-03 12:40:13 -05:00
|
|
|
if (cs->first != targetnode) {
|
2013-02-27 11:34:13 -05:00
|
|
|
targetnode->next = cs->first;
|
2018-12-03 12:40:13 -05:00
|
|
|
if (cs->first)
|
2013-02-27 11:34:13 -05:00
|
|
|
cs->first->prev = targetnode;
|
|
|
|
cs->first = targetnode;
|
2012-11-27 11:24:52 -05:00
|
|
|
}
|
|
|
|
targetnode->prev = NULL;
|
2013-04-15 17:58:26 -04:00
|
|
|
|
|
|
|
printnode("root", cs, cs->root);
|
|
|
|
printnode("first", cs, cs->first);
|
|
|
|
printnode("last", cs, cs->last);
|
|
|
|
|
|
|
|
printchain("remove (after)", cs);
|
2012-11-27 11:24:52 -05:00
|
|
|
}
|
2010-01-08 01:39:25 +01:00
|
|
|
|
2022-08-03 20:34:48 -07:00
|
|
|
/* Looks up an hash in the proper tree */
|
2025-06-03 19:03:20 -04:00
|
|
|
static cl_error_t cache_lookup_hash(uint8_t *sha2_256, size_t len, struct CACHE *cache, uint32_t recursion_level)
|
2022-08-03 20:34:48 -07:00
|
|
|
{
|
2025-06-03 19:03:20 -04:00
|
|
|
cl_error_t ret = CL_ERROR;
|
2022-08-03 20:34:48 -07:00
|
|
|
unsigned int key = 0;
|
|
|
|
struct CACHE *c;
|
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
if (!sha2_256) {
|
2022-08-03 20:34:48 -07:00
|
|
|
cli_dbgmsg("cache_lookup: No hash available. Nothing to look up.\n");
|
2025-06-03 19:03:20 -04:00
|
|
|
ret = CL_ENULLARG;
|
|
|
|
goto done;
|
2022-08-03 20:34:48 -07:00
|
|
|
}
|
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
key = getkey(sha2_256, cache->trees);
|
2022-08-03 20:34:48 -07:00
|
|
|
|
|
|
|
c = &cache[key];
|
2010-01-08 01:39:25 +01:00
|
|
|
|
2010-05-07 10:07:48 +03:00
|
|
|
#ifdef CL_THREAD_SAFE
|
2022-08-03 20:34:48 -07:00
|
|
|
if (pthread_mutex_lock(&c->mutex)) {
|
|
|
|
cli_errmsg("cache_lookup_hash: cache_lookup_hash: mutex lock fail\n");
|
2025-06-03 19:03:20 -04:00
|
|
|
ret = CL_ELOCK;
|
|
|
|
goto done;
|
2022-08-03 20:34:48 -07:00
|
|
|
}
|
2010-05-07 10:07:48 +03:00
|
|
|
#endif
|
2010-01-13 00:03:30 +01:00
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
ret = (cacheset_lookup(&c->cacheset, sha2_256, len, recursion_level)) ? CL_CLEAN : CL_VIRUS;
|
2022-08-03 20:34:48 -07:00
|
|
|
|
|
|
|
#ifdef CL_THREAD_SAFE
|
|
|
|
pthread_mutex_unlock(&c->mutex);
|
|
|
|
#endif
|
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
done:
|
2022-08-03 20:34:48 -07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
cl_error_t clean_cache_init(struct cl_engine *engine)
|
2018-12-03 12:40:13 -05:00
|
|
|
{
|
2025-06-03 19:03:20 -04:00
|
|
|
cl_error_t status = CL_ERROR;
|
2010-11-04 21:14:14 +02:00
|
|
|
struct CACHE *cache;
|
2025-06-03 19:03:20 -04:00
|
|
|
uint32_t i;
|
2010-01-11 13:10:36 +01:00
|
|
|
|
2018-12-03 12:40:13 -05:00
|
|
|
if (!engine) {
|
2025-06-03 19:03:20 -04:00
|
|
|
cli_errmsg("clean_cache_init: Engine is NULL.\n");
|
|
|
|
status = CL_ENULLARG;
|
|
|
|
goto done;
|
2010-01-08 01:39:25 +01:00
|
|
|
}
|
2010-01-14 18:54:53 +01:00
|
|
|
|
2013-12-11 14:58:07 -05:00
|
|
|
if (engine->engine_options & ENGINE_OPTIONS_DISABLE_CACHE) {
|
2022-08-03 20:34:48 -07:00
|
|
|
cli_dbgmsg("clean_cache_init: Caching disabled.\n");
|
2025-06-03 19:03:20 -04:00
|
|
|
status = CL_SUCCESS;
|
|
|
|
goto done;
|
2013-12-11 14:58:07 -05:00
|
|
|
}
|
|
|
|
|
2023-03-31 17:20:54 -04:00
|
|
|
// The user requested the cache size to be engine->cache_size
|
|
|
|
// The nodes within each tree are locked together, so having one tree would result in excessive lock contention.
|
|
|
|
// However, having too many trees is inefficient.
|
|
|
|
// A good balance is to have trees and nodes per tree be equal, which is done by using the sqrt of the user request cache size.
|
|
|
|
const uint32_t trees = ceil(sqrt(engine->cache_size));
|
|
|
|
const uint32_t nodes_per_tree = ceil(sqrt(engine->cache_size));
|
|
|
|
|
|
|
|
cli_dbgmsg("clean_cache_init: Requested cache size: %d. Actual cache size: %d. Trees: %d. Nodes per tree: %d.\n", engine->cache_size, trees * nodes_per_tree, trees, nodes_per_tree);
|
|
|
|
|
|
|
|
if (!(cache = MPOOL_MALLOC(engine->mempool, sizeof(struct CACHE) * trees))) {
|
2025-06-03 19:03:20 -04:00
|
|
|
cli_errmsg("clean_cache_init: Failed to allocate memory for cache.\n");
|
|
|
|
status = CL_EMEM;
|
|
|
|
goto done;
|
2010-01-08 01:39:25 +01:00
|
|
|
}
|
|
|
|
|
2023-03-31 17:20:54 -04:00
|
|
|
cache->trees = trees;
|
|
|
|
cache->nodes_per_tree = nodes_per_tree;
|
|
|
|
|
|
|
|
for (i = 0; i < trees; i++) {
|
2019-05-07 16:52:29 -04:00
|
|
|
#ifdef CL_THREAD_SAFE
|
2018-12-03 12:40:13 -05:00
|
|
|
if (pthread_mutex_init(&cache[i].mutex, NULL)) {
|
2025-06-03 19:03:20 -04:00
|
|
|
cli_errmsg("clean_cache_init: Mutex init failed.\n");
|
|
|
|
status = CL_EMEM;
|
|
|
|
goto done;
|
2018-12-03 12:40:13 -05:00
|
|
|
}
|
2019-05-07 16:52:29 -04:00
|
|
|
#endif
|
2023-03-31 17:20:54 -04:00
|
|
|
if (cacheset_init(&cache[i].cacheset, engine->mempool, cache->nodes_per_tree)) {
|
2025-06-03 19:03:20 -04:00
|
|
|
cli_errmsg("clean_cache_init: Failed to initialize cache set.\n");
|
|
|
|
status = CL_EMEM;
|
|
|
|
goto done;
|
2018-12-03 12:40:13 -05:00
|
|
|
}
|
2010-01-08 01:39:25 +01:00
|
|
|
}
|
2025-06-03 19:03:20 -04:00
|
|
|
|
2010-01-14 18:54:53 +01:00
|
|
|
engine->cache = cache;
|
2025-06-03 19:03:20 -04:00
|
|
|
status = CL_SUCCESS;
|
|
|
|
|
|
|
|
done:
|
|
|
|
if (status != CL_SUCCESS) {
|
|
|
|
cli_errmsg("clean_cache_init: Failed to initialize cache.\n");
|
|
|
|
clean_cache_destroy(engine);
|
|
|
|
} else {
|
|
|
|
cli_dbgmsg("clean_cache_init: Cache initialized successfully.\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
return status;
|
2010-01-08 01:39:25 +01:00
|
|
|
}
|
|
|
|
|
2022-08-03 20:34:48 -07:00
|
|
|
void clean_cache_destroy(struct cl_engine *engine)
|
2018-12-03 12:40:13 -05:00
|
|
|
{
|
2010-11-04 21:14:14 +02:00
|
|
|
struct CACHE *cache;
|
2010-01-14 18:54:53 +01:00
|
|
|
unsigned int i;
|
2010-01-09 02:19:25 +01:00
|
|
|
|
2018-12-03 12:40:13 -05:00
|
|
|
if (!engine || !(cache = engine->cache))
|
|
|
|
return;
|
2013-12-11 14:58:07 -05:00
|
|
|
|
2013-11-15 19:15:20 +00:00
|
|
|
if (engine->engine_options & ENGINE_OPTIONS_DISABLE_CACHE) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2023-03-31 17:20:54 -04:00
|
|
|
for (i = 0; i < cache->trees; i++) {
|
2018-12-03 12:40:13 -05:00
|
|
|
cacheset_destroy(&cache[i].cacheset, engine->mempool);
|
2019-05-07 16:52:29 -04:00
|
|
|
#ifdef CL_THREAD_SAFE
|
2018-12-03 12:40:13 -05:00
|
|
|
pthread_mutex_destroy(&cache[i].mutex);
|
2019-05-07 16:52:29 -04:00
|
|
|
#endif
|
2010-01-14 18:54:53 +01:00
|
|
|
}
|
2019-05-03 18:16:03 -04:00
|
|
|
MPOOL_FREE(engine->mempool, cache);
|
2010-01-14 18:54:53 +01:00
|
|
|
}
|
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
void clean_cache_add(cli_ctx *ctx)
|
2018-12-03 12:40:13 -05:00
|
|
|
{
|
2025-06-03 19:03:20 -04:00
|
|
|
cl_error_t ret;
|
|
|
|
|
2022-01-27 10:08:59 -08:00
|
|
|
const char *errmsg = NULL;
|
2022-01-25 13:34:56 -05:00
|
|
|
|
|
|
|
unsigned int key = 0;
|
2010-03-05 22:17:46 +01:00
|
|
|
uint32_t level;
|
2010-01-08 01:39:25 +01:00
|
|
|
struct CACHE *c;
|
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
uint8_t *sha2_256 = NULL;
|
|
|
|
size_t size = 0;
|
2013-12-11 14:58:07 -05:00
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
if (!ctx || !ctx->engine || !ctx->engine->cache) {
|
|
|
|
goto done;
|
2013-11-15 19:15:20 +00:00
|
|
|
}
|
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
if (ctx->engine->engine_options & ENGINE_OPTIONS_DISABLE_CACHE) {
|
|
|
|
cli_dbgmsg("clean_cache_add: Caching disabled. Not adding sample to cache.\n");
|
|
|
|
goto done;
|
2022-01-25 13:34:56 -05:00
|
|
|
}
|
|
|
|
|
2022-08-03 20:34:48 -07:00
|
|
|
if (SCAN_COLLECT_METADATA) {
|
|
|
|
// Don't cache when using the "collect metadata" feature.
|
|
|
|
// We don't cache the JSON, so we can't reproduce it when the cache is positive.
|
|
|
|
cli_dbgmsg("clean_cache_add: collect metadata feature enabled, skipping cache\n");
|
2025-06-03 19:03:20 -04:00
|
|
|
goto done;
|
2022-08-03 20:34:48 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
if (ctx->fmap && ctx->fmap->dont_cache_flag == true) {
|
|
|
|
cli_dbgmsg("clean_cache_add: caching disabled for this layer, skipping cache\n");
|
2025-06-03 19:03:20 -04:00
|
|
|
goto done;
|
2013-06-18 12:30:21 -04:00
|
|
|
}
|
2022-08-03 20:34:48 -07:00
|
|
|
|
2025-06-09 01:33:26 -04:00
|
|
|
if (0 < evidence_num_alerts(ctx->this_layer_evidence)) {
|
2022-08-03 20:34:48 -07:00
|
|
|
// TODO: The dont cache flag should take care of preventing caching of files with embedded files that alert.
|
|
|
|
// Consider removing this check to allow caching of other actually clean files found within archives.
|
|
|
|
// It would be a (very) minor optimization.
|
|
|
|
cli_dbgmsg("clean_cache_add: alert found within same topfile, skipping cache\n");
|
2025-06-03 19:03:20 -04:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get the hash */
|
|
|
|
ret = fmap_get_hash(ctx->fmap, &sha2_256, CLI_HASH_SHA2_256);
|
|
|
|
if (CL_SUCCESS != ret || NULL == sha2_256) {
|
|
|
|
cli_dbgmsg("clean_cache_add: Failed to get SHA2-256 hash.\n");
|
|
|
|
goto done;
|
2022-08-03 20:34:48 -07:00
|
|
|
}
|
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
/* Get the file size */
|
|
|
|
size = ctx->fmap->len;
|
|
|
|
|
2025-09-26 18:26:00 -04:00
|
|
|
level = ctx->fmap->dont_cache_flag ? ctx->recursion_level : 0;
|
2022-08-03 20:34:48 -07:00
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
key = getkey(sha2_256, ctx->engine->cache->trees);
|
2022-08-03 20:34:48 -07:00
|
|
|
c = &ctx->engine->cache[key];
|
|
|
|
|
2019-05-07 16:52:29 -04:00
|
|
|
#ifdef CL_THREAD_SAFE
|
2018-12-03 12:40:13 -05:00
|
|
|
if (pthread_mutex_lock(&c->mutex)) {
|
|
|
|
cli_errmsg("cli_add: mutex lock fail\n");
|
2025-06-03 19:03:20 -04:00
|
|
|
goto done;
|
2010-01-08 01:39:25 +01:00
|
|
|
}
|
2019-05-07 16:52:29 -04:00
|
|
|
#endif
|
2010-01-09 02:19:25 +01:00
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
errmsg = cacheset_add(&c->cacheset, sha2_256, size, level);
|
2010-01-09 02:19:25 +01:00
|
|
|
|
2019-05-07 16:52:29 -04:00
|
|
|
#ifdef CL_THREAD_SAFE
|
2010-01-08 01:39:25 +01:00
|
|
|
pthread_mutex_unlock(&c->mutex);
|
2019-05-07 16:52:29 -04:00
|
|
|
#endif
|
2022-01-27 10:08:59 -08:00
|
|
|
if (errmsg != NULL) {
|
|
|
|
cli_errmsg("%s\n", errmsg);
|
|
|
|
}
|
2022-08-03 20:34:48 -07:00
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
cli_dbgmsg("clean_cache_add: "
|
|
|
|
"%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x (level %u)\n",
|
|
|
|
sha2_256[0], sha2_256[1], sha2_256[2], sha2_256[3], sha2_256[4], sha2_256[5], sha2_256[6], sha2_256[7],
|
|
|
|
sha2_256[8], sha2_256[9], sha2_256[10], sha2_256[11], sha2_256[12], sha2_256[13], sha2_256[14], sha2_256[15],
|
|
|
|
sha2_256[16], sha2_256[17], sha2_256[18], sha2_256[19], sha2_256[20], sha2_256[21], sha2_256[22], sha2_256[23],
|
|
|
|
sha2_256[24], sha2_256[25], sha2_256[26], sha2_256[27], sha2_256[28], sha2_256[29], sha2_256[30], sha2_256[31],
|
|
|
|
level);
|
2022-08-03 20:34:48 -07:00
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
done:
|
2010-01-08 01:39:25 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
void clean_cache_remove(uint8_t *sha2_256, size_t size, const struct cl_engine *engine)
|
2018-12-03 12:40:13 -05:00
|
|
|
{
|
2022-01-25 13:34:56 -05:00
|
|
|
unsigned int key = 0;
|
2012-11-27 11:24:52 -05:00
|
|
|
struct CACHE *c;
|
|
|
|
|
2018-12-03 12:40:13 -05:00
|
|
|
if (!engine || !engine->cache)
|
2025-06-03 19:03:20 -04:00
|
|
|
goto done;
|
2013-12-11 14:58:07 -05:00
|
|
|
|
2013-11-15 19:15:20 +00:00
|
|
|
if (engine->engine_options & ENGINE_OPTIONS_DISABLE_CACHE) {
|
2022-08-03 20:34:48 -07:00
|
|
|
cli_dbgmsg("clean_cache_remove: Caching disabled.\n");
|
2025-06-03 19:03:20 -04:00
|
|
|
goto done;
|
2013-11-15 19:15:20 +00:00
|
|
|
}
|
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
if (!sha2_256) {
|
2022-08-03 20:34:48 -07:00
|
|
|
cli_dbgmsg("clean_cache_remove: No hash available. Nothing to remove from cache.\n");
|
2025-06-03 19:03:20 -04:00
|
|
|
goto done;
|
2022-01-25 13:34:56 -05:00
|
|
|
}
|
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
key = getkey(sha2_256, engine->cache->trees);
|
2013-04-15 17:58:26 -04:00
|
|
|
|
2012-11-27 16:17:31 -05:00
|
|
|
c = &engine->cache[key];
|
2019-05-07 16:52:29 -04:00
|
|
|
#ifdef CL_THREAD_SAFE
|
2018-12-03 12:40:13 -05:00
|
|
|
if (pthread_mutex_lock(&c->mutex)) {
|
|
|
|
cli_errmsg("cli_add: mutex lock fail\n");
|
2025-06-03 19:03:20 -04:00
|
|
|
goto done;
|
2012-11-27 11:24:52 -05:00
|
|
|
}
|
2019-05-07 16:52:29 -04:00
|
|
|
#endif
|
2012-11-27 11:24:52 -05:00
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
cacheset_remove(&c->cacheset, sha2_256, size);
|
2012-11-27 11:24:52 -05:00
|
|
|
|
2019-05-07 16:52:29 -04:00
|
|
|
#ifdef CL_THREAD_SAFE
|
2012-11-27 11:24:52 -05:00
|
|
|
pthread_mutex_unlock(&c->mutex);
|
2019-05-07 16:52:29 -04:00
|
|
|
#endif
|
2025-06-03 19:03:20 -04:00
|
|
|
cli_dbgmsg("clean_cache_remove: "
|
|
|
|
"%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
|
|
|
|
sha2_256[0], sha2_256[1], sha2_256[2], sha2_256[3], sha2_256[4], sha2_256[5], sha2_256[6], sha2_256[7],
|
|
|
|
sha2_256[8], sha2_256[9], sha2_256[10], sha2_256[11], sha2_256[12], sha2_256[13], sha2_256[14], sha2_256[15],
|
|
|
|
sha2_256[16], sha2_256[17], sha2_256[18], sha2_256[19], sha2_256[20], sha2_256[21], sha2_256[22], sha2_256[23],
|
|
|
|
sha2_256[24], sha2_256[25], sha2_256[26], sha2_256[27], sha2_256[28], sha2_256[29], sha2_256[30], sha2_256[31]);
|
|
|
|
|
|
|
|
done:
|
2012-11-27 11:24:52 -05:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
cl_error_t clean_cache_check(cli_ctx *ctx)
|
2018-12-03 12:40:13 -05:00
|
|
|
{
|
2025-06-03 19:03:20 -04:00
|
|
|
cl_error_t status = CL_VIRUS;
|
|
|
|
uint8_t *sha2_256 = NULL;
|
|
|
|
size_t size;
|
|
|
|
|
|
|
|
if (!ctx || !ctx->engine) {
|
|
|
|
cli_errmsg("clean_cache_check: Context or engine is NULL.\n");
|
|
|
|
status = CL_ENULLARG;
|
|
|
|
goto done;
|
|
|
|
}
|
2015-11-04 14:46:46 -05:00
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
if (!ctx->engine->cache) {
|
|
|
|
if (ctx->engine->engine_options & ENGINE_OPTIONS_DISABLE_CACHE) {
|
|
|
|
cli_dbgmsg("clean_cache_check: Caching is disabled.\n");
|
|
|
|
status = CL_VIRUS;
|
|
|
|
} else {
|
|
|
|
cli_dbgmsg("clean_cache_check: Cache is not initialized.\n");
|
|
|
|
status = CL_ENULLARG;
|
|
|
|
}
|
|
|
|
goto done;
|
|
|
|
}
|
2015-11-04 14:46:46 -05:00
|
|
|
|
2022-08-03 20:34:48 -07:00
|
|
|
if (SCAN_COLLECT_METADATA) {
|
|
|
|
// Don't cache when using the "collect metadata" feature.
|
|
|
|
// We don't cache the JSON, so we can't reproduce it when the cache is positive.
|
|
|
|
cli_dbgmsg("clean_cache_check: collect metadata feature enabled, skipping cache\n");
|
2025-06-03 19:03:20 -04:00
|
|
|
status = CL_VIRUS;
|
|
|
|
goto done;
|
2022-08-03 20:34:48 -07:00
|
|
|
}
|
|
|
|
|
2025-06-03 19:03:20 -04:00
|
|
|
status = fmap_get_hash(ctx->fmap, &sha2_256, CLI_HASH_SHA2_256);
|
|
|
|
if (status != CL_SUCCESS || !sha2_256) {
|
|
|
|
cli_dbgmsg("clean_cache_check: Failed to get SHA2-256 hash. Cannot check in cache.\n");
|
|
|
|
status = CL_VIRUS;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
size = ctx->fmap->len;
|
|
|
|
|
|
|
|
status = cache_lookup_hash(sha2_256, size, ctx->engine->cache, ctx->recursion_level);
|
|
|
|
cli_dbgmsg("clean_cache_check: "
|
|
|
|
"%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x is %s\n",
|
|
|
|
sha2_256[0], sha2_256[1], sha2_256[2], sha2_256[3], sha2_256[4], sha2_256[5], sha2_256[6], sha2_256[7],
|
|
|
|
sha2_256[8], sha2_256[9], sha2_256[10], sha2_256[11], sha2_256[12], sha2_256[13], sha2_256[14], sha2_256[15],
|
|
|
|
sha2_256[16], sha2_256[17], sha2_256[18], sha2_256[19], sha2_256[20], sha2_256[21], sha2_256[22], sha2_256[23],
|
|
|
|
sha2_256[24], sha2_256[25], sha2_256[26], sha2_256[27], sha2_256[28], sha2_256[29], sha2_256[30], sha2_256[31],
|
|
|
|
(status == CL_VIRUS) ? "negative" : "positive");
|
|
|
|
|
|
|
|
done:
|
|
|
|
return status;
|
2010-01-08 01:39:25 +01:00
|
|
|
}
|