HFS+ parser: Fix bugs introduced in refactor

Fix a pretty silly double-free, and fix an issue where 'extracted_file' was
not being set to true.

Also rename the `uncompressed` variable to `uncompressed_block`, because
there is another variable with the same name above.
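The double-free fixed here is the classic pattern where a buffer is freed on the success path but the pointer is not cleared, so the shared cleanup path frees it a second time. Below is a minimal standalone sketch of the free-then-NULL discipline the fix applies; the names and buffer size are illustrative, not the ClamAV code.

#include <stdlib.h>
#include <string.h>

/* Sketch: free exactly once, NULL the pointer immediately, and make the
 * shared cleanup label guard every free with a NULL check so reaching it
 * after the success path cannot double-free. */
static int extract_once(void)
{
    unsigned char *uncompressed = NULL;
    int extracted_file = 0; /* must be set once data is actually written */

    uncompressed = malloc(4096);
    if (NULL == uncompressed)
        goto done;

    memset(uncompressed, 0, 4096);
    /* ... write the buffer out somewhere ... */
    extracted_file = 1; /* the assignment this commit adds back */

    free(uncompressed);
    uncompressed = NULL; /* prevents a second free at the cleanup label */

done:
    if (NULL != uncompressed) { /* only frees if not already released */
        free(uncompressed);
        uncompressed = NULL;
    }
    return extracted_file;
}
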
Authored by Micah Snyder on 2022-09-29 22:09:15 -07:00; committed by Micah Snyder
parent 4036484924
commit 92cbfa4bc0

@@ -1180,6 +1180,8 @@ static cl_error_t hfsplus_walk_catalog(cli_ctx *ctx, hfsPlusVolumeHeader *volHea
         written = cli_writen(ofd, uncompressed, header.fileSize);
+        extracted_file = true;
         free(uncompressed);
         uncompressed = NULL;
     }
@@ -1237,7 +1239,7 @@ static cl_error_t hfsplus_walk_catalog(cli_ctx *ctx, hfsPlusVolumeHeader *volHea
         cli_dbgmsg("hfsplus_walk_catalog: Failed to read block table\n");
     } else {
         uint8_t block[4096];
-        uint8_t uncompressed[4096];
+        uint8_t uncompressed_block[4096];
         unsigned curBlock;
         for (curBlock = 0; status == CL_SUCCESS && curBlock < numBlocks; ++curBlock) {
@@ -1279,8 +1281,8 @@ static cl_error_t hfsplus_walk_catalog(cli_ctx *ctx, hfsPlusVolumeHeader *volHea
         stream.opaque = Z_NULL;
         stream.avail_in = readLen;
         stream.next_in = block;
-        stream.avail_out = sizeof(uncompressed);
-        stream.next_out = uncompressed;
+        stream.avail_out = sizeof(uncompressed_block);
+        stream.next_out = uncompressed_block;
         if (Z_OK != (z_ret = inflateInit2(&stream, 15))) {
             cli_dbgmsg("hfsplus_walk_catalog: inflateInit2 failed (%d)\n", z_ret);
@@ -1293,8 +1295,8 @@ static cl_error_t hfsplus_walk_catalog(cli_ctx *ctx, hfsPlusVolumeHeader *volHea
         if (streamCompressed) {
             stream.avail_in = readLen;
             stream.next_in = block;
-            stream.avail_out = sizeof(uncompressed);
-            stream.next_out = uncompressed;
+            stream.avail_out = sizeof(uncompressed_block);
+            stream.next_out = uncompressed_block;
             while (stream.avail_in > 0) {
                 z_ret = inflate(&stream, Z_NO_FLUSH);
@@ -1304,14 +1306,14 @@ static cl_error_t hfsplus_walk_catalog(cli_ctx *ctx, hfsPlusVolumeHeader *volHea
                     goto done;
                 }
-                if (cli_writen(ofd, &uncompressed, sizeof(uncompressed) - stream.avail_out) != sizeof(uncompressed) - stream.avail_out) {
+                if (cli_writen(ofd, &uncompressed_block, sizeof(uncompressed_block) - stream.avail_out) != sizeof(uncompressed_block) - stream.avail_out) {
                     cli_dbgmsg("hfsplus_walk_catalog: Failed to write to temporary file\n");
                     status = CL_EWRITE;
                     goto done;
                 }
-                written += sizeof(uncompressed) - stream.avail_out;
-                stream.avail_out = sizeof(uncompressed);
-                stream.next_out = uncompressed;
+                written += sizeof(uncompressed_block) - stream.avail_out;
+                stream.avail_out = sizeof(uncompressed_block);
+                stream.next_out = uncompressed_block;
                 extracted_file = true;
             }
@@ -1432,7 +1434,6 @@ static cl_error_t hfsplus_walk_catalog(cli_ctx *ctx, hfsPlusVolumeHeader *volHea
 done:
     if (table) {
         free(table);
-        table = NULL;
     }
     if (-1 != ifd) {
         close(ifd);
@@ -1453,7 +1454,6 @@ done:
                 goto done;
             }
         }
        free(tmpname);
    }
    if (NULL != nodeBuf) {
@@ -1462,9 +1462,8 @@ done:
     if (NULL != name_utf8) {
         free(name_utf8);
     }
-    if (NULL != name_utf8) {
-        free(name_utf8);
-        name_utf8 = NULL;
+    if (NULL != uncompressed) {
+        free(uncompressed);
     }
     return status;
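
For context on the hunk at -1304: the loop there is the standard zlib streaming-inflate pattern, where a fixed-size output buffer (now unambiguously named `uncompressed_block`) is drained to the output file and reset each time it fills. Here is a self-contained sketch of that general pattern under stated assumptions: plain stdio instead of ClamAV's cli_writen, a 4096-byte buffer, and an illustrative function name.

#include <stdio.h>
#include <string.h>
#include <zlib.h>

/* Sketch of a per-block streaming-inflate loop: decompress one input
 * block, flush the fixed-size output buffer to `out` whenever it fills or
 * input runs out, then reset next_out/avail_out and continue. */
static int inflate_block_to_file(const unsigned char *block, size_t block_len, FILE *out)
{
    unsigned char uncompressed_block[4096];
    z_stream stream;
    int z_ret;

    memset(&stream, 0, sizeof(stream));
    stream.avail_in  = (uInt)block_len;
    stream.next_in   = (Bytef *)block;
    stream.avail_out = sizeof(uncompressed_block);
    stream.next_out  = uncompressed_block;

    if (Z_OK != inflateInit2(&stream, 15)) /* 15 = max window, zlib header expected */
        return -1;

    do {
        z_ret = inflate(&stream, Z_NO_FLUSH);
        if (z_ret != Z_OK && z_ret != Z_STREAM_END) {
            inflateEnd(&stream);
            return -1;
        }

        /* Write whatever landed in the output buffer, then reuse it. */
        size_t have = sizeof(uncompressed_block) - stream.avail_out;
        if (have > 0 && fwrite(uncompressed_block, 1, have, out) != have) {
            inflateEnd(&stream);
            return -1;
        }
        stream.avail_out = sizeof(uncompressed_block);
        stream.next_out  = uncompressed_block;
    } while (Z_STREAM_END != z_ret && stream.avail_in > 0);

    inflateEnd(&stream);
    return 0;
}

The rename also removes the name collision with the heap-allocated `uncompressed` pointer that the new cleanup block at the end of the diff frees.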