Split data into chunks and slap multiprocessing on it to lower memory footprint and execution time
parent 87d50d9f8a
commit 8b0766b719

1 changed file with 44 additions and 16 deletions
@@ -1,21 +1,21 @@
 # Copyright (c) 2024 Julian Müller (ChaoticByte)
 # License: MIT
 
-def obf(data: bytes, key: bytes, decrypt: bool = False, iterations: int = 8) -> bytes:
-    assert type(data) == bytes
-    assert type(key) == bytes
-    assert type(iterations) == int
-    assert type(decrypt) == bool
-    data = bytearray(data)
-    key = bytearray(key)
-    len_data = len(data)
+from multiprocessing import Pool
+
+CHUNKSIZE = 1024 * 4
+
+
+def _obf(args) -> bytearray:
+    data_chunk, key, decrypt, iterations = args
+    len_data = len(data_chunk)
     len_key = len(key)
     for _ in range(iterations):
         # shift (encrypt)
         if not decrypt:
             for i in range(len_data):
                 n = key[i % len_key]
-                data[i] = (data[i] + n) % 256
+                data_chunk[i] = (data_chunk[i] + n) % 256
         # transpose
         # list of tuples that stores transposition data (from, to):
         swap_indices = [] # (this is extremely memory inefficient lol)
@@ -28,16 +28,44 @@ def obf(data: bytes, key: bytes, decrypt: bool = False, iterations: int = 8) -> bytes:
         swap_indices.reverse()
         for a, b in swap_indices:
             # swap values
-            a_ = data[a]
-            b_ = data[b]
-            data[a] = b_
-            data[b] = a_
+            a_ = data_chunk[a]
+            b_ = data_chunk[b]
+            data_chunk[a] = b_
+            data_chunk[b] = a_
         # unshift (decrypt)
         if decrypt:
             for i in range(len_data):
                 n = key[i % len_key]
-                b = data[i] - n
+                b = data_chunk[i] - n
                 while b < 0:
                     b = 256 + b
-                data[i] = b
-    return bytes(data)
+                data_chunk[i] = b
+    return data_chunk
+
+
+def obf(data: bytes, key: bytes, decrypt: bool = False, iterations: int = 8, processes: int = 4) -> bytes:
+    assert type(data) == bytes
+    assert type(key) == bytes
+    assert type(iterations) == int
+    assert type(decrypt) == bool
+    assert type(processes) == int
+    data = bytearray(data)
+    key = bytearray(key)
+    len_data_complete = len(data)
+
+    chunks = []
+    p = 0
+    while p < len_data_complete:
+        p_new = p + CHUNKSIZE
+        chunk = data[p:p_new]
+        chunks.append((chunk, key, decrypt, iterations))
+        p = p_new
+
+    del data
+
+    pool = Pool(processes=processes)
+    results = pool.map(_obf, chunks)
+
+    del chunks
+
+    return bytes(b''.join(results))
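The worker pool above is never explicitly closed, which on current CPython may only surface as a ResourceWarning when the Pool object is garbage-collected. A minimal sketch of a cleanup-friendly variant (not part of the commit, using the same names as above) would wrap the pool in a context manager, which terminates the worker processes deterministically:

    with Pool(processes=processes) as pool:
        # map() blocks until every chunk has been processed by a worker
        results = pool.map(_obf, chunks)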
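For context, a minimal round-trip sketch of the new chunked API, assuming the changed file is importable as obfuscation (the module name, key, and sample data here are made up for illustration). The if __name__ == "__main__" guard matters because multiprocessing re-imports the calling module in each worker under the spawn start method:

    from obfuscation import obf  # assumed module name for the file changed above

    if __name__ == "__main__":
        key = b"hunter2"
        plaintext = b"some payload " * 4096  # larger than CHUNKSIZE, so it is split across chunks

        ciphertext = obf(plaintext, key, iterations=8, processes=4)
        restored = obf(ciphertext, key, decrypt=True, iterations=8, processes=4)

        assert restored == plaintext

Because every CHUNKSIZE-byte chunk is shifted and transposed independently with the same key and iteration count, decryption re-splits the ciphertext on the same boundaries and each worker inverts its own chunk.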