Split data into chunks and slap multiprocessing on it to lower memory footprint and execution time
parent 87d50d9f8a
commit 8b0766b719
1 changed file with 44 additions and 16 deletions
@@ -1,21 +1,21 @@
 # Copyright (c) 2024 Julian Müller (ChaoticByte)
 # License: MIT
 
-def obf(data: bytes, key: bytes, decrypt: bool = False, iterations: int = 8) -> bytes:
-    assert type(data) == bytes
-    assert type(key) == bytes
-    assert type(iterations) == int
-    assert type(decrypt) == bool
-    data = bytearray(data)
-    key = bytearray(key)
-    len_data = len(data)
+from multiprocessing import Pool
+
+CHUNKSIZE = 1024 * 4
+
+
+def _obf(args) -> bytearray:
+    data_chunk, key, decrypt, iterations = args
+    len_data = len(data_chunk)
     len_key = len(key)
     for _ in range(iterations):
         # shift (encrypt)
         if not decrypt:
             for i in range(len_data):
                 n = key[i % len_key]
-                data[i] = (data[i] + n) % 256
+                data_chunk[i] = (data_chunk[i] + n) % 256
         # transpose
         # list of tuples that stores transposition data (from, to):
         swap_indices = [] # (this is extremely memory inefficient lol)
@@ -28,16 +28,44 @@ def obf(data: bytes, key: bytes, decrypt: bool = False, iterations: int = 8) -> bytes:
         swap_indices.reverse()
         for a, b in swap_indices:
             # swap values
-            a_ = data[a]
-            b_ = data[b]
-            data[a] = b_
-            data[b] = a_
+            a_ = data_chunk[a]
+            b_ = data_chunk[b]
+            data_chunk[a] = b_
+            data_chunk[b] = a_
         # unshift (decrypt)
         if decrypt:
             for i in range(len_data):
                 n = key[i % len_key]
-                b = data[i] - n
+                b = data_chunk[i] - n
                 while b < 0:
                     b = 256 + b
-                data[i] = b
-    return bytes(data)
+                data_chunk[i] = b
+    return data_chunk
+
+
+def obf(data: bytes, key: bytes, decrypt: bool = False, iterations: int = 8, processes: int = 4) -> bytes:
+    assert type(data) == bytes
+    assert type(key) == bytes
+    assert type(iterations) == int
+    assert type(decrypt) == bool
+    assert type(processes) == int
+    data = bytearray(data)
+    key = bytearray(key)
+    len_data_complete = len(data)
+
+    chunks = []
+    p = 0
+    while p < len_data_complete:
+        p_new = p + CHUNKSIZE
+        chunk = data[p:p_new]
+        chunks.append((chunk, key, decrypt, iterations))
+        p = p_new
+
+    del data
+
+    pool = Pool(processes=processes)
+    results = pool.map(_obf, chunks)
+
+    del chunks
+
+    return bytes(b''.join(results))
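For illustration, here is a minimal round-trip sketch of the new interface. The key and plaintext are made-up sample values, obf() is assumed to be in scope (the changed file's name is not shown in this diff), and the __main__ guard is there because multiprocessing starts worker processes that re-import the module on spawn-based platforms.

# Hypothetical usage - sample values only, not part of the commit.
if __name__ == "__main__":
    key = b"example key"
    plaintext = b"some data worth obfuscating " * 1000   # spans several 4 KiB chunks
    ciphertext = obf(plaintext, key, iterations=8, processes=4)
    recovered = obf(ciphertext, key, decrypt=True, iterations=8, processes=4)
    print(recovered == plaintext)  # expected: True, assuming shift/transpose invert as intended

Since shifting and transposition now operate within each CHUNKSIZE chunk rather than across the whole buffer, data obfuscated before this change will generally not round-trip through the chunked version once the input is longer than one chunk.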