/*
 * Copyright (c) 2020-2022, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Badge.h>
#include <AK/Debug.h>
#include <AK/HashTable.h>
#include <AK/StackInfo.h>
#include <AK/TemporaryChange.h>
#include <LibCore/ElapsedTimer.h>
#include <LibJS/Heap/CellAllocator.h>
#include <LibJS/Heap/Handle.h>
#include <LibJS/Heap/Heap.h>
#include <LibJS/Heap/HeapBlock.h>
#include <LibJS/Interpreter.h>
#include <LibJS/Runtime/Object.h>
#include <LibJS/Runtime/WeakContainer.h>
#include <LibJS/SafeFunction.h>
#include <setjmp.h>

#ifdef AK_OS_SERENITY
#    include <serenity.h>
#endif

namespace JS {

#ifdef AK_OS_SERENITY
static int gc_perf_string_id;
#endif

// NOTE: We keep a per-thread list of custom ranges. This hinges on the assumption that there is one JS VM per thread.
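// Ranges are registered and unregistered by register_safe_function_closure() and
// unregister_safe_function_closure() at the bottom of this file.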
static __thread HashMap<FlatPtr*, size_t>* s_custom_ranges_for_conservative_scan = nullptr;

Heap::Heap(VM& vm)
    : m_vm(vm)
{
#ifdef AK_OS_SERENITY
    auto gc_signpost_string = "Garbage collection"sv;
    gc_perf_string_id = perf_register_string(gc_signpost_string.characters_without_null_termination(), gc_signpost_string.length());
#endif
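
    // These are ascending size classes: allocator_for_size() below returns the first
    // allocator whose cell size fits the request, so e.g. a 40-byte cell lands in the
    // 64-byte allocator. Anything larger than 3072 bytes has no allocator and asserts.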
    if constexpr (HeapBlock::min_possible_cell_size <= 16) {
        m_allocators.append(make<CellAllocator>(16));
    }
    static_assert(HeapBlock::min_possible_cell_size <= 24, "Heap Cell tracking uses too much data!");
    m_allocators.append(make<CellAllocator>(32));
    m_allocators.append(make<CellAllocator>(64));
    m_allocators.append(make<CellAllocator>(96));
    m_allocators.append(make<CellAllocator>(128));
    m_allocators.append(make<CellAllocator>(256));
    m_allocators.append(make<CellAllocator>(512));
    m_allocators.append(make<CellAllocator>(1024));
    m_allocators.append(make<CellAllocator>(3072));
}

Heap::~Heap()
{
    vm().string_cache().clear();
    collect_garbage(CollectionType::CollectEverything);
}

ALWAYS_INLINE CellAllocator& Heap::allocator_for_size(size_t cell_size)
{
    for (auto& allocator : m_allocators) {
        if (allocator->cell_size() >= cell_size)
            return *allocator;
    }
    dbgln("Cannot get CellAllocator for cell size {}, largest available is {}!", cell_size, m_allocators.last()->cell_size());
    VERIFY_NOT_REACHED();
}
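
// Allocation drives the collection cadence: each allocation ticks m_allocations_since_last_gc,
// and once the counter passes m_max_allocations_between_gc, the next allocation runs a full
// collection before handing out a new cell.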
Cell* Heap::allocate_cell(size_t size)
{
    if (should_collect_on_every_allocation()) {
        collect_garbage();
    } else if (m_allocations_since_last_gc > m_max_allocations_between_gc) {
        m_allocations_since_last_gc = 0;
        collect_garbage();
    } else {
        ++m_allocations_since_last_gc;
    }

    auto& allocator = allocator_for_size(size);
    return allocator.allocate_cell(*this);
}

void Heap::collect_garbage(CollectionType collection_type, bool print_report)
{
    VERIFY(!m_collecting_garbage);
    TemporaryChange change(m_collecting_garbage, true);

#ifdef AK_OS_SERENITY
    static size_t global_gc_counter = 0;
    perf_event(PERF_EVENT_SIGNPOST, gc_perf_string_id, global_gc_counter++);
#endif

    auto collection_measurement_timer = Core::ElapsedTimer::start_new();
    if (collection_type == CollectionType::CollectGarbage) {
        if (m_gc_deferrals) {
            m_should_gc_when_deferral_ends = true;
            return;
        }
        HashTable<Cell*> roots;
        gather_roots(roots);
        mark_live_cells(roots);
    }
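    // NOTE: CollectionType::CollectEverything skips the mark phase entirely, so every cell still
    // looks unmarked to the sweep below. This is how ~Heap() tears down all remaining cells.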
    sweep_dead_cells(print_report, collection_measurement_timer);
}

void Heap::gather_roots(HashTable<Cell*>& roots)
{
    vm().gather_roots(roots);
    gather_conservative_roots(roots);

    for (auto& handle : m_handles)
        roots.set(handle.cell());

    for (auto& vector : m_marked_vectors)
        vector.gather_roots(roots);

    if constexpr (HEAP_DEBUG) {
        dbgln("gather_roots:");
        for (auto* root : roots)
            dbgln("  + {}", root);
    }
}

__attribute__((no_sanitize("address"))) void Heap::gather_conservative_roots(HashTable<Cell*>& roots)
{
    FlatPtr dummy;

    dbgln_if(HEAP_DEBUG, "gather_conservative_roots:");
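
    // NOTE: setjmp() spills the callee-saved registers into buf, making cell pointers that
    // currently live only in registers visible to the jmp_buf scan below.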
    jmp_buf buf;
    setjmp(buf);

    HashTable<FlatPtr> possible_pointers;

    auto* raw_jmp_buf = reinterpret_cast<FlatPtr const*>(buf);

    auto add_possible_value = [&](FlatPtr data) {
        if constexpr (sizeof(FlatPtr*) == sizeof(Value)) {
            // Because Value stores pointers in non-canonical form, we have to check whether the top
            // bytes match any pointer-backed tag; in that case, we extract the pointer back to its
            // canonical form and add that as a possible pointer.
            if ((data & SHIFTED_IS_CELL_PATTERN) == SHIFTED_IS_CELL_PATTERN)
                possible_pointers.set(Value::extract_pointer_bits(data));
            else
                possible_pointers.set(data);
        } else {
            static_assert((sizeof(Value) % sizeof(FlatPtr*)) == 0);
            // In the 32-bit case we look at the upper and lower halves of each Value separately,
            // so we simply add both as possible pointers.
            possible_pointers.set(data);
        }
    };

    for (size_t i = 0; i < sizeof(buf) / sizeof(FlatPtr); ++i)
        add_possible_value(raw_jmp_buf[i]);

    auto stack_reference = bit_cast<FlatPtr>(&dummy);
    auto& stack_info = m_vm.stack_info();
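
    // NOTE: &dummy sits near the low end of the current stack frame, and StackInfo::top() is the
    // high end of the thread's stack, so this walk visits every word of every active frame.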
    for (FlatPtr stack_address = stack_reference; stack_address < stack_info.top(); stack_address += sizeof(FlatPtr)) {
        auto data = *reinterpret_cast<FlatPtr*>(stack_address);
        add_possible_value(data);
    }

    // NOTE: If we have any custom ranges registered, scan those as well.
    // This is where JS::SafeFunction closures get marked.
    if (s_custom_ranges_for_conservative_scan) {
        for (auto& custom_range : *s_custom_ranges_for_conservative_scan) {
            for (size_t i = 0; i < (custom_range.value / sizeof(FlatPtr)); ++i) {
                add_possible_value(custom_range.key[i]);
            }
        }
    }

    HashTable<HeapBlock*> all_live_heap_blocks;
    for_each_block([&](auto& block) {
        all_live_heap_blocks.set(&block);
        return IterationDecision::Continue;
    });

    for (auto possible_pointer : possible_pointers) {
        if (!possible_pointer)
            continue;
        dbgln_if(HEAP_DEBUG, "  ? {}", (void const*)possible_pointer);
        auto* possible_heap_block = HeapBlock::from_cell(reinterpret_cast<Cell const*>(possible_pointer));
        if (all_live_heap_blocks.contains(possible_heap_block)) {
            if (auto* cell = possible_heap_block->cell_from_possible_pointer(possible_pointer)) {
                if (cell->state() == Cell::State::Live) {
                    dbgln_if(HEAP_DEBUG, "  ?-> {}", (void const*)cell);
                    roots.set(cell);
                } else {
                    dbgln_if(HEAP_DEBUG, "  #-> {}", (void const*)cell);
                }
            }
        }
    }
}
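
// Conservative scanning is allowed to over-approximate: any word that merely looks like a cell
// pointer keeps that cell alive for this collection. That is safe (no live cell is ever freed),
// at the cost of occasionally letting a dead cell survive a cycle.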
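
// MarkingVisitor implements the mark phase: visit_edges() reports every cell referenced by the
// visited cell back to this visitor, so marking becomes a depth-first walk of the object graph.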
class MarkingVisitor final : public Cell::Visitor {
public:
    MarkingVisitor() = default;

    virtual void visit_impl(Cell& cell) override
    {
        if (cell.is_marked())
            return;
        dbgln_if(HEAP_DEBUG, "  ! {}", &cell);

        cell.set_marked(true);
        cell.visit_edges(*this);
    }
};

void Heap::mark_live_cells(HashTable<Cell*> const& roots)
{
    dbgln_if(HEAP_DEBUG, "mark_live_cells:");

    MarkingVisitor visitor;
    for (auto* root : roots)
        visitor.visit(root);
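
    // Cells that were uprooted (see Heap::uproot_cell()) are force-unmarked here, so the
    // following sweep reclaims them in this collection even if they were reachable.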
    for (auto& inverse_root : m_uprooted_cells)
        inverse_root->set_marked(false);

    m_uprooted_cells.clear();
}
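
// Sweeping walks every block, deallocates unmarked cells, and clears the mark bit on survivors.
// Blocks that lost their last live cell are handed back to their CellAllocator, and blocks that
// were full but now have room are put back on the allocator's usable list.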
void Heap::sweep_dead_cells(bool print_report, Core::ElapsedTimer const& measurement_timer)
{
    dbgln_if(HEAP_DEBUG, "sweep_dead_cells:");
    Vector<HeapBlock*, 32> empty_blocks;
    Vector<HeapBlock*, 32> full_blocks_that_became_usable;

    size_t collected_cells = 0;
    size_t live_cells = 0;
    size_t collected_cell_bytes = 0;
    size_t live_cell_bytes = 0;

    for_each_block([&](auto& block) {
        bool block_has_live_cells = false;
        bool block_was_full = block.is_full();
        block.template for_each_cell_in_state<Cell::State::Live>([&](Cell* cell) {
            if (!cell->is_marked()) {
                dbgln_if(HEAP_DEBUG, "  ~ {}", cell);
                block.deallocate(cell);
                ++collected_cells;
                collected_cell_bytes += block.cell_size();
            } else {
                cell->set_marked(false);
                block_has_live_cells = true;
                ++live_cells;
                live_cell_bytes += block.cell_size();
            }
        });
        if (!block_has_live_cells)
            empty_blocks.append(&block);
        else if (block_was_full != block.is_full())
            full_blocks_that_became_usable.append(&block);
        return IterationDecision::Continue;
    });

    for (auto& weak_container : m_weak_containers)
        weak_container.remove_dead_cells({});

    for (auto* block : empty_blocks) {
        dbgln_if(HEAP_DEBUG, "  - HeapBlock empty @ {}: cell_size={}", block, block->cell_size());
        allocator_for_size(block->cell_size()).block_did_become_empty({}, *block);
    }

    for (auto* block : full_blocks_that_became_usable) {
        dbgln_if(HEAP_DEBUG, "  - HeapBlock usable again @ {}: cell_size={}", block, block->cell_size());
        allocator_for_size(block->cell_size()).block_did_become_usable({}, *block);
    }

    if constexpr (HEAP_DEBUG) {
        for_each_block([&](auto& block) {
            dbgln("  > Live HeapBlock @ {}: cell_size={}", &block, block.cell_size());
            return IterationDecision::Continue;
        });
    }

    int time_spent = measurement_timer.elapsed();

    if (print_report) {
        size_t live_block_count = 0;
        for_each_block([&](auto&) {
            ++live_block_count;
            return IterationDecision::Continue;
        });

        dbgln("Garbage collection report");
        dbgln("=============================================");
        dbgln("     Time spent: {} ms", time_spent);
        dbgln("     Live cells: {} ({} bytes)", live_cells, live_cell_bytes);
        dbgln("Collected cells: {} ({} bytes)", collected_cells, collected_cell_bytes);
        dbgln("    Live blocks: {} ({} bytes)", live_block_count, live_block_count * HeapBlock::block_size);
        dbgln("   Freed blocks: {} ({} bytes)", empty_blocks.size(), empty_blocks.size() * HeapBlock::block_size);
        dbgln("=============================================");
    }
}
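
// The following did_create_*/did_destroy_* pairs are bookkeeping callbacks: HandleImpl,
// MarkedVectorBase, and WeakContainer register themselves on construction and unregister on
// destruction, so gather_roots() and sweep_dead_cells() can see them. The Badge parameters
// ensure only those classes can call in.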
void Heap::did_create_handle(Badge<HandleImpl>, HandleImpl& impl)
{
    VERIFY(!m_handles.contains(impl));
    m_handles.append(impl);
}

void Heap::did_destroy_handle(Badge<HandleImpl>, HandleImpl& impl)
{
    VERIFY(m_handles.contains(impl));
    m_handles.remove(impl);
}

void Heap::did_create_marked_vector(Badge<MarkedVectorBase>, MarkedVectorBase& vector)
{
    VERIFY(!m_marked_vectors.contains(vector));
    m_marked_vectors.append(vector);
}

void Heap::did_destroy_marked_vector(Badge<MarkedVectorBase>, MarkedVectorBase& vector)
{
    VERIFY(m_marked_vectors.contains(vector));
    m_marked_vectors.remove(vector);
}

void Heap::did_create_weak_container(Badge<WeakContainer>, WeakContainer& set)
{
    VERIFY(!m_weak_containers.contains(set));
    m_weak_containers.append(set);
}

void Heap::did_destroy_weak_container(Badge<WeakContainer>, WeakContainer& set)
{
    VERIFY(m_weak_containers.contains(set));
    m_weak_containers.remove(set);
}
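
// GC deferral is used through the RAII helper DeferGC (LibJS/Heap/DeferGC.h), e.g.:
//
//     {
//         DeferGC defer_gc(heap);
//         // ... allocate freely; no collection can run here ...
//     }
//
// If a collection was requested while deferred, undefer_gc() runs it once the last
// deferral ends.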
void Heap::defer_gc(Badge<DeferGC>)
{
    ++m_gc_deferrals;
}

void Heap::undefer_gc(Badge<DeferGC>)
{
    VERIFY(m_gc_deferrals > 0);
    --m_gc_deferrals;

    if (!m_gc_deferrals) {
        if (m_should_gc_when_deferral_ends)
            collect_garbage();
        m_should_gc_when_deferral_ends = false;
    }
}

void Heap::uproot_cell(Cell* cell)
{
    m_uprooted_cells.append(cell);
}
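
// JS::SafeFunction calls these two hooks with the address and size of its closure storage,
// adding that storage to s_custom_ranges_for_conservative_scan. Any cell pointer captured by
// value, e.g. (illustrative):
//
//     JS::SafeFunction<void()> callback = [cell] { do_something(*cell); };
//
// is therefore found by gather_conservative_roots() and kept alive while the closure exists.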
void register_safe_function_closure(void* base, size_t size)
{
    if (!s_custom_ranges_for_conservative_scan) {
        // FIXME: This per-thread HashMap is currently leaked on thread exit.
        s_custom_ranges_for_conservative_scan = new HashMap<FlatPtr*, size_t>;
    }
    auto result = s_custom_ranges_for_conservative_scan->set(reinterpret_cast<FlatPtr*>(base), size);
    VERIFY(result == AK::HashSetResult::InsertedNewEntry);
}

void unregister_safe_function_closure(void* base, size_t)
{
    VERIFY(s_custom_ranges_for_conservative_scan);
    bool did_remove = s_custom_ranges_for_conservative_scan->remove(reinterpret_cast<FlatPtr*>(base));
    VERIFY(did_remove);
}

}