Mirror of https://github.com/LadybirdBrowser/ladybird.git (synced 2025-12-07 21:59:54 +00:00)
LibHTTP: Place HTTP disk cache log points behind a debug flag
These log points are quite verbose. Before we enable the disk cache by default, let's place them behind a debug flag.
Commit: aae8574d25 (parent: adcf5462af)
Author: https://github.com/trflynn89
Pull-request: https://github.com/LadybirdBrowser/ladybird/pull/6991
Notes: github-actions[bot], 2025-12-02 11:21:02 +00:00

5 changed files with 26 additions and 18 deletions
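For context on the mechanism: dbgln_if is AK's flag-gated variant of dbgln, and the HTTP_DISK_CACHE_DEBUG flag is presumably declared in AK/Debug.h, which the diff below starts including. The snippet that follows is a minimal stand-alone sketch of that pattern, assuming the flag is a compile-time 0/1 constant; the SKETCH_DBGLN_IF macro, the printf-based output, and the main driver are illustrative stand-ins, not AK's actual definitions.

// Minimal sketch of flag-gated logging (requires C++17 for if constexpr).
// HTTP_DISK_CACHE_DEBUG is assumed to be a compile-time 0/1 constant; in
// Ladybird it would come from AK/Debug.h. SKETCH_DBGLN_IF and std::printf
// stand in for AK's dbgln_if/dbgln and are not the real definitions.
#include <cstdio>

#ifndef HTTP_DISK_CACHE_DEBUG
#    define HTTP_DISK_CACHE_DEBUG 0 // off by default; set to 1 to see the log points
#endif

#define SKETCH_DBGLN_IF(flag, ...)    \
    do {                              \
        if constexpr (flag) {         \
            std::printf(__VA_ARGS__); \
            std::printf("\n");        \
        }                             \
    } while (0)

int main()
{
    // With the flag at 0, the branch is discarded at compile time, so the
    // verbose cache messages cost nothing in a default build.
    SKETCH_DBGLN_IF(HTTP_DISK_CACHE_DEBUG, "Created disk cache entry for %s", "https://example.com/");
    return 0;
}

In the diff itself only two things change: AK/Debug.h is now included, and each dbgln call gains the HTTP_DISK_CACHE_DEBUG guard; the log messages themselves are untouched.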
@@ -4,6 +4,7 @@
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
+#include <AK/Debug.h>
 #include <LibCore/EventLoop.h>
 #include <LibCore/StandardPaths.h>
 #include <LibFileSystem/FileSystem.h>
@@ -64,11 +65,11 @@ Variant<Optional<CacheEntryWriter&>, DiskCache::CacheHasOpenEntry> DiskCache::cr
     auto cache_entry = CacheEntryWriter::create(*this, m_index, cache_key, move(serialized_url), request_start_time, current_time_offset_for_testing);
     if (cache_entry.is_error()) {
-        dbgln("\033[31;1mUnable to create cache entry for\033[0m {}: {}", url, cache_entry.error());
+        dbgln_if(HTTP_DISK_CACHE_DEBUG, "\033[31;1mUnable to create cache entry for\033[0m {}: {}", url, cache_entry.error());
         return Optional<CacheEntryWriter&> {};
     }
 
-    dbgln("\033[32;1mCreated disk cache entry for\033[0m {}", url);
+    dbgln_if(HTTP_DISK_CACHE_DEBUG, "\033[32;1mCreated disk cache entry for\033[0m {}", url);
 
     auto* cache_entry_pointer = cache_entry.value().ptr();
     m_open_cache_entries.ensure(cache_key).append(cache_entry.release_value());
@@ -89,13 +90,13 @@ Variant<Optional<CacheEntryReader&>, DiskCache::CacheHasOpenEntry> DiskCache::op
     auto index_entry = m_index.find_entry(cache_key);
     if (!index_entry.has_value()) {
-        dbgln("\033[35;1mNo disk cache entry for\033[0m {}", url);
+        dbgln_if(HTTP_DISK_CACHE_DEBUG, "\033[35;1mNo disk cache entry for\033[0m {}", url);
         return Optional<CacheEntryReader&> {};
     }
 
     auto cache_entry = CacheEntryReader::create(*this, m_index, cache_key, index_entry->response_headers, index_entry->data_size);
     if (cache_entry.is_error()) {
-        dbgln("\033[31;1mUnable to open cache entry for\033[0m {}: {}", url, cache_entry.error());
+        dbgln_if(HTTP_DISK_CACHE_DEBUG, "\033[31;1mUnable to open cache entry for\033[0m {}: {}", url, cache_entry.error());
         m_index.remove_entry(cache_key);
 
         return Optional<CacheEntryReader&> {};
@@ -109,11 +110,11 @@ Variant<Optional<CacheEntryReader&>, DiskCache::CacheHasOpenEntry> DiskCache::op
     switch (cache_lifetime_status(response_headers, freshness_lifetime, current_age)) {
     case CacheLifetimeStatus::Fresh:
-        dbgln("\033[32;1mOpened disk cache entry for\033[0m {} (lifetime={}s age={}s) ({} bytes)", url, freshness_lifetime.to_seconds(), current_age.to_seconds(), index_entry->data_size);
+        dbgln_if(HTTP_DISK_CACHE_DEBUG, "\033[32;1mOpened disk cache entry for\033[0m {} (lifetime={}s age={}s) ({} bytes)", url, freshness_lifetime.to_seconds(), current_age.to_seconds(), index_entry->data_size);
         break;
 
     case CacheLifetimeStatus::Expired:
-        dbgln("\033[33;1mCache entry expired for\033[0m {} (lifetime={}s age={}s)", url, freshness_lifetime.to_seconds(), current_age.to_seconds());
+        dbgln_if(HTTP_DISK_CACHE_DEBUG, "\033[33;1mCache entry expired for\033[0m {} (lifetime={}s age={}s)", url, freshness_lifetime.to_seconds(), current_age.to_seconds());
         cache_entry.value()->remove();
 
         return Optional<CacheEntryReader&> {};
@@ -123,7 +124,7 @@ Variant<Optional<CacheEntryReader&>, DiskCache::CacheHasOpenEntry> DiskCache::op
         if (check_if_cache_has_open_entry(request, cache_key, url, CheckReaderEntries::Yes))
             return Optional<CacheEntryReader&> {};
 
-        dbgln("\033[36;1mMust revalidate disk cache entry for\033[0m {} (lifetime={}s age={}s)", url, freshness_lifetime.to_seconds(), current_age.to_seconds());
+        dbgln_if(HTTP_DISK_CACHE_DEBUG, "\033[36;1mMust revalidate disk cache entry for\033[0m {} (lifetime={}s age={}s)", url, freshness_lifetime.to_seconds(), current_age.to_seconds());
         cache_entry.value()->set_must_revalidate();
         break;
     }
@@ -142,7 +143,7 @@ bool DiskCache::check_if_cache_has_open_entry(CacheRequest& request, u64 cache_k
     for (auto const& open_entry : *open_entries) {
         if (is<CacheEntryWriter>(*open_entry)) {
-            dbgln("\033[36;1mDeferring disk cache entry for\033[0m {} (waiting for existing writer)", url);
+            dbgln_if(HTTP_DISK_CACHE_DEBUG, "\033[36;1mDeferring disk cache entry for\033[0m {} (waiting for existing writer)", url);
             m_requests_waiting_completion.ensure(cache_key).append(request);
             return true;
         }
@@ -150,7 +151,7 @@ bool DiskCache::check_if_cache_has_open_entry(CacheRequest& request, u64 cache_k
         // We allow concurrent readers unless another reader is open for revalidation. That reader will issue the network
         // request, which may then result in the cache entry being updated or deleted.
         if (check_reader_entries == CheckReaderEntries::Yes || as<CacheEntryReader>(*open_entry).must_revalidate()) {
-            dbgln("\033[36;1mDeferring disk cache entry for\033[0m {} (waiting for existing reader)", url);
+            dbgln_if(HTTP_DISK_CACHE_DEBUG, "\033[36;1mDeferring disk cache entry for\033[0m {} (waiting for existing reader)", url);
             m_requests_waiting_completion.ensure(cache_key).append(request);
             return true;
         }