If the Ladybird process crashes or exits normally, the IPC transport connection with WebContent may be shut down after WebContent has sent a synchronous request (for example, WebContentClient DidRequestCookie) but before Ladybird has provided the matching response (for example, WebContentClient DidRequestCookieResponse). If other IPC events (for example, WebContentServer DidPaint, SetSystemVisibilityState, MouseEvent, CloseServer, etc.) are also queued when the connection is shut down, this can leave behind a runaway WebContent process.

At the core of the issue, the loop waiting for the matching synchronous response keeps spinning even after the IPC connection reports that it has been shut down, but only when unrelated events happen to arrive before the shutdown is detected. Those unrelated events are never processed, because the loop is stuck waiting for a response that will never be sent now that the Ladybird process has stopped. Since the shutdown is not handled while other events are present, WebContent may keep posting new events for transfer if the page is very active. The queue then grows without bound, producing a slow or very fast memory leak in the WebContent process, sometimes all the way to total system memory exhaustion. If few or no new events are sent, the leak may be hard to detect.

This PR fixes the faulty IPC shutdown handling so the wait loop no longer gets stuck when messages are present in the receive queue: any remaining messages are processed immediately before returning to the caller.
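
To make the failure mode and the fix easier to follow, here is a minimal, self-contained sketch of the idea. Message, FakeTransport, and wait_for_response are illustrative stand-ins invented for this example, not the LibIPC API: the wait loop stops as soon as the transport reports it is closed, and any messages that were queued alongside the missing response are processed before returning instead of being left behind.

#include <cstdio>
#include <deque>
#include <optional>

// Hypothetical stand-ins for the real LibIPC types, for illustration only.
struct Message {
    int id;
};

struct FakeTransport {
    bool open = true;
    std::deque<Message> incoming; // what the peer managed to send before it died
};

// Minimal model of the fixed wait loop: give up once the transport is closed,
// then process any leftover messages rather than spinning on a response that
// will never arrive.
std::optional<Message> wait_for_response(FakeTransport& transport, std::deque<Message>& unprocessed, int wanted_id)
{
    for (;;) {
        // Check whether the response is already sitting in the queue.
        for (size_t i = 0; i < unprocessed.size(); ++i) {
            if (unprocessed[i].id == wanted_id) {
                auto found = unprocessed[i];
                unprocessed.erase(unprocessed.begin() + i);
                return found;
            }
        }

        // The peer is gone: stop waiting, even if unrelated messages are
        // still queued. This is the condition the old loop effectively ignored.
        if (!transport.open)
            break;

        // Drain whatever arrived; if nothing is left, the peer has hung up.
        if (transport.incoming.empty()) {
            transport.open = false;
            continue;
        }
        unprocessed.push_back(transport.incoming.front());
        transport.incoming.pop_front();
    }

    // Process the leftovers so they are not silently dropped and the
    // peer-facing queue cannot keep growing unbounded.
    while (!unprocessed.empty()) {
        std::printf("handling leftover message %d\n", unprocessed.front().id);
        unprocessed.pop_front();
    }
    return std::nullopt;
}

int main()
{
    FakeTransport transport;
    transport.incoming = { Message { 7 }, Message { 9 } }; // unrelated events, no response
    std::deque<Message> unprocessed;

    // Waiting for response id 42, which the dead peer will never send.
    if (!wait_for_response(transport, unprocessed, 42))
        std::printf("no response; connection was shut down\n");
}

The actual change lives in ConnectionBase::wait_for_specific_endpoint_message_impl in the file below, which breaks out of its wait loop once is_open() reports the transport closed and then calls handle_messages() on whatever remains in m_unprocessed_messages.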

/*
 * Copyright (c) 2021-2024, Andreas Kling <andreas@ladybird.org>
 * Copyright (c) 2025, Aliaksandr Kalenik <kalenik.aliaksandr@gmail.com>
 * Copyright (c) 2022, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Vector.h>
#include <LibCore/Socket.h>
#include <LibIPC/Connection.h>
#include <LibIPC/Message.h>
#include <LibIPC/Stub.h>

namespace IPC {

ConnectionBase::ConnectionBase(IPC::Stub& local_stub, NonnullOwnPtr<Transport> transport, u32 local_endpoint_magic)
    : m_local_stub(local_stub)
    , m_transport(move(transport))
    , m_local_endpoint_magic(local_endpoint_magic)
{
    m_transport->set_up_read_hook([this] {
        NonnullRefPtr protect = *this;

        if (auto result = drain_messages_from_peer(); result.is_error())
            dbgln("Read hook error while draining messages: {}", result.error());

        handle_messages();
    });
}

ConnectionBase::~ConnectionBase() = default;

bool ConnectionBase::is_open() const
{
    return m_transport->is_open();
}

ErrorOr<void> ConnectionBase::post_message(Message const& message)
{
    return post_message(TRY(message.encode()));
}

ErrorOr<void> ConnectionBase::post_message(MessageBuffer buffer)
{
    // NOTE: If this connection is being shut down, but has not yet been destroyed,
    //       the socket will be closed. Don't try to send more messages.
    if (!m_transport->is_open())
        return Error::from_string_literal("Trying to post_message during IPC shutdown");

    MUST(buffer.transfer_message(*m_transport));

    return {};
}

void ConnectionBase::shutdown()
{
    m_transport->close();
    die();
}

void ConnectionBase::shutdown_with_error(Error const& error)
{
    dbgln("IPC::ConnectionBase ({:p}) had an error ({}), disconnecting.", this, error);
    shutdown();
}

void ConnectionBase::handle_messages()
{
    auto messages = move(m_unprocessed_messages);
    for (auto& message : messages) {
        if (message->endpoint_magic() != m_local_endpoint_magic)
            continue;

        if (!is_open())
            dbgln("Handling message while connection closed: {}", message->message_name());

        auto handler_result = m_local_stub.handle(move(message));
        if (handler_result.is_error()) {
            dbgln("IPC::ConnectionBase::handle_messages: {}", handler_result.error());
            continue;
        }

        if (!is_open())
            continue;

        if (auto response = handler_result.release_value()) {
            if (auto post_result = post_message(*response); post_result.is_error())
                dbgln("IPC::ConnectionBase::handle_messages: {}", post_result.error());
        }
    }
}

void ConnectionBase::wait_for_transport_to_become_readable()
{
    m_transport->wait_until_readable();
}

ErrorOr<void> ConnectionBase::drain_messages_from_peer()
{
    auto schedule_shutdown = m_transport->read_as_many_messages_as_possible_without_blocking([&](auto&& raw_message) {
        if (auto message = try_parse_message(raw_message.bytes, raw_message.fds)) {
            m_unprocessed_messages.append(message.release_nonnull());
        } else {
            dbgln("Failed to parse IPC message {:hex-dump}", raw_message.bytes);
            VERIFY_NOT_REACHED();
        }
    });

    if (!m_unprocessed_messages.is_empty()) {
        deferred_invoke([this] {
            handle_messages();
        });
    }

    if (schedule_shutdown == Transport::ShouldShutdown::Yes) {
        deferred_invoke([this] {
            shutdown();
        });
        return Error::from_string_literal("IPC connection EOF");
    }

    return {};
}

OwnPtr<IPC::Message> ConnectionBase::wait_for_specific_endpoint_message_impl(u32 endpoint_magic, int message_id)
{
    for (;;) {
        // Double check we don't already have the event waiting for us.
        // Otherwise we might end up blocked for a while for no reason.
        for (size_t i = 0; i < m_unprocessed_messages.size(); ++i) {
            auto& message = m_unprocessed_messages[i];
            if (message->endpoint_magic() != endpoint_magic)
                continue;
            if (message->message_id() == message_id)
                return m_unprocessed_messages.take(i);
        }

        // If the connection has been shut down, the response we are waiting for
        // will never arrive, so stop waiting instead of spinning forever.
        if (!is_open())
            break;

        wait_for_transport_to_become_readable();
        if (drain_messages_from_peer().is_error())
            break;
    }

    dbgln("Failed to receive message_id: {}", message_id);

    if (!m_unprocessed_messages.is_empty()) {
        m_transport->close();

        dbgln("Transport shutdown with unprocessed messages left: {}", m_unprocessed_messages.size());
        for (size_t i = 0; i < m_unprocessed_messages.size(); ++i) {
            auto& message = m_unprocessed_messages[i];
            dbgln(" Message {:03} is: {:2}-{}", i, message->message_id(), message->message_name());
        }

        // Process the messages that were queued alongside the missing response,
        // so they are not silently dropped before we return to the caller.
        dbgln("Handling remaining messages before returning to caller");
        handle_messages();
        dbgln("Messages handled, returning to caller");
    }

    return {};
}

}