2024-10-22 15:47:33 -06:00
|
|
|
/*
|
|
|
|
|
* Copyright (c) 2024, Andrew Kaster <andrew@ladybird.org>
|
2025-04-07 04:17:36 +02:00
|
|
|
* Copyright (c) 2025, Aliaksandr Kalenik <kalenik.aliaksandr@gmail.com>
|
2024-10-22 15:47:33 -06:00
|
|
|
*
|
|
|
|
|
* SPDX-License-Identifier: BSD-2-Clause
|
|
|
|
|
*/
|
|
|
|
|
|
2026-01-21 21:43:07 +01:00
|
|
|
#include <AK/Checked.h>
|
2024-10-22 15:47:33 -06:00
|
|
|
#include <AK/NonnullOwnPtr.h>
|
2026-03-11 13:05:09 +01:00
|
|
|
#include <AK/ScopeGuard.h>
|
2026-01-21 21:54:20 +01:00
|
|
|
#include <AK/Types.h>
|
2024-10-22 15:47:33 -06:00
|
|
|
#include <LibCore/Socket.h>
|
|
|
|
|
#include <LibCore/System.h>
|
2026-03-13 17:11:22 +01:00
|
|
|
#include <LibIPC/Attachment.h>
|
2026-03-14 17:34:46 +01:00
|
|
|
#include <LibIPC/File.h>
|
2026-01-21 22:07:14 +01:00
|
|
|
#include <LibIPC/Limits.h>
|
2026-03-14 17:34:46 +01:00
|
|
|
#include <LibIPC/TransportHandle.h>
|
2024-10-22 15:47:33 -06:00
|
|
|
#include <LibIPC/TransportSocket.h>
|
2025-09-17 15:45:03 -05:00
|
|
|
#include <LibThreading/Thread.h>
|
2024-10-22 15:47:33 -06:00
|
|
|
|
|
|
|
|
namespace IPC {
|
|
|
|
|
|
2026-03-19 19:42:05 +01:00
|
|
|
// Wraps an already-connected local socket in a new TransportSocket.
ErrorOr<NonnullOwnPtr<TransportSocket>> TransportSocket::from_socket(NonnullOwnPtr<Core::LocalSocket> socket)
{
    auto transport = make<TransportSocket>(move(socket));
    return transport;
}
|
|
|
|
|
|
2026-03-11 13:05:09 +01:00
|
|
|
// Creates a connected pair of IPC endpoints backed by a Unix socketpair.
// The local end becomes a fully-initialized TransportSocket; the remote end is
// returned as a bare TransportHandle so its fd can be transferred to another
// process (which will construct its own transport around it).
ErrorOr<TransportSocket::Paired> TransportSocket::create_paired()
{
    int fds[2] {};
    TRY(Core::System::socketpair(AF_LOCAL, SOCK_STREAM, 0, fds));

    // If anything below fails before ownership is handed off, make sure neither
    // raw fd leaks.
    ArmedScopeGuard guard_fd_0 { [&] { MUST(Core::System::close(fds[0])); } };
    ArmedScopeGuard guard_fd_1 { [&] { MUST(Core::System::close(fds[1])); } };

    // From here on, socket0 owns fds[0] and will close it on destruction, so the
    // guard must be disarmed immediately to avoid a double close.
    auto socket0 = TRY(Core::LocalSocket::adopt_fd(fds[0]));
    guard_fd_0.disarm();
    TRY(socket0->set_close_on_exec(true));
    // Non-blocking: the IO thread multiplexes reads/writes via poll().
    TRY(socket0->set_blocking(false));

    TRY(Core::System::set_close_on_exec(fds[1], true));
    // fds[1] is about to be owned by the returned TransportHandle's File.
    guard_fd_1.disarm();

    // Local side gets a full transport; remote side is just a handle containing the raw fd for transfer to another process.
    return Paired {
        make<TransportSocket>(move(socket0)),
        TransportHandle { File::adopt_fd(fds[1]) },
    };
}
|
|
|
|
|
|
2026-03-04 21:42:48 +01:00
|
|
|
// Appends one wire message (header bytes followed by payload bytes) and any
// file descriptors that travel with it to the outgoing queue. Thread-safe:
// callers enqueue while the IO thread concurrently drains via peek()/discard().
void SendQueue::enqueue_message(ReadonlyBytes header, ReadonlyBytes payload, Vector<int>&& fds)
{
    Threading::MutexLocker lock { m_mutex };

    // The backing stream is in-memory, so a write can neither fail nor come up short.
    for (auto chunk : { header, payload })
        VERIFY(MUST(m_stream.write_some(chunk)) == chunk.size());

    m_fds.append(fds.data(), fds.size());
}
|
|
|
|
|
|
2025-04-14 03:22:10 +02:00
|
|
|
// Copies up to max_bytes of queued data, plus up to MAX_TRANSFER_FDS pending
// file descriptors, without consuming them. A subsequent discard() with the
// amounts actually transmitted removes the sent prefix from the queue.
SendQueue::BytesAndFds SendQueue::peek(size_t max_bytes)
{
    Threading::MutexLocker lock { m_mutex };

    BytesAndFds snapshot;

    auto available = m_stream.used_buffer_size();
    snapshot.bytes.resize(min(max_bytes, available));
    m_stream.peek_some(snapshot.bytes);

    if (!m_fds.is_empty()) {
        // Only as many fds as a single sendmsg() can carry.
        auto fd_count = min(m_fds.size(), Core::LocalSocket::MAX_TRANSFER_FDS);
        snapshot.fds = Vector<int> { m_fds.span().slice(0, fd_count) };
        // NOTE: Nothing is removed here; discard() is responsible for dropping the fds we hand out.
    }

    return snapshot;
}
|
|
|
|
|
|
2025-04-14 03:22:10 +02:00
|
|
|
// Drops the first bytes_count bytes and fds_count file descriptors from the
// queue — the prefix that a previous peek() handed out and that has now been
// written to the socket.
void SendQueue::discard(size_t bytes_count, size_t fds_count)
{
    Threading::MutexLocker lock { m_mutex };
    m_fds.remove(0, fds_count);
    MUST(m_stream.discard(bytes_count));
}
|
|
|
|
|
|
2024-10-22 15:47:33 -06:00
|
|
|
// Takes ownership of a connected local socket and spins up the dedicated IO
// thread that performs all actual reads/writes on it. Also creates the two
// self-pipes used for cross-thread signaling:
//   - wakeup pipe: event-loop side -> IO thread (new data to send / state change)
//   - notify-hook pipe: IO thread -> event-loop side (messages arrived / EOF)
TransportSocket::TransportSocket(NonnullOwnPtr<Core::LocalSocket> socket)
    : m_socket(move(socket))
{
    // Disable the socket's built-in notifier. TransportSocket uses its own pipe-based notification mechanism on the IO
    // thread, so this notifier is unused. Otherwise, when the socket reaches EOF, this notifier is disabled from the IO
    // thread. In the Qt UI, this causes QSocketNotifier destruction to be deferred. If the socket is closed before the
    // deferred destruction runs, Qt detects an invalid socket and prints a warning.
    m_socket->set_notifications_enabled(false);

    // Best effort: enlarge kernel buffers to reduce short writes; failure is harmless.
    (void)Core::System::setsockopt(m_socket->fd().value(), SOL_SOCKET, SO_SNDBUF, &SOCKET_BUFFER_SIZE, sizeof(SOCKET_BUFFER_SIZE));
    (void)Core::System::setsockopt(m_socket->fd().value(), SOL_SOCKET, SO_RCVBUF, &SOCKET_BUFFER_SIZE, sizeof(SOCKET_BUFFER_SIZE));

    // Ref-counted so the IO thread and this object can both hold it safely.
    m_send_queue = adopt_ref(*new SendQueue);

    // Non-blocking so a full pipe never stalls either side; readers simply drain it.
    auto fds = MUST(Core::System::pipe2(O_CLOEXEC | O_NONBLOCK));
    m_wakeup_io_thread_read_fd = adopt_ref(*new AutoCloseFileDescriptor(fds[0]));
    m_wakeup_io_thread_write_fd = adopt_ref(*new AutoCloseFileDescriptor(fds[1]));

    {
        auto fds = MUST(Core::System::pipe2(O_CLOEXEC | O_NONBLOCK));
        m_notify_hook_read_fd = adopt_ref(*new AutoCloseFileDescriptor(fds[0]));
        m_notify_hook_write_fd = adopt_ref(*new AutoCloseFileDescriptor(fds[1]));
    }

    // Start the IO thread last, once every member it touches is initialized.
    m_io_thread = Threading::Thread::construct("IPC IO"sv, [this] { return io_thread_loop(); });
    m_io_thread->start();
}
|
|
|
|
|
|
|
|
|
|
// Main loop of the dedicated IO thread. Polls the IPC socket (and the wakeup
// pipe) and services reads/writes until told to stop via m_io_thread_state.
// In SendPendingMessagesAndStop mode it keeps running until the send queue
// drains, then transitions itself to Stopped.
intptr_t TransportSocket::io_thread_loop()
{
    Array<struct pollfd, 2> pollfds;
    for (;;) {
        // Peek a single byte just to learn whether anything is queued for sending.
        auto want_to_write = [&] {
            auto [bytes, fds] = m_send_queue->peek(1);
            return !bytes.is_empty() || !fds.is_empty();
        }();

        auto state = m_io_thread_state.load();
        if (state == IOThreadState::Stopped)
            break;
        if (state == IOThreadState::SendPendingMessagesAndStop && !want_to_write) {
            m_io_thread_state = IOThreadState::Stopped;
            break;
        }

        // Only ask for writability when there is something to send, to avoid
        // busy-looping on an always-writable socket.
        short events = POLLIN;
        if (want_to_write)
            events |= POLLOUT;
        pollfds[0] = { .fd = m_socket->fd().value(), .events = events, .revents = 0 };
        pollfds[1] = { .fd = m_wakeup_io_thread_read_fd->value(), .events = POLLIN, .revents = 0 };

        // Block indefinitely; EINTR just means retry.
        ErrorOr<int> result { 0 };
        do {
            result = Core::System::poll(pollfds, -1);
        } while (result.is_error() && result.error().code() == EINTR);
        if (result.is_error()) {
            dbgln("TransportSocket poll error: {}", result.error());
            m_io_thread_state = IOThreadState::Stopped;
            break;
        }

        // Drain the wakeup pipe; the byte count is irrelevant, it only signals
        // "re-evaluate state / send queue".
        if (pollfds[1].revents & POLLIN) {
            char buf[64];
            // The wakeup pipe is non-blocking, so EAGAIN is possible if there's a spurious wakeup.
            (void)Core::System::read(m_wakeup_io_thread_read_fd->value(), { buf, sizeof(buf) });
        }

        if (pollfds[0].revents & POLLIN)
            read_incoming_messages();

        // Peer hung up: nothing more will arrive; shut the loop down.
        if (pollfds[0].revents & POLLHUP) {
            m_io_thread_state = IOThreadState::Stopped;
            break;
        }

        if (pollfds[0].revents & (POLLERR | POLLNVAL)) {
            dbgln("TransportSocket poll: socket error (POLLERR or POLLNVAL)");
            m_io_thread_state = IOThreadState::Stopped;
            break;
        }

        // Socket is writable and we have pending data: push out up to 4 KiB
        // (plus any queued fds) per iteration.
        if (pollfds[0].revents & POLLOUT) {
            auto [bytes, fds] = m_send_queue->peek(4096);
            if (!bytes.is_empty() || !fds.is_empty()) {
                ReadonlyBytes remaining = bytes;
                if (transfer_data(remaining, fds) == TransferState::SocketClosed) {
                    m_io_thread_state = IOThreadState::Stopped;
                }
            }
        }
    }

    VERIFY(m_io_thread_state == IOThreadState::Stopped);
    // If the socket is merely being handed off to another process, the stop is
    // not an EOF; otherwise wake any waiters so they observe the disconnect.
    if (!m_is_being_transferred.load(AK::MemoryOrder::memory_order_acquire)) {
        m_peer_eof = true;
        m_incoming_cv.broadcast();
        notify_read_available();
    }
    return 0;
}
|
|
|
|
|
|
2025-10-13 04:58:27 +02:00
|
|
|
void TransportSocket::wake_io_thread()
|
2025-05-20 16:21:17 -04:00
|
|
|
{
|
2025-10-13 04:58:27 +02:00
|
|
|
Array<u8, 1> bytes = { 0 };
|
|
|
|
|
(void)Core::System::write(m_wakeup_io_thread_write_fd->value(), bytes);
|
2025-05-20 16:21:17 -04:00
|
|
|
}
|
|
|
|
|
|
2025-10-13 04:58:27 +02:00
|
|
|
// Stops and joins the IO thread before any members are destroyed, then drops
// the notifier watching the notify-hook pipe.
TransportSocket::~TransportSocket()
{
    stop_io_thread(IOThreadState::Stopped);
    m_read_hook_notifier.clear();
}
|
2025-05-20 16:21:17 -04:00
|
|
|
|
2025-10-13 04:58:27 +02:00
|
|
|
// Requests the IO thread to stop — either immediately (Stopped) or after the
// send queue drains (SendPendingMessagesAndStop) — then wakes it and joins.
// The order matters: the state must be published before the wakeup so the IO
// thread observes it when poll() returns.
void TransportSocket::stop_io_thread(IOThreadState desired_state)
{
    VERIFY(desired_state == IOThreadState::Stopped || desired_state == IOThreadState::SendPendingMessagesAndStop);
    m_io_thread_state.store(desired_state, AK::MemoryOrder::memory_order_release);
    wake_io_thread();
    // Safe to call repeatedly; join only if the thread hasn't been joined yet.
    if (m_io_thread && m_io_thread->needs_to_be_joined())
        (void)m_io_thread->join();
}
|
2024-10-22 15:47:33 -06:00
|
|
|
|
2026-02-12 23:33:40 -06:00
|
|
|
void TransportSocket::notify_read_available()
|
|
|
|
|
{
|
|
|
|
|
if (!m_notify_hook_write_fd)
|
|
|
|
|
return;
|
|
|
|
|
Array<u8, 1> bytes = { 0 };
|
|
|
|
|
(void)Core::System::write(m_notify_hook_write_fd->value(), bytes);
|
|
|
|
|
}
|
|
|
|
|
|
2024-10-22 15:47:33 -06:00
|
|
|
// Installs `hook` to run on the event-loop thread whenever the IO thread
// signals (via the notify-hook pipe) that incoming messages or peer EOF are
// available. A Core::Notifier watches the pipe's read end.
void TransportSocket::set_up_read_hook(Function<void()> hook)
{
    m_on_read_hook = move(hook);
    m_read_hook_notifier = Core::Notifier::construct(m_notify_hook_read_fd->value(), Core::NotificationType::Read);
    m_read_hook_notifier->on_activation = [this] {
        VERIFY(m_notify_hook_read_fd);
        // Drain the pipe; how many notification bytes were pending is irrelevant.
        char buf[64];
        (void)Core::System::read(m_notify_hook_read_fd->value(), { buf, sizeof(buf) });
        if (m_on_read_hook)
            m_on_read_hook();
    };

    {
        Threading::MutexLocker locker(m_incoming_mutex);
        // Messages may already have arrived before the hook was installed; make
        // sure the notifier fires for them. Use notify_read_available() rather
        // than MUST(write(...)): the notify pipe is non-blocking, so a full pipe
        // would make MUST() abort on EAGAIN, while a pending byte in the pipe
        // already guarantees the notifier will fire.
        if (!m_incoming_messages.is_empty())
            notify_read_available();
    }
}
|
|
|
|
|
|
|
|
|
|
bool TransportSocket::is_open() const
|
|
|
|
|
{
|
|
|
|
|
return m_socket->is_open();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void TransportSocket::close()
|
|
|
|
|
{
|
2025-10-13 04:58:27 +02:00
|
|
|
stop_io_thread(IOThreadState::Stopped);
|
2024-10-22 15:47:33 -06:00
|
|
|
m_socket->close();
|
|
|
|
|
}
|
|
|
|
|
|
2025-05-20 16:21:17 -04:00
|
|
|
void TransportSocket::close_after_sending_all_pending_messages()
|
|
|
|
|
{
|
2025-10-13 04:58:27 +02:00
|
|
|
stop_io_thread(IOThreadState::SendPendingMessagesAndStop);
|
|
|
|
|
m_socket->close();
|
2025-05-20 16:21:17 -04:00
|
|
|
}
|
|
|
|
|
|
2024-10-22 15:47:33 -06:00
|
|
|
void TransportSocket::wait_until_readable()
|
|
|
|
|
{
|
2025-10-13 04:58:27 +02:00
|
|
|
Threading::MutexLocker lock(m_incoming_mutex);
|
|
|
|
|
while (m_incoming_messages.is_empty() && m_io_thread_state == IOThreadState::Running) {
|
|
|
|
|
m_incoming_cv.wait();
|
2024-10-22 15:47:33 -06:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2026-01-21 22:00:09 +01:00
|
|
|
// Backpressure limits: a peer that streams data or fds faster than we can
// frame complete messages is treated as misbehaving and disconnected.

// Maximum size of accumulated unprocessed bytes before we disconnect the peer
static constexpr size_t MAX_UNPROCESSED_BUFFER_SIZE = 128 * MiB;

// Maximum number of accumulated unprocessed file descriptors before we disconnect the peer
static constexpr size_t MAX_UNPROCESSED_FDS = 512;
|
|
|
|
|
|
2025-04-07 04:17:36 +02:00
|
|
|
// Wire-format header preceding every chunk sent over the socket. The raw
// struct bytes are memcpy'd to/from the stream, so both peers must agree on
// the layout (they are built from the same source).
// NOTE(review): the compiler-inserted padding between `type` and `payload_size`
// is transmitted as-is; aggregate initialization at the send sites is presumed
// to leave it zeroed — confirm if this ever crosses a trust boundary.
struct MessageHeader {
    enum class Type : u8 {
        Payload = 0,                       // Header is followed by payload_size bytes of message data.
        FileDescriptorAcknowledgement = 1, // No payload; fd_count acknowledges fds received by the peer.
    };
    Type type { Type::Payload };
    u32 payload_size { 0 }; // Byte count of the payload following this header (0 for acknowledgements).
    u32 fd_count { 0 };     // Payload: fds accompanying this message. Ack: number of fds acknowledged.
};
|
|
|
|
|
|
2026-03-13 17:11:22 +01:00
|
|
|
// Frames `bytes_to_write` with a MessageHeader, takes ownership of the fds
// behind `attachments`, enqueues everything on the send queue, and wakes the
// IO thread to transmit it. The fds are kept alive in
// m_fds_retained_until_received_by_peer until the peer acknowledges receipt.
void TransportSocket::post_message(Vector<u8> const& bytes_to_write, Vector<Attachment>& attachments)
{
    auto num_fds_to_transfer = attachments.size();

    MessageHeader header {
        .type = MessageHeader::Type::Payload,
        .payload_size = static_cast<u32>(bytes_to_write.size()),
        .fd_count = static_cast<u32>(num_fds_to_transfer),
    };

    auto raw_fds = Vector<int, 1> {};
    if (num_fds_to_transfer > 0) {
        raw_fds.ensure_capacity(num_fds_to_transfer);
        Threading::MutexLocker locker(m_fds_retained_until_received_by_peer_mutex);
        for (auto& attachment : attachments) {
            // The AutoCloseFileDescriptor owns the fd from here on; we must keep
            // it open until the peer has actually received it (sendmsg only
            // duplicates it at transmission time).
            int fd = attachment.to_fd();
            auto auto_fd = adopt_ref(*new AutoCloseFileDescriptor(fd));
            raw_fds.unchecked_append(auto_fd->value());
            m_fds_retained_until_received_by_peer.enqueue(move(auto_fd));
        }
    }

    m_send_queue->enqueue_message({ reinterpret_cast<u8 const*>(&header), sizeof(header) }, bytes_to_write, move(raw_fds));
    wake_io_thread();
}
|
|
|
|
|
|
2025-04-09 20:54:41 +02:00
|
|
|
// Writes as much of bytes_to_write as the non-blocking socket accepts, sending
// unowned_fds alongside the first successful chunk. On return, bytes_to_write
// and unowned_fds reflect only what was NOT yet sent, so the caller can discard
// the transmitted prefix from its queue. A would-block condition is not an
// error; it simply ends this round of writing.
ErrorOr<void> TransportSocket::send_message(Core::LocalSocket& socket, ReadonlyBytes& bytes_to_write, Vector<int>& unowned_fds)
{
    bool fds_pending = !unowned_fds.is_empty();

    while (!bytes_to_write.is_empty()) {
        ErrorOr<ssize_t> write_result = 0;
        // The fds must travel with the very first chunk that makes it out.
        if (fds_pending)
            write_result = socket.send_message(bytes_to_write, 0, unowned_fds);
        else
            write_result = socket.write_some(bytes_to_write);

        if (write_result.is_error()) {
            auto error = write_result.release_error();
            bool should_retry_later = error.is_errno() && (error.code() == EAGAIN || error.code() == EWOULDBLOCK || error.code() == EINTR);
            if (should_retry_later)
                return {};
            return error;
        }

        bytes_to_write = bytes_to_write.slice(write_result.value());

        // Once any bytes went through, the fds went with them.
        fds_pending = false;
        unowned_fds.clear();
    }

    return {};
}
|
|
|
|
|
|
2025-05-20 16:21:17 -04:00
|
|
|
// Pushes the peeked bytes/fds from the send queue out over the socket. On
// success (including partial writes), the transmitted prefix is discarded from
// the send queue. Returns SocketClosed when the peer has gone away or any
// other write error occurred; Continue otherwise.
TransportSocket::TransferState TransportSocket::transfer_data(ReadonlyBytes& bytes, Vector<int>& fds)
{
    auto byte_count = bytes.size();
    auto fd_count = fds.size();

    if (auto result = send_message(*m_socket, bytes, fds); result.is_error()) {
        if (result.error().is_errno() && result.error().code() == EPIPE) {
            // The socket is closed from the other end, we can stop sending.
            return TransferState::SocketClosed;
        }

        // NOTE: Previously this logged as "TransportSocket::send_thread", a name
        // left over from a removed dedicated send thread.
        dbgln("TransportSocket::transfer_data: {}", result.error());
        return TransferState::SocketClosed;
    }

    // send_message() shrank `bytes`/`fds` to the unsent remainder; drop what was
    // actually transmitted from the queue.
    auto written_byte_count = byte_count - bytes.size();
    auto written_fd_count = fd_count - fds.size();
    if (written_byte_count > 0 || written_fd_count > 0)
        m_send_queue->discard(written_byte_count, written_fd_count);

    return TransferState::Continue;
}
|
|
|
|
|
|
2025-10-13 04:58:27 +02:00
|
|
|
// IO-thread-only: drains the socket of all currently-available bytes and file
// descriptors, then parses the accumulated stream into framed messages.
// Complete messages are batched into m_incoming_messages; received fds are
// acknowledged back to the peer; malformed or abusive input sets m_peer_eof to
// force a disconnect.
void TransportSocket::read_incoming_messages()
{
    Vector<NonnullOwnPtr<Message>> batch;

    // Phase 1: pull everything the kernel currently has into m_unprocessed_bytes
    // / m_unprocessed_attachments.
    while (m_socket->is_open()) {
        u8 buffer[4096];
        auto received_fds = Vector<int> {};
        auto maybe_bytes_read = m_socket->receive_message({ buffer, 4096 }, MSG_DONTWAIT, received_fds);
        if (maybe_bytes_read.is_error()) {
            auto error = maybe_bytes_read.release_error();

            // EAGAIN: nothing more to read right now; not a failure.
            if (error.is_errno() && error.code() == EAGAIN) {
                break;
            }
            // ECONNRESET: peer vanished; treat as EOF.
            if (error.is_errno() && error.code() == ECONNRESET) {
                m_peer_eof = true;
                break;
            }

            // NOTE(review): these log strings carry a stale function name
            // ("read_as_much_as_possible_without_blocking") from an earlier
            // revision of this function.
            dbgln("TransportSocket::read_as_much_as_possible_without_blocking: {}", error);
            warnln("TransportSocket::read_as_much_as_possible_without_blocking: {}", error);
            m_peer_eof = true;
            break;
        }

        auto bytes_read = maybe_bytes_read.release_value();
        // Zero bytes and zero fds on a successful read means orderly EOF.
        if (bytes_read.is_empty() && received_fds.is_empty()) {
            m_peer_eof = true;
            break;
        }

        // Backpressure: refuse to buffer unbounded data from a runaway peer.
        if (m_unprocessed_bytes.size() + bytes_read.size() > MAX_UNPROCESSED_BUFFER_SIZE) {
            dbgln("TransportSocket: Unprocessed buffer would exceed {} bytes, disconnecting peer", MAX_UNPROCESSED_BUFFER_SIZE);
            m_peer_eof = true;
            break;
        }
        if (m_unprocessed_bytes.try_append(bytes_read.data(), bytes_read.size()).is_error()) {
            dbgln("TransportSocket: Failed to append to unprocessed_bytes buffer");
            m_peer_eof = true;
            break;
        }
        if (m_unprocessed_attachments.size() + received_fds.size() > MAX_UNPROCESSED_FDS) {
            dbgln("TransportSocket: Unprocessed FDs would exceed {}, disconnecting peer", MAX_UNPROCESSED_FDS);
            m_peer_eof = true;
            break;
        }
        for (auto const& fd : received_fds) {
            m_unprocessed_attachments.enqueue(Attachment::from_fd(fd));
        }
    }

    // Phase 2: walk the buffered stream header-by-header, extracting complete
    // Payload messages and tallying fd acknowledgements. Checked<> arithmetic
    // guards against a peer crafting overflowing sizes/counts.
    Checked<u32> received_fd_count = 0;
    Checked<u32> acknowledged_fd_count = 0;
    size_t index = 0;
    while (index + sizeof(MessageHeader) <= m_unprocessed_bytes.size()) {
        MessageHeader header;
        memcpy(&header, m_unprocessed_bytes.data() + index, sizeof(MessageHeader));
        if (header.type == MessageHeader::Type::Payload) {
            if (header.payload_size > MAX_MESSAGE_PAYLOAD_SIZE) {
                dbgln("TransportSocket: Rejecting message with payload_size {} exceeding limit {}", header.payload_size, MAX_MESSAGE_PAYLOAD_SIZE);
                m_peer_eof = true;
                break;
            }
            if (header.fd_count > MAX_MESSAGE_FD_COUNT) {
                dbgln("TransportSocket: Rejecting message with fd_count {} exceeding limit {}", header.fd_count, MAX_MESSAGE_FD_COUNT);
                m_peer_eof = true;
                break;
            }
            Checked<size_t> message_size = header.payload_size;
            message_size += sizeof(MessageHeader);
            // Incomplete message: wait for more bytes to arrive.
            if (message_size.has_overflow() || message_size.value() > m_unprocessed_bytes.size() - index)
                break;
            // The message's fds may still be in flight; wait for them too.
            if (header.fd_count > m_unprocessed_attachments.size())
                break;
            auto message = make<Message>();
            received_fd_count += header.fd_count;
            if (received_fd_count.has_overflow()) {
                dbgln("TransportSocket: received_fd_count would overflow");
                m_peer_eof = true;
                break;
            }
            for (size_t i = 0; i < header.fd_count; ++i)
                message->attachments.enqueue(m_unprocessed_attachments.dequeue());
            if (message->bytes.try_append(m_unprocessed_bytes.data() + index + sizeof(MessageHeader), header.payload_size).is_error()) {
                dbgln("TransportSocket: Failed to allocate message buffer for payload_size {}", header.payload_size);
                m_peer_eof = true;
                break;
            }
            batch.append(move(message));
        } else if (header.type == MessageHeader::Type::FileDescriptorAcknowledgement) {
            if (header.payload_size != 0) {
                dbgln("TransportSocket: FileDescriptorAcknowledgement with non-zero payload_size {}", header.payload_size);
                m_peer_eof = true;
                break;
            }
            acknowledged_fd_count += header.fd_count;
            if (acknowledged_fd_count.has_overflow()) {
                dbgln("TransportSocket: acknowledged_fd_count would overflow");
                m_peer_eof = true;
                break;
            }
        } else {
            dbgln("TransportSocket: Unknown message header type {}", static_cast<u8>(header.type));
            m_peer_eof = true;
            break;
        }
        // Advance past this header + payload (overflow-checked).
        Checked<size_t> new_index = index;
        new_index += header.payload_size;
        new_index += sizeof(MessageHeader);
        if (new_index.has_overflow()) {
            dbgln("TransportSocket: index would overflow");
            m_peer_eof = true;
            break;
        }
        index = new_index.value();
    }

    // Phase 3: the peer acknowledged fds, so we may finally close our retained
    // duplicates (they were kept open until guaranteed received).
    if (acknowledged_fd_count > 0u) {
        Threading::MutexLocker locker(m_fds_retained_until_received_by_peer_mutex);
        while (acknowledged_fd_count > 0u) {
            if (m_fds_retained_until_received_by_peer.is_empty()) {
                dbgln("TransportSocket: Peer acknowledged more FDs than we sent");
                m_peer_eof = true;
                break;
            }
            (void)m_fds_retained_until_received_by_peer.dequeue();
            --acknowledged_fd_count;
        }
    }

    // Tell the peer how many of its fds we consumed, so it can release its side.
    if (received_fd_count > 0u) {
        MessageHeader header {
            .type = MessageHeader::Type::FileDescriptorAcknowledgement,
            .payload_size = 0,
            .fd_count = received_fd_count.value(),
        };
        m_send_queue->enqueue_message({ reinterpret_cast<u8 const*>(&header), sizeof(header) }, {}, {});
        wake_io_thread();
    }

    // Compact the buffer: keep only the unparsed tail (a partial message).
    if (index < m_unprocessed_bytes.size()) {
        auto remaining = m_unprocessed_bytes.size() - index;
        m_unprocessed_bytes.overwrite(0, m_unprocessed_bytes.data() + index, remaining);
        m_unprocessed_bytes.resize(remaining);
    } else {
        m_unprocessed_bytes.clear();
    }

    // Publish complete messages and wake both condvar waiters and the
    // event-loop notifier.
    if (!batch.is_empty()) {
        Threading::MutexLocker locker(m_incoming_mutex);
        m_incoming_messages.extend(move(batch));
        m_incoming_cv.broadcast();
        notify_read_available();
    }

    // On EOF, waiters must also wake up so they can observe the shutdown.
    if (m_peer_eof) {
        m_incoming_cv.broadcast();
        notify_read_available();
    }
}
|
|
|
|
|
|
|
|
|
|
// Hands every message the IO thread has queued so far to `callback`, then
// reports whether the peer has hung up. The queue is swapped out under the
// mutex so the callbacks run without holding any lock.
TransportSocket::ShouldShutdown TransportSocket::read_as_many_messages_as_possible_without_blocking(Function<void(Message&&)>&& callback)
{
    Vector<NonnullOwnPtr<Message>> pending;
    {
        Threading::MutexLocker lock { m_incoming_mutex };
        pending = move(m_incoming_messages);
    }

    for (auto& queued_message : pending)
        callback(move(*queued_message));

    if (m_peer_eof)
        return ShouldShutdown::Yes;
    return ShouldShutdown::No;
}
|
|
|
|
|
|
2026-03-14 17:34:46 +01:00
|
|
|
// Detaches the underlying socket fd so it can be sent to another process.
// The transfer flag must be set before stopping the IO thread: it prevents the
// loop's shutdown path from treating this as a peer EOF.
// Pending outgoing messages are flushed before the thread stops.
ErrorOr<TransportHandle> TransportSocket::release_for_transfer()
{
    m_is_being_transferred.store(true, AK::MemoryOrder::memory_order_release);
    stop_io_thread(IOThreadState::SendPendingMessagesAndStop);
    auto fd = TRY(m_socket->release_fd());
    return TransportHandle { File::adopt_fd(fd) };
}
|
|
|
|
|
|
|
|
|
|
}
|