2024-10-31 12:44:19 +05:00
/*
* Copyright ( c ) 2023 , Andreas Kling < andreas @ ladybird . org >
2025-01-07 14:04:16 +05:00
* Copyright ( c ) 2024 - 2025 , stasoid < stasoid @ yahoo . com >
2025-07-14 01:16:11 -07:00
* Copyright ( c ) 2025 , ayeteadoe < ayeteadoe @ gmail . com >
2025-09-02 17:14:30 +02:00
* Copyright ( c ) 2025 , Ryszard Goc < ryszardgoc @ gmail . com >
2024-10-31 12:44:19 +05:00
*
* SPDX - License - Identifier : BSD - 2 - Clause
*/
2025-09-02 17:14:30 +02:00
# include <AK/Assertions.h>
# include <AK/Diagnostics.h>
2025-02-10 13:02:51 -07:00
# include <AK/HashMap.h>
2025-09-02 17:14:30 +02:00
# include <AK/NonnullOwnPtr.h>
# include <AK/Windows.h>
2024-10-31 12:44:19 +05:00
# include <LibCore/EventLoopImplementationWindows.h>
# include <LibCore/Notifier.h>
# include <LibCore/ThreadEventQueue.h>
LibCore: Make single-shot timer objects manually reset on Windows
This fixes a really nasty EventLoop bug which I debugged for 2 weeks.
The spin_until([&]{return completed_tasks == total_tasks;}) in
TraversableNavigable::check_if_unloading_is_canceled spins forever.
Cause of the bug:
check_if_unloading_is_canceled is called deferred
check_if_unloading_is_canceled creates a task:
queue_global_task(..., [&] {
...
completed_tasks++;
}));
This task is never executed.
queue_global_task calls TaskQueue::add
void TaskQueue::add(task)
{
m_tasks.append(task);
m_event_loop->schedule();
}
void HTML::EventLoop::schedule()
{
if (!m_system_event_loop_timer)
m_system_event_loop_timer = Timer::create_single_shot(
0, // delay
[&] { process(); });
if (!m_system_event_loop_timer->is_active())
m_system_event_loop_timer->restart();
}
EventLoop::process executes one task from task queue and calls
schedule again if there are more tasks.
So task processing relies on one single-shot zero-delay timer,
m_system_event_loop_timer.
Timers and other notification events are handled by Core::EventLoop
and Core::ThreadEventQueue, these are different from HTML::EventLoop
and HTML::TaskQueue mentioned above.
check_if_unloading_is_canceled is called using deferred_invoke
mechanism, different from m_system_event_loop_timer,
see Navigable::navigate and Core::EventLoop::deferred_invoke.
The core of the problem is that Core::EventLoop::pump is called again
(from spin_until) after timer fired but before its handler is executed.
In ThreadEventQueue::process events are moved into local variable before
executing. The first of those events is check_if_unloading_is_canceled.
One of the remaining events is Web::HTML::EventLoop::process, scheduled in
EventLoop::schedule using m_system_event_loop_timer.
When check_if_unloading_is_canceled calls queue_global_task its
m_system_event_loop_timer is still active because Timer::timer_event
was not yet called, so the timer is not restarted.
But Timer::timer_event (and hence EventLoop::process) will never execute
because check_if_unloading_is_canceled calls spin_until after
queue_global_task, and EventLoop::process is no longer in
event_queue.m_private->queued_events.
By making a single-shot timer manually-reset we are allowing it to fire
several times. So when spin_until is executed m_system_event_loop_timer
is fired again. Not an ideal solution, but this is the best I could
come up with. This commit makes the behavior match EventLoopImplUnix,
in which single-shot timer can also fire several times.
Adding event_queue.process(); at the start of pump like in EvtLoopImplQt
doesn't fix the problem.
Note: Timer::start calls EventReceiver::start_timer, which calls
EventLoop::register_timer with should_reload always set to true
(single-shot vs periodic are handled in Timer::timer_event instead),
so I use static_cast<Timer&>(object).is_single_shot() instead of
!should_reload.
2025-02-19 22:36:05 +05:00
# include <LibCore/Timer.h>
2025-09-02 17:14:30 +02:00
# include <LibThreading/Mutex.h>
2025-11-22 15:02:26 -08:00
# include <LibThreading/MutexProtected.h>
2024-12-01 14:43:10 +05:00
2025-09-02 17:14:30 +02:00
struct OwnHandle {
2024-10-31 12:44:19 +05:00
HANDLE handle = NULL ;
2025-09-02 17:14:30 +02:00
explicit OwnHandle ( HANDLE h = NULL )
2024-10-31 12:44:19 +05:00
: handle ( h )
{
}
2025-09-02 17:14:30 +02:00
OwnHandle ( OwnHandle & & h )
2024-10-31 12:44:19 +05:00
{
handle = h . handle ;
h . handle = NULL ;
}
2025-09-02 17:14:30 +02:00
// This operation can only be done when handle is NULL
OwnHandle & operator = ( OwnHandle & & other )
2024-10-31 12:44:19 +05:00
{
VERIFY ( ! handle ) ;
2025-09-02 17:14:30 +02:00
if ( this = = & other )
return * this ;
handle = other . handle ;
other . handle = NULL ;
return * this ;
2024-10-31 12:44:19 +05:00
}
2025-09-02 17:14:30 +02:00
~ OwnHandle ( )
2024-10-31 12:44:19 +05:00
{
if ( handle )
CloseHandle ( handle ) ;
}
2025-09-02 17:14:30 +02:00
bool operator = = ( OwnHandle const & h ) const { return handle = = h . handle ; }
2024-10-31 12:44:19 +05:00
bool operator = = ( HANDLE h ) const { return handle = = h ; }
} ;
template < >
2025-09-02 17:14:30 +02:00
struct Traits < OwnHandle > : DefaultTraits < OwnHandle > {
static unsigned hash ( OwnHandle const & h ) { return Traits < HANDLE > : : hash ( h . handle ) ; }
2024-10-31 12:44:19 +05:00
} ;
template < >
2025-09-02 17:14:30 +02:00
constexpr bool IsHashCompatible < HANDLE , OwnHandle > = true ;
2024-10-31 12:44:19 +05:00
namespace Core {
2025-09-02 17:14:30 +02:00
// Discriminates what kind of data an IOCP completion key points at.
enum class CompletionType : u8 {
    Wake,
    Timer,
    Notifer, // NOTE: misspelling of "Notifier" kept, as other code in this file spells it this way
    Process,
};

// Base for all per-registration data posted through the completion port.
struct CompletionPacket {
    CompletionType type;
};
2025-11-17 13:22:10 -08:00
// Completion data used to wake the event loop from another thread.
struct EventLoopWake final : CompletionPacket {
    OwnHandle wait_packet; // NT wait-completion packet associated with the IOCP
    OwnHandle wait_event;  // event signalled by wake()
};
2025-09-02 17:14:30 +02:00
struct EventLoopTimer final : CompletionPacket {
~ EventLoopTimer ( )
{
CancelWaitableTimer ( timer . handle ) ;
}
OwnHandle timer ;
OwnHandle wait_packet ;
bool is_periodic ;
2024-10-31 12:44:19 +05:00
WeakPtr < EventReceiver > owner ;
} ;
2025-09-02 17:14:30 +02:00
// Completion data for one registered fd notifier.
// NOTE: The previously user-provided empty destructor was removed (Rule of Zero);
// OwnHandle members clean up on their own.
struct EventLoopNotifier final : CompletionPacket {
    Notifier* notifier;    // non-owning; registration is keyed by this pointer
    OwnHandle wait_packet; // NT wait-completion packet associated with the IOCP
    OwnHandle wait_event;  // event tied to the fd via WSAEventSelect
};
2025-11-22 15:02:26 -08:00
// Completion data for one watched child process (job-object based).
// NOTE: The redundant `= default` destructor was removed (Rule of Zero).
struct EventLoopProcess final : CompletionPacket {
    OwnHandle process;                  // process handle kept alive while watched
    pid_t pid;
    Function<void(pid_t)> exit_handler; // invoked once when the process exits
    OwnHandle jobobject;                // job object whose notifications target the IOCP
};
2024-10-31 12:44:19 +05:00
struct ThreadData {
2025-01-08 11:41:50 +05:00
static ThreadData * the ( )
2024-10-31 12:44:19 +05:00
{
thread_local OwnPtr < ThreadData > thread_data = make < ThreadData > ( ) ;
2025-01-08 11:41:50 +05:00
if ( thread_data )
return & * thread_data ;
return nullptr ;
2024-10-31 12:44:19 +05:00
}
ThreadData ( )
2025-11-17 13:22:10 -08:00
: wake_data ( make < EventLoopWake > ( ) )
2024-10-31 12:44:19 +05:00
{
2025-11-17 13:22:10 -08:00
wake_data - > type = CompletionType : : Wake ;
wake_data - > wait_event . handle = CreateEvent ( NULL , FALSE , FALSE , NULL ) ;
2025-09-02 17:14:30 +02:00
// Consider a way for different event loops to have a different number of threads
iocp . handle = CreateIoCompletionPort ( INVALID_HANDLE_VALUE , NULL , 0 , 1 ) ;
VERIFY ( iocp . handle ) ;
2025-11-17 13:22:10 -08:00
NTSTATUS status = g_system . NtCreateWaitCompletionPacket ( & wake_data - > wait_packet . handle , GENERIC_READ | GENERIC_WRITE , NULL ) ;
VERIFY ( NT_SUCCESS ( status ) ) ;
status = g_system . NtAssociateWaitCompletionPacket ( wake_data - > wait_packet . handle , iocp . handle , wake_data - > wait_event . handle , wake_data . ptr ( ) , NULL , 0 , 0 , NULL ) ;
VERIFY ( NT_SUCCESS ( status ) ) ;
}
~ ThreadData ( )
{
NTSTATUS status = g_system . NtCancelWaitCompletionPacket ( wake_data - > wait_packet . handle , TRUE ) ;
VERIFY ( NT_SUCCESS ( status ) ) ;
2024-10-31 12:44:19 +05:00
}
2025-09-02 17:14:30 +02:00
OwnHandle iocp ;
// These are only used to register and unregister. The event loop doesn't access these.
HashMap < intptr_t , NonnullOwnPtr < EventLoopTimer > > timers ;
HashMap < Notifier * , NonnullOwnPtr < EventLoopNotifier > > notifiers ;
2024-10-31 12:44:19 +05:00
2025-11-17 13:22:10 -08:00
// The wake completion packet is posted to the thread's event loop to wake it.
NonnullOwnPtr < EventLoopWake > wake_data ;
2024-10-31 12:44:19 +05:00
} ;
2025-11-22 15:02:26 -08:00
// Watched child processes; shared across threads, hence mutex-protected.
static Threading::MutexProtected<HashMap<pid_t, NonnullOwnPtr<EventLoopProcess>>> s_processes;
2024-10-31 12:44:19 +05:00
// Caches this thread's wake event so wake() does not have to consult ThreadData.
EventLoopImplementationWindows::EventLoopImplementationWindows()
    : m_wake_event(ThreadData::the()->wake_data->wait_event.handle)
{
    VERIFY(m_wake_event);
}
EventLoopImplementationWindows::~EventLoopImplementationWindows() = default;
int EventLoopImplementationWindows : : exec ( )
{
for ( ; ; ) {
if ( m_exit_requested )
return m_exit_code ;
pump ( PumpMode : : WaitForEvents ) ;
}
VERIFY_NOT_REACHED ( ) ;
}
2025-09-02 17:14:30 +02:00
static constexpr bool debug_event_loop = false ;
2025-10-22 14:57:47 -05:00
size_t EventLoopImplementationWindows : : pump ( PumpMode pump_mode )
2024-10-31 12:44:19 +05:00
{
2025-01-08 12:27:40 +05:00
auto & event_queue = ThreadEventQueue : : current ( ) ;
auto * thread_data = ThreadData : : the ( ) ;
2024-10-31 12:44:19 +05:00
2025-09-02 17:14:30 +02:00
// NOTE: The number of entries to dequeue is to be optimized. Ideally we always dequeue all outstanding packets,
// but we don't want to increase the cost of each pump unnecessarily. If more than one entry is never dequeued
// at once, we could switch to using GetQueuedCompletionStatus which directly returns the values.
constexpr ULONG entry_count = 32 ;
OVERLAPPED_ENTRY entries [ entry_count ] ;
ULONG entries_removed = 0 ;
2024-10-31 12:44:19 +05:00
2025-01-08 12:27:40 +05:00
bool has_pending_events = event_queue . has_pending_events ( ) ;
2025-10-22 14:57:47 -05:00
DWORD timeout = 0 ;
if ( ! has_pending_events & & pump_mode = = PumpMode : : WaitForEvents )
timeout = INFINITE ;
2025-09-02 17:14:30 +02:00
BOOL success = GetQueuedCompletionStatusEx ( thread_data - > iocp . handle , entries , entry_count , & entries_removed , timeout , FALSE ) ;
dbgln_if ( debug_event_loop , " Event loop dequed {} events " , entries_removed ) ;
if ( success ) {
for ( ULONG i = 0 ; i < entries_removed ; i + + ) {
auto & entry = entries [ i ] ;
auto * packet = reinterpret_cast < CompletionPacket * > ( entry . lpCompletionKey ) ;
2025-11-17 13:22:10 -08:00
if ( packet - > type = = CompletionType : : Wake ) {
auto * wake_data = static_cast < EventLoopWake * > ( packet ) ;
NTSTATUS status = g_system . NtAssociateWaitCompletionPacket ( wake_data - > wait_packet . handle , thread_data - > iocp . handle , wake_data - > wait_event . handle , wake_data , NULL , 0 , 0 , NULL ) ;
VERIFY ( NT_SUCCESS ( status ) ) ;
2025-09-02 17:14:30 +02:00
continue ;
}
if ( packet - > type = = CompletionType : : Timer ) {
auto * timer = static_cast < EventLoopTimer * > ( packet ) ;
if ( auto owner = timer - > owner . strong_ref ( ) )
2025-12-03 11:38:19 +01:00
event_queue . post_event ( owner , Event : : Type : : Timer ) ;
2025-11-17 13:25:03 -08:00
if ( timer - > is_periodic ) {
NTSTATUS status = g_system . NtAssociateWaitCompletionPacket ( timer - > wait_packet . handle , thread_data - > iocp . handle , timer - > timer . handle , timer , NULL , 0 , 0 , NULL ) ;
VERIFY ( NT_SUCCESS ( status ) ) ;
}
2025-09-02 17:14:30 +02:00
continue ;
}
if ( packet - > type = = CompletionType : : Notifer ) {
2025-11-17 13:18:53 -08:00
auto * notifier_data = static_cast < EventLoopNotifier * > ( packet ) ;
2025-12-03 11:38:19 +01:00
event_queue . post_event ( notifier_data - > notifier , Core : : Event : : Type : : NotifierActivation ) ;
2025-11-17 13:25:03 -08:00
NTSTATUS status = g_system . NtAssociateWaitCompletionPacket ( notifier_data - > wait_packet . handle , thread_data - > iocp . handle , notifier_data - > wait_event . handle , notifier_data , NULL , 0 , 0 , NULL ) ;
VERIFY ( NT_SUCCESS ( status ) ) ;
2025-09-02 17:14:30 +02:00
continue ;
2025-01-08 12:27:40 +05:00
}
2025-11-22 15:02:26 -08:00
if ( packet - > type = = CompletionType : : Process ) {
auto * process_data = static_cast < EventLoopProcess * > ( packet ) ;
pid_t const process_id = process_data - > pid ;
// NOTE: This may seem like the incorrect parameter, but https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port
// states that this field represents the event type indicator
DWORD const event_type = entry . dwNumberOfBytesTransferred ;
if ( reinterpret_cast < intptr_t > ( entry . lpOverlapped ) = = process_id & & ( event_type = = JOB_OBJECT_MSG_EXIT_PROCESS | | event_type = = JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS ) ) {
Optional < NonnullOwnPtr < EventLoopProcess > > owned_process = s_processes . with_locked ( [ & ] ( auto & processes ) {
return processes . take ( process_id ) ;
} ) ;
if ( owned_process . has_value ( ) )
owned_process . release_value ( ) - > exit_handler ( process_id ) ;
}
continue ;
}
2025-09-02 17:14:30 +02:00
VERIFY_NOT_REACHED ( ) ;
}
} else {
DWORD error = GetLastError ( ) ;
switch ( error ) {
case WAIT_TIMEOUT :
break ;
default :
dbgln ( " GetQueuedCompletionStatusEx failed with unexpected error: {} " , Error : : from_windows_error ( error ) ) ;
VERIFY_NOT_REACHED ( ) ;
2024-10-31 12:44:19 +05:00
}
}
2025-01-08 12:27:40 +05:00
return event_queue . process ( ) ;
2024-10-31 12:44:19 +05:00
}
void EventLoopImplementationWindows : : quit ( int code )
{
m_exit_requested = true ;
m_exit_code = code ;
}
void EventLoopImplementationWindows : : wake ( )
{
2025-11-17 13:22:10 -08:00
SetEvent ( m_wake_event ) ;
2024-10-31 12:44:19 +05:00
}
// Maps a Core::NotificationType to the WSAEventSelect network event mask.
// Crashes on notification types that are not implemented on Windows.
static int notifier_type_to_network_event(NotificationType type)
{
    switch (type) {
    case NotificationType::Read:
        // FD_CLOSE and FD_ACCEPT also manifest as readability (EOF / pending connection).
        return FD_READ | FD_CLOSE | FD_ACCEPT;
    case NotificationType::Write:
        return FD_WRITE;
    default:
        // FIX: named cast instead of C-style cast.
        dbgln("This notification type is not implemented: {}", static_cast<int>(type));
        VERIFY_NOT_REACHED();
    }
}
void EventLoopManagerWindows : : register_notifier ( Notifier & notifier )
{
2025-09-02 17:14:30 +02:00
auto * thread_data = ThreadData : : the ( ) ;
auto & notifiers = thread_data - > notifiers ;
if ( notifiers . contains ( & notifier ) )
return ;
2024-10-31 12:44:19 +05:00
HANDLE event = CreateEvent ( NULL , FALSE , FALSE , NULL ) ;
VERIFY ( event ) ;
2025-01-07 14:04:16 +05:00
int rc = WSAEventSelect ( notifier . fd ( ) , event , notifier_type_to_network_event ( notifier . type ( ) ) ) ;
VERIFY ( ! rc ) ;
2024-10-31 12:44:19 +05:00
2025-09-02 17:14:30 +02:00
auto notifier_data = make < EventLoopNotifier > ( ) ;
notifier_data - > type = CompletionType : : Notifer ;
notifier_data - > notifier = & notifier ;
notifier_data - > wait_event . handle = event ;
2025-11-17 13:25:03 -08:00
NTSTATUS status = g_system . NtCreateWaitCompletionPacket ( & notifier_data - > wait_packet . handle , GENERIC_READ | GENERIC_WRITE , NULL ) ;
2025-09-02 17:14:30 +02:00
VERIFY ( NT_SUCCESS ( status ) ) ;
2025-11-17 13:25:03 -08:00
status = g_system . NtAssociateWaitCompletionPacket ( notifier_data - > wait_packet . handle , thread_data - > iocp . handle , event , notifier_data . ptr ( ) , NULL , 0 , 0 , NULL ) ;
2025-09-02 17:14:30 +02:00
VERIFY ( NT_SUCCESS ( status ) ) ;
notifiers . set ( & notifier , move ( notifier_data ) ) ;
2024-10-31 12:44:19 +05:00
}
void EventLoopManagerWindows : : unregister_notifier ( Notifier & notifier )
{
2025-09-02 17:14:30 +02:00
auto * thread_data = ThreadData : : the ( ) ;
VERIFY ( thread_data ) ;
auto & notifiers = thread_data - > notifiers ;
auto maybe_notifier_data = notifiers . take ( & notifier ) ;
if ( ! maybe_notifier_data . has_value ( ) )
return ;
auto notifier_data = move ( maybe_notifier_data . value ( ) ) ;
// We are removing the signalled packets since the caller no longer expects them
NTSTATUS status = g_system . NtCancelWaitCompletionPacket ( notifier_data - > wait_packet . handle , TRUE ) ;
VERIFY ( NT_SUCCESS ( status ) ) ;
// TODO: Reuse the data structure
2024-10-31 12:44:19 +05:00
}
2025-08-11 11:45:39 +02:00
// Creates a Win32 waitable timer for `object` and wires it to this thread's IOCP
// via a wait-completion packet. Returns an opaque id for unregister_timer().
// `should_reload` makes the timer periodic with period `milliseconds`.
intptr_t EventLoopManagerWindows::register_timer(EventReceiver& object, int milliseconds, bool should_reload)
{
    VERIFY(milliseconds >= 0);
    auto* thread_data = ThreadData::the();
    VERIFY(thread_data);
    auto& timers = thread_data->timers;

    // FIXME: This is a temporary fix for issue #3641
    // Single-shot timers are created manual-reset so a fire is not lost if it
    // happens before the wait packet is (re-)associated.
    bool manual_reset = static_cast<Timer&>(object).is_single_shot();
    HANDLE timer = CreateWaitableTimer(NULL, manual_reset, NULL);
    VERIFY(timer); // FIX: the duplicate VERIFY on the same handle further down was removed

    auto timer_data = make<EventLoopTimer>();
    timer_data->type = CompletionType::Timer;
    timer_data->timer.handle = timer;
    timer_data->owner = object.make_weak_ptr();
    timer_data->is_periodic = should_reload;
    NTSTATUS status = g_system.NtCreateWaitCompletionPacket(&timer_data->wait_packet.handle, GENERIC_READ | GENERIC_WRITE, NULL);
    VERIFY(NT_SUCCESS(status));

    LARGE_INTEGER first_time = {};
    // Measured in 0.1μs intervals, negative means starting from now
    first_time.QuadPart = -10'000LL * milliseconds;
    BOOL succeeded = SetWaitableTimer(timer_data->timer.handle, &first_time, should_reload ? milliseconds : 0, NULL, NULL, FALSE);
    VERIFY(succeeded);
    status = g_system.NtAssociateWaitCompletionPacket(timer_data->wait_packet.handle, thread_data->iocp.handle, timer_data->timer.handle, timer_data.ptr(), NULL, 0, 0, NULL);
    VERIFY(NT_SUCCESS(status));

    // The bookkeeping struct's address doubles as the caller-visible timer id.
    auto timer_id = reinterpret_cast<intptr_t>(timer_data.ptr());
    VERIFY(!timers.get(timer_id).has_value());
    timers.set(timer_id, move(timer_data));
    return timer_id;
}
void EventLoopManagerWindows : : unregister_timer ( intptr_t timer_id )
{
2025-09-02 17:14:30 +02:00
if ( auto * thread_data = ThreadData : : the ( ) ) {
auto maybe_timer = thread_data - > timers . take ( timer_id ) ;
if ( ! maybe_timer . has_value ( ) )
return ;
auto timer = move ( maybe_timer . value ( ) ) ;
2025-11-17 13:25:03 -08:00
NTSTATUS status = g_system . NtCancelWaitCompletionPacket ( timer - > wait_packet . handle , TRUE ) ;
VERIFY ( NT_SUCCESS ( status ) ) ;
2025-09-02 17:14:30 +02:00
}
2024-10-31 12:44:19 +05:00
}
2024-11-21 22:13:44 +01:00
int EventLoopManagerWindows : : register_signal ( [[maybe_unused]] int signal_number, [[maybe_unused]] Function < void ( int ) > handler )
2024-10-31 12:44:19 +05:00
{
dbgln ( " Core::EventLoopManagerWindows::register_signal() is not implemented " ) ;
VERIFY_NOT_REACHED ( ) ;
}
2024-11-21 22:13:44 +01:00
void EventLoopManagerWindows : : unregister_signal ( [[maybe_unused]] int handler_id )
2024-10-31 12:44:19 +05:00
{
dbgln ( " Core::EventLoopManagerWindows::unregister_signal() is not implemented " ) ;
VERIFY_NOT_REACHED ( ) ;
}
2025-11-22 15:02:26 -08:00
void EventLoopManagerWindows : : register_process ( pid_t pid , ESCAPING Function < void ( pid_t ) > exit_handler )
{
auto * thread_data = ThreadData : : the ( ) ;
VERIFY ( thread_data ) ;
s_processes . with_locked ( [ & ] ( auto & processes ) {
if ( processes . contains ( pid ) )
return ;
HANDLE process_handle = OpenProcess ( PROCESS_ALL_ACCESS , FALSE , pid ) ;
VERIFY ( process_handle ) ;
HANDLE job_object_handle = CreateJobObject ( nullptr , nullptr ) ;
VERIFY ( job_object_handle ) ;
BOOL succeeded = AssignProcessToJobObject ( job_object_handle , process_handle ) ;
VERIFY ( succeeded ) ;
auto process_data = make < EventLoopProcess > ( ) ;
process_data - > type = CompletionType : : Process ;
process_data - > process . handle = process_handle ;
process_data - > pid = pid ;
process_data - > exit_handler = move ( exit_handler ) ;
process_data - > jobobject . handle = job_object_handle ;
JOBOBJECT_ASSOCIATE_COMPLETION_PORT joacp = { . CompletionKey = process_data . ptr ( ) , . CompletionPort = thread_data - > iocp . handle } ;
succeeded = SetInformationJobObject ( job_object_handle , JobObjectAssociateCompletionPortInformation , & joacp , sizeof ( JOBOBJECT_ASSOCIATE_COMPLETION_PORT ) ) ;
VERIFY ( succeeded ) ;
processes . set ( pid , move ( process_data ) ) ;
} ) ;
}
void EventLoopManagerWindows : : unregister_process ( pid_t pid )
{
auto maybe_process = s_processes . with_locked ( [ & ] ( auto & processes ) {
return processes . take ( pid ) ;
} ) ;
if ( ! maybe_process . has_value ( ) )
return ;
auto process_data = maybe_process . release_value ( ) ;
JOBOBJECT_ASSOCIATE_COMPLETION_PORT joacp = { . CompletionKey = process_data , . CompletionPort = nullptr } ;
BOOL succeeded = SetInformationJobObject ( process_data - > jobobject . handle , JobObjectAssociateCompletionPortInformation , & joacp , sizeof ( JOBOBJECT_ASSOCIATE_COMPLETION_PORT ) ) ;
VERIFY ( succeeded ) ;
}
2024-10-31 12:44:19 +05:00
void EventLoopManagerWindows : : did_post_event ( )
{
}
// Factory hook: hands out the Windows event loop implementation.
NonnullOwnPtr<EventLoopImplementation> EventLoopManagerWindows::make_implementation()
{
    return make<EventLoopImplementationWindows>();
}
}