/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * Copyright (c) 2022, kleines Filmröllchen <malu.bertsch@gmail.com>
 * Copyright (c) 2022, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Assertions.h>
#include <AK/Badge.h>
#include <AK/Debug.h>
#include <AK/Format.h>
#include <AK/IDAllocator.h>
#include <AK/JsonObject.h>
#include <AK/JsonValue.h>
#include <AK/NeverDestroyed.h>
#include <AK/Singleton.h>
#include <AK/TemporaryChange.h>
#include <AK/Time.h>
#include <LibCore/Event.h>
#include <LibCore/EventLoop.h>
#include <LibCore/LocalServer.h>
#include <LibCore/Notifier.h>
#include <LibCore/Object.h>
#include <LibCore/Promise.h>
#include <LibCore/SessionManagement.h>
#include <LibCore/Socket.h>
#include <LibThreading/Mutex.h>
#include <LibThreading/MutexProtected.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#ifdef AK_OS_SERENITY
#    include <LibCore/Account.h>
extern bool s_global_initializers_ran;
#endif

namespace Core {

class InspectorServerConnection;

[[maybe_unused]] static bool connect_to_inspector_server();

struct EventLoopTimer {
    int timer_id { 0 };
    Time interval;
    Time fire_time;
    bool should_reload { false };
    TimerShouldFireWhenNotVisible fire_when_not_visible { TimerShouldFireWhenNotVisible::No };
    WeakPtr<Object> owner;

    void reload(Time const& now);
    bool has_expired(Time const& now) const;
};

struct EventLoop::Private {
    Threading::Mutex lock;
};

static Threading::MutexProtected<NeverDestroyed<IDAllocator>> s_id_allocator;
static Threading::MutexProtected<RefPtr<InspectorServerConnection>> s_inspector_server_connection;

// Each thread has its own event loop stack, its own timers, notifiers and a wake pipe.
static thread_local Vector<EventLoop&>* s_event_loop_stack;
static thread_local HashMap<int, NonnullOwnPtr<EventLoopTimer>>* s_timers;
static thread_local HashTable<Notifier*>* s_notifiers;

// The wake pipe is responsible both for notifying us when someone calls wake() and for delivering POSIX signals.
// While wake() pushes zero into the pipe, signal numbers (by definition nonzero, see signal_numbers.h) are pushed into the pipe verbatim.
thread_local int EventLoop::s_wake_pipe_fds[2];
thread_local bool EventLoop::s_wake_pipe_initialized { false };

thread_local bool s_warned_promise_count { false };

void EventLoop::initialize_wake_pipes()
{
    if (!s_wake_pipe_initialized) {
#if defined(SOCK_NONBLOCK)
        int rc = pipe2(s_wake_pipe_fds, O_CLOEXEC);
#else
        int rc = pipe(s_wake_pipe_fds);
        fcntl(s_wake_pipe_fds[0], F_SETFD, FD_CLOEXEC);
        fcntl(s_wake_pipe_fds[1], F_SETFD, FD_CLOEXEC);
#endif
        VERIFY(rc == 0);
        s_wake_pipe_initialized = true;
    }
}
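
// Illustrative sketch (not part of this file's control flow): what travels through the wake
// pipe set up above. wake() writes a zero and handle_signal() writes the raw signal number,
// so the reader in wait_for_event() can tell "please wake up" apart from "signal N arrived".
//
//     int zero = 0;
//     (void)write(s_wake_pipe_fds[1], &zero, sizeof(zero));   // what wake() does
//     int signo = SIGCHLD;
//     (void)write(s_wake_pipe_fds[1], &signo, sizeof(signo)); // what handle_signal() does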

bool EventLoop::has_been_instantiated()
{
    return s_event_loop_stack != nullptr && !s_event_loop_stack->is_empty();
}

class SignalHandlers : public RefCounted<SignalHandlers> {
    AK_MAKE_NONCOPYABLE(SignalHandlers);
    AK_MAKE_NONMOVABLE(SignalHandlers);

public:
    SignalHandlers(int signo, void (*handle_signal)(int));
    ~SignalHandlers();

    void dispatch();
    int add(Function<void(int)>&& handler);
    bool remove(int handler_id);

    bool is_empty() const
    {
        if (m_calling_handlers) {
            for (auto& handler : m_handlers_pending) {
                if (handler.value)
                    return false; // an add is pending
            }
        }
        return m_handlers.is_empty();
    }

    bool have(int handler_id) const
    {
        if (m_calling_handlers) {
            auto it = m_handlers_pending.find(handler_id);
            if (it != m_handlers_pending.end()) {
                if (!it->value)
                    return false; // a deletion is pending
            }
        }
        return m_handlers.contains(handler_id);
    }

    int m_signo;
    void (*m_original_handler)(int); // TODO: can't use sighandler_t?
    HashMap<int, Function<void(int)>> m_handlers;
    HashMap<int, Function<void(int)>> m_handlers_pending;
    bool m_calling_handlers { false };
};

struct SignalHandlersInfo {
    HashMap<int, NonnullRefPtr<SignalHandlers>> signal_handlers;
    int next_signal_id { 0 };
};

static Singleton<SignalHandlersInfo> s_signals;

template<bool create_if_null = true>
inline SignalHandlersInfo* signals_info()
{
    return s_signals.ptr();
}

pid_t EventLoop::s_pid;

class InspectorServerConnection : public Object {
    C_OBJECT(InspectorServerConnection)
private:
    explicit InspectorServerConnection(NonnullOwnPtr<LocalSocket> socket)
        : m_socket(move(socket))
        , m_client_id(s_id_allocator.with_locked([](auto& allocator) {
            return allocator->allocate();
        }))
    {
#ifdef AK_OS_SERENITY
        m_socket->on_ready_to_read = [this] {
            u32 length;
            auto maybe_bytes_read = m_socket->read_some({ (u8*)&length, sizeof(length) });
            if (maybe_bytes_read.is_error()) {
                dbgln("InspectorServerConnection: Failed to read message length from inspector server connection: {}", maybe_bytes_read.error());
                shutdown();
                return;
            }

            auto bytes_read = maybe_bytes_read.release_value();
            if (bytes_read.is_empty()) {
                dbgln_if(EVENTLOOP_DEBUG, "RPC client disconnected");
                shutdown();
                return;
            }

            VERIFY(bytes_read.size() == sizeof(length));

            auto request_buffer = ByteBuffer::create_uninitialized(length).release_value();
            maybe_bytes_read = m_socket->read_some(request_buffer.bytes());
            if (maybe_bytes_read.is_error()) {
                dbgln("InspectorServerConnection: Failed to read message content from inspector server connection: {}", maybe_bytes_read.error());
                shutdown();
                return;
            }

            bytes_read = maybe_bytes_read.release_value();

            auto request_json = JsonValue::from_string(request_buffer);
            if (request_json.is_error() || !request_json.value().is_object()) {
                dbgln("RPC client sent invalid request");
                shutdown();
                return;
            }

            handle_request(request_json.value().as_object());
        };
#else
        warnln("RPC Client constructed outside serenity, this is very likely a bug!");
#endif
    }

    virtual ~InspectorServerConnection() override
    {
        if (auto inspected_object = m_inspected_object.strong_ref())
            inspected_object->decrement_inspector_count({});
    }

public:
    void send_response(JsonObject const& response)
    {
        auto serialized = response.to_deprecated_string();
        auto bytes_to_send = serialized.bytes();
        u32 length = bytes_to_send.size();
        // FIXME: Propagate errors
        // FIXME: This should write the entire span.
        auto sent = MUST(m_socket->write_some({ (u8 const*)&length, sizeof(length) }));
        VERIFY(sent == sizeof(length));
        while (!bytes_to_send.is_empty()) {
            size_t bytes_sent = MUST(m_socket->write_some(bytes_to_send));
            bytes_to_send = bytes_to_send.slice(bytes_sent);
        }
    }

    void handle_request(JsonObject const& request)
    {
        auto type = request.get_deprecated_string("type"sv);

        if (!type.has_value()) {
            dbgln("RPC client sent request without type field");
            return;
        }

        if (type == "Identify") {
            JsonObject response;
            response.set("type", type.value());
            response.set("pid", getpid());
#ifdef AK_OS_SERENITY
            char buffer[1024];
            if (get_process_name(buffer, sizeof(buffer)) >= 0) {
                response.set("process_name", buffer);
            } else {
                response.set("process_name", JsonValue());
            }
#endif
            send_response(response);
            return;
        }

        if (type == "GetAllObjects") {
            JsonObject response;
            response.set("type", type.value());
            JsonArray objects;
            for (auto& object : Object::all_objects()) {
                JsonObject json_object;
                object.save_to(json_object);
                objects.append(move(json_object));
            }
            response.set("objects", move(objects));
            send_response(response);
            return;
        }

        if (type == "SetInspectedObject") {
            auto address = request.get_addr("address"sv);
            for (auto& object : Object::all_objects()) {
                if ((FlatPtr)&object == address) {
                    if (auto inspected_object = m_inspected_object.strong_ref())
                        inspected_object->decrement_inspector_count({});
                    m_inspected_object = object;
                    object.increment_inspector_count({});
                    break;
                }
            }
            return;
        }

        if (type == "SetProperty") {
            auto address = request.get_addr("address"sv);
            for (auto& object : Object::all_objects()) {
                if ((FlatPtr)&object == address) {
                    bool success = object.set_property(request.get_deprecated_string("name"sv).value(), request.get("value"sv).value());
                    JsonObject response;
                    response.set("type", "SetProperty");
                    response.set("success", success);
                    send_response(response);
                    break;
                }
            }
            return;
        }

        if (type == "Disconnect") {
            shutdown();
            return;
        }
    }

    void shutdown()
    {
        s_id_allocator.with_locked([this](auto& allocator) { allocator->deallocate(m_client_id); });
    }

private:
    NonnullOwnPtr<LocalSocket> m_socket;
    WeakPtr<Object> m_inspected_object;
    int m_client_id { -1 };
};

EventLoop::EventLoop([[maybe_unused]] MakeInspectable make_inspectable)
    : m_wake_pipe_fds(&s_wake_pipe_fds)
    , m_private(make<Private>())
{
#ifdef AK_OS_SERENITY
    if (!s_global_initializers_ran) {
        // NOTE: Trying to have an event loop as a global variable will lead to initialization-order fiascos,
        //       as the event loop constructor accesses and/or sets other global variables.
        //       Therefore, we crash the program before ASAN catches us.
        //       If you came here because of the assertion failure, please redesign your program to not have global event loops.
        //       The common practice is to initialize the main event loop in the main function, and if necessary,
        //       pass event loop references around or access them with EventLoop::with_main_locked() and EventLoop::current().
        VERIFY_NOT_REACHED();
    }
#endif

    if (!s_event_loop_stack) {
        s_event_loop_stack = new Vector<EventLoop&>;
        s_timers = new HashMap<int, NonnullOwnPtr<EventLoopTimer>>;
        s_notifiers = new HashTable<Notifier*>;
    }

    if (s_event_loop_stack->is_empty()) {
        s_pid = getpid();
        s_event_loop_stack->append(*this);

#ifdef AK_OS_SERENITY
        if (getuid() != 0) {
            if (getenv("MAKE_INSPECTABLE") == "1"sv)
                make_inspectable = Core::EventLoop::MakeInspectable::Yes;
            if (make_inspectable == MakeInspectable::Yes
                && !s_inspector_server_connection.with_locked([](auto inspector_server_connection) { return inspector_server_connection; })) {
                if (!connect_to_inspector_server())
                    dbgln("Core::EventLoop: Failed to connect to InspectorServer");
            }
        }
#endif
    }

    initialize_wake_pipes();

    dbgln_if(EVENTLOOP_DEBUG, "{} Core::EventLoop constructed :)", getpid());
}
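
// Illustrative sketch of the common practice mentioned in the NOTE above (not part of this
// file): the main event loop lives in main() rather than in a global, and other code reaches
// it through EventLoop::current().
//
//     int main(int, char**)
//     {
//         Core::EventLoop loop;
//         Core::EventLoop::current().deferred_invoke([] {
//             dbgln("Hello from the event loop!");
//         });
//         return loop.exec();
//     }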

EventLoop::~EventLoop()
{
    if (!s_event_loop_stack->is_empty() && &s_event_loop_stack->last() == this)
        s_event_loop_stack->take_last();
}

bool connect_to_inspector_server()
{
#ifdef AK_OS_SERENITY
    auto maybe_path = SessionManagement::parse_path_with_sid("/tmp/session/%sid/portal/inspectables"sv);
    if (maybe_path.is_error()) {
        dbgln("connect_to_inspector_server: {}", maybe_path.error());
        return false;
    }
    auto inspector_server_path = maybe_path.value();

    auto maybe_socket = LocalSocket::connect(inspector_server_path, Socket::PreventSIGPIPE::Yes);
    if (maybe_socket.is_error()) {
        dbgln("connect_to_inspector_server: Failed to connect: {}", maybe_socket.error());
        return false;
    }

    s_inspector_server_connection.with_locked([&](auto& inspector_server_connection) {
        inspector_server_connection = InspectorServerConnection::construct(maybe_socket.release_value());
    });

    return true;
#else
    VERIFY_NOT_REACHED();
#endif
}

#define VERIFY_EVENT_LOOP_INITIALIZED()                                              \
    do {                                                                             \
        if (!s_event_loop_stack) {                                                   \
            warnln("EventLoop static API was called without prior EventLoop init!"); \
            VERIFY_NOT_REACHED();                                                    \
        }                                                                            \
    } while (0)

EventLoop& EventLoop::current()
{
    VERIFY_EVENT_LOOP_INITIALIZED();
    return s_event_loop_stack->last();
}

void EventLoop::quit(int code)
{
    dbgln_if(EVENTLOOP_DEBUG, "Core::EventLoop::quit({})", code);
    m_exit_requested = true;
    m_exit_code = code;
}

void EventLoop::unquit()
{
    dbgln_if(EVENTLOOP_DEBUG, "Core::EventLoop::unquit()");
    m_exit_requested = false;
    m_exit_code = 0;
}

struct EventLoopPusher {
public:
    EventLoopPusher(EventLoop& event_loop)
        : m_event_loop(event_loop)
    {
        if (EventLoop::has_been_instantiated()) {
            m_event_loop.take_pending_events_from(EventLoop::current());
            s_event_loop_stack->append(event_loop);
        }
    }
    ~EventLoopPusher()
    {
        if (EventLoop::has_been_instantiated()) {
            s_event_loop_stack->take_last();
            for (auto& job : m_event_loop.m_pending_promises) {
                // When this event loop was not running below another event loop, the jobs may very well have finished in the meantime.
                if (!job->is_resolved())
                    job->cancel(Error::from_string_view("EventLoop is exiting"sv));
            }
            EventLoop::current().take_pending_events_from(m_event_loop);
        }
    }

private:
    EventLoop& m_event_loop;
};

int EventLoop::exec()
{
    EventLoopPusher pusher(*this);
    for (;;) {
        if (m_exit_requested)
            return m_exit_code;
        pump();
    }
    VERIFY_NOT_REACHED();
}

void EventLoop::spin_until(Function<bool()> goal_condition)
{
    EventLoopPusher pusher(*this);
    while (!goal_condition())
        pump();
}
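
// Illustrative sketch (not part of this file): nesting an event loop with spin_until() to
// block until some asynchronous work has completed while still pumping events. The
// `request_finished` flag and `start_asynchronous_request()` are hypothetical.
//
//     bool request_finished = false;
//     start_asynchronous_request([&] { request_finished = true; });
//     Core::EventLoop inner_loop;
//     inner_loop.spin_until([&] { return request_finished; });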

size_t EventLoop::pump(WaitMode mode)
{
    wait_for_event(mode);

    decltype(m_queued_events) events;
    {
        Threading::MutexLocker locker(m_private->lock);
        events = move(m_queued_events);
    }

    m_pending_promises.remove_all_matching([](auto& job) { return job->is_resolved() || job->is_canceled(); });

    size_t processed_events = 0;
    for (size_t i = 0; i < events.size(); ++i) {
        auto& queued_event = events.at(i);
        auto receiver = queued_event.receiver.strong_ref();
        auto& event = *queued_event.event;

        if (receiver)
            dbgln_if(EVENTLOOP_DEBUG, "Core::EventLoop: {} event {}", *receiver, event.type());

        if (!receiver) {
            switch (event.type()) {
            case Event::Quit:
                VERIFY_NOT_REACHED();
            default:
                dbgln_if(EVENTLOOP_DEBUG, "Event type {} with no receiver :(", event.type());
                break;
            }
        } else if (event.type() == Event::Type::DeferredInvoke) {
            dbgln_if(DEFERRED_INVOKE_DEBUG, "DeferredInvoke: receiver = {}", *receiver);
            static_cast<DeferredInvocationEvent&>(event).m_invokee();
        } else {
            NonnullRefPtr<Object> protector(*receiver);
            receiver->dispatch_event(event);
        }

        ++processed_events;

        if (m_exit_requested) {
            Threading::MutexLocker locker(m_private->lock);
            dbgln_if(EVENTLOOP_DEBUG, "Core::EventLoop: Exit requested. Rejigging {} events.", events.size() - i);
            decltype(m_queued_events) new_event_queue;
            new_event_queue.ensure_capacity(m_queued_events.size() + events.size());
            for (++i; i < events.size(); ++i)
                new_event_queue.unchecked_append(move(events[i]));
            new_event_queue.extend(move(m_queued_events));
            m_queued_events = move(new_event_queue);
            break;
        }
    }

    if (m_pending_promises.size() > 30 && !s_warned_promise_count) {
        s_warned_promise_count = true;
        dbgln("EventLoop {:p} warning: Job queue wasn't designed for this load ({} promises). Please begin optimizing EventLoop::pump() -> m_pending_promises.remove_all_matching", this, m_pending_promises.size());
    }

    return processed_events;
}
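
// Illustrative sketch (not part of this file): pump() can also be driven manually, e.g. to
// poll for already-queued events without blocking, which is what WaitMode::PollForEvents is for.
// The `done` flag and `do_some_other_work()` are hypothetical.
//
//     Core::EventLoop loop;
//     while (!done) {
//         do_some_other_work();
//         loop.pump(Core::EventLoop::WaitMode::PollForEvents);
//     }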

void EventLoop::post_event(Object& receiver, NonnullOwnPtr<Event>&& event, ShouldWake should_wake)
{
    Threading::MutexLocker lock(m_private->lock);
    dbgln_if(EVENTLOOP_DEBUG, "Core::EventLoop::post_event: ({}) << receiver={}, event={}", m_queued_events.size(), receiver, event);
    m_queued_events.empend(receiver, move(event));
    if (should_wake == ShouldWake::Yes)
        wake();
}

// NOTE: wake_once() only posts the given custom event if an identical one is not already queued
// for the receiver. This lets callers always post their "wake up" event instead of tracking with
// an atomic flag whether the receiving thread is still awake, while overloading the receiving
// event loop by at most one message. That flag-based scheme caused a racy, permanent deadlock in
// LibAudio's enqueuer thread: the main thread could skip the wake because it believed the
// enqueuer was still running, just as the enqueuer went to sleep, never to be woken again.
void EventLoop::wake_once(Object& receiver, int custom_event_type)
{
    Threading::MutexLocker lock(m_private->lock);
    dbgln_if(EVENTLOOP_DEBUG, "Core::EventLoop::wake_once: event type {}", custom_event_type);
    auto identical_events = m_queued_events.find_if([&](auto& queued_event) {
        if (queued_event.receiver.is_null())
            return false;
        auto const& event = queued_event.event;
        auto is_receiver_identical = queued_event.receiver.ptr() == &receiver;
        auto event_id_matches = event->type() == Event::Type::Custom && static_cast<CustomEvent const*>(event.ptr())->custom_type() == custom_event_type;
        return is_receiver_identical && event_id_matches;
    });
    // Event is not in the queue yet, so we want to wake.
    if (identical_events.is_end())
        post_event(receiver, make<CustomEvent>(custom_event_type), ShouldWake::Yes);
}
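
// Illustrative sketch (not part of this file): a worker object that another thread can nudge
// with wake_once(). The event type value and the Worker class are hypothetical; the receiver
// sees the CustomEvent in its Object::event() override.
//
//     static constexpr int WakeUpWorkerEvent = 0x1234;
//
//     class Worker : public Core::Object {
//         C_OBJECT(Worker)
//         virtual void event(Core::Event& event) override
//         {
//             if (event.type() == Core::Event::Type::Custom
//                 && static_cast<Core::CustomEvent&>(event).custom_type() == WakeUpWorkerEvent) {
//                 do_more_work(); // hypothetical
//                 return;
//             }
//             Core::Object::event(event);
//         }
//     };
//
//     // From another thread that holds a reference to the worker's event loop:
//     worker_event_loop.wake_once(*worker, WakeUpWorkerEvent);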

void EventLoop::add_job(NonnullRefPtr<Promise<NonnullRefPtr<Object>>> job_promise)
{
    m_pending_promises.append(move(job_promise));
}

SignalHandlers::SignalHandlers(int signo, void (*handle_signal)(int))
    : m_signo(signo)
    , m_original_handler(signal(signo, handle_signal))
{
    dbgln_if(EVENTLOOP_DEBUG, "Core::EventLoop: Registered handler for signal {}", m_signo);
}

SignalHandlers::~SignalHandlers()
{
    dbgln_if(EVENTLOOP_DEBUG, "Core::EventLoop: Unregistering handler for signal {}", m_signo);
    signal(m_signo, m_original_handler);
}

void SignalHandlers::dispatch()
{
    TemporaryChange change(m_calling_handlers, true);
    for (auto& handler : m_handlers)
        handler.value(m_signo);
    if (!m_handlers_pending.is_empty()) {
        // Apply pending adds/removes
        for (auto& handler : m_handlers_pending) {
            if (handler.value) {
                auto result = m_handlers.set(handler.key, move(handler.value));
                VERIFY(result == AK::HashSetResult::InsertedNewEntry);
            } else {
                m_handlers.remove(handler.key);
            }
        }
        m_handlers_pending.clear();
    }
}

int SignalHandlers::add(Function<void(int)>&& handler)
{
    int id = ++signals_info()->next_signal_id; // TODO: worry about wrapping and duplicates?
    if (m_calling_handlers)
        m_handlers_pending.set(id, move(handler));
    else
        m_handlers.set(id, move(handler));
    return id;
}

bool SignalHandlers::remove(int handler_id)
{
    VERIFY(handler_id != 0);
    if (m_calling_handlers) {
        auto it = m_handlers.find(handler_id);
        if (it != m_handlers.end()) {
            // Mark pending remove
            m_handlers_pending.set(handler_id, {});
            return true;
        }
        it = m_handlers_pending.find(handler_id);
        if (it != m_handlers_pending.end()) {
            if (!it->value)
                return false; // already was marked as deleted
            it->value = nullptr;
            return true;
        }
        return false;
    }
    return m_handlers.remove(handler_id);
}

void EventLoop::dispatch_signal(int signo)
{
    auto& info = *signals_info();
    auto handlers = info.signal_handlers.find(signo);
    if (handlers != info.signal_handlers.end()) {
        // Make sure we bump the ref count while dispatching the handlers!
        // This allows a handler to unregister/register while the handlers
        // are being called!
        auto handler = handlers->value;
        dbgln_if(EVENTLOOP_DEBUG, "Core::EventLoop: dispatching signal {}", signo);
        handler->dispatch();
    }
}

void EventLoop::handle_signal(int signo)
{
    VERIFY(signo != 0);
    // We MUST check if the current pid still matches, because there
    // is a window between fork() and exec() where a signal delivered
    // to our fork could be inadvertently routed to the parent process!
    if (getpid() == s_pid) {
        int nwritten = write(s_wake_pipe_fds[1], &signo, sizeof(signo));
        if (nwritten < 0) {
            perror("EventLoop::register_signal: write");
            VERIFY_NOT_REACHED();
        }
    } else {
        // We're a fork who received a signal, reset s_pid
        s_pid = 0;
    }
}

int EventLoop::register_signal(int signo, Function<void(int)> handler)
{
    VERIFY(signo != 0);
    auto& info = *signals_info();
    auto handlers = info.signal_handlers.find(signo);
    if (handlers == info.signal_handlers.end()) {
        auto signal_handlers = adopt_ref(*new SignalHandlers(signo, EventLoop::handle_signal));
        auto handler_id = signal_handlers->add(move(handler));
        info.signal_handlers.set(signo, move(signal_handlers));
        return handler_id;
    } else {
        return handlers->value->add(move(handler));
    }
}

void EventLoop::unregister_signal(int handler_id)
{
    VERIFY(handler_id != 0);
    int remove_signo = 0;
    auto& info = *signals_info();
    for (auto& h : info.signal_handlers) {
        auto& handlers = *h.value;
        if (handlers.remove(handler_id)) {
            if (handlers.is_empty())
                remove_signo = handlers.m_signo;
            break;
        }
    }
    if (remove_signo != 0)
        info.signal_handlers.remove(remove_signo);
}
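
// Illustrative sketch (not part of this file): registering a SIGCHLD handler on the current
// thread's event loop. The callback runs on the event loop, not inside the raw signal handler,
// because handle_signal() only forwards the signal number through the wake pipe.
//
//     Core::EventLoop loop;
//     int handler_id = Core::EventLoop::register_signal(SIGCHLD, [](int signo) {
//         dbgln("Child process state changed (signal {})", signo);
//     });
//     int rc = loop.exec();
//     Core::EventLoop::unregister_signal(handler_id);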

void EventLoop::notify_forked(ForkEvent event)
{
    VERIFY_EVENT_LOOP_INITIALIZED();
    switch (event) {
    case ForkEvent::Child:
        s_event_loop_stack->clear();
        s_timers->clear();
        s_notifiers->clear();
        s_wake_pipe_initialized = false;
        initialize_wake_pipes();
        if (auto* info = signals_info<false>()) {
            info->signal_handlers.clear();
            info->next_signal_id = 0;
        }
        s_pid = 0;
        return;
    }

    VERIFY_NOT_REACHED();
}

void EventLoop::wait_for_event(WaitMode mode)
{
    fd_set rfds;
    fd_set wfds;
retry:

    // Set up the file descriptors for select().
    // Basically, we translate high-level event information into low-level selectable file descriptors.
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);

    int max_fd = 0;
    auto add_fd_to_set = [&max_fd](int fd, fd_set& set) {
        FD_SET(fd, &set);
        if (fd > max_fd)
            max_fd = fd;
    };

    int max_fd_added = -1;
    // The wake pipe informs us of POSIX signals as well as manual calls to wake().
    add_fd_to_set(s_wake_pipe_fds[0], rfds);
    max_fd = max(max_fd, max_fd_added);

    for (auto& notifier : *s_notifiers) {
        if (notifier->event_mask() & Notifier::Read)
            add_fd_to_set(notifier->fd(), rfds);
        if (notifier->event_mask() & Notifier::Write)
            add_fd_to_set(notifier->fd(), wfds);
        if (notifier->event_mask() & Notifier::Exceptional)
            VERIFY_NOT_REACHED();
    }

    bool queued_events_is_empty;
    {
        Threading::MutexLocker locker(m_private->lock);
        queued_events_is_empty = m_queued_events.is_empty();
    }

    // Figure out how long to wait at maximum.
    // This mainly depends on the WaitMode and whether we have pending events, but also the next expiring timer.
    Time now;
    struct timeval timeout = { 0, 0 };
    bool should_wait_forever = false;
    if (mode == WaitMode::WaitForEvents && queued_events_is_empty) {
        auto next_timer_expiration = get_next_timer_expiration();
        if (next_timer_expiration.has_value()) {
            now = Time::now_monotonic_coarse();
            auto computed_timeout = next_timer_expiration.value() - now;
            if (computed_timeout.is_negative())
                computed_timeout = Time::zero();
            timeout = computed_timeout.to_timeval();
        } else {
            should_wait_forever = true;
        }
    }

try_select_again:
    // select() and wait for file system events, calls to wake(), POSIX signals, or timer expirations.
    int marked_fd_count = select(max_fd + 1, &rfds, &wfds, nullptr, should_wait_forever ? nullptr : &timeout);
    // Because POSIX, we might spuriously return from select() with EINTR; just select again.
    if (marked_fd_count < 0) {
        int saved_errno = errno;
        if (saved_errno == EINTR) {
            if (m_exit_requested)
                return;
            goto try_select_again;
        }
        dbgln("Core::EventLoop::wait_for_event: {} ({}: {})", marked_fd_count, saved_errno, strerror(saved_errno));
        VERIFY_NOT_REACHED();
    }

    // We woke up due to a call to wake() or a POSIX signal.
    // Handle signals and see whether we need to handle events as well.
    if (FD_ISSET(s_wake_pipe_fds[0], &rfds)) {
        int wake_events[8];
        ssize_t nread;
        // We might receive another signal while read()ing here. The signal will go to the handle_signal properly,
        // but we get interrupted. Therefore, just retry while we were interrupted.
        do {
            errno = 0;
            nread = read(s_wake_pipe_fds[0], wake_events, sizeof(wake_events));
            if (nread == 0)
                break;
        } while (nread < 0 && errno == EINTR);
        if (nread < 0) {
            perror("Core::EventLoop::wait_for_event: read from wake pipe");
            VERIFY_NOT_REACHED();
        }
        VERIFY(nread > 0);
        bool wake_requested = false;
        int event_count = nread / sizeof(wake_events[0]);
        for (int i = 0; i < event_count; i++) {
            if (wake_events[i] != 0)
                dispatch_signal(wake_events[i]);
            else
                wake_requested = true;
        }

        if (!wake_requested && nread == sizeof(wake_events))
            goto retry;
    }

    if (!s_timers->is_empty()) {
        now = Time::now_monotonic_coarse();
    }

    // Handle expired timers.
    for (auto& it : *s_timers) {
        auto& timer = *it.value;
        if (!timer.has_expired(now))
            continue;
        auto owner = timer.owner.strong_ref();
        if (timer.fire_when_not_visible == TimerShouldFireWhenNotVisible::No
            && owner && !owner->is_visible_for_timer_purposes()) {
            continue;
        }

        dbgln_if(EVENTLOOP_DEBUG, "Core::EventLoop: Timer {} has expired, sending Core::TimerEvent to {}", timer.timer_id, *owner);
        if (owner)
            post_event(*owner, make<TimerEvent>(timer.timer_id));
        if (timer.should_reload) {
            timer.reload(now);
        } else {
            // FIXME: Support removing expired timers that don't want to reload.
            VERIFY_NOT_REACHED();
        }
    }

    if (!marked_fd_count)
        return;

    // Handle file system notifiers by making them normal events.
    for (auto& notifier : *s_notifiers) {
        if (FD_ISSET(notifier->fd(), &rfds)) {
            if (notifier->event_mask() & Notifier::Event::Read)
                post_event(*notifier, make<NotifierReadEvent>(notifier->fd()));
        }
        if (FD_ISSET(notifier->fd(), &wfds)) {
            if (notifier->event_mask() & Notifier::Event::Write)
                post_event(*notifier, make<NotifierWriteEvent>(notifier->fd()));
        }
    }
}

bool EventLoopTimer::has_expired(Time const& now) const
{
    return now > fire_time;
}

void EventLoopTimer::reload(Time const& now)
{
    fire_time = now + interval;
}

Optional<Time> EventLoop::get_next_timer_expiration()
{
    auto now = Time::now_monotonic_coarse();
    Optional<Time> soonest {};
    for (auto& it : *s_timers) {
        auto& fire_time = it.value->fire_time;
        auto owner = it.value->owner.strong_ref();
        if (it.value->fire_when_not_visible == TimerShouldFireWhenNotVisible::No
            && owner && !owner->is_visible_for_timer_purposes()) {
            continue;
        }
        // OPTIMIZATION: If we have a timer that needs to fire right away, we can stop looking here.
        // FIXME: This whole operation could be O(1) with a better data structure.
        if (fire_time < now)
            return now;
        if (!soonest.has_value() || fire_time < soonest.value())
            soonest = fire_time;
    }
    return soonest;
}

int EventLoop::register_timer(Object& object, int milliseconds, bool should_reload, TimerShouldFireWhenNotVisible fire_when_not_visible)
{
    VERIFY_EVENT_LOOP_INITIALIZED();
    VERIFY(milliseconds >= 0);
    auto timer = make<EventLoopTimer>();
    timer->owner = object;
    timer->interval = Time::from_milliseconds(milliseconds);
    timer->reload(Time::now_monotonic_coarse());
    timer->should_reload = should_reload;
    timer->fire_when_not_visible = fire_when_not_visible;
    int timer_id = s_id_allocator.with_locked([](auto& allocator) { return allocator->allocate(); });
    timer->timer_id = timer_id;
    s_timers->set(timer_id, move(timer));
    return timer_id;
}
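
// Illustrative sketch (not part of this file): driving an Object with a repeating timer via
// the low-level register_timer() API. Most code would use Core::Timer instead; the Ticker
// class here is hypothetical, and it receives expirations through Object::timer_event().
//
//     class Ticker : public Core::Object {
//         C_OBJECT(Ticker)
//         virtual void timer_event(Core::TimerEvent&) override { dbgln("tick"); }
//     };
//
//     auto ticker = Ticker::construct();
//     int timer_id = Core::EventLoop::register_timer(*ticker, 1000, true, Core::TimerShouldFireWhenNotVisible::No);
//     // ... later:
//     Core::EventLoop::unregister_timer(timer_id);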

bool EventLoop::unregister_timer(int timer_id)
{
    VERIFY_EVENT_LOOP_INITIALIZED();
    s_id_allocator.with_locked([&](auto& allocator) { allocator->deallocate(timer_id); });
    auto it = s_timers->find(timer_id);
    if (it == s_timers->end())
        return false;
    s_timers->remove(it);
    return true;
}

void EventLoop::register_notifier(Badge<Notifier>, Notifier& notifier)
{
    VERIFY_EVENT_LOOP_INITIALIZED();
    s_notifiers->set(&notifier);
}

void EventLoop::unregister_notifier(Badge<Notifier>, Notifier& notifier)
{
    VERIFY_EVENT_LOOP_INITIALIZED();
    s_notifiers->remove(&notifier);
}
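
// Illustrative sketch (not part of this file): notifiers register themselves with the loop
// through the Badge-protected functions above, so user code just constructs a Core::Notifier
// around a file descriptor and reacts to readiness on the event loop. The fd and callback are
// hypothetical, and the sketch assumes Notifier's on_ready_to_read hook.
//
//     auto notifier = Core::Notifier::construct(some_fd, Core::Notifier::Read);
//     notifier->on_ready_to_read = [some_fd] {
//         char buffer[256];
//         auto nread = read(some_fd, buffer, sizeof(buffer));
//         dbgln("Notifier: read {} bytes", nread);
//     };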

void EventLoop::wake_current()
{
    EventLoop::current().wake();
}

void EventLoop::wake()
{
    dbgln_if(EVENTLOOP_DEBUG, "Core::EventLoop::wake()");
    int wake_event = 0;
    int nwritten = write((*m_wake_pipe_fds)[1], &wake_event, sizeof(wake_event));
    if (nwritten < 0) {
        perror("EventLoop::wake: write");
        VERIFY_NOT_REACHED();
    }
}

EventLoop::QueuedEvent::QueuedEvent(Object& receiver, NonnullOwnPtr<Event> event)
    : receiver(receiver)
    , event(move(event))
{
}

EventLoop::QueuedEvent::QueuedEvent(QueuedEvent&& other)
    : receiver(other.receiver)
    , event(move(other.event))
{
}

}