2017-09-07 23:51:28 -06:00
|
|
|
#ifndef Py_INTERNAL_CEVAL_H
|
|
|
|
|
#define Py_INTERNAL_CEVAL_H
|
|
|
|
|
#ifdef __cplusplus
|
|
|
|
|
extern "C" {
|
|
|
|
|
#endif
|
|
|
|
|
|
2019-04-17 23:02:26 +02:00
|
|
|
#ifndef Py_BUILD_CORE
|
|
|
|
|
# error "this header requires Py_BUILD_CORE define"
|
2018-11-09 13:03:37 +01:00
|
|
|
#endif
|
|
|
|
|
|
2025-03-19 23:01:32 +01:00
|
|
|
#include "dynamic_annotations.h" // _Py_ANNOTATE_RWLOCK_CREATE
|
2023-10-18 00:33:50 +09:00
|
|
|
|
2025-03-19 23:01:32 +01:00
|
|
|
#include "pycore_code.h" // _PyCode_GetTLBCFast()
|
2023-08-21 20:05:59 +02:00
|
|
|
#include "pycore_interp.h" // PyInterpreterState.eval_frame
|
|
|
|
|
#include "pycore_pystate.h" // _PyThreadState_GET()
|
2025-03-19 23:01:32 +01:00
|
|
|
#include "pycore_stats.h" // EVAL_CALL_STAT_INC()
|
2025-03-19 15:23:32 +01:00
|
|
|
#include "pycore_typedefs.h" // _PyInterpreterFrame
|
|
|
|
|
|
2023-08-21 20:05:59 +02:00
|
|
|
|
2019-10-02 23:51:20 +02:00
|
|
|
/* Forward declarations */
|
|
|
|
|
struct _ceval_runtime_state;
|
2019-11-14 12:20:46 +01:00
|
|
|
|
2023-08-24 20:25:22 +02:00
|
|
|
// Export for '_lsprof' shared extension
|
|
|
|
|
PyAPI_FUNC(int) _PyEval_SetProfile(PyThreadState *tstate, Py_tracefunc func, PyObject *arg);
|
2025-08-13 14:15:12 -04:00
|
|
|
extern int _PyEval_SetProfileAllThreads(PyInterpreterState *interp, Py_tracefunc func, PyObject *arg);
|
2023-08-24 20:25:22 +02:00
|
|
|
|
|
|
|
|
extern int _PyEval_SetTrace(PyThreadState *tstate, Py_tracefunc func, PyObject *arg);
|
2025-08-13 14:15:12 -04:00
|
|
|
extern int _PyEval_SetTraceAllThreads(PyInterpreterState *interp, Py_tracefunc func, PyObject *arg);
|
2023-08-24 20:25:22 +02:00
|
|
|
|
2023-11-03 09:39:50 -07:00
|
|
|
extern int _PyEval_SetOpcodeTrace(PyFrameObject *f, bool enable);
|
|
|
|
|
|
2023-08-24 20:25:22 +02:00
|
|
|
// Helper to look up a builtin object
|
|
|
|
|
// Export for 'array' shared extension
|
|
|
|
|
PyAPI_FUNC(PyObject*) _PyEval_GetBuiltin(PyObject *);
|
|
|
|
|
|
|
|
|
|
extern void _PyEval_SetSwitchInterval(unsigned long microseconds);
|
|
|
|
|
extern unsigned long _PyEval_GetSwitchInterval(void);
|
|
|
|
|
|
|
|
|
|
// Export for '_queue' shared extension
|
|
|
|
|
PyAPI_FUNC(int) _PyEval_MakePendingCalls(PyThreadState *);
|
|
|
|
|
|
2022-01-13 17:09:24 -07:00
|
|
|
#ifndef Py_DEFAULT_RECURSION_LIMIT
|
2022-10-05 01:34:03 +01:00
|
|
|
# define Py_DEFAULT_RECURSION_LIMIT 1000
|
2022-01-13 17:09:24 -07:00
|
|
|
#endif
|
|
|
|
|
|
2020-03-17 18:56:44 +01:00
|
|
|
extern void _Py_FinishPendingCalls(PyThreadState *tstate);
|
2023-12-07 14:33:40 -05:00
|
|
|
extern void _PyEval_InitState(PyInterpreterState *);
|
2024-02-20 06:57:48 -08:00
|
|
|
extern void _PyEval_SignalReceived(void);
|
2023-08-24 18:06:53 +02:00
|
|
|
|
2023-10-09 07:39:51 -06:00
|
|
|
// bitwise flags:
|
|
|
|
|
#define _Py_PENDING_MAINTHREADONLY 1
|
|
|
|
|
#define _Py_PENDING_RAWFREE 2
|
|
|
|
|
|
2024-04-25 19:05:51 -06:00
|
|
|
typedef int _Py_add_pending_call_result;
|
|
|
|
|
#define _Py_ADD_PENDING_SUCCESS 0
|
|
|
|
|
#define _Py_ADD_PENDING_FULL -1
|
|
|
|
|
|
2023-07-25 05:16:28 +02:00
|
|
|
// Export for '_testinternalcapi' shared extension
|
2024-04-25 19:05:51 -06:00
|
|
|
PyAPI_FUNC(_Py_add_pending_call_result) _PyEval_AddPendingCall(
|
2020-04-08 23:35:05 +02:00
|
|
|
PyInterpreterState *interp,
|
2023-09-19 15:01:34 -06:00
|
|
|
_Py_pending_call_func func,
|
2023-06-13 15:02:19 -06:00
|
|
|
void *arg,
|
2023-10-09 07:39:51 -06:00
|
|
|
int flags);
|
|
|
|
|
|
2020-04-15 01:16:24 +09:00
|
|
|
#ifdef HAVE_FORK
|
2020-06-02 18:44:54 +02:00
|
|
|
extern PyStatus _PyEval_ReInitThreads(PyThreadState *tstate);
|
2020-04-15 01:16:24 +09:00
|
|
|
#endif
|
2017-09-07 23:51:28 -06:00
|
|
|
|
2022-03-21 03:03:22 +01:00
|
|
|
// Used by sys.call_tracing()
|
|
|
|
|
extern PyObject* _PyEval_CallTracing(PyObject *func, PyObject *args);
|
|
|
|
|
|
2022-03-21 01:15:32 +01:00
|
|
|
// Used by sys.get_asyncgen_hooks()
|
|
|
|
|
extern PyObject* _PyEval_GetAsyncGenFirstiter(void);
|
|
|
|
|
extern PyObject* _PyEval_GetAsyncGenFinalizer(void);
|
|
|
|
|
|
|
|
|
|
// Used by sys.set_asyncgen_hooks()
|
|
|
|
|
extern int _PyEval_SetAsyncGenFirstiter(PyObject *);
|
|
|
|
|
extern int _PyEval_SetAsyncGenFinalizer(PyObject *);
|
|
|
|
|
|
2022-03-21 02:24:00 +01:00
|
|
|
// Used by sys.get_coroutine_origin_tracking_depth()
|
|
|
|
|
// and sys.set_coroutine_origin_tracking_depth()
|
|
|
|
|
extern int _PyEval_GetCoroutineOriginTrackingDepth(void);
|
|
|
|
|
extern int _PyEval_SetCoroutineOriginTrackingDepth(int depth);
|
|
|
|
|
|
|
|
|
|
extern void _PyEval_Fini(void);
|
2019-06-03 21:30:58 +09:00
|
|
|
|
2021-02-18 19:20:16 +01:00
|
|
|
|
2021-02-20 15:17:18 +01:00
|
|
|
extern PyObject* _PyEval_GetBuiltins(PyThreadState *tstate);
|
2021-02-18 19:20:16 +01:00
|
|
|
|
2022-08-30 18:11:18 +01:00
|
|
|
// Trampoline API
|
|
|
|
|
|
|
|
|
|
// Callback table for the perf trampoline machinery (registered via
// _PyPerfTrampoline_SetCallbacks() below). Embedders supply these hooks to
// observe trampoline creation; the returned/passed 'state' pointer is opaque
// to the interpreter and owned by the callback implementation.
typedef struct {
    // Callback to initialize the trampoline state
    void* (*init_state)(void);
    // Callback to register every trampoline being created
    void (*write_state)(void* state, const void *code_addr,
                        unsigned int code_size, PyCodeObject* code);
    // Callback to free the trampoline state
    int (*free_state)(void* state);
} _PyPerf_Callbacks;
|
|
|
|
|
|
|
|
|
|
extern int _PyPerfTrampoline_SetCallbacks(_PyPerf_Callbacks *);
|
|
|
|
|
extern void _PyPerfTrampoline_GetCallbacks(_PyPerf_Callbacks *);
|
|
|
|
|
extern int _PyPerfTrampoline_Init(int activate);
|
|
|
|
|
extern int _PyPerfTrampoline_Fini(void);
|
2023-12-01 13:20:51 +00:00
|
|
|
extern void _PyPerfTrampoline_FreeArenas(void);
|
2022-08-30 18:11:18 +01:00
|
|
|
extern int _PyIsPerfTrampolineActive(void);
|
|
|
|
|
extern PyStatus _PyPerfTrampoline_AfterFork_Child(void);
|
|
|
|
|
#ifdef PY_HAVE_PERF_TRAMPOLINE
|
|
|
|
|
extern _PyPerf_Callbacks _Py_perfmap_callbacks;
|
2024-05-05 03:07:29 +02:00
|
|
|
extern _PyPerf_Callbacks _Py_perfmap_jit_callbacks;
|
2022-08-30 18:11:18 +01:00
|
|
|
#endif
|
2021-02-18 19:20:16 +01:00
|
|
|
|
2019-11-14 12:20:46 +01:00
|
|
|
static inline PyObject*
|
2025-03-19 15:23:32 +01:00
|
|
|
_PyEval_EvalFrame(PyThreadState *tstate, _PyInterpreterFrame *frame, int throwflag)
|
2019-11-14 12:20:46 +01:00
|
|
|
{
|
2022-05-27 16:31:41 +01:00
|
|
|
EVAL_CALL_STAT_INC(EVAL_CALL_TOTAL);
|
2021-10-11 11:34:02 +01:00
|
|
|
if (tstate->interp->eval_frame == NULL) {
|
|
|
|
|
return _PyEval_EvalFrameDefault(tstate, frame, throwflag);
|
|
|
|
|
}
|
2021-07-26 11:22:16 +01:00
|
|
|
return tstate->interp->eval_frame(tstate, frame, throwflag);
|
2019-11-14 12:20:46 +01:00
|
|
|
}
|
|
|
|
|
|
2025-08-21 10:40:53 +01:00
|
|
|
#ifdef _Py_TIER2
|
|
|
|
|
#ifdef _Py_JIT
|
|
|
|
|
_Py_CODEUNIT *_Py_LazyJitTrampoline(
|
|
|
|
|
struct _PyExecutorObject *current_executor, _PyInterpreterFrame *frame,
|
|
|
|
|
_PyStackRef *stack_pointer, PyThreadState *tstate
|
|
|
|
|
);
|
|
|
|
|
#else
|
|
|
|
|
_Py_CODEUNIT *_PyTier2Interpreter(
|
|
|
|
|
struct _PyExecutorObject *current_executor, _PyInterpreterFrame *frame,
|
|
|
|
|
_PyStackRef *stack_pointer, PyThreadState *tstate
|
|
|
|
|
);
|
|
|
|
|
#endif
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
extern _PyJitEntryFuncPtr _Py_jit_entry;
|
|
|
|
|
|
2022-03-21 02:24:00 +01:00
|
|
|
extern PyObject*
|
2021-02-01 10:42:03 +00:00
|
|
|
_PyEval_Vector(PyThreadState *tstate,
|
2021-11-23 09:53:24 +00:00
|
|
|
PyFunctionObject *func, PyObject *locals,
|
2021-02-01 10:42:03 +00:00
|
|
|
PyObject* const* args, size_t argcount,
|
|
|
|
|
PyObject *kwnames);
|
2019-11-16 01:03:22 +01:00
|
|
|
|
2023-05-05 13:23:00 -06:00
|
|
|
extern int _PyEval_ThreadsInitialized(void);
|
2023-12-12 19:20:21 -05:00
|
|
|
extern void _PyEval_InitGIL(PyThreadState *tstate, int own_gil);
|
2021-02-19 15:10:45 +01:00
|
|
|
extern void _PyEval_FiniGIL(PyInterpreterState *interp);
|
2020-03-09 21:24:14 +01:00
|
|
|
|
2024-05-23 16:59:35 -04:00
|
|
|
extern void _PyEval_AcquireLock(PyThreadState *tstate);
|
2024-05-06 20:07:23 -07:00
|
|
|
|
2024-05-23 16:59:35 -04:00
|
|
|
extern void _PyEval_ReleaseLock(PyInterpreterState *, PyThreadState *,
|
|
|
|
|
int final_release);
|
2020-03-18 02:26:04 +01:00
|
|
|
|
2024-05-06 20:07:23 -07:00
|
|
|
#ifdef Py_GIL_DISABLED
|
|
|
|
|
// Returns 0 or 1 if the GIL for the given thread's interpreter is disabled or
|
|
|
|
|
// enabled, respectively.
|
|
|
|
|
//
|
|
|
|
|
// The enabled state of the GIL will not change while one or more threads are
|
|
|
|
|
// attached.
|
|
|
|
|
static inline int
|
|
|
|
|
_PyEval_IsGILEnabled(PyThreadState *tstate)
|
|
|
|
|
{
|
2024-06-02 10:19:02 -04:00
|
|
|
struct _gil_runtime_state *gil = tstate->interp->ceval.gil;
|
|
|
|
|
return _Py_atomic_load_int_relaxed(&gil->enabled) != 0;
|
2024-05-06 20:07:23 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Enable or disable the GIL used by the interpreter that owns tstate, which
|
|
|
|
|
// must be the current thread. This may affect other interpreters, if the GIL
|
|
|
|
|
// is shared. All three functions will be no-ops (and return 0) if the
|
|
|
|
|
// interpreter's `enable_gil' config is not _PyConfig_GIL_DEFAULT.
|
|
|
|
|
//
|
|
|
|
|
// Every call to _PyEval_EnableGILTransient() must be paired with exactly one
|
|
|
|
|
// call to either _PyEval_EnableGILPermanent() or
|
|
|
|
|
// _PyEval_DisableGIL(). _PyEval_EnableGILPermanent() and _PyEval_DisableGIL()
|
|
|
|
|
// must only be called while the GIL is enabled from a call to
|
|
|
|
|
// _PyEval_EnableGILTransient().
|
|
|
|
|
//
|
|
|
|
|
// _PyEval_EnableGILTransient() returns 1 if it enabled the GIL, or 0 if the
|
|
|
|
|
// GIL was already enabled, whether transiently or permanently. The caller will
|
|
|
|
|
// hold the GIL upon return.
|
|
|
|
|
//
|
|
|
|
|
// _PyEval_EnableGILPermanent() returns 1 if it permanently enabled the GIL
|
|
|
|
|
// (which must already be enabled), or 0 if it was already permanently
|
|
|
|
|
// enabled. Once _PyEval_EnableGILPermanent() has been called once, all
|
|
|
|
|
// subsequent calls to any of the three functions will be no-ops.
|
|
|
|
|
//
|
|
|
|
|
// _PyEval_DisableGIL() returns 1 if it disabled the GIL, or 0 if the GIL was
|
|
|
|
|
// kept enabled because of another request, whether transient or permanent.
|
|
|
|
|
//
|
|
|
|
|
// All three functions must be called by an attached thread (this implies that
|
|
|
|
|
// if the GIL is enabled, the current thread must hold it).
|
|
|
|
|
extern int _PyEval_EnableGILTransient(PyThreadState *tstate);
|
|
|
|
|
extern int _PyEval_EnableGILPermanent(PyThreadState *tstate);
|
|
|
|
|
extern int _PyEval_DisableGIL(PyThreadState *state);
|
2024-11-04 11:13:32 -08:00
|
|
|
|
|
|
|
|
|
|
|
|
|
static inline _Py_CODEUNIT *
|
|
|
|
|
_PyEval_GetExecutableCode(PyThreadState *tstate, PyCodeObject *co)
|
|
|
|
|
{
|
|
|
|
|
_Py_CODEUNIT *bc = _PyCode_GetTLBCFast(tstate, co);
|
|
|
|
|
if (bc != NULL) {
|
|
|
|
|
return bc;
|
|
|
|
|
}
|
|
|
|
|
return _PyCode_GetTLBC(co);
|
|
|
|
|
}
|
|
|
|
|
|
2024-05-06 20:07:23 -07:00
|
|
|
#endif
|
|
|
|
|
|
2021-03-08 22:56:37 +01:00
|
|
|
extern void _PyEval_DeactivateOpCache(void);
|
|
|
|
|
|
2020-03-13 10:19:38 +01:00
|
|
|
|
|
|
|
|
/* --- _Py_EnterRecursiveCall() ----------------------------------------- */
|
|
|
|
|
|
2025-02-25 09:24:48 +00:00
|
|
|
/* Cheap test for C-stack overflow using the machine stack pointer. */
static inline int _Py_MakeRecCheck(PyThreadState *tstate) {
    uintptr_t sp = _Py_get_machine_stack_pointer();
    _PyThreadStateImpl *ts = (_PyThreadStateImpl *)tstate;
    // Overflow if the stack pointer lies between the soft limit and the base
    // of the hardware stack. Beyond the hardware stack base, assume the
    // recorded stack limits are wrong and report no overflow; that can
    // happen with limited platform support or user-space threads.
#if _Py_STACK_GROWS_DOWN
    uintptr_t stack_base = ts->c_stack_soft_limit - 2 * _PyOS_STACK_MARGIN_BYTES;
    return sp < ts->c_stack_soft_limit && sp >= stack_base;
#else
    uintptr_t stack_base = ts->c_stack_soft_limit + 2 * _PyOS_STACK_MARGIN_BYTES;
    return sp > ts->c_stack_soft_limit && sp <= stack_base;
#endif
}
|
2020-03-13 10:19:38 +01:00
|
|
|
|
2023-08-24 18:06:53 +02:00
|
|
|
// Export for '_json' shared extension, used via _Py_EnterRecursiveCall()
|
|
|
|
|
// static inline function.
|
2020-03-13 10:19:38 +01:00
|
|
|
PyAPI_FUNC(int) _Py_CheckRecursiveCall(
|
|
|
|
|
PyThreadState *tstate,
|
|
|
|
|
const char *where);
|
|
|
|
|
|
2025-09-17 16:04:02 +01:00
|
|
|
PyAPI_FUNC(int) _Py_CheckRecursiveCallPy(
|
2022-10-05 01:34:03 +01:00
|
|
|
PyThreadState *tstate);
|
|
|
|
|
|
2022-05-04 13:30:23 +02:00
|
|
|
/* Enter a recursive C call. Returns non-zero (with an exception set by
   _Py_CheckRecursiveCall) if the recursion limit has been exceeded. */
static inline int _Py_EnterRecursiveCallTstate(PyThreadState *tstate,
                                               const char *where) {
    // Only run the full check when the cheap stack-pointer test indicates
    // that we are near the soft limit.
    if (!_Py_MakeRecCheck(tstate)) {
        return 0;
    }
    return _Py_CheckRecursiveCall(tstate, where);
}
|
|
|
|
|
|
2022-05-04 13:30:23 +02:00
|
|
|
/* Convenience wrapper around _Py_EnterRecursiveCallTstate() that uses the
   current thread state. */
static inline int _Py_EnterRecursiveCall(const char *where) {
    return _Py_EnterRecursiveCallTstate(_PyThreadState_GET(), where);
}
|
|
|
|
|
|
2025-02-25 09:24:48 +00:00
|
|
|
// Recursion depth is derived from the machine stack pointer (see
// _Py_MakeRecCheck above), so leaving a recursive call needs no bookkeeping;
// 'tstate' is accepted only for API symmetry with the enter function.
static inline void _Py_LeaveRecursiveCallTstate(PyThreadState *tstate) {
    (void)tstate;
}
|
|
|
|
|
|
|
|
|
|
PyAPI_FUNC(void) _Py_InitializeRecursionLimits(PyThreadState *tstate);
|
|
|
|
|
|
|
|
|
|
/* Report whether the machine stack pointer has crossed the soft stack
   limit recorded in the thread state. */
static inline int _Py_ReachedRecursionLimit(PyThreadState *tstate) {
    _PyThreadStateImpl *ts = (_PyThreadStateImpl *)tstate;
    uintptr_t sp = _Py_get_machine_stack_pointer();
    // Limits must have been initialized (see _Py_InitializeRecursionLimits).
    assert(ts->c_stack_hard_limit != 0);
#if _Py_STACK_GROWS_DOWN
    return sp <= ts->c_stack_soft_limit;
#else
    return sp >= ts->c_stack_soft_limit;
#endif
}
|
|
|
|
|
|
2025-11-27 13:22:15 +01:00
|
|
|
// Export for test_peg_generator
|
|
|
|
|
PyAPI_FUNC(int) _Py_ReachedRecursionLimitWithMargin(
|
2025-11-27 12:32:00 +01:00
|
|
|
PyThreadState *tstate,
|
|
|
|
|
int margin_count);
|
|
|
|
|
|
2022-05-04 13:30:23 +02:00
|
|
|
// No-op: recursion depth is tracked via the machine stack pointer, so there
// is no counter to decrement on exit.
static inline void _Py_LeaveRecursiveCall(void) {
}
|
|
|
|
|
|
2025-03-19 15:23:32 +01:00
|
|
|
extern _PyInterpreterFrame* _PyEval_GetFrame(void);
|
2021-07-26 11:22:16 +01:00
|
|
|
|
2025-06-16 17:34:19 -06:00
|
|
|
extern PyObject * _PyEval_GetGlobalsFromRunningMain(PyThreadState *);
|
|
|
|
|
extern int _PyEval_EnsureBuiltins(
|
|
|
|
|
PyThreadState *,
|
|
|
|
|
PyObject *,
|
|
|
|
|
PyObject **p_builtins);
|
|
|
|
|
extern int _PyEval_EnsureBuiltinsWithModule(
|
|
|
|
|
PyThreadState *,
|
|
|
|
|
PyObject *,
|
|
|
|
|
PyObject **p_builtins);
|
|
|
|
|
|
2024-04-25 11:32:47 +01:00
|
|
|
PyAPI_FUNC(PyObject *)_Py_MakeCoro(PyFunctionObject *func);
|
2020-03-13 10:19:38 +01:00
|
|
|
|
2023-07-03 21:28:27 +01:00
|
|
|
/* Handle signals, pending calls, GIL drop request
|
|
|
|
|
and asynchronous exception */
|
2024-02-29 08:11:28 -08:00
|
|
|
PyAPI_FUNC(int) _Py_HandlePending(PyThreadState *tstate);
|
2022-08-24 14:21:01 +01:00
|
|
|
|
2023-07-05 17:05:02 -06:00
|
|
|
extern PyObject * _PyEval_GetFrameLocals(void);
|
2022-08-24 14:21:01 +01:00
|
|
|
|
2024-02-29 08:11:28 -08:00
|
|
|
typedef PyObject *(*conversion_func)(PyObject *);
|
|
|
|
|
|
|
|
|
|
PyAPI_DATA(const binaryfunc) _PyEval_BinaryOps[];
|
|
|
|
|
PyAPI_DATA(const conversion_func) _PyEval_ConversionFuncs[];
|
|
|
|
|
|
2024-06-18 12:17:46 +01:00
|
|
|
typedef struct _special_method {
|
|
|
|
|
PyObject *name;
|
|
|
|
|
const char *error;
|
2025-04-19 10:44:01 +02:00
|
|
|
const char *error_suggestion; // improved optional suggestion
|
2024-06-18 12:17:46 +01:00
|
|
|
} _Py_SpecialMethod;
|
|
|
|
|
|
|
|
|
|
PyAPI_DATA(const _Py_SpecialMethod) _Py_SpecialMethods[];
|
2024-10-07 14:56:39 +01:00
|
|
|
PyAPI_DATA(const size_t) _Py_FunctionAttributeOffsets[];
|
2024-06-18 12:17:46 +01:00
|
|
|
|
2024-02-29 08:11:28 -08:00
|
|
|
PyAPI_FUNC(int) _PyEval_CheckExceptStarTypeValid(PyThreadState *tstate, PyObject* right);
|
|
|
|
|
PyAPI_FUNC(int) _PyEval_CheckExceptTypeValid(PyThreadState *tstate, PyObject* right);
|
2025-01-25 13:00:23 +00:00
|
|
|
PyAPI_FUNC(int) _PyEval_ExceptionGroupMatch(_PyInterpreterFrame *, PyObject* exc_value, PyObject *match_type, PyObject **match, PyObject **rest);
|
2024-02-29 08:11:28 -08:00
|
|
|
PyAPI_FUNC(void) _PyEval_FormatAwaitableError(PyThreadState *tstate, PyTypeObject *type, int oparg);
|
|
|
|
|
PyAPI_FUNC(void) _PyEval_FormatExcCheckArg(PyThreadState *tstate, PyObject *exc, const char *format_str, PyObject *obj);
|
|
|
|
|
PyAPI_FUNC(void) _PyEval_FormatExcUnbound(PyThreadState *tstate, PyCodeObject *co, int oparg);
|
|
|
|
|
PyAPI_FUNC(void) _PyEval_FormatKwargsError(PyThreadState *tstate, PyObject *func, PyObject *kwargs);
|
2024-07-18 14:24:58 -07:00
|
|
|
PyAPI_FUNC(PyObject *) _PyEval_ImportFrom(PyThreadState *, PyObject *, PyObject *);
|
2025-12-06 15:44:07 +00:00
|
|
|
PyAPI_FUNC(PyObject *) _PyEval_LazyImportName(PyThreadState *tstate, PyObject *builtins, PyObject *globals,
|
2025-09-19 00:54:01 -07:00
|
|
|
PyObject *locals, PyObject *name, PyObject *fromlist, PyObject *level, int lazy);
|
2025-09-16 15:10:28 -07:00
|
|
|
PyAPI_FUNC(PyObject *) _PyEval_LazyImportFrom(PyThreadState *tstate, PyObject *v, PyObject *name);
|
|
|
|
|
PyAPI_FUNC(PyObject *) _PyEval_ImportName(PyThreadState *tstate, PyObject *builtins, PyObject *globals, PyObject *locals,
|
|
|
|
|
PyObject *name, PyObject *fromlist, PyObject *level);
|
2025-09-22 07:42:54 -07:00
|
|
|
PyObject *
|
|
|
|
|
_PyEval_ImportNameWithImport(PyThreadState *tstate, PyObject *import_func, PyObject *globals, PyObject *locals,
|
|
|
|
|
PyObject *name, PyObject *fromlist, PyObject *level);
|
2024-02-29 08:11:28 -08:00
|
|
|
PyAPI_FUNC(PyObject *)_PyEval_MatchClass(PyThreadState *tstate, PyObject *subject, PyObject *type, Py_ssize_t nargs, PyObject *kwargs);
|
|
|
|
|
PyAPI_FUNC(PyObject *)_PyEval_MatchKeys(PyThreadState *tstate, PyObject *map, PyObject *keys);
|
2024-07-29 12:17:47 -07:00
|
|
|
PyAPI_FUNC(void) _PyEval_MonitorRaise(PyThreadState *tstate, _PyInterpreterFrame *frame, _Py_CODEUNIT *instr);
|
2025-10-31 13:09:22 +03:00
|
|
|
PyAPI_FUNC(bool) _PyEval_NoToolsForUnwind(PyThreadState *tstate);
|
2025-02-12 17:44:59 +00:00
|
|
|
PyAPI_FUNC(int) _PyEval_UnpackIterableStackRef(PyThreadState *tstate, PyObject *v, int argcnt, int argcntafter, _PyStackRef *sp);
|
2024-02-29 08:11:28 -08:00
|
|
|
PyAPI_FUNC(void) _PyEval_FrameClearAndPop(PyThreadState *tstate, _PyInterpreterFrame *frame);
|
2024-06-27 03:10:43 +08:00
|
|
|
PyAPI_FUNC(PyObject **) _PyObjectArray_FromStackRefArray(_PyStackRef *input, Py_ssize_t nargs, PyObject **scratch);
|
|
|
|
|
|
|
|
|
|
PyAPI_FUNC(void) _PyObjectArray_Free(PyObject **array, PyObject **scratch);
|
2023-07-20 13:37:19 -07:00
|
|
|
|
2024-08-02 16:31:17 +01:00
|
|
|
PyAPI_FUNC(PyObject *) _PyEval_GetANext(PyObject *aiter);
|
2024-09-14 00:23:51 +08:00
|
|
|
PyAPI_FUNC(void) _PyEval_LoadGlobalStackRef(PyObject *globals, PyObject *builtins, PyObject *name, _PyStackRef *writeto);
|
2024-08-02 16:31:17 +01:00
|
|
|
PyAPI_FUNC(PyObject *) _PyEval_GetAwaitable(PyObject *iterable, int oparg);
|
|
|
|
|
PyAPI_FUNC(PyObject *) _PyEval_LoadName(PyThreadState *tstate, _PyInterpreterFrame *frame, PyObject *name);
|
2024-10-07 14:56:39 +01:00
|
|
|
PyAPI_FUNC(int)
|
|
|
|
|
_Py_Check_ArgsIterable(PyThreadState *tstate, PyObject *func, PyObject *args);
|
2022-10-05 01:34:03 +01:00
|
|
|
|
2025-04-19 10:44:01 +02:00
|
|
|
/*
|
|
|
|
|
* Indicate whether a special method of given 'oparg' can use the (improved)
|
|
|
|
|
* alternative error message instead. Only methods loaded by LOAD_SPECIAL
|
|
|
|
|
* support alternative error messages.
|
|
|
|
|
*
|
|
|
|
|
* Symbol is exported for the JIT (see discussion on GH-132218).
|
|
|
|
|
*/
|
|
|
|
|
PyAPI_FUNC(int)
|
|
|
|
|
_PyEval_SpecialMethodCanSuggest(PyObject *self, int oparg);
|
|
|
|
|
|
2024-02-20 06:57:48 -08:00
|
|
|
/* Bits that can be set in PyThreadState.eval_breaker */
|
|
|
|
|
#define _PY_GIL_DROP_REQUEST_BIT (1U << 0)
|
|
|
|
|
#define _PY_SIGNALS_PENDING_BIT (1U << 1)
|
|
|
|
|
#define _PY_CALLS_TO_DO_BIT (1U << 2)
|
|
|
|
|
#define _PY_ASYNC_EXCEPTION_BIT (1U << 3)
|
|
|
|
|
#define _PY_GC_SCHEDULED_BIT (1U << 4)
|
|
|
|
|
#define _PY_EVAL_PLEASE_STOP_BIT (1U << 5)
|
|
|
|
|
#define _PY_EVAL_EXPLICIT_MERGE_BIT (1U << 6)
|
2024-09-26 17:35:42 -07:00
|
|
|
#define _PY_EVAL_JIT_INVALIDATE_COLD_BIT (1U << 7)
|
2023-10-04 16:09:48 +01:00
|
|
|
|
|
|
|
|
/* Reserve a few bits for future use */
|
|
|
|
|
#define _PY_EVAL_EVENTS_BITS 8
|
|
|
|
|
#define _PY_EVAL_EVENTS_MASK ((1 << _PY_EVAL_EVENTS_BITS)-1)
|
|
|
|
|
|
|
|
|
|
// Atomically set 'bit' in tstate->eval_breaker (one of the _PY_*_BIT flags
// defined above).
static inline void
_Py_set_eval_breaker_bit(PyThreadState *tstate, uintptr_t bit)
{
    _Py_atomic_or_uintptr(&tstate->eval_breaker, bit);
}
|
|
|
|
|
|
|
|
|
|
// Atomically clear 'bit' in tstate->eval_breaker.
static inline void
_Py_unset_eval_breaker_bit(PyThreadState *tstate, uintptr_t bit)
{
    _Py_atomic_and_uintptr(&tstate->eval_breaker, ~bit);
}
|
|
|
|
|
|
2024-02-20 06:57:48 -08:00
|
|
|
static inline int
|
|
|
|
|
_Py_eval_breaker_bit_is_set(PyThreadState *tstate, uintptr_t bit)
|
2023-10-04 16:09:48 +01:00
|
|
|
{
|
2024-02-20 06:57:48 -08:00
|
|
|
uintptr_t b = _Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker);
|
|
|
|
|
return (b & bit) != 0;
|
2023-10-04 16:09:48 +01:00
|
|
|
}
|
|
|
|
|
|
2024-02-20 06:57:48 -08:00
|
|
|
// Free-threaded builds use these functions to set or unset a bit on all
|
|
|
|
|
// threads in the given interpreter.
|
|
|
|
|
void _Py_set_eval_breaker_bit_all(PyInterpreterState *interp, uintptr_t bit);
|
|
|
|
|
void _Py_unset_eval_breaker_bit_all(PyInterpreterState *interp, uintptr_t bit);
|
|
|
|
|
|
2025-03-10 14:06:56 +00:00
|
|
|
PyAPI_FUNC(_PyStackRef) _PyFloat_FromDouble_ConsumeInputs(_PyStackRef left, _PyStackRef right, double value);
|
2024-10-14 14:18:57 +01:00
|
|
|
|
2025-04-03 16:20:01 +01:00
|
|
|
#ifndef Py_SUPPORTS_REMOTE_DEBUG
|
|
|
|
|
#if defined(__APPLE__)
|
2025-04-06 21:39:25 +01:00
|
|
|
#include <TargetConditionals.h>
|
2025-04-03 16:20:01 +01:00
|
|
|
# if !defined(TARGET_OS_OSX)
|
|
|
|
|
// Older macOS SDKs do not define TARGET_OS_OSX
|
|
|
|
|
# define TARGET_OS_OSX 1
|
|
|
|
|
# endif
|
|
|
|
|
#endif
|
|
|
|
|
#if ((defined(__APPLE__) && TARGET_OS_OSX) || defined(MS_WINDOWS) || (defined(__linux__) && HAVE_PROCESS_VM_READV))
|
|
|
|
|
# define Py_SUPPORTS_REMOTE_DEBUG 1
|
|
|
|
|
#endif
|
|
|
|
|
#endif
|
|
|
|
|
|
2025-04-23 20:59:41 +01:00
|
|
|
#if defined(Py_REMOTE_DEBUG) && defined(Py_SUPPORTS_REMOTE_DEBUG)
|
|
|
|
|
extern int _PyRunRemoteDebugger(PyThreadState *tstate);
|
|
|
|
|
#endif
|
|
|
|
|
|
2025-06-05 18:53:57 +01:00
|
|
|
PyAPI_FUNC(_PyStackRef)
|
|
|
|
|
_PyForIter_VirtualIteratorNext(PyThreadState* tstate, struct _PyInterpreterFrame* frame, _PyStackRef iter, _PyStackRef *index_ptr);
|
2025-05-27 15:59:45 +01:00
|
|
|
|
2025-06-25 13:03:05 +02:00
|
|
|
/* Special methods used by LOAD_SPECIAL */
|
|
|
|
|
#define SPECIAL___ENTER__ 0
|
|
|
|
|
#define SPECIAL___EXIT__ 1
|
|
|
|
|
#define SPECIAL___AENTER__ 2
|
|
|
|
|
#define SPECIAL___AEXIT__ 3
|
|
|
|
|
#define SPECIAL_MAX 3
|
|
|
|
|
|
gh-139109: A new tracing JIT compiler frontend for CPython (GH-140310)
This PR changes the current JIT model from trace projection to trace recording. Benchmarking: better pyperformance (about 1.7% overall) geomean versus current https://raw.githubusercontent.com/facebookexperimental/free-threading-benchmarking/refs/heads/main/results/bm-20251108-3.15.0a1%2B-7e2bc1d-JIT/bm-20251108-vultr-x86_64-Fidget%252dSpinner-tracing_jit-3.15.0a1%2B-7e2bc1d-vs-base.svg, 100% faster Richards on the most improved benchmark versus the current JIT. Slowdown of about 10-15% on the worst benchmark versus the current JIT. **Note: the fastest version isn't the one merged, as it relies on fixing bugs in the specializing interpreter, which is left to another PR**. The speedup in the merged version is about 1.1%. https://raw.githubusercontent.com/facebookexperimental/free-threading-benchmarking/refs/heads/main/results/bm-20251112-3.15.0a1%2B-f8a764a-JIT/bm-20251112-vultr-x86_64-Fidget%252dSpinner-tracing_jit-3.15.0a1%2B-f8a764a-vs-base.svg
Stats: 50% more uops executed, 30% more traces entered the last time we ran them. It also suggests our trace lengths for a real trace recording JIT are too short, as a lot of trace too long aborts https://github.com/facebookexperimental/free-threading-benchmarking/blob/main/results/bm-20251023-3.15.0a1%2B-eb73378-CLANG%2CJIT/bm-20251023-vultr-x86_64-Fidget%252dSpinner-tracing_jit-3.15.0a1%2B-eb73378-pystats-vs-base.md .
This new JIT frontend is already able to record/execute significantly more instructions than the previous JIT frontend. In this PR, we are now able to record through custom dunders, simple object creation, generators, etc. None of these were done by the old JIT frontend. Some custom dunders uops were discovered to be broken as part of this work gh-140277
The optimizer stack space check is disabled, as it's no longer valid to deal with underflow.
Pros:
* Ignoring the generated tracer code as it's automatically created, this is only additional 1k lines of code. The maintenance burden is handled by the DSL and code generator.
* `optimizer.c` is now significantly simpler, as we don't have to do strange things to recover the bytecode from a trace.
* The new JIT frontend is able to handle a lot more control-flow than the old one.
* Tracing is very low overhead. We use the tail calling interpreter/computed goto interpreter to switch between tracing mode and non-tracing mode. I call this mechanism dual dispatch, as we have two dispatch tables dispatching to each other. Specialization is still enabled while tracing.
* Better handling of polymorphism. We leverage the specializing interpreter for this.
Cons:
* (For now) requires tail calling interpreter or computed gotos. This means no Windows JIT for now :(. Not to fret, tail calling is coming soon to Windows though https://github.com/python/cpython/pull/139962
Design:
* After each instruction, the `record_previous_inst` function/label is executed. This does as the name suggests.
* The tracing interpreter lowers bytecode to uops directly so that it can obtain "fresh" values at the point of lowering.
* The tracing version behaves nearly identical to the normal interpreter, in fact it even has specialization! This allows it to run without much of a slowdown when tracing. The actual cost of tracing is only a function call and writes to memory.
* The tracing interpreter uses the specializing interpreter's deopt to naturally form the side exit chains. This allows it to side exit chain effectively, without repeating much code. We force a re-specializing when tracing a deopt.
* The tracing interpreter can even handle goto errors/exceptions, but I chose to disable them for now as it's not tested.
* Because we do not share interpreter dispatch, there is should be no significant slowdown to the original specializing interpreter on tailcall and computed got with JIT disabled. With JIT enabled, there might be a slowdown in the form of the JIT trying to trace.
* Things that could have dynamic instruction pointer effects are guarded on. The guard deopts to a new instruction --- `_DYNAMIC_EXIT`.
2025-11-14 02:08:32 +08:00
|
|
|
PyAPI_DATA(const _Py_CODEUNIT *) _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR;
|
|
|
|
|
|
2025-12-03 17:43:35 +00:00
|
|
|
/* Helper functions for large uops */
|
|
|
|
|
|
|
|
|
|
PyAPI_FUNC(PyObject *)
|
|
|
|
|
_Py_VectorCall_StackRefSteal(
|
|
|
|
|
_PyStackRef callable,
|
|
|
|
|
_PyStackRef *arguments,
|
|
|
|
|
int total_args,
|
|
|
|
|
_PyStackRef kwnames);
|
|
|
|
|
|
|
|
|
|
PyAPI_FUNC(PyObject *)
|
|
|
|
|
_Py_BuiltinCallFast_StackRefSteal(
|
|
|
|
|
_PyStackRef callable,
|
|
|
|
|
_PyStackRef *arguments,
|
|
|
|
|
int total_args);
|
|
|
|
|
|
|
|
|
|
PyAPI_FUNC(PyObject *)
|
|
|
|
|
_Py_BuiltinCallFastWithKeywords_StackRefSteal(
|
|
|
|
|
_PyStackRef callable,
|
|
|
|
|
_PyStackRef *arguments,
|
|
|
|
|
int total_args);
|
|
|
|
|
|
|
|
|
|
PyAPI_FUNC(PyObject *)
|
|
|
|
|
_PyCallMethodDescriptorFast_StackRefSteal(
|
|
|
|
|
_PyStackRef callable,
|
|
|
|
|
PyMethodDef *meth,
|
|
|
|
|
PyObject *self,
|
|
|
|
|
_PyStackRef *arguments,
|
|
|
|
|
int total_args);
|
|
|
|
|
|
|
|
|
|
PyAPI_FUNC(PyObject *)
|
|
|
|
|
_PyCallMethodDescriptorFastWithKeywords_StackRefSteal(
|
|
|
|
|
_PyStackRef callable,
|
|
|
|
|
PyMethodDef *meth,
|
|
|
|
|
PyObject *self,
|
|
|
|
|
_PyStackRef *arguments,
|
|
|
|
|
int total_args);
|
|
|
|
|
|
|
|
|
|
PyAPI_FUNC(PyObject *)
|
|
|
|
|
_Py_CallBuiltinClass_StackRefSteal(
|
|
|
|
|
_PyStackRef callable,
|
|
|
|
|
_PyStackRef *arguments,
|
|
|
|
|
int total_args);
|
|
|
|
|
|
|
|
|
|
PyAPI_FUNC(PyObject *)
|
|
|
|
|
_Py_BuildString_StackRefSteal(
|
|
|
|
|
_PyStackRef *arguments,
|
|
|
|
|
int total_args);
|
|
|
|
|
|
|
|
|
|
PyAPI_FUNC(PyObject *)
|
|
|
|
|
_Py_BuildMap_StackRefSteal(
|
|
|
|
|
_PyStackRef *arguments,
|
|
|
|
|
int half_args);
|
|
|
|
|
|
|
|
|
|
PyAPI_FUNC(void)
|
|
|
|
|
_Py_assert_within_stack_bounds(
|
|
|
|
|
_PyInterpreterFrame *frame, _PyStackRef *stack_pointer,
|
|
|
|
|
const char *filename, int lineno);
|
|
|
|
|
|
2017-09-07 23:51:28 -06:00
|
|
|
#ifdef __cplusplus
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
#endif /* !Py_INTERNAL_CEVAL_H */
|