/* cpython/Python/ceval.c */

/* Execute compiled code */
#include "ceval.h"
int
Py_GetRecursionLimit(void)
{
PyInterpreterState *interp = _PyInterpreterState_GET();
return interp->ceval.recursion_limit;
}
void
Py_SetRecursionLimit(int new_limit)
{
PyInterpreterState *interp = _PyInterpreterState_GET();
_PyEval_StopTheWorld(interp);
interp->ceval.recursion_limit = new_limit;
_Py_FOR_EACH_TSTATE_BEGIN(interp, p) {
int depth = p->py_recursion_limit - p->py_recursion_remaining;
p->py_recursion_limit = new_limit;
p->py_recursion_remaining = new_limit - depth;
}
_Py_FOR_EACH_TSTATE_END(interp);
_PyEval_StartTheWorld(interp);
}
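/* Illustrative sketch (not part of ceval.c, guarded out of the build):
 * how an embedder might use the public recursion-limit API above.
 * Py_SetRecursionLimit() stops the world and rebases every thread's
 * remaining depth, so it should be called sparingly. */
#if 0
#include <Python.h>

static void
raise_recursion_limit_if_low(int wanted)
{
    if (Py_GetRecursionLimit() < wanted) {
        Py_SetRecursionLimit(wanted);
    }
}
#endif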
int
_Py_ReachedRecursionLimitWithMargin(PyThreadState *tstate, int margin_count)
{
uintptr_t here_addr = _Py_get_machine_stack_pointer();
_PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
#if _Py_STACK_GROWS_DOWN
if (here_addr > _tstate->c_stack_soft_limit + margin_count * _PyOS_STACK_MARGIN_BYTES) {
#else
if (here_addr <= _tstate->c_stack_soft_limit - margin_count * _PyOS_STACK_MARGIN_BYTES) {
#endif
return 0;
}
if (_tstate->c_stack_hard_limit == 0) {
_Py_InitializeRecursionLimits(tstate);
}
#if _Py_STACK_GROWS_DOWN
return here_addr <= _tstate->c_stack_soft_limit + margin_count * _PyOS_STACK_MARGIN_BYTES &&
here_addr >= _tstate->c_stack_soft_limit - 2 * _PyOS_STACK_MARGIN_BYTES;
#else
return here_addr > _tstate->c_stack_soft_limit - margin_count * _PyOS_STACK_MARGIN_BYTES &&
here_addr <= _tstate->c_stack_soft_limit + 2 * _PyOS_STACK_MARGIN_BYTES;
#endif
}
void
_Py_EnterRecursiveCallUnchecked(PyThreadState *tstate)
{
uintptr_t here_addr = _Py_get_machine_stack_pointer();
_PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
#if _Py_STACK_GROWS_DOWN
if (here_addr < _tstate->c_stack_hard_limit) {
#else
if (here_addr > _tstate->c_stack_hard_limit) {
#endif
Py_FatalError("Unchecked stack overflow.");
}
}
#if defined(__s390x__)
# define Py_C_STACK_SIZE 320000
#elif defined(_WIN32)
// Don't define Py_C_STACK_SIZE, ask the O/S
#elif defined(__ANDROID__)
# define Py_C_STACK_SIZE 1200000
#elif defined(__sparc__)
# define Py_C_STACK_SIZE 1600000
#elif defined(__hppa__) || defined(__powerpc64__)
# define Py_C_STACK_SIZE 2000000
#else
# define Py_C_STACK_SIZE 4000000
#endif
#if defined(__EMSCRIPTEN__)
// Temporary workaround to make `pthread_getattr_np` work on Emscripten.
// Emscripten 4.0.6 will contain a fix:
// https://github.com/emscripten-core/emscripten/pull/23887
#include "emscripten/stack.h"
#define pthread_attr_t workaround_pthread_attr_t
#define pthread_getattr_np workaround_pthread_getattr_np
#define pthread_attr_getguardsize workaround_pthread_attr_getguardsize
#define pthread_attr_getstack workaround_pthread_attr_getstack
#define pthread_attr_destroy workaround_pthread_attr_destroy
typedef struct {
void *_a_stackaddr;
size_t _a_stacksize, _a_guardsize;
} pthread_attr_t;
extern __attribute__((__visibility__("hidden"))) unsigned __default_guardsize;
// Modified version of pthread_getattr_np from the upstream PR.
int pthread_getattr_np(pthread_t thread, pthread_attr_t *attr) {
attr->_a_stackaddr = (void*)emscripten_stack_get_base();
attr->_a_stacksize = emscripten_stack_get_base() - emscripten_stack_get_end();
attr->_a_guardsize = __default_guardsize;
return 0;
}
// These three functions copied without any changes from Emscripten libc.
int pthread_attr_getguardsize(const pthread_attr_t *restrict a, size_t *restrict size)
{
*size = a->_a_guardsize;
return 0;
}
int pthread_attr_getstack(const pthread_attr_t *restrict a, void **restrict addr, size_t *restrict size)
{
/// XXX musl is not standard-conforming? It should not report EINVAL if _a_stackaddr is zero, and it should
/// report EINVAL if a is null: http://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_getstack.html
if (!a) return EINVAL;
// if (!a->_a_stackaddr)
// return EINVAL;
*size = a->_a_stacksize;
*addr = (void *)(a->_a_stackaddr - *size);
return 0;
}
int pthread_attr_destroy(pthread_attr_t *a)
{
return 0;
}
#endif
static void
hardware_stack_limits(uintptr_t *base, uintptr_t *top, uintptr_t sp)
{
#ifdef WIN32
ULONG_PTR low, high;
GetCurrentThreadStackLimits(&low, &high);
*top = (uintptr_t)high;
ULONG guarantee = 0;
SetThreadStackGuarantee(&guarantee);
*base = (uintptr_t)low + guarantee;
#elif defined(__APPLE__)
pthread_t this_thread = pthread_self();
void *stack_addr = pthread_get_stackaddr_np(this_thread); // top of the stack
size_t stack_size = pthread_get_stacksize_np(this_thread);
*top = (uintptr_t)stack_addr;
*base = ((uintptr_t)stack_addr) - stack_size;
#else
/// XXX musl supports HAVE_PTHREAD_GETATTR_NP, but the resulting stack size
/// (on alpine at least) is much smaller than expected and imposes undue limits
/// compared to the old stack size estimation. (We assume musl is not glibc.)
# if defined(HAVE_PTHREAD_GETATTR_NP) && !defined(_AIX) && \
!defined(__NetBSD__) && (defined(__GLIBC__) || !defined(__linux__))
size_t stack_size, guard_size;
void *stack_addr;
pthread_attr_t attr;
int err = pthread_getattr_np(pthread_self(), &attr);
if (err == 0) {
err = pthread_attr_getguardsize(&attr, &guard_size);
err |= pthread_attr_getstack(&attr, &stack_addr, &stack_size);
err |= pthread_attr_destroy(&attr);
}
if (err == 0) {
*base = ((uintptr_t)stack_addr) + guard_size;
*top = (uintptr_t)stack_addr + stack_size;
return;
}
# endif
// Add some space for caller function then round to minimum page size
// This is a guess at the top of the stack, but should be a reasonably
// good guess if called from _PyThreadState_Attach when creating a thread.
// If the thread is attached deep in a call stack, then the guess will be poor.
# if _Py_STACK_GROWS_DOWN
uintptr_t top_addr = _Py_SIZE_ROUND_UP(sp + 8*sizeof(void*), SYSTEM_PAGE_SIZE);
*top = top_addr;
*base = top_addr - Py_C_STACK_SIZE;
# else
uintptr_t base_addr = _Py_SIZE_ROUND_DOWN(sp - 8*sizeof(void*), SYSTEM_PAGE_SIZE);
*base = base_addr;
*top = base_addr + Py_C_STACK_SIZE;
# endif
#endif
}
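/* Minimal standalone sketch of the pthread_getattr_np() dance used in
 * hardware_stack_limits(), assuming a glibc/Linux target. Illustrative
 * only, hence guarded out. */
#if 0
#define _GNU_SOURCE   /* pthread_getattr_np() is a GNU extension */
#include <pthread.h>
#include <stdio.h>

static void
print_my_stack_bounds(void)
{
    pthread_attr_t attr;
    void *addr;
    size_t size, guard;
    if (pthread_getattr_np(pthread_self(), &attr) == 0) {
        pthread_attr_getstack(&attr, &addr, &size);
        pthread_attr_getguardsize(&attr, &guard);
        pthread_attr_destroy(&attr);
        /* addr is the lowest mapped address; the usable base skips the
         * guard pages, exactly as hardware_stack_limits() does above. */
        printf("stack [%p, %p), guard %zu bytes\n",
               addr, (void *)((char *)addr + size), guard);
    }
}
#endif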
static void
tstate_set_stack(PyThreadState *tstate,
uintptr_t base, uintptr_t top)
{
assert(base < top);
assert((top - base) >= _PyOS_MIN_STACK_SIZE);
#ifdef _Py_THREAD_SANITIZER
// Thread sanitizer crashes if we use more than half the stack.
uintptr_t stacksize = top - base;
# if _Py_STACK_GROWS_DOWN
base += stacksize/2;
# else
top -= stacksize/2;
# endif
#endif
_PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
#if _Py_STACK_GROWS_DOWN
_tstate->c_stack_top = top;
_tstate->c_stack_hard_limit = base + _PyOS_STACK_MARGIN_BYTES;
_tstate->c_stack_soft_limit = base + _PyOS_STACK_MARGIN_BYTES * 2;
# ifndef NDEBUG
// Sanity checks
_PyThreadStateImpl *ts = (_PyThreadStateImpl *)tstate;
assert(ts->c_stack_hard_limit <= ts->c_stack_soft_limit);
assert(ts->c_stack_soft_limit < ts->c_stack_top);
# endif
#else
_tstate->c_stack_top = base;
_tstate->c_stack_hard_limit = top - _PyOS_STACK_MARGIN_BYTES;
_tstate->c_stack_soft_limit = top - _PyOS_STACK_MARGIN_BYTES * 2;
# ifndef NDEBUG
// Sanity checks
_PyThreadStateImpl *ts = (_PyThreadStateImpl *)tstate;
assert(ts->c_stack_hard_limit >= ts->c_stack_soft_limit);
assert(ts->c_stack_soft_limit > ts->c_stack_top);
# endif
#endif
}
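/* Limit layout produced above for a downward-growing stack
 * (sketch, not to scale):
 *
 *   base                                                  c_stack_top
 *    |-- margin --|-- margin --|...........................|
 *                 ^            ^
 *                 |            c_stack_soft_limit: crossing it raises
 *                 |            RecursionError via _Py_CheckRecursiveCall()
 *                 c_stack_hard_limit: crossing it is a fatal error
 */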
void
_Py_InitializeRecursionLimits(PyThreadState *tstate)
{
uintptr_t base, top;
uintptr_t here_addr = _Py_get_machine_stack_pointer();
hardware_stack_limits(&base, &top, here_addr);
assert(top != 0);
tstate_set_stack(tstate, base, top);
_PyThreadStateImpl *ts = (_PyThreadStateImpl *)tstate;
ts->c_stack_init_base = base;
ts->c_stack_init_top = top;
}
int
PyUnstable_ThreadState_SetStackProtection(PyThreadState *tstate,
void *stack_start_addr, size_t stack_size)
{
if (stack_size < _PyOS_MIN_STACK_SIZE) {
PyErr_Format(PyExc_ValueError,
"stack_size must be at least %zu bytes",
_PyOS_MIN_STACK_SIZE);
return -1;
}
uintptr_t base = (uintptr_t)stack_start_addr;
uintptr_t top = base + stack_size;
tstate_set_stack(tstate, base, top);
return 0;
}
void
PyUnstable_ThreadState_ResetStackProtection(PyThreadState *tstate)
{
_PyThreadStateImpl *ts = (_PyThreadStateImpl *)tstate;
if (ts->c_stack_init_top != 0) {
tstate_set_stack(tstate,
ts->c_stack_init_base,
ts->c_stack_init_top);
return;
}
_Py_InitializeRecursionLimits(tstate);
}
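/* Illustrative sketch (guarded out): an embedder that runs Python on
 * its own fiber/coroutine stack could bracket the call like this.
 * `call_on_fiber_stack` and its parameters are hypothetical. */
#if 0
#include <Python.h>

static PyObject *
call_on_fiber_stack(PyThreadState *tstate, PyObject *func,
                    void *fiber_stack, size_t fiber_stack_size)
{
    if (PyUnstable_ThreadState_SetStackProtection(
            tstate, fiber_stack, fiber_stack_size) < 0) {
        return NULL;  /* e.g. stack_size below _PyOS_MIN_STACK_SIZE */
    }
    PyObject *res = PyObject_CallNoArgs(func);
    /* Restore the limits measured when the thread first attached. */
    PyUnstable_ThreadState_ResetStackProtection(tstate);
    return res;
}
#endif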
/* The function _Py_EnterRecursiveCallTstate() only calls _Py_CheckRecursiveCall()
   once the stack pointer has crossed c_stack_soft_limit (i.e., it lies between
   the stack base and the soft limit on a downward-growing stack). */
int
_Py_CheckRecursiveCall(PyThreadState *tstate, const char *where)
{
_PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
uintptr_t here_addr = _Py_get_machine_stack_pointer();
assert(_tstate->c_stack_soft_limit != 0);
assert(_tstate->c_stack_hard_limit != 0);
#if _Py_STACK_GROWS_DOWN
assert(here_addr >= _tstate->c_stack_hard_limit - _PyOS_STACK_MARGIN_BYTES);
if (here_addr < _tstate->c_stack_hard_limit) {
/* Overflowing while handling an overflow. Give up. */
int kbytes_used = (int)(_tstate->c_stack_top - here_addr)/1024;
#else
assert(here_addr <= _tstate->c_stack_hard_limit + _PyOS_STACK_MARGIN_BYTES);
if (here_addr > _tstate->c_stack_hard_limit) {
/* Overflowing while handling an overflow. Give up. */
int kbytes_used = (int)(here_addr - _tstate->c_stack_top)/1024;
#endif
char buffer[80];
snprintf(buffer, sizeof(buffer), "Unrecoverable stack overflow (used %d kB)%s", kbytes_used, where);
Py_FatalError(buffer);
}
if (tstate->recursion_headroom) {
return 0;
}
else {
#if _Py_STACK_GROWS_DOWN
int kbytes_used = (int)(_tstate->c_stack_top - here_addr)/1024;
#else
int kbytes_used = (int)(here_addr - _tstate->c_stack_top)/1024;
#endif
tstate->recursion_headroom++;
_PyErr_Format(tstate, PyExc_RecursionError,
"Stack overflow (used %d kB)%s",
kbytes_used,
where);
tstate->recursion_headroom--;
return -1;
}
}
const binaryfunc _PyEval_BinaryOps[] = {
[NB_ADD] = PyNumber_Add,
[NB_AND] = PyNumber_And,
[NB_FLOOR_DIVIDE] = PyNumber_FloorDivide,
[NB_LSHIFT] = PyNumber_Lshift,
[NB_MATRIX_MULTIPLY] = PyNumber_MatrixMultiply,
[NB_MULTIPLY] = PyNumber_Multiply,
[NB_REMAINDER] = PyNumber_Remainder,
[NB_OR] = PyNumber_Or,
[NB_POWER] = _PyNumber_PowerNoMod,
[NB_RSHIFT] = PyNumber_Rshift,
[NB_SUBTRACT] = PyNumber_Subtract,
[NB_TRUE_DIVIDE] = PyNumber_TrueDivide,
[NB_XOR] = PyNumber_Xor,
[NB_INPLACE_ADD] = PyNumber_InPlaceAdd,
[NB_INPLACE_AND] = PyNumber_InPlaceAnd,
[NB_INPLACE_FLOOR_DIVIDE] = PyNumber_InPlaceFloorDivide,
[NB_INPLACE_LSHIFT] = PyNumber_InPlaceLshift,
[NB_INPLACE_MATRIX_MULTIPLY] = PyNumber_InPlaceMatrixMultiply,
[NB_INPLACE_MULTIPLY] = PyNumber_InPlaceMultiply,
[NB_INPLACE_REMAINDER] = PyNumber_InPlaceRemainder,
[NB_INPLACE_OR] = PyNumber_InPlaceOr,
[NB_INPLACE_POWER] = _PyNumber_InPlacePowerNoMod,
[NB_INPLACE_RSHIFT] = PyNumber_InPlaceRshift,
[NB_INPLACE_SUBTRACT] = PyNumber_InPlaceSubtract,
[NB_INPLACE_TRUE_DIVIDE] = PyNumber_InPlaceTrueDivide,
[NB_INPLACE_XOR] = PyNumber_InPlaceXor,
[NB_SUBSCR] = PyObject_GetItem,
};
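/* Sketch of how the generic BINARY_OP handler consumes this table: the
 * oparg selects the slot directly. Simplified and guarded out; the real
 * handler in bytecodes.c operates on _PyStackRefs. */
#if 0
static PyObject *
apply_binary_op(int oparg, PyObject *lhs, PyObject *rhs)
{
    assert(_PyEval_BinaryOps[oparg] != NULL);
    return _PyEval_BinaryOps[oparg](lhs, rhs);  /* new reference, or NULL on error */
}
#endif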
const conversion_func _PyEval_ConversionFuncs[4] = {
[FVC_STR] = PyObject_Str,
[FVC_REPR] = PyObject_Repr,
[FVC_ASCII] = PyObject_ASCII
};
const _Py_SpecialMethod _Py_SpecialMethods[] = {
[SPECIAL___ENTER__] = {
.name = &_Py_ID(__enter__),
.error = (
"'%T' object does not support the context manager protocol "
"(missed __enter__ method)"
),
.error_suggestion = (
"'%T' object does not support the context manager protocol "
"(missed __enter__ method) but it supports the asynchronous "
"context manager protocol. Did you mean to use 'async with'?"
)
},
[SPECIAL___EXIT__] = {
.name = &_Py_ID(__exit__),
.error = (
"'%T' object does not support the context manager protocol "
"(missed __exit__ method)"
),
.error_suggestion = (
"'%T' object does not support the context manager protocol "
"(missed __exit__ method) but it supports the asynchronous "
"context manager protocol. Did you mean to use 'async with'?"
)
},
[SPECIAL___AENTER__] = {
.name = &_Py_ID(__aenter__),
.error = (
"'%T' object does not support the asynchronous "
"context manager protocol (missed __aenter__ method)"
),
.error_suggestion = (
"'%T' object does not support the asynchronous context manager "
"protocol (missed __aenter__ method) but it supports the context "
"manager protocol. Did you mean to use 'with'?"
)
},
[SPECIAL___AEXIT__] = {
.name = &_Py_ID(__aexit__),
.error = (
"'%T' object does not support the asynchronous "
"context manager protocol (missed __aexit__ method)"
),
.error_suggestion = (
"'%T' object does not support the asynchronous context manager "
"protocol (missed __aexit__ method) but it supports the context "
"manager protocol. Did you mean to use 'with'?"
)
}
};
const size_t _Py_FunctionAttributeOffsets[] = {
[MAKE_FUNCTION_CLOSURE] = offsetof(PyFunctionObject, func_closure),
[MAKE_FUNCTION_ANNOTATIONS] = offsetof(PyFunctionObject, func_annotations),
[MAKE_FUNCTION_KWDEFAULTS] = offsetof(PyFunctionObject, func_kwdefaults),
[MAKE_FUNCTION_DEFAULTS] = offsetof(PyFunctionObject, func_defaults),
[MAKE_FUNCTION_ANNOTATE] = offsetof(PyFunctionObject, func_annotate),
};
// PEP 634: Structural Pattern Matching
// Return a tuple of values corresponding to keys, with error checks for
// duplicate/missing keys.
PyObject *
_PyEval_MatchKeys(PyThreadState *tstate, PyObject *map, PyObject *keys)
{
assert(PyTuple_CheckExact(keys));
Py_ssize_t nkeys = PyTuple_GET_SIZE(keys);
if (!nkeys) {
// No keys means no items.
return PyTuple_New(0);
}
PyObject *seen = NULL;
PyObject *dummy = NULL;
PyObject *values = NULL;
// We use the two argument form of map.get(key, default) for two reasons:
// - Atomically check for a key and get its value without error handling.
// - Don't cause key creation or resizing in dict subclasses like
// collections.defaultdict that define __missing__ (or similar).
_PyCStackRef cref;
_PyThreadState_PushCStackRef(tstate, &cref);
int meth_found = _PyObject_GetMethodStackRef(tstate, map, &_Py_ID(get), &cref.ref);
PyObject *get = PyStackRef_AsPyObjectBorrow(cref.ref);
if (get == NULL) {
goto fail;
}
seen = PySet_New(NULL);
if (seen == NULL) {
goto fail;
}
// dummy = object()
dummy = _PyObject_CallNoArgs((PyObject *)&PyBaseObject_Type);
if (dummy == NULL) {
goto fail;
}
values = PyTuple_New(nkeys);
if (values == NULL) {
goto fail;
}
for (Py_ssize_t i = 0; i < nkeys; i++) {
PyObject *key = PyTuple_GET_ITEM(keys, i);
if (PySet_Contains(seen, key) || PySet_Add(seen, key)) {
if (!_PyErr_Occurred(tstate)) {
// Seen it before!
_PyErr_Format(tstate, PyExc_ValueError,
"mapping pattern checks duplicate key (%R)", key);
}
goto fail;
}
PyObject *args[] = { map, key, dummy };
PyObject *value = NULL;
if (meth_found) {
value = PyObject_Vectorcall(get, args, 3, NULL);
}
else {
value = PyObject_Vectorcall(get, &args[1], 2, NULL);
}
if (value == NULL) {
goto fail;
}
if (value == dummy) {
// key not in map!
Py_DECREF(value);
Py_DECREF(values);
// Return None:
values = Py_NewRef(Py_None);
goto done;
}
PyTuple_SET_ITEM(values, i, value);
}
// Success:
done:
_PyThreadState_PopCStackRef(tstate, &cref);
Py_DECREF(seen);
Py_DECREF(dummy);
return values;
fail:
_PyThreadState_PopCStackRef(tstate, &cref);
Py_XDECREF(seen);
Py_XDECREF(dummy);
Py_XDECREF(values);
return NULL;
}
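/* For reference, the loop above mirrors this Python (illustrative):
 *
 *     dummy = object()
 *     values = []
 *     for key in keys:              # a duplicate key raises ValueError
 *         value = map.get(key, dummy)
 *         if value is dummy:        # missing key: the match fails
 *             return None
 *         values.append(value)
 *     return tuple(values)
 */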
// Extract a named attribute from the subject, with additional bookkeeping to
// raise TypeErrors for repeated lookups. On failure, return NULL (with no
// error set). Use _PyErr_Occurred(tstate) to disambiguate.
static PyObject *
match_class_attr(PyThreadState *tstate, PyObject *subject, PyObject *type,
PyObject *name, PyObject *seen)
{
assert(PyUnicode_CheckExact(name));
assert(PySet_CheckExact(seen));
if (PySet_Contains(seen, name) || PySet_Add(seen, name)) {
if (!_PyErr_Occurred(tstate)) {
// Seen it before!
_PyErr_Format(tstate, PyExc_TypeError,
"%s() got multiple sub-patterns for attribute %R",
((PyTypeObject*)type)->tp_name, name);
}
return NULL;
}
PyObject *attr;
(void)PyObject_GetOptionalAttr(subject, name, &attr);
return attr;
}
// On success (match), return a tuple of extracted attributes. On failure (no
// match), return NULL. Use _PyErr_Occurred(tstate) to disambiguate.
PyObject*
_PyEval_MatchClass(PyThreadState *tstate, PyObject *subject, PyObject *type,
Py_ssize_t nargs, PyObject *kwargs)
{
if (!PyType_Check(type)) {
const char *e = "called match pattern must be a class";
_PyErr_Format(tstate, PyExc_TypeError, e);
return NULL;
}
assert(PyTuple_CheckExact(kwargs));
// First, an isinstance check:
if (PyObject_IsInstance(subject, type) <= 0) {
return NULL;
}
// So far so good:
PyObject *seen = PySet_New(NULL);
if (seen == NULL) {
return NULL;
}
PyObject *attrs = PyList_New(0);
if (attrs == NULL) {
Py_DECREF(seen);
return NULL;
}
// NOTE: From this point on, goto fail on failure:
PyObject *match_args = NULL;
// First, the positional subpatterns:
if (nargs) {
int match_self = 0;
if (PyObject_GetOptionalAttr(type, &_Py_ID(__match_args__), &match_args) < 0) {
goto fail;
}
if (match_args) {
if (!PyTuple_CheckExact(match_args)) {
const char *e = "%s.__match_args__ must be a tuple (got %s)";
_PyErr_Format(tstate, PyExc_TypeError, e,
((PyTypeObject *)type)->tp_name,
Py_TYPE(match_args)->tp_name);
goto fail;
}
}
else {
// _Py_TPFLAGS_MATCH_SELF is only acknowledged if the type does not
// define __match_args__. This is natural behavior for subclasses:
// it's as if __match_args__ is some "magic" value that is lost as
// soon as they redefine it.
match_args = PyTuple_New(0);
match_self = PyType_HasFeature((PyTypeObject*)type,
_Py_TPFLAGS_MATCH_SELF);
}
assert(PyTuple_CheckExact(match_args));
Py_ssize_t allowed = match_self ? 1 : PyTuple_GET_SIZE(match_args);
if (allowed < nargs) {
const char *plural = (allowed == 1) ? "" : "s";
_PyErr_Format(tstate, PyExc_TypeError,
"%s() accepts %d positional sub-pattern%s (%d given)",
((PyTypeObject*)type)->tp_name,
allowed, plural, nargs);
goto fail;
}
if (match_self) {
// Easy. Copy the subject itself, and move on to kwargs.
if (PyList_Append(attrs, subject) < 0) {
goto fail;
}
}
else {
for (Py_ssize_t i = 0; i < nargs; i++) {
PyObject *name = PyTuple_GET_ITEM(match_args, i);
if (!PyUnicode_CheckExact(name)) {
_PyErr_Format(tstate, PyExc_TypeError,
"__match_args__ elements must be strings "
"(got %s)", Py_TYPE(name)->tp_name);
goto fail;
}
PyObject *attr = match_class_attr(tstate, subject, type, name,
seen);
if (attr == NULL) {
goto fail;
}
if (PyList_Append(attrs, attr) < 0) {
Py_DECREF(attr);
goto fail;
}
Py_DECREF(attr);
}
}
Py_CLEAR(match_args);
}
// Finally, the keyword subpatterns:
for (Py_ssize_t i = 0; i < PyTuple_GET_SIZE(kwargs); i++) {
PyObject *name = PyTuple_GET_ITEM(kwargs, i);
PyObject *attr = match_class_attr(tstate, subject, type, name, seen);
if (attr == NULL) {
goto fail;
}
if (PyList_Append(attrs, attr) < 0) {
Py_DECREF(attr);
goto fail;
}
Py_DECREF(attr);
}
Py_SETREF(attrs, PyList_AsTuple(attrs));
Py_DECREF(seen);
return attrs;
fail:
// We really don't care whether an error was raised or not... that's our
// caller's problem. All we know is that the match failed.
Py_XDECREF(match_args);
Py_DECREF(seen);
Py_DECREF(attrs);
return NULL;
}
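/* Illustrative: a pattern like the one below reaches this helper with
 * nargs == 2 and kwargs == ("unit",), assuming a Point class that
 * defines __match_args__ = ("x", "y"):
 *
 *     match subject:
 *         case Point(px, py, unit="m"):
 *             ...
 */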
static int do_raise(PyThreadState *tstate, PyObject *exc, PyObject *cause);
PyObject *
PyEval_EvalCode(PyObject *co, PyObject *globals, PyObject *locals)
{
PyThreadState *tstate = _PyThreadState_GET();
if (locals == NULL) {
locals = globals;
}
PyObject *builtins = _PyDict_LoadBuiltinsFromGlobals(globals);
if (builtins == NULL) {
return NULL;
}
PyFrameConstructor desc = {
.fc_globals = globals,
.fc_builtins = builtins,
.fc_name = ((PyCodeObject *)co)->co_name,
.fc_qualname = ((PyCodeObject *)co)->co_name,
.fc_code = co,
.fc_defaults = NULL,
.fc_kwdefaults = NULL,
.fc_closure = NULL
};
PyFunctionObject *func = _PyFunction_FromConstructor(&desc);
_Py_DECREF_BUILTINS(builtins);
if (func == NULL) {
return NULL;
}
EVAL_CALL_STAT_INC(EVAL_CALL_LEGACY);
PyObject *res = _PyEval_Vector(tstate, func, locals, NULL, 0, NULL);
Py_DECREF(func);
return res;
}
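/* Illustrative sketch (guarded out): the classic embedding pattern that
 * feeds PyEval_EvalCode(). `run_snippet` is hypothetical. */
#if 0
#include <Python.h>

static PyObject *
run_snippet(const char *src)
{
    PyObject *globals = PyDict_New();
    if (globals == NULL) {
        return NULL;
    }
    PyObject *code = Py_CompileString(src, "<snippet>", Py_file_input);
    if (code == NULL) {
        Py_DECREF(globals);
        return NULL;
    }
    /* Passing locals == NULL would fall back to globals, per above. */
    PyObject *res = PyEval_EvalCode(code, globals, globals);
    Py_DECREF(code);
    Py_DECREF(globals);
    return res;  /* Py_None on success for Py_file_input */
}
#endif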
/* Interpreter main loop */
PyObject *
PyEval_EvalFrame(PyFrameObject *f)
{
/* Function kept for backward compatibility */
PyThreadState *tstate = _PyThreadState_GET();
return _PyEval_EvalFrame(tstate, f->f_frame, 0);
}
PyObject *
PyEval_EvalFrameEx(PyFrameObject *f, int throwflag)
{
PyThreadState *tstate = _PyThreadState_GET();
return _PyEval_EvalFrame(tstate, f->f_frame, throwflag);
}
#include "ceval_macros.h"
/* Helper functions to keep the size of the largest uops down */
PyObject *
_Py_VectorCall_StackRefSteal(
_PyStackRef callable,
_PyStackRef *arguments,
int total_args,
_PyStackRef kwnames)
{
PyObject *res;
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
res = NULL;
goto cleanup;
}
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyObject *kwnames_o = PyStackRef_AsPyObjectBorrow(kwnames);
int positional_args = total_args;
if (kwnames_o != NULL) {
positional_args -= (int)PyTuple_GET_SIZE(kwnames_o);
}
res = PyObject_Vectorcall(
callable_o, args_o,
positional_args | PY_VECTORCALL_ARGUMENTS_OFFSET,
kwnames_o);
STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
assert((res != NULL) ^ (PyErr_Occurred() != NULL));
cleanup:
PyStackRef_XCLOSE(kwnames);
// arguments is a pointer into the GC visible stack,
// so we must NULL out values as we clear them.
for (int i = total_args-1; i >= 0; i--) {
_PyStackRef tmp = arguments[i];
arguments[i] = PyStackRef_NULL;
PyStackRef_CLOSE(tmp);
}
PyStackRef_CLOSE(callable);
return res;
}
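/* The *_StackRefSteal helpers (above and below) share one contract:
 * they consume (steal) `callable`, `kwnames` and every entry of
 * `arguments`, and they NULL out each stack slot before closing it so a
 * GC traversal that runs concurrently never sees a stale reference. */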
PyObject*
_Py_VectorCallInstrumentation_StackRefSteal(
_PyStackRef callable,
_PyStackRef* arguments,
int total_args,
_PyStackRef kwnames,
bool call_instrumentation,
_PyInterpreterFrame* frame,
_Py_CODEUNIT* this_instr,
PyThreadState* tstate)
{
PyObject* res;
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
res = NULL;
goto cleanup;
}
PyObject* callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyObject* kwnames_o = PyStackRef_AsPyObjectBorrow(kwnames);
int positional_args = total_args;
if (kwnames_o != NULL) {
positional_args -= (int)PyTuple_GET_SIZE(kwnames_o);
}
res = PyObject_Vectorcall(
callable_o, args_o,
positional_args | PY_VECTORCALL_ARGUMENTS_OFFSET,
kwnames_o);
STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
if (call_instrumentation) {
PyObject* arg = total_args == 0 ?
&_PyInstrumentation_MISSING : PyStackRef_AsPyObjectBorrow(arguments[0]);
if (res == NULL) {
_Py_call_instrumentation_exc2(
tstate, PY_MONITORING_EVENT_C_RAISE,
frame, this_instr, callable_o, arg);
}
else {
int err = _Py_call_instrumentation_2args(
tstate, PY_MONITORING_EVENT_C_RETURN,
frame, this_instr, callable_o, arg);
if (err < 0) {
Py_CLEAR(res);
}
}
}
assert((res != NULL) ^ (PyErr_Occurred() != NULL));
cleanup:
PyStackRef_XCLOSE(kwnames);
// arguments is a pointer into the GC visible stack,
// so we must NULL out values as we clear them.
for (int i = total_args - 1; i >= 0; i--) {
_PyStackRef tmp = arguments[i];
arguments[i] = PyStackRef_NULL;
PyStackRef_CLOSE(tmp);
}
PyStackRef_CLOSE(callable);
return res;
}
PyObject *
_Py_BuiltinCallFast_StackRefSteal(
_PyStackRef callable,
_PyStackRef *arguments,
int total_args)
{
PyObject *res;
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
res = NULL;
goto cleanup;
}
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyCFunction cfunc = PyCFunction_GET_FUNCTION(callable_o);
res = _PyCFunctionFast_CAST(cfunc)(
PyCFunction_GET_SELF(callable_o),
args_o,
total_args
);
STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
assert((res != NULL) ^ (PyErr_Occurred() != NULL));
cleanup:
// arguments is a pointer into the GC visible stack,
// so we must NULL out values as we clear them.
for (int i = total_args-1; i >= 0; i--) {
_PyStackRef tmp = arguments[i];
arguments[i] = PyStackRef_NULL;
PyStackRef_CLOSE(tmp);
}
PyStackRef_CLOSE(callable);
return res;
}
PyObject *
_Py_BuiltinCallFastWithKeywords_StackRefSteal(
_PyStackRef callable,
_PyStackRef *arguments,
int total_args)
{
PyObject *res;
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
res = NULL;
goto cleanup;
}
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyCFunctionFastWithKeywords cfunc =
_PyCFunctionFastWithKeywords_CAST(PyCFunction_GET_FUNCTION(callable_o));
res = cfunc(PyCFunction_GET_SELF(callable_o), args_o, total_args, NULL);
STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
assert((res != NULL) ^ (PyErr_Occurred() != NULL));
cleanup:
// arguments is a pointer into the GC visible stack,
// so we must NULL out values as we clear them.
for (int i = total_args-1; i >= 0; i--) {
_PyStackRef tmp = arguments[i];
arguments[i] = PyStackRef_NULL;
PyStackRef_CLOSE(tmp);
}
PyStackRef_CLOSE(callable);
return res;
}
PyObject *
_PyCallMethodDescriptorFast_StackRefSteal(
_PyStackRef callable,
PyMethodDef *meth,
PyObject *self,
_PyStackRef *arguments,
int total_args)
{
PyObject *res;
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
res = NULL;
goto cleanup;
}
assert(((PyMethodDescrObject *)PyStackRef_AsPyObjectBorrow(callable))->d_method == meth);
assert(self == PyStackRef_AsPyObjectBorrow(arguments[0]));
PyCFunctionFast cfunc = _PyCFunctionFast_CAST(meth->ml_meth);
res = cfunc(self, (args_o + 1), total_args - 1);
STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
assert((res != NULL) ^ (PyErr_Occurred() != NULL));
cleanup:
// arguments is a pointer into the GC visible stack,
// so we must NULL out values as we clear them.
for (int i = total_args-1; i >= 0; i--) {
_PyStackRef tmp = arguments[i];
arguments[i] = PyStackRef_NULL;
PyStackRef_CLOSE(tmp);
}
PyStackRef_CLOSE(callable);
return res;
}
PyObject *
_PyCallMethodDescriptorFastWithKeywords_StackRefSteal(
_PyStackRef callable,
PyMethodDef *meth,
PyObject *self,
_PyStackRef *arguments,
int total_args)
{
PyObject *res;
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
res = NULL;
goto cleanup;
}
assert(((PyMethodDescrObject *)PyStackRef_AsPyObjectBorrow(callable))->d_method == meth);
assert(self == PyStackRef_AsPyObjectBorrow(arguments[0]));
PyCFunctionFastWithKeywords cfunc =
_PyCFunctionFastWithKeywords_CAST(meth->ml_meth);
res = cfunc(self, (args_o + 1), total_args-1, NULL);
STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
assert((res != NULL) ^ (PyErr_Occurred() != NULL));
cleanup:
// arguments is a pointer into the GC visible stack,
// so we must NULL out values as we clear them.
for (int i = total_args-1; i >= 0; i--) {
_PyStackRef tmp = arguments[i];
arguments[i] = PyStackRef_NULL;
PyStackRef_CLOSE(tmp);
}
PyStackRef_CLOSE(callable);
return res;
}
PyObject *
_Py_CallBuiltinClass_StackRefSteal(
_PyStackRef callable,
_PyStackRef *arguments,
int total_args)
{
PyObject *res;
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
res = NULL;
goto cleanup;
}
PyTypeObject *tp = (PyTypeObject *)PyStackRef_AsPyObjectBorrow(callable);
res = tp->tp_vectorcall((PyObject *)tp, args_o, total_args | PY_VECTORCALL_ARGUMENTS_OFFSET, NULL);
STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
assert((res != NULL) ^ (PyErr_Occurred() != NULL));
cleanup:
// arguments is a pointer into the GC visible stack,
// so we must NULL out values as we clear them.
for (int i = total_args-1; i >= 0; i--) {
_PyStackRef tmp = arguments[i];
arguments[i] = PyStackRef_NULL;
PyStackRef_CLOSE(tmp);
}
PyStackRef_CLOSE(callable);
return res;
}
PyObject *
_Py_BuildString_StackRefSteal(
_PyStackRef *arguments,
int total_args)
{
PyObject *res;
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
res = NULL;
goto cleanup;
}
res = _PyUnicode_JoinArray(&_Py_STR(empty), args_o, total_args);
STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
assert((res != NULL) ^ (PyErr_Occurred() != NULL));
cleanup:
// arguments is a pointer into the GC visible stack,
// so we must NULL out values as we clear them.
for (int i = total_args-1; i >= 0; i--) {
_PyStackRef tmp = arguments[i];
arguments[i] = PyStackRef_NULL;
PyStackRef_CLOSE(tmp);
}
return res;
}
PyObject *
_Py_BuildMap_StackRefSteal(
_PyStackRef *arguments,
int half_args)
{
PyObject *res;
STACKREFS_TO_PYOBJECTS(arguments, half_args*2, args_o);
if (CONVERSION_FAILED(args_o)) {
res = NULL;
goto cleanup;
}
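// The stack holds interleaved pairs [k0, v0, k1, v1, ...]: keys start
// at args_o and values at args_o+1, each read with a stride of 2.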
res = _PyDict_FromItems(
args_o, 2,
args_o+1, 2,
half_args
);
STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
assert((res != NULL) ^ (PyErr_Occurred() != NULL));
cleanup:
// arguments is a pointer into the GC visible stack,
// so we must NULL out values as we clear them.
for (int i = half_args*2-1; i >= 0; i--) {
_PyStackRef tmp = arguments[i];
arguments[i] = PyStackRef_NULL;
PyStackRef_CLOSE(tmp);
}
return res;
}
#ifdef Py_DEBUG
void
_Py_assert_within_stack_bounds(
_PyInterpreterFrame *frame, _PyStackRef *stack_pointer,
const char *filename, int lineno
) {
if (frame->owner == FRAME_OWNED_BY_INTERPRETER) {
return;
}
int level = (int)(stack_pointer - _PyFrame_Stackbase(frame));
if (level < 0) {
printf("Stack underflow (depth = %d) at %s:%d\n", level, filename, lineno);
fflush(stdout);
abort();
}
int size = _PyFrame_GetCode(frame)->co_stacksize;
if (level > size) {
printf("Stack overflow (depth = %d) at %s:%d\n", level, filename, lineno);
fflush(stdout);
abort();
}
}
#endif
int
_Py_CheckRecursiveCallPy(PyThreadState *tstate)
{
if (tstate->recursion_headroom) {
if (tstate->py_recursion_remaining < -50) {
/* Overflowing while handling an overflow. Give up. */
Py_FatalError("Cannot recover from Python stack overflow.");
}
}
else {
if (tstate->py_recursion_remaining <= 0) {
tstate->recursion_headroom++;
_PyErr_Format(tstate, PyExc_RecursionError,
"maximum recursion depth exceeded");
tstate->recursion_headroom--;
return -1;
}
}
return 0;
}
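/* Note the split model: _Py_CheckRecursiveCall() above guards the C
 * stack using the machine stack pointer, while this function guards the
 * pure-Python recursion counter controlled by sys.setrecursionlimit().
 * The -50 headroom lets error handling recurse a little further while a
 * RecursionError is already being raised. */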
static const _Py_CODEUNIT _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS[] = {
/* Put a NOP at the start, so that the IP points into
* the code, rather than before it */
{ .op.code = NOP, .op.arg = 0 },
{ .op.code = INTERPRETER_EXIT, .op.arg = 0 }, /* reached on return */
{ .op.code = NOP, .op.arg = 0 },
{ .op.code = INTERPRETER_EXIT, .op.arg = 0 }, /* reached on yield */
{ .op.code = RESUME, .op.arg = RESUME_OPARG_DEPTH1_MASK | RESUME_AT_FUNC_START }
};
const _Py_CODEUNIT *_Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR = (_Py_CODEUNIT*)&_Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS;
#ifdef Py_DEBUG
extern void _PyUOpPrint(const _PyUOpInstruction *uop);
#endif
PyObject **
_PyObjectArray_FromStackRefArray(_PyStackRef *input, Py_ssize_t nargs, PyObject **scratch)
{
PyObject **result;
if (nargs > MAX_STACKREF_SCRATCH) {
// +1 in case PY_VECTORCALL_ARGUMENTS_OFFSET is set.
result = PyMem_Malloc((nargs + 1) * sizeof(PyObject *));
if (result == NULL) {
return NULL;
}
}
else {
result = scratch;
}
result++;
result[0] = NULL; /* Keep GCC happy */
for (int i = 0; i < nargs; i++) {
result[i] = PyStackRef_AsPyObjectBorrow(input[i]);
}
return result;
}
void
_PyObjectArray_Free(PyObject **array, PyObject **scratch)
{
if (array != scratch) {
PyMem_Free(array);
}
}
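/* Sketch of the intended pairing (assumed from the `result++` above):
 * a fixed scratch buffer avoids a heap allocation for small calls, and
 * the caller undoes the +1 offset so _PyObjectArray_Free() can compare
 * against the original scratch pointer. Guarded out. */
#if 0
PyObject *scratch[MAX_STACKREF_SCRATCH + 1];
PyObject **args = _PyObjectArray_FromStackRefArray(refs, nargs, scratch);
if (args == NULL) {
    return NULL;
}
/* ... use args[0..nargs-1]; entries are borrowed references ... */
_PyObjectArray_Free(args - 1, scratch);  /* -1 undoes the result++ */
#endif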
#if _Py_TIER2
// 0 for success, -1 for error.
static int
stop_tracing_and_jit(PyThreadState *tstate, _PyInterpreterFrame *frame)
{
int _is_sys_tracing = (tstate->c_tracefunc != NULL) || (tstate->c_profilefunc != NULL);
int err = 0;
if (!_PyErr_Occurred(tstate) && !_is_sys_tracing) {
err = _PyOptimizer_Optimize(frame, tstate);
}
_PyJit_FinalizeTracing(tstate, err);
return err;
}
#endif
/* _PyEval_EvalFrameDefault is too large to optimize for speed with PGO on MSVC.
*/
#if (defined(_MSC_VER) && \
(_MSC_VER < 1943) && \
defined(_Py_USING_PGO))
#define DO_NOT_OPTIMIZE_INTERP_LOOP
#endif
#ifdef DO_NOT_OPTIMIZE_INTERP_LOOP
# pragma optimize("t", off)
/* This setting is reversed below following _PyEval_EvalFrameDefault */
#endif
#if _Py_TAIL_CALL_INTERP
#include "opcode_targets.h"
#include "generated_cases.c.h"
#endif
#if (defined(__GNUC__) && __GNUC__ >= 10 && !defined(__clang__)) && defined(__x86_64__)
/*
* gh-129987: The SLP autovectorizer can cause poor code generation for
* opcode dispatch in some GCC versions (observed in GCCs 12 through 15,
* probably caused by https://gcc.gnu.org/bugzilla/show_bug.cgi?id=115777),
* negating any benefit we get from vectorization elsewhere in the
* interpreter loop. Disabling it significantly affected older GCC versions
* (prior to GCC 9, 40% performance drop), so we have to selectively disable
* it.
*/
#define DONT_SLP_VECTORIZE __attribute__((optimize ("no-tree-slp-vectorize")))
#else
#define DONT_SLP_VECTORIZE
#endif
PyObject* _Py_HOT_FUNCTION DONT_SLP_VECTORIZE
_PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int throwflag)
{
_Py_EnsureTstateNotNULL(tstate);
check_invalid_reentrancy();
CALL_STAT_INC(pyeval_calls);
#if USE_COMPUTED_GOTOS && !_Py_TAIL_CALL_INTERP
/* Import the static jump table */
#include "opcode_targets.h"
void **opcode_targets = opcode_targets_table;
#endif
#ifdef Py_STATS
int lastopcode = 0;
#endif
#if !_Py_TAIL_CALL_INTERP
uint8_t opcode; /* Current opcode */
int oparg; /* Current opcode argument, if any */
assert(tstate->current_frame == NULL || tstate->current_frame->stackpointer != NULL);
#if !USE_COMPUTED_GOTOS
uint8_t tracing_mode = 0;
uint8_t dispatch_code;
#endif
#endif
_PyEntryFrame entry;
if (_Py_EnterRecursiveCallTstate(tstate, "")) {
assert(frame->owner != FRAME_OWNED_BY_INTERPRETER);
_PyEval_FrameClearAndPop(tstate, frame);
return NULL;
}
/* Local "register" variables.
* These are cached values from the frame and code object. */
_Py_CODEUNIT *next_instr;
_PyStackRef *stack_pointer;
entry.stack[0] = PyStackRef_NULL;
#ifdef Py_STACKREF_DEBUG
entry.frame.f_funcobj = PyStackRef_None;
#elif defined(Py_DEBUG)
/* Set these to invalid but identifiable values for debugging. */
entry.frame.f_funcobj = (_PyStackRef){.bits = 0xaaa0};
entry.frame.f_locals = (PyObject*)0xaaa1;
entry.frame.frame_obj = (PyFrameObject*)0xaaa2;
entry.frame.f_globals = (PyObject*)0xaaa3;
entry.frame.f_builtins = (PyObject*)0xaaa4;
#endif
entry.frame.f_executable = PyStackRef_None;
entry.frame.instr_ptr = (_Py_CODEUNIT *)_Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS + 1;
entry.frame.stackpointer = entry.stack;
entry.frame.owner = FRAME_OWNED_BY_INTERPRETER;
entry.frame.visited = 0;
entry.frame.return_offset = 0;
#ifdef Py_DEBUG
entry.frame.lltrace = 0;
#endif
/* Push frame */
entry.frame.previous = tstate->current_frame;
frame->previous = &entry.frame;
tstate->current_frame = frame;
entry.frame.localsplus[0] = PyStackRef_NULL;
#ifdef _Py_TIER2
if (tstate->current_executor != NULL) {
entry.frame.localsplus[0] = PyStackRef_FromPyObjectNew(tstate->current_executor);
tstate->current_executor = NULL;
}
#endif
/* support for generator.throw() */
if (throwflag) {
if (_Py_EnterRecursivePy(tstate)) {
goto early_exit;
}
#ifdef Py_GIL_DISABLED
/* Load thread-local bytecode */
if (frame->tlbc_index != ((_PyThreadStateImpl *)tstate)->tlbc_index) {
_Py_CODEUNIT *bytecode =
_PyEval_GetExecutableCode(tstate, _PyFrame_GetCode(frame));
if (bytecode == NULL) {
goto early_exit;
}
ptrdiff_t off = frame->instr_ptr - _PyFrame_GetBytecode(frame);
frame->tlbc_index = ((_PyThreadStateImpl *)tstate)->tlbc_index;
frame->instr_ptr = bytecode + off;
}
#endif
/* Because this avoids the RESUME, we need to update instrumentation */
_Py_Instrument(_PyFrame_GetCode(frame), tstate->interp);
next_instr = frame->instr_ptr;
monitor_throw(tstate, frame, next_instr);
stack_pointer = _PyFrame_GetStackPointer(frame);
#if _Py_TAIL_CALL_INTERP
# if Py_STATS
return _TAIL_CALL_error(frame, stack_pointer, tstate, next_instr, instruction_funcptr_handler_table, 0, lastopcode);
# else
return _TAIL_CALL_error(frame, stack_pointer, tstate, next_instr, instruction_funcptr_handler_table, 0);
# endif
#else
goto error;
#endif
}
#if _Py_TAIL_CALL_INTERP
# if Py_STATS
return _TAIL_CALL_start_frame(frame, NULL, tstate, NULL, instruction_funcptr_handler_table, 0, lastopcode);
# else
return _TAIL_CALL_start_frame(frame, NULL, tstate, NULL, instruction_funcptr_handler_table, 0);
# endif
#else
goto start_frame;
# include "generated_cases.c.h"
#endif
early_exit:
assert(_PyErr_Occurred(tstate));
_Py_LeaveRecursiveCallPy(tstate);
assert(frame->owner != FRAME_OWNED_BY_INTERPRETER);
// GH-99729: We need to unlink the frame *before* clearing it:
_PyInterpreterFrame *dying = frame;
frame = tstate->current_frame = dying->previous;
_PyEval_FrameClearAndPop(tstate, dying);
frame->return_offset = 0;
assert(frame->owner == FRAME_OWNED_BY_INTERPRETER);
/* Restore previous frame and exit */
tstate->current_frame = frame->previous;
return NULL;
}
#ifdef _Py_TIER2
#ifdef _Py_JIT
_PyJitEntryFuncPtr _Py_jit_entry = _Py_LazyJitShim;
#else
_PyJitEntryFuncPtr _Py_jit_entry = _PyTier2Interpreter;
#endif
#endif
#if defined(_Py_TIER2) && !defined(_Py_JIT)
_Py_CODEUNIT *
_PyTier2Interpreter(
_PyExecutorObject *current_executor, _PyInterpreterFrame *frame,
_PyStackRef *stack_pointer, PyThreadState *tstate
) {
const _PyUOpInstruction *next_uop;
int oparg;
/* Set up "jit" state after entry from tier 1.
* This mimics what the jit shim function does. */
tstate->jit_exit = NULL;
_PyStackRef _tos_cache0 = PyStackRef_ZERO_BITS;
_PyStackRef _tos_cache1 = PyStackRef_ZERO_BITS;
_PyStackRef _tos_cache2 = PyStackRef_ZERO_BITS;
int current_cached_values = 0;
tier2_start:
next_uop = current_executor->trace;
assert(next_uop->opcode == _START_EXECUTOR_r00 + current_cached_values ||
next_uop->opcode == _COLD_EXIT_r00 + current_cached_values ||
next_uop->opcode == _COLD_DYNAMIC_EXIT_r00 + current_cached_values);
#undef LOAD_IP
#define LOAD_IP(UNUSED) (void)0
#ifdef Py_STATS
// Disable these macros that apply to Tier 1 stats when we are in Tier 2
#undef STAT_INC
#define STAT_INC(opname, name) ((void)0)
#undef STAT_DEC
#define STAT_DEC(opname, name) ((void)0)
#endif
#undef ENABLE_SPECIALIZATION
#define ENABLE_SPECIALIZATION 0
#undef ENABLE_SPECIALIZATION_FT
#define ENABLE_SPECIALIZATION_FT 0
uint16_t uopcode;
#ifdef Py_STATS
int lastuop = 0;
uint64_t trace_uop_execution_counter = 0;
#endif
assert(next_uop->opcode == _START_EXECUTOR_r00 ||
next_uop->opcode == _COLD_EXIT_r00 ||
next_uop->opcode == _COLD_DYNAMIC_EXIT_r00);
tier2_dispatch:
for (;;) {
uopcode = next_uop->opcode;
#ifdef Py_DEBUG
if (frame->lltrace >= 3) {
dump_stack(frame, stack_pointer);
printf(" cache=[");
dump_cache_item(_tos_cache0, 0, current_cached_values);
printf(", ");
dump_cache_item(_tos_cache1, 1, current_cached_values);
printf(", ");
dump_cache_item(_tos_cache2, 2, current_cached_values);
printf("]\n");
if (next_uop->opcode == _START_EXECUTOR_r00) {
printf("%4d uop: ", 0);
}
else {
printf("%4d uop: ", (int)(next_uop - current_executor->trace));
}
_PyUOpPrint(next_uop);
printf("\n");
fflush(stdout);
}
#endif
next_uop++;
OPT_STAT_INC(uops_executed);
UOP_STAT_INC(uopcode, execution_count);
UOP_PAIR_INC(uopcode, lastuop);
#ifdef Py_STATS
trace_uop_execution_counter++;
((_PyUOpInstruction *)next_uop)[-1].execution_count++;
#endif
switch (uopcode) {
#include "executor_cases.c.h"
default:
#ifdef Py_DEBUG
{
printf("Unknown uop: ");
_PyUOpPrint(&next_uop[-1]);
printf(" @ %d\n", (int)(next_uop - current_executor->trace - 1));
Py_FatalError("Unknown uop");
}
#else
Py_UNREACHABLE();
#endif
}
}
jump_to_error_target:
#ifdef Py_DEBUG
if (frame->lltrace >= 2) {
printf("Error: [UOp ");
_PyUOpPrint(&next_uop[-1]);
printf(" @ %d -> %s]\n",
(int)(next_uop - current_executor->trace - 1),
_PyOpcode_OpName[frame->instr_ptr->op.code]);
fflush(stdout);
}
#endif
assert(next_uop[-1].format == UOP_FORMAT_JUMP);
uint16_t target = uop_get_error_target(&next_uop[-1]);
next_uop = current_executor->trace + target;
goto tier2_dispatch;
jump_to_jump_target:
assert(next_uop[-1].format == UOP_FORMAT_JUMP);
target = uop_get_jump_target(&next_uop[-1]);
next_uop = current_executor->trace + target;
goto tier2_dispatch;
}
#endif // _Py_TIER2
#ifdef DO_NOT_OPTIMIZE_INTERP_LOOP
# pragma optimize("", on)
#endif
#if defined(__GNUC__) || defined(__clang__)
# pragma GCC diagnostic pop
#elif defined(_MSC_VER) /* MS_WINDOWS */
# pragma warning(pop)
#endif
static void
format_missing(PyThreadState *tstate, const char *kind,
PyCodeObject *co, PyObject *names, PyObject *qualname)
{
int err;
Py_ssize_t len = PyList_GET_SIZE(names);
PyObject *name_str, *comma, *tail, *tmp;
assert(PyList_CheckExact(names));
assert(len >= 1);
/* Deal with the joys of natural language. */
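/* Illustrative examples: one missing name reads
   "f() missing 1 required positional argument: 'a'"; three read
   "f() missing 3 required positional arguments: 'a', 'b', and 'c'". */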
switch (len) {
case 1:
name_str = PyList_GET_ITEM(names, 0);
Py_INCREF(name_str);
break;
case 2:
name_str = PyUnicode_FromFormat("%U and %U",
PyList_GET_ITEM(names, len - 2),
PyList_GET_ITEM(names, len - 1));
break;
default:
tail = PyUnicode_FromFormat(", %U, and %U",
PyList_GET_ITEM(names, len - 2),
PyList_GET_ITEM(names, len - 1));
if (tail == NULL)
return;
/* Chop off the last two objects in the list. This shouldn't actually
fail, but we can't be too careful. */
err = PyList_SetSlice(names, len - 2, len, NULL);
if (err == -1) {
Py_DECREF(tail);
return;
}
/* Stitch everything up into a nice comma-separated list. */
comma = PyUnicode_FromString(", ");
if (comma == NULL) {
Py_DECREF(tail);
return;
}
tmp = PyUnicode_Join(comma, names);
Py_DECREF(comma);
if (tmp == NULL) {
Py_DECREF(tail);
return;
}
name_str = PyUnicode_Concat(tmp, tail);
Py_DECREF(tmp);
Py_DECREF(tail);
break;
}
if (name_str == NULL)
return;
_PyErr_Format(tstate, PyExc_TypeError,
"%U() missing %i required %s argument%s: %U",
qualname,
len,
kind,
len == 1 ? "" : "s",
name_str);
Py_DECREF(name_str);
}
static void
missing_arguments(PyThreadState *tstate, PyCodeObject *co,
Py_ssize_t missing, Py_ssize_t defcount,
_PyStackRef *localsplus, PyObject *qualname)
{
Py_ssize_t i, j = 0;
Py_ssize_t start, end;
int positional = (defcount != -1);
const char *kind = positional ? "positional" : "keyword-only";
PyObject *missing_names;
/* Compute the names of the arguments that are missing. */
missing_names = PyList_New(missing);
if (missing_names == NULL)
return;
if (positional) {
start = 0;
end = co->co_argcount - defcount;
}
else {
start = co->co_argcount;
end = start + co->co_kwonlyargcount;
}
for (i = start; i < end; i++) {
if (PyStackRef_IsNull(localsplus[i])) {
PyObject *raw = PyTuple_GET_ITEM(co->co_localsplusnames, i);
PyObject *name = PyObject_Repr(raw);
if (name == NULL) {
Py_DECREF(missing_names);
return;
}
PyList_SET_ITEM(missing_names, j++, name);
}
}
assert(j == missing);
format_missing(tstate, kind, co, missing_names, qualname);
Py_DECREF(missing_names);
}
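/* Report a TypeError for excess positional arguments.
   Illustrative example: def f(a, b=1, *, c) called as f(1, 2, 3, c=4)
   reports "f() takes from 1 to 2 positional arguments but 3 positional
   arguments (and 1 keyword-only argument) were given". */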
static void
too_many_positional(PyThreadState *tstate, PyCodeObject *co,
Py_ssize_t given, PyObject *defaults,
_PyStackRef *localsplus, PyObject *qualname)
{
int plural;
Py_ssize_t kwonly_given = 0;
Py_ssize_t i;
PyObject *sig, *kwonly_sig;
Py_ssize_t co_argcount = co->co_argcount;
assert((co->co_flags & CO_VARARGS) == 0);
/* Count missing keyword-only args. */
for (i = co_argcount; i < co_argcount + co->co_kwonlyargcount; i++) {
if (PyStackRef_AsPyObjectBorrow(localsplus[i]) != NULL) {
kwonly_given++;
}
}
Py_ssize_t defcount = defaults == NULL ? 0 : PyTuple_GET_SIZE(defaults);
if (defcount) {
Py_ssize_t atleast = co_argcount - defcount;
plural = 1;
sig = PyUnicode_FromFormat("from %zd to %zd", atleast, co_argcount);
}
else {
plural = (co_argcount != 1);
sig = PyUnicode_FromFormat("%zd", co_argcount);
}
if (sig == NULL)
return;
if (kwonly_given) {
const char *format = " positional argument%s (and %zd keyword-only argument%s)";
kwonly_sig = PyUnicode_FromFormat(format,
given != 1 ? "s" : "",
kwonly_given,
kwonly_given != 1 ? "s" : "");
if (kwonly_sig == NULL) {
Py_DECREF(sig);
return;
}
}
else {
/* This will not fail. */
kwonly_sig = Py_GetConstant(Py_CONSTANT_EMPTY_STR);
assert(kwonly_sig != NULL);
}
_PyErr_Format(tstate, PyExc_TypeError,
"%U() takes %U positional argument%s but %zd%U %s given",
qualname,
sig,
plural ? "s" : "",
given,
kwonly_sig,
given == 1 && !kwonly_given ? "was" : "were");
Py_DECREF(sig);
Py_DECREF(kwonly_sig);
}
static int
positional_only_passed_as_keyword(PyThreadState *tstate, PyCodeObject *co,
Py_ssize_t kwcount, PyObject* kwnames,
PyObject *qualname)
{
int posonly_conflicts = 0;
PyObject* posonly_names = PyList_New(0);
if (posonly_names == NULL) {
goto fail;
}
for (int k = 0; k < co->co_posonlyargcount; k++) {
PyObject* posonly_name = PyTuple_GET_ITEM(co->co_localsplusnames, k);
for (int k2 = 0; k2 < kwcount; k2++) {
/* Compare the pointers first and fall back to PyObject_RichCompareBool. */
PyObject* kwname = PyTuple_GET_ITEM(kwnames, k2);
if (kwname == posonly_name) {
if (PyList_Append(posonly_names, kwname) != 0) {
goto fail;
}
posonly_conflicts++;
continue;
}
int cmp = PyObject_RichCompareBool(posonly_name, kwname, Py_EQ);
if (cmp > 0) {
if (PyList_Append(posonly_names, kwname) != 0) {
goto fail;
}
posonly_conflicts++;
}
else if (cmp < 0) {
goto fail;
}
}
}
if (posonly_conflicts) {
PyObject* comma = PyUnicode_FromString(", ");
if (comma == NULL) {
goto fail;
}
PyObject* error_names = PyUnicode_Join(comma, posonly_names);
Py_DECREF(comma);
if (error_names == NULL) {
goto fail;
}
_PyErr_Format(tstate, PyExc_TypeError,
"%U() got some positional-only arguments passed"
" as keyword arguments: '%U'",
qualname, error_names);
Py_DECREF(error_names);
goto fail;
}
Py_DECREF(posonly_names);
return 0;
fail:
Py_XDECREF(posonly_names);
return 1;
}
static int
initialize_locals(PyThreadState *tstate, PyFunctionObject *func,
_PyStackRef *localsplus, _PyStackRef const *args,
Py_ssize_t argcount, PyObject *kwnames)
{
PyCodeObject *co = (PyCodeObject*)func->func_code;
const Py_ssize_t total_args = co->co_argcount + co->co_kwonlyargcount;
/* Create a dictionary for keyword parameters (**kwargs) */
PyObject *kwdict;
Py_ssize_t i;
if (co->co_flags & CO_VARKEYWORDS) {
kwdict = PyDict_New();
if (kwdict == NULL) {
goto fail_pre_positional;
}
i = total_args;
if (co->co_flags & CO_VARARGS) {
i++;
}
assert(PyStackRef_IsNull(localsplus[i]));
localsplus[i] = PyStackRef_FromPyObjectSteal(kwdict);
}
else {
kwdict = NULL;
}
/* Copy all positional arguments into local variables */
Py_ssize_t j, n;
if (argcount > co->co_argcount) {
n = co->co_argcount;
}
else {
n = argcount;
}
for (j = 0; j < n; j++) {
assert(PyStackRef_IsNull(localsplus[j]));
localsplus[j] = args[j];
}
/* Pack other positional arguments into the *args argument */
if (co->co_flags & CO_VARARGS) {
PyObject *u = NULL;
if (argcount == n) {
u = (PyObject *)&_Py_SINGLETON(tuple_empty);
}
else {
u = _PyTuple_FromStackRefStealOnSuccess(args + n, argcount - n);
if (u == NULL) {
for (Py_ssize_t i = n; i < argcount; i++) {
PyStackRef_CLOSE(args[i]);
}
}
}
if (u == NULL) {
goto fail_post_positional;
}
assert(PyStackRef_AsPyObjectBorrow(localsplus[total_args]) == NULL);
localsplus[total_args] = PyStackRef_FromPyObjectSteal(u);
}
else if (argcount > n) {
/* Too many positional args. Error is reported later */
for (j = n; j < argcount; j++) {
PyStackRef_CLOSE(args[j]);
}
}
/* Handle keyword arguments */
if (kwnames != NULL) {
Py_ssize_t kwcount = PyTuple_GET_SIZE(kwnames);
for (i = 0; i < kwcount; i++) {
PyObject **co_varnames;
PyObject *keyword = PyTuple_GET_ITEM(kwnames, i);
_PyStackRef value_stackref = args[i+argcount];
Py_ssize_t j;
if (keyword == NULL || !PyUnicode_Check(keyword)) {
_PyErr_Format(tstate, PyExc_TypeError,
"%U() keywords must be strings",
func->func_qualname);
goto kw_fail;
}
/* Speed hack: do raw pointer compares. As names are
normally interned this should almost always hit. */
co_varnames = ((PyTupleObject *)(co->co_localsplusnames))->ob_item;
for (j = co->co_posonlyargcount; j < total_args; j++) {
PyObject *varname = co_varnames[j];
if (varname == keyword) {
goto kw_found;
}
}
/* Slow fallback, just in case */
for (j = co->co_posonlyargcount; j < total_args; j++) {
PyObject *varname = co_varnames[j];
int cmp = PyObject_RichCompareBool(keyword, varname, Py_EQ);
if (cmp > 0) {
goto kw_found;
}
else if (cmp < 0) {
goto kw_fail;
}
}
assert(j >= total_args);
if (kwdict == NULL) {
if (co->co_posonlyargcount
&& positional_only_passed_as_keyword(tstate, co,
kwcount, kwnames,
func->func_qualname))
{
goto kw_fail;
}
PyObject* suggestion_keyword = NULL;
if (total_args > co->co_posonlyargcount) {
PyObject* possible_keywords = PyList_New(total_args - co->co_posonlyargcount);
if (!possible_keywords) {
PyErr_Clear();
} else {
for (Py_ssize_t k = co->co_posonlyargcount; k < total_args; k++) {
PyList_SET_ITEM(possible_keywords, k - co->co_posonlyargcount, co_varnames[k]);
}
suggestion_keyword = _Py_CalculateSuggestions(possible_keywords, keyword);
Py_DECREF(possible_keywords);
}
}
if (suggestion_keyword) {
_PyErr_Format(tstate, PyExc_TypeError,
"%U() got an unexpected keyword argument '%S'. Did you mean '%S'?",
func->func_qualname, keyword, suggestion_keyword);
Py_DECREF(suggestion_keyword);
} else {
_PyErr_Format(tstate, PyExc_TypeError,
"%U() got an unexpected keyword argument '%S'",
func->func_qualname, keyword);
}
goto kw_fail;
}
if (PyDict_SetItem(kwdict, keyword, PyStackRef_AsPyObjectBorrow(value_stackref)) == -1) {
goto kw_fail;
}
PyStackRef_CLOSE(value_stackref);
continue;
kw_fail:
for (; i < kwcount; i++) {
PyStackRef_CLOSE(args[i+argcount]);
}
goto fail_post_args;
kw_found:
if (PyStackRef_AsPyObjectBorrow(localsplus[j]) != NULL) {
_PyErr_Format(tstate, PyExc_TypeError,
"%U() got multiple values for argument '%S'",
func->func_qualname, keyword);
goto kw_fail;
}
localsplus[j] = value_stackref;
}
}
/* Check the number of positional arguments */
if ((argcount > co->co_argcount) && !(co->co_flags & CO_VARARGS)) {
too_many_positional(tstate, co, argcount, func->func_defaults, localsplus,
func->func_qualname);
goto fail_post_args;
}
/* Add missing positional arguments (copy default values from defs) */
if (argcount < co->co_argcount) {
Py_ssize_t defcount = func->func_defaults == NULL ? 0 : PyTuple_GET_SIZE(func->func_defaults);
Py_ssize_t m = co->co_argcount - defcount;
Py_ssize_t missing = 0;
for (i = argcount; i < m; i++) {
if (PyStackRef_IsNull(localsplus[i])) {
missing++;
}
}
if (missing) {
missing_arguments(tstate, co, missing, defcount, localsplus,
func->func_qualname);
goto fail_post_args;
}
if (n > m)
i = n - m;
else
i = 0;
if (defcount) {
PyObject **defs = &PyTuple_GET_ITEM(func->func_defaults, 0);
for (; i < defcount; i++) {
if (PyStackRef_AsPyObjectBorrow(localsplus[m+i]) == NULL) {
PyObject *def = defs[i];
localsplus[m+i] = PyStackRef_FromPyObjectNew(def);
}
}
}
}
/* Add missing keyword arguments (copy default values from kwdefs) */
if (co->co_kwonlyargcount > 0) {
Py_ssize_t missing = 0;
for (i = co->co_argcount; i < total_args; i++) {
if (PyStackRef_AsPyObjectBorrow(localsplus[i]) != NULL)
continue;
PyObject *varname = PyTuple_GET_ITEM(co->co_localsplusnames, i);
if (func->func_kwdefaults != NULL) {
PyObject *def;
if (PyDict_GetItemRef(func->func_kwdefaults, varname, &def) < 0) {
goto fail_post_args;
}
if (def) {
localsplus[i] = PyStackRef_FromPyObjectSteal(def);
continue;
}
}
missing++;
}
if (missing) {
missing_arguments(tstate, co, missing, -1, localsplus,
func->func_qualname);
goto fail_post_args;
}
}
return 0;
fail_pre_positional:
for (j = 0; j < argcount; j++) {
PyStackRef_CLOSE(args[j]);
}
/* fall through */
fail_post_positional:
if (kwnames) {
Py_ssize_t kwcount = PyTuple_GET_SIZE(kwnames);
for (j = argcount; j < argcount+kwcount; j++) {
PyStackRef_CLOSE(args[j]);
}
}
/* fall through */
fail_post_args:
return -1;
}
static void
clear_thread_frame(PyThreadState *tstate, _PyInterpreterFrame * frame)
{
assert(frame->owner == FRAME_OWNED_BY_THREAD);
// Make sure that this is, indeed, the top frame. We can't check this in
// _PyThreadState_PopFrame, since f_code is already cleared at that point:
assert((PyObject **)frame + _PyFrame_GetCode(frame)->co_framesize ==
tstate->datastack_top);
assert(frame->frame_obj == NULL || frame->frame_obj->f_frame == frame);
_PyFrame_ClearExceptCode(frame);
PyStackRef_CLEAR(frame->f_executable);
_PyThreadState_PopFrame(tstate, frame);
}
static void
clear_gen_frame(PyThreadState *tstate, _PyInterpreterFrame * frame)
{
assert(frame->owner == FRAME_OWNED_BY_GENERATOR);
PyGenObject *gen = _PyGen_GetGeneratorFromFrame(frame);
FT_ATOMIC_STORE_INT8_RELEASE(gen->gi_frame_state, FRAME_CLEARED);
((_PyThreadStateImpl *)tstate)->generator_return_kind = GENERATOR_RETURN;
assert(tstate->exc_info == &gen->gi_exc_state);
tstate->exc_info = gen->gi_exc_state.previous_item;
gen->gi_exc_state.previous_item = NULL;
assert(frame->frame_obj == NULL || frame->frame_obj->f_frame == frame);
frame->previous = NULL;
_PyFrame_ClearExceptCode(frame);
_PyErr_ClearExcState(&gen->gi_exc_state);
}
void
_PyEval_FrameClearAndPop(PyThreadState *tstate, _PyInterpreterFrame * frame)
{
// Update last_profiled_frame for remote profiler frame caching.
// By this point, tstate->current_frame is already set to the parent frame.
// Only update if we're popping the exact frame that was last profiled.
// This avoids corrupting the cache when transient frames (called and returned
// between profiler samples) update last_profiled_frame to addresses the
// profiler never saw.
if (tstate->last_profiled_frame != NULL && tstate->last_profiled_frame == frame) {
tstate->last_profiled_frame = tstate->current_frame;
}
if (frame->owner == FRAME_OWNED_BY_THREAD) {
clear_thread_frame(tstate, frame);
}
else {
clear_gen_frame(tstate, frame);
}
}
/* Consumes references to func, locals and all the args */
_PyInterpreterFrame *
_PyEvalFramePushAndInit(PyThreadState *tstate, _PyStackRef func,
PyObject *locals, _PyStackRef const* args,
size_t argcount, PyObject *kwnames, _PyInterpreterFrame *previous)
{
PyFunctionObject *func_obj = (PyFunctionObject *)PyStackRef_AsPyObjectBorrow(func);
PyCodeObject * code = (PyCodeObject *)func_obj->func_code;
CALL_STAT_INC(frames_pushed);
_PyInterpreterFrame *frame = _PyThreadState_PushFrame(tstate, code->co_framesize);
if (frame == NULL) {
goto fail;
}
_PyFrame_Initialize(tstate, frame, func, locals, code, 0, previous);
if (initialize_locals(tstate, func_obj, frame->localsplus, args, argcount, kwnames)) {
assert(frame->owner == FRAME_OWNED_BY_THREAD);
clear_thread_frame(tstate, frame);
return NULL;
}
return frame;
fail:
/* Consume the references */
PyStackRef_CLOSE(func);
Py_XDECREF(locals);
for (size_t i = 0; i < argcount; i++) {
PyStackRef_CLOSE(args[i]);
}
if (kwnames) {
Py_ssize_t kwcount = PyTuple_GET_SIZE(kwnames);
for (Py_ssize_t i = 0; i < kwcount; i++) {
PyStackRef_CLOSE(args[i+argcount]);
}
}
PyErr_NoMemory();
return NULL;
}
/* Same as _PyEvalFramePushAndInit but takes an args tuple and kwargs dict.
Steals references to func, callargs and kwargs.
*/
_PyInterpreterFrame *
_PyEvalFramePushAndInit_Ex(PyThreadState *tstate, _PyStackRef func,
PyObject *locals, Py_ssize_t nargs, PyObject *callargs, PyObject *kwargs, _PyInterpreterFrame *previous)
{
bool has_dict = (kwargs != NULL && PyDict_GET_SIZE(kwargs) > 0);
PyObject *kwnames = NULL;
_PyStackRef *newargs;
PyObject *const *object_array = NULL;
_PyStackRef stack_array[8] = {0};
if (has_dict) {
object_array = _PyStack_UnpackDict(tstate, _PyTuple_ITEMS(callargs), nargs, kwargs, &kwnames);
if (object_array == NULL) {
PyStackRef_CLOSE(func);
goto error;
}
size_t total_args = nargs + PyDict_GET_SIZE(kwargs);
assert(sizeof(PyObject *) == sizeof(_PyStackRef));
newargs = (_PyStackRef *)object_array;
for (size_t i = 0; i < total_args; i++) {
newargs[i] = PyStackRef_FromPyObjectSteal(object_array[i]);
}
}
else {
if (nargs <= 8) {
newargs = stack_array;
}
else {
newargs = PyMem_Malloc(sizeof(_PyStackRef) * nargs);
if (newargs == NULL) {
PyErr_NoMemory();
PyStackRef_CLOSE(func);
goto error;
}
}
/* We need to create a new reference for all our args since the new frame steals them. */
for (Py_ssize_t i = 0; i < nargs; i++) {
newargs[i] = PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(callargs, i));
}
}
_PyInterpreterFrame *new_frame = _PyEvalFramePushAndInit(
tstate, func, locals,
newargs, nargs, kwnames, previous
);
if (has_dict) {
_PyStack_UnpackDict_FreeNoDecRef(object_array, kwnames);
}
else if (nargs > 8) {
PyMem_Free((void *)newargs);
}
/* No need to decref func here because the reference has been stolen by
_PyEvalFramePushAndInit.
*/
Py_DECREF(callargs);
Py_XDECREF(kwargs);
return new_frame;
error:
Py_DECREF(callargs);
Py_XDECREF(kwargs);
return NULL;
}
PyObject *
_PyEval_Vector(PyThreadState *tstate, PyFunctionObject *func,
PyObject *locals,
PyObject* const* args, size_t argcount,
PyObject *kwnames)
{
size_t total_args = argcount;
if (kwnames) {
total_args += PyTuple_GET_SIZE(kwnames);
}
_PyStackRef stack_array[8] = {0};
_PyStackRef *arguments;
if (total_args <= 8) {
arguments = stack_array;
}
else {
arguments = PyMem_Malloc(sizeof(_PyStackRef) * total_args);
if (arguments == NULL) {
return PyErr_NoMemory();
}
}
/* _PyEvalFramePushAndInit consumes the references
* to func, locals and all its arguments */
Py_XINCREF(locals);
for (size_t i = 0; i < argcount; i++) {
arguments[i] = PyStackRef_FromPyObjectNew(args[i]);
}
if (kwnames) {
Py_ssize_t kwcount = PyTuple_GET_SIZE(kwnames);
for (Py_ssize_t i = 0; i < kwcount; i++) {
arguments[i+argcount] = PyStackRef_FromPyObjectNew(args[i+argcount]);
}
}
_PyInterpreterFrame *frame = _PyEvalFramePushAndInit(
tstate, PyStackRef_FromPyObjectNew(func), locals,
arguments, argcount, kwnames, NULL);
if (total_args > 8) {
PyMem_Free(arguments);
}
if (frame == NULL) {
return NULL;
}
EVAL_CALL_STAT_INC(EVAL_CALL_VECTOR);
return _PyEval_EvalFrame(tstate, frame, 0);
}
/* Legacy API */
PyObject *
PyEval_EvalCodeEx(PyObject *_co, PyObject *globals, PyObject *locals,
PyObject *const *args, int argcount,
PyObject *const *kws, int kwcount,
PyObject *const *defs, int defcount,
PyObject *kwdefs, PyObject *closure)
{
PyThreadState *tstate = _PyThreadState_GET();
PyObject *res = NULL;
PyObject *defaults = PyTuple_FromArray(defs, defcount);
if (defaults == NULL) {
return NULL;
}
PyObject *builtins = _PyDict_LoadBuiltinsFromGlobals(globals);
if (builtins == NULL) {
Py_DECREF(defaults);
return NULL;
}
if (locals == NULL) {
locals = globals;
}
PyObject *kwnames = NULL;
PyObject *const *allargs;
PyObject **newargs = NULL;
PyFunctionObject *func = NULL;
if (kwcount == 0) {
allargs = args;
}
else {
kwnames = PyTuple_New(kwcount);
if (kwnames == NULL) {
goto fail;
}
newargs = PyMem_Malloc(sizeof(PyObject *)*(kwcount+argcount));
if (newargs == NULL) {
goto fail;
}
for (int i = 0; i < argcount; i++) {
newargs[i] = args[i];
}
for (int i = 0; i < kwcount; i++) {
PyTuple_SET_ITEM(kwnames, i, Py_NewRef(kws[2*i]));
newargs[argcount+i] = kws[2*i+1];
}
allargs = newargs;
}
PyFrameConstructor constr = {
.fc_globals = globals,
.fc_builtins = builtins,
.fc_name = ((PyCodeObject *)_co)->co_name,
.fc_qualname = ((PyCodeObject *)_co)->co_name,
.fc_code = _co,
.fc_defaults = defaults,
.fc_kwdefaults = kwdefs,
.fc_closure = closure
};
func = _PyFunction_FromConstructor(&constr);
if (func == NULL) {
goto fail;
}
EVAL_CALL_STAT_INC(EVAL_CALL_LEGACY);
res = _PyEval_Vector(tstate, func, locals,
allargs, argcount,
kwnames);
fail:
Py_XDECREF(func);
Py_XDECREF(kwnames);
PyMem_Free(newargs);
_Py_DECREF_BUILTINS(builtins);
Py_DECREF(defaults);
return res;
}
/* Logic for matching an exception in an except* clause (too
complicated for inlining).
*/
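/* Illustrative example: with
       exc_value  = ExceptionGroup("eg", [ValueError(1), TypeError(2)])
       match_type = ValueError
   the split below produces
       *match = ExceptionGroup("eg", [ValueError(1)])
       *rest  = ExceptionGroup("eg", [TypeError(2)]) */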
int
_PyEval_ExceptionGroupMatch(_PyInterpreterFrame *frame, PyObject* exc_value,
PyObject *match_type, PyObject **match, PyObject **rest)
{
if (Py_IsNone(exc_value)) {
*match = Py_NewRef(Py_None);
*rest = Py_NewRef(Py_None);
return 0;
}
assert(PyExceptionInstance_Check(exc_value));
if (PyErr_GivenExceptionMatches(exc_value, match_type)) {
/* Full match of exc itself */
bool is_eg = _PyBaseExceptionGroup_Check(exc_value);
if (is_eg) {
*match = Py_NewRef(exc_value);
}
else {
/* naked exception - wrap it */
PyObject *excs = PyTuple_Pack(1, exc_value);
if (excs == NULL) {
return -1;
}
PyObject *wrapped = _PyExc_CreateExceptionGroup("", excs);
Py_DECREF(excs);
if (wrapped == NULL) {
return -1;
}
PyFrameObject *f = _PyFrame_GetFrameObject(frame);
if (f != NULL) {
PyObject *tb = _PyTraceBack_FromFrame(NULL, f);
if (tb == NULL) {
return -1;
}
PyException_SetTraceback(wrapped, tb);
Py_DECREF(tb);
}
*match = wrapped;
}
*rest = Py_NewRef(Py_None);
return 0;
}
/* exc_value does not match match_type.
* Check for partial match if it's an exception group.
*/
if (_PyBaseExceptionGroup_Check(exc_value)) {
PyObject *pair = PyObject_CallMethod(exc_value, "split", "(O)",
match_type);
if (pair == NULL) {
return -1;
}
if (!PyTuple_CheckExact(pair)) {
PyErr_Format(PyExc_TypeError,
"%.200s.split must return a tuple, not %.200s",
Py_TYPE(exc_value)->tp_name, Py_TYPE(pair)->tp_name);
Py_DECREF(pair);
return -1;
}
// allow tuples of length > 2 for backwards compatibility
if (PyTuple_GET_SIZE(pair) < 2) {
PyErr_Format(PyExc_TypeError,
"%.200s.split must return a 2-tuple, "
"got tuple of size %zd",
Py_TYPE(exc_value)->tp_name, PyTuple_GET_SIZE(pair));
Py_DECREF(pair);
return -1;
}
*match = Py_NewRef(PyTuple_GET_ITEM(pair, 0));
*rest = Py_NewRef(PyTuple_GET_ITEM(pair, 1));
Py_DECREF(pair);
return 0;
}
/* no match */
*match = Py_NewRef(Py_None);
*rest = Py_NewRef(exc_value);
return 0;
}
/* Iterate v argcnt times and store the results on the stack (via decreasing
sp). Return 1 for success, 0 if error.
If argcntafter == -1, do a simple unpack. If it is >= 0, do an unpack
with a variable target.
*/
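/* Illustrative example: "a, *b, c = range(5)" reaches this function with
   argcnt == 1 and argcntafter == 1 (assuming the usual UNPACK_EX operand
   split), leaving a == 0, b == [1, 2, 3] and c == 4, pushed via
   decreasing sp; plain "x, y = pair" uses argcnt == 2, argcntafter == -1. */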
int
_PyEval_UnpackIterableStackRef(PyThreadState *tstate, PyObject *v,
int argcnt, int argcntafter, _PyStackRef *sp)
{
int i = 0, j = 0;
Py_ssize_t ll = 0;
PyObject *it; /* iter(v) */
PyObject *w;
PyObject *l = NULL; /* variable list */
assert(v != NULL);
it = PyObject_GetIter(v);
if (it == NULL) {
if (_PyErr_ExceptionMatches(tstate, PyExc_TypeError) &&
Py_TYPE(v)->tp_iter == NULL && !PySequence_Check(v))
{
_PyErr_Format(tstate, PyExc_TypeError,
"cannot unpack non-iterable %.200s object",
Py_TYPE(v)->tp_name);
}
return 0;
}
for (; i < argcnt; i++) {
w = PyIter_Next(it);
if (w == NULL) {
/* Iterator done, via error or exhaustion. */
if (!_PyErr_Occurred(tstate)) {
if (argcntafter == -1) {
_PyErr_Format(tstate, PyExc_ValueError,
"not enough values to unpack "
"(expected %d, got %d)",
argcnt, i);
}
else {
_PyErr_Format(tstate, PyExc_ValueError,
"not enough values to unpack "
"(expected at least %d, got %d)",
argcnt + argcntafter, i);
}
}
goto Error;
}
*--sp = PyStackRef_FromPyObjectSteal(w);
}
if (argcntafter == -1) {
/* We better have exhausted the iterator now. */
w = PyIter_Next(it);
if (w == NULL) {
if (_PyErr_Occurred(tstate))
goto Error;
Py_DECREF(it);
return 1;
}
Py_DECREF(w);
if (PyList_CheckExact(v) || PyTuple_CheckExact(v)
|| PyDict_CheckExact(v)) {
ll = PyDict_CheckExact(v) ? PyDict_Size(v) : Py_SIZE(v);
if (ll > argcnt) {
_PyErr_Format(tstate, PyExc_ValueError,
"too many values to unpack (expected %d, got %zd)",
argcnt, ll);
goto Error;
}
}
_PyErr_Format(tstate, PyExc_ValueError,
"too many values to unpack (expected %d)",
argcnt);
goto Error;
}
l = PySequence_List(it);
if (l == NULL)
goto Error;
*--sp = PyStackRef_FromPyObjectSteal(l);
i++;
ll = PyList_GET_SIZE(l);
if (ll < argcntafter) {
_PyErr_Format(tstate, PyExc_ValueError,
"not enough values to unpack (expected at least %d, got %zd)",
argcnt + argcntafter, argcnt + ll);
goto Error;
}
/* Pop the "after-variable" args off the list. */
for (j = argcntafter; j > 0; j--, i++) {
*--sp = PyStackRef_FromPyObjectSteal(PyList_GET_ITEM(l, ll - j));
}
/* Resize the list. */
Py_SET_SIZE(l, ll - argcntafter);
Py_DECREF(it);
return 1;
Error:
for (; i > 0; i--, sp++) {
PyStackRef_CLOSE(*sp);
}
Py_XDECREF(it);
return 0;
}
void
_PyEval_MonitorRaise(PyThreadState *tstate, _PyInterpreterFrame *frame,
_Py_CODEUNIT *instr)
{
if (no_tools_for_global_event(tstate, PY_MONITORING_EVENT_RAISE)) {
return;
}
do_monitor_exc(tstate, frame, instr, PY_MONITORING_EVENT_RAISE);
}
bool
_PyEval_NoToolsForUnwind(PyThreadState *tstate) {
return no_tools_for_global_event(tstate, PY_MONITORING_EVENT_PY_UNWIND);
}
void
PyThreadState_EnterTracing(PyThreadState *tstate)
{
assert(tstate->tracing >= 0);
tstate->tracing++;
}
void
PyThreadState_LeaveTracing(PyThreadState *tstate)
{
assert(tstate->tracing > 0);
tstate->tracing--;
}
PyObject*
_PyEval_CallTracing(PyObject *func, PyObject *args)
{
// Save and disable tracing
PyThreadState *tstate = _PyThreadState_GET();
int save_tracing = tstate->tracing;
tstate->tracing = 0;
// Call the tracing function
PyObject *result = PyObject_Call(func, args, NULL);
// Restore tracing
tstate->tracing = save_tracing;
return result;
}
void
PyEval_SetProfile(Py_tracefunc func, PyObject *arg)
{
PyThreadState *tstate = _PyThreadState_GET();
if (_PyEval_SetProfile(tstate, func, arg) < 0) {
/* Log _PySys_Audit() error */
PyErr_FormatUnraisable("Exception ignored in PyEval_SetProfile");
}
}
void
PyEval_SetProfileAllThreads(Py_tracefunc func, PyObject *arg)
{
PyInterpreterState *interp = _PyInterpreterState_GET();
if (_PyEval_SetProfileAllThreads(interp, func, arg) < 0) {
/* Log _PySys_Audit() error */
PyErr_FormatUnraisable("Exception ignored in PyEval_SetProfileAllThreads");
}
}
void
PyEval_SetTrace(Py_tracefunc func, PyObject *arg)
{
PyThreadState *tstate = _PyThreadState_GET();
if (_PyEval_SetTrace(tstate, func, arg) < 0) {
/* Log _PySys_Audit() error */
PyErr_FormatUnraisable("Exception ignored in PyEval_SetTrace");
}
}
void
PyEval_SetTraceAllThreads(Py_tracefunc func, PyObject *arg)
{
PyInterpreterState *interp = _PyInterpreterState_GET();
if (_PyEval_SetTraceAllThreads(interp, func, arg) < 0) {
/* Log _PySys_Audit() error */
PyErr_FormatUnraisable("Exception ignored in PyEval_SetTraceAllThreads");
}
}
int
_PyEval_SetCoroutineOriginTrackingDepth(int depth)
{
PyThreadState *tstate = _PyThreadState_GET();
if (depth < 0) {
_PyErr_SetString(tstate, PyExc_ValueError, "depth must be >= 0");
return -1;
}
tstate->coroutine_origin_tracking_depth = depth;
return 0;
}
int
_PyEval_GetCoroutineOriginTrackingDepth(void)
{
PyThreadState *tstate = _PyThreadState_GET();
return tstate->coroutine_origin_tracking_depth;
}
int
_PyEval_SetAsyncGenFirstiter(PyObject *firstiter)
{
PyThreadState *tstate = _PyThreadState_GET();
if (_PySys_Audit(tstate, "sys.set_asyncgen_hook_firstiter", NULL) < 0) {
return -1;
}
Py_XSETREF(tstate->async_gen_firstiter, Py_XNewRef(firstiter));
return 0;
}
PyObject *
_PyEval_GetAsyncGenFirstiter(void)
{
PyThreadState *tstate = _PyThreadState_GET();
return tstate->async_gen_firstiter;
}
int
_PyEval_SetAsyncGenFinalizer(PyObject *finalizer)
{
PyThreadState *tstate = _PyThreadState_GET();
if (_PySys_Audit(tstate, "sys.set_asyncgen_hook_finalizer", NULL) < 0) {
return -1;
}
Py_XSETREF(tstate->async_gen_finalizer, Py_XNewRef(finalizer));
return 0;
}
PyObject *
_PyEval_GetAsyncGenFinalizer(void)
{
PyThreadState *tstate = _PyThreadState_GET();
return tstate->async_gen_finalizer;
}
_PyInterpreterFrame *
_PyEval_GetFrame(void)
{
PyThreadState *tstate = _PyThreadState_GET();
return _PyThreadState_GetFrame(tstate);
}
PyFrameObject *
PyEval_GetFrame(void)
{
_PyInterpreterFrame *frame = _PyEval_GetFrame();
if (frame == NULL) {
return NULL;
}
PyFrameObject *f = _PyFrame_GetFrameObject(frame);
if (f == NULL) {
PyErr_Clear();
}
return f;
}
PyObject *
_PyEval_GetBuiltins(PyThreadState *tstate)
{
_PyInterpreterFrame *frame = _PyThreadState_GetFrame(tstate);
if (frame != NULL) {
return frame->f_builtins;
}
return tstate->interp->builtins;
}
PyObject *
PyEval_GetBuiltins(void)
{
PyThreadState *tstate = _PyThreadState_GET();
return _PyEval_GetBuiltins(tstate);
}
/* Convenience function to get a builtin from its name */
PyObject *
_PyEval_GetBuiltin(PyObject *name)
{
PyObject *attr;
if (PyMapping_GetOptionalItem(PyEval_GetBuiltins(), name, &attr) == 0) {
PyErr_SetObject(PyExc_AttributeError, name);
}
return attr;
}
PyObject *
PyEval_GetLocals(void)
{
// We need to return a borrowed reference here, so some tricks are needed
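// The trick: the materialized mapping is cached on the frame object
// (f->f_locals_cache below), so it stays alive after we release our own
// reference and the caller can safely use the borrowed pointer.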
PyThreadState *tstate = _PyThreadState_GET();
_PyInterpreterFrame *current_frame = _PyThreadState_GetFrame(tstate);
if (current_frame == NULL) {
_PyErr_SetString(tstate, PyExc_SystemError, "frame does not exist");
return NULL;
}
// Be aware that this returns a new reference
PyObject *locals = _PyFrame_GetLocals(current_frame);
if (locals == NULL) {
return NULL;
}
if (PyFrameLocalsProxy_Check(locals)) {
PyFrameObject *f = _PyFrame_GetFrameObject(current_frame);
PyObject *ret = f->f_locals_cache;
if (ret == NULL) {
ret = PyDict_New();
if (ret == NULL) {
Py_DECREF(locals);
return NULL;
}
f->f_locals_cache = ret;
}
if (PyDict_Update(ret, locals) < 0) {
// At this point, if the cache dict is broken, it will stay broken, as
// trying to clean it up or replace it will just cause other problems
ret = NULL;
}
Py_DECREF(locals);
return ret;
}
assert(PyMapping_Check(locals));
assert(Py_REFCNT(locals) > 1);
Py_DECREF(locals);
return locals;
}
PyObject *
_PyEval_GetFrameLocals(void)
{
PyThreadState *tstate = _PyThreadState_GET();
_PyInterpreterFrame *current_frame = _PyThreadState_GetFrame(tstate);
if (current_frame == NULL) {
_PyErr_SetString(tstate, PyExc_SystemError, "frame does not exist");
return NULL;
}
PyObject *locals = _PyFrame_GetLocals(current_frame);
if (locals == NULL) {
return NULL;
}
if (PyFrameLocalsProxy_Check(locals)) {
PyObject* ret = PyDict_New();
if (ret == NULL) {
Py_DECREF(locals);
return NULL;
}
if (PyDict_Update(ret, locals) < 0) {
Py_DECREF(ret);
Py_DECREF(locals);
return NULL;
}
Py_DECREF(locals);
return ret;
}
assert(PyMapping_Check(locals));
return locals;
}
static PyObject *
_PyEval_GetGlobals(PyThreadState *tstate)
{
_PyInterpreterFrame *current_frame = _PyThreadState_GetFrame(tstate);
if (current_frame == NULL) {
return NULL;
}
return current_frame->f_globals;
}
PyObject *
PyEval_GetGlobals(void)
{
PyThreadState *tstate = _PyThreadState_GET();
return _PyEval_GetGlobals(tstate);
}
PyObject *
_PyEval_GetGlobalsFromRunningMain(PyThreadState *tstate)
{
if (!_PyInterpreterState_IsRunningMain(tstate->interp)) {
return NULL;
}
PyObject *mod = _Py_GetMainModule(tstate);
if (_Py_CheckMainModule(mod) < 0) {
Py_XDECREF(mod);
return NULL;
}
PyObject *globals = PyModule_GetDict(mod); // borrowed
Py_DECREF(mod);
return globals;
}
static PyObject *
get_globals_builtins(PyObject *globals)
{
PyObject *builtins = NULL;
if (PyDict_Check(globals)) {
if (PyDict_GetItemRef(globals, &_Py_ID(__builtins__), &builtins) < 0) {
return NULL;
}
}
else {
if (PyMapping_GetOptionalItem(
globals, &_Py_ID(__builtins__), &builtins) < 0)
{
return NULL;
}
}
return builtins;
}
static int
set_globals_builtins(PyObject *globals, PyObject *builtins)
{
if (PyDict_Check(globals)) {
if (PyDict_SetItem(globals, &_Py_ID(__builtins__), builtins) < 0) {
return -1;
}
}
else {
if (PyObject_SetItem(globals, &_Py_ID(__builtins__), builtins) < 0) {
return -1;
}
}
return 0;
}
int
_PyEval_EnsureBuiltins(PyThreadState *tstate, PyObject *globals,
PyObject **p_builtins)
{
PyObject *builtins = get_globals_builtins(globals);
if (builtins == NULL) {
if (_PyErr_Occurred(tstate)) {
return -1;
}
builtins = PyEval_GetBuiltins(); // borrowed
if (builtins == NULL) {
assert(_PyErr_Occurred(tstate));
return -1;
}
Py_INCREF(builtins);
if (set_globals_builtins(globals, builtins) < 0) {
Py_DECREF(builtins);
return -1;
}
}
if (p_builtins != NULL) {
*p_builtins = builtins;
}
else {
Py_DECREF(builtins);
}
return 0;
}
int
_PyEval_EnsureBuiltinsWithModule(PyThreadState *tstate, PyObject *globals,
PyObject **p_builtins)
{
PyObject *builtins = get_globals_builtins(globals);
if (builtins == NULL) {
if (_PyErr_Occurred(tstate)) {
return -1;
}
builtins = PyImport_ImportModuleLevel("builtins", NULL, NULL, NULL, 0);
if (builtins == NULL) {
return -1;
}
if (set_globals_builtins(globals, builtins) < 0) {
Py_DECREF(builtins);
return -1;
}
}
if (p_builtins != NULL) {
*p_builtins = builtins;
}
else {
Py_DECREF(builtins);
}
return 0;
}
PyObject*
PyEval_GetFrameLocals(void)
{
return _PyEval_GetFrameLocals();
}
PyObject* PyEval_GetFrameGlobals(void)
{
PyThreadState *tstate = _PyThreadState_GET();
_PyInterpreterFrame *current_frame = _PyThreadState_GetFrame(tstate);
if (current_frame == NULL) {
return NULL;
}
return Py_XNewRef(current_frame->f_globals);
}
PyObject* PyEval_GetFrameBuiltins(void)
{
PyThreadState *tstate = _PyThreadState_GET();
return Py_XNewRef(_PyEval_GetBuiltins(tstate));
}
int
PyEval_MergeCompilerFlags(PyCompilerFlags *cf)
{
PyThreadState *tstate = _PyThreadState_GET();
_PyInterpreterFrame *current_frame = tstate->current_frame;
if (current_frame == tstate->base_frame) {
current_frame = NULL;
}
int result = cf->cf_flags != 0;
if (current_frame != NULL) {
const int codeflags = _PyFrame_GetCode(current_frame)->co_flags;
const int compilerflags = codeflags & PyCF_MASK;
if (compilerflags) {
result = 1;
cf->cf_flags |= compilerflags;
}
}
return result;
}
const char *
PyEval_GetFuncName(PyObject *func)
{
if (PyMethod_Check(func))
return PyEval_GetFuncName(PyMethod_GET_FUNCTION(func));
else if (PyFunction_Check(func))
return PyUnicode_AsUTF8(((PyFunctionObject*)func)->func_name);
else if (PyCFunction_Check(func))
return ((PyCFunctionObject*)func)->m_ml->ml_name;
else
return Py_TYPE(func)->tp_name;
}
const char *
PyEval_GetFuncDesc(PyObject *func)
{
if (PyMethod_Check(func))
return "()";
else if (PyFunction_Check(func))
return "()";
else if (PyCFunction_Check(func))
return "()";
else
return " object";
}
/* Extract a slice index from a PyLong or an object with the
nb_index slot defined, and store in *pi.
Silently reduce values larger than PY_SSIZE_T_MAX to PY_SSIZE_T_MAX,
and silently boost values less than PY_SSIZE_T_MIN to PY_SSIZE_T_MIN.
Return 0 on error, 1 on success.
*/
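/* Illustrative example: "seq[10**100:]" passes a PyLong far above
   PY_SSIZE_T_MAX as the start index; it is silently clipped to
   PY_SSIZE_T_MAX rather than raising OverflowError. */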
int
_PyEval_SliceIndex(PyObject *v, Py_ssize_t *pi)
{
PyThreadState *tstate = _PyThreadState_GET();
if (!Py_IsNone(v)) {
Py_ssize_t x;
if (_PyIndex_Check(v)) {
x = PyNumber_AsSsize_t(v, NULL);
if (x == -1 && _PyErr_Occurred(tstate))
return 0;
}
else {
_PyErr_SetString(tstate, PyExc_TypeError,
"slice indices must be integers or "
"None or have an __index__ method");
return 0;
}
*pi = x;
}
return 1;
}
int
_PyEval_SliceIndexNotNone(PyObject *v, Py_ssize_t *pi)
{
PyThreadState *tstate = _PyThreadState_GET();
Py_ssize_t x;
if (_PyIndex_Check(v)) {
x = PyNumber_AsSsize_t(v, NULL);
if (x == -1 && _PyErr_Occurred(tstate))
return 0;
}
else {
_PyErr_SetString(tstate, PyExc_TypeError,
"slice indices must be integers or "
"have an __index__ method");
return 0;
}
*pi = x;
return 1;
}
PyObject *
_PyEval_ImportName(PyThreadState *tstate, _PyInterpreterFrame *frame,
PyObject *name, PyObject *fromlist, PyObject *level)
{
PyObject *import_func;
if (PyMapping_GetOptionalItem(frame->f_builtins, &_Py_ID(__import__), &import_func) < 0) {
return NULL;
}
if (import_func == NULL) {
_PyErr_SetString(tstate, PyExc_ImportError, "__import__ not found");
return NULL;
}
PyObject *locals = frame->f_locals;
if (locals == NULL) {
locals = Py_None;
}
/* Fast path for not overloaded __import__. */
if (_PyImport_IsDefaultImportFunc(tstate->interp, import_func)) {
Py_DECREF(import_func);
int ilevel = PyLong_AsInt(level);
if (ilevel == -1 && _PyErr_Occurred(tstate)) {
return NULL;
}
return PyImport_ImportModuleLevelObject(
name,
frame->f_globals,
locals,
fromlist,
ilevel);
}
PyObject* args[5] = {name, frame->f_globals, locals, fromlist, level};
PyObject *res = PyObject_Vectorcall(import_func, args, 5, NULL);
Py_DECREF(import_func);
return res;
}
PyObject *
_PyEval_ImportFrom(PyThreadState *tstate, PyObject *v, PyObject *name)
{
PyObject *x;
PyObject *fullmodname, *mod_name, *origin, *mod_name_or_unknown, *errmsg, *spec;
if (PyObject_GetOptionalAttr(v, name, &x) != 0) {
return x;
}
/* Issue #17636: in case this failed because of a circular relative
import, try to fall back on reading the module directly from
sys.modules. */
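/* Illustrative example: during a circular "from pkg import mod", pkg may
   not yet expose the attribute "mod" even though the submodule object
   already exists, so retry the lookup as sys.modules["pkg.mod"]. */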
if (PyObject_GetOptionalAttr(v, &_Py_ID(__name__), &mod_name) < 0) {
return NULL;
}
if (mod_name == NULL || !PyUnicode_Check(mod_name)) {
Py_CLEAR(mod_name);
goto error;
}
fullmodname = PyUnicode_FromFormat("%U.%U", mod_name, name);
if (fullmodname == NULL) {
Py_DECREF(mod_name);
return NULL;
}
x = PyImport_GetModule(fullmodname);
Py_DECREF(fullmodname);
if (x == NULL && !_PyErr_Occurred(tstate)) {
goto error;
}
Py_DECREF(mod_name);
return x;
error:
if (mod_name == NULL) {
mod_name_or_unknown = PyUnicode_FromString("<unknown module name>");
if (mod_name_or_unknown == NULL) {
return NULL;
}
} else {
mod_name_or_unknown = mod_name;
}
// mod_name is no longer an owned reference
assert(mod_name_or_unknown);
assert(mod_name == NULL || mod_name == mod_name_or_unknown);
origin = NULL;
if (PyObject_GetOptionalAttr(v, &_Py_ID(__spec__), &spec) < 0) {
Py_DECREF(mod_name_or_unknown);
return NULL;
}
if (spec == NULL) {
errmsg = PyUnicode_FromFormat(
"cannot import name %R from %R (unknown location)",
name, mod_name_or_unknown
);
goto done_with_errmsg;
}
if (_PyModuleSpec_GetFileOrigin(spec, &origin) < 0) {
goto done;
}
int is_possibly_shadowing = _PyModule_IsPossiblyShadowing(origin);
if (is_possibly_shadowing < 0) {
goto done;
}
int is_possibly_shadowing_stdlib = 0;
if (is_possibly_shadowing) {
PyObject *stdlib_modules;
if (PySys_GetOptionalAttrString("stdlib_module_names", &stdlib_modules) < 0) {
goto done;
}
if (stdlib_modules && PyAnySet_Check(stdlib_modules)) {
is_possibly_shadowing_stdlib = PySet_Contains(stdlib_modules, mod_name_or_unknown);
if (is_possibly_shadowing_stdlib < 0) {
Py_DECREF(stdlib_modules);
goto done;
}
}
Py_XDECREF(stdlib_modules);
}
if (origin == NULL && PyModule_Check(v)) {
// Fall back to __file__ for diagnostics if we don't have
// an origin that is a location
origin = PyModule_GetFilenameObject(v);
if (origin == NULL) {
if (!PyErr_ExceptionMatches(PyExc_SystemError)) {
goto done;
}
// PyModule_GetFilenameObject raised "module filename missing"
_PyErr_Clear(tstate);
}
assert(origin == NULL || PyUnicode_Check(origin));
}
if (is_possibly_shadowing_stdlib) {
assert(origin);
errmsg = PyUnicode_FromFormat(
"cannot import name %R from %R "
"(consider renaming %R since it has the same "
"name as the standard library module named %R "
"and prevents importing that standard library module)",
name, mod_name_or_unknown, origin, mod_name_or_unknown
);
}
else {
int rc = _PyModuleSpec_IsInitializing(spec);
if (rc < 0) {
goto done;
}
else if (rc > 0) {
if (is_possibly_shadowing) {
assert(origin);
// For non-stdlib modules, only mention the possibility of
// shadowing if the module is being initialized.
errmsg = PyUnicode_FromFormat(
"cannot import name %R from %R "
"(consider renaming %R if it has the same name "
"as a library you intended to import)",
name, mod_name_or_unknown, origin
);
}
else if (origin) {
errmsg = PyUnicode_FromFormat(
"cannot import name %R from partially initialized module %R "
"(most likely due to a circular import) (%S)",
name, mod_name_or_unknown, origin
);
}
else {
errmsg = PyUnicode_FromFormat(
"cannot import name %R from partially initialized module %R "
"(most likely due to a circular import)",
name, mod_name_or_unknown
);
}
}
else {
assert(rc == 0);
if (origin) {
errmsg = PyUnicode_FromFormat(
"cannot import name %R from %R (%S)",
name, mod_name_or_unknown, origin
);
}
else {
errmsg = PyUnicode_FromFormat(
"cannot import name %R from %R (unknown location)",
name, mod_name_or_unknown
);
}
}
}
done_with_errmsg:
if (errmsg != NULL) {
/* NULL checks for mod_name and origin done by _PyErr_SetImportErrorWithNameFrom */
_PyErr_SetImportErrorWithNameFrom(errmsg, mod_name, origin, name);
Py_DECREF(errmsg);
}
done:
Py_XDECREF(origin);
Py_XDECREF(spec);
Py_DECREF(mod_name_or_unknown);
return NULL;
}

#define CANNOT_CATCH_MSG "catching classes that do not inherit from "\
                         "BaseException is not allowed"
#define CANNOT_EXCEPT_STAR_EG "catching ExceptionGroup with except* "\
                              "is not allowed. Use except instead."
int
_PyEval_CheckExceptTypeValid(PyThreadState *tstate, PyObject* right)
{
if (PyTuple_Check(right)) {
Py_ssize_t i, length;
length = PyTuple_GET_SIZE(right);
for (i = 0; i < length; i++) {
PyObject *exc = PyTuple_GET_ITEM(right, i);
if (!PyExceptionClass_Check(exc)) {
_PyErr_SetString(tstate, PyExc_TypeError,
CANNOT_CATCH_MSG);
return -1;
}
}
}
else {
if (!PyExceptionClass_Check(right)) {
_PyErr_SetString(tstate, PyExc_TypeError,
CANNOT_CATCH_MSG);
return -1;
}
}
return 0;
}
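
/* Apply the same check for "except*" clauses, which additionally forbid
   catching BaseExceptionGroup or any of its subclasses (e.g. writing
   "except* ExceptionGroup:" raises a TypeError). */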
int
_PyEval_CheckExceptStarTypeValid(PyThreadState *tstate, PyObject* right)
{
if (_PyEval_CheckExceptTypeValid(tstate, right) < 0) {
return -1;
}
    /* Reject "except* ExceptionGroup" */
int is_subclass = 0;
if (PyTuple_Check(right)) {
Py_ssize_t length = PyTuple_GET_SIZE(right);
for (Py_ssize_t i = 0; i < length; i++) {
PyObject *exc = PyTuple_GET_ITEM(right, i);
is_subclass = PyObject_IsSubclass(exc, PyExc_BaseExceptionGroup);
if (is_subclass < 0) {
return -1;
}
if (is_subclass) {
break;
}
}
}
else {
is_subclass = PyObject_IsSubclass(right, PyExc_BaseExceptionGroup);
if (is_subclass < 0) {
return -1;
}
}
if (is_subclass) {
_PyErr_SetString(tstate, PyExc_TypeError,
CANNOT_EXCEPT_STAR_EG);
return -1;
}
return 0;
}
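
/* Check that the object after * in a call such as "f(*args)" is iterable,
   raising a more helpful TypeError than the generic one if it is not. */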
int
_Py_Check_ArgsIterable(PyThreadState *tstate, PyObject *func, PyObject *args)
{
if (Py_TYPE(args)->tp_iter == NULL && !PySequence_Check(args)) {
_PyErr_Format(tstate, PyExc_TypeError,
"Value after * must be an iterable, not %.200s",
Py_TYPE(args)->tp_name);
return -1;
}
return 0;
}
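
/* Rewrite the exception raised while merging "f(**kwargs)" arguments
   into a more helpful TypeError (non-mapping argument, or a duplicate
   keyword argument). */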
void
_PyEval_FormatKwargsError(PyThreadState *tstate, PyObject *func, PyObject *kwargs)
{
    /* _PyDict_MergeEx raises an AttributeError (percolated from an
     * attempt to get the 'keys' attribute) instead of a TypeError
     * if its second argument is not a mapping.
     */
if (_PyErr_ExceptionMatches(tstate, PyExc_AttributeError)) {
_PyErr_Format(
tstate, PyExc_TypeError,
"Value after ** must be a mapping, not %.200s",
Py_TYPE(kwargs)->tp_name);
}
else if (_PyErr_ExceptionMatches(tstate, PyExc_KeyError)) {
PyObject *exc = _PyErr_GetRaisedException(tstate);
PyObject *args = PyException_GetArgs(exc);
if (PyTuple_Check(args) && PyTuple_GET_SIZE(args) == 1) {
_PyErr_Clear(tstate);
PyObject *funcstr = _PyObject_FunctionStr(func);
if (funcstr != NULL) {
PyObject *key = PyTuple_GET_ITEM(args, 0);
_PyErr_Format(
tstate, PyExc_TypeError,
"%U got multiple values for keyword argument '%S'",
funcstr, key);
Py_DECREF(funcstr);
}
Py_XDECREF(exc);
}
else {
_PyErr_SetRaisedException(tstate, exc);
}
Py_DECREF(args);
}
}
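
/* Raise an exception of type 'exc', formatting 'format_str' with the
   UTF-8 form of 'obj'. For NameError, also record the name on the
   exception so the error message machinery can offer suggestions. */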
void
_PyEval_FormatExcCheckArg(PyThreadState *tstate, PyObject *exc,
const char *format_str, PyObject *obj)
{
const char *obj_str;
    if (!obj) {
        return;
    }
    obj_str = PyUnicode_AsUTF8(obj);
    if (!obj_str) {
        return;
    }
_PyErr_Format(tstate, exc, format_str, obj_str);
if (exc == PyExc_NameError) {
        // Include the name in the NameError exception so that suggestions
        // can be offered later.
PyObject *exc = PyErr_GetRaisedException();
if (PyErr_GivenExceptionMatches(exc, PyExc_NameError)) {
if (((PyNameErrorObject*)exc)->name == NULL) {
// We do not care if this fails because we are going to restore the
// NameError anyway.
(void)PyObject_SetAttr(exc, &_Py_ID(name), obj);
}
}
PyErr_SetRaisedException(exc);
}
}
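
/* Raise UnboundLocalError or NameError for a read of an unbound local
   or free variable; 'oparg' indexes co_localsplusnames. Does nothing if
   an exception is already set. */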
void
_PyEval_FormatExcUnbound(PyThreadState *tstate, PyCodeObject *co, int oparg)
{
PyObject *name;
/* Don't stomp existing exception */
    if (_PyErr_Occurred(tstate)) {
        return;
    }
name = PyTuple_GET_ITEM(co->co_localsplusnames, oparg);
if (oparg < PyUnstable_Code_GetFirstFree(co)) {
_PyEval_FormatExcCheckArg(tstate, PyExc_UnboundLocalError,
UNBOUNDLOCAL_ERROR_MSG, name);
} else {
_PyEval_FormatExcCheckArg(tstate, PyExc_NameError,
UNBOUNDFREE_ERROR_MSG, name);
}
}
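
/* Raise a TypeError for an object returned by __aenter__ (oparg == 1)
   or __aexit__ (oparg == 2) that does not implement __await__. */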
void
_PyEval_FormatAwaitableError(PyThreadState *tstate, PyTypeObject *type, int oparg)
{
if (type->tp_as_async == NULL || type->tp_as_async->am_await == NULL) {
if (oparg == 1) {
_PyErr_Format(tstate, PyExc_TypeError,
"'async with' received an object from __aenter__ "
"that does not implement __await__: %.100s",
type->tp_name);
}
else if (oparg == 2) {
_PyErr_Format(tstate, PyExc_TypeError,
"'async with' received an object from __aexit__ "
"that does not implement __await__: %.100s",
type->tp_name);
}
}
}
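
/* Reserve a per-code-object "extra" slot for tools such as debuggers or
   JITs; 'free' is called on the stored pointer when a code object is
   deallocated. Return the new index, or -1 if no slots are left. */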
Py_ssize_t
PyUnstable_Eval_RequestCodeExtraIndex(freefunc free)
{
PyInterpreterState *interp = _PyInterpreterState_GET();
Py_ssize_t new_index;
if (interp->co_extra_user_count == MAX_CO_EXTRA_USERS - 1) {
return -1;
}
new_index = interp->co_extra_user_count++;
interp->co_extra_freefuncs[new_index] = free;
return new_index;
}
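
/* A minimal usage sketch (illustrative only, not part of this file): a
   tool reserves an index once at startup, then attaches its own data to
   code objects. 'my_free', 'my_init' and 'my_tag' are hypothetical names.

       static Py_ssize_t my_index = -1;

       static void my_free(void *data) { PyMem_Free(data); }

       int my_init(void) {
           my_index = PyUnstable_Eval_RequestCodeExtraIndex(my_free);
           return my_index < 0 ? -1 : 0;
       }

       int my_tag(PyObject *code) {
           void *data = PyMem_Malloc(16);
           if (data == NULL) {
               PyErr_NoMemory();
               return -1;
           }
           return PyUnstable_Code_SetExtra(code, my_index, data);
       }
*/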
/* Implement Py_EnterRecursiveCall() and Py_LeaveRecursiveCall() as functions
for the limited API. */
int Py_EnterRecursiveCall(const char *where)
{
return _Py_EnterRecursiveCall(where);
}

void Py_LeaveRecursiveCall(void)
{
_Py_LeaveRecursiveCall();
}
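
/* Fetch the next awaitable from an async iterator, as needed for
   "async for": call __anext__ (with a fast path for async generators)
   and wrap the result in an awaitable. */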
PyObject *
_PyEval_GetANext(PyObject *aiter)
{
unaryfunc getter = NULL;
PyObject *next_iter = NULL;
PyTypeObject *type = Py_TYPE(aiter);
if (PyAsyncGen_CheckExact(aiter)) {
return type->tp_as_async->am_anext(aiter);
}
    if (type->tp_as_async != NULL) {
getter = type->tp_as_async->am_anext;
}
if (getter != NULL) {
next_iter = (*getter)(aiter);
if (next_iter == NULL) {
return NULL;
}
}
else {
PyErr_Format(PyExc_TypeError,
"'async for' requires an iterator with "
"__anext__ method, got %.100s",
type->tp_name);
return NULL;
}
PyObject *awaitable = _PyCoro_GetAwaitableIter(next_iter);
if (awaitable == NULL) {
_PyErr_FormatFromCause(
PyExc_TypeError,
"'async for' received an invalid object "
"from __anext__: %.100s",
Py_TYPE(next_iter)->tp_name);
}
Py_DECREF(next_iter);
return awaitable;
}
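
/* Look up 'name' in 'globals' and then 'builtins', storing the result in
   '*writeto' (PyStackRef_NULL on error or, with a NameError set, when
   the name is not found). Non-dict globals/builtins take a generic
   mapping slow path. */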
void
_PyEval_LoadGlobalStackRef(PyObject *globals, PyObject *builtins, PyObject *name, _PyStackRef *writeto)
{
if (PyDict_CheckExact(globals) && PyDict_CheckExact(builtins)) {
_PyDict_LoadGlobalStackRef((PyDictObject *)globals,
(PyDictObject *)builtins,
name, writeto);
if (PyStackRef_IsNull(*writeto) && !PyErr_Occurred()) {
            /* _PyDict_LoadGlobalStackRef() leaves *writeto NULL without
             * raising an exception if the key doesn't exist */
_PyEval_FormatExcCheckArg(PyThreadState_GET(), PyExc_NameError,
NAME_ERROR_MSG, name);
}
}
else {
        /* Slow path if globals or builtins is not a dict */
/* namespace 1: globals */
PyObject *res;
if (PyMapping_GetOptionalItem(globals, name, &res) < 0) {
*writeto = PyStackRef_NULL;
return;
}
if (res == NULL) {
/* namespace 2: builtins */
if (PyMapping_GetOptionalItem(builtins, name, &res) < 0) {
*writeto = PyStackRef_NULL;
return;
}
if (res == NULL) {
_PyEval_FormatExcCheckArg(
PyThreadState_GET(), PyExc_NameError,
NAME_ERROR_MSG, name);
*writeto = PyStackRef_NULL;
return;
}
}
*writeto = PyStackRef_FromPyObjectSteal(res);
}
}
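
/* Resolve 'iterable' to an awaitable iterator. On failure the error
   message is specialized for 'async with' (oparg 1 and 2); a coroutine
   that is already being awaited is rejected with a RuntimeError. */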
PyObject *
_PyEval_GetAwaitable(PyObject *iterable, int oparg)
{
PyObject *iter = _PyCoro_GetAwaitableIter(iterable);
if (iter == NULL) {
_PyEval_FormatAwaitableError(PyThreadState_GET(),
Py_TYPE(iterable), oparg);
}
else if (PyCoro_CheckExact(iter)) {
PyCoroObject *coro = (PyCoroObject *)iter;
int8_t frame_state = FT_ATOMIC_LOAD_INT8_RELAXED(coro->cr_frame_state);
if (frame_state == FRAME_SUSPENDED_YIELD_FROM) {
/* `iter` is a coroutine object that is being awaited. */
Py_CLEAR(iter);
_PyErr_SetString(PyThreadState_GET(), PyExc_RuntimeError,
"coroutine is being awaited already");
}
}
return iter;
}
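
/* Name resolution for unoptimized scopes (the LOAD_NAME instruction):
   search f_locals, then f_globals, then f_builtins, raising NameError
   if the name is found in none of them. */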
PyObject *
_PyEval_LoadName(PyThreadState *tstate, _PyInterpreterFrame *frame, PyObject *name)
{
PyObject *value;
if (frame->f_locals == NULL) {
_PyErr_SetString(tstate, PyExc_SystemError,
"no locals found");
return NULL;
}
if (PyMapping_GetOptionalItem(frame->f_locals, name, &value) < 0) {
return NULL;
}
if (value != NULL) {
return value;
}
if (PyDict_GetItemRef(frame->f_globals, name, &value) < 0) {
return NULL;
}
if (value != NULL) {
return value;
}
if (PyMapping_GetOptionalItem(frame->f_builtins, name, &value) < 0) {
return NULL;
}
if (value == NULL) {
_PyEval_FormatExcCheckArg(
tstate, PyExc_NameError,
NAME_ERROR_MSG, name);
}
return value;
}
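
/* Return the item of the tuple or list 'seq' at the tagged-int position
   'index', or PyStackRef_NULL once the index is past the end. */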
static _PyStackRef
foriter_next(PyObject *seq, _PyStackRef index)
{
assert(PyStackRef_IsTaggedInt(index));
assert(PyTuple_CheckExact(seq) || PyList_CheckExact(seq));
intptr_t i = PyStackRef_UntagInt(index);
if (PyTuple_CheckExact(seq)) {
size_t size = PyTuple_GET_SIZE(seq);
if ((size_t)i >= size) {
return PyStackRef_NULL;
}
return PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(seq, i));
}
PyObject *item = _PyList_GetItemRef((PyListObject *)seq, i);
if (item == NULL) {
return PyStackRef_NULL;
}
return PyStackRef_FromPyObjectSteal(item);
}
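
/* Advance a for-loop iterator: tuples and lists iterated by a tagged-int
   index take the fast path above; anything else goes through
   tp_iternext, with StopIteration translated into PyStackRef_NULL and
   other errors into PyStackRef_ERROR. */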
_PyStackRef
_PyForIter_VirtualIteratorNext(PyThreadState* tstate, _PyInterpreterFrame* frame,
                               _PyStackRef iter, _PyStackRef* index_ptr)
{
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
_PyStackRef index = *index_ptr;
if (PyStackRef_IsTaggedInt(index)) {
*index_ptr = PyStackRef_IncrementTaggedIntNoOverflow(index);
return foriter_next(iter_o, index);
}
PyObject *next_o = (*Py_TYPE(iter_o)->tp_iternext)(iter_o);
if (next_o == NULL) {
if (_PyErr_Occurred(tstate)) {
if (_PyErr_ExceptionMatches(tstate, PyExc_StopIteration)) {
_PyEval_MonitorRaise(tstate, frame, frame->instr_ptr);
_PyErr_Clear(tstate);
}
else {
return PyStackRef_ERROR;
}
}
return PyStackRef_NULL;
}
return PyStackRef_FromPyObjectSteal(next_o);
}

/* Check whether the type 'cls' provides the given special method. */
static inline int
type_has_special_method(PyTypeObject *cls, PyObject *name)
{
// _PyType_Lookup() does not set an exception and returns a borrowed ref
assert(!PyErr_Occurred());
PyObject *r = _PyType_Lookup(cls, name);
return r != NULL && Py_TYPE(r)->tp_descr_get != NULL;
}
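
/* Decide whether an error about a missing context-manager method can
   suggest the synchronous/asynchronous counterpart, e.g. using 'with'
   on an object that only implements __aenter__/__aexit__. */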
int
_PyEval_SpecialMethodCanSuggest(PyObject *self, int oparg)
{
PyTypeObject *type = Py_TYPE(self);
switch (oparg) {
case SPECIAL___ENTER__:
case SPECIAL___EXIT__: {
return type_has_special_method(type, &_Py_ID(__aenter__))
&& type_has_special_method(type, &_Py_ID(__aexit__));
}
case SPECIAL___AENTER__:
case SPECIAL___AEXIT__: {
return type_has_special_method(type, &_Py_ID(__enter__))
&& type_has_special_method(type, &_Py_ID(__exit__));
}
default:
Py_FatalError("unsupported special method");
}
}