Mirror of https://github.com/python/cpython.git, synced 2025-12-08 06:10:17 +00:00

Commit db151a5192: Merge remote-tracking branch 'upstream/main' into lazy
869 changed files with 45727 additions and 16994 deletions
Python/ceval.c (493 changes)
@@ -352,13 +352,23 @@ _Py_ReachedRecursionLimitWithMargin(PyThreadState *tstate, int margin_count)
{
    uintptr_t here_addr = _Py_get_machine_stack_pointer();
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
#if _Py_STACK_GROWS_DOWN
    if (here_addr > _tstate->c_stack_soft_limit + margin_count * _PyOS_STACK_MARGIN_BYTES) {
#else
    if (here_addr <= _tstate->c_stack_soft_limit - margin_count * _PyOS_STACK_MARGIN_BYTES) {
#endif
        return 0;
    }
    if (_tstate->c_stack_hard_limit == 0) {
        _Py_InitializeRecursionLimits(tstate);
    }
    return here_addr <= _tstate->c_stack_soft_limit + margin_count * _PyOS_STACK_MARGIN_BYTES;
#if _Py_STACK_GROWS_DOWN
    return here_addr <= _tstate->c_stack_soft_limit + margin_count * _PyOS_STACK_MARGIN_BYTES &&
        here_addr >= _tstate->c_stack_soft_limit - 2 * _PyOS_STACK_MARGIN_BYTES;
#else
    return here_addr > _tstate->c_stack_soft_limit - margin_count * _PyOS_STACK_MARGIN_BYTES &&
        here_addr <= _tstate->c_stack_soft_limit + 2 * _PyOS_STACK_MARGIN_BYTES;
#endif
}

void
@@ -366,7 +376,11 @@ _Py_EnterRecursiveCallUnchecked(PyThreadState *tstate)
{
    uintptr_t here_addr = _Py_get_machine_stack_pointer();
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
#if _Py_STACK_GROWS_DOWN
    if (here_addr < _tstate->c_stack_hard_limit) {
#else
    if (here_addr > _tstate->c_stack_hard_limit) {
#endif
        Py_FatalError("Unchecked stack overflow.");
    }
}
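The `#if _Py_STACK_GROWS_DOWN` branches above exist because the overflow test has to flip direction with the direction of stack growth: on the usual downward-growing stack a deeper call has a smaller stack-pointer value, so overflow means dropping below the hard limit, while on an upward-growing stack it means climbing above it. The standalone sketch below (not part of this diff) shows the usual way to picture this; the runtime probe is purely illustrative, since CPython fixes `_Py_STACK_GROWS_DOWN` at build time, and comparing addresses of locals in different frames is not strictly defined by the C standard.

/* Illustrative sketch only: probe which way the machine stack grows. */
#include <stdio.h>

static int
grows_down(char *caller_local)
{
    char callee_local;
    /* Deeper frame at a lower address implies a downward-growing stack. */
    return &callee_local < caller_local;
}

int
main(void)
{
    char probe;
    printf("stack grows %s\n", grows_down(&probe) ? "down" : "up");
    return 0;
}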
@@ -444,7 +458,7 @@ int pthread_attr_destroy(pthread_attr_t *a)
#endif

static void
hardware_stack_limits(uintptr_t *top, uintptr_t *base)
hardware_stack_limits(uintptr_t *base, uintptr_t *top, uintptr_t sp)
{
#ifdef WIN32
    ULONG_PTR low, high;
@@ -480,32 +494,113 @@ hardware_stack_limits(uintptr_t *top, uintptr_t *base)
        return;
    }
# endif
    uintptr_t here_addr = _Py_get_machine_stack_pointer();
    uintptr_t top_addr = _Py_SIZE_ROUND_UP(here_addr, 4096);
    // Add some space for caller function then round to minimum page size.
    // This is a guess at the top of the stack, but should be a reasonably
    // good guess if called from _PyThreadState_Attach when creating a thread.
    // If the thread is attached deep in a call stack, then the guess will be poor.
#if _Py_STACK_GROWS_DOWN
    uintptr_t top_addr = _Py_SIZE_ROUND_UP(sp + 8*sizeof(void*), SYSTEM_PAGE_SIZE);
    *top = top_addr;
    *base = top_addr - Py_C_STACK_SIZE;
# else
    uintptr_t base_addr = _Py_SIZE_ROUND_DOWN(sp - 8*sizeof(void*), SYSTEM_PAGE_SIZE);
    *base = base_addr;
    *top = base_addr + Py_C_STACK_SIZE;
#endif
#endif
}
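The fallback path guesses the top of the stack from the caller-supplied stack pointer: it leaves a few words of headroom for the caller's own frame, then rounds to a page boundary and assumes `Py_C_STACK_SIZE` bytes of usable stack below (or above) that point. A minimal sketch of that arithmetic, with the rounding helper written out under the assumption that it is the usual power-of-two alignment macro (the real `_Py_SIZE_ROUND_UP`/`SYSTEM_PAGE_SIZE` definitions live elsewhere in CPython):

/* Illustrative only: worked example of the stack-top guess on a
 * downward-growing stack, with assumed constants. */
#include <assert.h>
#include <stdint.h>

#define PAGE 4096u                                  /* stand-in for SYSTEM_PAGE_SIZE */
#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int
main(void)
{
    uintptr_t sp = 0x7ffd1234;                      /* pretend stack pointer */
    uintptr_t top = ROUND_UP(sp + 8 * sizeof(void *), PAGE);
    assert(top % PAGE == 0 && top >= sp);           /* page aligned, just above sp */
    uintptr_t base = top - (4u * 1024 * 1024);      /* pretend Py_C_STACK_SIZE: 4 MiB */
    assert(base < sp && sp < top);                  /* sp lies inside the guessed region */
    return 0;
}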
static void
tstate_set_stack(PyThreadState *tstate,
                 uintptr_t base, uintptr_t top)
{
    assert(base < top);
    assert((top - base) >= _PyOS_MIN_STACK_SIZE);

#ifdef _Py_THREAD_SANITIZER
    // Thread sanitizer crashes if we use more than half the stack.
    uintptr_t stacksize = top - base;
# if _Py_STACK_GROWS_DOWN
    base += stacksize/2;
# else
    top -= stacksize/2;
# endif
#endif
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
#if _Py_STACK_GROWS_DOWN
    _tstate->c_stack_top = top;
    _tstate->c_stack_hard_limit = base + _PyOS_STACK_MARGIN_BYTES;
    _tstate->c_stack_soft_limit = base + _PyOS_STACK_MARGIN_BYTES * 2;
# ifndef NDEBUG
    // Sanity checks
    _PyThreadStateImpl *ts = (_PyThreadStateImpl *)tstate;
    assert(ts->c_stack_hard_limit <= ts->c_stack_soft_limit);
    assert(ts->c_stack_soft_limit < ts->c_stack_top);
# endif
#else
    _tstate->c_stack_top = base;
    _tstate->c_stack_hard_limit = top - _PyOS_STACK_MARGIN_BYTES;
    _tstate->c_stack_soft_limit = top - _PyOS_STACK_MARGIN_BYTES * 2;
# ifndef NDEBUG
    // Sanity checks
    _PyThreadStateImpl *ts = (_PyThreadStateImpl *)tstate;
    assert(ts->c_stack_hard_limit >= ts->c_stack_soft_limit);
    assert(ts->c_stack_soft_limit > ts->c_stack_top);
# endif
#endif
}

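On a downward-growing stack the three fields end up ordered hard limit <= soft limit < top: the soft limit sits one margin above the hard limit, so a `RecursionError` raised at the soft limit still has a full margin of stack in which to be constructed and propagated, and the hard limit itself sits one margin above the true base. A small worked example with assumed numbers (the real margin and stack size are configuration constants):

/* Illustrative numbers only: the ordering tstate_set_stack() establishes
 * on a downward-growing stack. */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
    const uintptr_t MARGIN = 8192;              /* stand-in for _PyOS_STACK_MARGIN_BYTES */
    uintptr_t base = 0x10000000;
    uintptr_t top  = base + 4 * 1024 * 1024;    /* pretend 4 MiB of usable stack */

    uintptr_t hard = base + MARGIN;             /* below this: fatal, unrecoverable */
    uintptr_t soft = base + MARGIN * 2;         /* below this: raise RecursionError */

    assert(hard <= soft && soft < top);         /* same sanity checks as the diff */

    /* A stack pointer between the soft and hard limits still has one margin
       of headroom in which the RecursionError can be handled. */
    uintptr_t sp = soft - 100;
    assert(sp < soft && sp >= hard);
    return 0;
}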
void
_Py_InitializeRecursionLimits(PyThreadState *tstate)
{
    uintptr_t top;
    uintptr_t base;
    hardware_stack_limits(&top, &base);
#ifdef _Py_THREAD_SANITIZER
    // Thread sanitizer crashes if we use more than half the stack.
    uintptr_t stacksize = top - base;
    base += stacksize/2;
#endif
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    _tstate->c_stack_top = top;
    _tstate->c_stack_hard_limit = base + _PyOS_STACK_MARGIN_BYTES;
    _tstate->c_stack_soft_limit = base + _PyOS_STACK_MARGIN_BYTES * 2;
    uintptr_t base, top;
    uintptr_t here_addr = _Py_get_machine_stack_pointer();
    hardware_stack_limits(&base, &top, here_addr);
    assert(top != 0);

    tstate_set_stack(tstate, base, top);
    _PyThreadStateImpl *ts = (_PyThreadStateImpl *)tstate;
    ts->c_stack_init_base = base;
    ts->c_stack_init_top = top;
}

int
PyUnstable_ThreadState_SetStackProtection(PyThreadState *tstate,
                                          void *stack_start_addr, size_t stack_size)
{
    if (stack_size < _PyOS_MIN_STACK_SIZE) {
        PyErr_Format(PyExc_ValueError,
                     "stack_size must be at least %zu bytes",
                     _PyOS_MIN_STACK_SIZE);
        return -1;
    }

    uintptr_t base = (uintptr_t)stack_start_addr;
    uintptr_t top = base + stack_size;
    tstate_set_stack(tstate, base, top);
    return 0;
}

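Taken together, the two unstable functions let an embedder that runs Python code on a C stack it manages itself (for example inside a fiber or ucontext coroutine) describe that stack to the thread state, and later restore the limits recorded at initialization. A hypothetical usage sketch, with the stack switch and all error handling elided:

/* Hypothetical embedder sketch, not from the diff: stack_mem/stack_len describe
 * memory the embedder allocated itself. */
#include <Python.h>

void
run_on_custom_stack(PyThreadState *tstate, void *stack_mem, size_t stack_len)
{
    if (PyUnstable_ThreadState_SetStackProtection(tstate, stack_mem, stack_len) < 0) {
        return;  /* stack_len was smaller than _PyOS_MIN_STACK_SIZE */
    }

    /* ... switch onto the custom stack and call into the interpreter ... */

    /* Back on the original stack: restore the limits recorded at init time. */
    PyUnstable_ThreadState_ResetStackProtection(tstate);
}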
void
PyUnstable_ThreadState_ResetStackProtection(PyThreadState *tstate)
{
    _PyThreadStateImpl *ts = (_PyThreadStateImpl *)tstate;
    if (ts->c_stack_init_top != 0) {
        tstate_set_stack(tstate,
                         ts->c_stack_init_base,
                         ts->c_stack_init_top);
        return;
    }

    _Py_InitializeRecursionLimits(tstate);
}

/* The function _Py_EnterRecursiveCallTstate() only calls _Py_CheckRecursiveCall()
   if the recursion_depth reaches recursion_limit. */
   if the stack pointer is between the stack base and c_stack_hard_limit. */
int
_Py_CheckRecursiveCall(PyThreadState *tstate, const char *where)
{
@@ -513,9 +608,17 @@ _Py_CheckRecursiveCall(PyThreadState *tstate, const char *where)
    uintptr_t here_addr = _Py_get_machine_stack_pointer();
    assert(_tstate->c_stack_soft_limit != 0);
    assert(_tstate->c_stack_hard_limit != 0);
#if _Py_STACK_GROWS_DOWN
    assert(here_addr >= _tstate->c_stack_hard_limit - _PyOS_STACK_MARGIN_BYTES);
    if (here_addr < _tstate->c_stack_hard_limit) {
        /* Overflowing while handling an overflow. Give up. */
        int kbytes_used = (int)(_tstate->c_stack_top - here_addr)/1024;
#else
    assert(here_addr <= _tstate->c_stack_hard_limit + _PyOS_STACK_MARGIN_BYTES);
    if (here_addr > _tstate->c_stack_hard_limit) {
        /* Overflowing while handling an overflow. Give up. */
        int kbytes_used = (int)(here_addr - _tstate->c_stack_top)/1024;
#endif
        char buffer[80];
        snprintf(buffer, 80, "Unrecoverable stack overflow (used %d kB)%s", kbytes_used, where);
        Py_FatalError(buffer);
@@ -524,7 +627,11 @@ _Py_CheckRecursiveCall(PyThreadState *tstate, const char *where)
        return 0;
    }
    else {
#if _Py_STACK_GROWS_DOWN
        int kbytes_used = (int)(_tstate->c_stack_top - here_addr)/1024;
#else
        int kbytes_used = (int)(here_addr - _tstate->c_stack_top)/1024;
#endif
        tstate->recursion_headroom++;
        _PyErr_Format(tstate, PyExc_RecursionError,
                      "Stack overflow (used %d kB)%s",
@@ -911,6 +1018,283 @@ PyEval_EvalFrameEx(PyFrameObject *f, int throwflag)

#include "ceval_macros.h"


/* Helper functions to keep the size of the largest uops down */

PyObject *
_Py_VectorCall_StackRefSteal(
    _PyStackRef callable,
    _PyStackRef *arguments,
    int total_args,
    _PyStackRef kwnames)
{
    PyObject *res;
    STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
    if (CONVERSION_FAILED(args_o)) {
        res = NULL;
        goto cleanup;
    }
    PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
    PyObject *kwnames_o = PyStackRef_AsPyObjectBorrow(kwnames);
    int positional_args = total_args;
    if (kwnames_o != NULL) {
        positional_args -= (int)PyTuple_GET_SIZE(kwnames_o);
    }
    res = PyObject_Vectorcall(
        callable_o, args_o,
        positional_args | PY_VECTORCALL_ARGUMENTS_OFFSET,
        kwnames_o);
    STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
    assert((res != NULL) ^ (PyErr_Occurred() != NULL));
cleanup:
    PyStackRef_XCLOSE(kwnames);
    // arguments is a pointer into the GC visible stack,
    // so we must NULL out values as we clear them.
    for (int i = total_args-1; i >= 0; i--) {
        _PyStackRef tmp = arguments[i];
        arguments[i] = PyStackRef_NULL;
        PyStackRef_CLOSE(tmp);
    }
    PyStackRef_CLOSE(callable);
    return res;
}

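All of these helpers steal their `_PyStackRef` inputs and share the same cleanup discipline: because `arguments` points into the frame's GC-visible value stack, each slot is overwritten with `PyStackRef_NULL` before the old reference is closed, so a collector that scans the stack concurrently never sees a half-dead value. A minimal illustration of that discipline with a plain `PyObject *` array (illustrative only, not the CPython macros):

/* Illustrative pattern: consume an array of references that a tracing GC may
 * scan at any time. Clear the visible slot first, then drop the reference. */
#include <Python.h>

static void
consume_visible_slots(PyObject **slots, int n)
{
    for (int i = n - 1; i >= 0; i--) {
        PyObject *tmp = slots[i];
        slots[i] = NULL;        /* scanner now sees an empty slot ... */
        Py_XDECREF(tmp);        /* ... before the reference is released */
    }
}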
PyObject *
_Py_BuiltinCallFast_StackRefSteal(
    _PyStackRef callable,
    _PyStackRef *arguments,
    int total_args)
{
    PyObject *res;
    STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
    if (CONVERSION_FAILED(args_o)) {
        res = NULL;
        goto cleanup;
    }
    PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
    PyCFunction cfunc = PyCFunction_GET_FUNCTION(callable_o);
    res = _PyCFunctionFast_CAST(cfunc)(
        PyCFunction_GET_SELF(callable_o),
        args_o,
        total_args
    );
    STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
    assert((res != NULL) ^ (PyErr_Occurred() != NULL));
cleanup:
    // arguments is a pointer into the GC visible stack,
    // so we must NULL out values as we clear them.
    for (int i = total_args-1; i >= 0; i--) {
        _PyStackRef tmp = arguments[i];
        arguments[i] = PyStackRef_NULL;
        PyStackRef_CLOSE(tmp);
    }
    PyStackRef_CLOSE(callable);
    return res;
}

PyObject *
_Py_BuiltinCallFastWithKeywords_StackRefSteal(
    _PyStackRef callable,
    _PyStackRef *arguments,
    int total_args)
{
    PyObject *res;
    STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
    if (CONVERSION_FAILED(args_o)) {
        res = NULL;
        goto cleanup;
    }
    PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
    PyCFunctionFastWithKeywords cfunc =
        _PyCFunctionFastWithKeywords_CAST(PyCFunction_GET_FUNCTION(callable_o));
    res = cfunc(PyCFunction_GET_SELF(callable_o), args_o, total_args, NULL);
    STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
    assert((res != NULL) ^ (PyErr_Occurred() != NULL));
cleanup:
    // arguments is a pointer into the GC visible stack,
    // so we must NULL out values as we clear them.
    for (int i = total_args-1; i >= 0; i--) {
        _PyStackRef tmp = arguments[i];
        arguments[i] = PyStackRef_NULL;
        PyStackRef_CLOSE(tmp);
    }
    PyStackRef_CLOSE(callable);
    return res;
}

PyObject *
_PyCallMethodDescriptorFast_StackRefSteal(
    _PyStackRef callable,
    PyMethodDef *meth,
    PyObject *self,
    _PyStackRef *arguments,
    int total_args)
{
    PyObject *res;
    STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
    if (CONVERSION_FAILED(args_o)) {
        res = NULL;
        goto cleanup;
    }
    assert(((PyMethodDescrObject *)PyStackRef_AsPyObjectBorrow(callable))->d_method == meth);
    assert(self == PyStackRef_AsPyObjectBorrow(arguments[0]));

    PyCFunctionFast cfunc = _PyCFunctionFast_CAST(meth->ml_meth);
    res = cfunc(self, (args_o + 1), total_args - 1);
    STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
    assert((res != NULL) ^ (PyErr_Occurred() != NULL));
cleanup:
    // arguments is a pointer into the GC visible stack,
    // so we must NULL out values as we clear them.
    for (int i = total_args-1; i >= 0; i--) {
        _PyStackRef tmp = arguments[i];
        arguments[i] = PyStackRef_NULL;
        PyStackRef_CLOSE(tmp);
    }
    PyStackRef_CLOSE(callable);
    return res;
}

PyObject *
_PyCallMethodDescriptorFastWithKeywords_StackRefSteal(
    _PyStackRef callable,
    PyMethodDef *meth,
    PyObject *self,
    _PyStackRef *arguments,
    int total_args)
{
    PyObject *res;
    STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
    if (CONVERSION_FAILED(args_o)) {
        res = NULL;
        goto cleanup;
    }
    assert(((PyMethodDescrObject *)PyStackRef_AsPyObjectBorrow(callable))->d_method == meth);
    assert(self == PyStackRef_AsPyObjectBorrow(arguments[0]));

    PyCFunctionFastWithKeywords cfunc =
        _PyCFunctionFastWithKeywords_CAST(meth->ml_meth);
    res = cfunc(self, (args_o + 1), total_args-1, NULL);
    STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
    assert((res != NULL) ^ (PyErr_Occurred() != NULL));
cleanup:
    // arguments is a pointer into the GC visible stack,
    // so we must NULL out values as we clear them.
    for (int i = total_args-1; i >= 0; i--) {
        _PyStackRef tmp = arguments[i];
        arguments[i] = PyStackRef_NULL;
        PyStackRef_CLOSE(tmp);
    }
    PyStackRef_CLOSE(callable);
    return res;
}

PyObject *
_Py_CallBuiltinClass_StackRefSteal(
    _PyStackRef callable,
    _PyStackRef *arguments,
    int total_args)
{
    PyObject *res;
    STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
    if (CONVERSION_FAILED(args_o)) {
        res = NULL;
        goto cleanup;
    }
    PyTypeObject *tp = (PyTypeObject *)PyStackRef_AsPyObjectBorrow(callable);
    res = tp->tp_vectorcall((PyObject *)tp, args_o, total_args, NULL);
    STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
    assert((res != NULL) ^ (PyErr_Occurred() != NULL));
cleanup:
    // arguments is a pointer into the GC visible stack,
    // so we must NULL out values as we clear them.
    for (int i = total_args-1; i >= 0; i--) {
        _PyStackRef tmp = arguments[i];
        arguments[i] = PyStackRef_NULL;
        PyStackRef_CLOSE(tmp);
    }
    PyStackRef_CLOSE(callable);
    return res;
}

PyObject *
_Py_BuildString_StackRefSteal(
    _PyStackRef *arguments,
    int total_args)
{
    PyObject *res;
    STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
    if (CONVERSION_FAILED(args_o)) {
        res = NULL;
        goto cleanup;
    }
    res = _PyUnicode_JoinArray(&_Py_STR(empty), args_o, total_args);
    STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
    assert((res != NULL) ^ (PyErr_Occurred() != NULL));
cleanup:
    // arguments is a pointer into the GC visible stack,
    // so we must NULL out values as we clear them.
    for (int i = total_args-1; i >= 0; i--) {
        _PyStackRef tmp = arguments[i];
        arguments[i] = PyStackRef_NULL;
        PyStackRef_CLOSE(tmp);
    }
    return res;
}

PyObject *
_Py_BuildMap_StackRefSteal(
    _PyStackRef *arguments,
    int half_args)
{
    PyObject *res;
    STACKREFS_TO_PYOBJECTS(arguments, half_args*2, args_o);
    if (CONVERSION_FAILED(args_o)) {
        res = NULL;
        goto cleanup;
    }
    res = _PyDict_FromItems(
        args_o, 2,
        args_o+1, 2,
        half_args
    );
    STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
    assert((res != NULL) ^ (PyErr_Occurred() != NULL));
cleanup:
    // arguments is a pointer into the GC visible stack,
    // so we must NULL out values as we clear them.
    for (int i = half_args*2-1; i >= 0; i--) {
        _PyStackRef tmp = arguments[i];
        arguments[i] = PyStackRef_NULL;
        PyStackRef_CLOSE(tmp);
    }
    return res;
}

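`BUILD_MAP` leaves its operands on the stack as interleaved key/value pairs, so `args_o` holds `k0, v0, k1, v1, ...`; passing `args_o` and `args_o+1` each with a stride of 2 lets `_PyDict_FromItems` take the keys from the even slots and the values from the odd ones. A sketch of the equivalent loop using only the public dict API (assumes `items` is already a borrowed, interleaved array; the helper name is hypothetical):

/* Illustrative equivalent of _PyDict_FromItems(args_o, 2, args_o+1, 2, half_args)
 * for an interleaved array items = {k0, v0, k1, v1, ...} of borrowed references. */
#include <Python.h>

static PyObject *
dict_from_interleaved(PyObject **items, Py_ssize_t half_args)
{
    PyObject *dict = PyDict_New();
    if (dict == NULL) {
        return NULL;
    }
    for (Py_ssize_t i = 0; i < half_args; i++) {
        PyObject *key = items[2 * i];        /* even slots: keys   */
        PyObject *value = items[2 * i + 1];  /* odd slots:  values */
        if (PyDict_SetItem(dict, key, value) < 0) {
            Py_DECREF(dict);
            return NULL;
        }
    }
    return dict;
}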
#ifdef Py_DEBUG
void
_Py_assert_within_stack_bounds(
    _PyInterpreterFrame *frame, _PyStackRef *stack_pointer,
    const char *filename, int lineno
) {
    if (frame->owner == FRAME_OWNED_BY_INTERPRETER) {
        return;
    }
    int level = (int)(stack_pointer - _PyFrame_Stackbase(frame));
    if (level < 0) {
        printf("Stack underflow (depth = %d) at %s:%d\n", level, filename, lineno);
        fflush(stdout);
        abort();
    }
    int size = _PyFrame_GetCode(frame)->co_stacksize;
    if (level > size) {
        printf("Stack overflow (depth = %d) at %s:%d\n", level, filename, lineno);
        fflush(stdout);
        abort();
    }
}
#endif

int _Py_CheckRecursiveCallPy(
    PyThreadState *tstate)
{
@@ -942,6 +1326,8 @@ static const _Py_CODEUNIT _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS[] = {
    { .op.code = RESUME, .op.arg = RESUME_OPARG_DEPTH1_MASK | RESUME_AT_FUNC_START }
};

const _Py_CODEUNIT *_Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR = (_Py_CODEUNIT*)&_Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS;

#ifdef Py_DEBUG
extern void _PyUOpPrint(const _PyUOpInstruction *uop);
#endif
@@ -970,11 +1356,12 @@ _PyObjectArray_FromStackRefArray(_PyStackRef *input, Py_ssize_t nargs, PyObject
        if (result == NULL) {
            return NULL;
        }
        result++;
    }
    else {
        result = scratch;
    }
    result++;
    result[0] = NULL; /* Keep GCC happy */
    for (int i = 0; i < nargs; i++) {
        result[i] = PyStackRef_AsPyObjectBorrow(input[i]);
    }
@@ -989,6 +1376,49 @@ _PyObjectArray_Free(PyObject **array, PyObject **scratch)
    }
}

#ifdef Py_DEBUG
#define ASSERT_WITHIN_STACK_BOUNDS(F, L) _Py_assert_within_stack_bounds(frame, stack_pointer, (F), (L))
#else
#define ASSERT_WITHIN_STACK_BOUNDS(F, L) (void)0
#endif

#if _Py_TIER2
// 0 for success, -1 for error.
static int
stop_tracing_and_jit(PyThreadState *tstate, _PyInterpreterFrame *frame)
{
    int _is_sys_tracing = (tstate->c_tracefunc != NULL) || (tstate->c_profilefunc != NULL);
    int err = 0;
    if (!_PyErr_Occurred(tstate) && !_is_sys_tracing) {
        err = _PyOptimizer_Optimize(frame, tstate);
    }
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    // Deal with backoffs
    _PyExitData *exit = _tstate->jit_tracer_state.initial_state.exit;
    if (exit == NULL) {
        // We hold a strong reference to the code object, so the instruction won't be freed.
        if (err <= 0) {
            _Py_BackoffCounter counter = _tstate->jit_tracer_state.initial_state.jump_backward_instr[1].counter;
            _tstate->jit_tracer_state.initial_state.jump_backward_instr[1].counter = restart_backoff_counter(counter);
        }
        else {
            _tstate->jit_tracer_state.initial_state.jump_backward_instr[1].counter = initial_jump_backoff_counter();
        }
    }
    else {
        // Likewise, we hold a strong reference to the executor containing this exit, so the exit is guaranteed
        // to be valid to access.
        if (err <= 0) {
            exit->temperature = restart_backoff_counter(exit->temperature);
        }
        else {
            exit->temperature = initial_temperature_backoff_counter();
        }
    }
    _PyJit_FinalizeTracing(tstate);
    return err;
}
#endif
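The backoff handling follows the usual adaptive-interpreter pattern: a failed or skipped attempt to optimize a trace restarts the backoff counter on the triggering instruction (or side exit) so it is not retried immediately, while a successful attempt resets it to the initial threshold. A generic sketch of such a counter follows; this is illustrative only, the real `_Py_BackoffCounter` is a packed 16-bit value defined elsewhere in CPython, and the doubling policy here is an assumption.

/* Generic backoff-counter sketch, not the real _Py_BackoffCounter layout. */
#include <stdbool.h>
#include <stdint.h>

typedef struct {
    uint16_t value;    /* countdown until the next attempt */
    uint16_t backoff;  /* how long the next countdown will be */
} backoff_counter;

backoff_counter
restart_counter(backoff_counter c)
{
    /* Attempt failed: wait longer before trying again (growth capped). */
    uint16_t next = c.backoff < 0x4000 ? (uint16_t)(c.backoff * 2) : c.backoff;
    return (backoff_counter){ .value = next, .backoff = next };
}

bool
count_down(backoff_counter *c)
{
    /* Returns true when it is time to attempt optimization again. */
    if (c->value == 0) {
        return true;
    }
    c->value--;
    return false;
}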

/* _PyEval_EvalFrameDefault is too large to optimize for speed with PGO on MSVC.
 */
@@ -1048,6 +1478,10 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int
    uint8_t opcode; /* Current opcode */
    int oparg;      /* Current opcode argument, if any */
    assert(tstate->current_frame == NULL || tstate->current_frame->stackpointer != NULL);
#if !USE_COMPUTED_GOTOS
    uint8_t tracing_mode = 0;
    uint8_t dispatch_code;
#endif
#endif
    _PyEntryFrame entry;

@@ -1118,9 +1552,9 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int
    stack_pointer = _PyFrame_GetStackPointer(frame);
#if _Py_TAIL_CALL_INTERP
# if Py_STATS
    return _TAIL_CALL_error(frame, stack_pointer, tstate, next_instr, instruction_funcptr_table, 0, lastopcode);
    return _TAIL_CALL_error(frame, stack_pointer, tstate, next_instr, instruction_funcptr_handler_table, 0, lastopcode);
# else
    return _TAIL_CALL_error(frame, stack_pointer, tstate, next_instr, instruction_funcptr_table, 0);
    return _TAIL_CALL_error(frame, stack_pointer, tstate, next_instr, instruction_funcptr_handler_table, 0);
# endif
#else
    goto error;
@@ -1129,9 +1563,9 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int

#if _Py_TAIL_CALL_INTERP
# if Py_STATS
    return _TAIL_CALL_start_frame(frame, NULL, tstate, NULL, instruction_funcptr_table, 0, lastopcode);
    return _TAIL_CALL_start_frame(frame, NULL, tstate, NULL, instruction_funcptr_handler_table, 0, lastopcode);
# else
    return _TAIL_CALL_start_frame(frame, NULL, tstate, NULL, instruction_funcptr_table, 0);
    return _TAIL_CALL_start_frame(frame, NULL, tstate, NULL, instruction_funcptr_handler_table, 0);
# endif
#else
    goto start_frame;
@@ -1173,7 +1607,9 @@ _PyTier2Interpreter(
tier2_start:

    next_uop = current_executor->trace;
    assert(next_uop->opcode == _START_EXECUTOR || next_uop->opcode == _COLD_EXIT);
    assert(next_uop->opcode == _START_EXECUTOR ||
           next_uop->opcode == _COLD_EXIT ||
           next_uop->opcode == _COLD_DYNAMIC_EXIT);

#undef LOAD_IP
#define LOAD_IP(UNUSED) (void)0
@@ -1197,7 +1633,9 @@ _PyTier2Interpreter(
    uint64_t trace_uop_execution_counter = 0;
#endif

    assert(next_uop->opcode == _START_EXECUTOR || next_uop->opcode == _COLD_EXIT);
    assert(next_uop->opcode == _START_EXECUTOR ||
           next_uop->opcode == _COLD_EXIT ||
           next_uop->opcode == _COLD_DYNAMIC_EXIT);
tier2_dispatch:
    for (;;) {
        uopcode = next_uop->opcode;
@@ -2149,6 +2587,7 @@ do_raise(PyThreadState *tstate, PyObject *exc, PyObject *cause)
                          "calling %R should have returned an instance of "
                          "BaseException, not %R",
                          cause, Py_TYPE(fixed_cause));
            Py_DECREF(fixed_cause);
            goto raise_error;
        }
        Py_DECREF(cause);
@@ -2674,12 +3113,6 @@ _PyEval_GetBuiltin(PyObject *name)
    return attr;
}

PyObject *
_PyEval_GetBuiltinId(_Py_Identifier *name)
{
    return _PyEval_GetBuiltin(_PyUnicode_FromId(name));
}

PyObject *
PyEval_GetLocals(void)
{