Mirror of https://github.com/python/cpython.git (synced 2025-12-08 06:10:17 +00:00)

commit db151a5192: Merge remote-tracking branch 'upstream/main' into lazy
869 changed files with 45,727 additions and 16,994 deletions

@@ -16,6 +16,7 @@ typedef struct {
 typedef struct {
     PyObject *filename;
+    PyObject *module;
     int optimize;
     int ff_features;
     int syntax_check_only;

@@ -71,7 +72,8 @@ control_flow_in_finally_warning(const char *kw, stmt_ty n, _PyASTPreprocessState
     }
     int ret = _PyErr_EmitSyntaxWarning(msg, state->filename, n->lineno,
                                        n->col_offset + 1, n->end_lineno,
-                                       n->end_col_offset + 1);
+                                       n->end_col_offset + 1,
+                                       state->module);
     Py_DECREF(msg);
     return ret < 0 ? 0 : 1;
 }

@@ -969,11 +971,13 @@ astfold_type_param(type_param_ty node_, PyArena *ctx_, _PyASTPreprocessState *st
 int
 _PyAST_Preprocess(mod_ty mod, PyArena *arena, PyObject *filename, int optimize,
-                  int ff_features, int syntax_check_only, int enable_warnings)
+                  int ff_features, int syntax_check_only, int enable_warnings,
+                  PyObject *module)
 {
     _PyASTPreprocessState state;
     memset(&state, 0, sizeof(_PyASTPreprocessState));
     state.filename = filename;
+    state.module = module;
     state.optimize = optimize;
     state.ff_features = ff_features;
     state.syntax_check_only = syntax_check_only;

@@ -802,6 +802,7 @@ compile as builtin_compile
     dont_inherit: bool = False
     optimize: int = -1
     *
+    module as modname: object = None
     _feature_version as feature_version: int = -1

 Compile source into a code object that can be executed by exec() or eval().

@@ -821,8 +822,8 @@ in addition to any features explicitly specified.
 static PyObject *
 builtin_compile_impl(PyObject *module, PyObject *source, PyObject *filename,
                      const char *mode, int flags, int dont_inherit,
-                     int optimize, int feature_version)
-/*[clinic end generated code: output=b0c09c84f116d3d7 input=8f0069edbdac381b]*/
+                     int optimize, PyObject *modname, int feature_version)
+/*[clinic end generated code: output=9a0dce1945917a86 input=ddeae1e0253459dc]*/
 {
     PyObject *source_copy;
     const char *str;

@@ -851,6 +852,15 @@ builtin_compile_impl(PyObject *module, PyObject *source, PyObject *filename,
                         "compile(): invalid optimize value");
         goto error;
     }
+    if (modname == Py_None) {
+        modname = NULL;
+    }
+    else if (!PyUnicode_Check(modname)) {
+        PyErr_Format(PyExc_TypeError,
+                     "compile() argument 'module' must be str or None, not %T",
+                     modname);
+        goto error;
+    }

     if (!dont_inherit) {
         PyEval_MergeCompilerFlags(&cf);

@@ -896,8 +906,9 @@ builtin_compile_impl(PyObject *module, PyObject *source, PyObject *filename,
         goto error;
     }
     int syntax_check_only = ((flags & PyCF_OPTIMIZED_AST) == PyCF_ONLY_AST); /* unoptiomized AST */
-    if (_PyCompile_AstPreprocess(mod, filename, &cf, optimize,
-                                 arena, syntax_check_only) < 0) {
+    if (_PyCompile_AstPreprocess(mod, filename, &cf, optimize, arena,
+                                 syntax_check_only, modname) < 0)
+    {
         _PyArena_Free(arena);
         goto error;
     }

@@ -910,7 +921,7 @@ builtin_compile_impl(PyObject *module, PyObject *source, PyObject *filename,
             goto error;
         }
         result = (PyObject*)_PyAST_Compile(mod, filename,
-                                           &cf, optimize, arena);
+                                           &cf, optimize, arena, modname);
     }
     _PyArena_Free(arena);
     goto finally;

@@ -928,7 +939,9 @@ builtin_compile_impl(PyObject *module, PyObject *source, PyObject *filename,
     tstate->suppress_co_const_immortalization++;
 #endif

-    result = Py_CompileStringObject(str, filename, start[compile_mode], &cf, optimize);
+    result = _Py_CompileStringObjectWithModule(str, filename,
+                                               start[compile_mode], &cf,
+                                               optimize, modname);

 #ifdef Py_GIL_DISABLED
     tstate->suppress_co_const_immortalization--;

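Taken together, the hunks above thread a new keyword-only "module" argument through the builtin compile(). A minimal usage sketch, assuming a build with this patch applied (only the argument parsing and the new TypeError branch are visible in this diff; what the module tag changes downstream is not shown here):

    # Hypothetical usage of the new keyword-only argument.
    code = compile("x = 1", "<string>", "exec", module="mypkg.mymod")
    exec(code)

    # A value that is neither str nor None should take the new error path:
    try:
        compile("x = 1", "<string>", "exec", module=42)
    except TypeError as exc:
        print(exc)  # compile() argument 'module' must be str or None, not int
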
@@ -2014,14 +2014,8 @@ dummy_func(
         }

         inst(BUILD_STRING, (pieces[oparg] -- str)) {
-            STACKREFS_TO_PYOBJECTS(pieces, oparg, pieces_o);
-            if (CONVERSION_FAILED(pieces_o)) {
-                DECREF_INPUTS();
-                ERROR_IF(true);
-            }
-            PyObject *str_o = _PyUnicode_JoinArray(&_Py_STR(empty), pieces_o, oparg);
-            STACKREFS_TO_PYOBJECTS_CLEANUP(pieces_o);
-            DECREF_INPUTS();
+            PyObject *str_o = _Py_BuildString_StackRefSteal(pieces, oparg);
+            DEAD(pieces);
             ERROR_IF(str_o == NULL);
             str = PyStackRef_FromPyObjectSteal(str_o);
         }

@@ -2136,17 +2130,9 @@ dummy_func(
         }

         inst(BUILD_MAP, (values[oparg*2] -- map)) {
-            STACKREFS_TO_PYOBJECTS(values, oparg*2, values_o);
-            if (CONVERSION_FAILED(values_o)) {
-                DECREF_INPUTS();
-                ERROR_IF(true);
-            }
-            PyObject *map_o = _PyDict_FromItems(
-                    values_o, 2,
-                    values_o+1, 2,
-                    oparg);
-            STACKREFS_TO_PYOBJECTS_CLEANUP(values_o);
-            DECREF_INPUTS();
+
+            PyObject *map_o = _Py_BuildMap_StackRefSteal(values, oparg);
+            DEAD(values);
             ERROR_IF(map_o == NULL);
             map = PyStackRef_FromPyObjectStealMortal(map_o);
         }

@@ -2991,8 +2977,8 @@ dummy_func(
             JUMP_BACKWARD_JIT,
         };

-        tier1 op(_SPECIALIZE_JUMP_BACKWARD, (--)) {
-        #if ENABLE_SPECIALIZATION_FT
+        specializing tier1 op(_SPECIALIZE_JUMP_BACKWARD, (--)) {
+        #if ENABLE_SPECIALIZATION
             if (this_instr->op.code == JUMP_BACKWARD) {
                 uint8_t desired = tstate->interp->jit ? JUMP_BACKWARD_JIT : JUMP_BACKWARD_NO_JIT;
                 FT_ATOMIC_STORE_UINT8_RELAXED(this_instr->op.code, desired);

@@ -3006,25 +2992,21 @@ dummy_func(
        tier1 op(_JIT, (--)) {
        #ifdef _Py_TIER2
            _Py_BackoffCounter counter = this_instr[1].counter;
-           if (backoff_counter_triggers(counter) && this_instr->op.code == JUMP_BACKWARD_JIT) {
-               _Py_CODEUNIT *start = this_instr;
-               /* Back up over EXTENDED_ARGs so optimizer sees the whole instruction */
+           if (!IS_JIT_TRACING() && backoff_counter_triggers(counter) &&
+               this_instr->op.code == JUMP_BACKWARD_JIT &&
+               next_instr->op.code != ENTER_EXECUTOR) {
+               /* Back up over EXTENDED_ARGs so executor is inserted at the correct place */
+               _Py_CODEUNIT *insert_exec_at = this_instr;
                while (oparg > 255) {
                    oparg >>= 8;
-                   start--;
+                   insert_exec_at--;
                }
-               _PyExecutorObject *executor;
-               int optimized = _PyOptimizer_Optimize(frame, start, &executor, 0);
-               if (optimized <= 0) {
-                   this_instr[1].counter = restart_backoff_counter(counter);
-                   ERROR_IF(optimized < 0);
+               int succ = _PyJit_TryInitializeTracing(tstate, frame, this_instr, insert_exec_at, next_instr, STACK_LEVEL(), 0, NULL, oparg);
+               if (succ) {
+                   ENTER_TRACING();
                }
                else {
-                   this_instr[1].counter = initial_jump_backoff_counter();
-                   assert(tstate->current_executor == NULL);
-                   assert(executor != tstate->interp->cold_executor);
-                   tstate->jit_exit = NULL;
-                   TIER1_TO_TIER2(executor);
+                   this_instr[1].counter = restart_backoff_counter(counter);
                }
            }
            else {

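The control flow above leans on CPython's exponential-backoff counters: a site only attempts tracing or optimization when its counter triggers, and on failure the counter is restarted so the next attempt happens after a longer pause. A small self-contained model of that idea (the class and constants here are illustrative stand-ins, not the real _Py_BackoffCounter API):

    # Illustrative model of a trigger/restart backoff counter (not CPython's API).
    class Backoff:
        def __init__(self, value=16, shift=4):
            self.value = value      # countdown until the next attempt
            self.shift = shift      # controls how long the pause grows

        def triggers(self):
            self.value -= 1
            return self.value <= 0

        def restart(self):
            self.shift = min(self.shift + 1, 12)
            self.value = 1 << self.shift   # exponentially longer pause

    counter = Backoff()
    for tick in range(10_000):
        if counter.triggers():
            optimized = False      # stand-in for a failed optimization attempt
            if not optimized:
                counter.restart()  # try again much later
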
@@ -3070,6 +3052,10 @@ dummy_func(

        tier1 inst(ENTER_EXECUTOR, (--)) {
        #ifdef _Py_TIER2
+           if (IS_JIT_TRACING()) {
+               next_instr = this_instr;
+               goto stop_tracing;
+           }
            PyCodeObject *code = _PyFrame_GetCode(frame);
            _PyExecutorObject *executor = code->co_executors->executors[oparg & 255];
            assert(executor->vm_data.index == INSTR_OFFSET() - 1);

@@ -3131,7 +3117,7 @@ dummy_func(

        macro(POP_JUMP_IF_NOT_NONE) = unused/1 + _IS_NONE + _POP_JUMP_IF_FALSE;

-       tier1 inst(JUMP_BACKWARD_NO_INTERRUPT, (--)) {
+       replaced inst(JUMP_BACKWARD_NO_INTERRUPT, (--)) {
            /* This bytecode is used in the `yield from` or `await` loop.
             * If there is an interrupt, we want it handled in the innermost
             * generator or coroutine, so we deliberately do not check it here.

@@ -3742,7 +3728,7 @@ dummy_func(
        #if ENABLE_SPECIALIZATION_FT
            if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
                next_instr = this_instr;
-               _Py_Specialize_Call(callable, next_instr, oparg + !PyStackRef_IsNull(self_or_null));
+               _Py_Specialize_Call(callable, self_or_null, next_instr, oparg + !PyStackRef_IsNull(self_or_null));
                DISPATCH_SAME_OPARG();
            }
            OPCODE_DEFERRED_INC(CALL);

@@ -3944,27 +3930,20 @@ dummy_func(
        #if TIER_ONE
            assert(opcode != INSTRUMENTED_CALL);
        #endif
-           PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
-
            int total_args = oparg;
            _PyStackRef *arguments = args;
            if (!PyStackRef_IsNull(self_or_null)) {
                arguments--;
                total_args++;
            }
            /* Callable is not a normal Python function */
-           STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
-           if (CONVERSION_FAILED(args_o)) {
-               DECREF_INPUTS();
-               ERROR_IF(true);
-           }
-           PyObject *res_o = PyObject_Vectorcall(
-               callable_o, args_o,
-               total_args | PY_VECTORCALL_ARGUMENTS_OFFSET,
-               NULL);
-           STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
-           assert((res_o != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
-           DECREF_INPUTS();
+           PyObject *res_o = _Py_VectorCall_StackRefSteal(
+               callable,
+               arguments,
+               total_args,
+               PyStackRef_NULL);
+           DEAD(args);
+           DEAD(self_or_null);
+           DEAD(callable);
            ERROR_IF(res_o == NULL);
            res = PyStackRef_FromPyObjectSteal(res_o);
        }

@@ -4239,14 +4218,13 @@ dummy_func(
            }
            DEOPT_IF(tp->tp_vectorcall == NULL);
            STAT_INC(CALL, hit);
-           STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
-           if (CONVERSION_FAILED(args_o)) {
-               DECREF_INPUTS();
-               ERROR_IF(true);
-           }
-           PyObject *res_o = tp->tp_vectorcall((PyObject *)tp, args_o, total_args, NULL);
-           STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
-           DECREF_INPUTS();
+           PyObject *res_o = _Py_CallBuiltinClass_StackRefSteal(
+               callable,
+               arguments,
+               total_args);
+           DEAD(args);
+           DEAD(self_or_null);
+           DEAD(callable);
            ERROR_IF(res_o == NULL);
            res = PyStackRef_FromPyObjectSteal(res_o);
        }

@@ -4294,31 +4272,24 @@ dummy_func(

        op(_CALL_BUILTIN_FAST, (callable, self_or_null, args[oparg] -- res)) {
            /* Builtin METH_FASTCALL functions, without keywords */
-           PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
-
            int total_args = oparg;
            _PyStackRef *arguments = args;
            if (!PyStackRef_IsNull(self_or_null)) {
                arguments--;
                total_args++;
            }
+           PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
            DEOPT_IF(!PyCFunction_CheckExact(callable_o));
            DEOPT_IF(PyCFunction_GET_FLAGS(callable_o) != METH_FASTCALL);
            STAT_INC(CALL, hit);
-           PyCFunction cfunc = PyCFunction_GET_FUNCTION(callable_o);
-           /* res = func(self, args, nargs) */
-           STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
-           if (CONVERSION_FAILED(args_o)) {
-               DECREF_INPUTS();
-               ERROR_IF(true);
-           }
-           PyObject *res_o = _PyCFunctionFast_CAST(cfunc)(
-               PyCFunction_GET_SELF(callable_o),
-               args_o,
-               total_args);
-           STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
-           assert((res_o != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
-           DECREF_INPUTS();
+           PyObject *res_o = _Py_BuiltinCallFast_StackRefSteal(
+               callable,
+               arguments,
+               total_args
+           );
+           DEAD(args);
+           DEAD(self_or_null);
+           DEAD(callable);
            ERROR_IF(res_o == NULL);
            res = PyStackRef_FromPyObjectSteal(res_o);
        }

@@ -4331,30 +4302,20 @@ dummy_func(

        op(_CALL_BUILTIN_FAST_WITH_KEYWORDS, (callable, self_or_null, args[oparg] -- res)) {
            /* Builtin METH_FASTCALL | METH_KEYWORDS functions */
-           PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
-
            int total_args = oparg;
            _PyStackRef *arguments = args;
            if (!PyStackRef_IsNull(self_or_null)) {
                arguments--;
                total_args++;
            }
+           PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
            DEOPT_IF(!PyCFunction_CheckExact(callable_o));
            DEOPT_IF(PyCFunction_GET_FLAGS(callable_o) != (METH_FASTCALL | METH_KEYWORDS));
            STAT_INC(CALL, hit);
-           /* res = func(self, arguments, nargs, kwnames) */
-           PyCFunctionFastWithKeywords cfunc =
-               _PyCFunctionFastWithKeywords_CAST(PyCFunction_GET_FUNCTION(callable_o));
-
-           STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
-           if (CONVERSION_FAILED(args_o)) {
-               DECREF_INPUTS();
-               ERROR_IF(true);
-           }
-           PyObject *res_o = cfunc(PyCFunction_GET_SELF(callable_o), args_o, total_args, NULL);
-           STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
-           assert((res_o != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
-           DECREF_INPUTS();
+           PyObject *res_o = _Py_BuiltinCallFastWithKeywords_StackRefSteal(callable, arguments, total_args);
+           DEAD(args);
+           DEAD(self_or_null);
+           DEAD(callable);
            ERROR_IF(res_o == NULL);
            res = PyStackRef_FromPyObjectSteal(res_o);
        }

@@ -4448,7 +4409,6 @@ dummy_func(
            assert(oparg == 1);
            PyObject *self_o = PyStackRef_AsPyObjectBorrow(self);

-           DEOPT_IF(!PyList_CheckExact(self_o));
            DEOPT_IF(!LOCK_OBJECT(self_o));
            STAT_INC(CALL, hit);
            int err = _PyList_AppendTakeRef((PyListObject *)self_o, PyStackRef_AsPyObjectSteal(arg));

@@ -4522,19 +4482,16 @@ dummy_func(
            assert(self != NULL);
            EXIT_IF(!Py_IS_TYPE(self, d_type));
            STAT_INC(CALL, hit);
-           int nargs = total_args - 1;
-
-           STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
-           if (CONVERSION_FAILED(args_o)) {
-               DECREF_INPUTS();
-               ERROR_IF(true);
-           }
-           PyCFunctionFastWithKeywords cfunc =
-               _PyCFunctionFastWithKeywords_CAST(meth->ml_meth);
-           PyObject *res_o = cfunc(self, (args_o + 1), nargs, NULL);
-           STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
-           assert((res_o != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
-           DECREF_INPUTS();
+           PyObject *res_o = _PyCallMethodDescriptorFastWithKeywords_StackRefSteal(
+               callable,
+               meth,
+               self,
+               arguments,
+               total_args
+           );
+           DEAD(args);
+           DEAD(self_or_null);
+           DEAD(callable);
            ERROR_IF(res_o == NULL);
            res = PyStackRef_FromPyObjectSteal(res_o);
        }

@@ -4602,18 +4559,16 @@ dummy_func(
            assert(self != NULL);
            EXIT_IF(!Py_IS_TYPE(self, method->d_common.d_type));
            STAT_INC(CALL, hit);
-           int nargs = total_args - 1;
-
-           STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
-           if (CONVERSION_FAILED(args_o)) {
-               DECREF_INPUTS();
-               ERROR_IF(true);
-           }
-           PyCFunctionFast cfunc = _PyCFunctionFast_CAST(meth->ml_meth);
-           PyObject *res_o = cfunc(self, (args_o + 1), nargs);
-           STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
-           assert((res_o != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
-           DECREF_INPUTS();
+           PyObject *res_o = _PyCallMethodDescriptorFast_StackRefSteal(
+               callable,
+               meth,
+               self,
+               arguments,
+               total_args
+           );
+           DEAD(args);
+           DEAD(self_or_null);
+           DEAD(callable);
            ERROR_IF(res_o == NULL);
            res = PyStackRef_FromPyObjectSteal(res_o);
        }

@@ -4846,30 +4801,21 @@ dummy_func(
        #if TIER_ONE
            assert(opcode != INSTRUMENTED_CALL);
        #endif
-           PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
-
            int total_args = oparg;
            _PyStackRef *arguments = args;
            if (!PyStackRef_IsNull(self_or_null)) {
                arguments--;
                total_args++;
            }
-           /* Callable is not a normal Python function */
-           STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
-           if (CONVERSION_FAILED(args_o)) {
-               DECREF_INPUTS();
-               ERROR_IF(true);
-           }
-           PyObject *kwnames_o = PyStackRef_AsPyObjectBorrow(kwnames);
-           int positional_args = total_args - (int)PyTuple_GET_SIZE(kwnames_o);
-           PyObject *res_o = PyObject_Vectorcall(
-               callable_o, args_o,
-               positional_args | PY_VECTORCALL_ARGUMENTS_OFFSET,
-               kwnames_o);
-           PyStackRef_CLOSE(kwnames);
-           STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
-           assert((res_o != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
-           DECREF_INPUTS();
+           PyObject *res_o = _Py_VectorCall_StackRefSteal(
+               callable,
+               arguments,
+               total_args,
+               kwnames);
+           DEAD(kwnames);
+           DEAD(args);
+           DEAD(self_or_null);
+           DEAD(callable);
            ERROR_IF(res_o == NULL);
            res = PyStackRef_FromPyObjectSteal(res_o);
        }

@@ -5298,19 +5244,40 @@ dummy_func(
        tier2 op(_EXIT_TRACE, (exit_p/4 --)) {
            _PyExitData *exit = (_PyExitData *)exit_p;
        #if defined(Py_DEBUG) && !defined(_Py_JIT)
-           _Py_CODEUNIT *target = _PyFrame_GetBytecode(frame) + exit->target;
+           const _Py_CODEUNIT *target = ((frame->owner == FRAME_OWNED_BY_INTERPRETER)
+               ? _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR : _PyFrame_GetBytecode(frame))
+               + exit->target;
            OPT_HIST(trace_uop_execution_counter, trace_run_length_hist);
-           if (frame->lltrace >= 2) {
+           if (frame->lltrace >= 3) {
                printf("SIDE EXIT: [UOp ");
                _PyUOpPrint(&next_uop[-1]);
+               printf(", exit %tu, temp %d, target %d -> %s, is_control_flow %d]\n",
+                      exit - current_executor->exits, exit->temperature.value_and_backoff,
+                      (int)(target - _PyFrame_GetBytecode(frame)),
+                      _PyOpcode_OpName[target->op.code], exit->is_control_flow);
            }
        #endif
            tstate->jit_exit = exit;
            TIER2_TO_TIER2(exit->executor);
        }

        tier2 op(_DYNAMIC_EXIT, (exit_p/4 --)) {
+       #if defined(Py_DEBUG) && !defined(_Py_JIT)
            _PyExitData *exit = (_PyExitData *)exit_p;
            _Py_CODEUNIT *target = frame->instr_ptr;
+           OPT_HIST(trace_uop_execution_counter, trace_run_length_hist);
+           if (frame->lltrace >= 3) {
+               printf("DYNAMIC EXIT: [UOp ");
+               _PyUOpPrint(&next_uop[-1]);
+               printf(", exit %tu, temp %d, target %d -> %s]\n",
+                      exit - current_executor->exits, exit->temperature.value_and_backoff,
+                      (int)(target - _PyFrame_GetBytecode(frame)),
+                      _PyOpcode_OpName[target->op.code]);
+           }
+       #endif
-           tstate->jit_exit = exit;
-           TIER2_TO_TIER2(exit->executor);
+           // Disabled for now (gh-139109) as it slows down dynamic code tremendously.
+           // Compile and jump to the cold dynamic executors in the future.
+           GOTO_TIER_ONE(frame->instr_ptr);
        }

        tier2 op(_CHECK_VALIDITY, (--)) {

@@ -5422,7 +5389,8 @@ dummy_func(
        }

        tier2 op(_DEOPT, (--)) {
-           GOTO_TIER_ONE(_PyFrame_GetBytecode(frame) + CURRENT_TARGET());
+           GOTO_TIER_ONE((frame->owner == FRAME_OWNED_BY_INTERPRETER)
+               ? _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR : _PyFrame_GetBytecode(frame) + CURRENT_TARGET());
        }

        tier2 op(_HANDLE_PENDING_AND_DEOPT, (--)) {

@@ -5452,32 +5420,76 @@ dummy_func(
        tier2 op(_COLD_EXIT, ( -- )) {
            _PyExitData *exit = tstate->jit_exit;
            assert(exit != NULL);
+           assert(frame->owner < FRAME_OWNED_BY_INTERPRETER);
            _Py_CODEUNIT *target = _PyFrame_GetBytecode(frame) + exit->target;
            _Py_BackoffCounter temperature = exit->temperature;
-           if (!backoff_counter_triggers(temperature)) {
-               exit->temperature = advance_backoff_counter(temperature);
-               GOTO_TIER_ONE(target);
-           }
            _PyExecutorObject *executor;
            if (target->op.code == ENTER_EXECUTOR) {
                PyCodeObject *code = _PyFrame_GetCode(frame);
                executor = code->co_executors->executors[target->op.arg];
                Py_INCREF(executor);
+               assert(tstate->jit_exit == exit);
+               exit->executor = executor;
+               TIER2_TO_TIER2(exit->executor);
            }
            else {
+               if (!backoff_counter_triggers(temperature)) {
+                   exit->temperature = advance_backoff_counter(temperature);
+                   GOTO_TIER_ONE(target);
+               }
                _PyExecutorObject *previous_executor = _PyExecutor_FromExit(exit);
                assert(tstate->current_executor == (PyObject *)previous_executor);
-               int chain_depth = previous_executor->vm_data.chain_depth + 1;
-               int optimized = _PyOptimizer_Optimize(frame, target, &executor, chain_depth);
-               if (optimized <= 0) {
-                   exit->temperature = restart_backoff_counter(temperature);
-                   GOTO_TIER_ONE(optimized < 0 ? NULL : target);
-               }
-               exit->temperature = initial_temperature_backoff_counter();
+               // For control-flow guards, we don't want to increase the chain depth, as those don't actually
+               // represent deopts but rather just normal programs!
+               int chain_depth = previous_executor->vm_data.chain_depth + !exit->is_control_flow;
+               // Note: it's safe to use target->op.arg here instead of the oparg given by EXTENDED_ARG.
+               // The invariant in the optimizer is the deopt target always points back to the first EXTENDED_ARG.
+               // So setting it to anything else is wrong.
+               int succ = _PyJit_TryInitializeTracing(tstate, frame, target, target, target, STACK_LEVEL(), chain_depth, exit, target->op.arg);
+               exit->temperature = restart_backoff_counter(exit->temperature);
+               if (succ) {
+                   GOTO_TIER_ONE_CONTINUE_TRACING(target);
+               }
+               exit->temperature = initial_temperature_backoff_counter();
+               GOTO_TIER_ONE(target);
            }
-           assert(tstate->jit_exit == exit);
-           exit->executor = executor;
-           TIER2_TO_TIER2(exit->executor);
        }

+       tier2 op(_COLD_DYNAMIC_EXIT, ( -- )) {
+           // TODO (gh-139109): This should be similar to _COLD_EXIT in the future.
+           _Py_CODEUNIT *target = frame->instr_ptr;
+           GOTO_TIER_ONE(target);
+       }
+
+       tier2 op(_GUARD_IP__PUSH_FRAME, (ip/4 --)) {
+           _Py_CODEUNIT *target = frame->instr_ptr + IP_OFFSET_OF(_PUSH_FRAME);
+           if (target != (_Py_CODEUNIT *)ip) {
+               frame->instr_ptr += IP_OFFSET_OF(_PUSH_FRAME);
+               EXIT_IF(true);
+           }
+       }
+
+       tier2 op(_GUARD_IP_YIELD_VALUE, (ip/4 --)) {
+           _Py_CODEUNIT *target = frame->instr_ptr + IP_OFFSET_OF(YIELD_VALUE);
+           if (target != (_Py_CODEUNIT *)ip) {
+               frame->instr_ptr += IP_OFFSET_OF(YIELD_VALUE);
+               EXIT_IF(true);
+           }
+       }
+
+       tier2 op(_GUARD_IP_RETURN_VALUE, (ip/4 --)) {
+           _Py_CODEUNIT *target = frame->instr_ptr + IP_OFFSET_OF(RETURN_VALUE);
+           if (target != (_Py_CODEUNIT *)ip) {
+               frame->instr_ptr += IP_OFFSET_OF(RETURN_VALUE);
+               EXIT_IF(true);
+           }
+       }
+
+       tier2 op(_GUARD_IP_RETURN_GENERATOR, (ip/4 --)) {
+           _Py_CODEUNIT *target = frame->instr_ptr + IP_OFFSET_OF(RETURN_GENERATOR);
+           if (target != (_Py_CODEUNIT *)ip) {
+               frame->instr_ptr += IP_OFFSET_OF(RETURN_GENERATOR);
+               EXIT_IF(true);
+           }
+       }
+
        label(pop_2_error) {

@@ -5624,6 +5636,65 @@ dummy_func(
            DISPATCH();
        }

+       inst(TRACE_RECORD, (--)) {
+       #if _Py_TIER2
+           assert(IS_JIT_TRACING());
+           next_instr = this_instr;
+           frame->instr_ptr = prev_instr;
+           opcode = next_instr->op.code;
+           bool stop_tracing = (opcode == WITH_EXCEPT_START ||
+                                opcode == RERAISE || opcode == CLEANUP_THROW ||
+                                opcode == PUSH_EXC_INFO || opcode == INTERPRETER_EXIT);
+           int full = !_PyJit_translate_single_bytecode_to_trace(tstate, frame, next_instr, stop_tracing ? _DEOPT : 0);
+           if (full) {
+               LEAVE_TRACING();
+               int err = stop_tracing_and_jit(tstate, frame);
+               ERROR_IF(err < 0);
+               DISPATCH();
+           }
+           // Super instructions. Instruction deopted. There's a mismatch in what the stack expects
+           // in the optimizer. So we have to reflect in the trace correctly.
+           _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
+           if ((_tstate->jit_tracer_state.prev_state.instr->op.code == CALL_LIST_APPEND &&
+                opcode == POP_TOP) ||
+               (_tstate->jit_tracer_state.prev_state.instr->op.code == BINARY_OP_INPLACE_ADD_UNICODE &&
+                opcode == STORE_FAST)) {
+               _tstate->jit_tracer_state.prev_state.instr_is_super = true;
+           }
+           else {
+               _tstate->jit_tracer_state.prev_state.instr = next_instr;
+           }
+           PyObject *prev_code = PyStackRef_AsPyObjectBorrow(frame->f_executable);
+           if (_tstate->jit_tracer_state.prev_state.instr_code != (PyCodeObject *)prev_code) {
+               Py_SETREF(_tstate->jit_tracer_state.prev_state.instr_code, (PyCodeObject*)Py_NewRef((prev_code)));
+           }
+
+           _tstate->jit_tracer_state.prev_state.instr_frame = frame;
+           _tstate->jit_tracer_state.prev_state.instr_oparg = oparg;
+           _tstate->jit_tracer_state.prev_state.instr_stacklevel = PyStackRef_IsNone(frame->f_executable) ? 2 : STACK_LEVEL();
+           if (_PyOpcode_Caches[_PyOpcode_Deopt[opcode]]) {
+               (&next_instr[1])->counter = trigger_backoff_counter();
+           }
+           DISPATCH_GOTO_NON_TRACING();
+       #else
+           (void)prev_instr;
+           Py_FatalError("JIT instruction executed in non-jit build.");
+       #endif
+       }
+
+       label(stop_tracing) {
+       #if _Py_TIER2
+           assert(IS_JIT_TRACING());
+           int opcode = next_instr->op.code;
+           _PyJit_translate_single_bytecode_to_trace(tstate, frame, NULL, _EXIT_TRACE);
+           LEAVE_TRACING();
+           int err = stop_tracing_and_jit(tstate, frame);
+           ERROR_IF(err < 0);
+           DISPATCH_GOTO_NON_TRACING();
+       #else
+           Py_FatalError("JIT label executed in non-jit build.");
+       #endif
+       }
+

        // END BYTECODES //

Python/ceval.c (493 lines changed)

@@ -352,13 +352,23 @@ _Py_ReachedRecursionLimitWithMargin(PyThreadState *tstate, int margin_count)
 {
     uintptr_t here_addr = _Py_get_machine_stack_pointer();
     _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
+#if _Py_STACK_GROWS_DOWN
     if (here_addr > _tstate->c_stack_soft_limit + margin_count * _PyOS_STACK_MARGIN_BYTES) {
+#else
+    if (here_addr <= _tstate->c_stack_soft_limit - margin_count * _PyOS_STACK_MARGIN_BYTES) {
+#endif
         return 0;
     }
     if (_tstate->c_stack_hard_limit == 0) {
         _Py_InitializeRecursionLimits(tstate);
     }
-    return here_addr <= _tstate->c_stack_soft_limit + margin_count * _PyOS_STACK_MARGIN_BYTES;
+#if _Py_STACK_GROWS_DOWN
+    return here_addr <= _tstate->c_stack_soft_limit + margin_count * _PyOS_STACK_MARGIN_BYTES &&
+           here_addr >= _tstate->c_stack_soft_limit - 2 * _PyOS_STACK_MARGIN_BYTES;
+#else
+    return here_addr > _tstate->c_stack_soft_limit - margin_count * _PyOS_STACK_MARGIN_BYTES &&
+           here_addr <= _tstate->c_stack_soft_limit + 2 * _PyOS_STACK_MARGIN_BYTES;
+#endif
 }

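The new _Py_STACK_GROWS_DOWN branches run the same margin test in both stack directions: on a downward-growing stack the live region approaches the soft limit from above, on an upward-growing one from below. A numeric sketch of the downward case (illustrative constants, not the real limits; one plausible reading of the added lower bound is that it also rejects pointers already far past the limit):

    # Downward-growing stack: addresses shrink as C frames are pushed.
    MARGIN = 4096            # stand-in for _PyOS_STACK_MARGIN_BYTES
    SOFT = 0x1000_0000       # stand-in for c_stack_soft_limit

    def reached_limit_with_margin(sp, margin_count):
        if sp > SOFT + margin_count * MARGIN:
            return False     # still comfortably above the soft limit
        # Within the requested margin, and not absurdly far below it.
        return sp <= SOFT + margin_count * MARGIN and sp >= SOFT - 2 * MARGIN

    print(reached_limit_with_margin(SOFT + 3 * MARGIN, 1))   # False
    print(reached_limit_with_margin(SOFT + MARGIN // 2, 1))  # True
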
@@ -366,7 +376,11 @@ _Py_EnterRecursiveCallUnchecked(PyThreadState *tstate)
 {
     uintptr_t here_addr = _Py_get_machine_stack_pointer();
     _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
+#if _Py_STACK_GROWS_DOWN
     if (here_addr < _tstate->c_stack_hard_limit) {
+#else
+    if (here_addr > _tstate->c_stack_hard_limit) {
+#endif
         Py_FatalError("Unchecked stack overflow.");
     }
 }

@@ -444,7 +458,7 @@ int pthread_attr_destroy(pthread_attr_t *a)
 #endif

 static void
-hardware_stack_limits(uintptr_t *top, uintptr_t *base)
+hardware_stack_limits(uintptr_t *base, uintptr_t *top, uintptr_t sp)
 {
 #ifdef WIN32
     ULONG_PTR low, high;

@@ -480,32 +494,113 @@ hardware_stack_limits(uintptr_t *base, uintptr_t *top, uintptr_t sp)
         return;
     }
 # endif
-    uintptr_t here_addr = _Py_get_machine_stack_pointer();
-    uintptr_t top_addr = _Py_SIZE_ROUND_UP(here_addr, 4096);
+    // Add some space for caller function then round to minimum page size
+    // This is a guess at the top of the stack, but should be a reasonably
+    // good guess if called from _PyThreadState_Attach when creating a thread.
+    // If the thread is attached deep in a call stack, then the guess will be poor.
+#if _Py_STACK_GROWS_DOWN
+    uintptr_t top_addr = _Py_SIZE_ROUND_UP(sp + 8*sizeof(void*), SYSTEM_PAGE_SIZE);
     *top = top_addr;
     *base = top_addr - Py_C_STACK_SIZE;
+# else
+    uintptr_t base_addr = _Py_SIZE_ROUND_DOWN(sp - 8*sizeof(void*), SYSTEM_PAGE_SIZE);
+    *base = base_addr;
+    *top = base_addr + Py_C_STACK_SIZE;
+#endif
 #endif
 }

+static void
+tstate_set_stack(PyThreadState *tstate,
+                 uintptr_t base, uintptr_t top)
+{
+    assert(base < top);
+    assert((top - base) >= _PyOS_MIN_STACK_SIZE);
+
+#ifdef _Py_THREAD_SANITIZER
+    // Thread sanitizer crashes if we use more than half the stack.
+    uintptr_t stacksize = top - base;
+# if _Py_STACK_GROWS_DOWN
+    base += stacksize/2;
+# else
+    top -= stacksize/2;
+# endif
+#endif
+    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
+#if _Py_STACK_GROWS_DOWN
+    _tstate->c_stack_top = top;
+    _tstate->c_stack_hard_limit = base + _PyOS_STACK_MARGIN_BYTES;
+    _tstate->c_stack_soft_limit = base + _PyOS_STACK_MARGIN_BYTES * 2;
+# ifndef NDEBUG
+    // Sanity checks
+    _PyThreadStateImpl *ts = (_PyThreadStateImpl *)tstate;
+    assert(ts->c_stack_hard_limit <= ts->c_stack_soft_limit);
+    assert(ts->c_stack_soft_limit < ts->c_stack_top);
+# endif
+#else
+    _tstate->c_stack_top = base;
+    _tstate->c_stack_hard_limit = top - _PyOS_STACK_MARGIN_BYTES;
+    _tstate->c_stack_soft_limit = top - _PyOS_STACK_MARGIN_BYTES * 2;
+# ifndef NDEBUG
+    // Sanity checks
+    _PyThreadStateImpl *ts = (_PyThreadStateImpl *)tstate;
+    assert(ts->c_stack_hard_limit >= ts->c_stack_soft_limit);
+    assert(ts->c_stack_soft_limit > ts->c_stack_top);
+# endif
+#endif
+}
+
+
 void
 _Py_InitializeRecursionLimits(PyThreadState *tstate)
 {
-    uintptr_t top;
-    uintptr_t base;
-    hardware_stack_limits(&top, &base);
-#ifdef _Py_THREAD_SANITIZER
-    // Thread sanitizer crashes if we use more than half the stack.
-    uintptr_t stacksize = top - base;
-    base += stacksize/2;
-#endif
-    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
-    _tstate->c_stack_top = top;
-    _tstate->c_stack_hard_limit = base + _PyOS_STACK_MARGIN_BYTES;
-    _tstate->c_stack_soft_limit = base + _PyOS_STACK_MARGIN_BYTES * 2;
+    uintptr_t base, top;
+    uintptr_t here_addr = _Py_get_machine_stack_pointer();
+    hardware_stack_limits(&base, &top, here_addr);
+    assert(top != 0);
+
+    tstate_set_stack(tstate, base, top);
+    _PyThreadStateImpl *ts = (_PyThreadStateImpl *)tstate;
+    ts->c_stack_init_base = base;
+    ts->c_stack_init_top = top;
 }
+
+
+int
+PyUnstable_ThreadState_SetStackProtection(PyThreadState *tstate,
+                                          void *stack_start_addr, size_t stack_size)
+{
+    if (stack_size < _PyOS_MIN_STACK_SIZE) {
+        PyErr_Format(PyExc_ValueError,
+                     "stack_size must be at least %zu bytes",
+                     _PyOS_MIN_STACK_SIZE);
+        return -1;
+    }
+
+    uintptr_t base = (uintptr_t)stack_start_addr;
+    uintptr_t top = base + stack_size;
+    tstate_set_stack(tstate, base, top);
+    return 0;
+}
+
+
+void
+PyUnstable_ThreadState_ResetStackProtection(PyThreadState *tstate)
+{
+    _PyThreadStateImpl *ts = (_PyThreadStateImpl *)tstate;
+    if (ts->c_stack_init_top != 0) {
+        tstate_set_stack(tstate,
+                         ts->c_stack_init_base,
+                         ts->c_stack_init_top);
+        return;
+    }
+
+    _Py_InitializeRecursionLimits(tstate);
+}
+
+
 /* The function _Py_EnterRecursiveCallTstate() only calls _Py_CheckRecursiveCall()
-   if the recursion_depth reaches recursion_limit. */
+   if the stack pointer is between the stack base and c_stack_hard_limit. */
 int
 _Py_CheckRecursiveCall(PyThreadState *tstate, const char *where)
 {

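tstate_set_stack() derives both limits from the usable [base, top) range: the hard limit sits one margin inside the unsafe end and the soft limit one further margin in. A sketch of that arithmetic for the downward-growing case (assumed constants, not CPython's real values):

    # Illustrative: derive soft/hard limits as tstate_set_stack does
    # for a downward-growing stack.
    MARGIN = 4096                          # _PyOS_STACK_MARGIN_BYTES stand-in
    base, top = 0x1000_0000, 0x1080_0000   # an 8 MiB stack

    c_stack_top = top
    c_stack_hard_limit = base + MARGIN       # fatal error if crossed
    c_stack_soft_limit = base + MARGIN * 2   # RecursionError raised here

    assert c_stack_hard_limit <= c_stack_soft_limit < c_stack_top
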
@@ -513,9 +608,17 @@ _Py_CheckRecursiveCall(PyThreadState *tstate, const char *where)
     uintptr_t here_addr = _Py_get_machine_stack_pointer();
     assert(_tstate->c_stack_soft_limit != 0);
     assert(_tstate->c_stack_hard_limit != 0);
+#if _Py_STACK_GROWS_DOWN
     assert(here_addr >= _tstate->c_stack_hard_limit - _PyOS_STACK_MARGIN_BYTES);
     if (here_addr < _tstate->c_stack_hard_limit) {
         /* Overflowing while handling an overflow. Give up. */
         int kbytes_used = (int)(_tstate->c_stack_top - here_addr)/1024;
+#else
+    assert(here_addr <= _tstate->c_stack_hard_limit + _PyOS_STACK_MARGIN_BYTES);
+    if (here_addr > _tstate->c_stack_hard_limit) {
+        /* Overflowing while handling an overflow. Give up. */
+        int kbytes_used = (int)(here_addr - _tstate->c_stack_top)/1024;
+#endif
         char buffer[80];
         snprintf(buffer, 80, "Unrecoverable stack overflow (used %d kB)%s", kbytes_used, where);
         Py_FatalError(buffer);

@@ -524,7 +627,11 @@ _Py_CheckRecursiveCall(PyThreadState *tstate, const char *where)
         return 0;
     }
     else {
+#if _Py_STACK_GROWS_DOWN
         int kbytes_used = (int)(_tstate->c_stack_top - here_addr)/1024;
+#else
+        int kbytes_used = (int)(here_addr - _tstate->c_stack_top)/1024;
+#endif
         tstate->recursion_headroom++;
         _PyErr_Format(tstate, PyExc_RecursionError,
                       "Stack overflow (used %d kB)%s",

@@ -911,6 +1018,283 @@ PyEval_EvalFrameEx(PyFrameObject *f, int throwflag)

 #include "ceval_macros.h"


+/* Helper functions to keep the size of the largest uops down */
+
+PyObject *
+_Py_VectorCall_StackRefSteal(
+    _PyStackRef callable,
+    _PyStackRef *arguments,
+    int total_args,
+    _PyStackRef kwnames)
+{
+    PyObject *res;
+    STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
+    if (CONVERSION_FAILED(args_o)) {
+        res = NULL;
+        goto cleanup;
+    }
+    PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
+    PyObject *kwnames_o = PyStackRef_AsPyObjectBorrow(kwnames);
+    int positional_args = total_args;
+    if (kwnames_o != NULL) {
+        positional_args -= (int)PyTuple_GET_SIZE(kwnames_o);
+    }
+    res = PyObject_Vectorcall(
+        callable_o, args_o,
+        positional_args | PY_VECTORCALL_ARGUMENTS_OFFSET,
+        kwnames_o);
+    STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
+    assert((res != NULL) ^ (PyErr_Occurred() != NULL));
+cleanup:
+    PyStackRef_XCLOSE(kwnames);
+    // arguments is a pointer into the GC visible stack,
+    // so we must NULL out values as we clear them.
+    for (int i = total_args-1; i >= 0; i--) {
+        _PyStackRef tmp = arguments[i];
+        arguments[i] = PyStackRef_NULL;
+        PyStackRef_CLOSE(tmp);
+    }
+    PyStackRef_CLOSE(callable);
+    return res;
+}
+
+PyObject *
+_Py_BuiltinCallFast_StackRefSteal(
+    _PyStackRef callable,
+    _PyStackRef *arguments,
+    int total_args)
+{
+    PyObject *res;
+    STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
+    if (CONVERSION_FAILED(args_o)) {
+        res = NULL;
+        goto cleanup;
+    }
+    PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
+    PyCFunction cfunc = PyCFunction_GET_FUNCTION(callable_o);
+    res = _PyCFunctionFast_CAST(cfunc)(
+        PyCFunction_GET_SELF(callable_o),
+        args_o,
+        total_args
+    );
+    STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
+    assert((res != NULL) ^ (PyErr_Occurred() != NULL));
+cleanup:
+    // arguments is a pointer into the GC visible stack,
+    // so we must NULL out values as we clear them.
+    for (int i = total_args-1; i >= 0; i--) {
+        _PyStackRef tmp = arguments[i];
+        arguments[i] = PyStackRef_NULL;
+        PyStackRef_CLOSE(tmp);
+    }
+    PyStackRef_CLOSE(callable);
+    return res;
+}
+
+PyObject *
+_Py_BuiltinCallFastWithKeywords_StackRefSteal(
+    _PyStackRef callable,
+    _PyStackRef *arguments,
+    int total_args)
+{
+    PyObject *res;
+    STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
+    if (CONVERSION_FAILED(args_o)) {
+        res = NULL;
+        goto cleanup;
+    }
+    PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
+    PyCFunctionFastWithKeywords cfunc =
+        _PyCFunctionFastWithKeywords_CAST(PyCFunction_GET_FUNCTION(callable_o));
+    res = cfunc(PyCFunction_GET_SELF(callable_o), args_o, total_args, NULL);
+    STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
+    assert((res != NULL) ^ (PyErr_Occurred() != NULL));
+cleanup:
+    // arguments is a pointer into the GC visible stack,
+    // so we must NULL out values as we clear them.
+    for (int i = total_args-1; i >= 0; i--) {
+        _PyStackRef tmp = arguments[i];
+        arguments[i] = PyStackRef_NULL;
+        PyStackRef_CLOSE(tmp);
+    }
+    PyStackRef_CLOSE(callable);
+    return res;
+}
+
+PyObject *
+_PyCallMethodDescriptorFast_StackRefSteal(
+    _PyStackRef callable,
+    PyMethodDef *meth,
+    PyObject *self,
+    _PyStackRef *arguments,
+    int total_args)
+{
+    PyObject *res;
+    STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
+    if (CONVERSION_FAILED(args_o)) {
+        res = NULL;
+        goto cleanup;
+    }
+    assert(((PyMethodDescrObject *)PyStackRef_AsPyObjectBorrow(callable))->d_method == meth);
+    assert(self == PyStackRef_AsPyObjectBorrow(arguments[0]));
+
+    PyCFunctionFast cfunc = _PyCFunctionFast_CAST(meth->ml_meth);
+    res = cfunc(self, (args_o + 1), total_args - 1);
+    STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
+    assert((res != NULL) ^ (PyErr_Occurred() != NULL));
+cleanup:
+    // arguments is a pointer into the GC visible stack,
+    // so we must NULL out values as we clear them.
+    for (int i = total_args-1; i >= 0; i--) {
+        _PyStackRef tmp = arguments[i];
+        arguments[i] = PyStackRef_NULL;
+        PyStackRef_CLOSE(tmp);
+    }
+    PyStackRef_CLOSE(callable);
+    return res;
+}
+
+PyObject *
+_PyCallMethodDescriptorFastWithKeywords_StackRefSteal(
+    _PyStackRef callable,
+    PyMethodDef *meth,
+    PyObject *self,
+    _PyStackRef *arguments,
+    int total_args)
+{
+    PyObject *res;
+    STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
+    if (CONVERSION_FAILED(args_o)) {
+        res = NULL;
+        goto cleanup;
+    }
+    assert(((PyMethodDescrObject *)PyStackRef_AsPyObjectBorrow(callable))->d_method == meth);
+    assert(self == PyStackRef_AsPyObjectBorrow(arguments[0]));
+
+    PyCFunctionFastWithKeywords cfunc =
+        _PyCFunctionFastWithKeywords_CAST(meth->ml_meth);
+    res = cfunc(self, (args_o + 1), total_args-1, NULL);
+    STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
+    assert((res != NULL) ^ (PyErr_Occurred() != NULL));
+cleanup:
+    // arguments is a pointer into the GC visible stack,
+    // so we must NULL out values as we clear them.
+    for (int i = total_args-1; i >= 0; i--) {
+        _PyStackRef tmp = arguments[i];
+        arguments[i] = PyStackRef_NULL;
+        PyStackRef_CLOSE(tmp);
+    }
+    PyStackRef_CLOSE(callable);
+    return res;
+}
+
+PyObject *
+_Py_CallBuiltinClass_StackRefSteal(
+    _PyStackRef callable,
+    _PyStackRef *arguments,
+    int total_args)
+{
+    PyObject *res;
+    STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
+    if (CONVERSION_FAILED(args_o)) {
+        res = NULL;
+        goto cleanup;
+    }
+    PyTypeObject *tp = (PyTypeObject *)PyStackRef_AsPyObjectBorrow(callable);
+    res = tp->tp_vectorcall((PyObject *)tp, args_o, total_args, NULL);
+    STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
+    assert((res != NULL) ^ (PyErr_Occurred() != NULL));
+cleanup:
+    // arguments is a pointer into the GC visible stack,
+    // so we must NULL out values as we clear them.
+    for (int i = total_args-1; i >= 0; i--) {
+        _PyStackRef tmp = arguments[i];
+        arguments[i] = PyStackRef_NULL;
+        PyStackRef_CLOSE(tmp);
+    }
+    PyStackRef_CLOSE(callable);
+    return res;
+}
+
+PyObject *
+_Py_BuildString_StackRefSteal(
+    _PyStackRef *arguments,
+    int total_args)
+{
+    PyObject *res;
+    STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
+    if (CONVERSION_FAILED(args_o)) {
+        res = NULL;
+        goto cleanup;
+    }
+    res = _PyUnicode_JoinArray(&_Py_STR(empty), args_o, total_args);
+    STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
+    assert((res != NULL) ^ (PyErr_Occurred() != NULL));
+cleanup:
+    // arguments is a pointer into the GC visible stack,
+    // so we must NULL out values as we clear them.
+    for (int i = total_args-1; i >= 0; i--) {
+        _PyStackRef tmp = arguments[i];
+        arguments[i] = PyStackRef_NULL;
+        PyStackRef_CLOSE(tmp);
+    }
+    return res;
+}
+
+PyObject *
+_Py_BuildMap_StackRefSteal(
+    _PyStackRef *arguments,
+    int half_args)
+{
+    PyObject *res;
+    STACKREFS_TO_PYOBJECTS(arguments, half_args*2, args_o);
+    if (CONVERSION_FAILED(args_o)) {
+        res = NULL;
+        goto cleanup;
+    }
+    res = _PyDict_FromItems(
+        args_o, 2,
+        args_o+1, 2,
+        half_args
+    );
+    STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
+    assert((res != NULL) ^ (PyErr_Occurred() != NULL));
+cleanup:
+    // arguments is a pointer into the GC visible stack,
+    // so we must NULL out values as we clear them.
+    for (int i = half_args*2-1; i >= 0; i--) {
+        _PyStackRef tmp = arguments[i];
+        arguments[i] = PyStackRef_NULL;
+        PyStackRef_CLOSE(tmp);
+    }
+    return res;
+}
+
+#ifdef Py_DEBUG
+void
+_Py_assert_within_stack_bounds(
+    _PyInterpreterFrame *frame, _PyStackRef *stack_pointer,
+    const char *filename, int lineno
+) {
+    if (frame->owner == FRAME_OWNED_BY_INTERPRETER) {
+        return;
+    }
+    int level = (int)(stack_pointer - _PyFrame_Stackbase(frame));
+    if (level < 0) {
+        printf("Stack underflow (depth = %d) at %s:%d\n", level, filename, lineno);
+        fflush(stdout);
+        abort();
+    }
+    int size = _PyFrame_GetCode(frame)->co_stacksize;
+    if (level > size) {
+        printf("Stack overflow (depth = %d) at %s:%d\n", level, filename, lineno);
+        fflush(stdout);
+        abort();
+    }
+}
+#endif
+
 int _Py_CheckRecursiveCallPy(
     PyThreadState *tstate)
 {

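All of these helpers share one cleanup discipline, stated in the repeated comment: because "arguments" points into the GC-visible value stack, each slot is set to NULL before its reference is released, so a collection triggered by the release never sees a stale entry. A toy model of that ordering (plain Python stand-ins, not the stackref API):

    # Toy model: clear a shared, observer-visible slot before releasing it.
    stack = [object(), object(), object()]   # stand-in for the GC-visible stack

    def close(ref):
        pass  # stand-in for PyStackRef_CLOSE (may run arbitrary destructors)

    for i in range(len(stack) - 1, -1, -1):
        tmp = stack[i]
        stack[i] = None   # observers of `stack` never see a dangling entry
        close(tmp)        # only then drop the reference
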
@@ -942,6 +1326,8 @@ static const _Py_CODEUNIT _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS[] = {
     { .op.code = RESUME, .op.arg = RESUME_OPARG_DEPTH1_MASK | RESUME_AT_FUNC_START }
 };

+const _Py_CODEUNIT *_Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR = (_Py_CODEUNIT*)&_Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS;
+
 #ifdef Py_DEBUG
 extern void _PyUOpPrint(const _PyUOpInstruction *uop);
 #endif

@@ -970,11 +1356,12 @@ _PyObjectArray_FromStackRefArray(_PyStackRef *input, Py_ssize_t nargs, PyObject
         if (result == NULL) {
             return NULL;
         }
-        result++;
     }
     else {
         result = scratch;
     }
+    result++;
+    result[0] = NULL; /* Keep GCC happy */
     for (int i = 0; i < nargs; i++) {
         result[i] = PyStackRef_AsPyObjectBorrow(input[i]);
     }

@@ -989,6 +1376,49 @@ _PyObjectArray_Free(PyObject **array, PyObject **scratch)
     }
 }

+#ifdef Py_DEBUG
+#define ASSERT_WITHIN_STACK_BOUNDS(F, L) _Py_assert_within_stack_bounds(frame, stack_pointer, (F), (L))
+#else
+#define ASSERT_WITHIN_STACK_BOUNDS(F, L) (void)0
+#endif
+
+#if _Py_TIER2
+// 0 for success, -1 for error.
+static int
+stop_tracing_and_jit(PyThreadState *tstate, _PyInterpreterFrame *frame)
+{
+    int _is_sys_tracing = (tstate->c_tracefunc != NULL) || (tstate->c_profilefunc != NULL);
+    int err = 0;
+    if (!_PyErr_Occurred(tstate) && !_is_sys_tracing) {
+        err = _PyOptimizer_Optimize(frame, tstate);
+    }
+    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
+    // Deal with backoffs
+    _PyExitData *exit = _tstate->jit_tracer_state.initial_state.exit;
+    if (exit == NULL) {
+        // We hold a strong reference to the code object, so the instruction won't be freed.
+        if (err <= 0) {
+            _Py_BackoffCounter counter = _tstate->jit_tracer_state.initial_state.jump_backward_instr[1].counter;
+            _tstate->jit_tracer_state.initial_state.jump_backward_instr[1].counter = restart_backoff_counter(counter);
+        }
+        else {
+            _tstate->jit_tracer_state.initial_state.jump_backward_instr[1].counter = initial_jump_backoff_counter();
+        }
+    }
+    else {
+        // Likewise, we hold a strong reference to the executor containing this exit, so the exit is guaranteed
+        // to be valid to access.
+        if (err <= 0) {
+            exit->temperature = restart_backoff_counter(exit->temperature);
+        }
+        else {
+            exit->temperature = initial_temperature_backoff_counter();
+        }
+    }
+    _PyJit_FinalizeTracing(tstate);
+    return err;
+}
+#endif
+
 /* _PyEval_EvalFrameDefault is too large to optimize for speed with PGO on MSVC.
  */

@@ -1048,6 +1478,10 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int
     uint8_t opcode; /* Current opcode */
     int oparg; /* Current opcode argument, if any */
     assert(tstate->current_frame == NULL || tstate->current_frame->stackpointer != NULL);
+#if !USE_COMPUTED_GOTOS
+    uint8_t tracing_mode = 0;
+    uint8_t dispatch_code;
+#endif
 #endif
     _PyEntryFrame entry;

@@ -1118,9 +1552,9 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int
     stack_pointer = _PyFrame_GetStackPointer(frame);
 #if _Py_TAIL_CALL_INTERP
 # if Py_STATS
-    return _TAIL_CALL_error(frame, stack_pointer, tstate, next_instr, instruction_funcptr_table, 0, lastopcode);
+    return _TAIL_CALL_error(frame, stack_pointer, tstate, next_instr, instruction_funcptr_handler_table, 0, lastopcode);
 # else
-    return _TAIL_CALL_error(frame, stack_pointer, tstate, next_instr, instruction_funcptr_table, 0);
+    return _TAIL_CALL_error(frame, stack_pointer, tstate, next_instr, instruction_funcptr_handler_table, 0);
 # endif
 #else
     goto error;

@@ -1129,9 +1563,9 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int

 #if _Py_TAIL_CALL_INTERP
 # if Py_STATS
-    return _TAIL_CALL_start_frame(frame, NULL, tstate, NULL, instruction_funcptr_table, 0, lastopcode);
+    return _TAIL_CALL_start_frame(frame, NULL, tstate, NULL, instruction_funcptr_handler_table, 0, lastopcode);
 # else
-    return _TAIL_CALL_start_frame(frame, NULL, tstate, NULL, instruction_funcptr_table, 0);
+    return _TAIL_CALL_start_frame(frame, NULL, tstate, NULL, instruction_funcptr_handler_table, 0);
 # endif
 #else
     goto start_frame;

@@ -1173,7 +1607,9 @@ _PyTier2Interpreter(
 tier2_start:

     next_uop = current_executor->trace;
-    assert(next_uop->opcode == _START_EXECUTOR || next_uop->opcode == _COLD_EXIT);
+    assert(next_uop->opcode == _START_EXECUTOR ||
+           next_uop->opcode == _COLD_EXIT ||
+           next_uop->opcode == _COLD_DYNAMIC_EXIT);

 #undef LOAD_IP
 #define LOAD_IP(UNUSED) (void)0

@@ -1197,7 +1633,9 @@ _PyTier2Interpreter(
     uint64_t trace_uop_execution_counter = 0;
 #endif

-    assert(next_uop->opcode == _START_EXECUTOR || next_uop->opcode == _COLD_EXIT);
+    assert(next_uop->opcode == _START_EXECUTOR ||
+           next_uop->opcode == _COLD_EXIT ||
+           next_uop->opcode == _COLD_DYNAMIC_EXIT);
 tier2_dispatch:
     for (;;) {
         uopcode = next_uop->opcode;

@@ -2149,6 +2587,7 @@ do_raise(PyThreadState *tstate, PyObject *exc, PyObject *cause)
                           "calling %R should have returned an instance of "
                           "BaseException, not %R",
                           cause, Py_TYPE(fixed_cause));
+            Py_DECREF(fixed_cause);
             goto raise_error;
         }
         Py_DECREF(cause);

@@ -2674,12 +3113,6 @@ _PyEval_GetBuiltin(PyObject *name)
     return attr;
 }

-PyObject *
-_PyEval_GetBuiltinId(_Py_Identifier *name)
-{
-    return _PyEval_GetBuiltin(_PyUnicode_FromId(name));
-}
-
 PyObject *
 PyEval_GetLocals(void)
 {

@@ -207,6 +207,7 @@ drop_gil_impl(PyThreadState *tstate, struct _gil_runtime_state *gil)
     _Py_atomic_store_int_relaxed(&gil->locked, 0);
     if (tstate != NULL) {
         tstate->holds_gil = 0;
+        tstate->gil_requested = 0;
     }
     COND_SIGNAL(gil->cond);
     MUTEX_UNLOCK(gil->mutex);

@@ -320,6 +321,8 @@ take_gil(PyThreadState *tstate)

     MUTEX_LOCK(gil->mutex);

+    tstate->gil_requested = 1;
+
     int drop_requested = 0;
     while (_Py_atomic_load_int_relaxed(&gil->locked)) {
         unsigned long saved_switchnum = gil->switch_number;

@@ -407,6 +410,7 @@ take_gil(PyThreadState *tstate)
     }
     assert(_PyThreadState_CheckConsistency(tstate));

+    tstate->gil_requested = 0;
     tstate->holds_gil = 1;
     _Py_unset_eval_breaker_bit(tstate, _PY_GIL_DROP_REQUEST_BIT);
     update_eval_breaker_for_thread(interp, tstate);

@@ -62,8 +62,9 @@
 #ifdef Py_STATS
 #define INSTRUCTION_STATS(op) \
     do { \
+        PyStats *s = _PyStats_GET(); \
         OPCODE_EXE_INC(op); \
-        if (_Py_stats) _Py_stats->opcode_stats[lastopcode].pair_count[op]++; \
+        if (s) s->opcode_stats[lastopcode].pair_count[op]++; \
         lastopcode = op; \
     } while (0)
 #else

@@ -92,11 +93,19 @@
 #   define Py_PRESERVE_NONE_CC __attribute__((preserve_none))
     Py_PRESERVE_NONE_CC typedef PyObject* (*py_tail_call_funcptr)(TAIL_CALL_PARAMS);

+#   define DISPATCH_TABLE_VAR instruction_funcptr_table
+#   define DISPATCH_TABLE instruction_funcptr_handler_table
+#   define TRACING_DISPATCH_TABLE instruction_funcptr_tracing_table
 #   define TARGET(op) Py_PRESERVE_NONE_CC PyObject *_TAIL_CALL_##op(TAIL_CALL_PARAMS)

 #   define DISPATCH_GOTO() \
     do { \
         Py_MUSTTAIL return (((py_tail_call_funcptr *)instruction_funcptr_table)[opcode])(TAIL_CALL_ARGS); \
     } while (0)
+#   define DISPATCH_GOTO_NON_TRACING() \
+    do { \
+        Py_MUSTTAIL return (((py_tail_call_funcptr *)DISPATCH_TABLE)[opcode])(TAIL_CALL_ARGS); \
+    } while (0)
 #   define JUMP_TO_LABEL(name) \
     do { \
         Py_MUSTTAIL return (_TAIL_CALL_##name)(TAIL_CALL_ARGS); \

@@ -114,19 +123,36 @@
 # endif
 #  define LABEL(name) TARGET(name)
 #elif USE_COMPUTED_GOTOS
+#  define DISPATCH_TABLE_VAR opcode_targets
+#  define DISPATCH_TABLE opcode_targets_table
+#  define TRACING_DISPATCH_TABLE opcode_tracing_targets_table
 #  define TARGET(op) TARGET_##op:
 #  define DISPATCH_GOTO() goto *opcode_targets[opcode]
+#  define DISPATCH_GOTO_NON_TRACING() goto *DISPATCH_TABLE[opcode];
 #  define JUMP_TO_LABEL(name) goto name;
 #  define JUMP_TO_PREDICTED(name) goto PREDICTED_##name;
 #  define LABEL(name) name:
 #else
 #  define TARGET(op) case op: TARGET_##op:
-#  define DISPATCH_GOTO() goto dispatch_opcode
+#  define DISPATCH_GOTO() dispatch_code = opcode | tracing_mode ; goto dispatch_opcode
+#  define DISPATCH_GOTO_NON_TRACING() dispatch_code = opcode; goto dispatch_opcode
 #  define JUMP_TO_LABEL(name) goto name;
 #  define JUMP_TO_PREDICTED(name) goto PREDICTED_##name;
 #  define LABEL(name) name:
 #endif

+#if (_Py_TAIL_CALL_INTERP || USE_COMPUTED_GOTOS) && _Py_TIER2
+#  define IS_JIT_TRACING() (DISPATCH_TABLE_VAR == TRACING_DISPATCH_TABLE)
+#  define ENTER_TRACING() \
+       DISPATCH_TABLE_VAR = TRACING_DISPATCH_TABLE;
+#  define LEAVE_TRACING() \
+       DISPATCH_TABLE_VAR = DISPATCH_TABLE;
+#else
+#  define IS_JIT_TRACING() (tracing_mode != 0)
+#  define ENTER_TRACING() tracing_mode = 255
+#  define LEAVE_TRACING() tracing_mode = 0
+#endif
+
 /* PRE_DISPATCH_GOTO() does lltrace if enabled. Normally a no-op */
 #ifdef Py_DEBUG
 #define PRE_DISPATCH_GOTO() if (frame->lltrace >= 5) { \

@@ -163,11 +189,19 @@ do { \
     DISPATCH_GOTO(); \
 }

+#define DISPATCH_NON_TRACING() \
+{ \
+    assert(frame->stackpointer == NULL); \
+    NEXTOPARG(); \
+    PRE_DISPATCH_GOTO(); \
+    DISPATCH_GOTO_NON_TRACING(); \
+}
+
 #define DISPATCH_SAME_OPARG() \
 { \
     opcode = next_instr->op.code; \
     PRE_DISPATCH_GOTO(); \
-    DISPATCH_GOTO(); \
+    DISPATCH_GOTO_NON_TRACING(); \
 }

 #define DISPATCH_INLINED(NEW_FRAME) \

@@ -279,6 +313,7 @@ GETITEM(PyObject *v, Py_ssize_t i) {
 /* This takes a uint16_t instead of a _Py_BackoffCounter,
  * because it is used directly on the cache entry in generated code,
  * which is always an integral type. */
+// Force re-specialization when tracing a side exit to get good side exits.
 #define ADAPTIVE_COUNTER_TRIGGERS(COUNTER) \
     backoff_counter_triggers(forge_backoff_counter((COUNTER)))

@@ -365,12 +400,19 @@ do { \
     next_instr = _Py_jit_entry((EXECUTOR), frame, stack_pointer, tstate); \
     frame = tstate->current_frame; \
     stack_pointer = _PyFrame_GetStackPointer(frame); \
+    int keep_tracing_bit = (uintptr_t)next_instr & 1; \
+    next_instr = (_Py_CODEUNIT *)(((uintptr_t)next_instr) & (~1)); \
     if (next_instr == NULL) { \
         /* gh-140104: The exception handler expects frame->instr_ptr
            to after this_instr, not this_instr! */ \
         next_instr = frame->instr_ptr + 1; \
         JUMP_TO_LABEL(error); \
     } \
+    if (keep_tracing_bit) { \
+        assert(((_PyThreadStateImpl *)tstate)->jit_tracer_state.prev_state.code_curr_size == 2); \
+        ENTER_TRACING(); \
+        DISPATCH_NON_TRACING(); \
+    } \
     DISPATCH(); \
 } while (0)

@@ -381,13 +423,23 @@ do { \
     goto tier2_start; \
 } while (0)

-#define GOTO_TIER_ONE(TARGET) \
-do \
-{ \
-    tstate->current_executor = NULL; \
-    OPT_HIST(trace_uop_execution_counter, trace_run_length_hist); \
-    _PyFrame_SetStackPointer(frame, stack_pointer); \
-    return TARGET; \
+#define GOTO_TIER_ONE_SETUP \
+    tstate->current_executor = NULL; \
+    OPT_HIST(trace_uop_execution_counter, trace_run_length_hist); \
+    _PyFrame_SetStackPointer(frame, stack_pointer);
+
+#define GOTO_TIER_ONE(TARGET) \
+do \
+{ \
+    GOTO_TIER_ONE_SETUP \
+    return (_Py_CODEUNIT *)(TARGET); \
+} while (0)
+
+#define GOTO_TIER_ONE_CONTINUE_TRACING(TARGET) \
+do \
+{ \
+    GOTO_TIER_ONE_SETUP \
+    return (_Py_CODEUNIT *)(((uintptr_t)(TARGET))| 1); \
 } while (0)

 #define CURRENT_OPARG() (next_uop[-1].oparg)

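GOTO_TIER_ONE_CONTINUE_TRACING smuggles a flag through the return value by setting bit 0 of the instruction pointer, which works because code-unit addresses are at least 2-byte aligned; the keep_tracing_bit logic in the _Py_jit_entry macro above masks it back off. A minimal model of the tagging trick (plain integers standing in for pointers):

    # Minimal model of the low-bit tag used by GOTO_TIER_ONE_CONTINUE_TRACING.
    def tag(addr):            # returns addr with the "keep tracing" bit set
        assert addr % 2 == 0  # code units are at least 2-byte aligned
        return addr | 1

    def untag(addr):
        return addr & ~1, bool(addr & 1)

    ptr = 0x5000_0010
    base, keep_tracing = untag(tag(ptr))
    assert base == ptr and keep_tracing
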
@@ -406,7 +458,7 @@ do { \
#define STACKREFS_TO_PYOBJECTS(ARGS, ARG_COUNT, NAME) \
    /* +1 because vectorcall might use -1 to write self */ \
    PyObject *NAME##_temp[MAX_STACKREF_SCRATCH+1]; \
    PyObject **NAME = _PyObjectArray_FromStackRefArray(ARGS, ARG_COUNT, NAME##_temp + 1);
    PyObject **NAME = _PyObjectArray_FromStackRefArray(ARGS, ARG_COUNT, NAME##_temp);

#define STACKREFS_TO_PYOBJECTS_CLEANUP(NAME) \
    /* +1 because we +1 previously */ \
24  Python/clinic/bltinmodule.c.h  (generated)
@@ -333,7 +333,8 @@ PyDoc_STRVAR(builtin_chr__doc__,

PyDoc_STRVAR(builtin_compile__doc__,
"compile($module, /, source, filename, mode, flags=0,\n"
"        dont_inherit=False, optimize=-1, *, _feature_version=-1)\n"
"        dont_inherit=False, optimize=-1, *, module=None,\n"
"        _feature_version=-1)\n"
"--\n"
"\n"
"Compile source into a code object that can be executed by exec() or eval().\n"
@@ -355,7 +356,7 @@ PyDoc_STRVAR(builtin_compile__doc__,
static PyObject *
builtin_compile_impl(PyObject *module, PyObject *source, PyObject *filename,
                     const char *mode, int flags, int dont_inherit,
                     int optimize, int feature_version);
                     int optimize, PyObject *modname, int feature_version);

static PyObject *
builtin_compile(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
@@ -363,7 +364,7 @@ builtin_compile(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObj
    PyObject *return_value = NULL;
    #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)

    #define NUM_KEYWORDS 7
    #define NUM_KEYWORDS 8
    static struct {
        PyGC_Head _this_is_not_used;
        PyObject_VAR_HEAD
@@ -372,7 +373,7 @@ builtin_compile(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObj
    } _kwtuple = {
        .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
        .ob_hash = -1,
        .ob_item = { &_Py_ID(source), &_Py_ID(filename), &_Py_ID(mode), &_Py_ID(flags), &_Py_ID(dont_inherit), &_Py_ID(optimize), &_Py_ID(_feature_version), },
        .ob_item = { &_Py_ID(source), &_Py_ID(filename), &_Py_ID(mode), &_Py_ID(flags), &_Py_ID(dont_inherit), &_Py_ID(optimize), &_Py_ID(module), &_Py_ID(_feature_version), },
    };
    #undef NUM_KEYWORDS
    #define KWTUPLE (&_kwtuple.ob_base.ob_base)
@@ -381,14 +382,14 @@ builtin_compile(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObj
    # define KWTUPLE NULL
    #endif // !Py_BUILD_CORE

    static const char * const _keywords[] = {"source", "filename", "mode", "flags", "dont_inherit", "optimize", "_feature_version", NULL};
    static const char * const _keywords[] = {"source", "filename", "mode", "flags", "dont_inherit", "optimize", "module", "_feature_version", NULL};
    static _PyArg_Parser _parser = {
        .keywords = _keywords,
        .fname = "compile",
        .kwtuple = KWTUPLE,
    };
    #undef KWTUPLE
    PyObject *argsbuf[7];
    PyObject *argsbuf[8];
    Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 3;
    PyObject *source;
    PyObject *filename = NULL;
@@ -396,6 +397,7 @@ builtin_compile(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObj
    int flags = 0;
    int dont_inherit = 0;
    int optimize = -1;
    PyObject *modname = Py_None;
    int feature_version = -1;

    args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser,
@@ -454,12 +456,18 @@ skip_optional_pos:
    if (!noptargs) {
        goto skip_optional_kwonly;
    }
    feature_version = PyLong_AsInt(args[6]);
    if (args[6]) {
        modname = args[6];
        if (!--noptargs) {
            goto skip_optional_kwonly;
        }
    }
    feature_version = PyLong_AsInt(args[7]);
    if (feature_version == -1 && PyErr_Occurred()) {
        goto exit;
    }
skip_optional_kwonly:
    return_value = builtin_compile_impl(module, source, filename, mode, flags, dont_inherit, optimize, feature_version);
    return_value = builtin_compile_impl(module, source, filename, mode, flags, dont_inherit, optimize, modname, feature_version);

exit:
    /* Cleanup for filename */
Python/compile.c

@@ -104,11 +104,13 @@ typedef struct _PyCompiler {
     * (including instructions for nested code objects)
     */
    int c_disable_warning;
    PyObject *c_module;
} compiler;

static int
compiler_setup(compiler *c, mod_ty mod, PyObject *filename,
               PyCompilerFlags *flags, int optimize, PyArena *arena)
               PyCompilerFlags *flags, int optimize, PyArena *arena,
               PyObject *module)
{
    PyCompilerFlags local_flags = _PyCompilerFlags_INIT;

@@ -126,6 +128,7 @@ compiler_setup(compiler *c, mod_ty mod, PyObject *filename,
    if (!_PyFuture_FromAST(mod, filename, &c->c_future)) {
        return ERROR;
    }
    c->c_module = Py_XNewRef(module);
    if (!flags) {
        flags = &local_flags;
    }
@@ -136,7 +139,9 @@ compiler_setup(compiler *c, mod_ty mod, PyObject *filename,
    c->c_optimize = (optimize == -1) ? _Py_GetConfig()->optimization_level : optimize;
    c->c_save_nested_seqs = false;

    if (!_PyAST_Preprocess(mod, arena, filename, c->c_optimize, merged, 0, 1)) {
    if (!_PyAST_Preprocess(mod, arena, filename, c->c_optimize, merged,
                           0, 1, module))
    {
        return ERROR;
    }
    c->c_st = _PySymtable_Build(mod, filename, &c->c_future);
@@ -156,6 +161,7 @@ compiler_free(compiler *c)
        _PySymtable_Free(c->c_st);
    }
    Py_XDECREF(c->c_filename);
    Py_XDECREF(c->c_module);
    Py_XDECREF(c->c_const_cache);
    Py_XDECREF(c->c_stack);
    PyMem_Free(c);
@@ -163,13 +169,13 @@ compiler_free(compiler *c)

static compiler*
new_compiler(mod_ty mod, PyObject *filename, PyCompilerFlags *pflags,
             int optimize, PyArena *arena)
             int optimize, PyArena *arena, PyObject *module)
{
    compiler *c = PyMem_Calloc(1, sizeof(compiler));
    if (c == NULL) {
        return NULL;
    }
    if (compiler_setup(c, mod, filename, pflags, optimize, arena) < 0) {
    if (compiler_setup(c, mod, filename, pflags, optimize, arena, module) < 0) {
        compiler_free(c);
        return NULL;
    }
@@ -1241,7 +1247,8 @@ _PyCompile_Warn(compiler *c, location loc, const char *format, ...)
        return ERROR;
    }
    int ret = _PyErr_EmitSyntaxWarning(msg, c->c_filename, loc.lineno, loc.col_offset + 1,
                                       loc.end_lineno, loc.end_col_offset + 1);
                                       loc.end_lineno, loc.end_col_offset + 1,
                                       c->c_module);
    Py_DECREF(msg);
    return ret;
}
@@ -1496,10 +1503,10 @@ _PyCompile_OptimizeAndAssemble(compiler *c, int addNone)

PyCodeObject *
_PyAST_Compile(mod_ty mod, PyObject *filename, PyCompilerFlags *pflags,
               int optimize, PyArena *arena)
               int optimize, PyArena *arena, PyObject *module)
{
    assert(!PyErr_Occurred());
    compiler *c = new_compiler(mod, filename, pflags, optimize, arena);
    compiler *c = new_compiler(mod, filename, pflags, optimize, arena, module);
    if (c == NULL) {
        return NULL;
    }
@@ -1512,7 +1519,8 @@ _PyAST_Compile(mod_ty mod, PyObject *filename, PyCompilerFlags *pflags,

int
_PyCompile_AstPreprocess(mod_ty mod, PyObject *filename, PyCompilerFlags *cf,
                         int optimize, PyArena *arena, int no_const_folding)
                         int optimize, PyArena *arena, int no_const_folding,
                         PyObject *module)
{
    _PyFutureFeatures future;
    if (!_PyFuture_FromAST(mod, filename, &future)) {
@@ -1522,7 +1530,9 @@ _PyCompile_AstPreprocess(mod_ty mod, PyObject *filename, PyCompilerFlags *cf,
    if (optimize == -1) {
        optimize = _Py_GetConfig()->optimization_level;
    }
    if (!_PyAST_Preprocess(mod, arena, filename, optimize, flags, no_const_folding, 0)) {
    if (!_PyAST_Preprocess(mod, arena, filename, optimize, flags,
                           no_const_folding, 0, module))
    {
        return -1;
    }
    return 0;
@@ -1647,7 +1657,7 @@ _PyCompile_CodeGen(PyObject *ast, PyObject *filename, PyCompilerFlags *pflags,
        return NULL;
    }

    compiler *c = new_compiler(mod, filename, pflags, optimize, arena);
    compiler *c = new_compiler(mod, filename, pflags, optimize, arena, NULL);
    if (c == NULL) {
        _PyArena_Free(arena);
        return NULL;
Python/critical_section.c

@@ -17,18 +17,30 @@ untag_critical_section(uintptr_t tag)
#endif

void
_PyCriticalSection_BeginSlow(PyCriticalSection *c, PyMutex *m)
_PyCriticalSection_BeginSlow(PyThreadState *tstate, PyCriticalSection *c, PyMutex *m)
{
#ifdef Py_GIL_DISABLED
    PyThreadState *tstate = _PyThreadState_GET();
    // As an optimisation for locking the same object recursively, skip
    // locking if the mutex is currently locked by the top-most critical
    // section.
    if (tstate->critical_section &&
        untag_critical_section(tstate->critical_section)->_cs_mutex == m) {
        c->_cs_mutex = NULL;
        c->_cs_prev = 0;
        return;
    // If the top-most critical section is a two-mutex critical section,
    // then locking is skipped if either mutex is m.
    if (tstate->critical_section) {
        PyCriticalSection *prev = untag_critical_section(tstate->critical_section);
        if (prev->_cs_mutex == m) {
            c->_cs_mutex = NULL;
            c->_cs_prev = 0;
            return;
        }
        if (tstate->critical_section & _Py_CRITICAL_SECTION_TWO_MUTEXES) {
            PyCriticalSection2 *prev2 = (PyCriticalSection2 *)
                untag_critical_section(tstate->critical_section);
            if (prev2->_cs_mutex2 == m) {
                c->_cs_mutex = NULL;
                c->_cs_prev = 0;
                return;
            }
        }
    }
    c->_cs_mutex = NULL;
    c->_cs_prev = (uintptr_t)tstate->critical_section;
@@ -40,11 +52,10 @@ _PyCriticalSection_BeginSlow(PyCriticalSection *c, PyMutex *m)
}

void
_PyCriticalSection2_BeginSlow(PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2,
_PyCriticalSection2_BeginSlow(PyThreadState *tstate, PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2,
                              int is_m1_locked)
{
#ifdef Py_GIL_DISABLED
    PyThreadState *tstate = _PyThreadState_GET();
    c->_cs_base._cs_mutex = NULL;
    c->_cs_mutex2 = NULL;
    c->_cs_base._cs_prev = tstate->critical_section;
@@ -126,7 +137,7 @@ void
PyCriticalSection_Begin(PyCriticalSection *c, PyObject *op)
{
#ifdef Py_GIL_DISABLED
    _PyCriticalSection_Begin(c, op);
    _PyCriticalSection_Begin(_PyThreadState_GET(), c, op);
#endif
}

@@ -135,7 +146,7 @@ void
PyCriticalSection_BeginMutex(PyCriticalSection *c, PyMutex *m)
{
#ifdef Py_GIL_DISABLED
    _PyCriticalSection_BeginMutex(c, m);
    _PyCriticalSection_BeginMutex(_PyThreadState_GET(), c, m);
#endif
}

@@ -144,7 +155,7 @@ void
PyCriticalSection_End(PyCriticalSection *c)
{
#ifdef Py_GIL_DISABLED
    _PyCriticalSection_End(c);
    _PyCriticalSection_End(_PyThreadState_GET(), c);
#endif
}

@@ -153,7 +164,7 @@ void
PyCriticalSection2_Begin(PyCriticalSection2 *c, PyObject *a, PyObject *b)
{
#ifdef Py_GIL_DISABLED
    _PyCriticalSection2_Begin(c, a, b);
    _PyCriticalSection2_Begin(_PyThreadState_GET(), c, a, b);
#endif
}

@@ -162,7 +173,7 @@ void
PyCriticalSection2_BeginMutex(PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2)
{
#ifdef Py_GIL_DISABLED
    _PyCriticalSection2_BeginMutex(c, m1, m2);
    _PyCriticalSection2_BeginMutex(_PyThreadState_GET(), c, m1, m2);
#endif
}

@@ -171,6 +182,6 @@ void
PyCriticalSection2_End(PyCriticalSection2 *c)
{
#ifdef Py_GIL_DISABLED
    _PyCriticalSection2_End(c);
    _PyCriticalSection2_End(_PyThreadState_GET(), c);
#endif
}
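The recursive-lock skip above means that beginning a critical section on an object whose mutex is already held by the top-most active section (or by either mutex of a two-mutex section) degrades to a no-op. A minimal sketch of the calling pattern this optimises, using the public API shown in this diff; it is only meaningful on a Py_GIL_DISABLED build, where these calls actually lock:

#include <Python.h>

static void
mutate_nested(PyObject *op)
{
    PyCriticalSection outer, inner;
    PyCriticalSection_Begin(&outer, op);
    /* ... first mutation of op ... */
    PyCriticalSection_Begin(&inner, op);  /* same mutex is already held by the
                                             top-most section, so the slow
                                             path skips locking entirely */
    /* ... nested mutation of op ... */
    PyCriticalSection_End(&inner);
    PyCriticalSection_End(&outer);
}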
Python/errors.c

@@ -1960,10 +1960,11 @@ _PyErr_RaiseSyntaxError(PyObject *msg, PyObject *filename, int lineno, int col_o
 */
int
_PyErr_EmitSyntaxWarning(PyObject *msg, PyObject *filename, int lineno, int col_offset,
                         int end_lineno, int end_col_offset)
                         int end_lineno, int end_col_offset,
                         PyObject *module)
{
    if (PyErr_WarnExplicitObject(PyExc_SyntaxWarning, msg,
                                 filename, lineno, NULL, NULL) < 0)
    if (PyErr_WarnExplicitObject(PyExc_SyntaxWarning, msg, filename, lineno,
                                 module, NULL) < 0)
    {
        if (PyErr_ExceptionMatches(PyExc_SyntaxWarning)) {
            /* Replace the SyntaxWarning exception with a SyntaxError
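What the threaded-through module argument buys: PyErr_WarnExplicitObject's fifth parameter is the module name that the warnings machinery matches against module-based filters, and this helper previously always passed NULL. A hedged sketch of emitting such a warning from C; the module name "mypkg.generated" is an invented placeholder, not anything from this diff:

#include <Python.h>

static int
emit_syntax_warning_for_module(void)
{
    PyObject *msg = PyUnicode_FromString("'return' in a 'finally' block");
    PyObject *filename = PyUnicode_FromString("<generated>");
    PyObject *module = PyUnicode_FromString("mypkg.generated");  /* placeholder */
    int ret = -1;
    if (msg != NULL && filename != NULL && module != NULL) {
        /* the module argument lets "module=..." warning filters match */
        ret = PyErr_WarnExplicitObject(PyExc_SyntaxWarning, msg,
                                       filename, 1, module, NULL);
    }
    Py_XDECREF(msg);
    Py_XDECREF(filename);
    Py_XDECREF(module);
    return ret;
}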
1239  Python/executor_cases.c.h  (generated)
File diff suppressed because it is too large
Python/fileutils.c

@@ -2118,7 +2118,6 @@ _Py_wrealpath(const wchar_t *path,
              wchar_t *resolved_path, size_t resolved_path_len)
{
    char *cpath;
    char cresolved_path[MAXPATHLEN];
    wchar_t *wresolved_path;
    char *res;
    size_t r;
@@ -2127,12 +2126,14 @@ _Py_wrealpath(const wchar_t *path,
        errno = EINVAL;
        return NULL;
    }
    res = realpath(cpath, cresolved_path);
    res = realpath(cpath, NULL);
    PyMem_RawFree(cpath);
    if (res == NULL)
        return NULL;

    wresolved_path = Py_DecodeLocale(cresolved_path, &r);
    wresolved_path = Py_DecodeLocale(res, &r);
    free(res);

    if (wresolved_path == NULL) {
        errno = EINVAL;
        return NULL;
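The change above switches from a fixed MAXPATHLEN buffer to the POSIX.1-2008 form of realpath(), which allocates the result itself when its second argument is NULL, so paths longer than MAXPATHLEN no longer get truncated. A standalone sketch of that allocation pattern (a hypothetical demo, not CPython code):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* Passing NULL asks realpath() to malloc() a buffer of the right size
     * (POSIX.1-2008), so no fixed-size array is needed. */
    char *res = realpath(".", NULL);
    if (res == NULL) {
        perror("realpath");
        return 1;
    }
    printf("%s\n", res);
    free(res);   /* the caller owns the malloc'd result */
    return 0;
}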
47  Python/gc.c
@@ -483,11 +483,12 @@ validate_consistent_old_space(PyGC_Head *head)
/* Set all gc_refs = ob_refcnt.  After this, gc_refs is > 0 and
 * PREV_MASK_COLLECTING bit is set for all objects in containers.
 */
static void
static Py_ssize_t
update_refs(PyGC_Head *containers)
{
    PyGC_Head *next;
    PyGC_Head *gc = GC_NEXT(containers);
    Py_ssize_t candidates = 0;

    while (gc != containers) {
        next = GC_NEXT(gc);
@@ -519,7 +520,9 @@ update_refs(PyGC_Head *containers)
         */
        _PyObject_ASSERT(op, gc_get_refs(gc) != 0);
        gc = next;
        candidates++;
    }
    return candidates;
}

/* A traversal callback for subtract_refs. */
@@ -1240,7 +1243,7 @@ flag set but it does not clear it to skip unnecessary iteration. Before the
flag is cleared (for example, by using 'clear_unreachable_mask' function or
by a call to 'move_legacy_finalizers'), the 'unreachable' list is not a normal
list and we can not use most gc_list_* functions for it. */
static inline void
static inline Py_ssize_t
deduce_unreachable(PyGC_Head *base, PyGC_Head *unreachable) {
    validate_list(base, collecting_clear_unreachable_clear);
    /* Using ob_refcnt and gc_refs, calculate which objects in the
@@ -1248,7 +1251,7 @@ deduce_unreachable(PyGC_Head *base, PyGC_Head *unreachable) {
     * refcount greater than 0 when all the references within the
     * set are taken into account).
     */
    update_refs(base);  // gc_prev is used for gc_refs
    Py_ssize_t candidates = update_refs(base);  // gc_prev is used for gc_refs
    subtract_refs(base);

    /* Leave everything reachable from outside base in base, and move
@@ -1289,6 +1292,7 @@ deduce_unreachable(PyGC_Head *base, PyGC_Head *unreachable) {
    move_unreachable(base, unreachable);  // gc_prev is pointer again
    validate_list(base, collecting_clear_unreachable_clear);
    validate_list(unreachable, collecting_set_unreachable_set);
    return candidates;
}

/* Handle objects that may have resurrected after a call to 'finalize_garbage', moving
@@ -1363,8 +1367,10 @@ gc_list_set_space(PyGC_Head *list, int space)
static void
add_stats(GCState *gcstate, int gen, struct gc_collection_stats *stats)
{
    gcstate->generation_stats[gen].duration += stats->duration;
    gcstate->generation_stats[gen].collected += stats->collected;
    gcstate->generation_stats[gen].uncollectable += stats->uncollectable;
    gcstate->generation_stats[gen].candidates += stats->candidates;
    gcstate->generation_stats[gen].collections += 1;
}

@@ -1387,7 +1393,6 @@ gc_collect_young(PyThreadState *tstate,
    validate_spaces(gcstate);
    gcstate->young.count = 0;
    gcstate->old[gcstate->visited_space].count++;
    add_stats(gcstate, 0, stats);
    validate_spaces(gcstate);
}

@@ -1662,6 +1667,7 @@ gc_collect_increment(PyThreadState *tstate, struct gc_collection_stats *stats)
        Py_ssize_t objects_marked = mark_at_start(tstate);
        GC_STAT_ADD(1, objects_transitively_reachable, objects_marked);
        gcstate->work_to_do -= objects_marked;
        stats->candidates += objects_marked;
        validate_spaces(gcstate);
        return;
    }
@@ -1701,7 +1707,6 @@ gc_collect_increment(PyThreadState *tstate, struct gc_collection_stats *stats)
    assert(gc_list_is_empty(&increment));
    gcstate->work_to_do -= increment_size;

    add_stats(gcstate, 1, stats);
    if (gc_list_is_empty(not_visited)) {
        completed_scavenge(gcstate);
    }
@@ -1736,7 +1741,6 @@ gc_collect_full(PyThreadState *tstate,
    completed_scavenge(gcstate);
    _PyGC_ClearAllFreeLists(tstate->interp);
    validate_spaces(gcstate);
    add_stats(gcstate, 2, stats);
}

/* This is the main function. Read this to understand how the
@@ -1756,7 +1760,7 @@ gc_collect_region(PyThreadState *tstate,
    assert(!_PyErr_Occurred(tstate));

    gc_list_init(&unreachable);
    deduce_unreachable(from, &unreachable);
    stats->candidates = deduce_unreachable(from, &unreachable);
    validate_consistent_old_space(from);
    untrack_tuples(from);

@@ -1846,10 +1850,12 @@ do_gc_callback(GCState *gcstate, const char *phase,
    assert(PyList_CheckExact(gcstate->callbacks));
    PyObject *info = NULL;
    if (PyList_GET_SIZE(gcstate->callbacks) != 0) {
        info = Py_BuildValue("{sisnsn}",
        info = Py_BuildValue("{sisnsnsnsd}",
            "generation", generation,
            "collected", stats->collected,
            "uncollectable", stats->uncollectable);
            "uncollectable", stats->uncollectable,
            "candidates", stats->candidates,
            "duration", stats->duration);
        if (info == NULL) {
            PyErr_FormatUnraisable("Exception ignored while invoking gc callbacks");
            return;
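For reference, the widened Py_BuildValue format pairs each "s" key with a value code: i is int, n is Py_ssize_t, d is double. A small sketch building a dict of the same shape with placeholder numbers:

#include <Python.h>

static PyObject *
make_gc_info_dict(void)
{
    /* Same shape as the callback info dict above; values are dummies */
    return Py_BuildValue("{sisnsnsnsd}",
                         "generation", 2,
                         "collected", (Py_ssize_t)10,
                         "uncollectable", (Py_ssize_t)0,
                         "candidates", (Py_ssize_t)128,
                         "duration", 0.003);
}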
@@ -2074,20 +2080,21 @@ _PyGC_Collect(PyThreadState *tstate, int generation, _PyGC_Reason reason)
        // Don't start a garbage collection if one is already in progress.
        return 0;
    }
    gcstate->frame = tstate->current_frame;

    struct gc_collection_stats stats = { 0 };
    if (reason != _Py_GC_REASON_SHUTDOWN) {
        invoke_gc_callback(gcstate, "start", generation, &stats);
    }
    PyTime_t t1;
    if (gcstate->debug & _PyGC_DEBUG_STATS) {
        PySys_WriteStderr("gc: collecting generation %d...\n", generation);
        (void)PyTime_PerfCounterRaw(&t1);
        show_stats_each_generations(gcstate);
    }
    if (PyDTrace_GC_START_ENABLED()) {
        PyDTrace_GC_START(generation);
    }
    PyTime_t start, stop;
    (void)PyTime_PerfCounterRaw(&start);
    PyObject *exc = _PyErr_GetRaisedException(tstate);
    switch(generation) {
    case 0:
@@ -2102,6 +2109,9 @@ _PyGC_Collect(PyThreadState *tstate, int generation, _PyGC_Reason reason)
    default:
        Py_UNREACHABLE();
    }
    (void)PyTime_PerfCounterRaw(&stop);
    stats.duration = PyTime_AsSecondsDouble(stop - start);
    add_stats(gcstate, generation, &stats);
    if (PyDTrace_GC_DONE_ENABLED()) {
        PyDTrace_GC_DONE(stats.uncollectable + stats.collected);
    }
@@ -2111,22 +2121,21 @@ _PyGC_Collect(PyThreadState *tstate, int generation, _PyGC_Reason reason)
    _PyErr_SetRaisedException(tstate, exc);
    GC_STAT_ADD(generation, objects_collected, stats.collected);
#ifdef Py_STATS
    if (_Py_stats) {
    PyStats *s = _PyStats_GET();
    if (s) {
        GC_STAT_ADD(generation, object_visits,
            _Py_stats->object_stats.object_visits);
        _Py_stats->object_stats.object_visits = 0;
            s->object_stats.object_visits);
        s->object_stats.object_visits = 0;
    }
#endif
    validate_spaces(gcstate);
    gcstate->frame = NULL;
    _Py_atomic_store_int(&gcstate->collecting, 0);

    if (gcstate->debug & _PyGC_DEBUG_STATS) {
        PyTime_t t2;
        (void)PyTime_PerfCounterRaw(&t2);
        double d = PyTime_AsSecondsDouble(t2 - t1);
        PySys_WriteStderr(
            "gc: done, %zd unreachable, %zd uncollectable, %.4fs elapsed\n",
            stats.collected + stats.uncollectable, stats.uncollectable, d
            stats.collected + stats.uncollectable, stats.uncollectable, stats.duration
        );
    }

@@ -2234,7 +2243,7 @@ _PyGC_Fini(PyInterpreterState *interp)
void
_PyGC_Dump(PyGC_Head *g)
{
    _PyObject_Dump(FROM_GC(g));
    PyUnstable_Object_Dump(FROM_GC(g));
}

Python/gc_free_threading.c

@@ -100,6 +100,7 @@ struct collection_state {
    int skip_deferred_objects;
    Py_ssize_t collected;
    Py_ssize_t uncollectable;
    Py_ssize_t candidates;
    Py_ssize_t long_lived_total;
    struct worklist unreachable;
    struct worklist legacy_finalizers;
@@ -675,10 +676,11 @@ gc_mark_span_push(gc_span_stack_t *ss, PyObject **start, PyObject **end)
    else {
        ss->capacity *= 2;
    }
    ss->stack = (gc_span_t *)PyMem_Realloc(ss->stack, ss->capacity * sizeof(gc_span_t));
    if (ss->stack == NULL) {
    gc_span_t *new_stack = (gc_span_t *)PyMem_Realloc(ss->stack, ss->capacity * sizeof(gc_span_t));
    if (new_stack == NULL) {
        return -1;
    }
    ss->stack = new_stack;
    }
    assert(end > start);
    ss->stack[ss->size].start = start;
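The fix above is the standard realloc() discipline: never overwrite the only pointer to a block with realloc()'s result, because on failure realloc() returns NULL while the old block stays allocated, and the original pointer would be lost. The same pattern as a generic sketch:

#include <stdlib.h>

static int
grow_array(int **items, size_t *capacity)
{
    size_t new_capacity = *capacity ? *capacity * 2 : 8;
    int *tmp = realloc(*items, new_capacity * sizeof(int));
    if (tmp == NULL) {
        return -1;   /* *items is still valid and still owned by the caller */
    }
    *items = tmp;    /* only replace the pointer once realloc succeeded */
    *capacity = new_capacity;
    return 0;
}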
@@ -974,15 +976,12 @@ static bool
update_refs(const mi_heap_t *heap, const mi_heap_area_t *area,
            void *block, size_t block_size, void *args)
{
    struct collection_state *state = (struct collection_state *)args;
    PyObject *op = op_from_block(block, args, false);
    if (op == NULL) {
        return true;
    }

    if (gc_is_alive(op)) {
        return true;
    }

    // Exclude immortal objects from garbage collection
    if (_Py_IsImmortal(op)) {
        op->ob_tid = 0;
@@ -990,6 +989,11 @@ update_refs(const mi_heap_t *heap, const mi_heap_area_t *area,
        gc_clear_unreachable(op);
        return true;
    }
    // Marked objects count as candidates, immortals don't:
    state->candidates++;
    if (gc_is_alive(op)) {
        return true;
    }

    Py_ssize_t refcount = Py_REFCNT(op);
    if (_PyObject_HasDeferredRefcount(op)) {
@@ -1910,7 +1914,8 @@ handle_resurrected_objects(struct collection_state *state)
static void
invoke_gc_callback(PyThreadState *tstate, const char *phase,
                   int generation, Py_ssize_t collected,
                   Py_ssize_t uncollectable)
                   Py_ssize_t uncollectable, Py_ssize_t candidates,
                   double duration)
{
    assert(!_PyErr_Occurred(tstate));

@@ -1924,10 +1929,12 @@ invoke_gc_callback(PyThreadState *tstate, const char *phase,
    assert(PyList_CheckExact(gcstate->callbacks));
    PyObject *info = NULL;
    if (PyList_GET_SIZE(gcstate->callbacks) != 0) {
        info = Py_BuildValue("{sisnsn}",
        info = Py_BuildValue("{sisnsnsnsd}",
            "generation", generation,
            "collected", collected,
            "uncollectable", uncollectable);
            "uncollectable", uncollectable,
            "candidates", candidates,
            "duration", duration);
        if (info == NULL) {
            PyErr_FormatUnraisable("Exception ignored while "
                                   "invoking gc callbacks");
@@ -2203,7 +2210,19 @@ record_deallocation(PyThreadState *tstate)
    gc->alloc_count--;
    if (gc->alloc_count <= -LOCAL_ALLOC_COUNT_THRESHOLD) {
        GCState *gcstate = &tstate->interp->gc;
        _Py_atomic_add_int(&gcstate->young.count, (int)gc->alloc_count);
        int count = _Py_atomic_load_int_relaxed(&gcstate->young.count);
        int new_count;
        do {
            if (count == 0) {
                break;
            }
            new_count = count + (int)gc->alloc_count;
            if (new_count < 0) {
                new_count = 0;
            }
        } while (!_Py_atomic_compare_exchange_int(&gcstate->young.count,
                                                  &count,
                                                  new_count));
        gc->alloc_count = 0;
    }
}
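The replacement above turns an unconditional atomic add into a compare-exchange loop that clamps the young-generation count at zero and leaves it alone once another thread has already drained it. The same saturating update written in portable C11 atomics, as a sketch:

#include <stdatomic.h>

static void
sub_clamped(atomic_int *counter, int amount)
{
    int cur = atomic_load_explicit(counter, memory_order_relaxed);
    int next;
    do {
        if (cur == 0) {
            return;            /* already drained by another thread */
        }
        next = cur - amount;
        if (next < 0) {
            next = 0;          /* clamp instead of going negative */
        }
        /* on failure, cur is reloaded with the current value and we retry */
    } while (!atomic_compare_exchange_weak(counter, &cur, next));
}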
@@ -2339,7 +2358,6 @@ gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason)
{
    Py_ssize_t m = 0; /* # objects collected */
    Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */
    PyTime_t t1 = 0;  /* initialize to prevent a compiler warning */
    GCState *gcstate = &tstate->interp->gc;

    // gc_collect_main() must not be called before _PyGC_Init
@@ -2358,30 +2376,32 @@ gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason)
        _Py_atomic_store_int(&gcstate->collecting, 0);
        return 0;
    }
    gcstate->frame = tstate->current_frame;

    assert(generation >= 0 && generation < NUM_GENERATIONS);

#ifdef Py_STATS
    if (_Py_stats) {
        _Py_stats->object_stats.object_visits = 0;
    PyStats *s = _PyStats_GET();
    if (s) {
        s->object_stats.object_visits = 0;
    }
#endif
    GC_STAT_ADD(generation, collections, 1);

    if (reason != _Py_GC_REASON_SHUTDOWN) {
        invoke_gc_callback(tstate, "start", generation, 0, 0);
        invoke_gc_callback(tstate, "start", generation, 0, 0, 0, 0.0);
    }

    if (gcstate->debug & _PyGC_DEBUG_STATS) {
        PySys_WriteStderr("gc: collecting generation %d...\n", generation);
        show_stats_each_generations(gcstate);
        // ignore error: don't interrupt the GC if reading the clock fails
        (void)PyTime_PerfCounterRaw(&t1);
    }

    if (PyDTrace_GC_START_ENABLED()) {
        PyDTrace_GC_START(generation);
    }
    PyTime_t start, stop;
    (void)PyTime_PerfCounterRaw(&start);

    PyInterpreterState *interp = tstate->interp;

@@ -2396,13 +2416,13 @@ gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason)
    m = state.collected;
    n = state.uncollectable;

    (void)PyTime_PerfCounterRaw(&stop);
    double duration = PyTime_AsSecondsDouble(stop - start);

    if (gcstate->debug & _PyGC_DEBUG_STATS) {
        PyTime_t t2;
        (void)PyTime_PerfCounterRaw(&t2);
        double d = PyTime_AsSecondsDouble(t2 - t1);
        PySys_WriteStderr(
            "gc: done, %zd unreachable, %zd uncollectable, %.4fs elapsed\n",
            n+m, n, d);
            n+m, n, duration);
    }

    // Clear the current thread's free-list again.
@@ -2423,13 +2443,18 @@ gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason)
    stats->collections++;
    stats->collected += m;
    stats->uncollectable += n;
    stats->duration += duration;
    stats->candidates += state.candidates;

    GC_STAT_ADD(generation, objects_collected, m);
#ifdef Py_STATS
    if (_Py_stats) {
        GC_STAT_ADD(generation, object_visits,
            _Py_stats->object_stats.object_visits);
        _Py_stats->object_stats.object_visits = 0;
    {
        PyStats *s = _PyStats_GET();
        if (s) {
            GC_STAT_ADD(generation, object_visits,
                s->object_stats.object_visits);
            s->object_stats.object_visits = 0;
        }
    }
#endif

@@ -2438,10 +2463,11 @@ gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason)
    }

    if (reason != _Py_GC_REASON_SHUTDOWN) {
        invoke_gc_callback(tstate, "stop", generation, m, n);
        invoke_gc_callback(tstate, "stop", generation, m, n, state.candidates, duration);
    }

    assert(!_PyErr_Occurred(tstate));
    gcstate->frame = NULL;
    _Py_atomic_store_int(&gcstate->collecting, 0);
    return n + m;
}
1196  Python/generated_cases.c.h  (generated)
File diff suppressed because it is too large
Python/getcompiler.c

@@ -3,6 +3,10 @@

#include "Python.h"

#ifdef _Py_COMPILER
# define COMPILER _Py_COMPILER
#endif

#ifndef COMPILER

// Note the __clang__ conditional has to come before the __GNUC__ one because
284  Python/import.c
@@ -3,6 +3,7 @@
#include "Python.h"
#include "pycore_audit.h" // _PySys_Audit()
#include "pycore_ceval.h"
#include "pycore_critical_section.h" // Py_BEGIN_CRITICAL_SECTION()
#include "pycore_hashtable.h" // _Py_hashtable_new_full()
#include "pycore_import.h" // _PyImport_BootstrapImp()
#include "pycore_initconfig.h" // _PyStatus_OK()
@@ -324,13 +325,8 @@ PyImport_GetModule(PyObject *name)
   if not, create a new one and insert it in the modules dictionary. */

static PyObject *
import_add_module(PyThreadState *tstate, PyObject *name)
import_add_module_lock_held(PyObject *modules, PyObject *name)
{
    PyObject *modules = get_modules_dict(tstate, false);
    if (modules == NULL) {
        return NULL;
    }

    PyObject *m;
    if (PyMapping_GetOptionalItem(modules, name, &m) < 0) {
        return NULL;
@@ -350,6 +346,21 @@ import_add_module(PyThreadState *tstate, PyObject *name)
    return m;
}

static PyObject *
import_add_module(PyThreadState *tstate, PyObject *name)
{
    PyObject *modules = get_modules_dict(tstate, false);
    if (modules == NULL) {
        return NULL;
    }

    PyObject *m;
    Py_BEGIN_CRITICAL_SECTION(modules);
    m = import_add_module_lock_held(modules, name);
    Py_END_CRITICAL_SECTION();
    return m;
}

PyObject *
PyImport_AddModuleRef(const char *name)
{
@@ -687,8 +698,8 @@ _PyImport_ClearModulesByIndex(PyInterpreterState *interp)

 (6). first time (not found in _PyRuntime.imports.extensions):
    A. _imp_create_dynamic_impl() -> import_find_extension()
    B. _imp_create_dynamic_impl() -> _PyImport_GetModInitFunc()
    C. _PyImport_GetModInitFunc(): load <module init func>
    B. _imp_create_dynamic_impl() -> _PyImport_GetModuleExportHooks()
    C. _PyImport_GetModuleExportHooks(): load <module init func>
    D. _imp_create_dynamic_impl() -> import_run_extension()
    E. import_run_extension() -> _PyImport_RunModInitFunc()
    F. _PyImport_RunModInitFunc(): call <module init func>
@@ -758,16 +769,19 @@ _PyImport_ClearModulesByIndex(PyInterpreterState *interp)
    A. noop


...for multi-phase init modules:
...for multi-phase init modules from PyModInit_* (PyModuleDef):

 (6). every time:
    A. _imp_create_dynamic_impl() -> import_find_extension()  (not found)
    B. _imp_create_dynamic_impl() -> _PyImport_GetModInitFunc()
    C. _PyImport_GetModInitFunc(): load <module init func>
    B. _imp_create_dynamic_impl() -> _PyImport_GetModuleExportHooks()
    C. _PyImport_GetModuleExportHooks(): load <module init func>
    D. _imp_create_dynamic_impl() -> import_run_extension()
    E. import_run_extension() -> _PyImport_RunModInitFunc()
    F. _PyImport_RunModInitFunc(): call <module init func>
    G. import_run_extension() -> PyModule_FromDefAndSpec()

    PyModule_FromDefAndSpec():

    H. PyModule_FromDefAndSpec(): gather/check moduledef slots
    I. if there's a Py_mod_create slot:
       1. PyModule_FromDefAndSpec(): call its function
@@ -780,10 +794,29 @@ _PyImport_ClearModulesByIndex(PyInterpreterState *interp)
 (10). every time:
    A. _imp_exec_dynamic_impl() -> exec_builtin_or_dynamic()
    B. if mod->md_state == NULL (including if m_size == 0):
       1. exec_builtin_or_dynamic() -> PyModule_ExecDef()
       2. PyModule_ExecDef(): allocate mod->md_state
       1. exec_builtin_or_dynamic() -> PyModule_Exec()
       2. PyModule_Exec(): allocate mod->md_state
       3. if there's a Py_mod_exec slot:
          1. PyModule_ExecDef(): call its function
          1. PyModule_Exec(): call its function


...for multi-phase init modules from PyModExport_* (slots array):

 (6). every time:

    A. _imp_create_dynamic_impl() -> import_find_extension()  (not found)
    B. _imp_create_dynamic_impl() -> _PyImport_GetModuleExportHooks()
    C. _PyImport_GetModuleExportHooks(): load <module export func>
    D. _imp_create_dynamic_impl() -> import_run_modexport()
    E. import_run_modexport(): call <module export func>
    F. import_run_modexport() -> PyModule_FromSlotsAndSpec()
    G. PyModule_FromSlotsAndSpec(): create temporary PyModuleDef-like
    H. PyModule_FromSlotsAndSpec() -> PyModule_FromDefAndSpec()

    (PyModule_FromDefAndSpec behaves as for PyModInit_*, above)

 (10). every time: as for PyModInit_*, above

*/


@@ -797,7 +830,7 @@ _PyImport_ClearModulesByIndex(PyInterpreterState *interp)
   substitute this (if the name actually matches).
*/

_Py_thread_local const char *pkgcontext = NULL;
static _Py_thread_local const char *pkgcontext = NULL;
# undef PKGCONTEXT
# define PKGCONTEXT pkgcontext

@@ -840,25 +873,19 @@ _PyImport_SetDLOpenFlags(PyInterpreterState *interp, int new_val)
/* Common implementation for _imp.exec_dynamic and _imp.exec_builtin */
static int
exec_builtin_or_dynamic(PyObject *mod) {
    PyModuleDef *def;
    void *state;

    if (!PyModule_Check(mod)) {
        return 0;
    }

    def = PyModule_GetDef(mod);
    if (def == NULL) {
        return 0;
    }

    state = PyModule_GetState(mod);
    if (state) {
        /* Already initialized; skip reload */
        return 0;
    }

    return PyModule_ExecDef(mod, def);
    return PyModule_Exec(mod);
}


@@ -1016,9 +1043,10 @@ struct extensions_cache_value {
    _Py_ext_module_origin origin;

#ifdef Py_GIL_DISABLED
    /* The module's md_gil slot, for legacy modules that are reinitialized from
       m_dict rather than calling their initialization function again. */
    void *md_gil;
    /* The module's md_requires_gil member, for legacy modules that are
     * reinitialized from m_dict rather than calling their initialization
     * function again. */
    bool md_requires_gil;
#endif
};

@@ -1349,7 +1377,7 @@ static struct extensions_cache_value *
_extensions_cache_set(PyObject *path, PyObject *name,
                      PyModuleDef *def, PyModInitFunction m_init,
                      Py_ssize_t m_index, PyObject *m_dict,
                      _Py_ext_module_origin origin, void *md_gil)
                      _Py_ext_module_origin origin, bool requires_gil)
{
    struct extensions_cache_value *value = NULL;
    void *key = NULL;
@@ -1404,11 +1432,11 @@ _extensions_cache_set(PyObject *path, PyObject *name,
        /* m_dict is set by set_cached_m_dict(). */
        .origin=origin,
#ifdef Py_GIL_DISABLED
        .md_gil=md_gil,
        .md_requires_gil=requires_gil,
#endif
    };
#ifndef Py_GIL_DISABLED
    (void)md_gil;
    (void)requires_gil;
#endif
    if (init_cached_m_dict(newvalue, m_dict) < 0) {
        goto finally;
@@ -1546,26 +1574,13 @@ _PyImport_CheckGILForModule(PyObject* module, PyObject *module_name)
    }

    if (!PyModule_Check(module) ||
        ((PyModuleObject *)module)->md_gil == Py_MOD_GIL_USED) {
        if (_PyEval_EnableGILPermanent(tstate)) {
            int warn_result = PyErr_WarnFormat(
                PyExc_RuntimeWarning,
                1,
                "The global interpreter lock (GIL) has been enabled to load "
                "module '%U', which has not declared that it can run safely "
                "without the GIL. To override this behavior and keep the GIL "
                "disabled (at your own risk), run with PYTHON_GIL=0 or -Xgil=0.",
                module_name
            );
            if (warn_result < 0) {
                return warn_result;
            }
        ((PyModuleObject *)module)->md_requires_gil)
    {
        if (PyModule_Check(module)) {
            assert(((PyModuleObject *)module)->md_token_is_def);
        }

        const PyConfig *config = _PyInterpreterState_GetConfig(tstate->interp);
        if (config->enable_gil == _PyConfig_GIL_DEFAULT && config->verbose) {
            PySys_FormatStderr("# loading module '%U', which requires the GIL\n",
                               module_name);
        if (_PyImport_EnableGILAndWarn(tstate, module_name) < 0) {
            return -1;
        }
    }
    else {
@@ -1574,6 +1589,28 @@ _PyImport_CheckGILForModule(PyObject* module, PyObject *module_name)

    return 0;
}

int
_PyImport_EnableGILAndWarn(PyThreadState *tstate, PyObject *module_name)
{
    if (_PyEval_EnableGILPermanent(tstate)) {
        return PyErr_WarnFormat(
            PyExc_RuntimeWarning,
            1,
            "The global interpreter lock (GIL) has been enabled to load "
            "module '%U', which has not declared that it can run safely "
            "without the GIL. To override this behavior and keep the GIL "
            "disabled (at your own risk), run with PYTHON_GIL=0 or -Xgil=0.",
            module_name
        );
    }
    const PyConfig *config = _PyInterpreterState_GetConfig(tstate->interp);
    if (config->enable_gil == _PyConfig_GIL_DEFAULT && config->verbose) {
        PySys_FormatStderr("# loading module '%U', which requires the GIL\n",
                           module_name);
    }
    return 0;
}
#endif

static PyThreadState *
@@ -1724,7 +1761,7 @@ struct singlephase_global_update {
    Py_ssize_t m_index;
    PyObject *m_dict;
    _Py_ext_module_origin origin;
    void *md_gil;
    bool md_requires_gil;
};

static struct extensions_cache_value *
@@ -1783,7 +1820,7 @@ update_global_state_for_extension(PyThreadState *tstate,
#endif
    cached = _extensions_cache_set(
        path, name, def, m_init, singlephase->m_index, m_dict,
        singlephase->origin, singlephase->md_gil);
        singlephase->origin, singlephase->md_requires_gil);
    if (cached == NULL) {
        // XXX Ignore this error? Doing so would effectively
        // mark the module as not loadable.
@@ -1802,7 +1839,7 @@ finish_singlephase_extension(PyThreadState *tstate, PyObject *mod,
                             PyObject *name, PyObject *modules)
{
    assert(mod != NULL && PyModule_Check(mod));
    assert(cached->def == _PyModule_GetDef(mod));
    assert(cached->def == _PyModule_GetDefOrNull(mod));

    Py_ssize_t index = _get_cached_module_index(cached);
    if (_modules_by_index_set(tstate->interp, index, mod) < 0) {
@@ -1872,7 +1909,7 @@ reload_singlephase_extension(PyThreadState *tstate,
        if (def->m_base.m_copy != NULL) {
            // For non-core modules, fetch the GIL slot that was stored by
            // import_run_extension().
            ((PyModuleObject *)mod)->md_gil = cached->md_gil;
            ((PyModuleObject *)mod)->md_requires_gil = cached->md_requires_gil;
        }
#endif
        /* We can't set mod->md_def if it's missing,
@@ -1880,8 +1917,8 @@ reload_singlephase_extension(PyThreadState *tstate,
         * due to violating interpreter isolation.
         * See the note in set_cached_m_dict().
         * Until that is solved, we leave md_def set to NULL. */
        assert(_PyModule_GetDef(mod) == NULL
               || _PyModule_GetDef(mod) == def);
        assert(_PyModule_GetDefOrNull(mod) == NULL
               || _PyModule_GetDefOrNull(mod) == def);
    }
    else {
        assert(cached->m_dict == NULL);
@@ -1968,6 +2005,43 @@ import_find_extension(PyThreadState *tstate,
    return mod;
}

static PyObject *
import_run_modexport(PyThreadState *tstate, PyModExportFunction ex0,
                     struct _Py_ext_module_loader_info *info,
                     PyObject *spec)
{
    /* This is like import_run_extension, but avoids interpreter switching
     * and code for single-phase modules.
     */
    PyModuleDef_Slot *slots = ex0();
    if (!slots) {
        if (!PyErr_Occurred()) {
            PyErr_Format(
                PyExc_SystemError,
                "slot export function for module %s failed without setting an exception",
                info->name);
        }
        return NULL;
    }
    if (PyErr_Occurred()) {
        PyErr_Format(
            PyExc_SystemError,
            "slot export function for module %s raised unreported exception",
            info->name);
    }
    PyObject *result = PyModule_FromSlotsAndSpec(slots, spec);
    if (!result) {
        return NULL;
    }
    if (PyModule_Check(result)) {
        PyModuleObject *mod = (PyModuleObject *)result;
        if (mod && !mod->md_token) {
            mod->md_token = slots;
        }
    }
    return result;
}

static PyObject *
import_run_extension(PyThreadState *tstate, PyModInitFunction p0,
                     struct _Py_ext_module_loader_info *info,
@@ -2090,7 +2164,7 @@ import_run_extension(PyThreadState *tstate, PyModInitFunction p0,
            .m_index=def->m_base.m_index,
            .origin=info->origin,
#ifdef Py_GIL_DISABLED
            .md_gil=((PyModuleObject *)mod)->md_gil,
            .md_requires_gil=((PyModuleObject *)mod)->md_requires_gil,
#endif
        };
        // gh-88216: Extensions and def->m_base.m_copy can be updated
@@ -2140,7 +2214,7 @@ import_run_extension(PyThreadState *tstate, PyModInitFunction p0,
        assert_multiphase_def(def);
        assert(mod == NULL);
        /* Note that we cheat a little by not repeating the calls
         * to _PyImport_GetModInitFunc() and _PyImport_RunModInitFunc(). */
         * to _PyImport_GetModuleExportHooks() and _PyImport_RunModInitFunc(). */
        mod = PyModule_FromDefAndSpec(def, spec);
        if (mod == NULL) {
            goto error;
@@ -2254,8 +2328,9 @@ _PyImport_FixupBuiltin(PyThreadState *tstate, PyObject *mod, const char *name,
        return -1;
    }

    PyModuleDef *def = PyModule_GetDef(mod);
    PyModuleDef *def = _PyModule_GetDefOrNull(mod);
    if (def == NULL) {
        assert(!PyErr_Occurred());
        PyErr_BadInternalCall();
        goto finally;
    }
@@ -2284,7 +2359,7 @@ _PyImport_FixupBuiltin(PyThreadState *tstate, PyObject *mod, const char *name,
        .origin=_Py_ext_module_origin_CORE,
#ifdef Py_GIL_DISABLED
        /* Unused when m_dict == NULL. */
        .md_gil=NULL,
        .md_requires_gil=false,
#endif
    };
    cached = update_global_state_for_extension(
@@ -2323,8 +2398,23 @@ is_builtin(PyObject *name)
    return 0;
}

static PyModInitFunction
lookup_inittab_initfunc(const struct _Py_ext_module_loader_info* info)
{
    for (struct _inittab *p = INITTAB; p->name != NULL; p++) {
        if (_PyUnicode_EqualToASCIIString(info->name, p->name)) {
            return (PyModInitFunction)p->initfunc;
        }
    }
    // not found
    return NULL;
}

static PyObject*
create_builtin(PyThreadState *tstate, PyObject *name, PyObject *spec)
create_builtin(
    PyThreadState *tstate, PyObject *name,
    PyObject *spec,
    PyModInitFunction initfunc)
{
    struct _Py_ext_module_loader_info info;
    if (_Py_ext_module_loader_info_init_for_builtin(&info, name) < 0) {
@@ -2337,8 +2427,8 @@ create_builtin(PyThreadState *tstate, PyObject *name, PyObject *spec)
        assert(!_PyErr_Occurred(tstate));
        assert(cached != NULL);
        /* The module might not have md_def set in certain reload cases. */
        assert(_PyModule_GetDef(mod) == NULL
               || cached->def == _PyModule_GetDef(mod));
        assert(_PyModule_GetDefOrNull(mod) == NULL
               || cached->def == _PyModule_GetDefOrNull(mod));
        assert_singlephase(cached);
        goto finally;
    }
@@ -2355,25 +2445,15 @@ create_builtin(PyThreadState *tstate, PyObject *name, PyObject *spec)
        _extensions_cache_delete(info.path, info.name);
    }

    struct _inittab *found = NULL;
    for (struct _inittab *p = INITTAB; p->name != NULL; p++) {
        if (_PyUnicode_EqualToASCIIString(info.name, p->name)) {
            found = p;
            break;
        }
    }
    if (found == NULL) {
        // not found
        mod = Py_NewRef(Py_None);
        goto finally;
    }

    PyModInitFunction p0 = (PyModInitFunction)found->initfunc;
    PyModInitFunction p0 = initfunc;
    if (p0 == NULL) {
        /* Cannot re-init internal module ("sys" or "builtins") */
        assert(is_core_module(tstate->interp, info.name, info.path));
        mod = import_add_module(tstate, info.name);
        goto finally;
        p0 = lookup_inittab_initfunc(&info);
        if (p0 == NULL) {
            /* Cannot re-init internal module ("sys" or "builtins") */
            assert(is_core_module(tstate->interp, info.name, info.path));
            mod = import_add_module(tstate, info.name);
            goto finally;
        }
    }

#ifdef Py_GIL_DISABLED
@@ -2399,6 +2479,33 @@ create_builtin(PyThreadState *tstate, PyObject *name, PyObject *spec)
    return mod;
}

PyObject*
PyImport_CreateModuleFromInitfunc(
    PyObject *spec, PyObject *(*initfunc)(void))
{
    if (initfunc == NULL) {
        PyErr_BadInternalCall();
        return NULL;
    }

    PyThreadState *tstate = _PyThreadState_GET();

    PyObject *name = PyObject_GetAttr(spec, &_Py_ID(name));
    if (name == NULL) {
        return NULL;
    }

    if (!PyUnicode_Check(name)) {
        PyErr_Format(PyExc_TypeError,
                     "spec name must be string, not %T", name);
        Py_DECREF(name);
        return NULL;
    }

    PyObject *mod = create_builtin(tstate, name, spec, initfunc);
    Py_DECREF(name);
    return mod;
}

/*****************************/
/* the builtin modules table */
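A hedged sketch of driving the new PyImport_CreateModuleFromInitfunc() from embedding code: per the checks above, the spec only needs a str "name" attribute. PyInit_examplemod and the way the spec is obtained are assumptions for illustration, not part of this diff:

extern PyObject *PyInit_examplemod(void);   /* assumed statically linked init func */

static PyObject *
create_from_spec(PyObject *spec)
{
    /* spec must expose a str "name" attribute; how the spec is built
     * (e.g. importlib.machinery.ModuleSpec) is up to the caller. */
    return PyImport_CreateModuleFromInitfunc(spec, PyInit_examplemod);
}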
@@ -3168,7 +3275,7 @@ bootstrap_imp(PyThreadState *tstate)
    }

    // Create the _imp module from its definition.
    PyObject *mod = create_builtin(tstate, name, spec);
    PyObject *mod = create_builtin(tstate, name, spec, NULL);
    Py_CLEAR(name);
    Py_DECREF(spec);
    if (mod == NULL) {
@@ -4798,7 +4905,7 @@ _imp_create_builtin(PyObject *module, PyObject *spec)
        return NULL;
    }

    PyObject *mod = create_builtin(tstate, name, spec);
    PyObject *mod = create_builtin(tstate, name, spec, NULL);
    Py_DECREF(name);
    return mod;
}
@@ -5138,8 +5245,8 @@ _imp_create_dynamic_impl(PyObject *module, PyObject *spec, PyObject *file)
    assert(!_PyErr_Occurred(tstate));
    assert(cached != NULL);
    /* The module might not have md_def set in certain reload cases. */
    assert(_PyModule_GetDef(mod) == NULL
           || cached->def == _PyModule_GetDef(mod));
    assert(_PyModule_GetDefOrNull(mod) == NULL
           || cached->def == _PyModule_GetDefOrNull(mod));
    assert_singlephase(cached);
    goto finally;
    }
@@ -5164,7 +5271,7 @@ _imp_create_dynamic_impl(PyObject *module, PyObject *spec, PyObject *file)
    }

    /* We would move this (and the fclose() below) into
     * _PyImport_GetModInitFunc(), but it isn't clear if the intervening
     * _PyImport_GetModuleExportHooks(), but it isn't clear if the intervening
     * code relies on fp still being open. */
    FILE *fp;
    if (file != NULL) {
@@ -5177,7 +5284,15 @@ _imp_create_dynamic_impl(PyObject *module, PyObject *spec, PyObject *file)
        fp = NULL;
    }

    PyModInitFunction p0 = _PyImport_GetModInitFunc(&info, fp);
    PyModInitFunction p0 = NULL;
    PyModExportFunction ex0 = NULL;
    _PyImport_GetModuleExportHooks(&info, fp, &p0, &ex0);
    if (ex0) {
        mod = import_run_modexport(tstate, ex0, &info, spec);
        // Modules created from slots handle GIL enablement (Py_mod_gil slot)
        // when they're created.
        goto cleanup;
    }
    if (p0 == NULL) {
        goto finally;
    }
@@ -5199,6 +5314,7 @@ _imp_create_dynamic_impl(PyObject *module, PyObject *spec, PyObject *file)
    }
#endif

cleanup:
    // XXX Shouldn't this happen in the error cases too (i.e. in "finally")?
    if (fp) {
        fclose(fp);
Python/importdl.c

@@ -5,38 +5,19 @@
#include "pycore_call.h" // _PyObject_CallMethod()
#include "pycore_import.h" // _PyImport_SwapPackageContext()
#include "pycore_importdl.h"
#include "pycore_moduleobject.h" // _PyModule_GetDef()
#include "pycore_moduleobject.h" // _PyModule_GetDefOrNull()
#include "pycore_pyerrors.h" // _PyErr_FormatFromCause()
#include "pycore_runtime.h" // _Py_ID()


/* ./configure sets HAVE_DYNAMIC_LOADING if dynamic loading of modules is
   supported on this platform. configure will then compile and link in one
   of the dynload_*.c files, as appropriate. We will call a function in
   those modules to get a function pointer to the module's init function.
*/
#ifdef HAVE_DYNAMIC_LOADING

#ifdef MS_WINDOWS
extern dl_funcptr _PyImport_FindSharedFuncptrWindows(const char *prefix,
                                                     const char *shortname,
                                                     PyObject *pathname,
                                                     FILE *fp);
#else
extern dl_funcptr _PyImport_FindSharedFuncptr(const char *prefix,
                                              const char *shortname,
                                              const char *pathname, FILE *fp);
#endif

#endif /* HAVE_DYNAMIC_LOADING */


/***********************************/
/* module info to use when loading */
/***********************************/

static const char * const ascii_only_prefix = "PyInit";
static const char * const nonascii_prefix = "PyInitU";
static const struct hook_prefixes ascii_only_prefixes = {
    "PyInit", "PyModExport"};
static const struct hook_prefixes nonascii_prefixes = {
    "PyInitU", "PyModExportU"};

/* Get the variable part of a module's export symbol name.
 * Returns a bytes instance. For non-ASCII-named modules, the name is
@@ -45,7 +26,7 @@ static const char * const nonascii_prefix = "PyInitU";
 * nonascii_prefix, as appropriate.
 */
static PyObject *
get_encoded_name(PyObject *name, const char **hook_prefix) {
get_encoded_name(PyObject *name, const struct hook_prefixes **hook_prefixes) {
    PyObject *tmp;
    PyObject *encoded = NULL;
    PyObject *modname = NULL;
@@ -72,7 +53,7 @@ get_encoded_name(PyObject *name, const char **hook_prefix) {
    /* Encode to ASCII or Punycode, as needed */
    encoded = PyUnicode_AsEncodedString(name, "ascii", NULL);
    if (encoded != NULL) {
        *hook_prefix = ascii_only_prefix;
        *hook_prefixes = &ascii_only_prefixes;
    } else {
        if (PyErr_ExceptionMatches(PyExc_UnicodeEncodeError)) {
            PyErr_Clear();
@@ -80,7 +61,7 @@ get_encoded_name(PyObject *name, const char **hook_prefix) {
            if (encoded == NULL) {
                goto error;
            }
            *hook_prefix = nonascii_prefix;
            *hook_prefixes = &nonascii_prefixes;
        } else {
            goto error;
        }
@@ -130,7 +111,7 @@ _Py_ext_module_loader_info_init(struct _Py_ext_module_loader_info *p_info,
    assert(PyUnicode_GetLength(name) > 0);
    info.name = Py_NewRef(name);

    info.name_encoded = get_encoded_name(info.name, &info.hook_prefix);
    info.name_encoded = get_encoded_name(info.name, &info.hook_prefixes);
    if (info.name_encoded == NULL) {
        _Py_ext_module_loader_info_clear(&info);
        return -1;
@@ -175,7 +156,6 @@ _Py_ext_module_loader_info_init_for_builtin(
        PyObject *name)
{
    assert(PyUnicode_Check(name));
    assert(PyUnicode_FindChar(name, '.', 0, PyUnicode_GetLength(name), -1) == -1);
    assert(PyUnicode_GetLength(name) > 0);

    PyObject *name_encoded = PyUnicode_AsEncodedString(name, "ascii", NULL);
@@ -189,7 +169,7 @@ _Py_ext_module_loader_info_init_for_builtin(
        /* We won't need filename. */
        .path=name,
        .origin=_Py_ext_module_origin_BUILTIN,
        .hook_prefix=ascii_only_prefix,
        .hook_prefixes=&ascii_only_prefixes,
        .newcontext=NULL,
    };
    return 0;
@@ -377,39 +357,66 @@ _Py_ext_module_loader_result_apply_error(
/********************************************/

#ifdef HAVE_DYNAMIC_LOADING
PyModInitFunction
_PyImport_GetModInitFunc(struct _Py_ext_module_loader_info *info,
                         FILE *fp)
static dl_funcptr
findfuncptr(const char *prefix, const char *name_buf,
            struct _Py_ext_module_loader_info *info,
            FILE *fp)
{
#ifdef MS_WINDOWS
    return _PyImport_FindSharedFuncptrWindows(
        prefix, name_buf, info->filename, fp);
#else
    const char *path_buf = PyBytes_AS_STRING(info->filename_encoded);
    return _PyImport_FindSharedFuncptr(
        prefix, name_buf, path_buf, fp);
#endif
}

int
_PyImport_GetModuleExportHooks(
    struct _Py_ext_module_loader_info *info,
    FILE *fp,
    PyModInitFunction *modinit,
    PyModExportFunction *modexport)
{
    *modinit = NULL;
    *modexport = NULL;

    const char *name_buf = PyBytes_AS_STRING(info->name_encoded);
    dl_funcptr exportfunc;
#ifdef MS_WINDOWS
    exportfunc = _PyImport_FindSharedFuncptrWindows(
        info->hook_prefix, name_buf, info->filename, fp);
#else
    {
        const char *path_buf = PyBytes_AS_STRING(info->filename_encoded);
        exportfunc = _PyImport_FindSharedFuncptr(
            info->hook_prefix, name_buf, path_buf, fp);
    }
#endif

    if (exportfunc == NULL) {
        if (!PyErr_Occurred()) {
            PyObject *msg;
            msg = PyUnicode_FromFormat(
                "dynamic module does not define "
                "module export function (%s_%s)",
                info->hook_prefix, name_buf);
            if (msg != NULL) {
                PyErr_SetImportError(msg, info->name, info->filename);
                Py_DECREF(msg);
            }
    exportfunc = findfuncptr(
        info->hook_prefixes->export_prefix,
        name_buf, info, fp);
    if (exportfunc) {
        *modexport = (PyModExportFunction)exportfunc;
        return 2;
    }
    if (PyErr_Occurred()) {
        return -1;
    }

    exportfunc = findfuncptr(
        info->hook_prefixes->init_prefix,
        name_buf, info, fp);
    if (exportfunc) {
        *modinit = (PyModInitFunction)exportfunc;
        return 1;
    }

    if (!PyErr_Occurred()) {
        PyObject *msg;
        msg = PyUnicode_FromFormat(
            "dynamic module does not define "
            "module export function (%s_%s or %s_%s)",
            info->hook_prefixes->export_prefix, name_buf,
            info->hook_prefixes->init_prefix, name_buf);
        if (msg != NULL) {
            PyErr_SetImportError(msg, info->name, info->filename);
            Py_DECREF(msg);
        }
        return NULL;
    }

    return (PyModInitFunction)exportfunc;
    return -1;
}
#endif /* HAVE_DYNAMIC_LOADING */

@@ -477,7 +484,7 @@ _PyImport_RunModInitFunc(PyModInitFunction p0,
        res.def = (PyModuleDef *)m;
        /* Run PyModule_FromDefAndSpec() to finish loading the module. */
    }
    else if (info->hook_prefix == nonascii_prefix) {
    else if (info->hook_prefixes == &nonascii_prefixes) {
        /* Non-ASCII is only supported for multi-phase init. */
        res.kind = _Py_ext_module_kind_MULTIPHASE;
        /* Don't allow legacy init for non-ASCII module names. */
@@ -496,7 +503,7 @@ _PyImport_RunModInitFunc(PyModInitFunction p0,
        goto error;
    }

    res.def = _PyModule_GetDef(m);
    res.def = _PyModule_GetDefOrNull(m);
    if (res.def == NULL) {
        PyErr_Clear();
        _Py_ext_module_loader_result_set_error(
@ -2870,12 +2870,6 @@ _PyConfig_Write(const PyConfig *config, _PyRuntimeState *runtime)
|
|||
return _PyStatus_NO_MEMORY();
|
||||
}
|
||||
|
||||
#ifdef Py_STATS
|
||||
if (config->_pystats) {
|
||||
_Py_StatsOn();
|
||||
}
|
||||
#endif
|
||||
|
||||
return _PyStatus_OK();
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@
|
|||
#include "pycore_tuple.h" // _PyTuple_FromArraySteal()
|
||||
|
||||
#include "opcode_ids.h"
|
||||
#include "pycore_optimizer.h"
|
||||
|
||||
|
||||
/* Uncomment this to dump debugging output when assertions fail */
|
||||
|
|
@ -190,7 +191,7 @@ is_instrumented(int opcode)
|
|||
{
|
||||
assert(opcode != 0);
|
||||
assert(opcode != RESERVED);
|
||||
return opcode != ENTER_EXECUTOR && opcode >= MIN_INSTRUMENTED_OPCODE;
|
||||
return opcode < ENTER_EXECUTOR && opcode >= MIN_INSTRUMENTED_OPCODE;
|
||||
}
|
||||
|
||||
#ifndef NDEBUG
|
||||
|
|
@ -525,7 +526,7 @@ valid_opcode(int opcode)
|
|||
if (IS_VALID_OPCODE(opcode) &&
|
||||
opcode != CACHE &&
|
||||
opcode != RESERVED &&
|
||||
opcode < 255)
|
||||
opcode < 254)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
|
@ -1785,6 +1786,7 @@ force_instrument_lock_held(PyCodeObject *code, PyInterpreterState *interp)
|
|||
_PyCode_Clear_Executors(code);
|
||||
}
|
||||
_Py_Executors_InvalidateDependency(interp, code, 1);
|
||||
_PyJit_Tracer_InvalidateDependency(PyThreadState_GET(), code);
|
||||
#endif
|
||||
int code_len = (int)Py_SIZE(code);
|
||||
/* Exit early to avoid creating instrumentation
|
||||
|
|
@ -2019,12 +2021,12 @@ _PyMonitoring_SetEvents(int tool_id, _PyMonitoringEventSet events)
|
|||
if (existing_events == events) {
|
||||
return 0;
|
||||
}
|
||||
set_events(&interp->monitors, tool_id, events);
|
||||
uint32_t new_version = global_version(interp) + MONITORING_VERSION_INCREMENT;
|
||||
if (new_version == 0) {
|
||||
PyErr_Format(PyExc_OverflowError, "events set too many times");
|
||||
return -1;
|
||||
}
|
||||
set_events(&interp->monitors, tool_id, events);
|
||||
set_global_version(tstate, new_version);
|
||||
#ifdef _Py_TIER2
|
||||
_Py_Executors_InvalidateAll(interp, 1);
|
||||
|
|
|
|||
|
|
@ -60,6 +60,10 @@ jit_error(const char *message)
|
|||
static unsigned char *
|
||||
jit_alloc(size_t size)
|
||||
{
|
||||
if (size > PY_MAX_JIT_CODE_SIZE) {
|
||||
jit_error("code too big; refactor bytecodes.c to keep uop size down, or reduce maximum trace length.");
|
||||
return NULL;
|
||||
}
|
||||
assert(size);
|
||||
assert(size % get_page_size() == 0);
|
||||
#ifdef MS_WINDOWS
|
||||
|
|
@ -604,7 +608,7 @@ _PyJIT_Compile(_PyExecutorObject *executor, const _PyUOpInstruction trace[], siz
|
|||
unsigned char *code = memory;
|
||||
state.trampolines.mem = memory + code_size;
|
||||
unsigned char *data = memory + code_size + state.trampolines.size + code_padding;
|
||||
assert(trace[0].opcode == _START_EXECUTOR || trace[0].opcode == _COLD_EXIT);
|
||||
assert(trace[0].opcode == _START_EXECUTOR || trace[0].opcode == _COLD_EXIT || trace[0].opcode == _COLD_DYNAMIC_EXIT);
|
||||
for (size_t i = 0; i < length; i++) {
|
||||
const _PyUOpInstruction *instruction = &trace[i];
|
||||
group = &stencil_groups[instruction->opcode];
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@
|
|||
#include "pycore_parking_lot.h"
|
||||
#include "pycore_semaphore.h"
|
||||
#include "pycore_time.h" // _PyTime_Add()
|
||||
#include "pycore_stats.h" // FT_STAT_MUTEX_SLEEP_INC()
|
||||
|
||||
#ifdef MS_WINDOWS
|
||||
# ifndef WIN32_LEAN_AND_MEAN
|
||||
|
|
@ -62,6 +63,8 @@ _PyMutex_LockTimed(PyMutex *m, PyTime_t timeout, _PyLockFlags flags)
|
|||
return PY_LOCK_FAILURE;
|
||||
}
|
||||
|
||||
FT_STAT_MUTEX_SLEEP_INC();
|
||||
|
||||
PyTime_t now;
|
||||
// silently ignore error: cannot report error to the caller
|
||||
(void)PyTime_MonotonicRaw(&now);
|
||||
|
|
|
|||
|
|
@ -310,7 +310,7 @@ w_PyLong(const PyLongObject *ob, char flag, WFILE *p)
|
|||
}
|
||||
if (!long_export.digits) {
|
||||
int8_t sign = long_export.value < 0 ? -1 : 1;
|
||||
uint64_t abs_value = Py_ABS(long_export.value);
|
||||
uint64_t abs_value = _Py_ABS_CAST(uint64_t, long_export.value);
|
||||
uint64_t d = abs_value;
|
||||
long l = 0;
|
||||
|
||||
|
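The last change swaps Py_ABS() for a cast-based helper: for long_export.value == INT64_MIN, the signed negation inside a plain ABS macro is undefined behavior, since the magnitude does not fit in int64_t. A sketch of the idea behind such a helper (the macro name and body here are illustrative, not the definition this commit adds):

#include <stdint.h>

/* Illustrative only: convert to unsigned first, then negate. Unsigned
 * arithmetic wraps modulo 2**64, so the most negative value maps to its
 * correct magnitude instead of overflowing. */
#define ABS_CAST_U64(v) \
    ((v) < 0 ? (uint64_t)0 - (uint64_t)(v) : (uint64_t)(v))

/* ABS_CAST_U64(INT64_MIN) == (uint64_t)INT64_MAX + 1 */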
530 Python/opcode_targets.h (generated)

@ -233,7 +233,6 @@ static void *opcode_targets_table[256] = {
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&TARGET_INSTRUMENTED_END_FOR,
&&TARGET_INSTRUMENTED_POP_ITER,
&&TARGET_INSTRUMENTED_END_SEND,

@ -256,9 +255,272 @@ static void *opcode_targets_table[256] = {
&&TARGET_INSTRUMENTED_JUMP_BACKWARD,
&&TARGET_INSTRUMENTED_LINE,
&&TARGET_ENTER_EXECUTOR,
&&TARGET_TRACE_RECORD,
};
#if _Py_TIER2
static void *opcode_tracing_targets_table[256] = {
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
&&TARGET_TRACE_RECORD,
};
#endif
#else /* _Py_TAIL_CALL_INTERP */
static py_tail_call_funcptr instruction_funcptr_table[256];
static py_tail_call_funcptr instruction_funcptr_handler_table[256];

static py_tail_call_funcptr instruction_funcptr_tracing_table[256];

Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_pop_2_error(TAIL_CALL_PARAMS);
Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_pop_1_error(TAIL_CALL_PARAMS);

@ -266,6 +528,7 @@ Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_error(TAIL_CALL_PARAMS);
Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_exception_unwind(TAIL_CALL_PARAMS);
Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_exit_unwind(TAIL_CALL_PARAMS);
Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_start_frame(TAIL_CALL_PARAMS);
Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_stop_tracing(TAIL_CALL_PARAMS);

Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_BINARY_OP(TAIL_CALL_PARAMS);
Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_BINARY_OP_ADD_FLOAT(TAIL_CALL_PARAMS);

@ -482,6 +745,7 @@ Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_TO_BOOL_INT(TAIL_CALL_PARAMS);
Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_TO_BOOL_LIST(TAIL_CALL_PARAMS);
Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_TO_BOOL_NONE(TAIL_CALL_PARAMS);
Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_TO_BOOL_STR(TAIL_CALL_PARAMS);
Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_TRACE_RECORD(TAIL_CALL_PARAMS);
Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_UNARY_INVERT(TAIL_CALL_PARAMS);
Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_UNARY_NEGATIVE(TAIL_CALL_PARAMS);
Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_UNARY_NOT(TAIL_CALL_PARAMS);

@ -503,7 +767,7 @@ Py_PRESERVE_NONE_CC static PyObject *_TAIL_CALL_UNKNOWN_OPCODE(TAIL_CALL_PARAMS)
JUMP_TO_LABEL(error);
}

static py_tail_call_funcptr instruction_funcptr_table[256] = {
static py_tail_call_funcptr instruction_funcptr_handler_table[256] = {
[BINARY_OP] = _TAIL_CALL_BINARY_OP,
[BINARY_OP_ADD_FLOAT] = _TAIL_CALL_BINARY_OP_ADD_FLOAT,
[BINARY_OP_ADD_INT] = _TAIL_CALL_BINARY_OP_ADD_INT,

@ -719,6 +983,7 @@ static py_tail_call_funcptr instruction_funcptr_table[256] = {
[TO_BOOL_LIST] = _TAIL_CALL_TO_BOOL_LIST,
[TO_BOOL_NONE] = _TAIL_CALL_TO_BOOL_NONE,
[TO_BOOL_STR] = _TAIL_CALL_TO_BOOL_STR,
[TRACE_RECORD] = _TAIL_CALL_TRACE_RECORD,
[UNARY_INVERT] = _TAIL_CALL_UNARY_INVERT,
[UNARY_NEGATIVE] = _TAIL_CALL_UNARY_NEGATIVE,
[UNARY_NOT] = _TAIL_CALL_UNARY_NOT,

@ -759,6 +1024,263 @@ static py_tail_call_funcptr instruction_funcptr_table[256] = {
[230] = _TAIL_CALL_UNKNOWN_OPCODE,
[231] = _TAIL_CALL_UNKNOWN_OPCODE,
[232] = _TAIL_CALL_UNKNOWN_OPCODE,
[233] = _TAIL_CALL_UNKNOWN_OPCODE,
};
static py_tail_call_funcptr instruction_funcptr_tracing_table[256] = {
[BINARY_OP] = _TAIL_CALL_TRACE_RECORD,
[BINARY_OP_ADD_FLOAT] = _TAIL_CALL_TRACE_RECORD,
[BINARY_OP_ADD_INT] = _TAIL_CALL_TRACE_RECORD,
[BINARY_OP_ADD_UNICODE] = _TAIL_CALL_TRACE_RECORD,
[BINARY_OP_EXTEND] = _TAIL_CALL_TRACE_RECORD,
[BINARY_OP_INPLACE_ADD_UNICODE] = _TAIL_CALL_TRACE_RECORD,
[BINARY_OP_MULTIPLY_FLOAT] = _TAIL_CALL_TRACE_RECORD,
[BINARY_OP_MULTIPLY_INT] = _TAIL_CALL_TRACE_RECORD,
[BINARY_OP_SUBSCR_DICT] = _TAIL_CALL_TRACE_RECORD,
[BINARY_OP_SUBSCR_GETITEM] = _TAIL_CALL_TRACE_RECORD,
[BINARY_OP_SUBSCR_LIST_INT] = _TAIL_CALL_TRACE_RECORD,
[BINARY_OP_SUBSCR_LIST_SLICE] = _TAIL_CALL_TRACE_RECORD,
[BINARY_OP_SUBSCR_STR_INT] = _TAIL_CALL_TRACE_RECORD,
[BINARY_OP_SUBSCR_TUPLE_INT] = _TAIL_CALL_TRACE_RECORD,
[BINARY_OP_SUBTRACT_FLOAT] = _TAIL_CALL_TRACE_RECORD,
[BINARY_OP_SUBTRACT_INT] = _TAIL_CALL_TRACE_RECORD,
[BINARY_SLICE] = _TAIL_CALL_TRACE_RECORD,
[BUILD_INTERPOLATION] = _TAIL_CALL_TRACE_RECORD,
[BUILD_LIST] = _TAIL_CALL_TRACE_RECORD,
[BUILD_MAP] = _TAIL_CALL_TRACE_RECORD,
[BUILD_SET] = _TAIL_CALL_TRACE_RECORD,
[BUILD_SLICE] = _TAIL_CALL_TRACE_RECORD,
[BUILD_STRING] = _TAIL_CALL_TRACE_RECORD,
[BUILD_TEMPLATE] = _TAIL_CALL_TRACE_RECORD,
[BUILD_TUPLE] = _TAIL_CALL_TRACE_RECORD,
[CACHE] = _TAIL_CALL_TRACE_RECORD,
[CALL] = _TAIL_CALL_TRACE_RECORD,
[CALL_ALLOC_AND_ENTER_INIT] = _TAIL_CALL_TRACE_RECORD,
[CALL_BOUND_METHOD_EXACT_ARGS] = _TAIL_CALL_TRACE_RECORD,
[CALL_BOUND_METHOD_GENERAL] = _TAIL_CALL_TRACE_RECORD,
[CALL_BUILTIN_CLASS] = _TAIL_CALL_TRACE_RECORD,
[CALL_BUILTIN_FAST] = _TAIL_CALL_TRACE_RECORD,
[CALL_BUILTIN_FAST_WITH_KEYWORDS] = _TAIL_CALL_TRACE_RECORD,
[CALL_BUILTIN_O] = _TAIL_CALL_TRACE_RECORD,
[CALL_FUNCTION_EX] = _TAIL_CALL_TRACE_RECORD,
[CALL_INTRINSIC_1] = _TAIL_CALL_TRACE_RECORD,
[CALL_INTRINSIC_2] = _TAIL_CALL_TRACE_RECORD,
[CALL_ISINSTANCE] = _TAIL_CALL_TRACE_RECORD,
[CALL_KW] = _TAIL_CALL_TRACE_RECORD,
[CALL_KW_BOUND_METHOD] = _TAIL_CALL_TRACE_RECORD,
[CALL_KW_NON_PY] = _TAIL_CALL_TRACE_RECORD,
[CALL_KW_PY] = _TAIL_CALL_TRACE_RECORD,
[CALL_LEN] = _TAIL_CALL_TRACE_RECORD,
[CALL_LIST_APPEND] = _TAIL_CALL_TRACE_RECORD,
[CALL_METHOD_DESCRIPTOR_FAST] = _TAIL_CALL_TRACE_RECORD,
[CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS] = _TAIL_CALL_TRACE_RECORD,
[CALL_METHOD_DESCRIPTOR_NOARGS] = _TAIL_CALL_TRACE_RECORD,
[CALL_METHOD_DESCRIPTOR_O] = _TAIL_CALL_TRACE_RECORD,
[CALL_NON_PY_GENERAL] = _TAIL_CALL_TRACE_RECORD,
[CALL_PY_EXACT_ARGS] = _TAIL_CALL_TRACE_RECORD,
[CALL_PY_GENERAL] = _TAIL_CALL_TRACE_RECORD,
[CALL_STR_1] = _TAIL_CALL_TRACE_RECORD,
[CALL_TUPLE_1] = _TAIL_CALL_TRACE_RECORD,
[CALL_TYPE_1] = _TAIL_CALL_TRACE_RECORD,
[CHECK_EG_MATCH] = _TAIL_CALL_TRACE_RECORD,
[CHECK_EXC_MATCH] = _TAIL_CALL_TRACE_RECORD,
[CLEANUP_THROW] = _TAIL_CALL_TRACE_RECORD,
[COMPARE_OP] = _TAIL_CALL_TRACE_RECORD,
[COMPARE_OP_FLOAT] = _TAIL_CALL_TRACE_RECORD,
[COMPARE_OP_INT] = _TAIL_CALL_TRACE_RECORD,
[COMPARE_OP_STR] = _TAIL_CALL_TRACE_RECORD,
[CONTAINS_OP] = _TAIL_CALL_TRACE_RECORD,
[CONTAINS_OP_DICT] = _TAIL_CALL_TRACE_RECORD,
[CONTAINS_OP_SET] = _TAIL_CALL_TRACE_RECORD,
[CONVERT_VALUE] = _TAIL_CALL_TRACE_RECORD,
[COPY] = _TAIL_CALL_TRACE_RECORD,
[COPY_FREE_VARS] = _TAIL_CALL_TRACE_RECORD,
[DELETE_ATTR] = _TAIL_CALL_TRACE_RECORD,
[DELETE_DEREF] = _TAIL_CALL_TRACE_RECORD,
[DELETE_FAST] = _TAIL_CALL_TRACE_RECORD,
[DELETE_GLOBAL] = _TAIL_CALL_TRACE_RECORD,
[DELETE_NAME] = _TAIL_CALL_TRACE_RECORD,
[DELETE_SUBSCR] = _TAIL_CALL_TRACE_RECORD,
[DICT_MERGE] = _TAIL_CALL_TRACE_RECORD,
[DICT_UPDATE] = _TAIL_CALL_TRACE_RECORD,
[END_ASYNC_FOR] = _TAIL_CALL_TRACE_RECORD,
[END_FOR] = _TAIL_CALL_TRACE_RECORD,
[END_SEND] = _TAIL_CALL_TRACE_RECORD,
[ENTER_EXECUTOR] = _TAIL_CALL_TRACE_RECORD,
[EXIT_INIT_CHECK] = _TAIL_CALL_TRACE_RECORD,
[EXTENDED_ARG] = _TAIL_CALL_TRACE_RECORD,
[FORMAT_SIMPLE] = _TAIL_CALL_TRACE_RECORD,
[FORMAT_WITH_SPEC] = _TAIL_CALL_TRACE_RECORD,
[FOR_ITER] = _TAIL_CALL_TRACE_RECORD,
[FOR_ITER_GEN] = _TAIL_CALL_TRACE_RECORD,
[FOR_ITER_LIST] = _TAIL_CALL_TRACE_RECORD,
[FOR_ITER_RANGE] = _TAIL_CALL_TRACE_RECORD,
[FOR_ITER_TUPLE] = _TAIL_CALL_TRACE_RECORD,
[GET_AITER] = _TAIL_CALL_TRACE_RECORD,
[GET_ANEXT] = _TAIL_CALL_TRACE_RECORD,
[GET_AWAITABLE] = _TAIL_CALL_TRACE_RECORD,
[GET_ITER] = _TAIL_CALL_TRACE_RECORD,
[GET_LEN] = _TAIL_CALL_TRACE_RECORD,
[GET_YIELD_FROM_ITER] = _TAIL_CALL_TRACE_RECORD,
[IMPORT_FROM] = _TAIL_CALL_TRACE_RECORD,
[IMPORT_NAME] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_CALL] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_CALL_FUNCTION_EX] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_CALL_KW] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_END_ASYNC_FOR] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_END_FOR] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_END_SEND] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_FOR_ITER] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_INSTRUCTION] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_JUMP_BACKWARD] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_JUMP_FORWARD] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_LINE] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_LOAD_SUPER_ATTR] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_NOT_TAKEN] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_POP_ITER] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_POP_JUMP_IF_FALSE] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_POP_JUMP_IF_NONE] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_POP_JUMP_IF_NOT_NONE] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_POP_JUMP_IF_TRUE] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_RESUME] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_RETURN_VALUE] = _TAIL_CALL_TRACE_RECORD,
[INSTRUMENTED_YIELD_VALUE] = _TAIL_CALL_TRACE_RECORD,
[INTERPRETER_EXIT] = _TAIL_CALL_TRACE_RECORD,
[IS_OP] = _TAIL_CALL_TRACE_RECORD,
[JUMP_BACKWARD] = _TAIL_CALL_TRACE_RECORD,
[JUMP_BACKWARD_JIT] = _TAIL_CALL_TRACE_RECORD,
[JUMP_BACKWARD_NO_INTERRUPT] = _TAIL_CALL_TRACE_RECORD,
[JUMP_BACKWARD_NO_JIT] = _TAIL_CALL_TRACE_RECORD,
[JUMP_FORWARD] = _TAIL_CALL_TRACE_RECORD,
[LIST_APPEND] = _TAIL_CALL_TRACE_RECORD,
[LIST_EXTEND] = _TAIL_CALL_TRACE_RECORD,
[LOAD_ATTR] = _TAIL_CALL_TRACE_RECORD,
[LOAD_ATTR_CLASS] = _TAIL_CALL_TRACE_RECORD,
[LOAD_ATTR_CLASS_WITH_METACLASS_CHECK] = _TAIL_CALL_TRACE_RECORD,
[LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN] = _TAIL_CALL_TRACE_RECORD,
[LOAD_ATTR_INSTANCE_VALUE] = _TAIL_CALL_TRACE_RECORD,
[LOAD_ATTR_METHOD_LAZY_DICT] = _TAIL_CALL_TRACE_RECORD,
[LOAD_ATTR_METHOD_NO_DICT] = _TAIL_CALL_TRACE_RECORD,
[LOAD_ATTR_METHOD_WITH_VALUES] = _TAIL_CALL_TRACE_RECORD,
[LOAD_ATTR_MODULE] = _TAIL_CALL_TRACE_RECORD,
[LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = _TAIL_CALL_TRACE_RECORD,
[LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = _TAIL_CALL_TRACE_RECORD,
[LOAD_ATTR_PROPERTY] = _TAIL_CALL_TRACE_RECORD,
[LOAD_ATTR_SLOT] = _TAIL_CALL_TRACE_RECORD,
[LOAD_ATTR_WITH_HINT] = _TAIL_CALL_TRACE_RECORD,
[LOAD_BUILD_CLASS] = _TAIL_CALL_TRACE_RECORD,
[LOAD_COMMON_CONSTANT] = _TAIL_CALL_TRACE_RECORD,
[LOAD_CONST] = _TAIL_CALL_TRACE_RECORD,
[LOAD_DEREF] = _TAIL_CALL_TRACE_RECORD,
[LOAD_FAST] = _TAIL_CALL_TRACE_RECORD,
[LOAD_FAST_AND_CLEAR] = _TAIL_CALL_TRACE_RECORD,
[LOAD_FAST_BORROW] = _TAIL_CALL_TRACE_RECORD,
[LOAD_FAST_BORROW_LOAD_FAST_BORROW] = _TAIL_CALL_TRACE_RECORD,
[LOAD_FAST_CHECK] = _TAIL_CALL_TRACE_RECORD,
[LOAD_FAST_LOAD_FAST] = _TAIL_CALL_TRACE_RECORD,
[LOAD_FROM_DICT_OR_DEREF] = _TAIL_CALL_TRACE_RECORD,
[LOAD_FROM_DICT_OR_GLOBALS] = _TAIL_CALL_TRACE_RECORD,
[LOAD_GLOBAL] = _TAIL_CALL_TRACE_RECORD,
[LOAD_GLOBAL_BUILTIN] = _TAIL_CALL_TRACE_RECORD,
[LOAD_GLOBAL_MODULE] = _TAIL_CALL_TRACE_RECORD,
[LOAD_LOCALS] = _TAIL_CALL_TRACE_RECORD,
[LOAD_NAME] = _TAIL_CALL_TRACE_RECORD,
[LOAD_SMALL_INT] = _TAIL_CALL_TRACE_RECORD,
[LOAD_SPECIAL] = _TAIL_CALL_TRACE_RECORD,
[LOAD_SUPER_ATTR] = _TAIL_CALL_TRACE_RECORD,
[LOAD_SUPER_ATTR_ATTR] = _TAIL_CALL_TRACE_RECORD,
[LOAD_SUPER_ATTR_METHOD] = _TAIL_CALL_TRACE_RECORD,
[MAKE_CELL] = _TAIL_CALL_TRACE_RECORD,
[MAKE_FUNCTION] = _TAIL_CALL_TRACE_RECORD,
[MAP_ADD] = _TAIL_CALL_TRACE_RECORD,
[MATCH_CLASS] = _TAIL_CALL_TRACE_RECORD,
[MATCH_KEYS] = _TAIL_CALL_TRACE_RECORD,
[MATCH_MAPPING] = _TAIL_CALL_TRACE_RECORD,
[MATCH_SEQUENCE] = _TAIL_CALL_TRACE_RECORD,
[NOP] = _TAIL_CALL_TRACE_RECORD,
[NOT_TAKEN] = _TAIL_CALL_TRACE_RECORD,
[POP_EXCEPT] = _TAIL_CALL_TRACE_RECORD,
[POP_ITER] = _TAIL_CALL_TRACE_RECORD,
[POP_JUMP_IF_FALSE] = _TAIL_CALL_TRACE_RECORD,
[POP_JUMP_IF_NONE] = _TAIL_CALL_TRACE_RECORD,
[POP_JUMP_IF_NOT_NONE] = _TAIL_CALL_TRACE_RECORD,
[POP_JUMP_IF_TRUE] = _TAIL_CALL_TRACE_RECORD,
[POP_TOP] = _TAIL_CALL_TRACE_RECORD,
[PUSH_EXC_INFO] = _TAIL_CALL_TRACE_RECORD,
[PUSH_NULL] = _TAIL_CALL_TRACE_RECORD,
[RAISE_VARARGS] = _TAIL_CALL_TRACE_RECORD,
[RERAISE] = _TAIL_CALL_TRACE_RECORD,
[RESERVED] = _TAIL_CALL_TRACE_RECORD,
[RESUME] = _TAIL_CALL_TRACE_RECORD,
[RESUME_CHECK] = _TAIL_CALL_TRACE_RECORD,
[RETURN_GENERATOR] = _TAIL_CALL_TRACE_RECORD,
[RETURN_VALUE] = _TAIL_CALL_TRACE_RECORD,
[SEND] = _TAIL_CALL_TRACE_RECORD,
[SEND_GEN] = _TAIL_CALL_TRACE_RECORD,
[SETUP_ANNOTATIONS] = _TAIL_CALL_TRACE_RECORD,
[SET_ADD] = _TAIL_CALL_TRACE_RECORD,
[SET_FUNCTION_ATTRIBUTE] = _TAIL_CALL_TRACE_RECORD,
[SET_UPDATE] = _TAIL_CALL_TRACE_RECORD,
[STORE_ATTR] = _TAIL_CALL_TRACE_RECORD,
[STORE_ATTR_INSTANCE_VALUE] = _TAIL_CALL_TRACE_RECORD,
[STORE_ATTR_SLOT] = _TAIL_CALL_TRACE_RECORD,
[STORE_ATTR_WITH_HINT] = _TAIL_CALL_TRACE_RECORD,
[STORE_DEREF] = _TAIL_CALL_TRACE_RECORD,
[STORE_FAST] = _TAIL_CALL_TRACE_RECORD,
[STORE_FAST_LOAD_FAST] = _TAIL_CALL_TRACE_RECORD,
[STORE_FAST_STORE_FAST] = _TAIL_CALL_TRACE_RECORD,
[STORE_GLOBAL] = _TAIL_CALL_TRACE_RECORD,
[STORE_NAME] = _TAIL_CALL_TRACE_RECORD,
[STORE_SLICE] = _TAIL_CALL_TRACE_RECORD,
[STORE_SUBSCR] = _TAIL_CALL_TRACE_RECORD,
[STORE_SUBSCR_DICT] = _TAIL_CALL_TRACE_RECORD,
[STORE_SUBSCR_LIST_INT] = _TAIL_CALL_TRACE_RECORD,
[SWAP] = _TAIL_CALL_TRACE_RECORD,
[TO_BOOL] = _TAIL_CALL_TRACE_RECORD,
[TO_BOOL_ALWAYS_TRUE] = _TAIL_CALL_TRACE_RECORD,
[TO_BOOL_BOOL] = _TAIL_CALL_TRACE_RECORD,
[TO_BOOL_INT] = _TAIL_CALL_TRACE_RECORD,
[TO_BOOL_LIST] = _TAIL_CALL_TRACE_RECORD,
[TO_BOOL_NONE] = _TAIL_CALL_TRACE_RECORD,
[TO_BOOL_STR] = _TAIL_CALL_TRACE_RECORD,
[TRACE_RECORD] = _TAIL_CALL_TRACE_RECORD,
[UNARY_INVERT] = _TAIL_CALL_TRACE_RECORD,
[UNARY_NEGATIVE] = _TAIL_CALL_TRACE_RECORD,
[UNARY_NOT] = _TAIL_CALL_TRACE_RECORD,
[UNPACK_EX] = _TAIL_CALL_TRACE_RECORD,
[UNPACK_SEQUENCE] = _TAIL_CALL_TRACE_RECORD,
[UNPACK_SEQUENCE_LIST] = _TAIL_CALL_TRACE_RECORD,
[UNPACK_SEQUENCE_TUPLE] = _TAIL_CALL_TRACE_RECORD,
[UNPACK_SEQUENCE_TWO_TUPLE] = _TAIL_CALL_TRACE_RECORD,
[WITH_EXCEPT_START] = _TAIL_CALL_TRACE_RECORD,
[YIELD_VALUE] = _TAIL_CALL_TRACE_RECORD,
[121] = _TAIL_CALL_UNKNOWN_OPCODE,
[122] = _TAIL_CALL_UNKNOWN_OPCODE,
[123] = _TAIL_CALL_UNKNOWN_OPCODE,
[124] = _TAIL_CALL_UNKNOWN_OPCODE,
[125] = _TAIL_CALL_UNKNOWN_OPCODE,
[126] = _TAIL_CALL_UNKNOWN_OPCODE,
[127] = _TAIL_CALL_UNKNOWN_OPCODE,
[210] = _TAIL_CALL_UNKNOWN_OPCODE,
[211] = _TAIL_CALL_UNKNOWN_OPCODE,
[212] = _TAIL_CALL_UNKNOWN_OPCODE,
[213] = _TAIL_CALL_UNKNOWN_OPCODE,
[214] = _TAIL_CALL_UNKNOWN_OPCODE,
[215] = _TAIL_CALL_UNKNOWN_OPCODE,
[216] = _TAIL_CALL_UNKNOWN_OPCODE,
[217] = _TAIL_CALL_UNKNOWN_OPCODE,
[218] = _TAIL_CALL_UNKNOWN_OPCODE,
[219] = _TAIL_CALL_UNKNOWN_OPCODE,
[220] = _TAIL_CALL_UNKNOWN_OPCODE,
[221] = _TAIL_CALL_UNKNOWN_OPCODE,
[222] = _TAIL_CALL_UNKNOWN_OPCODE,
[223] = _TAIL_CALL_UNKNOWN_OPCODE,
[224] = _TAIL_CALL_UNKNOWN_OPCODE,
[225] = _TAIL_CALL_UNKNOWN_OPCODE,
[226] = _TAIL_CALL_UNKNOWN_OPCODE,
[227] = _TAIL_CALL_UNKNOWN_OPCODE,
[228] = _TAIL_CALL_UNKNOWN_OPCODE,
[229] = _TAIL_CALL_UNKNOWN_OPCODE,
[230] = _TAIL_CALL_UNKNOWN_OPCODE,
[231] = _TAIL_CALL_UNKNOWN_OPCODE,
[232] = _TAIL_CALL_UNKNOWN_OPCODE,
};
#endif /* _Py_TAIL_CALL_INTERP */

1071 Python/optimizer.c (file diff suppressed because it is too large)
@ -142,9 +142,7 @@ incorrect_keys(PyObject *obj, uint32_t version)
#define STACK_LEVEL() ((int)(stack_pointer - ctx->frame->stack))
#define STACK_SIZE() ((int)(ctx->frame->stack_len))

#define WITHIN_STACK_BOUNDS() \
(STACK_LEVEL() >= 0 && STACK_LEVEL() <= STACK_SIZE())

#define CURRENT_FRAME_IS_INIT_SHIM() (ctx->frame->code == ((PyCodeObject *)&_Py_InitCleanup))

#define GETLOCAL(idx) ((ctx->frame->locals[idx]))


@ -190,6 +188,27 @@ incorrect_keys(PyObject *obj, uint32_t version)

#define JUMP_TO_LABEL(label) goto label;

static int
check_stack_bounds(JitOptContext *ctx, JitOptRef *stack_pointer, int offset, int opcode)
{
int stack_level = (int)(stack_pointer + (offset) - ctx->frame->stack);
int should_check = !CURRENT_FRAME_IS_INIT_SHIM() ||
(opcode == _RETURN_VALUE) ||
(opcode == _RETURN_GENERATOR) ||
(opcode == _YIELD_VALUE);
if (should_check && (stack_level < 0 || stack_level > STACK_SIZE())) {
ctx->contradiction = true;
ctx->done = true;
return 1;
}
return 0;
}

#define CHECK_STACK_BOUNDS(offset) \
if (check_stack_bounds(ctx, stack_pointer, offset, opcode)) { \
break; \
} \
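The helper and macro above are meant to be dropped into the generated abstract-interpreter cases; the case body below is a hypothetical illustration of the intended call shape, not a line from the generated optimizer_cases.c.h:

/* Hypothetical generated case: bail out of abstract interpretation when
 * the modelled stack effect (popping one value here) would move the
 * virtual stack pointer outside the frame's bounds. */
case _POP_TOP: {
    CHECK_STACK_BOUNDS(-1);
    stack_pointer -= 1;
    break;
}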
static int
optimize_to_bool(
_PyUOpInstruction *this_instr,

@ -267,7 +286,7 @@ static
PyCodeObject *
get_current_code_object(JitOptContext *ctx)
{
return (PyCodeObject *)ctx->frame->func->func_code;
return (PyCodeObject *)ctx->frame->code;
}

static PyObject *

@ -276,6 +295,36 @@ get_co_name(JitOptContext *ctx, int index)
return PyTuple_GET_ITEM(get_current_code_object(ctx)->co_names, index);
}

#ifdef Py_DEBUG
void
_Py_opt_assert_within_stack_bounds(
_Py_UOpsAbstractFrame *frame, JitOptRef *stack_pointer,
const char *filename, int lineno
) {
if (frame->code == ((PyCodeObject *)&_Py_InitCleanup)) {
return;
}
int level = (int)(stack_pointer - frame->stack);
if (level < 0) {
printf("Stack underflow (depth = %d) at %s:%d\n", level, filename, lineno);
fflush(stdout);
abort();
}
int size = (int)(frame->stack_len);
if (level > size) {
printf("Stack overflow (depth = %d) at %s:%d\n", level, filename, lineno);
fflush(stdout);
abort();
}
}
#endif

#ifdef Py_DEBUG
#define ASSERT_WITHIN_STACK_BOUNDS(F, L) _Py_opt_assert_within_stack_bounds(ctx->frame, stack_pointer, (F), (L))
#else
#define ASSERT_WITHIN_STACK_BOUNDS(F, L) (void)0
#endif

// TODO (gh-134584) generate most of this table automatically
const uint16_t op_without_decref_inputs[MAX_UOP_ID + 1] = {
[_BINARY_OP_MULTIPLY_FLOAT] = _BINARY_OP_MULTIPLY_FLOAT__NO_DECREF_INPUTS,

@ -298,10 +347,6 @@ optimize_uops(
JitOptContext context;
JitOptContext *ctx = &context;
uint32_t opcode = UINT16_MAX;
int curr_space = 0;
int max_space = 0;
_PyUOpInstruction *first_valid_check_stack = NULL;
_PyUOpInstruction *corresponding_check_stack = NULL;

// Make sure that watchers are set up
PyInterpreterState *interp = _PyInterpreterState_GET();

@ -320,13 +365,18 @@ optimize_uops(
ctx->frame = frame;

_PyUOpInstruction *this_instr = NULL;
JitOptRef *stack_pointer = ctx->frame->stack_pointer;

for (int i = 0; !ctx->done; i++) {
assert(i < trace_len);
this_instr = &trace[i];

int oparg = this_instr->oparg;
opcode = this_instr->opcode;
JitOptRef *stack_pointer = ctx->frame->stack_pointer;

if (!CURRENT_FRAME_IS_INIT_SHIM()) {
stack_pointer = ctx->frame->stack_pointer;
}

#ifdef Py_DEBUG
if (get_lltrace() >= 3) {

@ -345,9 +395,11 @@ optimize_uops(
Py_UNREACHABLE();
}
assert(ctx->frame != NULL);
DPRINTF(3, " stack_level %d\n", STACK_LEVEL());
ctx->frame->stack_pointer = stack_pointer;
assert(STACK_LEVEL() >= 0);
if (!CURRENT_FRAME_IS_INIT_SHIM()) {
DPRINTF(3, " stack_level %d\n", STACK_LEVEL());
ctx->frame->stack_pointer = stack_pointer;
assert(STACK_LEVEL() >= 0);
}
}
if (ctx->out_of_space) {
DPRINTF(3, "\n");

@ -355,27 +407,21 @@ optimize_uops(
}
if (ctx->contradiction) {
// Attempted to push a "bottom" (contradiction) symbol onto the stack.
// This means that the abstract interpreter has hit unreachable code.
// This means that the abstract interpreter has optimized the trace
// to an unreachable state.
// We *could* generate an _EXIT_TRACE or _FATAL_ERROR here, but hitting
// bottom indicates type instability, so we are probably better off
// bottom usually indicates an optimizer bug, so we are probably better off
// retrying later.
DPRINTF(3, "\n");
DPRINTF(1, "Hit bottom in abstract interpreter\n");
_Py_uop_abstractcontext_fini(ctx);
OPT_STAT_INC(optimizer_contradiction);
return 0;
}

/* Either reached the end or cannot optimize further, but there
* would be no benefit in retrying later */
_Py_uop_abstractcontext_fini(ctx);
if (first_valid_check_stack != NULL) {
assert(first_valid_check_stack->opcode == _CHECK_STACK_SPACE);
assert(max_space > 0);
assert(max_space <= INT_MAX);
assert(max_space <= INT32_MAX);
first_valid_check_stack->opcode = _CHECK_STACK_SPACE_OPERAND;
first_valid_check_stack->operand0 = max_space;
}
return trace_len;

error:

@ -460,6 +506,7 @@ remove_unneeded_uops(_PyUOpInstruction *buffer, int buffer_size)
buffer[pc].opcode = _NOP;
}
break;
case _EXIT_TRACE:
default:
{
// Cancel out pushes and pops, repeatedly. So:

@ -493,7 +540,7 @@ remove_unneeded_uops(_PyUOpInstruction *buffer, int buffer_size)
}
/* _PUSH_FRAME doesn't escape or error, but it
* does need the IP for the return address */
bool needs_ip = opcode == _PUSH_FRAME;
bool needs_ip = (opcode == _PUSH_FRAME || opcode == _YIELD_VALUE || opcode == _DYNAMIC_EXIT || opcode == _EXIT_TRACE);
if (_PyUop_Flags[opcode] & HAS_ESCAPES_FLAG) {
needs_ip = true;
may_have_escaped = true;

@ -503,10 +550,14 @@ remove_unneeded_uops(_PyUOpInstruction *buffer, int buffer_size)
buffer[last_set_ip].opcode = _SET_IP;
last_set_ip = -1;
}
if (opcode == _EXIT_TRACE) {
return pc + 1;
}
break;
}
case _JUMP_TO_TOP:
case _EXIT_TRACE:
case _DYNAMIC_EXIT:
case _DEOPT:
return pc + 1;
}
}

@ -518,7 +569,7 @@ remove_unneeded_uops(_PyUOpInstruction *buffer, int buffer_size)
// > 0 - length of optimized trace
int
_Py_uop_analyze_and_optimize(
_PyInterpreterFrame *frame,
PyFunctionObject *func,
_PyUOpInstruction *buffer,
int length,
int curr_stacklen,

@ -528,8 +579,8 @@ _Py_uop_analyze_and_optimize(
OPT_STAT_INC(optimizer_attempts);

length = optimize_uops(
_PyFrame_GetFunction(frame), buffer,
length, curr_stacklen, dependencies);
func, buffer,
length, curr_stacklen, dependencies);

if (length == 0) {
return length;

@ -342,7 +342,6 @@ dummy_func(void) {
int already_bool = optimize_to_bool(this_instr, ctx, value, &value);
if (!already_bool) {
sym_set_type(value, &PyBool_Type);
value = sym_new_truthiness(ctx, value, true);
}
}

@ -752,8 +751,14 @@ dummy_func(void) {
}

op(_PY_FRAME_KW, (callable, self_or_null, args[oparg], kwnames -- new_frame)) {
new_frame = PyJitRef_NULL;
ctx->done = true;
assert((this_instr + 2)->opcode == _PUSH_FRAME);
PyCodeObject *co = get_code_with_logging((this_instr + 2));
if (co == NULL) {
ctx->done = true;
break;
}

new_frame = PyJitRef_Wrap((JitOptSymbol *)frame_new(ctx, co, 0, NULL, 0));
}

op(_CHECK_AND_ALLOCATE_OBJECT, (type_version/2, callable, self_or_null, args[oparg] -- callable, self_or_null, args[oparg])) {

@ -764,8 +769,20 @@ dummy_func(void) {
}

op(_CREATE_INIT_FRAME, (init, self, args[oparg] -- init_frame)) {
init_frame = PyJitRef_NULL;
ctx->done = true;
ctx->frame->stack_pointer = stack_pointer - oparg - 2;
_Py_UOpsAbstractFrame *shim = frame_new(ctx, (PyCodeObject *)&_Py_InitCleanup, 0, NULL, 0);
if (shim == NULL) {
break;
}
/* Push self onto stack of shim */
shim->stack[0] = self;
shim->stack_pointer++;
assert((int)(shim->stack_pointer - shim->stack) == 1);
ctx->frame = shim;
ctx->curr_frame_depth++;
assert((this_instr + 1)->opcode == _PUSH_FRAME);
PyCodeObject *co = get_code_with_logging((this_instr + 1));
init_frame = PyJitRef_Wrap((JitOptSymbol *)frame_new(ctx, co, 0, args-1, oparg+1));
}

op(_RETURN_VALUE, (retval -- res)) {

@ -773,42 +790,65 @@ dummy_func(void) {
JitOptRef temp = PyJitRef_StripReferenceInfo(retval);
DEAD(retval);
SAVE_STACK();
PyCodeObject *co = get_current_code_object(ctx);
ctx->frame->stack_pointer = stack_pointer;
frame_pop(ctx);
PyCodeObject *returning_code = get_code_with_logging(this_instr);
if (returning_code == NULL) {
ctx->done = true;
break;
}
int returning_stacklevel = this_instr->operand1;
if (ctx->curr_frame_depth >= 2) {
PyCodeObject *expected_code = ctx->frames[ctx->curr_frame_depth - 2].code;
if (expected_code == returning_code) {
assert((this_instr + 1)->opcode == _GUARD_IP_RETURN_VALUE);
REPLACE_OP((this_instr + 1), _NOP, 0, 0);
}
}
if (frame_pop(ctx, returning_code, returning_stacklevel)) {
break;
}
stack_pointer = ctx->frame->stack_pointer;

/* Stack space handling */
assert(corresponding_check_stack == NULL);
assert(co != NULL);
int framesize = co->co_framesize;
assert(framesize > 0);
assert(framesize <= curr_space);
curr_space -= framesize;

RELOAD_STACK();
res = temp;
}

op(_RETURN_GENERATOR, ( -- res)) {
SYNC_SP();
PyCodeObject *co = get_current_code_object(ctx);
ctx->frame->stack_pointer = stack_pointer;
frame_pop(ctx);
PyCodeObject *returning_code = get_code_with_logging(this_instr);
if (returning_code == NULL) {
ctx->done = true;
break;
}
_Py_BloomFilter_Add(dependencies, returning_code);
int returning_stacklevel = this_instr->operand1;
if (frame_pop(ctx, returning_code, returning_stacklevel)) {
break;
}
stack_pointer = ctx->frame->stack_pointer;
res = sym_new_unknown(ctx);

/* Stack space handling */
assert(corresponding_check_stack == NULL);
assert(co != NULL);
int framesize = co->co_framesize;
assert(framesize > 0);
assert(framesize <= curr_space);
curr_space -= framesize;
}

op(_YIELD_VALUE, (unused -- value)) {
value = sym_new_unknown(ctx);
op(_YIELD_VALUE, (retval -- value)) {
// Mimics PyStackRef_MakeHeapSafe in the interpreter.
JitOptRef temp = PyJitRef_StripReferenceInfo(retval);
DEAD(retval);
SAVE_STACK();
ctx->frame->stack_pointer = stack_pointer;
PyCodeObject *returning_code = get_code_with_logging(this_instr);
if (returning_code == NULL) {
ctx->done = true;
break;
}
_Py_BloomFilter_Add(dependencies, returning_code);
int returning_stacklevel = this_instr->operand1;
if (frame_pop(ctx, returning_code, returning_stacklevel)) {
break;
}
stack_pointer = ctx->frame->stack_pointer;
RELOAD_STACK();
value = temp;
}

op(_GET_ITER, (iterable -- iter, index_or_null)) {

@ -835,8 +875,6 @@ dummy_func(void) {
}

op(_CHECK_STACK_SPACE, (unused, unused, unused[oparg] -- unused, unused, unused[oparg])) {
assert(corresponding_check_stack == NULL);
corresponding_check_stack = this_instr;
}

op (_CHECK_STACK_SPACE_OPERAND, (framesize/2 -- )) {

@ -848,38 +886,29 @@ dummy_func(void) {

op(_PUSH_FRAME, (new_frame -- )) {
SYNC_SP();
ctx->frame->stack_pointer = stack_pointer;
if (!CURRENT_FRAME_IS_INIT_SHIM()) {
ctx->frame->stack_pointer = stack_pointer;
}
ctx->frame = (_Py_UOpsAbstractFrame *)PyJitRef_Unwrap(new_frame);
ctx->curr_frame_depth++;
stack_pointer = ctx->frame->stack_pointer;
uint64_t operand = this_instr->operand0;
if (operand == 0 || (operand & 1)) {
// It's either a code object or NULL
if (operand == 0) {
ctx->done = true;
break;
}
PyFunctionObject *func = (PyFunctionObject *)operand;
PyCodeObject *co = (PyCodeObject *)func->func_code;
assert(PyFunction_Check(func));
ctx->frame->func = func;
/* Stack space handling */
int framesize = co->co_framesize;
assert(framesize > 0);
curr_space += framesize;
if (curr_space < 0 || curr_space > INT32_MAX) {
// won't fit in signed 32-bit int
ctx->done = true;
break;
if (!(operand & 1)) {
PyFunctionObject *func = (PyFunctionObject *)operand;
// No need to re-add to dependencies here. Already
// handled by the tracer.
ctx->frame->func = func;
}
max_space = curr_space > max_space ? curr_space : max_space;
if (first_valid_check_stack == NULL) {
first_valid_check_stack = corresponding_check_stack;
// Fixed calls don't need IP guards.
if ((this_instr-1)->opcode == _SAVE_RETURN_OFFSET ||
(this_instr-1)->opcode == _CREATE_INIT_FRAME) {
assert((this_instr+1)->opcode == _GUARD_IP__PUSH_FRAME);
REPLACE_OP(this_instr+1, _NOP, 0, 0);
}
else if (corresponding_check_stack) {
// delete all but the first valid _CHECK_STACK_SPACE
corresponding_check_stack->opcode = _NOP;
}
corresponding_check_stack = NULL;
}

op(_UNPACK_SEQUENCE, (seq -- values[oparg], top[0])) {

@ -1024,6 +1053,10 @@ dummy_func(void) {
ctx->done = true;
}

op(_DEOPT, (--)) {
ctx->done = true;
}

op(_REPLACE_WITH_TRUE, (value -- res)) {
REPLACE_OP(this_instr, _POP_TOP_LOAD_CONST_INLINE_BORROW, 0, (uintptr_t)Py_True);
res = sym_new_const(ctx, Py_True);
670 Python/optimizer_cases.c.h (generated; file diff suppressed because it is too large)
@ -817,9 +817,14 @@ _Py_uop_frame_new(
JitOptRef *args,
int arg_len)
{
assert(ctx->curr_frame_depth < MAX_ABSTRACT_FRAME_DEPTH);
if (ctx->curr_frame_depth >= MAX_ABSTRACT_FRAME_DEPTH) {
ctx->done = true;
ctx->out_of_space = true;
OPT_STAT_INC(optimizer_frame_overflow);
return NULL;
}
_Py_UOpsAbstractFrame *frame = &ctx->frames[ctx->curr_frame_depth];

frame->code = co;
frame->stack_len = co->co_stacksize;
frame->locals_len = co->co_nlocalsplus;

@ -901,13 +906,42 @@ _Py_uop_abstractcontext_init(JitOptContext *ctx)
}

int
_Py_uop_frame_pop(JitOptContext *ctx)
_Py_uop_frame_pop(JitOptContext *ctx, PyCodeObject *co, int curr_stackentries)
{
_Py_UOpsAbstractFrame *frame = ctx->frame;
ctx->n_consumed = frame->locals;

ctx->curr_frame_depth--;
assert(ctx->curr_frame_depth >= 1);
ctx->frame = &ctx->frames[ctx->curr_frame_depth - 1];

if (ctx->curr_frame_depth >= 1) {
ctx->frame = &ctx->frames[ctx->curr_frame_depth - 1];

// We returned to the correct code. Nothing to do here.
if (co == ctx->frame->code) {
return 0;
}
// Else: the code we recorded doesn't match the code we *think* we're
// returning to. We could have traced anything, so we can't just return
// to the old frame. We have to restore what the tracer recorded
// as the traced next frame.
// Remove the current frame, and later swap it out with the right one.
else {
ctx->curr_frame_depth--;
}
}
// Else: trace stack underflow.

// This handles swapping out frames.
assert(curr_stackentries >= 1);
// Subtract 1 from stackentries because we push our return value onto the stack after this.
_Py_UOpsAbstractFrame *new_frame = _Py_uop_frame_new(ctx, co, curr_stackentries - 1, NULL, 0);
if (new_frame == NULL) {
ctx->done = true;
return 1;
}

ctx->curr_frame_depth++;
ctx->frame = new_frame;

return 0;
}

@ -29,7 +29,7 @@ static Py_ssize_t hashstats[Py_HASH_STATS_MAX + 1] = {0};
#endif

/* For numeric types, the hash of a number x is based on the reduction
of x modulo the prime P = 2**_PyHASH_BITS - 1. It's designed so that
of x modulo the prime P = 2**PyHASH_BITS - 1. It's designed so that
hash(x) == hash(y) whenever x and y are numerically equal, even if
x and y have different types.

@ -52,8 +52,8 @@ static Py_ssize_t hashstats[Py_HASH_STATS_MAX + 1] = {0};

If the result of the reduction is infinity (this is impossible for
integers, floats and Decimals) then use the predefined hash value
_PyHASH_INF for x >= 0, or -_PyHASH_INF for x < 0, instead.
_PyHASH_INF and -_PyHASH_INF are also used for the
PyHASH_INF for x >= 0, or -PyHASH_INF for x < 0, instead.
PyHASH_INF and -PyHASH_INF are also used for the
hashes of float and Decimal infinities.

NaNs hash with a pointer hash. Having distinct hash values prevents

@ -65,16 +65,16 @@ static Py_ssize_t hashstats[Py_HASH_STATS_MAX + 1] = {0};
efficiently, even if the exponent of the binary or decimal number
is large. The key point is that

reduce(x * y) == reduce(x) * reduce(y) (modulo _PyHASH_MODULUS)
reduce(x * y) == reduce(x) * reduce(y) (modulo PyHASH_MODULUS)

provided that {reduce(x), reduce(y)} != {0, infinity}. The reduction of a
binary or decimal float is never infinity, since the denominator is a power
of 2 (for binary) or a divisor of a power of 10 (for decimal). So we have,
for nonnegative x,

reduce(x * 2**e) == reduce(x) * reduce(2**e) % _PyHASH_MODULUS
reduce(x * 2**e) == reduce(x) * reduce(2**e) % PyHASH_MODULUS

reduce(x * 10**e) == reduce(x) * reduce(10**e) % _PyHASH_MODULUS
reduce(x * 10**e) == reduce(x) * reduce(10**e) % PyHASH_MODULUS

and reduce(10**e) can be computed efficiently by the usual modular
exponentiation algorithm. For reduce(2**e) it's even better: since
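To make the reduce(2**e) remark concrete: with P = 2**61 - 1 (the 64-bit PyHASH_MODULUS), 2**61 == 1 (mod P), so multiplying by 2**e modulo P is just a rotation within 61 bits. A sketch with a hypothetical helper name:

#include <stdint.h>

/* Sketch: reduce(x * 2**e) for 0 <= x < P, where P = 2**61 - 1.
 * Rotating the 61-bit value left by e is multiplication by 2**e mod P,
 * because bits shifted past position 60 re-enter at bit 0. */
static uint64_t
reduce_times_pow2(uint64_t x, unsigned int e)
{
    const uint64_t P = (((uint64_t)1) << 61) - 1;
    e %= 61;
    return ((x << e) & P) | (x >> (61 - e));
}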
@ -92,7 +92,7 @@ _Py_HashDouble(PyObject *inst, double v)

if (!isfinite(v)) {
if (isinf(v))
return v > 0 ? _PyHASH_INF : -_PyHASH_INF;
return v > 0 ? PyHASH_INF : -PyHASH_INF;
else
return PyObject_GenericHash(inst);
}

@ -109,19 +109,19 @@ _Py_HashDouble(PyObject *inst, double v)
and hexadecimal floating point. */
x = 0;
while (m) {
x = ((x << 28) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - 28);
x = ((x << 28) & PyHASH_MODULUS) | x >> (PyHASH_BITS - 28);
m *= 268435456.0; /* 2**28 */
e -= 28;
y = (Py_uhash_t)m; /* pull out integer part */
m -= y;
x += y;
if (x >= _PyHASH_MODULUS)
x -= _PyHASH_MODULUS;
if (x >= PyHASH_MODULUS)
x -= PyHASH_MODULUS;
}

/* adjust for the exponent; first reduce it modulo _PyHASH_BITS */
e = e >= 0 ? e % _PyHASH_BITS : _PyHASH_BITS-1-((-1-e) % _PyHASH_BITS);
x = ((x << e) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - e);
/* adjust for the exponent; first reduce it modulo PyHASH_BITS */
e = e >= 0 ? e % PyHASH_BITS : PyHASH_BITS-1-((-1-e) % PyHASH_BITS);
x = ((x << e) & PyHASH_MODULUS) | x >> (PyHASH_BITS - e);

x = x * sign;
if (x == (Py_uhash_t)-1)

@ -26,6 +26,7 @@
#include "pycore_runtime.h" // _Py_ID()
#include "pycore_runtime_init.h" // _PyRuntimeState_INIT
#include "pycore_setobject.h" // _PySet_NextEntry()
#include "pycore_stats.h" // _PyStats_InterpInit()
#include "pycore_sysmodule.h" // _PySys_ClearAttrString()
#include "pycore_traceback.h" // _Py_DumpTracebackThreads()
#include "pycore_typeobject.h" // _PyTypes_InitTypes()

@ -656,6 +657,14 @@ pycore_create_interpreter(_PyRuntimeState *runtime,
return status;
}

#ifdef Py_STATS
// initialize pystats. This must be done after the settings are loaded.
status = _PyStats_InterpInit(interp);
if (_PyStatus_EXCEPTION(status)) {
return status;
}
#endif

// initialize the interp->obmalloc state. This must be done after
// the settings are loaded (so that feature_flags are set) but before
// any calls are made to obmalloc functions.

@ -2484,6 +2493,14 @@ new_interpreter(PyThreadState **tstate_p,
return status;
}

#ifdef Py_STATS
// initialize pystats. This must be done after the settings are loaded.
status = _PyStats_InterpInit(interp);
if (_PyStatus_EXCEPTION(status)) {
return status;
}
#endif

// initialize the interp->obmalloc state. This must be done after
// the settings are loaded (so that feature_flags are set) but before
// any calls are made to obmalloc functions.

@ -2641,7 +2658,7 @@ finalize_subinterpreters(void)
(void)PyErr_WarnEx(
PyExc_RuntimeWarning,
"remaining subinterpreters; "
"destroy them with _interpreters.destroy()",
"close them with Interpreter.close()",
0);

/* Swap out the current tstate, which we know must belong

@ -21,6 +21,7 @@
#include "pycore_runtime.h" // _PyRuntime
#include "pycore_runtime_init.h" // _PyRuntimeState_INIT
#include "pycore_stackref.h" // Py_STACKREF_DEBUG
#include "pycore_stats.h" // FT_STAT_WORLD_STOP_INC()
#include "pycore_time.h" // _PyTime_Init()
#include "pycore_uop.h" // UOP_BUFFER_SIZE
#include "pycore_uniqueid.h" // _PyObject_FinalizePerThreadRefcounts()

@ -465,6 +466,12 @@ alloc_interpreter(void)
static void
free_interpreter(PyInterpreterState *interp)
{
#ifdef Py_STATS
if (interp->pystats_struct) {
PyMem_RawFree(interp->pystats_struct);
interp->pystats_struct = NULL;
}
#endif
// The main interpreter is statically allocated so
// should not be freed.
if (interp != &_PyRuntime._main_interpreter) {

@ -545,10 +552,6 @@ init_interpreter(PyInterpreterState *interp,
_Py_brc_init_state(interp);
#endif

#ifdef _Py_TIER2
// Ensure the buffer starts out NULL.
interp->jit_uop_buffer = NULL;
#endif
llist_init(&interp->mem_free_queue.head);
llist_init(&interp->asyncio_tasks_head);
interp->asyncio_tasks_lock = (PyMutex){0};

@ -798,10 +801,6 @@ interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate)

#ifdef _Py_TIER2
_Py_ClearExecutorDeletionList(interp);
if (interp->jit_uop_buffer != NULL) {
_PyObject_VirtualFree(interp->jit_uop_buffer, UOP_BUFFER_SIZE);
interp->jit_uop_buffer = NULL;
}
#endif
_PyAST_Fini(interp);
_PyAtExit_Fini(interp);

@ -824,6 +823,14 @@ interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate)
assert(cold->vm_data.warm);
_PyExecutor_Free(cold);
}

struct _PyExecutorObject *cold_dynamic = interp->cold_dynamic_executor;
if (cold_dynamic != NULL) {
interp->cold_dynamic_executor = NULL;
assert(cold_dynamic->vm_data.valid);
assert(cold_dynamic->vm_data.warm);
_PyExecutor_Free(cold_dynamic);
}
/* We don't clear sysdict and builtins until the end of this function.
Because clearing other attributes can execute arbitrary Python code
which requires sysdict and builtins. */

@ -1407,6 +1414,9 @@ static void
free_threadstate(_PyThreadStateImpl *tstate)
{
PyInterpreterState *interp = tstate->base.interp;
#ifdef Py_STATS
_PyStats_ThreadFini(tstate);
#endif
// The initial thread state of the interpreter is allocated
// as part of the interpreter state so should not be freed.
if (tstate == &interp->_initial_thread) {

@ -1485,9 +1495,15 @@ init_threadstate(_PyThreadStateImpl *_tstate,
_tstate->c_stack_top = 0;
_tstate->c_stack_hard_limit = 0;

_tstate->c_stack_init_base = 0;
_tstate->c_stack_init_top = 0;

_tstate->asyncio_running_loop = NULL;
_tstate->asyncio_running_task = NULL;

#ifdef _Py_TIER2
_tstate->jit_tracer_state.code_buffer = NULL;
#endif
tstate->delete_later = NULL;

llist_init(&_tstate->mem_free_queue);

@ -1535,6 +1551,13 @@ new_threadstate(PyInterpreterState *interp, int whence)
return NULL;
}
#endif
#ifdef Py_STATS
// The PyStats structure is quite large and is allocated separately from tstate.
if (!_PyStats_ThreadInit(interp, tstate)) {
free_threadstate(tstate);
return NULL;
}
#endif
|
||||
/* We serialize concurrent creation to protect global state. */
|
||||
HEAD_LOCK(interp->runtime);
|
||||
|
|
@@ -1787,6 +1810,14 @@ tstate_delete_common(PyThreadState *tstate, int release_gil)
    assert(tstate_impl->refcounts.values == NULL);
#endif

#if _Py_TIER2
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    if (_tstate->jit_tracer_state.code_buffer != NULL) {
        _PyObject_VirtualFree(_tstate->jit_tracer_state.code_buffer, UOP_BUFFER_SIZE);
        _tstate->jit_tracer_state.code_buffer = NULL;
    }
#endif

    HEAD_UNLOCK(runtime);

    // XXX Unbind in PyThreadState_Clear(), or earlier
@@ -1846,6 +1877,9 @@ _PyThreadState_DeleteCurrent(PyThreadState *tstate)
    _Py_EnsureTstateNotNULL(tstate);
#ifdef Py_GIL_DISABLED
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
#endif
#ifdef Py_STATS
    _PyStats_Detach((_PyThreadStateImpl *)tstate);
#endif
    current_fast_clear(tstate->interp->runtime);
    tstate_delete_common(tstate, 1);  // release GIL as part of call
@@ -2020,6 +2054,10 @@ tstate_deactivate(PyThreadState *tstate)
    assert(tstate_is_bound(tstate));
    assert(tstate->_status.active);

#if Py_STATS
    _PyStats_Detach((_PyThreadStateImpl *)tstate);
#endif

    tstate->_status.active = 0;

    // We do not unbind the gilstate tstate here.
@@ -2123,6 +2161,10 @@ _PyThreadState_Attach(PyThreadState *tstate)
        _PyCriticalSection_Resume(tstate);
    }

#ifdef Py_STATS
    _PyStats_Attach((_PyThreadStateImpl *)tstate);
#endif

#if defined(Py_DEBUG)
    errno = err;
#endif
@@ -2272,6 +2314,7 @@ stop_the_world(struct _stoptheworld_state *stw)
    stw->thread_countdown = 0;
    stw->stop_event = (PyEvent){0};  // zero-initialize (unset)
    stw->requester = _PyThreadState_GET();  // may be NULL
    FT_STAT_WORLD_STOP_INC();

    _Py_FOR_EACH_STW_INTERP(stw, i) {
        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
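Taken together, the pystate.c hunks above route every thread through _PyStats_Attach()/_PyStats_Detach() and free the per-thread block in _PyStats_ThreadFini(), so the hot counting paths only ever have to test a single per-thread pointer. A minimal sketch of that attach/detach pattern, with toy names standing in for the real Py_STATS machinery (which hangs the block off _PyThreadStateImpl rather than a thread-local):

/* Illustrative sketch only -- not part of this commit. */
#include <stdint.h>

typedef struct { uint64_t world_stops; } toy_stats;

static _Thread_local toy_stats toy_block;
static _Thread_local toy_stats *toy_active;  /* NULL means "stats off" */

#define TOY_STAT_INC(field) \
    do { if (toy_active) { toy_active->field++; } } while (0)

static void toy_attach(void) { toy_active = &toy_block; }  /* thread resumes */
static void toy_detach(void) { toy_active = NULL; }        /* thread suspends/exits */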
Python/pystats.c (new file, 819 lines)
@@ -0,0 +1,819 @@
#include "Python.h"

#include "pycore_opcode_metadata.h" // _PyOpcode_Caches
#include "pycore_pyatomic_ft_wrappers.h"
#include "pycore_pylifecycle.h" // _PyOS_URandomNonblock()
#include "pycore_tstate.h"
#include "pycore_initconfig.h" // _PyStatus_OK()
#include "pycore_uop_metadata.h" // _PyOpcode_uop_name
#include "pycore_uop_ids.h" // MAX_UOP_ID
#include "pycore_pystate.h" // _PyThreadState_GET()
#include "pycore_runtime.h" // NUM_GENERATIONS

#include <stdlib.h> // rand()

#ifdef Py_STATS

PyStats *
_PyStats_GetLocal(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    if (tstate) {
        return tstate->pystats;
    }
    return NULL;
}

#ifdef Py_GIL_DISABLED
#define STATS_LOCK(interp) PyMutex_Lock(&interp->pystats_mutex)
#define STATS_UNLOCK(interp) PyMutex_Unlock(&interp->pystats_mutex)
#else
#define STATS_LOCK(interp)
#define STATS_UNLOCK(interp)
#endif


#if PYSTATS_MAX_UOP_ID < MAX_UOP_ID
#error "Not enough space allocated for pystats. Increase PYSTATS_MAX_UOP_ID to at least MAX_UOP_ID"
#endif

#define ADD_STAT_TO_DICT(res, field) \
    do { \
        PyObject *val = PyLong_FromUnsignedLongLong(stats->field); \
        if (val == NULL) { \
            Py_DECREF(res); \
            return NULL; \
        } \
        if (PyDict_SetItemString(res, #field, val) == -1) { \
            Py_DECREF(res); \
            Py_DECREF(val); \
            return NULL; \
        } \
        Py_DECREF(val); \
    } while(0);
static PyObject*
stats_to_dict(SpecializationStats *stats)
{
    PyObject *res = PyDict_New();
    if (res == NULL) {
        return NULL;
    }
    ADD_STAT_TO_DICT(res, success);
    ADD_STAT_TO_DICT(res, failure);
    ADD_STAT_TO_DICT(res, hit);
    ADD_STAT_TO_DICT(res, deferred);
    ADD_STAT_TO_DICT(res, miss);
    ADD_STAT_TO_DICT(res, deopt);
    PyObject *failure_kinds = PyTuple_New(SPECIALIZATION_FAILURE_KINDS);
    if (failure_kinds == NULL) {
        Py_DECREF(res);
        return NULL;
    }
    for (int i = 0; i < SPECIALIZATION_FAILURE_KINDS; i++) {
        PyObject *stat = PyLong_FromUnsignedLongLong(stats->failure_kinds[i]);
        if (stat == NULL) {
            Py_DECREF(res);
            Py_DECREF(failure_kinds);
            return NULL;
        }
        PyTuple_SET_ITEM(failure_kinds, i, stat);
    }
    if (PyDict_SetItemString(res, "failure_kinds", failure_kinds)) {
        Py_DECREF(res);
        Py_DECREF(failure_kinds);
        return NULL;
    }
    Py_DECREF(failure_kinds);
    return res;
}
#undef ADD_STAT_TO_DICT

static int
add_stat_dict(
    PyStats *src,
    PyObject *res,
    int opcode,
    const char *name) {

    SpecializationStats *stats = &src->opcode_stats[opcode].specialization;
    PyObject *d = stats_to_dict(stats);
    if (d == NULL) {
        return -1;
    }
    int err = PyDict_SetItemString(res, name, d);
    Py_DECREF(d);
    return err;
}
PyObject*
_Py_GetSpecializationStats(void) {
    PyThreadState *tstate = _PyThreadState_GET();
    PyStats *src = FT_ATOMIC_LOAD_PTR_RELAXED(tstate->interp->pystats_struct);
    if (src == NULL) {
        Py_RETURN_NONE;
    }
    PyObject *stats = PyDict_New();
    if (stats == NULL) {
        return NULL;
    }
    int err = 0;
    err += add_stat_dict(src, stats, CONTAINS_OP, "contains_op");
    err += add_stat_dict(src, stats, LOAD_SUPER_ATTR, "load_super_attr");
    err += add_stat_dict(src, stats, LOAD_ATTR, "load_attr");
    err += add_stat_dict(src, stats, LOAD_GLOBAL, "load_global");
    err += add_stat_dict(src, stats, STORE_SUBSCR, "store_subscr");
    err += add_stat_dict(src, stats, STORE_ATTR, "store_attr");
    err += add_stat_dict(src, stats, JUMP_BACKWARD, "jump_backward");
    err += add_stat_dict(src, stats, CALL, "call");
    err += add_stat_dict(src, stats, CALL_KW, "call_kw");
    err += add_stat_dict(src, stats, BINARY_OP, "binary_op");
    err += add_stat_dict(src, stats, COMPARE_OP, "compare_op");
    err += add_stat_dict(src, stats, UNPACK_SEQUENCE, "unpack_sequence");
    err += add_stat_dict(src, stats, FOR_ITER, "for_iter");
    err += add_stat_dict(src, stats, TO_BOOL, "to_bool");
    err += add_stat_dict(src, stats, SEND, "send");
    if (err < 0) {
        Py_DECREF(stats);
        return NULL;
    }
    return stats;
}

#define PRINT_STAT(i, field) \
    if (stats[i].field) { \
        fprintf(out, "    opcode[%s]." #field " : %" PRIu64 "\n", _PyOpcode_OpName[i], stats[i].field); \
    }

static void
print_spec_stats(FILE *out, OpcodeStats *stats)
{
    /* Mark some opcodes as specializable for stats,
     * even though we don't specialize them yet. */
    fprintf(out, "opcode[BINARY_SLICE].specializable : 1\n");
    fprintf(out, "opcode[STORE_SLICE].specializable : 1\n");
    fprintf(out, "opcode[GET_ITER].specializable : 1\n");
    for (int i = 0; i < 256; i++) {
        if (_PyOpcode_Caches[i]) {
            /* Ignore jumps as they cannot be specialized */
            switch (i) {
                case POP_JUMP_IF_FALSE:
                case POP_JUMP_IF_TRUE:
                case POP_JUMP_IF_NONE:
                case POP_JUMP_IF_NOT_NONE:
                case JUMP_BACKWARD:
                    break;
                default:
                    fprintf(out, "opcode[%s].specializable : 1\n", _PyOpcode_OpName[i]);
            }
        }
        PRINT_STAT(i, specialization.success);
        PRINT_STAT(i, specialization.failure);
        PRINT_STAT(i, specialization.hit);
        PRINT_STAT(i, specialization.deferred);
        PRINT_STAT(i, specialization.miss);
        PRINT_STAT(i, specialization.deopt);
        PRINT_STAT(i, execution_count);
        for (int j = 0; j < SPECIALIZATION_FAILURE_KINDS; j++) {
            uint64_t val = stats[i].specialization.failure_kinds[j];
            if (val) {
                fprintf(out, "    opcode[%s].specialization.failure_kinds[%d] : %"
                    PRIu64 "\n", _PyOpcode_OpName[i], j, val);
            }
        }
        for (int j = 0; j < 256; j++) {
            if (stats[i].pair_count[j]) {
                fprintf(out, "opcode[%s].pair_count[%s] : %" PRIu64 "\n",
                        _PyOpcode_OpName[i], _PyOpcode_OpName[j], stats[i].pair_count[j]);
            }
        }
    }
}
#undef PRINT_STAT

static void
print_call_stats(FILE *out, CallStats *stats)
{
    fprintf(out, "Calls to PyEval_EvalDefault: %" PRIu64 "\n", stats->pyeval_calls);
    fprintf(out, "Calls to Python functions inlined: %" PRIu64 "\n", stats->inlined_py_calls);
    fprintf(out, "Frames pushed: %" PRIu64 "\n", stats->frames_pushed);
    fprintf(out, "Frame objects created: %" PRIu64 "\n", stats->frame_objects_created);
    for (int i = 0; i < EVAL_CALL_KINDS; i++) {
        fprintf(out, "Calls via PyEval_EvalFrame[%d] : %" PRIu64 "\n", i, stats->eval_calls[i]);
    }
}

static void
print_object_stats(FILE *out, ObjectStats *stats)
{
    fprintf(out, "Object allocations from freelist: %" PRIu64 "\n", stats->from_freelist);
    fprintf(out, "Object frees to freelist: %" PRIu64 "\n", stats->to_freelist);
    fprintf(out, "Object allocations: %" PRIu64 "\n", stats->allocations);
    fprintf(out, "Object allocations to 512 bytes: %" PRIu64 "\n", stats->allocations512);
    fprintf(out, "Object allocations to 4 kbytes: %" PRIu64 "\n", stats->allocations4k);
    fprintf(out, "Object allocations over 4 kbytes: %" PRIu64 "\n", stats->allocations_big);
    fprintf(out, "Object frees: %" PRIu64 "\n", stats->frees);
    fprintf(out, "Object inline values: %" PRIu64 "\n", stats->inline_values);
    fprintf(out, "Object interpreter mortal increfs: %" PRIu64 "\n", stats->interpreter_increfs);
    fprintf(out, "Object interpreter mortal decrefs: %" PRIu64 "\n", stats->interpreter_decrefs);
    fprintf(out, "Object mortal increfs: %" PRIu64 "\n", stats->increfs);
    fprintf(out, "Object mortal decrefs: %" PRIu64 "\n", stats->decrefs);
    fprintf(out, "Object interpreter immortal increfs: %" PRIu64 "\n", stats->interpreter_immortal_increfs);
    fprintf(out, "Object interpreter immortal decrefs: %" PRIu64 "\n", stats->interpreter_immortal_decrefs);
    fprintf(out, "Object immortal increfs: %" PRIu64 "\n", stats->immortal_increfs);
    fprintf(out, "Object immortal decrefs: %" PRIu64 "\n", stats->immortal_decrefs);
    fprintf(out, "Object materialize dict (on request): %" PRIu64 "\n", stats->dict_materialized_on_request);
    fprintf(out, "Object materialize dict (new key): %" PRIu64 "\n", stats->dict_materialized_new_key);
    fprintf(out, "Object materialize dict (too big): %" PRIu64 "\n", stats->dict_materialized_too_big);
    fprintf(out, "Object materialize dict (str subclass): %" PRIu64 "\n", stats->dict_materialized_str_subclass);
    fprintf(out, "Object method cache hits: %" PRIu64 "\n", stats->type_cache_hits);
    fprintf(out, "Object method cache misses: %" PRIu64 "\n", stats->type_cache_misses);
    fprintf(out, "Object method cache collisions: %" PRIu64 "\n", stats->type_cache_collisions);
    fprintf(out, "Object method cache dunder hits: %" PRIu64 "\n", stats->type_cache_dunder_hits);
    fprintf(out, "Object method cache dunder misses: %" PRIu64 "\n", stats->type_cache_dunder_misses);
}

static void
print_gc_stats(FILE *out, GCStats *stats)
{
    for (int i = 0; i < NUM_GENERATIONS; i++) {
        fprintf(out, "GC[%d] collections: %" PRIu64 "\n", i, stats[i].collections);
        fprintf(out, "GC[%d] object visits: %" PRIu64 "\n", i, stats[i].object_visits);
        fprintf(out, "GC[%d] objects collected: %" PRIu64 "\n", i, stats[i].objects_collected);
        fprintf(out, "GC[%d] objects reachable from roots: %" PRIu64 "\n", i, stats[i].objects_transitively_reachable);
        fprintf(out, "GC[%d] objects not reachable from roots: %" PRIu64 "\n", i, stats[i].objects_not_transitively_reachable);
    }
}

#ifdef _Py_TIER2
static void
print_histogram(FILE *out, const char *name, uint64_t hist[_Py_UOP_HIST_SIZE])
{
    for (int i = 0; i < _Py_UOP_HIST_SIZE; i++) {
        fprintf(out, "%s[%" PRIu64"]: %" PRIu64 "\n", name, (uint64_t)1 << i, hist[i]);
    }
}

extern const char *_PyUOpName(int index);

static void
print_optimization_stats(FILE *out, OptimizationStats *stats)
{
    fprintf(out, "Optimization attempts: %" PRIu64 "\n", stats->attempts);
    fprintf(out, "Optimization traces created: %" PRIu64 "\n", stats->traces_created);
    fprintf(out, "Optimization traces executed: %" PRIu64 "\n", stats->traces_executed);
    fprintf(out, "Optimization uops executed: %" PRIu64 "\n", stats->uops_executed);
    fprintf(out, "Optimization trace stack overflow: %" PRIu64 "\n", stats->trace_stack_overflow);
    fprintf(out, "Optimization trace stack underflow: %" PRIu64 "\n", stats->trace_stack_underflow);
    fprintf(out, "Optimization trace too long: %" PRIu64 "\n", stats->trace_too_long);
    fprintf(out, "Optimization trace too short: %" PRIu64 "\n", stats->trace_too_short);
    fprintf(out, "Optimization inner loop: %" PRIu64 "\n", stats->inner_loop);
    fprintf(out, "Optimization recursive call: %" PRIu64 "\n", stats->recursive_call);
    fprintf(out, "Optimization low confidence: %" PRIu64 "\n", stats->low_confidence);
    fprintf(out, "Optimization unknown callee: %" PRIu64 "\n", stats->unknown_callee);
    fprintf(out, "Executors invalidated: %" PRIu64 "\n", stats->executors_invalidated);

    print_histogram(out, "Trace length", stats->trace_length_hist);
    print_histogram(out, "Trace run length", stats->trace_run_length_hist);
    print_histogram(out, "Optimized trace length", stats->optimized_trace_length_hist);

    fprintf(out, "Optimization optimizer attempts: %" PRIu64 "\n", stats->optimizer_attempts);
    fprintf(out, "Optimization optimizer successes: %" PRIu64 "\n", stats->optimizer_successes);
    fprintf(out, "Optimization optimizer failure no memory: %" PRIu64 "\n",
            stats->optimizer_failure_reason_no_memory);
    fprintf(out, "Optimizer remove globals builtins changed: %" PRIu64 "\n", stats->remove_globals_builtins_changed);
    fprintf(out, "Optimizer remove globals incorrect keys: %" PRIu64 "\n", stats->remove_globals_incorrect_keys);
    for (int i = 0; i <= MAX_UOP_ID; i++) {
        if (stats->opcode[i].execution_count) {
            fprintf(out, "uops[%s].execution_count : %" PRIu64 "\n", _PyUOpName(i), stats->opcode[i].execution_count);
        }
        if (stats->opcode[i].miss) {
            fprintf(out, "uops[%s].specialization.miss : %" PRIu64 "\n", _PyUOpName(i), stats->opcode[i].miss);
        }
    }
    for (int i = 0; i < 256; i++) {
        if (stats->unsupported_opcode[i]) {
            fprintf(
                out,
                "unsupported_opcode[%s].count : %" PRIu64 "\n",
                _PyOpcode_OpName[i],
                stats->unsupported_opcode[i]
            );
        }
    }

    for (int i = 1; i <= MAX_UOP_ID; i++) {
        for (int j = 1; j <= MAX_UOP_ID; j++) {
            if (stats->opcode[i].pair_count[j]) {
                fprintf(out, "uop[%s].pair_count[%s] : %" PRIu64 "\n",
                        _PyOpcode_uop_name[i], _PyOpcode_uop_name[j], stats->opcode[i].pair_count[j]);
            }
        }
    }
    for (int i = 0; i < MAX_UOP_ID; i++) {
        if (stats->error_in_opcode[i]) {
            fprintf(
                out,
                "error_in_opcode[%s].count : %" PRIu64 "\n",
                _PyUOpName(i),
                stats->error_in_opcode[i]
            );
        }
    }
    fprintf(out, "JIT total memory size: %" PRIu64 "\n", stats->jit_total_memory_size);
    fprintf(out, "JIT code size: %" PRIu64 "\n", stats->jit_code_size);
    fprintf(out, "JIT trampoline size: %" PRIu64 "\n", stats->jit_trampoline_size);
    fprintf(out, "JIT data size: %" PRIu64 "\n", stats->jit_data_size);
    fprintf(out, "JIT padding size: %" PRIu64 "\n", stats->jit_padding_size);
    fprintf(out, "JIT freed memory size: %" PRIu64 "\n", stats->jit_freed_memory_size);

    print_histogram(out, "Trace total memory size", stats->trace_total_memory_hist);
}
#endif

#ifdef Py_GIL_DISABLED
static void
print_ft_stats(FILE *out, FTStats *stats)
{
    fprintf(out, "Mutex sleeps (mutex_sleeps): %" PRIu64 "\n", stats->mutex_sleeps);
    fprintf(out, "QSBR polls (qsbr_polls): %" PRIu64 "\n", stats->qsbr_polls);
    fprintf(out, "World stops (world_stops): %" PRIu64 "\n", stats->world_stops);
}
#endif

static void
print_rare_event_stats(FILE *out, RareEventStats *stats)
{
    fprintf(out, "Rare event (set_class): %" PRIu64 "\n", stats->set_class);
    fprintf(out, "Rare event (set_bases): %" PRIu64 "\n", stats->set_bases);
    fprintf(out, "Rare event (set_eval_frame_func): %" PRIu64 "\n", stats->set_eval_frame_func);
    fprintf(out, "Rare event (builtin_dict): %" PRIu64 "\n", stats->builtin_dict);
    fprintf(out, "Rare event (func_modification): %" PRIu64 "\n", stats->func_modification);
    fprintf(out, "Rare event (watched_dict_modification): %" PRIu64 "\n", stats->watched_dict_modification);
    fprintf(out, "Rare event (watched_globals_modification): %" PRIu64 "\n", stats->watched_globals_modification);
}

static void
print_stats(FILE *out, PyStats *stats)
{
    print_spec_stats(out, stats->opcode_stats);
    print_call_stats(out, &stats->call_stats);
    print_object_stats(out, &stats->object_stats);
    print_gc_stats(out, stats->gc_stats);
#ifdef _Py_TIER2
    print_optimization_stats(out, &stats->optimization_stats);
#endif
#ifdef Py_GIL_DISABLED
    print_ft_stats(out, &stats->ft_stats);
#endif
    print_rare_event_stats(out, &stats->rare_event_stats);
}

#ifdef Py_GIL_DISABLED

static void
merge_specialization_stats(SpecializationStats *dest, const SpecializationStats *src)
{
    dest->success += src->success;
    dest->failure += src->failure;
    dest->hit += src->hit;
    dest->deferred += src->deferred;
    dest->miss += src->miss;
    dest->deopt += src->deopt;
    for (int i = 0; i < SPECIALIZATION_FAILURE_KINDS; i++) {
        dest->failure_kinds[i] += src->failure_kinds[i];
    }
}

static void
merge_opcode_stats_array(OpcodeStats *dest, const OpcodeStats *src)
{
    for (int i = 0; i < 256; i++) {
        merge_specialization_stats(&dest[i].specialization, &src[i].specialization);
        dest[i].execution_count += src[i].execution_count;
        for (int j = 0; j < 256; j++) {
            dest[i].pair_count[j] += src[i].pair_count[j];
        }
    }
}

static void
merge_call_stats(CallStats *dest, const CallStats *src)
{
    dest->inlined_py_calls += src->inlined_py_calls;
    dest->pyeval_calls += src->pyeval_calls;
    dest->frames_pushed += src->frames_pushed;
    dest->frame_objects_created += src->frame_objects_created;
    for (int i = 0; i < EVAL_CALL_KINDS; i++) {
        dest->eval_calls[i] += src->eval_calls[i];
    }
}

static void
merge_object_stats(ObjectStats *dest, const ObjectStats *src)
{
    dest->increfs += src->increfs;
    dest->decrefs += src->decrefs;
    dest->interpreter_increfs += src->interpreter_increfs;
    dest->interpreter_decrefs += src->interpreter_decrefs;
    dest->immortal_increfs += src->immortal_increfs;
    dest->immortal_decrefs += src->immortal_decrefs;
    dest->interpreter_immortal_increfs += src->interpreter_immortal_increfs;
    dest->interpreter_immortal_decrefs += src->interpreter_immortal_decrefs;
    dest->allocations += src->allocations;
    dest->allocations512 += src->allocations512;
    dest->allocations4k += src->allocations4k;
    dest->allocations_big += src->allocations_big;
    dest->frees += src->frees;
    dest->to_freelist += src->to_freelist;
    dest->from_freelist += src->from_freelist;
    dest->inline_values += src->inline_values;
    dest->dict_materialized_on_request += src->dict_materialized_on_request;
    dest->dict_materialized_new_key += src->dict_materialized_new_key;
    dest->dict_materialized_too_big += src->dict_materialized_too_big;
    dest->dict_materialized_str_subclass += src->dict_materialized_str_subclass;
    dest->type_cache_hits += src->type_cache_hits;
    dest->type_cache_misses += src->type_cache_misses;
    dest->type_cache_dunder_hits += src->type_cache_dunder_hits;
    dest->type_cache_dunder_misses += src->type_cache_dunder_misses;
    dest->type_cache_collisions += src->type_cache_collisions;
    dest->object_visits += src->object_visits;
}

static void
merge_uop_stats_array(UOpStats *dest, const UOpStats *src)
{
    for (int i = 0; i <= PYSTATS_MAX_UOP_ID; i++) {
        dest[i].execution_count += src[i].execution_count;
        dest[i].miss += src[i].miss;
        for (int j = 0; j <= PYSTATS_MAX_UOP_ID; j++) {
            dest[i].pair_count[j] += src[i].pair_count[j];
        }
    }
}

static void
merge_optimization_stats(OptimizationStats *dest, const OptimizationStats *src)
{
    dest->attempts += src->attempts;
    dest->traces_created += src->traces_created;
    dest->traces_executed += src->traces_executed;
    dest->uops_executed += src->uops_executed;
    dest->trace_stack_overflow += src->trace_stack_overflow;
    dest->trace_stack_underflow += src->trace_stack_underflow;
    dest->trace_too_long += src->trace_too_long;
    dest->trace_too_short += src->trace_too_short;
    dest->inner_loop += src->inner_loop;
    dest->recursive_call += src->recursive_call;
    dest->low_confidence += src->low_confidence;
    dest->unknown_callee += src->unknown_callee;
    dest->executors_invalidated += src->executors_invalidated;
    dest->optimizer_attempts += src->optimizer_attempts;
    dest->optimizer_successes += src->optimizer_successes;
    dest->optimizer_failure_reason_no_memory += src->optimizer_failure_reason_no_memory;
    dest->remove_globals_builtins_changed += src->remove_globals_builtins_changed;
    dest->remove_globals_incorrect_keys += src->remove_globals_incorrect_keys;
    dest->jit_total_memory_size += src->jit_total_memory_size;
    dest->jit_code_size += src->jit_code_size;
    dest->jit_trampoline_size += src->jit_trampoline_size;
    dest->jit_data_size += src->jit_data_size;
    dest->jit_padding_size += src->jit_padding_size;
    dest->jit_freed_memory_size += src->jit_freed_memory_size;

    merge_uop_stats_array(dest->opcode, src->opcode);

    for (int i = 0; i < 256; i++) {
        dest->unsupported_opcode[i] += src->unsupported_opcode[i];
    }
    for (int i = 0; i < _Py_UOP_HIST_SIZE; i++) {
        dest->trace_length_hist[i] += src->trace_length_hist[i];
        dest->trace_run_length_hist[i] += src->trace_run_length_hist[i];
        dest->optimized_trace_length_hist[i] += src->optimized_trace_length_hist[i];
        dest->trace_total_memory_hist[i] += src->trace_total_memory_hist[i];
    }
    for (int i = 0; i <= PYSTATS_MAX_UOP_ID; i++) {
        dest->error_in_opcode[i] += src->error_in_opcode[i];
    }
}

static void
merge_ft_stats(FTStats *dest, const FTStats *src)
{
    dest->mutex_sleeps += src->mutex_sleeps;
    dest->qsbr_polls += src->qsbr_polls;
    dest->world_stops += src->world_stops;
}

static void
merge_rare_event_stats(RareEventStats *dest, const RareEventStats *src)
{
    dest->set_class += src->set_class;
    dest->set_bases += src->set_bases;
    dest->set_eval_frame_func += src->set_eval_frame_func;
    dest->builtin_dict += src->builtin_dict;
    dest->func_modification += src->func_modification;
    dest->watched_dict_modification += src->watched_dict_modification;
    dest->watched_globals_modification += src->watched_globals_modification;
}

static void
merge_gc_stats_array(GCStats *dest, const GCStats *src)
{
    for (int i = 0; i < NUM_GENERATIONS; i++) {
        dest[i].collections += src[i].collections;
        dest[i].object_visits += src[i].object_visits;
        dest[i].objects_collected += src[i].objects_collected;
        dest[i].objects_transitively_reachable += src[i].objects_transitively_reachable;
        dest[i].objects_not_transitively_reachable += src[i].objects_not_transitively_reachable;
    }
}

void
stats_zero_thread(_PyThreadStateImpl *tstate)
{
    // Zero the thread local stat counters
    if (tstate->pystats_struct) {
        memset(tstate->pystats_struct, 0, sizeof(PyStats));
    }
}

// merge stats for a single thread into the global structure
void
stats_merge_thread(_PyThreadStateImpl *tstate)
{
    PyStats *src = tstate->pystats_struct;
    PyStats *dest = ((PyThreadState *)tstate)->interp->pystats_struct;

    if (src == NULL || dest == NULL) {
        return;
    }

    // Merge each category of stats using the helper functions.
    merge_opcode_stats_array(dest->opcode_stats, src->opcode_stats);
    merge_call_stats(&dest->call_stats, &src->call_stats);
    merge_object_stats(&dest->object_stats, &src->object_stats);
    merge_optimization_stats(&dest->optimization_stats, &src->optimization_stats);
    merge_ft_stats(&dest->ft_stats, &src->ft_stats);
    merge_rare_event_stats(&dest->rare_event_stats, &src->rare_event_stats);
    merge_gc_stats_array(dest->gc_stats, src->gc_stats);
}
#endif // Py_GIL_DISABLED

// toggle stats collection on or off for all threads
static int
stats_toggle_on_off(PyThreadState *tstate, int on)
{
    bool changed = false;
    PyInterpreterState *interp = tstate->interp;
    STATS_LOCK(interp);
    if (on && interp->pystats_struct == NULL) {
        PyStats *s = PyMem_RawCalloc(1, sizeof(PyStats));
        if (s == NULL) {
            STATS_UNLOCK(interp);
            return -1;
        }
        FT_ATOMIC_STORE_PTR_RELAXED(interp->pystats_struct, s);
    }
    if (tstate->interp->pystats_enabled != on) {
        FT_ATOMIC_STORE_INT_RELAXED(interp->pystats_enabled, on);
        changed = true;
    }
    STATS_UNLOCK(interp);
    if (!changed) {
        return 0;
    }
    _PyEval_StopTheWorld(interp);
    _Py_FOR_EACH_TSTATE_UNLOCKED(interp, ts) {
        PyStats *s = NULL;
        if (interp->pystats_enabled) {
#ifdef Py_GIL_DISABLED
            _PyThreadStateImpl *ts_impl = (_PyThreadStateImpl *)ts;
            if (ts_impl->pystats_struct == NULL) {
                // first activation for this thread, allocate structure
                ts_impl->pystats_struct = PyMem_RawCalloc(1, sizeof(PyStats));
            }
            s = ts_impl->pystats_struct;
#else
            s = ts->interp->pystats_struct;
#endif
        }
        ts->pystats = s;
    }
    _PyEval_StartTheWorld(interp);
    return 0;
}

// zero stats for all threads and for the interpreter
static void
stats_zero_all(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    if (tstate == NULL) {
        return;
    }
    if (FT_ATOMIC_LOAD_PTR_RELAXED(tstate->interp->pystats_struct) == NULL) {
        return;
    }
    PyInterpreterState *interp = tstate->interp;
    _PyEval_StopTheWorld(interp);
#ifdef Py_GIL_DISABLED
    _Py_FOR_EACH_TSTATE_UNLOCKED(interp, ts) {
        stats_zero_thread((_PyThreadStateImpl *)ts);
    }
#endif
    if (interp->pystats_struct) {
        memset(interp->pystats_struct, 0, sizeof(PyStats));
    }
    _PyEval_StartTheWorld(interp);
}

// merge stats for all threads into the per-interpreter structure
static void
stats_merge_all(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    if (tstate == NULL) {
        return;
    }
    if (FT_ATOMIC_LOAD_PTR_RELAXED(tstate->interp->pystats_struct) == NULL) {
        return;
    }
    PyInterpreterState *interp = tstate->interp;
    _PyEval_StopTheWorld(interp);
#ifdef Py_GIL_DISABLED
    _Py_FOR_EACH_TSTATE_UNLOCKED(interp, ts) {
        stats_merge_thread((_PyThreadStateImpl *)ts);
        stats_zero_thread((_PyThreadStateImpl *)ts);
    }
#endif
    _PyEval_StartTheWorld(interp);
}

int
_Py_StatsOn(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    return stats_toggle_on_off(tstate, 1);
}

void
_Py_StatsOff(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    stats_toggle_on_off(tstate, 0);
}

void
_Py_StatsClear(void)
{
    stats_zero_all();
}

static int
mem_is_zero(unsigned char *ptr, size_t size)
{
    for (size_t i = 0; i < size; i++) {
        if (*ptr != 0) {
            return 0;
        }
        ptr++;
    }
    return 1;
}

int
_Py_PrintSpecializationStats(int to_file)
{
    assert(to_file);
    stats_merge_all();
    PyThreadState *tstate = _PyThreadState_GET();
    STATS_LOCK(tstate->interp);
    PyStats *stats = tstate->interp->pystats_struct;
    if (stats == NULL) {
        STATS_UNLOCK(tstate->interp);
        return 0;
    }
#define MEM_IS_ZERO(DATA) mem_is_zero((unsigned char*)DATA, sizeof(*(DATA)))
    int is_zero = (
        MEM_IS_ZERO(stats->gc_stats)  // is a pointer
        && MEM_IS_ZERO(&stats->opcode_stats)
        && MEM_IS_ZERO(&stats->call_stats)
        && MEM_IS_ZERO(&stats->object_stats)
    );
#undef MEM_IS_ZERO
    STATS_UNLOCK(tstate->interp);
    if (is_zero) {
        // gh-108753: -X pystats command line was used, but then _stats_off()
        // and _stats_clear() have been called: in this case, avoid printing
        // useless "all zeros" statistics.
        return 0;
    }

    FILE *out = stderr;
    if (to_file) {
        /* Write to a file instead of stderr. */
# ifdef MS_WINDOWS
        const char *dirname = "c:\\temp\\py_stats\\";
# else
        const char *dirname = "/tmp/py_stats/";
# endif
        /* Use random 160 bit number as file name,
         * to avoid both accidental collisions and
         * symlink attacks. */
        unsigned char rand[20];
        char hex_name[41];
        _PyOS_URandomNonblock(rand, 20);
        for (int i = 0; i < 20; i++) {
            hex_name[2*i] = Py_hexdigits[rand[i]&15];
            hex_name[2*i+1] = Py_hexdigits[(rand[i]>>4)&15];
        }
        hex_name[40] = '\0';
        char buf[64];
        assert(strlen(dirname) + 40 + strlen(".txt") < 64);
        sprintf(buf, "%s%s.txt", dirname, hex_name);
        FILE *fout = fopen(buf, "w");
        if (fout) {
            out = fout;
        }
    }
    else {
        fprintf(out, "Specialization stats:\n");
    }
    STATS_LOCK(tstate->interp);
    print_stats(out, stats);
    STATS_UNLOCK(tstate->interp);
    if (out != stderr) {
        fclose(out);
    }
    return 1;
}

PyStatus
_PyStats_InterpInit(PyInterpreterState *interp)
{
    if (interp->config._pystats) {
        // start with pystats enabled, can be disabled via sys._stats_off()
        // this needs to be set before the first tstate is created
        interp->pystats_enabled = 1;
        interp->pystats_struct = PyMem_RawCalloc(1, sizeof(PyStats));
        if (interp->pystats_struct == NULL) {
            return _PyStatus_ERR("out-of-memory while initializing interpreter");
        }
    }
    return _PyStatus_OK();
}

bool
_PyStats_ThreadInit(PyInterpreterState *interp, _PyThreadStateImpl *tstate)
{
#ifdef Py_GIL_DISABLED
    if (FT_ATOMIC_LOAD_INT_RELAXED(interp->pystats_enabled)) {
        assert(interp->pystats_struct != NULL);
        tstate->pystats_struct = PyMem_RawCalloc(1, sizeof(PyStats));
        if (tstate->pystats_struct == NULL) {
            return false;
        }
    }
#endif
    return true;
}

void
_PyStats_ThreadFini(_PyThreadStateImpl *tstate)
{
#ifdef Py_GIL_DISABLED
    STATS_LOCK(((PyThreadState *)tstate)->interp);
    stats_merge_thread(tstate);
    STATS_UNLOCK(((PyThreadState *)tstate)->interp);
    PyMem_RawFree(tstate->pystats_struct);
#endif
}

void
_PyStats_Attach(_PyThreadStateImpl *tstate_impl)
{
    PyStats *s;
    PyThreadState *tstate = (PyThreadState *)tstate_impl;
    PyInterpreterState *interp = tstate->interp;
    if (FT_ATOMIC_LOAD_INT_RELAXED(interp->pystats_enabled)) {
#ifdef Py_GIL_DISABLED
        s = ((_PyThreadStateImpl *)tstate)->pystats_struct;
#else
        s = tstate->interp->pystats_struct;
#endif
    }
    else {
        s = NULL;
    }
    tstate->pystats = s;
}

void
_PyStats_Detach(_PyThreadStateImpl *tstate_impl)
{
    ((PyThreadState *)tstate_impl)->pystats = NULL;
}

#endif  // Py_STATS
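On free-threaded builds, pystats.c gives each thread a private PyStats block that is bumped with no synchronization and folded into the interpreter-wide block under pystats_mutex (on thread exit, or wholesale with the world stopped before printing). A compressed sketch of that collect-then-merge shape, with toy types standing in for the real PyStats machinery:

/* Illustrative sketch only; toy stand-ins for PyStats and its mutex. */
#include <pthread.h>
#include <stdint.h>
#include <string.h>

typedef struct { uint64_t hits[256]; } toy_stats;

static toy_stats global_stats;
static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
static _Thread_local toy_stats local_stats;  /* counted without locking */

static void merge_local_into_global(void)
{
    pthread_mutex_lock(&global_lock);
    for (int i = 0; i < 256; i++) {
        global_stats.hits[i] += local_stats.hits[i];
    }
    pthread_mutex_unlock(&global_lock);
    memset(&local_stats, 0, sizeof(local_stats));  /* zero after merging */
}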
@@ -42,8 +42,7 @@ static PyObject *_Py_strhex_impl(const char* argbuf, const Py_ssize_t arglen,
    else {
        bytes_per_sep_group = 0;
    }

    unsigned int abs_bytes_per_sep = Py_ABS(bytes_per_sep_group);
    unsigned int abs_bytes_per_sep = _Py_ABS_CAST(unsigned int, bytes_per_sep_group);
    Py_ssize_t resultlen = 0;
    if (bytes_per_sep_group && arglen > 0) {
        /* How many sep characters we'll be inserting. */
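The replaced line matters because Py_ABS(x) on the most negative int is undefined behaviour (the positive value is not representable), while converting into the unsigned target type first is well defined. A hedged sketch of what a cast-based helper like _Py_ABS_CAST presumably does; the real macro's spelling is not shown in this hunk:

#include <limits.h>
#include <stdio.h>

/* Hypothetical stand-in for _Py_ABS_CAST: convert to the unsigned type
 * first, then negate there, where wraparound is well defined. */
#define ABS_CAST(utype, v) \
    ((v) < 0 ? (utype)0 - (utype)(v) : (utype)(v))

int main(void)
{
    int v = INT_MIN;
    /* -v as an int would overflow; the unsigned round-trip is fine. */
    unsigned int a = ABS_CAST(unsigned int, v);
    printf("%u\n", a);  /* 2147483648 with 32-bit int */
    return 0;
}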
@@ -43,7 +43,7 @@ _Py_parse_inf_or_nan(const char *p, char **endptr)
        s += 3;
        if (case_insensitive_match(s, "inity"))
            s += 5;
        retval = negate ? -Py_INFINITY : Py_INFINITY;
        retval = negate ? -INFINITY : INFINITY;
    }
    else if (case_insensitive_match(s, "nan")) {
        s += 3;
@@ -286,7 +286,7 @@ _PyOS_ascii_strtod(const char *nptr, char **endptr)
   string, -1.0 is returned and again ValueError is raised.

   On overflow (e.g., when trying to convert '1e500' on an IEEE 754 machine),
   if overflow_exception is NULL then +-Py_INFINITY is returned, and no Python
   if overflow_exception is NULL then +-INFINITY is returned, and no Python
   exception is raised. Otherwise, overflow_exception should point to
   a Python exception, this exception will be raised, -1.0 will be returned,
   and *endptr will point just past the end of the converted value.
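Both pystrtod.c hunks are the same mechanical swap: C99's <math.h> defines INFINITY (and isinf()) directly, so the project-local Py_INFINITY spelling is no longer needed. For reference:

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* INFINITY is a standard C99 macro of type float, usable wherever
     * Py_INFINITY was; negation and isinf() behave as expected. */
    double pos = INFINITY, neg = -INFINITY;
    printf("%f %f %d\n", pos, neg, isinf(neg));  /* inf -inf 1 */
    return 0;
}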
@@ -1181,7 +1181,7 @@ _PyErr_Display(PyObject *file, PyObject *unused, PyObject *value, PyObject *tb)
    }
    if (print_exception_recursive(&ctx, value) < 0) {
        PyErr_Clear();
        _PyObject_Dump(value);
        PyUnstable_Object_Dump(value);
        fprintf(stderr, "lost sys.stderr\n");
    }
    Py_XDECREF(ctx.seen);
@@ -1199,14 +1199,14 @@ PyErr_Display(PyObject *unused, PyObject *value, PyObject *tb)
    PyObject *file;
    if (PySys_GetOptionalAttr(&_Py_ID(stderr), &file) < 0) {
        PyObject *exc = PyErr_GetRaisedException();
        _PyObject_Dump(value);
        PyUnstable_Object_Dump(value);
        fprintf(stderr, "lost sys.stderr\n");
        _PyObject_Dump(exc);
        PyUnstable_Object_Dump(exc);
        Py_DECREF(exc);
        return;
    }
    if (file == NULL) {
        _PyObject_Dump(value);
        PyUnstable_Object_Dump(value);
        fprintf(stderr, "lost sys.stderr\n");
        return;
    }
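These two hunks are a rename: the private _PyObject_Dump() debugging helper is now called through a PyUnstable_ spelling, the naming CPython uses for APIs that are exposed but may change between minor releases. Usage is unchanged; a minimal sketch of calling it from an extension, assuming the declaration is exported through Python.h in this build:

#include <Python.h>

/* Dump an object's repr and refcount to stderr, even when interpreter
 * state is damaged -- handy in crash handlers and debugger hooks. */
static void debug_dump(PyObject *op)
{
    if (op != NULL) {
        PyUnstable_Object_Dump(op);  /* writes directly to stderr */
    }
}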
@@ -1252,12 +1252,19 @@ _PyRun_StringFlagsWithName(const char *str, PyObject* name, int start,
    } else {
        name = &_Py_STR(anon_string);
    }
    PyObject *module = NULL;
    if (globals && PyDict_GetItemStringRef(globals, "__name__", &module) < 0) {
        goto done;
    }

    mod = _PyParser_ASTFromString(str, name, start, flags, arena);
    mod = _PyParser_ASTFromString(str, name, start, flags, arena, module);
    Py_XDECREF(module);

    if (mod != NULL) {
    if (mod != NULL) {
        ret = run_mod(mod, name, globals, locals, flags, arena, source, generate_new_source);
    }

done:
    Py_XDECREF(source);
    _PyArena_Free(arena);
    return ret;
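The new lookup relies on PyDict_GetItemStringRef(), which, unlike the borrowed-reference PyDict_GetItemString(), returns a strong reference through its out-parameter and distinguishes three outcomes: 1 found, 0 missing (out set to NULL), -1 error. That is why a single `< 0` test plus an unconditional Py_XDECREF() suffices above. A small usage sketch (hypothetical helper name):

#include <Python.h>

/* Fetch globals["__name__"] as a strong reference, tolerating absence. */
static PyObject *get_module_name(PyObject *globals)
{
    PyObject *module = NULL;
    if (globals != NULL) {
        int rc = PyDict_GetItemStringRef(globals, "__name__", &module);
        if (rc < 0) {
            return NULL;       /* real error, exception is set */
        }
        /* rc == 0: key absent, module stays NULL; rc == 1: strong ref held */
    }
    return module;             /* caller must Py_XDECREF() */
}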
@@ -1407,8 +1414,17 @@ run_mod(mod_ty mod, PyObject *filename, PyObject *globals, PyObject *locals,
            return NULL;
        }
    }
    PyObject *module = NULL;
    if (globals && PyDict_GetItemStringRef(globals, "__name__", &module) < 0) {
        if (interactive_src) {
            Py_DECREF(interactive_filename);
        }
        return NULL;
    }

    PyCodeObject *co = _PyAST_Compile(mod, interactive_filename, flags, -1, arena);
    PyCodeObject *co = _PyAST_Compile(mod, interactive_filename, flags, -1,
                                      arena, module);
    Py_XDECREF(module);
    if (co == NULL) {
        if (interactive_src) {
            Py_DECREF(interactive_filename);
@@ -1507,6 +1523,14 @@ run_pyc_file(FILE *fp, PyObject *globals, PyObject *locals,
PyObject *
Py_CompileStringObject(const char *str, PyObject *filename, int start,
                       PyCompilerFlags *flags, int optimize)
{
    return _Py_CompileStringObjectWithModule(str, filename, start,
                                             flags, optimize, NULL);
}

PyObject *
_Py_CompileStringObjectWithModule(const char *str, PyObject *filename, int start,
                                  PyCompilerFlags *flags, int optimize, PyObject *module)
{
    PyCodeObject *co;
    mod_ty mod;
@@ -1514,14 +1538,16 @@ Py_CompileStringObject(const char *str, PyObject *filename, int start,
    if (arena == NULL)
        return NULL;

    mod = _PyParser_ASTFromString(str, filename, start, flags, arena);
    mod = _PyParser_ASTFromString(str, filename, start, flags, arena, module);
    if (mod == NULL) {
        _PyArena_Free(arena);
        return NULL;
    }
    if (flags && (flags->cf_flags & PyCF_ONLY_AST)) {
        int syntax_check_only = ((flags->cf_flags & PyCF_OPTIMIZED_AST) == PyCF_ONLY_AST); /* unoptimized AST */
        if (_PyCompile_AstPreprocess(mod, filename, flags, optimize, arena, syntax_check_only) < 0) {
        if (_PyCompile_AstPreprocess(mod, filename, flags, optimize, arena,
                                     syntax_check_only, module) < 0)
        {
            _PyArena_Free(arena);
            return NULL;
        }
@@ -1529,7 +1555,7 @@ Py_CompileStringObject(const char *str, PyObject *filename, int start,
        _PyArena_Free(arena);
        return result;
    }
    co = _PyAST_Compile(mod, filename, flags, optimize, arena);
    co = _PyAST_Compile(mod, filename, flags, optimize, arena, module);
    _PyArena_Free(arena);
    return (PyObject *)co;
}
@@ -2,7 +2,7 @@
#include "pycore_initconfig.h" // _PyStatus_ERR
#include "pycore_pystate.h" // _Py_AssertHoldsTstate()
#include "pycore_runtime.h" // _PyRuntime
#include "pycore_time.h" // PyTime_t
#include "pycore_time.h" // export _PyLong_FromTime_t()

#include <time.h> // gmtime_r()
#ifdef HAVE_SYS_TIME_H
@@ -472,31 +472,6 @@ _PyTime_FromMicrosecondsClamp(PyTime_t us)
}


int
_PyTime_FromLong(PyTime_t *tp, PyObject *obj)
{
    if (!PyLong_Check(obj)) {
        PyErr_Format(PyExc_TypeError, "expect int, got %s",
                     Py_TYPE(obj)->tp_name);
        return -1;
    }

    static_assert(sizeof(long long) == sizeof(PyTime_t),
                  "PyTime_t is not long long");
    long long nsec = PyLong_AsLongLong(obj);
    if (nsec == -1 && PyErr_Occurred()) {
        if (PyErr_ExceptionMatches(PyExc_OverflowError)) {
            pytime_overflow();
        }
        return -1;
    }

    PyTime_t t = (PyTime_t)nsec;
    *tp = t;
    return 0;
}


#ifdef HAVE_CLOCK_GETTIME
static int
pytime_fromtimespec(PyTime_t *tp, const struct timespec *ts, int raise_exc)
@@ -658,14 +633,6 @@ PyTime_AsSecondsDouble(PyTime_t ns)
}


PyObject *
_PyTime_AsLong(PyTime_t ns)
{
    static_assert(sizeof(long long) >= sizeof(PyTime_t),
                  "PyTime_t is larger than long long");
    return PyLong_FromLongLong((long long)ns);
}

int
_PyTime_FromSecondsDouble(double seconds, _PyTime_round_t round, PyTime_t *result)
{
@@ -36,6 +36,7 @@
#include "pycore_pystate.h" // _PyThreadState_GET()
#include "pycore_qsbr.h"
#include "pycore_tstate.h" // _PyThreadStateImpl
#include "pycore_stats.h" // FT_STAT_QSBR_POLL_INC()


// Starting size of the array of qsbr thread states
@@ -158,7 +159,7 @@ _Py_qsbr_poll(struct _qsbr_thread_state *qsbr, uint64_t goal)
    if (_Py_qbsr_goal_reached(qsbr, goal)) {
        return true;
    }

    FT_STAT_QSBR_POLL_INC();
    uint64_t rd_seq = qsbr_poll_scan(qsbr->shared);
    return QSBR_LEQ(goal, rd_seq);
}
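FT_STAT_QSBR_POLL_INC() is one of the free-threading counters declared in pycore_stats.h; like the other Py_STATS hooks it has to compile away entirely when stats are disabled so the poll path stays branch-free in release builds. The header is not shown in this diff, but the usual shape of such a macro is (hypothetical spelling; the real definition may differ):

/* Sketch of a compile-away counter macro built on _PyStats_GetLocal(). */
#ifdef Py_STATS
#define FT_STAT_INC(name)                        \
    do {                                         \
        PyStats *s = _PyStats_GetLocal();        \
        if (s) {                                 \
            s->ft_stats.name++;                  \
        }                                        \
    } while (0)
#else
#define FT_STAT_INC(name) ((void)0)
#endif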
@@ -158,7 +158,7 @@ static mach_port_t pid_to_task(pid_t pid);
#endif

// Initialize the process handle
static int
UNUSED static int
_Py_RemoteDebug_InitProcHandle(proc_handle_t *handle, pid_t pid) {
    handle->pid = pid;
#if defined(__APPLE__) && defined(TARGET_OS_OSX) && TARGET_OS_OSX
@@ -188,7 +188,7 @@ _Py_RemoteDebug_InitProcHandle(proc_handle_t *handle, pid_t pid) {
}

// Clean up the process handle
static void
UNUSED static void
_Py_RemoteDebug_CleanupProcHandle(proc_handle_t *handle) {
#ifdef MS_WINDOWS
    if (handle->hProcess != NULL) {
@@ -875,7 +875,7 @@ search_windows_map_for_section(proc_handle_t* handle, const char* secname, const
#endif // MS_WINDOWS

// Get the PyRuntime section address for any platform
static uintptr_t
UNUSED static uintptr_t
_Py_RemoteDebug_GetPyRuntimeAddress(proc_handle_t* handle)
{
    uintptr_t address;
@@ -1158,7 +1158,7 @@ _Py_RemoteDebug_PagedReadRemoteMemory(proc_handle_t *handle,
    return _Py_RemoteDebug_ReadRemoteMemory(handle, addr, size, out);
}

static int
UNUSED static int
_Py_RemoteDebug_ReadDebugOffsets(
    proc_handle_t *handle,
    uintptr_t *runtime_start_address,
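All four remote_debug.h hunks prepend an UNUSED qualifier to static functions defined in a header, which silences -Wunused-function in translation units that include the header without calling every helper. The macro's definition is not part of this diff; a typical portable spelling would be:

/* Hypothetical definition -- CPython's actual header may differ. */
#if defined(__GNUC__) || defined(__clang__)
#  define UNUSED __attribute__((unused))
#else
#  define UNUSED
#endif

/* Example: no warning even if nothing in this TU calls helper(). */
UNUSED static int helper(int x) { return x + 1; }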
@@ -19,7 +19,7 @@ cleanup_proc_handle(proc_handle_t *handle) {
}

static int
read_memory(proc_handle_t *handle, uint64_t remote_address, size_t len, void* dst)
read_memory(proc_handle_t *handle, uintptr_t remote_address, size_t len, void* dst)
{
    return _Py_RemoteDebug_ReadRemoteMemory(handle, remote_address, len, dst);
}
@@ -235,7 +235,7 @@ send_exec_to_proc_handle(proc_handle_t *handle, int tid, const char *debugger_sc
    int is_remote_debugging_enabled = 0;
    if (0 != read_memory(
        handle,
        interpreter_state_addr + debug_offsets.debugger_support.remote_debugging_enabled,
        interpreter_state_addr + (uintptr_t)debug_offsets.debugger_support.remote_debugging_enabled,
        sizeof(int),
        &is_remote_debugging_enabled))
    {
@@ -255,7 +255,7 @@ send_exec_to_proc_handle(proc_handle_t *handle, int tid, const char *debugger_sc
    if (tid != 0) {
        if (0 != read_memory(
            handle,
            interpreter_state_addr + debug_offsets.interpreter_state.threads_head,
            interpreter_state_addr + (uintptr_t)debug_offsets.interpreter_state.threads_head,
            sizeof(void*),
            &thread_state_addr))
        {
@@ -264,7 +264,7 @@ send_exec_to_proc_handle(proc_handle_t *handle, int tid, const char *debugger_sc
        while (thread_state_addr != 0) {
            if (0 != read_memory(
                handle,
                thread_state_addr + debug_offsets.thread_state.native_thread_id,
                thread_state_addr + (uintptr_t)debug_offsets.thread_state.native_thread_id,
                sizeof(this_tid),
                &this_tid))
            {
@@ -277,7 +277,7 @@ send_exec_to_proc_handle(proc_handle_t *handle, int tid, const char *debugger_sc

            if (0 != read_memory(
                handle,
                thread_state_addr + debug_offsets.thread_state.next,
                thread_state_addr + (uintptr_t)debug_offsets.thread_state.next,
                sizeof(void*),
                &thread_state_addr))
            {
@@ -294,7 +294,7 @@ send_exec_to_proc_handle(proc_handle_t *handle, int tid, const char *debugger_sc
    } else {
        if (0 != read_memory(
            handle,
            interpreter_state_addr + debug_offsets.interpreter_state.threads_main,
            interpreter_state_addr + (uintptr_t)debug_offsets.interpreter_state.threads_main,
            sizeof(void*),
            &thread_state_addr))
        {
@@ -346,7 +346,7 @@ send_exec_to_proc_handle(proc_handle_t *handle, int tid, const char *debugger_sc
    uintptr_t eval_breaker;
    if (0 != read_memory(
        handle,
        thread_state_addr + debug_offsets.debugger_support.eval_breaker,
        thread_state_addr + (uintptr_t)debug_offsets.debugger_support.eval_breaker,
        sizeof(uintptr_t),
        &eval_breaker))
    {
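These remote_debugging.c hunks appear to be one portability fix repeated: read_memory() now takes a uintptr_t address, and each 64-bit offset field is cast to uintptr_t before the addition, so the arithmetic is done in the pointer-width type rather than promoting the whole expression to uint64_t and truncating later on 32-bit targets. In miniature:

#include <stdint.h>

/* On a 32-bit target, base + off64 would promote base to uint64_t and the
 * result would be truncated when passed as a 32-bit address. Casting the
 * offset first keeps the sum in uintptr_t. */
static uintptr_t remote_field_addr(uintptr_t base, uint64_t offset)
{
    return base + (uintptr_t)offset;  /* arithmetic stays pointer-width */
}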
@@ -23,437 +23,23 @@

#include <stdlib.h> // rand()

extern const char *_PyUOpName(int index);

/* For guidance on adding or extending families of instructions see
 * InternalDocs/interpreter.md `Specialization` section.
 */

#ifdef Py_STATS
GCStats _py_gc_stats[NUM_GENERATIONS] = { 0 };
static PyStats _Py_stats_struct = { .gc_stats = _py_gc_stats };
PyStats *_Py_stats = NULL;

#if PYSTATS_MAX_UOP_ID < MAX_UOP_ID
#error "Not enough space allocated for pystats. Increase PYSTATS_MAX_UOP_ID to at least MAX_UOP_ID"
#endif

#define ADD_STAT_TO_DICT(res, field) \
    do { \
        PyObject *val = PyLong_FromUnsignedLongLong(stats->field); \
        if (val == NULL) { \
            Py_DECREF(res); \
            return NULL; \
        } \
        if (PyDict_SetItemString(res, #field, val) == -1) { \
            Py_DECREF(res); \
            Py_DECREF(val); \
            return NULL; \
        } \
        Py_DECREF(val); \
    } while(0);
static PyObject*
stats_to_dict(SpecializationStats *stats)
{
    PyObject *res = PyDict_New();
    if (res == NULL) {
        return NULL;
    }
    ADD_STAT_TO_DICT(res, success);
    ADD_STAT_TO_DICT(res, failure);
    ADD_STAT_TO_DICT(res, hit);
    ADD_STAT_TO_DICT(res, deferred);
    ADD_STAT_TO_DICT(res, miss);
    ADD_STAT_TO_DICT(res, deopt);
    PyObject *failure_kinds = PyTuple_New(SPECIALIZATION_FAILURE_KINDS);
    if (failure_kinds == NULL) {
        Py_DECREF(res);
        return NULL;
    }
    for (int i = 0; i < SPECIALIZATION_FAILURE_KINDS; i++) {
        PyObject *stat = PyLong_FromUnsignedLongLong(stats->failure_kinds[i]);
        if (stat == NULL) {
            Py_DECREF(res);
            Py_DECREF(failure_kinds);
            return NULL;
        }
        PyTuple_SET_ITEM(failure_kinds, i, stat);
    }
    if (PyDict_SetItemString(res, "failure_kinds", failure_kinds)) {
        Py_DECREF(res);
        Py_DECREF(failure_kinds);
        return NULL;
    }
    Py_DECREF(failure_kinds);
    return res;
}
#undef ADD_STAT_TO_DICT

static int
add_stat_dict(
    PyObject *res,
    int opcode,
    const char *name) {

    SpecializationStats *stats = &_Py_stats_struct.opcode_stats[opcode].specialization;
    PyObject *d = stats_to_dict(stats);
    if (d == NULL) {
        return -1;
    }
    int err = PyDict_SetItemString(res, name, d);
    Py_DECREF(d);
    return err;
}
PyObject*
_Py_GetSpecializationStats(void) {
    PyObject *stats = PyDict_New();
    if (stats == NULL) {
        return NULL;
    }
    int err = 0;
    err += add_stat_dict(stats, CONTAINS_OP, "contains_op");
    err += add_stat_dict(stats, LOAD_SUPER_ATTR, "load_super_attr");
    err += add_stat_dict(stats, LOAD_ATTR, "load_attr");
    err += add_stat_dict(stats, LOAD_GLOBAL, "load_global");
    err += add_stat_dict(stats, STORE_SUBSCR, "store_subscr");
    err += add_stat_dict(stats, STORE_ATTR, "store_attr");
    err += add_stat_dict(stats, JUMP_BACKWARD, "jump_backward");
    err += add_stat_dict(stats, CALL, "call");
    err += add_stat_dict(stats, CALL_KW, "call_kw");
    err += add_stat_dict(stats, BINARY_OP, "binary_op");
    err += add_stat_dict(stats, COMPARE_OP, "compare_op");
    err += add_stat_dict(stats, UNPACK_SEQUENCE, "unpack_sequence");
    err += add_stat_dict(stats, FOR_ITER, "for_iter");
    err += add_stat_dict(stats, TO_BOOL, "to_bool");
    err += add_stat_dict(stats, SEND, "send");
    if (err < 0) {
        Py_DECREF(stats);
        return NULL;
    }
    return stats;
}

#define PRINT_STAT(i, field) \
    if (stats[i].field) { \
        fprintf(out, "    opcode[%s]." #field " : %" PRIu64 "\n", _PyOpcode_OpName[i], stats[i].field); \
    }

static void
print_spec_stats(FILE *out, OpcodeStats *stats)
{
    /* Mark some opcodes as specializable for stats,
     * even though we don't specialize them yet. */
    fprintf(out, "opcode[BINARY_SLICE].specializable : 1\n");
    fprintf(out, "opcode[STORE_SLICE].specializable : 1\n");
    fprintf(out, "opcode[GET_ITER].specializable : 1\n");
    for (int i = 0; i < 256; i++) {
        if (_PyOpcode_Caches[i]) {
            /* Ignore jumps as they cannot be specialized */
            switch (i) {
                case POP_JUMP_IF_FALSE:
                case POP_JUMP_IF_TRUE:
                case POP_JUMP_IF_NONE:
                case POP_JUMP_IF_NOT_NONE:
                case JUMP_BACKWARD:
                    break;
                default:
                    fprintf(out, "opcode[%s].specializable : 1\n", _PyOpcode_OpName[i]);
            }
        }
        PRINT_STAT(i, specialization.success);
        PRINT_STAT(i, specialization.failure);
        PRINT_STAT(i, specialization.hit);
        PRINT_STAT(i, specialization.deferred);
        PRINT_STAT(i, specialization.miss);
        PRINT_STAT(i, specialization.deopt);
        PRINT_STAT(i, execution_count);
        for (int j = 0; j < SPECIALIZATION_FAILURE_KINDS; j++) {
            uint64_t val = stats[i].specialization.failure_kinds[j];
            if (val) {
                fprintf(out, "    opcode[%s].specialization.failure_kinds[%d] : %"
                    PRIu64 "\n", _PyOpcode_OpName[i], j, val);
            }
        }
        for (int j = 0; j < 256; j++) {
            if (stats[i].pair_count[j]) {
                fprintf(out, "opcode[%s].pair_count[%s] : %" PRIu64 "\n",
                        _PyOpcode_OpName[i], _PyOpcode_OpName[j], stats[i].pair_count[j]);
            }
        }
    }
}
#undef PRINT_STAT

static void
print_call_stats(FILE *out, CallStats *stats)
{
    fprintf(out, "Calls to PyEval_EvalDefault: %" PRIu64 "\n", stats->pyeval_calls);
    fprintf(out, "Calls to Python functions inlined: %" PRIu64 "\n", stats->inlined_py_calls);
    fprintf(out, "Frames pushed: %" PRIu64 "\n", stats->frames_pushed);
    fprintf(out, "Frame objects created: %" PRIu64 "\n", stats->frame_objects_created);
    for (int i = 0; i < EVAL_CALL_KINDS; i++) {
        fprintf(out, "Calls via PyEval_EvalFrame[%d] : %" PRIu64 "\n", i, stats->eval_calls[i]);
    }
}

static void
print_object_stats(FILE *out, ObjectStats *stats)
{
    fprintf(out, "Object allocations from freelist: %" PRIu64 "\n", stats->from_freelist);
    fprintf(out, "Object frees to freelist: %" PRIu64 "\n", stats->to_freelist);
    fprintf(out, "Object allocations: %" PRIu64 "\n", stats->allocations);
    fprintf(out, "Object allocations to 512 bytes: %" PRIu64 "\n", stats->allocations512);
    fprintf(out, "Object allocations to 4 kbytes: %" PRIu64 "\n", stats->allocations4k);
    fprintf(out, "Object allocations over 4 kbytes: %" PRIu64 "\n", stats->allocations_big);
    fprintf(out, "Object frees: %" PRIu64 "\n", stats->frees);
    fprintf(out, "Object inline values: %" PRIu64 "\n", stats->inline_values);
    fprintf(out, "Object interpreter mortal increfs: %" PRIu64 "\n", stats->interpreter_increfs);
    fprintf(out, "Object interpreter mortal decrefs: %" PRIu64 "\n", stats->interpreter_decrefs);
    fprintf(out, "Object mortal increfs: %" PRIu64 "\n", stats->increfs);
    fprintf(out, "Object mortal decrefs: %" PRIu64 "\n", stats->decrefs);
    fprintf(out, "Object interpreter immortal increfs: %" PRIu64 "\n", stats->interpreter_immortal_increfs);
    fprintf(out, "Object interpreter immortal decrefs: %" PRIu64 "\n", stats->interpreter_immortal_decrefs);
    fprintf(out, "Object immortal increfs: %" PRIu64 "\n", stats->immortal_increfs);
    fprintf(out, "Object immortal decrefs: %" PRIu64 "\n", stats->immortal_decrefs);
    fprintf(out, "Object materialize dict (on request): %" PRIu64 "\n", stats->dict_materialized_on_request);
    fprintf(out, "Object materialize dict (new key): %" PRIu64 "\n", stats->dict_materialized_new_key);
    fprintf(out, "Object materialize dict (too big): %" PRIu64 "\n", stats->dict_materialized_too_big);
    fprintf(out, "Object materialize dict (str subclass): %" PRIu64 "\n", stats->dict_materialized_str_subclass);
    fprintf(out, "Object method cache hits: %" PRIu64 "\n", stats->type_cache_hits);
    fprintf(out, "Object method cache misses: %" PRIu64 "\n", stats->type_cache_misses);
    fprintf(out, "Object method cache collisions: %" PRIu64 "\n", stats->type_cache_collisions);
    fprintf(out, "Object method cache dunder hits: %" PRIu64 "\n", stats->type_cache_dunder_hits);
    fprintf(out, "Object method cache dunder misses: %" PRIu64 "\n", stats->type_cache_dunder_misses);
}

static void
print_gc_stats(FILE *out, GCStats *stats)
{
    for (int i = 0; i < NUM_GENERATIONS; i++) {
        fprintf(out, "GC[%d] collections: %" PRIu64 "\n", i, stats[i].collections);
        fprintf(out, "GC[%d] object visits: %" PRIu64 "\n", i, stats[i].object_visits);
        fprintf(out, "GC[%d] objects collected: %" PRIu64 "\n", i, stats[i].objects_collected);
        fprintf(out, "GC[%d] objects reachable from roots: %" PRIu64 "\n", i, stats[i].objects_transitively_reachable);
        fprintf(out, "GC[%d] objects not reachable from roots: %" PRIu64 "\n", i, stats[i].objects_not_transitively_reachable);
    }
}

#ifdef _Py_TIER2
static void
print_histogram(FILE *out, const char *name, uint64_t hist[_Py_UOP_HIST_SIZE])
{
    for (int i = 0; i < _Py_UOP_HIST_SIZE; i++) {
        fprintf(out, "%s[%" PRIu64"]: %" PRIu64 "\n", name, (uint64_t)1 << i, hist[i]);
    }
}

static void
print_optimization_stats(FILE *out, OptimizationStats *stats)
{
    fprintf(out, "Optimization attempts: %" PRIu64 "\n", stats->attempts);
    fprintf(out, "Optimization traces created: %" PRIu64 "\n", stats->traces_created);
    fprintf(out, "Optimization traces executed: %" PRIu64 "\n", stats->traces_executed);
    fprintf(out, "Optimization uops executed: %" PRIu64 "\n", stats->uops_executed);
    fprintf(out, "Optimization trace stack overflow: %" PRIu64 "\n", stats->trace_stack_overflow);
    fprintf(out, "Optimization trace stack underflow: %" PRIu64 "\n", stats->trace_stack_underflow);
    fprintf(out, "Optimization trace too long: %" PRIu64 "\n", stats->trace_too_long);
    fprintf(out, "Optimization trace too short: %" PRIu64 "\n", stats->trace_too_short);
    fprintf(out, "Optimization inner loop: %" PRIu64 "\n", stats->inner_loop);
    fprintf(out, "Optimization recursive call: %" PRIu64 "\n", stats->recursive_call);
    fprintf(out, "Optimization low confidence: %" PRIu64 "\n", stats->low_confidence);
    fprintf(out, "Optimization unknown callee: %" PRIu64 "\n", stats->unknown_callee);
    fprintf(out, "Executors invalidated: %" PRIu64 "\n", stats->executors_invalidated);

    print_histogram(out, "Trace length", stats->trace_length_hist);
    print_histogram(out, "Trace run length", stats->trace_run_length_hist);
    print_histogram(out, "Optimized trace length", stats->optimized_trace_length_hist);

    fprintf(out, "Optimization optimizer attempts: %" PRIu64 "\n", stats->optimizer_attempts);
    fprintf(out, "Optimization optimizer successes: %" PRIu64 "\n", stats->optimizer_successes);
    fprintf(out, "Optimization optimizer failure no memory: %" PRIu64 "\n",
            stats->optimizer_failure_reason_no_memory);
    fprintf(out, "Optimizer remove globals builtins changed: %" PRIu64 "\n", stats->remove_globals_builtins_changed);
    fprintf(out, "Optimizer remove globals incorrect keys: %" PRIu64 "\n", stats->remove_globals_incorrect_keys);
    for (int i = 0; i <= MAX_UOP_ID; i++) {
        if (stats->opcode[i].execution_count) {
            fprintf(out, "uops[%s].execution_count : %" PRIu64 "\n", _PyUOpName(i), stats->opcode[i].execution_count);
        }
        if (stats->opcode[i].miss) {
            fprintf(out, "uops[%s].specialization.miss : %" PRIu64 "\n", _PyUOpName(i), stats->opcode[i].miss);
        }
    }
    for (int i = 0; i < 256; i++) {
        if (stats->unsupported_opcode[i]) {
            fprintf(
                out,
                "unsupported_opcode[%s].count : %" PRIu64 "\n",
                _PyOpcode_OpName[i],
                stats->unsupported_opcode[i]
            );
        }
    }

    for (int i = 1; i <= MAX_UOP_ID; i++) {
        for (int j = 1; j <= MAX_UOP_ID; j++) {
            if (stats->opcode[i].pair_count[j]) {
                fprintf(out, "uop[%s].pair_count[%s] : %" PRIu64 "\n",
                        _PyOpcode_uop_name[i], _PyOpcode_uop_name[j], stats->opcode[i].pair_count[j]);
            }
        }
    }
    for (int i = 0; i < MAX_UOP_ID; i++) {
        if (stats->error_in_opcode[i]) {
            fprintf(
                out,
                "error_in_opcode[%s].count : %" PRIu64 "\n",
                _PyUOpName(i),
                stats->error_in_opcode[i]
            );
        }
    }
    fprintf(out, "JIT total memory size: %" PRIu64 "\n", stats->jit_total_memory_size);
    fprintf(out, "JIT code size: %" PRIu64 "\n", stats->jit_code_size);
    fprintf(out, "JIT trampoline size: %" PRIu64 "\n", stats->jit_trampoline_size);
    fprintf(out, "JIT data size: %" PRIu64 "\n", stats->jit_data_size);
    fprintf(out, "JIT padding size: %" PRIu64 "\n", stats->jit_padding_size);
    fprintf(out, "JIT freed memory size: %" PRIu64 "\n", stats->jit_freed_memory_size);

    print_histogram(out, "Trace total memory size", stats->trace_total_memory_hist);
}
#endif

static void
print_rare_event_stats(FILE *out, RareEventStats *stats)
{
    fprintf(out, "Rare event (set_class): %" PRIu64 "\n", stats->set_class);
    fprintf(out, "Rare event (set_bases): %" PRIu64 "\n", stats->set_bases);
    fprintf(out, "Rare event (set_eval_frame_func): %" PRIu64 "\n", stats->set_eval_frame_func);
    fprintf(out, "Rare event (builtin_dict): %" PRIu64 "\n", stats->builtin_dict);
    fprintf(out, "Rare event (func_modification): %" PRIu64 "\n", stats->func_modification);
    fprintf(out, "Rare event (watched_dict_modification): %" PRIu64 "\n", stats->watched_dict_modification);
    fprintf(out, "Rare event (watched_globals_modification): %" PRIu64 "\n", stats->watched_globals_modification);
}

static void
print_stats(FILE *out, PyStats *stats)
{
    print_spec_stats(out, stats->opcode_stats);
    print_call_stats(out, &stats->call_stats);
    print_object_stats(out, &stats->object_stats);
    print_gc_stats(out, stats->gc_stats);
#ifdef _Py_TIER2
    print_optimization_stats(out, &stats->optimization_stats);
#endif
    print_rare_event_stats(out, &stats->rare_event_stats);
}

void
_Py_StatsOn(void)
{
    _Py_stats = &_Py_stats_struct;
}

void
_Py_StatsOff(void)
{
    _Py_stats = NULL;
}

void
_Py_StatsClear(void)
{
    memset(&_py_gc_stats, 0, sizeof(_py_gc_stats));
    memset(&_Py_stats_struct, 0, sizeof(_Py_stats_struct));
    _Py_stats_struct.gc_stats = _py_gc_stats;
}

static int
mem_is_zero(unsigned char *ptr, size_t size)
{
    for (size_t i = 0; i < size; i++) {
        if (*ptr != 0) {
            return 0;
        }
        ptr++;
    }
    return 1;
}

int
_Py_PrintSpecializationStats(int to_file)
{
    PyStats *stats = &_Py_stats_struct;
#define MEM_IS_ZERO(DATA) mem_is_zero((unsigned char*)DATA, sizeof(*(DATA)))
    int is_zero = (
        MEM_IS_ZERO(stats->gc_stats)  // is a pointer
        && MEM_IS_ZERO(&stats->opcode_stats)
        && MEM_IS_ZERO(&stats->call_stats)
        && MEM_IS_ZERO(&stats->object_stats)
    );
#undef MEM_IS_ZERO
    if (is_zero) {
        // gh-108753: -X pystats command line was used, but then _stats_off()
        // and _stats_clear() have been called: in this case, avoid printing
        // useless "all zeros" statistics.
        return 0;
    }

    FILE *out = stderr;
    if (to_file) {
        /* Write to a file instead of stderr. */
# ifdef MS_WINDOWS
        const char *dirname = "c:\\temp\\py_stats\\";
# else
        const char *dirname = "/tmp/py_stats/";
# endif
        /* Use random 160 bit number as file name,
         * to avoid both accidental collisions and
         * symlink attacks. */
        unsigned char rand[20];
        char hex_name[41];
        _PyOS_URandomNonblock(rand, 20);
        for (int i = 0; i < 20; i++) {
            hex_name[2*i] = Py_hexdigits[rand[i]&15];
            hex_name[2*i+1] = Py_hexdigits[(rand[i]>>4)&15];
        }
        hex_name[40] = '\0';
        char buf[64];
        assert(strlen(dirname) + 40 + strlen(".txt") < 64);
        sprintf(buf, "%s%s.txt", dirname, hex_name);
        FILE *fout = fopen(buf, "w");
        if (fout) {
            out = fout;
        }
    }
    else {
        fprintf(out, "Specialization stats:\n");
    }
    print_stats(out, stats);
    if (out != stderr) {
        fclose(out);
    }
    return 1;
}

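The 40-character file name above is just 20 random bytes hex-encoded nibble by nibble. A standalone sketch of the same technique, using POSIX getentropy() as a stand-in for the internal _PyOS_URandomNonblock() (the path and variable names are illustrative):

#include <stdio.h>
#include <unistd.h>   /* getentropy() on POSIX / glibc >= 2.25 */

int
main(void)
{
    static const char hexdigits[] = "0123456789abcdef";
    unsigned char rnd[20];
    char hex_name[41];

    if (getentropy(rnd, sizeof(rnd)) < 0) {
        perror("getentropy");
        return 1;
    }
    for (int i = 0; i < 20; i++) {
        /* Same nibble order as the code above: low nibble first. */
        hex_name[2*i] = hexdigits[rnd[i] & 15];
        hex_name[2*i+1] = hexdigits[(rnd[i] >> 4) & 15];
    }
    hex_name[40] = '\0';

    char path[64];
    snprintf(path, sizeof(path), "/tmp/py_stats/%s.txt", hex_name);
    printf("would write stats to %s\n", path);
    return 0;
}
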
#if Py_STATS
#define SPECIALIZATION_FAIL(opcode, kind) \
    do { \
        if (_Py_stats) { \
        PyStats *s = _PyStats_GET(); \
        if (s) { \
            int _kind = (kind); \
            assert(_kind < SPECIALIZATION_FAILURE_KINDS); \
            _Py_stats->opcode_stats[opcode].specialization.failure_kinds[_kind]++; \
            s->opcode_stats[opcode].specialization.failure_kinds[_kind]++; \
        } \
    } while (0)

#endif // Py_STATS


#ifndef SPECIALIZATION_FAIL
#else
# define SPECIALIZATION_FAIL(opcode, kind) ((void)0)
#endif
#endif // Py_STATS

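The do { ... } while (0) wrapper keeps the macro statement-safe inside braceless if/else bodies, and the stats-off variant collapses to ((void)0) so call sites cost nothing in release builds. A self-contained sketch of that pattern, with hypothetical names (COLLECT_STATS and RECORD_FAIL are illustrative, not CPython macros):

#include <stdio.h>

#define COLLECT_STATS 1   /* flip to 0 to compile the counters out */

#if COLLECT_STATS
static unsigned long long failure_kinds[8];
#define RECORD_FAIL(kind) \
    do { \
        int _kind = (kind); \
        failure_kinds[_kind]++; \
    } while (0)
#else
#define RECORD_FAIL(kind) ((void)0)
#endif

int
main(void)
{
    /* Statement-safe even without braces, thanks to do/while(0). */
    for (int i = 0; i < 3; i++)
        RECORD_FAIL(2);
#if COLLECT_STATS
    printf("kind 2 failures: %llu\n", failure_kinds[2]);
#endif
    return 0;
}
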
// Initialize warmup counters and optimize instructions. This cannot fail.
void

@@ -2037,8 +1623,8 @@ specialize_class_call(PyObject *callable, _Py_CODEUNIT *instr, int nargs)
}

static int
specialize_method_descriptor(PyMethodDescrObject *descr, _Py_CODEUNIT *instr,
                             int nargs)
specialize_method_descriptor(PyMethodDescrObject *descr, PyObject *self_or_null,
                             _Py_CODEUNIT *instr, int nargs)
{
    switch (descr->d_method->ml_flags &
            (METH_VARARGS | METH_FASTCALL | METH_NOARGS | METH_O |

@@ -2062,8 +1648,11 @@ specialize_method_descriptor(PyMethodDescrObject *descr, _Py_CODEUNIT *instr,
        bool pop = (next.op.code == POP_TOP);
        int oparg = instr->op.arg;
        if ((PyObject *)descr == list_append && oparg == 1 && pop) {
            specialize(instr, CALL_LIST_APPEND);
            return 0;
            assert(self_or_null != NULL);
            if (PyList_CheckExact(self_or_null)) {
                specialize(instr, CALL_LIST_APPEND);
                return 0;
            }
        }
        specialize(instr, CALL_METHOD_DESCRIPTOR_O);
        return 0;

@@ -2201,7 +1790,7 @@ specialize_c_call(PyObject *callable, _Py_CODEUNIT *instr, int nargs)
}

Py_NO_INLINE void
_Py_Specialize_Call(_PyStackRef callable_st, _Py_CODEUNIT *instr, int nargs)
_Py_Specialize_Call(_PyStackRef callable_st, _PyStackRef self_or_null_st, _Py_CODEUNIT *instr, int nargs)
{
    PyObject *callable = PyStackRef_AsPyObjectBorrow(callable_st);

@@ -2219,7 +1808,9 @@ _Py_Specialize_Call(_PyStackRef callable_st, _Py_CODEUNIT *instr, int nargs)
        fail = specialize_class_call(callable, instr, nargs);
    }
    else if (Py_IS_TYPE(callable, &PyMethodDescr_Type)) {
        fail = specialize_method_descriptor((PyMethodDescrObject *)callable, instr, nargs);
        PyObject *self_or_null = PyStackRef_AsPyObjectBorrow(self_or_null_st);
        fail = specialize_method_descriptor((PyMethodDescrObject *)callable,
                                            self_or_null, instr, nargs);
    }
    else if (PyMethod_Check(callable)) {
        PyObject *func = ((PyMethodObject *)callable)->im_func;

@@ -19,6 +19,8 @@ typedef struct _table_entry {
    int linenumber;
    const char *filename_borrow;
    int linenumber_borrow;
    int borrows;
    _PyStackRef borrowed_from;
} TableEntry;

TableEntry *

@@ -34,6 +36,8 @@ make_table_entry(PyObject *obj, const char *filename, int linenumber)
    result->linenumber = linenumber;
    result->filename_borrow = NULL;
    result->linenumber_borrow = 0;
    result->borrows = 0;
    result->borrowed_from = PyStackRef_NULL;
    return result;
}

@@ -47,11 +51,13 @@ _Py_stackref_get_object(_PyStackRef ref)
    PyInterpreterState *interp = PyInterpreterState_Get();
    assert(interp != NULL);
    if (ref.index >= interp->next_stackref) {
        _Py_FatalErrorFormat(__func__, "Garbled stack ref with ID %" PRIu64 "\n", ref.index);
        _Py_FatalErrorFormat(__func__,
                             "Garbled stack ref with ID %" PRIu64 "\n", ref.index);
    }
    TableEntry *entry = _Py_hashtable_get(interp->open_stackrefs_table, (void *)ref.index);
    if (entry == NULL) {
        _Py_FatalErrorFormat(__func__, "Accessing closed stack ref with ID %" PRIu64 "\n", ref.index);
        _Py_FatalErrorFormat(__func__,
                             "Accessing closed stack ref with ID %" PRIu64 "\n", ref.index);
    }
    return entry->obj;
}

@@ -68,13 +74,16 @@ _Py_stackref_close(_PyStackRef ref, const char *filename, int linenumber)
    assert(!PyStackRef_IsError(ref));
    PyInterpreterState *interp = PyInterpreterState_Get();
    if (ref.index >= interp->next_stackref) {
        _Py_FatalErrorFormat(__func__, "Invalid StackRef with ID %" PRIu64 " at %s:%d\n", (void *)ref.index, filename, linenumber);
        _Py_FatalErrorFormat(__func__,
                             "Invalid StackRef with ID %" PRIu64 " at %s:%d\n",
                             ref.index, filename, linenumber);
    }
    PyObject *obj;
    if (ref.index < INITIAL_STACKREF_INDEX) {
        if (ref.index == 0) {
            _Py_FatalErrorFormat(__func__, "Passing NULL to PyStackRef_CLOSE at %s:%d\n", filename, linenumber);
            _Py_FatalErrorFormat(__func__,
                                 "Passing NULL to _Py_stackref_close at %s:%d\n",
                                 filename, linenumber);
        }
        // Pre-allocated reference to None, False or True -- Do not clear
        TableEntry *entry = _Py_hashtable_get(interp->open_stackrefs_table, (void *)ref.index);

@@ -88,10 +97,27 @@ _Py_stackref_close(_PyStackRef ref, const char *filename, int linenumber)
        if (entry != NULL) {
            _Py_FatalErrorFormat(__func__,
                "Double close of ref ID %" PRIu64 " at %s:%d. Referred to instance of %s at %p. Closed at %s:%d\n",
                (void *)ref.index, filename, linenumber, entry->classname, entry->obj, entry->filename, entry->linenumber);
                ref.index, filename, linenumber, entry->classname, entry->obj, entry->filename, entry->linenumber);
        }
#endif
        _Py_FatalErrorFormat(__func__, "Invalid StackRef with ID %" PRIu64 "\n", (void *)ref.index);
        _Py_FatalErrorFormat(__func__,
                             "Invalid StackRef with ID %" PRIu64 " at %s:%d\n",
                             ref.index, filename, linenumber);
    }
    if (!PyStackRef_IsNull(entry->borrowed_from)) {
        _PyStackRef borrowed_from = entry->borrowed_from;
        TableEntry *entry_borrowed = _Py_hashtable_get(interp->open_stackrefs_table, (void *)borrowed_from.index);
        if (entry_borrowed == NULL) {
            _Py_FatalErrorFormat(__func__,
                                 "Invalid borrowed StackRef with ID %" PRIu64 " at %s:%d\n",
                                 borrowed_from.index, filename, linenumber);
        }
        entry_borrowed->borrows--;
    }
    if (entry->borrows > 0) {
        _Py_FatalErrorFormat(__func__,
                             "StackRef with ID %" PRIu64 " closed with %d borrowed refs at %s:%d. Opened at %s:%d\n",
                             ref.index, entry->borrows, filename, linenumber, entry->filename, entry->linenumber);
    }
    obj = entry->obj;
    free(entry);

@@ -143,15 +169,62 @@ _Py_stackref_record_borrow(_PyStackRef ref, const char *filename, int linenumber
        if (entry != NULL) {
            _Py_FatalErrorFormat(__func__,
                "Borrow of closed ref ID %" PRIu64 " at %s:%d. Referred to instance of %s at %p. Closed at %s:%d\n",
                (void *)ref.index, filename, linenumber, entry->classname, entry->obj, entry->filename, entry->linenumber);
                ref.index, filename, linenumber, entry->classname, entry->obj, entry->filename, entry->linenumber);
        }
#endif
        _Py_FatalErrorFormat(__func__, "Invalid StackRef with ID %" PRIu64 " at %s:%d\n", (void *)ref.index, filename, linenumber);
        _Py_FatalErrorFormat(__func__,
                             "Invalid StackRef with ID %" PRIu64 " at %s:%d\n",
                             ref.index, filename, linenumber);
    }
    entry->filename_borrow = filename;
    entry->linenumber_borrow = linenumber;
}

_PyStackRef
_Py_stackref_get_borrowed_from(_PyStackRef ref, const char *filename, int linenumber)
{
    assert(!PyStackRef_IsError(ref));
    PyInterpreterState *interp = PyInterpreterState_Get();

    TableEntry *entry = _Py_hashtable_get(interp->open_stackrefs_table, (void *)ref.index);
    if (entry == NULL) {
        _Py_FatalErrorFormat(__func__,
                             "Invalid StackRef with ID %" PRIu64 " at %s:%d\n",
                             ref.index, filename, linenumber);
    }

    return entry->borrowed_from;
}

// This function should be used no more than once per ref.
void
_Py_stackref_set_borrowed_from(_PyStackRef ref, _PyStackRef borrowed_from, const char *filename, int linenumber)
{
    assert(!PyStackRef_IsError(ref));
    PyInterpreterState *interp = PyInterpreterState_Get();

    TableEntry *entry = _Py_hashtable_get(interp->open_stackrefs_table, (void *)ref.index);
    if (entry == NULL) {
        _Py_FatalErrorFormat(__func__,
                             "Invalid StackRef (ref) with ID %" PRIu64 " at %s:%d\n",
                             ref.index, filename, linenumber);
    }

    assert(PyStackRef_IsNull(entry->borrowed_from));
    if (PyStackRef_IsNull(borrowed_from)) {
        return;
    }

    TableEntry *entry_borrowed = _Py_hashtable_get(interp->open_stackrefs_table, (void *)borrowed_from.index);
    if (entry_borrowed == NULL) {
        _Py_FatalErrorFormat(__func__,
                             "Invalid StackRef (borrowed_from) with ID %" PRIu64 " at %s:%d\n",
                             borrowed_from.index, filename, linenumber);
    }

    entry->borrowed_from = borrowed_from;
    entry_borrowed->borrows++;
}

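The bookkeeping above maintains one invariant: a ref that records a borrowed_from link bumps the lender's borrows count, and the lender cannot be closed while that count is positive. A self-contained toy model of that invariant in plain C (Entry and all function names here are hypothetical, not the CPython API):

#include <assert.h>
#include <stdio.h>
#include <stddef.h>

/* Toy stand-in for the debug TableEntry bookkeeping. */
typedef struct Entry {
    int open;                     /* 1 while the ref is live */
    int borrows;                  /* refs currently borrowing from this one */
    struct Entry *borrowed_from;  /* set at most once per ref */
} Entry;

static void
set_borrowed_from(Entry *ref, Entry *lender)
{
    assert(ref->borrowed_from == NULL);  /* at most once per ref */
    ref->borrowed_from = lender;
    lender->borrows++;
}

static void
close_ref(Entry *ref)
{
    if (ref->borrowed_from != NULL) {
        ref->borrowed_from->borrows--;   /* release the loan first */
    }
    assert(ref->borrows == 0 && "closed with outstanding borrows");
    ref->open = 0;
}

int
main(void)
{
    Entry owner = {1, 0, NULL};
    Entry borrower = {1, 0, NULL};

    set_borrowed_from(&borrower, &owner);
    close_ref(&borrower);   /* must happen before closing the owner */
    close_ref(&owner);      /* borrows is back to 0, so this is legal */
    printf("owner open=%d borrows=%d\n", owner.open, owner.borrows);
    return 0;
}
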
void
_Py_stackref_associate(PyInterpreterState *interp, PyObject *obj, _PyStackRef ref)

@@ -3199,7 +3199,7 @@ symtable_raise_if_not_coroutine(struct symtable *st, const char *msg, _Py_Source

struct symtable *
_Py_SymtableStringObjectFlags(const char *str, PyObject *filename,
                              int start, PyCompilerFlags *flags)
                              int start, PyCompilerFlags *flags, PyObject *module)
{
    struct symtable *st;
    mod_ty mod;

@@ -3209,7 +3209,7 @@ _Py_SymtableStringObjectFlags(const char *str, PyObject *filename,
    if (arena == NULL)
        return NULL;

    mod = _PyParser_ASTFromString(str, filename, start, flags, arena);
    mod = _PyParser_ASTFromString(str, filename, start, flags, arena, module);
    if (mod == NULL) {
        _PyArena_Free(arena);
        return NULL;

@@ -1587,10 +1587,10 @@ get_hash_info(PyThreadState *tstate)
    } while(0)

    SET_HASH_INFO_ITEM(PyLong_FromLong(8 * sizeof(Py_hash_t)));
    SET_HASH_INFO_ITEM(PyLong_FromSsize_t(_PyHASH_MODULUS));
    SET_HASH_INFO_ITEM(PyLong_FromLong(_PyHASH_INF));
    SET_HASH_INFO_ITEM(PyLong_FromSsize_t(PyHASH_MODULUS));
    SET_HASH_INFO_ITEM(PyLong_FromLong(PyHASH_INF));
    SET_HASH_INFO_ITEM(PyLong_FromLong(0));  // This is no longer used
    SET_HASH_INFO_ITEM(PyLong_FromLong(_PyHASH_IMAG));
    SET_HASH_INFO_ITEM(PyLong_FromLong(PyHASH_IMAG));
    SET_HASH_INFO_ITEM(PyUnicode_FromString(hashfunc->name));
    SET_HASH_INFO_ITEM(PyLong_FromLong(hashfunc->hash_bits));
    SET_HASH_INFO_ITEM(PyLong_FromLong(hashfunc->seed_bits));

@@ -2281,7 +2281,9 @@ static PyObject *
sys__stats_on_impl(PyObject *module)
/*[clinic end generated code: output=aca53eafcbb4d9fe input=43b5bfe145299e55]*/
{
    _Py_StatsOn();
    if (_Py_StatsOn() < 0) {
        return NULL;
    }
    Py_RETURN_NONE;
}

@@ -2378,14 +2380,14 @@ sys_activate_stack_trampoline_impl(PyObject *module, const char *backend)
            return NULL;
        }
    }
    else if (strcmp(backend, "perf_jit") == 0) {
        _PyPerf_Callbacks cur_cb;
        _PyPerfTrampoline_GetCallbacks(&cur_cb);
        if (cur_cb.write_state != _Py_perfmap_jit_callbacks.write_state) {
            if (_PyPerfTrampoline_SetCallbacks(&_Py_perfmap_jit_callbacks) < 0 ) {
                PyErr_SetString(PyExc_ValueError, "can't activate perf jit trampoline");
                return NULL;
            }
        }
    else if (strcmp(backend, "perf_jit") == 0) {
        _PyPerf_Callbacks cur_cb;
        _PyPerfTrampoline_GetCallbacks(&cur_cb);
        if (cur_cb.write_state != _Py_perfmap_jit_callbacks.write_state) {
            if (_PyPerfTrampoline_SetCallbacks(&_Py_perfmap_jit_callbacks) < 0 ) {
                PyErr_SetString(PyExc_ValueError, "can't activate perf jit trampoline");
                return NULL;
            }
        }
    }

@@ -334,14 +334,12 @@ PyThread_GetInfo(void)

#ifdef HAVE_PTHREAD_STUBS
    value = Py_NewRef(Py_None);
#elif defined(_POSIX_THREADS)
#else
    value = PyUnicode_FromString("pymutex");
    if (value == NULL) {
        Py_DECREF(threadinfo);
        return NULL;
    }
#else
    value = Py_NewRef(Py_None);
#endif
    PyStructSequence_SET_ITEM(threadinfo, pos++, value);

@@ -1028,14 +1028,24 @@ _Py_DumpWideString(int fd, wchar_t *str)

/* Write a frame into the file fd: "File "xxx", line xxx in xxx".

   This function is signal safe. */
   This function is signal safe.

static void
   Return 0 on success. Return -1 if the frame is invalid. */

static int
dump_frame(int fd, _PyInterpreterFrame *frame)
{
    assert(frame->owner < FRAME_OWNED_BY_INTERPRETER);
    if (frame->owner == FRAME_OWNED_BY_INTERPRETER) {
        /* Ignore trampoline frame */
        return 0;
    }

    PyCodeObject *code = _PyFrame_GetCode(frame);
    PyCodeObject *code = _PyFrame_SafeGetCode(frame);
    if (code == NULL) {
        return -1;
    }

    int res = 0;
    PUTS(fd, "  File ");
    if (code->co_filename != NULL
        && PyUnicode_Check(code->co_filename))

@@ -1043,29 +1053,36 @@ dump_frame(int fd, _PyInterpreterFrame *frame)
        PUTS(fd, "\"");
        _Py_DumpASCII(fd, code->co_filename);
        PUTS(fd, "\"");
    } else {
        PUTS(fd, "???");
    }
    int lasti = PyUnstable_InterpreterFrame_GetLasti(frame);
    int lineno = _PyCode_Addr2LineNoTstate(code, lasti);
    else {
        PUTS(fd, "???");
        res = -1;
    }

    PUTS(fd, ", line ");
    int lasti = _PyFrame_SafeGetLasti(frame);
    int lineno = -1;
    if (lasti >= 0) {
        lineno = _PyCode_SafeAddr2Line(code, lasti);
    }
    if (lineno >= 0) {
        _Py_DumpDecimal(fd, (size_t)lineno);
    }
    else {
        PUTS(fd, "???");
        res = -1;
    }
    PUTS(fd, " in ");

    if (code->co_name != NULL
        && PyUnicode_Check(code->co_name)) {
    PUTS(fd, " in ");
    if (code->co_name != NULL && PyUnicode_Check(code->co_name)) {
        _Py_DumpASCII(fd, code->co_name);
    }
    else {
        PUTS(fd, "???");
        res = -1;
    }

    PUTS(fd, "\n");
    return res;
}

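dump_frame stays signal safe by building its output from write()-based helpers (PUTS, _Py_DumpASCII, _Py_DumpDecimal) instead of stdio, since fprintf may allocate memory or take locks inside a signal handler. A minimal standalone illustration of that constraint, assuming POSIX sigaction/write (the handler and message are illustrative):

#include <signal.h>
#include <string.h>
#include <unistd.h>

/* Async-signal-safe: write() is on the POSIX safe list, fprintf() is not. */
static void
fault_handler(int signum)
{
    static const char msg[] = "Fatal signal, dumping state\n";
    (void)signum;
    (void)write(STDERR_FILENO, msg, sizeof(msg) - 1);
    _exit(1);
}

int
main(void)
{
    struct sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = fault_handler;
    sigaction(SIGSEGV, &sa, NULL);
    /* raise(SIGSEGV); */  /* uncomment to see the handler fire */
    return 0;
}
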
static int

@@ -1077,6 +1094,9 @@ tstate_is_freed(PyThreadState *tstate)
    if (_PyMem_IsPtrFreed(tstate->interp)) {
        return 1;
    }
    if (_PyMem_IsULongFreed(tstate->thread_id)) {
        return 1;
    }
    return 0;
}

@@ -1096,7 +1116,7 @@ dump_traceback(int fd, PyThreadState *tstate, int write_header)
    }

    if (tstate_is_freed(tstate)) {
        PUTS(fd, "  <tstate is freed>\n");
        PUTS(fd, "  <freed thread state>\n");
        return;
    }

@@ -1108,17 +1128,6 @@ dump_traceback(int fd, PyThreadState *tstate, int write_header)

    unsigned int depth = 0;
    while (1) {
        if (frame->owner == FRAME_OWNED_BY_INTERPRETER) {
            /* Trampoline frame */
            frame = frame->previous;
            if (frame == NULL) {
                break;
            }

            /* Can't have more than one shim frame in a row */
            assert(frame->owner != FRAME_OWNED_BY_INTERPRETER);
        }

        if (MAX_FRAME_DEPTH <= depth) {
        if (MAX_FRAME_DEPTH < depth) {
            PUTS(fd, "plus ");

@@ -1128,8 +1137,20 @@ dump_traceback(int fd, PyThreadState *tstate, int write_header)
            break;
        }

        dump_frame(fd, frame);
        frame = frame->previous;
        if (_PyMem_IsPtrFreed(frame)) {
            PUTS(fd, "  <freed frame>\n");
            break;
        }
        // Read frame->previous early since memory can be freed during
        // dump_frame()
        _PyInterpreterFrame *previous = frame->previous;

        if (dump_frame(fd, frame) < 0) {
            PUTS(fd, "  <invalid frame>\n");
            break;
        }

        frame = previous;
        if (frame == NULL) {
            break;
        }

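The reordering above is the classic "read the next pointer before visiting the node" pattern: dump_frame() runs while the frame chain may be freed out from under it, so frame->previous is captured first. A generic, self-contained sketch of the pattern with a hypothetical list type (here the visit itself frees the node):

#include <stdio.h>
#include <stdlib.h>

typedef struct Node {
    int value;
    struct Node *next;
} Node;

/* The visit may invalidate the node, so read the link first. */
static void
walk_and_free(Node *head)
{
    while (head != NULL) {
        Node *next = head->next;   /* capture before the node can vanish */
        printf("visiting %d\n", head->value);
        free(head);
        head = next;
    }
}

int
main(void)
{
    Node *c = malloc(sizeof(Node)); c->value = 3; c->next = NULL;
    Node *b = malloc(sizeof(Node)); b->value = 2; b->next = c;
    Node *a = malloc(sizeof(Node)); a->value = 1; a->next = b;
    walk_and_free(a);
    return 0;
}
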
@@ -1226,7 +1247,9 @@ write_thread_id(int fd, PyThreadState *tstate, int is_current)
                  tstate->thread_id,
                  sizeof(unsigned long) * 2);

    write_thread_name(fd, tstate);
    if (!_PyMem_IsULongFreed(tstate->thread_id)) {
        write_thread_name(fd, tstate);
    }

    PUTS(fd, " (most recent call first):\n");
}

@@ -1284,7 +1307,6 @@ _Py_DumpTracebackThreads(int fd, PyInterpreterState *interp,
        return "unable to get the thread head state";

    /* Dump the traceback of each thread */
    tstate = PyInterpreterState_ThreadHead(interp);
    unsigned int nthreads = 0;
    _Py_BEGIN_SUPPRESS_IPH
    do

@@ -1295,11 +1317,18 @@ _Py_DumpTracebackThreads(int fd, PyInterpreterState *interp,
            PUTS(fd, "...\n");
            break;
        }

        if (tstate_is_freed(tstate)) {
            PUTS(fd, "<freed thread state>\n");
            break;
        }

        write_thread_id(fd, tstate, tstate == current_tstate);
        if (tstate == current_tstate && tstate->interp->gc.collecting) {
            PUTS(fd, "  Garbage-collecting\n");
        }
        dump_traceback(fd, tstate, 0);

        tstate = PyThreadState_Next(tstate);
        nthreads++;
    } while (tstate != NULL);

@@ -46,7 +46,7 @@ typedef struct tracemalloc_frame frame_t;
typedef struct tracemalloc_traceback traceback_t;

#define TRACEBACK_SIZE(NFRAME) \
        (sizeof(traceback_t) + sizeof(frame_t) * (NFRAME - 1))
        (sizeof(traceback_t) + sizeof(frame_t) * (NFRAME))

static const int MAX_NFRAME = UINT16_MAX;

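The change from (NFRAME - 1) to (NFRAME) is what you would expect when a trailing one-element array (frames[1], where the struct already holds the first frame) becomes a C99 flexible array member (frames[], which contributes nothing to sizeof). A standalone sketch of the arithmetic, with hypothetical stand-in types:

#include <stdio.h>
#include <stdlib.h>

typedef struct { const char *filename; int lineno; } frame_t;

/* Flexible array member: sizeof(traceback_t) excludes frames[]. */
typedef struct {
    int nframe;
    frame_t frames[];
} traceback_t;

#define TRACEBACK_SIZE(NFRAME) \
        (sizeof(traceback_t) + sizeof(frame_t) * (NFRAME))

int
main(void)
{
    /* Room for 3 frames: header plus 3 array elements, no -1 needed. */
    traceback_t *tb = malloc(TRACEBACK_SIZE(3));
    if (tb == NULL) {
        return 1;
    }
    tb->nframe = 3;
    printf("header=%zu frame=%zu total=%zu\n",
           sizeof(traceback_t), sizeof(frame_t), TRACEBACK_SIZE(3));
    free(tb);
    return 0;
}
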
@@ -329,8 +329,9 @@ traceback_new(void)
    traceback->nframe = 0;
    traceback->total_nframe = 0;
    traceback_get_frames(traceback);
    if (traceback->nframe == 0)
        return &tracemalloc_empty_traceback;
    if (traceback->nframe == 0) {
        return tracemalloc_empty_traceback;
    }
    traceback->hash = traceback_hash(traceback);

    /* intern the traceback */

@@ -754,12 +755,18 @@ _PyTraceMalloc_Init(void)
        return _PyStatus_NO_MEMORY();
    }

    tracemalloc_empty_traceback.nframe = 1;
    tracemalloc_empty_traceback.total_nframe = 1;
    assert(tracemalloc_empty_traceback == NULL);
    tracemalloc_empty_traceback = raw_malloc(TRACEBACK_SIZE(1));
    if (tracemalloc_empty_traceback == NULL) {
        return _PyStatus_NO_MEMORY();
    }

    tracemalloc_empty_traceback->nframe = 1;
    tracemalloc_empty_traceback->total_nframe = 1;
    /* borrowed reference */
    tracemalloc_empty_traceback.frames[0].filename = &_Py_STR(anon_unknown);
    tracemalloc_empty_traceback.frames[0].lineno = 0;
    tracemalloc_empty_traceback.hash = traceback_hash(&tracemalloc_empty_traceback);
    tracemalloc_empty_traceback->frames[0].filename = &_Py_STR(anon_unknown);
    tracemalloc_empty_traceback->frames[0].lineno = 0;
    tracemalloc_empty_traceback->hash = traceback_hash(tracemalloc_empty_traceback);

    tracemalloc_config.initialized = TRACEMALLOC_INITIALIZED;
    return _PyStatus_OK();

@@ -782,6 +789,9 @@ tracemalloc_deinit(void)
    _Py_hashtable_destroy(tracemalloc_filenames);

    PyThread_tss_delete(&tracemalloc_reentrant_key);

    raw_free(tracemalloc_empty_traceback);
    tracemalloc_empty_traceback = NULL;
}
