Merge remote-tracking branch 'upstream/main' into lazy

Pablo Galindo committed 2025-12-06 15:40:02 +00:00
commit db151a5192
869 changed files with 45727 additions and 16994 deletions

Python/bytecodes.c

@@ -2014,14 +2014,8 @@ dummy_func(
}
inst(BUILD_STRING, (pieces[oparg] -- str)) {
STACKREFS_TO_PYOBJECTS(pieces, oparg, pieces_o);
if (CONVERSION_FAILED(pieces_o)) {
DECREF_INPUTS();
ERROR_IF(true);
}
PyObject *str_o = _PyUnicode_JoinArray(&_Py_STR(empty), pieces_o, oparg);
STACKREFS_TO_PYOBJECTS_CLEANUP(pieces_o);
DECREF_INPUTS();
PyObject *str_o = _Py_BuildString_StackRefSteal(pieces, oparg);
DEAD(pieces);
ERROR_IF(str_o == NULL);
str = PyStackRef_FromPyObjectSteal(str_o);
}
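This hunk sets the pattern the rest of the commit repeats: the inline STACKREFS_TO_PYOBJECTS / call / cleanup / DECREF_INPUTS sequence is folded into a single *_StackRefSteal helper that consumes its stack-reference inputs whether it succeeds or fails. A minimal sketch of the shape such a helper plausibly has, reconstructed from the removed lines above; the helper name is from the diff, but this body is an editorial guess, not the commit's implementation:

    static PyObject *
    sketch_build_string_steal(_PyStackRef *pieces, int oparg)
    {
        PyObject *str_o = NULL;
        // Borrow the stack refs as a contiguous PyObject* array.
        STACKREFS_TO_PYOBJECTS(pieces, oparg, pieces_o);
        if (!CONVERSION_FAILED(pieces_o)) {
            str_o = _PyUnicode_JoinArray(&_Py_STR(empty), pieces_o, oparg);
            STACKREFS_TO_PYOBJECTS_CLEANUP(pieces_o);
        }
        // "Steal" semantics: consume the inputs even on error, so the
        // bytecode body no longer needs its own DECREF_INPUTS() call.
        for (int i = 0; i < oparg; i++) {
            PyStackRef_CLOSE(pieces[i]);
        }
        return str_o;  // NULL on error, with an exception set
    }

Moving the consumption into the helper is what lets each instruction body shrink to one call plus DEAD() annotations, keeping the stack effects visible to the cases generator.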
@@ -2136,17 +2130,9 @@ dummy_func(
}
inst(BUILD_MAP, (values[oparg*2] -- map)) {
STACKREFS_TO_PYOBJECTS(values, oparg*2, values_o);
if (CONVERSION_FAILED(values_o)) {
DECREF_INPUTS();
ERROR_IF(true);
}
PyObject *map_o = _PyDict_FromItems(
values_o, 2,
values_o+1, 2,
oparg);
STACKREFS_TO_PYOBJECTS_CLEANUP(values_o);
DECREF_INPUTS();
PyObject *map_o = _Py_BuildMap_StackRefSteal(values, oparg);
DEAD(values);
ERROR_IF(map_o == NULL);
map = PyStackRef_FromPyObjectStealMortal(map_o);
}
@@ -2991,8 +2977,8 @@ dummy_func(
JUMP_BACKWARD_JIT,
};
tier1 op(_SPECIALIZE_JUMP_BACKWARD, (--)) {
#if ENABLE_SPECIALIZATION_FT
specializing tier1 op(_SPECIALIZE_JUMP_BACKWARD, (--)) {
#if ENABLE_SPECIALIZATION
if (this_instr->op.code == JUMP_BACKWARD) {
uint8_t desired = tstate->interp->jit ? JUMP_BACKWARD_JIT : JUMP_BACKWARD_NO_JIT;
FT_ATOMIC_STORE_UINT8_RELAXED(this_instr->op.code, desired);
@@ -3006,25 +2992,21 @@ dummy_func(
tier1 op(_JIT, (--)) {
#ifdef _Py_TIER2
_Py_BackoffCounter counter = this_instr[1].counter;
if (backoff_counter_triggers(counter) && this_instr->op.code == JUMP_BACKWARD_JIT) {
_Py_CODEUNIT *start = this_instr;
/* Back up over EXTENDED_ARGs so optimizer sees the whole instruction */
if (!IS_JIT_TRACING() && backoff_counter_triggers(counter) &&
this_instr->op.code == JUMP_BACKWARD_JIT &&
next_instr->op.code != ENTER_EXECUTOR) {
/* Back up over EXTENDED_ARGs so executor is inserted at the correct place */
_Py_CODEUNIT *insert_exec_at = this_instr;
while (oparg > 255) {
oparg >>= 8;
start--;
insert_exec_at--;
}
_PyExecutorObject *executor;
int optimized = _PyOptimizer_Optimize(frame, start, &executor, 0);
if (optimized <= 0) {
this_instr[1].counter = restart_backoff_counter(counter);
ERROR_IF(optimized < 0);
int succ = _PyJit_TryInitializeTracing(tstate, frame, this_instr, insert_exec_at, next_instr, STACK_LEVEL(), 0, NULL, oparg);
if (succ) {
ENTER_TRACING();
}
else {
this_instr[1].counter = initial_jump_backoff_counter();
assert(tstate->current_executor == NULL);
assert(executor != tstate->interp->cold_executor);
tstate->jit_exit = NULL;
TIER1_TO_TIER2(executor);
this_instr[1].counter = restart_backoff_counter(counter);
}
}
else {
@@ -3070,6 +3052,10 @@ dummy_func(
tier1 inst(ENTER_EXECUTOR, (--)) {
#ifdef _Py_TIER2
if (IS_JIT_TRACING()) {
next_instr = this_instr;
goto stop_tracing;
}
PyCodeObject *code = _PyFrame_GetCode(frame);
_PyExecutorObject *executor = code->co_executors->executors[oparg & 255];
assert(executor->vm_data.index == INSTR_OFFSET() - 1);
@@ -3131,7 +3117,7 @@ dummy_func(
macro(POP_JUMP_IF_NOT_NONE) = unused/1 + _IS_NONE + _POP_JUMP_IF_FALSE;
tier1 inst(JUMP_BACKWARD_NO_INTERRUPT, (--)) {
replaced inst(JUMP_BACKWARD_NO_INTERRUPT, (--)) {
/* This bytecode is used in the `yield from` or `await` loop.
* If there is an interrupt, we want it handled in the innermost
* generator or coroutine, so we deliberately do not check it here.
@@ -3742,7 +3728,7 @@ dummy_func(
#if ENABLE_SPECIALIZATION_FT
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_Call(callable, next_instr, oparg + !PyStackRef_IsNull(self_or_null));
_Py_Specialize_Call(callable, self_or_null, next_instr, oparg + !PyStackRef_IsNull(self_or_null));
DISPATCH_SAME_OPARG();
}
OPCODE_DEFERRED_INC(CALL);
@@ -3944,27 +3930,20 @@ dummy_func(
#if TIER_ONE
assert(opcode != INSTRUMENTED_CALL);
#endif
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
int total_args = oparg;
_PyStackRef *arguments = args;
if (!PyStackRef_IsNull(self_or_null)) {
arguments--;
total_args++;
}
/* Callable is not a normal Python function */
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
DECREF_INPUTS();
ERROR_IF(true);
}
PyObject *res_o = PyObject_Vectorcall(
callable_o, args_o,
total_args | PY_VECTORCALL_ARGUMENTS_OFFSET,
NULL);
STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
assert((res_o != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
DECREF_INPUTS();
PyObject *res_o = _Py_VectorCall_StackRefSteal(
callable,
arguments,
total_args,
PyStackRef_NULL);
DEAD(args);
DEAD(self_or_null);
DEAD(callable);
ERROR_IF(res_o == NULL);
res = PyStackRef_FromPyObjectSteal(res_o);
}
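The call instructions get the same treatment, and the removed block documents exactly what the new helper absorbs. One plausible reading of _Py_VectorCall_StackRefSteal, reconstructed from those lines (editorial sketch only; the PyStackRef_NULL argument at the call site above is the empty kwnames slot, compare the keyword variant further down this diff):

    static PyObject *
    sketch_vectorcall_steal(_PyStackRef callable, _PyStackRef *arguments,
                            int total_args, _PyStackRef kwnames)
    {
        PyObject *res_o = NULL;
        STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
        if (!CONVERSION_FAILED(args_o)) {
            PyObject *kwnames_o = PyStackRef_IsNull(kwnames)
                ? NULL : PyStackRef_AsPyObjectBorrow(kwnames);
            int positional = total_args -
                (kwnames_o == NULL ? 0 : (int)PyTuple_GET_SIZE(kwnames_o));
            res_o = PyObject_Vectorcall(
                PyStackRef_AsPyObjectBorrow(callable), args_o,
                positional | PY_VECTORCALL_ARGUMENTS_OFFSET, kwnames_o);
            STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
        }
        // Steal: consume kwnames (if any), every argument, and the callable.
        if (!PyStackRef_IsNull(kwnames)) {
            PyStackRef_CLOSE(kwnames);
        }
        for (int i = 0; i < total_args; i++) {
            PyStackRef_CLOSE(arguments[i]);
        }
        PyStackRef_CLOSE(callable);
        return res_o;
    }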
@@ -4239,14 +4218,13 @@ dummy_func(
}
DEOPT_IF(tp->tp_vectorcall == NULL);
STAT_INC(CALL, hit);
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
DECREF_INPUTS();
ERROR_IF(true);
}
PyObject *res_o = tp->tp_vectorcall((PyObject *)tp, args_o, total_args, NULL);
STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
DECREF_INPUTS();
PyObject *res_o = _Py_CallBuiltinClass_StackRefSteal(
callable,
arguments,
total_args);
DEAD(args);
DEAD(self_or_null);
DEAD(callable);
ERROR_IF(res_o == NULL);
res = PyStackRef_FromPyObjectSteal(res_o);
}
@@ -4294,31 +4272,24 @@ dummy_func(
op(_CALL_BUILTIN_FAST, (callable, self_or_null, args[oparg] -- res)) {
/* Builtin METH_FASTCALL functions, without keywords */
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
int total_args = oparg;
_PyStackRef *arguments = args;
if (!PyStackRef_IsNull(self_or_null)) {
arguments--;
total_args++;
}
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
DEOPT_IF(!PyCFunction_CheckExact(callable_o));
DEOPT_IF(PyCFunction_GET_FLAGS(callable_o) != METH_FASTCALL);
STAT_INC(CALL, hit);
PyCFunction cfunc = PyCFunction_GET_FUNCTION(callable_o);
/* res = func(self, args, nargs) */
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
DECREF_INPUTS();
ERROR_IF(true);
}
PyObject *res_o = _PyCFunctionFast_CAST(cfunc)(
PyCFunction_GET_SELF(callable_o),
args_o,
total_args);
STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
assert((res_o != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
DECREF_INPUTS();
PyObject *res_o = _Py_BuiltinCallFast_StackRefSteal(
callable,
arguments,
total_args
);
DEAD(args);
DEAD(self_or_null);
DEAD(callable);
ERROR_IF(res_o == NULL);
res = PyStackRef_FromPyObjectSteal(res_o);
}
@@ -4331,30 +4302,20 @@ dummy_func(
op(_CALL_BUILTIN_FAST_WITH_KEYWORDS, (callable, self_or_null, args[oparg] -- res)) {
/* Builtin METH_FASTCALL | METH_KEYWORDS functions */
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
int total_args = oparg;
_PyStackRef *arguments = args;
if (!PyStackRef_IsNull(self_or_null)) {
arguments--;
total_args++;
}
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
DEOPT_IF(!PyCFunction_CheckExact(callable_o));
DEOPT_IF(PyCFunction_GET_FLAGS(callable_o) != (METH_FASTCALL | METH_KEYWORDS));
STAT_INC(CALL, hit);
/* res = func(self, arguments, nargs, kwnames) */
PyCFunctionFastWithKeywords cfunc =
_PyCFunctionFastWithKeywords_CAST(PyCFunction_GET_FUNCTION(callable_o));
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
DECREF_INPUTS();
ERROR_IF(true);
}
PyObject *res_o = cfunc(PyCFunction_GET_SELF(callable_o), args_o, total_args, NULL);
STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
assert((res_o != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
DECREF_INPUTS();
PyObject *res_o = _Py_BuiltinCallFastWithKeywords_StackRefSteal(callable, arguments, total_args);
DEAD(args);
DEAD(self_or_null);
DEAD(callable);
ERROR_IF(res_o == NULL);
res = PyStackRef_FromPyObjectSteal(res_o);
}
@@ -4448,7 +4409,6 @@ dummy_func(
assert(oparg == 1);
PyObject *self_o = PyStackRef_AsPyObjectBorrow(self);
DEOPT_IF(!PyList_CheckExact(self_o));
DEOPT_IF(!LOCK_OBJECT(self_o));
STAT_INC(CALL, hit);
int err = _PyList_AppendTakeRef((PyListObject *)self_o, PyStackRef_AsPyObjectSteal(arg));
@@ -4522,19 +4482,16 @@ dummy_func(
assert(self != NULL);
EXIT_IF(!Py_IS_TYPE(self, d_type));
STAT_INC(CALL, hit);
int nargs = total_args - 1;
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
DECREF_INPUTS();
ERROR_IF(true);
}
PyCFunctionFastWithKeywords cfunc =
_PyCFunctionFastWithKeywords_CAST(meth->ml_meth);
PyObject *res_o = cfunc(self, (args_o + 1), nargs, NULL);
STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
assert((res_o != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
DECREF_INPUTS();
PyObject *res_o = _PyCallMethodDescriptorFastWithKeywords_StackRefSteal(
callable,
meth,
self,
arguments,
total_args
);
DEAD(args);
DEAD(self_or_null);
DEAD(callable);
ERROR_IF(res_o == NULL);
res = PyStackRef_FromPyObjectSteal(res_o);
}
@@ -4602,18 +4559,16 @@ dummy_func(
assert(self != NULL);
EXIT_IF(!Py_IS_TYPE(self, method->d_common.d_type));
STAT_INC(CALL, hit);
int nargs = total_args - 1;
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
DECREF_INPUTS();
ERROR_IF(true);
}
PyCFunctionFast cfunc = _PyCFunctionFast_CAST(meth->ml_meth);
PyObject *res_o = cfunc(self, (args_o + 1), nargs);
STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
assert((res_o != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
DECREF_INPUTS();
PyObject *res_o = _PyCallMethodDescriptorFast_StackRefSteal(
callable,
meth,
self,
arguments,
total_args
);
DEAD(args);
DEAD(self_or_null);
DEAD(callable);
ERROR_IF(res_o == NULL);
res = PyStackRef_FromPyObjectSteal(res_o);
}
@@ -4846,30 +4801,21 @@ dummy_func(
#if TIER_ONE
assert(opcode != INSTRUMENTED_CALL);
#endif
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
int total_args = oparg;
_PyStackRef *arguments = args;
if (!PyStackRef_IsNull(self_or_null)) {
arguments--;
total_args++;
}
/* Callable is not a normal Python function */
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
DECREF_INPUTS();
ERROR_IF(true);
}
PyObject *kwnames_o = PyStackRef_AsPyObjectBorrow(kwnames);
int positional_args = total_args - (int)PyTuple_GET_SIZE(kwnames_o);
PyObject *res_o = PyObject_Vectorcall(
callable_o, args_o,
positional_args | PY_VECTORCALL_ARGUMENTS_OFFSET,
kwnames_o);
PyStackRef_CLOSE(kwnames);
STACKREFS_TO_PYOBJECTS_CLEANUP(args_o);
assert((res_o != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
DECREF_INPUTS();
PyObject *res_o = _Py_VectorCall_StackRefSteal(
callable,
arguments,
total_args,
kwnames);
DEAD(kwnames);
DEAD(args);
DEAD(self_or_null);
DEAD(callable);
ERROR_IF(res_o == NULL);
res = PyStackRef_FromPyObjectSteal(res_o);
}
@@ -5298,19 +5244,40 @@ dummy_func(
tier2 op(_EXIT_TRACE, (exit_p/4 --)) {
_PyExitData *exit = (_PyExitData *)exit_p;
#if defined(Py_DEBUG) && !defined(_Py_JIT)
_Py_CODEUNIT *target = _PyFrame_GetBytecode(frame) + exit->target;
const _Py_CODEUNIT *target = ((frame->owner == FRAME_OWNED_BY_INTERPRETER)
? _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR : _PyFrame_GetBytecode(frame))
+ exit->target;
OPT_HIST(trace_uop_execution_counter, trace_run_length_hist);
if (frame->lltrace >= 2) {
if (frame->lltrace >= 3) {
printf("SIDE EXIT: [UOp ");
_PyUOpPrint(&next_uop[-1]);
printf(", exit %tu, temp %d, target %d -> %s, is_control_flow %d]\n",
exit - current_executor->exits, exit->temperature.value_and_backoff,
(int)(target - _PyFrame_GetBytecode(frame)),
_PyOpcode_OpName[target->op.code], exit->is_control_flow);
}
#endif
tstate->jit_exit = exit;
TIER2_TO_TIER2(exit->executor);
}
tier2 op(_DYNAMIC_EXIT, (exit_p/4 --)) {
#if defined(Py_DEBUG) && !defined(_Py_JIT)
_PyExitData *exit = (_PyExitData *)exit_p;
_Py_CODEUNIT *target = frame->instr_ptr;
OPT_HIST(trace_uop_execution_counter, trace_run_length_hist);
if (frame->lltrace >= 3) {
printf("DYNAMIC EXIT: [UOp ");
_PyUOpPrint(&next_uop[-1]);
printf(", exit %tu, temp %d, target %d -> %s]\n",
exit - current_executor->exits, exit->temperature.value_and_backoff,
(int)(target - _PyFrame_GetBytecode(frame)),
_PyOpcode_OpName[target->op.code]);
}
#endif
tstate->jit_exit = exit;
TIER2_TO_TIER2(exit->executor);
#endif
// Disabled for now (gh-139109) as it slows down dynamic code tremendously.
// Compile and jump to the cold dynamic executors in the future.
GOTO_TIER_ONE(frame->instr_ptr);
}
tier2 op(_CHECK_VALIDITY, (--)) {
@@ -5422,7 +5389,8 @@ dummy_func(
}
tier2 op(_DEOPT, (--)) {
GOTO_TIER_ONE(_PyFrame_GetBytecode(frame) + CURRENT_TARGET());
GOTO_TIER_ONE((frame->owner == FRAME_OWNED_BY_INTERPRETER)
? _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR : _PyFrame_GetBytecode(frame) + CURRENT_TARGET());
}
tier2 op(_HANDLE_PENDING_AND_DEOPT, (--)) {
@@ -5452,32 +5420,76 @@ dummy_func(
tier2 op(_COLD_EXIT, ( -- )) {
_PyExitData *exit = tstate->jit_exit;
assert(exit != NULL);
assert(frame->owner < FRAME_OWNED_BY_INTERPRETER);
_Py_CODEUNIT *target = _PyFrame_GetBytecode(frame) + exit->target;
_Py_BackoffCounter temperature = exit->temperature;
if (!backoff_counter_triggers(temperature)) {
exit->temperature = advance_backoff_counter(temperature);
GOTO_TIER_ONE(target);
}
_PyExecutorObject *executor;
if (target->op.code == ENTER_EXECUTOR) {
PyCodeObject *code = _PyFrame_GetCode(frame);
executor = code->co_executors->executors[target->op.arg];
Py_INCREF(executor);
assert(tstate->jit_exit == exit);
exit->executor = executor;
TIER2_TO_TIER2(exit->executor);
}
else {
if (!backoff_counter_triggers(temperature)) {
exit->temperature = advance_backoff_counter(temperature);
GOTO_TIER_ONE(target);
}
_PyExecutorObject *previous_executor = _PyExecutor_FromExit(exit);
assert(tstate->current_executor == (PyObject *)previous_executor);
int chain_depth = previous_executor->vm_data.chain_depth + 1;
int optimized = _PyOptimizer_Optimize(frame, target, &executor, chain_depth);
if (optimized <= 0) {
exit->temperature = restart_backoff_counter(temperature);
GOTO_TIER_ONE(optimized < 0 ? NULL : target);
// For control-flow guards, we don't want to increase the chain depth, as those don't
// actually represent deopts but rather normal program flow!
int chain_depth = previous_executor->vm_data.chain_depth + !exit->is_control_flow;
// Note: it's safe to use target->op.arg here instead of the oparg given by EXTENDED_ARG.
// The invariant in the optimizer is that the deopt target always points back to the first
// EXTENDED_ARG, so setting it to anything else would be wrong.
int succ = _PyJit_TryInitializeTracing(tstate, frame, target, target, target, STACK_LEVEL(), chain_depth, exit, target->op.arg);
exit->temperature = restart_backoff_counter(exit->temperature);
if (succ) {
GOTO_TIER_ONE_CONTINUE_TRACING(target);
}
exit->temperature = initial_temperature_backoff_counter();
GOTO_TIER_ONE(target);
}
}
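_JIT and _COLD_EXIT both lean on the backoff-counter protocol: a counter "triggers" once its countdown reaches zero, advance_backoff_counter() counts down toward that trigger, and restart_backoff_counter() re-arms it with an exponentially longer wait. A toy model of the protocol, assuming nothing about the real packed bit layout in pycore_backoff.h:

    typedef struct { unsigned value; unsigned backoff; } toy_counter;

    // Callers check toy_triggers() before advancing, as the ops above do.
    static int toy_triggers(toy_counter c) { return c.value == 0; }

    static toy_counter toy_advance(toy_counter c) { c.value--; return c; }

    // After a failed (or completed) attempt: wait exponentially longer
    // before trying again, capped so the shift cannot overflow.
    static toy_counter toy_restart(toy_counter c) {
        if (c.backoff < 16) { c.backoff++; }
        c.value = 1u << c.backoff;
        return c;
    }

Read through this model, a cold side exit stays a cheap counter decrement until it has fired often enough to justify tracing, and each failed _PyJit_TryInitializeTracing pushes the next attempt further out.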
tier2 op(_COLD_DYNAMIC_EXIT, ( -- )) {
// TODO (gh-139109): This should be similar to _COLD_EXIT in the future.
_Py_CODEUNIT *target = frame->instr_ptr;
GOTO_TIER_ONE(target);
}
tier2 op(_GUARD_IP__PUSH_FRAME, (ip/4 --)) {
_Py_CODEUNIT *target = frame->instr_ptr + IP_OFFSET_OF(_PUSH_FRAME);
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += IP_OFFSET_OF(_PUSH_FRAME);
EXIT_IF(true);
}
}
tier2 op(_GUARD_IP_YIELD_VALUE, (ip/4 --)) {
_Py_CODEUNIT *target = frame->instr_ptr + IP_OFFSET_OF(YIELD_VALUE);
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += IP_OFFSET_OF(YIELD_VALUE);
EXIT_IF(true);
}
}
tier2 op(_GUARD_IP_RETURN_VALUE, (ip/4 --)) {
_Py_CODEUNIT *target = frame->instr_ptr + IP_OFFSET_OF(RETURN_VALUE);
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += IP_OFFSET_OF(RETURN_VALUE);
EXIT_IF(true);
}
}
tier2 op(_GUARD_IP_RETURN_GENERATOR, (ip/4 --)) {
_Py_CODEUNIT *target = frame->instr_ptr + IP_OFFSET_OF(RETURN_GENERATOR);
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += IP_OFFSET_OF(RETURN_GENERATOR);
EXIT_IF(true);
}
assert(tstate->jit_exit == exit);
exit->executor = executor;
TIER2_TO_TIER2(exit->executor);
}
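The four _GUARD_IP_* ops are one pattern stamped out per resume path: check that the instruction pointer the trace was recorded against is the one this frame will actually resume at, fix up frame->instr_ptr, and side-exit on a mismatch. A hypothetical macro showing the shared shape (not in the commit; the cases generator needs each op written out in full, which is presumably why they are):

    // Hypothetical factoring, for illustration only.
    #define GUARD_IP_SKETCH(WHERE)                                         \
        do {                                                               \
            _Py_CODEUNIT *target = frame->instr_ptr + IP_OFFSET_OF(WHERE); \
            if (target != (_Py_CODEUNIT *)ip) {                            \
                frame->instr_ptr += IP_OFFSET_OF(WHERE);                   \
                EXIT_IF(true);  /* leave the trace via a side exit */      \
            }                                                              \
        } while (0)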
label(pop_2_error) {
@@ -5624,6 +5636,65 @@ dummy_func(
DISPATCH();
}
inst(TRACE_RECORD, (--)) {
#if _Py_TIER2
assert(IS_JIT_TRACING());
next_instr = this_instr;
frame->instr_ptr = prev_instr;
opcode = next_instr->op.code;
bool stop_tracing = (opcode == WITH_EXCEPT_START ||
opcode == RERAISE || opcode == CLEANUP_THROW ||
opcode == PUSH_EXC_INFO || opcode == INTERPRETER_EXIT);
int full = !_PyJit_translate_single_bytecode_to_trace(tstate, frame, next_instr, stop_tracing ? _DEOPT : 0);
if (full) {
LEAVE_TRACING();
int err = stop_tracing_and_jit(tstate, frame);
ERROR_IF(err < 0);
DISPATCH();
}
// Super instructions: the instruction deopted, so the stack no longer matches what
// the optimizer expects, and the trace has to reflect that correctly.
_PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
if ((_tstate->jit_tracer_state.prev_state.instr->op.code == CALL_LIST_APPEND &&
opcode == POP_TOP) ||
(_tstate->jit_tracer_state.prev_state.instr->op.code == BINARY_OP_INPLACE_ADD_UNICODE &&
opcode == STORE_FAST)) {
_tstate->jit_tracer_state.prev_state.instr_is_super = true;
}
else {
_tstate->jit_tracer_state.prev_state.instr = next_instr;
}
PyObject *prev_code = PyStackRef_AsPyObjectBorrow(frame->f_executable);
if (_tstate->jit_tracer_state.prev_state.instr_code != (PyCodeObject *)prev_code) {
Py_SETREF(_tstate->jit_tracer_state.prev_state.instr_code, (PyCodeObject*)Py_NewRef((prev_code)));
}
_tstate->jit_tracer_state.prev_state.instr_frame = frame;
_tstate->jit_tracer_state.prev_state.instr_oparg = oparg;
_tstate->jit_tracer_state.prev_state.instr_stacklevel = PyStackRef_IsNone(frame->f_executable) ? 2 : STACK_LEVEL();
if (_PyOpcode_Caches[_PyOpcode_Deopt[opcode]]) {
(&next_instr[1])->counter = trigger_backoff_counter();
}
DISPATCH_GOTO_NON_TRACING();
#else
(void)prev_instr;
Py_FatalError("JIT instruction executed in non-jit build.");
#endif
}
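Taken together with the stop_tracing label below, TRACE_RECORD implements a record-then-execute loop: append the current bytecode to the trace buffer, stop when the buffer fills or an untraceable opcode appears, then hand the buffer to the JIT. A self-contained toy model of that loop (hypothetical names throughout; the real work happens in _PyJit_translate_single_bytecode_to_trace and stop_tracing_and_jit):

    #include <stdio.h>

    #define TRACE_MAX 8
    enum { OP_ADD, OP_JUMP, OP_RERAISE };  /* OP_RERAISE: untraceable */

    typedef struct { int buf[TRACE_MAX]; int len; } Trace;

    /* Append one bytecode to the trace; returns 0 when the buffer is full. */
    static int append(Trace *t, int op) {
        if (t->len == TRACE_MAX) { return 0; }
        t->buf[t->len++] = op;
        return 1;
    }

    int main(void) {
        int program[] = {OP_ADD, OP_JUMP, OP_ADD, OP_RERAISE, OP_ADD};
        Trace t = {{0}, 0};
        for (int i = 0; i < (int)(sizeof program / sizeof *program); i++) {
            int stop = (program[i] == OP_RERAISE);  /* cannot trace through */
            if (!append(&t, program[i]) || stop) {
                printf("stop tracing; compile %d recorded ops\n", t.len);
                break;  /* stop_tracing_and_jit in the real interpreter */
            }
            /* ...execute program[i] as usual (DISPATCH_GOTO_NON_TRACING)... */
        }
        return 0;
    }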
label(stop_tracing) {
#if _Py_TIER2
assert(IS_JIT_TRACING());
int opcode = next_instr->op.code;
_PyJit_translate_single_bytecode_to_trace(tstate, frame, NULL, _EXIT_TRACE);
LEAVE_TRACING();
int err = stop_tracing_and_jit(tstate, frame);
ERROR_IF(err < 0);
DISPATCH_GOTO_NON_TRACING();
#else
Py_FatalError("JIT label executed in non-jit build.");
#endif
}
// END BYTECODES //