// cpython/Python/executor_cases.c.h
// This file is generated by Tools/cases_generator/tier2_generator.py
// from:
// Python/bytecodes.c
// Do not edit!
#ifdef TIER_ONE
#error "This file is for Tier 2 only"
#endif
#define TIER_TWO 2
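// Tier 2 micro-op (uop) cases, included from the uop executor's dispatch
// switch. Each uop is emitted in several register variants distinguished
// by an _rXY suffix: as the bodies below show, X top-of-stack values are
// expected in the cache registers (_tos_cache0.._tos_cache2) on entry and
// Y are left there on exit, so _r12 consumes one cached value and produces
// two. CHECK_CURRENT_CACHED_VALUES / SET_CURRENT_CACHED_VALUES track (and
// in debug builds presumably verify) that count, and cached values are
// spilled to the in-memory stack before any call that could observe it.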
case _NOP_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _NOP_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _NOP_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _NOP_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
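// _CHECK_PERIODIC runs pending periodic work (signal handlers, pending
// calls, eval-breaker requests) via check_periodics(). The stack pointer
// is written back to the frame around the call because the handlers can
// run arbitrary Python code; a nonzero return means an exception is set.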
case _CHECK_PERIODIC_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = check_periodics(tstate);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err != 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _CHECK_PERIODIC_AT_END is not a viable micro-op for tier 2 because it is replaced */
case _CHECK_PERIODIC_IF_NOT_YIELD_FROM_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
oparg = CURRENT_OPARG();
if ((oparg & RESUME_OPARG_LOCATION_MASK) < RESUME_AFTER_YIELD_FROM) {
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = check_periodics(tstate);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err != 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _QUICKEN_RESUME is not a viable micro-op for tier 2 because it uses the 'this_instr' variable */
/* _LOAD_BYTECODE is not a viable micro-op for tier 2 because it uses the 'this_instr' variable */
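// _RESUME_CHECK guards the trace against instrumentation and interpreter
// state changes: it deopts (JUMP_TO_JUMP_TARGET) if the eval breaker word
// no longer matches the code object's instrumentation version, if the
// Emscripten signal clock has run down, or, on free-threaded builds, if
// this thread's thread-local bytecode (tlbc) index differs from the one
// the frame was created with.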
case _RESUME_CHECK_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
#if defined(__EMSCRIPTEN__)
if (_Py_emscripten_signal_clock == 0) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_Py_emscripten_signal_clock -= Py_EMSCRIPTEN_SIGNAL_HANDLING;
#endif
uintptr_t eval_breaker = _Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker);
uintptr_t version = FT_ATOMIC_LOAD_UINTPTR_ACQUIRE(_PyFrame_GetCode(frame)->_co_instrumentation_version);
assert((version & _PY_EVAL_EVENTS_MASK) == 0);
if (eval_breaker != version) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
#ifdef Py_GIL_DISABLED
if (frame->tlbc_index !=
((_PyThreadStateImpl *)tstate)->tlbc_index) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
#endif
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _RESUME_CHECK_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
#if defined(__EMSCRIPTEN__)
if (_Py_emscripten_signal_clock == 0) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_Py_emscripten_signal_clock -= Py_EMSCRIPTEN_SIGNAL_HANDLING;
#endif
uintptr_t eval_breaker = _Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker);
uintptr_t version = FT_ATOMIC_LOAD_UINTPTR_ACQUIRE(_PyFrame_GetCode(frame)->_co_instrumentation_version);
assert((version & _PY_EVAL_EVENTS_MASK) == 0);
if (eval_breaker != version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
#ifdef Py_GIL_DISABLED
if (frame->tlbc_index !=
((_PyThreadStateImpl *)tstate)->tlbc_index) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
#endif
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _RESUME_CHECK_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
#if defined(__EMSCRIPTEN__)
if (_Py_emscripten_signal_clock == 0) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_Py_emscripten_signal_clock -= Py_EMSCRIPTEN_SIGNAL_HANDLING;
#endif
uintptr_t eval_breaker = _Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker);
uintptr_t version = FT_ATOMIC_LOAD_UINTPTR_ACQUIRE(_PyFrame_GetCode(frame)->_co_instrumentation_version);
assert((version & _PY_EVAL_EVENTS_MASK) == 0);
if (eval_breaker != version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
#ifdef Py_GIL_DISABLED
if (frame->tlbc_index !=
((_PyThreadStateImpl *)tstate)->tlbc_index) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
#endif
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _RESUME_CHECK_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
#if defined(__EMSCRIPTEN__)
if (_Py_emscripten_signal_clock == 0) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_Py_emscripten_signal_clock -= Py_EMSCRIPTEN_SIGNAL_HANDLING;
#endif
uintptr_t eval_breaker = _Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker);
uintptr_t version = FT_ATOMIC_LOAD_UINTPTR_ACQUIRE(_PyFrame_GetCode(frame)->_co_instrumentation_version);
assert((version & _PY_EVAL_EVENTS_MASK) == 0);
if (eval_breaker != version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
#ifdef Py_GIL_DISABLED
if (frame->tlbc_index !=
((_PyThreadStateImpl *)tstate)->tlbc_index) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
#endif
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _MONITOR_RESUME is not a viable micro-op for tier 2 because it uses the 'this_instr' variable */
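// _LOAD_FAST_CHECK loads a local that may be unbound: a NULL slot raises
// UnboundLocalError. On the error path, any values held in the cache
// registers are first spilled to the in-memory stack so the unwinder sees
// a consistent stack.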
case _LOAD_FAST_CHECK_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = CURRENT_OPARG();
_PyStackRef value_s = GETLOCAL(oparg);
if (PyStackRef_IsNull(value_s)) {
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyEval_FormatExcCheckArg(tstate, PyExc_UnboundLocalError,
UNBOUNDLOCAL_ERROR_MSG,
PyTuple_GetItem(_PyFrame_GetCode(frame)->co_localsplusnames, oparg)
);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
value = PyStackRef_DUP(value_s);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_CHECK_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
_PyStackRef value_s = GETLOCAL(oparg);
if (PyStackRef_IsNull(value_s)) {
stack_pointer[0] = _stack_item_0;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyEval_FormatExcCheckArg(tstate, PyExc_UnboundLocalError,
UNBOUNDLOCAL_ERROR_MSG,
PyTuple_GetItem(_PyFrame_GetCode(frame)->co_localsplusnames, oparg)
);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
value = PyStackRef_DUP(value_s);
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_CHECK_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
_PyStackRef value_s = GETLOCAL(oparg);
if (PyStackRef_IsNull(value_s)) {
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyEval_FormatExcCheckArg(tstate, PyExc_UnboundLocalError,
UNBOUNDLOCAL_ERROR_MSG,
PyTuple_GetItem(_PyFrame_GetCode(frame)->co_localsplusnames, oparg)
);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
value = PyStackRef_DUP(value_s);
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
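// _LOAD_FAST_0 .. _LOAD_FAST_7 are oparg-specialized copies of _LOAD_FAST
// with the operand baked in as a constant, saving the CURRENT_OPARG()
// fetch for the most common local indices. The local is known to be bound
// here (asserted), so no NULL check is needed.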
case _LOAD_FAST_0_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 0;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_0_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 0;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_0_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 0;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_1_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 1;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_1_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 1;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_1_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 1;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_2_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 2;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_2_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 2;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_2_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 2;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_3_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 3;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_3_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 3;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_3_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 3;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_4_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 4;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_4_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 4;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_4_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 4;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_5_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 5;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_5_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 5;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_5_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 5;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_6_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 6;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_6_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 6;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_6_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 6;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_7_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 7;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_7_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 7;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_7_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 7;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = CURRENT_OPARG();
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_DUP(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
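// The _LOAD_FAST_BORROW variants push a borrowed stack reference
// (PyStackRef_Borrow) instead of a new strong one (PyStackRef_DUP),
// presumably skipping refcount traffic in cases where the local is known
// to outlive the pushed value.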
case _LOAD_FAST_BORROW_0_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 0;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_0_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 0;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_0_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 0;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_1_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 1;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_1_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 1;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_1_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 1;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_2_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 2;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_2_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 2;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_2_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 2;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_3_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 3;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_3_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 3;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_3_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 3;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_4_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 4;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_4_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 4;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_4_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 4;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_5_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 5;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_5_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 5;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_5_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 5;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_6_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 6;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_6_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 6;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_6_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 6;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_7_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 7;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_7_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 7;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_7_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 7;
assert(oparg == CURRENT_OPARG());
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = CURRENT_OPARG();
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_BORROW_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
assert(!PyStackRef_IsNull(GETLOCAL(oparg)));
value = PyStackRef_Borrow(GETLOCAL(oparg));
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
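// _LOAD_FAST_AND_CLEAR moves the local to the stack and leaves the slot
// NULL, transferring ownership rather than duplicating the reference
// (used, e.g., to save and restore locals around inlined comprehensions).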
case _LOAD_FAST_AND_CLEAR_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = CURRENT_OPARG();
value = GETLOCAL(oparg);
GETLOCAL(oparg) = PyStackRef_NULL;
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_AND_CLEAR_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
value = GETLOCAL(oparg);
GETLOCAL(oparg) = PyStackRef_NULL;
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FAST_AND_CLEAR_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
value = GETLOCAL(oparg);
GETLOCAL(oparg) = PyStackRef_NULL;
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
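// _LOAD_CONST pushes a borrowed reference to an entry of the code
// object's co_consts tuple; the tuple keeps the constant alive, so no
// refcount increment is needed.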
case _LOAD_CONST_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = CURRENT_OPARG();
PyObject *obj = GETITEM(FRAME_CO_CONSTS, oparg);
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_CONST_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
PyObject *obj = GETITEM(FRAME_CO_CONSTS, oparg);
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_CONST_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
PyObject *obj = GETITEM(FRAME_CO_CONSTS, oparg);
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
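// _LOAD_SMALL_INT pushes a borrowed reference to one of the statically
// allocated small-int objects (_PyLong_SMALL_INTS, which also holds the
// negative cache, hence the _PY_NSMALLNEGINTS offset). The _0.._3
// variants below bake the operand in as a constant.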
case _LOAD_SMALL_INT_0_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 0;
assert(oparg == CURRENT_OPARG());
assert(oparg < _PY_NSMALLPOSINTS);
PyObject *obj = (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS + oparg];
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_SMALL_INT_0_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 0;
assert(oparg == CURRENT_OPARG());
assert(oparg < _PY_NSMALLPOSINTS);
PyObject *obj = (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS + oparg];
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_SMALL_INT_0_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 0;
assert(oparg == CURRENT_OPARG());
assert(oparg < _PY_NSMALLPOSINTS);
PyObject *obj = (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS + oparg];
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_SMALL_INT_1_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 1;
assert(oparg == CURRENT_OPARG());
assert(oparg < _PY_NSMALLPOSINTS);
PyObject *obj = (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS + oparg];
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_SMALL_INT_1_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 1;
assert(oparg == CURRENT_OPARG());
assert(oparg < _PY_NSMALLPOSINTS);
PyObject *obj = (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS + oparg];
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_SMALL_INT_1_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 1;
assert(oparg == CURRENT_OPARG());
assert(oparg < _PY_NSMALLPOSINTS);
PyObject *obj = (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS + oparg];
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_SMALL_INT_2_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 2;
assert(oparg == CURRENT_OPARG());
assert(oparg < _PY_NSMALLPOSINTS);
PyObject *obj = (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS + oparg];
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_SMALL_INT_2_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 2;
assert(oparg == CURRENT_OPARG());
assert(oparg < _PY_NSMALLPOSINTS);
PyObject *obj = (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS + oparg];
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_SMALL_INT_2_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 2;
assert(oparg == CURRENT_OPARG());
assert(oparg < _PY_NSMALLPOSINTS);
PyObject *obj = (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS + oparg];
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_SMALL_INT_3_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = 3;
assert(oparg == CURRENT_OPARG());
assert(oparg < _PY_NSMALLPOSINTS);
PyObject *obj = (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS + oparg];
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_SMALL_INT_3_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 3;
assert(oparg == CURRENT_OPARG());
assert(oparg < _PY_NSMALLPOSINTS);
PyObject *obj = (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS + oparg];
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_SMALL_INT_3_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = 3;
assert(oparg == CURRENT_OPARG());
assert(oparg < _PY_NSMALLPOSINTS);
PyObject *obj = (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS + oparg];
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_SMALL_INT_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = CURRENT_OPARG();
assert(oparg < _PY_NSMALLPOSINTS);
PyObject *obj = (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS + oparg];
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_SMALL_INT_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
assert(oparg < _PY_NSMALLPOSINTS);
PyObject *obj = (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS + oparg];
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_SMALL_INT_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
assert(oparg < _PY_NSMALLPOSINTS);
PyObject *obj = (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS + oparg];
value = PyStackRef_FromPyObjectBorrow(obj);
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
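// _STORE_FAST writes the cached top-of-stack value into a local. The old
// value is swapped out first and only then closed (PyStackRef_XCLOSE),
// with the stack pointer written back, since the destructor may run
// arbitrary code. The _0.._7 variants bake the operand in.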
case _STORE_FAST_0_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 0;
assert(oparg == CURRENT_OPARG());
value = _stack_item_0;
_PyStackRef tmp = GETLOCAL(oparg);
GETLOCAL(oparg) = value;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_XCLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _STORE_FAST_1_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 1;
assert(oparg == CURRENT_OPARG());
value = _stack_item_0;
_PyStackRef tmp = GETLOCAL(oparg);
GETLOCAL(oparg) = value;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_XCLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _STORE_FAST_2_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 2;
assert(oparg == CURRENT_OPARG());
value = _stack_item_0;
_PyStackRef tmp = GETLOCAL(oparg);
GETLOCAL(oparg) = value;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_XCLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _STORE_FAST_3_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 3;
assert(oparg == CURRENT_OPARG());
value = _stack_item_0;
_PyStackRef tmp = GETLOCAL(oparg);
GETLOCAL(oparg) = value;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_XCLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _STORE_FAST_4_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 4;
assert(oparg == CURRENT_OPARG());
value = _stack_item_0;
_PyStackRef tmp = GETLOCAL(oparg);
GETLOCAL(oparg) = value;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_XCLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _STORE_FAST_5_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 5;
assert(oparg == CURRENT_OPARG());
value = _stack_item_0;
_PyStackRef tmp = GETLOCAL(oparg);
GETLOCAL(oparg) = value;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_XCLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _STORE_FAST_6_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 6;
assert(oparg == CURRENT_OPARG());
value = _stack_item_0;
_PyStackRef tmp = GETLOCAL(oparg);
GETLOCAL(oparg) = value;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_XCLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _STORE_FAST_7_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = 7;
assert(oparg == CURRENT_OPARG());
value = _stack_item_0;
_PyStackRef tmp = GETLOCAL(oparg);
GETLOCAL(oparg) = value;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_XCLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _STORE_FAST_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
value = _stack_item_0;
_PyStackRef tmp = GETLOCAL(oparg);
GETLOCAL(oparg) = value;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_XCLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
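// _POP_TOP drops the (cached) top of stack, releasing its reference.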
case _POP_TOP_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_XCLOSE(value);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
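// _POP_TOP_NOP drops a value that provably needs no deallocation (NULL,
// a borrowed reference, or an immortal object); the assert checks that
// claim, so in release builds the pop costs nothing.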
case _POP_TOP_NOP_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
value = stack_pointer[-1];
assert(PyStackRef_IsNull(value) || (!PyStackRef_RefcountOnObject(value)) ||
_Py_IsImmortal((PyStackRef_AsPyObjectBorrow(value))));
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_TOP_NOP_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
assert(PyStackRef_IsNull(value) || (!PyStackRef_RefcountOnObject(value)) ||
_Py_IsImmortal((PyStackRef_AsPyObjectBorrow(value))));
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_TOP_NOP_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
value = _stack_item_1;
assert(PyStackRef_IsNull(value) || (!PyStackRef_RefcountOnObject(value)) ||
_Py_IsImmortal((PyStackRef_AsPyObjectBorrow(value))));
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_TOP_NOP_r32: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
value = _stack_item_2;
assert(PyStackRef_IsNull(value) || (!PyStackRef_RefcountOnObject(value)) ||
_Py_IsImmortal((PyStackRef_AsPyObjectBorrow(value))));
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
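// _POP_TOP_INT / _POP_TOP_FLOAT / _POP_TOP_UNICODE drop a value whose
// exact type is known, using PyStackRef_CLOSE_SPECIALIZED with the
// type's dealloc function to skip the generic destructor dispatch.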
case _POP_TOP_INT_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
value = stack_pointer[-1];
assert(PyLong_CheckExact(PyStackRef_AsPyObjectBorrow(value)));
PyStackRef_CLOSE_SPECIALIZED(value, _PyLong_ExactDealloc);
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_TOP_INT_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
assert(PyLong_CheckExact(PyStackRef_AsPyObjectBorrow(value)));
PyStackRef_CLOSE_SPECIALIZED(value, _PyLong_ExactDealloc);
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_TOP_INT_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
value = _stack_item_1;
assert(PyLong_CheckExact(PyStackRef_AsPyObjectBorrow(value)));
PyStackRef_CLOSE_SPECIALIZED(value, _PyLong_ExactDealloc);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_TOP_INT_r32: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
value = _stack_item_2;
assert(PyLong_CheckExact(PyStackRef_AsPyObjectBorrow(value)));
PyStackRef_CLOSE_SPECIALIZED(value, _PyLong_ExactDealloc);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_TOP_FLOAT_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
value = stack_pointer[-1];
assert(PyFloat_CheckExact(PyStackRef_AsPyObjectBorrow(value)));
PyStackRef_CLOSE_SPECIALIZED(value, _PyFloat_ExactDealloc);
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_TOP_FLOAT_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
assert(PyFloat_CheckExact(PyStackRef_AsPyObjectBorrow(value)));
PyStackRef_CLOSE_SPECIALIZED(value, _PyFloat_ExactDealloc);
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_TOP_FLOAT_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
value = _stack_item_1;
assert(PyFloat_CheckExact(PyStackRef_AsPyObjectBorrow(value)));
PyStackRef_CLOSE_SPECIALIZED(value, _PyFloat_ExactDealloc);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_TOP_FLOAT_r32: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
value = _stack_item_2;
assert(PyFloat_CheckExact(PyStackRef_AsPyObjectBorrow(value)));
PyStackRef_CLOSE_SPECIALIZED(value, _PyFloat_ExactDealloc);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_TOP_UNICODE_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
value = stack_pointer[-1];
assert(PyUnicode_CheckExact(PyStackRef_AsPyObjectBorrow(value)));
PyStackRef_CLOSE_SPECIALIZED(value, _PyUnicode_ExactDealloc);
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_TOP_UNICODE_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
assert(PyUnicode_CheckExact(PyStackRef_AsPyObjectBorrow(value)));
PyStackRef_CLOSE_SPECIALIZED(value, _PyUnicode_ExactDealloc);
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_TOP_UNICODE_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
value = _stack_item_1;
assert(PyUnicode_CheckExact(PyStackRef_AsPyObjectBorrow(value)));
PyStackRef_CLOSE_SPECIALIZED(value, _PyUnicode_ExactDealloc);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_TOP_UNICODE_r32: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
value = _stack_item_2;
assert(PyUnicode_CheckExact(PyStackRef_AsPyObjectBorrow(value)));
PyStackRef_CLOSE_SPECIALIZED(value, _PyUnicode_ExactDealloc);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
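// _POP_TWO drops the top two stack items. While the first (tos) is being
// closed, the second (nos) is spilled back to the stack so it remains
// visible to any code the destructor happens to run.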
case _POP_TWO_r20: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
_PyStackRef nos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
tos = _stack_item_1;
nos = _stack_item_0;
stack_pointer[0] = nos;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(tos);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(nos);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
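/* The _PUSH_NULL variants materialize a NULL sentinel directly in the register
   cache without touching the in-memory stack. */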
case _PUSH_NULL_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef res;
res = PyStackRef_NULL;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _PUSH_NULL_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
res = PyStackRef_NULL;
_tos_cache1 = res;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _PUSH_NULL_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
res = PyStackRef_NULL;
_tos_cache2 = res;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
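/* _END_FOR closes the value left on the stack when a for loop finishes. */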
case _END_FOR_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(value);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
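/* _POP_ITER closes only the iterator reference; the companion index_or_null
   slot is discarded without a close. */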
case _POP_ITER_r20: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef index_or_null;
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
index_or_null = _stack_item_1;
iter = _stack_item_0;
(void)index_or_null;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(iter);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
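/* _END_SEND drops the receiver beneath the sent value: val is spilled across
   the close of receiver, then kept as the sole cached result. */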
case _END_SEND_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef receiver;
_PyStackRef val;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
value = _stack_item_1;
receiver = _stack_item_0;
val = value;
stack_pointer[0] = val;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(receiver);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = val;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
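/* _UNARY_NEGATIVE calls PyNumber_Negative, which can execute arbitrary code,
   so the cached operand is spilled to the stack and the frame's stack pointer
   synced around the call. */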
case _UNARY_NEGATIVE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
stack_pointer[0] = value;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = PyNumber_Negative(PyStackRef_AsPyObjectBorrow(value));
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(value);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
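/* The _UNARY_NOT variants assume a bool on top (asserted) and flip it without
   any reference counting, since True and False are immortal. */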
case _UNARY_NOT_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef res;
value = stack_pointer[-1];
assert(PyStackRef_BoolCheck(value));
res = PyStackRef_IsFalse(value)
? PyStackRef_True : PyStackRef_False;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _UNARY_NOT_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
assert(PyStackRef_BoolCheck(value));
res = PyStackRef_IsFalse(value)
? PyStackRef_True : PyStackRef_False;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _UNARY_NOT_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
value = _stack_item_1;
assert(PyStackRef_BoolCheck(value));
res = PyStackRef_IsFalse(value)
? PyStackRef_True : PyStackRef_False;
_tos_cache1 = res;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _UNARY_NOT_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
value = _stack_item_2;
assert(PyStackRef_BoolCheck(value));
res = PyStackRef_IsFalse(value)
? PyStackRef_True : PyStackRef_False;
_tos_cache2 = res;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _TO_BOOL_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
stack_pointer[0] = value;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = PyObject_IsTrue(PyStackRef_AsPyObjectBorrow(value));
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(value);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err < 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = err ? PyStackRef_True : PyStackRef_False;
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
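/* _TO_BOOL_BOOL and the _GUARD_* micro-ops below are guards: on a failed type
   check they restore the register cache, record a miss, and deoptimize via
   JUMP_TO_JUMP_TARGET instead of raising. */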
case _TO_BOOL_BOOL_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
value = stack_pointer[-1];
if (!PyStackRef_BoolCheck(value)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(TO_BOOL, hit);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _TO_BOOL_BOOL_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
if (!PyStackRef_BoolCheck(value)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(TO_BOOL, hit);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _TO_BOOL_BOOL_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
value = _stack_item_1;
if (!PyStackRef_BoolCheck(value)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(TO_BOOL, hit);
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _TO_BOOL_BOOL_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
value = _stack_item_2;
if (!PyStackRef_BoolCheck(value)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(TO_BOOL, hit);
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _TO_BOOL_INT_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
if (!PyLong_CheckExact(value_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(TO_BOOL, hit);
if (_PyLong_IsZero((PyLongObject *)value_o)) {
assert(_Py_IsImmortal(value_o));
res = PyStackRef_False;
}
else {
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(value);
stack_pointer = _PyFrame_GetStackPointer(frame);
res = PyStackRef_True;
}
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
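/* The low-input variants of a guard (_r02, _r12) double as loads: on success
   they pull the guarded stack items into the register cache and shrink the
   in-memory stack accordingly. */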
case _GUARD_NOS_LIST_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
nos = stack_pointer[-2];
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (!PyList_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_LIST_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
_PyStackRef _stack_item_0 = _tos_cache0;
nos = stack_pointer[-1];
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (!PyList_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_0;
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_LIST_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
nos = _stack_item_0;
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (!PyList_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_LIST_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
nos = _stack_item_1;
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (!PyList_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = nos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = nos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_LIST_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
tos = stack_pointer[-1];
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PyList_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = tos;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_LIST_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
_PyStackRef _stack_item_0 = _tos_cache0;
tos = _stack_item_0;
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PyList_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = tos;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = tos;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_LIST_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
tos = _stack_item_1;
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PyList_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = tos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = tos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_LIST_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
tos = _stack_item_2;
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PyList_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = tos;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = tos;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_SLICE_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
tos = stack_pointer[-1];
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PySlice_Check(o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = tos;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_SLICE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
_PyStackRef _stack_item_0 = _tos_cache0;
tos = _stack_item_0;
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PySlice_Check(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = tos;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = tos;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_SLICE_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
tos = _stack_item_1;
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PySlice_Check(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = tos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = tos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_SLICE_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
tos = _stack_item_2;
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PySlice_Check(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = tos;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = tos;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
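/* _TO_BOOL_LIST tests emptiness with PyList_GET_SIZE; the list's stack slot is
   overwritten with the bool result before the list reference is closed, so the
   stack never holds a dead reference across the close. */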
case _TO_BOOL_LIST_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
assert(PyList_CheckExact(value_o));
STAT_INC(TO_BOOL, hit);
res = PyList_GET_SIZE(value_o) ? PyStackRef_True : PyStackRef_False;
stack_pointer[0] = value;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyStackRef tmp = value;
value = res;
stack_pointer[-1] = value;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _TO_BOOL_NONE_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef res;
value = stack_pointer[-1];
if (!PyStackRef_IsNone(value)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(TO_BOOL, hit);
res = PyStackRef_False;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _TO_BOOL_NONE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
if (!PyStackRef_IsNone(value)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(TO_BOOL, hit);
res = PyStackRef_False;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _TO_BOOL_NONE_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
value = _stack_item_1;
if (!PyStackRef_IsNone(value)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(TO_BOOL, hit);
res = PyStackRef_False;
_tos_cache1 = res;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _TO_BOOL_NONE_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
value = _stack_item_2;
if (!PyStackRef_IsNone(value)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(TO_BOOL, hit);
res = PyStackRef_False;
_tos_cache2 = res;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_UNICODE_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
nos = stack_pointer[-2];
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (!PyUnicode_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_UNICODE_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
_PyStackRef _stack_item_0 = _tos_cache0;
nos = stack_pointer[-1];
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (!PyUnicode_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_0;
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_UNICODE_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
nos = _stack_item_0;
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (!PyUnicode_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_UNICODE_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
nos = _stack_item_1;
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (!PyUnicode_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = nos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = nos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_UNICODE_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
value = stack_pointer[-1];
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
if (!PyUnicode_CheckExact(value_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_UNICODE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
if (!PyUnicode_CheckExact(value_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_UNICODE_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
value = _stack_item_1;
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
if (!PyUnicode_CheckExact(value_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_UNICODE_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
value = _stack_item_2;
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
if (!PyUnicode_CheckExact(value_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
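/* _TO_BOOL_STR maps the empty string, which is immortal, to False; any other
   str is closed and mapped to True. */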
case _TO_BOOL_STR_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
STAT_INC(TO_BOOL, hit);
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
if (value_o == &_Py_STR(empty)) {
assert(_Py_IsImmortal(value_o));
res = PyStackRef_False;
}
else {
assert(Py_SIZE(value_o));
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(value);
stack_pointer = _PyFrame_GetStackPointer(frame);
res = PyStackRef_True;
}
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _REPLACE_WITH_TRUE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(value);
stack_pointer = _PyFrame_GetStackPointer(frame);
res = PyStackRef_True;
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _UNARY_INVERT_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
stack_pointer[0] = value;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = PyNumber_Invert(PyStackRef_AsPyObjectBorrow(value));
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(value);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_INT_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef left;
left = stack_pointer[-2];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
if (!_PyLong_CheckExactAndCompact(left_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = left;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_INT_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef left;
_PyStackRef _stack_item_0 = _tos_cache0;
left = stack_pointer[-1];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
if (!_PyLong_CheckExactAndCompact(left_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_0;
_tos_cache0 = left;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_INT_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef left;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
if (!_PyLong_CheckExactAndCompact(left_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = left;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = left;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_INT_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef left;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
left = _stack_item_1;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
if (!_PyLong_CheckExactAndCompact(left_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = left;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = left;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_INT_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
value = stack_pointer[-1];
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
if (!_PyLong_CheckExactAndCompact(value_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_INT_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
if (!_PyLong_CheckExactAndCompact(value_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_INT_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
value = _stack_item_1;
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
if (!_PyLong_CheckExactAndCompact(value_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_INT_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
value = _stack_item_2;
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
if (!_PyLong_CheckExactAndCompact(value_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
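/* The _GUARD_*_OVERFLOWED variants assert that the value is already an int and
   deoptimize only when it is not compact, i.e. too large for the fast
   arithmetic path. */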
case _GUARD_NOS_OVERFLOWED_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef left;
left = stack_pointer[-2];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
assert(Py_TYPE(left_o) == &PyLong_Type);
if (!_PyLong_IsCompact((PyLongObject *)left_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = left;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_OVERFLOWED_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef left;
_PyStackRef _stack_item_0 = _tos_cache0;
left = stack_pointer[-1];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
assert(Py_TYPE(left_o) == &PyLong_Type);
if (!_PyLong_IsCompact((PyLongObject *)left_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_0;
_tos_cache0 = left;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_OVERFLOWED_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef left;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
assert(Py_TYPE(left_o) == &PyLong_Type);
if (!_PyLong_IsCompact((PyLongObject *)left_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = left;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = left;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_OVERFLOWED_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef left;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
left = _stack_item_1;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
assert(Py_TYPE(left_o) == &PyLong_Type);
if (!_PyLong_IsCompact((PyLongObject *)left_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = left;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = left;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_OVERFLOWED_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
value = stack_pointer[-1];
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
assert(Py_TYPE(value_o) == &PyLong_Type);
if (!_PyLong_IsCompact((PyLongObject *)value_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_OVERFLOWED_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
assert(Py_TYPE(value_o) == &PyLong_Type);
if (!_PyLong_IsCompact((PyLongObject *)value_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_OVERFLOWED_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
value = _stack_item_1;
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
assert(Py_TYPE(value_o) == &PyLong_Type);
if (!_PyLong_IsCompact((PyLongObject *)value_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_OVERFLOWED_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
value = _stack_item_2;
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
assert(Py_TYPE(value_o) == &PyLong_Type);
if (!_PyLong_IsCompact((PyLongObject *)value_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
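/* The specialized int binary ops use the _PyCompactLong_* helpers, which
   return a NULL stackref when the result does not stay compact; that case
   deoptimizes rather than raising. The result and both operands are all left
   cached (res, l, r), presumably so that a following micro-op such as
   _POP_TWO can close the operand references. */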
case _BINARY_OP_MULTIPLY_INT_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
right = stack_pointer[-1];
left = stack_pointer[-2];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyLong_CheckExact(left_o));
assert(PyLong_CheckExact(right_o));
assert(_PyLong_BothAreCompact((PyLongObject *)left_o, (PyLongObject *)right_o));
STAT_INC(BINARY_OP, hit);
res = _PyCompactLong_Multiply((PyLongObject *)left_o, (PyLongObject *)right_o);
if (PyStackRef_IsNull(res)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_MULTIPLY_INT_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
right = _stack_item_0;
left = stack_pointer[-1];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyLong_CheckExact(left_o));
assert(PyLong_CheckExact(right_o));
assert(_PyLong_BothAreCompact((PyLongObject *)left_o, (PyLongObject *)right_o));
STAT_INC(BINARY_OP, hit);
res = _PyCompactLong_Multiply((PyLongObject *)left_o, (PyLongObject *)right_o);
if (PyStackRef_IsNull(res)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = right;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_MULTIPLY_INT_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
right = _stack_item_1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyLong_CheckExact(left_o));
assert(PyLong_CheckExact(right_o));
assert(_PyLong_BothAreCompact((PyLongObject *)left_o, (PyLongObject *)right_o));
STAT_INC(BINARY_OP, hit);
res = _PyCompactLong_Multiply((PyLongObject *)left_o, (PyLongObject *)right_o);
if (PyStackRef_IsNull(res)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = right;
_tos_cache0 = left;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_ADD_INT_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
right = stack_pointer[-1];
left = stack_pointer[-2];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyLong_CheckExact(left_o));
assert(PyLong_CheckExact(right_o));
assert(_PyLong_BothAreCompact((PyLongObject *)left_o, (PyLongObject *)right_o));
STAT_INC(BINARY_OP, hit);
res = _PyCompactLong_Add((PyLongObject *)left_o, (PyLongObject *)right_o);
if (PyStackRef_IsNull(res)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_ADD_INT_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
right = _stack_item_0;
left = stack_pointer[-1];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyLong_CheckExact(left_o));
assert(PyLong_CheckExact(right_o));
assert(_PyLong_BothAreCompact((PyLongObject *)left_o, (PyLongObject *)right_o));
STAT_INC(BINARY_OP, hit);
res = _PyCompactLong_Add((PyLongObject *)left_o, (PyLongObject *)right_o);
if (PyStackRef_IsNull(res)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = right;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_ADD_INT_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
right = _stack_item_1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyLong_CheckExact(left_o));
assert(PyLong_CheckExact(right_o));
assert(_PyLong_BothAreCompact((PyLongObject *)left_o, (PyLongObject *)right_o));
STAT_INC(BINARY_OP, hit);
res = _PyCompactLong_Add((PyLongObject *)left_o, (PyLongObject *)right_o);
if (PyStackRef_IsNull(res)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = right;
_tos_cache0 = left;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_SUBTRACT_INT_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
right = stack_pointer[-1];
left = stack_pointer[-2];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyLong_CheckExact(left_o));
assert(PyLong_CheckExact(right_o));
assert(_PyLong_BothAreCompact((PyLongObject *)left_o, (PyLongObject *)right_o));
STAT_INC(BINARY_OP, hit);
res = _PyCompactLong_Subtract((PyLongObject *)left_o, (PyLongObject *)right_o);
if (PyStackRef_IsNull(res)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_SUBTRACT_INT_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
right = _stack_item_0;
left = stack_pointer[-1];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyLong_CheckExact(left_o));
assert(PyLong_CheckExact(right_o));
assert(_PyLong_BothAreCompact((PyLongObject *)left_o, (PyLongObject *)right_o));
STAT_INC(BINARY_OP, hit);
res = _PyCompactLong_Subtract((PyLongObject *)left_o, (PyLongObject *)right_o);
if (PyStackRef_IsNull(res)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = right;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_SUBTRACT_INT_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
right = _stack_item_1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyLong_CheckExact(left_o));
assert(PyLong_CheckExact(right_o));
assert(_PyLong_BothAreCompact((PyLongObject *)left_o, (PyLongObject *)right_o));
STAT_INC(BINARY_OP, hit);
res = _PyCompactLong_Subtract((PyLongObject *)left_o, (PyLongObject *)right_o);
if (PyStackRef_IsNull(res)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = right;
_tos_cache0 = left;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_FLOAT_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef left;
left = stack_pointer[-2];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
if (!PyFloat_CheckExact(left_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = left;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_FLOAT_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef left;
_PyStackRef _stack_item_0 = _tos_cache0;
left = stack_pointer[-1];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
if (!PyFloat_CheckExact(left_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_0;
_tos_cache0 = left;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_FLOAT_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef left;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
if (!PyFloat_CheckExact(left_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = left;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = left;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_FLOAT_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef left;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
left = _stack_item_1;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
if (!PyFloat_CheckExact(left_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = left;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = left;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_FLOAT_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
value = stack_pointer[-1];
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
if (!PyFloat_CheckExact(value_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_FLOAT_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
if (!PyFloat_CheckExact(value_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_FLOAT_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
value = _stack_item_1;
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
if (!PyFloat_CheckExact(value_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_FLOAT_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
value = _stack_item_2;
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
if (!PyFloat_CheckExact(value_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
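/* The float binary ops fail differently from the int ones: a NULL result from
   PyFloat_FromDouble is an allocation error, so these jump to the error
   handler, first writing any cached operands back to the stack so the frame
   is consistent for unwinding. */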
case _BINARY_OP_MULTIPLY_FLOAT_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
right = stack_pointer[-1];
left = stack_pointer[-2];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyFloat_CheckExact(left_o));
assert(PyFloat_CheckExact(right_o));
STAT_INC(BINARY_OP, hit);
double dres =
((PyFloatObject *)left_o)->ob_fval *
((PyFloatObject *)right_o)->ob_fval;
res = PyStackRef_FromPyObjectSteal(PyFloat_FromDouble(dres));
if (PyStackRef_IsNull(res)) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_MULTIPLY_FLOAT_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
right = _stack_item_0;
left = stack_pointer[-1];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyFloat_CheckExact(left_o));
assert(PyFloat_CheckExact(right_o));
STAT_INC(BINARY_OP, hit);
double dres =
((PyFloatObject *)left_o)->ob_fval *
((PyFloatObject *)right_o)->ob_fval;
res = PyStackRef_FromPyObjectSteal(PyFloat_FromDouble(dres));
if (PyStackRef_IsNull(res)) {
stack_pointer[0] = right;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_MULTIPLY_FLOAT_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
right = _stack_item_1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyFloat_CheckExact(left_o));
assert(PyFloat_CheckExact(right_o));
STAT_INC(BINARY_OP, hit);
double dres =
((PyFloatObject *)left_o)->ob_fval *
((PyFloatObject *)right_o)->ob_fval;
res = PyStackRef_FromPyObjectSteal(PyFloat_FromDouble(dres));
if (PyStackRef_IsNull(res)) {
stack_pointer[0] = left;
stack_pointer[1] = right;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_ADD_FLOAT_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
right = stack_pointer[-1];
left = stack_pointer[-2];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyFloat_CheckExact(left_o));
assert(PyFloat_CheckExact(right_o));
STAT_INC(BINARY_OP, hit);
double dres =
((PyFloatObject *)left_o)->ob_fval +
((PyFloatObject *)right_o)->ob_fval;
res = PyStackRef_FromPyObjectSteal(PyFloat_FromDouble(dres));
if (PyStackRef_IsNull(res)) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_ADD_FLOAT_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
right = _stack_item_0;
left = stack_pointer[-1];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyFloat_CheckExact(left_o));
assert(PyFloat_CheckExact(right_o));
STAT_INC(BINARY_OP, hit);
double dres =
((PyFloatObject *)left_o)->ob_fval +
((PyFloatObject *)right_o)->ob_fval;
res = PyStackRef_FromPyObjectSteal(PyFloat_FromDouble(dres));
if (PyStackRef_IsNull(res)) {
stack_pointer[0] = right;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_ADD_FLOAT_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
right = _stack_item_1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyFloat_CheckExact(left_o));
assert(PyFloat_CheckExact(right_o));
STAT_INC(BINARY_OP, hit);
double dres =
((PyFloatObject *)left_o)->ob_fval +
((PyFloatObject *)right_o)->ob_fval;
res = PyStackRef_FromPyObjectSteal(PyFloat_FromDouble(dres));
if (PyStackRef_IsNull(res)) {
stack_pointer[0] = left;
stack_pointer[1] = right;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_SUBTRACT_FLOAT_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
right = stack_pointer[-1];
left = stack_pointer[-2];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyFloat_CheckExact(left_o));
assert(PyFloat_CheckExact(right_o));
STAT_INC(BINARY_OP, hit);
double dres =
((PyFloatObject *)left_o)->ob_fval -
((PyFloatObject *)right_o)->ob_fval;
res = PyStackRef_FromPyObjectSteal(PyFloat_FromDouble(dres));
if (PyStackRef_IsNull(res)) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_SUBTRACT_FLOAT_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
right = _stack_item_0;
left = stack_pointer[-1];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyFloat_CheckExact(left_o));
assert(PyFloat_CheckExact(right_o));
STAT_INC(BINARY_OP, hit);
double dres =
((PyFloatObject *)left_o)->ob_fval -
((PyFloatObject *)right_o)->ob_fval;
res = PyStackRef_FromPyObjectSteal(PyFloat_FromDouble(dres));
if (PyStackRef_IsNull(res)) {
stack_pointer[0] = right;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_SUBTRACT_FLOAT_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
right = _stack_item_1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyFloat_CheckExact(left_o));
assert(PyFloat_CheckExact(right_o));
STAT_INC(BINARY_OP, hit);
double dres =
((PyFloatObject *)left_o)->ob_fval -
((PyFloatObject *)right_o)->ob_fval;
res = PyStackRef_FromPyObjectSteal(PyFloat_FromDouble(dres));
if (PyStackRef_IsNull(res)) {
stack_pointer[0] = left;
stack_pointer[1] = right;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_ADD_UNICODE_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
right = stack_pointer[-1];
left = stack_pointer[-2];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyUnicode_CheckExact(left_o));
assert(PyUnicode_CheckExact(right_o));
STAT_INC(BINARY_OP, hit);
PyObject *res_o = PyUnicode_Concat(left_o, right_o);
res = PyStackRef_FromPyObjectSteal(res_o);
if (PyStackRef_IsNull(res)) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_ADD_UNICODE_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
right = _stack_item_0;
left = stack_pointer[-1];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyUnicode_CheckExact(left_o));
assert(PyUnicode_CheckExact(right_o));
STAT_INC(BINARY_OP, hit);
PyObject *res_o = PyUnicode_Concat(left_o, right_o);
res = PyStackRef_FromPyObjectSteal(res_o);
if (PyStackRef_IsNull(res)) {
stack_pointer[0] = right;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_ADD_UNICODE_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
right = _stack_item_1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyUnicode_CheckExact(left_o));
assert(PyUnicode_CheckExact(right_o));
STAT_INC(BINARY_OP, hit);
PyObject *res_o = PyUnicode_Concat(left_o, right_o);
res = PyStackRef_FromPyObjectSteal(res_o);
if (PyStackRef_IsNull(res)) {
stack_pointer[0] = left;
stack_pointer[1] = right;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
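/* In-place `str += str`: valid only when the following bytecode is a
   STORE_FAST writing back to the local that currently holds `left` (in
   tier 2 that STORE_FAST oparg arrives as a 16-bit operand). The local
   slot is stolen and cleared so that, when no other references remain,
   PyUnicode_Append can resize the string in place instead of
   concatenating into a fresh object. */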
case _BINARY_OP_INPLACE_ADD_UNICODE_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
right = _stack_item_1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
assert(PyUnicode_CheckExact(left_o));
assert(PyUnicode_CheckExact(PyStackRef_AsPyObjectBorrow(right)));
int next_oparg;
#if TIER_ONE
assert(next_instr->op.code == STORE_FAST);
next_oparg = next_instr->op.arg;
#else
next_oparg = (int)CURRENT_OPERAND0_16();
#endif
_PyStackRef *target_local = &GETLOCAL(next_oparg);
assert(PyUnicode_CheckExact(left_o));
if (PyStackRef_AsPyObjectBorrow(*target_local) != left_o) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = right;
_tos_cache0 = left;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(BINARY_OP, hit);
assert(Py_REFCNT(left_o) >= 2 || !PyStackRef_IsHeapSafe(left));
PyObject *temp = PyStackRef_AsPyObjectSteal(*target_local);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
stack_pointer[0] = left;
stack_pointer[1] = right;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyUnicode_Append(&temp, right_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
PyStackRef_CLOSE_SPECIALIZED(right, _PyUnicode_ExactDealloc);
PyStackRef_CLOSE_SPECIALIZED(left, _PyUnicode_ExactDealloc);
if (temp == NULL) {
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(temp);
*target_local = PyStackRef_NULL;
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
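/* Descriptor-based BINARY_OP specialization: the 64-bit operand is a
   _PyBinaryOpSpecializationDescr pointer. The guard uop runs d->guard()
   on the two operands and side-exits if it refuses them; the action uop
   then calls d->action() and consumes both inputs. */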
case _GUARD_BINARY_OP_EXTEND_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
right = _stack_item_1;
left = _stack_item_0;
PyObject *descr = (PyObject *)CURRENT_OPERAND0_64();
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
_PyBinaryOpSpecializationDescr *d = (_PyBinaryOpSpecializationDescr*)descr;
assert(INLINE_CACHE_ENTRIES_BINARY_OP == 5);
assert(d && d->guard);
stack_pointer[0] = left;
stack_pointer[1] = right;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int res = d->guard(left_o, right_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (!res) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = right;
_tos_cache0 = left;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = right;
_tos_cache0 = left;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_EXTEND_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
right = _stack_item_1;
left = _stack_item_0;
PyObject *descr = (PyObject *)CURRENT_OPERAND0_64();
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(INLINE_CACHE_ENTRIES_BINARY_OP == 5);
_PyBinaryOpSpecializationDescr *d = (_PyBinaryOpSpecializationDescr*)descr;
STAT_INC(BINARY_OP, hit);
stack_pointer[0] = left;
stack_pointer[1] = right;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = d->action(left_o, right_o);
_PyStackRef tmp = right;
right = PyStackRef_NULL;
stack_pointer[-1] = right;
PyStackRef_CLOSE(tmp);
tmp = left;
left = PyStackRef_NULL;
stack_pointer[-2] = left;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
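/* Calls that can run arbitrary Python code are bracketed by
   _PyFrame_SetStackPointer/_PyFrame_GetStackPointer: cached values are
   first spilled to the in-memory stack and the stack pointer is published
   on the frame, so the GC and any re-entrant code observe a consistent
   stack, and the local pointer is re-read once the call returns. */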
case _BINARY_SLICE_r31: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef stop;
_PyStackRef start;
_PyStackRef container;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
stop = _stack_item_2;
start = _stack_item_1;
container = _stack_item_0;
stack_pointer[0] = container;
stack_pointer[1] = start;
stack_pointer[2] = stop;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *slice = _PyBuildSlice_ConsumeRefs(PyStackRef_AsPyObjectSteal(start),
PyStackRef_AsPyObjectSteal(stop));
stack_pointer = _PyFrame_GetStackPointer(frame);
PyObject *res_o;
if (slice == NULL) {
res_o = NULL;
}
else {
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
res_o = PyObject_GetItem(PyStackRef_AsPyObjectBorrow(container), slice);
Py_DECREF(slice);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += 2;
}
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(container);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _STORE_SLICE_r30: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef stop;
_PyStackRef start;
_PyStackRef container;
_PyStackRef v;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
stop = _stack_item_2;
start = _stack_item_1;
container = _stack_item_0;
v = stack_pointer[-1];
stack_pointer[0] = container;
stack_pointer[1] = start;
stack_pointer[2] = stop;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *slice = _PyBuildSlice_ConsumeRefs(PyStackRef_AsPyObjectSteal(start),
PyStackRef_AsPyObjectSteal(stop));
stack_pointer = _PyFrame_GetStackPointer(frame);
int err;
if (slice == NULL) {
err = 1;
}
else {
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
err = PyObject_SetItem(PyStackRef_AsPyObjectBorrow(container), slice, PyStackRef_AsPyObjectBorrow(v));
Py_DECREF(slice);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += 2;
}
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyStackRef tmp = container;
container = PyStackRef_NULL;
stack_pointer[-3] = container;
PyStackRef_CLOSE(tmp);
tmp = v;
v = PyStackRef_NULL;
stack_pointer[-4] = v;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -4;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (err) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
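/* list[int] subscript. Free-threaded builds (Py_GIL_DISABLED) fetch the
   item with _PyList_GetItemRef, which returns a new reference or NULL for
   a stale index, and a NULL result side-exits; the default build
   bounds-checks against PyList_GET_SIZE and takes a new reference to the
   borrowed item directly. */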
case _BINARY_OP_SUBSCR_LIST_INT_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef sub_st;
_PyStackRef list_st;
_PyStackRef res;
_PyStackRef ls;
_PyStackRef ss;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
sub_st = _stack_item_1;
list_st = _stack_item_0;
PyObject *sub = PyStackRef_AsPyObjectBorrow(sub_st);
PyObject *list = PyStackRef_AsPyObjectBorrow(list_st);
assert(PyLong_CheckExact(sub));
assert(PyList_CheckExact(list));
if (!_PyLong_IsNonNegativeCompact((PyLongObject *)sub)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = sub_st;
_tos_cache0 = list_st;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
Py_ssize_t index = ((PyLongObject*)sub)->long_value.ob_digit[0];
#ifdef Py_GIL_DISABLED
stack_pointer[0] = list_st;
stack_pointer[1] = sub_st;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = _PyList_GetItemRef((PyListObject*)list, index);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = sub_st;
_tos_cache0 = list_st;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(BINARY_OP, hit);
res = PyStackRef_FromPyObjectSteal(res_o);
#else
if (index >= PyList_GET_SIZE(list)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = sub_st;
_tos_cache0 = list_st;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(BINARY_OP, hit);
PyObject *res_o = PyList_GET_ITEM(list, index);
assert(res_o != NULL);
res = PyStackRef_FromPyObjectNew(res_o);
stack_pointer += 2;
#endif
ls = list_st;
ss = sub_st;
_tos_cache2 = ss;
_tos_cache1 = ls;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_SUBSCR_LIST_SLICE_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef sub_st;
_PyStackRef list_st;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
sub_st = _stack_item_1;
list_st = _stack_item_0;
PyObject *sub = PyStackRef_AsPyObjectBorrow(sub_st);
PyObject *list = PyStackRef_AsPyObjectBorrow(list_st);
assert(PySlice_Check(sub));
assert(PyList_CheckExact(list));
stack_pointer[0] = list_st;
stack_pointer[1] = sub_st;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = _PyList_SliceSubscript(list, sub);
stack_pointer = _PyFrame_GetStackPointer(frame);
STAT_INC(BINARY_OP, hit);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyStackRef tmp = sub_st;
sub_st = PyStackRef_NULL;
stack_pointer[-1] = sub_st;
PyStackRef_CLOSE(tmp);
tmp = list_st;
list_st = PyStackRef_NULL;
stack_pointer[-2] = list_st;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_SUBSCR_STR_INT_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef sub_st;
_PyStackRef str_st;
_PyStackRef res;
_PyStackRef s;
_PyStackRef i;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
sub_st = _stack_item_1;
str_st = _stack_item_0;
PyObject *sub = PyStackRef_AsPyObjectBorrow(sub_st);
PyObject *str = PyStackRef_AsPyObjectBorrow(str_st);
assert(PyLong_CheckExact(sub));
assert(PyUnicode_CheckExact(str));
if (!_PyLong_IsNonNegativeCompact((PyLongObject *)sub)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = sub_st;
_tos_cache0 = str_st;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
Py_ssize_t index = ((PyLongObject*)sub)->long_value.ob_digit[0];
if (PyUnicode_GET_LENGTH(str) <= index) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = sub_st;
_tos_cache0 = str_st;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
if (!PyUnicode_IS_COMPACT_ASCII(str)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = sub_st;
_tos_cache0 = str_st;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
uint8_t c = PyUnicode_1BYTE_DATA(str)[index];
assert(c < 128);
STAT_INC(BINARY_OP, hit);
PyObject *res_o = (PyObject*)&_Py_SINGLETON(strings).ascii[c];
s = str_st;
i = sub_st;
res = PyStackRef_FromPyObjectBorrow(res_o);
_tos_cache2 = i;
_tos_cache1 = s;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
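/* Type guards are emitted once per cache configuration (r02, r12, r22,
   r33) and leave the logical stack unchanged: on success they only move
   values between memory and the cache registers; on failure they restore
   the cache exactly as found and side-exit through JUMP_TO_JUMP_TARGET. */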
case _GUARD_NOS_TUPLE_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
nos = stack_pointer[-2];
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (!PyTuple_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_TUPLE_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
_PyStackRef _stack_item_0 = _tos_cache0;
nos = stack_pointer[-1];
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (!PyTuple_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_0;
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_TUPLE_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
nos = _stack_item_0;
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (!PyTuple_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_TUPLE_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
nos = _stack_item_1;
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (!PyTuple_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = nos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = nos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_TUPLE_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
tos = stack_pointer[-1];
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PyTuple_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = tos;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_TUPLE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
_PyStackRef _stack_item_0 = _tos_cache0;
tos = _stack_item_0;
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PyTuple_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = tos;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = tos;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_TUPLE_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
tos = _stack_item_1;
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PyTuple_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = tos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = tos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_TUPLE_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
tos = _stack_item_2;
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PyTuple_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = tos;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = tos;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
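/* The compact-int and bounds checks for tuple[int] are hoisted into this
   guard, which is why the _BINARY_OP_SUBSCR_TUPLE_INT action cases further
   down index the tuple unconditionally. */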
case _GUARD_BINARY_OP_SUBSCR_TUPLE_INT_BOUNDS_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef sub_st;
_PyStackRef tuple_st;
sub_st = stack_pointer[-1];
tuple_st = stack_pointer[-2];
PyObject *sub = PyStackRef_AsPyObjectBorrow(sub_st);
PyObject *tuple = PyStackRef_AsPyObjectBorrow(tuple_st);
assert(PyLong_CheckExact(sub));
assert(PyTuple_CheckExact(tuple));
if (!_PyLong_IsNonNegativeCompact((PyLongObject *)sub)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
Py_ssize_t index = ((PyLongObject*)sub)->long_value.ob_digit[0];
if (index >= PyTuple_GET_SIZE(tuple)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = sub_st;
_tos_cache0 = tuple_st;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_BINARY_OP_SUBSCR_TUPLE_INT_BOUNDS_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef sub_st;
_PyStackRef tuple_st;
_PyStackRef _stack_item_0 = _tos_cache0;
sub_st = _stack_item_0;
tuple_st = stack_pointer[-1];
PyObject *sub = PyStackRef_AsPyObjectBorrow(sub_st);
PyObject *tuple = PyStackRef_AsPyObjectBorrow(tuple_st);
assert(PyLong_CheckExact(sub));
assert(PyTuple_CheckExact(tuple));
if (!_PyLong_IsNonNegativeCompact((PyLongObject *)sub)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = sub_st;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
Py_ssize_t index = ((PyLongObject*)sub)->long_value.ob_digit[0];
if (index >= PyTuple_GET_SIZE(tuple)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = sub_st;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = sub_st;
_tos_cache0 = tuple_st;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_BINARY_OP_SUBSCR_TUPLE_INT_BOUNDS_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef sub_st;
_PyStackRef tuple_st;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
sub_st = _stack_item_1;
tuple_st = _stack_item_0;
PyObject *sub = PyStackRef_AsPyObjectBorrow(sub_st);
PyObject *tuple = PyStackRef_AsPyObjectBorrow(tuple_st);
assert(PyLong_CheckExact(sub));
assert(PyTuple_CheckExact(tuple));
if (!_PyLong_IsNonNegativeCompact((PyLongObject *)sub)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = sub_st;
_tos_cache0 = tuple_st;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
Py_ssize_t index = ((PyLongObject*)sub)->long_value.ob_digit[0];
if (index >= PyTuple_GET_SIZE(tuple)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = sub_st;
_tos_cache0 = tuple_st;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = sub_st;
_tos_cache0 = tuple_st;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_BINARY_OP_SUBSCR_TUPLE_INT_BOUNDS_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef sub_st;
_PyStackRef tuple_st;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
sub_st = _stack_item_2;
tuple_st = _stack_item_1;
PyObject *sub = PyStackRef_AsPyObjectBorrow(sub_st);
PyObject *tuple = PyStackRef_AsPyObjectBorrow(tuple_st);
assert(PyLong_CheckExact(sub));
assert(PyTuple_CheckExact(tuple));
if (!_PyLong_IsNonNegativeCompact((PyLongObject *)sub)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = sub_st;
_tos_cache1 = tuple_st;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
Py_ssize_t index = ((PyLongObject*)sub)->long_value.ob_digit[0];
if (index >= PyTuple_GET_SIZE(tuple)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = sub_st;
_tos_cache1 = tuple_st;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = sub_st;
_tos_cache1 = tuple_st;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_SUBSCR_TUPLE_INT_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef sub_st;
_PyStackRef tuple_st;
_PyStackRef res;
_PyStackRef ts;
_PyStackRef ss;
sub_st = stack_pointer[-1];
tuple_st = stack_pointer[-2];
PyObject *sub = PyStackRef_AsPyObjectBorrow(sub_st);
PyObject *tuple = PyStackRef_AsPyObjectBorrow(tuple_st);
assert(PyLong_CheckExact(sub));
assert(PyTuple_CheckExact(tuple));
STAT_INC(BINARY_OP, hit);
Py_ssize_t index = ((PyLongObject*)sub)->long_value.ob_digit[0];
PyObject *res_o = PyTuple_GET_ITEM(tuple, index);
assert(res_o != NULL);
res = PyStackRef_FromPyObjectNew(res_o);
ts = tuple_st;
ss = sub_st;
_tos_cache2 = ss;
_tos_cache1 = ts;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_SUBSCR_TUPLE_INT_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef sub_st;
_PyStackRef tuple_st;
_PyStackRef res;
_PyStackRef ts;
_PyStackRef ss;
_PyStackRef _stack_item_0 = _tos_cache0;
sub_st = _stack_item_0;
tuple_st = stack_pointer[-1];
PyObject *sub = PyStackRef_AsPyObjectBorrow(sub_st);
PyObject *tuple = PyStackRef_AsPyObjectBorrow(tuple_st);
assert(PyLong_CheckExact(sub));
assert(PyTuple_CheckExact(tuple));
STAT_INC(BINARY_OP, hit);
Py_ssize_t index = ((PyLongObject*)sub)->long_value.ob_digit[0];
PyObject *res_o = PyTuple_GET_ITEM(tuple, index);
assert(res_o != NULL);
res = PyStackRef_FromPyObjectNew(res_o);
ts = tuple_st;
ss = sub_st;
_tos_cache2 = ss;
_tos_cache1 = ts;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_SUBSCR_TUPLE_INT_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef sub_st;
_PyStackRef tuple_st;
_PyStackRef res;
_PyStackRef ts;
_PyStackRef ss;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
sub_st = _stack_item_1;
tuple_st = _stack_item_0;
PyObject *sub = PyStackRef_AsPyObjectBorrow(sub_st);
PyObject *tuple = PyStackRef_AsPyObjectBorrow(tuple_st);
assert(PyLong_CheckExact(sub));
assert(PyTuple_CheckExact(tuple));
STAT_INC(BINARY_OP, hit);
Py_ssize_t index = ((PyLongObject*)sub)->long_value.ob_digit[0];
PyObject *res_o = PyTuple_GET_ITEM(tuple, index);
assert(res_o != NULL);
res = PyStackRef_FromPyObjectNew(res_o);
ts = tuple_st;
ss = sub_st;
_tos_cache2 = ss;
_tos_cache1 = ts;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_DICT_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
nos = stack_pointer[-2];
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (!PyDict_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_DICT_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
_PyStackRef _stack_item_0 = _tos_cache0;
nos = stack_pointer[-1];
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (!PyDict_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_0;
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_DICT_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
nos = _stack_item_0;
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (!PyDict_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_DICT_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
nos = _stack_item_1;
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (!PyDict_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = nos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = nos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_DICT_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
tos = stack_pointer[-1];
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PyDict_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = tos;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_DICT_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
_PyStackRef _stack_item_0 = _tos_cache0;
tos = _stack_item_0;
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PyDict_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = tos;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = tos;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_DICT_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
tos = _stack_item_1;
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PyDict_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = tos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = tos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_DICT_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
tos = _stack_item_2;
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PyDict_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = tos;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = tos;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
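/* dict[key]: PyDict_GetItemRef returns 1 with a new reference, 0 when the
   key is missing (the KeyError is raised right here), or -1 on failure,
   so both rc <= 0 outcomes fall through to JUMP_TO_ERROR. */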
case _BINARY_OP_SUBSCR_DICT_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef sub_st;
_PyStackRef dict_st;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
sub_st = _stack_item_1;
dict_st = _stack_item_0;
PyObject *sub = PyStackRef_AsPyObjectBorrow(sub_st);
PyObject *dict = PyStackRef_AsPyObjectBorrow(dict_st);
assert(PyDict_CheckExact(dict));
STAT_INC(BINARY_OP, hit);
PyObject *res_o;
stack_pointer[0] = dict_st;
stack_pointer[1] = sub_st;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int rc = PyDict_GetItemRef(dict, sub, &res_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (rc == 0) {
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyErr_SetKeyError(sub);
stack_pointer = _PyFrame_GetStackPointer(frame);
}
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyStackRef tmp = sub_st;
sub_st = PyStackRef_NULL;
stack_pointer[-1] = sub_st;
PyStackRef_CLOSE(tmp);
tmp = dict_st;
dict_st = PyStackRef_NULL;
stack_pointer[-2] = dict_st;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (rc <= 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
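/* Subscript via a Python-level __getitem__: the function is read from the
   heap type's specialization cache, validated against its recorded
   func_version, and the frame-size check guarantees that the following
   _BINARY_OP_SUBSCR_INIT_CALL can push its frame without overflowing the
   thread's stack. */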
case _BINARY_OP_SUBSCR_CHECK_FUNC_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef container;
_PyStackRef getitem;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
container = _stack_item_0;
PyTypeObject *tp = Py_TYPE(PyStackRef_AsPyObjectBorrow(container));
if (!PyType_HasFeature(tp, Py_TPFLAGS_HEAPTYPE)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = container;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
PyHeapTypeObject *ht = (PyHeapTypeObject *)tp;
PyObject *getitem_o = FT_ATOMIC_LOAD_PTR_ACQUIRE(ht->_spec_cache.getitem);
if (getitem_o == NULL) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = container;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
assert(PyFunction_Check(getitem_o));
uint32_t cached_version = FT_ATOMIC_LOAD_UINT32_RELAXED(ht->_spec_cache.getitem_version);
if (((PyFunctionObject *)getitem_o)->func_version != cached_version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = container;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
PyCodeObject *code = (PyCodeObject *)PyFunction_GET_CODE(getitem_o);
assert(code->co_argcount == 2);
if (!_PyThreadState_HasStackSpace(tstate, code->co_framesize)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = container;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
getitem = PyStackRef_FromPyObjectNew(getitem_o);
STAT_INC(BINARY_OP, hit);
_tos_cache2 = getitem;
_tos_cache1 = _stack_item_1;
_tos_cache0 = container;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
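/* Pushes an interpreter frame for the cached __getitem__ with container
   and sub as its two locals. The return offset of 6 skips the BINARY_OP
   instruction plus its five inline cache entries
   (INLINE_CACHE_ENTRIES_BINARY_OP, asserted earlier in this file). */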
case _BINARY_OP_SUBSCR_INIT_CALL_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef getitem;
_PyStackRef sub;
_PyStackRef container;
_PyStackRef new_frame;
getitem = stack_pointer[-1];
sub = stack_pointer[-2];
container = stack_pointer[-3];
_PyInterpreterFrame* pushed_frame = _PyFrame_PushUnchecked(tstate, getitem, 2, frame);
pushed_frame->localsplus[0] = container;
pushed_frame->localsplus[1] = sub;
frame->return_offset = 6u ;
new_frame = PyStackRef_Wrap(pushed_frame);
_tos_cache0 = new_frame;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_SUBSCR_INIT_CALL_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef getitem;
_PyStackRef sub;
_PyStackRef container;
_PyStackRef new_frame;
_PyStackRef _stack_item_0 = _tos_cache0;
getitem = _stack_item_0;
sub = stack_pointer[-1];
container = stack_pointer[-2];
_PyInterpreterFrame* pushed_frame = _PyFrame_PushUnchecked(tstate, getitem, 2, frame);
pushed_frame->localsplus[0] = container;
pushed_frame->localsplus[1] = sub;
frame->return_offset = 6u ;
new_frame = PyStackRef_Wrap(pushed_frame);
_tos_cache0 = new_frame;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_SUBSCR_INIT_CALL_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef getitem;
_PyStackRef sub;
_PyStackRef container;
_PyStackRef new_frame;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
getitem = _stack_item_1;
sub = _stack_item_0;
container = stack_pointer[-1];
_PyInterpreterFrame* pushed_frame = _PyFrame_PushUnchecked(tstate, getitem, 2, frame);
pushed_frame->localsplus[0] = container;
pushed_frame->localsplus[1] = sub;
frame->return_offset = 6u ;
new_frame = PyStackRef_Wrap(pushed_frame);
_tos_cache0 = new_frame;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_SUBSCR_INIT_CALL_r31: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef getitem;
_PyStackRef sub;
_PyStackRef container;
_PyStackRef new_frame;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
getitem = _stack_item_2;
sub = _stack_item_1;
container = _stack_item_0;
_PyInterpreterFrame* pushed_frame = _PyFrame_PushUnchecked(tstate, getitem, 2, frame);
pushed_frame->localsplus[0] = container;
pushed_frame->localsplus[1] = sub;
frame->return_offset = 6u ;
new_frame = PyStackRef_Wrap(pushed_frame);
_tos_cache0 = new_frame;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LIST_APPEND_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef v;
_PyStackRef list;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
v = _stack_item_0;
list = stack_pointer[-1 - (oparg-1)];
int err = _PyList_AppendTakeRef((PyListObject *)PyStackRef_AsPyObjectBorrow(list),
PyStackRef_AsPyObjectSteal(v));
if (err < 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SET_ADD_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef v;
_PyStackRef set;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
v = _stack_item_0;
set = stack_pointer[-1 - (oparg-1)];
stack_pointer[0] = v;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = _PySet_AddTakeRef((PySetObject *)PyStackRef_AsPyObjectBorrow(set),
PyStackRef_AsPyObjectSteal(v));
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err) {
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _STORE_SUBSCR_r30: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef sub;
_PyStackRef container;
_PyStackRef v;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
sub = _stack_item_2;
container = _stack_item_1;
v = _stack_item_0;
stack_pointer[0] = v;
stack_pointer[1] = container;
stack_pointer[2] = sub;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = PyObject_SetItem(PyStackRef_AsPyObjectBorrow(container), PyStackRef_AsPyObjectBorrow(sub), PyStackRef_AsPyObjectBorrow(v));
_PyStackRef tmp = sub;
sub = PyStackRef_NULL;
stack_pointer[-1] = sub;
PyStackRef_CLOSE(tmp);
tmp = container;
container = PyStackRef_NULL;
stack_pointer[-2] = container;
PyStackRef_CLOSE(tmp);
tmp = v;
v = PyStackRef_NULL;
stack_pointer[-3] = v;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (err) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
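/* Store into list[int] under the per-object lock: LOCK_OBJECT guards
   against concurrent resizes on free-threaded builds (on default builds it
   is expected to always succeed), the element is swapped in with a release
   store, and the old value is released only after the lock is dropped and
   the stack pointer has been published. */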
case _STORE_SUBSCR_LIST_INT_r32: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef sub_st;
_PyStackRef list_st;
_PyStackRef value;
_PyStackRef ls;
_PyStackRef ss;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
sub_st = _stack_item_2;
list_st = _stack_item_1;
value = _stack_item_0;
PyObject *sub = PyStackRef_AsPyObjectBorrow(sub_st);
PyObject *list = PyStackRef_AsPyObjectBorrow(list_st);
assert(PyLong_CheckExact(sub));
assert(PyList_CheckExact(list));
if (!_PyLong_IsNonNegativeCompact((PyLongObject *)sub)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = sub_st;
_tos_cache1 = list_st;
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
Py_ssize_t index = ((PyLongObject*)sub)->long_value.ob_digit[0];
if (!LOCK_OBJECT(list)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = sub_st;
_tos_cache1 = list_st;
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
if (index >= PyList_GET_SIZE(list)) {
UNLOCK_OBJECT(list);
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = sub_st;
_tos_cache1 = list_st;
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
}
STAT_INC(STORE_SUBSCR, hit);
PyObject *old_value = PyList_GET_ITEM(list, index);
FT_ATOMIC_STORE_PTR_RELEASE(_PyList_ITEMS(list)[index],
PyStackRef_AsPyObjectSteal(value));
assert(old_value != NULL);
UNLOCK_OBJECT(list);
ls = list_st;
ss = sub_st;
stack_pointer[0] = ls;
stack_pointer[1] = ss;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
Py_DECREF(old_value);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache1 = ss;
_tos_cache0 = ls;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _STORE_SUBSCR_DICT_r31: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef sub;
_PyStackRef dict_st;
_PyStackRef value;
_PyStackRef st;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
sub = _stack_item_2;
dict_st = _stack_item_1;
value = _stack_item_0;
PyObject *dict = PyStackRef_AsPyObjectBorrow(dict_st);
assert(PyDict_CheckExact(dict));
STAT_INC(STORE_SUBSCR, hit);
stack_pointer[0] = value;
stack_pointer[1] = dict_st;
stack_pointer[2] = sub;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = _PyDict_SetItem_Take2((PyDictObject *)dict,
PyStackRef_AsPyObjectSteal(sub),
PyStackRef_AsPyObjectSteal(value));
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err) {
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(dict_st);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
st = dict_st;
_tos_cache0 = st;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _DELETE_SUBSCR_r20: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef sub;
_PyStackRef container;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
sub = _stack_item_1;
container = _stack_item_0;
stack_pointer[0] = container;
stack_pointer[1] = sub;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = PyObject_DelItem(PyStackRef_AsPyObjectBorrow(container),
PyStackRef_AsPyObjectBorrow(sub));
_PyStackRef tmp = sub;
sub = PyStackRef_NULL;
stack_pointer[-1] = sub;
PyStackRef_CLOSE(tmp);
tmp = container;
container = PyStackRef_NULL;
stack_pointer[-2] = container;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (err) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CALL_INTRINSIC_1_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
value = _stack_item_0;
assert(oparg <= MAX_INTRINSIC_1);
stack_pointer[0] = value;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = _PyIntrinsics_UnaryFunctions[oparg].func(tstate, PyStackRef_AsPyObjectBorrow(value));
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(value);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CALL_INTRINSIC_2_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value1_st;
_PyStackRef value2_st;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
value1_st = _stack_item_1;
value2_st = _stack_item_0;
assert(oparg <= MAX_INTRINSIC_2);
PyObject *value1 = PyStackRef_AsPyObjectBorrow(value1_st);
PyObject *value2 = PyStackRef_AsPyObjectBorrow(value2_st);
stack_pointer[0] = value2_st;
stack_pointer[1] = value1_st;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = _PyIntrinsics_BinaryFunctions[oparg].func(tstate, value2, value1);
_PyStackRef tmp = value1_st;
value1_st = PyStackRef_NULL;
stack_pointer[-1] = value1_st;
PyStackRef_CLOSE(tmp);
tmp = value2_st;
value2_st = PyStackRef_NULL;
stack_pointer[-2] = value2_st;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
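/* Return from a Python frame: the return value is made heap-safe before
   its owning frame is cleared and popped, execution resumes in the caller
   at frame->return_offset, and the result is handed to the next uop in
   _tos_cache0. */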
case _RETURN_VALUE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef retval;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
retval = _stack_item_0;
assert(frame->owner != FRAME_OWNED_BY_INTERPRETER);
_PyStackRef temp = PyStackRef_MakeHeapSafe(retval);
_PyFrame_SetStackPointer(frame, stack_pointer);
assert(STACK_LEVEL() == 0);
_Py_LeaveRecursiveCallPy(tstate);
_PyInterpreterFrame *dying = frame;
frame = tstate->current_frame = dying->previous;
_PyEval_FrameClearAndPop(tstate, dying);
stack_pointer = _PyFrame_GetStackPointer(frame);
LOAD_IP(frame->return_offset);
res = temp;
LLTRACE_RESUME_FRAME();
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GET_AITER_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef obj;
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
obj = _stack_item_0;
unaryfunc getter = NULL;
PyObject *obj_o = PyStackRef_AsPyObjectBorrow(obj);
PyObject *iter_o;
PyTypeObject *type = Py_TYPE(obj_o);
if (type->tp_as_async != NULL) {
getter = type->tp_as_async->am_aiter;
}
if (getter == NULL) {
stack_pointer[0] = obj;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyErr_Format(tstate, PyExc_TypeError,
"'async for' requires an object with "
"__aiter__ method, got %.100s",
type->tp_name);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(obj);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
stack_pointer[0] = obj;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
iter_o = (*getter)(obj_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(obj);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (iter_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
if (Py_TYPE(iter_o)->tp_as_async == NULL ||
Py_TYPE(iter_o)->tp_as_async->am_anext == NULL) {
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyErr_Format(tstate, PyExc_TypeError,
"'async for' received an object from __aiter__ "
"that does not implement __anext__: %.100s",
Py_TYPE(iter_o)->tp_name);
Py_DECREF(iter_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
iter = PyStackRef_FromPyObjectSteal(iter_o);
_tos_cache0 = iter;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GET_ANEXT_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef aiter;
_PyStackRef awaitable;
_PyStackRef _stack_item_0 = _tos_cache0;
aiter = _stack_item_0;
stack_pointer[0] = aiter;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *awaitable_o = _PyEval_GetANext(PyStackRef_AsPyObjectBorrow(aiter));
stack_pointer = _PyFrame_GetStackPointer(frame);
if (awaitable_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
awaitable = PyStackRef_FromPyObjectSteal(awaitable_o);
_tos_cache1 = awaitable;
_tos_cache0 = aiter;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GET_AWAITABLE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iterable;
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
iterable = _stack_item_0;
stack_pointer[0] = iterable;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *iter_o = _PyEval_GetAwaitable(PyStackRef_AsPyObjectBorrow(iterable), oparg);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(iterable);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (iter_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
iter = PyStackRef_FromPyObjectSteal(iter_o);
_tos_cache0 = iter;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _SEND is not a viable micro-op for tier 2 because it uses the 'this_instr' variable */
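/* SEND specialized for generators and coroutines: rather than making a
   call, the value is pushed onto the generator's embedded frame
   (gi_iframe) and that frame is linked in for the trace to enter;
   gen_try_set_executing fails, and the uop side-exits, when the generator
   cannot be transitioned into the executing state. */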
case _SEND_GEN_FRAME_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef v;
_PyStackRef receiver;
_PyStackRef gen_frame;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
v = _stack_item_1;
receiver = _stack_item_0;
PyGenObject *gen = (PyGenObject *)PyStackRef_AsPyObjectBorrow(receiver);
if (Py_TYPE(gen) != &PyGen_Type && Py_TYPE(gen) != &PyCoro_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = v;
_tos_cache0 = receiver;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
if (!gen_try_set_executing((PyGenObject *)gen)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = v;
_tos_cache0 = receiver;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(SEND, hit);
_PyInterpreterFrame *pushed_frame = &gen->gi_iframe;
_PyFrame_StackPush(pushed_frame, PyStackRef_MakeHeapSafe(v));
gen->gi_exc_state.previous_item = tstate->exc_info;
tstate->exc_info = &gen->gi_exc_state;
assert( 2u + oparg <= UINT16_MAX);
frame->return_offset = (uint16_t)( 2u + oparg);
pushed_frame->previous = frame;
gen_frame = PyStackRef_Wrap(pushed_frame);
_tos_cache1 = gen_frame;
_tos_cache0 = receiver;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
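/* YIELD_VALUE: suspends the generator frame (FRAME_SUSPENDED, or
   FRAME_SUSPENDED_YIELD_FROM when oparg is 1), resumes the caller just
   past the SEND/FOR_ITER that entered the generator, and delivers the
   yielded value in _tos_cache0. */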
case _YIELD_VALUE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef retval;
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
retval = _stack_item_0;
assert(frame->owner != FRAME_OWNED_BY_INTERPRETER);
frame->instr_ptr++;
PyGenObject *gen = _PyGen_GetGeneratorFromFrame(frame);
assert(FRAME_SUSPENDED_YIELD_FROM == FRAME_SUSPENDED + 1);
assert(oparg == 0 || oparg == 1);
_PyStackRef temp = retval;
_PyFrame_SetStackPointer(frame, stack_pointer);
tstate->exc_info = gen->gi_exc_state.previous_item;
gen->gi_exc_state.previous_item = NULL;
_Py_LeaveRecursiveCallPy(tstate);
_PyInterpreterFrame *gen_frame = frame;
frame = tstate->current_frame = frame->previous;
gen_frame->previous = NULL;
((_PyThreadStateImpl *)tstate)->generator_return_kind = GENERATOR_YIELD;
FT_ATOMIC_STORE_INT8_RELEASE(gen->gi_frame_state, FRAME_SUSPENDED + oparg);
assert(INLINE_CACHE_ENTRIES_SEND == INLINE_CACHE_ENTRIES_FOR_ITER);
#if TIER_ONE
assert(frame->instr_ptr->op.code == INSTRUMENTED_LINE ||
frame->instr_ptr->op.code == INSTRUMENTED_INSTRUCTION ||
_PyOpcode_Deopt[frame->instr_ptr->op.code] == SEND ||
_PyOpcode_Deopt[frame->instr_ptr->op.code] == FOR_ITER ||
_PyOpcode_Deopt[frame->instr_ptr->op.code] == INTERPRETER_EXIT ||
_PyOpcode_Deopt[frame->instr_ptr->op.code] == ENTER_EXECUTOR);
#endif
stack_pointer = _PyFrame_GetStackPointer(frame);
LOAD_IP(1 + INLINE_CACHE_ENTRIES_SEND);
value = PyStackRef_MakeHeapSafe(temp);
LLTRACE_RESUME_FRAME();
_tos_cache0 = value;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
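/* _POP_EXCEPT restores the previously active exception: the popped value
   replaces tstate->exc_info->exc_value, with None mapping to NULL. The
   value is spilled around Py_XSETREF because dropping the old exception
   can run arbitrary destructor code. */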
case _POP_EXCEPT_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef exc_value;
_PyStackRef _stack_item_0 = _tos_cache0;
exc_value = _stack_item_0;
_PyErr_StackItem *exc_info = tstate->exc_info;
stack_pointer[0] = exc_value;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
Py_XSETREF(exc_info->exc_value,
PyStackRef_IsNone(exc_value)
? NULL : PyStackRef_AsPyObjectSteal(exc_value));
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
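/* The three _LOAD_COMMON_CONSTANT variants differ only in which register
   slot receives the result: each pushes a new reference to
   tstate->interp->common_consts[oparg] on top of whatever is already
   cached. */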
case _LOAD_COMMON_CONSTANT_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = CURRENT_OPARG();
assert(oparg < NUM_COMMON_CONSTANTS);
value = PyStackRef_FromPyObjectNew(tstate->interp->common_consts[oparg]);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_COMMON_CONSTANT_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
assert(oparg < NUM_COMMON_CONSTANTS);
value = PyStackRef_FromPyObjectNew(tstate->interp->common_consts[oparg]);
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_COMMON_CONSTANT_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
assert(oparg < NUM_COMMON_CONSTANTS);
value = PyStackRef_FromPyObjectNew(tstate->interp->common_consts[oparg]);
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_BUILD_CLASS_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef bc;
int err;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *bc_o = _PyMapping_GetOptionalItem2(BUILTINS(), &_Py_ID(__build_class__), &err);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err < 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
if (bc_o == NULL) {
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyErr_SetString(tstate, PyExc_NameError,
"__build_class__ not found");
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
bc = PyStackRef_FromPyObjectSteal(bc_o);
_tos_cache0 = bc;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
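/* _STORE_NAME writes the value into the frame's locals mapping, taking
   the PyDict_SetItem fast path for exact dicts and the generic
   PyObject_SetItem path otherwise; a missing mapping raises SystemError.
   The value is spilled to the in-memory stack around each call that can
   execute arbitrary code. */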
case _STORE_NAME_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef v;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
v = _stack_item_0;
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
PyObject *ns = LOCALS();
int err;
if (ns == NULL) {
stack_pointer[0] = v;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyErr_Format(tstate, PyExc_SystemError,
"no locals found when storing %R", name);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(v);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
if (PyDict_CheckExact(ns)) {
stack_pointer[0] = v;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
err = PyDict_SetItem(ns, name, PyStackRef_AsPyObjectBorrow(v));
stack_pointer = _PyFrame_GetStackPointer(frame);
}
else {
stack_pointer[0] = v;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
err = PyObject_SetItem(ns, name, PyStackRef_AsPyObjectBorrow(v));
stack_pointer = _PyFrame_GetStackPointer(frame);
}
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(v);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _DELETE_NAME_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
oparg = CURRENT_OPARG();
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
PyObject *ns = LOCALS();
int err;
if (ns == NULL) {
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyErr_Format(tstate, PyExc_SystemError,
"no locals when deleting %R", name);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_PyFrame_SetStackPointer(frame, stack_pointer);
err = PyObject_DelItem(ns, name);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err != 0) {
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyEval_FormatExcCheckArg(tstate, PyExc_NameError,
NAME_ERROR_MSG,
name);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
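/* _UNPACK_SEQUENCE is the unspecialized unpack:
   _PyEval_UnpackIterableStackRef writes oparg items directly into the
   stack slots below `top` and returns 0 on failure, hence the res == 0
   error check. */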
case _UNPACK_SEQUENCE_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef seq;
_PyStackRef *top;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
seq = _stack_item_0;
top = &stack_pointer[oparg];
PyObject *seq_o = PyStackRef_AsPyObjectSteal(seq);
_PyFrame_SetStackPointer(frame, stack_pointer);
int res = _PyEval_UnpackIterableStackRef(tstate, seq_o, oparg, -1, top);
Py_DECREF(seq_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res == 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
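/* The _TWO_TUPLE, _TUPLE and _LIST variants below are guarded
   specializations: each deopts via JUMP_TO_JUMP_TARGET when its length
   check fails (or, for lists, when the per-object lock cannot be taken)
   and otherwise copies the items out in reverse order as new references
   before closing the sequence. */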
case _UNPACK_SEQUENCE_TWO_TUPLE_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef seq;
_PyStackRef val1;
_PyStackRef val0;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
seq = _stack_item_0;
assert(oparg == 2);
PyObject *seq_o = PyStackRef_AsPyObjectBorrow(seq);
assert(PyTuple_CheckExact(seq_o));
if (PyTuple_GET_SIZE(seq_o) != 2) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = seq;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(UNPACK_SEQUENCE, hit);
val0 = PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(seq_o, 0));
val1 = PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(seq_o, 1));
stack_pointer[0] = val1;
stack_pointer[1] = val0;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(seq);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache1 = val0;
_tos_cache0 = val1;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _UNPACK_SEQUENCE_TUPLE_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef seq;
_PyStackRef *values;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
seq = _stack_item_0;
values = &stack_pointer[0];
PyObject *seq_o = PyStackRef_AsPyObjectBorrow(seq);
assert(PyTuple_CheckExact(seq_o));
if (PyTuple_GET_SIZE(seq_o) != oparg) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = seq;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(UNPACK_SEQUENCE, hit);
PyObject **items = _PyTuple_ITEMS(seq_o);
for (int i = oparg; --i >= 0; ) {
*values++ = PyStackRef_FromPyObjectNew(items[i]);
}
stack_pointer += oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(seq);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _UNPACK_SEQUENCE_LIST_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef seq;
_PyStackRef *values;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
seq = _stack_item_0;
values = &stack_pointer[0];
PyObject *seq_o = PyStackRef_AsPyObjectBorrow(seq);
assert(PyList_CheckExact(seq_o));
if (!LOCK_OBJECT(seq_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = seq;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
if (PyList_GET_SIZE(seq_o) != oparg) {
UNLOCK_OBJECT(seq_o);
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = seq;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
}
STAT_INC(UNPACK_SEQUENCE, hit);
PyObject **items = _PyList_ITEMS(seq_o);
for (int i = oparg; --i >= 0; ) {
*values++ = PyStackRef_FromPyObjectNew(items[i]);
}
UNLOCK_OBJECT(seq_o);
stack_pointer += oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(seq);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _UNPACK_EX_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef seq;
_PyStackRef *top;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
seq = _stack_item_0;
top = &stack_pointer[1 + (oparg & 0xFF) + (oparg >> 8)];
PyObject *seq_o = PyStackRef_AsPyObjectSteal(seq);
_PyFrame_SetStackPointer(frame, stack_pointer);
int res = _PyEval_UnpackIterableStackRef(tstate, seq_o, oparg & 0xFF, oparg >> 8, top);
Py_DECREF(seq_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res == 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += 1 + (oparg & 0xFF) + (oparg >> 8);
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
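/* _STORE_ATTR spills both operands before calling PyObject_SetAttr, then
   nulls each stack slot as the corresponding reference is closed, so no
   slot holds a stale pointer if a destructor re-enters the interpreter. */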
case _STORE_ATTR_r20: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef v;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
owner = _stack_item_1;
v = _stack_item_0;
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
stack_pointer[0] = v;
stack_pointer[1] = owner;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = PyObject_SetAttr(PyStackRef_AsPyObjectBorrow(owner),
name, PyStackRef_AsPyObjectBorrow(v));
_PyStackRef tmp = owner;
owner = PyStackRef_NULL;
stack_pointer[-1] = owner;
PyStackRef_CLOSE(tmp);
tmp = v;
v = PyStackRef_NULL;
stack_pointer[-2] = v;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (err) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _DELETE_ATTR_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
owner = _stack_item_0;
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
stack_pointer[0] = owner;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = PyObject_DelAttr(PyStackRef_AsPyObjectBorrow(owner), name);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(owner);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _STORE_GLOBAL_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef v;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
v = _stack_item_0;
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
stack_pointer[0] = v;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = PyDict_SetItem(GLOBALS(), name, PyStackRef_AsPyObjectBorrow(v));
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(v);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _DELETE_GLOBAL_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
oparg = CURRENT_OPARG();
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = PyDict_Pop(GLOBALS(), name, NULL);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err < 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
if (err == 0) {
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyEval_FormatExcCheckArg(tstate, PyExc_NameError,
NAME_ERROR_MSG, name);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_LOCALS_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef locals;
PyObject *l = LOCALS();
if (l == NULL) {
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyErr_SetString(tstate, PyExc_SystemError,
"no locals found");
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
locals = PyStackRef_FromPyObjectNew(l);
_tos_cache0 = locals;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_LOCALS_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef locals;
_PyStackRef _stack_item_0 = _tos_cache0;
PyObject *l = LOCALS();
if (l == NULL) {
stack_pointer[0] = _stack_item_0;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyErr_SetString(tstate, PyExc_SystemError,
"no locals found");
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
locals = PyStackRef_FromPyObjectNew(l);
_tos_cache1 = locals;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_LOCALS_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef locals;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
PyObject *l = LOCALS();
if (l == NULL) {
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyErr_SetString(tstate, PyExc_SystemError,
"no locals found");
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
locals = PyStackRef_FromPyObjectNew(l);
_tos_cache2 = locals;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _LOAD_FROM_DICT_OR_GLOBALS is not a viable micro-op for tier 2 because it has both popping and not-popping errors */
case _LOAD_NAME_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef v;
oparg = CURRENT_OPARG();
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *v_o = _PyEval_LoadName(tstate, frame, name);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (v_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
v = PyStackRef_FromPyObjectSteal(v_o);
_tos_cache0 = v;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_GLOBAL_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *res;
oparg = CURRENT_OPARG();
res = &stack_pointer[0];
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg >> 1);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyEval_LoadGlobalStackRef(GLOBALS(), BUILTINS(), name, res);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (PyStackRef_IsNull(*res)) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _PUSH_NULL_CONDITIONAL_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *null;
oparg = CURRENT_OPARG();
null = &stack_pointer[0];
if (oparg & 1) {
null[0] = PyStackRef_NULL;
}
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += (oparg & 1);
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
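/* _GUARD_GLOBALS_VERSION deopts unless GLOBALS() is still an exact dict
   whose keys object carries the dk_version recorded in the 16-bit operand.
   The four register variants are identical apart from how many cached
   values they carry through unchanged. */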
case _GUARD_GLOBALS_VERSION_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
uint16_t version = (uint16_t)CURRENT_OPERAND0_16();
PyDictObject *dict = (PyDictObject *)GLOBALS();
if (!PyDict_CheckExact(dict)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
PyDictKeysObject *keys = FT_ATOMIC_LOAD_PTR_ACQUIRE(dict->ma_keys);
if (FT_ATOMIC_LOAD_UINT32_RELAXED(keys->dk_version) != version) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
assert(DK_IS_UNICODE(keys));
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_GLOBALS_VERSION_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
uint16_t version = (uint16_t)CURRENT_OPERAND0_16();
PyDictObject *dict = (PyDictObject *)GLOBALS();
if (!PyDict_CheckExact(dict)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
PyDictKeysObject *keys = FT_ATOMIC_LOAD_PTR_ACQUIRE(dict->ma_keys);
if (FT_ATOMIC_LOAD_UINT32_RELAXED(keys->dk_version) != version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
assert(DK_IS_UNICODE(keys));
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_GLOBALS_VERSION_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
uint16_t version = (uint16_t)CURRENT_OPERAND0_16();
PyDictObject *dict = (PyDictObject *)GLOBALS();
if (!PyDict_CheckExact(dict)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
PyDictKeysObject *keys = FT_ATOMIC_LOAD_PTR_ACQUIRE(dict->ma_keys);
if (FT_ATOMIC_LOAD_UINT32_RELAXED(keys->dk_version) != version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
assert(DK_IS_UNICODE(keys));
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_GLOBALS_VERSION_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
uint16_t version = (uint16_t)CURRENT_OPERAND0_16();
PyDictObject *dict = (PyDictObject *)GLOBALS();
if (!PyDict_CheckExact(dict)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
PyDictKeysObject *keys = FT_ATOMIC_LOAD_PTR_ACQUIRE(dict->ma_keys);
if (FT_ATOMIC_LOAD_UINT32_RELAXED(keys->dk_version) != version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
assert(DK_IS_UNICODE(keys));
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
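/* _LOAD_GLOBAL_MODULE and _LOAD_GLOBAL_BUILTINS repeat the version guard
   against their own dict and then read the value straight from the
   unicode-keyed entry at the recorded index. Under Py_GIL_DISABLED the
   load must also win the incref race via _Py_TryIncrefCompareStackRef or
   the uop deopts; with the GIL held a plain new reference suffices. */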
case _LOAD_GLOBAL_MODULE_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef res;
uint16_t version = (uint16_t)CURRENT_OPERAND0_16();
uint16_t index = (uint16_t)CURRENT_OPERAND1_16();
PyDictObject *dict = (PyDictObject *)GLOBALS();
if (!PyDict_CheckExact(dict)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
PyDictKeysObject *keys = FT_ATOMIC_LOAD_PTR_ACQUIRE(dict->ma_keys);
if (FT_ATOMIC_LOAD_UINT32_RELAXED(keys->dk_version) != version) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
assert(DK_IS_UNICODE(keys));
PyDictUnicodeEntry *entries = DK_UNICODE_ENTRIES(keys);
assert(index < DK_SIZE(keys));
PyObject *res_o = FT_ATOMIC_LOAD_PTR_RELAXED(entries[index].me_value);
if (res_o == NULL) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
#if Py_GIL_DISABLED
int increfed = _Py_TryIncrefCompareStackRef(&entries[index].me_value, res_o, &res);
if (!increfed) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
#else
res = PyStackRef_FromPyObjectNew(res_o);
#endif
STAT_INC(LOAD_GLOBAL, hit);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_GLOBAL_BUILTINS_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef res;
uint16_t version = (uint16_t)CURRENT_OPERAND0_16();
uint16_t index = (uint16_t)CURRENT_OPERAND1_16();
PyDictObject *dict = (PyDictObject *)BUILTINS();
if (!PyDict_CheckExact(dict)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
PyDictKeysObject *keys = FT_ATOMIC_LOAD_PTR_ACQUIRE(dict->ma_keys);
if (FT_ATOMIC_LOAD_UINT32_RELAXED(keys->dk_version) != version) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
assert(DK_IS_UNICODE(keys));
PyDictUnicodeEntry *entries = DK_UNICODE_ENTRIES(keys);
PyObject *res_o = FT_ATOMIC_LOAD_PTR_RELAXED(entries[index].me_value);
if (res_o == NULL) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
#if Py_GIL_DISABLED
int increfed = _Py_TryIncrefCompareStackRef(&entries[index].me_value, res_o, &res);
if (!increfed) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
#else
res = PyStackRef_FromPyObjectNew(res_o);
#endif
STAT_INC(LOAD_GLOBAL, hit);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _DELETE_FAST_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
oparg = CURRENT_OPARG();
_PyStackRef v = GETLOCAL(oparg);
if (PyStackRef_IsNull(v)) {
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyEval_FormatExcCheckArg(tstate, PyExc_UnboundLocalError,
UNBOUNDLOCAL_ERROR_MSG,
PyTuple_GetItem(_PyFrame_GetCode(frame)->co_localsplusnames, oparg)
);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_PyStackRef tmp = GETLOCAL(oparg);
GETLOCAL(oparg) = PyStackRef_NULL;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_XCLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _MAKE_CELL_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
oparg = CURRENT_OPARG();
PyObject *initial = PyStackRef_AsPyObjectBorrow(GETLOCAL(oparg));
PyObject *cell = PyCell_New(initial);
if (cell == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_PyStackRef tmp = GETLOCAL(oparg);
GETLOCAL(oparg) = PyStackRef_FromPyObjectSteal(cell);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_XCLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _DELETE_DEREF_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
oparg = CURRENT_OPARG();
PyObject *cell = PyStackRef_AsPyObjectBorrow(GETLOCAL(oparg));
PyObject *oldobj = PyCell_SwapTakeRef((PyCellObject *)cell, NULL);
if (oldobj == NULL) {
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyEval_FormatExcUnbound(tstate, _PyFrame_GetCode(frame), oparg);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_PyFrame_SetStackPointer(frame, stack_pointer);
Py_DECREF(oldobj);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_FROM_DICT_OR_DEREF_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef class_dict_st;
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
class_dict_st = _stack_item_0;
PyObject *name;
PyObject *class_dict = PyStackRef_AsPyObjectBorrow(class_dict_st);
assert(class_dict);
assert(oparg >= 0 && oparg < _PyFrame_GetCode(frame)->co_nlocalsplus);
name = PyTuple_GET_ITEM(_PyFrame_GetCode(frame)->co_localsplusnames, oparg);
int err;
stack_pointer[0] = class_dict_st;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject* value_o = _PyMapping_GetOptionalItem2(class_dict, name, &err);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err < 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
if (!value_o) {
PyCellObject *cell = (PyCellObject *)PyStackRef_AsPyObjectBorrow(GETLOCAL(oparg));
value_o = PyCell_GetRef(cell);
if (value_o == NULL) {
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyEval_FormatExcUnbound(tstate, _PyFrame_GetCode(frame), oparg);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
}
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(class_dict_st);
stack_pointer = _PyFrame_GetStackPointer(frame);
value = PyStackRef_FromPyObjectSteal(value_o);
_tos_cache0 = value;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_DEREF_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
oparg = CURRENT_OPARG();
PyCellObject *cell = (PyCellObject *)PyStackRef_AsPyObjectBorrow(GETLOCAL(oparg));
_PyFrame_SetStackPointer(frame, stack_pointer);
value = _PyCell_GetStackRef(cell);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (PyStackRef_IsNull(value)) {
stack_pointer[0] = value;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyEval_FormatExcUnbound(tstate, _PyFrame_GetCode(frame), oparg);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = value;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _STORE_DEREF_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef v;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
v = _stack_item_0;
PyCellObject *cell = (PyCellObject *)PyStackRef_AsPyObjectBorrow(GETLOCAL(oparg));
stack_pointer[0] = v;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyCell_SetTakeRef(cell, PyStackRef_AsPyObjectSteal(v));
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
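/* _COPY_FREE_VARS copies the function's closure tuple into the localsplus
   slots reserved for free variables, starting at offset
   co_nlocalsplus - co_nfreevars. It reads and writes no stack values, so
   the four register variants differ only in the cache they pass through. */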
case _COPY_FREE_VARS_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
oparg = CURRENT_OPARG();
PyCodeObject *co = _PyFrame_GetCode(frame);
assert(PyStackRef_FunctionCheck(frame->f_funcobj));
PyFunctionObject *func = (PyFunctionObject *)PyStackRef_AsPyObjectBorrow(frame->f_funcobj);
PyObject *closure = func->func_closure;
assert(oparg == co->co_nfreevars);
int offset = co->co_nlocalsplus - oparg;
for (int i = 0; i < oparg; ++i) {
PyObject *o = PyTuple_GET_ITEM(closure, i);
frame->localsplus[offset + i] = PyStackRef_FromPyObjectNew(o);
}
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COPY_FREE_VARS_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
PyCodeObject *co = _PyFrame_GetCode(frame);
assert(PyStackRef_FunctionCheck(frame->f_funcobj));
PyFunctionObject *func = (PyFunctionObject *)PyStackRef_AsPyObjectBorrow(frame->f_funcobj);
PyObject *closure = func->func_closure;
assert(oparg == co->co_nfreevars);
int offset = co->co_nlocalsplus - oparg;
for (int i = 0; i < oparg; ++i) {
PyObject *o = PyTuple_GET_ITEM(closure, i);
frame->localsplus[offset + i] = PyStackRef_FromPyObjectNew(o);
}
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COPY_FREE_VARS_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
PyCodeObject *co = _PyFrame_GetCode(frame);
assert(PyStackRef_FunctionCheck(frame->f_funcobj));
PyFunctionObject *func = (PyFunctionObject *)PyStackRef_AsPyObjectBorrow(frame->f_funcobj);
PyObject *closure = func->func_closure;
assert(oparg == co->co_nfreevars);
int offset = co->co_nlocalsplus - oparg;
for (int i = 0; i < oparg; ++i) {
PyObject *o = PyTuple_GET_ITEM(closure, i);
frame->localsplus[offset + i] = PyStackRef_FromPyObjectNew(o);
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COPY_FREE_VARS_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
oparg = CURRENT_OPARG();
PyCodeObject *co = _PyFrame_GetCode(frame);
assert(PyStackRef_FunctionCheck(frame->f_funcobj));
PyFunctionObject *func = (PyFunctionObject *)PyStackRef_AsPyObjectBorrow(frame->f_funcobj);
PyObject *closure = func->func_closure;
assert(oparg == co->co_nfreevars);
int offset = co->co_nlocalsplus - oparg;
for (int i = 0; i < oparg; ++i) {
PyObject *o = PyTuple_GET_ITEM(closure, i);
frame->localsplus[offset + i] = PyStackRef_FromPyObjectNew(o);
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BUILD_STRING_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *pieces;
_PyStackRef str;
oparg = CURRENT_OPARG();
pieces = &stack_pointer[-oparg];
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *str_o = _Py_BuildString_StackRefSteal(pieces, oparg);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (str_o == NULL) {
stack_pointer += -oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
str = PyStackRef_FromPyObjectSteal(str_o);
_tos_cache0 = str;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BUILD_INTERPOLATION_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *format;
_PyStackRef str;
_PyStackRef value;
_PyStackRef interpolation;
oparg = CURRENT_OPARG();
format = &stack_pointer[-(oparg & 1)];
str = stack_pointer[-1 - (oparg & 1)];
value = stack_pointer[-2 - (oparg & 1)];
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
PyObject *str_o = PyStackRef_AsPyObjectBorrow(str);
int conversion = oparg >> 2;
PyObject *format_o;
if (oparg & 1) {
format_o = PyStackRef_AsPyObjectBorrow(format[0]);
}
else {
format_o = &_Py_STR(empty);
}
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *interpolation_o = _PyInterpolation_Build(value_o, str_o, conversion, format_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (oparg & 1) {
stack_pointer += -(oparg & 1);
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(format[0]);
stack_pointer = _PyFrame_GetStackPointer(frame);
}
else {
stack_pointer += -(oparg & 1);
}
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(str);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(value);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (interpolation_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
interpolation = PyStackRef_FromPyObjectSteal(interpolation_o);
_tos_cache0 = interpolation;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BUILD_TEMPLATE_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef interpolations;
_PyStackRef strings;
_PyStackRef template;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
interpolations = _stack_item_1;
strings = _stack_item_0;
PyObject *strings_o = PyStackRef_AsPyObjectBorrow(strings);
PyObject *interpolations_o = PyStackRef_AsPyObjectBorrow(interpolations);
stack_pointer[0] = strings;
stack_pointer[1] = interpolations;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *template_o = _PyTemplate_Build(strings_o, interpolations_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(interpolations);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(strings);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (template_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
template = PyStackRef_FromPyObjectSteal(template_o);
_tos_cache0 = template;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BUILD_TUPLE_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *values;
_PyStackRef tup;
oparg = CURRENT_OPARG();
values = &stack_pointer[-oparg];
PyObject *tup_o = _PyTuple_FromStackRefStealOnSuccess(values, oparg);
if (tup_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
tup = PyStackRef_FromPyObjectStealMortal(tup_o);
_tos_cache0 = tup;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BUILD_LIST_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *values;
_PyStackRef list;
oparg = CURRENT_OPARG();
values = &stack_pointer[-oparg];
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *list_o = _PyList_FromStackRefStealOnSuccess(values, oparg);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (list_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
list = PyStackRef_FromPyObjectStealMortal(list_o);
_tos_cache0 = list;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LIST_EXTEND_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iterable_st;
_PyStackRef list_st;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
iterable_st = _stack_item_0;
list_st = stack_pointer[-1 - (oparg - 1)];
PyObject *list = PyStackRef_AsPyObjectBorrow(list_st);
PyObject *iterable = PyStackRef_AsPyObjectBorrow(iterable_st);
stack_pointer[0] = iterable_st;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *none_val = _PyList_Extend((PyListObject *)list, iterable);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (none_val == NULL) {
_PyFrame_SetStackPointer(frame, stack_pointer);
int matches = _PyErr_ExceptionMatches(tstate, PyExc_TypeError);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (matches &&
(Py_TYPE(iterable)->tp_iter == NULL && !PySequence_Check(iterable)))
{
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyErr_Clear(tstate);
_PyErr_Format(tstate, PyExc_TypeError,
"Value after * must be an iterable, not %.200s",
Py_TYPE(iterable)->tp_name);
stack_pointer = _PyFrame_GetStackPointer(frame);
}
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(iterable_st);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
assert(Py_IsNone(none_val));
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(iterable_st);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SET_UPDATE_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iterable;
_PyStackRef set;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
iterable = _stack_item_0;
set = stack_pointer[-1 - (oparg - 1)];
stack_pointer[0] = iterable;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = _PySet_Update(PyStackRef_AsPyObjectBorrow(set),
PyStackRef_AsPyObjectBorrow(iterable));
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(iterable);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err < 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
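/* _BUILD_SET consumes all oparg operands unconditionally: each value is
   either donated to the new set by _PySet_AddTakeRef or explicitly closed
   once an earlier insert has failed, so the error path cannot leak
   references. */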
case _BUILD_SET_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *values;
_PyStackRef set;
oparg = CURRENT_OPARG();
values = &stack_pointer[-oparg];
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *set_o = PySet_New(NULL);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (set_o == NULL) {
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyStackRef tmp;
for (int _i = oparg; --_i >= 0;) {
tmp = values[_i];
values[_i] = PyStackRef_NULL;
PyStackRef_CLOSE(tmp);
}
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
int err = 0;
for (Py_ssize_t i = 0; i < oparg; i++) {
_PyStackRef value = values[i];
values[i] = PyStackRef_NULL;
if (err == 0) {
_PyFrame_SetStackPointer(frame, stack_pointer);
err = _PySet_AddTakeRef((PySetObject *)set_o, PyStackRef_AsPyObjectSteal(value));
stack_pointer = _PyFrame_GetStackPointer(frame);
}
else {
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(value);
stack_pointer = _PyFrame_GetStackPointer(frame);
}
}
if (err) {
stack_pointer += -oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
Py_DECREF(set_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
set = PyStackRef_FromPyObjectStealMortal(set_o);
_tos_cache0 = set;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BUILD_MAP_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *values;
_PyStackRef map;
oparg = CURRENT_OPARG();
values = &stack_pointer[-oparg*2];
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *map_o = _Py_BuildMap_StackRefSteal(values, oparg);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (map_o == NULL) {
stack_pointer += -oparg*2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
map = PyStackRef_FromPyObjectStealMortal(map_o);
_tos_cache0 = map;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -oparg*2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SETUP_ANNOTATIONS_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
if (LOCALS() == NULL) {
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyErr_Format(tstate, PyExc_SystemError,
"no locals found when setting up annotations");
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
int err;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject* ann_dict = _PyMapping_GetOptionalItem2(LOCALS(), &_Py_ID(__annotations__), &err);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err < 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
if (ann_dict == NULL) {
_PyFrame_SetStackPointer(frame, stack_pointer);
ann_dict = PyDict_New();
stack_pointer = _PyFrame_GetStackPointer(frame);
if (ann_dict == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_PyFrame_SetStackPointer(frame, stack_pointer);
err = PyObject_SetItem(LOCALS(), &_Py_ID(__annotations__),
ann_dict);
Py_DECREF(ann_dict);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
}
else {
_PyFrame_SetStackPointer(frame, stack_pointer);
Py_DECREF(ann_dict);
stack_pointer = _PyFrame_GetStackPointer(frame);
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _DICT_UPDATE_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef update;
_PyStackRef dict;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
update = _stack_item_0;
dict = stack_pointer[-1 - (oparg - 1)];
PyObject *dict_o = PyStackRef_AsPyObjectBorrow(dict);
PyObject *update_o = PyStackRef_AsPyObjectBorrow(update);
stack_pointer[0] = update;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = PyDict_Update(dict_o, update_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err < 0) {
_PyFrame_SetStackPointer(frame, stack_pointer);
int matches = _PyErr_ExceptionMatches(tstate, PyExc_AttributeError);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (matches) {
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyErr_Format(tstate, PyExc_TypeError,
"'%.200s' object is not a mapping",
Py_TYPE(update_o)->tp_name);
stack_pointer = _PyFrame_GetStackPointer(frame);
}
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(update);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(update);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _DICT_MERGE_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef update;
_PyStackRef dict;
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
update = _stack_item_0;
dict = stack_pointer[-1 - (oparg - 1)];
callable = stack_pointer[-4 - (oparg - 1)];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyObject *dict_o = PyStackRef_AsPyObjectBorrow(dict);
PyObject *update_o = PyStackRef_AsPyObjectBorrow(update);
stack_pointer[0] = update;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = _PyDict_MergeEx(dict_o, update_o, 2);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err < 0) {
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyEval_FormatKwargsError(tstate, callable_o, update_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(update);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(update);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _MAP_ADD_r20: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef key;
_PyStackRef dict_st;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
value = _stack_item_1;
key = _stack_item_0;
dict_st = stack_pointer[-1 - (oparg - 1)];
PyObject *dict = PyStackRef_AsPyObjectBorrow(dict_st);
assert(PyDict_CheckExact(dict));
stack_pointer[0] = key;
stack_pointer[1] = value;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = _PyDict_SetItem_Take2(
(PyDictObject *)dict,
PyStackRef_AsPyObjectSteal(key),
PyStackRef_AsPyObjectSteal(value)
);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err != 0) {
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
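/* _LOAD_SUPER_ATTR_ATTR and _LOAD_SUPER_ATTR_METHOD specialize attribute
   loads on the builtin super: both deopt unless the global `super` is
   exactly PySuper_Type and the class operand is a type, then call
   _PySuper_Lookup directly instead of instantiating a super object. The
   _METHOD form also asks _PySuper_Lookup whether it found a plain method
   so that self can be kept for the upcoming call. */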
case _LOAD_SUPER_ATTR_ATTR_r31: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef self_st;
_PyStackRef class_st;
_PyStackRef global_super_st;
_PyStackRef attr_st;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
oparg = CURRENT_OPARG();
self_st = _stack_item_2;
class_st = _stack_item_1;
global_super_st = _stack_item_0;
PyObject *global_super = PyStackRef_AsPyObjectBorrow(global_super_st);
PyObject *class = PyStackRef_AsPyObjectBorrow(class_st);
PyObject *self = PyStackRef_AsPyObjectBorrow(self_st);
assert(!(oparg & 1));
if (global_super != (PyObject *)&PySuper_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = self_st;
_tos_cache1 = class_st;
_tos_cache0 = global_super_st;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
if (!PyType_Check(class)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = self_st;
_tos_cache1 = class_st;
_tos_cache0 = global_super_st;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(LOAD_SUPER_ATTR, hit);
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg >> 2);
stack_pointer[0] = global_super_st;
stack_pointer[1] = class_st;
stack_pointer[2] = self_st;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *attr = _PySuper_Lookup((PyTypeObject *)class, self, name, NULL);
_PyStackRef tmp = self_st;
self_st = PyStackRef_NULL;
stack_pointer[-1] = self_st;
PyStackRef_CLOSE(tmp);
tmp = class_st;
class_st = PyStackRef_NULL;
stack_pointer[-2] = class_st;
PyStackRef_CLOSE(tmp);
tmp = global_super_st;
global_super_st = PyStackRef_NULL;
stack_pointer[-3] = global_super_st;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (attr == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
attr_st = PyStackRef_FromPyObjectSteal(attr);
_tos_cache0 = attr_st;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_SUPER_ATTR_METHOD_r32: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef self_st;
_PyStackRef class_st;
_PyStackRef global_super_st;
_PyStackRef attr;
_PyStackRef self_or_null;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
oparg = CURRENT_OPARG();
self_st = _stack_item_2;
class_st = _stack_item_1;
global_super_st = _stack_item_0;
PyObject *global_super = PyStackRef_AsPyObjectBorrow(global_super_st);
PyObject *class = PyStackRef_AsPyObjectBorrow(class_st);
PyObject *self = PyStackRef_AsPyObjectBorrow(self_st);
assert(oparg & 1);
if (global_super != (PyObject *)&PySuper_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = self_st;
_tos_cache1 = class_st;
_tos_cache0 = global_super_st;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
if (!PyType_Check(class)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = self_st;
_tos_cache1 = class_st;
_tos_cache0 = global_super_st;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(LOAD_SUPER_ATTR, hit);
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg >> 2);
PyTypeObject *cls = (PyTypeObject *)class;
int method_found = 0;
PyObject *attr_o;
{
int *method_found_ptr = &method_found;
stack_pointer[0] = global_super_st;
stack_pointer[1] = class_st;
stack_pointer[2] = self_st;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
attr_o = _PySuper_Lookup(cls, self, name,
Py_TYPE(self)->tp_getattro == PyObject_GenericGetAttr ? method_found_ptr : NULL);
stack_pointer = _PyFrame_GetStackPointer(frame);
}
if (attr_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
if (method_found) {
self_or_null = self_st;
} else {
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(self_st);
stack_pointer = _PyFrame_GetStackPointer(frame);
self_or_null = PyStackRef_NULL;
stack_pointer += 1;
}
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyStackRef tmp = global_super_st;
global_super_st = self_or_null;
stack_pointer[-2] = global_super_st;
PyStackRef_CLOSE(tmp);
tmp = class_st;
class_st = PyStackRef_NULL;
stack_pointer[-1] = class_st;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
attr = PyStackRef_FromPyObjectSteal(attr_o);
_tos_cache1 = self_or_null;
_tos_cache0 = attr;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
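/* _LOAD_ATTR with the low oparg bit set is the method-load form:
   _PyObject_GetMethodStackRef reports whether the attribute resolved to an
   unbound method, in which case the owner is kept in the self slot;
   otherwise the slot is set to PyStackRef_NULL. With the bit clear this is
   a plain PyObject_GetAttr. */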
case _LOAD_ATTR_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef *self_or_null;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
owner = _stack_item_0;
self_or_null = &stack_pointer[1];
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg >> 1);
if (oparg & 1) {
_PyCStackRef method;
stack_pointer[0] = owner;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyThreadState_PushCStackRef(tstate, &method);
int is_meth = _PyObject_GetMethodStackRef(tstate, PyStackRef_AsPyObjectBorrow(owner), name, &method.ref);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (is_meth) {
assert(!PyStackRef_IsNull(method.ref));
self_or_null[0] = owner;
attr = _PyThreadState_PopCStackRefSteal(tstate, &method);
}
else {
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(owner);
stack_pointer = _PyFrame_GetStackPointer(frame);
self_or_null[0] = PyStackRef_NULL;
attr = _PyThreadState_PopCStackRefSteal(tstate, &method);
if (PyStackRef_IsNull(attr)) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
stack_pointer += 1;
}
}
else {
stack_pointer[0] = owner;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *attr_o = PyObject_GetAttr(PyStackRef_AsPyObjectBorrow(owner), name);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(owner);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (attr_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
attr = PyStackRef_FromPyObjectSteal(attr_o);
stack_pointer += 1;
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
stack_pointer[-1] = attr;
stack_pointer += (oparg&1);
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
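/* _GUARD_TYPE_VERSION: deoptimize unless the owner's type still carries the
   version tag recorded at specialization time.  The four register variants
   below perform the same check and differ only in which _tos_cache slot
   holds `owner` and how many other cached values must be preserved on the
   deopt path.  Stripped of the cache bookkeeping, the test is roughly:

       PyTypeObject *tp = Py_TYPE(PyStackRef_AsPyObjectBorrow(owner));
       if (FT_ATOMIC_LOAD_UINT_RELAXED(tp->tp_version_tag) != type_version) {
           DEOPT();  // hypothetical shorthand for the JUMP_TO_JUMP_TARGET path
       }
*/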
case _GUARD_TYPE_VERSION_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
owner = stack_pointer[-1];
uint32_t type_version = (uint32_t)CURRENT_OPERAND0_32();
PyTypeObject *tp = Py_TYPE(PyStackRef_AsPyObjectBorrow(owner));
assert(type_version != 0);
if (FT_ATOMIC_LOAD_UINT_RELAXED(tp->tp_version_tag) != type_version) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TYPE_VERSION_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
owner = _stack_item_0;
uint32_t type_version = (uint32_t)CURRENT_OPERAND0_32();
PyTypeObject *tp = Py_TYPE(PyStackRef_AsPyObjectBorrow(owner));
assert(type_version != 0);
if (FT_ATOMIC_LOAD_UINT_RELAXED(tp->tp_version_tag) != type_version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TYPE_VERSION_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
owner = _stack_item_1;
uint32_t type_version = (uint32_t)CURRENT_OPERAND0_32();
PyTypeObject *tp = Py_TYPE(PyStackRef_AsPyObjectBorrow(owner));
assert(type_version != 0);
if (FT_ATOMIC_LOAD_UINT_RELAXED(tp->tp_version_tag) != type_version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TYPE_VERSION_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
owner = _stack_item_2;
uint32_t type_version = (uint32_t)CURRENT_OPERAND0_32();
PyTypeObject *tp = Py_TYPE(PyStackRef_AsPyObjectBorrow(owner));
assert(type_version != 0);
if (FT_ATOMIC_LOAD_UINT_RELAXED(tp->tp_version_tag) != type_version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
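/* _GUARD_TYPE_VERSION_AND_LOCK: the same version check, but the owner is
   locked first (LOCK_OBJECT only does real work on free-threaded builds).
   If the version check then fails, the object is unlocked before
   deoptimizing; the `if (true)` wrapping that deopt is generator output for
   an unconditional exit, not a meaningful branch. */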
case _GUARD_TYPE_VERSION_AND_LOCK_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
owner = stack_pointer[-1];
uint32_t type_version = (uint32_t)CURRENT_OPERAND0_32();
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(type_version != 0);
if (!LOCK_OBJECT(owner_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
PyTypeObject *tp = Py_TYPE(owner_o);
if (FT_ATOMIC_LOAD_UINT_RELAXED(tp->tp_version_tag) != type_version) {
UNLOCK_OBJECT(owner_o);
if (true) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TYPE_VERSION_AND_LOCK_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
owner = _stack_item_0;
uint32_t type_version = (uint32_t)CURRENT_OPERAND0_32();
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(type_version != 0);
if (!LOCK_OBJECT(owner_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
PyTypeObject *tp = Py_TYPE(owner_o);
if (FT_ATOMIC_LOAD_UINT_RELAXED(tp->tp_version_tag) != type_version) {
UNLOCK_OBJECT(owner_o);
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TYPE_VERSION_AND_LOCK_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
owner = _stack_item_1;
uint32_t type_version = (uint32_t)CURRENT_OPERAND0_32();
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(type_version != 0);
if (!LOCK_OBJECT(owner_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
PyTypeObject *tp = Py_TYPE(owner_o);
if (FT_ATOMIC_LOAD_UINT_RELAXED(tp->tp_version_tag) != type_version) {
UNLOCK_OBJECT(owner_o);
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TYPE_VERSION_AND_LOCK_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
owner = _stack_item_2;
uint32_t type_version = (uint32_t)CURRENT_OPERAND0_32();
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(type_version != 0);
if (!LOCK_OBJECT(owner_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
PyTypeObject *tp = Py_TYPE(owner_o);
if (FT_ATOMIC_LOAD_UINT_RELAXED(tp->tp_version_tag) != type_version) {
UNLOCK_OBJECT(owner_o);
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
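/* _CHECK_MANAGED_OBJECT_HAS_VALUES: deoptimize unless the owner's inline
   values storage is still valid, i.e. its attributes have not been migrated
   into a materialized dict. */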
case _CHECK_MANAGED_OBJECT_HAS_VALUES_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
owner = stack_pointer[-1];
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(Py_TYPE(owner_o)->tp_dictoffset < 0);
assert(Py_TYPE(owner_o)->tp_flags & Py_TPFLAGS_INLINE_VALUES);
if (!FT_ATOMIC_LOAD_UINT8(_PyObject_InlineValues(owner_o)->valid)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_MANAGED_OBJECT_HAS_VALUES_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
owner = _stack_item_0;
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(Py_TYPE(owner_o)->tp_dictoffset < 0);
assert(Py_TYPE(owner_o)->tp_flags & Py_TPFLAGS_INLINE_VALUES);
if (!FT_ATOMIC_LOAD_UINT8(_PyObject_InlineValues(owner_o)->valid)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_MANAGED_OBJECT_HAS_VALUES_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
owner = _stack_item_1;
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(Py_TYPE(owner_o)->tp_dictoffset < 0);
assert(Py_TYPE(owner_o)->tp_flags & Py_TPFLAGS_INLINE_VALUES);
if (!FT_ATOMIC_LOAD_UINT8(_PyObject_InlineValues(owner_o)->valid)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_MANAGED_OBJECT_HAS_VALUES_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
owner = _stack_item_2;
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(Py_TYPE(owner_o)->tp_dictoffset < 0);
assert(Py_TYPE(owner_o)->tp_flags & Py_TPFLAGS_INLINE_VALUES);
if (!FT_ATOMIC_LOAD_UINT8(_PyObject_InlineValues(owner_o)->valid)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
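/* _LOAD_ATTR_INSTANCE_VALUE: read the attribute straight out of the owner's
   inline values at the byte offset cached in the first operand.  On
   free-threaded builds the reference must be acquired with
   _Py_TryIncrefCompareStackRef, which fails if the slot changes under us;
   that failure deoptimizes rather than retrying. */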
case _LOAD_ATTR_INSTANCE_VALUE_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef o;
owner = stack_pointer[-1];
uint16_t offset = (uint16_t)CURRENT_OPERAND0_16();
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
PyObject **value_ptr = (PyObject**)(((char *)owner_o) + offset);
PyObject *attr_o = FT_ATOMIC_LOAD_PTR_ACQUIRE(*value_ptr);
if (attr_o == NULL) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
#ifdef Py_GIL_DISABLED
int increfed = _Py_TryIncrefCompareStackRef(value_ptr, attr_o, &attr);
if (!increfed) {
if (true) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
}
#else
attr = PyStackRef_FromPyObjectNew(attr_o);
#endif
STAT_INC(LOAD_ATTR, hit);
o = owner;
_tos_cache1 = o;
_tos_cache0 = attr;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_ATTR_INSTANCE_VALUE_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef o;
_PyStackRef _stack_item_0 = _tos_cache0;
owner = _stack_item_0;
uint16_t offset = (uint16_t)CURRENT_OPERAND0_16();
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
PyObject **value_ptr = (PyObject**)(((char *)owner_o) + offset);
PyObject *attr_o = FT_ATOMIC_LOAD_PTR_ACQUIRE(*value_ptr);
if (attr_o == NULL) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
#ifdef Py_GIL_DISABLED
int increfed = _Py_TryIncrefCompareStackRef(value_ptr, attr_o, &attr);
if (!increfed) {
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
}
#else
attr = PyStackRef_FromPyObjectNew(attr_o);
#endif
STAT_INC(LOAD_ATTR, hit);
o = owner;
_tos_cache1 = o;
_tos_cache0 = attr;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_ATTR_INSTANCE_VALUE_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef o;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
owner = _stack_item_1;
uint16_t offset = (uint16_t)CURRENT_OPERAND0_16();
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
PyObject **value_ptr = (PyObject**)(((char *)owner_o) + offset);
PyObject *attr_o = FT_ATOMIC_LOAD_PTR_ACQUIRE(*value_ptr);
if (attr_o == NULL) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
#ifdef Py_GIL_DISABLED
int increfed = _Py_TryIncrefCompareStackRef(value_ptr, attr_o, &attr);
if (!increfed) {
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
}
#else
attr = PyStackRef_FromPyObjectNew(attr_o);
#endif
STAT_INC(LOAD_ATTR, hit);
o = owner;
_tos_cache2 = o;
_tos_cache1 = attr;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
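/* _LOAD_ATTR_MODULE: specialized module attribute access.  Guards that the
   owner still uses module getattr and that md_dict's keys version matches
   the cached dict_version, then reads the value directly from the unicode
   entry at the cached index. */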
case _LOAD_ATTR_MODULE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef _stack_item_0 = _tos_cache0;
owner = _stack_item_0;
uint32_t dict_version = (uint32_t)CURRENT_OPERAND0_32();
uint16_t index = (uint16_t)CURRENT_OPERAND1_16();
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
if (Py_TYPE(owner_o)->tp_getattro != PyModule_Type.tp_getattro) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
PyDictObject *dict = (PyDictObject *)((PyModuleObject *)owner_o)->md_dict;
assert(dict != NULL);
PyDictKeysObject *keys = FT_ATOMIC_LOAD_PTR_ACQUIRE(dict->ma_keys);
if (FT_ATOMIC_LOAD_UINT32_RELAXED(keys->dk_version) != dict_version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
assert(keys->dk_kind == DICT_KEYS_UNICODE);
assert(index < FT_ATOMIC_LOAD_SSIZE_RELAXED(keys->dk_nentries));
PyDictUnicodeEntry *ep = DK_UNICODE_ENTRIES(keys) + index;
PyObject *attr_o = FT_ATOMIC_LOAD_PTR_RELAXED(ep->me_value);
if (attr_o == NULL) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
#ifdef Py_GIL_DISABLED
int increfed = _Py_TryIncrefCompareStackRef(&ep->me_value, attr_o, &attr);
if (!increfed) {
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
}
#else
attr = PyStackRef_FromPyObjectNew(attr_o);
#endif
STAT_INC(LOAD_ATTR, hit);
stack_pointer[0] = attr;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(owner);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = attr;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
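/* _LOAD_ATTR_WITH_HINT: the operand caches a probable index ("hint") into
   the managed dict's key table.  The checks below validate, in order: a
   managed dict exists, the hint is in range, the keys are unicode, and the
   hinted entry still maps the expected name to a live value; any mismatch
   deoptimizes. */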
case _LOAD_ATTR_WITH_HINT_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef o;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
owner = _stack_item_0;
uint16_t hint = (uint16_t)CURRENT_OPERAND0_16();
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(Py_TYPE(owner_o)->tp_flags & Py_TPFLAGS_MANAGED_DICT);
PyDictObject *dict = _PyObject_GetManagedDict(owner_o);
if (dict == NULL) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
PyDictKeysObject *dk = FT_ATOMIC_LOAD_PTR(dict->ma_keys);
assert(PyDict_CheckExact((PyObject *)dict));
#ifdef Py_GIL_DISABLED
if (!_Py_IsOwnedByCurrentThread((PyObject *)dict) && !_PyObject_GC_IS_SHARED(dict)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
#endif
PyObject *attr_o;
if (hint >= (size_t)FT_ATOMIC_LOAD_SSIZE_RELAXED(dk->dk_nentries)) {
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
}
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg>>1);
if (dk->dk_kind != DICT_KEYS_UNICODE) {
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
}
PyDictUnicodeEntry *ep = DK_UNICODE_ENTRIES(dk) + hint;
if (FT_ATOMIC_LOAD_PTR_RELAXED(ep->me_key) != name) {
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
}
attr_o = FT_ATOMIC_LOAD_PTR(ep->me_value);
if (attr_o == NULL) {
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
}
STAT_INC(LOAD_ATTR, hit);
#ifdef Py_GIL_DISABLED
int increfed = _Py_TryIncrefCompareStackRef(&ep->me_value, attr_o, &attr);
if (!increfed) {
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
}
#else
attr = PyStackRef_FromPyObjectNew(attr_o);
#endif
o = owner;
_tos_cache1 = o;
_tos_cache0 = attr;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_ATTR_SLOT_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef _stack_item_0 = _tos_cache0;
owner = _stack_item_0;
uint16_t index = (uint16_t)CURRENT_OPERAND0_16();
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
PyObject **addr = (PyObject **)((char *)owner_o + index);
PyObject *attr_o = FT_ATOMIC_LOAD_PTR(*addr);
if (attr_o == NULL) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
#ifdef Py_GIL_DISABLED
int increfed = _Py_TryIncrefCompareStackRef(addr, attr_o, &attr);
if (!increfed) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
#else
attr = PyStackRef_FromPyObjectNew(attr_o);
#endif
STAT_INC(LOAD_ATTR, hit);
stack_pointer[0] = owner;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyStackRef tmp = owner;
owner = attr;
stack_pointer[-1] = owner;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = attr;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
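/* _CHECK_ATTR_CLASS: like _GUARD_TYPE_VERSION, except the owner itself is
   the class being loaded from, so it must first be verified to be a type
   before its tp_version_tag can be compared. */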
case _CHECK_ATTR_CLASS_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
owner = stack_pointer[-1];
uint32_t type_version = (uint32_t)CURRENT_OPERAND0_32();
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
if (!PyType_Check(owner_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
assert(type_version != 0);
if (FT_ATOMIC_LOAD_UINT_RELAXED(((PyTypeObject *)owner_o)->tp_version_tag) != type_version) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_ATTR_CLASS_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
owner = _stack_item_0;
uint32_t type_version = (uint32_t)CURRENT_OPERAND0_32();
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
if (!PyType_Check(owner_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
assert(type_version != 0);
if (FT_ATOMIC_LOAD_UINT_RELAXED(((PyTypeObject *)owner_o)->tp_version_tag) != type_version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_ATTR_CLASS_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
owner = _stack_item_1;
uint32_t type_version = (uint32_t)CURRENT_OPERAND0_32();
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
if (!PyType_Check(owner_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
assert(type_version != 0);
if (FT_ATOMIC_LOAD_UINT_RELAXED(((PyTypeObject *)owner_o)->tp_version_tag) != type_version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_ATTR_CLASS_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
owner = _stack_item_2;
uint32_t type_version = (uint32_t)CURRENT_OPERAND0_32();
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
if (!PyType_Check(owner_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
assert(type_version != 0);
if (FT_ATOMIC_LOAD_UINT_RELAXED(((PyTypeObject *)owner_o)->tp_version_tag) != type_version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
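/* _LOAD_ATTR_CLASS: the descriptor found at specialization time is cached
   in the 64-bit operand; the load just takes a new reference to it and
   drops the owner. */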
case _LOAD_ATTR_CLASS_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef _stack_item_0 = _tos_cache0;
owner = _stack_item_0;
PyObject *descr = (PyObject *)CURRENT_OPERAND0_64();
STAT_INC(LOAD_ATTR, hit);
assert(descr != NULL);
attr = PyStackRef_FromPyObjectNew(descr);
stack_pointer[0] = owner;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyStackRef tmp = owner;
owner = attr;
stack_pointer[-1] = owner;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = attr;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
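/* _LOAD_ATTR_PROPERTY_FRAME: the cached operand is the property's fget,
   known to be a plain Python function.  Rather than calling it, this uop
   pushes a new interpreter frame with `owner` as the sole argument; the
   guards reject any fget whose code object cannot be entered that way
   (varargs, keyword-only args, wrong positional count, or insufficient
   thread stack space). */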
case _LOAD_ATTR_PROPERTY_FRAME_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef new_frame;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
owner = _stack_item_0;
PyObject *fget = (PyObject *)CURRENT_OPERAND0_64();
assert((oparg & 1) == 0);
assert(Py_IS_TYPE(fget, &PyFunction_Type));
PyFunctionObject *f = (PyFunctionObject *)fget;
PyCodeObject *code = (PyCodeObject *)f->func_code;
if ((code->co_flags & (CO_VARKEYWORDS | CO_VARARGS | CO_OPTIMIZED)) != CO_OPTIMIZED) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
if (code->co_kwonlyargcount) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
if (code->co_argcount != 1) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
if (!_PyThreadState_HasStackSpace(tstate, code->co_framesize)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(LOAD_ATTR, hit);
_PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, PyStackRef_FromPyObjectNew(fget), 1, frame);
pushed_frame->localsplus[0] = owner;
new_frame = PyStackRef_Wrap(pushed_frame);
_tos_cache0 = new_frame;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN is not a viable micro-op for tier 2 because it has too many cache entries */
case _GUARD_DORV_NO_DICT_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
owner = stack_pointer[-1];
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(Py_TYPE(owner_o)->tp_dictoffset < 0);
assert(Py_TYPE(owner_o)->tp_flags & Py_TPFLAGS_INLINE_VALUES);
if (_PyObject_GetManagedDict(owner_o) ||
!FT_ATOMIC_LOAD_UINT8(_PyObject_InlineValues(owner_o)->valid)) {
UNLOCK_OBJECT(owner_o);
if (true) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_DORV_NO_DICT_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
owner = _stack_item_0;
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(Py_TYPE(owner_o)->tp_dictoffset < 0);
assert(Py_TYPE(owner_o)->tp_flags & Py_TPFLAGS_INLINE_VALUES);
if (_PyObject_GetManagedDict(owner_o) ||
!FT_ATOMIC_LOAD_UINT8(_PyObject_InlineValues(owner_o)->valid)) {
UNLOCK_OBJECT(owner_o);
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_DORV_NO_DICT_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
owner = _stack_item_1;
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(Py_TYPE(owner_o)->tp_dictoffset < 0);
assert(Py_TYPE(owner_o)->tp_flags & Py_TPFLAGS_INLINE_VALUES);
if (_PyObject_GetManagedDict(owner_o) ||
!FT_ATOMIC_LOAD_UINT8(_PyObject_InlineValues(owner_o)->valid)) {
UNLOCK_OBJECT(owner_o);
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_DORV_NO_DICT_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
owner = _stack_item_2;
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(Py_TYPE(owner_o)->tp_dictoffset < 0);
assert(Py_TYPE(owner_o)->tp_flags & Py_TPFLAGS_INLINE_VALUES);
if (_PyObject_GetManagedDict(owner_o) ||
!FT_ATOMIC_LOAD_UINT8(_PyObject_InlineValues(owner_o)->valid)) {
UNLOCK_OBJECT(owner_o);
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
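/* The _STORE_ATTR_* specializations below share one shape: swap the new
   value into its slot with a release store, publish the owner back as the
   cached result, and only Py_XDECREF the displaced value once the stack
   pointer is synced, since the decref can run arbitrary destructor code.
   The owner reaching _STORE_ATTR_INSTANCE_VALUE is presumably still locked
   by _GUARD_TYPE_VERSION_AND_LOCK, hence the bare UNLOCK_OBJECT on the
   success path. */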
case _STORE_ATTR_INSTANCE_VALUE_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef value;
_PyStackRef o;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
owner = _stack_item_1;
value = _stack_item_0;
uint16_t offset = (uint16_t)CURRENT_OPERAND0_16();
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
STAT_INC(STORE_ATTR, hit);
assert(_PyObject_GetManagedDict(owner_o) == NULL);
PyObject **value_ptr = (PyObject**)(((char *)owner_o) + offset);
PyObject *old_value = *value_ptr;
FT_ATOMIC_STORE_PTR_RELEASE(*value_ptr, PyStackRef_AsPyObjectSteal(value));
if (old_value == NULL) {
PyDictValues *values = _PyObject_InlineValues(owner_o);
Py_ssize_t index = value_ptr - values->values;
_PyDictValues_AddToInsertionOrder(values, index);
}
UNLOCK_OBJECT(owner_o);
o = owner;
stack_pointer[0] = o;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
Py_XDECREF(old_value);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = o;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _STORE_ATTR_WITH_HINT_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef value;
_PyStackRef o;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
owner = _stack_item_1;
value = _stack_item_0;
uint16_t hint = (uint16_t)CURRENT_OPERAND0_16();
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(Py_TYPE(owner_o)->tp_flags & Py_TPFLAGS_MANAGED_DICT);
PyDictObject *dict = _PyObject_GetManagedDict(owner_o);
if (dict == NULL) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
if (!LOCK_OBJECT(dict)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
assert(PyDict_CheckExact((PyObject *)dict));
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
if (hint >= (size_t)dict->ma_keys->dk_nentries ||
!DK_IS_UNICODE(dict->ma_keys)) {
UNLOCK_OBJECT(dict);
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
}
PyDictUnicodeEntry *ep = DK_UNICODE_ENTRIES(dict->ma_keys) + hint;
if (ep->me_key != name) {
UNLOCK_OBJECT(dict);
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
}
PyObject *old_value = ep->me_value;
if (old_value == NULL) {
UNLOCK_OBJECT(dict);
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
}
stack_pointer[0] = value;
stack_pointer[1] = owner;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyDict_NotifyEvent(PyDict_EVENT_MODIFIED, dict, name, PyStackRef_AsPyObjectBorrow(value));
stack_pointer = _PyFrame_GetStackPointer(frame);
FT_ATOMIC_STORE_PTR_RELEASE(ep->me_value, PyStackRef_AsPyObjectSteal(value));
UNLOCK_OBJECT(dict);
STAT_INC(STORE_ATTR, hit);
o = owner;
stack_pointer[-2] = o;
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
Py_XDECREF(old_value);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = o;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _STORE_ATTR_SLOT_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef value;
_PyStackRef o;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
owner = _stack_item_1;
value = _stack_item_0;
uint16_t index = (uint16_t)CURRENT_OPERAND0_16();
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
if (!LOCK_OBJECT(owner_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
char *addr = (char *)owner_o + index;
STAT_INC(STORE_ATTR, hit);
PyObject *old_value = *(PyObject **)addr;
FT_ATOMIC_STORE_PTR_RELEASE(*(PyObject **)addr, PyStackRef_AsPyObjectSteal(value));
UNLOCK_OBJECT(owner_o);
o = owner;
stack_pointer[0] = o;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
Py_XDECREF(old_value);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = o;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
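/* _COMPARE_OP: generic rich comparison.  oparg packs the comparison code in
   the high bits (oparg >> 5 is Py_LT..Py_GE) and a coerce-to-bool flag in
   bit 4 (oparg & 16). */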
case _COMPARE_OP_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
right = _stack_item_1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert((oparg >> 5) <= Py_GE);
stack_pointer[0] = left;
stack_pointer[1] = right;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = PyObject_RichCompare(left_o, right_o, oparg >> 5);
_PyStackRef tmp = right;
right = PyStackRef_NULL;
stack_pointer[-1] = right;
PyStackRef_CLOSE(tmp);
tmp = left;
left = PyStackRef_NULL;
stack_pointer[-2] = left;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
if (oparg & 16) {
_PyFrame_SetStackPointer(frame, stack_pointer);
int res_bool = PyObject_IsTrue(res_o);
Py_DECREF(res_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_bool < 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = res_bool ? PyStackRef_True : PyStackRef_False;
}
else {
res = PyStackRef_FromPyObjectSteal(res_o);
}
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
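/* _COMPARE_OP_FLOAT/_COMPARE_OP_INT: COMPARISON_BIT maps the (left, right)
   ordering onto a one-hot bit, so ANDing it with the mask in the low oparg
   bits yields the result without a call.  Both operands are left in the TOS
   cache (l, r) above the result, presumably so that following pop uops can
   discard them without touching the real stack. */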
case _COMPARE_OP_FLOAT_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
oparg = CURRENT_OPARG();
right = stack_pointer[-1];
left = stack_pointer[-2];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
STAT_INC(COMPARE_OP, hit);
double dleft = PyFloat_AS_DOUBLE(left_o);
double dright = PyFloat_AS_DOUBLE(right_o);
int sign_ish = COMPARISON_BIT(dleft, dright);
l = left;
r = right;
res = (sign_ish & oparg) ? PyStackRef_True : PyStackRef_False;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COMPARE_OP_FLOAT_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
right = _stack_item_0;
left = stack_pointer[-1];
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
STAT_INC(COMPARE_OP, hit);
double dleft = PyFloat_AS_DOUBLE(left_o);
double dright = PyFloat_AS_DOUBLE(right_o);
int sign_ish = COMPARISON_BIT(dleft, dright);
l = left;
r = right;
res = (sign_ish & oparg) ? PyStackRef_True : PyStackRef_False;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COMPARE_OP_FLOAT_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
right = _stack_item_1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
STAT_INC(COMPARE_OP, hit);
double dleft = PyFloat_AS_DOUBLE(left_o);
double dright = PyFloat_AS_DOUBLE(right_o);
int sign_ish = COMPARISON_BIT(dleft, dright);
l = left;
r = right;
res = (sign_ish & oparg) ? PyStackRef_True : PyStackRef_False;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COMPARE_OP_INT_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
right = _stack_item_1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(_PyLong_IsCompact((PyLongObject *)left_o));
assert(_PyLong_IsCompact((PyLongObject *)right_o));
STAT_INC(COMPARE_OP, hit);
assert(_PyLong_DigitCount((PyLongObject *)left_o) <= 1 &&
_PyLong_DigitCount((PyLongObject *)right_o) <= 1);
Py_ssize_t ileft = _PyLong_CompactValue((PyLongObject *)left_o);
Py_ssize_t iright = _PyLong_CompactValue((PyLongObject *)right_o);
int sign_ish = COMPARISON_BIT(ileft, iright);
l = left;
r = right;
res = (sign_ish & oparg) ? PyStackRef_True : PyStackRef_False;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COMPARE_OP_STR_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef res;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
right = _stack_item_1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
STAT_INC(COMPARE_OP, hit);
int eq = _PyUnicode_Equal(left_o, right_o);
assert((oparg >> 5) == Py_EQ || (oparg >> 5) == Py_NE);
l = left;
r = right;
assert(eq == 0 || eq == 1);
assert((oparg & 0xf) == COMPARISON_NOT_EQUALS || (oparg & 0xf) == COMPARISON_EQUALS);
assert(COMPARISON_NOT_EQUALS + 1 == COMPARISON_EQUALS);
res = ((COMPARISON_NOT_EQUALS + eq) & oparg) ? PyStackRef_True : PyStackRef_False;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
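/* _IS_OP: identity test; XOR-ing with oparg (1 for `is not`) folds the
   negation into the flag computation. */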
case _IS_OP_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef b;
_PyStackRef l;
_PyStackRef r;
oparg = CURRENT_OPARG();
right = stack_pointer[-1];
left = stack_pointer[-2];
int res = Py_Is(PyStackRef_AsPyObjectBorrow(left), PyStackRef_AsPyObjectBorrow(right)) ^ oparg;
b = res ? PyStackRef_True : PyStackRef_False;
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = b;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _IS_OP_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef b;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
right = _stack_item_0;
left = stack_pointer[-1];
int res = Py_Is(PyStackRef_AsPyObjectBorrow(left), PyStackRef_AsPyObjectBorrow(right)) ^ oparg;
b = res ? PyStackRef_True : PyStackRef_False;
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = b;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _IS_OP_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef b;
_PyStackRef l;
_PyStackRef r;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
right = _stack_item_1;
left = _stack_item_0;
int res = Py_Is(PyStackRef_AsPyObjectBorrow(left), PyStackRef_AsPyObjectBorrow(right)) ^ oparg;
b = res ? PyStackRef_True : PyStackRef_False;
l = left;
r = right;
_tos_cache2 = r;
_tos_cache1 = l;
_tos_cache0 = b;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
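/* _CONTAINS_OP and its SET/DICT specializations: membership test using the
   same `res ^ oparg` trick for `not in`.  The operands are spilled to the
   real stack around the call because __contains__ can run arbitrary
   code. */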
case _CONTAINS_OP_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef b;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
right = _stack_item_1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
stack_pointer[0] = left;
stack_pointer[1] = right;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int res = PySequence_Contains(right_o, left_o);
_PyStackRef tmp = right;
right = PyStackRef_NULL;
stack_pointer[-1] = right;
PyStackRef_CLOSE(tmp);
tmp = left;
left = PyStackRef_NULL;
stack_pointer[-2] = left;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (res < 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
b = (res ^ oparg) ? PyStackRef_True : PyStackRef_False;
_tos_cache0 = b;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_ANY_SET_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
tos = stack_pointer[-1];
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PyAnySet_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = tos;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_ANY_SET_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
_PyStackRef _stack_item_0 = _tos_cache0;
tos = _stack_item_0;
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PyAnySet_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = tos;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = tos;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_ANY_SET_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
tos = _stack_item_1;
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PyAnySet_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = tos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = tos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_TOS_ANY_SET_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef tos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
tos = _stack_item_2;
PyObject *o = PyStackRef_AsPyObjectBorrow(tos);
if (!PyAnySet_CheckExact(o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = tos;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = tos;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CONTAINS_OP_SET_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef b;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
right = _stack_item_1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyAnySet_CheckExact(right_o));
STAT_INC(CONTAINS_OP, hit);
stack_pointer[0] = left;
stack_pointer[1] = right;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int res = _PySet_Contains((PySetObject *)right_o, left_o);
_PyStackRef tmp = right;
right = PyStackRef_NULL;
stack_pointer[-1] = right;
PyStackRef_CLOSE(tmp);
tmp = left;
left = PyStackRef_NULL;
stack_pointer[-2] = left;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (res < 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
b = (res ^ oparg) ? PyStackRef_True : PyStackRef_False;
_tos_cache0 = b;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CONTAINS_OP_DICT_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef b;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
right = _stack_item_1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyDict_CheckExact(right_o));
STAT_INC(CONTAINS_OP, hit);
stack_pointer[0] = left;
stack_pointer[1] = right;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int res = PyDict_Contains(right_o, left_o);
_PyStackRef tmp = right;
right = PyStackRef_NULL;
stack_pointer[-1] = right;
PyStackRef_CLOSE(tmp);
tmp = left;
left = PyStackRef_NULL;
stack_pointer[-2] = left;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (res < 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
b = (res ^ oparg) ? PyStackRef_True : PyStackRef_False;
_tos_cache0 = b;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
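/* _CHECK_EG_MATCH implements `except*` matching: it splits the exception
   group into (rest, match), treats a NULL split or a failed validity check
   as an error, and installs a non-None match as the handled exception. */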
case _CHECK_EG_MATCH_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef match_type_st;
_PyStackRef exc_value_st;
_PyStackRef rest;
_PyStackRef match;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
match_type_st = _stack_item_1;
exc_value_st = _stack_item_0;
PyObject *exc_value = PyStackRef_AsPyObjectBorrow(exc_value_st);
PyObject *match_type = PyStackRef_AsPyObjectBorrow(match_type_st);
stack_pointer[0] = exc_value_st;
stack_pointer[1] = match_type_st;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = _PyEval_CheckExceptStarTypeValid(tstate, match_type);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err < 0) {
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyStackRef tmp = match_type_st;
match_type_st = PyStackRef_NULL;
stack_pointer[-1] = match_type_st;
PyStackRef_CLOSE(tmp);
tmp = exc_value_st;
exc_value_st = PyStackRef_NULL;
stack_pointer[-2] = exc_value_st;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
PyObject *match_o = NULL;
PyObject *rest_o = NULL;
_PyFrame_SetStackPointer(frame, stack_pointer);
int res = _PyEval_ExceptionGroupMatch(frame, exc_value, match_type,
&match_o, &rest_o);
_PyStackRef tmp = match_type_st;
match_type_st = PyStackRef_NULL;
stack_pointer[-1] = match_type_st;
PyStackRef_CLOSE(tmp);
tmp = exc_value_st;
exc_value_st = PyStackRef_NULL;
stack_pointer[-2] = exc_value_st;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (res < 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
assert((match_o == NULL) == (rest_o == NULL));
if (match_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
if (!Py_IsNone(match_o)) {
_PyFrame_SetStackPointer(frame, stack_pointer);
PyErr_SetHandledException(match_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
}
rest = PyStackRef_FromPyObjectSteal(rest_o);
match = PyStackRef_FromPyObjectSteal(match_o);
_tos_cache1 = match;
_tos_cache0 = rest;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_EXC_MATCH_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef right;
_PyStackRef left;
_PyStackRef b;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
right = _stack_item_1;
left = _stack_item_0;
PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
assert(PyExceptionInstance_Check(left_o));
stack_pointer[0] = left;
stack_pointer[1] = right;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = _PyEval_CheckExceptTypeValid(tstate, right_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err < 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_PyFrame_SetStackPointer(frame, stack_pointer);
int res = PyErr_GivenExceptionMatches(left_o, right_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(right);
stack_pointer = _PyFrame_GetStackPointer(frame);
b = res ? PyStackRef_True : PyStackRef_False;
_tos_cache1 = b;
_tos_cache0 = left;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _IMPORT_NAME_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef fromlist;
_PyStackRef level;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
fromlist = _stack_item_1;
level = _stack_item_0;
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
stack_pointer[0] = level;
stack_pointer[1] = fromlist;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = _PyEval_ImportName(tstate, frame, name,
PyStackRef_AsPyObjectBorrow(fromlist),
PyStackRef_AsPyObjectBorrow(level));
_PyStackRef tmp = fromlist;
fromlist = PyStackRef_NULL;
stack_pointer[-1] = fromlist;
PyStackRef_CLOSE(tmp);
tmp = level;
level = PyStackRef_NULL;
stack_pointer[-2] = level;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _IMPORT_FROM_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef from;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
from = _stack_item_0;
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
stack_pointer[0] = from;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = _PyEval_ImportFrom(tstate, PyStackRef_AsPyObjectBorrow(from), name);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache1 = res;
_tos_cache0 = from;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _POP_JUMP_IF_FALSE is not a viable micro-op for tier 2 because it is replaced */
/* _POP_JUMP_IF_TRUE is not a viable micro-op for tier 2 because it is replaced */
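/* _IS_NONE: replace TOS with True/False.  The None branch does not close the
   value (None is immortal); the other branch swaps the bool in and closes
   the old value with the stack pointer synced. */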
case _IS_NONE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef b;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
if (PyStackRef_IsNone(value)) {
b = PyStackRef_True;
}
else {
b = PyStackRef_False;
stack_pointer[0] = value;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyStackRef tmp = value;
value = b;
stack_pointer[-1] = value;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
}
_tos_cache0 = b;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _JUMP_BACKWARD_NO_INTERRUPT is not a viable micro-op for tier 2 because it is replaced */
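/*
 * Note on the _rNM suffixes below: each variant appears to encode
 * top-of-stack register caching. A case named <UOP>_rNM expects N values
 * cached in the _tos_cache0.._tos_cache2 registers on entry
 * (CHECK_CURRENT_CACHED_VALUES(N)) and leaves M values cached on exit
 * (SET_CURRENT_CACHED_VALUES(M)); any remaining operands are read from
 * and written to the in-memory stack through stack_pointer.
 */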
case _GET_LEN_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef obj;
_PyStackRef len;
_PyStackRef _stack_item_0 = _tos_cache0;
obj = _stack_item_0;
stack_pointer[0] = obj;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
Py_ssize_t len_i = PyObject_Length(PyStackRef_AsPyObjectBorrow(obj));
stack_pointer = _PyFrame_GetStackPointer(frame);
if (len_i < 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
PyObject *len_o = PyLong_FromSsize_t(len_i);
if (len_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
len = PyStackRef_FromPyObjectSteal(len_o);
_tos_cache1 = len;
_tos_cache0 = obj;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _MATCH_CLASS_r31: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef names;
_PyStackRef type;
_PyStackRef subject;
_PyStackRef attrs;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
oparg = CURRENT_OPARG();
names = _stack_item_2;
type = _stack_item_1;
subject = _stack_item_0;
assert(PyTuple_CheckExact(PyStackRef_AsPyObjectBorrow(names)));
stack_pointer[0] = subject;
stack_pointer[1] = type;
stack_pointer[2] = names;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *attrs_o = _PyEval_MatchClass(tstate,
PyStackRef_AsPyObjectBorrow(subject),
PyStackRef_AsPyObjectBorrow(type), oparg,
PyStackRef_AsPyObjectBorrow(names));
_PyStackRef tmp = names;
names = PyStackRef_NULL;
stack_pointer[-1] = names;
PyStackRef_CLOSE(tmp);
tmp = type;
type = PyStackRef_NULL;
stack_pointer[-2] = type;
PyStackRef_CLOSE(tmp);
tmp = subject;
subject = PyStackRef_NULL;
stack_pointer[-3] = subject;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (attrs_o) {
assert(PyTuple_CheckExact(attrs_o));
attrs = PyStackRef_FromPyObjectSteal(attrs_o);
}
else {
if (_PyErr_Occurred(tstate)) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
attrs = PyStackRef_None;
}
_tos_cache0 = attrs;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _MATCH_MAPPING_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef subject;
_PyStackRef res;
subject = stack_pointer[-1];
int match = PyStackRef_TYPE(subject)->tp_flags & Py_TPFLAGS_MAPPING;
res = match ? PyStackRef_True : PyStackRef_False;
_tos_cache1 = res;
_tos_cache0 = subject;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _MATCH_MAPPING_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef subject;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
subject = _stack_item_0;
int match = PyStackRef_TYPE(subject)->tp_flags & Py_TPFLAGS_MAPPING;
res = match ? PyStackRef_True : PyStackRef_False;
_tos_cache1 = res;
_tos_cache0 = subject;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _MATCH_MAPPING_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef subject;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
subject = _stack_item_1;
int match = PyStackRef_TYPE(subject)->tp_flags & Py_TPFLAGS_MAPPING;
res = match ? PyStackRef_True : PyStackRef_False;
_tos_cache2 = res;
_tos_cache1 = subject;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _MATCH_SEQUENCE_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef subject;
_PyStackRef res;
subject = stack_pointer[-1];
int match = PyStackRef_TYPE(subject)->tp_flags & Py_TPFLAGS_SEQUENCE;
res = match ? PyStackRef_True : PyStackRef_False;
_tos_cache1 = res;
_tos_cache0 = subject;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _MATCH_SEQUENCE_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef subject;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
subject = _stack_item_0;
int match = PyStackRef_TYPE(subject)->tp_flags & Py_TPFLAGS_SEQUENCE;
res = match ? PyStackRef_True : PyStackRef_False;
_tos_cache1 = res;
_tos_cache0 = subject;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _MATCH_SEQUENCE_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef subject;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
subject = _stack_item_1;
int match = PyStackRef_TYPE(subject)->tp_flags & Py_TPFLAGS_SEQUENCE;
res = match ? PyStackRef_True : PyStackRef_False;
_tos_cache2 = res;
_tos_cache1 = subject;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _MATCH_KEYS_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef keys;
_PyStackRef subject;
_PyStackRef values_or_none;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
keys = _stack_item_1;
subject = _stack_item_0;
stack_pointer[0] = subject;
stack_pointer[1] = keys;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *values_or_none_o = _PyEval_MatchKeys(tstate,
PyStackRef_AsPyObjectBorrow(subject), PyStackRef_AsPyObjectBorrow(keys));
stack_pointer = _PyFrame_GetStackPointer(frame);
if (values_or_none_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
values_or_none = PyStackRef_FromPyObjectSteal(values_or_none_o);
_tos_cache2 = values_or_none;
_tos_cache1 = keys;
_tos_cache0 = subject;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GET_ITER_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iterable;
_PyStackRef iter;
_PyStackRef index_or_null;
_PyStackRef _stack_item_0 = _tos_cache0;
iterable = _stack_item_0;
#ifdef Py_STATS
stack_pointer[0] = iterable;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_Py_GatherStats_GetIter(iterable);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
#endif
PyTypeObject *tp = PyStackRef_TYPE(iterable);
if (tp == &PyTuple_Type || tp == &PyList_Type) {
iter = iterable;
index_or_null = PyStackRef_TagInt(0);
}
else {
stack_pointer[0] = iterable;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *iter_o = PyObject_GetIter(PyStackRef_AsPyObjectBorrow(iterable));
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(iterable);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (iter_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
iter = PyStackRef_FromPyObjectSteal(iter_o);
index_or_null = PyStackRef_NULL;
}
_tos_cache1 = index_or_null;
_tos_cache0 = iter;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GET_YIELD_FROM_ITER_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iterable;
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
iterable = _stack_item_0;
PyObject *iterable_o = PyStackRef_AsPyObjectBorrow(iterable);
if (PyCoro_CheckExact(iterable_o)) {
if (!(_PyFrame_GetCode(frame)->co_flags & (CO_COROUTINE | CO_ITERABLE_COROUTINE))) {
stack_pointer[0] = iterable;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyErr_SetString(tstate, PyExc_TypeError,
"cannot 'yield from' a coroutine object "
"in a non-coroutine generator");
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
iter = iterable;
}
else if (PyGen_CheckExact(iterable_o)) {
iter = iterable;
}
else {
stack_pointer[0] = iterable;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *iter_o = PyObject_GetIter(iterable_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (iter_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
iter = PyStackRef_FromPyObjectSteal(iter_o);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyStackRef tmp = iterable;
iterable = iter;
stack_pointer[-1] = iterable;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
}
_tos_cache0 = iter;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _FOR_ITER is not a viable micro-op for tier 2 because it is replaced */
case _FOR_ITER_TIER_TWO_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef next;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
null_or_index = _stack_item_1;
iter = _stack_item_0;
stack_pointer[0] = iter;
stack_pointer[1] = null_or_index;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyStackRef item = _PyForIter_VirtualIteratorNext(tstate, frame, iter, &null_or_index);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (!PyStackRef_IsValid(item)) {
if (PyStackRef_IsError(item)) {
stack_pointer[-1] = null_or_index;
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
JUMP_TO_JUMP_TARGET();
}
}
next = item;
_tos_cache2 = next;
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _INSTRUMENTED_FOR_ITER is not a viable micro-op for tier 2 because it is instrumented */
case _ITER_CHECK_LIST_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
null_or_index = stack_pointer[-1];
iter = stack_pointer[-2];
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(iter_o) != &PyList_Type) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
assert(PyStackRef_IsTaggedInt(null_or_index));
#ifdef Py_GIL_DISABLED
if (!_Py_IsOwnedByCurrentThread(iter_o) && !_PyObject_GC_IS_SHARED(iter_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
#endif
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _ITER_CHECK_LIST_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
null_or_index = _stack_item_0;
iter = stack_pointer[-1];
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(iter_o) != &PyList_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = null_or_index;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
assert(PyStackRef_IsTaggedInt(null_or_index));
#ifdef Py_GIL_DISABLED
if (!_Py_IsOwnedByCurrentThread(iter_o) && !_PyObject_GC_IS_SHARED(iter_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = null_or_index;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
#endif
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _ITER_CHECK_LIST_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
null_or_index = _stack_item_1;
iter = _stack_item_0;
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(iter_o) != &PyList_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
assert(PyStackRef_IsTaggedInt(null_or_index));
#ifdef Py_GIL_DISABLED
if (!_Py_IsOwnedByCurrentThread(iter_o) && !_PyObject_GC_IS_SHARED(iter_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
#endif
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _ITER_CHECK_LIST_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
null_or_index = _stack_item_2;
iter = _stack_item_1;
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(iter_o) != &PyList_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = null_or_index;
_tos_cache1 = iter;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
assert(PyStackRef_IsTaggedInt(null_or_index));
#ifdef Py_GIL_DISABLED
if (!_Py_IsOwnedByCurrentThread(iter_o) && !_PyObject_GC_IS_SHARED(iter_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = null_or_index;
_tos_cache1 = iter;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
#endif
_tos_cache2 = null_or_index;
_tos_cache1 = iter;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _ITER_JUMP_LIST is not a viable micro-op for tier 2 because it is replaced */
case _GUARD_NOT_EXHAUSTED_LIST_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
null_or_index = stack_pointer[-1];
iter = stack_pointer[-2];
#ifndef Py_GIL_DISABLED
PyObject *list_o = PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(list_o) == &PyList_Type);
if ((size_t)PyStackRef_UntagInt(null_or_index) >= (size_t)PyList_GET_SIZE(list_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
#endif
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOT_EXHAUSTED_LIST_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
null_or_index = _stack_item_0;
iter = stack_pointer[-1];
#ifndef Py_GIL_DISABLED
PyObject *list_o = PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(list_o) == &PyList_Type);
if ((size_t)PyStackRef_UntagInt(null_or_index) >= (size_t)PyList_GET_SIZE(list_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = null_or_index;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
#endif
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOT_EXHAUSTED_LIST_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
null_or_index = _stack_item_1;
iter = _stack_item_0;
#ifndef Py_GIL_DISABLED
PyObject *list_o = PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(list_o) == &PyList_Type);
if ((size_t)PyStackRef_UntagInt(null_or_index) >= (size_t)PyList_GET_SIZE(list_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
#endif
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOT_EXHAUSTED_LIST_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
null_or_index = _stack_item_2;
iter = _stack_item_1;
#ifndef Py_GIL_DISABLED
PyObject *list_o = PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(list_o) == &PyList_Type);
if ((size_t)PyStackRef_UntagInt(null_or_index) >= (size_t)PyList_GET_SIZE(list_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = null_or_index;
_tos_cache1 = iter;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
#endif
_tos_cache2 = null_or_index;
_tos_cache1 = iter;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _ITER_NEXT_LIST is not a viable micro-op for tier 2 because it is replaced */
case _ITER_NEXT_LIST_TIER_TWO_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef next;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
null_or_index = _stack_item_1;
iter = _stack_item_0;
PyObject *list_o = PyStackRef_AsPyObjectBorrow(iter);
assert(PyList_CheckExact(list_o));
#ifdef Py_GIL_DISABLED
assert(_Py_IsOwnedByCurrentThread((PyObject *)list_o) ||
_PyObject_GC_IS_SHARED(list_o));
STAT_INC(FOR_ITER, hit);
stack_pointer[0] = iter;
stack_pointer[1] = null_or_index;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int result = _PyList_GetItemRefNoLock((PyListObject *)list_o, PyStackRef_UntagInt(null_or_index), &next);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (result <= 0) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
JUMP_TO_JUMP_TARGET();
}
#else
assert(PyStackRef_UntagInt(null_or_index) < PyList_GET_SIZE(list_o));
next = PyStackRef_FromPyObjectNew(PyList_GET_ITEM(list_o, PyStackRef_UntagInt(null_or_index)));
stack_pointer += 2;
#endif
null_or_index = PyStackRef_IncrementTaggedIntNoOverflow(null_or_index);
_tos_cache2 = next;
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _ITER_CHECK_TUPLE_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
null_or_index = stack_pointer[-1];
iter = stack_pointer[-2];
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(iter_o) != &PyTuple_Type) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
assert(PyStackRef_IsTaggedInt(null_or_index));
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _ITER_CHECK_TUPLE_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
null_or_index = _stack_item_0;
iter = stack_pointer[-1];
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(iter_o) != &PyTuple_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = null_or_index;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
assert(PyStackRef_IsTaggedInt(null_or_index));
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _ITER_CHECK_TUPLE_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
null_or_index = _stack_item_1;
iter = _stack_item_0;
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(iter_o) != &PyTuple_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
assert(PyStackRef_IsTaggedInt(null_or_index));
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _ITER_CHECK_TUPLE_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
null_or_index = _stack_item_2;
iter = _stack_item_1;
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(iter_o) != &PyTuple_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = null_or_index;
_tos_cache1 = iter;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
assert(PyStackRef_IsTaggedInt(null_or_index));
_tos_cache2 = null_or_index;
_tos_cache1 = iter;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _ITER_JUMP_TUPLE is not a viable micro-op for tier 2 because it is replaced */
case _GUARD_NOT_EXHAUSTED_TUPLE_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
null_or_index = stack_pointer[-1];
iter = stack_pointer[-2];
PyObject *tuple_o = PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(tuple_o) == &PyTuple_Type);
if ((size_t)PyStackRef_UntagInt(null_or_index) >= (size_t)PyTuple_GET_SIZE(tuple_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOT_EXHAUSTED_TUPLE_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
null_or_index = _stack_item_0;
iter = stack_pointer[-1];
PyObject *tuple_o = PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(tuple_o) == &PyTuple_Type);
if ((size_t)PyStackRef_UntagInt(null_or_index) >= (size_t)PyTuple_GET_SIZE(tuple_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = null_or_index;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOT_EXHAUSTED_TUPLE_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
null_or_index = _stack_item_1;
iter = _stack_item_0;
PyObject *tuple_o = PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(tuple_o) == &PyTuple_Type);
if ((size_t)PyStackRef_UntagInt(null_or_index) >= (size_t)PyTuple_GET_SIZE(tuple_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOT_EXHAUSTED_TUPLE_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
null_or_index = _stack_item_2;
iter = _stack_item_1;
PyObject *tuple_o = PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(tuple_o) == &PyTuple_Type);
if ((size_t)PyStackRef_UntagInt(null_or_index) >= (size_t)PyTuple_GET_SIZE(tuple_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = null_or_index;
_tos_cache1 = iter;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = null_or_index;
_tos_cache1 = iter;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _ITER_NEXT_TUPLE_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef next;
null_or_index = stack_pointer[-1];
iter = stack_pointer[-2];
PyObject *tuple_o = PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(tuple_o) == &PyTuple_Type);
uintptr_t i = PyStackRef_UntagInt(null_or_index);
assert((size_t)i < (size_t)PyTuple_GET_SIZE(tuple_o));
next = PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(tuple_o, i));
null_or_index = PyStackRef_IncrementTaggedIntNoOverflow(null_or_index);
_tos_cache2 = next;
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _ITER_NEXT_TUPLE_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef next;
_PyStackRef _stack_item_0 = _tos_cache0;
null_or_index = _stack_item_0;
iter = stack_pointer[-1];
PyObject *tuple_o = PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(tuple_o) == &PyTuple_Type);
uintptr_t i = PyStackRef_UntagInt(null_or_index);
assert((size_t)i < (size_t)PyTuple_GET_SIZE(tuple_o));
next = PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(tuple_o, i));
null_or_index = PyStackRef_IncrementTaggedIntNoOverflow(null_or_index);
_tos_cache2 = next;
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _ITER_NEXT_TUPLE_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef next;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
null_or_index = _stack_item_1;
iter = _stack_item_0;
PyObject *tuple_o = PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(tuple_o) == &PyTuple_Type);
uintptr_t i = PyStackRef_UntagInt(null_or_index);
assert((size_t)i < (size_t)PyTuple_GET_SIZE(tuple_o));
next = PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(tuple_o, i));
null_or_index = PyStackRef_IncrementTaggedIntNoOverflow(null_or_index);
_tos_cache2 = next;
_tos_cache1 = null_or_index;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _ITER_CHECK_RANGE_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iter;
iter = stack_pointer[-2];
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(r) != &PyRangeIter_Type) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
#ifdef Py_GIL_DISABLED
if (!_PyObject_IsUniquelyReferenced((PyObject *)r)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
#endif
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _ITER_CHECK_RANGE_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
iter = stack_pointer[-1];
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(r) != &PyRangeIter_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
#ifdef Py_GIL_DISABLED
if (!_PyObject_IsUniquelyReferenced((PyObject *)r)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
#endif
_tos_cache1 = _stack_item_0;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _ITER_CHECK_RANGE_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
iter = _stack_item_0;
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(r) != &PyRangeIter_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
#ifdef Py_GIL_DISABLED
if (!_PyObject_IsUniquelyReferenced((PyObject *)r)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
#endif
_tos_cache1 = _stack_item_1;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _ITER_CHECK_RANGE_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
iter = _stack_item_1;
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(r) != &PyRangeIter_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = iter;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
#ifdef Py_GIL_DISABLED
if (!_PyObject_IsUniquelyReferenced((PyObject *)r)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = iter;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
#endif
_tos_cache2 = _stack_item_2;
_tos_cache1 = iter;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _ITER_JUMP_RANGE is not a viable micro-op for tier 2 because it is replaced */
case _GUARD_NOT_EXHAUSTED_RANGE_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iter;
iter = stack_pointer[-2];
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(r) == &PyRangeIter_Type);
if (r->len <= 0) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOT_EXHAUSTED_RANGE_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
iter = stack_pointer[-1];
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(r) == &PyRangeIter_Type);
if (r->len <= 0) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_0;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOT_EXHAUSTED_RANGE_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
iter = _stack_item_0;
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(r) == &PyRangeIter_Type);
if (r->len <= 0) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOT_EXHAUSTED_RANGE_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iter;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
iter = _stack_item_1;
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(r) == &PyRangeIter_Type);
if (r->len <= 0) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = iter;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = iter;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _ITER_NEXT_RANGE_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iter;
_PyStackRef next;
iter = stack_pointer[-2];
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(r) == &PyRangeIter_Type);
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced((PyObject *)r));
#endif
assert(r->len > 0);
long value = r->start;
r->start = value + r->step;
r->len--;
PyObject *res = PyLong_FromLong(value);
if (res == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
next = PyStackRef_FromPyObjectSteal(res);
_tos_cache2 = next;
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _ITER_NEXT_RANGE_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iter;
_PyStackRef next;
_PyStackRef _stack_item_0 = _tos_cache0;
iter = stack_pointer[-1];
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(r) == &PyRangeIter_Type);
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced((PyObject *)r));
#endif
assert(r->len > 0);
long value = r->start;
r->start = value + r->step;
r->len--;
PyObject *res = PyLong_FromLong(value);
if (res == NULL) {
stack_pointer[0] = _stack_item_0;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
next = PyStackRef_FromPyObjectSteal(res);
_tos_cache2 = next;
_tos_cache1 = _stack_item_0;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _ITER_NEXT_RANGE_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iter;
_PyStackRef next;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
iter = _stack_item_0;
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(r) == &PyRangeIter_Type);
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced((PyObject *)r));
#endif
assert(r->len > 0);
long value = r->start;
r->start = value + r->step;
r->len--;
PyObject *res = PyLong_FromLong(value);
if (res == NULL) {
stack_pointer[0] = iter;
stack_pointer[1] = _stack_item_1;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
next = PyStackRef_FromPyObjectSteal(res);
_tos_cache2 = next;
_tos_cache1 = _stack_item_1;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _FOR_ITER_GEN_FRAME_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iter;
_PyStackRef gen_frame;
oparg = CURRENT_OPARG();
iter = stack_pointer[-2];
PyGenObject *gen = (PyGenObject *)PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(gen) != &PyGen_Type) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
if (!gen_try_set_executing((PyGenObject *)gen)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(FOR_ITER, hit);
_PyInterpreterFrame *pushed_frame = &gen->gi_iframe;
_PyFrame_StackPush(pushed_frame, PyStackRef_None);
gen->gi_exc_state.previous_item = tstate->exc_info;
tstate->exc_info = &gen->gi_exc_state;
pushed_frame->previous = frame;
frame->return_offset = (uint16_t)( 2u + oparg);
gen_frame = PyStackRef_Wrap(pushed_frame);
_tos_cache2 = gen_frame;
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _FOR_ITER_GEN_FRAME_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iter;
_PyStackRef gen_frame;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
iter = stack_pointer[-1];
PyGenObject *gen = (PyGenObject *)PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(gen) != &PyGen_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
if (!gen_try_set_executing((PyGenObject *)gen)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(FOR_ITER, hit);
_PyInterpreterFrame *pushed_frame = &gen->gi_iframe;
_PyFrame_StackPush(pushed_frame, PyStackRef_None);
gen->gi_exc_state.previous_item = tstate->exc_info;
tstate->exc_info = &gen->gi_exc_state;
pushed_frame->previous = frame;
frame->return_offset = (uint16_t)( 2u + oparg);
gen_frame = PyStackRef_Wrap(pushed_frame);
_tos_cache2 = gen_frame;
_tos_cache1 = _stack_item_0;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _FOR_ITER_GEN_FRAME_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef iter;
_PyStackRef gen_frame;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
iter = _stack_item_0;
PyGenObject *gen = (PyGenObject *)PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(gen) != &PyGen_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
if (!gen_try_set_executing((PyGenObject *)gen)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(FOR_ITER, hit);
_PyInterpreterFrame *pushed_frame = &gen->gi_iframe;
_PyFrame_StackPush(pushed_frame, PyStackRef_None);
gen->gi_exc_state.previous_item = tstate->exc_info;
tstate->exc_info = &gen->gi_exc_state;
pushed_frame->previous = frame;
frame->return_offset = (uint16_t)( 2u + oparg);
gen_frame = PyStackRef_Wrap(pushed_frame);
_tos_cache2 = gen_frame;
_tos_cache1 = _stack_item_1;
_tos_cache0 = iter;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _INSERT_NULL_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef self;
_PyStackRef *method_and_self;
_PyStackRef _stack_item_0 = _tos_cache0;
self = _stack_item_0;
method_and_self = &stack_pointer[0];
method_and_self[1] = self;
method_and_self[0] = PyStackRef_NULL;
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_SPECIAL_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *method_and_self;
oparg = CURRENT_OPARG();
method_and_self = &stack_pointer[-2];
PyObject *name = _Py_SpecialMethods[oparg].name;
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = _PyObject_LookupSpecialMethod(name, method_and_self);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err <= 0) {
if (err == 0) {
PyObject *owner = PyStackRef_AsPyObjectBorrow(method_and_self[1]);
_PyFrame_SetStackPointer(frame, stack_pointer);
const char *errfmt = _PyEval_SpecialMethodCanSuggest(owner, oparg)
? _Py_SpecialMethods[oparg].error_suggestion
: _Py_SpecialMethods[oparg].error;
stack_pointer = _PyFrame_GetStackPointer(frame);
assert(!_PyErr_Occurred(tstate));
assert(errfmt != NULL);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyErr_Format(tstate, PyExc_TypeError, errfmt, owner);
stack_pointer = _PyFrame_GetStackPointer(frame);
}
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _WITH_EXCEPT_START_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef val;
_PyStackRef lasti;
_PyStackRef exit_self;
_PyStackRef exit_func;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
val = _stack_item_2;
lasti = _stack_item_0;
exit_self = stack_pointer[-1];
exit_func = stack_pointer[-2];
PyObject *exc, *tb;
PyObject *val_o = PyStackRef_AsPyObjectBorrow(val);
PyObject *exit_func_o = PyStackRef_AsPyObjectBorrow(exit_func);
assert(val_o && PyExceptionInstance_Check(val_o));
exc = PyExceptionInstance_Class(val_o);
PyObject *original_tb = tb = PyException_GetTraceback(val_o);
if (tb == NULL) {
tb = Py_None;
}
assert(PyStackRef_IsTaggedInt(lasti));
(void)lasti;
PyObject* res_o;
{
PyObject *stack[5] = {NULL, PyStackRef_AsPyObjectBorrow(exit_self), exc, val_o, tb};
int has_self = !PyStackRef_IsNull(exit_self);
stack_pointer[0] = lasti;
stack_pointer[1] = _stack_item_1;
stack_pointer[2] = val;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
res_o = PyObject_Vectorcall(exit_func_o, stack + 2 - has_self,
(3 + has_self) | PY_VECTORCALL_ARGUMENTS_OFFSET, NULL);
stack_pointer = _PyFrame_GetStackPointer(frame);
}
_PyFrame_SetStackPointer(frame, stack_pointer);
Py_XDECREF(original_tb);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache2 = res;
_tos_cache1 = val;
_tos_cache0 = _stack_item_1;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _PUSH_EXC_INFO_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef exc;
_PyStackRef prev_exc;
_PyStackRef new_exc;
exc = stack_pointer[-1];
_PyErr_StackItem *exc_info = tstate->exc_info;
if (exc_info->exc_value != NULL) {
prev_exc = PyStackRef_FromPyObjectSteal(exc_info->exc_value);
}
else {
prev_exc = PyStackRef_None;
}
assert(PyStackRef_ExceptionInstanceCheck(exc));
exc_info->exc_value = PyStackRef_AsPyObjectNew(exc);
new_exc = exc;
_tos_cache1 = new_exc;
_tos_cache0 = prev_exc;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _PUSH_EXC_INFO_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef exc;
_PyStackRef prev_exc;
_PyStackRef new_exc;
_PyStackRef _stack_item_0 = _tos_cache0;
exc = _stack_item_0;
_PyErr_StackItem *exc_info = tstate->exc_info;
if (exc_info->exc_value != NULL) {
prev_exc = PyStackRef_FromPyObjectSteal(exc_info->exc_value);
}
else {
prev_exc = PyStackRef_None;
}
assert(PyStackRef_ExceptionInstanceCheck(exc));
exc_info->exc_value = PyStackRef_AsPyObjectNew(exc);
new_exc = exc;
_tos_cache1 = new_exc;
_tos_cache0 = prev_exc;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _PUSH_EXC_INFO_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef exc;
_PyStackRef prev_exc;
_PyStackRef new_exc;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
exc = _stack_item_1;
_PyErr_StackItem *exc_info = tstate->exc_info;
if (exc_info->exc_value != NULL) {
prev_exc = PyStackRef_FromPyObjectSteal(exc_info->exc_value);
}
else {
prev_exc = PyStackRef_None;
}
assert(PyStackRef_ExceptionInstanceCheck(exc));
exc_info->exc_value = PyStackRef_AsPyObjectNew(exc);
new_exc = exc;
_tos_cache2 = new_exc;
_tos_cache1 = prev_exc;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_DORV_VALUES_INST_ATTR_FROM_DICT_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
owner = stack_pointer[-1];
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(Py_TYPE(owner_o)->tp_flags & Py_TPFLAGS_INLINE_VALUES);
PyDictValues *ivs = _PyObject_InlineValues(owner_o);
if (!FT_ATOMIC_LOAD_UINT8(ivs->valid)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_DORV_VALUES_INST_ATTR_FROM_DICT_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
owner = _stack_item_0;
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(Py_TYPE(owner_o)->tp_flags & Py_TPFLAGS_INLINE_VALUES);
PyDictValues *ivs = _PyObject_InlineValues(owner_o);
if (!FT_ATOMIC_LOAD_UINT8(ivs->valid)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_DORV_VALUES_INST_ATTR_FROM_DICT_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
owner = _stack_item_1;
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(Py_TYPE(owner_o)->tp_flags & Py_TPFLAGS_INLINE_VALUES);
PyDictValues *ivs = _PyObject_InlineValues(owner_o);
if (!FT_ATOMIC_LOAD_UINT8(ivs->valid)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_DORV_VALUES_INST_ATTR_FROM_DICT_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
owner = _stack_item_2;
PyObject *owner_o = PyStackRef_AsPyObjectBorrow(owner);
assert(Py_TYPE(owner_o)->tp_flags & Py_TPFLAGS_INLINE_VALUES);
PyDictValues *ivs = _PyObject_InlineValues(owner_o);
if (!FT_ATOMIC_LOAD_UINT8(ivs->valid)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_KEYS_VERSION_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
owner = stack_pointer[-1];
uint32_t keys_version = (uint32_t)CURRENT_OPERAND0_32();
PyTypeObject *owner_cls = Py_TYPE(PyStackRef_AsPyObjectBorrow(owner));
PyHeapTypeObject *owner_heap_type = (PyHeapTypeObject *)owner_cls;
PyDictKeysObject *keys = owner_heap_type->ht_cached_keys;
if (FT_ATOMIC_LOAD_UINT32_RELAXED(keys->dk_version) != keys_version) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_KEYS_VERSION_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
owner = _stack_item_0;
uint32_t keys_version = (uint32_t)CURRENT_OPERAND0_32();
PyTypeObject *owner_cls = Py_TYPE(PyStackRef_AsPyObjectBorrow(owner));
PyHeapTypeObject *owner_heap_type = (PyHeapTypeObject *)owner_cls;
PyDictKeysObject *keys = owner_heap_type->ht_cached_keys;
if (FT_ATOMIC_LOAD_UINT32_RELAXED(keys->dk_version) != keys_version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_KEYS_VERSION_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
owner = _stack_item_1;
uint32_t keys_version = (uint32_t)CURRENT_OPERAND0_32();
PyTypeObject *owner_cls = Py_TYPE(PyStackRef_AsPyObjectBorrow(owner));
PyHeapTypeObject *owner_heap_type = (PyHeapTypeObject *)owner_cls;
PyDictKeysObject *keys = owner_heap_type->ht_cached_keys;
if (FT_ATOMIC_LOAD_UINT32_RELAXED(keys->dk_version) != keys_version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_KEYS_VERSION_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
owner = _stack_item_2;
uint32_t keys_version = (uint32_t)CURRENT_OPERAND0_32();
PyTypeObject *owner_cls = Py_TYPE(PyStackRef_AsPyObjectBorrow(owner));
PyHeapTypeObject *owner_heap_type = (PyHeapTypeObject *)owner_cls;
PyDictKeysObject *keys = owner_heap_type->ht_cached_keys;
if (FT_ATOMIC_LOAD_UINT32_RELAXED(keys->dk_version) != keys_version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_ATTR_METHOD_WITH_VALUES_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef self;
oparg = CURRENT_OPARG();
owner = stack_pointer[-1];
PyObject *descr = (PyObject *)CURRENT_OPERAND0_64();
assert(oparg & 1);
STAT_INC(LOAD_ATTR, hit);
assert(descr != NULL);
assert(_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR));
attr = PyStackRef_FromPyObjectNew(descr);
self = owner;
_tos_cache1 = self;
_tos_cache0 = attr;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_ATTR_METHOD_WITH_VALUES_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef self;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
owner = _stack_item_0;
PyObject *descr = (PyObject *)CURRENT_OPERAND0_64();
assert(oparg & 1);
STAT_INC(LOAD_ATTR, hit);
assert(descr != NULL);
assert(_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR));
attr = PyStackRef_FromPyObjectNew(descr);
self = owner;
_tos_cache1 = self;
_tos_cache0 = attr;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_ATTR_METHOD_WITH_VALUES_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef self;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
owner = _stack_item_1;
PyObject *descr = (PyObject *)CURRENT_OPERAND0_64();
assert(oparg & 1);
STAT_INC(LOAD_ATTR, hit);
assert(descr != NULL);
assert(_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR));
attr = PyStackRef_FromPyObjectNew(descr);
self = owner;
_tos_cache2 = self;
_tos_cache1 = attr;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_ATTR_METHOD_NO_DICT_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef self;
oparg = CURRENT_OPARG();
owner = stack_pointer[-1];
PyObject *descr = (PyObject *)CURRENT_OPERAND0_64();
assert(oparg & 1);
assert(Py_TYPE(PyStackRef_AsPyObjectBorrow(owner))->tp_dictoffset == 0);
STAT_INC(LOAD_ATTR, hit);
assert(descr != NULL);
assert(_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR));
attr = PyStackRef_FromPyObjectNew(descr);
self = owner;
_tos_cache1 = self;
_tos_cache0 = attr;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_ATTR_METHOD_NO_DICT_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef self;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
owner = _stack_item_0;
PyObject *descr = (PyObject *)CURRENT_OPERAND0_64();
assert(oparg & 1);
assert(Py_TYPE(PyStackRef_AsPyObjectBorrow(owner))->tp_dictoffset == 0);
STAT_INC(LOAD_ATTR, hit);
assert(descr != NULL);
assert(_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR));
attr = PyStackRef_FromPyObjectNew(descr);
self = owner;
_tos_cache1 = self;
_tos_cache0 = attr;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_ATTR_METHOD_NO_DICT_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef self;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
owner = _stack_item_1;
PyObject *descr = (PyObject *)CURRENT_OPERAND0_64();
assert(oparg & 1);
assert(Py_TYPE(PyStackRef_AsPyObjectBorrow(owner))->tp_dictoffset == 0);
STAT_INC(LOAD_ATTR, hit);
assert(descr != NULL);
assert(_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR));
attr = PyStackRef_FromPyObjectNew(descr);
self = owner;
_tos_cache2 = self;
_tos_cache1 = attr;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
owner = _stack_item_0;
PyObject *descr = (PyObject *)CURRENT_OPERAND0_64();
assert((oparg & 1) == 0);
STAT_INC(LOAD_ATTR, hit);
assert(descr != NULL);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(owner);
stack_pointer = _PyFrame_GetStackPointer(frame);
attr = PyStackRef_FromPyObjectNew(descr);
_tos_cache0 = attr;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_ATTR_NONDESCRIPTOR_NO_DICT_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
owner = _stack_item_0;
PyObject *descr = (PyObject *)CURRENT_OPERAND0_64();
assert((oparg & 1) == 0);
assert(Py_TYPE(PyStackRef_AsPyObjectBorrow(owner))->tp_dictoffset == 0);
STAT_INC(LOAD_ATTR, hit);
assert(descr != NULL);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(owner);
stack_pointer = _PyFrame_GetStackPointer(frame);
attr = PyStackRef_FromPyObjectNew(descr);
_tos_cache0 = attr;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
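/* _CHECK_ATTR_METHOD_LAZY_DICT: guard for objects whose __dict__ is
   created lazily; deoptimize if the dict pointer at the cached offset
   has been materialized, since a materialized dict could shadow the
   cached method. */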
case _CHECK_ATTR_METHOD_LAZY_DICT_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
owner = stack_pointer[-1];
uint16_t dictoffset = (uint16_t)CURRENT_OPERAND0_16();
char *ptr = ((char *)PyStackRef_AsPyObjectBorrow(owner)) + MANAGED_DICT_OFFSET + dictoffset;
PyObject *dict = FT_ATOMIC_LOAD_PTR_ACQUIRE(*(PyObject **)ptr);
if (dict != NULL) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_ATTR_METHOD_LAZY_DICT_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
owner = _stack_item_0;
uint16_t dictoffset = (uint16_t)CURRENT_OPERAND0_16();
char *ptr = ((char *)PyStackRef_AsPyObjectBorrow(owner)) + MANAGED_DICT_OFFSET + dictoffset;
PyObject *dict = FT_ATOMIC_LOAD_PTR_ACQUIRE(*(PyObject **)ptr);
if (dict != NULL) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = owner;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_ATTR_METHOD_LAZY_DICT_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
owner = _stack_item_1;
uint16_t dictoffset = (uint16_t)CURRENT_OPERAND0_16();
char *ptr = ((char *)PyStackRef_AsPyObjectBorrow(owner)) + MANAGED_DICT_OFFSET + dictoffset;
PyObject *dict = FT_ATOMIC_LOAD_PTR_ACQUIRE(*(PyObject **)ptr);
if (dict != NULL) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = owner;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_ATTR_METHOD_LAZY_DICT_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
owner = _stack_item_2;
uint16_t dictoffset = (uint16_t)CURRENT_OPERAND0_16();
char *ptr = ((char *)PyStackRef_AsPyObjectBorrow(owner)) + MANAGED_DICT_OFFSET + dictoffset;
PyObject *dict = FT_ATOMIC_LOAD_PTR_ACQUIRE(*(PyObject **)ptr);
if (dict != NULL) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = owner;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_ATTR_METHOD_LAZY_DICT_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef self;
oparg = CURRENT_OPARG();
owner = stack_pointer[-1];
PyObject *descr = (PyObject *)CURRENT_OPERAND0_64();
assert(oparg & 1);
STAT_INC(LOAD_ATTR, hit);
assert(descr != NULL);
assert(_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR));
attr = PyStackRef_FromPyObjectNew(descr);
self = owner;
_tos_cache1 = self;
_tos_cache0 = attr;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_ATTR_METHOD_LAZY_DICT_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef self;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
owner = _stack_item_0;
PyObject *descr = (PyObject *)CURRENT_OPERAND0_64();
assert(oparg & 1);
STAT_INC(LOAD_ATTR, hit);
assert(descr != NULL);
assert(_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR));
attr = PyStackRef_FromPyObjectNew(descr);
self = owner;
_tos_cache1 = self;
_tos_cache0 = attr;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_ATTR_METHOD_LAZY_DICT_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef owner;
_PyStackRef attr;
_PyStackRef self;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
owner = _stack_item_1;
PyObject *descr = (PyObject *)CURRENT_OPERAND0_64();
assert(oparg & 1);
STAT_INC(LOAD_ATTR, hit);
assert(descr != NULL);
assert(_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR));
attr = PyStackRef_FromPyObjectNew(descr);
self = owner;
_tos_cache2 = self;
_tos_cache1 = attr;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
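/* _MAYBE_EXPAND_METHOD: if the callable is a bound method and the self
   slot is still NULL, unpack it in place, roughly
       callable, self_or_null = method.__func__, method.__self__
   so that the following call uops see a plain function with an explicit
   self argument. */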
case _MAYBE_EXPAND_METHOD_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef self_or_null;
_PyStackRef callable;
oparg = CURRENT_OPARG();
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
if (PyStackRef_TYPE(callable) == &PyMethod_Type && PyStackRef_IsNull(self_or_null)) {
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyObject *self = ((PyMethodObject *)callable_o)->im_self;
self_or_null = PyStackRef_FromPyObjectNew(self);
PyObject *method = ((PyMethodObject *)callable_o)->im_func;
_PyStackRef temp = callable;
callable = PyStackRef_FromPyObjectNew(method);
stack_pointer[-2 - oparg] = callable;
stack_pointer[-1 - oparg] = self_or_null;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(temp);
stack_pointer = _PyFrame_GetStackPointer(frame);
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
stack_pointer[-2 - oparg] = callable;
stack_pointer[-1 - oparg] = self_or_null;
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _DO_CALL is not a viable micro-op for tier 2 because it uses the 'this_instr' variable */
/* _MONITOR_CALL is not a viable micro-op for tier 2 because it uses the 'this_instr' variable */
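/* _PY_FRAME_GENERAL: push a new interpreter frame for a general call to
   a Python function. _PyEvalFramePushAndInit consumes the callable and
   argument references; the wrapped frame is left as the single cached
   output for _PUSH_FRAME. */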
case _PY_FRAME_GENERAL_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef new_frame;
oparg = CURRENT_OPARG();
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
int total_args = oparg;
if (!PyStackRef_IsNull(self_or_null)) {
args--;
total_args++;
}
assert(Py_TYPE(callable_o) == &PyFunction_Type);
int code_flags = ((PyCodeObject*)PyFunction_GET_CODE(callable_o))->co_flags;
PyObject *locals = code_flags & CO_OPTIMIZED ? NULL : Py_NewRef(PyFunction_GET_GLOBALS(callable_o));
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyInterpreterFrame *temp = _PyEvalFramePushAndInit(
tstate, callable, locals,
args, total_args, NULL, frame
);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (temp == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
new_frame = PyStackRef_Wrap(temp);
_tos_cache0 = new_frame;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
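/* _CHECK_FUNCTION_VERSION and the _INLINE variants deoptimize unless the
   callable is still the exact function this trace was specialized for,
   identified by func_version. The _INLINE forms read the function from a
   trace operand instead of the stack, so they need no stack access. */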
case _CHECK_FUNCTION_VERSION_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
oparg = CURRENT_OPARG();
callable = stack_pointer[-2 - oparg];
uint32_t func_version = (uint32_t)CURRENT_OPERAND0_32();
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (!PyFunction_Check(callable_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
PyFunctionObject *func = (PyFunctionObject *)callable_o;
if (func->func_version != func_version) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_FUNCTION_VERSION_INLINE_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
uint32_t func_version = (uint32_t)CURRENT_OPERAND0_32();
PyObject *callable_o = (PyObject *)CURRENT_OPERAND1_64();
assert(PyFunction_Check(callable_o));
PyFunctionObject *func = (PyFunctionObject *)callable_o;
if (func->func_version != func_version) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_FUNCTION_VERSION_INLINE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
uint32_t func_version = (uint32_t)CURRENT_OPERAND0_32();
PyObject *callable_o = (PyObject *)CURRENT_OPERAND1_64();
assert(PyFunction_Check(callable_o));
PyFunctionObject *func = (PyFunctionObject *)callable_o;
if (func->func_version != func_version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_FUNCTION_VERSION_INLINE_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
uint32_t func_version = (uint32_t)CURRENT_OPERAND0_32();
PyObject *callable_o = (PyObject *)CURRENT_OPERAND1_64();
assert(PyFunction_Check(callable_o));
PyFunctionObject *func = (PyFunctionObject *)callable_o;
if (func->func_version != func_version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_FUNCTION_VERSION_INLINE_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
uint32_t func_version = (uint32_t)CURRENT_OPERAND0_32();
PyObject *callable_o = (PyObject *)CURRENT_OPERAND1_64();
assert(PyFunction_Check(callable_o));
PyFunctionObject *func = (PyFunctionObject *)callable_o;
if (func->func_version != func_version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
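/* _CHECK_METHOD_VERSION: the bound-method counterpart of
   _CHECK_FUNCTION_VERSION: checks that the callable is a PyMethod, that
   its im_func is a Python function with the expected version, and that
   the self slot has not been filled yet. */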
case _CHECK_METHOD_VERSION_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null;
_PyStackRef callable;
oparg = CURRENT_OPARG();
null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
uint32_t func_version = (uint32_t)CURRENT_OPERAND0_32();
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (Py_TYPE(callable_o) != &PyMethod_Type) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
PyObject *func = ((PyMethodObject *)callable_o)->im_func;
if (!PyFunction_Check(func)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
if (((PyFunctionObject *)func)->func_version != func_version) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
if (!PyStackRef_IsNull(null)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _EXPAND_METHOD_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef self_or_null;
_PyStackRef callable;
oparg = CURRENT_OPARG();
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
assert(PyStackRef_IsNull(self_or_null));
assert(Py_TYPE(callable_o) == &PyMethod_Type);
self_or_null = PyStackRef_FromPyObjectNew(((PyMethodObject *)callable_o)->im_self);
_PyStackRef temp = callable;
callable = PyStackRef_FromPyObjectNew(((PyMethodObject *)callable_o)->im_func);
assert(PyStackRef_FunctionCheck(callable));
stack_pointer[-2 - oparg] = callable;
stack_pointer[-1 - oparg] = self_or_null;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(temp);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
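/* _CHECK_IS_NOT_PY_CALLABLE: guard for the generic non-Python call path
   below; deoptimize if the callable turns out to be a Python function or
   bound method, which are handled by their own specialized uops. */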
case _CHECK_IS_NOT_PY_CALLABLE_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
oparg = CURRENT_OPARG();
callable = stack_pointer[-2 - oparg];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (PyFunction_Check(callable_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
if (Py_TYPE(callable_o) == &PyMethod_Type) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
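/* _CALL_NON_PY_GENERAL: perform the call through the vectorcall
   protocol; _Py_VectorCall_StackRefSteal consumes the callable and
   argument references, so the error path only pops the stack slots. */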
case _CALL_NON_PY_GENERAL_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef res;
oparg = CURRENT_OPARG();
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
#if TIER_ONE
assert(opcode != INSTRUMENTED_CALL);
#endif
int total_args = oparg;
_PyStackRef *arguments = args;
if (!PyStackRef_IsNull(self_or_null)) {
arguments--;
total_args++;
}
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = _Py_VectorCall_StackRefSteal(
callable,
arguments,
total_args,
PyStackRef_NULL);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_CALL_BOUND_METHOD_EXACT_ARGS_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null;
_PyStackRef callable;
oparg = CURRENT_OPARG();
null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
if (!PyStackRef_IsNull(null)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
if (Py_TYPE(PyStackRef_AsPyObjectBorrow(callable)) != &PyMethod_Type) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _INIT_CALL_BOUND_METHOD_EXACT_ARGS_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef self_or_null;
_PyStackRef callable;
oparg = CURRENT_OPARG();
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
assert(PyStackRef_IsNull(self_or_null));
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
STAT_INC(CALL, hit);
self_or_null = PyStackRef_FromPyObjectNew(((PyMethodObject *)callable_o)->im_self);
_PyStackRef temp = callable;
callable = PyStackRef_FromPyObjectNew(((PyMethodObject *)callable_o)->im_func);
stack_pointer[-2 - oparg] = callable;
stack_pointer[-1 - oparg] = self_or_null;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(temp);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
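/* _CHECK_PEP_523: deoptimize when a PEP 523 frame-evaluation hook is
   installed (tstate->interp->eval_frame != NULL), since inlined frame
   pushes would bypass the hook. */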
case _CHECK_PEP_523_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
if (tstate->interp->eval_frame) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_PEP_523_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
if (tstate->interp->eval_frame) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_PEP_523_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
if (tstate->interp->eval_frame) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_PEP_523_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
if (tstate->interp->eval_frame) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
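/* _CHECK_FUNCTION_EXACT_ARGS: deoptimize unless co_argcount equals oparg
   plus one implicit self when present; this is what makes the
   straight-line argument copy in _INIT_CALL_PY_EXACT_ARGS safe. */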
case _CHECK_FUNCTION_EXACT_ARGS_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef self_or_null;
_PyStackRef callable;
oparg = CURRENT_OPARG();
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
assert(PyFunction_Check(callable_o));
PyFunctionObject *func = (PyFunctionObject *)callable_o;
PyCodeObject *code = (PyCodeObject *)func->func_code;
if (code->co_argcount != oparg + (!PyStackRef_IsNull(self_or_null))) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_STACK_SPACE_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
oparg = CURRENT_OPARG();
callable = stack_pointer[-2 - oparg];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyFunctionObject *func = (PyFunctionObject *)callable_o;
PyCodeObject *code = (PyCodeObject *)func->func_code;
if (!_PyThreadState_HasStackSpace(tstate, code->co_framesize)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_RECURSION_REMAINING_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
if (tstate->py_recursion_remaining <= 1) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_RECURSION_REMAINING_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
if (tstate->py_recursion_remaining <= 1) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_RECURSION_REMAINING_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
if (tstate->py_recursion_remaining <= 1) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_RECURSION_REMAINING_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
if (tstate->py_recursion_remaining <= 1) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
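/* _INIT_CALL_PY_EXACT_ARGS_{0..4} are oparg-specialized copies of
   _INIT_CALL_PY_EXACT_ARGS below: hard-coding oparg lets the C compiler
   unroll the copy loop. Note that localsplus[0] is written
   unconditionally; when there is no self (and oparg >= 1) it is
   immediately overwritten by the first argument. */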
case _INIT_CALL_PY_EXACT_ARGS_0_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef new_frame;
oparg = 0;
assert(oparg == CURRENT_OPARG());
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
int has_self = !PyStackRef_IsNull(self_or_null);
STAT_INC(CALL, hit);
_PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame);
_PyStackRef *first_non_self_local = pushed_frame->localsplus + has_self;
pushed_frame->localsplus[0] = self_or_null;
for (int i = 0; i < oparg; i++) {
first_non_self_local[i] = args[i];
}
new_frame = PyStackRef_Wrap(pushed_frame);
_tos_cache0 = new_frame;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _INIT_CALL_PY_EXACT_ARGS_1_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef new_frame;
oparg = 1;
assert(oparg == CURRENT_OPARG());
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
int has_self = !PyStackRef_IsNull(self_or_null);
STAT_INC(CALL, hit);
_PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame);
_PyStackRef *first_non_self_local = pushed_frame->localsplus + has_self;
pushed_frame->localsplus[0] = self_or_null;
for (int i = 0; i < oparg; i++) {
first_non_self_local[i] = args[i];
}
new_frame = PyStackRef_Wrap(pushed_frame);
_tos_cache0 = new_frame;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _INIT_CALL_PY_EXACT_ARGS_2_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef new_frame;
oparg = 2;
assert(oparg == CURRENT_OPARG());
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
int has_self = !PyStackRef_IsNull(self_or_null);
STAT_INC(CALL, hit);
_PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame);
_PyStackRef *first_non_self_local = pushed_frame->localsplus + has_self;
pushed_frame->localsplus[0] = self_or_null;
for (int i = 0; i < oparg; i++) {
first_non_self_local[i] = args[i];
}
new_frame = PyStackRef_Wrap(pushed_frame);
_tos_cache0 = new_frame;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _INIT_CALL_PY_EXACT_ARGS_3_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef new_frame;
oparg = 3;
assert(oparg == CURRENT_OPARG());
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
int has_self = !PyStackRef_IsNull(self_or_null);
STAT_INC(CALL, hit);
_PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame);
_PyStackRef *first_non_self_local = pushed_frame->localsplus + has_self;
pushed_frame->localsplus[0] = self_or_null;
for (int i = 0; i < oparg; i++) {
first_non_self_local[i] = args[i];
}
new_frame = PyStackRef_Wrap(pushed_frame);
_tos_cache0 = new_frame;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _INIT_CALL_PY_EXACT_ARGS_4_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef new_frame;
oparg = 4;
assert(oparg == CURRENT_OPARG());
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
int has_self = !PyStackRef_IsNull(self_or_null);
STAT_INC(CALL, hit);
_PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame);
_PyStackRef *first_non_self_local = pushed_frame->localsplus + has_self;
pushed_frame->localsplus[0] = self_or_null;
for (int i = 0; i < oparg; i++) {
first_non_self_local[i] = args[i];
}
new_frame = PyStackRef_Wrap(pushed_frame);
_tos_cache0 = new_frame;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _INIT_CALL_PY_EXACT_ARGS_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef new_frame;
oparg = CURRENT_OPARG();
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
int has_self = !PyStackRef_IsNull(self_or_null);
STAT_INC(CALL, hit);
_PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame);
_PyStackRef *first_non_self_local = pushed_frame->localsplus + has_self;
pushed_frame->localsplus[0] = self_or_null;
for (int i = 0; i < oparg; i++) {
first_non_self_local[i] = args[i];
}
new_frame = PyStackRef_Wrap(pushed_frame);
_tos_cache0 = new_frame;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
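/* _PUSH_FRAME: activate the frame built by the preceding uop: link it
   into tstate as the current frame, charge one level of Python
   recursion, and reload the stack pointer and instruction pointer from
   the new frame. */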
case _PUSH_FRAME_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef new_frame;
_PyStackRef _stack_item_0 = _tos_cache0;
new_frame = _stack_item_0;
assert(tstate->interp->eval_frame == NULL);
_PyInterpreterFrame *temp = PyStackRef_Unwrap(new_frame);
_PyFrame_SetStackPointer(frame, stack_pointer);
assert(temp->previous == frame || temp->previous->previous == frame);
CALL_STAT_INC(inlined_py_calls);
frame = tstate->current_frame = temp;
tstate->py_recursion_remaining--;
LOAD_SP();
LOAD_IP(0);
LLTRACE_RESUME_FRAME();
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
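/* _GUARD_NOS_NULL, _GUARD_NOS_NOT_NULL and _GUARD_THIRD_NULL check the
   self/null slot beneath the arguments. As elsewhere in this file, the
   _rNM suffix gives the number of values held in the TOS register cache
   on entry (N) and on exit (M); the low-N variants refill the cache from
   the in-memory stack as a side effect. */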
case _GUARD_NOS_NULL_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null;
null = stack_pointer[-2];
if (!PyStackRef_IsNull(null)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = null;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_NULL_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null;
_PyStackRef _stack_item_0 = _tos_cache0;
null = stack_pointer[-1];
if (!PyStackRef_IsNull(null)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_0;
_tos_cache0 = null;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_NULL_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
null = _stack_item_0;
if (!PyStackRef_IsNull(null)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = null;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = null;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_NULL_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
null = _stack_item_1;
if (!PyStackRef_IsNull(null)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = null;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = null;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_NOT_NULL_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
nos = stack_pointer[-2];
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (o == NULL) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_NOT_NULL_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
_PyStackRef _stack_item_0 = _tos_cache0;
nos = stack_pointer[-1];
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (o == NULL) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_0;
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_NOT_NULL_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
nos = _stack_item_0;
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (o == NULL) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = nos;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_NOS_NOT_NULL_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef nos;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
nos = _stack_item_1;
PyObject *o = PyStackRef_AsPyObjectBorrow(nos);
if (o == NULL) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = nos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = nos;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_THIRD_NULL_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null;
null = stack_pointer[-3];
if (!PyStackRef_IsNull(null)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = stack_pointer[-1];
_tos_cache1 = stack_pointer[-2];
_tos_cache0 = null;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_THIRD_NULL_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null;
_PyStackRef _stack_item_0 = _tos_cache0;
null = stack_pointer[-2];
if (!PyStackRef_IsNull(null)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_0;
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = null;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_THIRD_NULL_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
null = stack_pointer[-1];
if (!PyStackRef_IsNull(null)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_1;
_tos_cache1 = _stack_item_0;
_tos_cache0 = null;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_THIRD_NULL_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
null = _stack_item_0;
if (!PyStackRef_IsNull(null)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = null;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = null;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_TYPE_1_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
callable = stack_pointer[-3];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (callable_o != (PyObject *)&PyType_Type) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = stack_pointer[-1];
_tos_cache1 = stack_pointer[-2];
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_TYPE_1_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
callable = stack_pointer[-2];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (callable_o != (PyObject *)&PyType_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_0;
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_TYPE_1_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
callable = stack_pointer[-1];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (callable_o != (PyObject *)&PyType_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_1;
_tos_cache1 = _stack_item_0;
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_TYPE_1_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
callable = _stack_item_0;
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (callable_o != (PyObject *)&PyType_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
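/* _CALL_TYPE_1: specialization of type(x) with exactly one positional
   argument, guarded by _GUARD_CALLABLE_TYPE_1 above. No call is made;
   the result is simply a new reference to Py_TYPE(arg). */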
case _CALL_TYPE_1_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef res;
_PyStackRef a;
oparg = CURRENT_OPARG();
arg = stack_pointer[-1];
PyObject *arg_o = PyStackRef_AsPyObjectBorrow(arg);
assert(oparg == 1);
STAT_INC(CALL, hit);
a = arg;
res = PyStackRef_FromPyObjectNew(Py_TYPE(arg_o));
_tos_cache1 = a;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CALL_TYPE_1_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef res;
_PyStackRef a;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
arg = _stack_item_0;
PyObject *arg_o = PyStackRef_AsPyObjectBorrow(arg);
assert(oparg == 1);
STAT_INC(CALL, hit);
a = arg;
res = PyStackRef_FromPyObjectNew(Py_TYPE(arg_o));
_tos_cache1 = a;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CALL_TYPE_1_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef res;
_PyStackRef a;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
arg = _stack_item_1;
PyObject *arg_o = PyStackRef_AsPyObjectBorrow(arg);
assert(oparg == 1);
STAT_INC(CALL, hit);
a = arg;
res = PyStackRef_FromPyObjectNew(Py_TYPE(arg_o));
_tos_cache1 = a;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CALL_TYPE_1_r32: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef res;
_PyStackRef a;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
oparg = CURRENT_OPARG();
arg = _stack_item_2;
PyObject *arg_o = PyStackRef_AsPyObjectBorrow(arg);
assert(oparg == 1);
STAT_INC(CALL, hit);
a = arg;
res = PyStackRef_FromPyObjectNew(Py_TYPE(arg_o));
_tos_cache1 = a;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_STR_1_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
callable = stack_pointer[-3];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (callable_o != (PyObject *)&PyUnicode_Type) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = stack_pointer[-1];
_tos_cache1 = stack_pointer[-2];
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_STR_1_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
callable = stack_pointer[-2];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (callable_o != (PyObject *)&PyUnicode_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_0;
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_STR_1_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
callable = stack_pointer[-1];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (callable_o != (PyObject *)&PyUnicode_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_1;
_tos_cache1 = _stack_item_0;
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_STR_1_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
callable = _stack_item_0;
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (callable_o != (PyObject *)&PyUnicode_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
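/* _CALL_STR_1: specialization of str(x). The cached items are spilled
   back to the in-memory stack around the PyObject_Str call, presumably
   so that the error and GC machinery observe a consistent stack if the
   call escapes. */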
case _CALL_STR_1_r32: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef res;
_PyStackRef a;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
oparg = CURRENT_OPARG();
arg = _stack_item_2;
PyObject *arg_o = PyStackRef_AsPyObjectBorrow(arg);
assert(oparg == 1);
STAT_INC(CALL, hit);
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer[2] = arg;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = PyObject_Str(arg_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
a = arg;
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache1 = a;
_tos_cache0 = res;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_TUPLE_1_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
callable = stack_pointer[-3];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (callable_o != (PyObject *)&PyTuple_Type) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = stack_pointer[-1];
_tos_cache1 = stack_pointer[-2];
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_TUPLE_1_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
callable = stack_pointer[-2];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (callable_o != (PyObject *)&PyTuple_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_0;
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_TUPLE_1_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
callable = stack_pointer[-1];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (callable_o != (PyObject *)&PyTuple_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_1;
_tos_cache1 = _stack_item_0;
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_TUPLE_1_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
callable = _stack_item_0;
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (callable_o != (PyObject *)&PyTuple_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
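/* _CALL_TUPLE_1: specialization of tuple(x), mirroring _CALL_STR_1 but
   calling PySequence_Tuple on the single argument. */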
case _CALL_TUPLE_1_r32: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef res;
_PyStackRef a;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
oparg = CURRENT_OPARG();
arg = _stack_item_2;
PyObject *arg_o = PyStackRef_AsPyObjectBorrow(arg);
assert(oparg == 1);
STAT_INC(CALL, hit);
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer[2] = arg;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = PySequence_Tuple(arg_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
a = arg;
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache1 = a;
_tos_cache0 = res;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
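/* _CHECK_AND_ALLOCATE_OBJECT: specialized `cls(args...)` for vanilla
   heap types: guard the type version, allocate the instance with
   PyType_GenericAlloc, and swap the callable for the cached __init__
   (cls->_spec_cache.init) so the following uops call it like a normal
   Python function with self already in place. */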
case _CHECK_AND_ALLOCATE_OBJECT_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef self_or_null;
_PyStackRef callable;
oparg = CURRENT_OPARG();
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
uint32_t type_version = (uint32_t)CURRENT_OPERAND0_32();
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (!PyStackRef_IsNull(self_or_null)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
if (!PyType_Check(callable_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
PyTypeObject *tp = (PyTypeObject *)callable_o;
if (FT_ATOMIC_LOAD_UINT32_RELAXED(tp->tp_version_tag) != type_version) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
assert(tp->tp_new == PyBaseObject_Type.tp_new);
assert(tp->tp_flags & Py_TPFLAGS_HEAPTYPE);
assert(tp->tp_alloc == PyType_GenericAlloc);
PyHeapTypeObject *cls = (PyHeapTypeObject *)callable_o;
PyFunctionObject *init_func = (PyFunctionObject *)FT_ATOMIC_LOAD_PTR_ACQUIRE(cls->_spec_cache.init);
PyCodeObject *code = (PyCodeObject *)init_func->func_code;
if (!_PyThreadState_HasStackSpace(tstate, code->co_framesize + _Py_InitCleanup.co_framesize)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(CALL, hit);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *self_o = PyType_GenericAlloc(tp, 0);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (self_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
self_or_null = PyStackRef_FromPyObjectSteal(self_o);
_PyStackRef temp = callable;
callable = PyStackRef_FromPyObjectNew(init_func);
stack_pointer[-2 - oparg] = callable;
stack_pointer[-1 - oparg] = self_or_null;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(temp);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
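/* _CREATE_INIT_FRAME: push a two-instruction shim frame
   (EXIT_INIT_CHECK; RETURN_VALUE) and, above it, the frame for
   __init__(self, ...). The shim holds a duplicate of self; after
   EXIT_INIT_CHECK discards __init__'s None, that self is what the
   constructor call ultimately returns. */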
case _CREATE_INIT_FRAME_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef self;
_PyStackRef init;
_PyStackRef init_frame;
oparg = CURRENT_OPARG();
args = &stack_pointer[-oparg];
self = stack_pointer[-1 - oparg];
init = stack_pointer[-2 - oparg];
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyInterpreterFrame *shim = _PyFrame_PushTrampolineUnchecked(
tstate, (PyCodeObject *)&_Py_InitCleanup, 1, frame);
stack_pointer = _PyFrame_GetStackPointer(frame);
assert(_PyFrame_GetBytecode(shim)[0].op.code == EXIT_INIT_CHECK);
assert(_PyFrame_GetBytecode(shim)[1].op.code == RETURN_VALUE);
shim->localsplus[0] = PyStackRef_DUP(self);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyInterpreterFrame *temp = _PyEvalFramePushAndInit(
tstate, init, NULL, args-1, oparg+1, NULL, shim);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (temp == NULL) {
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyEval_FrameClearAndPop(tstate, shim);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
frame->return_offset = 1 + INLINE_CACHE_ENTRIES_CALL;
tstate->py_recursion_remaining--;
init_frame = PyStackRef_Wrap(temp);
_tos_cache0 = init_frame;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
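/* _EXIT_INIT_CHECK: runs inside the _Py_InitCleanup shim pushed by
   _CREATE_INIT_FRAME; raises TypeError if __init__ returned anything
   other than None. */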
case _EXIT_INIT_CHECK_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef should_be_none;
_PyStackRef _stack_item_0 = _tos_cache0;
should_be_none = _stack_item_0;
if (!PyStackRef_IsNone(should_be_none)) {
stack_pointer[0] = should_be_none;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyErr_Format(PyExc_TypeError,
"__init__() should return None, not '%.200s'",
Py_TYPE(PyStackRef_AsPyObjectBorrow(should_be_none))->tp_name);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
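/* _CALL_BUILTIN_CLASS: call a builtin type's tp_vectorcall slot
   directly; deoptimizes if the callable is not a type or the type has
   no vectorcall implementation. */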
case _CALL_BUILTIN_CLASS_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef res;
oparg = CURRENT_OPARG();
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (!PyType_Check(callable_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
PyTypeObject *tp = (PyTypeObject *)callable_o;
int total_args = oparg;
_PyStackRef *arguments = args;
if (!PyStackRef_IsNull(self_or_null)) {
arguments--;
total_args++;
}
if (tp->tp_vectorcall == NULL) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(CALL, hit);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = _Py_CallBuiltinClass_StackRefSteal(
callable,
arguments,
total_args);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
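/* _CALL_BUILTIN_O: builtin declared METH_O, i.e. exactly one positional
   argument and no keywords; the argument is passed through
   _PyCFunction_TrampolineCall without building an argument tuple. */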
case _CALL_BUILTIN_O_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef res;
_PyStackRef a;
_PyStackRef c;
oparg = CURRENT_OPARG();
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
int total_args = oparg;
if (!PyStackRef_IsNull(self_or_null)) {
args--;
total_args++;
}
if (total_args != 1) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
if (!PyCFunction_CheckExact(callable_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
if (PyCFunction_GET_FLAGS(callable_o) != METH_O) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
if (_Py_ReachedRecursionLimit(tstate)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(CALL, hit);
PyCFunction cfunc = PyCFunction_GET_FUNCTION(callable_o);
_PyStackRef arg = args[0];
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = _PyCFunction_TrampolineCall(cfunc, PyCFunction_GET_SELF(callable_o), PyStackRef_AsPyObjectBorrow(arg));
stack_pointer = _PyFrame_GetStackPointer(frame);
_Py_LeaveRecursiveCallTstate(tstate);
assert((res_o != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
a = arg;
c = callable;
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache2 = c;
_tos_cache1 = a;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CALL_BUILTIN_FAST_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef res;
oparg = CURRENT_OPARG();
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
int total_args = oparg;
_PyStackRef *arguments = args;
if (!PyStackRef_IsNull(self_or_null)) {
arguments--;
total_args++;
}
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (!PyCFunction_CheckExact(callable_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
if (PyCFunction_GET_FLAGS(callable_o) != METH_FASTCALL) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(CALL, hit);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = _Py_BuiltinCallFast_StackRefSteal(
callable,
arguments,
total_args
);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
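    /* A minimal sketch of the METH_FASTCALL convention guarded above: the
       builtin takes a contiguous array of arguments plus a count, so no
       argument tuple is ever built.  Assumes the public PyCFunctionFast
       typedef; kept under #if 0. */
    #if 0
    static PyObject *
    meth_fastcall_sketch(PyObject *callable, PyObject *const *args, Py_ssize_t nargs)
    {
        PyCFunctionFast cfunc =
            (PyCFunctionFast)(void (*)(void))PyCFunction_GET_FUNCTION(callable);
        return cfunc(PyCFunction_GET_SELF(callable), args, nargs);
    }
    #endif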
case _CALL_BUILTIN_FAST_WITH_KEYWORDS_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef res;
oparg = CURRENT_OPARG();
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
int total_args = oparg;
_PyStackRef *arguments = args;
if (!PyStackRef_IsNull(self_or_null)) {
arguments--;
total_args++;
}
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (!PyCFunction_CheckExact(callable_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
if (PyCFunction_GET_FLAGS(callable_o) != (METH_FASTCALL | METH_KEYWORDS)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(CALL, hit);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = _Py_BuiltinCallFastWithKeywords_StackRefSteal(callable, arguments, total_args);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
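    /* METH_FASTCALL | METH_KEYWORDS functions take a fourth kwnames
       argument; a plain CALL carries no keyword names, so the helper above
       can invoke the function with kwnames == NULL.  (The _CALL_KW uops
       further below handle the case where kwnames is a real tuple.) */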
case _GUARD_CALLABLE_LEN_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
callable = stack_pointer[-3];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyInterpreterState *interp = tstate->interp;
if (callable_o != interp->callable_cache.len) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = stack_pointer[-1];
_tos_cache1 = stack_pointer[-2];
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_LEN_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
callable = stack_pointer[-2];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyInterpreterState *interp = tstate->interp;
if (callable_o != interp->callable_cache.len) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_0;
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_LEN_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
callable = stack_pointer[-1];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyInterpreterState *interp = tstate->interp;
if (callable_o != interp->callable_cache.len) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_1;
_tos_cache1 = _stack_item_0;
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_LEN_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
callable = _stack_item_0;
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyInterpreterState *interp = tstate->interp;
if (callable_o != interp->callable_cache.len) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
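    /* The _rNN suffixes on the variants above encode the top-of-stack
       register cache: the first digit is how many values are expected in
       _tos_cache0.._tos_cache2 on entry (checked by
       CHECK_CURRENT_CACHED_VALUES), the second how many are live on exit
       (declared by SET_CURRENT_CACHED_VALUES).  _GUARD_CALLABLE_LEN_r03
       reads all three operands from memory and leaves them cached, while
       the _r33 variant runs entirely out of the cache; every deopt branch
       first restores the caches it was given before jumping. */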
case _CALL_LEN_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef callable;
_PyStackRef res;
_PyStackRef a;
_PyStackRef c;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
arg = _stack_item_2;
callable = _stack_item_0;
STAT_INC(CALL, hit);
PyObject *arg_o = PyStackRef_AsPyObjectBorrow(arg);
stack_pointer[0] = callable;
stack_pointer[1] = _stack_item_1;
stack_pointer[2] = arg;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
Py_ssize_t len_i = PyObject_Length(arg_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (len_i < 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
PyObject *res_o = PyLong_FromSsize_t(len_i);
assert((res_o != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
a = arg;
c = callable;
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache2 = c;
_tos_cache1 = a;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
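    /* A minimal sketch of the call that _CALL_LEN specializes: len(obj)
       becomes a direct PyObject_Length plus boxing of the result,
       bypassing the generic call machinery.  Kept under #if 0. */
    #if 0
    static PyObject *
    call_len_sketch(PyObject *arg)
    {
        Py_ssize_t len = PyObject_Length(arg);  /* may invoke __len__ */
        if (len < 0) {
            return NULL;  /* exception already set */
        }
        return PyLong_FromSsize_t(len);
    }
    #endif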
case _GUARD_CALLABLE_ISINSTANCE_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
callable = stack_pointer[-4];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyInterpreterState *interp = tstate->interp;
if (callable_o != interp->callable_cache.isinstance) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = stack_pointer[-1];
_tos_cache1 = stack_pointer[-2];
_tos_cache0 = stack_pointer[-3];
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_ISINSTANCE_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
callable = stack_pointer[-3];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyInterpreterState *interp = tstate->interp;
if (callable_o != interp->callable_cache.isinstance) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_0;
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = stack_pointer[-2];
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_ISINSTANCE_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
callable = stack_pointer[-2];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyInterpreterState *interp = tstate->interp;
if (callable_o != interp->callable_cache.isinstance) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_1;
_tos_cache1 = _stack_item_0;
_tos_cache0 = stack_pointer[-1];
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_ISINSTANCE_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
callable = stack_pointer[-1];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyInterpreterState *interp = tstate->interp;
if (callable_o != interp->callable_cache.isinstance) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CALL_ISINSTANCE_r31: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef cls;
_PyStackRef instance;
_PyStackRef null;
_PyStackRef callable;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
cls = _stack_item_2;
instance = _stack_item_1;
null = _stack_item_0;
callable = stack_pointer[-1];
STAT_INC(CALL, hit);
PyObject *inst_o = PyStackRef_AsPyObjectBorrow(instance);
PyObject *cls_o = PyStackRef_AsPyObjectBorrow(cls);
stack_pointer[0] = null;
stack_pointer[1] = instance;
stack_pointer[2] = cls;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int retval = PyObject_IsInstance(inst_o, cls_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (retval < 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
(void)null;
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(cls);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(instance);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(callable);
stack_pointer = _PyFrame_GetStackPointer(frame);
res = retval ? PyStackRef_True : PyStackRef_False;
assert((!PyStackRef_IsNull(res)) ^ (_PyErr_Occurred(tstate) != NULL));
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
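    /* A minimal sketch of the call that _CALL_ISINSTANCE specializes: the
       three-way PyObject_IsInstance result (-1 error, 0 false, 1 true) is
       mapped onto the bool singletons, just as the uop above does with
       PyStackRef_True/PyStackRef_False.  Kept under #if 0. */
    #if 0
    static PyObject *
    call_isinstance_sketch(PyObject *inst, PyObject *cls)
    {
        int r = PyObject_IsInstance(inst, cls);  /* may call __instancecheck__ */
        if (r < 0) {
            return NULL;
        }
        return PyBool_FromLong(r);
    }
    #endif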
case _GUARD_CALLABLE_LIST_APPEND_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
callable = stack_pointer[-3];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyInterpreterState *interp = tstate->interp;
if (callable_o != interp->callable_cache.list_append) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = stack_pointer[-1];
_tos_cache1 = stack_pointer[-2];
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_LIST_APPEND_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
callable = stack_pointer[-2];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyInterpreterState *interp = tstate->interp;
if (callable_o != interp->callable_cache.list_append) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_0;
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_LIST_APPEND_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
callable = stack_pointer[-1];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyInterpreterState *interp = tstate->interp;
if (callable_o != interp->callable_cache.list_append) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_1;
_tos_cache1 = _stack_item_0;
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_CALLABLE_LIST_APPEND_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
callable = _stack_item_0;
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyInterpreterState *interp = tstate->interp;
if (callable_o != interp->callable_cache.list_append) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CALL_LIST_APPEND_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef self;
_PyStackRef callable;
_PyStackRef none;
_PyStackRef c;
_PyStackRef s;
oparg = CURRENT_OPARG();
arg = stack_pointer[-1];
self = stack_pointer[-2];
callable = stack_pointer[-3];
assert(oparg == 1);
PyObject *self_o = PyStackRef_AsPyObjectBorrow(self);
if (!LOCK_OBJECT(self_o)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(CALL, hit);
int err = _PyList_AppendTakeRef((PyListObject *)self_o, PyStackRef_AsPyObjectSteal(arg));
UNLOCK_OBJECT(self_o);
if (err) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
c = callable;
s = self;
none = PyStackRef_None;
_tos_cache2 = s;
_tos_cache1 = c;
_tos_cache0 = none;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CALL_LIST_APPEND_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef self;
_PyStackRef callable;
_PyStackRef none;
_PyStackRef c;
_PyStackRef s;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
arg = _stack_item_0;
self = stack_pointer[-1];
callable = stack_pointer[-2];
assert(oparg == 1);
PyObject *self_o = PyStackRef_AsPyObjectBorrow(self);
if (!LOCK_OBJECT(self_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = arg;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(CALL, hit);
int err = _PyList_AppendTakeRef((PyListObject *)self_o, PyStackRef_AsPyObjectSteal(arg));
UNLOCK_OBJECT(self_o);
if (err) {
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
c = callable;
s = self;
none = PyStackRef_None;
_tos_cache2 = s;
_tos_cache1 = c;
_tos_cache0 = none;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CALL_LIST_APPEND_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef self;
_PyStackRef callable;
_PyStackRef none;
_PyStackRef c;
_PyStackRef s;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
arg = _stack_item_1;
self = _stack_item_0;
callable = stack_pointer[-1];
assert(oparg == 1);
PyObject *self_o = PyStackRef_AsPyObjectBorrow(self);
if (!LOCK_OBJECT(self_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = arg;
_tos_cache0 = self;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(CALL, hit);
int err = _PyList_AppendTakeRef((PyListObject *)self_o, PyStackRef_AsPyObjectSteal(arg));
UNLOCK_OBJECT(self_o);
if (err) {
stack_pointer[0] = self;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
c = callable;
s = self;
none = PyStackRef_None;
_tos_cache2 = s;
_tos_cache1 = c;
_tos_cache0 = none;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CALL_LIST_APPEND_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef self;
_PyStackRef callable;
_PyStackRef none;
_PyStackRef c;
_PyStackRef s;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
oparg = CURRENT_OPARG();
arg = _stack_item_2;
self = _stack_item_1;
callable = _stack_item_0;
assert(oparg == 1);
PyObject *self_o = PyStackRef_AsPyObjectBorrow(self);
if (!LOCK_OBJECT(self_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = arg;
_tos_cache1 = self;
_tos_cache0 = callable;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(CALL, hit);
int err = _PyList_AppendTakeRef((PyListObject *)self_o, PyStackRef_AsPyObjectSteal(arg));
UNLOCK_OBJECT(self_o);
if (err) {
stack_pointer[0] = callable;
stack_pointer[1] = self;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
c = callable;
s = self;
none = PyStackRef_None;
_tos_cache2 = s;
_tos_cache1 = c;
_tos_cache0 = none;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
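    /* A minimal sketch of what the _CALL_LIST_APPEND variants specialize:
       list.append(x) becomes a direct append plus pushing None.  The
       generated code uses the reference-stealing _PyList_AppendTakeRef;
       this sketch uses the public PyList_Append, which increfs the item
       instead.  Kept under #if 0. */
    #if 0
    static PyObject *
    list_append_sketch(PyObject *list, PyObject *item)
    {
        if (PyList_Append(list, item) < 0) {
            return NULL;
        }
        Py_RETURN_NONE;
    }
    #endif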
case _CALL_METHOD_DESCRIPTOR_O_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef res;
oparg = CURRENT_OPARG();
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
int total_args = oparg;
_PyStackRef *arguments = args;
if (!PyStackRef_IsNull(self_or_null)) {
arguments--;
total_args++;
}
PyMethodDescrObject *method = (PyMethodDescrObject *)callable_o;
if (total_args != 2) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
if (!Py_IS_TYPE(method, &PyMethodDescr_Type)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
PyMethodDef *meth = method->d_method;
if (meth->ml_flags != METH_O) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
if (_Py_ReachedRecursionLimit(tstate)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_PyStackRef arg_stackref = arguments[1];
_PyStackRef self_stackref = arguments[0];
if (!Py_IS_TYPE(PyStackRef_AsPyObjectBorrow(self_stackref),
method->d_common.d_type)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(CALL, hit);
PyCFunction cfunc = meth->ml_meth;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = _PyCFunction_TrampolineCall(cfunc,
PyStackRef_AsPyObjectBorrow(self_stackref),
PyStackRef_AsPyObjectBorrow(arg_stackref));
stack_pointer = _PyFrame_GetStackPointer(frame);
_Py_LeaveRecursiveCallTstate(tstate);
assert((res_o != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyStackRef tmp;
for (int _i = oparg; --_i >= 0;) {
tmp = args[_i];
args[_i] = PyStackRef_NULL;
PyStackRef_CLOSE(tmp);
}
tmp = self_or_null;
self_or_null = PyStackRef_NULL;
stack_pointer[-1 - oparg] = self_or_null;
PyStackRef_XCLOSE(tmp);
tmp = callable;
callable = PyStackRef_NULL;
stack_pointer[-2 - oparg] = callable;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef res;
oparg = CURRENT_OPARG();
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
int total_args = oparg;
_PyStackRef *arguments = args;
if (!PyStackRef_IsNull(self_or_null)) {
arguments--;
total_args++;
}
if (total_args == 0) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
PyMethodDescrObject *method = (PyMethodDescrObject *)callable_o;
if (!Py_IS_TYPE(method, &PyMethodDescr_Type)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
PyMethodDef *meth = method->d_method;
if (meth->ml_flags != (METH_FASTCALL|METH_KEYWORDS)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
PyTypeObject *d_type = method->d_common.d_type;
PyObject *self = PyStackRef_AsPyObjectBorrow(arguments[0]);
assert(self != NULL);
if (!Py_IS_TYPE(self, d_type)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(CALL, hit);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = _PyCallMethodDescriptorFastWithKeywords_StackRefSteal(
callable,
meth,
self,
arguments,
total_args
);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CALL_METHOD_DESCRIPTOR_NOARGS_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef res;
oparg = CURRENT_OPARG();
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
assert(oparg == 0 || oparg == 1);
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
int total_args = oparg;
if (!PyStackRef_IsNull(self_or_null)) {
args--;
total_args++;
}
if (total_args != 1) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
PyMethodDescrObject *method = (PyMethodDescrObject *)callable_o;
if (!Py_IS_TYPE(method, &PyMethodDescr_Type)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
PyMethodDef *meth = method->d_method;
_PyStackRef self_stackref = args[0];
PyObject *self = PyStackRef_AsPyObjectBorrow(self_stackref);
if (!Py_IS_TYPE(self, method->d_common.d_type)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
if (meth->ml_flags != METH_NOARGS) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
if (_Py_ReachedRecursionLimit(tstate)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(CALL, hit);
PyCFunction cfunc = meth->ml_meth;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = _PyCFunction_TrampolineCall(cfunc, self, NULL);
stack_pointer = _PyFrame_GetStackPointer(frame);
_Py_LeaveRecursiveCallTstate(tstate);
assert((res_o != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(self_stackref);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(callable);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
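    /* A minimal sketch of the METH_NOARGS convention guarded above: the C
       function still has the two-argument PyCFunction signature, but the
       second argument is always NULL.  Kept under #if 0. */
    #if 0
    static PyObject *
    meth_noargs_sketch(PyMethodDef *meth, PyObject *self)
    {
        return meth->ml_meth(self, NULL);
    }
    #endif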
case _CALL_METHOD_DESCRIPTOR_FAST_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef res;
oparg = CURRENT_OPARG();
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
int total_args = oparg;
_PyStackRef *arguments = args;
if (!PyStackRef_IsNull(self_or_null)) {
arguments--;
total_args++;
}
if (total_args == 0) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
PyMethodDescrObject *method = (PyMethodDescrObject *)callable_o;
if (!Py_IS_TYPE(method, &PyMethodDescr_Type)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
PyMethodDef *meth = method->d_method;
if (meth->ml_flags != METH_FASTCALL) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
PyObject *self = PyStackRef_AsPyObjectBorrow(arguments[0]);
assert(self != NULL);
if (!Py_IS_TYPE(self, method->d_common.d_type)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
STAT_INC(CALL, hit);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = _PyCallMethodDescriptorFast_StackRefSteal(
callable,
meth,
self,
arguments,
total_args
);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _MONITOR_CALL_KW is not a viable micro-op for tier 2 because it uses the 'this_instr' variable */
case _MAYBE_EXPAND_METHOD_KW_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
if (PyStackRef_TYPE(callable) == &PyMethod_Type && PyStackRef_IsNull(self_or_null)) {
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
PyObject *self = ((PyMethodObject *)callable_o)->im_self;
self_or_null = PyStackRef_FromPyObjectNew(self);
PyObject *method = ((PyMethodObject *)callable_o)->im_func;
_PyStackRef temp = callable;
callable = PyStackRef_FromPyObjectNew(method);
stack_pointer[-2 - oparg] = callable;
stack_pointer[-1 - oparg] = self_or_null;
stack_pointer[0] = _stack_item_0;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(temp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
}
_tos_cache0 = _stack_item_0;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer[-2 - oparg] = callable;
stack_pointer[-1 - oparg] = self_or_null;
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _DO_CALL_KW is not a viable micro-op for tier 2 because it uses the 'this_instr' variable */
case _PY_FRAME_KW_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef kwnames;
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef new_frame;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
kwnames = _stack_item_0;
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
int total_args = oparg;
_PyStackRef *arguments = args;
if (!PyStackRef_IsNull(self_or_null)) {
arguments--;
total_args++;
}
PyObject *kwnames_o = PyStackRef_AsPyObjectBorrow(kwnames);
int positional_args = total_args - (int)PyTuple_GET_SIZE(kwnames_o);
assert(Py_TYPE(callable_o) == &PyFunction_Type);
int code_flags = ((PyCodeObject*)PyFunction_GET_CODE(callable_o))->co_flags;
PyObject *locals = code_flags & CO_OPTIMIZED ? NULL : Py_NewRef(PyFunction_GET_GLOBALS(callable_o));
stack_pointer[0] = kwnames;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyInterpreterFrame *temp = _PyEvalFramePushAndInit(
tstate, callable, locals,
arguments, positional_args, kwnames_o, frame
);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(kwnames);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (temp == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
new_frame = PyStackRef_Wrap(temp);
_tos_cache0 = new_frame;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_FUNCTION_VERSION_KW_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
callable = stack_pointer[-2 - oparg];
uint32_t func_version = (uint32_t)CURRENT_OPERAND0_32();
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (!PyFunction_Check(callable_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
PyFunctionObject *func = (PyFunctionObject *)callable_o;
if (func->func_version != func_version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_METHOD_VERSION_KW_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null;
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
uint32_t func_version = (uint32_t)CURRENT_OPERAND0_32();
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (Py_TYPE(callable_o) != &PyMethod_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
PyObject *func = ((PyMethodObject *)callable_o)->im_func;
if (!PyFunction_Check(func)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
if (((PyFunctionObject *)func)->func_version != func_version) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
if (!PyStackRef_IsNull(null)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _EXPAND_METHOD_KW_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
assert(PyStackRef_IsNull(self_or_null));
_PyStackRef callable_s = callable;
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
assert(Py_TYPE(callable_o) == &PyMethod_Type);
self_or_null = PyStackRef_FromPyObjectNew(((PyMethodObject *)callable_o)->im_self);
callable = PyStackRef_FromPyObjectNew(((PyMethodObject *)callable_o)->im_func);
assert(PyStackRef_FunctionCheck(callable));
stack_pointer[-2 - oparg] = callable;
stack_pointer[-1 - oparg] = self_or_null;
stack_pointer[0] = _stack_item_0;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(callable_s);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = _stack_item_0;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_IS_NOT_PY_CALLABLE_KW_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
callable = stack_pointer[-2 - oparg];
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);
if (PyFunction_Check(callable_o)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
if (Py_TYPE(callable_o) == &PyMethod_Type) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CALL_KW_NON_PY_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef kwnames;
_PyStackRef *args;
_PyStackRef self_or_null;
_PyStackRef callable;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
kwnames = _stack_item_0;
args = &stack_pointer[-oparg];
self_or_null = stack_pointer[-1 - oparg];
callable = stack_pointer[-2 - oparg];
#if TIER_ONE
assert(opcode != INSTRUMENTED_CALL);
#endif
int total_args = oparg;
_PyStackRef *arguments = args;
if (!PyStackRef_IsNull(self_or_null)) {
arguments--;
total_args++;
}
stack_pointer[0] = kwnames;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = _Py_VectorCall_StackRefSteal(
callable,
arguments,
total_args,
kwnames);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
stack_pointer += -3 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -3 - oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
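    /* A minimal sketch of the vectorcall keyword convention used by the
       _KW uops: the values for the names in kwnames sit at the end of the
       flat argument array, after the positionals, so f(a, b, key=c)
       passes a three-element array with nargs == 2.  The function and the
       "key" keyword here are hypothetical; kept under #if 0. */
    #if 0
    static PyObject *
    vectorcall_kw_sketch(PyObject *func, PyObject *a, PyObject *b, PyObject *kwval)
    {
        PyObject *kwnames = Py_BuildValue("(s)", "key");  /* hypothetical keyword */
        if (kwnames == NULL) {
            return NULL;
        }
        PyObject *stack[] = {a, b, kwval};  /* positionals first, kw values last */
        PyObject *res = PyObject_Vectorcall(func, stack, 2, kwnames);
        Py_DECREF(kwnames);
        return res;
    }
    #endif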
case _MAKE_CALLARGS_A_TUPLE_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef callargs;
_PyStackRef func;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
callargs = _stack_item_1;
func = stack_pointer[-1];
PyObject *callargs_o = PyStackRef_AsPyObjectBorrow(callargs);
if (!PyTuple_CheckExact(callargs_o)) {
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = callargs;
stack_pointer[2] = _stack_item_2;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = _Py_Check_ArgsIterable(tstate, PyStackRef_AsPyObjectBorrow(func), callargs_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err < 0) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *tuple_o = PySequence_Tuple(callargs_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (tuple_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_PyStackRef temp = callargs;
callargs = PyStackRef_FromPyObjectSteal(tuple_o);
stack_pointer[-2] = callargs;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(temp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -3;
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = callargs;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _DO_CALL_FUNCTION_EX is not a viable micro-op for tier 2 because it uses the 'this_instr' variable */
case _MAKE_FUNCTION_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef codeobj_st;
_PyStackRef func;
_PyStackRef _stack_item_0 = _tos_cache0;
codeobj_st = _stack_item_0;
PyObject *codeobj = PyStackRef_AsPyObjectBorrow(codeobj_st);
stack_pointer[0] = codeobj_st;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyFunctionObject *func_obj = (PyFunctionObject *)
PyFunction_New(codeobj, GLOBALS());
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(codeobj_st);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (func_obj == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
_PyFunction_SetVersion(
func_obj, ((PyCodeObject *)codeobj)->co_version);
func = PyStackRef_FromPyObjectSteal((PyObject *)func_obj);
_tos_cache0 = func;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
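    /* A minimal sketch of the core of _MAKE_FUNCTION: a code object plus
       the current globals yields a new function via the public
       PyFunction_New; the version stamp set afterwards is what the
       _CHECK_FUNCTION_VERSION guards compare against.  Kept under #if 0. */
    #if 0
    static PyObject *
    make_function_sketch(PyObject *code, PyObject *globals)
    {
        return PyFunction_New(code, globals);  /* NULL on error */
    }
    #endif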
case _SET_FUNCTION_ATTRIBUTE_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef func_in;
_PyStackRef attr_st;
_PyStackRef func_out;
oparg = CURRENT_OPARG();
func_in = stack_pointer[-1];
attr_st = stack_pointer[-2];
PyObject *func = PyStackRef_AsPyObjectBorrow(func_in);
PyObject *attr = PyStackRef_AsPyObjectSteal(attr_st);
func_out = func_in;
assert(PyFunction_Check(func));
size_t offset = _Py_FunctionAttributeOffsets[oparg];
assert(offset != 0);
PyObject **ptr = (PyObject **)(((char *)func) + offset);
assert(*ptr == NULL);
*ptr = attr;
_tos_cache0 = func_out;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SET_FUNCTION_ATTRIBUTE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef func_in;
_PyStackRef attr_st;
_PyStackRef func_out;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
func_in = _stack_item_0;
attr_st = stack_pointer[-1];
PyObject *func = PyStackRef_AsPyObjectBorrow(func_in);
PyObject *attr = PyStackRef_AsPyObjectSteal(attr_st);
func_out = func_in;
assert(PyFunction_Check(func));
size_t offset = _Py_FunctionAttributeOffsets[oparg];
assert(offset != 0);
PyObject **ptr = (PyObject **)(((char *)func) + offset);
assert(*ptr == NULL);
*ptr = attr;
_tos_cache0 = func_out;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SET_FUNCTION_ATTRIBUTE_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef func_in;
_PyStackRef attr_st;
_PyStackRef func_out;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
func_in = _stack_item_1;
attr_st = _stack_item_0;
PyObject *func = PyStackRef_AsPyObjectBorrow(func_in);
PyObject *attr = PyStackRef_AsPyObjectSteal(attr_st);
func_out = func_in;
assert(PyFunction_Check(func));
size_t offset = _Py_FunctionAttributeOffsets[oparg];
assert(offset != 0);
PyObject **ptr = (PyObject **)(((char *)func) + offset);
assert(*ptr == NULL);
*ptr = attr;
_tos_cache0 = func_out;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SET_FUNCTION_ATTRIBUTE_r32: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef func_in;
_PyStackRef attr_st;
_PyStackRef func_out;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
oparg = CURRENT_OPARG();
func_in = _stack_item_2;
attr_st = _stack_item_1;
PyObject *func = PyStackRef_AsPyObjectBorrow(func_in);
PyObject *attr = PyStackRef_AsPyObjectSteal(attr_st);
func_out = func_in;
assert(PyFunction_Check(func));
size_t offset = _Py_FunctionAttributeOffsets[oparg];
assert(offset != 0);
PyObject **ptr = (PyObject **)(((char *)func) + offset);
assert(*ptr == NULL);
*ptr = attr;
_tos_cache1 = func_out;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
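    /* In the _SET_FUNCTION_ATTRIBUTE variants above, oparg selects which
       function field receives the value: _Py_FunctionAttributeOffsets maps
       the MAKE_FUNCTION flag to the byte offset of the corresponding slot
       (defaults, keyword defaults, annotations, or closure), so the store
       is a single raw pointer write into a slot asserted to be NULL. */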
case _RETURN_GENERATOR_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef res;
assert(PyStackRef_FunctionCheck(frame->f_funcobj));
PyFunctionObject *func = (PyFunctionObject *)PyStackRef_AsPyObjectBorrow(frame->f_funcobj);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyGenObject *gen = (PyGenObject *)_Py_MakeCoro(func);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (gen == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
assert(STACK_LEVEL() == 0);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyInterpreterFrame *gen_frame = &gen->gi_iframe;
frame->instr_ptr++;
_PyFrame_Copy(frame, gen_frame);
assert(frame->frame_obj == NULL);
gen->gi_frame_state = FRAME_CREATED;
gen_frame->owner = FRAME_OWNED_BY_GENERATOR;
_Py_LeaveRecursiveCallPy(tstate);
_PyInterpreterFrame *prev = frame->previous;
_PyThreadState_PopFrame(tstate, frame);
frame = tstate->current_frame = prev;
LOAD_IP(frame->return_offset);
stack_pointer = _PyFrame_GetStackPointer(frame);
res = PyStackRef_FromPyObjectStealMortal((PyObject *)gen);
LLTRACE_RESUME_FRAME();
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BUILD_SLICE_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef *args;
_PyStackRef slice;
oparg = CURRENT_OPARG();
args = &stack_pointer[-oparg];
PyObject *start_o = PyStackRef_AsPyObjectBorrow(args[0]);
PyObject *stop_o = PyStackRef_AsPyObjectBorrow(args[1]);
PyObject *step_o = oparg == 3 ? PyStackRef_AsPyObjectBorrow(args[2]) : NULL;
PyObject *slice_o = PySlice_New(start_o, stop_o, step_o);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyStackRef tmp;
for (int _i = oparg; --_i >= 0;) {
tmp = args[_i];
args[_i] = PyStackRef_NULL;
PyStackRef_CLOSE(tmp);
}
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -oparg;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (slice_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
slice = PyStackRef_FromPyObjectStealMortal(slice_o);
_tos_cache0 = slice;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
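    /* A minimal sketch of the call behind _BUILD_SLICE: the two- or
       three-operand form maps straight onto the public PySlice_New, with a
       NULL step for the two-argument case.  Kept under #if 0. */
    #if 0
    static PyObject *
    build_slice_sketch(PyObject *start, PyObject *stop, PyObject *step)
    {
        return PySlice_New(start, stop, step);  /* step may be NULL */
    }
    #endif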
case _CONVERT_VALUE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef result;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
value = _stack_item_0;
conversion_func conv_fn;
assert(oparg >= FVC_STR && oparg <= FVC_ASCII);
conv_fn = _PyEval_ConversionFuncs[oparg];
stack_pointer[0] = value;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *result_o = conv_fn(PyStackRef_AsPyObjectBorrow(value));
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(value);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (result_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
result = PyStackRef_FromPyObjectSteal(result_o);
_tos_cache0 = result;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _FORMAT_SIMPLE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
value = _stack_item_0;
PyObject *value_o = PyStackRef_AsPyObjectBorrow(value);
if (!PyUnicode_CheckExact(value_o)) {
stack_pointer[0] = value;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = PyObject_Format(value_o, NULL);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(value);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
}
else {
res = value;
}
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
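    /* A minimal sketch of the fast path in _FORMAT_SIMPLE: an exact str
       formats as itself, so only non-str values pay for PyObject_Format
       (a NULL format spec means the empty spec).  Kept under #if 0. */
    #if 0
    static PyObject *
    format_simple_sketch(PyObject *value)
    {
        if (PyUnicode_CheckExact(value)) {
            return Py_NewRef(value);
        }
        return PyObject_Format(value, NULL);
    }
    #endif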
case _FORMAT_WITH_SPEC_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef fmt_spec;
_PyStackRef value;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
fmt_spec = _stack_item_1;
value = _stack_item_0;
stack_pointer[0] = value;
stack_pointer[1] = fmt_spec;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = PyObject_Format(PyStackRef_AsPyObjectBorrow(value), PyStackRef_AsPyObjectBorrow(fmt_spec));
_PyStackRef tmp = fmt_spec;
fmt_spec = PyStackRef_NULL;
stack_pointer[-1] = fmt_spec;
PyStackRef_CLOSE(tmp);
tmp = value;
value = PyStackRef_NULL;
stack_pointer[-2] = value;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COPY_1_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef bottom;
_PyStackRef top;
bottom = stack_pointer[-1];
top = PyStackRef_DUP(bottom);
_tos_cache1 = top;
_tos_cache0 = bottom;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COPY_1_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef bottom;
_PyStackRef top;
_PyStackRef _stack_item_0 = _tos_cache0;
bottom = _stack_item_0;
top = PyStackRef_DUP(bottom);
_tos_cache1 = top;
_tos_cache0 = bottom;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COPY_1_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef bottom;
_PyStackRef top;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
bottom = _stack_item_1;
top = PyStackRef_DUP(bottom);
_tos_cache2 = top;
_tos_cache1 = bottom;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COPY_2_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef bottom;
_PyStackRef top;
bottom = stack_pointer[-2];
top = PyStackRef_DUP(bottom);
_tos_cache2 = top;
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = bottom;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COPY_2_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef bottom;
_PyStackRef top;
_PyStackRef _stack_item_0 = _tos_cache0;
bottom = stack_pointer[-1];
top = PyStackRef_DUP(bottom);
_tos_cache2 = top;
_tos_cache1 = _stack_item_0;
_tos_cache0 = bottom;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COPY_2_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef bottom;
_PyStackRef top;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
bottom = _stack_item_0;
top = PyStackRef_DUP(bottom);
_tos_cache2 = top;
_tos_cache1 = _stack_item_1;
_tos_cache0 = bottom;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COPY_3_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef bottom;
_PyStackRef top;
bottom = stack_pointer[-3];
top = PyStackRef_DUP(bottom);
_tos_cache2 = top;
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = stack_pointer[-2];
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COPY_3_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef bottom;
_PyStackRef top;
_PyStackRef _stack_item_0 = _tos_cache0;
bottom = stack_pointer[-2];
top = PyStackRef_DUP(bottom);
_tos_cache2 = top;
_tos_cache1 = _stack_item_0;
_tos_cache0 = stack_pointer[-1];
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COPY_3_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef bottom;
_PyStackRef top;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
bottom = stack_pointer[-1];
top = PyStackRef_DUP(bottom);
_tos_cache2 = top;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COPY_3_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef bottom;
_PyStackRef top;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
bottom = _stack_item_0;
top = PyStackRef_DUP(bottom);
_tos_cache2 = top;
_tos_cache1 = _stack_item_2;
_tos_cache0 = _stack_item_1;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer[0] = bottom;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _COPY_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef bottom;
_PyStackRef top;
oparg = CURRENT_OPARG();
bottom = stack_pointer[-1 - (oparg-1)];
top = PyStackRef_DUP(bottom);
_tos_cache0 = top;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _BINARY_OP_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef rhs;
_PyStackRef lhs;
_PyStackRef res;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
rhs = _stack_item_1;
lhs = _stack_item_0;
PyObject *lhs_o = PyStackRef_AsPyObjectBorrow(lhs);
PyObject *rhs_o = PyStackRef_AsPyObjectBorrow(rhs);
assert(_PyEval_BinaryOps[oparg]);
stack_pointer[0] = lhs;
stack_pointer[1] = rhs;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyObject *res_o = _PyEval_BinaryOps[oparg](lhs_o, rhs_o);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (res_o == NULL) {
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_ERROR();
}
res = PyStackRef_FromPyObjectSteal(res_o);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyStackRef tmp = lhs;
lhs = res;
stack_pointer[-2] = lhs;
PyStackRef_CLOSE(tmp);
tmp = rhs;
rhs = PyStackRef_NULL;
stack_pointer[-1] = rhs;
PyStackRef_CLOSE(tmp);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_tos_cache0 = res;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
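    /* _BINARY_OP dispatches through _PyEval_BinaryOps, a table of binary
       functions indexed by oparg.  A minimal sketch of one entry's
       behaviour, assuming oparg selects addition (NB_ADD): it acts like
       the public PyNumber_Add.  Kept under #if 0. */
    #if 0
    static PyObject *
    binary_op_add_sketch(PyObject *lhs, PyObject *rhs)
    {
        return PyNumber_Add(lhs, rhs);  /* honours __add__/__radd__ */
    }
    #endif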
case _SWAP_2_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef top;
_PyStackRef bottom;
top = stack_pointer[-1];
bottom = stack_pointer[-2];
_PyStackRef temp = bottom;
bottom = top;
top = temp;
_tos_cache1 = top;
_tos_cache0 = bottom;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SWAP_2_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef top;
_PyStackRef bottom;
_PyStackRef _stack_item_0 = _tos_cache0;
top = _stack_item_0;
bottom = stack_pointer[-1];
_PyStackRef temp = bottom;
bottom = top;
top = temp;
_tos_cache1 = top;
_tos_cache0 = bottom;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SWAP_2_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef top;
_PyStackRef bottom;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
top = _stack_item_1;
bottom = _stack_item_0;
_PyStackRef temp = bottom;
bottom = top;
top = temp;
_tos_cache1 = top;
_tos_cache0 = bottom;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SWAP_2_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef top;
_PyStackRef bottom;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
top = _stack_item_2;
bottom = _stack_item_1;
_PyStackRef temp = bottom;
bottom = top;
top = temp;
_tos_cache2 = top;
_tos_cache1 = bottom;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SWAP_3_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef top;
_PyStackRef bottom;
top = stack_pointer[-1];
bottom = stack_pointer[-3];
_PyStackRef temp = bottom;
bottom = top;
top = temp;
_tos_cache2 = top;
_tos_cache1 = stack_pointer[-2];
_tos_cache0 = bottom;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SWAP_3_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef top;
_PyStackRef bottom;
_PyStackRef _stack_item_0 = _tos_cache0;
top = _stack_item_0;
bottom = stack_pointer[-2];
_PyStackRef temp = bottom;
bottom = top;
top = temp;
_tos_cache2 = top;
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = bottom;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SWAP_3_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef top;
_PyStackRef bottom;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
top = _stack_item_1;
bottom = stack_pointer[-1];
_PyStackRef temp = bottom;
bottom = top;
top = temp;
_tos_cache2 = top;
_tos_cache1 = _stack_item_0;
_tos_cache0 = bottom;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SWAP_3_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef top;
_PyStackRef bottom;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
top = _stack_item_2;
bottom = _stack_item_0;
_PyStackRef temp = bottom;
bottom = top;
top = temp;
_tos_cache2 = top;
_tos_cache1 = _stack_item_1;
_tos_cache0 = bottom;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
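/* The generic _SWAP exchanges the cached TOS with the stack item
   oparg-1 positions below it; oparg == 2 selects stack_pointer[-1]. */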
case _SWAP_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef top;
_PyStackRef bottom;
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
top = _stack_item_0;
bottom = stack_pointer[-1 - (oparg-2)];
_PyStackRef temp = bottom;
bottom = top;
top = temp;
_tos_cache0 = top;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer[-1 - (oparg-2)] = bottom;
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _INSTRUMENTED_LINE is not a viable micro-op for tier 2 because it is instrumented */
/* _INSTRUMENTED_INSTRUCTION is not a viable micro-op for tier 2 because it is instrumented */
/* _INSTRUMENTED_JUMP_FORWARD is not a viable micro-op for tier 2 because it is instrumented */
/* _MONITOR_JUMP_BACKWARD is not a viable micro-op for tier 2 because it uses the 'this_instr' variable */
/* _INSTRUMENTED_NOT_TAKEN is not a viable micro-op for tier 2 because it is instrumented */
/* _INSTRUMENTED_POP_JUMP_IF_TRUE is not a viable micro-op for tier 2 because it is instrumented */
/* _INSTRUMENTED_POP_JUMP_IF_FALSE is not a viable micro-op for tier 2 because it is instrumented */
/* _INSTRUMENTED_POP_JUMP_IF_NONE is not a viable micro-op for tier 2 because it is instrumented */
/* _INSTRUMENTED_POP_JUMP_IF_NOT_NONE is not a viable micro-op for tier 2 because it is instrumented */
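/* _GUARD_IS_TRUE_POP and _GUARD_IS_FALSE_POP pop the flag and side-exit
   via JUMP_TO_JUMP_TARGET() when it does not match the expected value;
   the flag is popped on the hit and the miss path alike, and neither
   path needs to close it. */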
case _GUARD_IS_TRUE_POP_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef flag;
flag = stack_pointer[-1];
int is_true = PyStackRef_IsTrue(flag);
if (!is_true) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
JUMP_TO_JUMP_TARGET();
}
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IS_TRUE_POP_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef flag;
_PyStackRef _stack_item_0 = _tos_cache0;
flag = _stack_item_0;
int is_true = PyStackRef_IsTrue(flag);
if (!is_true) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IS_TRUE_POP_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef flag;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
flag = _stack_item_1;
int is_true = PyStackRef_IsTrue(flag);
if (!is_true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IS_TRUE_POP_r32: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef flag;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
flag = _stack_item_2;
int is_true = PyStackRef_IsTrue(flag);
if (!is_true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IS_FALSE_POP_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef flag;
flag = stack_pointer[-1];
int is_false = PyStackRef_IsFalse(flag);
if (!is_false) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
JUMP_TO_JUMP_TARGET();
}
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IS_FALSE_POP_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef flag;
_PyStackRef _stack_item_0 = _tos_cache0;
flag = _stack_item_0;
int is_false = PyStackRef_IsFalse(flag);
if (!is_false) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IS_FALSE_POP_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef flag;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
flag = _stack_item_1;
int is_false = PyStackRef_IsFalse(flag);
if (!is_false) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IS_FALSE_POP_r32: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef flag;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
flag = _stack_item_2;
int is_false = PyStackRef_IsFalse(flag);
if (!is_false) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
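/* Unlike the bool guards above, a failing _GUARD_IS_NONE_POP owns a
   non-None reference: the cached items are spilled back to the stack
   and the value is released with PyStackRef_CLOSE before the side
   exit. */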
case _GUARD_IS_NONE_POP_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef val;
val = stack_pointer[-1];
int is_none = PyStackRef_IsNone(val);
if (!is_none) {
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(val);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (1) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
}
SET_CURRENT_CACHED_VALUES(0);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IS_NONE_POP_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef val;
_PyStackRef _stack_item_0 = _tos_cache0;
val = _stack_item_0;
int is_none = PyStackRef_IsNone(val);
if (!is_none) {
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(val);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (1) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
}
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IS_NONE_POP_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef val;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
val = _stack_item_1;
int is_none = PyStackRef_IsNone(val);
if (!is_none) {
stack_pointer[0] = _stack_item_0;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(val);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (1) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IS_NONE_POP_r32: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef val;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
val = _stack_item_2;
int is_none = PyStackRef_IsNone(val);
if (!is_none) {
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(val);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (1) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
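/* _GUARD_IS_NOT_NONE_POP always releases the popped value and
   side-exits when it was None. */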
case _GUARD_IS_NOT_NONE_POP_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef val;
_PyStackRef _stack_item_0 = _tos_cache0;
val = _stack_item_0;
int is_none = PyStackRef_IsNone(val);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(val);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (is_none) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
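/* _JUMP_TO_TOP loops back to the start of the trace via
   JUMP_TO_JUMP_TARGET(); the bookkeeping after the jump is never
   reached. */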
case _JUMP_TO_TOP_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
JUMP_TO_JUMP_TARGET();
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
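/* _SET_IP stores its 64-bit operand (a _Py_CODEUNIT pointer) into
   frame->instr_ptr so the frame reports an accurate bytecode position
   to any later uop that deopts, raises, or otherwise escapes. */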
case _SET_IP_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
PyObject *instr_ptr = (PyObject *)CURRENT_OPERAND0_64();
frame->instr_ptr = (_Py_CODEUNIT *)instr_ptr;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SET_IP_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
PyObject *instr_ptr = (PyObject *)CURRENT_OPERAND0_64();
frame->instr_ptr = (_Py_CODEUNIT *)instr_ptr;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SET_IP_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
PyObject *instr_ptr = (PyObject *)CURRENT_OPERAND0_64();
frame->instr_ptr = (_Py_CODEUNIT *)instr_ptr;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SET_IP_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
PyObject *instr_ptr = (PyObject *)CURRENT_OPERAND0_64();
frame->instr_ptr = (_Py_CODEUNIT *)instr_ptr;
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
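/* _CHECK_STACK_SPACE_OPERAND guards a projected call: it side-exits
   unless the thread has data-stack room for a frame of `framesize`
   slots and more than one level of Python recursion remaining. */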
case _CHECK_STACK_SPACE_OPERAND_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
uint32_t framesize = (uint32_t)CURRENT_OPERAND0_32();
assert(framesize <= INT_MAX);
if (!_PyThreadState_HasStackSpace(tstate, framesize)) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
if (tstate->py_recursion_remaining <= 1) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_STACK_SPACE_OPERAND_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
uint32_t framesize = (uint32_t)CURRENT_OPERAND0_32();
assert(framesize <= INT_MAX);
if (!_PyThreadState_HasStackSpace(tstate, framesize)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
if (tstate->py_recursion_remaining <= 1) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_STACK_SPACE_OPERAND_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
uint32_t framesize = (uint32_t)CURRENT_OPERAND0_32();
assert(framesize <= INT_MAX);
if (!_PyThreadState_HasStackSpace(tstate, framesize)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
if (tstate->py_recursion_remaining <= 1) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_STACK_SPACE_OPERAND_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
uint32_t framesize = (uint32_t)CURRENT_OPERAND0_32();
assert(framesize <= INT_MAX);
if (!_PyThreadState_HasStackSpace(tstate, framesize)) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
if (tstate->py_recursion_remaining <= 1) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
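/* _SAVE_RETURN_OFFSET records where the caller resumes once an inlined
   call returns; only the TIER_TWO branch applies in this file, taking
   the offset directly from the oparg. */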
case _SAVE_RETURN_OFFSET_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
oparg = CURRENT_OPARG();
#if TIER_ONE
frame->return_offset = (uint16_t)(next_instr - this_instr);
#endif
#if TIER_TWO
frame->return_offset = oparg;
#endif
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SAVE_RETURN_OFFSET_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
oparg = CURRENT_OPARG();
#if TIER_ONE
frame->return_offset = (uint16_t)(next_instr - this_instr);
#endif
#if TIER_TWO
frame->return_offset = oparg;
#endif
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SAVE_RETURN_OFFSET_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
oparg = CURRENT_OPARG();
#if TIER_ONE
frame->return_offset = (uint16_t)(next_instr - this_instr);
#endif
#if TIER_TWO
frame->return_offset = oparg;
#endif
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SAVE_RETURN_OFFSET_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
oparg = CURRENT_OPARG();
#if TIER_ONE
frame->return_offset = (uint16_t)(next_instr - this_instr);
#endif
#if TIER_TWO
frame->return_offset = oparg;
#endif
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
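/* _EXIT_TRACE leaves the current executor through a pre-allocated
   _PyExitData: cached TOS values are flushed back to the stack, the
   exit is recorded in tstate->jit_exit, and control transfers to the
   exit's executor via TIER2_TO_TIER2(). The Py_DEBUG-only block merely
   logs the side exit when lltrace is high enough. */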
case _EXIT_TRACE_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
PyObject *exit_p = (PyObject *)CURRENT_OPERAND0_64();
_PyExitData *exit = (_PyExitData *)exit_p;
#if defined(Py_DEBUG) && !defined(_Py_JIT)
const _Py_CODEUNIT *target = ((frame->owner == FRAME_OWNED_BY_INTERPRETER)
? _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR : _PyFrame_GetBytecode(frame))
+ exit->target;
OPT_HIST(trace_uop_execution_counter, trace_run_length_hist);
if (frame->lltrace >= 3) {
_PyFrame_SetStackPointer(frame, stack_pointer);
printf("SIDE EXIT: [UOp ");
_PyUOpPrint(&next_uop[-1]);
printf(", exit %tu, temp %d, target %d -> %s, is_control_flow %d]\n",
exit - current_executor->exits, exit->temperature.value_and_backoff,
(int)(target - _PyFrame_GetBytecode(frame)),
_PyOpcode_OpName[target->op.code], exit->is_control_flow);
stack_pointer = _PyFrame_GetStackPointer(frame);
}
#endif
tstate->jit_exit = exit;
SET_CURRENT_CACHED_VALUES(0);
TIER2_TO_TIER2(exit->executor);
}
case _EXIT_TRACE_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
PyObject *exit_p = (PyObject *)CURRENT_OPERAND0_64();
_PyExitData *exit = (_PyExitData *)exit_p;
#if defined(Py_DEBUG) && !defined(_Py_JIT)
const _Py_CODEUNIT *target = ((frame->owner == FRAME_OWNED_BY_INTERPRETER)
? _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR : _PyFrame_GetBytecode(frame))
+ exit->target;
OPT_HIST(trace_uop_execution_counter, trace_run_length_hist);
if (frame->lltrace >= 3) {
stack_pointer[0] = _stack_item_0;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
printf("SIDE EXIT: [UOp ");
_PyUOpPrint(&next_uop[-1]);
printf(", exit %tu, temp %d, target %d -> %s, is_control_flow %d]\n",
exit - current_executor->exits, exit->temperature.value_and_backoff,
(int)(target - _PyFrame_GetBytecode(frame)),
_PyOpcode_OpName[target->op.code], exit->is_control_flow);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
}
#endif
tstate->jit_exit = exit;
SET_CURRENT_CACHED_VALUES(0);
stack_pointer[0] = _stack_item_0;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
TIER2_TO_TIER2(exit->executor);
}
case _EXIT_TRACE_r20: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
PyObject *exit_p = (PyObject *)CURRENT_OPERAND0_64();
_PyExitData *exit = (_PyExitData *)exit_p;
#if defined(Py_DEBUG) && !defined(_Py_JIT)
const _Py_CODEUNIT *target = ((frame->owner == FRAME_OWNED_BY_INTERPRETER)
? _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR : _PyFrame_GetBytecode(frame))
+ exit->target;
OPT_HIST(trace_uop_execution_counter, trace_run_length_hist);
if (frame->lltrace >= 3) {
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
printf("SIDE EXIT: [UOp ");
_PyUOpPrint(&next_uop[-1]);
printf(", exit %tu, temp %d, target %d -> %s, is_control_flow %d]\n",
exit - current_executor->exits, exit->temperature.value_and_backoff,
(int)(target - _PyFrame_GetBytecode(frame)),
_PyOpcode_OpName[target->op.code], exit->is_control_flow);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
}
#endif
tstate->jit_exit = exit;
SET_CURRENT_CACHED_VALUES(0);
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
TIER2_TO_TIER2(exit->executor);
}
case _EXIT_TRACE_r30: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
PyObject *exit_p = (PyObject *)CURRENT_OPERAND0_64();
_PyExitData *exit = (_PyExitData *)exit_p;
#if defined(Py_DEBUG) && !defined(_Py_JIT)
const _Py_CODEUNIT *target = ((frame->owner == FRAME_OWNED_BY_INTERPRETER)
? _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR : _PyFrame_GetBytecode(frame))
+ exit->target;
OPT_HIST(trace_uop_execution_counter, trace_run_length_hist);
if (frame->lltrace >= 3) {
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer[2] = _stack_item_2;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
printf("SIDE EXIT: [UOp ");
_PyUOpPrint(&next_uop[-1]);
printf(", exit %tu, temp %d, target %d -> %s, is_control_flow %d]\n",
exit - current_executor->exits, exit->temperature.value_and_backoff,
(int)(target - _PyFrame_GetBytecode(frame)),
_PyOpcode_OpName[target->op.code], exit->is_control_flow);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -3;
}
#endif
tstate->jit_exit = exit;
SET_CURRENT_CACHED_VALUES(0);
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer[2] = _stack_item_2;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
TIER2_TO_TIER2(exit->executor);
}
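/* _DYNAMIC_EXIT is used where the target instruction pointer is only
   known at run time: cached values are flushed and execution falls
   back to tier 1 at frame->instr_ptr via GOTO_TIER_ONE(). */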
case _DYNAMIC_EXIT_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
PyObject *exit_p = (PyObject *)CURRENT_OPERAND0_64();
#if defined(Py_DEBUG) && !defined(_Py_JIT)
_PyExitData *exit = (_PyExitData *)exit_p;
_Py_CODEUNIT *target = frame->instr_ptr;
OPT_HIST(trace_uop_execution_counter, trace_run_length_hist);
if (frame->lltrace >= 3) {
_PyFrame_SetStackPointer(frame, stack_pointer);
printf("DYNAMIC EXIT: [UOp ");
_PyUOpPrint(&next_uop[-1]);
printf(", exit %tu, temp %d, target %d -> %s]\n",
exit - current_executor->exits, exit->temperature.value_and_backoff,
(int)(target - _PyFrame_GetBytecode(frame)),
_PyOpcode_OpName[target->op.code]);
stack_pointer = _PyFrame_GetStackPointer(frame);
}
#endif
SET_CURRENT_CACHED_VALUES(0);
GOTO_TIER_ONE(frame->instr_ptr);
}
case _DYNAMIC_EXIT_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
PyObject *exit_p = (PyObject *)CURRENT_OPERAND0_64();
#if defined(Py_DEBUG) && !defined(_Py_JIT)
_PyExitData *exit = (_PyExitData *)exit_p;
_Py_CODEUNIT *target = frame->instr_ptr;
OPT_HIST(trace_uop_execution_counter, trace_run_length_hist);
if (frame->lltrace >= 3) {
stack_pointer[0] = _stack_item_0;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
printf("DYNAMIC EXIT: [UOp ");
_PyUOpPrint(&next_uop[-1]);
printf(", exit %tu, temp %d, target %d -> %s]\n",
exit - current_executor->exits, exit->temperature.value_and_backoff,
(int)(target - _PyFrame_GetBytecode(frame)),
_PyOpcode_OpName[target->op.code]);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
}
#endif
SET_CURRENT_CACHED_VALUES(0);
stack_pointer[0] = _stack_item_0;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
GOTO_TIER_ONE(frame->instr_ptr);
}
case _DYNAMIC_EXIT_r20: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
PyObject *exit_p = (PyObject *)CURRENT_OPERAND0_64();
#if defined(Py_DEBUG) && !defined(_Py_JIT)
_PyExitData *exit = (_PyExitData *)exit_p;
_Py_CODEUNIT *target = frame->instr_ptr;
OPT_HIST(trace_uop_execution_counter, trace_run_length_hist);
if (frame->lltrace >= 3) {
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
printf("DYNAMIC EXIT: [UOp ");
_PyUOpPrint(&next_uop[-1]);
printf(", exit %tu, temp %d, target %d -> %s]\n",
exit - current_executor->exits, exit->temperature.value_and_backoff,
(int)(target - _PyFrame_GetBytecode(frame)),
_PyOpcode_OpName[target->op.code]);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -2;
}
#endif
SET_CURRENT_CACHED_VALUES(0);
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
GOTO_TIER_ONE(frame->instr_ptr);
}
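/* The _rNM suffix on each case encodes the top-of-stack register cache:
   N stack values live in _tos_cache0.._tos_cache2 on entry and M on exit
   (see CHECK_CURRENT_CACHED_VALUES / SET_CURRENT_CACHED_VALUES). */

/* _DYNAMIC_EXIT is the deopt target for guards on operations with
   dynamic instruction-pointer effects: it spills the cached stack items
   back to memory and leaves tier 2 at frame->instr_ptr. */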
case _DYNAMIC_EXIT_r30: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
PyObject *exit_p = (PyObject *)CURRENT_OPERAND0_64();
#if defined(Py_DEBUG) && !defined(_Py_JIT)
_PyExitData *exit = (_PyExitData *)exit_p;
_Py_CODEUNIT *target = frame->instr_ptr;
OPT_HIST(trace_uop_execution_counter, trace_run_length_hist);
if (frame->lltrace >= 3) {
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer[2] = _stack_item_2;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
printf("DYNAMIC EXIT: [UOp ");
_PyUOpPrint(&next_uop[-1]);
printf(", exit %tu, temp %d, target %d -> %s]\n",
exit - current_executor->exits, exit->temperature.value_and_backoff,
(int)(target - _PyFrame_GetBytecode(frame)),
_PyOpcode_OpName[target->op.code]);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -3;
}
#endif
SET_CURRENT_CACHED_VALUES(0);
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer[2] = _stack_item_2;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
GOTO_TIER_ONE(frame->instr_ptr);
}
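/* _CHECK_VALIDITY side-exits if this executor has been invalidated
   (vm_data.valid cleared); the cached stack items pass through untouched. */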
case _CHECK_VALIDITY_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
if (!current_executor->vm_data.valid) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_VALIDITY_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
if (!current_executor->vm_data.valid) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_VALIDITY_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
if (!current_executor->vm_data.valid) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _CHECK_VALIDITY_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
if (!current_executor->vm_data.valid) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
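/* _LOAD_CONST_INLINE pushes a constant baked into the trace as a new
   strong reference; the _BORROW variants below push it borrowed. */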
case _LOAD_CONST_INLINE_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
value = PyStackRef_FromPyObjectNew(ptr);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_CONST_INLINE_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
value = PyStackRef_FromPyObjectNew(ptr);
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_CONST_INLINE_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
value = PyStackRef_FromPyObjectNew(ptr);
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_TOP_LOAD_CONST_INLINE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef pop;
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
pop = _stack_item_0;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(pop);
stack_pointer = _PyFrame_GetStackPointer(frame);
value = PyStackRef_FromPyObjectNew(ptr);
_tos_cache0 = value;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_CONST_INLINE_BORROW_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
value = PyStackRef_FromPyObjectBorrow(ptr);
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_CONST_INLINE_BORROW_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
value = PyStackRef_FromPyObjectBorrow(ptr);
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_CONST_INLINE_BORROW_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
value = PyStackRef_FromPyObjectBorrow(ptr);
_tos_cache2 = value;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
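/* The _POP_CALL family unwinds a call sequence: the NULL slot is
   discarded and the callable is closed; the _ONE and _TWO variants also
   close one or two arguments. */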
case _POP_CALL_r20: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null;
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
null = _stack_item_1;
callable = _stack_item_0;
(void)null;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(callable);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_CALL_ONE_r30: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef pop;
_PyStackRef null;
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
pop = _stack_item_2;
null = _stack_item_1;
callable = _stack_item_0;
stack_pointer[0] = callable;
stack_pointer[1] = null;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(pop);
stack_pointer = _PyFrame_GetStackPointer(frame);
(void)null;
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(callable);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_CALL_TWO_r30: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef pop2;
_PyStackRef pop1;
_PyStackRef null;
_PyStackRef callable;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
pop2 = _stack_item_2;
pop1 = _stack_item_1;
null = _stack_item_0;
callable = stack_pointer[-1];
stack_pointer[0] = null;
stack_pointer[1] = pop1;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(pop2);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(pop1);
stack_pointer = _PyFrame_GetStackPointer(frame);
(void)null;
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(callable);
stack_pointer = _PyFrame_GetStackPointer(frame);
_tos_cache0 = PyStackRef_ZERO_BITS;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_TOP_LOAD_CONST_INLINE_BORROW_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef pop;
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
pop = _stack_item_0;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(pop);
stack_pointer = _PyFrame_GetStackPointer(frame);
value = PyStackRef_FromPyObjectBorrow(ptr);
_tos_cache0 = value;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_TWO_LOAD_CONST_INLINE_BORROW_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef pop2;
_PyStackRef pop1;
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
pop2 = _stack_item_1;
pop1 = _stack_item_0;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
stack_pointer[0] = pop1;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(pop2);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(pop1);
stack_pointer = _PyFrame_GetStackPointer(frame);
value = PyStackRef_FromPyObjectBorrow(ptr);
_tos_cache0 = value;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_CALL_LOAD_CONST_INLINE_BORROW_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef null;
_PyStackRef callable;
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
null = _stack_item_1;
callable = _stack_item_0;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
(void)null;
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(callable);
stack_pointer = _PyFrame_GetStackPointer(frame);
value = PyStackRef_FromPyObjectBorrow(ptr);
_tos_cache0 = value;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_CALL_ONE_LOAD_CONST_INLINE_BORROW_r31: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef pop;
_PyStackRef null;
_PyStackRef callable;
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
pop = _stack_item_2;
null = _stack_item_1;
callable = _stack_item_0;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
stack_pointer[0] = callable;
stack_pointer[1] = null;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(pop);
stack_pointer = _PyFrame_GetStackPointer(frame);
(void)null;
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(callable);
stack_pointer = _PyFrame_GetStackPointer(frame);
value = PyStackRef_FromPyObjectBorrow(ptr);
_tos_cache0 = value;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
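/* The _SHUFFLE_* uops load a borrowed constant and splice it beneath
   the surviving stack value(s); the slots they consume are dropped
   without PyStackRef_CLOSE. */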
case _SHUFFLE_2_LOAD_CONST_INLINE_BORROW_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef res;
_PyStackRef a;
arg = stack_pointer[-1];
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
res = PyStackRef_FromPyObjectBorrow(ptr);
a = arg;
_tos_cache1 = a;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SHUFFLE_2_LOAD_CONST_INLINE_BORROW_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef res;
_PyStackRef a;
_PyStackRef _stack_item_0 = _tos_cache0;
arg = _stack_item_0;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
res = PyStackRef_FromPyObjectBorrow(ptr);
a = arg;
_tos_cache1 = a;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SHUFFLE_2_LOAD_CONST_INLINE_BORROW_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef res;
_PyStackRef a;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
arg = _stack_item_1;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
res = PyStackRef_FromPyObjectBorrow(ptr);
a = arg;
_tos_cache1 = a;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SHUFFLE_2_LOAD_CONST_INLINE_BORROW_r32: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef res;
_PyStackRef a;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
arg = _stack_item_2;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
res = PyStackRef_FromPyObjectBorrow(ptr);
a = arg;
_tos_cache1 = a;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SHUFFLE_3_LOAD_CONST_INLINE_BORROW_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef callable;
_PyStackRef res;
_PyStackRef a;
_PyStackRef c;
arg = stack_pointer[-1];
callable = stack_pointer[-3];
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
res = PyStackRef_FromPyObjectBorrow(ptr);
a = arg;
c = callable;
_tos_cache2 = c;
_tos_cache1 = a;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SHUFFLE_3_LOAD_CONST_INLINE_BORROW_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef callable;
_PyStackRef res;
_PyStackRef a;
_PyStackRef c;
_PyStackRef _stack_item_0 = _tos_cache0;
arg = _stack_item_0;
callable = stack_pointer[-2];
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
res = PyStackRef_FromPyObjectBorrow(ptr);
a = arg;
c = callable;
_tos_cache2 = c;
_tos_cache1 = a;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SHUFFLE_3_LOAD_CONST_INLINE_BORROW_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef callable;
_PyStackRef res;
_PyStackRef a;
_PyStackRef c;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
arg = _stack_item_1;
callable = stack_pointer[-1];
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
res = PyStackRef_FromPyObjectBorrow(ptr);
a = arg;
c = callable;
_tos_cache2 = c;
_tos_cache1 = a;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SHUFFLE_3_LOAD_CONST_INLINE_BORROW_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef arg;
_PyStackRef callable;
_PyStackRef res;
_PyStackRef a;
_PyStackRef c;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
arg = _stack_item_2;
callable = _stack_item_0;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
res = PyStackRef_FromPyObjectBorrow(ptr);
a = arg;
c = callable;
_tos_cache2 = c;
_tos_cache1 = a;
_tos_cache0 = res;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _POP_CALL_TWO_LOAD_CONST_INLINE_BORROW_r31: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef pop2;
_PyStackRef pop1;
_PyStackRef null;
_PyStackRef callable;
_PyStackRef value;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
pop2 = _stack_item_2;
pop1 = _stack_item_1;
null = _stack_item_0;
callable = stack_pointer[-1];
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
stack_pointer[0] = null;
stack_pointer[1] = pop1;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(pop2);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(pop1);
stack_pointer = _PyFrame_GetStackPointer(frame);
(void)null;
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(callable);
stack_pointer = _PyFrame_GetStackPointer(frame);
value = PyStackRef_FromPyObjectBorrow(ptr);
_tos_cache0 = value;
_tos_cache1 = PyStackRef_ZERO_BITS;
_tos_cache2 = PyStackRef_ZERO_BITS;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
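/* _LOAD_CONST_UNDER_INLINE pushes the inline constant underneath the
   current top-of-stack value, as a new reference; the _BORROW variants
   push it borrowed. */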
case _LOAD_CONST_UNDER_INLINE_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef old;
_PyStackRef value;
_PyStackRef new;
old = stack_pointer[-1];
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
new = old;
value = PyStackRef_FromPyObjectNew(ptr);
_tos_cache1 = new;
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_CONST_UNDER_INLINE_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef old;
_PyStackRef value;
_PyStackRef new;
_PyStackRef _stack_item_0 = _tos_cache0;
old = _stack_item_0;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
new = old;
value = PyStackRef_FromPyObjectNew(ptr);
_tos_cache1 = new;
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_CONST_UNDER_INLINE_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef old;
_PyStackRef value;
_PyStackRef new;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
old = _stack_item_1;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
new = old;
value = PyStackRef_FromPyObjectNew(ptr);
_tos_cache2 = new;
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_CONST_UNDER_INLINE_BORROW_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef old;
_PyStackRef value;
_PyStackRef new;
old = stack_pointer[-1];
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
new = old;
value = PyStackRef_FromPyObjectBorrow(ptr);
_tos_cache1 = new;
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_CONST_UNDER_INLINE_BORROW_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef old;
_PyStackRef value;
_PyStackRef new;
_PyStackRef _stack_item_0 = _tos_cache0;
old = _stack_item_0;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
new = old;
value = PyStackRef_FromPyObjectBorrow(ptr);
_tos_cache1 = new;
_tos_cache0 = value;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _LOAD_CONST_UNDER_INLINE_BORROW_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef old;
_PyStackRef value;
_PyStackRef new;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
old = _stack_item_1;
PyObject *ptr = (PyObject *)CURRENT_OPERAND0_64();
new = old;
value = PyStackRef_FromPyObjectBorrow(ptr);
_tos_cache2 = new;
_tos_cache1 = value;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
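/* _START_EXECUTOR records this executor on the thread state and
   side-exits (clearing the jit exit) if the executor was invalidated
   before it could begin. */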
case _START_EXECUTOR_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
PyObject *executor = (PyObject *)CURRENT_OPERAND0_64();
#ifndef _Py_JIT
assert(current_executor == (_PyExecutorObject*)executor);
#endif
assert(tstate->jit_exit == NULL || tstate->jit_exit->executor == current_executor);
tstate->current_executor = (PyObject *)current_executor;
if (!current_executor->vm_data.valid) {
assert(tstate->jit_exit->executor == current_executor);
assert(tstate->current_executor == executor);
_PyFrame_SetStackPointer(frame, stack_pointer);
_PyExecutor_ClearExit(tstate->jit_exit);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (true) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
}
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
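/* _MAKE_WARM only flags the executor as warm; each register variant
   passes its cached values straight through. */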
case _MAKE_WARM_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
current_executor->vm_data.warm = true;
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _MAKE_WARM_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
current_executor->vm_data.warm = true;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _MAKE_WARM_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
current_executor->vm_data.warm = true;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _MAKE_WARM_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
current_executor->vm_data.warm = true;
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
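/* _FATAL_ERROR is unreachable by construction; executing it aborts
   the process. */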
case _FATAL_ERROR_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
assert(0);
Py_FatalError("Fatal error uop executed.");
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _FATAL_ERROR_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
assert(0);
Py_FatalError("Fatal error uop executed.");
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _FATAL_ERROR_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
assert(0);
Py_FatalError("Fatal error uop executed.");
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _FATAL_ERROR_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
assert(0);
Py_FatalError("Fatal error uop executed.");
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
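/* _DEOPT spills any cached values back to the stack and resumes tier 1
   at CURRENT_TARGET(), or at the trampoline for interpreter-owned
   frames. */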
case _DEOPT_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
SET_CURRENT_CACHED_VALUES(0);
GOTO_TIER_ONE((frame->owner == FRAME_OWNED_BY_INTERPRETER)
? _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR : _PyFrame_GetBytecode(frame) + CURRENT_TARGET());
}
case _DEOPT_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
stack_pointer[0] = _stack_item_0;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
GOTO_TIER_ONE((frame->owner == FRAME_OWNED_BY_INTERPRETER)
? _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR : _PyFrame_GetBytecode(frame) + CURRENT_TARGET());
}
case _DEOPT_r20: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
GOTO_TIER_ONE((frame->owner == FRAME_OWNED_BY_INTERPRETER)
? _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR : _PyFrame_GetBytecode(frame) + CURRENT_TARGET());
}
case _DEOPT_r30: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer[2] = _stack_item_2;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
SET_CURRENT_CACHED_VALUES(0);
GOTO_TIER_ONE((frame->owner == FRAME_OWNED_BY_INTERPRETER)
? _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS_PTR : _PyFrame_GetBytecode(frame) + CURRENT_TARGET());
}
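/* _HANDLE_PENDING_AND_DEOPT services pending calls/signals via
   _Py_HandlePending, then leaves tier 2: with a NULL target on error,
   otherwise at the deopt target. */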
case _HANDLE_PENDING_AND_DEOPT_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = _Py_HandlePending(tstate);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
GOTO_TIER_ONE(err ? NULL : _PyFrame_GetBytecode(frame) + CURRENT_TARGET());
}
case _HANDLE_PENDING_AND_DEOPT_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
stack_pointer[0] = _stack_item_0;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = _Py_HandlePending(tstate);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
GOTO_TIER_ONE(err ? NULL : _PyFrame_GetBytecode(frame) + CURRENT_TARGET());
}
case _HANDLE_PENDING_AND_DEOPT_r20: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = _Py_HandlePending(tstate);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
GOTO_TIER_ONE(err ? NULL : _PyFrame_GetBytecode(frame) + CURRENT_TARGET());
}
case _HANDLE_PENDING_AND_DEOPT_r30: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer[2] = _stack_item_2;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = _Py_HandlePending(tstate);
stack_pointer = _PyFrame_GetStackPointer(frame);
SET_CURRENT_CACHED_VALUES(0);
GOTO_TIER_ONE(err ? NULL : _PyFrame_GetBytecode(frame) + CURRENT_TARGET());
}
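/* _ERROR_POP_N restores frame->instr_ptr from the operand and exits
   to tier 1 with a NULL target, i.e. into error handling. */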
case _ERROR_POP_N_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
oparg = CURRENT_OPARG();
uint32_t target = (uint32_t)CURRENT_OPERAND0_32();
assert(oparg == 0);
frame->instr_ptr = _PyFrame_GetBytecode(frame) + target;
SET_CURRENT_CACHED_VALUES(0);
GOTO_TIER_ONE(NULL);
}
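/* _SPILL_OR_RELOAD_rNM is generated glue that converts N cached values
   into M, moving items between the in-memory stack and the _tos_cache
   registers. */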
case _SPILL_OR_RELOAD_r01: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_tos_cache0 = stack_pointer[-1];
SET_CURRENT_CACHED_VALUES(1);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SPILL_OR_RELOAD_r02: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = stack_pointer[-2];
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SPILL_OR_RELOAD_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_tos_cache2 = stack_pointer[-1];
_tos_cache1 = stack_pointer[-2];
_tos_cache0 = stack_pointer[-3];
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SPILL_OR_RELOAD_r10: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
SET_CURRENT_CACHED_VALUES(0);
stack_pointer[0] = _stack_item_0;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SPILL_OR_RELOAD_r12: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_tos_cache1 = _stack_item_0;
_tos_cache0 = stack_pointer[-1];
SET_CURRENT_CACHED_VALUES(2);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SPILL_OR_RELOAD_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_tos_cache2 = _stack_item_0;
_tos_cache1 = stack_pointer[-1];
_tos_cache0 = stack_pointer[-2];
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SPILL_OR_RELOAD_r20: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
SET_CURRENT_CACHED_VALUES(0);
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SPILL_OR_RELOAD_r21: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_tos_cache0 = _stack_item_1;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer[0] = _stack_item_0;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SPILL_OR_RELOAD_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_tos_cache2 = _stack_item_1;
_tos_cache1 = _stack_item_0;
_tos_cache0 = stack_pointer[-1];
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SPILL_OR_RELOAD_r30: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
SET_CURRENT_CACHED_VALUES(0);
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer[2] = _stack_item_2;
stack_pointer += 3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SPILL_OR_RELOAD_r31: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
_tos_cache0 = _stack_item_2;
SET_CURRENT_CACHED_VALUES(1);
stack_pointer[0] = _stack_item_0;
stack_pointer[1] = _stack_item_1;
stack_pointer += 2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _SPILL_OR_RELOAD_r32: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
_tos_cache1 = _stack_item_2;
_tos_cache0 = _stack_item_1;
SET_CURRENT_CACHED_VALUES(2);
stack_pointer[0] = _stack_item_0;
stack_pointer += 1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
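/* _TIER2_RESUME_CHECK tests only the event bits of the eval breaker;
   the instrumentation version match is asserted rather than re-tested
   (compare _RESUME_CHECK). */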
case _TIER2_RESUME_CHECK_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
#if defined(__EMSCRIPTEN__)
if (_Py_emscripten_signal_clock == 0) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
_Py_emscripten_signal_clock -= Py_EMSCRIPTEN_SIGNAL_HANDLING;
#endif
uintptr_t eval_breaker = _Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker);
if (eval_breaker & _PY_EVAL_EVENTS_MASK) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
assert(tstate->tracing || eval_breaker == FT_ATOMIC_LOAD_UINTPTR_ACQUIRE(_PyFrame_GetCode(frame)->_co_instrumentation_version));
SET_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _TIER2_RESUME_CHECK_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
#if defined(__EMSCRIPTEN__)
if (_Py_emscripten_signal_clock == 0) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
_Py_emscripten_signal_clock -= Py_EMSCRIPTEN_SIGNAL_HANDLING;
#endif
uintptr_t eval_breaker = _Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker);
if (eval_breaker & _PY_EVAL_EVENTS_MASK) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
assert(tstate->tracing || eval_breaker == FT_ATOMIC_LOAD_UINTPTR_ACQUIRE(_PyFrame_GetCode(frame)->_co_instrumentation_version));
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _TIER2_RESUME_CHECK_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
#if defined(__EMSCRIPTEN__)
if (_Py_emscripten_signal_clock == 0) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
_Py_emscripten_signal_clock -= Py_EMSCRIPTEN_SIGNAL_HANDLING;
#endif
uintptr_t eval_breaker = _Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker);
if (eval_breaker & _PY_EVAL_EVENTS_MASK) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
assert(tstate->tracing || eval_breaker == FT_ATOMIC_LOAD_UINTPTR_ACQUIRE(_PyFrame_GetCode(frame)->_co_instrumentation_version));
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _TIER2_RESUME_CHECK_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
#if defined(__EMSCRIPTEN__)
if (_Py_emscripten_signal_clock == 0) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
_Py_emscripten_signal_clock -= Py_EMSCRIPTEN_SIGNAL_HANDLING;
#endif
uintptr_t eval_breaker = _Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker);
if (eval_breaker & _PY_EVAL_EVENTS_MASK) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
assert(tstate->tracing || eval_breaker == FT_ATOMIC_LOAD_UINTPTR_ACQUIRE(_PyFrame_GetCode(frame)->_co_instrumentation_version));
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
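/* _COLD_EXIT handles a side exit with no executor attached yet: if the
   target bytecode is already an ENTER_EXECUTOR, the exit is wired to
   that executor and control transfers tier 2 to tier 2; the else branch
   handles targets that have not been compiled. */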
case _COLD_EXIT_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyExitData *exit = tstate->jit_exit;
assert(exit != NULL);
assert(frame->owner < FRAME_OWNED_BY_INTERPRETER);
_Py_CODEUNIT *target = _PyFrame_GetBytecode(frame) + exit->target;
_Py_BackoffCounter temperature = exit->temperature;
_PyExecutorObject *executor;
if (target->op.code == ENTER_EXECUTOR) {
PyCodeObject *code = _PyFrame_GetCode(frame);
executor = code->co_executors->executors[target->op.arg];
Py_INCREF(executor);
assert(tstate->jit_exit == exit);
exit->executor = executor;
SET_CURRENT_CACHED_VALUES(0);
TIER2_TO_TIER2(exit->executor);
}
else {
_Py_BackoffCounter temperature = exit->temperature;
if (!backoff_counter_triggers(temperature)) {
exit->temperature = advance_backoff_counter(temperature);
SET_CURRENT_CACHED_VALUES(0);
GOTO_TIER_ONE(target);
}
_PyExecutorObject *previous_executor = _PyExecutor_FromExit(exit);
assert(tstate->current_executor == (PyObject *)previous_executor);
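/* Chain depth appears to bound how many side-exit traces can be
   chained off the original executor: it grows by one for each
   non-control-flow exit, and is passed to the tracer below. */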
int chain_depth = previous_executor->vm_data.chain_depth + !exit->is_control_flow;
int succ = _PyJit_TryInitializeTracing(tstate, frame, target, target, target, STACK_LEVEL(), chain_depth, exit, target->op.arg);
exit->temperature = restart_backoff_counter(exit->temperature);
if (succ) {
GOTO_TIER_ONE_CONTINUE_TRACING(target);
}
SET_CURRENT_CACHED_VALUES(0);
GOTO_TIER_ONE(target);
}
}
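/* _COLD_DYNAMIC_EXIT appears to be the cold path for _DYNAMIC_EXIT,
   the exit taken when a guard on a dynamic instruction-pointer effect
   fails: it resumes tier 1 at frame->instr_ptr without consulting a
   backoff counter or attempting to start a new trace. */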
case _COLD_DYNAMIC_EXIT_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_Py_CODEUNIT *target = frame->instr_ptr;
SET_CURRENT_CACHED_VALUES(0);
GOTO_TIER_ONE(target);
}
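/* The _GUARD_IP_* family checks that frame->instr_ptr, adjusted by a
   per-instruction offset, still matches the instruction pointer recorded
   at trace time as the uop's 64-bit operand; on mismatch it counts a miss
   and deopts via JUMP_TO_JUMP_TARGET. The _rNN suffixes appear to encode
   how many top-of-stack values are held in the _tos_cacheN registers on
   entry and exit; each variant restores exactly that many before leaving. */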
case _GUARD_IP__PUSH_FRAME_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
#define OFFSET_OF__PUSH_FRAME ((0))
PyObject *ip = (PyObject *)CURRENT_OPERAND0_64();
_Py_CODEUNIT *target = frame->instr_ptr + OFFSET_OF__PUSH_FRAME;
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += OFFSET_OF__PUSH_FRAME;
if (true) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
}
SET_CURRENT_CACHED_VALUES(0);
#undef OFFSET_OF__PUSH_FRAME
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IP__PUSH_FRAME_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
#define OFFSET_OF__PUSH_FRAME ((0))
PyObject *ip = (PyObject *)CURRENT_OPERAND0_64();
_Py_CODEUNIT *target = frame->instr_ptr + OFFSET_OF__PUSH_FRAME;
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += OFFSET_OF__PUSH_FRAME;
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
#undef OFFSET_OF__PUSH_FRAME
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IP__PUSH_FRAME_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
#define OFFSET_OF__PUSH_FRAME ((0))
PyObject *ip = (PyObject *)CURRENT_OPERAND0_64();
_Py_CODEUNIT *target = frame->instr_ptr + OFFSET_OF__PUSH_FRAME;
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += OFFSET_OF__PUSH_FRAME;
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
#undef OFFSET_OF__PUSH_FRAME
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IP__PUSH_FRAME_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
#define OFFSET_OF__PUSH_FRAME ((0))
PyObject *ip = (PyObject *)CURRENT_OPERAND0_64();
_Py_CODEUNIT *target = frame->instr_ptr + OFFSET_OF__PUSH_FRAME;
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += OFFSET_OF__PUSH_FRAME;
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
#undef OFFSET_OF__PUSH_FRAME
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
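/* Same guard, specialized for YIELD_VALUE: the expected target lies one
   code unit past SEND plus its inline cache entries, presumably because
   a traced generator resumes at that point after yielding. */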
case _GUARD_IP_YIELD_VALUE_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
#define OFFSET_OF_YIELD_VALUE ((1+INLINE_CACHE_ENTRIES_SEND))
PyObject *ip = (PyObject *)CURRENT_OPERAND0_64();
_Py_CODEUNIT *target = frame->instr_ptr + OFFSET_OF_YIELD_VALUE;
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += OFFSET_OF_YIELD_VALUE;
if (true) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
}
SET_CURRENT_CACHED_VALUES(0);
#undef OFFSET_OF_YIELD_VALUE
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IP_YIELD_VALUE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
#define OFFSET_OF_YIELD_VALUE ((1+INLINE_CACHE_ENTRIES_SEND))
PyObject *ip = (PyObject *)CURRENT_OPERAND0_64();
_Py_CODEUNIT *target = frame->instr_ptr + OFFSET_OF_YIELD_VALUE;
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += OFFSET_OF_YIELD_VALUE;
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
#undef OFFSET_OF_YIELD_VALUE
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IP_YIELD_VALUE_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
#define OFFSET_OF_YIELD_VALUE ((1+INLINE_CACHE_ENTRIES_SEND))
PyObject *ip = (PyObject *)CURRENT_OPERAND0_64();
_Py_CODEUNIT *target = frame->instr_ptr + OFFSET_OF_YIELD_VALUE;
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += OFFSET_OF_YIELD_VALUE;
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
#undef OFFSET_OF_YIELD_VALUE
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IP_YIELD_VALUE_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
#define OFFSET_OF_YIELD_VALUE ((1+INLINE_CACHE_ENTRIES_SEND))
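        /* Editorial gloss (assumption): the resume point after a yield is the
           instruction following SEND plus SEND's inline cache entries, hence
           1 + INLINE_CACHE_ENTRIES_SEND. */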
PyObject *ip = (PyObject *)CURRENT_OPERAND0_64();
_Py_CODEUNIT *target = frame->instr_ptr + OFFSET_OF_YIELD_VALUE;
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += OFFSET_OF_YIELD_VALUE;
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
#undef OFFSET_OF_YIELD_VALUE
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
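    /* Editorial gloss on the _GUARD_IP_RETURN_VALUE family below: the trace
       recorder stored the expected instruction pointer in the uop operand
       (CURRENT_OPERAND0_64()), and frame->return_offset is the displacement
       the returning frame adds to instr_ptr. If the actual resume target
       differs (e.g. the trace is re-entered from a different call site), the
       guard advances instr_ptr to the real target and side-exits via
       JUMP_TO_JUMP_TARGET() so execution can continue there. The _r00/_r11/
       _r22/_r33 suffixes are register-cache variants that keep zero to three
       top-of-stack values in _tos_cache0.._tos_cache2 and must write them
       back before any exit. This is a generated file; the gloss is not part
       of the generator's output. */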
case _GUARD_IP_RETURN_VALUE_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
#define OFFSET_OF_RETURN_VALUE ((frame->return_offset))
PyObject *ip = (PyObject *)CURRENT_OPERAND0_64();
_Py_CODEUNIT *target = frame->instr_ptr + OFFSET_OF_RETURN_VALUE;
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += OFFSET_OF_RETURN_VALUE;
if (true) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
}
SET_CURRENT_CACHED_VALUES(0);
#undef OFFSET_OF_RETURN_VALUE
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IP_RETURN_VALUE_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
#define OFFSET_OF_RETURN_VALUE ((frame->return_offset))
PyObject *ip = (PyObject *)CURRENT_OPERAND0_64();
_Py_CODEUNIT *target = frame->instr_ptr + OFFSET_OF_RETURN_VALUE;
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += OFFSET_OF_RETURN_VALUE;
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
#undef OFFSET_OF_RETURN_VALUE
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IP_RETURN_VALUE_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
#define OFFSET_OF_RETURN_VALUE ((frame->return_offset))
PyObject *ip = (PyObject *)CURRENT_OPERAND0_64();
_Py_CODEUNIT *target = frame->instr_ptr + OFFSET_OF_RETURN_VALUE;
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += OFFSET_OF_RETURN_VALUE;
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
#undef OFFSET_OF_RETURN_VALUE
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IP_RETURN_VALUE_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
#define OFFSET_OF_RETURN_VALUE ((frame->return_offset))
PyObject *ip = (PyObject *)CURRENT_OPERAND0_64();
_Py_CODEUNIT *target = frame->instr_ptr + OFFSET_OF_RETURN_VALUE;
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += OFFSET_OF_RETURN_VALUE;
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
#undef OFFSET_OF_RETURN_VALUE
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
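    /* Editorial gloss: _GUARD_IP_RETURN_GENERATOR applies the same
       instruction-pointer guard as _GUARD_IP_RETURN_VALUE above, emitted at
       the point where a generator object is created and returned; it reuses
       frame->return_offset as the expected displacement. */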
case _GUARD_IP_RETURN_GENERATOR_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
#define OFFSET_OF_RETURN_GENERATOR ((frame->return_offset))
PyObject *ip = (PyObject *)CURRENT_OPERAND0_64();
_Py_CODEUNIT *target = frame->instr_ptr + OFFSET_OF_RETURN_GENERATOR;
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += OFFSET_OF_RETURN_GENERATOR;
if (true) {
UOP_STAT_INC(uopcode, miss);
SET_CURRENT_CACHED_VALUES(0);
JUMP_TO_JUMP_TARGET();
}
}
SET_CURRENT_CACHED_VALUES(0);
#undef OFFSET_OF_RETURN_GENERATOR
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IP_RETURN_GENERATOR_r11: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
#define OFFSET_OF_RETURN_GENERATOR ((frame->return_offset))
PyObject *ip = (PyObject *)CURRENT_OPERAND0_64();
_Py_CODEUNIT *target = frame->instr_ptr + OFFSET_OF_RETURN_GENERATOR;
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += OFFSET_OF_RETURN_GENERATOR;
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(1);
#undef OFFSET_OF_RETURN_GENERATOR
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IP_RETURN_GENERATOR_r22: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
#define OFFSET_OF_RETURN_GENERATOR ((frame->return_offset))
PyObject *ip = (PyObject *)CURRENT_OPERAND0_64();
_Py_CODEUNIT *target = frame->instr_ptr + OFFSET_OF_RETURN_GENERATOR;
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += OFFSET_OF_RETURN_GENERATOR;
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(2);
#undef OFFSET_OF_RETURN_GENERATOR
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _GUARD_IP_RETURN_GENERATOR_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
#define OFFSET_OF_RETURN_GENERATOR ((frame->return_offset))
PyObject *ip = (PyObject *)CURRENT_OPERAND0_64();
_Py_CODEUNIT *target = frame->instr_ptr + OFFSET_OF_RETURN_GENERATOR;
if (target != (_Py_CODEUNIT *)ip) {
frame->instr_ptr += OFFSET_OF_RETURN_GENERATOR;
if (true) {
UOP_STAT_INC(uopcode, miss);
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
JUMP_TO_JUMP_TARGET();
}
}
_tos_cache2 = _stack_item_2;
_tos_cache1 = _stack_item_1;
_tos_cache0 = _stack_item_0;
SET_CURRENT_CACHED_VALUES(3);
#undef OFFSET_OF_RETURN_GENERATOR
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
/* _TRACE_RECORD is not a viable micro-op for tier 2 because it uses the 'this_instr' variable */
#undef TIER_TWO