gh-143092: Make CALL_LIST_APPEND and BINARY_OP_INPLACE_ADD_UNICODE normal instructions (GH-143124)
These super-instructions require many special cases in the interpreter, the specializer, and the JIT, so it's best to convert them to normal instructions.
commit cf6758ff9e
parent 594a4631c3
14 changed files with 167 additions and 205 deletions
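
Background, as a sketch rather than part of the diff: both of these specializations target a two-instruction bytecode pattern, which is what made them behave as super-instructions. The pairs are confirmed by the asserts in the diff below (`STORE_FAST` after the unicode add, `POP_TOP` after the append). In illustrative Python:

    import dis

    def build(words):
        s = ""
        for w in words:
            # `s += w` compiles to BINARY_OP immediately followed by
            # STORE_FAST into `s`; that pair is what
            # BINARY_OP_INPLACE_ADD_UNICODE specializes.
            s += w
        return s

    def collect(items):
        out = []
        for x in items:
            # An append whose result is discarded compiles to CALL followed
            # by POP_TOP; that pair is what CALL_LIST_APPEND specializes.
            out.append(x)
        return out

    dis.dis(build)
    dis.dis(collect)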
@@ -588,7 +588,7 @@ dummy_func(
             BINARY_OP_SUBSCR_STR_INT,
             BINARY_OP_SUBSCR_DICT,
             BINARY_OP_SUBSCR_GETITEM,
-            // BINARY_OP_INPLACE_ADD_UNICODE, // See comments at that opcode.
+            BINARY_OP_INPLACE_ADD_UNICODE,
             BINARY_OP_EXTEND,
         };

@@ -762,13 +762,10 @@ dummy_func(
         macro(BINARY_OP_ADD_UNICODE) =
             _GUARD_TOS_UNICODE + _GUARD_NOS_UNICODE + unused/5 + _BINARY_OP_ADD_UNICODE + _POP_TOP_UNICODE + _POP_TOP_UNICODE;

-        // This is a subtle one. It's a super-instruction for
-        // BINARY_OP_ADD_UNICODE followed by STORE_FAST
-        // where the store goes into the left argument.
-        // So the inputs are the same as for all BINARY_OP
-        // specializations, but there is no output.
-        // At the end we just skip over the STORE_FAST.
-        op(_BINARY_OP_INPLACE_ADD_UNICODE, (left, right --)) {
+        // This is a subtle one. We write NULL to the local
+        // of the following STORE_FAST and leave the result for STORE_FAST
+        // later to store.
+        op(_BINARY_OP_INPLACE_ADD_UNICODE, (left, right -- res)) {
             PyObject *left_o = PyStackRef_AsPyObjectBorrow(left);
             assert(PyUnicode_CheckExact(left_o));
             assert(PyUnicode_CheckExact(PyStackRef_AsPyObjectBorrow(right)));

@@ -796,20 +793,16 @@ dummy_func(
          * that the string is safe to mutate.
          */
         assert(Py_REFCNT(left_o) >= 2 || !PyStackRef_IsHeapSafe(left));
+            PyObject *temp = PyStackRef_AsPyObjectSteal(*target_local);
+            PyObject *right_o = PyStackRef_AsPyObjectBorrow(right);
+            PyUnicode_Append(&temp, right_o);
+            PyStackRef_CLOSE_SPECIALIZED(right, _PyUnicode_ExactDealloc);
+            DEAD(right);
             PyStackRef_CLOSE_SPECIALIZED(left, _PyUnicode_ExactDealloc);
             DEAD(left);
-            PyObject *temp = PyStackRef_AsPyObjectSteal(*target_local);
-            PyObject *right_o = PyStackRef_AsPyObjectSteal(right);
-            PyUnicode_Append(&temp, right_o);
-            *target_local = PyStackRef_FromPyObjectSteal(temp);
-            Py_DECREF(right_o);
-            ERROR_IF(PyStackRef_IsNull(*target_local));
-        #if TIER_ONE
-            // The STORE_FAST is already done. This is done here in tier one,
-            // and during trace projection in tier two:
-            assert(next_instr->op.code == STORE_FAST);
-            SKIP_OVER(1);
-        #endif
+            ERROR_IF(temp == NULL);
+            res = PyStackRef_FromPyObjectSteal(temp);
+            *target_local = PyStackRef_NULL;
         }

         op(_GUARD_BINARY_OP_EXTEND, (descr/4, left, right -- left, right)) {

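The change above is easiest to read as a change of stack discipline: the old op wrote the result straight into the local and skipped the following STORE_FAST; the new op clears the local, pushes `res`, and lets STORE_FAST run normally. A toy Python model of just that bookkeeping (illustrative names, not the real interpreter):

    def old_inplace_add_unicode(stack, locals_, target):
        # Old stack effect (left, right --): no output; the op itself
        # stored into the local, then SKIP_OVER(1) jumped the STORE_FAST.
        right = stack.pop()
        left = stack.pop()
        locals_[target] = left + right

    def new_inplace_add_unicode(stack, locals_, target):
        # New stack effect (left, right -- res): clear the local so the
        # string has no second owner, push the result, and let the
        # following STORE_FAST store it normally.
        right = stack.pop()
        left = stack.pop()
        locals_[target] = None
        stack.append(left + right)

    stack, locals_ = ["he", "llo"], {"s": "he"}
    new_inplace_add_unicode(stack, locals_, "s")   # stack -> ["hello"]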
@@ -4330,8 +4323,7 @@ dummy_func(
             DEOPT_IF(callable_o != interp->callable_cache.list_append);
         }

-        // This is secretly a super-instruction
-        op(_CALL_LIST_APPEND, (callable, self, arg -- c, s)) {
+        op(_CALL_LIST_APPEND, (callable, self, arg -- none, c, s)) {
             assert(oparg == 1);
             PyObject *self_o = PyStackRef_AsPyObjectBorrow(self);

@@ -4344,13 +4336,9 @@ dummy_func(
             }
             c = callable;
             s = self;
-            INPUTS_DEAD();
-        #if TIER_ONE
-            // Skip the following POP_TOP. This is done here in tier one, and
-            // during trace projection in tier two:
-            assert(next_instr->op.code == POP_TOP);
-            SKIP_OVER(1);
-        #endif
+            DEAD(callable);
+            DEAD(self);
+            none = PyStackRef_None;
         }

         op(_CALL_METHOD_DESCRIPTOR_O, (callable, self_or_null, args[oparg] -- res)) {

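With the super-instruction gone, CALL_LIST_APPEND now pushes None (plus the pass-through outputs) and the compiler-emitted POP_TOP runs normally, so it can be observed like any other specialization. A sketch using the standard-library dis module's adaptive view (available since CPython 3.11; warm-up thresholds vary by version):

    import dis

    def collect(items):
        out = []
        for x in items:
            out.append(x)
        return out

    collect(list(range(1000)))        # warm up so the CALL site specializes
    dis.dis(collect, adaptive=True)   # the append CALL shows as CALL_LIST_APPEND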
@@ -5598,15 +5586,9 @@ dummy_func(
-        // Super instructions. Instruction deopted. There's a mismatch in what the stack expects
-        // in the optimizer. So we have to reflect in the trace correctly.
         _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
-        if ((_tstate->jit_tracer_state.prev_state.instr->op.code == CALL_LIST_APPEND &&
-             opcode == POP_TOP) ||
-            (_tstate->jit_tracer_state.prev_state.instr->op.code == BINARY_OP_INPLACE_ADD_UNICODE &&
-             opcode == STORE_FAST)) {
-            _tstate->jit_tracer_state.prev_state.instr_is_super = true;
-        }
-        else {
-            _tstate->jit_tracer_state.prev_state.instr = next_instr;
-        }
+        // JIT should have disabled super instructions, as we can
+        // do these optimizations ourselves in the JIT.
+        _tstate->jit_tracer_state.prev_state.instr = next_instr;
         PyObject *prev_code = PyStackRef_AsPyObjectBorrow(frame->f_executable);
         if (_tstate->jit_tracer_state.prev_state.instr_code != (PyCodeObject *)prev_code) {
             Py_SETREF(_tstate->jit_tracer_state.prev_state.instr_code, (PyCodeObject*)Py_NewRef((prev_code)));
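The deleted tracer branch existed only to paper over the fused pairs: when tracing hit the second half of a super-instruction, the previously recorded instruction had to be flagged rather than advanced. A toy model of the old and new bookkeeping (names lifted from the removed code for illustration, not a real API):

    SUPER_PAIRS = {
        ("CALL_LIST_APPEND", "POP_TOP"),
        ("BINARY_OP_INPLACE_ADD_UNICODE", "STORE_FAST"),
    }

    def record_old(state, prev_opcode, opcode, next_instr):
        # Old: the second half of a fused pair flags the previous entry
        # instead of advancing the recorded position.
        if (prev_opcode, opcode) in SUPER_PAIRS:
            state["prev_is_super"] = True
        else:
            state["prev_instr"] = next_instr

    def record_new(state, prev_opcode, opcode, next_instr):
        # New: super-instructions are disabled while the JIT traces, so
        # recording always advances uniformly.
        state["prev_instr"] = next_instr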