gh-144289: Remove ENABLE_SPECIALIZATION_FT (gh-144290)

Now that the specializing interpreter works with free threading,
replace ENABLE_SPECIALIZATION_FT with ENABLE_SPECIALIZATION and
replace requires_specialization_ft with requires_specialization.
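
For context, a minimal sketch of how the two gates related before this change (an assumed shape inferred from their documented purpose, not the literal contents of pycore_code.h):

/* Sketch only -- the real header may differ in detail. */
#ifdef Py_GIL_DISABLED
/* The general gate stayed off while some specialization families
 * were still unsafe without the GIL... */
#define ENABLE_SPECIALIZATION 0
/* ...and families that had already been made thread-safe opted in
 * through the _FT variant instead. */
#define ENABLE_SPECIALIZATION_FT 1
#else
#define ENABLE_SPECIALIZATION 1
#define ENABLE_SPECIALIZATION_FT ENABLE_SPECIALIZATION
#endif

With every family now thread-safe, the two gates are always equal, so the _FT spelling can be retired and ENABLE_SPECIALIZATION enabled unconditionally.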

Also limit the uniquely referenced check to FOR_ITER_RANGE. The check is
not necessary for FOR_ITER_GEN, and applying it there would cause
test_for_iter_gen to fail.
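
To illustrate the check being narrowed (a sketch under assumptions, not code from this commit: can_specialize_range_iter is a hypothetical helper, while _PyObject_IsUniquelyReferenced is the real internal helper used in the diff below):

#include "pycore_object.h"   /* _PyObject_IsUniquelyReferenced() */

/* Sketch: FOR_ITER_RANGE only specializes when this thread provably
 * owns the sole reference to the iterator, since any second reference
 * could belong to another thread.  The test is conservative: in
 * `it = iter(range(n)); for x in it: ...`, the local `it` is a second
 * reference even though no other thread can see it, so that loop is
 * skipped too. */
static int
can_specialize_range_iter(PyObject *iter_o)
{
#ifdef Py_GIL_DISABLED
    if (!_PyObject_IsUniquelyReferenced(iter_o)) {
        return 0;   /* someone else holds a reference; play it safe */
    }
#endif
    return 1;       /* with the GIL, specializing is always safe here */
}

FOR_ITER_GEN needs no such guard: as the comment removed in the diff below notes, generators are very much not thread-safe to begin with, so unique ownership buys nothing there.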
Author: Sam Gross, 2026-01-27 17:52:50 -05:00 (committed by GitHub)
parent 6b4538192d
commit 6ea3f8cd7f
13 changed files with 167 additions and 189 deletions

Python/specialize.c

@@ -44,7 +44,7 @@ do { \
 void
 _PyCode_Quicken(_Py_CODEUNIT *instructions, Py_ssize_t size, int enable_counters)
 {
-#if ENABLE_SPECIALIZATION_FT
+#if ENABLE_SPECIALIZATION
     _Py_BackoffCounter jump_counter, adaptive_counter;
     if (enable_counters) {
         PyThreadState *tstate = _PyThreadState_GET();
@@ -85,7 +85,7 @@ _PyCode_Quicken(_Py_CODEUNIT *instructions, Py_ssize_t size, int enable_counters
             oparg = 0;
         }
     }
-#endif /* ENABLE_SPECIALIZATION_FT */
+#endif /* ENABLE_SPECIALIZATION */
 }
 
 #define SIMPLE_FUNCTION 0
@@ -431,7 +431,7 @@ _Py_Specialize_LoadSuperAttr(_PyStackRef global_super_st, _PyStackRef cls_st, _P
     PyObject *global_super = PyStackRef_AsPyObjectBorrow(global_super_st);
     PyObject *cls = PyStackRef_AsPyObjectBorrow(cls_st);
 
-    assert(ENABLE_SPECIALIZATION_FT);
+    assert(ENABLE_SPECIALIZATION);
     assert(_PyOpcode_Caches[LOAD_SUPER_ATTR] == INLINE_CACHE_ENTRIES_LOAD_SUPER_ATTR);
     if (global_super != (PyObject *)&PySuper_Type) {
         SPECIALIZATION_FAIL(LOAD_SUPER_ATTR, SPEC_FAIL_SUPER_SHADOWED);
@@ -952,7 +952,7 @@ _Py_Specialize_LoadAttr(_PyStackRef owner_st, _Py_CODEUNIT *instr, PyObject *nam
 {
     PyObject *owner = PyStackRef_AsPyObjectBorrow(owner_st);
 
-    assert(ENABLE_SPECIALIZATION_FT);
+    assert(ENABLE_SPECIALIZATION);
     assert(_PyOpcode_Caches[LOAD_ATTR] == INLINE_CACHE_ENTRIES_LOAD_ATTR);
     PyTypeObject *type = Py_TYPE(owner);
     bool fail;
@@ -983,7 +983,7 @@ _Py_Specialize_StoreAttr(_PyStackRef owner_st, _Py_CODEUNIT *instr, PyObject *na
 {
     PyObject *owner = PyStackRef_AsPyObjectBorrow(owner_st);
 
-    assert(ENABLE_SPECIALIZATION_FT);
+    assert(ENABLE_SPECIALIZATION);
     assert(_PyOpcode_Caches[STORE_ATTR] == INLINE_CACHE_ENTRIES_STORE_ATTR);
     PyObject *descr = NULL;
     _PyAttrCache *cache = (_PyAttrCache *)(instr + 1);
@@ -1293,7 +1293,7 @@ specialize_load_global_lock_held(
     PyObject *globals, PyObject *builtins,
     _Py_CODEUNIT *instr, PyObject *name)
 {
-    assert(ENABLE_SPECIALIZATION_FT);
+    assert(ENABLE_SPECIALIZATION);
     assert(_PyOpcode_Caches[LOAD_GLOBAL] == INLINE_CACHE_ENTRIES_LOAD_GLOBAL);
     /* Use inline cache */
     _PyLoadGlobalCache *cache = (_PyLoadGlobalCache *)(instr + 1);
@@ -1514,7 +1514,7 @@ _Py_Specialize_StoreSubscr(_PyStackRef container_st, _PyStackRef sub_st, _Py_COD
     PyObject *container = PyStackRef_AsPyObjectBorrow(container_st);
     PyObject *sub = PyStackRef_AsPyObjectBorrow(sub_st);
 
-    assert(ENABLE_SPECIALIZATION_FT);
+    assert(ENABLE_SPECIALIZATION);
     PyTypeObject *container_type = Py_TYPE(container);
     if (container_type == &PyList_Type) {
         if (PyLong_CheckExact(sub)) {
@@ -1802,7 +1802,7 @@ _Py_Specialize_Call(_PyStackRef callable_st, _PyStackRef self_or_null_st, _Py_CO
 {
     PyObject *callable = PyStackRef_AsPyObjectBorrow(callable_st);
 
-    assert(ENABLE_SPECIALIZATION_FT);
+    assert(ENABLE_SPECIALIZATION);
     assert(_PyOpcode_Caches[CALL] == INLINE_CACHE_ENTRIES_CALL);
     assert(_Py_OPCODE(*instr) != INSTRUMENTED_CALL);
     int fail;
@@ -1844,7 +1844,7 @@ _Py_Specialize_CallKw(_PyStackRef callable_st, _Py_CODEUNIT *instr, int nargs)
 {
     PyObject *callable = PyStackRef_AsPyObjectBorrow(callable_st);
 
-    assert(ENABLE_SPECIALIZATION_FT);
+    assert(ENABLE_SPECIALIZATION);
     assert(_PyOpcode_Caches[CALL_KW] == INLINE_CACHE_ENTRIES_CALL_KW);
     assert(_Py_OPCODE(*instr) != INSTRUMENTED_CALL_KW);
     int fail;
@@ -2200,7 +2200,7 @@ _Py_Specialize_BinaryOp(_PyStackRef lhs_st, _PyStackRef rhs_st, _Py_CODEUNIT *in
 {
     PyObject *lhs = PyStackRef_AsPyObjectBorrow(lhs_st);
     PyObject *rhs = PyStackRef_AsPyObjectBorrow(rhs_st);
-    assert(ENABLE_SPECIALIZATION_FT);
+    assert(ENABLE_SPECIALIZATION);
     assert(_PyOpcode_Caches[BINARY_OP] == INLINE_CACHE_ENTRIES_BINARY_OP);
     _PyBinaryOpCache *cache = (_PyBinaryOpCache *)(instr + 1);
 
@@ -2369,7 +2369,7 @@ _Py_Specialize_CompareOp(_PyStackRef lhs_st, _PyStackRef rhs_st, _Py_CODEUNIT *i
     PyObject *rhs = PyStackRef_AsPyObjectBorrow(rhs_st);
     uint8_t specialized_op;
 
-    assert(ENABLE_SPECIALIZATION_FT);
+    assert(ENABLE_SPECIALIZATION);
     assert(_PyOpcode_Caches[COMPARE_OP] == INLINE_CACHE_ENTRIES_COMPARE_OP);
     // All of these specializations compute boolean values, so they're all valid
     // regardless of the fifth-lowest oparg bit.
@@ -2429,7 +2429,7 @@ _Py_Specialize_UnpackSequence(_PyStackRef seq_st, _Py_CODEUNIT *instr, int oparg
 {
     PyObject *seq = PyStackRef_AsPyObjectBorrow(seq_st);
 
-    assert(ENABLE_SPECIALIZATION_FT);
+    assert(ENABLE_SPECIALIZATION);
     assert(_PyOpcode_Caches[UNPACK_SEQUENCE] ==
            INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE);
     if (PyTuple_CheckExact(seq)) {
@@ -2534,29 +2534,27 @@ int
 Py_NO_INLINE void
 _Py_Specialize_ForIter(_PyStackRef iter, _PyStackRef null_or_index, _Py_CODEUNIT *instr, int oparg)
 {
-    assert(ENABLE_SPECIALIZATION_FT);
+    assert(ENABLE_SPECIALIZATION);
     assert(_PyOpcode_Caches[FOR_ITER] == INLINE_CACHE_ENTRIES_FOR_ITER);
     PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
     PyTypeObject *tp = Py_TYPE(iter_o);
 
     if (PyStackRef_IsNull(null_or_index)) {
-#ifdef Py_GIL_DISABLED
-        // Only specialize for uniquely referenced iterators, so that we know
-        // they're only referenced by this one thread. This is more limiting
-        // than we need (even `it = iter(mylist); for item in it:` won't get
-        // specialized) but we don't have a way to check whether we're the only
-        // _thread_ who has access to the object.
-        if (!_PyObject_IsUniquelyReferenced(iter_o)) {
-            goto failure;
-        }
-#endif
         if (tp == &PyRangeIter_Type) {
+#ifdef Py_GIL_DISABLED
+            // Only specialize for uniquely referenced iterators, so that we know
+            // they're only referenced by this one thread. This is more limiting
+            // than we need (even `it = iter(mylist); for item in it:` won't get
+            // specialized) but we don't have a way to check whether we're the only
+            // _thread_ who has access to the object.
+            if (!_PyObject_IsUniquelyReferenced(iter_o)) {
+                goto failure;
+            }
+#endif
             specialize(instr, FOR_ITER_RANGE);
             return;
         }
         else if (tp == &PyGen_Type && oparg <= SHRT_MAX) {
-            // Generators are very much not thread-safe, so don't worry about
-            // the specialization not being thread-safe.
             assert(instr[oparg + INLINE_CACHE_ENTRIES_FOR_ITER + 1].op.code == END_FOR ||
                    instr[oparg + INLINE_CACHE_ENTRIES_FOR_ITER + 1].op.code == INSTRUMENTED_END_FOR
             );
@@ -2595,7 +2593,7 @@ _Py_Specialize_Send(_PyStackRef receiver_st, _Py_CODEUNIT *instr)
 {
     PyObject *receiver = PyStackRef_AsPyObjectBorrow(receiver_st);
 
-    assert(ENABLE_SPECIALIZATION_FT);
+    assert(ENABLE_SPECIALIZATION);
     assert(_PyOpcode_Caches[SEND] == INLINE_CACHE_ENTRIES_SEND);
     PyTypeObject *tp = Py_TYPE(receiver);
     if (tp == &PyGen_Type || tp == &PyCoro_Type) {
@@ -2618,7 +2616,7 @@ _Py_Specialize_CallFunctionEx(_PyStackRef func_st, _Py_CODEUNIT *instr)
 {
     PyObject *func = PyStackRef_AsPyObjectBorrow(func_st);
 
-    assert(ENABLE_SPECIALIZATION_FT);
+    assert(ENABLE_SPECIALIZATION);
     assert(_PyOpcode_Caches[CALL_FUNCTION_EX] == INLINE_CACHE_ENTRIES_CALL_FUNCTION_EX);
 
     if (Py_TYPE(func) == &PyFunction_Type &&
@@ -2685,7 +2683,7 @@ check_type_always_true(PyTypeObject *ty)
 Py_NO_INLINE void
 _Py_Specialize_ToBool(_PyStackRef value_o, _Py_CODEUNIT *instr)
 {
-    assert(ENABLE_SPECIALIZATION_FT);
+    assert(ENABLE_SPECIALIZATION);
     assert(_PyOpcode_Caches[TO_BOOL] == INLINE_CACHE_ENTRIES_TO_BOOL);
     _PyToBoolCache *cache = (_PyToBoolCache *)(instr + 1);
     PyObject *value = PyStackRef_AsPyObjectBorrow(value_o);
@@ -2761,7 +2759,7 @@ _Py_Specialize_ContainsOp(_PyStackRef value_st, _Py_CODEUNIT *instr)
 {
     PyObject *value = PyStackRef_AsPyObjectBorrow(value_st);
 
-    assert(ENABLE_SPECIALIZATION_FT);
+    assert(ENABLE_SPECIALIZATION);
     assert(_PyOpcode_Caches[CONTAINS_OP] == INLINE_CACHE_ENTRIES_COMPARE_OP);
     if (PyDict_CheckExact(value)) {
         specialize(instr, CONTAINS_OP_DICT);