gh-144289: Remove ENABLE_SPECIALIZATION_FT (gh-144290)

Now that the specializing interpreter works with free threading,
replace ENABLE_SPECIALIZATION_FT with ENABLE_SPECIALIZATION and
replace requires_specialization_ft with requires_specialization.
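
A minimal sketch of the consolidated definition after this change; the pycore_code.h hunk below shows only the removed branch, so the exact surviving lines are an assumption:

/* Sketch, assuming the Py_GIL_DISABLED special case goes away entirely and
 * a single flag now covers both the default and free-threaded builds. */
#define ENABLE_SPECIALIZATION 1

/* Call sites that previously guarded on ENABLE_SPECIALIZATION_FT now test
 * the same flag as everything else: */
#if ENABLE_SPECIALIZATION
/* ... adaptive-counter bookkeeping and _Py_Specialize_* calls ... */
#endif  /* ENABLE_SPECIALIZATION */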

Also limit the check that the iterator is uniquely referenced to
FOR_ITER_RANGE. The check is not needed for FOR_ITER_GEN and would cause
test_for_iter_gen to fail.
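
Condensed from the specialize.c hunk at the end of this commit, the resulting control flow in _Py_Specialize_ForIter looks roughly like this (a sketch; surrounding asserts and the FOR_ITER_GEN body are elided, names as in the hunk):

if (PyStackRef_IsNull(null_or_index)) {
    if (tp == &PyRangeIter_Type) {
#ifdef Py_GIL_DISABLED
        /* The uniquely-referenced check now applies only to range
         * iterators; it is not needed for generators and broke
         * test_for_iter_gen when applied to them. */
        if (!_PyObject_IsUniquelyReferenced(iter_o)) {
            goto failure;
        }
#endif
        specialize(instr, FOR_ITER_RANGE);
        return;
    }
    else if (tp == &PyGen_Type && oparg <= SHRT_MAX) {
        /* Generators are not shared across threads, so no uniqueness
         * check is required; FOR_ITER_GEN specialization follows. */
    }
}
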
Sam Gross 2026-01-27 17:52:50 -05:00 committed by GitHub
parent 6b4538192d
commit 6ea3f8cd7f
13 changed files with 167 additions and 189 deletions

View file

@ -292,17 +292,7 @@ extern int _PyCode_SafeAddr2Line(PyCodeObject *co, int addr);
extern void _PyCode_Clear_Executors(PyCodeObject *code);
#ifdef Py_GIL_DISABLED
// gh-115999 tracks progress on addressing this.
#define ENABLE_SPECIALIZATION 0
// Use this to enable specialization families once they are thread-safe. All
// uses will be replaced with ENABLE_SPECIALIZATION once all families are
// thread-safe.
#define ENABLE_SPECIALIZATION_FT 1
#else
#define ENABLE_SPECIALIZATION 1
#define ENABLE_SPECIALIZATION_FT ENABLE_SPECIALIZATION
#endif
/* Specialization functions, these are exported only for other re-generated
* interpreters to call */

View file

@ -1459,11 +1459,6 @@ def requires_specialization(test):
_opcode.ENABLE_SPECIALIZATION, "requires specialization")(test)
def requires_specialization_ft(test):
return unittest.skipUnless(
_opcode.ENABLE_SPECIALIZATION_FT, "requires specialization")(test)
def reset_code(f: types.FunctionType) -> types.FunctionType:
"""Clear all specializations, local instrumentation, and JIT code for the given function."""
f.__code__ = f.__code__.replace()

View file

@ -12,7 +12,7 @@
import unittest
import test.support
from test.support import import_helper, requires_specialization_ft, script_helper
from test.support import import_helper, requires_specialization, script_helper
_testcapi = import_helper.import_module("_testcapi")
_testinternalcapi = import_helper.import_module("_testinternalcapi")
@ -1047,7 +1047,7 @@ def func():
)
self.assertEqual(events[0], ("throw", IndexError))
@requires_specialization_ft
@requires_specialization
def test_no_unwind_for_shim_frame(self):
class ValueErrorRaiser:
def __init__(self):

View file

@ -5,7 +5,7 @@
import types
import unittest
from test.support import (threading_helper, check_impl_detail,
requires_specialization, requires_specialization_ft,
requires_specialization,
cpython_only, requires_jit_disabled, reset_code)
from test.support.import_helper import import_module
@ -524,7 +524,7 @@ def f(x, y):
f()
@requires_jit_disabled
@requires_specialization_ft
@requires_specialization
def test_assign_init_code(self):
class MyClass:
def __init__(self):
@ -547,7 +547,7 @@ def count_args(self, *args):
instantiate()
@requires_jit_disabled
@requires_specialization_ft
@requires_specialization
def test_push_init_frame_fails(self):
def instantiate():
return InitTakesArg()
@ -576,7 +576,7 @@ def f():
f()
@requires_jit_disabled
@requires_specialization_ft
@requires_specialization
def test_specialize_call_function_ex_py(self):
def ex_py(*args, **kwargs):
return 1
@ -592,7 +592,7 @@ def instantiate():
self.assert_specialized(instantiate, "CALL_EX_PY")
@requires_jit_disabled
@requires_specialization_ft
@requires_specialization
def test_specialize_call_function_ex_py_fail(self):
def ex_py(*args, **kwargs):
return 1
@ -660,7 +660,7 @@ def assert_races_do_not_crash(
for writer in writers:
writer.join()
@requires_specialization_ft
@requires_specialization
def test_binary_subscr_getitem(self):
def get_items():
class C:
@ -690,7 +690,7 @@ def write(items):
opname = "BINARY_OP_SUBSCR_GETITEM"
self.assert_races_do_not_crash(opname, get_items, read, write)
@requires_specialization_ft
@requires_specialization
def test_binary_subscr_list_int(self):
def get_items():
items = []
@ -768,7 +768,7 @@ def write(items):
opname = "FOR_ITER_LIST"
self.assert_races_do_not_crash(opname, get_items, read, write)
@requires_specialization_ft
@requires_specialization
def test_load_attr_class(self):
def get_items():
class C:
@ -798,7 +798,7 @@ def write(items):
opname = "LOAD_ATTR_CLASS"
self.assert_races_do_not_crash(opname, get_items, read, write)
@requires_specialization_ft
@requires_specialization
def test_load_attr_class_with_metaclass_check(self):
def get_items():
class Meta(type):
@ -831,7 +831,7 @@ def write(items):
opname = "LOAD_ATTR_CLASS_WITH_METACLASS_CHECK"
self.assert_races_do_not_crash(opname, get_items, read, write)
@requires_specialization_ft
@requires_specialization
def test_load_attr_getattribute_overridden(self):
def get_items():
class C:
@ -861,7 +861,7 @@ def write(items):
opname = "LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN"
self.assert_races_do_not_crash(opname, get_items, read, write)
@requires_specialization_ft
@requires_specialization
def test_load_attr_instance_value(self):
def get_items():
class C:
@ -885,7 +885,7 @@ def write(items):
opname = "LOAD_ATTR_INSTANCE_VALUE"
self.assert_races_do_not_crash(opname, get_items, read, write)
@requires_specialization_ft
@requires_specialization
def test_load_attr_method_lazy_dict(self):
def get_items():
class C(Exception):
@ -915,7 +915,7 @@ def write(items):
opname = "LOAD_ATTR_METHOD_LAZY_DICT"
self.assert_races_do_not_crash(opname, get_items, read, write)
@requires_specialization_ft
@requires_specialization
def test_load_attr_method_no_dict(self):
def get_items():
class C:
@ -946,7 +946,7 @@ def write(items):
opname = "LOAD_ATTR_METHOD_NO_DICT"
self.assert_races_do_not_crash(opname, get_items, read, write)
@requires_specialization_ft
@requires_specialization
def test_load_attr_method_with_values(self):
def get_items():
class C:
@ -976,7 +976,7 @@ def write(items):
opname = "LOAD_ATTR_METHOD_WITH_VALUES"
self.assert_races_do_not_crash(opname, get_items, read, write)
@requires_specialization_ft
@requires_specialization
def test_load_attr_module(self):
def get_items():
items = []
@ -1001,7 +1001,7 @@ def write(items):
opname = "LOAD_ATTR_MODULE"
self.assert_races_do_not_crash(opname, get_items, read, write)
@requires_specialization_ft
@requires_specialization
def test_load_attr_property(self):
def get_items():
class C:
@ -1031,7 +1031,7 @@ def write(items):
opname = "LOAD_ATTR_PROPERTY"
self.assert_races_do_not_crash(opname, get_items, read, write)
@requires_specialization_ft
@requires_specialization
def test_load_attr_slot(self):
def get_items():
class C:
@ -1058,7 +1058,7 @@ def write(items):
opname = "LOAD_ATTR_SLOT"
self.assert_races_do_not_crash(opname, get_items, read, write)
@requires_specialization_ft
@requires_specialization
def test_load_attr_with_hint(self):
def get_items():
class C:
@ -1085,7 +1085,7 @@ def write(items):
opname = "LOAD_ATTR_WITH_HINT"
self.assert_races_do_not_crash(opname, get_items, read, write)
@requires_specialization_ft
@requires_specialization
def test_load_global_module(self):
if not have_dict_key_versions():
raise unittest.SkipTest("Low on dict key versions")
@ -1158,7 +1158,7 @@ def write(items):
opname = "STORE_ATTR_WITH_HINT"
self.assert_races_do_not_crash(opname, get_items, read, write)
@requires_specialization_ft
@requires_specialization
def test_store_subscr_list_int(self):
def get_items():
items = []
@ -1182,7 +1182,7 @@ def write(items):
opname = "STORE_SUBSCR_LIST_INT"
self.assert_races_do_not_crash(opname, get_items, read, write)
@requires_specialization_ft
@requires_specialization
def test_unpack_sequence_list(self):
def get_items():
items = []
@ -1362,7 +1362,7 @@ def f(o, n):
class TestSpecializer(TestBase):
@cpython_only
@requires_specialization_ft
@requires_specialization
def test_binary_op(self):
def binary_op_add_int():
for _ in range(_testinternalcapi.SPECIALIZATION_THRESHOLD):
@ -1497,7 +1497,7 @@ def binary_op_bitwise_extend():
self.assert_no_opcode(binary_op_bitwise_extend, "BINARY_OP")
@cpython_only
@requires_specialization_ft
@requires_specialization
def test_load_super_attr(self):
"""Ensure that LOAD_SUPER_ATTR is specialized as expected."""
@ -1536,7 +1536,7 @@ def init(self):
self.assert_no_opcode(A.__init__, "LOAD_SUPER_ATTR_METHOD")
@cpython_only
@requires_specialization_ft
@requires_specialization
def test_contain_op(self):
def contains_op_dict():
for _ in range(_testinternalcapi.SPECIALIZATION_THRESHOLD):
@ -1559,7 +1559,7 @@ def contains_op_set():
self.assert_no_opcode(contains_op_set, "CONTAINS_OP")
@cpython_only
@requires_specialization_ft
@requires_specialization
def test_send_with(self):
def run_async(coro):
while True:
@ -1586,7 +1586,7 @@ async def send_with():
self.assert_specialized(send_with, "SEND_GEN")
@cpython_only
@requires_specialization_ft
@requires_specialization
def test_send_yield_from(self):
def g():
yield None
@ -1601,7 +1601,7 @@ def send_yield_from():
self.assert_no_opcode(send_yield_from, "SEND")
@cpython_only
@requires_specialization_ft
@requires_specialization
def test_store_attr_slot(self):
class C:
__slots__ = ['x']
@ -1622,7 +1622,7 @@ def set_slot(n):
self.assert_no_opcode(set_slot, "STORE_ATTR_SLOT")
@cpython_only
@requires_specialization_ft
@requires_specialization
def test_store_attr_instance_value(self):
class C:
pass
@ -1644,7 +1644,7 @@ def set_value(n):
self.assert_no_opcode(set_value, "STORE_ATTR_INSTANCE_VALUE")
@cpython_only
@requires_specialization_ft
@requires_specialization
def test_store_attr_with_hint(self):
class C:
pass
@ -1669,7 +1669,7 @@ def set_value(n):
self.assert_no_opcode(set_value, "STORE_ATTR_WITH_HINT")
@cpython_only
@requires_specialization_ft
@requires_specialization
def test_to_bool(self):
def to_bool_bool():
true_cnt, false_cnt = 0, 0
@ -1737,7 +1737,7 @@ def to_bool_str():
self.assert_no_opcode(to_bool_str, "TO_BOOL")
@cpython_only
@requires_specialization_ft
@requires_specialization
def test_unpack_sequence(self):
def unpack_sequence_two_tuple():
for _ in range(_testinternalcapi.SPECIALIZATION_THRESHOLD):
@ -1774,7 +1774,7 @@ def unpack_sequence_list():
self.assert_no_opcode(unpack_sequence_list, "UNPACK_SEQUENCE")
@cpython_only
@requires_specialization_ft
@requires_specialization
def test_binary_subscr(self):
def binary_subscr_list_int():
for _ in range(_testinternalcapi.SPECIALIZATION_THRESHOLD):
@ -1844,7 +1844,7 @@ def __getitem__(self, item):
self.assert_no_opcode(binary_subscr_getitems, "BINARY_OP")
@cpython_only
@requires_specialization_ft
@requires_specialization
def test_compare_op(self):
def compare_op_int():
for _ in range(_testinternalcapi.SPECIALIZATION_THRESHOLD):
@ -1878,7 +1878,7 @@ def compare_op_str():
@cpython_only
@requires_specialization_ft
@requires_specialization
def test_for_iter(self):
L = list(range(10))
def for_iter_list():
@ -1916,7 +1916,7 @@ def for_iter_generator():
self.assert_no_opcode(for_iter_generator, "FOR_ITER")
@cpython_only
@requires_specialization_ft
@requires_specialization
def test_call_list_append(self):
# gh-141367: only exact lists should use
# CALL_LIST_APPEND instruction after specialization.
@ -1943,7 +1943,7 @@ class MyList(list): pass
self.assert_no_opcode(my_list_append, "CALL")
@cpython_only
@requires_specialization_ft
@requires_specialization
def test_load_attr_module_with_getattr(self):
module = types.ModuleType("test_module_with_getattr")
module.__dict__["some_attr"] = "foo"

View file

@ -3,7 +3,7 @@
import unittest
from test import support
from test.support import cpython_only, import_helper, requires_specialization_ft
from test.support import cpython_only, import_helper, requires_specialization
from test.support.script_helper import assert_python_ok
from test.support.threading_helper import requires_working_threading
@ -15,7 +15,7 @@
@requires_working_threading()
@unittest.skipUnless(support.Py_GIL_DISABLED, "only in free-threaded builds")
class TLBCTests(unittest.TestCase):
@requires_specialization_ft
@requires_specialization
def test_new_threads_start_with_unspecialized_code(self):
code = textwrap.dedent("""
import dis
@ -46,7 +46,7 @@ def f(a, b, q=None):
""")
assert_python_ok("-X", "tlbc=1", "-c", code)
@requires_specialization_ft
@requires_specialization
def test_threads_specialize_independently(self):
code = textwrap.dedent("""
import dis

View file

@ -3,7 +3,7 @@
import unittest
import warnings
from test import support
from test.support import import_helper, requires_specialization, requires_specialization_ft
from test.support import import_helper, requires_specialization
try:
from sys import _clear_type_cache
except ImportError:
@ -219,7 +219,7 @@ def store_bar_2(type_):
self._check_specialization(store_bar_2, B(), "STORE_ATTR", should_specialize=False)
@requires_specialization_ft
@requires_specialization
def test_class_call_specialization_user_type(self):
class F:
def __init__(self):

View file

@ -423,9 +423,6 @@ _opcode_exec(PyObject *m) {
if (PyModule_AddIntMacro(m, ENABLE_SPECIALIZATION) < 0) {
return -1;
}
if (PyModule_AddIntMacro(m, ENABLE_SPECIALIZATION_FT) < 0) {
return -1;
}
return 0;
}

View file

@ -41,7 +41,7 @@
lhs = stack_pointer[-2];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -51,7 +51,7 @@
}
OPCODE_DEFERRED_INC(BINARY_OP);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
assert(NB_ADD <= oparg);
assert(oparg <= NB_OPARG_LAST);
}
@ -1742,7 +1742,7 @@
callable = stack_pointer[-2 - oparg];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -1752,7 +1752,7 @@
}
OPCODE_DEFERRED_INC(CALL);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
/* Skip 2 cache entries */
// _MAYBE_EXPAND_METHOD
@ -2783,7 +2783,7 @@
func = stack_pointer[-4];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -2793,7 +2793,7 @@
}
OPCODE_DEFERRED_INC(CALL_FUNCTION_EX);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
// _MAKE_CALLARGS_A_TUPLE
{
@ -3111,7 +3111,7 @@
callable = stack_pointer[-3 - oparg];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -3121,7 +3121,7 @@
}
OPCODE_DEFERRED_INC(CALL_KW);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
/* Skip 2 cache entries */
// _MAYBE_EXPAND_METHOD_KW
@ -4771,7 +4771,7 @@
left = stack_pointer[-2];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -4781,7 +4781,7 @@
}
OPCODE_DEFERRED_INC(COMPARE_OP);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
// _COMPARE_OP
{
@ -5061,7 +5061,7 @@
right = stack_pointer[-1];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -5071,7 +5071,7 @@
}
OPCODE_DEFERRED_INC(CONTAINS_OP);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
// _CONTAINS_OP
{
@ -5815,7 +5815,7 @@
iter = stack_pointer[-2];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -5825,7 +5825,7 @@
}
OPCODE_DEFERRED_INC(FOR_ITER);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
// _FOR_ITER
{
@ -7880,7 +7880,7 @@
owner = stack_pointer[-1];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg>>1);
next_instr = this_instr;
@ -7891,7 +7891,7 @@
}
OPCODE_DEFERRED_INC(LOAD_ATTR);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
/* Skip 8 cache entries */
// _LOAD_ATTR
@ -9215,7 +9215,7 @@
{
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg>>1);
next_instr = this_instr;
@ -9226,7 +9226,7 @@
}
OPCODE_DEFERRED_INC(LOAD_GLOBAL);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
/* Skip 1 cache entry */
/* Skip 1 cache entry */
@ -9535,7 +9535,7 @@
global_super_st = stack_pointer[-3];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
int load_method = oparg & 1;
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
@ -9546,7 +9546,7 @@
}
OPCODE_DEFERRED_INC(LOAD_SUPER_ATTR);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
// _LOAD_SUPER_ATTR
{
@ -10381,11 +10381,11 @@
}
// _QUICKEN_RESUME
{
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (tstate->tracing == 0 && this_instr->op.code == RESUME) {
FT_ATOMIC_STORE_UINT8_RELAXED(this_instr->op.code, RESUME_CHECK);
}
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
// _CHECK_PERIODIC_IF_NOT_YIELD_FROM
{
@ -10529,7 +10529,7 @@
receiver = stack_pointer[-2];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -10539,7 +10539,7 @@
}
OPCODE_DEFERRED_INC(SEND);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
// _SEND
{
@ -10825,7 +10825,7 @@
owner = stack_pointer[-1];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
next_instr = this_instr;
@ -10836,7 +10836,7 @@
}
OPCODE_DEFERRED_INC(STORE_ATTR);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
/* Skip 3 cache entries */
// _STORE_ATTR
@ -11365,7 +11365,7 @@
container = stack_pointer[-2];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -11375,7 +11375,7 @@
}
OPCODE_DEFERRED_INC(STORE_SUBSCR);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
// _STORE_SUBSCR
{
@ -11604,7 +11604,7 @@
value = stack_pointer[-1];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -11614,7 +11614,7 @@
}
OPCODE_DEFERRED_INC(TO_BOOL);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
/* Skip 2 cache entries */
// _TO_BOOL
@ -12059,7 +12059,7 @@
seq = stack_pointer[-1];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -12069,7 +12069,7 @@
}
OPCODE_DEFERRED_INC(UNPACK_SEQUENCE);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
(void)seq;
(void)counter;
}

View file

@ -169,11 +169,11 @@ dummy_func(
}
op(_QUICKEN_RESUME, (--)) {
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (tstate->tracing == 0 && this_instr->op.code == RESUME) {
FT_ATOMIC_STORE_UINT8_RELAXED(this_instr->op.code, RESUME_CHECK);
}
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
tier1 op(_MAYBE_INSTRUMENT, (--)) {
@ -463,7 +463,7 @@ dummy_func(
};
specializing op(_SPECIALIZE_TO_BOOL, (counter/1, value -- value)) {
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_ToBool(value, next_instr);
@ -471,7 +471,7 @@ dummy_func(
}
OPCODE_DEFERRED_INC(TO_BOOL);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
op(_TO_BOOL, (value -- res)) {
@ -1131,7 +1131,7 @@ dummy_func(
};
specializing op(_SPECIALIZE_STORE_SUBSCR, (counter/1, container, sub -- container, sub)) {
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_StoreSubscr(container, sub, next_instr);
@ -1139,7 +1139,7 @@ dummy_func(
}
OPCODE_DEFERRED_INC(STORE_SUBSCR);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
op(_STORE_SUBSCR, (v, container, sub -- )) {
@ -1351,7 +1351,7 @@ dummy_func(
};
specializing op(_SPECIALIZE_SEND, (counter/1, receiver, unused -- receiver, unused)) {
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_Send(receiver, next_instr);
@ -1359,7 +1359,7 @@ dummy_func(
}
OPCODE_DEFERRED_INC(SEND);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
op(_SEND, (receiver, v -- receiver, retval)) {
@ -1620,7 +1620,7 @@ dummy_func(
};
specializing op(_SPECIALIZE_UNPACK_SEQUENCE, (counter/1, seq -- seq)) {
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_UnpackSequence(seq, next_instr, oparg);
@ -1628,7 +1628,7 @@ dummy_func(
}
OPCODE_DEFERRED_INC(UNPACK_SEQUENCE);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
(void)seq;
(void)counter;
}
@ -1705,7 +1705,7 @@ dummy_func(
};
specializing op(_SPECIALIZE_STORE_ATTR, (counter/1, owner -- owner)) {
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
next_instr = this_instr;
@ -1714,7 +1714,7 @@ dummy_func(
}
OPCODE_DEFERRED_INC(STORE_ATTR);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
op(_STORE_ATTR, (v, owner --)) {
@ -1823,7 +1823,7 @@ dummy_func(
};
specializing op(_SPECIALIZE_LOAD_GLOBAL, (counter/1 -- )) {
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg>>1);
next_instr = this_instr;
@ -1832,7 +1832,7 @@ dummy_func(
}
OPCODE_DEFERRED_INC(LOAD_GLOBAL);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
// res[1] because we need a pointer to res to pass it to _PyEval_LoadGlobalStackRef
@ -2213,7 +2213,7 @@ dummy_func(
};
specializing op(_SPECIALIZE_LOAD_SUPER_ATTR, (counter/1, global_super_st, class_st, unused -- global_super_st, class_st, unused)) {
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
int load_method = oparg & 1;
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
@ -2222,7 +2222,7 @@ dummy_func(
}
OPCODE_DEFERRED_INC(LOAD_SUPER_ATTR);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
tier1 op(_LOAD_SUPER_ATTR, (global_super_st, class_st, self_st -- attr)) {
@ -2345,7 +2345,7 @@ dummy_func(
};
specializing op(_SPECIALIZE_LOAD_ATTR, (counter/1, owner -- owner)) {
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg>>1);
next_instr = this_instr;
@ -2354,7 +2354,7 @@ dummy_func(
}
OPCODE_DEFERRED_INC(LOAD_ATTR);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
op(_LOAD_ATTR, (owner -- attr, self_or_null[oparg&1])) {
@ -2746,7 +2746,7 @@ dummy_func(
};
specializing op(_SPECIALIZE_COMPARE_OP, (counter/1, left, right -- left, right)) {
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_CompareOp(left, right, next_instr, oparg);
@ -2754,7 +2754,7 @@ dummy_func(
}
OPCODE_DEFERRED_INC(COMPARE_OP);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
op(_COMPARE_OP, (left, right -- res)) {
@ -2875,7 +2875,7 @@ dummy_func(
}
specializing op(_SPECIALIZE_CONTAINS_OP, (counter/1, left, right -- left, right)) {
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_ContainsOp(right, next_instr);
@ -2883,7 +2883,7 @@ dummy_func(
}
OPCODE_DEFERRED_INC(CONTAINS_OP);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
macro(CONTAINS_OP) = _SPECIALIZE_CONTAINS_OP + _CONTAINS_OP + POP_TOP + POP_TOP;
@ -3259,7 +3259,7 @@ dummy_func(
};
specializing op(_SPECIALIZE_FOR_ITER, (counter/1, iter, null_or_index -- iter, null_or_index)) {
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_ForIter(iter, null_or_index, next_instr, oparg);
@ -3267,7 +3267,7 @@ dummy_func(
}
OPCODE_DEFERRED_INC(FOR_ITER);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
replaced op(_FOR_ITER, (iter, null_or_index -- iter, null_or_index, next)) {
@ -3742,7 +3742,7 @@ dummy_func(
};
specializing op(_SPECIALIZE_CALL, (counter/1, callable, self_or_null, unused[oparg] -- callable, self_or_null, unused[oparg])) {
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_Call(callable, self_or_null, next_instr, oparg + !PyStackRef_IsNull(self_or_null));
@ -3750,7 +3750,7 @@ dummy_func(
}
OPCODE_DEFERRED_INC(CALL);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
op(_MAYBE_EXPAND_METHOD, (callable, self_or_null, unused[oparg] -- callable, self_or_null, unused[oparg])) {
@ -4758,7 +4758,7 @@ dummy_func(
_PUSH_FRAME;
specializing op(_SPECIALIZE_CALL_KW, (counter/1, callable, self_or_null, unused[oparg], unused -- callable, self_or_null, unused[oparg], unused)) {
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_CallKw(callable, next_instr, oparg + !PyStackRef_IsNull(self_or_null));
@ -4766,7 +4766,7 @@ dummy_func(
}
OPCODE_DEFERRED_INC(CALL_KW);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
macro(CALL_KW) =
@ -4921,7 +4921,7 @@ dummy_func(
}
specializing op(_SPECIALIZE_CALL_FUNCTION_EX, (counter/1, func, unused, unused, unused -- func, unused, unused, unused)) {
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_CallFunctionEx(func, next_instr);
@ -4929,7 +4929,7 @@ dummy_func(
}
OPCODE_DEFERRED_INC(CALL_FUNCTION_EX);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
macro(CALL_FUNCTION_EX) =
@ -5108,7 +5108,7 @@ dummy_func(
}
specializing op(_SPECIALIZE_BINARY_OP, (counter/1, lhs, rhs -- lhs, rhs)) {
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_BinaryOp(lhs, rhs, next_instr, oparg, LOCALS_ARRAY);
@ -5116,7 +5116,7 @@ dummy_func(
}
OPCODE_DEFERRED_INC(BINARY_OP);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
assert(NB_ADD <= oparg);
assert(oparg <= NB_OPARG_LAST);
}

View file

@ -1321,8 +1321,6 @@ _PyTier2Interpreter(
#undef ENABLE_SPECIALIZATION
#define ENABLE_SPECIALIZATION 0
#undef ENABLE_SPECIALIZATION_FT
#define ENABLE_SPECIALIZATION_FT 0
uint16_t uopcode;
#ifdef Py_STATS

View file

@ -351,7 +351,7 @@ GETITEM(PyObject *v, Py_ssize_t i) {
(COUNTER) = pause_backoff_counter((COUNTER)); \
} while (0);
#ifdef ENABLE_SPECIALIZATION_FT
#ifdef ENABLE_SPECIALIZATION
/* Multiple threads may execute these concurrently if thread-local bytecode is
* disabled and they all execute the main copy of the bytecode. Specialization
* is disabled in that case so the value is unused, but the RMW cycle should be

View file

@ -41,7 +41,7 @@
lhs = stack_pointer[-2];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -51,7 +51,7 @@
}
OPCODE_DEFERRED_INC(BINARY_OP);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
assert(NB_ADD <= oparg);
assert(oparg <= NB_OPARG_LAST);
}
@ -1742,7 +1742,7 @@
callable = stack_pointer[-2 - oparg];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -1752,7 +1752,7 @@
}
OPCODE_DEFERRED_INC(CALL);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
/* Skip 2 cache entries */
// _MAYBE_EXPAND_METHOD
@ -2783,7 +2783,7 @@
func = stack_pointer[-4];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -2793,7 +2793,7 @@
}
OPCODE_DEFERRED_INC(CALL_FUNCTION_EX);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
// _MAKE_CALLARGS_A_TUPLE
{
@ -3111,7 +3111,7 @@
callable = stack_pointer[-3 - oparg];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -3121,7 +3121,7 @@
}
OPCODE_DEFERRED_INC(CALL_KW);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
/* Skip 2 cache entries */
// _MAYBE_EXPAND_METHOD_KW
@ -4771,7 +4771,7 @@
left = stack_pointer[-2];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -4781,7 +4781,7 @@
}
OPCODE_DEFERRED_INC(COMPARE_OP);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
// _COMPARE_OP
{
@ -5061,7 +5061,7 @@
right = stack_pointer[-1];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -5071,7 +5071,7 @@
}
OPCODE_DEFERRED_INC(CONTAINS_OP);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
// _CONTAINS_OP
{
@ -5815,7 +5815,7 @@
iter = stack_pointer[-2];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -5825,7 +5825,7 @@
}
OPCODE_DEFERRED_INC(FOR_ITER);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
// _FOR_ITER
{
@ -7879,7 +7879,7 @@
owner = stack_pointer[-1];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg>>1);
next_instr = this_instr;
@ -7890,7 +7890,7 @@
}
OPCODE_DEFERRED_INC(LOAD_ATTR);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
/* Skip 8 cache entries */
// _LOAD_ATTR
@ -9213,7 +9213,7 @@
{
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg>>1);
next_instr = this_instr;
@ -9224,7 +9224,7 @@
}
OPCODE_DEFERRED_INC(LOAD_GLOBAL);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
/* Skip 1 cache entry */
/* Skip 1 cache entry */
@ -9533,7 +9533,7 @@
global_super_st = stack_pointer[-3];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
int load_method = oparg & 1;
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
@ -9544,7 +9544,7 @@
}
OPCODE_DEFERRED_INC(LOAD_SUPER_ATTR);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
// _LOAD_SUPER_ATTR
{
@ -10379,11 +10379,11 @@
}
// _QUICKEN_RESUME
{
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (tstate->tracing == 0 && this_instr->op.code == RESUME) {
FT_ATOMIC_STORE_UINT8_RELAXED(this_instr->op.code, RESUME_CHECK);
}
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
// _CHECK_PERIODIC_IF_NOT_YIELD_FROM
{
@ -10526,7 +10526,7 @@
receiver = stack_pointer[-2];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -10536,7 +10536,7 @@
}
OPCODE_DEFERRED_INC(SEND);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
// _SEND
{
@ -10822,7 +10822,7 @@
owner = stack_pointer[-1];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
next_instr = this_instr;
@ -10833,7 +10833,7 @@
}
OPCODE_DEFERRED_INC(STORE_ATTR);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
/* Skip 3 cache entries */
// _STORE_ATTR
@ -11362,7 +11362,7 @@
container = stack_pointer[-2];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -11372,7 +11372,7 @@
}
OPCODE_DEFERRED_INC(STORE_SUBSCR);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
// _STORE_SUBSCR
{
@ -11601,7 +11601,7 @@
value = stack_pointer[-1];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -11611,7 +11611,7 @@
}
OPCODE_DEFERRED_INC(TO_BOOL);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
/* Skip 2 cache entries */
// _TO_BOOL
@ -12056,7 +12056,7 @@
seq = stack_pointer[-1];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@ -12066,7 +12066,7 @@
}
OPCODE_DEFERRED_INC(UNPACK_SEQUENCE);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
(void)seq;
(void)counter;
}

View file

@ -44,7 +44,7 @@ do { \
void
_PyCode_Quicken(_Py_CODEUNIT *instructions, Py_ssize_t size, int enable_counters)
{
#if ENABLE_SPECIALIZATION_FT
#if ENABLE_SPECIALIZATION
_Py_BackoffCounter jump_counter, adaptive_counter;
if (enable_counters) {
PyThreadState *tstate = _PyThreadState_GET();
@ -85,7 +85,7 @@ _PyCode_Quicken(_Py_CODEUNIT *instructions, Py_ssize_t size, int enable_counters
oparg = 0;
}
}
#endif /* ENABLE_SPECIALIZATION_FT */
#endif /* ENABLE_SPECIALIZATION */
}
#define SIMPLE_FUNCTION 0
@ -431,7 +431,7 @@ _Py_Specialize_LoadSuperAttr(_PyStackRef global_super_st, _PyStackRef cls_st, _P
PyObject *global_super = PyStackRef_AsPyObjectBorrow(global_super_st);
PyObject *cls = PyStackRef_AsPyObjectBorrow(cls_st);
assert(ENABLE_SPECIALIZATION_FT);
assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[LOAD_SUPER_ATTR] == INLINE_CACHE_ENTRIES_LOAD_SUPER_ATTR);
if (global_super != (PyObject *)&PySuper_Type) {
SPECIALIZATION_FAIL(LOAD_SUPER_ATTR, SPEC_FAIL_SUPER_SHADOWED);
@ -952,7 +952,7 @@ _Py_Specialize_LoadAttr(_PyStackRef owner_st, _Py_CODEUNIT *instr, PyObject *nam
{
PyObject *owner = PyStackRef_AsPyObjectBorrow(owner_st);
assert(ENABLE_SPECIALIZATION_FT);
assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[LOAD_ATTR] == INLINE_CACHE_ENTRIES_LOAD_ATTR);
PyTypeObject *type = Py_TYPE(owner);
bool fail;
@ -983,7 +983,7 @@ _Py_Specialize_StoreAttr(_PyStackRef owner_st, _Py_CODEUNIT *instr, PyObject *na
{
PyObject *owner = PyStackRef_AsPyObjectBorrow(owner_st);
assert(ENABLE_SPECIALIZATION_FT);
assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[STORE_ATTR] == INLINE_CACHE_ENTRIES_STORE_ATTR);
PyObject *descr = NULL;
_PyAttrCache *cache = (_PyAttrCache *)(instr + 1);
@ -1293,7 +1293,7 @@ specialize_load_global_lock_held(
PyObject *globals, PyObject *builtins,
_Py_CODEUNIT *instr, PyObject *name)
{
assert(ENABLE_SPECIALIZATION_FT);
assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[LOAD_GLOBAL] == INLINE_CACHE_ENTRIES_LOAD_GLOBAL);
/* Use inline cache */
_PyLoadGlobalCache *cache = (_PyLoadGlobalCache *)(instr + 1);
@ -1514,7 +1514,7 @@ _Py_Specialize_StoreSubscr(_PyStackRef container_st, _PyStackRef sub_st, _Py_COD
PyObject *container = PyStackRef_AsPyObjectBorrow(container_st);
PyObject *sub = PyStackRef_AsPyObjectBorrow(sub_st);
assert(ENABLE_SPECIALIZATION_FT);
assert(ENABLE_SPECIALIZATION);
PyTypeObject *container_type = Py_TYPE(container);
if (container_type == &PyList_Type) {
if (PyLong_CheckExact(sub)) {
@ -1802,7 +1802,7 @@ _Py_Specialize_Call(_PyStackRef callable_st, _PyStackRef self_or_null_st, _Py_CO
{
PyObject *callable = PyStackRef_AsPyObjectBorrow(callable_st);
assert(ENABLE_SPECIALIZATION_FT);
assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[CALL] == INLINE_CACHE_ENTRIES_CALL);
assert(_Py_OPCODE(*instr) != INSTRUMENTED_CALL);
int fail;
@ -1844,7 +1844,7 @@ _Py_Specialize_CallKw(_PyStackRef callable_st, _Py_CODEUNIT *instr, int nargs)
{
PyObject *callable = PyStackRef_AsPyObjectBorrow(callable_st);
assert(ENABLE_SPECIALIZATION_FT);
assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[CALL_KW] == INLINE_CACHE_ENTRIES_CALL_KW);
assert(_Py_OPCODE(*instr) != INSTRUMENTED_CALL_KW);
int fail;
@ -2200,7 +2200,7 @@ _Py_Specialize_BinaryOp(_PyStackRef lhs_st, _PyStackRef rhs_st, _Py_CODEUNIT *in
{
PyObject *lhs = PyStackRef_AsPyObjectBorrow(lhs_st);
PyObject *rhs = PyStackRef_AsPyObjectBorrow(rhs_st);
assert(ENABLE_SPECIALIZATION_FT);
assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[BINARY_OP] == INLINE_CACHE_ENTRIES_BINARY_OP);
_PyBinaryOpCache *cache = (_PyBinaryOpCache *)(instr + 1);
@ -2369,7 +2369,7 @@ _Py_Specialize_CompareOp(_PyStackRef lhs_st, _PyStackRef rhs_st, _Py_CODEUNIT *i
PyObject *rhs = PyStackRef_AsPyObjectBorrow(rhs_st);
uint8_t specialized_op;
assert(ENABLE_SPECIALIZATION_FT);
assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[COMPARE_OP] == INLINE_CACHE_ENTRIES_COMPARE_OP);
// All of these specializations compute boolean values, so they're all valid
// regardless of the fifth-lowest oparg bit.
@ -2429,7 +2429,7 @@ _Py_Specialize_UnpackSequence(_PyStackRef seq_st, _Py_CODEUNIT *instr, int oparg
{
PyObject *seq = PyStackRef_AsPyObjectBorrow(seq_st);
assert(ENABLE_SPECIALIZATION_FT);
assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[UNPACK_SEQUENCE] ==
INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE);
if (PyTuple_CheckExact(seq)) {
@ -2534,29 +2534,27 @@ int
Py_NO_INLINE void
_Py_Specialize_ForIter(_PyStackRef iter, _PyStackRef null_or_index, _Py_CODEUNIT *instr, int oparg)
{
assert(ENABLE_SPECIALIZATION_FT);
assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[FOR_ITER] == INLINE_CACHE_ENTRIES_FOR_ITER);
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
PyTypeObject *tp = Py_TYPE(iter_o);
if (PyStackRef_IsNull(null_or_index)) {
#ifdef Py_GIL_DISABLED
// Only specialize for uniquely referenced iterators, so that we know
// they're only referenced by this one thread. This is more limiting
// than we need (even `it = iter(mylist); for item in it:` won't get
// specialized) but we don't have a way to check whether we're the only
// _thread_ who has access to the object.
if (!_PyObject_IsUniquelyReferenced(iter_o)) {
goto failure;
}
#endif
if (tp == &PyRangeIter_Type) {
#ifdef Py_GIL_DISABLED
// Only specialize for uniquely referenced iterators, so that we know
// they're only referenced by this one thread. This is more limiting
// than we need (even `it = iter(mylist); for item in it:` won't get
// specialized) but we don't have a way to check whether we're the only
// _thread_ who has access to the object.
if (!_PyObject_IsUniquelyReferenced(iter_o)) {
goto failure;
}
#endif
specialize(instr, FOR_ITER_RANGE);
return;
}
else if (tp == &PyGen_Type && oparg <= SHRT_MAX) {
// Generators are very much not thread-safe, so don't worry about
// the specialization not being thread-safe.
assert(instr[oparg + INLINE_CACHE_ENTRIES_FOR_ITER + 1].op.code == END_FOR ||
instr[oparg + INLINE_CACHE_ENTRIES_FOR_ITER + 1].op.code == INSTRUMENTED_END_FOR
);
@ -2595,7 +2593,7 @@ _Py_Specialize_Send(_PyStackRef receiver_st, _Py_CODEUNIT *instr)
{
PyObject *receiver = PyStackRef_AsPyObjectBorrow(receiver_st);
assert(ENABLE_SPECIALIZATION_FT);
assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[SEND] == INLINE_CACHE_ENTRIES_SEND);
PyTypeObject *tp = Py_TYPE(receiver);
if (tp == &PyGen_Type || tp == &PyCoro_Type) {
@ -2618,7 +2616,7 @@ _Py_Specialize_CallFunctionEx(_PyStackRef func_st, _Py_CODEUNIT *instr)
{
PyObject *func = PyStackRef_AsPyObjectBorrow(func_st);
assert(ENABLE_SPECIALIZATION_FT);
assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[CALL_FUNCTION_EX] == INLINE_CACHE_ENTRIES_CALL_FUNCTION_EX);
if (Py_TYPE(func) == &PyFunction_Type &&
@ -2685,7 +2683,7 @@ check_type_always_true(PyTypeObject *ty)
Py_NO_INLINE void
_Py_Specialize_ToBool(_PyStackRef value_o, _Py_CODEUNIT *instr)
{
assert(ENABLE_SPECIALIZATION_FT);
assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[TO_BOOL] == INLINE_CACHE_ENTRIES_TO_BOOL);
_PyToBoolCache *cache = (_PyToBoolCache *)(instr + 1);
PyObject *value = PyStackRef_AsPyObjectBorrow(value_o);
@ -2761,7 +2759,7 @@ _Py_Specialize_ContainsOp(_PyStackRef value_st, _Py_CODEUNIT *instr)
{
PyObject *value = PyStackRef_AsPyObjectBorrow(value_st);
assert(ENABLE_SPECIALIZATION_FT);
assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[CONTAINS_OP] == INLINE_CACHE_ENTRIES_COMPARE_OP);
if (PyDict_CheckExact(value)) {
specialize(instr, CONTAINS_OP_DICT);