Merge branch 'main' into feat/gc-gen-3.15+heap_size

This commit is contained in:
Sergey Miryanov 2026-05-03 17:05:09 +05:00
commit da497d1cbd
178 changed files with 7136 additions and 2419 deletions

61
Python/Python-ast.c generated
View file

@ -178,6 +178,7 @@ void _PyAST_Fini(PyInterpreterState *interp)
Py_CLEAR(state->__module__);
Py_CLEAR(state->_attributes);
Py_CLEAR(state->_fields);
Py_CLEAR(state->abstract_types);
Py_CLEAR(state->alias_type);
Py_CLEAR(state->annotation);
Py_CLEAR(state->arg);
@ -5269,6 +5270,19 @@ ast_type_init(PyObject *self, PyObject *args, PyObject *kw)
return -1;
}
int contains = PySet_Contains(state->abstract_types, (PyObject *)Py_TYPE(self));
if (contains == -1) {
return -1;
}
else if (contains == 1) {
if (PyErr_WarnFormat(
PyExc_DeprecationWarning, 1,
"Instantiating abstract AST node class %T is deprecated. "
"This will become an error in Python 3.20", self) < 0) {
return -1;
}
}
Py_ssize_t i, numfields = 0;
int res = -1;
PyObject *key, *value, *fields, *attributes = NULL, *remaining_fields = NULL;
@ -6100,6 +6114,13 @@ init_types(void *arg)
if (!state->AST_type) {
return -1;
}
state->abstract_types = PySet_New(NULL);
if (!state->abstract_types) {
return -1;
}
if (PySet_Add(state->abstract_types, state->AST_type) < 0) {
return -1;
}
if (add_ast_fields(state) < 0) {
return -1;
}
@ -6110,6 +6131,7 @@ init_types(void *arg)
" | FunctionType(expr* argtypes, expr returns)");
if (!state->mod_type) return -1;
if (add_attributes(state, state->mod_type, NULL, 0) < 0) return -1;
if (PySet_Add(state->abstract_types, state->mod_type) < 0) return -1;
state->Module_type = make_type(state, "Module", state->mod_type,
Module_fields, 2,
"Module(stmt* body, type_ignore* type_ignores)");
@ -6159,6 +6181,7 @@ init_types(void *arg)
if (!state->stmt_type) return -1;
if (add_attributes(state, state->stmt_type, stmt_attributes, 4) < 0) return
-1;
if (PySet_Add(state->abstract_types, state->stmt_type) < 0) return -1;
if (PyObject_SetAttr(state->stmt_type, state->end_lineno, Py_None) == -1)
return -1;
if (PyObject_SetAttr(state->stmt_type, state->end_col_offset, Py_None) ==
@ -6348,6 +6371,7 @@ init_types(void *arg)
if (!state->expr_type) return -1;
if (add_attributes(state, state->expr_type, expr_attributes, 4) < 0) return
-1;
if (PySet_Add(state->abstract_types, state->expr_type) < 0) return -1;
if (PyObject_SetAttr(state->expr_type, state->end_lineno, Py_None) == -1)
return -1;
if (PyObject_SetAttr(state->expr_type, state->end_col_offset, Py_None) ==
@ -6494,6 +6518,8 @@ init_types(void *arg)
"expr_context = Load | Store | Del");
if (!state->expr_context_type) return -1;
if (add_attributes(state, state->expr_context_type, NULL, 0) < 0) return -1;
if (PySet_Add(state->abstract_types, state->expr_context_type) < 0) return
-1;
state->Load_type = make_type(state, "Load", state->expr_context_type, NULL,
0,
"Load");
@ -6518,6 +6544,7 @@ init_types(void *arg)
"boolop = And | Or");
if (!state->boolop_type) return -1;
if (add_attributes(state, state->boolop_type, NULL, 0) < 0) return -1;
if (PySet_Add(state->abstract_types, state->boolop_type) < 0) return -1;
state->And_type = make_type(state, "And", state->boolop_type, NULL, 0,
"And");
if (!state->And_type) return -1;
@ -6535,6 +6562,7 @@ init_types(void *arg)
"operator = Add | Sub | Mult | MatMult | Div | Mod | Pow | LShift | RShift | BitOr | BitXor | BitAnd | FloorDiv");
if (!state->operator_type) return -1;
if (add_attributes(state, state->operator_type, NULL, 0) < 0) return -1;
if (PySet_Add(state->abstract_types, state->operator_type) < 0) return -1;
state->Add_type = make_type(state, "Add", state->operator_type, NULL, 0,
"Add");
if (!state->Add_type) return -1;
@ -6629,6 +6657,7 @@ init_types(void *arg)
"unaryop = Invert | Not | UAdd | USub");
if (!state->unaryop_type) return -1;
if (add_attributes(state, state->unaryop_type, NULL, 0) < 0) return -1;
if (PySet_Add(state->abstract_types, state->unaryop_type) < 0) return -1;
state->Invert_type = make_type(state, "Invert", state->unaryop_type, NULL,
0,
"Invert");
@ -6659,6 +6688,7 @@ init_types(void *arg)
"cmpop = Eq | NotEq | Lt | LtE | Gt | GtE | Is | IsNot | In | NotIn");
if (!state->cmpop_type) return -1;
if (add_attributes(state, state->cmpop_type, NULL, 0) < 0) return -1;
if (PySet_Add(state->abstract_types, state->cmpop_type) < 0) return -1;
state->Eq_type = make_type(state, "Eq", state->cmpop_type, NULL, 0,
"Eq");
if (!state->Eq_type) return -1;
@ -6732,6 +6762,8 @@ init_types(void *arg)
if (!state->excepthandler_type) return -1;
if (add_attributes(state, state->excepthandler_type,
excepthandler_attributes, 4) < 0) return -1;
if (PySet_Add(state->abstract_types, state->excepthandler_type) < 0) return
-1;
if (PyObject_SetAttr(state->excepthandler_type, state->end_lineno, Py_None)
== -1)
return -1;
@ -6822,6 +6854,7 @@ init_types(void *arg)
if (!state->pattern_type) return -1;
if (add_attributes(state, state->pattern_type, pattern_attributes, 4) < 0)
return -1;
if (PySet_Add(state->abstract_types, state->pattern_type) < 0) return -1;
state->MatchValue_type = make_type(state, "MatchValue",
state->pattern_type, MatchValue_fields,
1,
@ -6872,6 +6905,8 @@ init_types(void *arg)
"type_ignore = TypeIgnore(int lineno, string tag)");
if (!state->type_ignore_type) return -1;
if (add_attributes(state, state->type_ignore_type, NULL, 0) < 0) return -1;
if (PySet_Add(state->abstract_types, state->type_ignore_type) < 0) return
-1;
state->TypeIgnore_type = make_type(state, "TypeIgnore",
state->type_ignore_type,
TypeIgnore_fields, 2,
@ -6885,6 +6920,7 @@ init_types(void *arg)
if (!state->type_param_type) return -1;
if (add_attributes(state, state->type_param_type, type_param_attributes, 4)
< 0) return -1;
if (PySet_Add(state->abstract_types, state->type_param_type) < 0) return -1;
state->TypeVar_type = make_type(state, "TypeVar", state->type_param_type,
TypeVar_fields, 3,
"TypeVar(identifier name, expr? bound, expr? default_value)");
@ -17956,6 +17992,28 @@ obj2ast_type_param(struct ast_state *state, PyObject* obj, type_param_ty* out,
}
/* Helper for checking if a node class is abstract in the tests. */
static PyObject *
ast_is_abstract(PyObject *Py_UNUSED(module), PyObject *cls) {
    struct ast_state *state = get_ast_state();
    if (state == NULL) {
        return NULL;
    }
    /* PySet_Contains: 1 = member, 0 = not a member, -1 = error. */
    switch (PySet_Contains(state->abstract_types, cls)) {
        case -1:
            return NULL;  /* lookup failed; exception is already set */
        case 1:
            Py_RETURN_TRUE;
        default:
            Py_RETURN_FALSE;
    }
}
/* Module-level methods of _ast; only the test helper is exposed. */
static struct PyMethodDef astmodule_methods[] = {
    {"_is_abstract", ast_is_abstract, METH_O, NULL},
    {NULL, NULL, 0, NULL}  /* Sentinel */
};
static int
astmodule_exec(PyObject *m)
{
@ -18382,7 +18440,8 @@ static struct PyModuleDef _astmodule = {
.m_name = "_ast",
// The _ast module uses a per-interpreter state (PyInterpreterState.ast)
.m_size = 0,
.m_slots = astmodule_slots,
.m_methods = astmodule_methods,
.m_slots = astmodule_slots
};
PyMODINIT_FUNC

View file

@ -716,7 +716,7 @@ static int
call_show_warning(PyThreadState *tstate, PyObject *category,
PyObject *text, PyObject *message,
PyObject *filename, int lineno, PyObject *lineno_obj,
PyObject *sourceline, PyObject *source)
PyObject *sourceline, PyObject *source, PyObject *module)
{
PyObject *show_fn, *msg, *res, *warnmsg_cls = NULL;
PyInterpreterState *interp = tstate->interp;
@ -747,7 +747,8 @@ call_show_warning(PyThreadState *tstate, PyObject *category,
}
msg = PyObject_CallFunctionObjArgs(warnmsg_cls, message, category,
filename, lineno_obj, Py_None, Py_None, source,
filename, lineno_obj, Py_None, Py_None,
source ? source : Py_None, module,
NULL);
Py_DECREF(warnmsg_cls);
if (msg == NULL)
@ -878,7 +879,7 @@ warn_explicit(PyThreadState *tstate, PyObject *category, PyObject *message,
goto return_none;
if (rc == 0) {
if (call_show_warning(tstate, category, text, message, filename,
lineno, lineno_obj, sourceline, source) < 0)
lineno, lineno_obj, sourceline, source, module) < 0)
goto cleanup;
}
else /* if (rc == -1) */

View file

@ -2984,6 +2984,7 @@ dummy_func(
macro(LOAD_ATTR_CLASS) =
unused/1 +
_RECORD_TOS +
_CHECK_ATTR_CLASS +
unused/2 +
_LOAD_ATTR_CLASS +
@ -2991,7 +2992,7 @@ dummy_func(
macro(LOAD_ATTR_CLASS_WITH_METACLASS_CHECK) =
unused/1 +
_RECORD_TOS_TYPE +
_RECORD_TOS +
_GUARD_TYPE_VERSION +
_CHECK_ATTR_CLASS +
_LOAD_ATTR_CLASS +
@ -4736,6 +4737,7 @@ dummy_func(
unused/1 +
_CHECK_PEP_523 +
_CHECK_OBJECT +
_CHECK_RECURSION_REMAINING +
_ALLOCATE_OBJECT +
_CREATE_INIT_FRAME +
_PUSH_FRAME;
@ -6107,6 +6109,13 @@ dummy_func(
value = PyStackRef_FromPyObjectBorrow(ptr);
}
tier2 pure op(_RROT_3, (bottom, middle, top -- bottom, middle, top)) {
_PyStackRef temp = top;
top = middle;
middle = bottom;
bottom = temp;
}
tier2 op(_START_EXECUTOR, (executor/4 --)) {
#ifndef _Py_JIT
assert(current_executor == (_PyExecutorObject*)executor);

View file

@ -1305,7 +1305,7 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int
}
#ifdef _Py_TIER2
#ifdef _Py_JIT
_PyJitEntryFuncPtr _Py_jit_entry = _PyJIT;
_PyJitEntryFuncPtr _Py_jit_entry = _PyJIT_Entry;
#else
_PyJitEntryFuncPtr _Py_jit_entry = _PyTier2Interpreter;
#endif

View file

@ -168,7 +168,6 @@
#define STOP_TRACING() ((void)(0));
#endif
/* PRE_DISPATCH_GOTO() does lltrace if enabled. Normally a no-op */
#ifdef Py_DEBUG
#define PRE_DISPATCH_GOTO() if (frame->lltrace >= 5) { \

View file

@ -22337,6 +22337,102 @@
break;
}
case _RROT_3_r03: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef top;
_PyStackRef middle;
_PyStackRef bottom;
top = stack_pointer[-1];
middle = stack_pointer[-2];
bottom = stack_pointer[-3];
_PyStackRef temp = top;
top = middle;
middle = bottom;
bottom = temp;
_tos_cache2 = top;
_tos_cache1 = middle;
_tos_cache0 = bottom;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -3;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _RROT_3_r13: {
CHECK_CURRENT_CACHED_VALUES(1);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef top;
_PyStackRef middle;
_PyStackRef bottom;
_PyStackRef _stack_item_0 = _tos_cache0;
top = _stack_item_0;
middle = stack_pointer[-1];
bottom = stack_pointer[-2];
_PyStackRef temp = top;
top = middle;
middle = bottom;
bottom = temp;
_tos_cache2 = top;
_tos_cache1 = middle;
_tos_cache0 = bottom;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -2;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _RROT_3_r23: {
CHECK_CURRENT_CACHED_VALUES(2);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef top;
_PyStackRef middle;
_PyStackRef bottom;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
top = _stack_item_1;
middle = _stack_item_0;
bottom = stack_pointer[-1];
_PyStackRef temp = top;
top = middle;
middle = bottom;
bottom = temp;
_tos_cache2 = top;
_tos_cache1 = middle;
_tos_cache0 = bottom;
SET_CURRENT_CACHED_VALUES(3);
stack_pointer += -1;
ASSERT_WITHIN_STACK_BOUNDS(__FILE__, __LINE__);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _RROT_3_r33: {
CHECK_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
_PyStackRef top;
_PyStackRef middle;
_PyStackRef bottom;
_PyStackRef _stack_item_0 = _tos_cache0;
_PyStackRef _stack_item_1 = _tos_cache1;
_PyStackRef _stack_item_2 = _tos_cache2;
top = _stack_item_2;
middle = _stack_item_1;
bottom = _stack_item_0;
_PyStackRef temp = top;
top = middle;
middle = bottom;
bottom = temp;
_tos_cache2 = top;
_tos_cache1 = middle;
_tos_cache0 = bottom;
SET_CURRENT_CACHED_VALUES(3);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());
break;
}
case _START_EXECUTOR_r00: {
CHECK_CURRENT_CACHED_VALUES(0);
assert(WITHIN_STACK_BOUNDS_IGNORING_CACHE());

View file

@ -10,6 +10,7 @@
#include "pycore_opcode_utils.h"
#include "pycore_opcode_metadata.h" // OPCODE_HAS_ARG, etc
#include "pycore_pystate.h" // _PyInterpreterState_GET()
#include <stdbool.h>
@ -1125,6 +1126,8 @@ remove_redundant_nops(cfg_builder *g) {
return changes;
}
static int loads_const(int opcode);
static int
remove_redundant_nops_and_pairs(basicblock *entryblock)
{
@ -1148,7 +1151,7 @@ remove_redundant_nops_and_pairs(basicblock *entryblock)
int opcode = instr->i_opcode;
bool is_redundant_pair = false;
if (opcode == POP_TOP) {
if (prev_opcode == LOAD_CONST || prev_opcode == LOAD_SMALL_INT) {
if (loads_const(prev_opcode)) {
is_redundant_pair = true;
}
else if (prev_opcode == COPY && prev_oparg == 1) {
@ -1300,7 +1303,9 @@ jump_thread(basicblock *bb, cfg_instr *inst, cfg_instr *target, int opcode)
/* Report whether "opcode" pushes a constant onto the stack.
   True for the LOAD_CONST family (OPCODE_HAS_CONST), LOAD_SMALL_INT,
   and LOAD_COMMON_CONSTANT.
   (This span contained both the pre- and post-diff return statements;
   resolved to the post-diff implementation, which adds
   LOAD_COMMON_CONSTANT.) */
static int
loads_const(int opcode)
{
    return OPCODE_HAS_CONST(opcode)
           || opcode == LOAD_SMALL_INT
           || opcode == LOAD_COMMON_CONSTANT;
}
/* Returns new reference */
@ -1323,6 +1328,10 @@ get_const_value(int opcode, int oparg, PyObject *co_consts)
if (opcode == LOAD_SMALL_INT) {
return PyLong_FromLong(oparg);
}
if (opcode == LOAD_COMMON_CONSTANT) {
assert(oparg < NUM_COMMON_CONSTANTS);
return Py_NewRef(_PyInterpreterState_GET()->common_consts[oparg]);
}
if (constant == NULL) {
PyErr_SetString(PyExc_SystemError,
@ -1437,6 +1446,46 @@ maybe_instr_make_load_smallint(cfg_instr *instr, PyObject *newconst,
return 0;
}
/* Does not steal reference to "newconst".
   Return 1 if changed instruction to LOAD_COMMON_CONSTANT.
   Return 0 if could not change instruction to LOAD_COMMON_CONSTANT.
   Return -1 on error.
*/
static int
maybe_instr_make_load_common_const(cfg_instr *instr, PyObject *newconst)
{
    int index;
    if (newconst == Py_None) {
        index = CONSTANT_NONE;
    }
    else if (newconst == Py_True) {
        index = CONSTANT_TRUE;
    }
    else if (newconst == Py_False) {
        index = CONSTANT_FALSE;
    }
    else if (PyUnicode_CheckExact(newconst)
             && PyUnicode_GET_LENGTH(newconst) == 0) {
        index = CONSTANT_EMPTY_STR;
    }
    else if (PyLong_CheckExact(newconst)) {
        int overflow;
        long value = PyLong_AsLongAndOverflow(newconst, &overflow);
        if (value == -1 && PyErr_Occurred()) {
            return -1;
        }
        if (overflow || value != -1) {
            /* Only -1 has a common-constant slot among the ints. */
            return 0;
        }
        index = CONSTANT_MINUS_ONE;
    }
    else {
        return 0;
    }
    /* Common constants are immortal, so no refcount bookkeeping is needed. */
    assert(_Py_IsImmortal(newconst));
    INSTR_SET_OP1(instr, LOAD_COMMON_CONSTANT, index);
    return 1;
}
/* Steals reference to "newconst" */
static int
@ -1452,6 +1501,14 @@ instr_make_load_const(cfg_instr *instr, PyObject *newconst,
if (res > 0) {
return SUCCESS;
}
res = maybe_instr_make_load_common_const(instr, newconst);
if (res < 0) {
Py_DECREF(newconst);
return ERROR;
}
if (res > 0) {
return SUCCESS;
}
int oparg = add_const(newconst, consts, const_cache, consts_index);
RETURN_IF_ERROR(oparg);
INSTR_SET_OP1(instr, LOAD_CONST, oparg);
@ -2208,7 +2265,7 @@ basicblock_optimize_load_const(PyObject *const_cache, basicblock *bb,
oparg = inst->i_oparg;
}
assert(!IS_ASSEMBLER_OPCODE(opcode));
if (opcode != LOAD_CONST && opcode != LOAD_SMALL_INT) {
if (!loads_const(opcode)) {
continue;
}
int nextop = i+1 < bb->b_iused ? bb->b_instr[i+1].i_opcode : 0;
@ -2308,6 +2365,17 @@ basicblock_optimize_load_const(PyObject *const_cache, basicblock *bb,
break;
}
}
if (inst->i_opcode == LOAD_CONST) {
PyObject *constant = get_const_value(inst->i_opcode, inst->i_oparg, consts);
if (constant == NULL) {
return ERROR;
}
int res = maybe_instr_make_load_common_const(inst, constant);
Py_DECREF(constant);
if (res < 0) {
return ERROR;
}
}
}
return SUCCESS;
}

View file

@ -1405,14 +1405,15 @@ add_stats(GCState *gcstate, int gen, struct gc_generation_stats *stats)
memcpy(cur_stats, prev_stats, sizeof(struct gc_generation_stats));
cur_stats->ts_start = stats->ts_start;
cur_stats->heap_size = stats->heap_size;
cur_stats->collections += 1;
cur_stats->collected += stats->collected;
cur_stats->uncollectable += stats->uncollectable;
cur_stats->candidates += stats->candidates;
cur_stats->duration += stats->duration;
cur_stats->heap_size = stats->heap_size;
/* Publish ts_stop last so remote readers do not select a partially
updated stats record as the latest collection. */
cur_stats->ts_stop = stats->ts_stop;
}
@ -1455,10 +1456,14 @@ gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason)
assert(generation >= 0 && generation < NUM_GENERATIONS);
#ifdef Py_STATS
if (_Py_stats) {
_Py_stats->object_stats.object_visits = 0;
{
PyStats *s = _PyStats_GET();
if (s) {
s->object_stats.object_visits = 0;
}
}
#endif
GC_STAT_ADD(generation, collections, 1);
struct gc_generation_stats stats = { 0 };
@ -1617,12 +1622,16 @@ gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason)
/* Update stats */
add_stats(gcstate, generation, &stats);
GC_STAT_ADD(generation, objects_collected, m);
GC_STAT_ADD(generation, objects_collected, stats.collected);
#ifdef Py_STATS
if (_Py_stats) {
GC_STAT_ADD(generation, object_visits,
_Py_stats->object_stats.object_visits);
_Py_stats->object_stats.object_visits = 0;
{
PyStats *s = _PyStats_GET();
if (s) {
GC_STAT_ADD(generation, object_visits,
s->object_stats.object_visits);
s->object_stats.object_visits = 0;
}
}
#endif

View file

@ -2492,6 +2492,8 @@ gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason)
/* Update stats */
struct gc_generation_stats *stats = get_stats(gcstate, generation);
stats->ts_start = start;
stats->ts_stop = stop;
stats->collections++;
stats->collected += m;
stats->uncollectable += n;

View file

@ -1925,6 +1925,14 @@
JUMP_TO_PREDICTED(CALL);
}
}
// _CHECK_RECURSION_REMAINING
{
if (tstate->py_recursion_remaining <= 1) {
UPDATE_MISS_STATS(CALL);
assert(_PyOpcode_Deopt[opcode] == (CALL));
JUMP_TO_PREDICTED(CALL);
}
}
// _ALLOCATE_OBJECT
{
PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable);

View file

@ -15,6 +15,7 @@
#include "pycore_interpframe.h"
#include "pycore_interpolation.h"
#include "pycore_intrinsics.h"
#include "pycore_jit_unwind.h"
#include "pycore_lazyimportobject.h"
#include "pycore_list.h"
#include "pycore_long.h"
@ -60,6 +61,40 @@ jit_error(const char *message)
PyErr_Format(PyExc_RuntimeWarning, "JIT %s (%d)", message, hint);
}
/*
* Publish JIT code to optional tooling backends.
*
* The return value is a backend-specific deregistration handle, not a
* success/failure indicator. NULL means there is nothing to unregister later:
* perf does not need a handle, and GDB registration failures are intentionally
* non-fatal because tooling support must not make JIT compilation fail.
*/
static void *
jit_record_code(const void *code_addr, size_t code_size,
                const char *entry, const char *filename)
{
#ifdef PY_HAVE_PERF_TRAMPOLINE
    /* Perf jitdump mode: if the active trampoline callbacks are the perf
       JIT ones, emit a named-code record and stop — perf needs no
       deregistration handle, so the GDB path below is skipped. */
    _PyPerf_Callbacks callbacks;
    _PyPerfTrampoline_GetCallbacks(&callbacks);
    if (callbacks.write_state == _Py_perfmap_jit_callbacks.write_state) {
        _PyPerfJit_WriteNamedCode(
            code_addr, code_size, entry, filename);
        return NULL;
    }
#endif
#if defined(PY_HAVE_JIT_GDB_UNWIND)
    /* Returns the backend handle to pass to _PyJitUnwind_GdbUnregisterCode
       later (may be NULL on registration failure — non-fatal by design). */
    return _PyJitUnwind_GdbRegisterCode(
        code_addr, code_size, entry, filename);
#else
    /* No tooling backend compiled in: silence unused-parameter warnings. */
    (void)code_addr;
    (void)code_size;
    (void)entry;
    (void)filename;
    return NULL;
#endif
}
static int
address_in_executor_array(_PyExecutorObject **ptrs, size_t count, uintptr_t addr)
{
@ -715,6 +750,10 @@ _PyJIT_Compile(_PyExecutorObject *executor, const _PyUOpInstruction trace[], siz
}
executor->jit_code = memory;
executor->jit_size = total_size;
executor->jit_gdb_handle = jit_record_code(memory,
code_size + state.trampolines.size,
"jit",
"executor");
return 0;
}
@ -727,6 +766,12 @@ _PyJIT_Free(_PyExecutorObject *executor)
if (memory) {
executor->jit_code = NULL;
executor->jit_size = 0;
#if defined(PY_HAVE_JIT_GDB_UNWIND)
if (executor->jit_gdb_handle != NULL) {
_PyJitUnwind_GdbUnregisterCode(executor->jit_gdb_handle);
executor->jit_gdb_handle = NULL;
}
#endif
if (jit_free(memory, size)) {
PyErr_FormatUnraisable("Exception ignored while "
"freeing JIT memory");

986
Python/jit_unwind.c Normal file
View file

@ -0,0 +1,986 @@
/*
* Python JIT - DWARF .eh_frame builder
*
* This file contains the DWARF CFI generator used to build .eh_frame
* data for JIT code (perf jitdump and other unwinders).
*/
#include "Python.h"
#include "pycore_jit_unwind.h"
#include "pycore_lock.h"
#if defined(PY_HAVE_JIT_GDB_UNWIND)
# include "jit_unwind_info.h"
# if !JIT_UNWIND_INFO_SUPPORTED
# error "JIT unwind info was not generated for this target"
# endif
#endif
#if defined(PY_HAVE_PERF_TRAMPOLINE) || defined(PY_HAVE_JIT_GDB_UNWIND)
#if defined(PY_HAVE_JIT_GDB_UNWIND)
# include <elf.h>
#endif
#include <stdio.h>
#include <string.h>
// =============================================================================
// DWARF CONSTANTS
// =============================================================================
/*
 * DWARF (Debugging With Attributed Record Formats) constants
*
* DWARF is a debugging data format used to provide stack unwinding information.
* These constants define the various encoding types and opcodes used in
* DWARF Call Frame Information (CFI) records.
*/
/* DWARF Call Frame Information version */
#define DWRF_CIE_VERSION 1
/* DWARF CFA (Call Frame Address) opcodes.
   Values follow the DWARF Call Frame Information encoding; the last three
   are "primary" opcodes whose low 6 bits carry an operand (see the
   `DWRF_CFA_advance_loc | n` / `DWRF_CFA_offset | reg` uses below). */
enum {
    DWRF_CFA_nop = 0x0,                 // No operation (also used as padding)
    DWRF_CFA_offset_extended = 0x5,     // Extended offset instruction
    DWRF_CFA_def_cfa = 0xc,             // Define CFA rule (register + offset)
    DWRF_CFA_def_cfa_register = 0xd,    // Define CFA register
    DWRF_CFA_def_cfa_offset = 0xe,      // Define CFA offset
    DWRF_CFA_offset_extended_sf = 0x11, // Extended signed offset
    DWRF_CFA_advance_loc = 0x40,        // Advance location (low 6 bits = delta)
    DWRF_CFA_offset = 0x80,             // Saved-register offset (low 6 bits = reg)
    DWRF_CFA_restore = 0xc0             // Restore register (low 6 bits = reg)
};
/*
* Architecture-specific DWARF register numbers
*
* These constants define the register numbering scheme used by DWARF
* for each supported architecture. The numbers must match the ABI
* specification for proper stack unwinding.
*/
enum {
#ifdef __x86_64__
    /* x86_64 register numbering (note: order is defined by x86_64 ABI).
       Values are implicit and consecutive, starting at 0 for RAX. */
    DWRF_REG_AX,  // RAX
    DWRF_REG_DX,  // RDX
    DWRF_REG_CX,  // RCX
    DWRF_REG_BX,  // RBX
    DWRF_REG_SI,  // RSI
    DWRF_REG_DI,  // RDI
    DWRF_REG_BP,  // RBP
    DWRF_REG_SP,  // RSP
    DWRF_REG_8,   // R8
    DWRF_REG_9,   // R9
    DWRF_REG_10,  // R10
    DWRF_REG_11,  // R11
    DWRF_REG_12,  // R12
    DWRF_REG_13,  // R13
    DWRF_REG_14,  // R14
    DWRF_REG_15,  // R15
    DWRF_REG_RA,  // Return address (RIP) — DWARF column 16
#elif defined(__aarch64__) && defined(__AARCH64EL__) && !defined(__ILP32__)
    /* AArch64 register numbering (explicit values per the AArch64 ABI) */
    DWRF_REG_FP = 29,  // Frame Pointer (x29)
    DWRF_REG_RA = 30,  // Link register / return address (x30)
    DWRF_REG_SP = 31,  // Stack pointer
#else
#    error "Unsupported target architecture"
#endif
};
// =============================================================================
// ELF OBJECT CONTEXT
// =============================================================================
/*
* Context for building ELF/DWARF structures
*
* This structure maintains state while constructing DWARF unwind information.
* It acts as a simple buffer manager with pointers to track current position
* and important landmarks within the buffer.
*/
typedef struct ELFObjectContext {
    uint8_t* p;          // Current write position in buffer
    uint8_t* startp;     // Start of buffer (for offset calculations)
    uint8_t* fde_p;      // Location of the FDE PC field, recorded so the
                         //   PC-relative offset can be patched later in
                         //   perf mode; NULL when no patch is pending
    uintptr_t code_addr; // Address of the code section
    size_t code_size;    // Size of the code section
} ELFObjectContext;
// =============================================================================
// DWARF GENERATION UTILITIES
// =============================================================================
/*
* Append a null-terminated string to the ELF context buffer.
*
* Args:
* ctx: ELF object context
* str: String to append (must be null-terminated)
*
* Returns: Offset from start of buffer where string was written
*/
/*
 * Append a null-terminated string (terminator included) to the buffer.
 *
 * Args:
 *   ctx: ELF object context
 *   str: String to append (must be null-terminated)
 *
 * Returns: Offset from start of buffer where the string was written
 */
static uint32_t elfctx_append_string(ELFObjectContext* ctx, const char* str) {
    uint32_t offset = (uint32_t)(ctx->p - ctx->startp);
    size_t nbytes = strlen(str) + 1;  /* include the NUL terminator */
    memcpy(ctx->p, str, nbytes);
    ctx->p += nbytes;
    return offset;
}
/*
* Append a SLEB128 (Signed Little Endian Base 128) value
*
* SLEB128 is a variable-length encoding used extensively in DWARF.
* It efficiently encodes small numbers in fewer bytes.
*
* Args:
* ctx: ELF object context
* v: Signed value to encode
*/
/*
 * Append a SLEB128 (Signed Little Endian Base 128) value.
 *
 * Emits 7 payload bits per byte, least-significant group first; the MSB
 * of each byte is the continuation flag. Loops while the remaining value
 * does not fit (with sign) in the final 7-bit group.
 *
 * Args:
 *   ctx: ELF object context
 *   v:   Signed value to encode
 */
static void elfctx_append_sleb128(ELFObjectContext* ctx, int32_t v) {
    uint8_t* out = ctx->p;
    while ((uint32_t)(v + 0x40) >= 0x80) {
        *out++ = (uint8_t)((v & 0x7f) | 0x80);  /* continuation bit set */
        /* NOTE(review): relies on arithmetic (sign-propagating) right shift
           of negative values — implementation-defined but universal on
           supported compilers. */
        v >>= 7;
    }
    *out++ = (uint8_t)(v & 0x7f);  /* final byte, continuation bit clear */
    ctx->p = out;
}
/*
* Append a ULEB128 (Unsigned Little Endian Base 128) value
*
* Similar to SLEB128 but for unsigned values.
*
* Args:
* ctx: ELF object context
* v: Unsigned value to encode
*/
/*
 * Append a ULEB128 (Unsigned Little Endian Base 128) value.
 *
 * Similar to SLEB128 but for unsigned values.
 *
 * Args:
 *   ctx: ELF object context
 *   v:   Unsigned value to encode
 *
 * Fix: cast to uint8_t (not char) for consistency with
 * elfctx_append_sleb128 and the uint8_t* buffer; the stored byte values
 * are unchanged.
 */
static void elfctx_append_uleb128(ELFObjectContext* ctx, uint32_t v) {
    uint8_t* p = ctx->p;
    /* Encode 7 bits at a time, with continuation bit in MSB */
    for (; v >= 0x80; v >>= 7) {
        *p++ = (uint8_t)((v & 0x7f) | 0x80);  // Set continuation bit
    }
    *p++ = (uint8_t)v;  // Final byte without continuation bit
    ctx->p = p;
}
/*
* Macros for generating DWARF structures
*
* These macros provide a convenient way to write various data types
* to the DWARF buffer while automatically advancing the pointer.
*/
/* NOTE: these macros write through a local cursor `p` and, for the
   LEB128/string variants, sync position through a local `ctx` — both must
   be in scope at the expansion site.
   NOTE(review): DWRF_U16/U32/ADDR store through casted pointers and may
   perform unaligned writes; assumed acceptable on the supported targets
   (x86_64 / AArch64) — confirm if porting to a stricter architecture. */
#define DWRF_U8(x) (*p++ = (x)) // Write unsigned 8-bit
#define DWRF_I8(x) (*(int8_t*)p = (x), p++) // Write signed 8-bit
#define DWRF_U16(x) (*(uint16_t*)p = (x), p += 2) // Write unsigned 16-bit
#define DWRF_U32(x) (*(uint32_t*)p = (x), p += 4) // Write unsigned 32-bit
#define DWRF_ADDR(x) (*(uintptr_t*)p = (x), p += sizeof(uintptr_t)) // Write address
#define DWRF_UV(x) (ctx->p = p, elfctx_append_uleb128(ctx, (x)), p = ctx->p) // Write ULEB128
#define DWRF_SV(x) (ctx->p = p, elfctx_append_sleb128(ctx, (x)), p = ctx->p) // Write SLEB128
#define DWRF_STR(str) (ctx->p = p, elfctx_append_string(ctx, (str)), p = ctx->p) // Write string

/* Align to specified boundary with NOP instructions */
#define DWRF_ALIGNNOP(s) \
    while ((uintptr_t)p & ((s)-1)) { \
        *p++ = DWRF_CFA_nop; \
    }

/* Write a DWARF section with automatic size calculation: reserves a
   32-bit length field, runs `stmt`, then backpatches the length
   (excluding the length field itself, per the .eh_frame format). */
#define DWRF_SECTION(name, stmt) \
    { \
        uint32_t* szp_##name = (uint32_t*)p; \
        p += 4; \
        stmt; \
        *szp_##name = (uint32_t)((p - (uint8_t*)szp_##name) - 4); \
    }
// =============================================================================
// DWARF EH FRAME GENERATION
// =============================================================================
static void elf_init_ehframe_perf(ELFObjectContext* ctx);
#if defined(PY_HAVE_JIT_GDB_UNWIND)
static void elf_init_ehframe_gdb(ELFObjectContext* ctx);
#endif
/* Dispatch on FDE address encoding: perf wants PC-relative addresses,
   the GDB JIT interface wants absolute ones. */
static inline void elf_init_ehframe(ELFObjectContext* ctx, int absolute_addr) {
    if (!absolute_addr) {
        elf_init_ehframe_perf(ctx);
        return;
    }
#if defined(PY_HAVE_JIT_GDB_UNWIND)
    elf_init_ehframe_gdb(ctx);
#else
    /* Callers only pass absolute_addr when the GDB backend is built. */
    Py_UNREACHABLE();
#endif
}
size_t
_PyJitUnwind_EhFrameSize(int absolute_addr)
{
/* The .eh_frame we emit is small and bounded; keep a generous buffer. */
uint8_t scratch[512];
_Static_assert(sizeof(scratch) >= 256,
"scratch buffer may be too small for elf_init_ehframe");
ELFObjectContext ctx;
ctx.code_size = 1;
ctx.code_addr = 0;
ctx.startp = ctx.p = scratch;
ctx.fde_p = NULL;
/* Generate once into scratch to learn the required size. */
elf_init_ehframe(&ctx, absolute_addr);
ptrdiff_t size = ctx.p - ctx.startp;
assert(size <= (ptrdiff_t)sizeof(scratch));
return (size_t)size;
}
size_t
_PyJitUnwind_BuildEhFrame(uint8_t *buffer, size_t buffer_size,
const void *code_addr, size_t code_size,
int absolute_addr)
{
if (buffer == NULL || code_addr == NULL || code_size == 0) {
return 0;
}
/* Generate the frame twice: once to size-check, once to write. */
size_t required = _PyJitUnwind_EhFrameSize(absolute_addr);
if (required == 0 || required > buffer_size) {
return 0;
}
ELFObjectContext ctx;
ctx.code_size = code_size;
ctx.code_addr = (uintptr_t)code_addr;
ctx.startp = ctx.p = buffer;
ctx.fde_p = NULL;
elf_init_ehframe(&ctx, absolute_addr);
size_t written = (size_t)(ctx.p - ctx.startp);
/* The frame size is independent of code_addr/code_size (fixed-width fields). */
assert(written == required);
return written;
}
/*
* Generate a minimal .eh_frame for a single JIT code region.
*
* The .eh_frame section contains Call Frame Information (CFI) that describes
* how to unwind the stack at any point in the code. This is essential for
* unwinding through JIT-generated code.
*
* The generated data contains:
* 1. A CIE (Common Information Entry) describing the calling convention.
* 2. An FDE (Frame Description Entry) describing how to unwind the JIT frame.
*
* Two flavors are emitted, dispatched on the absolute_addr flag:
*
* - absolute_addr == 0 (elf_init_ehframe_perf): PC-relative FDE address
* encoding for perf's synthesized DSO layout. The CIE describes the
* trampoline's entry state and the FDE walks through the prologue and
* epilogue with advance_loc instructions. This matches the pre-existing
* perf_jit_trampoline behavior byte-for-byte.
*
* - absolute_addr == 1 (elf_init_ehframe_gdb): absolute FDE address
* encoding for the GDB JIT in-memory ELF. The CIE describes the
* steady-state frame layout (CFA = %rbp+16 / x29+16, with saved fp and
* return-address column at fixed offsets) and the FDE emits no further
* CFI. The same rule applies at every PC in the registered region,
* which is correct for executor stencils (they pin the frame pointer
* across the region). This is the GDB-side fix; see elf_init_ehframe_gdb
* for details.
*/
static void elf_init_ehframe_perf(ELFObjectContext* ctx) {
int fde_ptr_enc = DWRF_EH_PE_pcrel | DWRF_EH_PE_sdata4;
uint8_t* p = ctx->p;
uint8_t* framep = p; // Remember start of frame data
/*
* DWARF Unwind Table for Trampoline Function
*
* This section defines DWARF Call Frame Information (CFI) using encoded macros
* like `DWRF_U8`, `DWRF_UV`, and `DWRF_SECTION` to describe how the trampoline function
* preserves and restores registers. This is used by profiling tools (e.g., `perf`)
* and debuggers for stack unwinding in JIT-compiled code.
*
* -------------------------------------------------
* TO REGENERATE THIS TABLE FROM GCC OBJECTS:
* -------------------------------------------------
*
* 1. Create a trampoline source file (e.g., `trampoline.c`):
*
* #include <Python.h>
* typedef PyObject* (*py_evaluator)(void*, void*, int);
* PyObject* trampoline(void *ts, void *f, int throwflag, py_evaluator evaluator) {
* return evaluator(ts, f, throwflag);
* }
*
* 2. Compile to an object file with frame pointer preservation:
*
* gcc trampoline.c -I. -I./Include -O2 -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer -c
*
* 3. Extract DWARF unwind info from the object file:
*
* readelf -w trampoline.o
*
* Example output from `.eh_frame`:
*
* 00000000 CIE
* Version: 1
* Augmentation: "zR"
* Code alignment factor: 4
* Data alignment factor: -8
* Return address column: 30
* DW_CFA_def_cfa: r31 (sp) ofs 0
*
* 00000014 FDE cie=00000000 pc=0..14
* DW_CFA_advance_loc: 4
* DW_CFA_def_cfa_offset: 16
* DW_CFA_offset: r29 at cfa-16
* DW_CFA_offset: r30 at cfa-8
* DW_CFA_advance_loc: 12
* DW_CFA_restore: r30
* DW_CFA_restore: r29
* DW_CFA_def_cfa_offset: 0
*
* -- These values can be verified by comparing with `readelf -w` or `llvm-dwarfdump --eh-frame`.
*
* ----------------------------------
* HOW TO TRANSLATE TO DWRF_* MACROS:
* ----------------------------------
*
* After compiling your trampoline with:
*
* gcc trampoline.c -I. -I./Include -O2 -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer -c
*
* run:
*
* readelf -w trampoline.o
*
* to inspect the generated `.eh_frame` data. You will see two main components:
*
* 1. A CIE (Common Information Entry): shared configuration used by all FDEs.
* 2. An FDE (Frame Description Entry): function-specific unwind instructions.
*
* ---------------------
* Translating the CIE:
* ---------------------
* From `readelf -w`, you might see:
*
* 00000000 0000000000000010 00000000 CIE
* Version: 1
* Augmentation: "zR"
* Code alignment factor: 4
* Data alignment factor: -8
* Return address column: 30
* Augmentation data: 1b
* DW_CFA_def_cfa: r31 (sp) ofs 0
*
* Map this to:
*
* DWRF_SECTION(CIE,
* DWRF_U32(0); // CIE ID (always 0 for CIEs)
* DWRF_U8(DWRF_CIE_VERSION); // Version: 1
* DWRF_STR("zR"); // Augmentation string "zR"
* DWRF_UV(4); // Code alignment factor = 4
* DWRF_SV(-8); // Data alignment factor = -8
* DWRF_U8(DWRF_REG_RA); // Return address register (e.g., x30 = 30)
* DWRF_UV(1); // Augmentation data length = 1
* DWRF_U8(DWRF_EH_PE_pcrel | DWRF_EH_PE_sdata4); // Encoding for FDE pointers
*
* DWRF_U8(DWRF_CFA_def_cfa); // DW_CFA_def_cfa
* DWRF_UV(DWRF_REG_SP); // Register: SP (r31)
* DWRF_UV(0); // Offset = 0
*
* DWRF_ALIGNNOP(sizeof(uintptr_t)); // Align to pointer size boundary
* )
*
* Notes:
* - Use `DWRF_UV` for unsigned LEB128, `DWRF_SV` for signed LEB128.
* - `DWRF_REG_RA` and `DWRF_REG_SP` are architecture-defined constants.
*
* ---------------------
* Translating the FDE:
* ---------------------
* From `readelf -w`:
*
* 00000014 0000000000000020 00000018 FDE cie=00000000 pc=0000000000000000..0000000000000014
* DW_CFA_advance_loc: 4
* DW_CFA_def_cfa_offset: 16
* DW_CFA_offset: r29 at cfa-16
* DW_CFA_offset: r30 at cfa-8
* DW_CFA_advance_loc: 12
* DW_CFA_restore: r30
* DW_CFA_restore: r29
* DW_CFA_def_cfa_offset: 0
*
* Map the FDE header and instructions to:
*
* DWRF_SECTION(FDE,
* DWRF_U32((uint32_t)(p - framep)); // Offset to CIE (relative from here)
* DWRF_U32(pc_relative_offset); // PC-relative location of the code (calculated dynamically)
* DWRF_U32(ctx->code_size); // Code range covered by this FDE
* DWRF_U8(0); // Augmentation data length (none)
*
* DWRF_U8(DWRF_CFA_advance_loc | 1); // Advance location by 1 unit (1 * 4 = 4 bytes)
* DWRF_U8(DWRF_CFA_def_cfa_offset); // CFA = SP + 16
* DWRF_UV(16);
*
* DWRF_U8(DWRF_CFA_offset | DWRF_REG_FP); // Save x29 (frame pointer)
* DWRF_UV(2); // At offset 2 * 8 = 16 bytes
*
* DWRF_U8(DWRF_CFA_offset | DWRF_REG_RA); // Save x30 (return address)
* DWRF_UV(1); // At offset 1 * 8 = 8 bytes
*
* DWRF_U8(DWRF_CFA_advance_loc | 3); // Advance location by 3 units (3 * 4 = 12 bytes)
*
* DWRF_U8(DWRF_CFA_offset | DWRF_REG_RA); // Restore x30
* DWRF_U8(DWRF_CFA_offset | DWRF_REG_FP); // Restore x29
*
* DWRF_U8(DWRF_CFA_def_cfa_offset); // CFA = SP
* DWRF_UV(0);
* )
*
* To regenerate:
* 1. Get the `code alignment factor`, `data alignment factor`, and `RA column` from the CIE.
* 2. Note the range of the function from the FDE's `pc=...` line and map it to the JIT code as
* the code is in a different address space every time.
* 3. For each `DW_CFA_*` entry, use the corresponding `DWRF_*` macro:
* - `DW_CFA_def_cfa_offset` DWRF_U8(DWRF_CFA_def_cfa_offset), DWRF_UV(value)
* - `DW_CFA_offset: rX` DWRF_U8(DWRF_CFA_offset | reg), DWRF_UV(offset)
* - `DW_CFA_restore: rX` DWRF_U8(DWRF_CFA_offset | reg) // restore is same as reusing offset
* - `DW_CFA_advance_loc: N` DWRF_U8(DWRF_CFA_advance_loc | (N / code_alignment_factor))
* 4. Use `DWRF_REG_FP`, `DWRF_REG_RA`, etc., for register numbers.
* 5. Use `sizeof(uintptr_t)` (typically 8) for pointer size calculations and alignment.
*/
/*
* Emit DWARF EH CIE (Common Information Entry)
*
* The CIE describes the calling conventions and basic unwinding rules
* that apply to all functions in this compilation unit.
*/
DWRF_SECTION(CIE,
DWRF_U32(0); // CIE ID (0 indicates this is a CIE)
DWRF_U8(DWRF_CIE_VERSION); // CIE version (1)
DWRF_STR("zR"); // Augmentation string ("zR" = has LSDA)
#ifdef __x86_64__
DWRF_UV(1); // Code alignment factor (x86_64: 1 byte)
#elif defined(__aarch64__) && defined(__AARCH64EL__) && !defined(__ILP32__)
DWRF_UV(4); // Code alignment factor (AArch64: 4 bytes per instruction)
#endif
DWRF_SV(-(int64_t)sizeof(uintptr_t)); // Data alignment factor (negative)
DWRF_U8(DWRF_REG_RA); // Return address register number
DWRF_UV(1); // Augmentation data length
DWRF_U8(fde_ptr_enc); // FDE pointer encoding
/* Initial CFI instructions - describe default calling convention */
#ifdef __x86_64__
/* x86_64 initial CFI state */
DWRF_U8(DWRF_CFA_def_cfa); // Define CFA (Call Frame Address)
DWRF_UV(DWRF_REG_SP); // CFA = SP register
DWRF_UV(sizeof(uintptr_t)); // CFA = SP + pointer_size
DWRF_U8(DWRF_CFA_offset|DWRF_REG_RA); // Return address is saved
DWRF_UV(1); // At offset 1 from CFA
#elif defined(__aarch64__) && defined(__AARCH64EL__) && !defined(__ILP32__)
/* AArch64 initial CFI state */
DWRF_U8(DWRF_CFA_def_cfa); // Define CFA (Call Frame Address)
DWRF_UV(DWRF_REG_SP); // CFA = SP register
DWRF_UV(0); // CFA = SP + 0 (AArch64 starts with offset 0)
// No initial register saves in AArch64 CIE
#endif
DWRF_ALIGNNOP(sizeof(uintptr_t)); // Align to pointer boundary
)
/*
* Emit DWARF EH FDE (Frame Description Entry)
*
* The FDE describes unwinding information specific to this function.
* It references the CIE and provides function-specific CFI instructions.
*
* The PC-relative offset is calculated after the entire EH frame is built
* to ensure accurate positioning relative to the synthesized DSO layout.
*/
DWRF_SECTION(FDE,
DWRF_U32((uint32_t)(p - framep)); // Offset to CIE (backwards reference)
/*
* In perf jitdump mode the FDE PC field is encoded PC-relative and
* points back to code_start. Record where that field lives so we can
* patch in the final offset after the rest of the synthetic DSO
* layout is known.
*/
ctx->fde_p = p; // Remember where PC offset field is located for later calculation
DWRF_U32(0); // Placeholder for PC-relative offset (calculated below)
DWRF_U32(ctx->code_size); // Address range covered by this FDE (code length)
DWRF_U8(0); // Augmentation data length (none)
/*
* Architecture-specific CFI instructions
*
* These instructions describe how registers are saved and restored
* during function calls. Each architecture has different calling
* conventions and register usage patterns.
*/
#ifdef __x86_64__
/* x86_64 calling convention unwinding rules */
# if defined(__CET__) && (__CET__ & 1)
DWRF_U8(DWRF_CFA_advance_loc | 4); // Advance past endbr64 (4 bytes)
# endif
DWRF_U8(DWRF_CFA_advance_loc | 1); // Advance past push %rbp (1 byte)
DWRF_U8(DWRF_CFA_def_cfa_offset); // def_cfa_offset 16
DWRF_UV(16); // New offset: SP + 16
DWRF_U8(DWRF_CFA_offset | DWRF_REG_BP); // offset r6 at cfa-16
DWRF_UV(2); // Offset factor: 2 * 8 = 16 bytes
DWRF_U8(DWRF_CFA_advance_loc | 3); // Advance past mov %rsp,%rbp (3 bytes)
DWRF_U8(DWRF_CFA_def_cfa_register); // def_cfa_register r6
DWRF_UV(DWRF_REG_BP); // Use base pointer register
DWRF_U8(DWRF_CFA_advance_loc | 3); // Advance past call *%rcx (2 bytes) + pop %rbp (1 byte) = 3
DWRF_U8(DWRF_CFA_def_cfa); // def_cfa r7 ofs 8
DWRF_UV(DWRF_REG_SP); // Use stack pointer register
DWRF_UV(8); // New offset: SP + 8
#elif defined(__aarch64__) && defined(__AARCH64EL__) && !defined(__ILP32__)
/* AArch64 calling convention unwinding rules */
DWRF_U8(DWRF_CFA_advance_loc | 1); // Advance by 1 instruction (4 bytes)
DWRF_U8(DWRF_CFA_def_cfa_offset); // CFA = SP + 16
DWRF_UV(16); // Stack pointer moved by 16 bytes
DWRF_U8(DWRF_CFA_offset | DWRF_REG_FP); // x29 (frame pointer) saved
DWRF_UV(2); // At CFA-16 (2 * 8 = 16 bytes from CFA)
DWRF_U8(DWRF_CFA_offset | DWRF_REG_RA); // x30 (link register) saved
DWRF_UV(1); // At CFA-8 (1 * 8 = 8 bytes from CFA)
DWRF_U8(DWRF_CFA_advance_loc | 3); // Advance by 3 instructions (12 bytes)
DWRF_U8(DWRF_CFA_def_cfa_register); // CFA = FP (x29) + 16
DWRF_UV(DWRF_REG_FP);
DWRF_U8(DWRF_CFA_restore | DWRF_REG_RA); // Restore x30 - NO DWRF_UV() after this!
DWRF_U8(DWRF_CFA_restore | DWRF_REG_FP); // Restore x29 - NO DWRF_UV() after this!
DWRF_U8(DWRF_CFA_def_cfa); // CFA = SP + 0 (stack restored)
DWRF_UV(DWRF_REG_SP);
DWRF_UV(0);
#else
# error "Unsupported target architecture"
#endif
DWRF_ALIGNNOP(sizeof(uintptr_t)); // Align to pointer boundary
)
ctx->p = p; // Update context pointer to end of generated data
/* Calculate and update the PC-relative offset in the FDE
*
* When perf processes the jitdump, it creates a synthesized DSO with this layout:
*
* Synthesized DSO Memory Layout:
* < code_start
* Code Section
* (round_up(code_size, 8) bytes)
* < start of EH frame data
* EH Frame Data
*
* CIE data
*
*
* FDE Header:
* - CIE offset (4 bytes)
* - PC offset (4 bytes) < fde_offset_in_frame > points to code_start
* - address range (4 bytes) (this specific field)
* CFI Instructions...
*
* < reference_point
* EhFrameHeader
* (navigation metadata)
*
*
* The PC offset field in the FDE must contain the distance from itself to code_start:
*
* distance = code_start - fde_pc_field
*
* Where:
* fde_pc_field_location = reference_point - eh_frame_size + fde_offset_in_frame
* code_start_location = reference_point - eh_frame_size - round_up(code_size, 8)
*
* Therefore:
* distance = code_start_location - fde_pc_field_location
* = (ref - eh_frame_size - rounded_code_size) - (ref - eh_frame_size + fde_offset_in_frame)
* = -rounded_code_size - fde_offset_in_frame
* = -(round_up(code_size, 8) + fde_offset_in_frame)
*
* Note: fde_offset_in_frame is the offset from EH frame start to the PC offset field.
*
*/
int32_t rounded_code_size =
(int32_t)_Py_SIZE_ROUND_UP(ctx->code_size, 8);
int32_t fde_offset_in_frame = (int32_t)(ctx->fde_p - framep);
*(int32_t *)ctx->fde_p = -(rounded_code_size + fde_offset_in_frame);
}
/*
* Build .eh_frame data for the GDB JIT interface.
*
* The executor runs inside the frame established by _PyJIT_Entry, but the
* synthetic executor FDE collapses that state into a single logical JIT frame
* that unwinds directly into _PyEval_*. Executor stencils never touch the
* frame pointer - enforced by Tools/jit/_optimizers.py _validate() and
* -mframe-pointer=reserved - so the steady-state rule is valid at every PC
* and the FDE body is empty. Tools/jit/_targets.py derives the initial CFI
* rules from the row active at the executor call in the compiled shim object.
*/
#if defined(PY_HAVE_JIT_GDB_UNWIND)
/*
 * Emit a minimal .eh_frame (one CIE + one FDE) describing the executor's
 * steady-state frame for the GDB JIT interface.
 *
 * Unlike the perf jitdump variant above, GDB consumes absolute addresses:
 * the FDE pointer encoding is DWRF_EH_PE_absptr and the FDE records
 * ctx->code_addr directly, so no offset needs patching afterwards.  The
 * FDE body carries no extra CFI instructions: the CIE's initial rules
 * hold at every PC in the executor (see the comment block above).
 */
static void elf_init_ehframe_gdb(ELFObjectContext* ctx) {
    int fde_ptr_enc = DWRF_EH_PE_absptr;   // FDE pointers are absolute addresses
    uint8_t* p = ctx->p;
    uint8_t* framep = p;                   // Start of the EH frame data
    DWRF_SECTION(CIE,
        DWRF_U32(0);                       // CIE ID
        DWRF_U8(DWRF_CIE_VERSION);
        DWRF_STR("zR");                    // aug data length + FDE ptr encoding follow
        DWRF_UV(JIT_UNWIND_CODE_ALIGNMENT_FACTOR);
        DWRF_SV(JIT_UNWIND_DATA_ALIGNMENT_FACTOR);
        DWRF_U8(JIT_UNWIND_RA_REG);
        DWRF_UV(1);                        // Augmentation data length
        DWRF_U8(fde_ptr_enc);              // FDE pointer encoding
        /* Executor steady-state rule (our invariant, not the compiler's). */
        DWRF_U8(DWRF_CFA_def_cfa);
        DWRF_UV(JIT_UNWIND_CFA_REG);
        DWRF_UV(JIT_UNWIND_CFA_OFFSET);
        DWRF_U8(DWRF_CFA_offset | JIT_UNWIND_FP_REG);
        DWRF_UV(JIT_UNWIND_FP_OFFSET);
        DWRF_U8(DWRF_CFA_offset | JIT_UNWIND_RA_REG);
        DWRF_UV(JIT_UNWIND_RA_OFFSET);
        DWRF_ALIGNNOP(sizeof(uintptr_t));
    )
    DWRF_SECTION(FDE,
        DWRF_U32((uint32_t)(p - framep));  // Offset to CIE (backwards reference)
        DWRF_ADDR(ctx->code_addr);         // Absolute code start
        DWRF_ADDR((uintptr_t)ctx->code_size); // Code range covered
        DWRF_U8(0);                        // Augmentation data length (none)
        DWRF_ALIGNNOP(sizeof(uintptr_t));
    )
    ctx->p = p;                            // Advance context past the emitted frame
}
#endif
#if defined(PY_HAVE_JIT_GDB_UNWIND)
/*
 * GDB JIT interface declarations (see "JIT Compilation Interface" in
 * the GDB documentation).  The shape of these types and the names of
 * the two exported symbols are fixed by that protocol: the debugger
 * looks up __jit_debug_descriptor and breakpoints
 * __jit_debug_register_code, so none of this may change.
 */
enum {
    JIT_NOACTION = 0,       // No pending action
    JIT_REGISTER_FN = 1,    // relevant_entry was just added
    JIT_UNREGISTER_FN = 2,  // relevant_entry is about to be removed
};
/* One registered symbol file (in-memory ELF image) per JIT region. */
struct jit_code_entry {
    struct jit_code_entry *next;
    struct jit_code_entry *prev;
    const char *symfile_addr;   // In-memory ELF object describing the code
    uint64_t symfile_size;
    const void *code_addr;      // Start of the JIT-compiled code
};
struct jit_descriptor {
    uint32_t version;                       // Protocol version (initialized to 1 below)
    uint32_t action_flag;                   // One of the JIT_* actions above
    struct jit_code_entry *relevant_entry;  // Entry the current action applies to
    struct jit_code_entry *first_entry;     // Head of the doubly-linked entry list
};
/* Serializes all mutation of __jit_debug_descriptor (see register/unregister). */
PyMutex _Py_jit_debug_mutex = {0};
Py_EXPORTED_SYMBOL volatile struct jit_descriptor __jit_debug_descriptor = {
    1, JIT_NOACTION, NULL, NULL
};
/*
 * Notification hook for the GDB JIT interface.  The debugger places a
 * breakpoint on this exact symbol; the runtime calls it after updating
 * __jit_debug_descriptor so the debugger can observe the change.  The
 * descriptor access and the asm memory barrier keep the (intentionally
 * empty) function and its call sites from being optimized away.
 */
Py_EXPORTED_SYMBOL void __attribute__((noinline))
__jit_debug_register_code(void)
{
    /* Keep this call visible to debuggers and not optimized away. */
    (void)__jit_debug_descriptor.action_flag;
#if defined(__GNUC__) || defined(__clang__)
    __asm__ __volatile__("" ::: "memory");
#endif
}
/*
 * Return the ELF e_machine value for the current build target, or 0
 * for targets with no mapping here — callers treat 0 as "skip GDB JIT
 * registration" (see gdb_jit_register_code).
 */
static uint16_t
gdb_jit_machine_id(void)
{
    /* Map the current target to ELF e_machine; return 0 to skip registration. */
#if defined(__x86_64__) || defined(_M_X64)
    return EM_X86_64;
#elif defined(__aarch64__) && !defined(__ILP32__)
    return EM_AARCH64;
#else
    return 0;
#endif
}
/*
 * Build a minimal in-memory ELF object describing one JIT-compiled
 * region and link it into __jit_debug_descriptor so debuggers using
 * the GDB JIT interface can resolve symbols and unwind the code.
 *
 * The object contains: a .text section aliasing the live code, the
 * absolute-address .eh_frame supplied by the caller, section/symbol
 * string tables, and a symbol table with one global STT_FUNC symbol
 * named `symname` covering the whole region.
 *
 * Returns the registered entry (pass it to the unregister path) or
 * NULL on invalid arguments, unsupported target, or allocation
 * failure.  The entry and its symfile buffer stay owned by the
 * registry until unregistered.
 */
static struct jit_code_entry *
gdb_jit_register_code(
    const void *code_addr,
    size_t code_size,
    const char *symname,
    const uint8_t *eh_frame,
    size_t eh_frame_size
)
{
    if (code_addr == NULL || code_size == 0 || symname == NULL) {
        return NULL;
    }
    const uint16_t machine = gdb_jit_machine_id();
    if (machine == 0) {
        // No ELF e_machine mapping for this target: skip registration.
        return NULL;
    }
    /* Section header indices; order matches the shstrtab layout below. */
    enum {
        SH_NULL = 0,
        SH_TEXT,
        SH_EH_FRAME,
        SH_SHSTRTAB,
        SH_STRTAB,
        SH_SYMTAB,
        SH_NUM,
    };
    static const char shstrtab[] =
        "\0.text\0.eh_frame\0.shstrtab\0.strtab\0.symtab";
    _Static_assert(sizeof(shstrtab) ==
                   1 + sizeof(".text") + sizeof(".eh_frame") +
                   sizeof(".shstrtab") + sizeof(".strtab") + sizeof(".symtab"),
                   "shstrtab size mismatch");
    const size_t shstrtab_size = sizeof(shstrtab);
    /* Offsets of each section name inside shstrtab. */
    const size_t sh_text = 1;
    const size_t sh_eh_frame = sh_text + sizeof(".text");
    const size_t sh_shstrtab = sh_eh_frame + sizeof(".eh_frame");
    const size_t sh_strtab = sh_shstrtab + sizeof(".shstrtab");
    const size_t sh_symtab = sh_strtab + sizeof(".strtab");
    const size_t symname_len = strlen(symname);
    const size_t text_size = code_size;
    const size_t text_padded = _Py_SIZE_ROUND_UP(text_size, 8);
    const size_t strtab_size = 1 + symname_len + 1;   // leading NUL + name + NUL
    const size_t symtab_size = 3 * sizeof(Elf64_Sym); // null, section, function
    /* File layout: ehdr, .text, .eh_frame, string tables, symtab, shdrs. */
    size_t offset = sizeof(Elf64_Ehdr);
    offset = _Py_SIZE_ROUND_UP(offset, 16);
    const size_t text_off = offset;
    const size_t eh_off = text_off + text_padded;
    offset = eh_off + eh_frame_size;
    const size_t shstr_off = offset;
    offset += shstrtab_size;
    const size_t str_off = offset;
    offset += strtab_size;
    /* Elf64_Sym requires 8-byte alignment for st_value/st_size. */
    offset = _Py_SIZE_ROUND_UP(offset, 8);
    const size_t sym_off = offset;
    offset += symtab_size;
    offset = _Py_SIZE_ROUND_UP(offset, sizeof(Elf64_Shdr));
    const size_t sh_off = offset;
    const size_t shnum = SH_NUM;
    const size_t total_size = sh_off + shnum * sizeof(Elf64_Shdr);
    uint8_t *buf = (uint8_t *)PyMem_RawMalloc(total_size);
    if (buf == NULL) {
        return NULL;
    }
    memset(buf, 0, total_size);  // All padding and reserved fields stay zero.
    /* ELF header */
    Elf64_Ehdr *ehdr = (Elf64_Ehdr *)buf;
    memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
    ehdr->e_ident[EI_CLASS] = ELFCLASS64;
    ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
    ehdr->e_ident[EI_VERSION] = EV_CURRENT;
    ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
    ehdr->e_type = ET_DYN;
    ehdr->e_machine = machine;
    ehdr->e_version = EV_CURRENT;
    ehdr->e_entry = 0;
    ehdr->e_phoff = 0;   // No program headers: this is a symbol file only.
    ehdr->e_shoff = sh_off;
    ehdr->e_ehsize = sizeof(Elf64_Ehdr);
    ehdr->e_shentsize = sizeof(Elf64_Shdr);
    ehdr->e_shnum = shnum;
    ehdr->e_shstrndx = SH_SHSTRTAB;
    /* Section contents */
    memcpy(buf + text_off, code_addr, text_size);
    memcpy(buf + eh_off, eh_frame, eh_frame_size);
    char *shstr = (char *)(buf + shstr_off);
    memcpy(shstr, shstrtab, shstrtab_size);
    char *strtab = (char *)(buf + str_off);
    strtab[0] = '\0';
    memcpy(strtab + 1, symname, symname_len);
    strtab[strtab_size - 1] = '\0';
    /* Symbol table — index 0 is the mandatory null symbol (buf is zeroed). */
    Elf64_Sym *syms = (Elf64_Sym *)(buf + sym_off);
    /* Section symbol for .text (local) */
    syms[1].st_info = ELF64_ST_INFO(STB_LOCAL, STT_SECTION);
    syms[1].st_shndx = SH_TEXT;
    /* Function symbol */
    syms[2].st_name = 1;   // First (and only) name in .strtab
    syms[2].st_info = ELF64_ST_INFO(STB_GLOBAL, STT_FUNC);
    syms[2].st_other = STV_DEFAULT;
    syms[2].st_shndx = SH_TEXT;
    /* For ET_DYN/ET_EXEC, st_value is the absolute virtual address. */
    syms[2].st_value = (Elf64_Addr)(uintptr_t)code_addr;
    syms[2].st_size = code_size;
    /* Section headers — SH_NULL stays all-zero courtesy of the memset above. */
    Elf64_Shdr *shdrs = (Elf64_Shdr *)(buf + sh_off);
    shdrs[SH_TEXT].sh_name = sh_text;
    shdrs[SH_TEXT].sh_type = SHT_PROGBITS;
    shdrs[SH_TEXT].sh_flags = SHF_ALLOC | SHF_EXECINSTR;
    shdrs[SH_TEXT].sh_addr = (Elf64_Addr)(uintptr_t)code_addr;
    shdrs[SH_TEXT].sh_offset = text_off;
    shdrs[SH_TEXT].sh_size = text_size;
    shdrs[SH_TEXT].sh_addralign = 16;
    shdrs[SH_EH_FRAME].sh_name = sh_eh_frame;
    shdrs[SH_EH_FRAME].sh_type = SHT_PROGBITS;
    shdrs[SH_EH_FRAME].sh_flags = SHF_ALLOC;
    shdrs[SH_EH_FRAME].sh_addr =
        (Elf64_Addr)((uintptr_t)code_addr + text_padded);
    shdrs[SH_EH_FRAME].sh_offset = eh_off;
    shdrs[SH_EH_FRAME].sh_size = eh_frame_size;
    shdrs[SH_EH_FRAME].sh_addralign = 8;
    shdrs[SH_SHSTRTAB].sh_name = sh_shstrtab;
    shdrs[SH_SHSTRTAB].sh_type = SHT_STRTAB;
    shdrs[SH_SHSTRTAB].sh_offset = shstr_off;
    shdrs[SH_SHSTRTAB].sh_size = shstrtab_size;
    shdrs[SH_SHSTRTAB].sh_addralign = 1;
    shdrs[SH_STRTAB].sh_name = sh_strtab;
    shdrs[SH_STRTAB].sh_type = SHT_STRTAB;
    shdrs[SH_STRTAB].sh_offset = str_off;
    shdrs[SH_STRTAB].sh_size = strtab_size;
    shdrs[SH_STRTAB].sh_addralign = 1;
    shdrs[SH_SYMTAB].sh_name = sh_symtab;
    shdrs[SH_SYMTAB].sh_type = SHT_SYMTAB;
    shdrs[SH_SYMTAB].sh_offset = sym_off;
    shdrs[SH_SYMTAB].sh_size = symtab_size;
    shdrs[SH_SYMTAB].sh_link = SH_STRTAB;   // Symbol names live in .strtab
    shdrs[SH_SYMTAB].sh_info = 2;           // Index of the first global symbol
    shdrs[SH_SYMTAB].sh_addralign = 8;
    shdrs[SH_SYMTAB].sh_entsize = sizeof(Elf64_Sym);
    /* Link the new entry into the descriptor and notify the debugger. */
    struct jit_code_entry *entry = PyMem_RawMalloc(sizeof(*entry));
    if (entry == NULL) {
        PyMem_RawFree(buf);
        return NULL;
    }
    entry->symfile_addr = (const char *)buf;
    entry->symfile_size = total_size;
    entry->code_addr = code_addr;
    PyMutex_Lock(&_Py_jit_debug_mutex);
    entry->prev = NULL;
    entry->next = __jit_debug_descriptor.first_entry;
    if (entry->next != NULL) {
        entry->next->prev = entry;
    }
    __jit_debug_descriptor.first_entry = entry;
    __jit_debug_descriptor.relevant_entry = entry;
    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_register_code();
    __jit_debug_descriptor.action_flag = JIT_NOACTION;
    __jit_debug_descriptor.relevant_entry = NULL;
    PyMutex_Unlock(&_Py_jit_debug_mutex);
    return entry;
}
#endif // defined(PY_HAVE_JIT_GDB_UNWIND)
/*
 * Public entry point: register a JIT-compiled region with debuggers.
 *
 * Builds a "py::<entry>:<filename>" symbol name, synthesizes the
 * absolute-address .eh_frame for the region, and hands both to
 * gdb_jit_register_code.  Returns an opaque handle suitable for
 * _PyJitUnwind_GdbUnregisterCode, or NULL on failure (or always, when
 * GDB unwind support is compiled out).
 */
void *
_PyJitUnwind_GdbRegisterCode(const void *code_addr,
                             size_t code_size,
                             const char *entry,
                             const char *filename)
{
#if defined(PY_HAVE_JIT_GDB_UNWIND)
    /* GDB expects a stable symbol name and absolute addresses in .eh_frame. */
    if (entry == NULL) {
        entry = "";
    }
    if (filename == NULL) {
        filename = "";
    }
    int name_len = snprintf(NULL, 0, "py::%s:%s", entry, filename);
    if (name_len < 0) {
        /* Encoding error: cannot size the buffer, so bail out instead of
         * proceeding with an unterminated name (undefined behavior). */
        return NULL;
    }
    size_t name_size = (size_t)name_len + 1;
    char *name = (char *)PyMem_RawMalloc(name_size);
    if (name == NULL) {
        return NULL;
    }
    snprintf(name, name_size, "py::%s:%s", entry, filename);
    uint8_t buffer[1024];
    size_t eh_frame_size = _PyJitUnwind_BuildEhFrame(
        buffer, sizeof(buffer), code_addr, code_size, 1);
    if (eh_frame_size == 0) {
        PyMem_RawFree(name);
        return NULL;
    }
    void *handle = gdb_jit_register_code(code_addr, code_size, name,
                                         buffer, eh_frame_size);
    PyMem_RawFree(name);   // gdb_jit_register_code copied the name it needs
    return handle;
#else
    (void)code_addr;
    (void)code_size;
    (void)entry;
    (void)filename;
    return NULL;
#endif
}
/*
 * Unregister a JIT region previously registered with
 * _PyJitUnwind_GdbRegisterCode and release its resources.
 * A NULL handle is a no-op.
 */
void
_PyJitUnwind_GdbUnregisterCode(void *handle)
{
#if defined(PY_HAVE_JIT_GDB_UNWIND)
    struct jit_code_entry *node = (struct jit_code_entry *)handle;
    if (node == NULL) {
        return;
    }
    PyMutex_Lock(&_Py_jit_debug_mutex);
    /* Unlink the node from the descriptor's doubly-linked list. */
    struct jit_code_entry *before = node->prev;
    struct jit_code_entry *after = node->next;
    if (before != NULL) {
        before->next = after;
    }
    else {
        __jit_debug_descriptor.first_entry = after;
    }
    if (after != NULL) {
        after->prev = before;
    }
    /* Tell the debugger which entry is going away, then reset the flag. */
    __jit_debug_descriptor.relevant_entry = node;
    __jit_debug_descriptor.action_flag = JIT_UNREGISTER_FN;
    __jit_debug_register_code();
    __jit_debug_descriptor.action_flag = JIT_NOACTION;
    __jit_debug_descriptor.relevant_entry = NULL;
    PyMutex_Unlock(&_Py_jit_debug_mutex);
    /* Free only after the debugger has been notified. */
    PyMem_RawFree((void *)node->symfile_addr);
    PyMem_RawFree(node);
#else
    (void)handle;
#endif
}
#endif // defined(PY_HAVE_PERF_TRAMPOLINE) || defined(PY_HAVE_JIT_GDB_UNWIND)

View file

@ -1448,6 +1448,7 @@ allocate_executor(int exit_count, int length)
res->trace = (_PyUOpInstruction *)(res->exits + exit_count);
res->code_size = length;
res->exit_count = exit_count;
res->jit_gdb_handle = NULL;
return res;
}

View file

@ -156,6 +156,16 @@ type_watcher_callback(PyTypeObject* type)
return 0;
}
static void
watch_type(PyTypeObject *type, _PyBloomFilter *filter)
{
if (_Py_IsImmortal(type) && (type->tp_flags & Py_TPFLAGS_IMMUTABLETYPE)) {
return;
}
PyType_Watch(TYPE_WATCHER_ID, (PyObject *)type);
_Py_BloomFilter_Add(filter, type);
}
static PyObject *
convert_global_to_const(_PyUOpInstruction *inst, PyObject *obj)
{
@ -367,8 +377,7 @@ optimize_dict_known_hash(
// for user-defined objects which don't override tp_hash
Py_hash_t hash = PyObject_Hash(sub);
ADD_OP(opcode, 0, hash);
PyType_Watch(TYPE_WATCHER_ID, (PyObject *)Py_TYPE(sub));
_Py_BloomFilter_Add(dependencies, Py_TYPE(sub));
watch_type(Py_TYPE(sub), dependencies);
}
}
@ -401,8 +410,7 @@ lookup_attr(JitOptContext *ctx, _PyBloomFilter *dependencies, _PyUOpInstruction
ADD_OP(suffix, 2, 0);
}
if ((type->tp_flags & Py_TPFLAGS_IMMUTABLETYPE) == 0) {
PyType_Watch(TYPE_WATCHER_ID, (PyObject *)type);
_Py_BloomFilter_Add(dependencies, type);
watch_type(type, dependencies);
}
return sym_new_const(ctx, lookup);
}
@ -473,10 +481,8 @@ lookup_super_attr(JitOptContext *ctx, _PyBloomFilter *dependencies,
}
// if obj_type is immutable, then all its superclasses are immutable
if ((obj_type->tp_flags & Py_TPFLAGS_IMMUTABLETYPE) == 0) {
PyType_Watch(TYPE_WATCHER_ID, (PyObject *)su_type);
_Py_BloomFilter_Add(dependencies, su_type);
PyType_Watch(TYPE_WATCHER_ID, (PyObject *)obj_type);
_Py_BloomFilter_Add(dependencies, obj_type);
watch_type(su_type, dependencies);
watch_type(obj_type, dependencies);
}
return sym_new_const_steal(ctx, lookup);
}

View file

@ -1,4 +1,6 @@
#include "Python.h"
#include "pycore_long.h"
#include "pycore_opcode_utils.h"
#include "pycore_optimizer.h"
#include "pycore_uops.h"
#include "pycore_uop_ids.h"
@ -147,10 +149,7 @@ dummy_func(void) {
// Promote the probable type version to a known one.
sym_set_type(owner, probable_type);
sym_set_type_version(owner, type_version);
if ((probable_type->tp_flags & Py_TPFLAGS_IMMUTABLETYPE) == 0) {
PyType_Watch(TYPE_WATCHER_ID, (PyObject *)probable_type);
_Py_BloomFilter_Add(dependencies, probable_type);
}
watch_type(probable_type, dependencies);
}
else {
ctx->contradiction = true;
@ -229,24 +228,21 @@ dummy_func(void) {
}
op(_CHECK_ATTR_CLASS, (type_version/2, owner -- owner)) {
PyObject *type = (PyObject *)_PyType_LookupByVersion(type_version);
if (type) {
PyObject *type = sym_get_probable_value(owner);
if (type != NULL && ((PyTypeObject *)type)->tp_version_tag == type_version) {
if (type == sym_get_const(ctx, owner)) {
ADD_OP(_NOP, 0, 0);
}
else {
sym_set_const(owner, type);
if ((((PyTypeObject *)type)->tp_flags & Py_TPFLAGS_IMMUTABLETYPE) == 0) {
PyType_Watch(TYPE_WATCHER_ID, type);
_Py_BloomFilter_Add(dependencies, type);
}
watch_type((PyTypeObject *)type, dependencies);
}
}
}
op(_GUARD_TYPE_VERSION, (type_version/2, owner -- owner)) {
assert(type_version);
assert(this_instr[-1].opcode == _RECORD_TOS_TYPE);
assert(this_instr[-1].opcode == _RECORD_TOS_TYPE || this_instr[-1].opcode == _RECORD_TOS);
if (sym_matches_type_version(owner, type_version)) {
ADD_OP(_NOP, 0, 0);
}
@ -256,8 +252,7 @@ dummy_func(void) {
probable_type->tp_version_tag == type_version) {
sym_set_type(owner, probable_type);
sym_set_type_version(owner, type_version);
PyType_Watch(TYPE_WATCHER_ID, (PyObject *)probable_type);
_Py_BloomFilter_Add(dependencies, probable_type);
watch_type(probable_type, dependencies);
}
else {
ctx->contradiction = true;
@ -875,8 +870,14 @@ dummy_func(void) {
op(_LOAD_COMMON_CONSTANT, (-- value)) {
assert(oparg < NUM_COMMON_CONSTANTS);
PyObject *val = _PyInterpreterState_GET()->common_consts[oparg];
ADD_OP(_LOAD_CONST_INLINE_BORROW, 0, (uintptr_t)val);
value = PyJitRef_Borrow(sym_new_const(ctx, val));
if (_Py_IsImmortal(val)) {
ADD_OP(_LOAD_CONST_INLINE_BORROW, 0, (uintptr_t)val);
value = PyJitRef_Borrow(sym_new_const(ctx, val));
}
else {
ADD_OP(_LOAD_CONST_INLINE, 0, (uintptr_t)val);
value = sym_new_const(ctx, val);
}
}
op(_LOAD_SMALL_INT, (-- value)) {
@ -936,6 +937,13 @@ dummy_func(void) {
assert(oparg >= 2);
}
op(_RROT_3, (bottom, middle, top -- bottom, middle, top)) {
JitOptRef temp = top;
top = middle;
middle = bottom;
bottom = temp;
}
op(_LOAD_ATTR_INSTANCE_VALUE, (offset/1, owner -- attr, o)) {
attr = sym_new_not_null(ctx);
(void)offset;
@ -1311,8 +1319,7 @@ dummy_func(void) {
assert(init != NULL);
assert(PyFunction_Check(init));
callable = sym_new_const(ctx, init);
PyType_Watch(TYPE_WATCHER_ID, callable_o);
_Py_BloomFilter_Add(dependencies, callable_o);;
watch_type((PyTypeObject *)callable_o, dependencies);
}
else {
callable = sym_new_not_null(ctx);
@ -2018,10 +2025,7 @@ dummy_func(void) {
0, (uintptr_t)descr);
ADD_OP(_SWAP, 3, 0);
optimize_pop_top(ctx, this_instr, method_and_self[0]);
if ((type->tp_flags & Py_TPFLAGS_IMMUTABLETYPE) == 0) {
PyType_Watch(TYPE_WATCHER_ID, (PyObject *)type);
_Py_BloomFilter_Add(dependencies, type);
}
watch_type(type, dependencies);
method_and_self[0] = sym_new_const(ctx, descr);
optimized = true;
}

View file

@ -638,10 +638,9 @@
if (sym_is_const(ctx, res)) {
PyObject *result = sym_get_const(ctx, res);
if (_Py_IsImmortal(result)) {
// Replace with _LOAD_CONST_INLINE_BORROW + _SWAP + _SWAP since we have two inputs and an immortal result
// Replace with _LOAD_CONST_INLINE_BORROW + _RROT_3 since we have two inputs and an immortal result
ADD_OP(_LOAD_CONST_INLINE_BORROW, 0, (uintptr_t)result);
ADD_OP(_SWAP, 3, 0);
ADD_OP(_SWAP, 2, 0);
ADD_OP(_RROT_3, 0, 0);
}
}
CHECK_STACK_BOUNDS(1);
@ -710,10 +709,9 @@
if (sym_is_const(ctx, res)) {
PyObject *result = sym_get_const(ctx, res);
if (_Py_IsImmortal(result)) {
// Replace with _LOAD_CONST_INLINE_BORROW + _SWAP + _SWAP since we have two inputs and an immortal result
// Replace with _LOAD_CONST_INLINE_BORROW + _RROT_3 since we have two inputs and an immortal result
ADD_OP(_LOAD_CONST_INLINE_BORROW, 0, (uintptr_t)result);
ADD_OP(_SWAP, 3, 0);
ADD_OP(_SWAP, 2, 0);
ADD_OP(_RROT_3, 0, 0);
}
}
CHECK_STACK_BOUNDS(1);
@ -782,10 +780,9 @@
if (sym_is_const(ctx, res)) {
PyObject *result = sym_get_const(ctx, res);
if (_Py_IsImmortal(result)) {
// Replace with _LOAD_CONST_INLINE_BORROW + _SWAP + _SWAP since we have two inputs and an immortal result
// Replace with _LOAD_CONST_INLINE_BORROW + _RROT_3 since we have two inputs and an immortal result
ADD_OP(_LOAD_CONST_INLINE_BORROW, 0, (uintptr_t)result);
ADD_OP(_SWAP, 3, 0);
ADD_OP(_SWAP, 2, 0);
ADD_OP(_RROT_3, 0, 0);
}
}
CHECK_STACK_BOUNDS(1);
@ -1605,10 +1602,9 @@
if (sym_is_const(ctx, res)) {
PyObject *result = sym_get_const(ctx, res);
if (_Py_IsImmortal(result)) {
// Replace with _LOAD_CONST_INLINE_BORROW + _SWAP + _SWAP since we have two inputs and an immortal result
// Replace with _LOAD_CONST_INLINE_BORROW + _RROT_3 since we have two inputs and an immortal result
ADD_OP(_LOAD_CONST_INLINE_BORROW, 0, (uintptr_t)result);
ADD_OP(_SWAP, 3, 0);
ADD_OP(_SWAP, 2, 0);
ADD_OP(_RROT_3, 0, 0);
}
}
CHECK_STACK_BOUNDS(1);
@ -1915,8 +1911,14 @@
JitOptRef value;
assert(oparg < NUM_COMMON_CONSTANTS);
PyObject *val = _PyInterpreterState_GET()->common_consts[oparg];
ADD_OP(_LOAD_CONST_INLINE_BORROW, 0, (uintptr_t)val);
value = PyJitRef_Borrow(sym_new_const(ctx, val));
if (_Py_IsImmortal(val)) {
ADD_OP(_LOAD_CONST_INLINE_BORROW, 0, (uintptr_t)val);
value = PyJitRef_Borrow(sym_new_const(ctx, val));
}
else {
ADD_OP(_LOAD_CONST_INLINE, 0, (uintptr_t)val);
value = sym_new_const(ctx, val);
}
CHECK_STACK_BOUNDS(1);
stack_pointer[0] = value;
stack_pointer += 1;
@ -2522,7 +2524,7 @@
owner = stack_pointer[-1];
uint32_t type_version = (uint32_t)this_instr->operand0;
assert(type_version);
assert(this_instr[-1].opcode == _RECORD_TOS_TYPE);
assert(this_instr[-1].opcode == _RECORD_TOS_TYPE || this_instr[-1].opcode == _RECORD_TOS);
if (sym_matches_type_version(owner, type_version)) {
ADD_OP(_NOP, 0, 0);
}
@ -2532,8 +2534,7 @@
probable_type->tp_version_tag == type_version) {
sym_set_type(owner, probable_type);
sym_set_type_version(owner, type_version);
PyType_Watch(TYPE_WATCHER_ID, (PyObject *)probable_type);
_Py_BloomFilter_Add(dependencies, probable_type);
watch_type(probable_type, dependencies);
}
else {
ctx->contradiction = true;
@ -2558,10 +2559,7 @@
probable_type->tp_version_tag == type_version) {
sym_set_type(owner, probable_type);
sym_set_type_version(owner, type_version);
if ((probable_type->tp_flags & Py_TPFLAGS_IMMUTABLETYPE) == 0) {
PyType_Watch(TYPE_WATCHER_ID, (PyObject *)probable_type);
_Py_BloomFilter_Add(dependencies, probable_type);
}
watch_type(probable_type, dependencies);
}
else {
ctx->contradiction = true;
@ -2680,17 +2678,14 @@
JitOptRef owner;
owner = stack_pointer[-1];
uint32_t type_version = (uint32_t)this_instr->operand0;
PyObject *type = (PyObject *)_PyType_LookupByVersion(type_version);
if (type) {
PyObject *type = sym_get_probable_value(owner);
if (type != NULL && ((PyTypeObject *)type)->tp_version_tag == type_version) {
if (type == sym_get_const(ctx, owner)) {
ADD_OP(_NOP, 0, 0);
}
else {
sym_set_const(owner, type);
if ((((PyTypeObject *)type)->tp_flags & Py_TPFLAGS_IMMUTABLETYPE) == 0) {
PyType_Watch(TYPE_WATCHER_ID, type);
_Py_BloomFilter_Add(dependencies, type);
}
watch_type((PyTypeObject *)type, dependencies);
}
}
break;
@ -2935,10 +2930,9 @@
if (sym_is_const(ctx, res)) {
PyObject *result = sym_get_const(ctx, res);
if (_Py_IsImmortal(result)) {
// Replace with _LOAD_CONST_INLINE_BORROW + _SWAP + _SWAP since we have two inputs and an immortal result
// Replace with _LOAD_CONST_INLINE_BORROW + _RROT_3 since we have two inputs and an immortal result
ADD_OP(_LOAD_CONST_INLINE_BORROW, 0, (uintptr_t)result);
ADD_OP(_SWAP, 3, 0);
ADD_OP(_SWAP, 2, 0);
ADD_OP(_RROT_3, 0, 0);
}
}
CHECK_STACK_BOUNDS(1);
@ -3010,10 +3004,9 @@
if (sym_is_const(ctx, res)) {
PyObject *result = sym_get_const(ctx, res);
if (_Py_IsImmortal(result)) {
// Replace with _LOAD_CONST_INLINE_BORROW + _SWAP + _SWAP since we have two inputs and an immortal result
// Replace with _LOAD_CONST_INLINE_BORROW + _RROT_3 since we have two inputs and an immortal result
ADD_OP(_LOAD_CONST_INLINE_BORROW, 0, (uintptr_t)result);
ADD_OP(_SWAP, 3, 0);
ADD_OP(_SWAP, 2, 0);
ADD_OP(_RROT_3, 0, 0);
}
}
CHECK_STACK_BOUNDS(1);
@ -3074,10 +3067,9 @@
if (sym_is_const(ctx, res)) {
PyObject *result = sym_get_const(ctx, res);
if (_Py_IsImmortal(result)) {
// Replace with _LOAD_CONST_INLINE_BORROW + _SWAP + _SWAP since we have two inputs and an immortal result
// Replace with _LOAD_CONST_INLINE_BORROW + _RROT_3 since we have two inputs and an immortal result
ADD_OP(_LOAD_CONST_INLINE_BORROW, 0, (uintptr_t)result);
ADD_OP(_SWAP, 3, 0);
ADD_OP(_SWAP, 2, 0);
ADD_OP(_RROT_3, 0, 0);
}
}
CHECK_STACK_BOUNDS(1);
@ -3156,10 +3148,9 @@
if (sym_is_const(ctx, b)) {
PyObject *result = sym_get_const(ctx, b);
if (_Py_IsImmortal(result)) {
// Replace with _LOAD_CONST_INLINE_BORROW + _SWAP + _SWAP since we have two inputs and an immortal result
// Replace with _LOAD_CONST_INLINE_BORROW + _RROT_3 since we have two inputs and an immortal result
ADD_OP(_LOAD_CONST_INLINE_BORROW, 0, (uintptr_t)result);
ADD_OP(_SWAP, 3, 0);
ADD_OP(_SWAP, 2, 0);
ADD_OP(_RROT_3, 0, 0);
}
}
CHECK_STACK_BOUNDS(1);
@ -3265,10 +3256,9 @@
if (sym_is_const(ctx, b)) {
PyObject *result = sym_get_const(ctx, b);
if (_Py_IsImmortal(result)) {
// Replace with _LOAD_CONST_INLINE_BORROW + _SWAP + _SWAP since we have two inputs and an immortal result
// Replace with _LOAD_CONST_INLINE_BORROW + _RROT_3 since we have two inputs and an immortal result
ADD_OP(_LOAD_CONST_INLINE_BORROW, 0, (uintptr_t)result);
ADD_OP(_SWAP, 3, 0);
ADD_OP(_SWAP, 2, 0);
ADD_OP(_RROT_3, 0, 0);
}
}
CHECK_STACK_BOUNDS(1);
@ -3332,10 +3322,9 @@
if (sym_is_const(ctx, b)) {
PyObject *result = sym_get_const(ctx, b);
if (_Py_IsImmortal(result)) {
// Replace with _LOAD_CONST_INLINE_BORROW + _SWAP + _SWAP since we have two inputs and an immortal result
// Replace with _LOAD_CONST_INLINE_BORROW + _RROT_3 since we have two inputs and an immortal result
ADD_OP(_LOAD_CONST_INLINE_BORROW, 0, (uintptr_t)result);
ADD_OP(_SWAP, 3, 0);
ADD_OP(_SWAP, 2, 0);
ADD_OP(_RROT_3, 0, 0);
}
}
CHECK_STACK_BOUNDS(1);
@ -3785,10 +3774,7 @@
0, (uintptr_t)descr);
ADD_OP(_SWAP, 3, 0);
optimize_pop_top(ctx, this_instr, method_and_self[0]);
if ((type->tp_flags & Py_TPFLAGS_IMMUTABLETYPE) == 0) {
PyType_Watch(TYPE_WATCHER_ID, (PyObject *)type);
_Py_BloomFilter_Add(dependencies, type);
}
watch_type(type, dependencies);
method_and_self[0] = sym_new_const(ctx, descr);
optimized = true;
}
@ -4324,8 +4310,7 @@
assert(PyFunction_Check(init));
callable = sym_new_const(ctx, init);
stack_pointer[-2 - oparg] = callable;
PyType_Watch(TYPE_WATCHER_ID, callable_o);
_Py_BloomFilter_Add(dependencies, callable_o);;
watch_type((PyTypeObject *)callable_o, dependencies);
}
else {
callable = sym_new_not_null(ctx);
@ -5220,10 +5205,9 @@
if (sym_is_const(ctx, res)) {
PyObject *result = sym_get_const(ctx, res);
if (_Py_IsImmortal(result)) {
// Replace with _LOAD_CONST_INLINE_BORROW + _SWAP + _SWAP since we have two inputs and an immortal result
// Replace with _LOAD_CONST_INLINE_BORROW + _RROT_3 since we have two inputs and an immortal result
ADD_OP(_LOAD_CONST_INLINE_BORROW, 0, (uintptr_t)result);
ADD_OP(_SWAP, 3, 0);
ADD_OP(_SWAP, 2, 0);
ADD_OP(_RROT_3, 0, 0);
}
}
CHECK_STACK_BOUNDS(1);
@ -5504,6 +5488,23 @@
break;
}
case _RROT_3: {
JitOptRef top;
JitOptRef middle;
JitOptRef bottom;
top = stack_pointer[-1];
middle = stack_pointer[-2];
bottom = stack_pointer[-3];
JitOptRef temp = top;
top = middle;
middle = bottom;
bottom = temp;
stack_pointer[-3] = bottom;
stack_pointer[-2] = middle;
stack_pointer[-1] = top;
break;
}
case _START_EXECUTOR: {
break;
}

File diff suppressed because it is too large Load diff

View file

@ -243,7 +243,7 @@ perf_trampoline_code_watcher(PyCodeEvent event, PyCodeObject *co)
static void
perf_map_write_entry(void *state, const void *code_addr,
unsigned int code_size, PyCodeObject *co)
size_t code_size, PyCodeObject *co)
{
const char *entry = "";
if (co->co_qualname != NULL) {

View file

@ -879,13 +879,19 @@ pycore_init_builtins(PyThreadState *tstate)
interp->common_consts[CONSTANT_ASSERTIONERROR] = PyExc_AssertionError;
interp->common_consts[CONSTANT_NOTIMPLEMENTEDERROR] = PyExc_NotImplementedError;
interp->common_consts[CONSTANT_BUILTIN_TUPLE] = (PyObject*)&PyTuple_Type;
interp->common_consts[CONSTANT_BUILTIN_TUPLE] = (PyObject *)&PyTuple_Type;
interp->common_consts[CONSTANT_BUILTIN_ALL] = all;
interp->common_consts[CONSTANT_BUILTIN_ANY] = any;
interp->common_consts[CONSTANT_BUILTIN_LIST] = (PyObject*)&PyList_Type;
interp->common_consts[CONSTANT_BUILTIN_SET] = (PyObject*)&PySet_Type;
for (int i=0; i < NUM_COMMON_CONSTANTS; i++) {
interp->common_consts[CONSTANT_BUILTIN_LIST] = (PyObject *)&PyList_Type;
interp->common_consts[CONSTANT_BUILTIN_SET] = (PyObject *)&PySet_Type;
interp->common_consts[CONSTANT_NONE] = Py_None;
interp->common_consts[CONSTANT_EMPTY_STR] =
Py_GetConstantBorrowed(Py_CONSTANT_EMPTY_STR);
interp->common_consts[CONSTANT_TRUE] = Py_True;
interp->common_consts[CONSTANT_FALSE] = Py_False;
interp->common_consts[CONSTANT_MINUS_ONE] =
(PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS - 1];
for (int i = 0; i < NUM_COMMON_CONSTANTS; i++) {
assert(interp->common_consts[i] != NULL);
}

View file

@ -101,10 +101,11 @@ void _PyOpcode_RecordFunction_CODE(_PyInterpreterFrame *frame, _PyStackRef *stac
#define _RECORD_TOS_TYPE_INDEX 1
#define _RECORD_NOS_INDEX 2
#define _RECORD_3OS_GEN_FUNC_INDEX 3
#define _RECORD_NOS_GEN_FUNC_INDEX 4
#define _RECORD_CALLABLE_INDEX 5
#define _RECORD_CALLABLE_KW_INDEX 6
#define _RECORD_4OS_INDEX 7
#define _RECORD_TOS_INDEX 4
#define _RECORD_NOS_GEN_FUNC_INDEX 5
#define _RECORD_CALLABLE_INDEX 6
#define _RECORD_CALLABLE_KW_INDEX 7
#define _RECORD_4OS_INDEX 8
const _PyOpcodeRecordEntry _PyOpcode_RecordEntries[256] = {
[TO_BOOL_BOOL] = {1, {_RECORD_TOS_TYPE_INDEX}},
@ -136,15 +137,15 @@ const _PyOpcodeRecordEntry _PyOpcode_RecordEntries[256] = {
[STORE_ATTR] = {1, {_RECORD_TOS_TYPE_INDEX}},
[LOAD_SUPER_ATTR] = {1, {_RECORD_NOS_INDEX}},
[LOAD_SUPER_ATTR_METHOD] = {1, {_RECORD_NOS_INDEX}},
[LOAD_ATTR] = {1, {_RECORD_TOS_TYPE_INDEX}},
[LOAD_ATTR_INSTANCE_VALUE] = {1, {_RECORD_TOS_TYPE_INDEX}},
[LOAD_ATTR_MODULE] = {1, {_RECORD_TOS_TYPE_INDEX}},
[LOAD_ATTR_WITH_HINT] = {1, {_RECORD_TOS_TYPE_INDEX}},
[LOAD_ATTR_SLOT] = {1, {_RECORD_TOS_TYPE_INDEX}},
[LOAD_ATTR_CLASS] = {1, {_RECORD_TOS_TYPE_INDEX}},
[LOAD_ATTR_CLASS_WITH_METACLASS_CHECK] = {1, {_RECORD_TOS_TYPE_INDEX}},
[LOAD_ATTR_PROPERTY] = {1, {_RECORD_TOS_TYPE_INDEX}},
[LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN] = {1, {_RECORD_TOS_TYPE_INDEX}},
[LOAD_ATTR] = {1, {_RECORD_TOS_INDEX}},
[LOAD_ATTR_INSTANCE_VALUE] = {1, {_RECORD_TOS_INDEX}},
[LOAD_ATTR_MODULE] = {1, {_RECORD_TOS_INDEX}},
[LOAD_ATTR_WITH_HINT] = {1, {_RECORD_TOS_INDEX}},
[LOAD_ATTR_SLOT] = {1, {_RECORD_TOS_INDEX}},
[LOAD_ATTR_CLASS] = {1, {_RECORD_TOS_INDEX}},
[LOAD_ATTR_CLASS_WITH_METACLASS_CHECK] = {1, {_RECORD_TOS_INDEX}},
[LOAD_ATTR_PROPERTY] = {1, {_RECORD_TOS_INDEX}},
[LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN] = {1, {_RECORD_TOS_INDEX}},
[STORE_ATTR_INSTANCE_VALUE] = {1, {_RECORD_TOS_TYPE_INDEX}},
[STORE_ATTR_WITH_HINT] = {1, {_RECORD_TOS_TYPE_INDEX}},
[STORE_ATTR_SLOT] = {1, {_RECORD_TOS_TYPE_INDEX}},
@ -158,11 +159,11 @@ const _PyOpcodeRecordEntry _PyOpcode_RecordEntries[256] = {
[FOR_ITER_RANGE] = {1, {_RECORD_NOS_GEN_FUNC_INDEX}},
[FOR_ITER_GEN] = {1, {_RECORD_NOS_GEN_FUNC_INDEX}},
[LOAD_SPECIAL] = {1, {_RECORD_TOS_TYPE_INDEX}},
[LOAD_ATTR_METHOD_WITH_VALUES] = {1, {_RECORD_TOS_TYPE_INDEX}},
[LOAD_ATTR_METHOD_NO_DICT] = {1, {_RECORD_TOS_TYPE_INDEX}},
[LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = {1, {_RECORD_TOS_TYPE_INDEX}},
[LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = {1, {_RECORD_TOS_TYPE_INDEX}},
[LOAD_ATTR_METHOD_LAZY_DICT] = {1, {_RECORD_TOS_TYPE_INDEX}},
[LOAD_ATTR_METHOD_WITH_VALUES] = {1, {_RECORD_TOS_INDEX}},
[LOAD_ATTR_METHOD_NO_DICT] = {1, {_RECORD_TOS_INDEX}},
[LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = {1, {_RECORD_TOS_INDEX}},
[LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = {1, {_RECORD_TOS_INDEX}},
[LOAD_ATTR_METHOD_LAZY_DICT] = {1, {_RECORD_TOS_INDEX}},
[CALL] = {1, {_RECORD_CALLABLE_INDEX}},
[CALL_PY_GENERAL] = {1, {_RECORD_CALLABLE_INDEX}},
[CALL_BOUND_METHOD_GENERAL] = {1, {_RECORD_CALLABLE_INDEX}},
@ -199,12 +200,13 @@ const _PyOpcodeRecordSlotMap _PyOpcode_RecordSlotMaps[256] = {
[BINARY_OP_SUBSCR_GETITEM] = {1, 0, {0}},
[SEND_GEN] = {1, 0, {0}},
[LOAD_SUPER_ATTR_METHOD] = {1, 0, {0}},
[LOAD_ATTR_INSTANCE_VALUE] = {1, 0, {0}},
[LOAD_ATTR_WITH_HINT] = {1, 0, {0}},
[LOAD_ATTR_SLOT] = {1, 0, {0}},
[LOAD_ATTR_INSTANCE_VALUE] = {1, 1, {0}},
[LOAD_ATTR_WITH_HINT] = {1, 1, {0}},
[LOAD_ATTR_SLOT] = {1, 1, {0}},
[LOAD_ATTR_CLASS] = {1, 0, {0}},
[LOAD_ATTR_CLASS_WITH_METACLASS_CHECK] = {1, 0, {0}},
[LOAD_ATTR_PROPERTY] = {1, 0, {0}},
[LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN] = {1, 0, {0}},
[LOAD_ATTR_PROPERTY] = {1, 1, {0}},
[LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN] = {1, 1, {0}},
[STORE_ATTR_INSTANCE_VALUE] = {1, 0, {0}},
[STORE_ATTR_WITH_HINT] = {1, 0, {0}},
[STORE_ATTR_SLOT] = {1, 0, {0}},
@ -213,11 +215,11 @@ const _PyOpcodeRecordSlotMap _PyOpcode_RecordSlotMaps[256] = {
[GET_ITER_VIRTUAL] = {1, 0, {0}},
[FOR_ITER_GEN] = {1, 0, {0}},
[LOAD_SPECIAL] = {1, 0, {0}},
[LOAD_ATTR_METHOD_WITH_VALUES] = {1, 0, {0}},
[LOAD_ATTR_METHOD_NO_DICT] = {1, 0, {0}},
[LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = {1, 0, {0}},
[LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = {1, 0, {0}},
[LOAD_ATTR_METHOD_LAZY_DICT] = {1, 0, {0}},
[LOAD_ATTR_METHOD_WITH_VALUES] = {1, 1, {0}},
[LOAD_ATTR_METHOD_NO_DICT] = {1, 1, {0}},
[LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = {1, 1, {0}},
[LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = {1, 1, {0}},
[LOAD_ATTR_METHOD_LAZY_DICT] = {1, 1, {0}},
[CALL_PY_GENERAL] = {1, 0, {0}},
[CALL_BOUND_METHOD_GENERAL] = {1, 1, {0}},
[CALL_NON_PY_GENERAL] = {1, 0, {0}},
@ -237,11 +239,12 @@ const _PyOpcodeRecordSlotMap _PyOpcode_RecordSlotMaps[256] = {
[BINARY_OP] = {2, 2, {1, 0}},
};
const _Py_RecordFuncPtr _PyOpcode_RecordFunctions[8] = {
const _Py_RecordFuncPtr _PyOpcode_RecordFunctions[9] = {
[0] = NULL,
[_RECORD_TOS_TYPE_INDEX] = _PyOpcode_RecordFunction_TOS_TYPE,
[_RECORD_NOS_INDEX] = _PyOpcode_RecordFunction_NOS,
[_RECORD_3OS_GEN_FUNC_INDEX] = _PyOpcode_RecordFunction_3OS_GEN_FUNC,
[_RECORD_TOS_INDEX] = _PyOpcode_RecordFunction_TOS,
[_RECORD_NOS_GEN_FUNC_INDEX] = _PyOpcode_RecordFunction_NOS_GEN_FUNC,
[_RECORD_CALLABLE_INDEX] = _PyOpcode_RecordFunction_CALLABLE,
[_RECORD_CALLABLE_KW_INDEX] = _PyOpcode_RecordFunction_CALLABLE_KW,

View file

@ -2707,7 +2707,7 @@ PyAPI_FUNC(int) PyUnstable_PerfMapState_Init(void) {
PyAPI_FUNC(int) PyUnstable_WritePerfMapEntry(
const void *code_addr,
unsigned int code_size,
size_t code_size,
const char *entry_name
) {
#ifndef MS_WINDOWS
@ -2718,7 +2718,7 @@ PyAPI_FUNC(int) PyUnstable_WritePerfMapEntry(
}
}
PyThread_acquire_lock(perf_map_state.map_lock, 1);
fprintf(perf_map_state.perf_map, "%" PRIxPTR " %x %s\n", (uintptr_t) code_addr, code_size, entry_name);
fprintf(perf_map_state.perf_map, "%" PRIxPTR " %zx %s\n", (uintptr_t) code_addr, code_size, entry_name);
fflush(perf_map_state.perf_map);
PyThread_release_lock(perf_map_state.map_lock);
#endif