Mirror of https://github.com/python/cpython.git (synced 2025-12-08 06:10:17 +00:00)

Commit bdd7bdf71d: Merge branch 'pyrepl-module-completion-check-for-already-imported-modules' of https://github.com/loic-simon/cpython into pyrepl-module-completion-check-for-already-imported-modules

58 changed files with 3487 additions and 2603 deletions
.github/CODEOWNERS (vendored): 18 changes
@@ -241,10 +241,10 @@ Lib/test/test_getpath.py @FFY00
 Modules/getpath* @FFY00

 # Hashing / ``hash()`` and related
-Include/cpython/pyhash.h @gpshead @picnixz @tiran
-Include/internal/pycore_pyhash.h @gpshead @picnixz @tiran
-Include/pyhash.h @gpshead @picnixz @tiran
-Python/pyhash.c @gpshead @picnixz @tiran
+Include/cpython/pyhash.h @gpshead @picnixz
+Include/internal/pycore_pyhash.h @gpshead @picnixz
+Include/pyhash.h @gpshead @picnixz
+Python/pyhash.c @gpshead @picnixz

 # The import system (including importlib)
 **/*import* @brettcannon @ericsnowcurrently @ncoghlan @warsaw
@@ -371,14 +371,14 @@ Lib/calendar.py @AA-Turner
 Lib/test/test_calendar.py @AA-Turner

 # Cryptographic Primitives and Applications
-**/*hashlib* @gpshead @picnixz @tiran
-**/*hashopenssl* @gpshead @picnixz @tiran
+**/*hashlib* @gpshead @picnixz
+**/*hashopenssl* @gpshead @picnixz
 **/*hmac* @gpshead @picnixz
 **/*ssl* @gpshead @picnixz
 Modules/_hacl/ @gpshead @picnixz
-Modules/*blake* @gpshead @picnixz @tiran
-Modules/*md5* @gpshead @picnixz @tiran
-Modules/*sha* @gpshead @picnixz @tiran
+Modules/*blake* @gpshead @picnixz
+Modules/*md5* @gpshead @picnixz
+Modules/*sha* @gpshead @picnixz

 # Codecs
 Modules/cjkcodecs/ @corona10

.pre-commit-config.yaml
@@ -1,28 +1,28 @@
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.12.8
+    rev: v0.13.2
     hooks:
-      - id: ruff
+      - id: ruff-check
         name: Run Ruff (lint) on Doc/
         args: [--exit-non-zero-on-fix]
         files: ^Doc/
-      - id: ruff
+      - id: ruff-check
         name: Run Ruff (lint) on Lib/test/
         args: [--exit-non-zero-on-fix]
         files: ^Lib/test/
-      - id: ruff
+      - id: ruff-check
         name: Run Ruff (lint) on Tools/build/
         args: [--exit-non-zero-on-fix, --config=Tools/build/.ruff.toml]
         files: ^Tools/build/
-      - id: ruff
+      - id: ruff-check
         name: Run Ruff (lint) on Tools/i18n/
         args: [--exit-non-zero-on-fix, --config=Tools/i18n/.ruff.toml]
         files: ^Tools/i18n/
-      - id: ruff
+      - id: ruff-check
         name: Run Ruff (lint) on Argument Clinic
         args: [--exit-non-zero-on-fix, --config=Tools/clinic/.ruff.toml]
         files: ^Tools/clinic/|Lib/test/test_clinic.py
-      - id: ruff
+      - id: ruff-check
         name: Run Ruff (lint) on Tools/peg_generator/
         args: [--exit-non-zero-on-fix, --config=Tools/peg_generator/.ruff.toml]
         files: ^Tools/peg_generator/
@@ -36,7 +36,7 @@ repos:
         files: ^Tools/build/check_warnings.py

   - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: 25.1.0
+    rev: 25.9.0
     hooks:
       - id: black
         name: Run Black on Tools/jit/
@@ -47,7 +47,6 @@ repos:
     hooks:
       - id: remove-tabs
-        types: [python]
         exclude: ^Tools/c-analyzer/cpython/_parser.py

   - repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v6.0.0
@@ -68,7 +67,7 @@ repos:
         files: '^\.github/CODEOWNERS|\.(gram)$'

   - repo: https://github.com/python-jsonschema/check-jsonschema
-    rev: 0.33.2
+    rev: 0.34.0
     hooks:
       - id: check-dependabot
       - id: check-github-workflows
@@ -80,7 +79,7 @@ repos:
       - id: actionlint

   - repo: https://github.com/woodruffw/zizmor-pre-commit
-    rev: v1.11.0
+    rev: v1.14.1
     hooks:
       - id: zizmor
Doc/c-api/init.rst

@@ -1382,6 +1382,9 @@ All of the following functions must be called after :c:func:`Py_Initialize`.
    This is not a replacement for :c:func:`PyModule_GetState()`, which
    extensions should use to store interpreter-specific state information.

+   The returned dictionary is borrowed from the interpreter and is valid until
+   interpreter shutdown.
+
    .. versionadded:: 3.8
Doc/data/refcounts.dat

@@ -1141,6 +1141,9 @@ PyInterpreterState_Clear:PyInterpreterState*:interp::
 PyInterpreterState_Delete:void:::
 PyInterpreterState_Delete:PyInterpreterState*:interp::

+PyInterpreterState_GetDict:PyObject*::0:
+PyInterpreterState_GetDict:PyInterpreterState*:interp::
+
 PyInterpreterState_GetID:int64_t:::
 PyInterpreterState_GetID:PyInterpreterState*:interp::
Doc/library/enum.rst

@@ -315,6 +315,7 @@ Data Types
       Returns ``['__class__', '__doc__', '__module__', 'name', 'value']`` and
       any public methods defined on *self.__class__*::

+         >>> from enum import Enum
          >>> from datetime import date
          >>> class Weekday(Enum):
          ...     MONDAY = 1

@@ -341,7 +342,7 @@ Data Types
       A *staticmethod* that is used to determine the next value returned by
       :class:`auto`::

-         >>> from enum import auto
+         >>> from enum import auto, Enum
          >>> class PowersOfThree(Enum):
          ...     @staticmethod
          ...     def _generate_next_value_(name, start, count, last_values):

@@ -373,7 +374,7 @@ Data Types
       A *classmethod* for looking up values not found in *cls*. By default it
       does nothing, but can be overridden to implement custom search behavior::

-         >>> from enum import StrEnum
+         >>> from enum import auto, StrEnum
          >>> class Build(StrEnum):
          ...     DEBUG = auto()
          ...     OPTIMIZED = auto()
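To make the ``_missing_`` hook this hunk touches concrete, a minimal sketch (the ``Color`` class and its case-insensitive lookup are invented for this note, not part of the diff)::

    >>> from enum import Enum
    >>> class Color(Enum):
    ...     RED = 1
    ...     GREEN = 2
    ...     @classmethod
    ...     def _missing_(cls, value):
    ...         # Hypothetical behavior: accept case-insensitive member names.
    ...         if isinstance(value, str):
    ...             return cls.__members__.get(value.upper())
    ...         return None
    ...
    >>> Color("red")
    <Color.RED: 1>
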
@@ -412,6 +413,7 @@ Data Types
       Returns the string used for *repr()* calls. By default, returns the
       *Enum* name, member name, and value, but can be overridden::

+         >>> from enum import auto, Enum
          >>> class OtherStyle(Enum):
          ...     ALTERNATE = auto()
          ...     OTHER = auto()

@@ -428,6 +430,7 @@ Data Types
       Returns the string used for *str()* calls. By default, returns the
       *Enum* name and member name, but can be overridden::

+         >>> from enum import auto, Enum
          >>> class OtherStyle(Enum):
          ...     ALTERNATE = auto()
          ...     OTHER = auto()

@@ -443,6 +446,7 @@ Data Types
       Returns the string used for *format()* and *f-string* calls. By default,
       returns :meth:`__str__` return value, but can be overridden::

+         >>> from enum import auto, Enum
          >>> class OtherStyle(Enum):
          ...     ALTERNATE = auto()
          ...     OTHER = auto()
Doc/library/hashlib.rst

@@ -310,7 +310,7 @@ a file or file-like object.
    .. versionadded:: 3.11

    .. versionchanged:: 3.14
-      Now raises a :exc:`BlockingIOError` if the file is opened in blocking
+      Now raises a :exc:`BlockingIOError` if the file is opened in non-blocking
       mode. Previously, spurious null bytes were added to the digest.
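For context on the corrected note above, a small usage sketch of :func:`hashlib.file_digest` (the file name is hypothetical; ``file_digest`` needs Python 3.11+, and the :exc:`BlockingIOError` behavior needs 3.14+)::

    import hashlib

    with open("data.bin", "rb") as f:   # a regular (blocking) binary file
        digest = hashlib.file_digest(f, "sha256")
    print(digest.hexdigest())
    # In 3.14+, passing a file opened in non-blocking mode raises
    # BlockingIOError instead of silently hashing spurious null bytes.
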
Doc/reference/executionmodel.rst

@@ -398,6 +398,192 @@ See also the description of the :keyword:`try` statement in section :ref:`try`
and :keyword:`raise` statement in section :ref:`raise`.


.. _execcomponents:

Runtime Components
==================

General Computing Model
-----------------------

Python's execution model does not operate in a vacuum. It runs on
a host machine and through that host's runtime environment, including
its operating system (OS), if there is one. When a program runs,
the conceptual layers of how it runs on the host look something
like this:

| **host machine**
| **process** (global resources)
| **thread** (runs machine code)

Each process represents a program running on the host. Think of each
process itself as the data part of its program. Think of the process'
threads as the execution part of the program. This distinction will
be important for understanding the conceptual Python runtime.

The process, as the data part, is the execution context in which the
program runs. It mostly consists of the set of resources assigned to
the program by the host, including memory, signals, file handles,
sockets, and environment variables.

Processes are isolated and independent from one another. (The same
is true for hosts.) The host manages the process' access to its
assigned resources, in addition to coordinating between processes.

Each thread represents the actual execution of the program's machine
code, running relative to the resources assigned to the program's
process. It's strictly up to the host how and when that execution
takes place.

From the point of view of Python, a program always starts with exactly
one thread. However, the program may grow to run in multiple
simultaneous threads. Not all hosts support multiple threads per
process, but most do. Unlike processes, threads in a process are not
isolated and independent from one another. Specifically, all threads
in a process share all of the process' resources.

The fundamental point of threads is that each one does *run*
independently, at the same time as the others. That may be only
conceptually at the same time ("concurrently") or physically
("in parallel"). Either way, the threads effectively run
at a non-synchronized rate.

.. note::

   That non-synchronized rate means none of the process' memory is
   guaranteed to stay consistent for the code running in any given
   thread. Thus multi-threaded programs must take care to coordinate
   access to intentionally shared resources. Likewise, they must take
   care to be absolutely diligent about not accessing any *other*
   resources in multiple threads; otherwise two threads running at the
   same time might accidentally interfere with each other's use of some
   shared data. All this is true for both Python programs and the
   Python runtime.

The cost of this broad, unstructured requirement is the tradeoff for
the kind of raw concurrency that threads provide. The alternative
to the required discipline generally means dealing with
non-deterministic bugs and data corruption.
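A minimal sketch of the coordination the note above calls for, using :mod:`threading` (the counter and thread count are arbitrary, for illustration only)::

    import threading

    counter = 0
    lock = threading.Lock()

    def increment(n):
        global counter
        for _ in range(n):
            with lock:  # coordinate access to intentionally shared data
                counter += 1

    threads = [threading.Thread(target=increment, args=(10_000,))
               for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(counter)  # always 40000 with the lock; unpredictable without it
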
Python Runtime Model
--------------------

The same conceptual layers apply to each Python program, with some
extra data layers specific to Python:

| **host machine**
| **process** (global resources)
|     Python global runtime (*state*)
|         Python interpreter (*state*)
| **thread** (runs Python bytecode and "C-API")
|     Python thread *state*

At the conceptual level: when a Python program starts, it looks exactly
like that diagram, with one of each. The runtime may grow to include
multiple interpreters, and each interpreter may grow to include
multiple thread states.

.. note::

   A Python implementation won't necessarily implement the runtime
   layers distinctly or even concretely. The only exception is places
   where distinct layers are directly specified or exposed to users,
   like through the :mod:`threading` module.

.. note::

   The initial interpreter is typically called the "main" interpreter.
   Some Python implementations, like CPython, assign special roles
   to the main interpreter.

   Likewise, the host thread where the runtime was initialized is known
   as the "main" thread. It may be different from the process' initial
   thread, though they are often the same. In some cases "main thread"
   may be even more specific and refer to the initial thread state.
   A Python runtime might assign specific responsibilities
   to the main thread, such as handling signals.

As a whole, the Python runtime consists of the global runtime state,
interpreters, and thread states. The runtime ensures all that state
stays consistent over its lifetime, particularly when used with
multiple host threads.

The global runtime, at the conceptual level, is just a set of
interpreters. While those interpreters are otherwise isolated and
independent from one another, they may share some data or other
resources. The runtime is responsible for managing these global
resources safely. The actual nature and management of these resources
is implementation-specific. Ultimately, the external utility of the
global runtime is limited to managing interpreters.

In contrast, an "interpreter" is conceptually what we would normally
think of as the (full-featured) "Python runtime". When machine code
executing in a host thread interacts with the Python runtime, it calls
into Python in the context of a specific interpreter.

.. note::

   The term "interpreter" here is not the same as the "bytecode
   interpreter", which is what regularly runs in threads, executing
   compiled Python code.

In an ideal world, "Python runtime" would refer to what we currently
call "interpreter". However, it's been called "interpreter" at least
since it was introduced in 1997 (`CPython:a027efa5b`_).

.. _CPython:a027efa5b: https://github.com/python/cpython/commit/a027efa5b

Each interpreter completely encapsulates all of the non-process-global,
non-thread-specific state needed for the Python runtime to work.
Notably, the interpreter's state persists between uses. It includes
fundamental data like :data:`sys.modules`. The runtime ensures that
multiple threads using the same interpreter will safely
share it between them.

A Python implementation may support using multiple interpreters at the
same time in the same process. They are independent and isolated from
one another. For example, each interpreter has its own
:data:`sys.modules`.

For thread-specific runtime state, each interpreter has a set of thread
states, which it manages, in the same way the global runtime contains
a set of interpreters. It can have thread states for as many host
threads as it needs. It may even have multiple thread states for
the same host thread, though that isn't as common.

Each thread state, conceptually, has all the thread-specific runtime
data an interpreter needs to operate in one host thread. The thread
state includes the current raised exception and the thread's Python
call stack. It may include other thread-specific resources.

.. note::

   The term "Python thread" can sometimes refer to a thread state, but
   normally it means a thread created using the :mod:`threading` module.

Each thread state, over its lifetime, is always tied to exactly one
interpreter and exactly one host thread. It will only ever be used in
that thread and with that interpreter.

Multiple thread states may be tied to the same host thread, whether for
different interpreters or even the same interpreter. However, for any
given host thread, only one of the thread states tied to it can be used
by the thread at a time.

Thread states are isolated and independent from one another and don't
share any data, except for possibly sharing an interpreter and objects
or other resources belonging to that interpreter.

Once a program is running, new Python threads can be created using the
:mod:`threading` module (on platforms and Python implementations that
support threads). Additional processes can be created using the
:mod:`os`, :mod:`subprocess`, and :mod:`multiprocessing` modules.
Interpreters can be created and used with the
:mod:`~concurrent.interpreters` module. Coroutines (async) can
be run using :mod:`asyncio` in each interpreter, typically only
in a single thread (often the main thread).
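A brief sketch of these layers from Python code, assuming Python 3.14+ where :mod:`concurrent.interpreters` (PEP 734) is available; exact output varies by platform::

    import threading
    from concurrent import interpreters

    def worker():
        # Runs in a new host thread, sharing this interpreter's state
        # (including sys.modules) with the main thread.
        print("worker thread:", threading.get_ident())

    t = threading.Thread(target=worker)
    t.start()
    t.join()

    # A second interpreter is isolated: it has its own sys.modules.
    interp = interpreters.create()
    interp.exec("import sys; print('modules loaded:', len(sys.modules))")
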
.. rubric:: Footnotes

.. [#] This limitation occurs because the code that is executed by these operations
Doc/whatsnew/3.14.rst

@@ -89,12 +89,12 @@ and improvements in user-friendliness and correctness.
 * :ref:`PEP 750: Template strings <whatsnew314-pep750>`
 * :ref:`PEP 758: Allow except and except* expressions without parentheses <whatsnew314-pep758>`
 * :ref:`PEP 761: Discontinuation of PGP signatures <whatsnew314-no-more-pgp>`
-* :ref:`PEP 765: Disallow return/break/continue that exit a finally block <whatsnew314-pep765>`
+* :ref:`PEP 765: Disallow return/break/continue that exit a finally block <whatsnew314-finally-syntaxwarning>`
 * :ref:`Free-threaded mode improvements <whatsnew314-free-threaded-cpython>`
 * :ref:`PEP 768: Safe external debugger interface for CPython <whatsnew314-pep768>`
 * :ref:`PEP 784: Adding Zstandard to the standard library <whatsnew314-pep784>`
 * :ref:`A new type of interpreter <whatsnew314-tail-call>`
-* :ref:`Syntax highlighting in PyREPL <whatsnew314-pyrepl-highlighting>`,
+* :ref:`Syntax highlighting in the default interactive shell <whatsnew314-pyrepl-highlighting>`,
   and color output in :ref:`unittest <whatsnew314-color-unittest>`,
   :ref:`argparse <whatsnew314-color-argparse>`,
   :ref:`json <whatsnew314-color-json>` and
@@ -102,25 +102,6 @@ and improvements in user-friendliness and correctness.
 * :ref:`Binary releases for the experimental just-in-time compiler <whatsnew314-jit-compiler>`


-Incompatible changes
-====================
-
-On platforms other than macOS and Windows, the default :ref:`start
-method <multiprocessing-start-methods>` for :mod:`multiprocessing`
-and :class:`~concurrent.futures.ProcessPoolExecutor` switches from
-*fork* to *forkserver*.
-
-See :ref:`(1) <whatsnew314-concurrent-futures-start-method>` and
-:ref:`(2) <whatsnew314-multiprocessing-start-method>` for details.
-
-If you encounter :exc:`NameError`\s or pickling errors coming out of
-:mod:`multiprocessing` or :mod:`concurrent.futures`, see the
-:ref:`forkserver restrictions <multiprocessing-programming-forkserver>`.
-
-The interpreter avoids some reference count modifications internally when
-it's safe to do so. This can lead to different values returned from
-:func:`sys.getrefcount` and :c:func:`Py_REFCNT` compared to previous versions
-of Python. See :ref:`below <whatsnew314-refcount>` for details.
-
 New features
 ============
|
@ -751,6 +732,12 @@ Improved error messages
|
|||
~^^^
|
||||
TypeError: cannot use 'list' as a dict key (unhashable type: 'list')
|
||||
|
||||
* Improved error message when an object supporting the synchronous
|
||||
context manager protocol is entered using :keyword:`async with`
|
||||
instead of :keyword:`with`,
|
||||
and vice versa for the asynchronous context manager protocol.
|
||||
(Contributed by Bénédikt Tran in :gh:`128398`.)
|
||||
|
||||
|
||||
.. _whatsnew314-pep741:
|
||||
|
||||
|
|
@@ -996,26 +983,6 @@ affects other modules that use context variables, such as the :mod:`decimal`
 context manager.


-.. _whatsnew314-pyrepl-highlighting:
-
-Syntax highlighting in PyREPL
------------------------------
-
-The default :term:`interactive` shell now highlights Python syntax as you
-type. The feature is enabled by default unless the
-:envvar:`PYTHON_BASIC_REPL` environment is set or any color-disabling
-environment variables are used. See :ref:`using-on-controlling-color` for
-details.
-
-The default color theme for syntax highlighting strives for good contrast
-and uses exclusively the 4-bit VGA standard ANSI color codes for maximum
-compatibility. The theme can be customized using an experimental API
-``_colorize.set_theme()``. This can be called interactively, as well as
-in the :envvar:`PYTHONSTARTUP` script.
-
-(Contributed by Łukasz Langa in :gh:`131507`.)
-
-
 .. _whatsnew314-jit-compiler:

 Binary releases for the experimental just-in-time compiler
@@ -1058,6 +1025,138 @@ free-threaded build and false for the GIL-enabled build.

 (Contributed by Neil Schemenauer and Kumar Aditya in :gh:`130010`.)


+Platform support
+================
+
+* :pep:`776`: Emscripten is now an officially supported platform at
+  :pep:`tier 3 <11#tier-3>`. As a part of this effort, more than 25 bugs in
+  `Emscripten libc`__ were fixed. Emscripten now includes support
+  for :mod:`ctypes`, :mod:`termios`, and :mod:`fcntl`, as well as
+  experimental support for the new :ref:`default interactive shell
+  <tut-interactive>`.
+
+  (Contributed by R. Hood Chatham in :gh:`127146`, :gh:`127683`, and :gh:`136931`.)
+
+  __ https://emscripten.org/docs/porting/emscripten-runtime-environment.html
+
+* iOS and macOS apps can now be configured to redirect ``stdout`` and
+  ``stderr`` content to the system log.
+  (Contributed by Russell Keith-Magee in :gh:`127592`.)
+
+* The iOS testbed is now able to stream test output while the test is running.
+  The testbed can also be used to run the test suite of projects other than
+  CPython itself.
+  (Contributed by Russell Keith-Magee in :gh:`127592`.)
+
+
+Other language changes
+======================
+
+* All Windows code pages are now supported as 'cpXXX' codecs on Windows.
+  (Contributed by Serhiy Storchaka in :gh:`123803`.)
+
+* Implement mixed-mode arithmetic rules combining real and complex numbers
+  as specified by the C standard since C99.
+  (Contributed by Sergey B Kirpichev in :gh:`69639`.)
+
+* More syntax errors are now detected regardless of optimisation and
+  the :option:`-O` command-line option.
+  This includes writes to ``__debug__``, incorrect use of :keyword:`await`,
+  and asynchronous comprehensions outside asynchronous functions.
+  For example, ``python -O -c 'assert (__debug__ := 1)'``
+  or ``python -O -c 'assert await 1'`` now produce :exc:`SyntaxError`\ s.
+  (Contributed by Irit Katriel and Jelle Zijlstra in :gh:`122245` & :gh:`121637`.)
+
+* When subclassing a pure C type, the C slots for the new type
+  are no longer replaced with a wrapped version on class creation
+  if they are not explicitly overridden in the subclass.
+  (Contributed by Tomasz Pytel in :gh:`132284`.)
+
+
+Built-ins
+---------
+
+* The :meth:`bytes.fromhex` and :meth:`bytearray.fromhex` methods now accept
+  ASCII :class:`bytes` and :term:`bytes-like objects <bytes-like object>`.
+  (Contributed by Daniel Pope in :gh:`129349`.)
+
+* Add class methods :meth:`float.from_number` and :meth:`complex.from_number`
+  to convert a number to :class:`float` or :class:`complex` type correspondingly.
+  They raise a :exc:`TypeError` if the argument is not a real number.
+  (Contributed by Serhiy Storchaka in :gh:`84978`.)
+
+* Support underscore and comma as thousands separators in the fractional part
+  for floating-point presentation types of the new-style string formatting
+  (with :func:`format` or :ref:`f-strings`).
+  (Contributed by Sergey B Kirpichev in :gh:`87790`.)
+
+* The :func:`int` function no longer delegates to :meth:`~object.__trunc__`.
+  Classes that want to support conversion to :func:`!int` must implement
+  either :meth:`~object.__int__` or :meth:`~object.__index__`.
+  (Contributed by Mark Dickinson in :gh:`119743`.)
+
+* The :func:`map` function now has an optional keyword-only *strict* flag
+  like :func:`zip` to check that all the iterables are of equal length.
+  (Contributed by Wannes Boeykens in :gh:`119793`.)
+
+* The :class:`memoryview` type now supports subscription,
+  making it a :term:`generic type`.
+  (Contributed by Brian Schubert in :gh:`126012`.)
+
+* Using :data:`NotImplemented` in a boolean context
+  will now raise a :exc:`TypeError`.
+  This has raised a :exc:`DeprecationWarning` since Python 3.9.
+  (Contributed by Jelle Zijlstra in :gh:`118767`.)
+
+* Three-argument :func:`pow` now tries calling :meth:`~object.__rpow__`
+  if necessary.
+  Previously it was only called in two-argument :func:`!pow`
+  and the binary power operator.
+  (Contributed by Serhiy Storchaka in :gh:`130104`.)
+
+* :class:`super` objects are now :mod:`copyable <copy>` and :mod:`pickleable
+  <pickle>`.
+  (Contributed by Serhiy Storchaka in :gh:`125767`.)
+
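Three of the bullets above lend themselves to quick sketches (Python 3.14+ in each case; classes and values are invented for this note).

The new conversion classmethods reject anything that is not a real number::

    from fractions import Fraction

    print(float.from_number(Fraction(1, 4)))   # 0.25
    print(complex.from_number(2))              # (2+0j)
    try:
        float.from_number("1.5")               # strings are rejected
    except TypeError as exc:
        print(exc)

With ``__trunc__`` no longer consulted, a class must define ``__index__`` (or ``__int__``) to support :func:`int`::

    class Meters:
        def __init__(self, value):
            self.value = value
        def __index__(self):   # __int__ would work as well
            return self.value

    print(int(Meters(3)))      # 3

And the *strict* flag on :func:`map` mirrors :func:`zip`::

    print(list(map(pow, [2, 3], [5, 6], strict=True)))   # [32, 729]
    try:
        list(map(pow, [2, 3], [5], strict=True))         # unequal lengths
    except ValueError as exc:
        print(exc)
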
+
+Command line and environment
+----------------------------
+
+* The import time flag can now track modules that are already loaded ('cached'),
+  via the new :option:`-X importtime=2 <-X>`.
+  When such a module is imported, the ``self`` and ``cumulative`` times
+  are replaced by the string ``cached``.
+
+  Values above ``2`` for ``-X importtime`` are now reserved for future use.
+
+  (Contributed by Noah Kim and Adam Turner in :gh:`118655`.)
+
+* The command-line option :option:`-c` now automatically dedents its code
+  argument before execution. The auto-dedentation behavior mirrors
+  :func:`textwrap.dedent`.
+  (Contributed by Jon Crall and Steven Sun in :gh:`103998`.)
+
+* :option:`!-J` is no longer a reserved flag for Jython_,
+  and now has no special meaning.
+  (Contributed by Adam Turner in :gh:`133336`.)
+
+  .. _Jython: https://www.jython.org/
+
+
+.. _whatsnew314-finally-syntaxwarning:
+
+PEP 765: Control flow in :keyword:`finally` blocks
+--------------------------------------------------
+
+The compiler now emits a :exc:`SyntaxWarning` when a :keyword:`return`,
+:keyword:`break`, or :keyword:`continue` statement has the effect of
+leaving a :keyword:`finally` block.
+This change is specified in :pep:`765`.
+
+(Contributed by Irit Katriel in :gh:`130080`.)
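A minimal example of code that now triggers the warning (the helper is hypothetical)::

    def fetch():
        ...

    def cleanup():
        try:
            return fetch()
        finally:
            return None   # SyntaxWarning: 'return' in a 'finally' block (PEP 765)
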

 .. _whatsnew314-incremental-gc:

 Incremental garbage collection
@@ -1081,149 +1180,34 @@ The behavior of :func:`!gc.collect` changes slightly:

 (Contributed by Mark Shannon in :gh:`108362`.)

-Platform support
-================
-
-* :pep:`776`: Emscripten is now an officially supported platform at
-  :pep:`tier 3 <11#tier-3>`. As a part of this effort, more than 25 bugs in
-  `Emscripten libc`__ were fixed. Emscripten now includes support
-  for :mod:`ctypes`, :mod:`termios`, and :mod:`fcntl`, as well as
-  experimental support for :ref:`PyREPL <tut-interactive>`.
+Default interactive shell
+-------------------------

-(Contributed by R. Hood Chatham in :gh:`127146`, :gh:`127683`, and :gh:`136931`.)
+.. _whatsnew314-pyrepl-highlighting:

-__ https://emscripten.org/docs/porting/emscripten-runtime-environment.html
+* The default :term:`interactive` shell now highlights Python syntax.
+  The feature is enabled by default, save if :envvar:`PYTHON_BASIC_REPL`
+  or any other environment variable that disables colour is set.
+  See :ref:`using-on-controlling-color` for details.

-Other language changes
-======================
+  The default color theme for syntax highlighting strives for good contrast
+  and exclusively uses the 4-bit VGA standard ANSI color codes for maximum
+  compatibility. The theme can be customized using an experimental API
+  :func:`!_colorize.set_theme`.
+  This can be called interactively or in the :envvar:`PYTHONSTARTUP` script.
+  Note that this function has no stability guarantees,
+  and may change or be removed.

-* The default :term:`interactive` shell now supports import autocompletion.
-  This means that typing ``import foo`` and pressing ``<tab>`` will suggest
-  modules starting with ``foo``. Similarly, typing ``from foo import b`` will
-  suggest submodules of ``foo`` starting with ``b``. Note that autocompletion
-  of module attributes is not currently supported.
+  (Contributed by Łukasz Langa in :gh:`131507`.)

+* The default :term:`interactive` shell now supports import auto-completion.
+  This means that typing ``import co`` and pressing :kbd:`<Tab>` will suggest
+  modules starting with ``co``. Similarly, typing ``from concurrent import i``
+  will suggest submodules of ``concurrent`` starting with ``i``.
+  Note that autocompletion of module attributes is not currently supported.
+  (Contributed by Tomas Roun in :gh:`69605`.)

-* The :func:`map` built-in now has an optional keyword-only *strict* flag
-  like :func:`zip` to check that all the iterables are of equal length.
-  (Contributed by Wannes Boeykens in :gh:`119793`.)
-
-* Incorrect usage of :keyword:`await` and asynchronous comprehensions
-  is now detected even if the code is optimized away by the :option:`-O`
-  command-line option. For example, ``python -O -c 'assert await 1'``
-  now produces a :exc:`SyntaxError`. (Contributed by Jelle Zijlstra in :gh:`121637`.)
-
-* Writes to ``__debug__`` are now detected even if the code is optimized
-  away by the :option:`-O` command-line option. For example,
-  ``python -O -c 'assert (__debug__ := 1)'`` now produces a
-  :exc:`SyntaxError`. (Contributed by Irit Katriel in :gh:`122245`.)
-
-* Add class methods :meth:`float.from_number` and :meth:`complex.from_number`
-  to convert a number to :class:`float` or :class:`complex` type correspondingly.
-  They raise an error if the argument is a string.
-  (Contributed by Serhiy Storchaka in :gh:`84978`.)
-
-* Implement mixed-mode arithmetic rules combining real and complex numbers as
-  specified by C standards since C99.
-  (Contributed by Sergey B Kirpichev in :gh:`69639`.)
-
-* All Windows code pages are now supported as "cpXXX" codecs on Windows.
-  (Contributed by Serhiy Storchaka in :gh:`123803`.)
-
-* :class:`super` objects are now :mod:`pickleable <pickle>` and
-  :mod:`copyable <copy>`.
-  (Contributed by Serhiy Storchaka in :gh:`125767`.)
-
-* The :class:`memoryview` type now supports subscription,
-  making it a :term:`generic type`.
-  (Contributed by Brian Schubert in :gh:`126012`.)
-
-* Support underscore and comma as thousands separators in the fractional part
-  for floating-point presentation types of the new-style string formatting
-  (with :func:`format` or :ref:`f-strings`).
-  (Contributed by Sergey B Kirpichev in :gh:`87790`.)
-
-* The :func:`bytes.fromhex` and :func:`bytearray.fromhex` methods now accept
-  ASCII :class:`bytes` and :term:`bytes-like objects <bytes-like object>`.
-  (Contributed by Daniel Pope in :gh:`129349`.)
-
-* Support ``\z`` as a synonym for ``\Z`` in :mod:`regular expressions <re>`.
-  It is interpreted unambiguously in many other regular expression engines,
-  unlike ``\Z``, which has subtly different behavior.
-  (Contributed by Serhiy Storchaka in :gh:`133306`.)
-
-* ``\B`` in :mod:`regular expression <re>` now matches the empty input string.
-  Now it is always the opposite of ``\b``.
-  (Contributed by Serhiy Storchaka in :gh:`124130`.)
-
-* iOS and macOS apps can now be configured to redirect ``stdout`` and
-  ``stderr`` content to the system log. (Contributed by Russell Keith-Magee in
-  :gh:`127592`.)
-
-* The iOS testbed is now able to stream test output while the test is running.
-  The testbed can also be used to run the test suite of projects other than
-  CPython itself. (Contributed by Russell Keith-Magee in :gh:`127592`.)
-
-* Three-argument :func:`pow` now tries calling :meth:`~object.__rpow__` if
-  necessary. Previously it was only called in two-argument :func:`!pow` and the
-  binary power operator.
-  (Contributed by Serhiy Storchaka in :gh:`130104`.)
-
-* Add a built-in implementation for HMAC (:rfc:`2104`) using formally verified
-  code from the `HACL* <https://github.com/hacl-star/hacl-star/>`__ project.
-  This implementation is used as a fallback when the OpenSSL implementation
-  of HMAC is not available.
-  (Contributed by Bénédikt Tran in :gh:`99108`.)
-
-* The import time flag can now track modules that are already loaded ('cached'),
-  via the new :option:`-X importtime=2 <-X>`.
-  When such a module is imported, the ``self`` and ``cumulative`` times
-  are replaced by the string ``cached``.
-  Values above ``2`` for ``-X importtime`` are now reserved for future use.
-  (Contributed by Noah Kim and Adam Turner in :gh:`118655`.)
-
-* When subclassing from a pure C type, the C slots for the new type are no
-  longer replaced with a wrapped version on class creation if they are not
-  explicitly overridden in the subclass.
-  (Contributed by Tomasz Pytel in :gh:`132329`.)
-
-* The command-line option :option:`-c` now automatically dedents its code
-  argument before execution. The auto-dedentation behavior mirrors
-  :func:`textwrap.dedent`.
-  (Contributed by Jon Crall and Steven Sun in :gh:`103998`.)
-
-* Improve error message when an object supporting the synchronous
-  context manager protocol is entered using :keyword:`async
-  with` instead of :keyword:`with`.
-  And vice versa with the asynchronous context manager protocol.
-  (Contributed by Bénédikt Tran in :gh:`128398`.)
-
-* :option:`!-J` is no longer a reserved flag for Jython_,
-  and now has no special meaning.
-  (Contributed by Adam Turner in :gh:`133336`.)
-
-  .. _Jython: https://www.jython.org/
-
-* The :func:`int` built-in no longer delegates to :meth:`~object.__trunc__`.
-  Classes that want to support conversion to :func:`!int` must implement
-  either :meth:`~object.__int__` or :meth:`~object.__index__`.
-  (Contributed by Mark Dickinson in :gh:`119743`.)
-
-* Using :data:`NotImplemented` in a boolean context
-  will now raise a :exc:`TypeError`.
-  This has raised a :exc:`DeprecationWarning` since Python 3.9.
-  (Contributed by Jelle Zijlstra in :gh:`118767`.)
-
-
-.. _whatsnew314-pep765:
-
-PEP 765: Disallow ``return``/``break``/``continue`` that exit a ``finally`` block
----------------------------------------------------------------------------------
-
-The compiler emits a :exc:`SyntaxWarning` when a :keyword:`return`, :keyword:`break` or
-:keyword:`continue` statement appears where it exits a :keyword:`finally` block.
-This change is specified in :pep:`765`.
-

 New modules
 ===========
@@ -1331,11 +1315,13 @@ concurrent.futures

 .. _whatsnew314-concurrent-futures-start-method:

-* The default :class:`~concurrent.futures.ProcessPoolExecutor`
-  :ref:`start method <multiprocessing-start-methods>` changed
-  from :ref:`fork <multiprocessing-start-method-fork>` to :ref:`forkserver
-  <multiprocessing-start-method-forkserver>` on platforms other than macOS and
-  Windows where it was already :ref:`spawn <multiprocessing-start-method-spawn>`.
+* On Unix platforms other than macOS, :ref:`'forkserver'
+  <multiprocessing-start-method-forkserver>` is now the default :ref:`start
+  method <multiprocessing-start-methods>` for
+  :class:`~concurrent.futures.ProcessPoolExecutor`
+  (replacing :ref:`'fork' <multiprocessing-start-method-fork>`).
+  This change does not affect Windows or macOS, where :ref:`'spawn'
+  <multiprocessing-start-method-spawn>` remains the default start method.

   If the threading incompatible *fork* method is required, you must explicitly
   request it by supplying a multiprocessing context *mp_context* to
@@ -1575,6 +1561,8 @@ hmac

+* Add a built-in implementation for HMAC (:rfc:`2104`) using formally verified
+  code from the `HACL* <https://github.com/hacl-star/hacl-star/>`__ project.
+  This implementation is used as a fallback when the OpenSSL implementation
+  of HMAC is not available.
+  (Contributed by Bénédikt Tran in :gh:`99108`.)
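Usage is unchanged by the fallback; a small sketch with the standard :mod:`hmac` API (key and message are illustrative)::

    import hashlib
    import hmac

    # The HACL*-based implementation is picked automatically when OpenSSL's
    # HMAC is unavailable; calling code stays the same.
    tag = hmac.digest(b"secret-key", b"message", hashlib.sha256)
    expected = hmac.digest(b"secret-key", b"message", hashlib.sha256)
    print(hmac.compare_digest(tag, expected))   # True
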
@@ -1762,10 +1750,12 @@ multiprocessing

 .. _whatsnew314-multiprocessing-start-method:

-* The default :ref:`start method <multiprocessing-start-methods>` changed
-  from :ref:`fork <multiprocessing-start-method-fork>` to :ref:`forkserver
-  <multiprocessing-start-method-forkserver>` on platforms other than macOS and
-  Windows where it was already :ref:`spawn <multiprocessing-start-method-spawn>`.
+* On Unix platforms other than macOS, :ref:`'forkserver'
+  <multiprocessing-start-method-forkserver>` is now the default :ref:`start
+  method <multiprocessing-start-methods>`
+  (replacing :ref:`'fork' <multiprocessing-start-method-fork>`).
+  This change does not affect Windows or macOS, where :ref:`'spawn'
+  <multiprocessing-start-method-spawn>` remains the default start method.

   If the threading incompatible *fork* method is required, you must explicitly
   request it via a context from :func:`multiprocessing.get_context` (preferred)
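A sketch of explicitly requesting *fork* where its semantics are required (the worker function is illustrative)::

    import multiprocessing as mp
    from concurrent.futures import ProcessPoolExecutor

    def square(x):
        return x * x

    if __name__ == "__main__":
        ctx = mp.get_context("fork")              # opt back in explicitly
        with ProcessPoolExecutor(mp_context=ctx) as pool:
            print(list(pool.map(square, range(4))))
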
@@ -1905,8 +1895,8 @@ pdb
   (Contributed by Tian Gao in :gh:`132576`.)

 * Source code displayed in :mod:`pdb` will be syntax-highlighted. This feature
-  can be controlled using the same methods as PyREPL, in addition to the newly
-  added ``colorize`` argument of :class:`pdb.Pdb`.
+  can be controlled using the same methods as the default :term:`interactive`
+  shell, in addition to the newly added ``colorize`` argument of :class:`pdb.Pdb`.
   (Contributed by Tian Gao and Łukasz Langa in :gh:`133355`.)
@@ -1936,6 +1926,19 @@ pydoc
   (Contributed by Jelle Zijlstra in :gh:`101552`.)


+re
+--
+
+* Support ``\z`` as a synonym for ``\Z`` in :mod:`regular expressions <re>`.
+  It is interpreted unambiguously in many other regular expression engines,
+  unlike ``\Z``, which has subtly different behavior.
+  (Contributed by Serhiy Storchaka in :gh:`133306`.)
+
+* ``\B`` in :mod:`regular expressions <re>` now matches the empty input string.
+  It is now always the opposite of ``\b``.
+  (Contributed by Serhiy Storchaka in :gh:`124130`.)
+
+
 socket
 ------
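A sketch of the two :mod:`re` changes above (Python 3.14+)::

    import re

    # \z is an unambiguous end-of-string anchor; in Python it matches
    # exactly where \Z does.
    print(bool(re.search(r"end\z", "the end")))     # True
    print(bool(re.search(r"end\z", "the end\n")))   # False

    # \B now matches in the empty string, as the strict opposite of \b.
    print(bool(re.search(r"\B", "")))               # True in 3.14+
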
@@ -2253,6 +2256,11 @@ Optimizations
   (Contributed by Adam Turner, Bénédikt Tran, Chris Markiewicz, Eli Schwartz,
   Hugo van Kemenade, Jelle Zijlstra, and others in :gh:`118761`.)

+* The interpreter avoids some reference count modifications internally when
+  it's safe to do so. This can lead to different values returned from
+  :func:`sys.getrefcount` and :c:func:`Py_REFCNT` compared to previous versions
+  of Python. See :ref:`below <whatsnew314-refcount>` for details.
+

 asyncio
 -------
@@ -2660,7 +2668,7 @@ urllib
 Deprecated
 ==========

-New Deprecations
+New deprecations
 ----------------

 * Passing a complex number as the *real* or *imag* argument in the
@@ -3219,6 +3227,20 @@ that may require changes to your code.
 Changes in the Python API
 -------------------------

+* On Unix platforms other than macOS, *forkserver* is now the default
+  :ref:`start method <multiprocessing-start-methods>` for :mod:`multiprocessing`
+  and :class:`~concurrent.futures.ProcessPoolExecutor`, instead of *fork*.
+
+  See :ref:`(1) <whatsnew314-concurrent-futures-start-method>` and
+  :ref:`(2) <whatsnew314-multiprocessing-start-method>` for details.
+
+  If you encounter :exc:`NameError`\s or pickling errors coming out of
+  :mod:`multiprocessing` or :mod:`concurrent.futures`, see the
+  :ref:`forkserver restrictions <multiprocessing-programming-forkserver>`.
+
+  This change does not affect Windows or macOS, where :ref:`'spawn'
+  <multiprocessing-start-method-spawn>` remains the default start method.
+
 * :class:`functools.partial` is now a method descriptor.
   Wrap it in :func:`staticmethod` if you want to preserve the old behavior.
   (Contributed by Serhiy Storchaka and Dominykas Grigonis in :gh:`121027`.)
@@ -672,11 +672,6 @@ struct _Py_interp_cached_objects {

     /* object.__reduce__ */
     PyObject *objreduce;
-#ifndef Py_GIL_DISABLED
-    /* resolve_slotdups() */
-    PyObject *type_slots_pname;
-    pytype_slotdef *type_slots_ptrs[MAX_EQUIV];
-#endif

     /* TypeVar and related types */
     PyTypeObject *generic_type;
@@ -152,6 +152,9 @@ typedef int (*_py_validate_type)(PyTypeObject *);
 extern int _PyType_Validate(PyTypeObject *ty, _py_validate_type validate, unsigned int *tp_version);
 extern int _PyType_CacheGetItemForSpecialization(PyHeapTypeObject *ht, PyObject *descriptor, uint32_t tp_version);

+// Precalculates count of non-unique slots and fills wrapperbase.name_count.
+extern int _PyType_InitSlotDefs(PyInterpreterState *interp);
+
 #ifdef __cplusplus
 }
 #endif
Lib/_pyrepl/_module_completer.py

@@ -128,15 +128,12 @@ def _find_modules(self, path: str, prefix: str) -> list[str]:
         for segment in path.split('.'):
             modules = [mod_info for mod_info in modules
                        if mod_info.ispkg and mod_info.name == segment]
-            print(f"{segment=}, {modules=}")  # TEMPORARY -- debugging tests on windows
             if is_stdlib_import is None:
                 # Top-level import decides if we import from stdlib or not
                 is_stdlib_import = all(
                     self._is_stdlib_module(mod_info) for mod_info in modules
                 )
             modules = self.iter_submodules(modules)
-        modules = list(modules)  # TEMPORARY -- debugging tests on windows
-        print(f"segment=last, {modules=}")  # TEMPORARY -- debugging tests on windows

         module_names = [module.name for module in modules]
         if is_stdlib_import:
@@ -215,67 +212,7 @@ def global_cache(self) -> list[pkgutil.ModuleInfo]:
         """Global module cache"""
         if not self._global_cache or self._curr_sys_path != sys.path:
             self._curr_sys_path = sys.path[:]
-            print('getting packages/')  # TEMPORARY -- debugging tests on windows
             self._global_cache = list(pkgutil.iter_modules())
-            # === BEGIN TEMPORARY -- debugging tests on windows ===
-            print(f"\n\n{self._global_cache=}\n\n")
-            mymod = next((p for p in self._global_cache if p.name == "mymodule"), None)
-            if mymod:
-                print("0a", mymod)
-                spec = mymod.module_finder.find_spec(mymod.name, None)
-                if spec:
-                    print("1")
-                    assert spec.submodule_search_locations and len(spec.submodule_search_locations) == 1
-                    print("2")
-                    importer = pkgutil.get_importer(spec.submodule_search_locations[0])
-                    print("3")
-                    assert importer and isinstance(importer, FileFinder)
-                    print("4")
-                    if importer.path is None or not os.path.isdir(importer.path):
-                        print("4a")
-                        return
-                    yielded = {}
-                    import inspect
-                    try:
-                        filenames = os.listdir(importer.path)
-                    except OSError:
-                        # ignore unreadable directories like import does
-                        print("4b")
-                        filenames = []
-                    print("4c", filenames)
-                    filenames.sort()  # handle packages before same-named modules
-                    submods = []
-                    for fn in filenames:
-                        print("4d", fn)
-                        modname = inspect.getmodulename(fn)
-                        print("4e", modname)
-                        if modname == '__init__' or modname in yielded:
-                            print("4f", modname)
-                            continue
-                        path = os.path.join(importer.path, fn)
-                        ispkg = False
-                        if not modname and os.path.isdir(path) and '.' not in fn:
-                            print("4g")
-                            modname = fn
-                            try:
-                                dircontents = os.listdir(path)
-                            except OSError:
-                                # ignore unreadable directories like import does
-                                dircontents = []
-                            for fn in dircontents:
-                                subname = inspect.getmodulename(fn)
-                                if subname == '__init__':
-                                    ispkg = True
-                                    break
-                            else:
-                                continue  # not a package
-                        if modname and '.' not in modname:
-                            print("4h")
-                            yielded[modname] = 1
-                            submods.append((importer, modname, ispkg))
-                    print("4i")
-                    print("module:", mymod, submods)
-            # === END TEMPORARY ===
         return self._global_cache
@@ -113,6 +113,7 @@ def run(self):
             run_multiline_interactive_console,
         )
         try:
+            sys.ps1 = ps1
            run_multiline_interactive_console(console)
         except SystemExit:
             # expected via the `exit` and `quit` commands
@@ -7,6 +7,7 @@
 from .collector import Collector
 from .pstats_collector import PstatsCollector
 from .stack_collector import CollapsedStackCollector
+from .gecko_collector import GeckoCollector
 from .string_table import StringTable

-__all__ = ("Collector", "PstatsCollector", "CollapsedStackCollector", "StringTable")
+__all__ = ("Collector", "PstatsCollector", "CollapsedStackCollector", "GeckoCollector", "StringTable")
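A hypothetical sketch of how the new collector is driven (the sampling loop that produces ``stack_frames`` lives elsewhere in the profiler and is not shown)::

    from profiling.sampling.gecko_collector import GeckoCollector

    collector = GeckoCollector(skip_idle=True)
    # The sampler calls collector.collect(stack_frames) once per sample...
    collector.export("profile.json")   # ...then writes Firefox Profiler JSON
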
Lib/profiling/sampling/gecko_collector.py: 467 lines (new file)

@@ -0,0 +1,467 @@
import json
import os
import platform
import time

from .collector import Collector, THREAD_STATE_RUNNING


# Categories matching Firefox Profiler expectations
GECKO_CATEGORIES = [
    {"name": "Other", "color": "grey", "subcategories": ["Other"]},
    {"name": "Python", "color": "yellow", "subcategories": ["Other"]},
    {"name": "Native", "color": "blue", "subcategories": ["Other"]},
    {"name": "Idle", "color": "transparent", "subcategories": ["Other"]},
]

# Category indices
CATEGORY_OTHER = 0
CATEGORY_PYTHON = 1
CATEGORY_NATIVE = 2
CATEGORY_IDLE = 3

# Subcategory indices
DEFAULT_SUBCATEGORY = 0

GECKO_FORMAT_VERSION = 32
GECKO_PREPROCESSED_VERSION = 57

# Resource type constants
RESOURCE_TYPE_LIBRARY = 1

# Frame constants
FRAME_ADDRESS_NONE = -1
FRAME_INLINE_DEPTH_ROOT = 0

# Process constants
PROCESS_TYPE_MAIN = 0
STACKWALK_DISABLED = 0


class GeckoCollector(Collector):
    def __init__(self, *, skip_idle=False):
        self.skip_idle = skip_idle
        self.start_time = time.time() * 1000  # milliseconds since epoch

        # Global string table (shared across all threads)
        self.global_strings = ["(root)"]  # Start with root
        self.global_string_map = {"(root)": 0}

        # Per-thread data structures
        self.threads = {}  # tid -> thread data

        # Global tables
        self.libs = []

        # Sampling interval tracking
        self.sample_count = 0
        self.last_sample_time = 0
        self.interval = 1.0  # Will be calculated from actual sampling

    def collect(self, stack_frames):
        """Collect a sample from stack frames."""
        current_time = (time.time() * 1000) - self.start_time

        # Update interval calculation
        if self.sample_count > 0 and self.last_sample_time > 0:
            self.interval = (
                current_time - self.last_sample_time
            ) / self.sample_count
        self.last_sample_time = current_time

        for interpreter_info in stack_frames:
            for thread_info in interpreter_info.threads:
                if (
                    self.skip_idle
                    and thread_info.status != THREAD_STATE_RUNNING
                ):
                    continue

                frames = thread_info.frame_info
                if not frames:
                    continue

                tid = thread_info.thread_id

                # Initialize thread if needed
                if tid not in self.threads:
                    self.threads[tid] = self._create_thread(tid)

                thread_data = self.threads[tid]

                # Process the stack
                stack_index = self._process_stack(thread_data, frames)

                # Add sample - cache references to avoid dictionary lookups
                samples = thread_data["samples"]
                samples["stack"].append(stack_index)
                samples["time"].append(current_time)
                samples["eventDelay"].append(None)

        self.sample_count += 1

    def _create_thread(self, tid):
        """Create a new thread structure with processed profile format."""
        import threading

        # Determine if this is the main thread
        try:
            is_main = tid == threading.main_thread().ident
        except (RuntimeError, AttributeError):
            is_main = False

        thread = {
            "name": f"Thread-{tid}",
            "isMainThread": is_main,
            "processStartupTime": 0,
            "processShutdownTime": None,
            "registerTime": 0,
            "unregisterTime": None,
            "pausedRanges": [],
            "pid": str(os.getpid()),
            "tid": tid,
            "processType": "default",
            "processName": "Python Process",
            # Sample data - processed format with direct arrays
            "samples": {
                "stack": [],
                "time": [],
                "eventDelay": [],
                "weight": None,
                "weightType": "samples",
                "length": 0,  # Will be updated on export
            },
            # Stack table - processed format
            "stackTable": {
                "frame": [],
                "category": [],
                "subcategory": [],
                "prefix": [],
                "length": 0,  # Will be updated on export
            },
            # Frame table - processed format
            "frameTable": {
                "address": [],
                "category": [],
                "subcategory": [],
                "func": [],
                "innerWindowID": [],
                "implementation": [],
                "optimizations": [],
                "line": [],
                "column": [],
                "inlineDepth": [],
                "nativeSymbol": [],
                "length": 0,  # Will be updated on export
            },
            # Function table - processed format
            "funcTable": {
                "name": [],
                "isJS": [],
                "relevantForJS": [],
                "resource": [],
                "fileName": [],
                "lineNumber": [],
                "columnNumber": [],
                "length": 0,  # Will be updated on export
            },
            # Resource table - processed format
            "resourceTable": {
                "lib": [],
                "name": [],
                "host": [],
                "type": [],
                "length": 0,  # Will be updated on export
            },
            # Native symbols table (empty for Python)
            "nativeSymbols": {
                "libIndex": [],
                "address": [],
                "name": [],
                "functionSize": [],
                "length": 0,
            },
            # Markers - processed format
            "markers": {
                "data": [],
                "name": [],
                "startTime": [],
                "endTime": [],
                "phase": [],
                "category": [],
                "length": 0,
            },
            # Caches for deduplication
            "_stackCache": {},
            "_frameCache": {},
            "_funcCache": {},
            "_resourceCache": {},
        }

        return thread

    def _is_python(self, filename: str) -> bool:
        return not filename.startswith("<") or filename in ["<stdin>", "<string>"]

    def _get_category(self, filename: str) -> int:
        return CATEGORY_PYTHON if self._is_python(filename) else CATEGORY_NATIVE

    def _intern_string(self, s):
        """Intern a string in the global string table."""
        if s in self.global_string_map:
            return self.global_string_map[s]
        idx = len(self.global_strings)
        self.global_strings.append(s)
        self.global_string_map[s] = idx
        return idx

    def _process_stack(self, thread_data, frames):
        """Process a stack and return the stack index."""
        if not frames:
            return None

        # Cache references to avoid repeated dictionary lookups
        stack_cache = thread_data["_stackCache"]
        stack_table = thread_data["stackTable"]
        stack_frames = stack_table["frame"]
        stack_prefix = stack_table["prefix"]
        stack_category = stack_table["category"]
        stack_subcategory = stack_table["subcategory"]

        # Build stack bottom-up (from root to leaf)
        prefix_stack_idx = None

        for frame_tuple in reversed(frames):
            # frame_tuple is (filename, lineno, funcname)
            filename, lineno, funcname = frame_tuple

            # Get or create function
            func_idx = self._get_or_create_func(
                thread_data, filename, funcname, lineno
            )

            # Get or create frame
            frame_idx = self._get_or_create_frame(
                thread_data, func_idx, lineno
            )

            # Check stack cache
            stack_key = (frame_idx, prefix_stack_idx)
            if stack_key in stack_cache:
                prefix_stack_idx = stack_cache[stack_key]
            else:
                # Create new stack entry
                stack_idx = len(stack_frames)
                stack_frames.append(frame_idx)
                stack_prefix.append(prefix_stack_idx)

                # Determine category
                category = self._get_category(filename)
                stack_category.append(category)
                stack_subcategory.append(DEFAULT_SUBCATEGORY)

                stack_cache[stack_key] = stack_idx
                prefix_stack_idx = stack_idx

        return prefix_stack_idx

    def _get_or_create_func(self, thread_data, filename, funcname, lineno):
        """Get or create a function entry."""
        func_cache = thread_data["_funcCache"]
        func_key = (filename, funcname)

        if func_key in func_cache:
            return func_cache[func_key]

        # Cache references for func table
        func_table = thread_data["funcTable"]
        func_names = func_table["name"]
        func_is_js = func_table["isJS"]
        func_relevant = func_table["relevantForJS"]
        func_resources = func_table["resource"]
        func_filenames = func_table["fileName"]
        func_line_numbers = func_table["lineNumber"]
        func_column_numbers = func_table["columnNumber"]

        func_idx = len(func_names)

        # Intern strings in global table
        name_idx = self._intern_string(funcname)

        # Determine if Python
        is_python = self._is_python(filename)

        # Create resource
        resource_idx = self._get_or_create_resource(thread_data, filename)

        # Add function
        func_names.append(name_idx)
        func_is_js.append(is_python)
        func_relevant.append(is_python)
        func_resources.append(resource_idx)

        if is_python:
            filename_idx = self._intern_string(os.path.basename(filename))
            func_filenames.append(filename_idx)
            func_line_numbers.append(lineno)
        else:
            func_filenames.append(None)
            func_line_numbers.append(None)
        func_column_numbers.append(None)

        func_cache[func_key] = func_idx
        return func_idx

    def _get_or_create_resource(self, thread_data, filename):
        """Get or create a resource entry."""
        resource_cache = thread_data["_resourceCache"]

        if filename in resource_cache:
            return resource_cache[filename]

        # Cache references for resource table
        resource_table = thread_data["resourceTable"]
        resource_libs = resource_table["lib"]
        resource_names = resource_table["name"]
        resource_hosts = resource_table["host"]
        resource_types = resource_table["type"]

        resource_idx = len(resource_names)
        resource_name = (
            os.path.basename(filename) if "/" in filename else filename
        )
        name_idx = self._intern_string(resource_name)

        resource_libs.append(None)
        resource_names.append(name_idx)
        resource_hosts.append(None)
        resource_types.append(RESOURCE_TYPE_LIBRARY)

        resource_cache[filename] = resource_idx
        return resource_idx

    def _get_or_create_frame(self, thread_data, func_idx, lineno):
        """Get or create a frame entry."""
        frame_cache = thread_data["_frameCache"]
        frame_key = (func_idx, lineno)

        if frame_key in frame_cache:
            return frame_cache[frame_key]

        # Cache references for frame table
        frame_table = thread_data["frameTable"]
        frame_addresses = frame_table["address"]
        frame_inline_depths = frame_table["inlineDepth"]
        frame_categories = frame_table["category"]
        frame_subcategories = frame_table["subcategory"]
        frame_funcs = frame_table["func"]
        frame_native_symbols = frame_table["nativeSymbol"]
        frame_inner_window_ids = frame_table["innerWindowID"]
        frame_implementations = frame_table["implementation"]
        frame_lines = frame_table["line"]
        frame_columns = frame_table["column"]
        frame_optimizations = frame_table["optimizations"]

        frame_idx = len(frame_funcs)

        # Determine category based on function - use cached func table reference
        is_python = thread_data["funcTable"]["isJS"][func_idx]
        category = CATEGORY_PYTHON if is_python else CATEGORY_NATIVE

        frame_addresses.append(FRAME_ADDRESS_NONE)
        frame_inline_depths.append(FRAME_INLINE_DEPTH_ROOT)
        frame_categories.append(category)
        frame_subcategories.append(DEFAULT_SUBCATEGORY)
        frame_funcs.append(func_idx)
        frame_native_symbols.append(None)
        frame_inner_window_ids.append(None)
        frame_implementations.append(None)
        frame_lines.append(lineno if lineno else None)
        frame_columns.append(None)
        frame_optimizations.append(None)

        frame_cache[frame_key] = frame_idx
        return frame_idx

    def export(self, filename):
        """Export the profile to a Gecko JSON file."""
        if self.sample_count > 0 and self.last_sample_time > 0:
            self.interval = self.last_sample_time / self.sample_count

        profile = self._build_profile()

        with open(filename, "w") as f:
            json.dump(profile, f, separators=(",", ":"))

        print(f"Gecko profile written to {filename}")
        print(
            "Open in Firefox Profiler: https://profiler.firefox.com/"
        )

    def _build_profile(self):
        """Build the complete profile structure in processed format."""
        # Convert thread data to final format
        threads = []

        for tid, thread_data in self.threads.items():
            # Update lengths
            samples = thread_data["samples"]
            stack_table = thread_data["stackTable"]
            frame_table = thread_data["frameTable"]
            func_table = thread_data["funcTable"]
            resource_table = thread_data["resourceTable"]
|
||||
|
||||
samples["length"] = len(samples["stack"])
|
||||
stack_table["length"] = len(stack_table["frame"])
|
||||
frame_table["length"] = len(frame_table["func"])
|
||||
func_table["length"] = len(func_table["name"])
|
||||
resource_table["length"] = len(resource_table["name"])
|
||||
|
||||
# Clean up internal caches
|
||||
del thread_data["_stackCache"]
|
||||
del thread_data["_frameCache"]
|
||||
del thread_data["_funcCache"]
|
||||
del thread_data["_resourceCache"]
|
||||
|
||||
threads.append(thread_data)
|
||||
|
||||
# Main profile structure in processed format
|
||||
profile = {
|
||||
"meta": {
|
||||
"interval": self.interval,
|
||||
"startTime": self.start_time,
|
||||
"abi": platform.machine(),
|
||||
"misc": "Python profiler",
|
||||
"oscpu": platform.machine(),
|
||||
"platform": platform.system(),
|
||||
"processType": PROCESS_TYPE_MAIN,
|
||||
"categories": GECKO_CATEGORIES,
|
||||
"stackwalk": STACKWALK_DISABLED,
|
||||
"toolkit": "",
|
||||
"version": GECKO_FORMAT_VERSION,
|
||||
"preprocessedProfileVersion": GECKO_PREPROCESSED_VERSION,
|
||||
"appBuildID": "",
|
||||
"physicalCPUs": os.cpu_count() or 0,
|
||||
"logicalCPUs": os.cpu_count() or 0,
|
||||
"CPUName": "",
|
||||
"product": "Python",
|
||||
"symbolicated": True,
|
||||
"markerSchema": [],
|
||||
"importedFrom": "Tachyon Sampling Profiler",
|
||||
"extensions": {
|
||||
"id": [],
|
||||
"name": [],
|
||||
"baseURL": [],
|
||||
"length": 0,
|
||||
},
|
||||
},
|
||||
"libs": self.libs,
|
||||
"threads": threads,
|
||||
"pages": [],
|
||||
"shared": {
|
||||
"stringArray": self.global_strings,
|
||||
"sources": {"length": 0, "uuid": [], "filename": []},
|
||||
},
|
||||
}
|
||||
|
||||
return profile
|
||||
|
|
@ -13,6 +13,7 @@
|
|||
|
||||
from .pstats_collector import PstatsCollector
|
||||
from .stack_collector import CollapsedStackCollector, FlamegraphCollector
|
||||
from .gecko_collector import GeckoCollector
|
||||
|
||||
_FREE_THREADED_BUILD = sysconfig.get_config_var("Py_GIL_DISABLED") is not None
|
||||
|
||||
|
|
@ -631,6 +632,9 @@ def sample(
|
|||
case "flamegraph":
|
||||
collector = FlamegraphCollector(skip_idle=skip_idle)
|
||||
filename = filename or f"flamegraph.{pid}.html"
|
||||
case "gecko":
|
||||
collector = GeckoCollector(skip_idle=skip_idle)
|
||||
filename = filename or f"gecko.{pid}.json"
|
||||
case _:
|
||||
raise ValueError(f"Invalid output format: {output_format}")
|
||||
|
||||
|
|
@ -675,10 +679,13 @@ def _validate_collapsed_format_args(args, parser):
|
|||
|
||||
def wait_for_process_and_sample(pid, sort_value, args):
|
||||
"""Sample the process immediately since it has already signaled readiness."""
|
||||
# Set default collapsed filename with subprocess PID if not already set
|
||||
# Set default filename with subprocess PID if not already set
|
||||
filename = args.outfile
|
||||
if not filename and args.format == "collapsed":
|
||||
if not filename:
|
||||
if args.format == "collapsed":
|
||||
filename = f"collapsed.{pid}.txt"
|
||||
elif args.format == "gecko":
|
||||
filename = f"gecko.{pid}.json"
|
||||
|
||||
mode = _parse_mode(args.mode)
|
||||
|
||||
|
|
@ -782,6 +789,13 @@ def main():
|
|||
dest="format",
|
||||
help="Generate HTML flamegraph visualization",
|
||||
)
|
||||
output_format.add_argument(
|
||||
"--gecko",
|
||||
action="store_const",
|
||||
const="gecko",
|
||||
dest="format",
|
||||
help="Generate Gecko format for Firefox Profiler",
|
||||
)
|
||||
|
||||
output_group.add_argument(
|
||||
"-o",
|
||||
|
|
@ -860,7 +874,7 @@ def main():
|
|||
args = parser.parse_args()
|
||||
|
||||
# Validate format-specific arguments
|
||||
if args.format == "collapsed":
|
||||
if args.format in ("collapsed", "gecko"):
|
||||
_validate_collapsed_format_args(args, parser)
|
||||
|
||||
sort_value = args.sort if args.sort is not None else 2
|
||||
|
|
|
|||
|
|
@ -25,10 +25,11 @@
|
|||
"test_gdb",
|
||||
"test_inspect",
|
||||
"test_io",
|
||||
"test_pydoc",
|
||||
"test_multiprocessing_fork",
|
||||
"test_multiprocessing_forkserver",
|
||||
"test_multiprocessing_spawn",
|
||||
"test_os",
|
||||
"test_pydoc",
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
import time
|
||||
import unittest
|
||||
|
||||
from test.support import import_helper, skip_if_sanitizer
|
||||
from test.support import import_helper
|
||||
|
||||
_channels = import_helper.import_module('_interpchannels')
|
||||
from concurrent.interpreters import _crossinterp
|
||||
|
|
@ -365,7 +365,6 @@ def test_shareable(self):
|
|||
#self.assertIsNot(got, obj)
|
||||
|
||||
|
||||
@skip_if_sanitizer('gh-129824: race on _waiting_release', thread=True)
|
||||
class ChannelTests(TestBase):
|
||||
|
||||
def test_create_cid(self):
|
||||
|
|
|
|||
|
|
@ -316,11 +316,9 @@ def tearDown(self):
|
|||
asyncio.all_tasks = asyncio.tasks.all_tasks = self._all_tasks
|
||||
return super().tearDown()
|
||||
|
||||
|
||||
@unittest.skip("skip")
|
||||
def test_issue105987(self):
|
||||
code = """if 1:
|
||||
from _asyncio import _swap_current_task
|
||||
from _asyncio import _swap_current_task, _set_running_loop
|
||||
|
||||
class DummyTask:
|
||||
pass
|
||||
|
|
@ -329,6 +327,7 @@ class DummyLoop:
|
|||
pass
|
||||
|
||||
l = DummyLoop()
|
||||
_set_running_loop(l)
|
||||
_swap_current_task(l, DummyTask())
|
||||
t = _swap_current_task(l, None)
|
||||
"""
|
||||
|
|
|
|||
|
|
@ -1232,21 +1232,6 @@ def test_init_dont_configure_locale(self):
|
|||
self.check_all_configs("test_init_dont_configure_locale", {}, preconfig,
|
||||
api=API_PYTHON)
|
||||
|
||||
@unittest.skip('as of 3.11 this test no longer works because '
|
||||
'path calculations do not occur on read')
|
||||
def test_init_read_set(self):
|
||||
config = {
|
||||
'program_name': './init_read_set',
|
||||
'executable': 'my_executable',
|
||||
'base_executable': 'my_executable',
|
||||
}
|
||||
def modify_path(path):
|
||||
path.insert(1, "test_path_insert1")
|
||||
path.append("test_path_append")
|
||||
self.check_all_configs("test_init_read_set", config,
|
||||
api=API_PYTHON,
|
||||
modify_path_cb=modify_path)
|
||||
|
||||
def test_init_sys_add(self):
|
||||
config = {
|
||||
'faulthandler': 1,
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
6
Lib/test/test_os/__init__.py
Normal file
6
Lib/test/test_os/__init__.py
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
import os.path
|
||||
from test.support import load_package_tests
|
||||
|
||||
|
||||
def load_tests(*args):
|
||||
return load_package_tests(os.path.dirname(__file__), *args)
|
||||
|
|
@ -7,7 +7,6 @@
|
|||
import contextlib
|
||||
import decimal
|
||||
import errno
|
||||
import fnmatch
|
||||
import fractions
|
||||
import itertools
|
||||
import locale
|
||||
|
|
@ -31,12 +30,12 @@
|
|||
import uuid
|
||||
import warnings
|
||||
from test import support
|
||||
from test.support import import_helper
|
||||
from test.support import os_helper
|
||||
from test.support import socket_helper
|
||||
from test.support import infinite_recursion
|
||||
from test.support import warnings_helper
|
||||
from platform import win32_is_iot
|
||||
from .utils import create_file
|
||||
|
||||
try:
|
||||
import resource
|
||||
|
|
@ -46,10 +45,6 @@
|
|||
import fcntl
|
||||
except ImportError:
|
||||
fcntl = None
|
||||
try:
|
||||
import _winapi
|
||||
except ImportError:
|
||||
_winapi = None
|
||||
try:
|
||||
import pwd
|
||||
all_users = [u.pw_uid for u in pwd.getpwall()]
|
||||
|
|
@ -93,11 +88,6 @@ def requires_os_func(name):
|
|||
return unittest.skipUnless(hasattr(os, name), 'requires os.%s' % name)
|
||||
|
||||
|
||||
def create_file(filename, content=b'content'):
|
||||
with open(filename, "xb", 0) as fp:
|
||||
fp.write(content)
|
||||
|
||||
|
||||
# bpo-41625: On AIX, splice() only works with a socket, not with a pipe.
|
||||
requires_splice_pipe = unittest.skipIf(sys.platform.startswith("aix"),
|
||||
'on AIX, splice() only accepts sockets')
|
||||
|
|
@ -2466,42 +2456,6 @@ def test_execve_with_empty_path(self):
|
|||
self.fail('No OSError raised')
|
||||
|
||||
|
||||
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
|
||||
class Win32ErrorTests(unittest.TestCase):
|
||||
def setUp(self):
|
||||
try:
|
||||
os.stat(os_helper.TESTFN)
|
||||
except FileNotFoundError:
|
||||
exists = False
|
||||
except OSError as exc:
|
||||
exists = True
|
||||
self.fail("file %s must not exist; os.stat failed with %s"
|
||||
% (os_helper.TESTFN, exc))
|
||||
else:
|
||||
self.fail("file %s must not exist" % os_helper.TESTFN)
|
||||
|
||||
def test_rename(self):
|
||||
self.assertRaises(OSError, os.rename, os_helper.TESTFN, os_helper.TESTFN+".bak")
|
||||
|
||||
def test_remove(self):
|
||||
self.assertRaises(OSError, os.remove, os_helper.TESTFN)
|
||||
|
||||
def test_chdir(self):
|
||||
self.assertRaises(OSError, os.chdir, os_helper.TESTFN)
|
||||
|
||||
def test_mkdir(self):
|
||||
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
|
||||
|
||||
with open(os_helper.TESTFN, "x") as f:
|
||||
self.assertRaises(OSError, os.mkdir, os_helper.TESTFN)
|
||||
|
||||
def test_utime(self):
|
||||
self.assertRaises(OSError, os.utime, os_helper.TESTFN, None)
|
||||
|
||||
def test_chmod(self):
|
||||
self.assertRaises(OSError, os.chmod, os_helper.TESTFN, 0)
|
||||
|
||||
|
||||
@unittest.skipIf(support.is_wasi, "Cannot create invalid FD on WASI.")
|
||||
class TestInvalidFD(unittest.TestCase):
|
||||
singles = ["fchdir", "dup", "fstat", "fstatvfs", "tcgetpgrp", "ttyname"]
|
||||
|
|
@ -2836,224 +2790,6 @@ def test_stat(self):
|
|||
for fn in self.unicodefn:
|
||||
os.stat(os.path.join(self.dir, fn))
|
||||
|
||||
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
|
||||
class Win32KillTests(unittest.TestCase):
|
||||
def _kill(self, sig):
|
||||
# Start sys.executable as a subprocess and communicate from the
|
||||
# subprocess to the parent that the interpreter is ready. When it
|
||||
# becomes ready, send *sig* via os.kill to the subprocess and check
|
||||
# that the return code is equal to *sig*.
|
||||
import ctypes
|
||||
from ctypes import wintypes
|
||||
import msvcrt
|
||||
|
||||
# Since we can't access the contents of the process' stdout until the
|
||||
# process has exited, use PeekNamedPipe to see what's inside stdout
|
||||
# without waiting. This is done so we can tell that the interpreter
|
||||
# is started and running at a point where it could handle a signal.
|
||||
PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe
|
||||
PeekNamedPipe.restype = wintypes.BOOL
|
||||
PeekNamedPipe.argtypes = (wintypes.HANDLE, # Pipe handle
|
||||
ctypes.POINTER(ctypes.c_char), # stdout buf
|
||||
wintypes.DWORD, # Buffer size
|
||||
ctypes.POINTER(wintypes.DWORD), # bytes read
|
||||
ctypes.POINTER(wintypes.DWORD), # bytes avail
|
||||
ctypes.POINTER(wintypes.DWORD)) # bytes left
|
||||
msg = "running"
|
||||
proc = subprocess.Popen([sys.executable, "-c",
|
||||
"import sys;"
|
||||
"sys.stdout.write('{}');"
|
||||
"sys.stdout.flush();"
|
||||
"input()".format(msg)],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
stdin=subprocess.PIPE)
|
||||
self.addCleanup(proc.stdout.close)
|
||||
self.addCleanup(proc.stderr.close)
|
||||
self.addCleanup(proc.stdin.close)
|
||||
|
||||
count, max = 0, 100
|
||||
while count < max and proc.poll() is None:
|
||||
# Create a string buffer to store the result of stdout from the pipe
|
||||
buf = ctypes.create_string_buffer(len(msg))
|
||||
# Obtain the text currently in proc.stdout
|
||||
# Bytes read/avail/left are left as NULL and unused
|
||||
rslt = PeekNamedPipe(msvcrt.get_osfhandle(proc.stdout.fileno()),
|
||||
buf, ctypes.sizeof(buf), None, None, None)
|
||||
self.assertNotEqual(rslt, 0, "PeekNamedPipe failed")
|
||||
if buf.value:
|
||||
self.assertEqual(msg, buf.value.decode())
|
||||
break
|
||||
time.sleep(0.1)
|
||||
count += 1
|
||||
else:
|
||||
self.fail("Did not receive communication from the subprocess")
|
||||
|
||||
os.kill(proc.pid, sig)
|
||||
self.assertEqual(proc.wait(), sig)
|
||||
|
||||
def test_kill_sigterm(self):
|
||||
# SIGTERM doesn't mean anything special, but make sure it works
|
||||
self._kill(signal.SIGTERM)
|
||||
|
||||
def test_kill_int(self):
|
||||
# os.kill on Windows can take an int which gets set as the exit code
|
||||
self._kill(100)
|
||||
|
||||
@unittest.skipIf(mmap is None, "requires mmap")
|
||||
def _kill_with_event(self, event, name):
|
||||
tagname = "test_os_%s" % uuid.uuid1()
|
||||
m = mmap.mmap(-1, 1, tagname)
|
||||
m[0] = 0
|
||||
|
||||
# Run a script which has console control handling enabled.
|
||||
script = os.path.join(os.path.dirname(__file__),
|
||||
"win_console_handler.py")
|
||||
cmd = [sys.executable, script, tagname]
|
||||
proc = subprocess.Popen(cmd,
|
||||
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
|
||||
|
||||
with proc:
|
||||
# Let the interpreter startup before we send signals. See #3137.
|
||||
for _ in support.sleeping_retry(support.SHORT_TIMEOUT):
|
||||
if proc.poll() is None:
|
||||
break
|
||||
else:
|
||||
# Forcefully kill the process if we weren't able to signal it.
|
||||
proc.kill()
|
||||
self.fail("Subprocess didn't finish initialization")
|
||||
|
||||
os.kill(proc.pid, event)
|
||||
|
||||
try:
|
||||
# proc.send_signal(event) could also be done here.
|
||||
# Allow time for the signal to be passed and the process to exit.
|
||||
proc.wait(timeout=support.SHORT_TIMEOUT)
|
||||
except subprocess.TimeoutExpired:
|
||||
# Forcefully kill the process if we weren't able to signal it.
|
||||
proc.kill()
|
||||
self.fail("subprocess did not stop on {}".format(name))
|
||||
|
||||
@unittest.skip("subprocesses aren't inheriting Ctrl+C property")
|
||||
@support.requires_subprocess()
|
||||
def test_CTRL_C_EVENT(self):
|
||||
from ctypes import wintypes
|
||||
import ctypes
|
||||
|
||||
# Make a NULL value by creating a pointer with no argument.
|
||||
NULL = ctypes.POINTER(ctypes.c_int)()
|
||||
SetConsoleCtrlHandler = ctypes.windll.kernel32.SetConsoleCtrlHandler
|
||||
SetConsoleCtrlHandler.argtypes = (ctypes.POINTER(ctypes.c_int),
|
||||
wintypes.BOOL)
|
||||
SetConsoleCtrlHandler.restype = wintypes.BOOL
|
||||
|
||||
# Calling this with NULL and FALSE causes the calling process to
|
||||
# handle Ctrl+C, rather than ignore it. This property is inherited
|
||||
# by subprocesses.
|
||||
SetConsoleCtrlHandler(NULL, 0)
|
||||
|
||||
self._kill_with_event(signal.CTRL_C_EVENT, "CTRL_C_EVENT")
|
||||
|
||||
@support.requires_subprocess()
|
||||
def test_CTRL_BREAK_EVENT(self):
|
||||
self._kill_with_event(signal.CTRL_BREAK_EVENT, "CTRL_BREAK_EVENT")
|
||||
|
||||
|
||||
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
|
||||
class Win32ListdirTests(unittest.TestCase):
|
||||
"""Test listdir on Windows."""
|
||||
|
||||
def setUp(self):
|
||||
self.created_paths = []
|
||||
for i in range(2):
|
||||
dir_name = 'SUB%d' % i
|
||||
dir_path = os.path.join(os_helper.TESTFN, dir_name)
|
||||
file_name = 'FILE%d' % i
|
||||
file_path = os.path.join(os_helper.TESTFN, file_name)
|
||||
os.makedirs(dir_path)
|
||||
with open(file_path, 'w', encoding='utf-8') as f:
|
||||
f.write("I'm %s and proud of it. Blame test_os.\n" % file_path)
|
||||
self.created_paths.extend([dir_name, file_name])
|
||||
self.created_paths.sort()
|
||||
|
||||
def tearDown(self):
|
||||
shutil.rmtree(os_helper.TESTFN)
|
||||
|
||||
def test_listdir_no_extended_path(self):
|
||||
"""Test when the path is not an "extended" path."""
|
||||
# unicode
|
||||
self.assertEqual(
|
||||
sorted(os.listdir(os_helper.TESTFN)),
|
||||
self.created_paths)
|
||||
|
||||
# bytes
|
||||
self.assertEqual(
|
||||
sorted(os.listdir(os.fsencode(os_helper.TESTFN))),
|
||||
[os.fsencode(path) for path in self.created_paths])
|
||||
|
||||
def test_listdir_extended_path(self):
|
||||
"""Test when the path starts with '\\\\?\\'."""
|
||||
# See: http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
|
||||
# unicode
|
||||
path = '\\\\?\\' + os.path.abspath(os_helper.TESTFN)
|
||||
self.assertEqual(
|
||||
sorted(os.listdir(path)),
|
||||
self.created_paths)
|
||||
|
||||
# bytes
|
||||
path = b'\\\\?\\' + os.fsencode(os.path.abspath(os_helper.TESTFN))
|
||||
self.assertEqual(
|
||||
sorted(os.listdir(path)),
|
||||
[os.fsencode(path) for path in self.created_paths])
|
||||
|
||||
|
||||
@unittest.skipUnless(os.name == "nt", "NT specific tests")
|
||||
class Win32ListdriveTests(unittest.TestCase):
|
||||
"""Test listdrive, listmounts and listvolume on Windows."""
|
||||
|
||||
def setUp(self):
|
||||
# Get drives and volumes from fsutil
|
||||
out = subprocess.check_output(
|
||||
["fsutil.exe", "volume", "list"],
|
||||
cwd=os.path.join(os.getenv("SystemRoot", "\\Windows"), "System32"),
|
||||
encoding="mbcs",
|
||||
errors="ignore",
|
||||
)
|
||||
lines = out.splitlines()
|
||||
self.known_volumes = {l for l in lines if l.startswith('\\\\?\\')}
|
||||
self.known_drives = {l for l in lines if l[1:] == ':\\'}
|
||||
self.known_mounts = {l for l in lines if l[1:3] == ':\\'}
|
||||
|
||||
def test_listdrives(self):
|
||||
drives = os.listdrives()
|
||||
self.assertIsInstance(drives, list)
|
||||
self.assertSetEqual(
|
||||
self.known_drives,
|
||||
self.known_drives & set(drives),
|
||||
)
|
||||
|
||||
def test_listvolumes(self):
|
||||
volumes = os.listvolumes()
|
||||
self.assertIsInstance(volumes, list)
|
||||
self.assertSetEqual(
|
||||
self.known_volumes,
|
||||
self.known_volumes & set(volumes),
|
||||
)
|
||||
|
||||
def test_listmounts(self):
|
||||
for volume in os.listvolumes():
|
||||
try:
|
||||
mounts = os.listmounts(volume)
|
||||
except OSError as ex:
|
||||
if support.verbose:
|
||||
print("Skipping", volume, "because of", ex)
|
||||
else:
|
||||
self.assertIsInstance(mounts, list)
|
||||
self.assertSetEqual(
|
||||
set(mounts),
|
||||
self.known_mounts & set(mounts),
|
||||
)
|
||||
|
||||
|
||||
@unittest.skipUnless(hasattr(os, 'readlink'), 'needs os.readlink()')
|
||||
class ReadlinkTests(unittest.TestCase):
|
||||
|
|
@ -3116,370 +2852,6 @@ def test_bytes(self):
|
|||
self.assertIsInstance(path, bytes)
|
||||
|
||||
|
||||
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
|
||||
@os_helper.skip_unless_symlink
|
||||
class Win32SymlinkTests(unittest.TestCase):
|
||||
filelink = 'filelinktest'
|
||||
filelink_target = os.path.abspath(__file__)
|
||||
dirlink = 'dirlinktest'
|
||||
dirlink_target = os.path.dirname(filelink_target)
|
||||
missing_link = 'missing link'
|
||||
|
||||
def setUp(self):
|
||||
assert os.path.exists(self.dirlink_target)
|
||||
assert os.path.exists(self.filelink_target)
|
||||
assert not os.path.exists(self.dirlink)
|
||||
assert not os.path.exists(self.filelink)
|
||||
assert not os.path.exists(self.missing_link)
|
||||
|
||||
def tearDown(self):
|
||||
if os.path.exists(self.filelink):
|
||||
os.remove(self.filelink)
|
||||
if os.path.exists(self.dirlink):
|
||||
os.rmdir(self.dirlink)
|
||||
if os.path.lexists(self.missing_link):
|
||||
os.remove(self.missing_link)
|
||||
|
||||
def test_directory_link(self):
|
||||
os.symlink(self.dirlink_target, self.dirlink)
|
||||
self.assertTrue(os.path.exists(self.dirlink))
|
||||
self.assertTrue(os.path.isdir(self.dirlink))
|
||||
self.assertTrue(os.path.islink(self.dirlink))
|
||||
self.check_stat(self.dirlink, self.dirlink_target)
|
||||
|
||||
def test_file_link(self):
|
||||
os.symlink(self.filelink_target, self.filelink)
|
||||
self.assertTrue(os.path.exists(self.filelink))
|
||||
self.assertTrue(os.path.isfile(self.filelink))
|
||||
self.assertTrue(os.path.islink(self.filelink))
|
||||
self.check_stat(self.filelink, self.filelink_target)
|
||||
|
||||
def _create_missing_dir_link(self):
|
||||
'Create a "directory" link to a non-existent target'
|
||||
linkname = self.missing_link
|
||||
if os.path.lexists(linkname):
|
||||
os.remove(linkname)
|
||||
target = r'c:\\target does not exist.29r3c740'
|
||||
assert not os.path.exists(target)
|
||||
target_is_dir = True
|
||||
os.symlink(target, linkname, target_is_dir)
|
||||
|
||||
def test_remove_directory_link_to_missing_target(self):
|
||||
self._create_missing_dir_link()
|
||||
# For compatibility with Unix, os.remove will check the
|
||||
# directory status and call RemoveDirectory if the symlink
|
||||
# was created with target_is_dir==True.
|
||||
os.remove(self.missing_link)
|
||||
|
||||
def test_isdir_on_directory_link_to_missing_target(self):
|
||||
self._create_missing_dir_link()
|
||||
self.assertFalse(os.path.isdir(self.missing_link))
|
||||
|
||||
def test_rmdir_on_directory_link_to_missing_target(self):
|
||||
self._create_missing_dir_link()
|
||||
os.rmdir(self.missing_link)
|
||||
|
||||
def check_stat(self, link, target):
|
||||
self.assertEqual(os.stat(link), os.stat(target))
|
||||
self.assertNotEqual(os.lstat(link), os.stat(link))
|
||||
|
||||
bytes_link = os.fsencode(link)
|
||||
self.assertEqual(os.stat(bytes_link), os.stat(target))
|
||||
self.assertNotEqual(os.lstat(bytes_link), os.stat(bytes_link))
|
||||
|
||||
def test_12084(self):
|
||||
level1 = os.path.abspath(os_helper.TESTFN)
|
||||
level2 = os.path.join(level1, "level2")
|
||||
level3 = os.path.join(level2, "level3")
|
||||
self.addCleanup(os_helper.rmtree, level1)
|
||||
|
||||
os.mkdir(level1)
|
||||
os.mkdir(level2)
|
||||
os.mkdir(level3)
|
||||
|
||||
file1 = os.path.abspath(os.path.join(level1, "file1"))
|
||||
create_file(file1)
|
||||
|
||||
orig_dir = os.getcwd()
|
||||
try:
|
||||
os.chdir(level2)
|
||||
link = os.path.join(level2, "link")
|
||||
os.symlink(os.path.relpath(file1), "link")
|
||||
self.assertIn("link", os.listdir(os.getcwd()))
|
||||
|
||||
# Check os.stat calls from the same dir as the link
|
||||
self.assertEqual(os.stat(file1), os.stat("link"))
|
||||
|
||||
# Check os.stat calls from a dir below the link
|
||||
os.chdir(level1)
|
||||
self.assertEqual(os.stat(file1),
|
||||
os.stat(os.path.relpath(link)))
|
||||
|
||||
# Check os.stat calls from a dir above the link
|
||||
os.chdir(level3)
|
||||
self.assertEqual(os.stat(file1),
|
||||
os.stat(os.path.relpath(link)))
|
||||
finally:
|
||||
os.chdir(orig_dir)
|
||||
|
||||
@unittest.skipUnless(os.path.lexists(r'C:\Users\All Users')
|
||||
and os.path.exists(r'C:\ProgramData'),
|
||||
'Test directories not found')
|
||||
def test_29248(self):
|
||||
# os.symlink() calls CreateSymbolicLink, which creates
|
||||
# the reparse data buffer with the print name stored
|
||||
# first, so the offset is always 0. CreateSymbolicLink
|
||||
# stores the "PrintName" DOS path (e.g. "C:\") first,
|
||||
# with an offset of 0, followed by the "SubstituteName"
|
||||
# NT path (e.g. "\??\C:\"). The "All Users" link, on
|
||||
# the other hand, seems to have been created manually
|
||||
# with an inverted order.
|
||||
target = os.readlink(r'C:\Users\All Users')
|
||||
self.assertTrue(os.path.samefile(target, r'C:\ProgramData'))
|
||||
|
||||
def test_buffer_overflow(self):
|
||||
# Older versions would have a buffer overflow when detecting
|
||||
# whether a link source was a directory. This test ensures we
|
||||
# no longer crash, but does not otherwise validate the behavior
|
||||
segment = 'X' * 27
|
||||
path = os.path.join(*[segment] * 10)
|
||||
test_cases = [
|
||||
# overflow with absolute src
|
||||
('\\' + path, segment),
|
||||
# overflow dest with relative src
|
||||
(segment, path),
|
||||
# overflow when joining src
|
||||
(path[:180], path[:180]),
|
||||
]
|
||||
for src, dest in test_cases:
|
||||
try:
|
||||
os.symlink(src, dest)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
else:
|
||||
try:
|
||||
os.remove(dest)
|
||||
except OSError:
|
||||
pass
|
||||
# Also test with bytes, since that is a separate code path.
|
||||
try:
|
||||
os.symlink(os.fsencode(src), os.fsencode(dest))
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
else:
|
||||
try:
|
||||
os.remove(dest)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
def test_appexeclink(self):
|
||||
root = os.path.expandvars(r'%LOCALAPPDATA%\Microsoft\WindowsApps')
|
||||
if not os.path.isdir(root):
|
||||
self.skipTest("test requires a WindowsApps directory")
|
||||
|
||||
aliases = [os.path.join(root, a)
|
||||
for a in fnmatch.filter(os.listdir(root), '*.exe')]
|
||||
|
||||
for alias in aliases:
|
||||
if support.verbose:
|
||||
print()
|
||||
print("Testing with", alias)
|
||||
st = os.lstat(alias)
|
||||
self.assertEqual(st, os.stat(alias))
|
||||
self.assertFalse(stat.S_ISLNK(st.st_mode))
|
||||
self.assertEqual(st.st_reparse_tag, stat.IO_REPARSE_TAG_APPEXECLINK)
|
||||
self.assertTrue(os.path.isfile(alias))
|
||||
# testing the first one we see is sufficient
|
||||
break
|
||||
else:
|
||||
self.skipTest("test requires an app execution alias")
|
||||
|
||||
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
|
||||
class Win32JunctionTests(unittest.TestCase):
|
||||
junction = 'junctiontest'
|
||||
junction_target = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
def setUp(self):
|
||||
assert os.path.exists(self.junction_target)
|
||||
assert not os.path.lexists(self.junction)
|
||||
|
||||
def tearDown(self):
|
||||
if os.path.lexists(self.junction):
|
||||
os.unlink(self.junction)
|
||||
|
||||
def test_create_junction(self):
|
||||
_winapi.CreateJunction(self.junction_target, self.junction)
|
||||
self.assertTrue(os.path.lexists(self.junction))
|
||||
self.assertTrue(os.path.exists(self.junction))
|
||||
self.assertTrue(os.path.isdir(self.junction))
|
||||
self.assertNotEqual(os.stat(self.junction), os.lstat(self.junction))
|
||||
self.assertEqual(os.stat(self.junction), os.stat(self.junction_target))
|
||||
|
||||
# bpo-37834: Junctions are not recognized as links.
|
||||
self.assertFalse(os.path.islink(self.junction))
|
||||
self.assertEqual(os.path.normcase("\\\\?\\" + self.junction_target),
|
||||
os.path.normcase(os.readlink(self.junction)))
|
||||
|
||||
def test_unlink_removes_junction(self):
|
||||
_winapi.CreateJunction(self.junction_target, self.junction)
|
||||
self.assertTrue(os.path.exists(self.junction))
|
||||
self.assertTrue(os.path.lexists(self.junction))
|
||||
|
||||
os.unlink(self.junction)
|
||||
self.assertFalse(os.path.exists(self.junction))
|
||||
|
||||
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
|
||||
class Win32NtTests(unittest.TestCase):
|
||||
def test_getfinalpathname_handles(self):
|
||||
nt = import_helper.import_module('nt')
|
||||
ctypes = import_helper.import_module('ctypes')
|
||||
# Ruff false positive -- it thinks we're redefining `ctypes` here
|
||||
import ctypes.wintypes # noqa: F811
|
||||
|
||||
kernel = ctypes.WinDLL('Kernel32.dll', use_last_error=True)
|
||||
kernel.GetCurrentProcess.restype = ctypes.wintypes.HANDLE
|
||||
|
||||
kernel.GetProcessHandleCount.restype = ctypes.wintypes.BOOL
|
||||
kernel.GetProcessHandleCount.argtypes = (ctypes.wintypes.HANDLE,
|
||||
ctypes.wintypes.LPDWORD)
|
||||
|
||||
# This is a pseudo-handle that doesn't need to be closed
|
||||
hproc = kernel.GetCurrentProcess()
|
||||
|
||||
handle_count = ctypes.wintypes.DWORD()
|
||||
ok = kernel.GetProcessHandleCount(hproc, ctypes.byref(handle_count))
|
||||
self.assertEqual(1, ok)
|
||||
|
||||
before_count = handle_count.value
|
||||
|
||||
# The first two test the error path, __file__ tests the success path
|
||||
filenames = [
|
||||
r'\\?\C:',
|
||||
r'\\?\NUL',
|
||||
r'\\?\CONIN',
|
||||
__file__,
|
||||
]
|
||||
|
||||
for _ in range(10):
|
||||
for name in filenames:
|
||||
try:
|
||||
nt._getfinalpathname(name)
|
||||
except Exception:
|
||||
# Failure is expected
|
||||
pass
|
||||
try:
|
||||
os.stat(name)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
ok = kernel.GetProcessHandleCount(hproc, ctypes.byref(handle_count))
|
||||
self.assertEqual(1, ok)
|
||||
|
||||
handle_delta = handle_count.value - before_count
|
||||
|
||||
self.assertEqual(0, handle_delta)
|
||||
|
||||
@support.requires_subprocess()
|
||||
def test_stat_unlink_race(self):
|
||||
# bpo-46785: the implementation of os.stat() falls back to reading
|
||||
# the parent directory if CreateFileW() fails with a permission
|
||||
# error. If reading the parent directory fails because the file or
|
||||
# directory are subsequently unlinked, or because the volume or
|
||||
# share are no longer available, then the original permission error
|
||||
# should not be restored.
|
||||
filename = os_helper.TESTFN
|
||||
self.addCleanup(os_helper.unlink, filename)
|
||||
deadline = time.time() + 5
|
||||
command = textwrap.dedent("""\
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
|
||||
filename = sys.argv[1]
|
||||
deadline = float(sys.argv[2])
|
||||
|
||||
while time.time() < deadline:
|
||||
try:
|
||||
with open(filename, "w") as f:
|
||||
pass
|
||||
except OSError:
|
||||
pass
|
||||
try:
|
||||
os.remove(filename)
|
||||
except OSError:
|
||||
pass
|
||||
""")
|
||||
|
||||
with subprocess.Popen([sys.executable, '-c', command, filename, str(deadline)]) as proc:
|
||||
while time.time() < deadline:
|
||||
try:
|
||||
os.stat(filename)
|
||||
except FileNotFoundError as e:
|
||||
assert e.winerror == 2 # ERROR_FILE_NOT_FOUND
|
||||
try:
|
||||
proc.wait(1)
|
||||
except subprocess.TimeoutExpired:
|
||||
proc.terminate()
|
||||
|
||||
@support.requires_subprocess()
|
||||
def test_stat_inaccessible_file(self):
|
||||
filename = os_helper.TESTFN
|
||||
ICACLS = os.path.expandvars(r"%SystemRoot%\System32\icacls.exe")
|
||||
|
||||
with open(filename, "wb") as f:
|
||||
f.write(b'Test data')
|
||||
|
||||
stat1 = os.stat(filename)
|
||||
|
||||
try:
|
||||
# Remove all permissions from the file
|
||||
subprocess.check_output([ICACLS, filename, "/inheritance:r"],
|
||||
stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError as ex:
|
||||
if support.verbose:
|
||||
print(ICACLS, filename, "/inheritance:r", "failed.")
|
||||
print(ex.stdout.decode("oem", "replace").rstrip())
|
||||
try:
|
||||
os.unlink(filename)
|
||||
except OSError:
|
||||
pass
|
||||
self.skipTest("Unable to create inaccessible file")
|
||||
|
||||
def cleanup():
|
||||
# Give delete permission to the owner (us)
|
||||
subprocess.check_output([ICACLS, filename, "/grant", "*WD:(D)"],
|
||||
stderr=subprocess.STDOUT)
|
||||
os.unlink(filename)
|
||||
|
||||
self.addCleanup(cleanup)
|
||||
|
||||
if support.verbose:
|
||||
print("File:", filename)
|
||||
print("stat with access:", stat1)
|
||||
|
||||
# First test - we shouldn't raise here, because we still have access to
|
||||
# the directory and can extract enough information from its metadata.
|
||||
stat2 = os.stat(filename)
|
||||
|
||||
if support.verbose:
|
||||
print(" without access:", stat2)
|
||||
|
||||
# We may not get st_dev/st_ino, so ensure those are 0 or match
|
||||
self.assertIn(stat2.st_dev, (0, stat1.st_dev))
|
||||
self.assertIn(stat2.st_ino, (0, stat1.st_ino))
|
||||
|
||||
# st_mode and st_size should match (for a normal file, at least)
|
||||
self.assertEqual(stat1.st_mode, stat2.st_mode)
|
||||
self.assertEqual(stat1.st_size, stat2.st_size)
|
||||
|
||||
# st_ctime and st_mtime should be the same
|
||||
self.assertEqual(stat1.st_ctime, stat2.st_ctime)
|
||||
self.assertEqual(stat1.st_mtime, stat2.st_mtime)
|
||||
|
||||
# st_atime should be the same or later
|
||||
self.assertGreaterEqual(stat1.st_atime, stat2.st_atime)
|
||||
|
||||
|
||||
@os_helper.skip_unless_symlink
|
||||
class NonLocalSymlinkTests(unittest.TestCase):
|
||||
|
||||
|
|
@ -3825,13 +3197,16 @@ def test_spawnvpe_invalid_env(self):
|
|||
self._test_invalid_env(os.spawnvpe)
|
||||
|
||||
|
||||
# The introduction of this TestCase caused at least two different errors on
|
||||
# *nix buildbots. Temporarily skip this to let the buildbots move along.
|
||||
@unittest.skip("Skip due to platform/environment differences on *NIX buildbots")
|
||||
@unittest.skipUnless(hasattr(os, 'getlogin'), "test needs os.getlogin")
|
||||
class LoginTests(unittest.TestCase):
|
||||
def test_getlogin(self):
|
||||
try:
|
||||
user_name = os.getlogin()
|
||||
except OSError as exc:
|
||||
if exc.errno in (errno.ENOTTY, errno.ENXIO):
|
||||
self.skipTest(str(exc))
|
||||
else:
|
||||
raise
|
||||
self.assertNotEqual(len(user_name), 0)
|
||||
|
||||
|
||||
|
|
@ -4708,6 +4083,7 @@ def test_oserror_filename(self):
|
|||
(self.filenames, os.listdir,),
|
||||
(self.filenames, os.rename, "dst"),
|
||||
(self.filenames, os.replace, "dst"),
|
||||
(self.filenames, os.utime, None),
|
||||
]
|
||||
if os_helper.can_chmod():
|
||||
funcs.append((self.filenames, os.chmod, 0o777))
|
||||
|
|
@ -4748,6 +4124,19 @@ def test_oserror_filename(self):
|
|||
else:
|
||||
self.fail(f"No exception thrown by {func}")
|
||||
|
||||
def test_mkdir(self):
|
||||
filename = os_helper.TESTFN
|
||||
subdir = os.path.join(filename, 'subdir')
|
||||
self.assertRaises(FileNotFoundError, os.mkdir, subdir)
|
||||
|
||||
self.addCleanup(os_helper.unlink, filename)
|
||||
create_file(filename)
|
||||
self.assertRaises(FileExistsError, os.mkdir, filename)
|
||||
|
||||
self.assertRaises((NotADirectoryError, FileNotFoundError),
|
||||
os.mkdir, subdir)
|
||||
|
||||
|
||||
class CPUCountTests(unittest.TestCase):
|
||||
def check_cpu_count(self, cpus):
|
||||
if cpus is None:
|
||||
605
Lib/test/test_os/test_windows.py
Normal file
605
Lib/test/test_os/test_windows.py
Normal file
|
|
@ -0,0 +1,605 @@
|
|||
import sys
|
||||
import unittest
|
||||
|
||||
if sys.platform != "win32":
|
||||
raise unittest.SkipTest("Win32 specific tests")
|
||||
|
||||
import _winapi
|
||||
import fnmatch
|
||||
import mmap
|
||||
import os
|
||||
import shutil
|
||||
import signal
|
||||
import stat
|
||||
import subprocess
|
||||
import textwrap
|
||||
import time
|
||||
import uuid
|
||||
from test import support
|
||||
from test.support import import_helper
|
||||
from test.support import os_helper
|
||||
from .utils import create_file
|
||||
|
||||
|
||||
class Win32KillTests(unittest.TestCase):
|
||||
def _kill(self, sig):
|
||||
# Start sys.executable as a subprocess and communicate from the
|
||||
# subprocess to the parent that the interpreter is ready. When it
|
||||
# becomes ready, send *sig* via os.kill to the subprocess and check
|
||||
# that the return code is equal to *sig*.
|
||||
import ctypes
|
||||
from ctypes import wintypes
|
||||
import msvcrt
|
||||
|
||||
# Since we can't access the contents of the process' stdout until the
|
||||
# process has exited, use PeekNamedPipe to see what's inside stdout
|
||||
# without waiting. This is done so we can tell that the interpreter
|
||||
# is started and running at a point where it could handle a signal.
|
||||
PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe
|
||||
PeekNamedPipe.restype = wintypes.BOOL
|
||||
PeekNamedPipe.argtypes = (wintypes.HANDLE, # Pipe handle
|
||||
ctypes.POINTER(ctypes.c_char), # stdout buf
|
||||
wintypes.DWORD, # Buffer size
|
||||
ctypes.POINTER(wintypes.DWORD), # bytes read
|
||||
ctypes.POINTER(wintypes.DWORD), # bytes avail
|
||||
ctypes.POINTER(wintypes.DWORD)) # bytes left
|
||||
msg = "running"
|
||||
proc = subprocess.Popen([sys.executable, "-c",
|
||||
"import sys;"
|
||||
"sys.stdout.write('{}');"
|
||||
"sys.stdout.flush();"
|
||||
"input()".format(msg)],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
stdin=subprocess.PIPE)
|
||||
self.addCleanup(proc.stdout.close)
|
||||
self.addCleanup(proc.stderr.close)
|
||||
self.addCleanup(proc.stdin.close)
|
||||
|
||||
count, max = 0, 100
|
||||
while count < max and proc.poll() is None:
|
||||
# Create a string buffer to store the result of stdout from the pipe
|
||||
buf = ctypes.create_string_buffer(len(msg))
|
||||
# Obtain the text currently in proc.stdout
|
||||
# Bytes read/avail/left are left as NULL and unused
|
||||
rslt = PeekNamedPipe(msvcrt.get_osfhandle(proc.stdout.fileno()),
|
||||
buf, ctypes.sizeof(buf), None, None, None)
|
||||
self.assertNotEqual(rslt, 0, "PeekNamedPipe failed")
|
||||
if buf.value:
|
||||
self.assertEqual(msg, buf.value.decode())
|
||||
break
|
||||
time.sleep(0.1)
|
||||
count += 1
|
||||
else:
|
||||
self.fail("Did not receive communication from the subprocess")
|
||||
|
||||
os.kill(proc.pid, sig)
|
||||
self.assertEqual(proc.wait(), sig)
|
||||
|
||||
def test_kill_sigterm(self):
|
||||
# SIGTERM doesn't mean anything special, but make sure it works
|
||||
self._kill(signal.SIGTERM)
|
||||
|
||||
def test_kill_int(self):
|
||||
# os.kill on Windows can take an int which gets set as the exit code
|
||||
self._kill(100)
|
||||
|
||||
@unittest.skipIf(mmap is None, "requires mmap")
|
||||
def _kill_with_event(self, event, name):
|
||||
tagname = "test_os_%s" % uuid.uuid1()
|
||||
m = mmap.mmap(-1, 1, tagname)
|
||||
m[0] = 0
|
||||
|
||||
# Run a script which has console control handling enabled.
|
||||
script = os.path.join(os.path.dirname(__file__),
|
||||
"win_console_handler.py")
|
||||
cmd = [sys.executable, script, tagname]
|
||||
proc = subprocess.Popen(cmd,
|
||||
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
|
||||
|
||||
with proc:
|
||||
# Let the interpreter startup before we send signals. See #3137.
|
||||
for _ in support.sleeping_retry(support.SHORT_TIMEOUT):
|
||||
if proc.poll() is None:
|
||||
break
|
||||
else:
|
||||
# Forcefully kill the process if we weren't able to signal it.
|
||||
proc.kill()
|
||||
self.fail("Subprocess didn't finish initialization")
|
||||
|
||||
os.kill(proc.pid, event)
|
||||
|
||||
try:
|
||||
# proc.send_signal(event) could also be done here.
|
||||
# Allow time for the signal to be passed and the process to exit.
|
||||
proc.wait(timeout=support.SHORT_TIMEOUT)
|
||||
except subprocess.TimeoutExpired:
|
||||
# Forcefully kill the process if we weren't able to signal it.
|
||||
proc.kill()
|
||||
self.fail("subprocess did not stop on {}".format(name))
|
||||
|
||||
@unittest.skip("subprocesses aren't inheriting Ctrl+C property")
|
||||
@support.requires_subprocess()
|
||||
def test_CTRL_C_EVENT(self):
|
||||
from ctypes import wintypes
|
||||
import ctypes
|
||||
|
||||
# Make a NULL value by creating a pointer with no argument.
|
||||
NULL = ctypes.POINTER(ctypes.c_int)()
|
||||
SetConsoleCtrlHandler = ctypes.windll.kernel32.SetConsoleCtrlHandler
|
||||
SetConsoleCtrlHandler.argtypes = (ctypes.POINTER(ctypes.c_int),
|
||||
wintypes.BOOL)
|
||||
SetConsoleCtrlHandler.restype = wintypes.BOOL
|
||||
|
||||
# Calling this with NULL and FALSE causes the calling process to
|
||||
# handle Ctrl+C, rather than ignore it. This property is inherited
|
||||
# by subprocesses.
|
||||
SetConsoleCtrlHandler(NULL, 0)
|
||||
|
||||
self._kill_with_event(signal.CTRL_C_EVENT, "CTRL_C_EVENT")
|
||||
|
||||
@support.requires_subprocess()
|
||||
def test_CTRL_BREAK_EVENT(self):
|
||||
self._kill_with_event(signal.CTRL_BREAK_EVENT, "CTRL_BREAK_EVENT")
|
||||
|
||||
|
||||
class Win32ListdirTests(unittest.TestCase):
|
||||
"""Test listdir on Windows."""
|
||||
|
||||
def setUp(self):
|
||||
self.created_paths = []
|
||||
for i in range(2):
|
||||
dir_name = 'SUB%d' % i
|
||||
dir_path = os.path.join(os_helper.TESTFN, dir_name)
|
||||
file_name = 'FILE%d' % i
|
||||
file_path = os.path.join(os_helper.TESTFN, file_name)
|
||||
os.makedirs(dir_path)
|
||||
with open(file_path, 'w', encoding='utf-8') as f:
|
||||
f.write("I'm %s and proud of it. Blame test_os.\n" % file_path)
|
||||
self.created_paths.extend([dir_name, file_name])
|
||||
self.created_paths.sort()
|
||||
|
||||
def tearDown(self):
|
||||
shutil.rmtree(os_helper.TESTFN)
|
||||
|
||||
def test_listdir_no_extended_path(self):
|
||||
"""Test when the path is not an "extended" path."""
|
||||
# unicode
|
||||
self.assertEqual(
|
||||
sorted(os.listdir(os_helper.TESTFN)),
|
||||
self.created_paths)
|
||||
|
||||
# bytes
|
||||
self.assertEqual(
|
||||
sorted(os.listdir(os.fsencode(os_helper.TESTFN))),
|
||||
[os.fsencode(path) for path in self.created_paths])
|
||||
|
||||
def test_listdir_extended_path(self):
|
||||
"""Test when the path starts with '\\\\?\\'."""
|
||||
# See: http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
|
||||
# unicode
|
||||
path = '\\\\?\\' + os.path.abspath(os_helper.TESTFN)
|
||||
self.assertEqual(
|
||||
sorted(os.listdir(path)),
|
||||
self.created_paths)
|
||||
|
||||
# bytes
|
||||
path = b'\\\\?\\' + os.fsencode(os.path.abspath(os_helper.TESTFN))
|
||||
self.assertEqual(
|
||||
sorted(os.listdir(path)),
|
||||
[os.fsencode(path) for path in self.created_paths])
|
||||
|
||||
|
||||
@unittest.skipUnless(os.name == "nt", "NT specific tests")
|
||||
class Win32ListdriveTests(unittest.TestCase):
|
||||
"""Test listdrive, listmounts and listvolume on Windows."""
|
||||
|
||||
def setUp(self):
|
||||
# Get drives and volumes from fsutil
|
||||
out = subprocess.check_output(
|
||||
["fsutil.exe", "volume", "list"],
|
||||
cwd=os.path.join(os.getenv("SystemRoot", "\\Windows"), "System32"),
|
||||
encoding="mbcs",
|
||||
errors="ignore",
|
||||
)
|
||||
lines = out.splitlines()
|
||||
self.known_volumes = {l for l in lines if l.startswith('\\\\?\\')}
|
||||
self.known_drives = {l for l in lines if l[1:] == ':\\'}
|
||||
self.known_mounts = {l for l in lines if l[1:3] == ':\\'}
|
||||
|
||||
def test_listdrives(self):
|
||||
drives = os.listdrives()
|
||||
self.assertIsInstance(drives, list)
|
||||
self.assertSetEqual(
|
||||
self.known_drives,
|
||||
self.known_drives & set(drives),
|
||||
)
|
||||
|
||||
def test_listvolumes(self):
|
||||
volumes = os.listvolumes()
|
||||
self.assertIsInstance(volumes, list)
|
||||
self.assertSetEqual(
|
||||
self.known_volumes,
|
||||
self.known_volumes & set(volumes),
|
||||
)
|
||||
|
||||
def test_listmounts(self):
|
||||
for volume in os.listvolumes():
|
||||
try:
|
||||
mounts = os.listmounts(volume)
|
||||
except OSError as ex:
|
||||
if support.verbose:
|
||||
print("Skipping", volume, "because of", ex)
|
||||
else:
|
||||
self.assertIsInstance(mounts, list)
|
||||
self.assertSetEqual(
|
||||
set(mounts),
|
||||
self.known_mounts & set(mounts),
|
||||
)
|
||||
|
||||
|
||||
@os_helper.skip_unless_symlink
|
||||
class Win32SymlinkTests(unittest.TestCase):
|
||||
filelink = 'filelinktest'
|
||||
filelink_target = os.path.abspath(__file__)
|
||||
dirlink = 'dirlinktest'
|
||||
dirlink_target = os.path.dirname(filelink_target)
|
||||
missing_link = 'missing link'
|
||||
|
||||
def setUp(self):
|
||||
assert os.path.exists(self.dirlink_target)
|
||||
assert os.path.exists(self.filelink_target)
|
||||
assert not os.path.exists(self.dirlink)
|
||||
assert not os.path.exists(self.filelink)
|
||||
assert not os.path.exists(self.missing_link)
|
||||
|
||||
def tearDown(self):
|
||||
if os.path.exists(self.filelink):
|
||||
os.remove(self.filelink)
|
||||
if os.path.exists(self.dirlink):
|
||||
os.rmdir(self.dirlink)
|
||||
if os.path.lexists(self.missing_link):
|
||||
os.remove(self.missing_link)
|
||||
|
||||
def test_directory_link(self):
|
||||
os.symlink(self.dirlink_target, self.dirlink)
|
||||
self.assertTrue(os.path.exists(self.dirlink))
|
||||
self.assertTrue(os.path.isdir(self.dirlink))
|
||||
self.assertTrue(os.path.islink(self.dirlink))
|
||||
self.check_stat(self.dirlink, self.dirlink_target)
|
||||
|
||||
def test_file_link(self):
|
||||
os.symlink(self.filelink_target, self.filelink)
|
||||
self.assertTrue(os.path.exists(self.filelink))
|
||||
self.assertTrue(os.path.isfile(self.filelink))
|
||||
self.assertTrue(os.path.islink(self.filelink))
|
||||
self.check_stat(self.filelink, self.filelink_target)
|
||||
|
||||
def _create_missing_dir_link(self):
|
||||
'Create a "directory" link to a non-existent target'
|
||||
linkname = self.missing_link
|
||||
if os.path.lexists(linkname):
|
||||
os.remove(linkname)
|
||||
target = r'c:\\target does not exist.29r3c740'
|
||||
assert not os.path.exists(target)
|
||||
target_is_dir = True
|
||||
os.symlink(target, linkname, target_is_dir)
|
||||
|
||||
def test_remove_directory_link_to_missing_target(self):
|
||||
self._create_missing_dir_link()
|
||||
# For compatibility with Unix, os.remove will check the
|
||||
# directory status and call RemoveDirectory if the symlink
|
||||
# was created with target_is_dir==True.
|
||||
os.remove(self.missing_link)
|
||||
|
||||
def test_isdir_on_directory_link_to_missing_target(self):
|
||||
self._create_missing_dir_link()
|
||||
self.assertFalse(os.path.isdir(self.missing_link))
|
||||
|
||||
def test_rmdir_on_directory_link_to_missing_target(self):
|
||||
self._create_missing_dir_link()
|
||||
os.rmdir(self.missing_link)
|
||||
|
||||
def check_stat(self, link, target):
|
||||
self.assertEqual(os.stat(link), os.stat(target))
|
||||
self.assertNotEqual(os.lstat(link), os.stat(link))
|
||||
|
||||
bytes_link = os.fsencode(link)
|
||||
self.assertEqual(os.stat(bytes_link), os.stat(target))
|
||||
self.assertNotEqual(os.lstat(bytes_link), os.stat(bytes_link))
|
||||
|
||||
def test_12084(self):
|
||||
level1 = os.path.abspath(os_helper.TESTFN)
|
||||
level2 = os.path.join(level1, "level2")
|
||||
level3 = os.path.join(level2, "level3")
|
||||
self.addCleanup(os_helper.rmtree, level1)
|
||||
|
||||
os.mkdir(level1)
|
||||
os.mkdir(level2)
|
||||
os.mkdir(level3)
|
||||
|
||||
file1 = os.path.abspath(os.path.join(level1, "file1"))
|
||||
create_file(file1)
|
||||
|
||||
orig_dir = os.getcwd()
|
||||
try:
|
||||
os.chdir(level2)
|
||||
link = os.path.join(level2, "link")
|
||||
os.symlink(os.path.relpath(file1), "link")
|
||||
self.assertIn("link", os.listdir(os.getcwd()))
|
||||
|
||||
# Check os.stat calls from the same dir as the link
|
||||
self.assertEqual(os.stat(file1), os.stat("link"))
|
||||
|
||||
# Check os.stat calls from a dir below the link
|
||||
os.chdir(level1)
|
||||
self.assertEqual(os.stat(file1),
|
||||
os.stat(os.path.relpath(link)))
|
||||
|
||||
# Check os.stat calls from a dir above the link
|
||||
os.chdir(level3)
|
||||
self.assertEqual(os.stat(file1),
|
||||
os.stat(os.path.relpath(link)))
|
||||
finally:
|
||||
os.chdir(orig_dir)
|
||||
|
||||
@unittest.skipUnless(os.path.lexists(r'C:\Users\All Users')
|
||||
and os.path.exists(r'C:\ProgramData'),
|
||||
'Test directories not found')
|
||||
def test_29248(self):
|
||||
# os.symlink() calls CreateSymbolicLink, which creates
|
||||
# the reparse data buffer with the print name stored
|
||||
# first, so the offset is always 0. CreateSymbolicLink
|
||||
# stores the "PrintName" DOS path (e.g. "C:\") first,
|
||||
# with an offset of 0, followed by the "SubstituteName"
|
||||
# NT path (e.g. "\??\C:\"). The "All Users" link, on
|
||||
# the other hand, seems to have been created manually
|
||||
# with an inverted order.
|
||||
target = os.readlink(r'C:\Users\All Users')
|
||||
self.assertTrue(os.path.samefile(target, r'C:\ProgramData'))
|
||||
|
||||
def test_buffer_overflow(self):
|
||||
# Older versions would have a buffer overflow when detecting
|
||||
# whether a link source was a directory. This test ensures we
|
||||
# no longer crash, but does not otherwise validate the behavior
|
||||
segment = 'X' * 27
|
||||
path = os.path.join(*[segment] * 10)
|
||||
test_cases = [
|
||||
# overflow with absolute src
|
||||
('\\' + path, segment),
|
||||
# overflow dest with relative src
|
||||
(segment, path),
|
||||
# overflow when joining src
|
||||
(path[:180], path[:180]),
|
||||
]
|
||||
for src, dest in test_cases:
|
||||
try:
|
||||
os.symlink(src, dest)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
else:
|
||||
try:
|
||||
os.remove(dest)
|
||||
except OSError:
|
||||
pass
|
||||
# Also test with bytes, since that is a separate code path.
|
||||
try:
|
||||
os.symlink(os.fsencode(src), os.fsencode(dest))
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
else:
|
||||
try:
|
||||
os.remove(dest)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
def test_appexeclink(self):
|
||||
root = os.path.expandvars(r'%LOCALAPPDATA%\Microsoft\WindowsApps')
|
||||
if not os.path.isdir(root):
|
||||
self.skipTest("test requires a WindowsApps directory")
|
||||
|
||||
aliases = [os.path.join(root, a)
|
||||
for a in fnmatch.filter(os.listdir(root), '*.exe')]
|
||||
|
||||
for alias in aliases:
|
||||
if support.verbose:
|
||||
print()
|
||||
print("Testing with", alias)
|
||||
st = os.lstat(alias)
|
||||
self.assertEqual(st, os.stat(alias))
|
||||
self.assertFalse(stat.S_ISLNK(st.st_mode))
|
||||
self.assertEqual(st.st_reparse_tag, stat.IO_REPARSE_TAG_APPEXECLINK)
|
||||
self.assertTrue(os.path.isfile(alias))
|
||||
# testing the first one we see is sufficient
|
||||
break
|
||||
else:
|
||||
self.skipTest("test requires an app execution alias")
|
||||
|
||||
|
||||
class Win32JunctionTests(unittest.TestCase):
|
||||
junction = 'junctiontest'
|
||||
junction_target = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
def setUp(self):
|
||||
assert os.path.exists(self.junction_target)
|
||||
assert not os.path.lexists(self.junction)
|
||||
|
||||
def tearDown(self):
|
||||
if os.path.lexists(self.junction):
|
||||
os.unlink(self.junction)
|
||||
|
||||
def test_create_junction(self):
|
||||
_winapi.CreateJunction(self.junction_target, self.junction)
|
||||
self.assertTrue(os.path.lexists(self.junction))
|
||||
self.assertTrue(os.path.exists(self.junction))
|
||||
self.assertTrue(os.path.isdir(self.junction))
|
||||
self.assertNotEqual(os.stat(self.junction), os.lstat(self.junction))
|
||||
self.assertEqual(os.stat(self.junction), os.stat(self.junction_target))
|
||||
|
||||
# bpo-37834: Junctions are not recognized as links.
|
||||
self.assertFalse(os.path.islink(self.junction))
|
||||
self.assertEqual(os.path.normcase("\\\\?\\" + self.junction_target),
|
                         os.path.normcase(os.readlink(self.junction)))

    def test_unlink_removes_junction(self):
        _winapi.CreateJunction(self.junction_target, self.junction)
        self.assertTrue(os.path.exists(self.junction))
        self.assertTrue(os.path.lexists(self.junction))

        os.unlink(self.junction)
        self.assertFalse(os.path.exists(self.junction))


class Win32NtTests(unittest.TestCase):
    def test_getfinalpathname_handles(self):
        nt = import_helper.import_module('nt')
        ctypes = import_helper.import_module('ctypes')
        # Ruff false positive -- it thinks we're redefining `ctypes` here
        import ctypes.wintypes  # noqa: F811

        kernel = ctypes.WinDLL('Kernel32.dll', use_last_error=True)
        kernel.GetCurrentProcess.restype = ctypes.wintypes.HANDLE

        kernel.GetProcessHandleCount.restype = ctypes.wintypes.BOOL
        kernel.GetProcessHandleCount.argtypes = (ctypes.wintypes.HANDLE,
                                                 ctypes.wintypes.LPDWORD)

        # This is a pseudo-handle that doesn't need to be closed
        hproc = kernel.GetCurrentProcess()

        handle_count = ctypes.wintypes.DWORD()
        ok = kernel.GetProcessHandleCount(hproc, ctypes.byref(handle_count))
        self.assertEqual(1, ok)

        before_count = handle_count.value

        # The first two test the error path, __file__ tests the success path
        filenames = [
            r'\\?\C:',
            r'\\?\NUL',
            r'\\?\CONIN',
            __file__,
        ]

        for _ in range(10):
            for name in filenames:
                try:
                    nt._getfinalpathname(name)
                except Exception:
                    # Failure is expected
                    pass
                try:
                    os.stat(name)
                except Exception:
                    pass

        ok = kernel.GetProcessHandleCount(hproc, ctypes.byref(handle_count))
        self.assertEqual(1, ok)

        handle_delta = handle_count.value - before_count

        self.assertEqual(0, handle_delta)

    @support.requires_subprocess()
    def test_stat_unlink_race(self):
        # bpo-46785: the implementation of os.stat() falls back to reading
        # the parent directory if CreateFileW() fails with a permission
        # error. If reading the parent directory fails because the file or
        # directory is subsequently unlinked, or because the volume or
        # share is no longer available, then the original permission error
        # should not be restored.
        filename = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, filename)
        deadline = time.time() + 5
        command = textwrap.dedent("""\
            import os
            import sys
            import time

            filename = sys.argv[1]
            deadline = float(sys.argv[2])

            while time.time() < deadline:
                try:
                    with open(filename, "w") as f:
                        pass
                except OSError:
                    pass
                try:
                    os.remove(filename)
                except OSError:
                    pass
            """)

        with subprocess.Popen([sys.executable, '-c', command, filename, str(deadline)]) as proc:
            while time.time() < deadline:
                try:
                    os.stat(filename)
                except FileNotFoundError as e:
                    assert e.winerror == 2  # ERROR_FILE_NOT_FOUND
            try:
                proc.wait(1)
            except subprocess.TimeoutExpired:
                proc.terminate()

    @support.requires_subprocess()
    def test_stat_inaccessible_file(self):
        filename = os_helper.TESTFN
        ICACLS = os.path.expandvars(r"%SystemRoot%\System32\icacls.exe")

        with open(filename, "wb") as f:
            f.write(b'Test data')

        stat1 = os.stat(filename)

        try:
            # Remove all permissions from the file
            subprocess.check_output([ICACLS, filename, "/inheritance:r"],
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as ex:
            if support.verbose:
                print(ICACLS, filename, "/inheritance:r", "failed.")
                print(ex.stdout.decode("oem", "replace").rstrip())
            try:
                os.unlink(filename)
            except OSError:
                pass
            self.skipTest("Unable to create inaccessible file")

        def cleanup():
            # Give delete permission to the owner (us)
            subprocess.check_output([ICACLS, filename, "/grant", "*WD:(D)"],
                                    stderr=subprocess.STDOUT)
            os.unlink(filename)

        self.addCleanup(cleanup)

        if support.verbose:
            print("File:", filename)
            print("stat with access:", stat1)

        # First test - we shouldn't raise here, because we still have access to
        # the directory and can extract enough information from its metadata.
        stat2 = os.stat(filename)

        if support.verbose:
            print(" without access:", stat2)

        # We may not get st_dev/st_ino, so ensure those are 0 or match
        self.assertIn(stat2.st_dev, (0, stat1.st_dev))
        self.assertIn(stat2.st_ino, (0, stat1.st_ino))

        # st_mode and st_size should match (for a normal file, at least)
        self.assertEqual(stat1.st_mode, stat2.st_mode)
        self.assertEqual(stat1.st_size, stat2.st_size)

        # st_ctime and st_mtime should be the same
        self.assertEqual(stat1.st_ctime, stat2.st_ctime)
        self.assertEqual(stat1.st_mtime, stat2.st_mtime)

        # st_atime should be the same or later
        self.assertGreaterEqual(stat1.st_atime, stat2.st_atime)


if __name__ == "__main__":
    unittest.main()
Lib/test/test_os/utils.py (new file)

@@ -0,0 +1,3 @@
def create_file(filename, content=b'content'):
    with open(filename, "xb", 0) as fp:
        fp.write(content)
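A quick usage sketch of the helper above (the second path is illustrative, not from the commit). Note that mode "xb" makes creation exclusive, so calling it twice with the same path raises FileExistsError:

from test.support import os_helper
from test.test_os.utils import create_file

create_file(os_helper.TESTFN)                  # writes the default b'content'
create_file(os_helper.TESTFN + '_2', b'data')  # hypothetical second file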
@@ -2,6 +2,7 @@
import contextlib
import io
import json
import marshal
import os
import shutil

@@ -17,6 +18,7 @@
    CollapsedStackCollector,
    FlamegraphCollector,
)
from profiling.sampling.gecko_collector import GeckoCollector

from test.support.os_helper import unlink
from test.support import force_not_colorized_test_class, SHORT_TIMEOUT

@@ -527,6 +529,142 @@ def test_flamegraph_collector_export(self):
        self.assertIn('"value":', content)
        self.assertIn('"children":', content)

    def test_gecko_collector_basic(self):
        """Test basic GeckoCollector functionality."""
        collector = GeckoCollector()

        # Test empty state
        self.assertEqual(len(collector.threads), 0)
        self.assertEqual(collector.sample_count, 0)
        self.assertEqual(len(collector.global_strings), 1)  # "(root)"

        # Test collecting sample data
        test_frames = [
            MockInterpreterInfo(
                0,
                [MockThreadInfo(
                    1,
                    [("file.py", 10, "func1"), ("file.py", 20, "func2")],
                )]
            )
        ]
        collector.collect(test_frames)

        # Should have recorded one thread and one sample
        self.assertEqual(len(collector.threads), 1)
        self.assertEqual(collector.sample_count, 1)
        self.assertIn(1, collector.threads)

        profile_data = collector._build_profile()

        # Verify profile structure
        self.assertIn("meta", profile_data)
        self.assertIn("threads", profile_data)
        self.assertIn("shared", profile_data)

        # Check shared string table
        shared = profile_data["shared"]
        self.assertIn("stringArray", shared)
        string_array = shared["stringArray"]
        self.assertGreater(len(string_array), 0)

        # Should contain our functions in the string array
        self.assertIn("func1", string_array)
        self.assertIn("func2", string_array)

        # Check thread data structure
        threads = profile_data["threads"]
        self.assertEqual(len(threads), 1)
        thread_data = threads[0]

        # Verify thread structure
        self.assertIn("samples", thread_data)
        self.assertIn("funcTable", thread_data)
        self.assertIn("frameTable", thread_data)
        self.assertIn("stackTable", thread_data)

        # Verify samples
        samples = thread_data["samples"]
        self.assertEqual(len(samples["stack"]), 1)
        self.assertEqual(len(samples["time"]), 1)
        self.assertEqual(samples["length"], 1)

        # Verify function table structure and content
        func_table = thread_data["funcTable"]
        self.assertIn("name", func_table)
        self.assertIn("fileName", func_table)
        self.assertIn("lineNumber", func_table)
        self.assertEqual(func_table["length"], 2)  # Should have 2 functions

        # Verify actual function content through string array indices
        func_names = []
        for idx in func_table["name"]:
            func_name = string_array[idx] if isinstance(idx, int) and 0 <= idx < len(string_array) else str(idx)
            func_names.append(func_name)

        self.assertIn("func1", func_names, f"func1 not found in {func_names}")
        self.assertIn("func2", func_names, f"func2 not found in {func_names}")

        # Verify frame table
        frame_table = thread_data["frameTable"]
        self.assertEqual(frame_table["length"], 2)  # Should have frames for both functions
        self.assertEqual(len(frame_table["func"]), 2)

        # Verify stack structure
        stack_table = thread_data["stackTable"]
        self.assertGreater(stack_table["length"], 0)
        self.assertGreater(len(stack_table["frame"]), 0)

    def test_gecko_collector_export(self):
        """Test Gecko profile export functionality."""
        gecko_out = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
        self.addCleanup(close_and_unlink, gecko_out)

        collector = GeckoCollector()

        test_frames1 = [
            MockInterpreterInfo(0, [MockThreadInfo(1, [("file.py", 10, "func1"), ("file.py", 20, "func2")])])
        ]
        test_frames2 = [
            MockInterpreterInfo(0, [MockThreadInfo(1, [("file.py", 10, "func1"), ("file.py", 20, "func2")])])
        ]  # Same stack
        test_frames3 = [MockInterpreterInfo(0, [MockThreadInfo(1, [("other.py", 5, "other_func")])])]

        collector.collect(test_frames1)
        collector.collect(test_frames2)
        collector.collect(test_frames3)

        # Export gecko profile
        with (captured_stdout(), captured_stderr()):
            collector.export(gecko_out.name)

        # Verify file was created and contains valid data
        self.assertTrue(os.path.exists(gecko_out.name))
        self.assertGreater(os.path.getsize(gecko_out.name), 0)

        # Check file contains valid JSON
        with open(gecko_out.name, "r") as f:
            profile_data = json.load(f)

        # Should be valid Gecko profile format
        self.assertIn("meta", profile_data)
        self.assertIn("threads", profile_data)
        self.assertIn("shared", profile_data)

        # Check meta information
        self.assertIn("categories", profile_data["meta"])
        self.assertIn("interval", profile_data["meta"])

        # Check shared string table
        self.assertIn("stringArray", profile_data["shared"])
        self.assertGreater(len(profile_data["shared"]["stringArray"]), 0)

        # Should contain our functions
        string_array = profile_data["shared"]["stringArray"]
        self.assertIn("func1", string_array)
        self.assertIn("func2", string_array)
        self.assertIn("other_func", string_array)

    def test_pstats_collector_export(self):
        collector = PstatsCollector(
            sample_interval_usec=1000000

@@ -1919,7 +2057,7 @@ def test_esrch_signal_handling(self):

    def test_valid_output_formats(self):
        """Test that all valid output formats are accepted."""
        valid_formats = ["pstats", "collapsed", "flamegraph"]
        valid_formats = ["pstats", "collapsed", "flamegraph", "gecko"]

        tempdir = tempfile.TemporaryDirectory(delete=False)
        self.addCleanup(shutil.rmtree, tempdir.name)
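As a rough sketch of what the exported file looks like, one can load the JSON and inspect the same keys the tests above assert on (the file name is a hypothetical output of collector.export()):

import json

with open("gecko_profile.json") as f:       # hypothetical export path
    profile = json.load(f)

print(profile["meta"]["interval"])          # sampling interval from the profile metadata
print(profile["shared"]["stringArray"])     # shared string table holding the function names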
@@ -3,6 +3,7 @@
import itertools
import os
import pathlib
import pkgutil
import re
import rlcompleter
import select

@@ -1131,6 +1132,8 @@ def test_already_imported_custom_module_no_other_suggestions(self):
        (dir2 / "mymodule").mkdir()
        (dir2 / "mymodule" / "__init__.py").touch()
        (dir2 / "mymodule" / "bar.py").touch()
        # Purge FileFinder cache after adding files
        pkgutil.get_importer(_dir2).invalidate_caches()
        # mymodule found in dir2 before dir1, but it was already imported
        # from dir1 -> suggest dir1 submodules only
        events = code_to_events("import mymodule.\t\n")

@@ -1139,9 +1142,6 @@ def test_already_imported_custom_module_no_other_suggestions(self):
        self.assertEqual(output, "import mymodule.foo")

        del sys.modules["mymodule"]
        print(f"{dir1=}, {dir2=}")  # TEMPORARY -- debugging tests on windows
        print(f"{[p.relative_to(dir1) for p in dir1.glob("**")]=}")  # TEMPORARY -- debugging tests on windows
        print(f"{[p.relative_to(dir2) for p in dir2.glob("**")]=}")  # TEMPORARY -- debugging tests on windows
        # mymodule not imported anymore -> suggest dir2 submodules
        events = code_to_events("import mymodule.\t\n")
        reader = self.prepare_reader(events, namespace={})
@@ -6,6 +6,7 @@
import sys
import unittest
from functools import partial
from test import support
from test.support import os_helper, force_not_colorized_test_class
from test.support import script_helper

@@ -359,7 +360,8 @@ def test_repl_eio(self):
            self.fail("Child process failed to start properly")

        os.kill(proc.pid, signal.SIGUSR1)
        _, err = proc.communicate(timeout=5)  # sleep for pty to settle
        # sleep for pty to settle
        _, err = proc.communicate(timeout=support.SHORT_TIMEOUT)
        self.assertEqual(
            proc.returncode,
            1,
@@ -4346,8 +4346,14 @@ def test_client_sigalgs_mismatch(self):
        client_context.set_client_sigalgs("rsa_pss_rsae_sha256")
        server_context.set_client_sigalgs("rsa_pss_rsae_sha384")

        # Some systems return ConnectionResetError on handshake failures
        with self.assertRaises((ssl.SSLError, ConnectionResetError)):
        with self.assertRaises((
            ssl.SSLError,
            # On handshake failures, some systems raise a ConnectionResetError.
            ConnectionResetError,
            # On handshake failures, macOS may raise a BrokenPipeError.
            # See https://github.com/python/cpython/issues/139504.
            BrokenPipeError,
        )):
            server_params_test(client_context, server_context,
                               chatty=True, connectionchatty=True,
                               sni_name=hostname)
@@ -96,8 +96,12 @@ def test_get_attribute(self):
                          self.test_get_attribute)
        self.assertRaises(unittest.SkipTest, support.get_attribute, self, "foo")

    @unittest.skip("failing buildbots")
    def test_get_original_stdout(self):
        if isinstance(sys.stdout, io.StringIO):
            # gh-55258: When --junit-xml is used, stdout is a StringIO:
            # use sys.__stdout__ in this case.
            self.assertEqual(support.get_original_stdout(), sys.__stdout__)
        else:
            self.assertEqual(support.get_original_stdout(), sys.stdout)

    def test_unload(self):
@@ -35,7 +35,7 @@
from typing import dataclass_transform
from typing import no_type_check, no_type_check_decorator
from typing import Type
from typing import NamedTuple, NotRequired, Required, ReadOnly, TypedDict
from typing import NamedTuple, NotRequired, Required, ReadOnly, TypedDict, NoExtraItems
from typing import IO, TextIO, BinaryIO
from typing import Pattern, Match
from typing import Annotated, ForwardRef

@@ -8820,6 +8820,32 @@ class ChildWithInlineAndOptional(Untotal, Inline):
                class Wrong(*bases):
                    pass

    def test_closed_values(self):
        class Implicit(TypedDict): ...
        class ExplicitTrue(TypedDict, closed=True): ...
        class ExplicitFalse(TypedDict, closed=False): ...

        self.assertIsNone(Implicit.__closed__)
        self.assertIs(ExplicitTrue.__closed__, True)
        self.assertIs(ExplicitFalse.__closed__, False)

    def test_extra_items_class_arg(self):
        class TD(TypedDict, extra_items=int):
            a: str

        self.assertIs(TD.__extra_items__, int)
        self.assertEqual(TD.__annotations__, {'a': str})
        self.assertEqual(TD.__required_keys__, frozenset({'a'}))
        self.assertEqual(TD.__optional_keys__, frozenset())

        class NoExtra(TypedDict):
            a: str

        self.assertIs(NoExtra.__extra_items__, NoExtraItems)
        self.assertEqual(NoExtra.__annotations__, {'a': str})
        self.assertEqual(NoExtra.__required_keys__, frozenset({'a'}))
        self.assertEqual(NoExtra.__optional_keys__, frozenset())

    def test_is_typeddict(self):
        self.assertIs(is_typeddict(Point2D), True)
        self.assertIs(is_typeddict(Union[str, int]), False)

@@ -9147,6 +9173,71 @@ class AllTheThings(TypedDict):
            },
        )

    def test_closed_inheritance(self):
        class Base(TypedDict, extra_items=ReadOnly[Union[str, None]]):
            a: int

        self.assertEqual(Base.__required_keys__, frozenset({"a"}))
        self.assertEqual(Base.__optional_keys__, frozenset({}))
        self.assertEqual(Base.__readonly_keys__, frozenset({}))
        self.assertEqual(Base.__mutable_keys__, frozenset({"a"}))
        self.assertEqual(Base.__annotations__, {"a": int})
        self.assertEqual(Base.__extra_items__, ReadOnly[Union[str, None]])
        self.assertIsNone(Base.__closed__)

        class Child(Base, extra_items=int):
            a: str

        self.assertEqual(Child.__required_keys__, frozenset({'a'}))
        self.assertEqual(Child.__optional_keys__, frozenset({}))
        self.assertEqual(Child.__readonly_keys__, frozenset({}))
        self.assertEqual(Child.__mutable_keys__, frozenset({'a'}))
        self.assertEqual(Child.__annotations__, {"a": str})
        self.assertIs(Child.__extra_items__, int)
        self.assertIsNone(Child.__closed__)

        class GrandChild(Child, closed=True):
            a: float

        self.assertEqual(GrandChild.__required_keys__, frozenset({'a'}))
        self.assertEqual(GrandChild.__optional_keys__, frozenset({}))
        self.assertEqual(GrandChild.__readonly_keys__, frozenset({}))
        self.assertEqual(GrandChild.__mutable_keys__, frozenset({'a'}))
        self.assertEqual(GrandChild.__annotations__, {"a": float})
        self.assertIs(GrandChild.__extra_items__, NoExtraItems)
        self.assertIs(GrandChild.__closed__, True)

        class GrandGrandChild(GrandChild):
            ...
        self.assertEqual(GrandGrandChild.__required_keys__, frozenset({'a'}))
        self.assertEqual(GrandGrandChild.__optional_keys__, frozenset({}))
        self.assertEqual(GrandGrandChild.__readonly_keys__, frozenset({}))
        self.assertEqual(GrandGrandChild.__mutable_keys__, frozenset({'a'}))
        self.assertEqual(GrandGrandChild.__annotations__, {"a": float})
        self.assertIs(GrandGrandChild.__extra_items__, NoExtraItems)
        self.assertIsNone(GrandGrandChild.__closed__)

    def test_implicit_extra_items(self):
        class Base(TypedDict):
            a: int

        self.assertIs(Base.__extra_items__, NoExtraItems)
        self.assertIsNone(Base.__closed__)

        class ChildA(Base, closed=True):
            ...

        self.assertEqual(ChildA.__extra_items__, NoExtraItems)
        self.assertIs(ChildA.__closed__, True)

    def test_cannot_combine_closed_and_extra_items(self):
        with self.assertRaisesRegex(
            TypeError,
            "Cannot combine closed=True and extra_items"
        ):
            class TD(TypedDict, closed=True, extra_items=range):
                x: str

    def test_annotations(self):
        # _type_check is applied
        with self.assertRaisesRegex(TypeError, "Plain typing.Final is not valid as type argument"):

@@ -9376,6 +9467,12 @@ class A(typing.Match):
                class B(typing.Pattern):
                    pass

    def test_typed_dict_signature(self):
        self.assertListEqual(
            list(inspect.signature(TypedDict).parameters),
            ['typename', 'fields', 'total', 'closed', 'extra_items']
        )


class AnnotatedTests(BaseTestCase):
@@ -218,27 +218,6 @@ def test_custom_headers(self):
        opener.open(request)
        self.assertEqual(request.get_header('User-agent'),'Test-Agent')

    @unittest.skip('XXX: http://www.imdb.com is gone')
    def test_sites_no_connection_close(self):
        # Some sites do not send Connection: close header.
        # Verify that those work properly. (#issue12576)

        URL = 'http://www.imdb.com'  # mangles Connection:close

        with socket_helper.transient_internet(URL):
            try:
                with urllib.request.urlopen(URL) as res:
                    pass
            except ValueError:
                self.fail("urlopen failed for site not sending \
Connection:close")
            else:
                self.assertTrue(res)

            req = urllib.request.urlopen(URL)
            res = req.read()
            self.assertTrue(res)

    def _test_urls(self, urls, handlers, retry=True):
        import time
        import logging
@@ -141,6 +141,7 @@
    'no_type_check',
    'no_type_check_decorator',
    'NoDefault',
    'NoExtraItems',
    'NoReturn',
    'NotRequired',
    'overload',
@@ -3063,6 +3064,33 @@ def _namedtuple_mro_entries(bases):
NamedTuple.__mro_entries__ = _namedtuple_mro_entries


class _SingletonMeta(type):
    def __setattr__(cls, attr, value):
        # TypeError is consistent with the behavior of NoneType
        raise TypeError(
            f"cannot set {attr!r} attribute of immutable type {cls.__name__!r}"
        )


class _NoExtraItemsType(metaclass=_SingletonMeta):
    """The type of the NoExtraItems singleton."""

    __slots__ = ()

    def __new__(cls):
        return globals().get("NoExtraItems") or object.__new__(cls)

    def __repr__(self):
        return 'typing.NoExtraItems'

    def __reduce__(self):
        return 'NoExtraItems'

NoExtraItems = _NoExtraItemsType()
del _NoExtraItemsType
del _SingletonMeta


def _get_typeddict_qualifiers(annotation_type):
    while True:
        annotation_origin = get_origin(annotation_type)
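A minimal sketch of what this singleton machinery buys, assuming an interpreter built with this patch:

import copy
import pickle
from typing import NoExtraItems

# __reduce__ returns the bare name, so pickling and copying
# round-trip to the very same object.
assert pickle.loads(pickle.dumps(NoExtraItems)) is NoExtraItems
assert copy.deepcopy(NoExtraItems) is NoExtraItems

# _SingletonMeta rejects mutation of the type, mirroring NoneType.
try:
    type(NoExtraItems).anything = 1
except TypeError as exc:
    print(exc)  # cannot set 'anything' attribute of immutable type ...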
@@ -3086,7 +3114,8 @@ def _get_typeddict_qualifiers(annotation_type):


class _TypedDictMeta(type):
    def __new__(cls, name, bases, ns, total=True):
    def __new__(cls, name, bases, ns, total=True, closed=None,
                extra_items=NoExtraItems):
        """Create a new typed dict class object.

        This method is called when TypedDict is subclassed,

@@ -3098,6 +3127,8 @@ def __new__(cls, name, bases, ns, total=True):
            if type(base) is not _TypedDictMeta and base is not Generic:
                raise TypeError('cannot inherit from both a TypedDict type '
                                'and a non-TypedDict base class')
        if closed is not None and extra_items is not NoExtraItems:
            raise TypeError(f"Cannot combine closed={closed!r} and extra_items")

        if any(issubclass(b, Generic) for b in bases):
            generic_base = (Generic,)

@@ -3209,6 +3240,8 @@ def __annotate__(format):
        tp_dict.__readonly_keys__ = frozenset(readonly_keys)
        tp_dict.__mutable_keys__ = frozenset(mutable_keys)
        tp_dict.__total__ = total
        tp_dict.__closed__ = closed
        tp_dict.__extra_items__ = extra_items
        return tp_dict

    __call__ = dict  # static method

@@ -3220,7 +3253,8 @@ def __subclasscheck__(cls, other):
    __instancecheck__ = __subclasscheck__


def TypedDict(typename, fields, /, *, total=True):
def TypedDict(typename, fields, /, *, total=True, closed=None,
              extra_items=NoExtraItems):
    """A simple typed namespace. At runtime it is equivalent to a plain dict.

    TypedDict creates a dictionary type such that a type checker will expect all

@@ -3274,6 +3308,32 @@ class DatabaseUser(TypedDict):
            id: ReadOnly[int]  # the "id" key must not be modified
            username: str      # the "username" key can be changed

    The closed argument controls whether the TypedDict allows additional
    non-required items during inheritance and assignability checks.
    If closed=True, the TypedDict does not allow additional items::

        Point2D = TypedDict('Point2D', {'x': int, 'y': int}, closed=True)
        class Point3D(Point2D):
            z: int  # Type checker error

    Passing closed=False explicitly requests TypedDict's default open behavior.
    If closed is not provided, the behavior is inherited from the superclass.
    A type checker is only expected to support a literal False or True as the
    value of the closed argument.

    The extra_items argument can instead be used to specify the assignable type
    of unknown non-required keys::

        Point2D = TypedDict('Point2D', {'x': int, 'y': int}, extra_items=int)
        class Point3D(Point2D):
            z: int      # OK
            label: str  # Type checker error

    The extra_items argument is also inherited through subclassing. It is unset
    by default, and it may not be used with the closed argument at the same
    time.

    See PEP 728 for more information about closed and extra_items.
    """
    ns = {'__annotations__': dict(fields)}
    module = _caller()

@@ -3281,7 +3341,8 @@ class DatabaseUser(TypedDict):
    # Setting correct module is necessary to make typed dict classes pickleable.
    ns['__module__'] = module

    td = _TypedDictMeta(typename, (), ns, total=total)
    td = _TypedDictMeta(typename, (), ns, total=total, closed=closed,
                        extra_items=extra_items)
    td.__orig_bases__ = (TypedDict,)
    return td
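Pulling the pieces together, a short usage sketch of the new runtime surface (again assuming a build with this patch):

from typing import TypedDict, NoExtraItems

class Movie(TypedDict, closed=True):
    title: str

class Config(TypedDict, extra_items=int):
    name: str

print(Movie.__closed__)        # True
print(Movie.__extra_items__)   # the NoExtraItems sentinel (i.e. unset)
print(Config.__closed__)       # None (not specified; inheritable)
print(Config.__extra_items__)  # <class 'int'>

# The functional form accepts the same keyword arguments:
Point2D = TypedDict('Point2D', {'x': int, 'y': int}, closed=True)
print(Point2D.__closed__)      # True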
@@ -2682,6 +2682,7 @@ TESTSUBDIRS= idlelib/idle_test \
		test/test_multiprocessing_fork \
		test/test_multiprocessing_forkserver \
		test/test_multiprocessing_spawn \
		test/test_os \
		test/test_pathlib \
		test/test_pathlib/support \
		test/test_peg_generator \
@@ -0,0 +1,2 @@
Improve class creation times by up to 12% by pre-computing type slots
just once. Patch by Sergey Miryanov.

@@ -0,0 +1,3 @@
:class:`typing.TypedDict` now supports the ``closed`` and ``extra_items``
keyword arguments (as described in :pep:`728`) to control whether additional
non-required keys are allowed and to specify their value type.
@@ -0,0 +1,3 @@
Executing the ``quit`` command in :mod:`pdb` now raises :exc:`bdb.BdbQuit`
when :mod:`pdb` is started from an asyncio console using :func:`breakpoint`
or :func:`pdb.set_trace`.

@@ -0,0 +1 @@
Add a Gecko format output to the tachyon profiler via ``--gecko``.

@@ -0,0 +1,2 @@
Fix :func:`os.getlogin` error handling by reporting the correct error
number. Patch by Victor Stinner.
@@ -391,13 +391,6 @@ BZ2Compressor_dealloc(PyObject *op)
    Py_DECREF(tp);
}

static int
BZ2Compressor_traverse(PyObject *self, visitproc visit, void *arg)
{
    Py_VISIT(Py_TYPE(self));
    return 0;
}

static PyMethodDef BZ2Compressor_methods[] = {
    _BZ2_BZ2COMPRESSOR_COMPRESS_METHODDEF
    _BZ2_BZ2COMPRESSOR_FLUSH_METHODDEF

@@ -409,7 +402,6 @@ static PyType_Slot bz2_compressor_type_slots[] = {
    {Py_tp_methods, BZ2Compressor_methods},
    {Py_tp_new, _bz2_BZ2Compressor},
    {Py_tp_doc, (char *)_bz2_BZ2Compressor__doc__},
    {Py_tp_traverse, BZ2Compressor_traverse},
    {0, 0}
};

@@ -701,13 +693,6 @@ BZ2Decompressor_dealloc(PyObject *op)
    Py_DECREF(tp);
}

static int
BZ2Decompressor_traverse(PyObject *self, visitproc visit, void *arg)
{
    Py_VISIT(Py_TYPE(self));
    return 0;
}

static PyMethodDef BZ2Decompressor_methods[] = {
    _BZ2_BZ2DECOMPRESSOR_DECOMPRESS_METHODDEF
    {NULL}

@@ -738,7 +723,6 @@ static PyType_Slot bz2_decompressor_type_slots[] = {
    {Py_tp_doc, (char *)_bz2_BZ2Decompressor__doc__},
    {Py_tp_members, BZ2Decompressor_members},
    {Py_tp_new, _bz2_BZ2Decompressor},
    {Py_tp_traverse, BZ2Decompressor_traverse},
    {0, 0}
};
@@ -511,12 +511,12 @@ _waiting_release(_waiting_t *waiting, int received)
    assert(!waiting->received);

    waiting->status = WAITING_RELEASING;
    PyThread_release_lock(waiting->mutex);
    if (waiting->received != received) {
        assert(received == 1);
        waiting->received = received;
    }
    waiting->status = WAITING_RELEASED;
    PyThread_release_lock(waiting->mutex);
}

static void
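This hunk moves the mutex release to after the final status store, so no waiter can observe the lock free while the fields are still being written. A rough Python analogy of the same discipline (names are illustrative, not from the commit):

import threading

lock = threading.Lock()
state = {"status": "waiting", "received": 0}

def release(received):
    with lock:                        # hold the lock across *all* writes...
        state["received"] = received
        state["status"] = "released"
    # ...and drop it only afterwards, as the C code now does.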
@@ -882,13 +882,6 @@ static PyMethodDef Compressor_methods[] = {
    {NULL}
};

static int
Compressor_traverse(PyObject *self, visitproc visit, void *arg)
{
    Py_VISIT(Py_TYPE(self));
    return 0;
}

PyDoc_STRVAR(Compressor_doc,
"LZMACompressor(format=FORMAT_XZ, check=-1, preset=None, filters=None)\n"
"\n"

@@ -922,7 +915,6 @@ static PyType_Slot lzma_compressor_type_slots[] = {
    {Py_tp_methods, Compressor_methods},
    {Py_tp_new, Compressor_new},
    {Py_tp_doc, (char *)Compressor_doc},
    {Py_tp_traverse, Compressor_traverse},
    {0, 0}
};

@@ -1325,13 +1317,6 @@ Decompressor_dealloc(PyObject *op)
    Py_DECREF(tp);
}

static int
Decompressor_traverse(PyObject *self, visitproc visit, void *arg)
{
    Py_VISIT(Py_TYPE(self));
    return 0;
}

static PyMethodDef Decompressor_methods[] = {
    _LZMA_LZMADECOMPRESSOR_DECOMPRESS_METHODDEF
    {NULL}

@@ -1366,7 +1351,6 @@ static PyType_Slot lzma_decompressor_type_slots[] = {
    {Py_tp_methods, Decompressor_methods},
    {Py_tp_new, _lzma_LZMADecompressor},
    {Py_tp_doc, (char *)_lzma_LZMADecompressor__doc__},
    {Py_tp_traverse, Decompressor_traverse},
    {Py_tp_members, Decompressor_members},
    {0, 0}
};
@@ -144,7 +144,7 @@ class _sqlite3.Connection "pysqlite_Connection *" "clinic_state()->ConnectionType"
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=67369db2faf80891]*/

static void _pysqlite_drop_unused_cursor_references(pysqlite_Connection* self);
static int _pysqlite_drop_unused_cursor_references(pysqlite_Connection* self);
static void free_callback_context(callback_context *ctx);
static void set_callback_context(callback_context **ctx_pp,
                                 callback_context *ctx);

@@ -561,7 +561,10 @@ pysqlite_connection_cursor_impl(pysqlite_Connection *self, PyObject *factory)
        return NULL;
    }

    _pysqlite_drop_unused_cursor_references(self);
    if (_pysqlite_drop_unused_cursor_references(self) < 0) {
        Py_DECREF(cursor);
        return NULL;
    }

    if (cursor && self->row_factory != Py_None) {
        Py_INCREF(self->row_factory);

@@ -1060,32 +1063,36 @@ final_callback(sqlite3_context *context)
    PyGILState_Release(threadstate);
}

static void _pysqlite_drop_unused_cursor_references(pysqlite_Connection* self)
static int
_pysqlite_drop_unused_cursor_references(pysqlite_Connection* self)
{
    /* we only need to do this once in a while */
    if (self->created_cursors++ < 200) {
        return;
        return 0;
    }

    self->created_cursors = 0;

    PyObject* new_list = PyList_New(0);
    if (!new_list) {
        return;
        return -1;
    }

    for (Py_ssize_t i = 0; i < PyList_Size(self->cursors); i++) {
        PyObject* weakref = PyList_GetItem(self->cursors, i);
    assert(PyList_CheckExact(self->cursors));
    Py_ssize_t imax = PyList_GET_SIZE(self->cursors);
    for (Py_ssize_t i = 0; i < imax; i++) {
        PyObject* weakref = PyList_GET_ITEM(self->cursors, i);
        if (_PyWeakref_IsDead(weakref)) {
            continue;
        }
        if (PyList_Append(new_list, weakref) != 0) {
            Py_DECREF(new_list);
            return;
            return -1;
        }
    }

    Py_SETREF(self->cursors, new_list);
    return 0;
}

/* Allocate a UDF/callback context structure. In order to ensure that the state
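The rewritten helper periodically rebuilds a list of weak references, dropping entries whose referents are gone. A hedged Python sketch of the same pruning technique (class name and threshold are illustrative):

import weakref

class CursorRegistry:
    def __init__(self):
        self._refs = []       # like self->cursors: a list of weakref.ref
        self._created = 0

    def register(self, cursor):
        self._refs.append(weakref.ref(cursor))
        self._created += 1
        if self._created >= 200:              # "once in a while", as above
            self._created = 0
            # keep only references whose target is still alive
            self._refs = [r for r in self._refs if r() is not None]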
@@ -471,6 +471,9 @@ static int check_cursor(pysqlite_Cursor* cur)
        return 0;
    }

    assert(cur->connection != NULL);
    assert(cur->connection->state != NULL);

    if (cur->closed) {
        PyErr_SetString(cur->connection->state->ProgrammingError,
                        "Cannot operate on a closed cursor.");

@@ -567,43 +570,40 @@ bind_param(pysqlite_state *state, pysqlite_Statement *self, int pos,
    switch (paramtype) {
        case TYPE_LONG: {
            sqlite_int64 value = _pysqlite_long_as_int64(parameter);
            if (value == -1 && PyErr_Occurred())
                rc = -1;
            else
                rc = sqlite3_bind_int64(self->st, pos, value);
            rc = (value == -1 && PyErr_Occurred())
                     ? SQLITE_ERROR
                     : sqlite3_bind_int64(self->st, pos, value);
            break;
        }
        case TYPE_FLOAT: {
            double value = PyFloat_AsDouble(parameter);
            if (value == -1 && PyErr_Occurred()) {
                rc = -1;
            }
            else {
                rc = sqlite3_bind_double(self->st, pos, value);
            }
            rc = (value == -1 && PyErr_Occurred())
                     ? SQLITE_ERROR
                     : sqlite3_bind_double(self->st, pos, value);
            break;
        }
        case TYPE_UNICODE:
            string = PyUnicode_AsUTF8AndSize(parameter, &buflen);
            if (string == NULL)
                return -1;
            if (string == NULL) {
                return SQLITE_ERROR;
            }
            if (buflen > INT_MAX) {
                PyErr_SetString(PyExc_OverflowError,
                                "string longer than INT_MAX bytes");
                return -1;
                return SQLITE_ERROR;
            }
            rc = sqlite3_bind_text(self->st, pos, string, (int)buflen, SQLITE_TRANSIENT);
            break;
        case TYPE_BUFFER: {
            Py_buffer view;
            if (PyObject_GetBuffer(parameter, &view, PyBUF_SIMPLE) != 0) {
                return -1;
                return SQLITE_ERROR;
            }
            if (view.len > INT_MAX) {
                PyErr_SetString(PyExc_OverflowError,
                                "BLOB longer than INT_MAX bytes");
                PyBuffer_Release(&view);
                return -1;
                return SQLITE_ERROR;
            }
            rc = sqlite3_bind_blob(self->st, pos, view.buf, (int)view.len, SQLITE_TRANSIENT);
            PyBuffer_Release(&view);

@@ -613,7 +613,7 @@ bind_param(pysqlite_state *state, pysqlite_Statement *self, int pos,
        PyErr_Format(state->ProgrammingError,
                     "Error binding parameter %d: type '%s' is not supported",
                     pos, Py_TYPE(parameter)->tp_name);
        rc = -1;
        rc = SQLITE_ERROR;
    }

final:
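The internal return value changes from -1 to SQLITE_ERROR, but the Python-visible behavior stays the same: binding an unsupported type still raises sqlite3.ProgrammingError, for example:

import sqlite3

con = sqlite3.connect(":memory:")
try:
    con.execute("SELECT ?", (object(),))
except sqlite3.ProgrammingError as exc:
    print(exc)  # Error binding parameter 1: type 'object' is not supported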
@@ -733,14 +733,17 @@ bind_parameters(pysqlite_state *state, pysqlite_Statement *self,
    }

    binding_name++; /* skip first char (the colon) */
    PyObject *current_param;
    (void)PyMapping_GetOptionalItemString(parameters, binding_name, &current_param);
    if (!current_param) {
        if (!PyErr_Occurred() || PyErr_ExceptionMatches(PyExc_LookupError)) {
    PyObject *current_param = NULL;
    int found = PyMapping_GetOptionalItemString(parameters,
                                                binding_name,
                                                &current_param);
    if (found == -1) {
        return;
    }
    else if (found == 0) {
        PyErr_Format(state->ProgrammingError,
                     "You did not supply a value for binding "
                     "parameter :%s.", binding_name);
        }
        return;
    }
@@ -5793,7 +5793,6 @@ memory_bio_dealloc(PyObject *op)
{
    PySSLMemoryBIO *self = PySSLMemoryBIO_CAST(op);
    PyTypeObject *tp = Py_TYPE(self);
    PyObject_GC_UnTrack(self);
    (void)BIO_free(self->bio);
    tp->tp_free(self);
    Py_DECREF(tp);

@@ -5957,15 +5956,13 @@ static PyType_Slot PySSLMemoryBIO_slots[] = {
    {Py_tp_getset, memory_bio_getsetlist},
    {Py_tp_new, _ssl_MemoryBIO},
    {Py_tp_dealloc, memory_bio_dealloc},
    {Py_tp_traverse, _PyObject_VisitType},
    {0, 0},
};

static PyType_Spec PySSLMemoryBIO_spec = {
    .name = "_ssl.MemoryBIO",
    .basicsize = sizeof(PySSLMemoryBIO),
    .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_IMMUTABLETYPE |
              Py_TPFLAGS_HAVE_GC),
    .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_IMMUTABLETYPE),
    .slots = PySSLMemoryBIO_slots,
};
@@ -756,7 +756,7 @@ _hmac_new_impl(PyObject *module, PyObject *keyobj, PyObject *msgobj,
        return NULL;
    }

    HMACObject *self = PyObject_GC_New(HMACObject, state->hmac_type);
    HMACObject *self = PyObject_New(HMACObject, state->hmac_type);
    if (self == NULL) {
        return NULL;
    }

@@ -791,7 +791,6 @@ _hmac_new_impl(PyObject *module, PyObject *keyobj, PyObject *msgobj,
#endif
    }
    assert(rc == 0);
    PyObject_GC_Track(self);
    return (PyObject *)self;

error_on_key:

@@ -852,7 +851,7 @@ _hmac_HMAC_copy_impl(HMACObject *self, PyTypeObject *cls)
/*[clinic end generated code: output=a955bfa55b65b215 input=17b2c0ad0b147e36]*/
{
    hmacmodule_state *state = get_hmacmodule_state_by_cls(cls);
    HMACObject *copy = PyObject_GC_New(HMACObject, state->hmac_type);
    HMACObject *copy = PyObject_New(HMACObject, state->hmac_type);
    if (copy == NULL) {
        return NULL;
    }

@@ -870,7 +869,6 @@ _hmac_HMAC_copy_impl(HMACObject *self, PyTypeObject *cls)
    }

    HASHLIB_INIT_MUTEX(copy);
    PyObject_GC_Track(copy);
    return (PyObject *)copy;
}

@@ -1026,7 +1024,6 @@ static void
HMACObject_dealloc(PyObject *op)
{
    PyTypeObject *type = Py_TYPE(op);
    PyObject_GC_UnTrack(op);
    (void)HMACObject_clear(op);
    type->tp_free(op);
    Py_DECREF(type);

@@ -1051,9 +1048,7 @@ static PyType_Slot HMACObject_Type_slots[] = {
    {Py_tp_repr, HMACObject_repr},
    {Py_tp_methods, HMACObject_methods},
    {Py_tp_getset, HMACObject_getsets},
    {Py_tp_clear, HMACObject_clear},
    {Py_tp_dealloc, HMACObject_dealloc},
    {Py_tp_traverse, _PyObject_VisitType},
    {0, NULL} /* sentinel */
};

@@ -1063,8 +1058,7 @@ static PyType_Spec HMAC_Type_spec = {
    .flags = Py_TPFLAGS_DEFAULT
            | Py_TPFLAGS_DISALLOW_INSTANTIATION
            | Py_TPFLAGS_HEAPTYPE
            | Py_TPFLAGS_IMMUTABLETYPE
            | Py_TPFLAGS_HAVE_GC,
            | Py_TPFLAGS_IMMUTABLETYPE,
    .slots = HMACObject_Type_slots,
};
@@ -9605,7 +9605,7 @@ os_getlogin_impl(PyObject *module)
    int err = getlogin_r(name, sizeof(name));
    if (err) {
        int old_errno = errno;
        errno = -err;
        errno = err;
        posix_error();
        errno = old_errno;
    }
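getlogin_r() returns a positive error number, so negating it produced a bogus errno. The observable effect of the fix is simply a sensible errno on failure; for example (whether this fails at all depends on the environment, e.g. no controlling terminal):

import errno
import os

try:
    print(os.getlogin())
except OSError as exc:
    # With the fix, exc.errno is the value returned by getlogin_r()
    # (for instance errno.ENOTTY or errno.ENXIO), not its negation.
    print(errno.errorcode.get(exc.errno, exc.errno))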
@@ -11422,6 +11422,11 @@ static pytype_slotdef slotdefs[] = {
    {NULL}
};

/* Stores, for each slotdefs entry, how many entries share its name.
   This counter is precalculated by _PyType_InitSlotDefs() when the main
   interpreter starts. */
static uint8_t slotdefs_name_counts[Py_ARRAY_LENGTH(slotdefs)];

/* Given a type pointer and an offset gotten from a slotdef entry, return a
   pointer to the actual slot. This is not quite the same as simply adding
   the offset to the type pointer, since it takes care to indirect through the

@@ -11464,61 +11469,6 @@ slotptr(PyTypeObject *type, int ioffset)
    return (void **)ptr;
}

/* Return a slot pointer for a given name, but ONLY if the attribute has
   exactly one slot function. The name must be an interned string. */
static void **
resolve_slotdups(PyTypeObject *type, PyObject *name)
{
    /* XXX Maybe this could be optimized more -- but is it worth it? */

#ifdef Py_GIL_DISABLED
    pytype_slotdef *ptrs[MAX_EQUIV];
    pytype_slotdef **pp = ptrs;
    /* Collect all slotdefs that match name into ptrs. */
    for (pytype_slotdef *p = slotdefs; p->name_strobj; p++) {
        if (p->name_strobj == name)
            *pp++ = p;
    }
    *pp = NULL;
#else
    /* pname and ptrs act as a little cache */
    PyInterpreterState *interp = _PyInterpreterState_GET();
#define pname _Py_INTERP_CACHED_OBJECT(interp, type_slots_pname)
#define ptrs _Py_INTERP_CACHED_OBJECT(interp, type_slots_ptrs)
    pytype_slotdef *p, **pp;

    if (pname != name) {
        /* Collect all slotdefs that match name into ptrs. */
        pname = name;
        pp = ptrs;
        for (p = slotdefs; p->name_strobj; p++) {
            if (p->name_strobj == name)
                *pp++ = p;
        }
        *pp = NULL;
    }
#endif

    /* Look in all slots of the type matching the name. If exactly one of these
       has a filled-in slot, return a pointer to that slot.
       Otherwise, return NULL. */
    void **res, **ptr;
    res = NULL;
    for (pp = ptrs; *pp; pp++) {
        ptr = slotptr(type, (*pp)->offset);
        if (ptr == NULL || *ptr == NULL)
            continue;
        if (res != NULL)
            return NULL;
        res = ptr;
    }
#ifndef Py_GIL_DISABLED
#undef pname
#undef ptrs
#endif
    return res;
}

// Return true if "name" corresponds to at least one slot definition. This is
// a more accurate but more expensive test compared to is_dunder_name().
static bool

@@ -11645,7 +11595,15 @@ update_one_slot(PyTypeObject *type, pytype_slotdef *p, pytype_slotdef **next_p,
    }
    if (Py_IS_TYPE(descr, &PyWrapperDescr_Type) &&
        ((PyWrapperDescrObject *)descr)->d_base->name_strobj == p->name_strobj) {
        void **tptr = resolve_slotdups(type, p->name_strobj);
        void **tptr;
        /* Pointer subtraction already yields an element index; dividing
           by sizeof(slotdefs[0]) again would compute the wrong index. */
        size_t index = (size_t)(p - slotdefs);
        if (slotdefs_name_counts[index] == 1) {
            tptr = slotptr(type, p->offset);
        }
        else {
            tptr = NULL;
        }

        if (tptr == NULL || tptr == ptr)
            generic = p->function;
        d = (PyWrapperDescrObject *)descr;

@@ -11858,6 +11816,76 @@ update_all_slots(PyTypeObject* type)

#endif

int
_PyType_InitSlotDefs(PyInterpreterState *interp)
{
    if (!_Py_IsMainInterpreter(interp)) {
        return 0;
    }
    PyObject *bytearray = NULL;
    PyObject *cache = PyDict_New();
    if (!cache) {
        return -1;
    }

    pytype_slotdef *p;
    Py_ssize_t idx = 0;
    for (p = slotdefs; p->name_strobj; p++, idx++) {
        assert(idx < 255);

        if (PyDict_GetItemRef(cache, p->name_strobj, &bytearray) < 0) {
            goto error;
        }

        if (!bytearray) {
            Py_ssize_t size = sizeof(uint8_t) * (1 + MAX_EQUIV);
            bytearray = PyByteArray_FromStringAndSize(NULL, size);
            if (!bytearray) {
                goto error;
            }

            uint8_t *data = (uint8_t *)PyByteArray_AS_STRING(bytearray);
            data[0] = 0;

            if (PyDict_SetItem(cache, p->name_strobj, bytearray) < 0) {
                goto error;
            }
        }

        assert(PyByteArray_CheckExact(bytearray));
        uint8_t *data = (uint8_t *)PyByteArray_AS_STRING(bytearray);

        data[0] += 1;
        assert(data[0] < MAX_EQUIV);

        data[data[0]] = (uint8_t)idx;

        Py_CLEAR(bytearray);
    }

    memset(slotdefs_name_counts, 0, sizeof(slotdefs_name_counts));

    Py_ssize_t pos = 0;
    PyObject *key = NULL;
    PyObject *value = NULL;
    while (PyDict_Next(cache, &pos, &key, &value)) {
        uint8_t *data = (uint8_t *)PyByteArray_AS_STRING(value);
        uint8_t n = data[0];
        for (uint8_t i = 0; i < n; i++) {
            uint8_t idx = data[i + 1];
            slotdefs_name_counts[idx] = n;
        }
    }

    Py_DECREF(cache);
    return 0;

error:
    Py_XDECREF(bytearray);
    Py_DECREF(cache);
    return -1;
}


PyObject *
_PyType_GetSlotWrapperNames(void)
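A Python sketch of what this precomputation amounts to, using a toy list of slot names in place of the C slotdefs table:

from collections import Counter

# toy stand-in for the names in the C slotdefs table
slotdef_names = ["__add__", "__radd__", "__add__", "__len__"]

counts = Counter(slotdef_names)
slotdefs_name_counts = [counts[name] for name in slotdef_names]
print(slotdefs_name_counts)  # [2, 1, 2, 1]

# update_one_slot() can now answer "is this slot name unique?" with a
# single array lookup instead of rescanning the whole table each time.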
@@ -1506,44 +1506,6 @@ static int test_audit_run_stdin(void)
    return run_audit_run_test(Py_ARRAY_LENGTH(argv), argv, &test);
}

static int test_init_read_set(void)
{
    PyStatus status;
    PyConfig config;
    PyConfig_InitPythonConfig(&config);

    config_set_string(&config, &config.program_name, L"./init_read_set");

    status = PyConfig_Read(&config);
    if (PyStatus_Exception(status)) {
        goto fail;
    }

    status = PyWideStringList_Insert(&config.module_search_paths,
                                     1, L"test_path_insert1");
    if (PyStatus_Exception(status)) {
        goto fail;
    }

    status = PyWideStringList_Append(&config.module_search_paths,
                                     L"test_path_append");
    if (PyStatus_Exception(status)) {
        goto fail;
    }

    /* override executable computed by PyConfig_Read() */
    config_set_string(&config, &config.executable, L"my_executable");
    init_from_config_clear(&config);

    dump_config();
    Py_Finalize();
    return 0;

fail:
    PyConfig_Clear(&config);
    Py_ExitStatusException(status);
}


static int test_init_sys_add(void)
{

@@ -2398,7 +2360,6 @@ static struct TestCase TestCases[] = {
    {"test_preinit_isolated2", test_preinit_isolated2},
    {"test_preinit_parse_argv", test_preinit_parse_argv},
    {"test_preinit_dont_parse_argv", test_preinit_dont_parse_argv},
    {"test_init_read_set", test_init_read_set},
    {"test_init_run_main", test_init_run_main},
    {"test_init_sys_add", test_init_sys_add},
    {"test_init_setpath", test_init_setpath},
@@ -83,8 +83,6 @@ PyCodec_Unregister(PyObject *search_function)
    return 0;
}

extern int _Py_normalize_encoding(const char *, char *, size_t);

/* Convert a string to a normalized Python string: all ASCII letters are
   converted to lower case, spaces are replaced with hyphens. */
@@ -836,6 +836,10 @@ pycore_init_builtins(PyThreadState *tstate)
    }
    interp->callable_cache.object__getattribute__ = object__getattribute__;

    if (_PyType_InitSlotDefs(interp) < 0) {
        return _PyStatus_ERR("failed to init slotdefs");
    }

    if (_PyBuiltins_AddExceptions(bimod) < 0) {
        return _PyStatus_ERR("failed to add exceptions to builtins");
    }
@@ -230,7 +230,7 @@ tracemalloc_get_frame(_PyInterpreterFrame *pyframe, frame_t *frame)
    }
    frame->lineno = (unsigned int)lineno;

    PyObject *filename = filename = _PyFrame_GetCode(pyframe)->co_filename;
    PyObject *filename = _PyFrame_GetCode(pyframe)->co_filename;
    if (filename == NULL) {
#ifdef TRACE_DEBUG
        tracemalloc_error("failed to get the filename of the code object");
@@ -29,7 +29,6 @@ ignore = [
    "F541",    # f-string without any placeholders
    "PYI024",  # Use `typing.NamedTuple` instead of `collections.namedtuple`
    "PYI025",  # Use `from collections.abc import Set as AbstractSet`
    "UP038",   # Use `X | Y` in `isinstance` call instead of `(X, Y)`
]

[lint.per-file-ignores]
@@ -344,6 +344,8 @@ Objects/obmalloc.c - obmalloc_state_main -
Objects/obmalloc.c - obmalloc_state_initialized -
Objects/typeobject.c - name_op -
Objects/typeobject.c - slotdefs -
# It is initialized only once when the main interpreter starts
Objects/typeobject.c - slotdefs_name_counts -
Objects/unicodeobject.c - stripfuncnames -
Objects/unicodeobject.c - utf7_category -
Objects/unicodeobject.c unicode_decode_call_errorhandler_wchar argparse -
@@ -17,9 +17,6 @@ ignore = [
    # Use f-strings instead of format specifiers.
    # Doesn't always make code more readable.
    "UP032",
    # Use PEP-604 unions rather than tuples for isinstance() checks.
    # Makes code slower and more verbose. https://github.com/astral-sh/ruff/issues/7871.
    "UP038",
]
unfixable = [
    # The autofixes sometimes do the wrong things for these;

@@ -13,11 +13,6 @@ select = [
    "RUF100",  # Ban unused `# noqa` comments
    "PGH004",  # Ban blanket `# noqa` comments (only ignore specific error codes)
]
ignore = [
    # Use PEP-604 unions rather than tuples for isinstance() checks.
    # Makes code slower and more verbose. https://github.com/astral-sh/ruff/issues/7871.
    "UP038",
]
unfixable = [
    # The autofixes sometimes do the wrong things for these;
    # it's better to have to manually look at the code and see how it needs fixing