diff --git a/Doc/c-api/init.rst b/Doc/c-api/init.rst index 60f5c81cff572c..d164d1a752e295 100644 --- a/Doc/c-api/init.rst +++ b/Doc/c-api/init.rst @@ -870,6 +870,19 @@ code, or when embedding the Python interpreter: When the current thread state is ``NULL``, this issues a fatal error (so that the caller needn't check for ``NULL``). + See also :c:func:`PyThreadState_GetUnchecked`. + + +.. c:function:: PyThreadState* PyThreadState_GetUnchecked() + + Similar to :c:func:`PyThreadState_Get`, but don't kill the process with a + fatal error if it is NULL. The caller is responsible to check if the result + is NULL. + + .. versionadded:: 3.13 + In Python 3.5 to 3.12, the function was private and known as + ``_PyThreadState_UncheckedGet()``. + .. c:function:: PyThreadState* PyThreadState_Swap(PyThreadState *tstate) diff --git a/Doc/library/ast.rst b/Doc/library/ast.rst index 10b3e2dae472e1..4ebbe0e5471c88 100644 --- a/Doc/library/ast.rst +++ b/Doc/library/ast.rst @@ -2483,26 +2483,26 @@ The following options are accepted: .. program:: ast -.. cmdoption:: -h, --help +.. option:: -h, --help Show the help message and exit. -.. cmdoption:: -m - --mode +.. option:: -m + --mode Specify what kind of code must be compiled, like the *mode* argument in :func:`parse`. -.. cmdoption:: --no-type-comments +.. option:: --no-type-comments Don't parse type comments. -.. cmdoption:: -a, --include-attributes +.. option:: -a, --include-attributes Include attributes such as line numbers and column offsets. -.. cmdoption:: -i - --indent +.. option:: -i + --indent Indentation of nodes in AST (number of spaces). diff --git a/Doc/library/compileall.rst b/Doc/library/compileall.rst index b4723b98f67bb2..df1eefab839cc1 100644 --- a/Doc/library/compileall.rst +++ b/Doc/library/compileall.rst @@ -26,28 +26,28 @@ compile Python sources. .. program:: compileall -.. cmdoption:: directory ... - file ... +.. option:: directory ... + file ... Positional arguments are files to compile or directories that contain source files, traversed recursively. If no argument is given, behave as if the command line was :samp:`-l {}`. -.. cmdoption:: -l +.. option:: -l Do not recurse into subdirectories, only compile source code files directly contained in the named or implied directories. -.. cmdoption:: -f +.. option:: -f Force rebuild even if timestamps are up-to-date. -.. cmdoption:: -q +.. option:: -q Do not print the list of files compiled. If passed once, error messages will still be printed. If passed twice (``-qq``), all output is suppressed. -.. cmdoption:: -d destdir +.. option:: -d destdir Directory prepended to the path to each file being compiled. This will appear in compilation time tracebacks, and is also compiled in to the @@ -55,45 +55,45 @@ compile Python sources. cases where the source file does not exist at the time the byte-code file is executed. -.. cmdoption:: -s strip_prefix -.. cmdoption:: -p prepend_prefix +.. option:: -s strip_prefix +.. option:: -p prepend_prefix Remove (``-s``) or append (``-p``) the given prefix of paths recorded in the ``.pyc`` files. Cannot be combined with ``-d``. -.. cmdoption:: -x regex +.. option:: -x regex regex is used to search the full path to each file considered for compilation, and if the regex produces a match, the file is skipped. -.. cmdoption:: -i list +.. option:: -i list Read the file ``list`` and add each line that it contains to the list of files and directories to compile. If ``list`` is ``-``, read lines from ``stdin``. -.. cmdoption:: -b +.. 
option:: -b Write the byte-code files to their legacy locations and names, which may overwrite byte-code files created by another version of Python. The default is to write files to their :pep:`3147` locations and names, which allows byte-code files from multiple versions of Python to coexist. -.. cmdoption:: -r +.. option:: -r Control the maximum recursion level for subdirectories. If this is given, then ``-l`` option will not be taken into account. :program:`python -m compileall -r 0` is equivalent to :program:`python -m compileall -l`. -.. cmdoption:: -j N +.. option:: -j N Use *N* workers to compile the files within the given directory. If ``0`` is used, then the result of :func:`os.process_cpu_count()` will be used. -.. cmdoption:: --invalidation-mode [timestamp|checked-hash|unchecked-hash] +.. option:: --invalidation-mode [timestamp|checked-hash|unchecked-hash] Control how the generated byte-code files are invalidated at runtime. The ``timestamp`` value, means that ``.pyc`` files with the source timestamp @@ -106,17 +106,17 @@ compile Python sources. variable is not set, and ``checked-hash`` if the ``SOURCE_DATE_EPOCH`` environment variable is set. -.. cmdoption:: -o level +.. option:: -o level Compile with the given optimization level. May be used multiple times to compile for multiple levels at a time (for example, ``compileall -o 1 -o 2``). -.. cmdoption:: -e dir +.. option:: -e dir Ignore symlinks pointing outside the given directory. -.. cmdoption:: --hardlink-dupes +.. option:: --hardlink-dupes If two ``.pyc`` files with different optimization level have the same content, use hard links to consolidate duplicate files. diff --git a/Doc/library/gzip.rst b/Doc/library/gzip.rst index 6a4f2c76ae4e10..f931d0e399c9f2 100644 --- a/Doc/library/gzip.rst +++ b/Doc/library/gzip.rst @@ -268,23 +268,23 @@ Once executed the :mod:`gzip` module keeps the input file(s). Command line options ^^^^^^^^^^^^^^^^^^^^ -.. cmdoption:: file +.. option:: file If *file* is not specified, read from :data:`sys.stdin`. -.. cmdoption:: --fast +.. option:: --fast Indicates the fastest compression method (less compression). -.. cmdoption:: --best +.. option:: --best Indicates the slowest compression method (best compression). -.. cmdoption:: -d, --decompress +.. option:: -d, --decompress Decompress the given file. -.. cmdoption:: -h, --help +.. option:: -h, --help Show the help message. diff --git a/Doc/library/inspect.rst b/Doc/library/inspect.rst index fe0ed135029f0f..d0c3dd761e4d56 100644 --- a/Doc/library/inspect.rst +++ b/Doc/library/inspect.rst @@ -1655,6 +1655,6 @@ By default, accepts the name of a module and prints the source of that module. A class or function within the module can be printed instead by appended a colon and the qualified name of the target object. -.. cmdoption:: --details +.. option:: --details Print information about the specified object rather than the source code diff --git a/Doc/library/json.rst b/Doc/library/json.rst index b337b5f9960e8e..0ce4b697145cb3 100644 --- a/Doc/library/json.rst +++ b/Doc/library/json.rst @@ -714,7 +714,7 @@ specified, :data:`sys.stdin` and :data:`sys.stdout` will be used respectively: Command line options ^^^^^^^^^^^^^^^^^^^^ -.. cmdoption:: infile +.. option:: infile The JSON file to be validated or pretty-printed: @@ -734,36 +734,36 @@ Command line options If *infile* is not specified, read from :data:`sys.stdin`. -.. cmdoption:: outfile +.. option:: outfile Write the output of the *infile* to the given *outfile*. 
Otherwise, write it to :data:`sys.stdout`. -.. cmdoption:: --sort-keys +.. option:: --sort-keys Sort the output of dictionaries alphabetically by key. .. versionadded:: 3.5 -.. cmdoption:: --no-ensure-ascii +.. option:: --no-ensure-ascii Disable escaping of non-ascii characters, see :func:`json.dumps` for more information. .. versionadded:: 3.9 -.. cmdoption:: --json-lines +.. option:: --json-lines Parse every input line as separate JSON object. .. versionadded:: 3.8 -.. cmdoption:: --indent, --tab, --no-indent, --compact +.. option:: --indent, --tab, --no-indent, --compact Mutually exclusive options for whitespace control. .. versionadded:: 3.9 -.. cmdoption:: -h, --help +.. option:: -h, --help Show the help message. diff --git a/Doc/library/pickletools.rst b/Doc/library/pickletools.rst index 76f5b0cadf975a..41930f8cbe8412 100644 --- a/Doc/library/pickletools.rst +++ b/Doc/library/pickletools.rst @@ -53,24 +53,24 @@ Command line options .. program:: pickletools -.. cmdoption:: -a, --annotate +.. option:: -a, --annotate Annotate each line with a short opcode description. -.. cmdoption:: -o, --output= +.. option:: -o, --output= Name of a file where the output should be written. -.. cmdoption:: -l, --indentlevel= +.. option:: -l, --indentlevel= The number of blanks by which to indent a new MARK level. -.. cmdoption:: -m, --memo +.. option:: -m, --memo When multiple objects are disassembled, preserve memo between disassemblies. -.. cmdoption:: -p, --preamble= +.. option:: -p, --preamble= When more than one pickle file are specified, print given preamble before each disassembly. diff --git a/Doc/library/py_compile.rst b/Doc/library/py_compile.rst index 5501db8f87de81..38c416f9ad0305 100644 --- a/Doc/library/py_compile.rst +++ b/Doc/library/py_compile.rst @@ -139,13 +139,13 @@ not be compiled. .. program:: python -m py_compile -.. cmdoption:: ... - - +.. option:: ... + - Positional arguments are files to compile. If ``-`` is the only parameter, the list of files is taken from standard input. -.. cmdoption:: -q, --quiet +.. option:: -q, --quiet Suppress errors output. diff --git a/Doc/library/site.rst b/Doc/library/site.rst index 02880c56416615..2dc9fb09d727e2 100644 --- a/Doc/library/site.rst +++ b/Doc/library/site.rst @@ -266,11 +266,11 @@ If it is called without arguments, it will print the contents of :data:`USER_BASE` and whether the directory exists, then the same thing for :data:`USER_SITE`, and finally the value of :data:`ENABLE_USER_SITE`. -.. cmdoption:: --user-base +.. option:: --user-base Print the path to the user base directory. -.. cmdoption:: --user-site +.. option:: --user-site Print the path to the user site-packages directory. diff --git a/Doc/library/sqlite3.rst b/Doc/library/sqlite3.rst index 7b8c7810165164..aa34bcc9388e1c 100644 --- a/Doc/library/sqlite3.rst +++ b/Doc/library/sqlite3.rst @@ -2437,9 +2437,9 @@ or if :attr:`~Connection.autocommit` is ``True``, the context manager does nothing. .. note:: - The context manager neither implicitly opens a new transaction - nor closes the connection. + nor closes the connection. If you need a closing context manager, consider + using :meth:`contextlib.closing`. .. testcode:: diff --git a/Doc/library/tarfile.rst b/Doc/library/tarfile.rst index 62d67bc577c7d0..3e5723a66780ca 100644 --- a/Doc/library/tarfile.rst +++ b/Doc/library/tarfile.rst @@ -1156,31 +1156,31 @@ For a list of the files in a tar archive, use the :option:`-l` option: Command-line options ~~~~~~~~~~~~~~~~~~~~ -.. cmdoption:: -l - --list +.. 
option:: -l + --list List files in a tarfile. -.. cmdoption:: -c ... - --create ... +.. option:: -c ... + --create ... Create tarfile from source files. -.. cmdoption:: -e [] - --extract [] +.. option:: -e [] + --extract [] Extract tarfile into the current directory if *output_dir* is not specified. -.. cmdoption:: -t - --test +.. option:: -t + --test Test whether the tarfile is valid or not. -.. cmdoption:: -v, --verbose +.. option:: -v, --verbose Verbose output. -.. cmdoption:: --filter +.. option:: --filter Specifies the *filter* for ``--extract``. See :ref:`tarfile-extraction-filter` for details. diff --git a/Doc/library/timeit.rst b/Doc/library/timeit.rst index b3d2a1b9e0600f..60ec135873c414 100644 --- a/Doc/library/timeit.rst +++ b/Doc/library/timeit.rst @@ -214,36 +214,36 @@ Where the following options are understood: .. program:: timeit -.. cmdoption:: -n N, --number=N +.. option:: -n N, --number=N how many times to execute 'statement' -.. cmdoption:: -r N, --repeat=N +.. option:: -r N, --repeat=N how many times to repeat the timer (default 5) -.. cmdoption:: -s S, --setup=S +.. option:: -s S, --setup=S statement to be executed once initially (default ``pass``) -.. cmdoption:: -p, --process +.. option:: -p, --process measure process time, not wallclock time, using :func:`time.process_time` instead of :func:`time.perf_counter`, which is the default .. versionadded:: 3.3 -.. cmdoption:: -u, --unit=U +.. option:: -u, --unit=U specify a time unit for timer output; can select ``nsec``, ``usec``, ``msec``, or ``sec`` .. versionadded:: 3.5 -.. cmdoption:: -v, --verbose +.. option:: -v, --verbose print raw timing results; repeat for more digits precision -.. cmdoption:: -h, --help +.. option:: -h, --help print a short usage message and exit diff --git a/Doc/library/tokenize.rst b/Doc/library/tokenize.rst index bffe93006edc7b..92bdb052267a68 100644 --- a/Doc/library/tokenize.rst +++ b/Doc/library/tokenize.rst @@ -166,11 +166,11 @@ The following options are accepted: .. program:: tokenize -.. cmdoption:: -h, --help +.. option:: -h, --help show this help message and exit -.. cmdoption:: -e, --exact +.. option:: -e, --exact display token names using the exact type diff --git a/Doc/library/trace.rst b/Doc/library/trace.rst index 40cf198f1287d7..e9b59a6d186ba2 100644 --- a/Doc/library/trace.rst +++ b/Doc/library/trace.rst @@ -34,11 +34,11 @@ all Python modules imported during the execution into the current directory. .. program:: trace -.. cmdoption:: --help +.. option:: --help Display usage and exit. -.. cmdoption:: --version +.. option:: --version Display the version of the module and exit. @@ -56,28 +56,28 @@ the :option:`--trace <-t>` and :option:`--count <-c>` options. When .. program:: trace -.. cmdoption:: -c, --count +.. option:: -c, --count Produce a set of annotated listing files upon program completion that shows how many times each statement was executed. See also :option:`--coverdir <-C>`, :option:`--file <-f>` and :option:`--no-report <-R>` below. -.. cmdoption:: -t, --trace +.. option:: -t, --trace Display lines as they are executed. -.. cmdoption:: -l, --listfuncs +.. option:: -l, --listfuncs Display the functions executed by running the program. -.. cmdoption:: -r, --report +.. option:: -r, --report Produce an annotated list from an earlier program run that used the :option:`--count <-c>` and :option:`--file <-f>` option. This does not execute any code. -.. cmdoption:: -T, --trackcalls +.. 
option:: -T, --trackcalls Display the calling relationships exposed by running the program. @@ -86,33 +86,33 @@ Modifiers .. program:: trace -.. cmdoption:: -f, --file= +.. option:: -f, --file= Name of a file to accumulate counts over several tracing runs. Should be used with the :option:`--count <-c>` option. -.. cmdoption:: -C, --coverdir= +.. option:: -C, --coverdir= Directory where the report files go. The coverage report for ``package.module`` is written to file :file:`{dir}/{package}/{module}.cover`. -.. cmdoption:: -m, --missing +.. option:: -m, --missing When generating annotated listings, mark lines which were not executed with ``>>>>>>``. -.. cmdoption:: -s, --summary +.. option:: -s, --summary When using :option:`--count <-c>` or :option:`--report <-r>`, write a brief summary to stdout for each file processed. -.. cmdoption:: -R, --no-report +.. option:: -R, --no-report Do not generate annotated listings. This is useful if you intend to make several runs with :option:`--count <-c>`, and then produce a single set of annotated listings at the end. -.. cmdoption:: -g, --timing +.. option:: -g, --timing Prefix each line with the time since the program started. Only used while tracing. @@ -124,12 +124,12 @@ These options may be repeated multiple times. .. program:: trace -.. cmdoption:: --ignore-module= +.. option:: --ignore-module= Ignore each of the given module names and its submodules (if it is a package). The argument can be a list of names separated by a comma. -.. cmdoption:: --ignore-dir= +.. option:: --ignore-dir= Ignore all modules and packages in the named directory and subdirectories. The argument can be a list of directories separated by :data:`os.pathsep`. diff --git a/Doc/library/tty.rst b/Doc/library/tty.rst index fc7f98c7931fa5..a4777772e1fc6c 100644 --- a/Doc/library/tty.rst +++ b/Doc/library/tty.rst @@ -43,6 +43,9 @@ The :mod:`tty` module defines the following functions: :func:`termios.tcsetattr`. The return value of :func:`termios.tcgetattr` is saved before setting *fd* to raw mode; this value is returned. + .. versionchanged:: 3.12 + The return value is now the original tty attributes, instead of None. + .. function:: setcbreak(fd, when=termios.TCSAFLUSH) @@ -51,6 +54,9 @@ The :mod:`tty` module defines the following functions: :func:`termios.tcsetattr`. The return value of :func:`termios.tcgetattr` is saved before setting *fd* to cbreak mode; this value is returned. + .. versionchanged:: 3.12 + The return value is now the original tty attributes, instead of None. + .. seealso:: diff --git a/Doc/library/unittest.rst b/Doc/library/unittest.rst index 4c28e8fae8b088..21abc583f853a7 100644 --- a/Doc/library/unittest.rst +++ b/Doc/library/unittest.rst @@ -206,13 +206,13 @@ Command-line options .. program:: unittest -.. cmdoption:: -b, --buffer +.. option:: -b, --buffer The standard output and standard error streams are buffered during the test run. Output during a passing test is discarded. Output is echoed normally on test fail or error and is added to the failure messages. -.. cmdoption:: -c, --catch +.. option:: -c, --catch :kbd:`Control-C` during the test run waits for the current test to end and then reports all the results so far. A second :kbd:`Control-C` raises the normal @@ -220,11 +220,11 @@ Command-line options See `Signal Handling`_ for the functions that provide this functionality. -.. cmdoption:: -f, --failfast +.. option:: -f, --failfast Stop the test run on the first error or failure. -.. cmdoption:: -k +.. 
option:: -k Only run test methods and classes that match the pattern or substring. This option may be used multiple times, in which case all test cases that @@ -240,11 +240,11 @@ Command-line options For example, ``-k foo`` matches ``foo_tests.SomeTest.test_something``, ``bar_tests.SomeTest.test_foo``, but not ``bar_tests.FooTest.test_something``. -.. cmdoption:: --locals +.. option:: --locals Show local variables in tracebacks. -.. cmdoption:: --durations N +.. option:: --durations N Show the N slowest test cases (N=0 for all). @@ -292,19 +292,19 @@ The ``discover`` sub-command has the following options: .. program:: unittest discover -.. cmdoption:: -v, --verbose +.. option:: -v, --verbose Verbose output -.. cmdoption:: -s, --start-directory directory +.. option:: -s, --start-directory directory Directory to start discovery (``.`` default) -.. cmdoption:: -p, --pattern pattern +.. option:: -p, --pattern pattern Pattern to match test files (``test*.py`` default) -.. cmdoption:: -t, --top-level-directory directory +.. option:: -t, --top-level-directory directory Top level directory of project (defaults to start directory) diff --git a/Doc/library/uuid.rst b/Doc/library/uuid.rst index adf01770656754..e2d231da38fd9a 100644 --- a/Doc/library/uuid.rst +++ b/Doc/library/uuid.rst @@ -289,25 +289,25 @@ The following options are accepted: .. program:: uuid -.. cmdoption:: -h, --help +.. option:: -h, --help Show the help message and exit. -.. cmdoption:: -u - --uuid +.. option:: -u + --uuid Specify the function name to use to generate the uuid. By default :func:`uuid4` is used. -.. cmdoption:: -n - --namespace +.. option:: -n + --namespace The namespace is a ``UUID``, or ``@ns`` where ``ns`` is a well-known predefined UUID addressed by namespace name. Such as ``@dns``, ``@url``, ``@oid``, and ``@x500``. Only required for :func:`uuid3` / :func:`uuid5` functions. -.. cmdoption:: -N - --name +.. option:: -N + --name The name used as part of generating the uuid. Only required for :func:`uuid3` / :func:`uuid5` functions. diff --git a/Doc/library/zipapp.rst b/Doc/library/zipapp.rst index 7c01fc102fca07..104afca23a20b4 100644 --- a/Doc/library/zipapp.rst +++ b/Doc/library/zipapp.rst @@ -54,7 +54,7 @@ The following options are understood: .. program:: zipapp -.. cmdoption:: -o , --output= +.. option:: -o , --output= Write the output to a file named *output*. If this option is not specified, the output filename will be the same as the input *source*, with the @@ -64,13 +64,13 @@ The following options are understood: An output filename must be specified if the *source* is an archive (and in that case, *output* must not be the same as *source*). -.. cmdoption:: -p , --python= +.. option:: -p , --python= Add a ``#!`` line to the archive specifying *interpreter* as the command to run. Also, on POSIX, make the archive executable. The default is to write no ``#!`` line, and not make the file executable. -.. cmdoption:: -m , --main= +.. option:: -m , --main= Write a ``__main__.py`` file to the archive that executes *mainfn*. The *mainfn* argument should have the form "pkg.mod:fn", where "pkg.mod" is a @@ -79,7 +79,7 @@ The following options are understood: :option:`--main` cannot be specified when copying an archive. -.. cmdoption:: -c, --compress +.. option:: -c, --compress Compress files with the deflate method, reducing the size of the output file. By default, files are stored uncompressed in the archive. @@ -88,13 +88,13 @@ The following options are understood: .. versionadded:: 3.7 -.. cmdoption:: --info +.. 
option:: --info Display the interpreter embedded in the archive, for diagnostic purposes. In this case, any other options are ignored and SOURCE must be an archive, not a directory. -.. cmdoption:: -h, --help +.. option:: -h, --help Print a short usage message and exit. diff --git a/Doc/library/zipfile.rst b/Doc/library/zipfile.rst index bd951e4872f113..a77e49a7643826 100644 --- a/Doc/library/zipfile.rst +++ b/Doc/library/zipfile.rst @@ -906,27 +906,27 @@ For a list of the files in a ZIP archive, use the :option:`-l` option: Command-line options ~~~~~~~~~~~~~~~~~~~~ -.. cmdoption:: -l - --list +.. option:: -l + --list List files in a zipfile. -.. cmdoption:: -c ... - --create ... +.. option:: -c ... + --create ... Create zipfile from source files. -.. cmdoption:: -e - --extract +.. option:: -e + --extract Extract zipfile into target directory. -.. cmdoption:: -t - --test +.. option:: -t + --test Test whether the zipfile is valid or not. -.. cmdoption:: --metadata-encoding +.. option:: --metadata-encoding Specify encoding of member names for :option:`-l`, :option:`-e` and :option:`-t`. diff --git a/Doc/requirements-oldest-sphinx.txt b/Doc/requirements-oldest-sphinx.txt index d3ef5bc17650ae..5de739fc10b085 100644 --- a/Doc/requirements-oldest-sphinx.txt +++ b/Doc/requirements-oldest-sphinx.txt @@ -13,16 +13,16 @@ python-docs-theme>=2022.1 # Sphinx 4.2 comes from ``needs_sphinx = '4.2'`` in ``Doc/conf.py``. alabaster==0.7.13 -Babel==2.12.1 +Babel==2.13.0 certifi==2023.7.22 -charset-normalizer==3.2.0 +charset-normalizer==3.3.0 colorama==0.4.6 -docutils==0.16 +docutils==0.17.1 idna==3.4 imagesize==1.4.1 -Jinja2==2.11.3 -MarkupSafe==1.1.1 -packaging==23.1 +Jinja2==3.1.2 +MarkupSafe==2.1.3 +packaging==23.2 Pygments==2.16.1 requests==2.31.0 snowballstemmer==2.2.0 @@ -33,4 +33,4 @@ sphinxcontrib-htmlhelp==2.0.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 -urllib3==2.0.4 +urllib3==2.0.6 diff --git a/Doc/using/cmdline.rst b/Doc/using/cmdline.rst index bade3ca6650e62..f68a2251f06d4a 100644 --- a/Doc/using/cmdline.rst +++ b/Doc/using/cmdline.rst @@ -59,7 +59,7 @@ all consecutive arguments will end up in :data:`sys.argv` -- note that the first element, subscript zero (``sys.argv[0]``), is a string reflecting the program's source. -.. cmdoption:: -c +.. option:: -c Execute the Python code in *command*. *command* can be one or more statements separated by newlines, with significant leading whitespace as in @@ -72,7 +72,7 @@ source. .. audit-event:: cpython.run_command command cmdoption-c -.. cmdoption:: -m +.. option:: -m Search :data:`sys.path` for the named module and execute its contents as the :mod:`__main__` module. @@ -188,35 +188,35 @@ automatically enabled, if available on your platform (see Generic options ~~~~~~~~~~~~~~~ -.. cmdoption:: -? - -h - --help +.. option:: -? + -h + --help Print a short description of all command line options and corresponding environment variables and exit. -.. cmdoption:: --help-env +.. option:: --help-env Print a short description of Python-specific environment variables and exit. .. versionadded:: 3.11 -.. cmdoption:: --help-xoptions +.. option:: --help-xoptions Print a description of implementation-specific :option:`-X` options and exit. .. versionadded:: 3.11 -.. cmdoption:: --help-all +.. option:: --help-all Print complete usage information and exit. .. versionadded:: 3.11 -.. cmdoption:: -V - --version +.. option:: -V + --version Print the Python version number and exit. 
Example output could be: @@ -240,7 +240,7 @@ Generic options Miscellaneous options ~~~~~~~~~~~~~~~~~~~~~ -.. cmdoption:: -b +.. option:: -b Issue a warning when comparing :class:`bytes` or :class:`bytearray` with :class:`str` or :class:`bytes` with :class:`int`. Issue an error when the @@ -249,13 +249,13 @@ Miscellaneous options .. versionchanged:: 3.5 Affects comparisons of :class:`bytes` with :class:`int`. -.. cmdoption:: -B +.. option:: -B If given, Python won't try to write ``.pyc`` files on the import of source modules. See also :envvar:`PYTHONDONTWRITEBYTECODE`. -.. cmdoption:: --check-hash-based-pycs default|always|never +.. option:: --check-hash-based-pycs default|always|never Control the validation behavior of hash-based ``.pyc`` files. See :ref:`pyc-invalidation`. When set to ``default``, checked and unchecked @@ -269,7 +269,7 @@ Miscellaneous options option. -.. cmdoption:: -d +.. option:: -d Turn on parser debugging output (for expert only). See also the :envvar:`PYTHONDEBUG` environment variable. @@ -278,7 +278,7 @@ Miscellaneous options it's ignored. -.. cmdoption:: -E +.. option:: -E Ignore all :envvar:`PYTHON*` environment variables, e.g. :envvar:`PYTHONPATH` and :envvar:`PYTHONHOME`, that might be set. @@ -286,7 +286,7 @@ Miscellaneous options See also the :option:`-P` and :option:`-I` (isolated) options. -.. cmdoption:: -i +.. option:: -i When a script is passed as first argument or the :option:`-c` option is used, enter interactive mode after executing the script or the command, even when @@ -297,7 +297,7 @@ Miscellaneous options raises an exception. See also :envvar:`PYTHONINSPECT`. -.. cmdoption:: -I +.. option:: -I Run Python in isolated mode. This also implies :option:`-E`, :option:`-P` and :option:`-s` options. @@ -310,7 +310,7 @@ Miscellaneous options .. versionadded:: 3.4 -.. cmdoption:: -O +.. option:: -O Remove assert statements and any code conditional on the value of :const:`__debug__`. Augment the filename for compiled @@ -321,7 +321,7 @@ Miscellaneous options Modify ``.pyc`` filenames according to :pep:`488`. -.. cmdoption:: -OO +.. option:: -OO Do :option:`-O` and also discard docstrings. Augment the filename for compiled (:term:`bytecode`) files by adding ``.opt-2`` before the @@ -331,7 +331,7 @@ Miscellaneous options Modify ``.pyc`` filenames according to :pep:`488`. -.. cmdoption:: -P +.. option:: -P Don't prepend a potentially unsafe path to :data:`sys.path`: @@ -348,14 +348,14 @@ Miscellaneous options .. versionadded:: 3.11 -.. cmdoption:: -q +.. option:: -q Don't display the copyright and version messages even in interactive mode. .. versionadded:: 3.2 -.. cmdoption:: -R +.. option:: -R Turn on hash randomization. This option only has an effect if the :envvar:`PYTHONHASHSEED` environment variable is set to ``0``, since hash @@ -381,7 +381,7 @@ Miscellaneous options .. versionadded:: 3.2.3 -.. cmdoption:: -s +.. option:: -s Don't add the :data:`user site-packages directory ` to :data:`sys.path`. @@ -391,7 +391,7 @@ Miscellaneous options :pep:`370` -- Per user site-packages directory -.. cmdoption:: -S +.. option:: -S Disable the import of the module :mod:`site` and the site-dependent manipulations of :data:`sys.path` that it entails. Also disable these @@ -399,7 +399,7 @@ Miscellaneous options :func:`site.main` if you want them to be triggered). -.. cmdoption:: -u +.. option:: -u Force the stdout and stderr streams to be unbuffered. This option has no effect on the stdin stream. 
@@ -410,7 +410,7 @@ Miscellaneous options The text layer of the stdout and stderr streams now is unbuffered. -.. cmdoption:: -v +.. option:: -v Print a message each time a module is initialized, showing the place (filename or built-in module) from which it is loaded. When given twice @@ -425,7 +425,7 @@ Miscellaneous options .. _using-on-warnings: -.. cmdoption:: -W arg +.. option:: -W arg Warning control. Python's warning machinery by default prints warning messages to :data:`sys.stderr`. @@ -484,13 +484,13 @@ Miscellaneous options details. -.. cmdoption:: -x +.. option:: -x Skip the first line of the source, allowing use of non-Unix forms of ``#!cmd``. This is intended for a DOS specific hack only. -.. cmdoption:: -X +.. option:: -X Reserved for various implementation-specific options. CPython currently defines the following possible values: @@ -597,7 +597,7 @@ Miscellaneous options Options you shouldn't use ~~~~~~~~~~~~~~~~~~~~~~~~~ -.. cmdoption:: -J +.. option:: -J Reserved for use by Jython_. diff --git a/Doc/using/configure.rst b/Doc/using/configure.rst index 83b4c7aa0481e9..5f9e695d10ad44 100644 --- a/Doc/using/configure.rst +++ b/Doc/using/configure.rst @@ -97,7 +97,7 @@ See also the :file:`Misc/SpecialBuilds.txt` in the Python source distribution. General Options --------------- -.. cmdoption:: --enable-loadable-sqlite-extensions +.. option:: --enable-loadable-sqlite-extensions Support loadable extensions in the :mod:`!_sqlite` extension module (default is no) of the :mod:`sqlite3` module. @@ -107,12 +107,12 @@ General Options .. versionadded:: 3.6 -.. cmdoption:: --disable-ipv6 +.. option:: --disable-ipv6 Disable IPv6 support (enabled by default if supported), see the :mod:`socket` module. -.. cmdoption:: --enable-big-digits=[15|30] +.. option:: --enable-big-digits=[15|30] Define the size in bits of Python :class:`int` digits: 15 or 30 bits. @@ -122,7 +122,7 @@ General Options See :data:`sys.int_info.bits_per_digit `. -.. cmdoption:: --with-suffix=SUFFIX +.. option:: --with-suffix=SUFFIX Set the Python executable suffix to *SUFFIX*. @@ -135,7 +135,7 @@ General Options The default suffix on WASM platform is one of ``.js``, ``.html`` or ``.wasm``. -.. cmdoption:: --with-tzpath= +.. option:: --with-tzpath= Select the default time zone search path for :const:`zoneinfo.TZPATH`. See the :ref:`Compile-time configuration @@ -147,7 +147,7 @@ General Options .. versionadded:: 3.9 -.. cmdoption:: --without-decimal-contextvar +.. option:: --without-decimal-contextvar Build the ``_decimal`` extension module using a thread-local context rather than a coroutine-local context (default), see the :mod:`decimal` module. @@ -156,7 +156,7 @@ General Options .. versionadded:: 3.9 -.. cmdoption:: --with-dbmliborder= +.. option:: --with-dbmliborder= Override order to check db backends for the :mod:`dbm` module @@ -166,7 +166,7 @@ General Options * ``gdbm``; * ``bdb``. -.. cmdoption:: --without-c-locale-coercion +.. option:: --without-c-locale-coercion Disable C locale coercion to a UTF-8 based locale (enabled by default). @@ -174,13 +174,13 @@ General Options See :envvar:`PYTHONCOERCECLOCALE` and the :pep:`538`. -.. cmdoption:: --without-freelists +.. option:: --without-freelists Disable all freelists except the empty tuple singleton. .. versionadded:: 3.11 -.. cmdoption:: --with-platlibdir=DIRNAME +.. option:: --with-platlibdir=DIRNAME Python library directory name (default is ``lib``). @@ -190,7 +190,7 @@ General Options .. versionadded:: 3.9 -.. cmdoption:: --with-wheel-pkg-dir=PATH +.. 
option:: --with-wheel-pkg-dir=PATH Directory of wheel packages used by the :mod:`ensurepip` module (none by default). @@ -202,7 +202,7 @@ General Options .. versionadded:: 3.10 -.. cmdoption:: --with-pkg-config=[check|yes|no] +.. option:: --with-pkg-config=[check|yes|no] Whether configure should use :program:`pkg-config` to detect build dependencies. @@ -213,7 +213,7 @@ General Options .. versionadded:: 3.11 -.. cmdoption:: --enable-pystats +.. option:: --enable-pystats Turn on internal Python performance statistics gathering. @@ -280,7 +280,7 @@ General Options .. versionadded:: 3.11 -.. cmdoption:: --disable-gil +.. option:: --disable-gil Enables **experimental** support for running Python without the :term:`global interpreter lock` (GIL). @@ -289,12 +289,12 @@ General Options .. versionadded:: 3.13 -.. cmdoption:: PKG_CONFIG +.. option:: PKG_CONFIG Path to ``pkg-config`` utility. -.. cmdoption:: PKG_CONFIG_LIBDIR -.. cmdoption:: PKG_CONFIG_PATH +.. option:: PKG_CONFIG_LIBDIR +.. option:: PKG_CONFIG_PATH ``pkg-config`` options. @@ -302,19 +302,19 @@ General Options C compiler options ------------------ -.. cmdoption:: CC +.. option:: CC C compiler command. -.. cmdoption:: CFLAGS +.. option:: CFLAGS C compiler flags. -.. cmdoption:: CPP +.. option:: CPP C preprocessor command. -.. cmdoption:: CPPFLAGS +.. option:: CPPFLAGS C preprocessor flags, e.g. :samp:`-I{include_dir}`. @@ -322,15 +322,15 @@ C compiler options Linker options -------------- -.. cmdoption:: LDFLAGS +.. option:: LDFLAGS Linker flags, e.g. :samp:`-L{library_directory}`. -.. cmdoption:: LIBS +.. option:: LIBS Libraries to pass to the linker, e.g. :samp:`-l{library}`. -.. cmdoption:: MACHDEP +.. option:: MACHDEP Name for machine-dependent library files. @@ -340,80 +340,80 @@ Options for third-party dependencies .. versionadded:: 3.11 -.. cmdoption:: BZIP2_CFLAGS -.. cmdoption:: BZIP2_LIBS +.. option:: BZIP2_CFLAGS +.. option:: BZIP2_LIBS C compiler and linker flags to link Python to ``libbz2``, used by :mod:`bz2` module, overriding ``pkg-config``. -.. cmdoption:: CURSES_CFLAGS -.. cmdoption:: CURSES_LIBS +.. option:: CURSES_CFLAGS +.. option:: CURSES_LIBS C compiler and linker flags for ``libncurses`` or ``libncursesw``, used by :mod:`curses` module, overriding ``pkg-config``. -.. cmdoption:: GDBM_CFLAGS -.. cmdoption:: GDBM_LIBS +.. option:: GDBM_CFLAGS +.. option:: GDBM_LIBS C compiler and linker flags for ``gdbm``. -.. cmdoption:: LIBB2_CFLAGS -.. cmdoption:: LIBB2_LIBS +.. option:: LIBB2_CFLAGS +.. option:: LIBB2_LIBS C compiler and linker flags for ``libb2`` (:ref:`BLAKE2 `), used by :mod:`hashlib` module, overriding ``pkg-config``. -.. cmdoption:: LIBEDIT_CFLAGS -.. cmdoption:: LIBEDIT_LIBS +.. option:: LIBEDIT_CFLAGS +.. option:: LIBEDIT_LIBS C compiler and linker flags for ``libedit``, used by :mod:`readline` module, overriding ``pkg-config``. -.. cmdoption:: LIBFFI_CFLAGS -.. cmdoption:: LIBFFI_LIBS +.. option:: LIBFFI_CFLAGS +.. option:: LIBFFI_LIBS C compiler and linker flags for ``libffi``, used by :mod:`ctypes` module, overriding ``pkg-config``. -.. cmdoption:: LIBLZMA_CFLAGS -.. cmdoption:: LIBLZMA_LIBS +.. option:: LIBLZMA_CFLAGS +.. option:: LIBLZMA_LIBS C compiler and linker flags for ``liblzma``, used by :mod:`lzma` module, overriding ``pkg-config``. -.. cmdoption:: LIBREADLINE_CFLAGS -.. cmdoption:: LIBREADLINE_LIBS +.. option:: LIBREADLINE_CFLAGS +.. option:: LIBREADLINE_LIBS C compiler and linker flags for ``libreadline``, used by :mod:`readline` module, overriding ``pkg-config``. -.. 
cmdoption:: LIBSQLITE3_CFLAGS -.. cmdoption:: LIBSQLITE3_LIBS +.. option:: LIBSQLITE3_CFLAGS +.. option:: LIBSQLITE3_LIBS C compiler and linker flags for ``libsqlite3``, used by :mod:`sqlite3` module, overriding ``pkg-config``. -.. cmdoption:: LIBUUID_CFLAGS -.. cmdoption:: LIBUUID_LIBS +.. option:: LIBUUID_CFLAGS +.. option:: LIBUUID_LIBS C compiler and linker flags for ``libuuid``, used by :mod:`uuid` module, overriding ``pkg-config``. -.. cmdoption:: PANEL_CFLAGS -.. cmdoption:: PANEL_LIBS +.. option:: PANEL_CFLAGS +.. option:: PANEL_LIBS C compiler and Linker flags for PANEL, overriding ``pkg-config``. C compiler and linker flags for ``libpanel`` or ``libpanelw``, used by :mod:`curses.panel` module, overriding ``pkg-config``. -.. cmdoption:: TCLTK_CFLAGS -.. cmdoption:: TCLTK_LIBS +.. option:: TCLTK_CFLAGS +.. option:: TCLTK_LIBS C compiler and linker flags for TCLTK, overriding ``pkg-config``. -.. cmdoption:: ZLIB_CFLAGS -.. cmdoption:: ZLIB_LIBS +.. option:: ZLIB_CFLAGS +.. option:: ZLIB_LIBS C compiler and linker flags for ``libzlib``, used by :mod:`gzip` module, overriding ``pkg-config``. @@ -422,7 +422,7 @@ Options for third-party dependencies WebAssembly Options ------------------- -.. cmdoption:: --with-emscripten-target=[browser|node] +.. option:: --with-emscripten-target=[browser|node] Set build flavor for ``wasm32-emscripten``. @@ -431,7 +431,7 @@ WebAssembly Options .. versionadded:: 3.11 -.. cmdoption:: --enable-wasm-dynamic-linking +.. option:: --enable-wasm-dynamic-linking Turn on dynamic linking support for WASM. @@ -440,7 +440,7 @@ WebAssembly Options .. versionadded:: 3.11 -.. cmdoption:: --enable-wasm-pthreads +.. option:: --enable-wasm-pthreads Turn on pthreads support for WASM. @@ -450,7 +450,7 @@ WebAssembly Options Install Options --------------- -.. cmdoption:: --prefix=PREFIX +.. option:: --prefix=PREFIX Install architecture-independent files in PREFIX. On Unix, it defaults to :file:`/usr/local`. @@ -460,20 +460,20 @@ Install Options As an example, one can use ``--prefix="$HOME/.local/"`` to install a Python in its home directory. -.. cmdoption:: --exec-prefix=EPREFIX +.. option:: --exec-prefix=EPREFIX Install architecture-dependent files in EPREFIX, defaults to :option:`--prefix`. This value can be retrieved at runtime using :data:`sys.exec_prefix`. -.. cmdoption:: --disable-test-modules +.. option:: --disable-test-modules Don't build nor install test modules, like the :mod:`test` package or the :mod:`!_testcapi` extension module (built and installed by default). .. versionadded:: 3.10 -.. cmdoption:: --with-ensurepip=[upgrade|install|no] +.. option:: --with-ensurepip=[upgrade|install|no] Select the :mod:`ensurepip` command run on Python installation: @@ -492,7 +492,7 @@ Configuring Python using ``--enable-optimizations --with-lto`` (PGO + LTO) is recommended for best performance. The experimental ``--enable-bolt`` flag can also be used to improve performance. -.. cmdoption:: --enable-optimizations +.. option:: --enable-optimizations Enable Profile Guided Optimization (PGO) using :envvar:`PROFILE_TASK` (disabled by default). @@ -518,7 +518,10 @@ also be used to improve performance. .. versionadded:: 3.8 -.. cmdoption:: --with-lto=[full|thin|no|yes] + .. versionchanged:: 3.13 + Task failure is no longer ignored silently. + +.. option:: --with-lto=[full|thin|no|yes] Enable Link Time Optimization (LTO) in any build (disabled by default). @@ -533,7 +536,7 @@ also be used to improve performance. .. 
versionchanged:: 3.12 Use ThinLTO as the default optimization policy on Clang if the compiler accepts the flag. -.. cmdoption:: --enable-bolt +.. option:: --enable-bolt Enable usage of the `BOLT post-link binary optimizer `_ (disabled by @@ -558,32 +561,32 @@ also be used to improve performance. .. versionadded:: 3.12 -.. cmdoption:: BOLT_APPLY_FLAGS +.. option:: BOLT_APPLY_FLAGS Arguments to ``llvm-bolt`` when creating a `BOLT optimized binary `_. .. versionadded:: 3.12 -.. cmdoption:: BOLT_INSTRUMENT_FLAGS +.. option:: BOLT_INSTRUMENT_FLAGS Arguments to ``llvm-bolt`` when instrumenting binaries. .. versionadded:: 3.12 -.. cmdoption:: --with-computed-gotos +.. option:: --with-computed-gotos Enable computed gotos in evaluation loop (enabled by default on supported compilers). -.. cmdoption:: --without-pymalloc +.. option:: --without-pymalloc Disable the specialized Python memory allocator :ref:`pymalloc ` (enabled by default). See also :envvar:`PYTHONMALLOC` environment variable. -.. cmdoption:: --without-doc-strings +.. option:: --without-doc-strings Disable static documentation strings to reduce the memory footprint (enabled by default). Documentation strings defined in Python are not affected. @@ -592,11 +595,11 @@ also be used to improve performance. See the ``PyDoc_STRVAR()`` macro. -.. cmdoption:: --enable-profiling +.. option:: --enable-profiling Enable C-level code profiling with ``gprof`` (disabled by default). -.. cmdoption:: --with-strict-overflow +.. option:: --with-strict-overflow Add ``-fstrict-overflow`` to the C compiler flags (by default we add ``-fno-strict-overflow`` instead). @@ -652,12 +655,12 @@ See also the :ref:`Python Development Mode ` and the Debug options ------------- -.. cmdoption:: --with-pydebug +.. option:: --with-pydebug :ref:`Build Python in debug mode `: define the ``Py_DEBUG`` macro (disabled by default). -.. cmdoption:: --with-trace-refs +.. option:: --with-trace-refs Enable tracing references for debugging purpose (disabled by default). @@ -678,7 +681,7 @@ Debug options .. versionadded:: 3.8 -.. cmdoption:: --with-assertions +.. option:: --with-assertions Build with C assertions enabled (default is no): ``assert(...);`` and ``_PyObject_ASSERT(...);``. @@ -691,11 +694,11 @@ Debug options .. versionadded:: 3.6 -.. cmdoption:: --with-valgrind +.. option:: --with-valgrind Enable Valgrind support (default is no). -.. cmdoption:: --with-dtrace +.. option:: --with-dtrace Enable DTrace support (default is no). @@ -704,19 +707,19 @@ Debug options .. versionadded:: 3.6 -.. cmdoption:: --with-address-sanitizer +.. option:: --with-address-sanitizer Enable AddressSanitizer memory error detector, ``asan`` (default is no). .. versionadded:: 3.6 -.. cmdoption:: --with-memory-sanitizer +.. option:: --with-memory-sanitizer Enable MemorySanitizer allocation error detector, ``msan`` (default is no). .. versionadded:: 3.6 -.. cmdoption:: --with-undefined-behavior-sanitizer +.. option:: --with-undefined-behavior-sanitizer Enable UndefinedBehaviorSanitizer undefined behaviour detector, ``ubsan`` (default is no). @@ -727,11 +730,11 @@ Debug options Linker options -------------- -.. cmdoption:: --enable-shared +.. option:: --enable-shared Enable building a shared Python library: ``libpython`` (default is no). -.. cmdoption:: --without-static-libpython +.. option:: --without-static-libpython Do not build ``libpythonMAJOR.MINOR.a`` and do not install ``python.o`` (built and enabled by default). @@ -742,23 +745,23 @@ Linker options Libraries options ----------------- -.. 
cmdoption:: --with-libs='lib1 ...' +.. option:: --with-libs='lib1 ...' Link against additional libraries (default is no). -.. cmdoption:: --with-system-expat +.. option:: --with-system-expat Build the :mod:`!pyexpat` module using an installed ``expat`` library (default is no). -.. cmdoption:: --with-system-libmpdec +.. option:: --with-system-libmpdec Build the ``_decimal`` extension module using an installed ``mpdec`` library, see the :mod:`decimal` module (default is no). .. versionadded:: 3.3 -.. cmdoption:: --with-readline=readline|editline +.. option:: --with-readline=readline|editline Designate a backend library for the :mod:`readline` module. @@ -767,7 +770,7 @@ Libraries options .. versionadded:: 3.10 -.. cmdoption:: --without-readline +.. option:: --without-readline Don't build the :mod:`readline` module (built by default). @@ -775,21 +778,21 @@ Libraries options .. versionadded:: 3.10 -.. cmdoption:: --with-libm=STRING +.. option:: --with-libm=STRING Override ``libm`` math library to *STRING* (default is system-dependent). -.. cmdoption:: --with-libc=STRING +.. option:: --with-libc=STRING Override ``libc`` C library to *STRING* (default is system-dependent). -.. cmdoption:: --with-openssl=DIR +.. option:: --with-openssl=DIR Root of the OpenSSL directory. .. versionadded:: 3.7 -.. cmdoption:: --with-openssl-rpath=[no|auto|DIR] +.. option:: --with-openssl-rpath=[no|auto|DIR] Set runtime library directory (rpath) for OpenSSL libraries: @@ -804,7 +807,7 @@ Libraries options Security Options ---------------- -.. cmdoption:: --with-hash-algorithm=[fnv|siphash13|siphash24] +.. option:: --with-hash-algorithm=[fnv|siphash13|siphash24] Select hash algorithm for use in ``Python/pyhash.c``: @@ -817,7 +820,7 @@ Security Options .. versionadded:: 3.11 ``siphash13`` is added and it is the new default. -.. cmdoption:: --with-builtin-hashlib-hashes=md5,sha1,sha256,sha512,sha3,blake2 +.. option:: --with-builtin-hashlib-hashes=md5,sha1,sha256,sha512,sha3,blake2 Built-in hash modules: @@ -830,7 +833,7 @@ Security Options .. versionadded:: 3.9 -.. cmdoption:: --with-ssl-default-suites=[python|openssl|STRING] +.. option:: --with-ssl-default-suites=[python|openssl|STRING] Override the OpenSSL default cipher suites string: @@ -852,19 +855,19 @@ macOS Options See ``Mac/README.rst``. -.. cmdoption:: --enable-universalsdk -.. cmdoption:: --enable-universalsdk=SDKDIR +.. option:: --enable-universalsdk +.. option:: --enable-universalsdk=SDKDIR Create a universal binary build. *SDKDIR* specifies which macOS SDK should be used to perform the build (default is no). -.. cmdoption:: --enable-framework -.. cmdoption:: --enable-framework=INSTALLDIR +.. option:: --enable-framework +.. option:: --enable-framework=INSTALLDIR Create a Python.framework rather than a traditional Unix install. Optional *INSTALLDIR* specifies the installation path (default is no). -.. cmdoption:: --with-universal-archs=ARCH +.. option:: --with-universal-archs=ARCH Specify the kind of universal binary that should be created. This option is only valid when :option:`--enable-universalsdk` is set. @@ -880,7 +883,7 @@ See ``Mac/README.rst``. * ``intel-64``; * ``all``. -.. cmdoption:: --with-framework-name=FRAMEWORK +.. option:: --with-framework-name=FRAMEWORK Specify the name for the python framework on macOS only valid when :option:`--enable-framework` is set (default: ``Python``). @@ -894,21 +897,21 @@ for another CPU architecture or platform. Cross compiling requires a Python interpreter for the build platform. 
The version of the build Python must match the version of the cross compiled host Python. -.. cmdoption:: --build=BUILD +.. option:: --build=BUILD configure for building on BUILD, usually guessed by :program:`config.guess`. -.. cmdoption:: --host=HOST +.. option:: --host=HOST cross-compile to build programs to run on HOST (target platform) -.. cmdoption:: --with-build-python=path/to/python +.. option:: --with-build-python=path/to/python path to build ``python`` binary for cross compiling .. versionadded:: 3.11 -.. cmdoption:: CONFIG_SITE=file +.. option:: CONFIG_SITE=file An environment variable that points to a file with configure overrides. @@ -919,7 +922,7 @@ the version of the cross compiled host Python. ac_cv_file__dev_ptmx=yes ac_cv_file__dev_ptc=no -.. cmdoption:: HOSTRUNNER +.. option:: HOSTRUNNER Program to run CPython for the host platform for cross-compilation. diff --git a/Doc/whatsnew/3.12.rst b/Doc/whatsnew/3.12.rst index 6fe00bb9eb5df9..78ebe6e8e2c166 100644 --- a/Doc/whatsnew/3.12.rst +++ b/Doc/whatsnew/3.12.rst @@ -303,7 +303,7 @@ Let's cover these in detail: See :pep:`701` for more details. As a positive side-effect of how this feature has been implemented (by parsing f-strings -with :pep:`the PEG parser <617>`, now error messages for f-strings are more precise +with :pep:`the PEG parser <617>`), now error messages for f-strings are more precise and include the exact location of the error. For example, in Python 3.11, the following f-string raises a :exc:`SyntaxError`: diff --git a/Doc/whatsnew/3.13.rst b/Doc/whatsnew/3.13.rst index 1ef04fa7ae6adc..785deea1c1e48f 100644 --- a/Doc/whatsnew/3.13.rst +++ b/Doc/whatsnew/3.13.rst @@ -998,9 +998,18 @@ New Features * Add :c:func:`PyObject_VisitManagedDict` and :c:func:`PyObject_ClearManagedDict` functions which must be called by the traverse and clear functions of a type using - :c:macro:`Py_TPFLAGS_MANAGED_DICT` flag. + :c:macro:`Py_TPFLAGS_MANAGED_DICT` flag. The `pythoncapi-compat project + `__ can be used to get these + functions on Python 3.11 and 3.12. (Contributed by Victor Stinner in :gh:`107073`.) +* Add :c:func:`PyThreadState_GetUnchecked()` function: similar to + :c:func:`PyThreadState_Get()`, but don't kill the process with a fatal error + if it is NULL. The caller is responsible to check if the result is NULL. + Previously, the function was private and known as + ``_PyThreadState_UncheckedGet()``. + (Contributed by Victor Stinner in :gh:`108867`.) + Porting to Python 3.13 ---------------------- diff --git a/Include/cpython/code.h b/Include/cpython/code.h index 45b09a1265df80..cf715c55a2b3b8 100644 --- a/Include/cpython/code.h +++ b/Include/cpython/code.h @@ -167,7 +167,7 @@ typedef struct { PyObject *co_weakreflist; /* to support weakrefs to code objects */ \ _PyExecutorArray *co_executors; /* executors from optimizer */ \ _PyCoCached *_co_cached; /* cached co_* attributes */ \ - uint64_t _co_instrumentation_version; /* current instrumentation version */ \ + uintptr_t _co_instrumentation_version; /* current instrumentation version */ \ _PyCoMonitoringData *_co_monitoring; /* Monitoring data */ \ int _co_firsttraceable; /* index of first traceable instruction */ \ /* Scratch space for extra data relating to the code object. 
\ diff --git a/Include/cpython/object.h b/Include/cpython/object.h index 3838f19c75a230..ede394d9673d7e 100644 --- a/Include/cpython/object.h +++ b/Include/cpython/object.h @@ -425,7 +425,7 @@ PyAPI_FUNC(int) _PyTrash_cond(PyObject *op, destructor dealloc); /* If "cond" is false, then _tstate remains NULL and the deallocator \ * is run normally without involving the trashcan */ \ if (cond) { \ - _tstate = _PyThreadState_UncheckedGet(); \ + _tstate = PyThreadState_GetUnchecked(); \ if (_PyTrash_begin(_tstate, _PyObject_CAST(op))) { \ break; \ } \ diff --git a/Include/cpython/pystate.h b/Include/cpython/pystate.h index 7e4c57efc7c00c..8fd06242bc82e0 100644 --- a/Include/cpython/pystate.h +++ b/Include/cpython/pystate.h @@ -92,6 +92,15 @@ struct _ts { /* padding to align to 4 bytes */ unsigned int :24; } _status; +#ifdef Py_BUILD_CORE +# define _PyThreadState_WHENCE_NOTSET -1 +# define _PyThreadState_WHENCE_UNKNOWN 0 +# define _PyThreadState_WHENCE_INTERP 1 +# define _PyThreadState_WHENCE_THREADING 2 +# define _PyThreadState_WHENCE_GILSTATE 3 +# define _PyThreadState_WHENCE_EXEC 4 +#endif + int _whence; int py_recursion_remaining; int py_recursion_limit; @@ -209,7 +218,7 @@ struct _ts { /* Similar to PyThreadState_Get(), but don't issue a fatal error * if it is NULL. */ -PyAPI_FUNC(PyThreadState *) _PyThreadState_UncheckedGet(void); +PyAPI_FUNC(PyThreadState *) PyThreadState_GetUnchecked(void); // Disable tracing and profiling. diff --git a/Include/internal/pycore_ceval.h b/Include/internal/pycore_ceval.h index 23d0fa399d7e6f..48fee697324f8f 100644 --- a/Include/internal/pycore_ceval.h +++ b/Include/internal/pycore_ceval.h @@ -193,6 +193,39 @@ int _PyEval_UnpackIterable(PyThreadState *tstate, PyObject *v, int argcnt, int a void _PyEval_FrameClearAndPop(PyThreadState *tstate, _PyInterpreterFrame *frame); +#define _PY_GIL_DROP_REQUEST_BIT 0 +#define _PY_SIGNALS_PENDING_BIT 1 +#define _PY_CALLS_TO_DO_BIT 2 +#define _PY_ASYNC_EXCEPTION_BIT 3 +#define _PY_GC_SCHEDULED_BIT 4 + +/* Reserve a few bits for future use */ +#define _PY_EVAL_EVENTS_BITS 8 +#define _PY_EVAL_EVENTS_MASK ((1 << _PY_EVAL_EVENTS_BITS)-1) + +static inline void +_Py_set_eval_breaker_bit(PyInterpreterState *interp, uint32_t bit, uint32_t set) +{ + assert(set == 0 || set == 1); + uintptr_t to_set = set << bit; + uintptr_t mask = ((uintptr_t)1) << bit; + uintptr_t old = _Py_atomic_load_uintptr(&interp->ceval.eval_breaker); + if ((old & mask) == to_set) { + return; + } + uintptr_t new; + do { + new = (old & ~mask) | to_set; + } while (!_Py_atomic_compare_exchange_uintptr(&interp->ceval.eval_breaker, &old, new)); +} + +static inline bool +_Py_eval_breaker_bit_is_set(PyInterpreterState *interp, int32_t bit) +{ + return _Py_atomic_load_uintptr_relaxed(&interp->ceval.eval_breaker) & (((uintptr_t)1) << bit); +} + + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_ceval_state.h b/Include/internal/pycore_ceval_state.h index d0af5b542233e0..47971fbf2b4bfe 100644 --- a/Include/internal/pycore_ceval_state.h +++ b/Include/internal/pycore_ceval_state.h @@ -17,11 +17,7 @@ struct _pending_calls { int busy; PyThread_type_lock lock; /* Request for running pending calls. */ - _Py_atomic_int calls_to_do; - /* Request for looking at the `async_exc` field of the current - thread state. - Guarded by the GIL. */ - int async_exc; + int32_t calls_to_do; #define NPENDINGCALLS 32 struct _pending_call { _Py_pending_call_func func; @@ -62,11 +58,6 @@ struct _ceval_runtime_state { int _not_used; #endif } perf; - /* Request for checking signals. 
It is shared by all interpreters (see - bpo-40513). Any thread of any interpreter can receive a signal, but only - the main thread of the main interpreter can handle signals: see - _Py_ThreadCanHandleSignals(). */ - _Py_atomic_int signals_pending; /* Pending calls to be made only on the main thread. */ struct _pending_calls pending_mainthread; }; @@ -87,14 +78,12 @@ struct _ceval_state { * the fast path in the eval loop. * It is by far the hottest field in this struct and * should be placed at the beginning. */ - _Py_atomic_int eval_breaker; - /* Request for dropping the GIL */ - _Py_atomic_int gil_drop_request; + uintptr_t eval_breaker; + /* Avoid false sharing */ + int64_t padding[7]; int recursion_limit; struct _gil_runtime_state *gil; int own_gil; - /* The GC is ready to be executed */ - _Py_atomic_int gc_scheduled; struct _pending_calls pending; }; diff --git a/Include/internal/pycore_interp.h b/Include/internal/pycore_interp.h index ebf02281a7a2a6..21d1ee38b59e6b 100644 --- a/Include/internal/pycore_interp.h +++ b/Include/internal/pycore_interp.h @@ -67,8 +67,7 @@ struct _is { int _initialized; int finalizing; - uint64_t monitoring_version; - uint64_t last_restart_version; + uintptr_t last_restart_version; struct pythreads { uint64_t next_unique_id; /* The linked list of threads, newest first. */ diff --git a/Include/internal/pycore_opcode_metadata.h b/Include/internal/pycore_opcode_metadata.h index 16c1637e496033..8ef398c5db09f6 100644 --- a/Include/internal/pycore_opcode_metadata.h +++ b/Include/internal/pycore_opcode_metadata.h @@ -46,40 +46,49 @@ #define _GUARD_TYPE_VERSION 318 #define _CHECK_MANAGED_OBJECT_HAS_VALUES 319 #define _LOAD_ATTR_INSTANCE_VALUE 320 -#define _LOAD_ATTR_SLOT 321 -#define _GUARD_DORV_VALUES 322 -#define _STORE_ATTR_INSTANCE_VALUE 323 -#define _GUARD_TYPE_VERSION_STORE 324 -#define _STORE_ATTR_SLOT 325 -#define _IS_NONE 326 -#define _ITER_CHECK_LIST 327 -#define _ITER_JUMP_LIST 328 -#define _IS_ITER_EXHAUSTED_LIST 329 -#define _ITER_NEXT_LIST 330 -#define _ITER_CHECK_TUPLE 331 -#define _ITER_JUMP_TUPLE 332 -#define _IS_ITER_EXHAUSTED_TUPLE 333 -#define _ITER_NEXT_TUPLE 334 -#define _ITER_CHECK_RANGE 335 -#define _ITER_JUMP_RANGE 336 -#define _IS_ITER_EXHAUSTED_RANGE 337 -#define _ITER_NEXT_RANGE 338 -#define _GUARD_DORV_VALUES_INST_ATTR_FROM_DICT 339 -#define _GUARD_KEYS_VERSION 340 -#define _LOAD_ATTR_METHOD_WITH_VALUES 341 -#define _LOAD_ATTR_METHOD_NO_DICT 342 -#define _CHECK_CALL_BOUND_METHOD_EXACT_ARGS 343 -#define _INIT_CALL_BOUND_METHOD_EXACT_ARGS 344 -#define _CHECK_PEP_523 345 -#define _CHECK_FUNCTION_EXACT_ARGS 346 -#define _CHECK_STACK_SPACE 347 -#define _INIT_CALL_PY_EXACT_ARGS 348 -#define _PUSH_FRAME 349 -#define _POP_JUMP_IF_FALSE 350 -#define _POP_JUMP_IF_TRUE 351 -#define _JUMP_TO_TOP 352 -#define _SAVE_CURRENT_IP 353 -#define _INSERT 354 +#define _CHECK_ATTR_MODULE 321 +#define _LOAD_ATTR_MODULE 322 +#define _CHECK_ATTR_WITH_HINT 323 +#define _LOAD_ATTR_WITH_HINT 324 +#define _LOAD_ATTR_SLOT 325 +#define _CHECK_ATTR_CLASS 326 +#define _LOAD_ATTR_CLASS 327 +#define _GUARD_DORV_VALUES 328 +#define _STORE_ATTR_INSTANCE_VALUE 329 +#define _STORE_ATTR_SLOT 330 +#define _IS_NONE 331 +#define _ITER_CHECK_LIST 332 +#define _ITER_JUMP_LIST 333 +#define _IS_ITER_EXHAUSTED_LIST 334 +#define _ITER_NEXT_LIST 335 +#define _ITER_CHECK_TUPLE 336 +#define _ITER_JUMP_TUPLE 337 +#define _IS_ITER_EXHAUSTED_TUPLE 338 +#define _ITER_NEXT_TUPLE 339 +#define _ITER_CHECK_RANGE 340 +#define _ITER_JUMP_RANGE 341 +#define _IS_ITER_EXHAUSTED_RANGE 342 +#define 
_ITER_NEXT_RANGE 343 +#define _GUARD_DORV_VALUES_INST_ATTR_FROM_DICT 344 +#define _GUARD_KEYS_VERSION 345 +#define _LOAD_ATTR_METHOD_WITH_VALUES 346 +#define _LOAD_ATTR_METHOD_NO_DICT 347 +#define _LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES 348 +#define _LOAD_ATTR_NONDESCRIPTOR_NO_DICT 349 +#define _CHECK_ATTR_METHOD_LAZY_DICT 350 +#define _LOAD_ATTR_METHOD_LAZY_DICT 351 +#define _CHECK_CALL_BOUND_METHOD_EXACT_ARGS 352 +#define _INIT_CALL_BOUND_METHOD_EXACT_ARGS 353 +#define _CHECK_PEP_523 354 +#define _CHECK_FUNCTION_EXACT_ARGS 355 +#define _CHECK_STACK_SPACE 356 +#define _INIT_CALL_PY_EXACT_ARGS 357 +#define _PUSH_FRAME 358 +#define _POP_JUMP_IF_FALSE 359 +#define _POP_JUMP_IF_TRUE 360 +#define _JUMP_TO_TOP 361 +#define _SAVE_CURRENT_IP 362 +#define _INSERT 363 extern int _PyOpcode_num_popped(int opcode, int oparg, bool jump); #ifdef NEED_OPCODE_METADATA @@ -361,14 +370,26 @@ int _PyOpcode_num_popped(int opcode, int oparg, bool jump) { return 1; case LOAD_ATTR_INSTANCE_VALUE: return 1; + case _CHECK_ATTR_MODULE: + return 1; + case _LOAD_ATTR_MODULE: + return 1; case LOAD_ATTR_MODULE: return 1; + case _CHECK_ATTR_WITH_HINT: + return 1; + case _LOAD_ATTR_WITH_HINT: + return 1; case LOAD_ATTR_WITH_HINT: return 1; case _LOAD_ATTR_SLOT: return 1; case LOAD_ATTR_SLOT: return 1; + case _CHECK_ATTR_CLASS: + return 1; + case _LOAD_ATTR_CLASS: + return 1; case LOAD_ATTR_CLASS: return 1; case LOAD_ATTR_PROPERTY: @@ -383,8 +404,6 @@ int _PyOpcode_num_popped(int opcode, int oparg, bool jump) { return 2; case STORE_ATTR_WITH_HINT: return 2; - case _GUARD_TYPE_VERSION_STORE: - return 1; case _STORE_ATTR_SLOT: return 2; case STORE_ATTR_SLOT: @@ -509,10 +528,18 @@ int _PyOpcode_num_popped(int opcode, int oparg, bool jump) { return 1; case LOAD_ATTR_METHOD_NO_DICT: return 1; + case _LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES: + return 1; case LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES: return 1; + case _LOAD_ATTR_NONDESCRIPTOR_NO_DICT: + return 1; case LOAD_ATTR_NONDESCRIPTOR_NO_DICT: return 1; + case _CHECK_ATTR_METHOD_LAZY_DICT: + return 1; + case _LOAD_ATTR_METHOD_LAZY_DICT: + return 1; case LOAD_ATTR_METHOD_LAZY_DICT: return 1; case INSTRUMENTED_CALL: @@ -919,16 +946,28 @@ int _PyOpcode_num_pushed(int opcode, int oparg, bool jump) { return ((oparg & 1) ? 1 : 0) + 1; case LOAD_ATTR_INSTANCE_VALUE: return (oparg & 1 ? 1 : 0) + 1; + case _CHECK_ATTR_MODULE: + return 1; + case _LOAD_ATTR_MODULE: + return ((oparg & 1) ? 1 : 0) + 1; case LOAD_ATTR_MODULE: + return (oparg & 1 ? 1 : 0) + 1; + case _CHECK_ATTR_WITH_HINT: + return 1; + case _LOAD_ATTR_WITH_HINT: return ((oparg & 1) ? 1 : 0) + 1; case LOAD_ATTR_WITH_HINT: - return ((oparg & 1) ? 1 : 0) + 1; + return (oparg & 1 ? 1 : 0) + 1; case _LOAD_ATTR_SLOT: return ((oparg & 1) ? 1 : 0) + 1; case LOAD_ATTR_SLOT: return (oparg & 1 ? 1 : 0) + 1; - case LOAD_ATTR_CLASS: + case _CHECK_ATTR_CLASS: + return 1; + case _LOAD_ATTR_CLASS: return ((oparg & 1) ? 1 : 0) + 1; + case LOAD_ATTR_CLASS: + return (oparg & 1 ? 
1 : 0) + 1; case LOAD_ATTR_PROPERTY: return 1; case LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN: @@ -941,8 +980,6 @@ int _PyOpcode_num_pushed(int opcode, int oparg, bool jump) { return 0; case STORE_ATTR_WITH_HINT: return 0; - case _GUARD_TYPE_VERSION_STORE: - return 1; case _STORE_ATTR_SLOT: return 0; case STORE_ATTR_SLOT: @@ -1067,10 +1104,18 @@ int _PyOpcode_num_pushed(int opcode, int oparg, bool jump) { return 2; case LOAD_ATTR_METHOD_NO_DICT: return 2; + case _LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES: + return 1; case LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES: return 1; + case _LOAD_ATTR_NONDESCRIPTOR_NO_DICT: + return 1; case LOAD_ATTR_NONDESCRIPTOR_NO_DICT: return 1; + case _CHECK_ATTR_METHOD_LAZY_DICT: + return 1; + case _LOAD_ATTR_METHOD_LAZY_DICT: + return 2; case LOAD_ATTR_METHOD_LAZY_DICT: return 2; case INSTRUMENTED_CALL: @@ -1402,10 +1447,16 @@ const struct opcode_metadata _PyOpcode_opcode_metadata[OPCODE_METADATA_SIZE] = { [_CHECK_MANAGED_OBJECT_HAS_VALUES] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, [_LOAD_ATTR_INSTANCE_VALUE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, [LOAD_ATTR_INSTANCE_VALUE] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [_CHECK_ATTR_MODULE] = { true, INSTR_FMT_IXC0, HAS_DEOPT_FLAG }, + [_LOAD_ATTR_MODULE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, [LOAD_ATTR_MODULE] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [_CHECK_ATTR_WITH_HINT] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_LOAD_ATTR_WITH_HINT] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_DEOPT_FLAG }, [LOAD_ATTR_WITH_HINT] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_DEOPT_FLAG }, [_LOAD_ATTR_SLOT] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, [LOAD_ATTR_SLOT] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [_CHECK_ATTR_CLASS] = { true, INSTR_FMT_IXC0, HAS_DEOPT_FLAG }, + [_LOAD_ATTR_CLASS] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG }, [LOAD_ATTR_CLASS] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, [LOAD_ATTR_PROPERTY] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, [LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_DEOPT_FLAG }, @@ -1413,7 +1464,6 @@ const struct opcode_metadata _PyOpcode_opcode_metadata[OPCODE_METADATA_SIZE] = { [_STORE_ATTR_INSTANCE_VALUE] = { true, INSTR_FMT_IXC, 0 }, [STORE_ATTR_INSTANCE_VALUE] = { true, INSTR_FMT_IXC000, HAS_DEOPT_FLAG }, [STORE_ATTR_WITH_HINT] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_DEOPT_FLAG }, - [_GUARD_TYPE_VERSION_STORE] = { true, INSTR_FMT_IXC0, HAS_DEOPT_FLAG }, [_STORE_ATTR_SLOT] = { true, INSTR_FMT_IXC, 0 }, [STORE_ATTR_SLOT] = { true, INSTR_FMT_IXC000, HAS_DEOPT_FLAG }, [COMPARE_OP] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_ERROR_FLAG }, @@ -1476,8 +1526,12 @@ const struct opcode_metadata _PyOpcode_opcode_metadata[OPCODE_METADATA_SIZE] = { [LOAD_ATTR_METHOD_WITH_VALUES] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, [_LOAD_ATTR_METHOD_NO_DICT] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG }, [LOAD_ATTR_METHOD_NO_DICT] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [_LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG }, [LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [_LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG }, [LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = { true, 
INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, + [_CHECK_ATTR_METHOD_LAZY_DICT] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG }, + [_LOAD_ATTR_METHOD_LAZY_DICT] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG }, [LOAD_ATTR_METHOD_LAZY_DICT] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG }, [INSTRUMENTED_CALL] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG }, [CALL] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG }, @@ -1637,9 +1691,12 @@ const struct opcode_macro_expansion _PyOpcode_macro_expansion[OPCODE_MACRO_EXPAN [LOAD_SUPER_ATTR_METHOD] = { .nuops = 1, .uops = { { LOAD_SUPER_ATTR_METHOD, 0, 0 } } }, [LOAD_ATTR] = { .nuops = 1, .uops = { { LOAD_ATTR, 0, 0 } } }, [LOAD_ATTR_INSTANCE_VALUE] = { .nuops = 3, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _CHECK_MANAGED_OBJECT_HAS_VALUES, 0, 0 }, { _LOAD_ATTR_INSTANCE_VALUE, 1, 3 } } }, + [LOAD_ATTR_MODULE] = { .nuops = 2, .uops = { { _CHECK_ATTR_MODULE, 2, 1 }, { _LOAD_ATTR_MODULE, 1, 3 } } }, + [LOAD_ATTR_WITH_HINT] = { .nuops = 3, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _CHECK_ATTR_WITH_HINT, 0, 0 }, { _LOAD_ATTR_WITH_HINT, 1, 3 } } }, [LOAD_ATTR_SLOT] = { .nuops = 2, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _LOAD_ATTR_SLOT, 1, 3 } } }, - [STORE_ATTR_INSTANCE_VALUE] = { .nuops = 3, .uops = { { _GUARD_TYPE_VERSION_STORE, 2, 1 }, { _GUARD_DORV_VALUES, 0, 0 }, { _STORE_ATTR_INSTANCE_VALUE, 1, 3 } } }, - [STORE_ATTR_SLOT] = { .nuops = 2, .uops = { { _GUARD_TYPE_VERSION_STORE, 2, 1 }, { _STORE_ATTR_SLOT, 1, 3 } } }, + [LOAD_ATTR_CLASS] = { .nuops = 2, .uops = { { _CHECK_ATTR_CLASS, 2, 1 }, { _LOAD_ATTR_CLASS, 4, 5 } } }, + [STORE_ATTR_INSTANCE_VALUE] = { .nuops = 3, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _GUARD_DORV_VALUES, 0, 0 }, { _STORE_ATTR_INSTANCE_VALUE, 1, 3 } } }, + [STORE_ATTR_SLOT] = { .nuops = 2, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _STORE_ATTR_SLOT, 1, 3 } } }, [COMPARE_OP] = { .nuops = 1, .uops = { { COMPARE_OP, 0, 0 } } }, [COMPARE_OP_FLOAT] = { .nuops = 1, .uops = { { COMPARE_OP_FLOAT, 0, 0 } } }, [COMPARE_OP_INT] = { .nuops = 1, .uops = { { COMPARE_OP_INT, 0, 0 } } }, @@ -1659,6 +1716,9 @@ const struct opcode_macro_expansion _PyOpcode_macro_expansion[OPCODE_MACRO_EXPAN [PUSH_EXC_INFO] = { .nuops = 1, .uops = { { PUSH_EXC_INFO, 0, 0 } } }, [LOAD_ATTR_METHOD_WITH_VALUES] = { .nuops = 4, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _GUARD_DORV_VALUES_INST_ATTR_FROM_DICT, 0, 0 }, { _GUARD_KEYS_VERSION, 2, 3 }, { _LOAD_ATTR_METHOD_WITH_VALUES, 4, 5 } } }, [LOAD_ATTR_METHOD_NO_DICT] = { .nuops = 2, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _LOAD_ATTR_METHOD_NO_DICT, 4, 5 } } }, + [LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = { .nuops = 4, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _GUARD_DORV_VALUES_INST_ATTR_FROM_DICT, 0, 0 }, { _GUARD_KEYS_VERSION, 2, 3 }, { _LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES, 4, 5 } } }, + [LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = { .nuops = 2, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _LOAD_ATTR_NONDESCRIPTOR_NO_DICT, 4, 5 } } }, + [LOAD_ATTR_METHOD_LAZY_DICT] = { .nuops = 3, .uops = { { _GUARD_TYPE_VERSION, 2, 1 }, { _CHECK_ATTR_METHOD_LAZY_DICT, 0, 0 }, { _LOAD_ATTR_METHOD_LAZY_DICT, 4, 5 } } }, [CALL_BOUND_METHOD_EXACT_ARGS] = { .nuops = 9, .uops = { { _CHECK_PEP_523, 0, 0 }, { _CHECK_CALL_BOUND_METHOD_EXACT_ARGS, 0, 0 }, { _INIT_CALL_BOUND_METHOD_EXACT_ARGS, 0, 0 }, { _CHECK_FUNCTION_EXACT_ARGS, 2, 1 }, { _CHECK_STACK_SPACE, 0, 0 }, { _INIT_CALL_PY_EXACT_ARGS, 0, 0 }, { _SET_IP, 7, 3 }, { _SAVE_CURRENT_IP, 0, 0 }, { _PUSH_FRAME, 0, 0 } } }, 
[CALL_PY_EXACT_ARGS] = { .nuops = 7, .uops = { { _CHECK_PEP_523, 0, 0 }, { _CHECK_FUNCTION_EXACT_ARGS, 2, 1 }, { _CHECK_STACK_SPACE, 0, 0 }, { _INIT_CALL_PY_EXACT_ARGS, 0, 0 }, { _SET_IP, 7, 3 }, { _SAVE_CURRENT_IP, 0, 0 }, { _PUSH_FRAME, 0, 0 } } }, [CALL_TYPE_1] = { .nuops = 1, .uops = { { CALL_TYPE_1, 0, 0 } } }, @@ -1711,10 +1771,15 @@ const char * const _PyOpcode_uop_name[OPCODE_UOP_NAME_SIZE] = { [_GUARD_TYPE_VERSION] = "_GUARD_TYPE_VERSION", [_CHECK_MANAGED_OBJECT_HAS_VALUES] = "_CHECK_MANAGED_OBJECT_HAS_VALUES", [_LOAD_ATTR_INSTANCE_VALUE] = "_LOAD_ATTR_INSTANCE_VALUE", + [_CHECK_ATTR_MODULE] = "_CHECK_ATTR_MODULE", + [_LOAD_ATTR_MODULE] = "_LOAD_ATTR_MODULE", + [_CHECK_ATTR_WITH_HINT] = "_CHECK_ATTR_WITH_HINT", + [_LOAD_ATTR_WITH_HINT] = "_LOAD_ATTR_WITH_HINT", [_LOAD_ATTR_SLOT] = "_LOAD_ATTR_SLOT", + [_CHECK_ATTR_CLASS] = "_CHECK_ATTR_CLASS", + [_LOAD_ATTR_CLASS] = "_LOAD_ATTR_CLASS", [_GUARD_DORV_VALUES] = "_GUARD_DORV_VALUES", [_STORE_ATTR_INSTANCE_VALUE] = "_STORE_ATTR_INSTANCE_VALUE", - [_GUARD_TYPE_VERSION_STORE] = "_GUARD_TYPE_VERSION_STORE", [_STORE_ATTR_SLOT] = "_STORE_ATTR_SLOT", [_IS_NONE] = "_IS_NONE", [_ITER_CHECK_LIST] = "_ITER_CHECK_LIST", @@ -1733,6 +1798,10 @@ const char * const _PyOpcode_uop_name[OPCODE_UOP_NAME_SIZE] = { [_GUARD_KEYS_VERSION] = "_GUARD_KEYS_VERSION", [_LOAD_ATTR_METHOD_WITH_VALUES] = "_LOAD_ATTR_METHOD_WITH_VALUES", [_LOAD_ATTR_METHOD_NO_DICT] = "_LOAD_ATTR_METHOD_NO_DICT", + [_LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = "_LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES", + [_LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = "_LOAD_ATTR_NONDESCRIPTOR_NO_DICT", + [_CHECK_ATTR_METHOD_LAZY_DICT] = "_CHECK_ATTR_METHOD_LAZY_DICT", + [_LOAD_ATTR_METHOD_LAZY_DICT] = "_LOAD_ATTR_METHOD_LAZY_DICT", [_CHECK_CALL_BOUND_METHOD_EXACT_ARGS] = "_CHECK_CALL_BOUND_METHOD_EXACT_ARGS", [_INIT_CALL_BOUND_METHOD_EXACT_ARGS] = "_INIT_CALL_BOUND_METHOD_EXACT_ARGS", [_CHECK_PEP_523] = "_CHECK_PEP_523", diff --git a/Include/internal/pycore_pystate.h b/Include/internal/pycore_pystate.h index 6a36dba3708e38..a60d949bff1eba 100644 --- a/Include/internal/pycore_pystate.h +++ b/Include/internal/pycore_pystate.h @@ -48,6 +48,7 @@ _Py_IsMainInterpreterFinalizing(PyInterpreterState *interp) PyAPI_FUNC(int) _PyInterpreterState_SetRunningMain(PyInterpreterState *); PyAPI_FUNC(void) _PyInterpreterState_SetNotRunningMain(PyInterpreterState *); PyAPI_FUNC(int) _PyInterpreterState_IsRunningMain(PyInterpreterState *); +PyAPI_FUNC(int) _PyInterpreterState_FailIfRunningMain(PyInterpreterState *); static inline const PyConfig * @@ -92,7 +93,7 @@ PyAPI_FUNC(PyThreadState *) _PyThreadState_GetCurrent(void); The caller must hold the GIL. - See also PyThreadState_Get() and _PyThreadState_UncheckedGet(). */ + See also PyThreadState_Get() and PyThreadState_GetUnchecked(). 
*/ static inline PyThreadState* _PyThreadState_GET(void) { @@ -139,7 +140,9 @@ static inline PyInterpreterState* _PyInterpreterState_GET(void) { // PyThreadState functions -extern PyThreadState * _PyThreadState_New(PyInterpreterState *interp); +extern PyThreadState * _PyThreadState_New( + PyInterpreterState *interp, + int whence); extern void _PyThreadState_Bind(PyThreadState *tstate); extern void _PyThreadState_DeleteExcept(PyThreadState *tstate); diff --git a/Include/internal/pycore_runtime_init.h b/Include/internal/pycore_runtime_init.h index 2deba02a89f33c..574a3c1a9db66c 100644 --- a/Include/internal/pycore_runtime_init.h +++ b/Include/internal/pycore_runtime_init.h @@ -185,6 +185,7 @@ extern PyTypeObject _PyExc_MemoryError; #define _PyThreadState_INIT \ { \ + ._whence = _PyThreadState_WHENCE_NOTSET, \ .py_recursion_limit = Py_DEFAULT_RECURSION_LIMIT, \ .context_ver = 1, \ } diff --git a/Include/pystate.h b/Include/pystate.h index e6b4de979c87b8..727b8fbfffe0e6 100644 --- a/Include/pystate.h +++ b/Include/pystate.h @@ -56,7 +56,7 @@ PyAPI_FUNC(void) PyThreadState_Delete(PyThreadState *); The caller must hold the GIL. - See also _PyThreadState_UncheckedGet() and _PyThreadState_GET(). */ + See also PyThreadState_GetUnchecked() and _PyThreadState_GET(). */ PyAPI_FUNC(PyThreadState *) PyThreadState_Get(void); // Alias to PyThreadState_Get() diff --git a/Lib/argparse.py b/Lib/argparse.py index dfc98695f64e0a..a32884db80d1ea 100644 --- a/Lib/argparse.py +++ b/Lib/argparse.py @@ -89,8 +89,6 @@ import re as _re import sys as _sys -import warnings - from gettext import gettext as _, ngettext SUPPRESS = '==SUPPRESS==' @@ -910,6 +908,7 @@ def __init__(self, # parser.add_argument('-f', action=BooleanOptionalAction, type=int) for field_name in ('type', 'choices', 'metavar'): if locals()[field_name] is not _deprecated_default: + import warnings warnings._deprecated( field_name, "{name!r} is deprecated as of Python 3.12 and will be " @@ -1700,6 +1699,7 @@ def _remove_action(self, action): self._group_actions.remove(action) def add_argument_group(self, *args, **kwargs): + import warnings warnings.warn( "Nesting argument groups is deprecated.", category=DeprecationWarning, @@ -1728,6 +1728,7 @@ def _remove_action(self, action): self._group_actions.remove(action) def add_mutually_exclusive_group(self, *args, **kwargs): + import warnings warnings.warn( "Nesting mutually exclusive groups is deprecated.", category=DeprecationWarning, diff --git a/Lib/calendar.py b/Lib/calendar.py index 2a4deb70a0111f..03469d8ac96bcd 100644 --- a/Lib/calendar.py +++ b/Lib/calendar.py @@ -10,7 +10,6 @@ from enum import IntEnum, global_enum import locale as _locale from itertools import repeat -import warnings __all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday", "firstweekday", "isleap", "leapdays", "weekday", "monthrange", @@ -44,6 +43,7 @@ def __str__(self): def __getattr__(name): if name in ('January', 'February'): + import warnings warnings.warn(f"The '{name}' attribute is deprecated, use '{name.upper()}' instead", DeprecationWarning, stacklevel=2) if name == 'January': diff --git a/Lib/dataclasses.py b/Lib/dataclasses.py index 84f8d68ce092a4..31dc6f8abce91a 100644 --- a/Lib/dataclasses.py +++ b/Lib/dataclasses.py @@ -1567,15 +1567,15 @@ def _replace(obj, /, **changes): if not f.init: # Error if this field is specified in changes. 
if f.name in changes: - raise ValueError(f'field {f.name} is declared with ' - 'init=False, it cannot be specified with ' - 'replace()') + raise TypeError(f'field {f.name} is declared with ' + f'init=False, it cannot be specified with ' + f'replace()') continue if f.name not in changes: if f._field_type is _FIELD_INITVAR and f.default is MISSING: - raise ValueError(f"InitVar {f.name!r} " - 'must be specified with replace()') + raise TypeError(f"InitVar {f.name!r} " + f'must be specified with replace()') changes[f.name] = getattr(obj, f.name) # Create the new object, which calls __init__() and diff --git a/Lib/dis.py b/Lib/dis.py index 7e4792e8a8dc62..633c01b6fce56a 100644 --- a/Lib/dis.py +++ b/Lib/dis.py @@ -901,12 +901,14 @@ def _test(): import argparse parser = argparse.ArgumentParser() + parser.add_argument('-C', '--show-caches', action='store_true', + help='show inline caches') parser.add_argument('infile', type=argparse.FileType('rb'), nargs='?', default='-') args = parser.parse_args() with args.infile as infile: source = infile.read() code = compile(source, args.infile.name, "exec") - dis(code) + dis(code, show_caches=args.show_caches) if __name__ == "__main__": _test() diff --git a/Lib/getpass.py b/Lib/getpass.py index 6970d8adfbab36..8b42c0a536b4c4 100644 --- a/Lib/getpass.py +++ b/Lib/getpass.py @@ -18,7 +18,6 @@ import io import os import sys -import warnings __all__ = ["getpass","getuser","GetPassWarning"] @@ -118,6 +117,7 @@ def win_getpass(prompt='Password: ', stream=None): def fallback_getpass(prompt='Password: ', stream=None): + import warnings warnings.warn("Can not control echo on the terminal.", GetPassWarning, stacklevel=2) if not stream: diff --git a/Lib/random.py b/Lib/random.py index 1d789b107904fb..1cfc2ba2f025b5 100644 --- a/Lib/random.py +++ b/Lib/random.py @@ -65,7 +65,7 @@ try: # hashlib is pretty heavy to load, try lean internal module first - from _sha512 import sha512 as _sha512 + from _sha2 import sha512 as _sha512 except ImportError: # fallback to official implementation from hashlib import sha512 as _sha512 diff --git a/Lib/shutil.py b/Lib/shutil.py index a278b74fab2ddb..0fed0117a63234 100644 --- a/Lib/shutil.py +++ b/Lib/shutil.py @@ -10,7 +10,6 @@ import fnmatch import collections import errno -import warnings try: import zlib @@ -723,6 +722,7 @@ def rmtree(path, ignore_errors=False, onerror=None, *, onexc=None, dir_fd=None): """ if onerror is not None: + import warnings warnings.warn("onerror argument is deprecated, use onexc instead", DeprecationWarning, stacklevel=2) diff --git a/Lib/sqlite3/__main__.py b/Lib/sqlite3/__main__.py index 3b59763375c147..b93b84384a0925 100644 --- a/Lib/sqlite3/__main__.py +++ b/Lib/sqlite3/__main__.py @@ -116,6 +116,10 @@ def main(*args): else: # No SQL provided; start the REPL. console = SqliteInteractiveConsole(con) + try: + import readline + except ImportError: + pass console.interact(banner, exitmsg="") finally: con.close() diff --git a/Lib/tarfile.py b/Lib/tarfile.py index 726f9f50ba2e72..ec32f9ba49b03f 100755 --- a/Lib/tarfile.py +++ b/Lib/tarfile.py @@ -46,7 +46,6 @@ import struct import copy import re -import warnings try: import pwd @@ -2219,6 +2218,7 @@ def _get_filter_function(self, filter): if filter is None: filter = self.extraction_filter if filter is None: + import warnings warnings.warn( 'Python 3.14 will, by default, filter extracted tar ' + 'archives and reject files or modify their metadata. 
' diff --git a/Lib/test/.ruff.toml b/Lib/test/.ruff.toml index b1e6424e785408..f4e68eba14068d 100644 --- a/Lib/test/.ruff.toml +++ b/Lib/test/.ruff.toml @@ -8,9 +8,6 @@ extend-exclude = [ # Failed to lint "encoded_modules/module_iso_8859_1.py", "encoded_modules/module_koi8_r.py", - # Failed to parse - "support/socket_helper.py", - "test_fstring.py", # TODO Fix: F811 Redefinition of unused name "test__opcode.py", "test_buffer.py", @@ -26,7 +23,6 @@ extend-exclude = [ "test_keywordonlyarg.py", "test_pkg.py", "test_subclassinit.py", - "test_unittest/testmock/testpatch.py", "test_yield_from.py", "time_hashlib.py", # Pending https://github.com/python/cpython/pull/109139 diff --git a/Lib/test/libregrtest/cmdline.py b/Lib/test/libregrtest/cmdline.py index 8562a48446b4a7..dd4cd335bef7e3 100644 --- a/Lib/test/libregrtest/cmdline.py +++ b/Lib/test/libregrtest/cmdline.py @@ -3,6 +3,7 @@ import shlex import sys from test.support import os_helper +from .utils import ALL_RESOURCES, RESOURCE_NAMES USAGE = """\ @@ -27,8 +28,10 @@ Additional option details: -r randomizes test execution order. You can use --randseed=int to provide an -int seed value for the randomizer; this is useful for reproducing troublesome -test orders. +int seed value for the randomizer. The randseed value will be used +to set seeds for all random usages in tests +(including randomizing the tests order if -r is set). +By default we always set random seed, but do not randomize test order. -s On the first invocation of regrtest using -s, the first test file found or the first test file given on the command line is run, and the name of @@ -130,19 +133,6 @@ """ -ALL_RESOURCES = ('audio', 'curses', 'largefile', 'network', - 'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui', 'walltime') - -# Other resources excluded from --use=all: -# -# - extralagefile (ex: test_zipfile64): really too slow to be enabled -# "by default" -# - tzdata: while needed to validate fully test_datetime, it makes -# test_datetime too slow (15-20 min on some buildbots) and so is disabled by -# default (see bpo-30822). -RESOURCE_NAMES = ALL_RESOURCES + ('extralargefile', 'tzdata') - - class Namespace(argparse.Namespace): def __init__(self, **kwargs) -> None: self.ci = False @@ -229,6 +219,9 @@ def _create_parser(): more_details) group.add_argument('-p', '--python', metavar='PYTHON', help='Command to run Python test subprocesses with.') + group.add_argument('--randseed', metavar='SEED', + dest='random_seed', type=int, + help='pass a global random seed') group = parser.add_argument_group('Verbosity') group.add_argument('-v', '--verbose', action='count', @@ -249,10 +242,6 @@ def _create_parser(): group = parser.add_argument_group('Selecting tests') group.add_argument('-r', '--randomize', action='store_true', help='randomize test execution order.' + more_details) - group.add_argument('--randseed', metavar='SEED', - dest='random_seed', type=int, - help='pass a random seed to reproduce a previous ' - 'random run') group.add_argument('-f', '--fromfile', metavar='FILE', help='read names of tests to run from a file.' 
+ more_details) diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py index af5fb0f464f5b5..60179ec7708c1c 100644 --- a/Lib/test/libregrtest/main.py +++ b/Lib/test/libregrtest/main.py @@ -129,7 +129,11 @@ def __init__(self, ns: Namespace, _add_python_opts: bool = False): # Randomize self.randomize: bool = ns.randomize - self.random_seed: int | None = ns.random_seed + self.random_seed: int | None = ( + ns.random_seed + if ns.random_seed is not None + else random.getrandbits(32) + ) if 'SOURCE_DATE_EPOCH' in os.environ: self.randomize = False self.random_seed = None @@ -214,10 +218,8 @@ def find_tests(self, tests: TestList | None = None) -> tuple[TestTuple, TestList print(f"Cannot find starting test: {self.starting_test}") sys.exit(1) + random.seed(self.random_seed) if self.randomize: - if self.random_seed is None: - self.random_seed = random.randrange(100_000_000) - random.seed(self.random_seed) random.shuffle(selected) return (tuple(selected), tests) @@ -439,8 +441,7 @@ def _run_tests(self, selected: TestTuple, tests: TestList | None) -> int: or tests or self.cmdline_args)): display_header(self.use_resources, self.python_cmd) - if self.randomize: - print("Using random seed", self.random_seed) + print("Using random seed", self.random_seed) runtests = self.create_run_tests(selected) self.first_runtests = runtests diff --git a/Lib/test/libregrtest/pgo.py b/Lib/test/libregrtest/pgo.py index cabbba73d5eff5..e3a6927be5db1d 100644 --- a/Lib/test/libregrtest/pgo.py +++ b/Lib/test/libregrtest/pgo.py @@ -42,10 +42,10 @@ 'test_set', 'test_sqlite3', 'test_statistics', + 'test_str', 'test_struct', 'test_tabnanny', 'test_time', - 'test_unicode', 'test_xml_etree', 'test_xml_etree_c', ] diff --git a/Lib/test/libregrtest/setup.py b/Lib/test/libregrtest/setup.py index f0d8d7ebaa2fdb..cb410da5acb4c3 100644 --- a/Lib/test/libregrtest/setup.py +++ b/Lib/test/libregrtest/setup.py @@ -126,5 +126,4 @@ def setup_tests(runtests: RunTests): if runtests.gc_threshold is not None: gc.set_threshold(runtests.gc_threshold) - if runtests.randomize: - random.seed(runtests.random_seed) + random.seed(runtests.random_seed) diff --git a/Lib/test/libregrtest/utils.py b/Lib/test/libregrtest/utils.py index 86fb820a23f535..ea2086cd71b173 100644 --- a/Lib/test/libregrtest/utils.py +++ b/Lib/test/libregrtest/utils.py @@ -33,6 +33,19 @@ EXIT_TIMEOUT = 120.0 +ALL_RESOURCES = ('audio', 'curses', 'largefile', 'network', + 'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui', 'walltime') + +# Other resources excluded from --use=all: +# +# - extralagefile (ex: test_zipfile64): really too slow to be enabled +# "by default" +# - tzdata: while needed to validate fully test_datetime, it makes +# test_datetime too slow (15-20 min on some buildbots) and so is disabled by +# default (see bpo-30822). 
+RESOURCE_NAMES = ALL_RESOURCES + ('extralargefile', 'tzdata') + + # Types for types hints StrPath = str TestName = str @@ -535,6 +548,30 @@ def is_cross_compiled(): return ('_PYTHON_HOST_PLATFORM' in os.environ) +def format_resources(use_resources: tuple[str, ...]): + use_resources = set(use_resources) + all_resources = set(ALL_RESOURCES) + + # Express resources relative to "all" + relative_all = ['all'] + for name in sorted(all_resources - use_resources): + relative_all.append(f'-{name}') + for name in sorted(use_resources - all_resources): + relative_all.append(f'{name}') + all_text = ','.join(relative_all) + all_text = f"resources: {all_text}" + + # List of enabled resources + text = ','.join(sorted(use_resources)) + text = f"resources ({len(use_resources)}): {text}" + + # Pick the shortest string (prefer relative to all if lengths are equal) + if len(all_text) <= len(text): + return all_text + else: + return text + + def display_header(use_resources: tuple[str, ...], python_cmd: tuple[str, ...] | None): # Print basic platform information @@ -550,14 +587,15 @@ def display_header(use_resources: tuple[str, ...], if process_cpu_count and process_cpu_count != cpu_count: cpu_count = f"{process_cpu_count} (process) / {cpu_count} (system)" print("== CPU count:", cpu_count) - print("== encodings: locale=%s, FS=%s" + print("== encodings: locale=%s FS=%s" % (locale.getencoding(), sys.getfilesystemencoding())) if use_resources: - print(f"== resources ({len(use_resources)}): " - f"{', '.join(sorted(use_resources))}") + text = format_resources(use_resources) + print(f"== {text}") else: - print("== resources: (all disabled, use -u option)") + print("== resources: all test resources are disabled, " + "use -u option to unskip tests") cross_compile = is_cross_compiled() if cross_compile: diff --git a/Lib/test/support/interpreters.py b/Lib/test/support/interpreters.py index eeff3abe0324e5..3b501614bc4b4d 100644 --- a/Lib/test/support/interpreters.py +++ b/Lib/test/support/interpreters.py @@ -7,7 +7,8 @@ # aliases: from _xxsubinterpreters import is_shareable from _xxinterpchannels import ( - ChannelError, ChannelNotFoundError, ChannelEmptyError, + ChannelError, ChannelNotFoundError, ChannelClosedError, + ChannelEmptyError, ChannelNotEmptyError, ) @@ -117,10 +118,16 @@ def list_all_channels(): class _ChannelEnd: """The base class for RecvChannel and SendChannel.""" - def __init__(self, id): - if not isinstance(id, (int, _channels.ChannelID)): - raise TypeError(f'id must be an int, got {id!r}') - self._id = id + _end = None + + def __init__(self, cid): + if self._end == 'send': + cid = _channels._channel_id(cid, send=True, force=True) + elif self._end == 'recv': + cid = _channels._channel_id(cid, recv=True, force=True) + else: + raise NotImplementedError(self._end) + self._id = cid def __repr__(self): return f'{type(self).__name__}(id={int(self._id)})' @@ -147,6 +154,8 @@ def id(self): class RecvChannel(_ChannelEnd): """The receiving end of a cross-interpreter channel.""" + _end = 'recv' + def recv(self, *, _sentinel=object(), _delay=10 / 1000): # 10 milliseconds """Return the next object from the channel. @@ -171,10 +180,15 @@ def recv_nowait(self, default=_NOT_SET): else: return _channels.recv(self._id, default) + def close(self): + _channels.close(self._id, recv=True) + class SendChannel(_ChannelEnd): """The sending end of a cross-interpreter channel.""" + _end = 'send' + def send(self, obj): """Send the object (i.e. its data) to the channel's receiving end. 
@@ -196,3 +210,10 @@ def send_nowait(self, obj): # None. This should be fixed when channel_send_wait() is added. # See bpo-32604 and gh-19829. return _channels.send(self._id, obj) + + def close(self): + _channels.close(self._id, send=True) + + +# XXX This is causing leaks (gh-110318): +#_channels._register_end_types(SendChannel, RecvChannel) diff --git a/Lib/test/test_asyncio/test_unix_events.py b/Lib/test/test_asyncio/test_unix_events.py index 7322be597ae2d2..d2c8cba6acfa31 100644 --- a/Lib/test/test_asyncio/test_unix_events.py +++ b/Lib/test/test_asyncio/test_unix_events.py @@ -4,6 +4,7 @@ import errno import io import multiprocessing +from multiprocessing.util import _cleanup_tests as multiprocessing_cleanup_tests import os import pathlib import signal @@ -15,6 +16,7 @@ import unittest from unittest import mock import warnings + from test import support from test.support import os_helper from test.support import socket_helper @@ -1903,6 +1905,8 @@ async def test_fork_not_share_event_loop(self): @hashlib_helper.requires_hashdigest('md5') def test_fork_signal_handling(self): + self.addCleanup(multiprocessing_cleanup_tests) + # Sending signal to the forked process should not affect the parent # process ctx = multiprocessing.get_context('fork') @@ -1947,6 +1951,8 @@ async def func(): @hashlib_helper.requires_hashdigest('md5') def test_fork_asyncio_run(self): + self.addCleanup(multiprocessing_cleanup_tests) + ctx = multiprocessing.get_context('fork') manager = ctx.Manager() self.addCleanup(manager.shutdown) @@ -1964,6 +1970,8 @@ async def child_main(): @hashlib_helper.requires_hashdigest('md5') def test_fork_asyncio_subprocess(self): + self.addCleanup(multiprocessing_cleanup_tests) + ctx = multiprocessing.get_context('fork') manager = ctx.Manager() self.addCleanup(manager.shutdown) diff --git a/Lib/test/test_concurrent_futures/test_deadlock.py b/Lib/test/test_concurrent_futures/test_deadlock.py index af702542081ad9..3c30c4558c0b3e 100644 --- a/Lib/test/test_concurrent_futures/test_deadlock.py +++ b/Lib/test/test_concurrent_futures/test_deadlock.py @@ -286,11 +286,12 @@ def wakeup(self): super().wakeup() def clear(self): + super().clear() try: while True: self._dummy_queue.get_nowait() except queue.Empty: - super().clear() + pass with (unittest.mock.patch.object(futures.process._ExecutorManagerThread, 'run', mock_run), diff --git a/Lib/test/test_copy.py b/Lib/test/test_copy.py index 55d6a4fb445221..c71fa8fba62155 100644 --- a/Lib/test/test_copy.py +++ b/Lib/test/test_copy.py @@ -956,14 +956,24 @@ def __replace__(self, **changes): def test_namedtuple(self): from collections import namedtuple - Point = namedtuple('Point', 'x y', defaults=(0,)) - p = Point(11, 22) - self.assertEqual(copy.replace(p), (11, 22)) - self.assertEqual(copy.replace(p, x=1), (1, 22)) - self.assertEqual(copy.replace(p, y=2), (11, 2)) - self.assertEqual(copy.replace(p, x=1, y=2), (1, 2)) - with self.assertRaisesRegex(ValueError, 'unexpected field name'): - copy.replace(p, x=1, error=2) + from typing import NamedTuple + PointFromCall = namedtuple('Point', 'x y', defaults=(0,)) + class PointFromInheritance(PointFromCall): + pass + class PointFromClass(NamedTuple): + x: int + y: int = 0 + for Point in (PointFromCall, PointFromInheritance, PointFromClass): + with self.subTest(Point=Point): + p = Point(11, 22) + self.assertIsInstance(p, Point) + self.assertEqual(copy.replace(p), (11, 22)) + self.assertIsInstance(copy.replace(p), Point) + self.assertEqual(copy.replace(p, x=1), (1, 22)) + self.assertEqual(copy.replace(p, y=2), 
(11, 2)) + self.assertEqual(copy.replace(p, x=1, y=2), (1, 2)) + with self.assertRaisesRegex(ValueError, 'unexpected field name'): + copy.replace(p, x=1, error=2) def test_dataclass(self): from dataclasses import dataclass diff --git a/Lib/test/test_dataclasses/__init__.py b/Lib/test/test_dataclasses/__init__.py index 7c07dfc77de208..f629d7bb53959b 100644 --- a/Lib/test/test_dataclasses/__init__.py +++ b/Lib/test/test_dataclasses/__init__.py @@ -3965,9 +3965,9 @@ class C: self.assertEqual((c1.x, c1.y, c1.z, c1.t), (3, 2, 10, 100)) - with self.assertRaisesRegex(ValueError, 'init=False'): + with self.assertRaisesRegex(TypeError, 'init=False'): replace(c, x=3, z=20, t=50) - with self.assertRaisesRegex(ValueError, 'init=False'): + with self.assertRaisesRegex(TypeError, 'init=False'): replace(c, z=20) replace(c, x=3, z=20, t=50) @@ -4020,10 +4020,10 @@ class C: self.assertEqual((c1.x, c1.y), (5, 10)) # Trying to replace y is an error. - with self.assertRaisesRegex(ValueError, 'init=False'): + with self.assertRaisesRegex(TypeError, 'init=False'): replace(c, x=2, y=30) - with self.assertRaisesRegex(ValueError, 'init=False'): + with self.assertRaisesRegex(TypeError, 'init=False'): replace(c, y=30) def test_classvar(self): @@ -4056,8 +4056,8 @@ def __post_init__(self, y): c = C(1, 10) self.assertEqual(c.x, 10) - with self.assertRaisesRegex(ValueError, r"InitVar 'y' must be " - "specified with replace()"): + with self.assertRaisesRegex(TypeError, r"InitVar 'y' must be " + r"specified with replace\(\)"): replace(c, x=3) c = replace(c, x=3, y=5) self.assertEqual(c.x, 15) diff --git a/Lib/test/test_gdb/test_cfunction_full.py b/Lib/test/test_gdb/test_cfunction_full.py index 3e90cb1d392f49..572cbdab5d77c0 100644 --- a/Lib/test/test_gdb/test_cfunction_full.py +++ b/Lib/test/test_gdb/test_cfunction_full.py @@ -18,7 +18,7 @@ def check(self, func_name, cmd): gdb_output = self.get_stack_trace( cmd, breakpoint=func_name, - cmds_after_breakpoint=['py-bt-full'], + cmds_after_breakpoint=['bt', 'py-bt-full'], # bpo-45207: Ignore 'Function "meth_varargs" not # defined.' message in stderr. 
ignore_stderr=True, diff --git a/Lib/test/test_generated_cases.py b/Lib/test/test_generated_cases.py index b5eaf824aee706..5971d2e436e4aa 100644 --- a/Lib/test/test_generated_cases.py +++ b/Lib/test/test_generated_cases.py @@ -239,16 +239,22 @@ def test_overlap(self): def test_predictions_and_eval_breaker(self): input = """ - inst(OP1, (--)) { + inst(OP1, (arg -- rest)) { } inst(OP3, (arg -- res)) { - DEOPT_IF(xxx, OP1); + DEOPT_IF(xxx); CHECK_EVAL_BREAKER(); } + family(OP1, INLINE_CACHE_ENTRIES_OP1) = { OP3 }; """ output = """ TARGET(OP1) { PREDICTED(OP1); + static_assert(INLINE_CACHE_ENTRIES_OP1 == 0, "incorrect cache size"); + PyObject *arg; + PyObject *rest; + arg = stack_pointer[-1]; + stack_pointer[-1] = rest; DISPATCH(); } @@ -371,6 +377,7 @@ def test_macro_instruction(self): } TARGET(OP) { + PREDICTED(OP); static_assert(INLINE_CACHE_ENTRIES_OP == 5, "incorrect cache size"); PyObject *right; PyObject *left; diff --git a/Lib/test/test_interpreters.py b/Lib/test/test_interpreters.py index e62859a9c2b08e..f2ef172d26dfc8 100644 --- a/Lib/test/test_interpreters.py +++ b/Lib/test/test_interpreters.py @@ -68,6 +68,17 @@ def run(): class TestBase(unittest.TestCase): + def pipe(self): + def ensure_closed(fd): + try: + os.close(fd) + except OSError: + pass + r, w = os.pipe() + self.addCleanup(lambda: ensure_closed(r)) + self.addCleanup(lambda: ensure_closed(w)) + return r, w + def tearDown(self): clean_up_interpreters() @@ -262,7 +273,7 @@ def test_subinterpreter(self): self.assertFalse(interp.is_running()) def test_finished(self): - r, w = os.pipe() + r, w = self.pipe() interp = interpreters.create() interp.run(f"""if True: import os @@ -299,8 +310,8 @@ def test_bad_id(self): interp.is_running() def test_with_only_background_threads(self): - r_interp, w_interp = os.pipe() - r_thread, w_thread = os.pipe() + r_interp, w_interp = self.pipe() + r_thread, w_thread = self.pipe() DONE = b'D' FINISHED = b'F' @@ -425,8 +436,8 @@ def test_still_running(self): self.assertTrue(interp.is_running()) def test_subthreads_still_running(self): - r_interp, w_interp = os.pipe() - r_thread, w_thread = os.pipe() + r_interp, w_interp = self.pipe() + r_thread, w_thread = self.pipe() FINISHED = b'F' @@ -532,8 +543,8 @@ def test_bytes_for_script(self): interp.run(b'print("spam")') def test_with_background_threads_still_running(self): - r_interp, w_interp = os.pipe() - r_thread, w_thread = os.pipe() + r_interp, w_interp = self.pipe() + r_thread, w_thread = self.pipe() RAN = b'R' DONE = b'D' @@ -822,6 +833,23 @@ def test_list_all(self): after = set(interpreters.list_all_channels()) self.assertEqual(after, created) + @unittest.expectedFailure # See gh-110318: + def test_shareable(self): + rch, sch = interpreters.create_channel() + + self.assertTrue( + interpreters.is_shareable(rch)) + self.assertTrue( + interpreters.is_shareable(sch)) + + sch.send_nowait(rch) + sch.send_nowait(sch) + rch2 = rch.recv() + sch2 = rch.recv() + + self.assertEqual(rch2, rch) + self.assertEqual(sch2, sch) + class TestRecvChannelAttrs(TestBase): diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py index 38071341006092..f24d23e5c2f731 100644 --- a/Lib/test/test_regrtest.py +++ b/Lib/test/test_regrtest.py @@ -392,7 +392,7 @@ def check_ci_mode(self, args, use_resources, rerun=True): self.assertEqual(regrtest.num_workers, -1) self.assertEqual(regrtest.want_rerun, rerun) self.assertTrue(regrtest.randomize) - self.assertIsNone(regrtest.random_seed) + self.assertIsInstance(regrtest.random_seed, int) 
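# Illustrative sketch only (not part of the patch): the seeding logic that the
# assertions above exercise, simplified from the Lib/test/libregrtest/main.py
# change earlier in this diff. A 32-bit seed is now always chosen and
# random.seed() is always called; -r/--randomize only controls shuffling.
# The helper name pick_seed() is illustrative, not from the patch.
import random

def pick_seed(cli_seed=None):
    # --randseed value if given, otherwise a fresh 32-bit seed
    return cli_seed if cli_seed is not None else random.getrandbits(32)

seed = pick_seed()
random.seed(seed)
selected = ["test_os", "test_sys", "test_time"]
randomize = False            # True when -r is passed
if randomize:
    random.shuffle(selected)
print("Using random seed", seed)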
self.assertTrue(regrtest.fail_env_changed) self.assertTrue(regrtest.fail_rerun) self.assertTrue(regrtest.print_slowest) @@ -663,7 +663,7 @@ def list_regex(line_format, tests): def parse_random_seed(self, output): match = self.regex_search(r'Using random seed ([0-9]+)', output) randseed = int(match.group(1)) - self.assertTrue(0 <= randseed <= 100_000_000, randseed) + self.assertTrue(0 <= randseed, randseed) return randseed def run_command(self, args, input=None, exitcode=0, **kw): @@ -950,6 +950,10 @@ def test_random(self): test_random2 = int(match.group(1)) self.assertEqual(test_random2, test_random) + # check that random.seed is used by default + output = self.run_tests(test, exitcode=EXITCODE_NO_TESTS_RAN) + self.assertIsInstance(self.parse_random_seed(output), int) + def test_fromfile(self): # test --fromfile tests = [self.create_test() for index in range(5)] @@ -2076,6 +2080,26 @@ def test_get_signal_name(self): ): self.assertEqual(utils.get_signal_name(exitcode), expected, exitcode) + def test_format_resources(self): + format_resources = utils.format_resources + ALL_RESOURCES = utils.ALL_RESOURCES + self.assertEqual( + format_resources(("network",)), + 'resources (1): network') + self.assertEqual( + format_resources(("audio", "decimal", "network")), + 'resources (3): audio,decimal,network') + self.assertEqual( + format_resources(ALL_RESOURCES), + 'resources: all') + self.assertEqual( + format_resources(tuple(name for name in ALL_RESOURCES + if name != "cpu")), + 'resources: all,-cpu') + self.assertEqual( + format_resources((*ALL_RESOURCES, "tzdata")), + 'resources: all,tzdata') + if __name__ == '__main__': unittest.main() diff --git a/Lib/test/test_structseq.py b/Lib/test/test_structseq.py index a9fe193028ebe4..6aec63e2603412 100644 --- a/Lib/test/test_structseq.py +++ b/Lib/test/test_structseq.py @@ -1,4 +1,7 @@ +import copy import os +import pickle +import re import time import unittest @@ -89,10 +92,69 @@ def test_constructor(self): self.assertRaises(TypeError, t, "123") self.assertRaises(TypeError, t, "123", dict={}) self.assertRaises(TypeError, t, "123456789", dict=None) + self.assertRaises(TypeError, t, seq="123456789", dict={}) + + self.assertEqual(t("123456789"), tuple("123456789")) + self.assertEqual(t("123456789", {}), tuple("123456789")) + self.assertEqual(t("123456789", dict={}), tuple("123456789")) + self.assertEqual(t(sequence="123456789", dict={}), tuple("123456789")) + + self.assertEqual(t("1234567890"), tuple("123456789")) + self.assertEqual(t("1234567890").tm_zone, "0") + self.assertEqual(t("123456789", {"tm_zone": "some zone"}), tuple("123456789")) + self.assertEqual(t("123456789", {"tm_zone": "some zone"}).tm_zone, "some zone") s = "123456789" self.assertEqual("".join(t(s)), s) + def test_constructor_with_duplicate_fields(self): + t = time.struct_time + + error_message = re.escape("got duplicate or unexpected field name(s)") + with self.assertRaisesRegex(TypeError, error_message): + t("1234567890", dict={"tm_zone": "some zone"}) + with self.assertRaisesRegex(TypeError, error_message): + t("1234567890", dict={"tm_zone": "some zone", "tm_mon": 1}) + with self.assertRaisesRegex(TypeError, error_message): + t("1234567890", dict={"error": 0, "tm_zone": "some zone"}) + with self.assertRaisesRegex(TypeError, error_message): + t("1234567890", dict={"error": 0, "tm_zone": "some zone", "tm_mon": 1}) + + def test_constructor_with_duplicate_unnamed_fields(self): + assert os.stat_result.n_unnamed_fields > 0 + n_visible_fields = os.stat_result.n_sequence_fields + + r = 
os.stat_result(range(n_visible_fields), {'st_atime': -1.0}) + self.assertEqual(r.st_atime, -1.0) + self.assertEqual(r, tuple(range(n_visible_fields))) + + r = os.stat_result((*range(n_visible_fields), -1.0)) + self.assertEqual(r.st_atime, -1.0) + self.assertEqual(r, tuple(range(n_visible_fields))) + + with self.assertRaisesRegex(TypeError, + re.escape("got duplicate or unexpected field name(s)")): + os.stat_result((*range(n_visible_fields), -1.0), {'st_atime': -1.0}) + + def test_constructor_with_unknown_fields(self): + t = time.struct_time + + error_message = re.escape("got duplicate or unexpected field name(s)") + with self.assertRaisesRegex(TypeError, error_message): + t("123456789", dict={"tm_year": 0}) + with self.assertRaisesRegex(TypeError, error_message): + t("123456789", dict={"tm_year": 0, "tm_mon": 1}) + with self.assertRaisesRegex(TypeError, error_message): + t("123456789", dict={"tm_zone": "some zone", "tm_mon": 1}) + with self.assertRaisesRegex(TypeError, error_message): + t("123456789", dict={"tm_zone": "some zone", "error": 0}) + with self.assertRaisesRegex(TypeError, error_message): + t("123456789", dict={"error": 0, "tm_zone": "some zone", "tm_mon": 1}) + with self.assertRaisesRegex(TypeError, error_message): + t("123456789", dict={"error": 0}) + with self.assertRaisesRegex(TypeError, error_message): + t("123456789", dict={"tm_zone": "some zone", "error": 0}) + def test_eviltuple(self): class Exc(Exception): pass @@ -106,9 +168,78 @@ def __len__(self): self.assertRaises(Exc, time.struct_time, C()) - def test_reduce(self): + def test_pickling(self): t = time.gmtime() - x = t.__reduce__() + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + p = pickle.dumps(t, proto) + t2 = pickle.loads(p) + self.assertEqual(t2.__class__, t.__class__) + self.assertEqual(t2, t) + self.assertEqual(t2.tm_year, t.tm_year) + self.assertEqual(t2.tm_zone, t.tm_zone) + + def test_pickling_with_unnamed_fields(self): + assert os.stat_result.n_unnamed_fields > 0 + + r = os.stat_result(range(os.stat_result.n_sequence_fields), + {'st_atime': 1.0, 'st_atime_ns': 2.0}) + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + p = pickle.dumps(r, proto) + r2 = pickle.loads(p) + self.assertEqual(r2.__class__, r.__class__) + self.assertEqual(r2, r) + self.assertEqual(r2.st_mode, r.st_mode) + self.assertEqual(r2.st_atime, r.st_atime) + self.assertEqual(r2.st_atime_ns, r.st_atime_ns) + + def test_copying(self): + n_fields = time.struct_time.n_fields + t = time.struct_time([[i] for i in range(n_fields)]) + + t2 = copy.copy(t) + self.assertEqual(t2.__class__, t.__class__) + self.assertEqual(t2, t) + self.assertEqual(t2.tm_year, t.tm_year) + self.assertEqual(t2.tm_zone, t.tm_zone) + self.assertIs(t2[0], t[0]) + self.assertIs(t2.tm_year, t.tm_year) + + t3 = copy.deepcopy(t) + self.assertEqual(t3.__class__, t.__class__) + self.assertEqual(t3, t) + self.assertEqual(t3.tm_year, t.tm_year) + self.assertEqual(t3.tm_zone, t.tm_zone) + self.assertIsNot(t3[0], t[0]) + self.assertIsNot(t3.tm_year, t.tm_year) + + def test_copying_with_unnamed_fields(self): + assert os.stat_result.n_unnamed_fields > 0 + + n_sequence_fields = os.stat_result.n_sequence_fields + r = os.stat_result([[i] for i in range(n_sequence_fields)], + {'st_atime': [1.0], 'st_atime_ns': [2.0]}) + + r2 = copy.copy(r) + self.assertEqual(r2.__class__, r.__class__) + self.assertEqual(r2, r) + self.assertEqual(r2.st_mode, r.st_mode) + self.assertEqual(r2.st_atime, r.st_atime) + self.assertEqual(r2.st_atime_ns, r.st_atime_ns) + self.assertIs(r2[0], r[0]) + 
self.assertIs(r2.st_mode, r.st_mode) + self.assertIs(r2.st_atime, r.st_atime) + self.assertIs(r2.st_atime_ns, r.st_atime_ns) + + r3 = copy.deepcopy(r) + self.assertEqual(r3.__class__, r.__class__) + self.assertEqual(r3, r) + self.assertEqual(r3.st_mode, r.st_mode) + self.assertEqual(r3.st_atime, r.st_atime) + self.assertEqual(r3.st_atime_ns, r.st_atime_ns) + self.assertIsNot(r3[0], r[0]) + self.assertIsNot(r3.st_mode, r.st_mode) + self.assertIsNot(r3.st_atime, r.st_atime) + self.assertIsNot(r3.st_atime_ns, r.st_atime_ns) def test_extended_getslice(self): # Test extended slicing by comparing with list slicing. @@ -133,6 +264,84 @@ def test_match_args_with_unnamed_fields(self): self.assertEqual(os.stat_result.n_unnamed_fields, 3) self.assertEqual(os.stat_result.__match_args__, expected_args) + def test_copy_replace_all_fields_visible(self): + assert os.times_result.n_unnamed_fields == 0 + assert os.times_result.n_sequence_fields == os.times_result.n_fields + + t = os.times() + + # visible fields + self.assertEqual(copy.replace(t), t) + self.assertIsInstance(copy.replace(t), os.times_result) + self.assertEqual(copy.replace(t, user=1.5), (1.5, *t[1:])) + self.assertEqual(copy.replace(t, system=2.5), (t[0], 2.5, *t[2:])) + self.assertEqual(copy.replace(t, user=1.5, system=2.5), (1.5, 2.5, *t[2:])) + + # unknown fields + with self.assertRaisesRegex(TypeError, 'unexpected field name'): + copy.replace(t, error=-1) + with self.assertRaisesRegex(TypeError, 'unexpected field name'): + copy.replace(t, user=1, error=-1) + + def test_copy_replace_with_invisible_fields(self): + assert time.struct_time.n_unnamed_fields == 0 + assert time.struct_time.n_sequence_fields < time.struct_time.n_fields + + t = time.gmtime(0) + + # visible fields + t2 = copy.replace(t) + self.assertEqual(t2, (1970, 1, 1, 0, 0, 0, 3, 1, 0)) + self.assertIsInstance(t2, time.struct_time) + t3 = copy.replace(t, tm_year=2000) + self.assertEqual(t3, (2000, 1, 1, 0, 0, 0, 3, 1, 0)) + self.assertEqual(t3.tm_year, 2000) + t4 = copy.replace(t, tm_mon=2) + self.assertEqual(t4, (1970, 2, 1, 0, 0, 0, 3, 1, 0)) + self.assertEqual(t4.tm_mon, 2) + t5 = copy.replace(t, tm_year=2000, tm_mon=2) + self.assertEqual(t5, (2000, 2, 1, 0, 0, 0, 3, 1, 0)) + self.assertEqual(t5.tm_year, 2000) + self.assertEqual(t5.tm_mon, 2) + + # named invisible fields + self.assertTrue(hasattr(t, 'tm_zone'), f"{t} has no attribute 'tm_zone'") + with self.assertRaisesRegex(AttributeError, 'readonly attribute'): + t.tm_zone = 'some other zone' + self.assertEqual(t2.tm_zone, t.tm_zone) + self.assertEqual(t3.tm_zone, t.tm_zone) + self.assertEqual(t4.tm_zone, t.tm_zone) + t6 = copy.replace(t, tm_zone='some other zone') + self.assertEqual(t, t6) + self.assertEqual(t6.tm_zone, 'some other zone') + t7 = copy.replace(t, tm_year=2000, tm_zone='some other zone') + self.assertEqual(t7, (2000, 1, 1, 0, 0, 0, 3, 1, 0)) + self.assertEqual(t7.tm_year, 2000) + self.assertEqual(t7.tm_zone, 'some other zone') + + # unknown fields + with self.assertRaisesRegex(TypeError, 'unexpected field name'): + copy.replace(t, error=2) + with self.assertRaisesRegex(TypeError, 'unexpected field name'): + copy.replace(t, tm_year=2000, error=2) + with self.assertRaisesRegex(TypeError, 'unexpected field name'): + copy.replace(t, tm_zone='some other zone', error=2) + + def test_copy_replace_with_unnamed_fields(self): + assert os.stat_result.n_unnamed_fields > 0 + + r = os.stat_result(range(os.stat_result.n_sequence_fields)) + + error_message = re.escape('__replace__() is not supported') + with 
self.assertRaisesRegex(TypeError, error_message): + copy.replace(r) + with self.assertRaisesRegex(TypeError, error_message): + copy.replace(r, st_mode=1) + with self.assertRaisesRegex(TypeError, error_message): + copy.replace(r, error=2) + with self.assertRaisesRegex(TypeError, error_message): + copy.replace(r, st_mode=1, error=2) + if __name__ == "__main__": unittest.main() diff --git a/Lib/test/test_zlib.py b/Lib/test/test_zlib.py index 9a099adc74f4b4..1dc8b91a453f92 100644 --- a/Lib/test/test_zlib.py +++ b/Lib/test/test_zlib.py @@ -512,18 +512,7 @@ def test_odd_flush(self): # Try 17K of data # generate random data stream - try: - # In 2.3 and later, WichmannHill is the RNG of the bug report - gen = random.WichmannHill() - except AttributeError: - try: - # 2.2 called it Random - gen = random.Random() - except AttributeError: - # others might simply have a single RNG - gen = random - gen.seed(1) - data = gen.randbytes(17 * 1024) + data = random.randbytes(17 * 1024) # compress, sync-flush, and decompress first = co.compress(data) diff --git a/Lib/threading.py b/Lib/threading.py index 0edfaf880f711a..41c3a9ff93856f 100644 --- a/Lib/threading.py +++ b/Lib/threading.py @@ -1593,8 +1593,11 @@ def _shutdown(): # The main thread isn't finished yet, so its thread state lock can't # have been released. assert tlock is not None - assert tlock.locked() - tlock.release() + if tlock.locked(): + # It should have been released already by + # _PyInterpreterState_SetNotRunningMain(), but there may be + # embedders that aren't calling that yet. + tlock.release() _main_thread._stop() else: # bpo-1596321: _shutdown() must be called in the main thread. diff --git a/Makefile.pre.in b/Makefile.pre.in index cf03c86f18b3c3..97eb767b8fcdeb 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -657,7 +657,7 @@ profile-run-stamp: $(MAKE) profile-gen-stamp # Next, run the profile task to generate the profile information. @ # FIXME: can't run for a cross build - $(LLVM_PROF_FILE) $(RUNSHARED) ./$(BUILDPYTHON) $(PROFILE_TASK) || true + $(LLVM_PROF_FILE) $(RUNSHARED) ./$(BUILDPYTHON) $(PROFILE_TASK) $(LLVM_PROF_MERGER) # Remove profile generation binary since we are done with it. $(MAKE) clean-retain-profile @@ -706,7 +706,7 @@ profile-bolt-stamp: $(BUILDPYTHON) mv "$${bin}.bolt_inst" "$${bin}"; \ done # Run instrumented binaries to collect data. - $(RUNSHARED) ./$(BUILDPYTHON) $(PROFILE_TASK) || true + $(RUNSHARED) ./$(BUILDPYTHON) $(PROFILE_TASK) # Merge all the data files together. for bin in $(BOLT_BINARIES); do \ @MERGE_FDATA@ $${bin}.*.fdata > "$${bin}.fdata"; \ diff --git a/Misc/NEWS.d/next/Build/2023-10-03-17-55-09.gh-issue-110276.luaKRg.rst b/Misc/NEWS.d/next/Build/2023-10-03-17-55-09.gh-issue-110276.luaKRg.rst new file mode 100644 index 00000000000000..392203d21ca45d --- /dev/null +++ b/Misc/NEWS.d/next/Build/2023-10-03-17-55-09.gh-issue-110276.luaKRg.rst @@ -0,0 +1,2 @@ +No longer ignore :envvar:`PROFILE_TASK` failure silently: command used by +Profile Guided Optimization (PGO). Patch by Victor Stinner. diff --git a/Misc/NEWS.d/next/C API/2023-09-04-11-47-12.gh-issue-108867.Cr_LKd.rst b/Misc/NEWS.d/next/C API/2023-09-04-11-47-12.gh-issue-108867.Cr_LKd.rst new file mode 100644 index 00000000000000..2f56466833f6dd --- /dev/null +++ b/Misc/NEWS.d/next/C API/2023-09-04-11-47-12.gh-issue-108867.Cr_LKd.rst @@ -0,0 +1,5 @@ +Add :c:func:`PyThreadState_GetUnchecked()` function: similar to +:c:func:`PyThreadState_Get()`, but don't kill the process with a fatal error if +it is NULL. 
The caller is responsible for checking if the result is NULL. +Previously, the function was private and known as +``_PyThreadState_UncheckedGet()``. Patch by Victor Stinner. diff --git a/Misc/NEWS.d/next/C API/2023-10-03-06-19-10.gh-issue-110235.uec5AG.rst b/Misc/NEWS.d/next/C API/2023-10-03-06-19-10.gh-issue-110235.uec5AG.rst new file mode 100644 index 00000000000000..ff26f25fe71d61 --- /dev/null +++ b/Misc/NEWS.d/next/C API/2023-10-03-06-19-10.gh-issue-110235.uec5AG.rst @@ -0,0 +1,2 @@ +Raise :exc:`TypeError` for duplicate/unknown fields in the ``PyStructSequence`` constructor. +Patched by Xuehai Pan. diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-26-03-46-55.gh-issue-109369.OJbxbF.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-26-03-46-55.gh-issue-109369.OJbxbF.rst new file mode 100644 index 00000000000000..ca1f0f1bd44a8c --- /dev/null +++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-26-03-46-55.gh-issue-109369.OJbxbF.rst @@ -0,0 +1,2 @@ +The internal eval_breaker and supporting flags, plus the monitoring version, +have been merged into a single atomic integer to speed up checks. diff --git a/Misc/NEWS.d/next/Library/2023-09-08-19-44-01.gh-issue-109151.GkzkQu.rst b/Misc/NEWS.d/next/Library/2023-09-08-19-44-01.gh-issue-109151.GkzkQu.rst new file mode 100644 index 00000000000000..78b4e882baba96 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2023-09-08-19-44-01.gh-issue-109151.GkzkQu.rst @@ -0,0 +1 @@ +Enable ``readline`` editing features in the :ref:`sqlite3 command-line interface <sqlite3-cli>` (``python -m sqlite3``). diff --git a/Misc/NEWS.d/next/Library/2023-10-02-15-07-28.gh-issue-110222.zl_oHh.rst b/Misc/NEWS.d/next/Library/2023-10-02-15-07-28.gh-issue-110222.zl_oHh.rst new file mode 100644 index 00000000000000..fd2ecdf6269cf3 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2023-10-02-15-07-28.gh-issue-110222.zl_oHh.rst @@ -0,0 +1,2 @@ +Add support for struct sequence objects in :func:`copy.replace`. +Patched by Xuehai Pan. diff --git a/Misc/NEWS.d/next/Library/2023-10-02-15-40-10.gh-issue-109653.iB0peK.rst b/Misc/NEWS.d/next/Library/2023-10-02-15-40-10.gh-issue-109653.iB0peK.rst new file mode 100644 index 00000000000000..54330976d71dc1 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2023-10-02-15-40-10.gh-issue-109653.iB0peK.rst @@ -0,0 +1,2 @@ +Fix a Python 3.12 regression in the import time of :mod:`random`. Patch by Alex +Waygood. diff --git a/Misc/NEWS.d/next/Library/2023-10-03-00-04-26.gh-issue-110249.K0mMrs.rst b/Misc/NEWS.d/next/Library/2023-10-03-00-04-26.gh-issue-110249.K0mMrs.rst new file mode 100644 index 00000000000000..a7c9c0f60feea2 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2023-10-03-00-04-26.gh-issue-110249.K0mMrs.rst @@ -0,0 +1 @@ +Add ``-C``/``--show-caches`` flag to the ``dis`` command line. diff --git a/Misc/NEWS.d/next/Library/2023-10-03-14-07-05.gh-issue-110273.QaDUmS.rst b/Misc/NEWS.d/next/Library/2023-10-03-14-07-05.gh-issue-110273.QaDUmS.rst new file mode 100644 index 00000000000000..98d87da6295ee5 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2023-10-03-14-07-05.gh-issue-110273.QaDUmS.rst @@ -0,0 +1,3 @@ +:func:`dataclasses.replace` now raises :exc:`TypeError` instead of :exc:`ValueError` if +a keyword argument is specified for a field declared with ``init=False``, or if a +keyword argument is missing for a required ``InitVar`` field.
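For illustration only (not part of the patch): a minimal sketch of the :func:`dataclasses.replace` behaviour change described in the entry above::

    from dataclasses import dataclass, field, replace

    @dataclass
    class C:
        x: int
        y: int = field(init=False, default=0)

    c = C(1)
    try:
        replace(c, y=5)           # y is declared with init=False
    except TypeError as exc:      # was ValueError before this change
        print(exc)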
diff --git a/Misc/NEWS.d/next/Library/2023-10-03-15-17-03.gh-issue-109653.9DYOMD.rst b/Misc/NEWS.d/next/Library/2023-10-03-15-17-03.gh-issue-109653.9DYOMD.rst new file mode 100644 index 00000000000000..92e5a1cada9a69 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2023-10-03-15-17-03.gh-issue-109653.9DYOMD.rst @@ -0,0 +1,3 @@ +Slightly improve the import time of several standard-library modules by +deferring imports of :mod:`warnings` within those modules. Patch by Alex +Waygood. diff --git a/Misc/NEWS.d/next/Tests/2023-10-01-10-27-02.gh-issue-110171.ZPlo0h.rst b/Misc/NEWS.d/next/Tests/2023-10-01-10-27-02.gh-issue-110171.ZPlo0h.rst new file mode 100644 index 00000000000000..9b41b033bc7f2b --- /dev/null +++ b/Misc/NEWS.d/next/Tests/2023-10-01-10-27-02.gh-issue-110171.ZPlo0h.rst @@ -0,0 +1,3 @@ +``libregrtest`` now always sets and shows ``random.seed``, +so tests are more reproducible. Use ``--randseed`` flag +to pass the explicit random seed for tests. diff --git a/Misc/NEWS.d/next/Tests/2023-10-03-10-54-09.gh-issue-110267.O-c47G.rst b/Misc/NEWS.d/next/Tests/2023-10-03-10-54-09.gh-issue-110267.O-c47G.rst new file mode 100644 index 00000000000000..2bae7715cc3d5b --- /dev/null +++ b/Misc/NEWS.d/next/Tests/2023-10-03-10-54-09.gh-issue-110267.O-c47G.rst @@ -0,0 +1,2 @@ +Add tests for pickling and copying PyStructSequence objects. +Patched by Xuehai Pan. diff --git a/Misc/requirements-test.txt b/Misc/requirements-test.txt deleted file mode 100644 index 60e7ed20a3d510..00000000000000 --- a/Misc/requirements-test.txt +++ /dev/null @@ -1 +0,0 @@ -tzdata==2020.3 diff --git a/Modules/_testcapimodule.c b/Modules/_testcapimodule.c index 64bcb49d365774..a46d986c18ecd4 100644 --- a/Modules/_testcapimodule.c +++ b/Modules/_testcapimodule.c @@ -2458,8 +2458,8 @@ test_tstate_capi(PyObject *self, PyObject *Py_UNUSED(args)) PyThreadState *tstate2 = PyThreadState_Get(); assert(tstate2 == tstate); - // private _PyThreadState_UncheckedGet() - PyThreadState *tstate3 = _PyThreadState_UncheckedGet(); + // PyThreadState_GetUnchecked() + PyThreadState *tstate3 = PyThreadState_GetUnchecked(); assert(tstate3 == tstate); // PyThreadState_EnterTracing(), PyThreadState_LeaveTracing() diff --git a/Modules/_testinternalcapi.c b/Modules/_testinternalcapi.c index c6b80fffdec16d..05bac0936b155d 100644 --- a/Modules/_testinternalcapi.c +++ b/Modules/_testinternalcapi.c @@ -675,7 +675,11 @@ record_eval(PyThreadState *tstate, struct _PyInterpreterFrame *f, int exc) assert(module != NULL); module_state *state = get_module_state(module); Py_DECREF(module); - PyList_Append(state->record_list, ((PyFunctionObject *)f->f_funcobj)->func_name); + int res = PyList_Append(state->record_list, + ((PyFunctionObject *)f->f_funcobj)->func_name); + if (res < 0) { + return NULL; + } } return _PyEval_EvalFrameDefault(tstate, f, exc); } diff --git a/Modules/_threadmodule.c b/Modules/_threadmodule.c index ee46b37d92e128..86bd560b92ba6b 100644 --- a/Modules/_threadmodule.c +++ b/Modules/_threadmodule.c @@ -1205,7 +1205,7 @@ thread_PyThread_start_new_thread(PyObject *self, PyObject *fargs) if (boot == NULL) { return PyErr_NoMemory(); } - boot->tstate = _PyThreadState_New(interp); + boot->tstate = _PyThreadState_New(interp, _PyThreadState_WHENCE_THREADING); if (boot->tstate == NULL) { PyMem_RawFree(boot); if (!PyErr_Occurred()) { diff --git a/Modules/_xxinterpchannelsmodule.c b/Modules/_xxinterpchannelsmodule.c index 6096f88421a73a..d762f449c407a3 100644 --- a/Modules/_xxinterpchannelsmodule.c +++ b/Modules/_xxinterpchannelsmodule.c @@ -198,6 +198,9 @@ 
_release_xid_data(_PyCrossInterpreterData *data, int flags) /* module state *************************************************************/ typedef struct { + PyTypeObject *send_channel_type; + PyTypeObject *recv_channel_type; + /* heap types */ PyTypeObject *ChannelIDType; @@ -218,6 +221,21 @@ get_module_state(PyObject *mod) return state; } +static module_state * +_get_current_module_state(void) +{ + PyObject *mod = _get_current_module(); + if (mod == NULL) { + // XXX import it? + PyErr_SetString(PyExc_RuntimeError, + MODULE_NAME " module not imported yet"); + return NULL; + } + module_state *state = get_module_state(mod); + Py_DECREF(mod); + return state; +} + static int traverse_module_state(module_state *state, visitproc visit, void *arg) { @@ -237,6 +255,9 @@ traverse_module_state(module_state *state, visitproc visit, void *arg) static int clear_module_state(module_state *state) { + Py_CLEAR(state->send_channel_type); + Py_CLEAR(state->recv_channel_type); + /* heap types */ if (state->ChannelIDType != NULL) { (void)_PyCrossInterpreterData_UnregisterClass(state->ChannelIDType); @@ -1529,17 +1550,20 @@ typedef struct channelid { struct channel_id_converter_data { PyObject *module; int64_t cid; + int end; }; static int channel_id_converter(PyObject *arg, void *ptr) { int64_t cid; + int end = 0; struct channel_id_converter_data *data = ptr; module_state *state = get_module_state(data->module); assert(state != NULL); if (PyObject_TypeCheck(arg, state->ChannelIDType)) { cid = ((channelid *)arg)->id; + end = ((channelid *)arg)->end; } else if (PyIndex_Check(arg)) { cid = PyLong_AsLongLong(arg); @@ -1559,6 +1583,7 @@ channel_id_converter(PyObject *arg, void *ptr) return 0; } data->cid = cid; + data->end = end; return 1; } @@ -1600,6 +1625,7 @@ _channelid_new(PyObject *mod, PyTypeObject *cls, { static char *kwlist[] = {"id", "send", "recv", "force", "_resolve", NULL}; int64_t cid; + int end; struct channel_id_converter_data cid_data = { .module = mod, }; @@ -1614,6 +1640,7 @@ _channelid_new(PyObject *mod, PyTypeObject *cls, return NULL; } cid = cid_data.cid; + end = cid_data.end; // Handle "send" and "recv". if (send == 0 && recv == 0) { @@ -1621,14 +1648,17 @@ _channelid_new(PyObject *mod, PyTypeObject *cls, "'send' and 'recv' cannot both be False"); return NULL; } - - int end = 0; - if (send == 1) { + else if (send == 1) { if (recv == 0 || recv == -1) { end = CHANNEL_SEND; } + else { + assert(recv == 1); + end = 0; + } } else if (recv == 1) { + assert(send == 0 || send == -1); end = CHANNEL_RECV; } @@ -1773,21 +1803,12 @@ channelid_richcompare(PyObject *self, PyObject *other, int op) return res; } +static PyTypeObject * _get_current_channel_end_type(int end); + static PyObject * _channel_from_cid(PyObject *cid, int end) { - PyObject *highlevel = PyImport_ImportModule("interpreters"); - if (highlevel == NULL) { - PyErr_Clear(); - highlevel = PyImport_ImportModule("test.support.interpreters"); - if (highlevel == NULL) { - return NULL; - } - } - const char *clsname = (end == CHANNEL_RECV) ? "RecvChannel" : - "SendChannel"; - PyObject *cls = PyObject_GetAttrString(highlevel, clsname); - Py_DECREF(highlevel); + PyObject *cls = (PyObject *)_get_current_channel_end_type(end); if (cls == NULL) { return NULL; } @@ -1943,6 +1964,107 @@ static PyType_Spec ChannelIDType_spec = { }; +/* SendChannel and RecvChannel classes */ + +// XXX Use a new __xid__ protocol instead? 
+ +static PyTypeObject * +_get_current_channel_end_type(int end) +{ + module_state *state = _get_current_module_state(); + if (state == NULL) { + return NULL; + } + PyTypeObject *cls; + if (end == CHANNEL_SEND) { + cls = state->send_channel_type; + } + else { + assert(end == CHANNEL_RECV); + cls = state->recv_channel_type; + } + if (cls == NULL) { + PyObject *highlevel = PyImport_ImportModule("interpreters"); + if (highlevel == NULL) { + PyErr_Clear(); + highlevel = PyImport_ImportModule("test.support.interpreters"); + if (highlevel == NULL) { + return NULL; + } + } + Py_DECREF(highlevel); + if (end == CHANNEL_SEND) { + cls = state->send_channel_type; + } + else { + cls = state->recv_channel_type; + } + assert(cls != NULL); + } + return cls; +} + +static PyObject * +_channel_end_from_xid(_PyCrossInterpreterData *data) +{ + channelid *cid = (channelid *)_channelid_from_xid(data); + if (cid == NULL) { + return NULL; + } + PyTypeObject *cls = _get_current_channel_end_type(cid->end); + if (cls == NULL) { + Py_DECREF(cid); + return NULL; + } + PyObject *obj = PyObject_CallOneArg((PyObject *)cls, (PyObject *)cid); + Py_DECREF(cid); + return obj; +} + +static int +_channel_end_shared(PyThreadState *tstate, PyObject *obj, + _PyCrossInterpreterData *data) +{ + PyObject *cidobj = PyObject_GetAttrString(obj, "_id"); + if (cidobj == NULL) { + return -1; + } + int res = _channelid_shared(tstate, cidobj, data); + Py_DECREF(cidobj); + if (res < 0) { + return -1; + } + data->new_object = _channel_end_from_xid; + return 0; +} + +static int +set_channel_end_types(PyObject *mod, PyTypeObject *send, PyTypeObject *recv) +{ + module_state *state = get_module_state(mod); + if (state == NULL) { + return -1; + } + + if (state->send_channel_type != NULL + || state->recv_channel_type != NULL) + { + PyErr_SetString(PyExc_TypeError, "already registered"); + return -1; + } + state->send_channel_type = (PyTypeObject *)Py_NewRef(send); + state->recv_channel_type = (PyTypeObject *)Py_NewRef(recv); + + if (_PyCrossInterpreterData_RegisterClass(send, _channel_end_shared)) { + return -1; + } + if (_PyCrossInterpreterData_RegisterClass(recv, _channel_end_shared)) { + return -1; + } + + return 0; +} + /* module level code ********************************************************/ /* globals is the process-global state for the module. 
It holds all @@ -2346,13 +2468,41 @@ channel__channel_id(PyObject *self, PyObject *args, PyObject *kwds) return NULL; } PyTypeObject *cls = state->ChannelIDType; + PyObject *mod = get_module_from_owned_type(cls); - if (mod == NULL) { + assert(mod == self); + Py_DECREF(mod); + + return _channelid_new(self, cls, args, kwds); +} + +static PyObject * +channel__register_end_types(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"send", "recv", NULL}; + PyObject *send; + PyObject *recv; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "OO:_register_end_types", kwlist, + &send, &recv)) { return NULL; } - PyObject *cid = _channelid_new(mod, cls, args, kwds); - Py_DECREF(mod); - return cid; + if (!PyType_Check(send)) { + PyErr_SetString(PyExc_TypeError, "expected a type for 'send'"); + return NULL; + } + if (!PyType_Check(recv)) { + PyErr_SetString(PyExc_TypeError, "expected a type for 'recv'"); + return NULL; + } + PyTypeObject *cls_send = (PyTypeObject *)send; + PyTypeObject *cls_recv = (PyTypeObject *)recv; + + if (set_channel_end_types(self, cls_send, cls_recv) < 0) { + return NULL; + } + + Py_RETURN_NONE; } static PyMethodDef module_functions[] = { @@ -2374,6 +2524,8 @@ static PyMethodDef module_functions[] = { METH_VARARGS | METH_KEYWORDS, channel_release_doc}, {"_channel_id", _PyCFunction_CAST(channel__channel_id), METH_VARARGS | METH_KEYWORDS, NULL}, + {"_register_end_types", _PyCFunction_CAST(channel__register_end_types), + METH_VARARGS | METH_KEYWORDS, NULL}, {NULL, NULL} /* sentinel */ }; diff --git a/Modules/_xxsubinterpretersmodule.c b/Modules/_xxsubinterpretersmodule.c index e1c7d4ab2fd78f..bca16ac8a62eca 100644 --- a/Modules/_xxsubinterpretersmodule.c +++ b/Modules/_xxsubinterpretersmodule.c @@ -242,6 +242,11 @@ _sharedns_apply(_sharedns *shared, PyObject *ns) // of the exception in the calling interpreter. 
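The _register_end_types() function added above is meant to be called once by the high-level channels wrapper to hand its SendChannel/RecvChannel classes to this module. A hedged sketch of what an equivalent call could look like from C; the helper name and error handling are assumptions, not part of the patch:

    #include <Python.h>

    static int
    register_channel_ends(PyObject *channels_mod,
                          PyObject *send_cls, PyObject *recv_cls)
    {
        PyObject *res = PyObject_CallMethod(channels_mod, "_register_end_types",
                                            "OO", send_cls, recv_cls);
        if (res == NULL) {
            return -1;   /* TypeError if already registered or not types */
        }
        Py_DECREF(res);
        return 0;
    }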
typedef struct _sharedexception { + PyInterpreterState *interp; +#define ERR_NOT_SET 0 +#define ERR_NO_MEMORY 1 +#define ERR_ALREADY_RUNNING 2 + int code; const char *name; const char *msg; } _sharedexception; @@ -263,14 +268,26 @@ _sharedexception_clear(_sharedexception *exc) } static const char * -_sharedexception_bind(PyObject *exc, _sharedexception *sharedexc) +_sharedexception_bind(PyObject *exc, int code, _sharedexception *sharedexc) { + if (sharedexc->interp == NULL) { + sharedexc->interp = PyInterpreterState_Get(); + } + + if (code != ERR_NOT_SET) { + assert(exc == NULL); + assert(code > 0); + sharedexc->code = code; + return NULL; + } + assert(exc != NULL); const char *failure = NULL; PyObject *nameobj = PyUnicode_FromString(Py_TYPE(exc)->tp_name); if (nameobj == NULL) { failure = "unable to format exception type name"; + code = ERR_NO_MEMORY; goto error; } sharedexc->name = _copy_raw_string(nameobj); @@ -281,6 +298,7 @@ _sharedexception_bind(PyObject *exc, _sharedexception *sharedexc) } else { failure = "unable to encode and copy exception type name"; } + code = ERR_NO_MEMORY; goto error; } @@ -288,6 +306,7 @@ _sharedexception_bind(PyObject *exc, _sharedexception *sharedexc) PyObject *msgobj = PyUnicode_FromFormat("%S", exc); if (msgobj == NULL) { failure = "unable to format exception message"; + code = ERR_NO_MEMORY; goto error; } sharedexc->msg = _copy_raw_string(msgobj); @@ -298,6 +317,7 @@ _sharedexception_bind(PyObject *exc, _sharedexception *sharedexc) } else { failure = "unable to encode and copy exception message"; } + code = ERR_NO_MEMORY; goto error; } } @@ -308,7 +328,10 @@ _sharedexception_bind(PyObject *exc, _sharedexception *sharedexc) assert(failure != NULL); PyErr_Clear(); _sharedexception_clear(sharedexc); - *sharedexc = no_exception; + *sharedexc = (_sharedexception){ + .interp = sharedexc->interp, + .code = code, + }; return failure; } @@ -316,6 +339,7 @@ static void _sharedexception_apply(_sharedexception *exc, PyObject *wrapperclass) { if (exc->name != NULL) { + assert(exc->code == ERR_NOT_SET); if (exc->msg != NULL) { PyErr_Format(wrapperclass, "%s: %s", exc->name, exc->msg); } @@ -324,9 +348,19 @@ _sharedexception_apply(_sharedexception *exc, PyObject *wrapperclass) } } else if (exc->msg != NULL) { + assert(exc->code == ERR_NOT_SET); PyErr_SetString(wrapperclass, exc->msg); } + else if (exc->code == ERR_NO_MEMORY) { + PyErr_NoMemory(); + } + else if (exc->code == ERR_ALREADY_RUNNING) { + assert(exc->interp != NULL); + assert(_PyInterpreterState_IsRunningMain(exc->interp)); + _PyInterpreterState_FailIfRunningMain(exc->interp); + } else { + assert(exc->code == ERR_NOT_SET); PyErr_SetNone(wrapperclass); } } @@ -362,9 +396,16 @@ static int _run_script(PyInterpreterState *interp, const char *codestr, _sharedns *shared, _sharedexception *sharedexc) { + int errcode = ERR_NOT_SET; + if (_PyInterpreterState_SetRunningMain(interp) < 0) { - // We skip going through the shared exception. - return -1; + assert(PyErr_Occurred()); + // In the case where we didn't switch interpreters, it would + // be more efficient to leave the exception in place and return + // immediately. However, life is simpler if we don't. 
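With the new code field, an exception can cross interpreters either as captured name/message strings or, when no objects could be built, as a bare error code. A self-contained sketch of the code-based fallback dispatch; the ERR_* values mirror the #defines introduced above, and the plain RuntimeError is a stand-in (an assumption), since the real code re-raises through _PyInterpreterState_FailIfRunningMain():

    #include <Python.h>

    #define ERR_NOT_SET          0
    #define ERR_NO_MEMORY        1
    #define ERR_ALREADY_RUNNING  2

    static void
    apply_error_code(int code, PyObject *wrapperclass)
    {
        if (code == ERR_NO_MEMORY) {
            PyErr_NoMemory();
        }
        else if (code == ERR_ALREADY_RUNNING) {
            PyErr_SetString(PyExc_RuntimeError, "interpreter already running");
        }
        else {
            PyErr_SetNone(wrapperclass);   /* ERR_NOT_SET: nothing better known */
        }
    }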
+ PyErr_Clear(); + errcode = ERR_ALREADY_RUNNING; + goto error; } PyObject *excval = NULL; @@ -403,16 +444,23 @@ _run_script(PyInterpreterState *interp, const char *codestr, error: excval = PyErr_GetRaisedException(); - const char *failure = _sharedexception_bind(excval, sharedexc); + const char *failure = _sharedexception_bind(excval, errcode, sharedexc); if (failure != NULL) { fprintf(stderr, "RunFailedError: script raised an uncaught exception (%s)", failure); - PyErr_Clear(); } - Py_XDECREF(excval); + if (excval != NULL) { + // XXX Instead, store the rendered traceback on sharedexc, + // attach it to the exception when applied, + // and teach PyErr_Display() to print it. + PyErr_Display(NULL, excval, NULL); + Py_DECREF(excval); + } + if (errcode != ERR_ALREADY_RUNNING) { + _PyInterpreterState_SetNotRunningMain(interp); + } assert(!PyErr_Occurred()); - _PyInterpreterState_SetNotRunningMain(interp); return -1; } @@ -421,6 +469,7 @@ _run_script_in_interpreter(PyObject *mod, PyInterpreterState *interp, const char *codestr, PyObject *shareables) { module_state *state = get_module_state(mod); + assert(state != NULL); _sharedns *shared = _get_shared_ns(shareables); if (shared == NULL && PyErr_Occurred()) { @@ -429,50 +478,30 @@ _run_script_in_interpreter(PyObject *mod, PyInterpreterState *interp, // Switch to interpreter. PyThreadState *save_tstate = NULL; + PyThreadState *tstate = NULL; if (interp != PyInterpreterState_Get()) { - // XXX gh-109860: Using the "head" thread isn't strictly correct. - PyThreadState *tstate = PyInterpreterState_ThreadHead(interp); - assert(tstate != NULL); - // Hack (until gh-109860): The interpreter's initial thread state - // is least likely to break. - while(tstate->next != NULL) { - tstate = tstate->next; - } - // We must do this check before switching interpreters, so any - // exception gets raised in the right one. - // XXX gh-109860: Drop this redundant check once we stop - // re-using tstates that might already be in use. - if (_PyInterpreterState_IsRunningMain(interp)) { - PyErr_SetString(PyExc_RuntimeError, - "interpreter already running"); - if (shared != NULL) { - _sharedns_free(shared); - } - return -1; - } + tstate = PyThreadState_New(interp); + tstate->_whence = _PyThreadState_WHENCE_EXEC; // XXX Possible GILState issues? save_tstate = PyThreadState_Swap(tstate); } // Run the script. - _sharedexception exc = {NULL, NULL}; + _sharedexception exc = (_sharedexception){ .interp = interp }; int result = _run_script(interp, codestr, shared, &exc); // Switch back. if (save_tstate != NULL) { + PyThreadState_Clear(tstate); PyThreadState_Swap(save_tstate); + PyThreadState_Delete(tstate); } // Propagate any exception out to the caller. - if (exc.name != NULL) { - assert(state != NULL); + if (result < 0) { + assert(!PyErr_Occurred()); _sharedexception_apply(&exc, state->RunFailedError); - } - else if (result != 0) { - if (!PyErr_Occurred()) { - // We were unable to allocate a shared exception. - PyErr_NoMemory(); - } + assert(PyErr_Occurred()); } if (shared != NULL) { @@ -502,6 +531,7 @@ interp_create(PyObject *self, PyObject *args, PyObject *kwds) const PyInterpreterConfig config = isolated ? (PyInterpreterConfig)_PyInterpreterConfig_INIT : (PyInterpreterConfig)_PyInterpreterConfig_LEGACY_INIT; + // XXX Possible GILState issues? 
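Rather than borrowing the target interpreter's existing "head" thread state, the code above now creates a throwaway thread state for the call and tears it down afterwards. A minimal sketch of that create/swap/run/clear/swap-back/delete sequence using only public APIs; error handling is trimmed and the real code additionally tags tstate->_whence:

    #include <Python.h>

    static int
    run_in_interpreter(PyInterpreterState *interp, const char *code)
    {
        PyThreadState *tstate = PyThreadState_New(interp);
        if (tstate == NULL) {
            return -1;
        }
        PyThreadState *save = PyThreadState_Swap(tstate);
        int rc = PyRun_SimpleString(code);   /* stand-in for _run_script() */
        PyThreadState_Clear(tstate);
        PyThreadState_Swap(save);
        PyThreadState_Delete(tstate);
        return rc;
    }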
PyThreadState *tstate = NULL; PyStatus status = Py_NewInterpreterFromConfig(&tstate, &config); @@ -517,6 +547,7 @@ interp_create(PyObject *self, PyObject *args, PyObject *kwds) return NULL; } assert(tstate != NULL); + PyInterpreterState *interp = PyThreadState_GetInterpreter(tstate); PyObject *idobj = PyInterpreterState_GetIDObject(interp); if (idobj == NULL) { @@ -526,6 +557,10 @@ interp_create(PyObject *self, PyObject *args, PyObject *kwds) PyThreadState_Swap(save_tstate); return NULL; } + + PyThreadState_Clear(tstate); + PyThreadState_Delete(tstate); + _PyInterpreterState_RequireIDRef(interp, 1); return idobj; } @@ -573,14 +608,8 @@ interp_destroy(PyObject *self, PyObject *args, PyObject *kwds) } // Destroy the interpreter. - // XXX gh-109860: Using the "head" thread isn't strictly correct. - PyThreadState *tstate = PyInterpreterState_ThreadHead(interp); - assert(tstate != NULL); - // Hack (until gh-109860): The interpreter's initial thread state - // is least likely to break. - while(tstate->next != NULL) { - tstate = tstate->next; - } + PyThreadState *tstate = PyThreadState_New(interp); + tstate->_whence = _PyThreadState_WHENCE_INTERP; // XXX Possible GILState issues? PyThreadState *save_tstate = PyThreadState_Swap(tstate); Py_EndInterpreter(tstate); diff --git a/Modules/gcmodule.c b/Modules/gcmodule.c index 632cabdf4bcfbd..592d527f0bd6a2 100644 --- a/Modules/gcmodule.c +++ b/Modules/gcmodule.c @@ -24,6 +24,7 @@ */ #include "Python.h" +#include "pycore_ceval.h" // _Py_set_eval_breaker_bit() #include "pycore_context.h" #include "pycore_dict.h" // _PyDict_MaybeUntrack() #include "pycore_initconfig.h" @@ -2274,11 +2275,7 @@ _Py_ScheduleGC(PyInterpreterState *interp) if (gcstate->collecting == 1) { return; } - struct _ceval_state *ceval = &interp->ceval; - if (!_Py_atomic_load_relaxed(&ceval->gc_scheduled)) { - _Py_atomic_store_relaxed(&ceval->gc_scheduled, 1); - _Py_atomic_store_relaxed(&ceval->eval_breaker, 1); - } + _Py_set_eval_breaker_bit(interp, _PY_GC_SCHEDULED_BIT, 1); } void diff --git a/Modules/getpath.c b/Modules/getpath.c index 3b926cac0d3f24..6f76a84e78bf62 100644 --- a/Modules/getpath.c +++ b/Modules/getpath.c @@ -6,6 +6,7 @@ #include "pycore_pathconfig.h" // _PyPathConfig_ReadGlobal() #include "pycore_pyerrors.h" // _PyErr_WriteUnraisableMsg() #include "pycore_pymem.h" // _PyMem_RawWcsdup() +#include "pycore_pystate.h" // _PyThreadState_GET() #include "marshal.h" // PyMarshal_ReadObjectFromString #include "osdefs.h" // DELIM @@ -821,7 +822,7 @@ _PyConfig_InitPathConfig(PyConfig *config, int compute_path_config) return status; } - if (!_PyThreadState_UncheckedGet()) { + if (!_PyThreadState_GET()) { return PyStatus_Error("cannot calculate path configuration without GIL"); } diff --git a/Modules/signalmodule.c b/Modules/signalmodule.c index 8d6556727b3a5a..ac3457003b0cb6 100644 --- a/Modules/signalmodule.c +++ b/Modules/signalmodule.c @@ -1767,9 +1767,8 @@ PyErr_CheckSignals(void) Python code to ensure signals are handled. 
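GC scheduling above now goes through a single bit in the per-interpreter eval-breaker word instead of a pair of separate atomic flags; the matching test-and-clear happens in PyErr_CheckSignals() in the next hunk. A short sketch of that protocol, using the internal helpers exactly as this patch uses them (pycore_* headers, so this only builds inside CPython with Py_BUILD_CORE):

    #include "Python.h"
    #include "pycore_ceval.h"   // _Py_set_eval_breaker_bit(), _Py_eval_breaker_bit_is_set()

    /* Producer side: ask the eval loop to run a GC pass soon. */
    static void
    schedule_gc(PyInterpreterState *interp)
    {
        _Py_set_eval_breaker_bit(interp, _PY_GC_SCHEDULED_BIT, 1);
    }

    /* Consumer side: call at a safe point; returns 1 if a pass was requested. */
    static int
    consume_gc_request(PyInterpreterState *interp)
    {
        if (_Py_eval_breaker_bit_is_set(interp, _PY_GC_SCHEDULED_BIT)) {
            _Py_set_eval_breaker_bit(interp, _PY_GC_SCHEDULED_BIT, 0);
            return 1;
        }
        return 0;
    }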
Checking for the GC here allows long running native code to clean cycles created using the C-API even if it doesn't run the evaluation loop */ - struct _ceval_state *interp_ceval_state = &tstate->interp->ceval; - if (_Py_atomic_load_relaxed(&interp_ceval_state->gc_scheduled)) { - _Py_atomic_store_relaxed(&interp_ceval_state->gc_scheduled, 0); + if (_Py_eval_breaker_bit_is_set(tstate->interp, _PY_GC_SCHEDULED_BIT)) { + _Py_set_eval_breaker_bit(tstate->interp, _PY_GC_SCHEDULED_BIT, 0); _Py_RunGC(tstate); } diff --git a/Modules/termios.c b/Modules/termios.c index c779a757e4fa9b..6f07c93bcdb6b5 100644 --- a/Modules/termios.c +++ b/Modules/termios.c @@ -120,7 +120,7 @@ termios_tcgetattr_impl(PyObject *module, int fd) v = PyBytes_FromStringAndSize(&ch, 1); if (v == NULL) goto err; - PyList_SetItem(cc, i, v); + PyList_SET_ITEM(cc, i, v); } /* Convert the MIN and TIME slots to integer. On some systems, the @@ -128,29 +128,44 @@ termios_tcgetattr_impl(PyObject *module, int fd) only do this in noncanonical input mode. */ if ((mode.c_lflag & ICANON) == 0) { v = PyLong_FromLong((long)mode.c_cc[VMIN]); - if (v == NULL) + if (v == NULL) { + goto err; + } + if (PyList_SetItem(cc, VMIN, v) < 0) { goto err; - PyList_SetItem(cc, VMIN, v); + } v = PyLong_FromLong((long)mode.c_cc[VTIME]); - if (v == NULL) + if (v == NULL) { + goto err; + } + if (PyList_SetItem(cc, VTIME, v) < 0) { goto err; - PyList_SetItem(cc, VTIME, v); + } } - if (!(v = PyList_New(7))) - goto err; - - PyList_SetItem(v, 0, PyLong_FromLong((long)mode.c_iflag)); - PyList_SetItem(v, 1, PyLong_FromLong((long)mode.c_oflag)); - PyList_SetItem(v, 2, PyLong_FromLong((long)mode.c_cflag)); - PyList_SetItem(v, 3, PyLong_FromLong((long)mode.c_lflag)); - PyList_SetItem(v, 4, PyLong_FromLong((long)ispeed)); - PyList_SetItem(v, 5, PyLong_FromLong((long)ospeed)); - if (PyErr_Occurred()) { - Py_DECREF(v); + if (!(v = PyList_New(7))) { goto err; } - PyList_SetItem(v, 6, cc); + +#define ADD_LONG_ITEM(index, val) \ + do { \ + PyObject *l = PyLong_FromLong((long)val); \ + if (l == NULL) { \ + Py_DECREF(v); \ + goto err; \ + } \ + PyList_SET_ITEM(v, index, l); \ + } while (0) + + ADD_LONG_ITEM(0, mode.c_iflag); + ADD_LONG_ITEM(1, mode.c_oflag); + ADD_LONG_ITEM(2, mode.c_cflag); + ADD_LONG_ITEM(3, mode.c_lflag); + ADD_LONG_ITEM(4, ispeed); + ADD_LONG_ITEM(5, ospeed); +#undef ADD_LONG_ITEM + + PyList_SET_ITEM(v, 6, cc); return v; err: Py_DECREF(cc); diff --git a/Objects/structseq.c b/Objects/structseq.c index 0ca622edc2ba37..e4a4b45a8db626 100644 --- a/Objects/structseq.c +++ b/Objects/structseq.c @@ -8,6 +8,7 @@ */ #include "Python.h" +#include "pycore_dict.h" // _PyDict_Pop() #include "pycore_tuple.h" // _PyTuple_FromArray() #include "pycore_object.h" // _PyObject_GC_TRACK() @@ -216,19 +217,34 @@ structseq_new_impl(PyTypeObject *type, PyObject *arg, PyObject *dict) res->ob_item[i] = Py_NewRef(v); } Py_DECREF(arg); - for (; i < max_len; ++i) { - PyObject *ob = NULL; - if (dict != NULL) { - const char *name = type->tp_members[i-n_unnamed_fields].name; + if (dict != NULL && PyDict_GET_SIZE(dict) > 0) { + Py_ssize_t n_found_keys = 0; + for (i = len; i < max_len; ++i) { + PyObject *ob = NULL; + const char *name = type->tp_members[i - n_unnamed_fields].name; if (PyDict_GetItemStringRef(dict, name, &ob) < 0) { Py_DECREF(res); return NULL; } + if (ob == NULL) { + ob = Py_NewRef(Py_None); + } + else { + ++n_found_keys; + } + res->ob_item[i] = ob; + } + if (PyDict_GET_SIZE(dict) > n_found_keys) { + PyErr_Format(PyExc_TypeError, + "%.500s() got duplicate or unexpected 
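The ADD_LONG_ITEM macro above is the classic do { ... } while (0) error-propagation idiom: it behaves like a single statement at the call site, releases the partially built list, and jumps to the shared cleanup label on failure. A generic sketch of the same idiom; the list variable and the err label are assumed to exist in the enclosing function:

    #define ADD_LONG_OR_FAIL(list, index, val)                 \
        do {                                                   \
            PyObject *item = PyLong_FromLong((long)(val));     \
            if (item == NULL) {                                \
                Py_DECREF(list);                               \
                goto err;                                      \
            }                                                  \
            PyList_SET_ITEM(list, index, item);                \
        } while (0)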
field name(s)", + type->tp_name); + Py_DECREF(res); + return NULL; } - if (ob == NULL) { - ob = Py_NewRef(Py_None); + } else { + for (i = len; i < max_len; ++i) { + res->ob_item[i] = Py_NewRef(Py_None); } - res->ob_item[i] = ob; } _PyObject_GC_TRACK(res); @@ -365,9 +381,82 @@ structseq_reduce(PyStructSequence* self, PyObject *Py_UNUSED(ignored)) return NULL; } + +static PyObject * +structseq_replace(PyStructSequence *self, PyObject *args, PyObject *kwargs) +{ + PyStructSequence *result = NULL; + Py_ssize_t n_fields, n_unnamed_fields, i; + + if (!_PyArg_NoPositional("__replace__", args)) { + return NULL; + } + + n_fields = REAL_SIZE(self); + if (n_fields < 0) { + return NULL; + } + n_unnamed_fields = UNNAMED_FIELDS(self); + if (n_unnamed_fields < 0) { + return NULL; + } + if (n_unnamed_fields > 0) { + PyErr_Format(PyExc_TypeError, + "__replace__() is not supported for %.500s " + "because it has unnamed field(s)", + Py_TYPE(self)->tp_name); + return NULL; + } + + result = (PyStructSequence *) PyStructSequence_New(Py_TYPE(self)); + if (!result) { + return NULL; + } + + if (kwargs != NULL) { + // We do not support types with unnamed fields, so we can iterate over + // i >= n_visible_fields case without slicing with (i - n_unnamed_fields). + for (i = 0; i < n_fields; ++i) { + PyObject *key = PyUnicode_FromString(Py_TYPE(self)->tp_members[i].name); + if (!key) { + goto error; + } + PyObject *ob = _PyDict_Pop(kwargs, key, self->ob_item[i]); + Py_DECREF(key); + if (!ob) { + goto error; + } + result->ob_item[i] = ob; + } + // Check if there are any unexpected fields. + if (PyDict_GET_SIZE(kwargs) > 0) { + PyObject *names = PyDict_Keys(kwargs); + if (names) { + PyErr_Format(PyExc_TypeError, "Got unexpected field name(s): %R", names); + Py_DECREF(names); + } + goto error; + } + } + else + { + // Just create a copy of the original. + for (i = 0; i < n_fields; ++i) { + result->ob_item[i] = Py_NewRef(self->ob_item[i]); + } + } + + return (PyObject *)result; + +error: + Py_DECREF(result); + return NULL; +} + static PyMethodDef structseq_methods[] = { {"__reduce__", (PyCFunction)structseq_reduce, METH_NOARGS, NULL}, - {NULL, NULL} + {"__replace__", _PyCFunction_CAST(structseq_replace), METH_VARARGS | METH_KEYWORDS, NULL}, + {NULL, NULL} // sentinel }; static Py_ssize_t diff --git a/Python/abstract_interp_cases.c.h b/Python/abstract_interp_cases.c.h index 61b1db9e5a1543..04fe07fad39937 100644 --- a/Python/abstract_interp_cases.c.h +++ b/Python/abstract_interp_cases.c.h @@ -474,6 +474,28 @@ break; } + case _CHECK_ATTR_MODULE: { + break; + } + + case _LOAD_ATTR_MODULE: { + STACK_GROW(((oparg & 1) ? 1 : 0)); + PARTITIONNODE_OVERWRITE((_Py_PARTITIONNODE_t *)PARTITIONNODE_NULLROOT, PEEK(-(-1 - (oparg & 1 ? 1 : 0))), true); + PARTITIONNODE_OVERWRITE((_Py_PARTITIONNODE_t *)PARTITIONNODE_NULLROOT, PEEK(-(-(oparg & 1 ? 1 : 0))), true); + break; + } + + case _CHECK_ATTR_WITH_HINT: { + break; + } + + case _LOAD_ATTR_WITH_HINT: { + STACK_GROW(((oparg & 1) ? 1 : 0)); + PARTITIONNODE_OVERWRITE((_Py_PARTITIONNODE_t *)PARTITIONNODE_NULLROOT, PEEK(-(-1 - (oparg & 1 ? 1 : 0))), true); + PARTITIONNODE_OVERWRITE((_Py_PARTITIONNODE_t *)PARTITIONNODE_NULLROOT, PEEK(-(-(oparg & 1 ? 1 : 0))), true); + break; + } + case _LOAD_ATTR_SLOT: { STACK_GROW(((oparg & 1) ? 1 : 0)); PARTITIONNODE_OVERWRITE((_Py_PARTITIONNODE_t *)PARTITIONNODE_NULLROOT, PEEK(-(-1 - (oparg & 1 ? 
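The new __replace__ method accepts keyword arguments only and returns a copy of the struct sequence with the named fields swapped out; it is the hook that copy.replace() is meant to use. A hypothetical usage sketch from C, replacing one field by name (the helper below is illustrative, not part of the patch):

    #include <Python.h>

    static PyObject *
    replace_one_field(PyObject *structseq, const char *field, PyObject *value)
    {
        PyObject *meth = PyObject_GetAttrString(structseq, "__replace__");
        if (meth == NULL) {
            return NULL;
        }
        PyObject *args = PyTuple_New(0);   /* positional arguments are rejected */
        PyObject *kwargs = (args != NULL) ? Py_BuildValue("{s:O}", field, value) : NULL;
        PyObject *result = NULL;
        if (kwargs != NULL) {
            result = PyObject_Call(meth, args, kwargs);
        }
        Py_XDECREF(kwargs);
        Py_XDECREF(args);
        Py_DECREF(meth);
        return result;
    }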
1 : 0))), true); @@ -481,16 +503,23 @@ break; } - case _GUARD_DORV_VALUES: { + case _CHECK_ATTR_CLASS: { break; } - case _STORE_ATTR_INSTANCE_VALUE: { - STACK_SHRINK(2); + case _LOAD_ATTR_CLASS: { + STACK_GROW(((oparg & 1) ? 1 : 0)); + PARTITIONNODE_OVERWRITE((_Py_PARTITIONNODE_t *)PARTITIONNODE_NULLROOT, PEEK(-(-1 - (oparg & 1 ? 1 : 0))), true); + PARTITIONNODE_OVERWRITE((_Py_PARTITIONNODE_t *)PARTITIONNODE_NULLROOT, PEEK(-(-(oparg & 1 ? 1 : 0))), true); break; } - case _GUARD_TYPE_VERSION_STORE: { + case _GUARD_DORV_VALUES: { + break; + } + + case _STORE_ATTR_INSTANCE_VALUE: { + STACK_SHRINK(2); break; } @@ -674,6 +703,29 @@ break; } + case _LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES: { + PARTITIONNODE_OVERWRITE((_Py_PARTITIONNODE_t *)PARTITIONNODE_NULLROOT, PEEK(-(-1)), true); + PARTITIONNODE_OVERWRITE((_Py_PARTITIONNODE_t *)PARTITIONNODE_NULLROOT, PEEK(-(0)), true); + break; + } + + case _LOAD_ATTR_NONDESCRIPTOR_NO_DICT: { + PARTITIONNODE_OVERWRITE((_Py_PARTITIONNODE_t *)PARTITIONNODE_NULLROOT, PEEK(-(-1)), true); + PARTITIONNODE_OVERWRITE((_Py_PARTITIONNODE_t *)PARTITIONNODE_NULLROOT, PEEK(-(0)), true); + break; + } + + case _CHECK_ATTR_METHOD_LAZY_DICT: { + break; + } + + case _LOAD_ATTR_METHOD_LAZY_DICT: { + STACK_GROW(1); + PARTITIONNODE_OVERWRITE((_Py_PARTITIONNODE_t *)PARTITIONNODE_NULLROOT, PEEK(-(-2)), true); + PARTITIONNODE_OVERWRITE((_Py_PARTITIONNODE_t *)PARTITIONNODE_NULLROOT, PEEK(-(-1)), true); + break; + } + case _CHECK_CALL_BOUND_METHOD_EXACT_ARGS: { break; } diff --git a/Python/bytecodes.c b/Python/bytecodes.c index f7681bd234a43f..6482252eec9fb9 100644 --- a/Python/bytecodes.c +++ b/Python/bytecodes.c @@ -41,8 +41,6 @@ #include "ceval_macros.h" /* Flow control macros */ -#define DEOPT_IF(cond, instname) ((void)0) -#define ERROR_IF(cond, labelname) ((void)0) #define GO_TO_INSTRUCTION(instname) ((void)0) #define inst(name, ...) case name: @@ -138,7 +136,12 @@ dummy_func( inst(RESUME, (--)) { TIER_ONE_ONLY assert(frame == tstate->current_frame); - if (_PyFrame_GetCode(frame)->_co_instrumentation_version != tstate->interp->monitoring_version) { + uintptr_t global_version = + _Py_atomic_load_uintptr_relaxed(&tstate->interp->ceval.eval_breaker) & + ~_PY_EVAL_EVENTS_MASK; + uintptr_t code_version = _PyFrame_GetCode(frame)->_co_instrumentation_version; + assert((code_version & 255) == 0); + if (code_version != global_version) { int err = _Py_Instrument(_PyFrame_GetCode(frame), tstate->interp); ERROR_IF(err, error); next_instr--; @@ -153,21 +156,19 @@ dummy_func( inst(RESUME_CHECK, (--)) { #if defined(__EMSCRIPTEN__) - DEOPT_IF(_Py_emscripten_signal_clock == 0, RESUME); + DEOPT_IF(_Py_emscripten_signal_clock == 0); _Py_emscripten_signal_clock -= Py_EMSCRIPTEN_SIGNAL_HANDLING; #endif - /* Possibly combine these two checks */ - DEOPT_IF(_PyFrame_GetCode(frame)->_co_instrumentation_version - != tstate->interp->monitoring_version, RESUME); - DEOPT_IF(_Py_atomic_load_relaxed_int32(&tstate->interp->ceval.eval_breaker), RESUME); + uintptr_t eval_breaker = _Py_atomic_load_uintptr_relaxed(&tstate->interp->ceval.eval_breaker); + uintptr_t version = _PyFrame_GetCode(frame)->_co_instrumentation_version; + assert((version & _PY_EVAL_EVENTS_MASK) == 0); + DEOPT_IF(eval_breaker != version); } inst(INSTRUMENTED_RESUME, (--)) { - /* Possible performance enhancement: - * We need to check the eval breaker anyway, can we - * combine the instrument verison check and the eval breaker test? 
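The RESUME changes above rely on a packing convention for the per-interpreter eval_breaker word: the low bits (covered by _PY_EVAL_EVENTS_MASK) carry pending-event flags, while the remaining high bits carry the instrumentation version, so RESUME_CHECK can compare a single load against the code object's stored version. A self-contained sketch of the masked comparison used by RESUME; the mask is passed in here to avoid depending on internal headers:

    #include <stdint.h>

    static inline int
    instrumentation_out_of_date(uintptr_t eval_breaker, uintptr_t code_version,
                                uintptr_t events_mask)
    {
        /* code_version is stored with its low (event) bits cleared. */
        uintptr_t global_version = eval_breaker & ~events_mask;
        return code_version != global_version;
    }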
- */ - if (_PyFrame_GetCode(frame)->_co_instrumentation_version != tstate->interp->monitoring_version) { + uintptr_t global_version = _Py_atomic_load_uintptr_relaxed(&tstate->interp->ceval.eval_breaker) & ~_PY_EVAL_EVENTS_MASK; + uintptr_t code_version = _PyFrame_GetCode(frame)->_co_instrumentation_version; + if (code_version != global_version) { if (_Py_Instrument(_PyFrame_GetCode(frame), tstate->interp)) { goto error; } @@ -325,12 +326,12 @@ dummy_func( } inst(TO_BOOL_BOOL, (unused/1, unused/2, value -- value)) { - DEOPT_IF(!PyBool_Check(value), TO_BOOL); + DEOPT_IF(!PyBool_Check(value)); STAT_INC(TO_BOOL, hit); } inst(TO_BOOL_INT, (unused/1, unused/2, value -- res)) { - DEOPT_IF(!PyLong_CheckExact(value), TO_BOOL); + DEOPT_IF(!PyLong_CheckExact(value)); STAT_INC(TO_BOOL, hit); if (_PyLong_IsZero((PyLongObject *)value)) { assert(_Py_IsImmortal(value)); @@ -343,7 +344,7 @@ dummy_func( } inst(TO_BOOL_LIST, (unused/1, unused/2, value -- res)) { - DEOPT_IF(!PyList_CheckExact(value), TO_BOOL); + DEOPT_IF(!PyList_CheckExact(value)); STAT_INC(TO_BOOL, hit); res = Py_SIZE(value) ? Py_True : Py_False; DECREF_INPUTS(); @@ -351,13 +352,13 @@ dummy_func( inst(TO_BOOL_NONE, (unused/1, unused/2, value -- res)) { // This one is a bit weird, because we expect *some* failures: - DEOPT_IF(!Py_IsNone(value), TO_BOOL); + DEOPT_IF(!Py_IsNone(value)); STAT_INC(TO_BOOL, hit); res = Py_False; } inst(TO_BOOL_STR, (unused/1, unused/2, value -- res)) { - DEOPT_IF(!PyUnicode_CheckExact(value), TO_BOOL); + DEOPT_IF(!PyUnicode_CheckExact(value)); STAT_INC(TO_BOOL, hit); if (value == &_Py_STR(empty)) { assert(_Py_IsImmortal(value)); @@ -373,7 +374,7 @@ dummy_func( inst(TO_BOOL_ALWAYS_TRUE, (unused/1, version/2, value -- res)) { // This one is a bit weird, because we expect *some* failures: assert(version); - DEOPT_IF(Py_TYPE(value)->tp_version_tag != version, TO_BOOL); + DEOPT_IF(Py_TYPE(value)->tp_version_tag != version); STAT_INC(TO_BOOL, hit); DECREF_INPUTS(); res = Py_True; @@ -397,8 +398,8 @@ dummy_func( }; op(_GUARD_BOTH_INT, (left, right -- left, right)) { - DEOPT_IF(!PyLong_CheckExact(left), BINARY_OP); - DEOPT_IF(!PyLong_CheckExact(right), BINARY_OP); + DEOPT_IF(!PyLong_CheckExact(left)); + DEOPT_IF(!PyLong_CheckExact(right)); } op(_BINARY_OP_MULTIPLY_INT, (unused/1, left, right -- res)) { @@ -433,8 +434,8 @@ dummy_func( _GUARD_BOTH_INT + _BINARY_OP_SUBTRACT_INT; op(_GUARD_BOTH_FLOAT, (left, right -- left, right)) { - DEOPT_IF(!PyFloat_CheckExact(left), BINARY_OP); - DEOPT_IF(!PyFloat_CheckExact(right), BINARY_OP); + DEOPT_IF(!PyFloat_CheckExact(left)); + DEOPT_IF(!PyFloat_CheckExact(right)); } op(_BINARY_OP_MULTIPLY_FLOAT, (unused/1, left, right -- res)) { @@ -469,8 +470,8 @@ dummy_func( _GUARD_BOTH_FLOAT + _BINARY_OP_SUBTRACT_FLOAT; op(_GUARD_BOTH_UNICODE, (left, right -- left, right)) { - DEOPT_IF(!PyUnicode_CheckExact(left), BINARY_OP); - DEOPT_IF(!PyUnicode_CheckExact(right), BINARY_OP); + DEOPT_IF(!PyUnicode_CheckExact(left)); + DEOPT_IF(!PyUnicode_CheckExact(right)); } op(_BINARY_OP_ADD_UNICODE, (unused/1, left, right -- res)) { @@ -494,7 +495,7 @@ dummy_func( _Py_CODEUNIT true_next = next_instr[INLINE_CACHE_ENTRIES_BINARY_OP]; assert(true_next.op.code == STORE_FAST); PyObject **target_local = &GETLOCAL(true_next.op.arg); - DEOPT_IF(*target_local != left, BINARY_OP); + DEOPT_IF(*target_local != left); STAT_INC(BINARY_OP, hit); /* Handle `left = left + right` or `left += right` for str. 
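Throughout this file DEOPT_IF() loses its second argument: the code generator now derives the deoptimization target from the instruction being defined instead of requiring it to be spelled at every call site. Purely as an illustration of the shape, and not the generator's actual expansion, a one-argument deopt guard could look like:

    /* Hypothetical expansion only; the real code is produced by the cases generator. */
    #define DEOPT_IF_SKETCH(cond)                                              \
        do {                                                                   \
            if (cond) {                                                        \
                goto deoptimize;   /* target chosen per-instruction by the generator */ \
            }                                                                  \
        } while (0)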
* @@ -574,13 +575,13 @@ dummy_func( } inst(BINARY_SUBSCR_LIST_INT, (unused/1, list, sub -- res)) { - DEOPT_IF(!PyLong_CheckExact(sub), BINARY_SUBSCR); - DEOPT_IF(!PyList_CheckExact(list), BINARY_SUBSCR); + DEOPT_IF(!PyLong_CheckExact(sub)); + DEOPT_IF(!PyList_CheckExact(list)); // Deopt unless 0 <= sub < PyList_Size(list) - DEOPT_IF(!_PyLong_IsNonNegativeCompact((PyLongObject *)sub), BINARY_SUBSCR); + DEOPT_IF(!_PyLong_IsNonNegativeCompact((PyLongObject *)sub)); Py_ssize_t index = ((PyLongObject*)sub)->long_value.ob_digit[0]; - DEOPT_IF(index >= PyList_GET_SIZE(list), BINARY_SUBSCR); + DEOPT_IF(index >= PyList_GET_SIZE(list)); STAT_INC(BINARY_SUBSCR, hit); res = PyList_GET_ITEM(list, index); assert(res != NULL); @@ -590,14 +591,14 @@ dummy_func( } inst(BINARY_SUBSCR_STR_INT, (unused/1, str, sub -- res)) { - DEOPT_IF(!PyLong_CheckExact(sub), BINARY_SUBSCR); - DEOPT_IF(!PyUnicode_CheckExact(str), BINARY_SUBSCR); - DEOPT_IF(!_PyLong_IsNonNegativeCompact((PyLongObject *)sub), BINARY_SUBSCR); + DEOPT_IF(!PyLong_CheckExact(sub)); + DEOPT_IF(!PyUnicode_CheckExact(str)); + DEOPT_IF(!_PyLong_IsNonNegativeCompact((PyLongObject *)sub)); Py_ssize_t index = ((PyLongObject*)sub)->long_value.ob_digit[0]; - DEOPT_IF(PyUnicode_GET_LENGTH(str) <= index, BINARY_SUBSCR); + DEOPT_IF(PyUnicode_GET_LENGTH(str) <= index); // Specialize for reading an ASCII character from any string: Py_UCS4 c = PyUnicode_READ_CHAR(str, index); - DEOPT_IF(Py_ARRAY_LENGTH(_Py_SINGLETON(strings).ascii) <= c, BINARY_SUBSCR); + DEOPT_IF(Py_ARRAY_LENGTH(_Py_SINGLETON(strings).ascii) <= c); STAT_INC(BINARY_SUBSCR, hit); res = (PyObject*)&_Py_SINGLETON(strings).ascii[c]; _Py_DECREF_SPECIALIZED(sub, (destructor)PyObject_Free); @@ -605,13 +606,13 @@ dummy_func( } inst(BINARY_SUBSCR_TUPLE_INT, (unused/1, tuple, sub -- res)) { - DEOPT_IF(!PyLong_CheckExact(sub), BINARY_SUBSCR); - DEOPT_IF(!PyTuple_CheckExact(tuple), BINARY_SUBSCR); + DEOPT_IF(!PyLong_CheckExact(sub)); + DEOPT_IF(!PyTuple_CheckExact(tuple)); // Deopt unless 0 <= sub < PyTuple_Size(list) - DEOPT_IF(!_PyLong_IsNonNegativeCompact((PyLongObject *)sub), BINARY_SUBSCR); + DEOPT_IF(!_PyLong_IsNonNegativeCompact((PyLongObject *)sub)); Py_ssize_t index = ((PyLongObject*)sub)->long_value.ob_digit[0]; - DEOPT_IF(index >= PyTuple_GET_SIZE(tuple), BINARY_SUBSCR); + DEOPT_IF(index >= PyTuple_GET_SIZE(tuple)); STAT_INC(BINARY_SUBSCR, hit); res = PyTuple_GET_ITEM(tuple, index); assert(res != NULL); @@ -621,7 +622,7 @@ dummy_func( } inst(BINARY_SUBSCR_DICT, (unused/1, dict, sub -- res)) { - DEOPT_IF(!PyDict_CheckExact(dict), BINARY_SUBSCR); + DEOPT_IF(!PyDict_CheckExact(dict)); STAT_INC(BINARY_SUBSCR, hit); res = PyDict_GetItemWithError(dict, sub); if (res == NULL) { @@ -636,19 +637,19 @@ dummy_func( } inst(BINARY_SUBSCR_GETITEM, (unused/1, container, sub -- unused)) { - DEOPT_IF(tstate->interp->eval_frame, BINARY_SUBSCR); + DEOPT_IF(tstate->interp->eval_frame); PyTypeObject *tp = Py_TYPE(container); - DEOPT_IF(!PyType_HasFeature(tp, Py_TPFLAGS_HEAPTYPE), BINARY_SUBSCR); + DEOPT_IF(!PyType_HasFeature(tp, Py_TPFLAGS_HEAPTYPE)); PyHeapTypeObject *ht = (PyHeapTypeObject *)tp; PyObject *cached = ht->_spec_cache.getitem; - DEOPT_IF(cached == NULL, BINARY_SUBSCR); + DEOPT_IF(cached == NULL); assert(PyFunction_Check(cached)); PyFunctionObject *getitem = (PyFunctionObject *)cached; uint32_t cached_version = ht->_spec_cache.getitem_version; - DEOPT_IF(getitem->func_version != cached_version, BINARY_SUBSCR); + DEOPT_IF(getitem->func_version != cached_version); PyCodeObject *code = (PyCodeObject 
*)getitem->func_code; assert(code->co_argcount == 2); - DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize), BINARY_SUBSCR); + DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize)); STAT_INC(BINARY_SUBSCR, hit); Py_INCREF(getitem); _PyInterpreterFrame *new_frame = _PyFrame_PushUnchecked(tstate, getitem, 2); @@ -693,14 +694,14 @@ dummy_func( } inst(STORE_SUBSCR_LIST_INT, (unused/1, value, list, sub -- )) { - DEOPT_IF(!PyLong_CheckExact(sub), STORE_SUBSCR); - DEOPT_IF(!PyList_CheckExact(list), STORE_SUBSCR); + DEOPT_IF(!PyLong_CheckExact(sub)); + DEOPT_IF(!PyList_CheckExact(list)); // Ensure nonnegative, zero-or-one-digit ints. - DEOPT_IF(!_PyLong_IsNonNegativeCompact((PyLongObject *)sub), STORE_SUBSCR); + DEOPT_IF(!_PyLong_IsNonNegativeCompact((PyLongObject *)sub)); Py_ssize_t index = ((PyLongObject*)sub)->long_value.ob_digit[0]; // Ensure index < len(list) - DEOPT_IF(index >= PyList_GET_SIZE(list), STORE_SUBSCR); + DEOPT_IF(index >= PyList_GET_SIZE(list)); STAT_INC(STORE_SUBSCR, hit); PyObject *old_value = PyList_GET_ITEM(list, index); @@ -712,7 +713,7 @@ dummy_func( } inst(STORE_SUBSCR_DICT, (unused/1, value, dict, sub -- )) { - DEOPT_IF(!PyDict_CheckExact(dict), STORE_SUBSCR); + DEOPT_IF(!PyDict_CheckExact(dict)); STAT_INC(STORE_SUBSCR, hit); int err = _PyDict_SetItem_Take2((PyDictObject *)dict, sub, value); Py_DECREF(dict); @@ -1009,11 +1010,10 @@ dummy_func( } inst(SEND_GEN, (unused/1, receiver, v -- receiver, unused)) { - DEOPT_IF(tstate->interp->eval_frame, SEND); + DEOPT_IF(tstate->interp->eval_frame); PyGenObject *gen = (PyGenObject *)receiver; - DEOPT_IF(Py_TYPE(gen) != &PyGen_Type && - Py_TYPE(gen) != &PyCoro_Type, SEND); - DEOPT_IF(gen->gi_frame_state >= FRAME_EXECUTING, SEND); + DEOPT_IF(Py_TYPE(gen) != &PyGen_Type && Py_TYPE(gen) != &PyCoro_Type); + DEOPT_IF(gen->gi_frame_state >= FRAME_EXECUTING); STAT_INC(SEND, hit); _PyInterpreterFrame *gen_frame = (_PyInterpreterFrame *)gen->gi_iframe; STACK_SHRINK(1); @@ -1194,8 +1194,8 @@ dummy_func( } inst(UNPACK_SEQUENCE_TWO_TUPLE, (unused/1, seq -- values[oparg])) { - DEOPT_IF(!PyTuple_CheckExact(seq), UNPACK_SEQUENCE); - DEOPT_IF(PyTuple_GET_SIZE(seq) != 2, UNPACK_SEQUENCE); + DEOPT_IF(!PyTuple_CheckExact(seq)); + DEOPT_IF(PyTuple_GET_SIZE(seq) != 2); assert(oparg == 2); STAT_INC(UNPACK_SEQUENCE, hit); values[0] = Py_NewRef(PyTuple_GET_ITEM(seq, 1)); @@ -1204,8 +1204,8 @@ dummy_func( } inst(UNPACK_SEQUENCE_TUPLE, (unused/1, seq -- values[oparg])) { - DEOPT_IF(!PyTuple_CheckExact(seq), UNPACK_SEQUENCE); - DEOPT_IF(PyTuple_GET_SIZE(seq) != oparg, UNPACK_SEQUENCE); + DEOPT_IF(!PyTuple_CheckExact(seq)); + DEOPT_IF(PyTuple_GET_SIZE(seq) != oparg); STAT_INC(UNPACK_SEQUENCE, hit); PyObject **items = _PyTuple_ITEMS(seq); for (int i = oparg; --i >= 0; ) { @@ -1215,8 +1215,8 @@ dummy_func( } inst(UNPACK_SEQUENCE_LIST, (unused/1, seq -- values[oparg])) { - DEOPT_IF(!PyList_CheckExact(seq), UNPACK_SEQUENCE); - DEOPT_IF(PyList_GET_SIZE(seq) != oparg, UNPACK_SEQUENCE); + DEOPT_IF(!PyList_CheckExact(seq)); + DEOPT_IF(PyList_GET_SIZE(seq) != oparg); STAT_INC(UNPACK_SEQUENCE, hit); PyObject **items = _PyList_ITEMS(seq); for (int i = oparg; --i >= 0; ) { @@ -1412,15 +1412,15 @@ dummy_func( op(_GUARD_GLOBALS_VERSION, (version/1 --)) { PyDictObject *dict = (PyDictObject *)GLOBALS(); - DEOPT_IF(!PyDict_CheckExact(dict), LOAD_GLOBAL); - DEOPT_IF(dict->ma_keys->dk_version != version, LOAD_GLOBAL); + DEOPT_IF(!PyDict_CheckExact(dict)); + DEOPT_IF(dict->ma_keys->dk_version != version); assert(DK_IS_UNICODE(dict->ma_keys)); } 
op(_GUARD_BUILTINS_VERSION, (version/1 --)) { PyDictObject *dict = (PyDictObject *)BUILTINS(); - DEOPT_IF(!PyDict_CheckExact(dict), LOAD_GLOBAL); - DEOPT_IF(dict->ma_keys->dk_version != version, LOAD_GLOBAL); + DEOPT_IF(!PyDict_CheckExact(dict)); + DEOPT_IF(dict->ma_keys->dk_version != version); assert(DK_IS_UNICODE(dict->ma_keys)); } @@ -1428,7 +1428,7 @@ dummy_func( PyDictObject *dict = (PyDictObject *)GLOBALS(); PyDictUnicodeEntry *entries = DK_UNICODE_ENTRIES(dict->ma_keys); res = entries[index].me_value; - DEOPT_IF(res == NULL, LOAD_GLOBAL); + DEOPT_IF(res == NULL); Py_INCREF(res); STAT_INC(LOAD_GLOBAL, hit); null = NULL; @@ -1438,7 +1438,7 @@ dummy_func( PyDictObject *bdict = (PyDictObject *)BUILTINS(); PyDictUnicodeEntry *entries = DK_UNICODE_ENTRIES(bdict->ma_keys); res = entries[index].me_value; - DEOPT_IF(res == NULL, LOAD_GLOBAL); + DEOPT_IF(res == NULL); Py_INCREF(res); STAT_INC(LOAD_GLOBAL, hit); null = NULL; @@ -1763,8 +1763,8 @@ dummy_func( inst(LOAD_SUPER_ATTR_ATTR, (unused/1, global_super, class, self -- attr, unused if (0))) { assert(!(oparg & 1)); - DEOPT_IF(global_super != (PyObject *)&PySuper_Type, LOAD_SUPER_ATTR); - DEOPT_IF(!PyType_Check(class), LOAD_SUPER_ATTR); + DEOPT_IF(global_super != (PyObject *)&PySuper_Type); + DEOPT_IF(!PyType_Check(class)); STAT_INC(LOAD_SUPER_ATTR, hit); PyObject *name = GETITEM(FRAME_CO_NAMES, oparg >> 2); attr = _PySuper_Lookup((PyTypeObject *)class, self, name, NULL); @@ -1774,8 +1774,8 @@ dummy_func( inst(LOAD_SUPER_ATTR_METHOD, (unused/1, global_super, class, self -- attr, self_or_null)) { assert(oparg & 1); - DEOPT_IF(global_super != (PyObject *)&PySuper_Type, LOAD_SUPER_ATTR); - DEOPT_IF(!PyType_Check(class), LOAD_SUPER_ATTR); + DEOPT_IF(global_super != (PyObject *)&PySuper_Type); + DEOPT_IF(!PyType_Check(class)); STAT_INC(LOAD_SUPER_ATTR, hit); PyObject *name = GETITEM(FRAME_CO_NAMES, oparg >> 2); PyTypeObject *cls = (PyTypeObject *)class; @@ -1864,22 +1864,20 @@ dummy_func( op(_GUARD_TYPE_VERSION, (type_version/2, owner -- owner)) { PyTypeObject *tp = Py_TYPE(owner); assert(type_version != 0); - DEOPT_IF(tp->tp_version_tag != type_version, LOAD_ATTR); + DEOPT_IF(tp->tp_version_tag != type_version); } op(_CHECK_MANAGED_OBJECT_HAS_VALUES, (owner -- owner)) { assert(Py_TYPE(owner)->tp_dictoffset < 0); assert(Py_TYPE(owner)->tp_flags & Py_TPFLAGS_MANAGED_DICT); PyDictOrValues *dorv = _PyObject_DictOrValuesPointer(owner); - DEOPT_IF(!_PyDictOrValues_IsValues(*dorv) && - !_PyObject_MakeInstanceAttributesFromDict(owner, dorv), - LOAD_ATTR); + DEOPT_IF(!_PyDictOrValues_IsValues(*dorv) && !_PyObject_MakeInstanceAttributesFromDict(owner, dorv)); } op(_LOAD_ATTR_INSTANCE_VALUE, (index/1, owner -- attr, null if (oparg & 1))) { PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(owner); attr = _PyDictOrValues_GetValues(dorv)->values[index]; - DEOPT_IF(attr == NULL, LOAD_ATTR); + DEOPT_IF(attr == NULL); STAT_INC(LOAD_ATTR, hit); Py_INCREF(attr); null = NULL; @@ -1893,56 +1891,74 @@ dummy_func( _LOAD_ATTR_INSTANCE_VALUE + unused/5; // Skip over rest of cache - inst(LOAD_ATTR_MODULE, (unused/1, type_version/2, index/1, unused/5, owner -- attr, null if (oparg & 1))) { - DEOPT_IF(!PyModule_CheckExact(owner), LOAD_ATTR); + op(_CHECK_ATTR_MODULE, (type_version/2, owner -- owner)) { + DEOPT_IF(!PyModule_CheckExact(owner)); PyDictObject *dict = (PyDictObject *)((PyModuleObject *)owner)->md_dict; assert(dict != NULL); - DEOPT_IF(dict->ma_keys->dk_version != type_version, LOAD_ATTR); + DEOPT_IF(dict->ma_keys->dk_version != type_version); + } + + 
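The following hunks continue the same refactoring: each specialized LOAD_ATTR form is split into a pure guard micro-op (which only deoptimizes) and an action micro-op, and the original instruction is reassembled with a macro() composition so tier-2 traces can later drop guards that are provably redundant. Schematically, in this file's DSL, with all names below being placeholders rather than real uops:

    op(_CHECK_SOMETHING, (owner -- owner)) {
        DEOPT_IF(!looks_right(owner));      // guard only; leaves the stack value alone
    }

    op(_DO_SOMETHING, (cache_entry/1, owner -- attr, null if (oparg & 1))) {
        attr = fetch(owner, cache_entry);   // action; assumes the guard already passed
        null = NULL;
        DECREF_INPUTS();
    }

    macro(SOMETHING_SPECIALIZED) = unused/1 + _CHECK_SOMETHING + _DO_SOMETHING + unused/5;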
op(_LOAD_ATTR_MODULE, (index/1, owner -- attr, null if (oparg & 1))) { + PyDictObject *dict = (PyDictObject *)((PyModuleObject *)owner)->md_dict; assert(dict->ma_keys->dk_kind == DICT_KEYS_UNICODE); assert(index < dict->ma_keys->dk_nentries); PyDictUnicodeEntry *ep = DK_UNICODE_ENTRIES(dict->ma_keys) + index; attr = ep->me_value; - DEOPT_IF(attr == NULL, LOAD_ATTR); + DEOPT_IF(attr == NULL); STAT_INC(LOAD_ATTR, hit); Py_INCREF(attr); null = NULL; DECREF_INPUTS(); } - inst(LOAD_ATTR_WITH_HINT, (unused/1, type_version/2, index/1, unused/5, owner -- attr, null if (oparg & 1))) { - PyTypeObject *tp = Py_TYPE(owner); - assert(type_version != 0); - DEOPT_IF(tp->tp_version_tag != type_version, LOAD_ATTR); - assert(tp->tp_flags & Py_TPFLAGS_MANAGED_DICT); + macro(LOAD_ATTR_MODULE) = + unused/1 + + _CHECK_ATTR_MODULE + + _LOAD_ATTR_MODULE + + unused/5; + + op(_CHECK_ATTR_WITH_HINT, (owner -- owner)) { + assert(Py_TYPE(owner)->tp_flags & Py_TPFLAGS_MANAGED_DICT); PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(owner); - DEOPT_IF(_PyDictOrValues_IsValues(dorv), LOAD_ATTR); + DEOPT_IF(_PyDictOrValues_IsValues(dorv)); PyDictObject *dict = (PyDictObject *)_PyDictOrValues_GetDict(dorv); - DEOPT_IF(dict == NULL, LOAD_ATTR); + DEOPT_IF(dict == NULL); assert(PyDict_CheckExact((PyObject *)dict)); + } + + op(_LOAD_ATTR_WITH_HINT, (hint/1, owner -- attr, null if (oparg & 1))) { + PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(owner); + PyDictObject *dict = (PyDictObject *)_PyDictOrValues_GetDict(dorv); + DEOPT_IF(hint >= (size_t)dict->ma_keys->dk_nentries); PyObject *name = GETITEM(FRAME_CO_NAMES, oparg>>1); - uint16_t hint = index; - DEOPT_IF(hint >= (size_t)dict->ma_keys->dk_nentries, LOAD_ATTR); if (DK_IS_UNICODE(dict->ma_keys)) { PyDictUnicodeEntry *ep = DK_UNICODE_ENTRIES(dict->ma_keys) + hint; - DEOPT_IF(ep->me_key != name, LOAD_ATTR); + DEOPT_IF(ep->me_key != name); attr = ep->me_value; } else { PyDictKeyEntry *ep = DK_ENTRIES(dict->ma_keys) + hint; - DEOPT_IF(ep->me_key != name, LOAD_ATTR); + DEOPT_IF(ep->me_key != name); attr = ep->me_value; } - DEOPT_IF(attr == NULL, LOAD_ATTR); + DEOPT_IF(attr == NULL); STAT_INC(LOAD_ATTR, hit); Py_INCREF(attr); null = NULL; DECREF_INPUTS(); } + macro(LOAD_ATTR_WITH_HINT) = + unused/1 + + _GUARD_TYPE_VERSION + + _CHECK_ATTR_WITH_HINT + + _LOAD_ATTR_WITH_HINT + + unused/5; + op(_LOAD_ATTR_SLOT, (index/1, owner -- attr, null if (oparg & 1))) { char *addr = (char *)owner + index; attr = *(PyObject **)addr; - DEOPT_IF(attr == NULL, LOAD_ATTR); + DEOPT_IF(attr == NULL); STAT_INC(LOAD_ATTR, hit); Py_INCREF(attr); null = NULL; @@ -1955,35 +1971,41 @@ dummy_func( _LOAD_ATTR_SLOT + // NOTE: This action may also deopt unused/5; - inst(LOAD_ATTR_CLASS, (unused/1, type_version/2, unused/2, descr/4, owner -- attr, null if (oparg & 1))) { - - DEOPT_IF(!PyType_Check(owner), LOAD_ATTR); - DEOPT_IF(((PyTypeObject *)owner)->tp_version_tag != type_version, - LOAD_ATTR); + op(_CHECK_ATTR_CLASS, (type_version/2, owner -- owner)) { + DEOPT_IF(!PyType_Check(owner)); assert(type_version != 0); + DEOPT_IF(((PyTypeObject *)owner)->tp_version_tag != type_version); + + } + op(_LOAD_ATTR_CLASS, (descr/4, owner -- attr, null if (oparg & 1))) { STAT_INC(LOAD_ATTR, hit); + assert(descr != NULL); + attr = Py_NewRef(descr); null = NULL; - attr = descr; - assert(attr != NULL); - Py_INCREF(attr); DECREF_INPUTS(); } + macro(LOAD_ATTR_CLASS) = + unused/1 + + _CHECK_ATTR_CLASS + + unused/2 + + _LOAD_ATTR_CLASS; + inst(LOAD_ATTR_PROPERTY, (unused/1, type_version/2, func_version/2, fget/4, owner 
-- unused, unused if (0))) { assert((oparg & 1) == 0); - DEOPT_IF(tstate->interp->eval_frame, LOAD_ATTR); + DEOPT_IF(tstate->interp->eval_frame); PyTypeObject *cls = Py_TYPE(owner); - DEOPT_IF(cls->tp_version_tag != type_version, LOAD_ATTR); + DEOPT_IF(cls->tp_version_tag != type_version); assert(type_version != 0); assert(Py_IS_TYPE(fget, &PyFunction_Type)); PyFunctionObject *f = (PyFunctionObject *)fget; assert(func_version != 0); - DEOPT_IF(f->func_version != func_version, LOAD_ATTR); + DEOPT_IF(f->func_version != func_version); PyCodeObject *code = (PyCodeObject *)f->func_code; assert(code->co_argcount == 1); - DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize), LOAD_ATTR); + DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize)); STAT_INC(LOAD_ATTR, hit); Py_INCREF(fget); _PyInterpreterFrame *new_frame = _PyFrame_PushUnchecked(tstate, f, 1); @@ -1997,17 +2019,17 @@ dummy_func( inst(LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN, (unused/1, type_version/2, func_version/2, getattribute/4, owner -- unused, unused if (0))) { assert((oparg & 1) == 0); - DEOPT_IF(tstate->interp->eval_frame, LOAD_ATTR); + DEOPT_IF(tstate->interp->eval_frame); PyTypeObject *cls = Py_TYPE(owner); - DEOPT_IF(cls->tp_version_tag != type_version, LOAD_ATTR); + DEOPT_IF(cls->tp_version_tag != type_version); assert(type_version != 0); assert(Py_IS_TYPE(getattribute, &PyFunction_Type)); PyFunctionObject *f = (PyFunctionObject *)getattribute; assert(func_version != 0); - DEOPT_IF(f->func_version != func_version, LOAD_ATTR); + DEOPT_IF(f->func_version != func_version); PyCodeObject *code = (PyCodeObject *)f->func_code; assert(code->co_argcount == 2); - DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize), LOAD_ATTR); + DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize)); STAT_INC(LOAD_ATTR, hit); PyObject *name = GETITEM(FRAME_CO_NAMES, oparg >> 1); @@ -2025,7 +2047,7 @@ dummy_func( op(_GUARD_DORV_VALUES, (owner -- owner)) { assert(Py_TYPE(owner)->tp_flags & Py_TPFLAGS_MANAGED_DICT); PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(owner); - DEOPT_IF(!_PyDictOrValues_IsValues(dorv), STORE_ATTR); + DEOPT_IF(!_PyDictOrValues_IsValues(dorv)); } op(_STORE_ATTR_INSTANCE_VALUE, (index/1, value, owner --)) { @@ -2045,37 +2067,37 @@ dummy_func( macro(STORE_ATTR_INSTANCE_VALUE) = unused/1 + - _GUARD_TYPE_VERSION_STORE + + _GUARD_TYPE_VERSION + _GUARD_DORV_VALUES + _STORE_ATTR_INSTANCE_VALUE; inst(STORE_ATTR_WITH_HINT, (unused/1, type_version/2, hint/1, value, owner --)) { PyTypeObject *tp = Py_TYPE(owner); assert(type_version != 0); - DEOPT_IF(tp->tp_version_tag != type_version, STORE_ATTR); + DEOPT_IF(tp->tp_version_tag != type_version); assert(tp->tp_flags & Py_TPFLAGS_MANAGED_DICT); PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(owner); - DEOPT_IF(_PyDictOrValues_IsValues(dorv), STORE_ATTR); + DEOPT_IF(_PyDictOrValues_IsValues(dorv)); PyDictObject *dict = (PyDictObject *)_PyDictOrValues_GetDict(dorv); - DEOPT_IF(dict == NULL, STORE_ATTR); + DEOPT_IF(dict == NULL); assert(PyDict_CheckExact((PyObject *)dict)); PyObject *name = GETITEM(FRAME_CO_NAMES, oparg); - DEOPT_IF(hint >= (size_t)dict->ma_keys->dk_nentries, STORE_ATTR); + DEOPT_IF(hint >= (size_t)dict->ma_keys->dk_nentries); PyObject *old_value; uint64_t new_version; if (DK_IS_UNICODE(dict->ma_keys)) { PyDictUnicodeEntry *ep = DK_UNICODE_ENTRIES(dict->ma_keys) + hint; - DEOPT_IF(ep->me_key != name, STORE_ATTR); + DEOPT_IF(ep->me_key != name); old_value = ep->me_value; - DEOPT_IF(old_value == NULL, STORE_ATTR); + 
DEOPT_IF(old_value == NULL); new_version = _PyDict_NotifyEvent(tstate->interp, PyDict_EVENT_MODIFIED, dict, name, value); ep->me_value = value; } else { PyDictKeyEntry *ep = DK_ENTRIES(dict->ma_keys) + hint; - DEOPT_IF(ep->me_key != name, STORE_ATTR); + DEOPT_IF(ep->me_key != name); old_value = ep->me_value; - DEOPT_IF(old_value == NULL, STORE_ATTR); + DEOPT_IF(old_value == NULL); new_version = _PyDict_NotifyEvent(tstate->interp, PyDict_EVENT_MODIFIED, dict, name, value); ep->me_value = value; } @@ -2090,12 +2112,6 @@ dummy_func( Py_DECREF(owner); } - op(_GUARD_TYPE_VERSION_STORE, (type_version/2, owner -- owner)) { - PyTypeObject *tp = Py_TYPE(owner); - assert(type_version != 0); - DEOPT_IF(tp->tp_version_tag != type_version, STORE_ATTR); - } - op(_STORE_ATTR_SLOT, (index/1, value, owner --)) { char *addr = (char *)owner + index; STAT_INC(STORE_ATTR, hit); @@ -2107,7 +2123,7 @@ dummy_func( macro(STORE_ATTR_SLOT) = unused/1 + - _GUARD_TYPE_VERSION_STORE + + _GUARD_TYPE_VERSION + _STORE_ATTR_SLOT; family(COMPARE_OP, INLINE_CACHE_ENTRIES_COMPARE_OP) = { @@ -2140,8 +2156,8 @@ dummy_func( } inst(COMPARE_OP_FLOAT, (unused/1, left, right -- res)) { - DEOPT_IF(!PyFloat_CheckExact(left), COMPARE_OP); - DEOPT_IF(!PyFloat_CheckExact(right), COMPARE_OP); + DEOPT_IF(!PyFloat_CheckExact(left)); + DEOPT_IF(!PyFloat_CheckExact(right)); STAT_INC(COMPARE_OP, hit); double dleft = PyFloat_AS_DOUBLE(left); double dright = PyFloat_AS_DOUBLE(right); @@ -2155,10 +2171,10 @@ dummy_func( // Similar to COMPARE_OP_FLOAT inst(COMPARE_OP_INT, (unused/1, left, right -- res)) { - DEOPT_IF(!PyLong_CheckExact(left), COMPARE_OP); - DEOPT_IF(!PyLong_CheckExact(right), COMPARE_OP); - DEOPT_IF(!_PyLong_IsCompact((PyLongObject *)left), COMPARE_OP); - DEOPT_IF(!_PyLong_IsCompact((PyLongObject *)right), COMPARE_OP); + DEOPT_IF(!PyLong_CheckExact(left)); + DEOPT_IF(!PyLong_CheckExact(right)); + DEOPT_IF(!_PyLong_IsCompact((PyLongObject *)left)); + DEOPT_IF(!_PyLong_IsCompact((PyLongObject *)right)); STAT_INC(COMPARE_OP, hit); assert(_PyLong_DigitCount((PyLongObject *)left) <= 1 && _PyLong_DigitCount((PyLongObject *)right) <= 1); @@ -2174,8 +2190,8 @@ dummy_func( // Similar to COMPARE_OP_FLOAT, but for ==, != only inst(COMPARE_OP_STR, (unused/1, left, right -- res)) { - DEOPT_IF(!PyUnicode_CheckExact(left), COMPARE_OP); - DEOPT_IF(!PyUnicode_CheckExact(right), COMPARE_OP); + DEOPT_IF(!PyUnicode_CheckExact(left)); + DEOPT_IF(!PyUnicode_CheckExact(right)); STAT_INC(COMPARE_OP, hit); int eq = _PyUnicode_Equal(left, right); assert((oparg >> 5) == Py_EQ || (oparg >> 5) == Py_NE); @@ -2492,7 +2508,7 @@ dummy_func( } op(_ITER_CHECK_LIST, (iter -- iter)) { - DEOPT_IF(Py_TYPE(iter) != &PyListIter_Type, FOR_ITER); + DEOPT_IF(Py_TYPE(iter) != &PyListIter_Type); } op(_ITER_JUMP_LIST, (iter -- iter)) { @@ -2548,7 +2564,7 @@ dummy_func( _ITER_NEXT_LIST; op(_ITER_CHECK_TUPLE, (iter -- iter)) { - DEOPT_IF(Py_TYPE(iter) != &PyTupleIter_Type, FOR_ITER); + DEOPT_IF(Py_TYPE(iter) != &PyTupleIter_Type); } op(_ITER_JUMP_TUPLE, (iter -- iter)) { @@ -2605,7 +2621,7 @@ dummy_func( op(_ITER_CHECK_RANGE, (iter -- iter)) { _PyRangeIterObject *r = (_PyRangeIterObject *)iter; - DEOPT_IF(Py_TYPE(r) != &PyRangeIter_Type, FOR_ITER); + DEOPT_IF(Py_TYPE(r) != &PyRangeIter_Type); } op(_ITER_JUMP_RANGE, (iter -- iter)) { @@ -2647,10 +2663,10 @@ dummy_func( _ITER_NEXT_RANGE; inst(FOR_ITER_GEN, (unused/1, iter -- iter, unused)) { - DEOPT_IF(tstate->interp->eval_frame, FOR_ITER); + DEOPT_IF(tstate->interp->eval_frame); PyGenObject *gen = (PyGenObject *)iter; - 
DEOPT_IF(Py_TYPE(gen) != &PyGen_Type, FOR_ITER); - DEOPT_IF(gen->gi_frame_state >= FRAME_EXECUTING, FOR_ITER); + DEOPT_IF(Py_TYPE(gen) != &PyGen_Type); + DEOPT_IF(gen->gi_frame_state >= FRAME_EXECUTING); STAT_INC(FOR_ITER, hit); _PyInterpreterFrame *gen_frame = (_PyInterpreterFrame *)gen->gi_iframe; _PyFrame_StackPush(gen_frame, Py_None); @@ -2790,16 +2806,13 @@ dummy_func( op(_GUARD_DORV_VALUES_INST_ATTR_FROM_DICT, (owner -- owner)) { assert(Py_TYPE(owner)->tp_flags & Py_TPFLAGS_MANAGED_DICT); PyDictOrValues *dorv = _PyObject_DictOrValuesPointer(owner); - DEOPT_IF(!_PyDictOrValues_IsValues(*dorv) && - !_PyObject_MakeInstanceAttributesFromDict(owner, dorv), - LOAD_ATTR); + DEOPT_IF(!_PyDictOrValues_IsValues(*dorv) && !_PyObject_MakeInstanceAttributesFromDict(owner, dorv)); } op(_GUARD_KEYS_VERSION, (keys_version/2, owner -- owner)) { PyTypeObject *owner_cls = Py_TYPE(owner); PyHeapTypeObject *owner_heap_type = (PyHeapTypeObject *)owner_cls; - DEOPT_IF(owner_heap_type->ht_cached_keys->dk_version != - keys_version, LOAD_ATTR); + DEOPT_IF(owner_heap_type->ht_cached_keys->dk_version != keys_version); } op(_LOAD_ATTR_METHOD_WITH_VALUES, (descr/4, owner -- attr, self if (1))) { @@ -2835,46 +2848,46 @@ dummy_func( unused/2 + _LOAD_ATTR_METHOD_NO_DICT; - inst(LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES, (unused/1, type_version/2, keys_version/2, descr/4, owner -- attr, unused if (0))) { + op(_LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES, (descr/4, owner -- attr, unused if (0))) { assert((oparg & 1) == 0); - PyTypeObject *owner_cls = Py_TYPE(owner); - assert(type_version != 0); - DEOPT_IF(owner_cls->tp_version_tag != type_version, LOAD_ATTR); - assert(owner_cls->tp_flags & Py_TPFLAGS_MANAGED_DICT); - PyDictOrValues *dorv = _PyObject_DictOrValuesPointer(owner); - DEOPT_IF(!_PyDictOrValues_IsValues(*dorv) && - !_PyObject_MakeInstanceAttributesFromDict(owner, dorv), - LOAD_ATTR); - PyHeapTypeObject *owner_heap_type = (PyHeapTypeObject *)owner_cls; - DEOPT_IF(owner_heap_type->ht_cached_keys->dk_version != - keys_version, LOAD_ATTR); STAT_INC(LOAD_ATTR, hit); assert(descr != NULL); DECREF_INPUTS(); attr = Py_NewRef(descr); } - inst(LOAD_ATTR_NONDESCRIPTOR_NO_DICT, (unused/1, type_version/2, unused/2, descr/4, owner -- attr, unused if (0))) { + macro(LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES) = + unused/1 + + _GUARD_TYPE_VERSION + + _GUARD_DORV_VALUES_INST_ATTR_FROM_DICT + + _GUARD_KEYS_VERSION + + _LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES; + + op(_LOAD_ATTR_NONDESCRIPTOR_NO_DICT, (descr/4, owner -- attr, unused if (0))) { assert((oparg & 1) == 0); - PyTypeObject *owner_cls = Py_TYPE(owner); - assert(type_version != 0); - DEOPT_IF(owner_cls->tp_version_tag != type_version, LOAD_ATTR); - assert(owner_cls->tp_dictoffset == 0); + assert(Py_TYPE(owner)->tp_dictoffset == 0); STAT_INC(LOAD_ATTR, hit); assert(descr != NULL); DECREF_INPUTS(); attr = Py_NewRef(descr); } - inst(LOAD_ATTR_METHOD_LAZY_DICT, (unused/1, type_version/2, unused/2, descr/4, owner -- attr, self if (1))) { - assert(oparg & 1); - PyTypeObject *owner_cls = Py_TYPE(owner); - DEOPT_IF(owner_cls->tp_version_tag != type_version, LOAD_ATTR); - Py_ssize_t dictoffset = owner_cls->tp_dictoffset; + macro(LOAD_ATTR_NONDESCRIPTOR_NO_DICT) = + unused/1 + + _GUARD_TYPE_VERSION + + unused/2 + + _LOAD_ATTR_NONDESCRIPTOR_NO_DICT; + + op(_CHECK_ATTR_METHOD_LAZY_DICT, (owner -- owner)) { + Py_ssize_t dictoffset = Py_TYPE(owner)->tp_dictoffset; assert(dictoffset > 0); PyObject *dict = *(PyObject **)((char *)owner + dictoffset); /* This object has a __dict__, just not yet created */ - 
DEOPT_IF(dict != NULL, LOAD_ATTR); + DEOPT_IF(dict != NULL); + } + + op(_LOAD_ATTR_METHOD_LAZY_DICT, (descr/4, owner -- attr, self if (1))) { + assert(oparg & 1); STAT_INC(LOAD_ATTR, hit); assert(descr != NULL); assert(_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR)); @@ -2882,6 +2895,13 @@ dummy_func( self = owner; } + macro(LOAD_ATTR_METHOD_LAZY_DICT) = + unused/1 + + _GUARD_TYPE_VERSION + + _CHECK_ATTR_METHOD_LAZY_DICT + + unused/2 + + _LOAD_ATTR_METHOD_LAZY_DICT; + inst(INSTRUMENTED_CALL, ( -- )) { int is_meth = PEEK(oparg + 1) != NULL; int total_args = oparg + is_meth; @@ -3002,8 +3022,8 @@ dummy_func( } op(_CHECK_CALL_BOUND_METHOD_EXACT_ARGS, (callable, null, unused[oparg] -- callable, null, unused[oparg])) { - DEOPT_IF(null != NULL, CALL); - DEOPT_IF(Py_TYPE(callable) != &PyMethod_Type, CALL); + DEOPT_IF(null != NULL); + DEOPT_IF(Py_TYPE(callable) != &PyMethod_Type); } op(_INIT_CALL_BOUND_METHOD_EXACT_ARGS, (callable, unused, unused[oparg] -- func, self, unused[oparg])) { @@ -3016,22 +3036,22 @@ dummy_func( } op(_CHECK_PEP_523, (--)) { - DEOPT_IF(tstate->interp->eval_frame, CALL); + DEOPT_IF(tstate->interp->eval_frame); } op(_CHECK_FUNCTION_EXACT_ARGS, (func_version/2, callable, self_or_null, unused[oparg] -- callable, self_or_null, unused[oparg])) { - DEOPT_IF(!PyFunction_Check(callable), CALL); + DEOPT_IF(!PyFunction_Check(callable)); PyFunctionObject *func = (PyFunctionObject *)callable; - DEOPT_IF(func->func_version != func_version, CALL); + DEOPT_IF(func->func_version != func_version); PyCodeObject *code = (PyCodeObject *)func->func_code; - DEOPT_IF(code->co_argcount != oparg + (self_or_null != NULL), CALL); + DEOPT_IF(code->co_argcount != oparg + (self_or_null != NULL)); } op(_CHECK_STACK_SPACE, (callable, unused, unused[oparg] -- callable, unused, unused[oparg])) { PyFunctionObject *func = (PyFunctionObject *)callable; PyCodeObject *code = (PyCodeObject *)func->func_code; - DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize), CALL); - DEOPT_IF(tstate->py_recursion_remaining <= 1, CALL); + DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize)); + DEOPT_IF(tstate->py_recursion_remaining <= 1); } op(_INIT_CALL_PY_EXACT_ARGS, (callable, self_or_null, args[oparg] -- new_frame: _PyInterpreterFrame*)) { @@ -3094,24 +3114,24 @@ dummy_func( _PUSH_FRAME; inst(CALL_PY_WITH_DEFAULTS, (unused/1, func_version/2, callable, self_or_null, args[oparg] -- unused)) { - DEOPT_IF(tstate->interp->eval_frame, CALL); + DEOPT_IF(tstate->interp->eval_frame); int argcount = oparg; if (self_or_null != NULL) { args--; argcount++; } - DEOPT_IF(!PyFunction_Check(callable), CALL); + DEOPT_IF(!PyFunction_Check(callable)); PyFunctionObject *func = (PyFunctionObject *)callable; - DEOPT_IF(func->func_version != func_version, CALL); + DEOPT_IF(func->func_version != func_version); PyCodeObject *code = (PyCodeObject *)func->func_code; assert(func->func_defaults); assert(PyTuple_CheckExact(func->func_defaults)); int defcount = (int)PyTuple_GET_SIZE(func->func_defaults); assert(defcount <= code->co_argcount); int min_args = code->co_argcount - defcount; - DEOPT_IF(argcount > code->co_argcount, CALL); - DEOPT_IF(argcount < min_args, CALL); - DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize), CALL); + DEOPT_IF(argcount > code->co_argcount); + DEOPT_IF(argcount < min_args); + DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize)); STAT_INC(CALL, hit); _PyInterpreterFrame *new_frame = _PyFrame_PushUnchecked(tstate, func, code->co_argcount); for 
(int i = 0; i < argcount; i++) { @@ -3130,9 +3150,9 @@ dummy_func( inst(CALL_TYPE_1, (unused/1, unused/2, callable, null, args[oparg] -- res)) { assert(oparg == 1); - DEOPT_IF(null != NULL, CALL); + DEOPT_IF(null != NULL); PyObject *obj = args[0]; - DEOPT_IF(callable != (PyObject *)&PyType_Type, CALL); + DEOPT_IF(callable != (PyObject *)&PyType_Type); STAT_INC(CALL, hit); res = Py_NewRef(Py_TYPE(obj)); Py_DECREF(obj); @@ -3141,8 +3161,8 @@ dummy_func( inst(CALL_STR_1, (unused/1, unused/2, callable, null, args[oparg] -- res)) { assert(oparg == 1); - DEOPT_IF(null != NULL, CALL); - DEOPT_IF(callable != (PyObject *)&PyUnicode_Type, CALL); + DEOPT_IF(null != NULL); + DEOPT_IF(callable != (PyObject *)&PyUnicode_Type); STAT_INC(CALL, hit); PyObject *arg = args[0]; res = PyObject_Str(arg); @@ -3154,8 +3174,8 @@ dummy_func( inst(CALL_TUPLE_1, (unused/1, unused/2, callable, null, args[oparg] -- res)) { assert(oparg == 1); - DEOPT_IF(null != NULL, CALL); - DEOPT_IF(callable != (PyObject *)&PyTuple_Type, CALL); + DEOPT_IF(null != NULL); + DEOPT_IF(callable != (PyObject *)&PyTuple_Type); STAT_INC(CALL, hit); PyObject *arg = args[0]; res = PySequence_Tuple(arg); @@ -3172,15 +3192,15 @@ dummy_func( * 3. Pushes the frame for ``__init__`` to the frame stack * */ _PyCallCache *cache = (_PyCallCache *)next_instr; - DEOPT_IF(null != NULL, CALL); - DEOPT_IF(!PyType_Check(callable), CALL); + DEOPT_IF(null != NULL); + DEOPT_IF(!PyType_Check(callable)); PyTypeObject *tp = (PyTypeObject *)callable; - DEOPT_IF(tp->tp_version_tag != read_u32(cache->func_version), CALL); + DEOPT_IF(tp->tp_version_tag != read_u32(cache->func_version)); PyHeapTypeObject *cls = (PyHeapTypeObject *)callable; PyFunctionObject *init = (PyFunctionObject *)cls->_spec_cache.init; PyCodeObject *code = (PyCodeObject *)init->func_code; - DEOPT_IF(code->co_argcount != oparg+1, CALL); - DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize + _Py_InitCleanup.co_framesize), CALL); + DEOPT_IF(code->co_argcount != oparg+1); + DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize + _Py_InitCleanup.co_framesize)); STAT_INC(CALL, hit); PyObject *self = _PyType_NewManagedObject(tp); if (self == NULL) { @@ -3233,9 +3253,9 @@ dummy_func( args--; total_args++; } - DEOPT_IF(!PyType_Check(callable), CALL); + DEOPT_IF(!PyType_Check(callable)); PyTypeObject *tp = (PyTypeObject *)callable; - DEOPT_IF(tp->tp_vectorcall == NULL, CALL); + DEOPT_IF(tp->tp_vectorcall == NULL); STAT_INC(CALL, hit); res = tp->tp_vectorcall((PyObject *)tp, args, total_args, NULL); /* Free the arguments. 
*/ @@ -3254,9 +3274,9 @@ dummy_func( args--; total_args++; } - DEOPT_IF(total_args != 1, CALL); - DEOPT_IF(!PyCFunction_CheckExact(callable), CALL); - DEOPT_IF(PyCFunction_GET_FLAGS(callable) != METH_O, CALL); + DEOPT_IF(total_args != 1); + DEOPT_IF(!PyCFunction_CheckExact(callable)); + DEOPT_IF(PyCFunction_GET_FLAGS(callable) != METH_O); STAT_INC(CALL, hit); PyCFunction cfunc = PyCFunction_GET_FUNCTION(callable); // This is slower but CPython promises to check all non-vectorcall @@ -3282,8 +3302,8 @@ dummy_func( args--; total_args++; } - DEOPT_IF(!PyCFunction_CheckExact(callable), CALL); - DEOPT_IF(PyCFunction_GET_FLAGS(callable) != METH_FASTCALL, CALL); + DEOPT_IF(!PyCFunction_CheckExact(callable)); + DEOPT_IF(PyCFunction_GET_FLAGS(callable) != METH_FASTCALL); STAT_INC(CALL, hit); PyCFunction cfunc = PyCFunction_GET_FUNCTION(callable); /* res = func(self, args, nargs) */ @@ -3314,9 +3334,8 @@ dummy_func( args--; total_args++; } - DEOPT_IF(!PyCFunction_CheckExact(callable), CALL); - DEOPT_IF(PyCFunction_GET_FLAGS(callable) != - (METH_FASTCALL | METH_KEYWORDS), CALL); + DEOPT_IF(!PyCFunction_CheckExact(callable)); + DEOPT_IF(PyCFunction_GET_FLAGS(callable) != (METH_FASTCALL | METH_KEYWORDS)); STAT_INC(CALL, hit); /* res = func(self, args, nargs, kwnames) */ _PyCFunctionFastWithKeywords cfunc = @@ -3341,9 +3360,9 @@ dummy_func( args--; total_args++; } - DEOPT_IF(total_args != 1, CALL); + DEOPT_IF(total_args != 1); PyInterpreterState *interp = tstate->interp; - DEOPT_IF(callable != interp->callable_cache.len, CALL); + DEOPT_IF(callable != interp->callable_cache.len); STAT_INC(CALL, hit); PyObject *arg = args[0]; Py_ssize_t len_i = PyObject_Length(arg); @@ -3365,9 +3384,9 @@ dummy_func( args--; total_args++; } - DEOPT_IF(total_args != 2, CALL); + DEOPT_IF(total_args != 2); PyInterpreterState *interp = tstate->interp; - DEOPT_IF(callable != interp->callable_cache.isinstance, CALL); + DEOPT_IF(callable != interp->callable_cache.isinstance); STAT_INC(CALL, hit); PyObject *cls = args[1]; PyObject *inst = args[0]; @@ -3388,9 +3407,9 @@ dummy_func( inst(CALL_LIST_APPEND, (unused/1, unused/2, callable, self, args[oparg] -- unused)) { assert(oparg == 1); PyInterpreterState *interp = tstate->interp; - DEOPT_IF(callable != interp->callable_cache.list_append, CALL); + DEOPT_IF(callable != interp->callable_cache.list_append); assert(self != NULL); - DEOPT_IF(!PyList_Check(self), CALL); + DEOPT_IF(!PyList_Check(self)); STAT_INC(CALL, hit); if (_PyList_AppendTakeRef((PyListObject *)self, args[0]) < 0) { goto pop_1_error; // Since arg is DECREF'ed already @@ -3411,13 +3430,13 @@ dummy_func( total_args++; } PyMethodDescrObject *method = (PyMethodDescrObject *)callable; - DEOPT_IF(total_args != 2, CALL); - DEOPT_IF(!Py_IS_TYPE(method, &PyMethodDescr_Type), CALL); + DEOPT_IF(total_args != 2); + DEOPT_IF(!Py_IS_TYPE(method, &PyMethodDescr_Type)); PyMethodDef *meth = method->d_method; - DEOPT_IF(meth->ml_flags != METH_O, CALL); + DEOPT_IF(meth->ml_flags != METH_O); PyObject *arg = args[1]; PyObject *self = args[0]; - DEOPT_IF(!Py_IS_TYPE(self, method->d_common.d_type), CALL); + DEOPT_IF(!Py_IS_TYPE(self, method->d_common.d_type)); STAT_INC(CALL, hit); PyCFunction cfunc = meth->ml_meth; // This is slower but CPython promises to check all non-vectorcall @@ -3442,12 +3461,12 @@ dummy_func( total_args++; } PyMethodDescrObject *method = (PyMethodDescrObject *)callable; - DEOPT_IF(!Py_IS_TYPE(method, &PyMethodDescr_Type), CALL); + DEOPT_IF(!Py_IS_TYPE(method, &PyMethodDescr_Type)); PyMethodDef *meth = 
method->d_method; - DEOPT_IF(meth->ml_flags != (METH_FASTCALL|METH_KEYWORDS), CALL); + DEOPT_IF(meth->ml_flags != (METH_FASTCALL|METH_KEYWORDS)); PyTypeObject *d_type = method->d_common.d_type; PyObject *self = args[0]; - DEOPT_IF(!Py_IS_TYPE(self, d_type), CALL); + DEOPT_IF(!Py_IS_TYPE(self, d_type)); STAT_INC(CALL, hit); int nargs = total_args - 1; _PyCFunctionFastWithKeywords cfunc = @@ -3471,13 +3490,13 @@ dummy_func( args--; total_args++; } - DEOPT_IF(total_args != 1, CALL); + DEOPT_IF(total_args != 1); PyMethodDescrObject *method = (PyMethodDescrObject *)callable; - DEOPT_IF(!Py_IS_TYPE(method, &PyMethodDescr_Type), CALL); + DEOPT_IF(!Py_IS_TYPE(method, &PyMethodDescr_Type)); PyMethodDef *meth = method->d_method; PyObject *self = args[0]; - DEOPT_IF(!Py_IS_TYPE(self, method->d_common.d_type), CALL); - DEOPT_IF(meth->ml_flags != METH_NOARGS, CALL); + DEOPT_IF(!Py_IS_TYPE(self, method->d_common.d_type)); + DEOPT_IF(meth->ml_flags != METH_NOARGS); STAT_INC(CALL, hit); PyCFunction cfunc = meth->ml_meth; // This is slower but CPython promises to check all non-vectorcall @@ -3502,11 +3521,11 @@ dummy_func( } PyMethodDescrObject *method = (PyMethodDescrObject *)callable; /* Builtin METH_FASTCALL methods, without keywords */ - DEOPT_IF(!Py_IS_TYPE(method, &PyMethodDescr_Type), CALL); + DEOPT_IF(!Py_IS_TYPE(method, &PyMethodDescr_Type)); PyMethodDef *meth = method->d_method; - DEOPT_IF(meth->ml_flags != METH_FASTCALL, CALL); + DEOPT_IF(meth->ml_flags != METH_FASTCALL); PyObject *self = args[0]; - DEOPT_IF(!Py_IS_TYPE(self, method->d_common.d_type), CALL); + DEOPT_IF(!Py_IS_TYPE(self, method->d_common.d_type)); STAT_INC(CALL, hit); _PyCFunctionFast cfunc = (_PyCFunctionFast)(void(*)(void))meth->ml_meth; diff --git a/Python/ceval_gil.c b/Python/ceval_gil.c index ba16f5eb9bfe74..6b4ec8eed03aab 100644 --- a/Python/ceval_gil.c +++ b/Python/ceval_gil.c @@ -57,113 +57,62 @@ #define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) _Py_atomic_load_relaxed(ATOMIC_VAL) #endif -/* This can set eval_breaker to 0 even though gil_drop_request became - 1. We believe this is all right because the eval loop will release - the GIL eventually anyway. */ +/* bpo-40010: eval_breaker should be recomputed if there + is a pending signal: signal received by another thread which cannot + handle signals. + Similarly, we set CALLS_TO_DO and ASYNC_EXCEPTION to match the thread. 
+*/ static inline void -COMPUTE_EVAL_BREAKER(PyInterpreterState *interp, - struct _ceval_runtime_state *ceval, - struct _ceval_state *ceval2) +update_eval_breaker_from_thread(PyInterpreterState *interp, PyThreadState *tstate) { - _Py_atomic_store_relaxed(&ceval2->eval_breaker, - _Py_atomic_load_relaxed_int32(&ceval2->gil_drop_request) - | (_Py_atomic_load_relaxed_int32(&ceval->signals_pending) - && _Py_ThreadCanHandleSignals(interp)) - | (_Py_atomic_load_relaxed_int32(&ceval2->pending.calls_to_do)) - | (_Py_IsMainThread() && _Py_IsMainInterpreter(interp) - &&_Py_atomic_load_relaxed_int32(&ceval->pending_mainthread.calls_to_do)) - | ceval2->pending.async_exc - | _Py_atomic_load_relaxed_int32(&ceval2->gc_scheduled)); -} + if (tstate == NULL) { + return; + } + if (_Py_IsMainThread()) { + int32_t calls_to_do = _Py_atomic_load_int32_relaxed( + &_PyRuntime.ceval.pending_mainthread.calls_to_do); + if (calls_to_do) { + _Py_set_eval_breaker_bit(interp, _PY_CALLS_TO_DO_BIT, 1); + } + if (_Py_ThreadCanHandleSignals(interp)) { + if (_Py_atomic_load(&_PyRuntime.signals.is_tripped)) { + _Py_set_eval_breaker_bit(interp, _PY_SIGNALS_PENDING_BIT, 1); + } + } + } + if (tstate->async_exc != NULL) { + _Py_set_eval_breaker_bit(interp, _PY_ASYNC_EXCEPTION_BIT, 1); + } +} static inline void SET_GIL_DROP_REQUEST(PyInterpreterState *interp) { - struct _ceval_state *ceval2 = &interp->ceval; - _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 1); - _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1); + _Py_set_eval_breaker_bit(interp, _PY_GIL_DROP_REQUEST_BIT, 1); } static inline void RESET_GIL_DROP_REQUEST(PyInterpreterState *interp) { - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 0); - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); + _Py_set_eval_breaker_bit(interp, _PY_GIL_DROP_REQUEST_BIT, 0); } static inline void -SIGNAL_PENDING_CALLS(struct _pending_calls *pending, PyInterpreterState *interp) +SIGNAL_PENDING_CALLS(PyInterpreterState *interp) { - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - _Py_atomic_store_relaxed(&pending->calls_to_do, 1); - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); + _Py_set_eval_breaker_bit(interp, _PY_CALLS_TO_DO_BIT, 1); } static inline void UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp) { - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - if (_Py_IsMainThread() && _Py_IsMainInterpreter(interp)) { - _Py_atomic_store_relaxed(&ceval->pending_mainthread.calls_to_do, 0); - } - _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 0); - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); + _Py_set_eval_breaker_bit(interp, _PY_CALLS_TO_DO_BIT, 0); } - -static inline void -SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp, int force) -{ - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - _Py_atomic_store_relaxed(&ceval->signals_pending, 1); - if (force) { - _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1); - } - else { - /* eval_breaker is not set to 1 if thread_can_handle_signals() is false */ - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); - } -} - - -static inline void -UNSIGNAL_PENDING_SIGNALS(PyInterpreterState *interp) -{ - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - _Py_atomic_store_relaxed(&ceval->signals_pending, 
0); - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); -} - - -static inline void -SIGNAL_ASYNC_EXC(PyInterpreterState *interp) -{ - struct _ceval_state *ceval2 = &interp->ceval; - ceval2->pending.async_exc = 1; - _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1); -} - - -static inline void -UNSIGNAL_ASYNC_EXC(PyInterpreterState *interp) -{ - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - ceval2->pending.async_exc = 0; - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); -} - - /* * Implementation of the Global Interpreter Lock (GIL). */ @@ -271,8 +220,9 @@ static void recreate_gil(struct _gil_runtime_state *gil) #endif static void -drop_gil(struct _ceval_state *ceval, PyThreadState *tstate) +drop_gil(PyInterpreterState *interp, PyThreadState *tstate) { + struct _ceval_state *ceval = &interp->ceval; /* If tstate is NULL, the caller is indicating that we're releasing the GIL for the last time in this thread. This is particularly relevant when the current thread state is finalizing or its @@ -310,7 +260,7 @@ drop_gil(struct _ceval_state *ceval, PyThreadState *tstate) the GIL, and that's the only time we might delete the interpreter, so checking tstate first prevents the crash. See https://github.com/python/cpython/issues/104341. */ - if (tstate != NULL && _Py_atomic_load_relaxed(&ceval->gil_drop_request)) { + if (tstate != NULL && _Py_eval_breaker_bit_is_set(interp, _PY_GIL_DROP_REQUEST_BIT)) { MUTEX_LOCK(gil->switch_mutex); /* Not switched yet => wait */ if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate) @@ -356,8 +306,7 @@ take_gil(PyThreadState *tstate) assert(_PyThreadState_CheckConsistency(tstate)); PyInterpreterState *interp = tstate->interp; - struct _ceval_state *ceval = &interp->ceval; - struct _gil_runtime_state *gil = ceval->gil; + struct _gil_runtime_state *gil = interp->ceval.gil; /* Check that _PyEval_InitThreads() was called to create the lock */ assert(gil_created(gil)); @@ -431,27 +380,13 @@ take_gil(PyThreadState *tstate) in take_gil() while the main thread called wait_for_thread_shutdown() from Py_Finalize(). */ MUTEX_UNLOCK(gil->mutex); - drop_gil(ceval, tstate); + drop_gil(interp, tstate); PyThread_exit_thread(); } assert(_PyThreadState_CheckConsistency(tstate)); - if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) { - RESET_GIL_DROP_REQUEST(interp); - } - else { - /* bpo-40010: eval_breaker should be recomputed to be set to 1 if there - is a pending signal: signal received by another thread which cannot - handle signals. - - Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */ - COMPUTE_EVAL_BREAKER(interp, &_PyRuntime.ceval, ceval); - } - - /* Don't access tstate if the thread must exit */ - if (tstate->async_exc != NULL) { - _PyEval_SignalAsyncExc(tstate->interp); - } + RESET_GIL_DROP_REQUEST(interp); + update_eval_breaker_from_thread(interp, tstate); MUTEX_UNLOCK(gil->mutex); @@ -611,8 +546,7 @@ PyEval_ReleaseLock(void) /* This function must succeed when the current thread state is NULL. We therefore avoid PyThreadState_Get() which dumps a fatal error in debug mode. */ - struct _ceval_state *ceval = &tstate->interp->ceval; - drop_gil(ceval, tstate); + drop_gil(tstate->interp, tstate); } void @@ -628,8 +562,7 @@ _PyEval_ReleaseLock(PyInterpreterState *interp, PyThreadState *tstate) /* If tstate is NULL then we do not expect the current thread to acquire the GIL ever again. 
*/ assert(tstate == NULL || tstate->interp == interp); - struct _ceval_state *ceval = &interp->ceval; - drop_gil(ceval, tstate); + drop_gil(interp, tstate); } void @@ -653,8 +586,7 @@ PyEval_ReleaseThread(PyThreadState *tstate) if (new_tstate != tstate) { Py_FatalError("wrong thread state"); } - struct _ceval_state *ceval = &tstate->interp->ceval; - drop_gil(ceval, tstate); + drop_gil(tstate->interp, tstate); } #ifdef HAVE_FORK @@ -691,7 +623,7 @@ _PyEval_ReInitThreads(PyThreadState *tstate) void _PyEval_SignalAsyncExc(PyInterpreterState *interp) { - SIGNAL_ASYNC_EXC(interp); + _Py_set_eval_breaker_bit(interp, _PY_ASYNC_EXCEPTION_BIT, 1); } PyThreadState * @@ -700,9 +632,8 @@ PyEval_SaveThread(void) PyThreadState *tstate = _PyThreadState_SwapNoGIL(NULL); _Py_EnsureTstateNotNULL(tstate); - struct _ceval_state *ceval = &tstate->interp->ceval; - assert(gil_created(ceval->gil)); - drop_gil(ceval, tstate); + assert(gil_created(tstate->interp->ceval.gil)); + drop_gil(tstate->interp, tstate); return tstate; } @@ -742,22 +673,9 @@ PyEval_RestoreThread(PyThreadState *tstate) void _PyEval_SignalReceived(PyInterpreterState *interp) { -#ifdef MS_WINDOWS - // bpo-42296: On Windows, _PyEval_SignalReceived() is called from a signal - // handler which can run in a thread different than the Python thread, in - // which case _Py_ThreadCanHandleSignals() is wrong. Ignore - // _Py_ThreadCanHandleSignals() and always set eval_breaker to 1. - // - // The next eval_frame_handle_pending() call will call - // _Py_ThreadCanHandleSignals() to recompute eval_breaker. - int force = 1; -#else - int force = 0; -#endif - /* bpo-30703: Function called when the C signal handler of Python gets a - signal. We cannot queue a callback using _PyEval_AddPendingCall() since - that function is not async-signal-safe. */ - SIGNAL_PENDING_SIGNALS(interp, force); + if (_Py_ThreadCanHandleSignals(interp)) { + _Py_set_eval_breaker_bit(interp, _PY_SIGNALS_PENDING_BIT, 1); + } } /* Push one item onto the queue while holding the lock. */ @@ -773,6 +691,8 @@ _push_pending_call(struct _pending_calls *pending, pending->calls[i].func = func; pending->calls[i].arg = arg; pending->last = j; + assert(pending->calls_to_do < NPENDINGCALLS); + pending->calls_to_do++; return 0; } @@ -800,6 +720,8 @@ _pop_pending_call(struct _pending_calls *pending, if (i >= 0) { pending->calls[i] = (struct _pending_call){0}; pending->first = (i + 1) % NPENDINGCALLS; + assert(pending->calls_to_do > 0); + pending->calls_to_do--; } } @@ -829,7 +751,7 @@ _PyEval_AddPendingCall(PyInterpreterState *interp, PyThread_release_lock(pending->lock); /* signal main loop */ - SIGNAL_PENDING_CALLS(pending, interp); + SIGNAL_PENDING_CALLS(interp); return result; } @@ -846,33 +768,18 @@ static int handle_signals(PyThreadState *tstate) { assert(_PyThreadState_CheckConsistency(tstate)); + _Py_set_eval_breaker_bit(tstate->interp, _PY_SIGNALS_PENDING_BIT, 0); if (!_Py_ThreadCanHandleSignals(tstate->interp)) { return 0; } - - UNSIGNAL_PENDING_SIGNALS(tstate->interp); if (_PyErr_CheckSignalsTstate(tstate) < 0) { /* On failure, re-schedule a call to handle_signals(). 
*/ - SIGNAL_PENDING_SIGNALS(tstate->interp, 0); + _Py_set_eval_breaker_bit(tstate->interp, _PY_SIGNALS_PENDING_BIT, 1); return -1; } return 0; } -static inline int -maybe_has_pending_calls(PyInterpreterState *interp) -{ - struct _pending_calls *pending = &interp->ceval.pending; - if (_Py_atomic_load_relaxed_int32(&pending->calls_to_do)) { - return 1; - } - if (!_Py_IsMainThread() || !_Py_IsMainInterpreter(interp)) { - return 0; - } - pending = &_PyRuntime.ceval.pending_mainthread; - return _Py_atomic_load_relaxed_int32(&pending->calls_to_do); -} - static int _make_pending_calls(struct _pending_calls *pending) { @@ -930,7 +837,7 @@ make_pending_calls(PyInterpreterState *interp) if (_make_pending_calls(pending) != 0) { pending->busy = 0; /* There might not be more calls to make, but we play it safe. */ - SIGNAL_PENDING_CALLS(pending, interp); + SIGNAL_PENDING_CALLS(interp); return -1; } @@ -938,7 +845,7 @@ make_pending_calls(PyInterpreterState *interp) if (_make_pending_calls(pending_main) != 0) { pending->busy = 0; /* There might not be more calls to make, but we play it safe. */ - SIGNAL_PENDING_CALLS(pending_main, interp); + SIGNAL_PENDING_CALLS(interp); return -1; } } @@ -1083,38 +990,35 @@ _PyEval_FiniState(struct _ceval_state *ceval) int _Py_HandlePending(PyThreadState *tstate) { - _PyRuntimeState * const runtime = &_PyRuntime; - struct _ceval_runtime_state *ceval = &runtime->ceval; - struct _ceval_state *interp_ceval_state = &tstate->interp->ceval; + PyInterpreterState *interp = tstate->interp; /* Pending signals */ - if (_Py_atomic_load_relaxed_int32(&ceval->signals_pending)) { + if (_Py_eval_breaker_bit_is_set(interp, _PY_SIGNALS_PENDING_BIT)) { if (handle_signals(tstate) != 0) { return -1; } } /* Pending calls */ - if (maybe_has_pending_calls(tstate->interp)) { - if (make_pending_calls(tstate->interp) != 0) { + if (_Py_eval_breaker_bit_is_set(interp, _PY_CALLS_TO_DO_BIT)) { + if (make_pending_calls(interp) != 0) { return -1; } } /* GC scheduled to run */ - if (_Py_atomic_load_relaxed_int32(&interp_ceval_state->gc_scheduled)) { - _Py_atomic_store_relaxed(&interp_ceval_state->gc_scheduled, 0); - COMPUTE_EVAL_BREAKER(tstate->interp, ceval, interp_ceval_state); + if (_Py_eval_breaker_bit_is_set(interp, _PY_GC_SCHEDULED_BIT)) { + _Py_set_eval_breaker_bit(interp, _PY_GC_SCHEDULED_BIT, 0); _Py_RunGC(tstate); } /* GIL drop request */ - if (_Py_atomic_load_relaxed_int32(&interp_ceval_state->gil_drop_request)) { + if (_Py_eval_breaker_bit_is_set(interp, _PY_GIL_DROP_REQUEST_BIT)) { /* Give another thread a chance */ if (_PyThreadState_SwapNoGIL(NULL) != tstate) { Py_FatalError("tstate mix-up"); } - drop_gil(interp_ceval_state, tstate); + drop_gil(interp, tstate); /* Other threads may run now */ @@ -1126,27 +1030,16 @@ _Py_HandlePending(PyThreadState *tstate) } /* Check for asynchronous exception. */ - if (tstate->async_exc != NULL) { - PyObject *exc = tstate->async_exc; - tstate->async_exc = NULL; - UNSIGNAL_ASYNC_EXC(tstate->interp); - _PyErr_SetNone(tstate, exc); - Py_DECREF(exc); - return -1; + if (_Py_eval_breaker_bit_is_set(interp, _PY_ASYNC_EXCEPTION_BIT)) { + _Py_set_eval_breaker_bit(interp, _PY_ASYNC_EXCEPTION_BIT, 0); + if (tstate->async_exc != NULL) { + PyObject *exc = tstate->async_exc; + tstate->async_exc = NULL; + _PyErr_SetNone(tstate, exc); + Py_DECREF(exc); + return -1; + } } - - - // It is possible that some of the conditions that trigger the eval breaker - // are called in a different thread than the Python thread. 
An example of - // this is bpo-42296: On Windows, _PyEval_SignalReceived() can be called in - // a different thread than the Python thread, in which case - // _Py_ThreadCanHandleSignals() is wrong. Recompute eval_breaker in the - // current Python thread with the correct _Py_ThreadCanHandleSignals() - // value. It prevents to interrupt the eval loop at every instruction if - // the current Python thread cannot handle signals (if - // _Py_ThreadCanHandleSignals() is false). - COMPUTE_EVAL_BREAKER(tstate->interp, ceval, interp_ceval_state); - return 0; } diff --git a/Python/ceval_macros.h b/Python/ceval_macros.h index 012750df387c1c..872e0a2b7f92ca 100644 --- a/Python/ceval_macros.h +++ b/Python/ceval_macros.h @@ -116,7 +116,7 @@ #define CHECK_EVAL_BREAKER() \ _Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY(); \ - if (_Py_atomic_load_relaxed_int32(&tstate->interp->ceval.eval_breaker)) { \ + if (_Py_atomic_load_uintptr_relaxed(&tstate->interp->ceval.eval_breaker) & _PY_EVAL_EVENTS_MASK) { \ if (_Py_HandlePending(tstate) != 0) { \ goto error; \ } \ diff --git a/Python/executor_cases.c.h b/Python/executor_cases.c.h index 55a03c9a23a572..c6eeee4925ef1f 100644 --- a/Python/executor_cases.c.h +++ b/Python/executor_cases.c.h @@ -12,10 +12,10 @@ DEOPT_IF(_Py_emscripten_signal_clock == 0, RESUME); _Py_emscripten_signal_clock -= Py_EMSCRIPTEN_SIGNAL_HANDLING; #endif - /* Possibly combine these two checks */ - DEOPT_IF(_PyFrame_GetCode(frame)->_co_instrumentation_version - != tstate->interp->monitoring_version, RESUME); - DEOPT_IF(_Py_atomic_load_relaxed_int32(&tstate->interp->ceval.eval_breaker), RESUME); + uintptr_t eval_breaker = _Py_atomic_load_uintptr_relaxed(&tstate->interp->ceval.eval_breaker); + uintptr_t version = _PyFrame_GetCode(frame)->_co_instrumentation_version; + assert((version & _PY_EVAL_EVENTS_MASK) == 0); + DEOPT_IF(eval_breaker != version, RESUME); break; } @@ -236,8 +236,8 @@ PyObject *left; right = stack_pointer[-1]; left = stack_pointer[-2]; - DEOPT_IF(!PyLong_CheckExact(left), BINARY_OP); - DEOPT_IF(!PyLong_CheckExact(right), BINARY_OP); + DEOPT_IF(!PyLong_CheckExact(left), _GUARD_BOTH_INT); + DEOPT_IF(!PyLong_CheckExact(right), _GUARD_BOTH_INT); break; } @@ -294,8 +294,8 @@ PyObject *left; right = stack_pointer[-1]; left = stack_pointer[-2]; - DEOPT_IF(!PyFloat_CheckExact(left), BINARY_OP); - DEOPT_IF(!PyFloat_CheckExact(right), BINARY_OP); + DEOPT_IF(!PyFloat_CheckExact(left), _GUARD_BOTH_FLOAT); + DEOPT_IF(!PyFloat_CheckExact(right), _GUARD_BOTH_FLOAT); break; } @@ -352,8 +352,8 @@ PyObject *left; right = stack_pointer[-1]; left = stack_pointer[-2]; - DEOPT_IF(!PyUnicode_CheckExact(left), BINARY_OP); - DEOPT_IF(!PyUnicode_CheckExact(right), BINARY_OP); + DEOPT_IF(!PyUnicode_CheckExact(left), _GUARD_BOTH_UNICODE); + DEOPT_IF(!PyUnicode_CheckExact(right), _GUARD_BOTH_UNICODE); break; } @@ -1189,8 +1189,8 @@ case _GUARD_GLOBALS_VERSION: { uint16_t version = (uint16_t)operand; PyDictObject *dict = (PyDictObject *)GLOBALS(); - DEOPT_IF(!PyDict_CheckExact(dict), LOAD_GLOBAL); - DEOPT_IF(dict->ma_keys->dk_version != version, LOAD_GLOBAL); + DEOPT_IF(!PyDict_CheckExact(dict), _GUARD_GLOBALS_VERSION); + DEOPT_IF(dict->ma_keys->dk_version != version, _GUARD_GLOBALS_VERSION); assert(DK_IS_UNICODE(dict->ma_keys)); break; } @@ -1198,8 +1198,8 @@ case _GUARD_BUILTINS_VERSION: { uint16_t version = (uint16_t)operand; PyDictObject *dict = (PyDictObject *)BUILTINS(); - DEOPT_IF(!PyDict_CheckExact(dict), LOAD_GLOBAL); - DEOPT_IF(dict->ma_keys->dk_version != version, LOAD_GLOBAL); + 
DEOPT_IF(!PyDict_CheckExact(dict), _GUARD_BUILTINS_VERSION); + DEOPT_IF(dict->ma_keys->dk_version != version, _GUARD_BUILTINS_VERSION); assert(DK_IS_UNICODE(dict->ma_keys)); break; } @@ -1211,7 +1211,7 @@ PyDictObject *dict = (PyDictObject *)GLOBALS(); PyDictUnicodeEntry *entries = DK_UNICODE_ENTRIES(dict->ma_keys); res = entries[index].me_value; - DEOPT_IF(res == NULL, LOAD_GLOBAL); + DEOPT_IF(res == NULL, _LOAD_GLOBAL_MODULE); Py_INCREF(res); STAT_INC(LOAD_GLOBAL, hit); null = NULL; @@ -1229,7 +1229,7 @@ PyDictObject *bdict = (PyDictObject *)BUILTINS(); PyDictUnicodeEntry *entries = DK_UNICODE_ENTRIES(bdict->ma_keys); res = entries[index].me_value; - DEOPT_IF(res == NULL, LOAD_GLOBAL); + DEOPT_IF(res == NULL, _LOAD_GLOBAL_BUILTINS); Py_INCREF(res); STAT_INC(LOAD_GLOBAL, hit); null = NULL; @@ -1679,7 +1679,7 @@ uint32_t type_version = (uint32_t)operand; PyTypeObject *tp = Py_TYPE(owner); assert(type_version != 0); - DEOPT_IF(tp->tp_version_tag != type_version, LOAD_ATTR); + DEOPT_IF(tp->tp_version_tag != type_version, _GUARD_TYPE_VERSION); break; } @@ -1689,9 +1689,7 @@ assert(Py_TYPE(owner)->tp_dictoffset < 0); assert(Py_TYPE(owner)->tp_flags & Py_TPFLAGS_MANAGED_DICT); PyDictOrValues *dorv = _PyObject_DictOrValuesPointer(owner); - DEOPT_IF(!_PyDictOrValues_IsValues(*dorv) && - !_PyObject_MakeInstanceAttributesFromDict(owner, dorv), - LOAD_ATTR); + DEOPT_IF(!_PyDictOrValues_IsValues(*dorv) && !_PyObject_MakeInstanceAttributesFromDict(owner, dorv), _CHECK_MANAGED_OBJECT_HAS_VALUES); break; } @@ -1703,7 +1701,83 @@ uint16_t index = (uint16_t)operand; PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(owner); attr = _PyDictOrValues_GetValues(dorv)->values[index]; - DEOPT_IF(attr == NULL, LOAD_ATTR); + DEOPT_IF(attr == NULL, _LOAD_ATTR_INSTANCE_VALUE); + STAT_INC(LOAD_ATTR, hit); + Py_INCREF(attr); + null = NULL; + Py_DECREF(owner); + STACK_GROW(((oparg & 1) ? 1 : 0)); + stack_pointer[-1 - (oparg & 1 ? 1 : 0)] = attr; + if (oparg & 1) { stack_pointer[-(oparg & 1 ? 1 : 0)] = null; } + break; + } + + case _CHECK_ATTR_MODULE: { + PyObject *owner; + owner = stack_pointer[-1]; + uint32_t type_version = (uint32_t)operand; + DEOPT_IF(!PyModule_CheckExact(owner), _CHECK_ATTR_MODULE); + PyDictObject *dict = (PyDictObject *)((PyModuleObject *)owner)->md_dict; + assert(dict != NULL); + DEOPT_IF(dict->ma_keys->dk_version != type_version, _CHECK_ATTR_MODULE); + break; + } + + case _LOAD_ATTR_MODULE: { + PyObject *owner; + PyObject *attr; + PyObject *null = NULL; + owner = stack_pointer[-1]; + uint16_t index = (uint16_t)operand; + PyDictObject *dict = (PyDictObject *)((PyModuleObject *)owner)->md_dict; + assert(dict->ma_keys->dk_kind == DICT_KEYS_UNICODE); + assert(index < dict->ma_keys->dk_nentries); + PyDictUnicodeEntry *ep = DK_UNICODE_ENTRIES(dict->ma_keys) + index; + attr = ep->me_value; + DEOPT_IF(attr == NULL, _LOAD_ATTR_MODULE); + STAT_INC(LOAD_ATTR, hit); + Py_INCREF(attr); + null = NULL; + Py_DECREF(owner); + STACK_GROW(((oparg & 1) ? 1 : 0)); + stack_pointer[-1 - (oparg & 1 ? 1 : 0)] = attr; + if (oparg & 1) { stack_pointer[-(oparg & 1 ? 
1 : 0)] = null; } + break; + } + + case _CHECK_ATTR_WITH_HINT: { + PyObject *owner; + owner = stack_pointer[-1]; + assert(Py_TYPE(owner)->tp_flags & Py_TPFLAGS_MANAGED_DICT); + PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(owner); + DEOPT_IF(_PyDictOrValues_IsValues(dorv), _CHECK_ATTR_WITH_HINT); + PyDictObject *dict = (PyDictObject *)_PyDictOrValues_GetDict(dorv); + DEOPT_IF(dict == NULL, _CHECK_ATTR_WITH_HINT); + assert(PyDict_CheckExact((PyObject *)dict)); + break; + } + + case _LOAD_ATTR_WITH_HINT: { + PyObject *owner; + PyObject *attr; + PyObject *null = NULL; + owner = stack_pointer[-1]; + uint16_t hint = (uint16_t)operand; + PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(owner); + PyDictObject *dict = (PyDictObject *)_PyDictOrValues_GetDict(dorv); + DEOPT_IF(hint >= (size_t)dict->ma_keys->dk_nentries, _LOAD_ATTR_WITH_HINT); + PyObject *name = GETITEM(FRAME_CO_NAMES, oparg>>1); + if (DK_IS_UNICODE(dict->ma_keys)) { + PyDictUnicodeEntry *ep = DK_UNICODE_ENTRIES(dict->ma_keys) + hint; + DEOPT_IF(ep->me_key != name, _LOAD_ATTR_WITH_HINT); + attr = ep->me_value; + } + else { + PyDictKeyEntry *ep = DK_ENTRIES(dict->ma_keys) + hint; + DEOPT_IF(ep->me_key != name, _LOAD_ATTR_WITH_HINT); + attr = ep->me_value; + } + DEOPT_IF(attr == NULL, _LOAD_ATTR_WITH_HINT); STAT_INC(LOAD_ATTR, hit); Py_INCREF(attr); null = NULL; @@ -1722,7 +1796,7 @@ uint16_t index = (uint16_t)operand; char *addr = (char *)owner + index; attr = *(PyObject **)addr; - DEOPT_IF(attr == NULL, LOAD_ATTR); + DEOPT_IF(attr == NULL, _LOAD_ATTR_SLOT); STAT_INC(LOAD_ATTR, hit); Py_INCREF(attr); null = NULL; @@ -1733,12 +1807,39 @@ break; } + case _CHECK_ATTR_CLASS: { + PyObject *owner; + owner = stack_pointer[-1]; + uint32_t type_version = (uint32_t)operand; + DEOPT_IF(!PyType_Check(owner), _CHECK_ATTR_CLASS); + assert(type_version != 0); + DEOPT_IF(((PyTypeObject *)owner)->tp_version_tag != type_version, _CHECK_ATTR_CLASS); + break; + } + + case _LOAD_ATTR_CLASS: { + PyObject *owner; + PyObject *attr; + PyObject *null = NULL; + owner = stack_pointer[-1]; + PyObject *descr = (PyObject *)operand; + STAT_INC(LOAD_ATTR, hit); + assert(descr != NULL); + attr = Py_NewRef(descr); + null = NULL; + Py_DECREF(owner); + STACK_GROW(((oparg & 1) ? 1 : 0)); + stack_pointer[-1 - (oparg & 1 ? 1 : 0)] = attr; + if (oparg & 1) { stack_pointer[-(oparg & 1 ? 
1 : 0)] = null; } + break; + } + case _GUARD_DORV_VALUES: { PyObject *owner; owner = stack_pointer[-1]; assert(Py_TYPE(owner)->tp_flags & Py_TPFLAGS_MANAGED_DICT); PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(owner); - DEOPT_IF(!_PyDictOrValues_IsValues(dorv), STORE_ATTR); + DEOPT_IF(!_PyDictOrValues_IsValues(dorv), _GUARD_DORV_VALUES); break; } @@ -1764,16 +1865,6 @@ break; } - case _GUARD_TYPE_VERSION_STORE: { - PyObject *owner; - owner = stack_pointer[-1]; - uint32_t type_version = (uint32_t)operand; - PyTypeObject *tp = Py_TYPE(owner); - assert(type_version != 0); - DEOPT_IF(tp->tp_version_tag != type_version, STORE_ATTR); - break; - } - case _STORE_ATTR_SLOT: { PyObject *owner; PyObject *value; @@ -2114,7 +2205,7 @@ case _ITER_CHECK_LIST: { PyObject *iter; iter = stack_pointer[-1]; - DEOPT_IF(Py_TYPE(iter) != &PyListIter_Type, FOR_ITER); + DEOPT_IF(Py_TYPE(iter) != &PyListIter_Type, _ITER_CHECK_LIST); break; } @@ -2159,7 +2250,7 @@ case _ITER_CHECK_TUPLE: { PyObject *iter; iter = stack_pointer[-1]; - DEOPT_IF(Py_TYPE(iter) != &PyTupleIter_Type, FOR_ITER); + DEOPT_IF(Py_TYPE(iter) != &PyTupleIter_Type, _ITER_CHECK_TUPLE); break; } @@ -2205,7 +2296,7 @@ PyObject *iter; iter = stack_pointer[-1]; _PyRangeIterObject *r = (_PyRangeIterObject *)iter; - DEOPT_IF(Py_TYPE(r) != &PyRangeIter_Type, FOR_ITER); + DEOPT_IF(Py_TYPE(r) != &PyRangeIter_Type, _ITER_CHECK_RANGE); break; } @@ -2300,9 +2391,7 @@ owner = stack_pointer[-1]; assert(Py_TYPE(owner)->tp_flags & Py_TPFLAGS_MANAGED_DICT); PyDictOrValues *dorv = _PyObject_DictOrValuesPointer(owner); - DEOPT_IF(!_PyDictOrValues_IsValues(*dorv) && - !_PyObject_MakeInstanceAttributesFromDict(owner, dorv), - LOAD_ATTR); + DEOPT_IF(!_PyDictOrValues_IsValues(*dorv) && !_PyObject_MakeInstanceAttributesFromDict(owner, dorv), _GUARD_DORV_VALUES_INST_ATTR_FROM_DICT); break; } @@ -2312,8 +2401,7 @@ uint32_t keys_version = (uint32_t)operand; PyTypeObject *owner_cls = Py_TYPE(owner); PyHeapTypeObject *owner_heap_type = (PyHeapTypeObject *)owner_cls; - DEOPT_IF(owner_heap_type->ht_cached_keys->dk_version != - keys_version, LOAD_ATTR); + DEOPT_IF(owner_heap_type->ht_cached_keys->dk_version != keys_version, _GUARD_KEYS_VERSION); break; } @@ -2355,13 +2443,71 @@ break; } + case _LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES: { + PyObject *owner; + PyObject *attr; + owner = stack_pointer[-1]; + PyObject *descr = (PyObject *)operand; + assert((oparg & 1) == 0); + STAT_INC(LOAD_ATTR, hit); + assert(descr != NULL); + Py_DECREF(owner); + attr = Py_NewRef(descr); + stack_pointer[-1] = attr; + break; + } + + case _LOAD_ATTR_NONDESCRIPTOR_NO_DICT: { + PyObject *owner; + PyObject *attr; + owner = stack_pointer[-1]; + PyObject *descr = (PyObject *)operand; + assert((oparg & 1) == 0); + assert(Py_TYPE(owner)->tp_dictoffset == 0); + STAT_INC(LOAD_ATTR, hit); + assert(descr != NULL); + Py_DECREF(owner); + attr = Py_NewRef(descr); + stack_pointer[-1] = attr; + break; + } + + case _CHECK_ATTR_METHOD_LAZY_DICT: { + PyObject *owner; + owner = stack_pointer[-1]; + Py_ssize_t dictoffset = Py_TYPE(owner)->tp_dictoffset; + assert(dictoffset > 0); + PyObject *dict = *(PyObject **)((char *)owner + dictoffset); + /* This object has a __dict__, just not yet created */ + DEOPT_IF(dict != NULL, _CHECK_ATTR_METHOD_LAZY_DICT); + break; + } + + case _LOAD_ATTR_METHOD_LAZY_DICT: { + PyObject *owner; + PyObject *attr; + PyObject *self; + owner = stack_pointer[-1]; + PyObject *descr = (PyObject *)operand; + assert(oparg & 1); + STAT_INC(LOAD_ATTR, hit); + assert(descr != NULL); + 
assert(_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR)); + attr = Py_NewRef(descr); + self = owner; + STACK_GROW(1); + stack_pointer[-2] = attr; + stack_pointer[-1] = self; + break; + } + case _CHECK_CALL_BOUND_METHOD_EXACT_ARGS: { PyObject *null; PyObject *callable; null = stack_pointer[-1 - oparg]; callable = stack_pointer[-2 - oparg]; - DEOPT_IF(null != NULL, CALL); - DEOPT_IF(Py_TYPE(callable) != &PyMethod_Type, CALL); + DEOPT_IF(null != NULL, _CHECK_CALL_BOUND_METHOD_EXACT_ARGS); + DEOPT_IF(Py_TYPE(callable) != &PyMethod_Type, _CHECK_CALL_BOUND_METHOD_EXACT_ARGS); break; } @@ -2382,7 +2528,7 @@ } case _CHECK_PEP_523: { - DEOPT_IF(tstate->interp->eval_frame, CALL); + DEOPT_IF(tstate->interp->eval_frame, _CHECK_PEP_523); break; } @@ -2392,11 +2538,11 @@ self_or_null = stack_pointer[-1 - oparg]; callable = stack_pointer[-2 - oparg]; uint32_t func_version = (uint32_t)operand; - DEOPT_IF(!PyFunction_Check(callable), CALL); + DEOPT_IF(!PyFunction_Check(callable), _CHECK_FUNCTION_EXACT_ARGS); PyFunctionObject *func = (PyFunctionObject *)callable; - DEOPT_IF(func->func_version != func_version, CALL); + DEOPT_IF(func->func_version != func_version, _CHECK_FUNCTION_EXACT_ARGS); PyCodeObject *code = (PyCodeObject *)func->func_code; - DEOPT_IF(code->co_argcount != oparg + (self_or_null != NULL), CALL); + DEOPT_IF(code->co_argcount != oparg + (self_or_null != NULL), _CHECK_FUNCTION_EXACT_ARGS); break; } @@ -2405,8 +2551,8 @@ callable = stack_pointer[-2 - oparg]; PyFunctionObject *func = (PyFunctionObject *)callable; PyCodeObject *code = (PyCodeObject *)func->func_code; - DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize), CALL); - DEOPT_IF(tstate->py_recursion_remaining <= 1, CALL); + DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize), _CHECK_STACK_SPACE); + DEOPT_IF(tstate->py_recursion_remaining <= 1, _CHECK_STACK_SPACE); break; } @@ -2671,8 +2817,7 @@ total_args++; } DEOPT_IF(!PyCFunction_CheckExact(callable), CALL); - DEOPT_IF(PyCFunction_GET_FLAGS(callable) != - (METH_FASTCALL | METH_KEYWORDS), CALL); + DEOPT_IF(PyCFunction_GET_FLAGS(callable) != (METH_FASTCALL | METH_KEYWORDS), CALL); STAT_INC(CALL, hit); /* res = func(self, args, nargs, kwnames) */ _PyCFunctionFastWithKeywords cfunc = diff --git a/Python/generated_cases.c.h b/Python/generated_cases.c.h index 2701d416648a20..221d4774beee7f 100644 --- a/Python/generated_cases.c.h +++ b/Python/generated_cases.c.h @@ -12,7 +12,12 @@ static_assert(0 == 0, "incorrect cache size"); TIER_ONE_ONLY assert(frame == tstate->current_frame); - if (_PyFrame_GetCode(frame)->_co_instrumentation_version != tstate->interp->monitoring_version) { + uintptr_t global_version = + _Py_atomic_load_uintptr_relaxed(&tstate->interp->ceval.eval_breaker) & + ~_PY_EVAL_EVENTS_MASK; + uintptr_t code_version = _PyFrame_GetCode(frame)->_co_instrumentation_version; + assert((code_version & 255) == 0); + if (code_version != global_version) { int err = _Py_Instrument(_PyFrame_GetCode(frame), tstate->interp); if (err) goto error; next_instr--; @@ -31,19 +36,17 @@ DEOPT_IF(_Py_emscripten_signal_clock == 0, RESUME); _Py_emscripten_signal_clock -= Py_EMSCRIPTEN_SIGNAL_HANDLING; #endif - /* Possibly combine these two checks */ - DEOPT_IF(_PyFrame_GetCode(frame)->_co_instrumentation_version - != tstate->interp->monitoring_version, RESUME); - DEOPT_IF(_Py_atomic_load_relaxed_int32(&tstate->interp->ceval.eval_breaker), RESUME); + uintptr_t eval_breaker = _Py_atomic_load_uintptr_relaxed(&tstate->interp->ceval.eval_breaker); + uintptr_t 
version = _PyFrame_GetCode(frame)->_co_instrumentation_version; + assert((version & _PY_EVAL_EVENTS_MASK) == 0); + DEOPT_IF(eval_breaker != version, RESUME); DISPATCH(); } TARGET(INSTRUMENTED_RESUME) { - /* Possible performance enhancement: - * We need to check the eval breaker anyway, can we - * combine the instrument verison check and the eval breaker test? - */ - if (_PyFrame_GetCode(frame)->_co_instrumentation_version != tstate->interp->monitoring_version) { + uintptr_t global_version = _Py_atomic_load_uintptr_relaxed(&tstate->interp->ceval.eval_breaker) & ~_PY_EVAL_EVENTS_MASK; + uintptr_t code_version = _PyFrame_GetCode(frame)->_co_instrumentation_version; + if (code_version != global_version) { if (_Py_Instrument(_PyFrame_GetCode(frame), tstate->interp)) { goto error; } @@ -1294,8 +1297,7 @@ receiver = stack_pointer[-2]; DEOPT_IF(tstate->interp->eval_frame, SEND); PyGenObject *gen = (PyGenObject *)receiver; - DEOPT_IF(Py_TYPE(gen) != &PyGen_Type && - Py_TYPE(gen) != &PyCoro_Type, SEND); + DEOPT_IF(Py_TYPE(gen) != &PyGen_Type && Py_TYPE(gen) != &PyCoro_Type, SEND); DEOPT_IF(gen->gi_frame_state >= FRAME_EXECUTING, SEND); STAT_INC(SEND, hit); _PyInterpreterFrame *gen_frame = (_PyInterpreterFrame *)gen->gi_iframe; @@ -2410,9 +2412,7 @@ assert(Py_TYPE(owner)->tp_dictoffset < 0); assert(Py_TYPE(owner)->tp_flags & Py_TPFLAGS_MANAGED_DICT); PyDictOrValues *dorv = _PyObject_DictOrValuesPointer(owner); - DEOPT_IF(!_PyDictOrValues_IsValues(*dorv) && - !_PyObject_MakeInstanceAttributesFromDict(owner, dorv), - LOAD_ATTR); + DEOPT_IF(!_PyDictOrValues_IsValues(*dorv) && !_PyObject_MakeInstanceAttributesFromDict(owner, dorv), LOAD_ATTR); } // _LOAD_ATTR_INSTANCE_VALUE { @@ -2436,22 +2436,29 @@ PyObject *owner; PyObject *attr; PyObject *null = NULL; + // _CHECK_ATTR_MODULE owner = stack_pointer[-1]; - uint32_t type_version = read_u32(&next_instr[1].cache); - uint16_t index = read_u16(&next_instr[3].cache); - DEOPT_IF(!PyModule_CheckExact(owner), LOAD_ATTR); - PyDictObject *dict = (PyDictObject *)((PyModuleObject *)owner)->md_dict; - assert(dict != NULL); - DEOPT_IF(dict->ma_keys->dk_version != type_version, LOAD_ATTR); - assert(dict->ma_keys->dk_kind == DICT_KEYS_UNICODE); - assert(index < dict->ma_keys->dk_nentries); - PyDictUnicodeEntry *ep = DK_UNICODE_ENTRIES(dict->ma_keys) + index; - attr = ep->me_value; - DEOPT_IF(attr == NULL, LOAD_ATTR); - STAT_INC(LOAD_ATTR, hit); - Py_INCREF(attr); - null = NULL; - Py_DECREF(owner); + { + uint32_t type_version = read_u32(&next_instr[1].cache); + DEOPT_IF(!PyModule_CheckExact(owner), LOAD_ATTR); + PyDictObject *dict = (PyDictObject *)((PyModuleObject *)owner)->md_dict; + assert(dict != NULL); + DEOPT_IF(dict->ma_keys->dk_version != type_version, LOAD_ATTR); + } + // _LOAD_ATTR_MODULE + { + uint16_t index = read_u16(&next_instr[3].cache); + PyDictObject *dict = (PyDictObject *)((PyModuleObject *)owner)->md_dict; + assert(dict->ma_keys->dk_kind == DICT_KEYS_UNICODE); + assert(index < dict->ma_keys->dk_nentries); + PyDictUnicodeEntry *ep = DK_UNICODE_ENTRIES(dict->ma_keys) + index; + attr = ep->me_value; + DEOPT_IF(attr == NULL, LOAD_ATTR); + STAT_INC(LOAD_ATTR, hit); + Py_INCREF(attr); + null = NULL; + Py_DECREF(owner); + } STACK_GROW(((oparg & 1) ? 1 : 0)); stack_pointer[-1 - (oparg & 1 ? 1 : 0)] = attr; if (oparg & 1) { stack_pointer[-(oparg & 1 ? 
1 : 0)] = null; } @@ -2463,36 +2470,46 @@ PyObject *owner; PyObject *attr; PyObject *null = NULL; + // _GUARD_TYPE_VERSION owner = stack_pointer[-1]; - uint32_t type_version = read_u32(&next_instr[1].cache); - uint16_t index = read_u16(&next_instr[3].cache); - PyTypeObject *tp = Py_TYPE(owner); - assert(type_version != 0); - DEOPT_IF(tp->tp_version_tag != type_version, LOAD_ATTR); - assert(tp->tp_flags & Py_TPFLAGS_MANAGED_DICT); - PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(owner); - DEOPT_IF(_PyDictOrValues_IsValues(dorv), LOAD_ATTR); - PyDictObject *dict = (PyDictObject *)_PyDictOrValues_GetDict(dorv); - DEOPT_IF(dict == NULL, LOAD_ATTR); - assert(PyDict_CheckExact((PyObject *)dict)); - PyObject *name = GETITEM(FRAME_CO_NAMES, oparg>>1); - uint16_t hint = index; - DEOPT_IF(hint >= (size_t)dict->ma_keys->dk_nentries, LOAD_ATTR); - if (DK_IS_UNICODE(dict->ma_keys)) { - PyDictUnicodeEntry *ep = DK_UNICODE_ENTRIES(dict->ma_keys) + hint; - DEOPT_IF(ep->me_key != name, LOAD_ATTR); - attr = ep->me_value; + { + uint32_t type_version = read_u32(&next_instr[1].cache); + PyTypeObject *tp = Py_TYPE(owner); + assert(type_version != 0); + DEOPT_IF(tp->tp_version_tag != type_version, LOAD_ATTR); } - else { - PyDictKeyEntry *ep = DK_ENTRIES(dict->ma_keys) + hint; - DEOPT_IF(ep->me_key != name, LOAD_ATTR); - attr = ep->me_value; + // _CHECK_ATTR_WITH_HINT + { + assert(Py_TYPE(owner)->tp_flags & Py_TPFLAGS_MANAGED_DICT); + PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(owner); + DEOPT_IF(_PyDictOrValues_IsValues(dorv), LOAD_ATTR); + PyDictObject *dict = (PyDictObject *)_PyDictOrValues_GetDict(dorv); + DEOPT_IF(dict == NULL, LOAD_ATTR); + assert(PyDict_CheckExact((PyObject *)dict)); + } + // _LOAD_ATTR_WITH_HINT + { + uint16_t hint = read_u16(&next_instr[3].cache); + PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(owner); + PyDictObject *dict = (PyDictObject *)_PyDictOrValues_GetDict(dorv); + DEOPT_IF(hint >= (size_t)dict->ma_keys->dk_nentries, LOAD_ATTR); + PyObject *name = GETITEM(FRAME_CO_NAMES, oparg>>1); + if (DK_IS_UNICODE(dict->ma_keys)) { + PyDictUnicodeEntry *ep = DK_UNICODE_ENTRIES(dict->ma_keys) + hint; + DEOPT_IF(ep->me_key != name, LOAD_ATTR); + attr = ep->me_value; + } + else { + PyDictKeyEntry *ep = DK_ENTRIES(dict->ma_keys) + hint; + DEOPT_IF(ep->me_key != name, LOAD_ATTR); + attr = ep->me_value; + } + DEOPT_IF(attr == NULL, LOAD_ATTR); + STAT_INC(LOAD_ATTR, hit); + Py_INCREF(attr); + null = NULL; + Py_DECREF(owner); } - DEOPT_IF(attr == NULL, LOAD_ATTR); - STAT_INC(LOAD_ATTR, hit); - Py_INCREF(attr); - null = NULL; - Py_DECREF(owner); STACK_GROW(((oparg & 1) ? 1 : 0)); stack_pointer[-1 - (oparg & 1 ? 1 : 0)] = attr; if (oparg & 1) { stack_pointer[-(oparg & 1 ? 
1 : 0)] = null; } @@ -2534,21 +2551,23 @@ PyObject *owner; PyObject *attr; PyObject *null = NULL; + // _CHECK_ATTR_CLASS owner = stack_pointer[-1]; - uint32_t type_version = read_u32(&next_instr[1].cache); - PyObject *descr = read_obj(&next_instr[5].cache); - - DEOPT_IF(!PyType_Check(owner), LOAD_ATTR); - DEOPT_IF(((PyTypeObject *)owner)->tp_version_tag != type_version, - LOAD_ATTR); - assert(type_version != 0); - - STAT_INC(LOAD_ATTR, hit); - null = NULL; - attr = descr; - assert(attr != NULL); - Py_INCREF(attr); - Py_DECREF(owner); + { + uint32_t type_version = read_u32(&next_instr[1].cache); + DEOPT_IF(!PyType_Check(owner), LOAD_ATTR); + assert(type_version != 0); + DEOPT_IF(((PyTypeObject *)owner)->tp_version_tag != type_version, LOAD_ATTR); + } + // _LOAD_ATTR_CLASS + { + PyObject *descr = read_obj(&next_instr[5].cache); + STAT_INC(LOAD_ATTR, hit); + assert(descr != NULL); + attr = Py_NewRef(descr); + null = NULL; + Py_DECREF(owner); + } STACK_GROW(((oparg & 1) ? 1 : 0)); stack_pointer[-1 - (oparg & 1 ? 1 : 0)] = attr; if (oparg & 1) { stack_pointer[-(oparg & 1 ? 1 : 0)] = null; } @@ -2621,7 +2640,7 @@ TARGET(STORE_ATTR_INSTANCE_VALUE) { PyObject *owner; PyObject *value; - // _GUARD_TYPE_VERSION_STORE + // _GUARD_TYPE_VERSION owner = stack_pointer[-1]; { uint32_t type_version = read_u32(&next_instr[1].cache); @@ -2710,7 +2729,7 @@ TARGET(STORE_ATTR_SLOT) { PyObject *owner; PyObject *value; - // _GUARD_TYPE_VERSION_STORE + // _GUARD_TYPE_VERSION owner = stack_pointer[-1]; { uint32_t type_version = read_u32(&next_instr[1].cache); @@ -3591,17 +3610,14 @@ { assert(Py_TYPE(owner)->tp_flags & Py_TPFLAGS_MANAGED_DICT); PyDictOrValues *dorv = _PyObject_DictOrValuesPointer(owner); - DEOPT_IF(!_PyDictOrValues_IsValues(*dorv) && - !_PyObject_MakeInstanceAttributesFromDict(owner, dorv), - LOAD_ATTR); + DEOPT_IF(!_PyDictOrValues_IsValues(*dorv) && !_PyObject_MakeInstanceAttributesFromDict(owner, dorv), LOAD_ATTR); } // _GUARD_KEYS_VERSION { uint32_t keys_version = read_u32(&next_instr[3].cache); PyTypeObject *owner_cls = Py_TYPE(owner); PyHeapTypeObject *owner_heap_type = (PyHeapTypeObject *)owner_cls; - DEOPT_IF(owner_heap_type->ht_cached_keys->dk_version != - keys_version, LOAD_ATTR); + DEOPT_IF(owner_heap_type->ht_cached_keys->dk_version != keys_version, LOAD_ATTR); } // _LOAD_ATTR_METHOD_WITH_VALUES { @@ -3654,26 +3670,36 @@ TARGET(LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES) { PyObject *owner; PyObject *attr; + // _GUARD_TYPE_VERSION owner = stack_pointer[-1]; - uint32_t type_version = read_u32(&next_instr[1].cache); - uint32_t keys_version = read_u32(&next_instr[3].cache); - PyObject *descr = read_obj(&next_instr[5].cache); - assert((oparg & 1) == 0); - PyTypeObject *owner_cls = Py_TYPE(owner); - assert(type_version != 0); - DEOPT_IF(owner_cls->tp_version_tag != type_version, LOAD_ATTR); - assert(owner_cls->tp_flags & Py_TPFLAGS_MANAGED_DICT); - PyDictOrValues *dorv = _PyObject_DictOrValuesPointer(owner); - DEOPT_IF(!_PyDictOrValues_IsValues(*dorv) && - !_PyObject_MakeInstanceAttributesFromDict(owner, dorv), - LOAD_ATTR); - PyHeapTypeObject *owner_heap_type = (PyHeapTypeObject *)owner_cls; - DEOPT_IF(owner_heap_type->ht_cached_keys->dk_version != - keys_version, LOAD_ATTR); - STAT_INC(LOAD_ATTR, hit); - assert(descr != NULL); - Py_DECREF(owner); - attr = Py_NewRef(descr); + { + uint32_t type_version = read_u32(&next_instr[1].cache); + PyTypeObject *tp = Py_TYPE(owner); + assert(type_version != 0); + DEOPT_IF(tp->tp_version_tag != type_version, LOAD_ATTR); + } + // 
_GUARD_DORV_VALUES_INST_ATTR_FROM_DICT + { + assert(Py_TYPE(owner)->tp_flags & Py_TPFLAGS_MANAGED_DICT); + PyDictOrValues *dorv = _PyObject_DictOrValuesPointer(owner); + DEOPT_IF(!_PyDictOrValues_IsValues(*dorv) && !_PyObject_MakeInstanceAttributesFromDict(owner, dorv), LOAD_ATTR); + } + // _GUARD_KEYS_VERSION + { + uint32_t keys_version = read_u32(&next_instr[3].cache); + PyTypeObject *owner_cls = Py_TYPE(owner); + PyHeapTypeObject *owner_heap_type = (PyHeapTypeObject *)owner_cls; + DEOPT_IF(owner_heap_type->ht_cached_keys->dk_version != keys_version, LOAD_ATTR); + } + // _LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES + { + PyObject *descr = read_obj(&next_instr[5].cache); + assert((oparg & 1) == 0); + STAT_INC(LOAD_ATTR, hit); + assert(descr != NULL); + Py_DECREF(owner); + attr = Py_NewRef(descr); + } stack_pointer[-1] = attr; next_instr += 9; DISPATCH(); @@ -3682,18 +3708,24 @@ TARGET(LOAD_ATTR_NONDESCRIPTOR_NO_DICT) { PyObject *owner; PyObject *attr; + // _GUARD_TYPE_VERSION owner = stack_pointer[-1]; - uint32_t type_version = read_u32(&next_instr[1].cache); - PyObject *descr = read_obj(&next_instr[5].cache); - assert((oparg & 1) == 0); - PyTypeObject *owner_cls = Py_TYPE(owner); - assert(type_version != 0); - DEOPT_IF(owner_cls->tp_version_tag != type_version, LOAD_ATTR); - assert(owner_cls->tp_dictoffset == 0); - STAT_INC(LOAD_ATTR, hit); - assert(descr != NULL); - Py_DECREF(owner); - attr = Py_NewRef(descr); + { + uint32_t type_version = read_u32(&next_instr[1].cache); + PyTypeObject *tp = Py_TYPE(owner); + assert(type_version != 0); + DEOPT_IF(tp->tp_version_tag != type_version, LOAD_ATTR); + } + // _LOAD_ATTR_NONDESCRIPTOR_NO_DICT + { + PyObject *descr = read_obj(&next_instr[5].cache); + assert((oparg & 1) == 0); + assert(Py_TYPE(owner)->tp_dictoffset == 0); + STAT_INC(LOAD_ATTR, hit); + assert(descr != NULL); + Py_DECREF(owner); + attr = Py_NewRef(descr); + } stack_pointer[-1] = attr; next_instr += 9; DISPATCH(); @@ -3703,22 +3735,32 @@ PyObject *owner; PyObject *attr; PyObject *self; + // _GUARD_TYPE_VERSION owner = stack_pointer[-1]; - uint32_t type_version = read_u32(&next_instr[1].cache); - PyObject *descr = read_obj(&next_instr[5].cache); - assert(oparg & 1); - PyTypeObject *owner_cls = Py_TYPE(owner); - DEOPT_IF(owner_cls->tp_version_tag != type_version, LOAD_ATTR); - Py_ssize_t dictoffset = owner_cls->tp_dictoffset; - assert(dictoffset > 0); - PyObject *dict = *(PyObject **)((char *)owner + dictoffset); - /* This object has a __dict__, just not yet created */ - DEOPT_IF(dict != NULL, LOAD_ATTR); - STAT_INC(LOAD_ATTR, hit); - assert(descr != NULL); - assert(_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR)); - attr = Py_NewRef(descr); - self = owner; + { + uint32_t type_version = read_u32(&next_instr[1].cache); + PyTypeObject *tp = Py_TYPE(owner); + assert(type_version != 0); + DEOPT_IF(tp->tp_version_tag != type_version, LOAD_ATTR); + } + // _CHECK_ATTR_METHOD_LAZY_DICT + { + Py_ssize_t dictoffset = Py_TYPE(owner)->tp_dictoffset; + assert(dictoffset > 0); + PyObject *dict = *(PyObject **)((char *)owner + dictoffset); + /* This object has a __dict__, just not yet created */ + DEOPT_IF(dict != NULL, LOAD_ATTR); + } + // _LOAD_ATTR_METHOD_LAZY_DICT + { + PyObject *descr = read_obj(&next_instr[5].cache); + assert(oparg & 1); + STAT_INC(LOAD_ATTR, hit); + assert(descr != NULL); + assert(_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR)); + attr = Py_NewRef(descr); + self = owner; + } STACK_GROW(1); stack_pointer[-2] = attr; stack_pointer[-1] = self; @@ 
-4328,8 +4370,7 @@ total_args++; } DEOPT_IF(!PyCFunction_CheckExact(callable), CALL); - DEOPT_IF(PyCFunction_GET_FLAGS(callable) != - (METH_FASTCALL | METH_KEYWORDS), CALL); + DEOPT_IF(PyCFunction_GET_FLAGS(callable) != (METH_FASTCALL | METH_KEYWORDS), CALL); STAT_INC(CALL, hit); /* res = func(self, args, nargs, kwnames) */ _PyCFunctionFastWithKeywords cfunc = diff --git a/Python/instrumentation.c b/Python/instrumentation.c index 0b974f6133ce7d..eee1908e503e43 100644 --- a/Python/instrumentation.c +++ b/Python/instrumentation.c @@ -4,6 +4,7 @@ #include "pycore_bitutils.h" // _Py_popcount32 #include "pycore_call.h" +#include "pycore_ceval.h" // _PY_EVAL_EVENTS_BITS #include "pycore_code.h" // _PyCode_Clear_Executors() #include "pycore_frame.h" #include "pycore_interp.h" @@ -895,10 +896,27 @@ static inline int most_significant_bit(uint8_t bits) { return MOST_SIGNIFICANT_BITS[bits]; } +static uint32_t +global_version(PyInterpreterState *interp) +{ + return interp->ceval.eval_breaker & ~_PY_EVAL_EVENTS_MASK; +} + +static void +set_global_version(PyInterpreterState *interp, uint32_t version) +{ + assert((version & _PY_EVAL_EVENTS_MASK) == 0); + uintptr_t old = _Py_atomic_load_uintptr(&interp->ceval.eval_breaker); + intptr_t new; + do { + new = (old & _PY_EVAL_EVENTS_MASK) | version; + } while (!_Py_atomic_compare_exchange_uintptr(&interp->ceval.eval_breaker, &old, new)); +} + static bool is_version_up_to_date(PyCodeObject *code, PyInterpreterState *interp) { - return interp->monitoring_version == code->_co_instrumentation_version; + return global_version(interp) == code->_co_instrumentation_version; } #ifndef NDEBUG @@ -1556,7 +1574,7 @@ _Py_Instrument(PyCodeObject *code, PyInterpreterState *interp) { if (is_version_up_to_date(code, interp)) { assert( - interp->monitoring_version == 0 || + (interp->ceval.eval_breaker & ~_PY_EVAL_EVENTS_MASK) == 0 || instrumentation_cross_checks(interp, code) ); return 0; @@ -1594,7 +1612,7 @@ _Py_Instrument(PyCodeObject *code, PyInterpreterState *interp) assert(monitors_are_empty(monitors_and(new_events, removed_events))); } code->_co_monitoring->active_monitors = active_events; - code->_co_instrumentation_version = interp->monitoring_version; + code->_co_instrumentation_version = global_version(interp); if (monitors_are_empty(new_events) && monitors_are_empty(removed_events)) { #ifdef INSTRUMENT_DEBUG sanity_check_instrumentation(code); @@ -1761,6 +1779,10 @@ check_tool(PyInterpreterState *interp, int tool_id) return 0; } +/* We share the eval-breaker with flags, so the monitoring + * version goes in the top 24 bits */ +#define MONITORING_VERSION_INCREMENT (1 << _PY_EVAL_EVENTS_BITS) + int _PyMonitoring_SetEvents(int tool_id, _PyMonitoringEventSet events) { @@ -1775,7 +1797,12 @@ _PyMonitoring_SetEvents(int tool_id, _PyMonitoringEventSet events) return 0; } set_events(&interp->monitors, tool_id, events); - interp->monitoring_version++; + uint32_t new_version = global_version(interp) + MONITORING_VERSION_INCREMENT; + if (new_version == 0) { + PyErr_Format(PyExc_OverflowError, "events set too many times"); + return -1; + } + set_global_version(interp, new_version); return instrument_all_executing_code_objects(interp); } @@ -1803,7 +1830,7 @@ _PyMonitoring_SetLocalEvents(PyCodeObject *code, int tool_id, _PyMonitoringEvent set_local_events(local, tool_id, events); if (is_version_up_to_date(code, interp)) { /* Force instrumentation update */ - code->_co_instrumentation_version = UINT64_MAX; + code->_co_instrumentation_version -= MONITORING_VERSION_INCREMENT; } if 
(_Py_Instrument(code, interp)) { return -1; @@ -2086,8 +2113,14 @@ monitoring_restart_events_impl(PyObject *module) * last restart version < current version */ PyInterpreterState *interp = _PyInterpreterState_GET(); - interp->last_restart_version = interp->monitoring_version + 1; - interp->monitoring_version = interp->last_restart_version + 1; + uint32_t restart_version = global_version(interp) + MONITORING_VERSION_INCREMENT; + uint32_t new_version = restart_version + MONITORING_VERSION_INCREMENT; + if (new_version <= MONITORING_VERSION_INCREMENT) { + PyErr_Format(PyExc_OverflowError, "events set too many times"); + return NULL; + } + interp->last_restart_version = restart_version; + set_global_version(interp, new_version); if (instrument_all_executing_code_objects(interp)) { return NULL; } diff --git a/Python/pylifecycle.c b/Python/pylifecycle.c index c0323763f44890..eb10aa3562dfce 100644 --- a/Python/pylifecycle.c +++ b/Python/pylifecycle.c @@ -655,7 +655,8 @@ pycore_create_interpreter(_PyRuntimeState *runtime, return status; } - PyThreadState *tstate = _PyThreadState_New(interp); + PyThreadState *tstate = _PyThreadState_New(interp, + _PyThreadState_WHENCE_INTERP); if (tstate == NULL) { return _PyStatus_ERR("can't make first thread"); } @@ -2050,7 +2051,8 @@ new_interpreter(PyThreadState **tstate_p, const PyInterpreterConfig *config) return _PyStatus_OK(); } - PyThreadState *tstate = _PyThreadState_New(interp); + PyThreadState *tstate = _PyThreadState_New(interp, + _PyThreadState_WHENCE_INTERP); if (tstate == NULL) { PyInterpreterState_Delete(interp); *tstate_p = NULL; diff --git a/Python/pystate.c b/Python/pystate.c index fe056bf4687026..ae33259f8df2f6 100644 --- a/Python/pystate.c +++ b/Python/pystate.c @@ -1094,9 +1094,7 @@ _PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime) int _PyInterpreterState_SetRunningMain(PyInterpreterState *interp) { - if (interp->threads.main != NULL) { - PyErr_SetString(PyExc_RuntimeError, - "interpreter already running"); + if (_PyInterpreterState_FailIfRunningMain(interp) < 0) { return -1; } PyThreadState *tstate = current_fast_get(&_PyRuntime); @@ -1113,7 +1111,20 @@ _PyInterpreterState_SetRunningMain(PyInterpreterState *interp) void _PyInterpreterState_SetNotRunningMain(PyInterpreterState *interp) { - assert(interp->threads.main == current_fast_get(&_PyRuntime)); + PyThreadState *tstate = interp->threads.main; + assert(tstate == current_fast_get(&_PyRuntime)); + + if (tstate->on_delete != NULL) { + // The threading module was imported for the first time in this + // thread, so it was set as threading._main_thread. (See gh-75698.) + // The thread has finished running the Python program so we mark + // the thread object as finished. + assert(tstate->_whence != _PyThreadState_WHENCE_THREADING); + tstate->on_delete(tstate->on_delete_data); + tstate->on_delete = NULL; + tstate->on_delete_data = NULL; + } + interp->threads.main = NULL; } @@ -1123,6 +1134,17 @@ _PyInterpreterState_IsRunningMain(PyInterpreterState *interp) return (interp->threads.main != NULL); } +int +_PyInterpreterState_FailIfRunningMain(PyInterpreterState *interp) +{ + if (interp->threads.main != NULL) { + PyErr_SetString(PyExc_RuntimeError, + "interpreter already running"); + return -1; + } + return 0; +} + //---------- // accessors @@ -1183,8 +1205,10 @@ _PyInterpreterState_IDDecref(PyInterpreterState *interp) PyThread_release_lock(interp->id_mutex); if (refcount == 0 && interp->requires_idref) { - // XXX Using the "head" thread isn't strictly correct. 
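/*
 * A minimal sketch of the cleanup pattern arranged by the pystate.c hunks
 * above, where tstate->on_delete is expected to run exactly once: normally
 * from _PyInterpreterState_SetNotRunningMain(), and otherwise from
 * PyThreadState_Clear() as a fallback.  This is an illustrative,
 * self-contained model only, not part of the patch; the names below
 * (tstate_model, fire_on_delete, mark_finished) are hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant fields of a thread state. */
struct tstate_model {
    void (*on_delete)(void *);
    void *on_delete_data;
};

/* Fire the registered callback at most once, then drop it so a later
   cleanup path (a Clear-style function) cannot run it again. */
static void
fire_on_delete(struct tstate_model *ts)
{
    if (ts->on_delete != NULL) {
        ts->on_delete(ts->on_delete_data);
        ts->on_delete = NULL;
        ts->on_delete_data = NULL;
    }
}

static void
mark_finished(void *data)
{
    printf("thread %s marked finished\n", (const char *)data);
}

int
main(void)
{
    struct tstate_model ts = { mark_finished, "main" };
    fire_on_delete(&ts);   /* SetNotRunningMain-style path: runs the hook */
    fire_on_delete(&ts);   /* Clear-style fallback: now a no-op */
    return 0;
}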
- PyThreadState *tstate = PyInterpreterState_ThreadHead(interp); + PyThreadState *tstate = _PyThreadState_New(interp, + _PyThreadState_WHENCE_INTERP); + _PyThreadState_Bind(tstate); + // XXX Possible GILState issues? PyThreadState *save_tstate = _PyThreadState_Swap(runtime, tstate); Py_EndInterpreter(tstate); @@ -1338,7 +1362,14 @@ free_threadstate(PyThreadState *tstate) { // The initial thread state of the interpreter is allocated // as part of the interpreter state so should not be freed. - if (tstate != &tstate->interp->_initial_thread) { + if (tstate == &tstate->interp->_initial_thread) { + // Restore to _PyThreadState_INIT. + tstate = &tstate->interp->_initial_thread; + memcpy(tstate, + &initial._main_interpreter._initial_thread, + sizeof(*tstate)); + } + else { PyMem_RawFree(tstate); } } @@ -1353,7 +1384,7 @@ free_threadstate(PyThreadState *tstate) static void init_threadstate(PyThreadState *tstate, - PyInterpreterState *interp, uint64_t id) + PyInterpreterState *interp, uint64_t id, int whence) { if (tstate->_status.initialized) { Py_FatalError("thread state already initialized"); @@ -1366,6 +1397,10 @@ init_threadstate(PyThreadState *tstate, assert(tstate->next == NULL); assert(tstate->prev == NULL); + assert(tstate->_whence == _PyThreadState_WHENCE_NOTSET); + assert(whence >= 0 && whence <= _PyThreadState_WHENCE_EXEC); + tstate->_whence = whence; + assert(id > 0); tstate->id = id; @@ -1395,8 +1430,6 @@ add_threadstate(PyInterpreterState *interp, PyThreadState *tstate, PyThreadState *next) { assert(interp->threads.head != tstate); - assert((next != NULL && tstate->id != 1) || - (next == NULL && tstate->id == 1)); if (next != NULL) { assert(next->prev == NULL || next->prev == tstate); next->prev = tstate; @@ -1407,7 +1440,7 @@ add_threadstate(PyInterpreterState *interp, PyThreadState *tstate, } static PyThreadState * -new_threadstate(PyInterpreterState *interp) +new_threadstate(PyInterpreterState *interp, int whence) { PyThreadState *tstate; _PyRuntimeState *runtime = interp->runtime; @@ -1430,10 +1463,10 @@ new_threadstate(PyInterpreterState *interp) PyThreadState *old_head = interp->threads.head; if (old_head == NULL) { // It's the interpreter's initial thread state. - assert(id == 1); used_newtstate = 0; tstate = &interp->_initial_thread; } + // XXX Re-use interp->_initial_thread if not in use? else { // Every valid interpreter must have at least one thread. assert(id > 1); @@ -1446,7 +1479,7 @@ new_threadstate(PyInterpreterState *interp) sizeof(*tstate)); } - init_threadstate(tstate, interp, id); + init_threadstate(tstate, interp, id, whence); add_threadstate(interp, tstate, old_head); HEAD_UNLOCK(runtime); @@ -1460,7 +1493,8 @@ new_threadstate(PyInterpreterState *interp) PyThreadState * PyThreadState_New(PyInterpreterState *interp) { - PyThreadState *tstate = new_threadstate(interp); + PyThreadState *tstate = new_threadstate(interp, + _PyThreadState_WHENCE_UNKNOWN); if (tstate) { bind_tstate(tstate); // This makes sure there's a gilstate tstate bound @@ -1474,16 +1508,16 @@ PyThreadState_New(PyInterpreterState *interp) // This must be followed by a call to _PyThreadState_Bind(); PyThreadState * -_PyThreadState_New(PyInterpreterState *interp) +_PyThreadState_New(PyInterpreterState *interp, int whence) { - return new_threadstate(interp); + return new_threadstate(interp, whence); } // We keep this for stable ABI compabibility. 
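/*
 * A minimal sketch of the "whence" tagging introduced in the surrounding
 * pystate.c hunks: each new thread state records where it was created, the
 * tag is set exactly once during initialization, and its value is
 * range-checked.  The enum and struct names here (WHENCE_NOTSET, ts_model,
 * init_ts) are hypothetical stand-ins, not the actual
 * _PyThreadState_WHENCE_* constants or CPython structures.
 */
#include <assert.h>
#include <stdio.h>

enum whence {
    WHENCE_NOTSET = -1,
    WHENCE_UNKNOWN = 0,
    WHENCE_INTERP,
    WHENCE_THREADING,
    WHENCE_GILSTATE,
    WHENCE_EXEC,        /* highest legal value, used for the range check */
};

struct ts_model {
    int initialized;
    enum whence whence;
};

static void
init_ts(struct ts_model *ts, int whence)
{
    assert(!ts->initialized);
    /* The tag must not have been set yet and must be a known value. */
    assert(ts->whence == WHENCE_NOTSET);
    assert(whence >= 0 && whence <= WHENCE_EXEC);
    ts->whence = (enum whence)whence;
    ts->initialized = 1;
}

int
main(void)
{
    struct ts_model ts = { 0, WHENCE_NOTSET };
    init_ts(&ts, WHENCE_GILSTATE);   /* e.g. a state made on demand for an
                                        external thread acquiring the GIL */
    printf("whence = %d\n", ts.whence);
    return 0;
}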
diff --git a/Tools/cases_generator/analysis.py b/Tools/cases_generator/analysis.py
index 91dcba8ceee13d..7bbc924e5083f1 100644
--- a/Tools/cases_generator/analysis.py
+++ b/Tools/cases_generator/analysis.py
@@ -26,7 +26,7 @@
     "co_names": "Use FRAME_CO_NAMES.",
 }

-RE_PREDICTED = r"^\s*(?:GO_TO_INSTRUCTION\(|DEOPT_IF\(.*?,\s*)(\w+)\);\s*(?://.*)?$"
+RE_GO_TO_INSTR = r"^\s*GO_TO_INSTRUCTION\((\w+)\);\s*(?://.*)?$"


 class Analyzer:
@@ -187,16 +187,23 @@ def analyze(self) -> None:
         Raises SystemExit if there is an error.
         """
         self.analyze_macros_and_pseudos()
-        self.find_predictions()
         self.map_families()
+        self.mark_predictions()
         self.check_families()

-    def find_predictions(self) -> None:
-        """Find the instructions that need PREDICTED() labels."""
+    def mark_predictions(self) -> None:
+        """Mark the instructions that need PREDICTED() labels."""
+        # Start with family heads
+        for family in self.families.values():
+            if family.name in self.instrs:
+                self.instrs[family.name].predicted = True
+            if family.name in self.macro_instrs:
+                self.macro_instrs[family.name].predicted = True
+        # Also look for GO_TO_INSTRUCTION() calls
         for instr in self.instrs.values():
             targets: set[str] = set()
             for line in instr.block_text:
-                if m := re.match(RE_PREDICTED, line):
+                if m := re.match(RE_GO_TO_INSTR, line):
                     targets.add(m.group(1))
             for target in targets:
                 if target_instr := self.instrs.get(target):
@@ -225,11 +232,18 @@ def map_families(self) -> None:
                         )
                     else:
                         member_instr.family = family
-                elif not self.macro_instrs.get(member):
+                if member_mac := self.macro_instrs.get(member):
+                    assert member_mac.family is None, (member, member_mac.family.name)
+                    member_mac.family = family
+                if not member_instr and not member_mac:
                     self.error(
                         f"Unknown instruction {member!r} referenced in family {family.name!r}",
                         family,
                     )
+        # A sanctioned exception:
+        # This opcode is a member of the family but it doesn't pass the checks.
+        if mac := self.macro_instrs.get("BINARY_OP_INPLACE_ADD_UNICODE"):
+            mac.family = self.families.get("BINARY_OP")

     def check_families(self) -> None:
         """Check each family:
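The analysis change above splits prediction marking out of find_predictions(): every family head is now marked as needing a PREDICTED() label up front, and the line scan only looks for explicit GO_TO_INSTRUCTION() calls, since DEOPT_IF() now names its own target (see the instructions.py hunk below). A quick, self-contained check of the narrowed pattern, using made-up instruction body lines:

    import re

    # Same pattern as RE_GO_TO_INSTR above.
    RE_GO_TO_INSTR = r"^\s*GO_TO_INSTRUCTION\((\w+)\);\s*(?://.*)?$"

    lines = [
        "    GO_TO_INSTRUCTION(BINARY_OP);  // tail-call the generic op",
        "    DEOPT_IF(!PyLong_CheckExact(left));",
    ]
    for line in lines:
        m = re.match(RE_GO_TO_INSTR, line)
        print(m.group(1) if m else None)
    # Prints BINARY_OP for the first line and None for the second:
    # DEOPT_IF() no longer contributes prediction targets.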
diff --git a/Tools/cases_generator/generate_cases.py b/Tools/cases_generator/generate_cases.py
index 9192d1038ab7d6..01ab83bedb2985 100644
--- a/Tools/cases_generator/generate_cases.py
+++ b/Tools/cases_generator/generate_cases.py
@@ -781,9 +781,7 @@ def write_instructions(
                 case parsing.Macro():
                     n_macros += 1
                     mac = self.macro_instrs[thing.name]
-                    stacking.write_macro_instr(
-                        mac, self.out, self.families.get(mac.name)
-                    )
+                    stacking.write_macro_instr(mac, self.out)
                 case parsing.Pseudo():
                     pass
                 case _:
diff --git a/Tools/cases_generator/instructions.py b/Tools/cases_generator/instructions.py
index 6fbf7d93f42fde..bd7b7dfbaa8f70 100644
--- a/Tools/cases_generator/instructions.py
+++ b/Tools/cases_generator/instructions.py
@@ -144,7 +144,8 @@ def write_body(
         out: Formatter,
         dedent: int,
         active_caches: list[ActiveCacheEffect],
-        tier: Tiers = TIER_ONE,
+        tier: Tiers,
+        family: parsing.Family | None,
     ) -> None:
         """Write the instruction body."""
         # Write cache effect variable declarations and initializations
@@ -207,6 +208,16 @@ def write_body(
                     )
                 else:
                     out.write_raw(f"{space}if ({cond}) goto {label};\n")
+            elif m := re.match(r"(\s*)DEOPT_IF\((.+)\);\s*(?://.*)?$", line):
+                space, cond = m.groups()
+                space = extra + space
+                target = family.name if family else self.name
+                out.write_raw(f"{space}DEOPT_IF({cond}, {target});\n")
+            elif "DEOPT" in line:
+                filename = context.owner.filename
+                lineno = context.owner.tokens[context.begin].line
+                print(f"{filename}:{lineno}: ERROR: DEOPT_IF() must be all on one line")
+                out.write_raw(extra + line)
             elif m := re.match(r"(\s*)DECREF_INPUTS\(\);\s*(?://.*)?$", line):
                 out.reset_lineno()
                 space = extra + m.group(1)
@@ -244,7 +255,8 @@ def write_body(
         out: Formatter,
         dedent: int,
         active_caches: list[ActiveCacheEffect],
-        tier: Tiers = TIER_ONE,
+        tier: Tiers,
+        family: parsing.Family | None,
     ) -> None:
         pass

@@ -268,7 +280,9 @@ class MacroInstruction:
     macro: parsing.Macro
     parts: MacroParts
     cache_offset: int
+    # Set later
     predicted: bool = False
+    family: parsing.Family | None = None


 @dataclasses.dataclass
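With the family now attached to each instruction and macro, write_body() can emit the deopt target itself: the family head for a specialized instruction, or the instruction's own name otherwise, and it reports an error unless DEOPT_IF() sits on a single line where the regex can see it. A standalone sketch of just that string rewrite, using a made-up DEOPT_IF() line and helper name, outside the real Formatter machinery:

    import re

    def rewrite_deopt(line: str, instr_name: str, family_name: str | None) -> str:
        """Append the deopt target, as the generator now does."""
        m = re.match(r"(\s*)DEOPT_IF\((.+)\);\s*(?://.*)?$", line)
        if m is None:
            raise ValueError("DEOPT_IF() must be all on one line")
        space, cond = m.groups()
        target = family_name if family_name else instr_name
        return f"{space}DEOPT_IF({cond}, {target});"

    print(rewrite_deopt("    DEOPT_IF(!PyLong_CheckExact(left));",
                        "BINARY_OP_ADD_INT", "BINARY_OP"))
    # ->     DEOPT_IF(!PyLong_CheckExact(left), BINARY_OP);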
diff --git a/Tools/cases_generator/stacking.py b/Tools/cases_generator/stacking.py
index 1f9fda66a5f034..bba2db8b059da8 100644
--- a/Tools/cases_generator/stacking.py
+++ b/Tools/cases_generator/stacking.py
@@ -351,14 +351,13 @@ def write_single_instr(
             out,
             tier,
             0,
+            instr.family,
         )
     except AssertionError as err:
         raise AssertionError(f"Error writing instruction {instr.name}") from err


-def write_macro_instr(
-    mac: MacroInstruction, out: Formatter, family: Family | None
-) -> None:
+def write_macro_instr(mac: MacroInstruction, out: Formatter) -> None:
     parts = [
         part
         for part in mac.parts
@@ -368,9 +367,11 @@
     with out.block(f"TARGET({mac.name})"):
         if mac.predicted:
             out.emit(f"PREDICTED({mac.name});")
-        out.static_assert_family_size(mac.name, family, mac.cache_offset)
+        out.static_assert_family_size(mac.name, mac.family, mac.cache_offset)
         try:
-            next_instr_is_set = write_components(parts, out, TIER_ONE, mac.cache_offset)
+            next_instr_is_set = write_components(
+                parts, out, TIER_ONE, mac.cache_offset, mac.family
+            )
         except AssertionError as err:
             raise AssertionError(f"Error writing macro {mac.name}") from err
         if not parts[-1].instr.always_exits:
@@ -386,6 +387,7 @@ def write_components(
     out: Formatter,
     tier: Tiers,
     cache_offset: int,
+    family: Family | None,
 ) -> bool:
     managers = get_managers(parts)

@@ -454,10 +456,10 @@ write_components(
             assert_no_pokes(managers)

         if len(parts) == 1:
-            mgr.instr.write_body(out, 0, mgr.active_caches, tier)
+            mgr.instr.write_body(out, 0, mgr.active_caches, tier, family)
         else:
             with out.block(""):
-                mgr.instr.write_body(out, -4, mgr.active_caches, tier)
+                mgr.instr.write_body(out, -4, mgr.active_caches, tier, family)

         if mgr is managers[-1] and not next_instr_is_set and not mgr.instr.always_exits:
             # Adjust the stack to its final depth, *then* write the