From 553c90ccc2f5b15be76a2bb6e38d23e58d739e2f Mon Sep 17 00:00:00 2001 From: Nikita Sobolev Date: Fri, 9 Feb 2024 09:40:28 +0300 Subject: [PATCH 01/11] gh-101100: Fix sphinx warnings in `library/enum.rst` (#114696) Co-authored-by: Ethan Furman --- Doc/library/enum.rst | 17 +++++++++++++++-- Doc/tools/.nitignore | 1 - 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/Doc/library/enum.rst b/Doc/library/enum.rst index 534939943d3326..30d80ce8d488cc 100644 --- a/Doc/library/enum.rst +++ b/Doc/library/enum.rst @@ -286,6 +286,19 @@ Data Types appropriate value will be chosen for you. See :class:`auto` for the details. + .. attribute:: Enum._name_ + + Name of the member. + + .. attribute:: Enum._value_ + + Value of the member, can be set in :meth:`~object.__new__`. + + .. attribute:: Enum._order_ + + No longer used, kept for backward compatibility. + (class attribute, removed during class creation). + .. attribute:: Enum._ignore_ ``_ignore_`` is only used during creation and is removed from the @@ -823,8 +836,8 @@ Supported ``_sunder_`` names - :attr:`~Enum._ignore_` -- a list of names, either as a :class:`list` or a :class:`str`, that will not be transformed into members, and will be removed from the final class -- :attr:`~Enum._order_` -- used in Python 2/3 code to ensure member order is - consistent (class attribute, removed during class creation) +- :attr:`~Enum._order_` -- no longer used, kept for backward + compatibility (class attribute, removed during class creation) - :meth:`~Enum._generate_next_value_` -- used to get an appropriate value for an enum member; may be overridden diff --git a/Doc/tools/.nitignore b/Doc/tools/.nitignore index f96478b45e44c0..9db02c5c3c73c9 100644 --- a/Doc/tools/.nitignore +++ b/Doc/tools/.nitignore @@ -31,7 +31,6 @@ Doc/library/email.compat32-message.rst Doc/library/email.errors.rst Doc/library/email.parser.rst Doc/library/email.policy.rst -Doc/library/enum.rst Doc/library/exceptions.rst Doc/library/faulthandler.rst Doc/library/fcntl.rst From c968dc7ff3041137bb702436ff944692dede1ad1 Mon Sep 17 00:00:00 2001 From: Brett Cannon Date: Fri, 9 Feb 2024 00:21:49 -0800 Subject: [PATCH 02/11] GH-113632: update configure.ac for WebAssembly support tiers (#115192) Move WASI to tier 2 and drop Emscripten. --- Doc/whatsnew/3.13.rst | 6 ++++++ .../Build/2024-02-08-17-38-56.gh-issue-113632.y9KIGb.rst | 2 ++ configure | 6 ++---- configure.ac | 3 +-- 4 files changed, 11 insertions(+), 6 deletions(-) create mode 100644 Misc/NEWS.d/next/Build/2024-02-08-17-38-56.gh-issue-113632.y9KIGb.rst diff --git a/Doc/whatsnew/3.13.rst b/Doc/whatsnew/3.13.rst index 50a2a69c75ac70..b05e4badc9e58b 100644 --- a/Doc/whatsnew/3.13.rst +++ b/Doc/whatsnew/3.13.rst @@ -1339,6 +1339,12 @@ Build Changes :ref:`limited C API `. (Contributed by Victor Stinner in :gh:`85283`.) +* ``wasm32-wasi`` is now a tier 2 platform. + (Contributed by Brett Cannon in :gh:`115192`.) + +* ``wasm32-emscripten`` is no longer a supported platform. + (Contributed by Brett Cannon in :gh:`115192`.) + C API Changes ============= diff --git a/Misc/NEWS.d/next/Build/2024-02-08-17-38-56.gh-issue-113632.y9KIGb.rst b/Misc/NEWS.d/next/Build/2024-02-08-17-38-56.gh-issue-113632.y9KIGb.rst new file mode 100644 index 00000000000000..8b02b1b2cd08c9 --- /dev/null +++ b/Misc/NEWS.d/next/Build/2024-02-08-17-38-56.gh-issue-113632.y9KIGb.rst @@ -0,0 +1,2 @@ +Promote WASI to a tier 2 platform and drop Emscripten from tier 3 in +configure.ac. 
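A quick illustration of the ``Enum._name_`` and ``Enum._value_`` attributes documented in the enum patch above (gh-101100). This is a minimal sketch written for this series, not CPython code; the ``Color`` and ``TwoDigit`` classes are invented, and ``TwoDigit`` only demonstrates that ``_value_`` may be assigned in ``__new__``:

    from enum import Enum

    class Color(Enum):
        RED = 1
        GREEN = 2

    # _name_ and _value_ back the public .name and .value properties.
    assert Color.RED._name_ == "RED" == Color.RED.name
    assert Color.RED._value_ == 1 == Color.RED.value

    class TwoDigit(int, Enum):
        # A custom __new__ may normalize the value and store it in _value_.
        def __new__(cls, value):
            member = int.__new__(cls, value % 100)
            member._value_ = value % 100
            return member
        YEAR_99 = 1999

    assert TwoDigit.YEAR_99 == 99
    assert TwoDigit.YEAR_99._value_ == 99
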
diff --git a/configure b/configure index 0375565c294552..705a778cafced3 100755 --- a/configure +++ b/configure @@ -6805,6 +6805,8 @@ case $host/$ac_cv_cc_name in #( aarch64-*-linux-gnu/clang) : PY_SUPPORT_TIER=2 ;; #( powerpc64le-*-linux-gnu/gcc) : + PY_SUPPORT_TIER=2 ;; #( + wasm32-unknown-wasi/clang) : PY_SUPPORT_TIER=2 ;; #( x86_64-*-linux-gnu/clang) : PY_SUPPORT_TIER=2 ;; #( @@ -6817,10 +6819,6 @@ case $host/$ac_cv_cc_name in #( PY_SUPPORT_TIER=3 ;; #( s390x-*-linux-gnu/gcc) : PY_SUPPORT_TIER=3 ;; #( - wasm32-unknown-emscripten/clang) : - PY_SUPPORT_TIER=3 ;; #( - wasm32-unknown-wasi/clang) : - PY_SUPPORT_TIER=3 ;; #( x86_64-*-freebsd*/clang) : PY_SUPPORT_TIER=3 ;; #( *) : diff --git a/configure.ac b/configure.ac index e121e893a1d0d9..dee7ed552b370f 100644 --- a/configure.ac +++ b/configure.ac @@ -973,14 +973,13 @@ AS_CASE([$host/$ac_cv_cc_name], [aarch64-*-linux-gnu/gcc], [PY_SUPPORT_TIER=2], dnl Linux ARM64, glibc, gcc+clang [aarch64-*-linux-gnu/clang], [PY_SUPPORT_TIER=2], [powerpc64le-*-linux-gnu/gcc], [PY_SUPPORT_TIER=2], dnl Linux on PPC64 little endian, glibc, gcc + [wasm32-unknown-wasi/clang], [PY_SUPPORT_TIER=2], dnl WebAssembly System Interface, clang [x86_64-*-linux-gnu/clang], [PY_SUPPORT_TIER=2], dnl Linux on AMD64, any vendor, glibc, clang [aarch64-pc-windows-msvc/msvc], [PY_SUPPORT_TIER=3], dnl Windows ARM64, MSVC [armv7l-*-linux-gnueabihf/gcc], [PY_SUPPORT_TIER=3], dnl ARMv7 LE with hardware floats, any vendor, glibc, gcc [powerpc64le-*-linux-gnu/clang], [PY_SUPPORT_TIER=3], dnl Linux on PPC64 little endian, glibc, clang [s390x-*-linux-gnu/gcc], [PY_SUPPORT_TIER=3], dnl Linux on 64bit s390x (big endian), glibc, gcc - [wasm32-unknown-emscripten/clang], [PY_SUPPORT_TIER=3], dnl WebAssembly Emscripten - [wasm32-unknown-wasi/clang], [PY_SUPPORT_TIER=3], dnl WebAssembly System Interface [x86_64-*-freebsd*/clang], [PY_SUPPORT_TIER=3], dnl FreeBSD on AMD64 [PY_SUPPORT_TIER=0] ) From 846fd721d518dda88a7d427ec3d2c03c45d9fa90 Mon Sep 17 00:00:00 2001 From: Serhiy Storchaka Date: Fri, 9 Feb 2024 12:36:12 +0200 Subject: [PATCH 03/11] gh-115059: Flush the underlying write buffer in io.BufferedRandom.read1() (GH-115163) --- Lib/test/test_io.py | 52 +++++++++++++++++++ ...-02-08-13-26-14.gh-issue-115059.DqP9dr.rst | 1 + Modules/_io/bufferedio.c | 10 ++++ 3 files changed, 63 insertions(+) create mode 100644 Misc/NEWS.d/next/Library/2024-02-08-13-26-14.gh-issue-115059.DqP9dr.rst diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py index 73669ecc792776..a24579dcc878cf 100644 --- a/Lib/test/test_io.py +++ b/Lib/test/test_io.py @@ -2497,6 +2497,28 @@ def test_interleaved_read_write(self): f.flush() self.assertEqual(raw.getvalue(), b'a2c') + def test_read1_after_write(self): + with self.BytesIO(b'abcdef') as raw: + with self.tp(raw, 3) as f: + f.write(b"1") + self.assertEqual(f.read1(1), b'b') + f.flush() + self.assertEqual(raw.getvalue(), b'1bcdef') + with self.BytesIO(b'abcdef') as raw: + with self.tp(raw, 3) as f: + f.write(b"1") + self.assertEqual(f.read1(), b'bcd') + f.flush() + self.assertEqual(raw.getvalue(), b'1bcdef') + with self.BytesIO(b'abcdef') as raw: + with self.tp(raw, 3) as f: + f.write(b"1") + # XXX: read(100) returns different numbers of bytes + # in Python and C implementations. 
+ self.assertEqual(f.read1(100)[:3], b'bcd') + f.flush() + self.assertEqual(raw.getvalue(), b'1bcdef') + def test_interleaved_readline_write(self): with self.BytesIO(b'ab\ncdef\ng\n') as raw: with self.tp(raw) as f: @@ -2509,6 +2531,36 @@ def test_interleaved_readline_write(self): f.flush() self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n') + def test_xxx(self): + with self.BytesIO(b'abcdefgh') as raw: + with self.tp(raw) as f: + f.write(b'123') + self.assertEqual(f.read(), b'defgh') + f.write(b'456') + f.flush() + self.assertEqual(raw.getvalue(), b'123defgh456') + with self.BytesIO(b'abcdefgh') as raw: + with self.tp(raw) as f: + f.write(b'123') + self.assertEqual(f.read(3), b'def') + f.write(b'456') + f.flush() + self.assertEqual(raw.getvalue(), b'123def456') + with self.BytesIO(b'abcdefgh') as raw: + with self.tp(raw) as f: + f.write(b'123') + self.assertEqual(f.read1(), b'defgh') + f.write(b'456') + f.flush() + self.assertEqual(raw.getvalue(), b'123defgh456') + with self.BytesIO(b'abcdefgh') as raw: + with self.tp(raw) as f: + f.write(b'123') + self.assertEqual(f.read1(3), b'def') + f.write(b'456') + f.flush() + self.assertEqual(raw.getvalue(), b'123def456') + # You can't construct a BufferedRandom over a non-seekable stream. test_unseekable = None diff --git a/Misc/NEWS.d/next/Library/2024-02-08-13-26-14.gh-issue-115059.DqP9dr.rst b/Misc/NEWS.d/next/Library/2024-02-08-13-26-14.gh-issue-115059.DqP9dr.rst new file mode 100644 index 00000000000000..331baedd3b24c5 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2024-02-08-13-26-14.gh-issue-115059.DqP9dr.rst @@ -0,0 +1 @@ +:meth:`io.BufferedRandom.read1` now flushes the underlying write buffer. diff --git a/Modules/_io/bufferedio.c b/Modules/_io/bufferedio.c index f02207ace9f3d2..8ebe9ec7095586 100644 --- a/Modules/_io/bufferedio.c +++ b/Modules/_io/bufferedio.c @@ -1050,6 +1050,16 @@ _io__Buffered_read1_impl(buffered *self, Py_ssize_t n) Py_DECREF(res); return NULL; } + /* Flush the write buffer if necessary */ + if (self->writable) { + PyObject *r = buffered_flush_and_rewind_unlocked(self); + if (r == NULL) { + LEAVE_BUFFERED(self) + Py_DECREF(res); + return NULL; + } + Py_DECREF(r); + } _bufferedreader_reset_buf(self); r = _bufferedreader_raw_read(self, PyBytes_AS_STRING(res), n); LEAVE_BUFFERED(self) From 769d4448260aaec687d9306950225316f9faefce Mon Sep 17 00:00:00 2001 From: "Erlend E. Aasland" Date: Fri, 9 Feb 2024 15:11:36 +0100 Subject: [PATCH 04/11] Docs: correctly link to code objects (#115214) --- Doc/c-api/code.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Doc/c-api/code.rst b/Doc/c-api/code.rst index 5082b0cb6ad3f3..11c12e685fcace 100644 --- a/Doc/c-api/code.rst +++ b/Doc/c-api/code.rst @@ -22,12 +22,13 @@ bound into a function. .. c:var:: PyTypeObject PyCode_Type This is an instance of :c:type:`PyTypeObject` representing the Python - :class:`code` type. + :ref:`code object `. .. c:function:: int PyCode_Check(PyObject *co) - Return true if *co* is a :class:`code` object. This function always succeeds. + Return true if *co* is a :ref:`code object `. + This function always succeeds. .. 
c:function:: int PyCode_GetNumFree(PyCodeObject *co) From 31633f4473966b3bcd470440bab7f348711be48f Mon Sep 17 00:00:00 2001 From: Sam Gross Date: Fri, 9 Feb 2024 09:23:12 -0500 Subject: [PATCH 05/11] gh-115184: Fix refleak tracking issues in free-threaded build (#115188) Fixes a few issues related to refleak tracking in the free-threaded build: - Count blocks in abandoned segments - Call `_mi_page_free_collect` earlier during heap traversal in order to get an accurate count of blocks in use. - Add missing refcount tracking in `_Py_DecRefSharedDebug` and `_Py_ExplicitMergeRefcount`. - Pause threads in `get_num_global_allocated_blocks` to ensure that traversing the mimalloc heaps is safe. --- Objects/mimalloc/heap.c | 2 +- Objects/object.c | 11 +++++++---- Objects/obmalloc.c | 9 ++++++++- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/Objects/mimalloc/heap.c b/Objects/mimalloc/heap.c index 164b28f0fab240..154dad0b128480 100644 --- a/Objects/mimalloc/heap.c +++ b/Objects/mimalloc/heap.c @@ -538,7 +538,6 @@ bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t *page, mi_ mi_assert(page != NULL); if (page == NULL) return true; - _mi_page_free_collect(page,true); mi_assert_internal(page->local_free == NULL); if (page->used == 0) return true; @@ -635,6 +634,7 @@ bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t *page, mi_ typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg); void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page) { + _mi_page_free_collect(page,true); const size_t bsize = mi_page_block_size(page); const size_t ubsize = mi_page_usable_block_size(page); area->reserved = page->reserved * bsize; diff --git a/Objects/object.c b/Objects/object.c index bbf7f98ae3daf9..37a4b7a417e35f 100644 --- a/Objects/object.c +++ b/Objects/object.c @@ -346,6 +346,9 @@ _Py_DecRefSharedDebug(PyObject *o, const char *filename, int lineno) if (should_queue) { // TODO: the inter-thread queue is not yet implemented. For now, // we just merge the refcount here. +#ifdef Py_REF_DEBUG + _Py_IncRefTotal(_PyInterpreterState_GET()); +#endif Py_ssize_t refcount = _Py_ExplicitMergeRefcount(o, -1); if (refcount == 0) { _Py_Dealloc(o); @@ -399,10 +402,6 @@ _Py_ExplicitMergeRefcount(PyObject *op, Py_ssize_t extra) Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&op->ob_ref_shared); do { refcnt = Py_ARITHMETIC_RIGHT_SHIFT(Py_ssize_t, shared, _Py_REF_SHARED_SHIFT); - if (_Py_REF_IS_MERGED(shared)) { - return refcnt; - } - refcnt += (Py_ssize_t)op->ob_ref_local; refcnt += extra; @@ -410,6 +409,10 @@ _Py_ExplicitMergeRefcount(PyObject *op, Py_ssize_t extra) } while (!_Py_atomic_compare_exchange_ssize(&op->ob_ref_shared, &shared, new_shared)); +#ifdef Py_REF_DEBUG + _Py_AddRefTotal(_PyInterpreterState_GET(), extra); +#endif + _Py_atomic_store_uint32_relaxed(&op->ob_ref_local, 0); _Py_atomic_store_uintptr_relaxed(&op->ob_tid, 0); return refcnt; diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c index bea4ea85332bdd..6a12c3dca38b36 100644 --- a/Objects/obmalloc.c +++ b/Objects/obmalloc.c @@ -1073,7 +1073,12 @@ get_mimalloc_allocated_blocks(PyInterpreterState *interp) mi_heap_visit_blocks(heap, false, &count_blocks, &allocated_blocks); } } - // TODO(sgross): count blocks in abandoned segments. 
+ + mi_abandoned_pool_t *pool = &interp->mimalloc.abandoned_pool; + for (uint8_t tag = 0; tag < _Py_MIMALLOC_HEAP_COUNT; tag++) { + _mi_abandoned_pool_visit_blocks(pool, tag, false, &count_blocks, + &allocated_blocks); + } #else // TODO(sgross): this only counts the current thread's blocks. mi_heap_t *heap = mi_heap_get_default(); @@ -1189,6 +1194,7 @@ get_num_global_allocated_blocks(_PyRuntimeState *runtime) } } else { + _PyEval_StopTheWorldAll(&_PyRuntime); HEAD_LOCK(runtime); PyInterpreterState *interp = PyInterpreterState_Head(); assert(interp != NULL); @@ -1208,6 +1214,7 @@ get_num_global_allocated_blocks(_PyRuntimeState *runtime) } } HEAD_UNLOCK(runtime); + _PyEval_StartTheWorldAll(&_PyRuntime); #ifdef Py_DEBUG assert(got_main); #endif From f8931adc597aa696a0f60439e8f9a9047d51ef1c Mon Sep 17 00:00:00 2001 From: Kirill Podoprigora Date: Fri, 9 Feb 2024 19:59:41 +0300 Subject: [PATCH 06/11] gh-115142: Skip test_optimizer if _testinternalcapi module is not available (GH-115175) --- Lib/test/test_optimizer.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Lib/test/test_optimizer.py b/Lib/test/test_optimizer.py index b56bf3cfd9560e..c8554c40df4b2d 100644 --- a/Lib/test/test_optimizer.py +++ b/Lib/test/test_optimizer.py @@ -1,6 +1,9 @@ -import _testinternalcapi import unittest import types +from test.support import import_helper + + +_testinternalcapi = import_helper.import_module("_testinternalcapi") class TestRareEventCounters(unittest.TestCase): From 5a173efa693a053bf4a059c82c1c06c82a9fa8fb Mon Sep 17 00:00:00 2001 From: Peter Lazorchak Date: Fri, 9 Feb 2024 09:06:14 -0800 Subject: [PATCH 07/11] Add Peter L to ACKS (GH-115222) --- Misc/ACKS | 1 + 1 file changed, 1 insertion(+) diff --git a/Misc/ACKS b/Misc/ACKS index 466023f390a421..8a80e02ecba26a 100644 --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1051,6 +1051,7 @@ Mark Lawrence Chris Laws Michael Layzell Michael Lazar +Peter Lazorchak Brian Leair Mathieu Leduc-Hamel Amandine Lee From a225520af941fb125a4ede77a617501dfb8b46da Mon Sep 17 00:00:00 2001 From: Carl Meyer Date: Fri, 9 Feb 2024 12:19:09 -0700 Subject: [PATCH 08/11] gh-112903: Handle non-types in _BaseGenericAlias.__mro_entries__() (#115191) Co-authored-by: Alex Waygood --- Lib/test/test_typing.py | 69 +++++++++++++++++++ Lib/typing.py | 22 +++++- ...-02-08-17-04-58.gh-issue-112903.SN_vUs.rst | 2 + 3 files changed, 92 insertions(+), 1 deletion(-) create mode 100644 Misc/NEWS.d/next/Library/2024-02-08-17-04-58.gh-issue-112903.SN_vUs.rst diff --git a/Lib/test/test_typing.py b/Lib/test/test_typing.py index b684af4f33ed71..58566c4bfc821c 100644 --- a/Lib/test/test_typing.py +++ b/Lib/test/test_typing.py @@ -4920,6 +4920,75 @@ class B(Generic[S]): ... class C(List[int], B): ... self.assertEqual(C.__mro__, (C, list, B, Generic, object)) + def test_multiple_inheritance_non_type_with___mro_entries__(self): + class GoodEntries: + def __mro_entries__(self, bases): + return (object,) + + class A(List[int], GoodEntries()): ... + + self.assertEqual(A.__mro__, (A, list, Generic, object)) + + def test_multiple_inheritance_non_type_without___mro_entries__(self): + # Error should be from the type machinery, not from typing.py + with self.assertRaisesRegex(TypeError, r"^bases must be types"): + class A(List[int], object()): ... 
+ + def test_multiple_inheritance_non_type_bad___mro_entries__(self): + class BadEntries: + def __mro_entries__(self, bases): + return None + + # Error should be from the type machinery, not from typing.py + with self.assertRaisesRegex( + TypeError, + r"^__mro_entries__ must return a tuple", + ): + class A(List[int], BadEntries()): ... + + def test_multiple_inheritance___mro_entries___returns_non_type(self): + class BadEntries: + def __mro_entries__(self, bases): + return (object(),) + + # Error should be from the type machinery, not from typing.py + with self.assertRaisesRegex( + TypeError, + r"^bases must be types", + ): + class A(List[int], BadEntries()): ... + + def test_multiple_inheritance_with_genericalias(self): + class A(typing.Sized, list[int]): ... + + self.assertEqual( + A.__mro__, + (A, collections.abc.Sized, Generic, list, object), + ) + + def test_multiple_inheritance_with_genericalias_2(self): + T = TypeVar("T") + + class BaseSeq(typing.Sequence[T]): ... + class MySeq(List[T], BaseSeq[T]): ... + + self.assertEqual( + MySeq.__mro__, + ( + MySeq, + list, + BaseSeq, + collections.abc.Sequence, + collections.abc.Reversible, + collections.abc.Collection, + collections.abc.Sized, + collections.abc.Iterable, + collections.abc.Container, + Generic, + object, + ), + ) + def test_init_subclass_super_called(self): class FinalException(Exception): pass diff --git a/Lib/typing.py b/Lib/typing.py index d278b4effc7eba..347373f00956c7 100644 --- a/Lib/typing.py +++ b/Lib/typing.py @@ -1135,9 +1135,29 @@ def __mro_entries__(self, bases): res = [] if self.__origin__ not in bases: res.append(self.__origin__) + + # Check if any base that occurs after us in `bases` is either itself a + # subclass of Generic, or something which will add a subclass of Generic + # to `__bases__` via its `__mro_entries__`. If not, add Generic + # ourselves. The goal is to ensure that Generic (or a subclass) will + # appear exactly once in the final bases tuple. If we let it appear + # multiple times, we risk "can't form a consistent MRO" errors. i = bases.index(self) for b in bases[i+1:]: - if isinstance(b, _BaseGenericAlias) or issubclass(b, Generic): + if isinstance(b, _BaseGenericAlias): + break + if not isinstance(b, type): + meth = getattr(b, "__mro_entries__", None) + new_bases = meth(bases) if meth else None + if ( + isinstance(new_bases, tuple) and + any( + isinstance(b2, type) and issubclass(b2, Generic) + for b2 in new_bases + ) + ): + break + elif issubclass(b, Generic): break else: res.append(Generic) diff --git a/Misc/NEWS.d/next/Library/2024-02-08-17-04-58.gh-issue-112903.SN_vUs.rst b/Misc/NEWS.d/next/Library/2024-02-08-17-04-58.gh-issue-112903.SN_vUs.rst new file mode 100644 index 00000000000000..e27f5832553c13 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2024-02-08-17-04-58.gh-issue-112903.SN_vUs.rst @@ -0,0 +1,2 @@ +Fix "issubclass() arg 1 must be a class" errors in certain cases of multiple +inheritance with generic aliases (regression in early 3.13 alpha releases). From a3af3cb4f424034b56404704fdf8f18e8c0a9982 Mon Sep 17 00:00:00 2001 From: Sam Gross Date: Fri, 9 Feb 2024 17:08:32 -0500 Subject: [PATCH 09/11] gh-110481: Implement inter-thread queue for biased reference counting (#114824) Biased reference counting maintains two refcount fields in each object: `ob_ref_local` and `ob_ref_shared`. The true refcount is the sum of these two fields. 
In some cases, when refcounting operations are split across threads, the ob_ref_shared field can be negative (although the total refcount must be at least zero). In this case, the thread that decremented the refcount requests that the owning thread give up ownership and merge the refcount fields. --- Include/internal/pycore_brc.h | 74 +++++++ Include/internal/pycore_ceval.h | 1 + Include/internal/pycore_interp.h | 1 + Include/internal/pycore_object_stack.h | 6 + Include/internal/pycore_tstate.h | 2 + Lib/test/test_code.py | 1 + Lib/test/test_concurrent_futures/executor.py | 17 +- .../test_process_pool.py | 1 + Makefile.pre.in | 2 + Modules/posixmodule.c | 4 + Objects/dictobject.c | 16 +- Objects/object.c | 8 +- PCbuild/_freeze_module.vcxproj | 1 + PCbuild/_freeze_module.vcxproj.filters | 3 + PCbuild/pythoncore.vcxproj | 2 + PCbuild/pythoncore.vcxproj.filters | 6 + Python/brc.c | 198 ++++++++++++++++++ Python/ceval_gil.c | 8 + Python/gc_free_threading.c | 46 +++- Python/object_stack.c | 21 ++ Python/pystate.c | 11 + 21 files changed, 418 insertions(+), 11 deletions(-) create mode 100644 Include/internal/pycore_brc.h create mode 100644 Python/brc.c diff --git a/Include/internal/pycore_brc.h b/Include/internal/pycore_brc.h new file mode 100644 index 00000000000000..3453d83b57ca97 --- /dev/null +++ b/Include/internal/pycore_brc.h @@ -0,0 +1,74 @@ +#ifndef Py_INTERNAL_BRC_H +#define Py_INTERNAL_BRC_H + +#include +#include "pycore_llist.h" // struct llist_node +#include "pycore_lock.h" // PyMutex +#include "pycore_object_stack.h" // _PyObjectStack + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#ifdef Py_GIL_DISABLED + +// Prime number to avoid correlations with memory addresses. +#define _Py_BRC_NUM_BUCKETS 257 + +// Hash table bucket +struct _brc_bucket { + // Mutex protects both the bucket and thread state queues in this bucket. + PyMutex mutex; + + // Linked list of _PyThreadStateImpl objects hashed to this bucket. + struct llist_node root; +}; + +// Per-interpreter biased reference counting state +struct _brc_state { + // Hash table of thread states by thread-id. Thread states within a bucket + // are chained using a doubly-linked list. + struct _brc_bucket table[_Py_BRC_NUM_BUCKETS]; +}; + +// Per-thread biased reference counting state +struct _brc_thread_state { + // Linked-list of thread states per hash bucket + struct llist_node bucket_node; + + // Thread-id as determined by _PyThread_Id() + uintptr_t tid; + + // Objects with refcounts to be merged (protected by bucket mutex) + _PyObjectStack objects_to_merge; + + // Local stack of objects to be merged (not accessed by other threads) + _PyObjectStack local_objects_to_merge; +}; + +// Initialize/finalize the per-thread biased reference counting state +void _Py_brc_init_thread(PyThreadState *tstate); +void _Py_brc_remove_thread(PyThreadState *tstate); + +// Initialize per-interpreter state +void _Py_brc_init_state(PyInterpreterState *interp); + +void _Py_brc_after_fork(PyInterpreterState *interp); + +// Enqueues an object to be merged by it's owning thread (tid). This +// steals a reference to the object. +void _Py_brc_queue_object(PyObject *ob); + +// Merge the refcounts of queued objects for the current thread. 
+void _Py_brc_merge_refcounts(PyThreadState *tstate); + +#endif /* Py_GIL_DISABLED */ + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_BRC_H */ diff --git a/Include/internal/pycore_ceval.h b/Include/internal/pycore_ceval.h index a66af1389541dd..b158fc9ff5ebc1 100644 --- a/Include/internal/pycore_ceval.h +++ b/Include/internal/pycore_ceval.h @@ -206,6 +206,7 @@ void _PyEval_FrameClearAndPop(PyThreadState *tstate, _PyInterpreterFrame *frame) #define _PY_ASYNC_EXCEPTION_BIT 3 #define _PY_GC_SCHEDULED_BIT 4 #define _PY_EVAL_PLEASE_STOP_BIT 5 +#define _PY_EVAL_EXPLICIT_MERGE_BIT 6 /* Reserve a few bits for future use */ #define _PY_EVAL_EVENTS_BITS 8 diff --git a/Include/internal/pycore_interp.h b/Include/internal/pycore_interp.h index f7c332ed747cfa..31d88071e19d0c 100644 --- a/Include/internal/pycore_interp.h +++ b/Include/internal/pycore_interp.h @@ -201,6 +201,7 @@ struct _is { #if defined(Py_GIL_DISABLED) struct _mimalloc_interp_state mimalloc; + struct _brc_state brc; // biased reference counting state #endif // Per-interpreter state for the obmalloc allocator. For the main diff --git a/Include/internal/pycore_object_stack.h b/Include/internal/pycore_object_stack.h index 1dc1c1591525de..d042be2a98090a 100644 --- a/Include/internal/pycore_object_stack.h +++ b/Include/internal/pycore_object_stack.h @@ -1,6 +1,8 @@ #ifndef Py_INTERNAL_OBJECT_STACK_H #define Py_INTERNAL_OBJECT_STACK_H +#include "pycore_freelist.h" // _PyFreeListState + #ifdef __cplusplus extern "C" { #endif @@ -74,6 +76,10 @@ _PyObjectStack_Pop(_PyObjectStack *stack) return obj; } +// Merge src into dst, leaving src empty +extern void +_PyObjectStack_Merge(_PyObjectStack *dst, _PyObjectStack *src); + // Remove all items from the stack extern void _PyObjectStack_Clear(_PyObjectStack *stack); diff --git a/Include/internal/pycore_tstate.h b/Include/internal/pycore_tstate.h index 472fa08154e8f9..77a1dc59163d21 100644 --- a/Include/internal/pycore_tstate.h +++ b/Include/internal/pycore_tstate.h @@ -10,6 +10,7 @@ extern "C" { #include "pycore_freelist.h" // struct _Py_freelist_state #include "pycore_mimalloc.h" // struct _mimalloc_thread_state +#include "pycore_brc.h" // struct _brc_thread_state // Every PyThreadState is actually allocated as a _PyThreadStateImpl. The @@ -22,6 +23,7 @@ typedef struct _PyThreadStateImpl { #ifdef Py_GIL_DISABLED struct _mimalloc_thread_state mimalloc; struct _Py_freelist_state freelist_state; + struct _brc_thread_state brc; #endif } _PyThreadStateImpl; diff --git a/Lib/test/test_code.py b/Lib/test/test_code.py index d8fb826edeb681..46bebfc7af675b 100644 --- a/Lib/test/test_code.py +++ b/Lib/test/test_code.py @@ -865,6 +865,7 @@ def __init__(self, f, test): self.test = test def run(self): del self.f + gc_collect() self.test.assertEqual(LAST_FREED, 500) SetExtra(f.__code__, FREE_INDEX, ctypes.c_voidp(500)) diff --git a/Lib/test/test_concurrent_futures/executor.py b/Lib/test/test_concurrent_futures/executor.py index 1e7d4344740943..6a79fe69ec37cf 100644 --- a/Lib/test/test_concurrent_futures/executor.py +++ b/Lib/test/test_concurrent_futures/executor.py @@ -1,8 +1,10 @@ import threading import time +import unittest import weakref from concurrent import futures from test import support +from test.support import Py_GIL_DISABLED def mul(x, y): @@ -83,10 +85,21 @@ def test_no_stale_references(self): my_object_collected = threading.Event() my_object_callback = weakref.ref( my_object, lambda obj: my_object_collected.set()) - # Deliberately discarding the future. 
- self.executor.submit(my_object.my_method) + fut = self.executor.submit(my_object.my_method) del my_object + if Py_GIL_DISABLED: + # Due to biased reference counting, my_object might only be + # deallocated while the thread that created it runs -- if the + # thread is paused waiting on an event, it may not merge the + # refcount of the queued object. For that reason, we wait for the + # task to finish (so that it's no longer referenced) and force a + # GC to ensure that it is collected. + fut.result() # Wait for the task to finish. + support.gc_collect() + else: + del fut # Deliberately discard the future. + collected = my_object_collected.wait(timeout=support.SHORT_TIMEOUT) self.assertTrue(collected, "Stale reference not collected within timeout.") diff --git a/Lib/test/test_concurrent_futures/test_process_pool.py b/Lib/test/test_concurrent_futures/test_process_pool.py index 3e61b0c9387c6f..7fc59a05f3deac 100644 --- a/Lib/test/test_concurrent_futures/test_process_pool.py +++ b/Lib/test/test_concurrent_futures/test_process_pool.py @@ -98,6 +98,7 @@ def test_ressources_gced_in_workers(self): # explicitly destroy the object to ensure that EventfulGCObj.__del__() # is called while manager is still running. + support.gc_collect() obj = None support.gc_collect() diff --git a/Makefile.pre.in b/Makefile.pre.in index 07b2ec7adde78a..4dabe328ce0362 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -405,6 +405,7 @@ PYTHON_OBJS= \ Python/ast_opt.o \ Python/ast_unparse.o \ Python/bltinmodule.o \ + Python/brc.o \ Python/ceval.o \ Python/codecs.o \ Python/compile.o \ @@ -1081,6 +1082,7 @@ PYTHON_HEADERS= \ $(srcdir)/Include/internal/pycore_atexit.h \ $(srcdir)/Include/internal/pycore_bitutils.h \ $(srcdir)/Include/internal/pycore_blocks_output_buffer.h \ + $(srcdir)/Include/internal/pycore_brc.h \ $(srcdir)/Include/internal/pycore_bytes_methods.h \ $(srcdir)/Include/internal/pycore_bytesobject.h \ $(srcdir)/Include/internal/pycore_call.h \ diff --git a/Modules/posixmodule.c b/Modules/posixmodule.c index e26265fc874ebb..230c961a2ac3c0 100644 --- a/Modules/posixmodule.c +++ b/Modules/posixmodule.c @@ -637,6 +637,10 @@ PyOS_AfterFork_Child(void) tstate->native_thread_id = PyThread_get_thread_native_id(); #endif +#ifdef Py_GIL_DISABLED + _Py_brc_after_fork(tstate->interp); +#endif + status = _PyEval_ReInitThreads(tstate); if (_PyStatus_EXCEPTION(status)) { goto fatal_error; diff --git a/Objects/dictobject.c b/Objects/dictobject.c index 2df95e977a180f..9b1defa5cbc609 100644 --- a/Objects/dictobject.c +++ b/Objects/dictobject.c @@ -5989,6 +5989,18 @@ _PyObject_MakeDictFromInstanceAttributes(PyObject *obj, PyDictValues *values) return make_dict_from_instance_attributes(interp, keys, values); } +static bool +has_unique_reference(PyObject *op) +{ +#ifdef Py_GIL_DISABLED + return (_Py_IsOwnedByCurrentThread(op) && + op->ob_ref_local == 1 && + _Py_atomic_load_ssize_relaxed(&op->ob_ref_shared) == 0); +#else + return Py_REFCNT(op) == 1; +#endif +} + // Return true if the dict was dematerialized, false otherwise. 
bool _PyObject_MakeInstanceAttributesFromDict(PyObject *obj, PyDictOrValues *dorv) @@ -6005,7 +6017,9 @@ _PyObject_MakeInstanceAttributesFromDict(PyObject *obj, PyDictOrValues *dorv) return false; } assert(_PyType_HasFeature(Py_TYPE(obj), Py_TPFLAGS_HEAPTYPE)); - if (dict->ma_keys != CACHED_KEYS(Py_TYPE(obj)) || Py_REFCNT(dict) != 1) { + if (dict->ma_keys != CACHED_KEYS(Py_TYPE(obj)) || + !has_unique_reference((PyObject *)dict)) + { return false; } assert(dict->ma_values); diff --git a/Objects/object.c b/Objects/object.c index 37a4b7a417e35f..61e6131c6e99bb 100644 --- a/Objects/object.c +++ b/Objects/object.c @@ -2,6 +2,7 @@ /* Generic object operations; and implementation of None */ #include "Python.h" +#include "pycore_brc.h" // _Py_brc_queue_object() #include "pycore_call.h" // _PyObject_CallNoArgs() #include "pycore_ceval.h" // _Py_EnterRecursiveCallTstate() #include "pycore_context.h" // _PyContextTokenMissing_Type @@ -344,15 +345,10 @@ _Py_DecRefSharedDebug(PyObject *o, const char *filename, int lineno) &shared, new_shared)); if (should_queue) { - // TODO: the inter-thread queue is not yet implemented. For now, - // we just merge the refcount here. #ifdef Py_REF_DEBUG _Py_IncRefTotal(_PyInterpreterState_GET()); #endif - Py_ssize_t refcount = _Py_ExplicitMergeRefcount(o, -1); - if (refcount == 0) { - _Py_Dealloc(o); - } + _Py_brc_queue_object(o); } else if (new_shared == _Py_REF_MERGED) { // refcount is zero AND merged diff --git a/PCbuild/_freeze_module.vcxproj b/PCbuild/_freeze_module.vcxproj index 35788ec4503e8f..49f529ebbc2f9b 100644 --- a/PCbuild/_freeze_module.vcxproj +++ b/PCbuild/_freeze_module.vcxproj @@ -191,6 +191,7 @@ + diff --git a/PCbuild/_freeze_module.vcxproj.filters b/PCbuild/_freeze_module.vcxproj.filters index 7a44179e356105..5b1bd7552b4cd9 100644 --- a/PCbuild/_freeze_module.vcxproj.filters +++ b/PCbuild/_freeze_module.vcxproj.filters @@ -46,6 +46,9 @@ Source Files + + Python + Source Files diff --git a/PCbuild/pythoncore.vcxproj b/PCbuild/pythoncore.vcxproj index e1ff97659659ee..4cc0ca4b9af8de 100644 --- a/PCbuild/pythoncore.vcxproj +++ b/PCbuild/pythoncore.vcxproj @@ -206,6 +206,7 @@ + @@ -553,6 +554,7 @@ + diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters index 4c55f23006b2f0..ceaa21217267cf 100644 --- a/PCbuild/pythoncore.vcxproj.filters +++ b/PCbuild/pythoncore.vcxproj.filters @@ -546,6 +546,9 @@ Include\internal + + Include\internal + Include\internal @@ -1253,6 +1256,9 @@ Python + + Python + Python diff --git a/Python/brc.c b/Python/brc.c new file mode 100644 index 00000000000000..f1fd57a2964cf5 --- /dev/null +++ b/Python/brc.c @@ -0,0 +1,198 @@ +// Implementation of biased reference counting inter-thread queue. +// +// Biased reference counting maintains two refcount fields in each object: +// ob_ref_local and ob_ref_shared. The true refcount is the sum of these two +// fields. In some cases, when refcounting operations are split across threads, +// the ob_ref_shared field can be negative (although the total refcount must +// be at least zero). In this case, the thread that decremented the refcount +// requests that the owning thread give up ownership and merge the refcount +// fields. This file implements the mechanism for doing so. +// +// Each thread state maintains a queue of objects whose refcounts it should +// merge. The thread states are stored in a per-interpreter hash table by +// thread id. The hash table has a fixed size and uses a linked list to store +// thread states within each bucket. 
+// +// The queueing thread uses the eval breaker mechanism to notify the owning +// thread that it has objects to merge. Additionaly, all queued objects are +// merged during GC. +#include "Python.h" +#include "pycore_object.h" // _Py_ExplicitMergeRefcount +#include "pycore_brc.h" // struct _brc_thread_state +#include "pycore_ceval.h" // _Py_set_eval_breaker_bit +#include "pycore_llist.h" // struct llist_node +#include "pycore_pystate.h" // _PyThreadStateImpl + +#ifdef Py_GIL_DISABLED + +// Get the hashtable bucket for a given thread id. +static struct _brc_bucket * +get_bucket(PyInterpreterState *interp, uintptr_t tid) +{ + return &interp->brc.table[tid % _Py_BRC_NUM_BUCKETS]; +} + +// Find the thread state in a hash table bucket by thread id. +static _PyThreadStateImpl * +find_thread_state(struct _brc_bucket *bucket, uintptr_t thread_id) +{ + struct llist_node *node; + llist_for_each(node, &bucket->root) { + // Get the containing _PyThreadStateImpl from the linked-list node. + _PyThreadStateImpl *ts = llist_data(node, _PyThreadStateImpl, + brc.bucket_node); + if (ts->brc.tid == thread_id) { + return ts; + } + } + return NULL; +} + +// Enqueue an object to be merged by the owning thread. This steals a +// reference to the object. +void +_Py_brc_queue_object(PyObject *ob) +{ + PyInterpreterState *interp = _PyInterpreterState_GET(); + + uintptr_t ob_tid = _Py_atomic_load_uintptr(&ob->ob_tid); + if (ob_tid == 0) { + // The owning thread may have concurrently decided to merge the + // refcount fields. + Py_DECREF(ob); + return; + } + + struct _brc_bucket *bucket = get_bucket(interp, ob_tid); + PyMutex_Lock(&bucket->mutex); + _PyThreadStateImpl *tstate = find_thread_state(bucket, ob_tid); + if (tstate == NULL) { + // If we didn't find the owning thread then it must have already exited. + // It's safe (and necessary) to merge the refcount. Subtract one when + // merging because we've stolen a reference. + Py_ssize_t refcount = _Py_ExplicitMergeRefcount(ob, -1); + PyMutex_Unlock(&bucket->mutex); + if (refcount == 0) { + _Py_Dealloc(ob); + } + return; + } + + if (_PyObjectStack_Push(&tstate->brc.objects_to_merge, ob) < 0) { + PyMutex_Unlock(&bucket->mutex); + + // Fall back to stopping all threads and manually merging the refcount + // if we can't enqueue the object to be merged. + _PyEval_StopTheWorld(interp); + Py_ssize_t refcount = _Py_ExplicitMergeRefcount(ob, -1); + _PyEval_StartTheWorld(interp); + + if (refcount == 0) { + _Py_Dealloc(ob); + } + return; + } + + // Notify owning thread + _Py_set_eval_breaker_bit(interp, _PY_EVAL_EXPLICIT_MERGE_BIT, 1); + + PyMutex_Unlock(&bucket->mutex); +} + +static void +merge_queued_objects(_PyObjectStack *to_merge) +{ + PyObject *ob; + while ((ob = _PyObjectStack_Pop(to_merge)) != NULL) { + // Subtract one when merging because the queue had a reference. + Py_ssize_t refcount = _Py_ExplicitMergeRefcount(ob, -1); + if (refcount == 0) { + _Py_Dealloc(ob); + } + } +} + +// Process this thread's queue of objects to merge. +void +_Py_brc_merge_refcounts(PyThreadState *tstate) +{ + struct _brc_thread_state *brc = &((_PyThreadStateImpl *)tstate)->brc; + struct _brc_bucket *bucket = get_bucket(tstate->interp, brc->tid); + + // Append all objects into a local stack. We don't want to hold the lock + // while calling destructors. 
+ PyMutex_Lock(&bucket->mutex); + _PyObjectStack_Merge(&brc->local_objects_to_merge, &brc->objects_to_merge); + PyMutex_Unlock(&bucket->mutex); + + // Process the local stack until it's empty + merge_queued_objects(&brc->local_objects_to_merge); +} + +void +_Py_brc_init_state(PyInterpreterState *interp) +{ + struct _brc_state *brc = &interp->brc; + for (Py_ssize_t i = 0; i < _Py_BRC_NUM_BUCKETS; i++) { + llist_init(&brc->table[i].root); + } +} + +void +_Py_brc_init_thread(PyThreadState *tstate) +{ + struct _brc_thread_state *brc = &((_PyThreadStateImpl *)tstate)->brc; + brc->tid = _Py_ThreadId(); + + // Add ourself to the hashtable + struct _brc_bucket *bucket = get_bucket(tstate->interp, brc->tid); + PyMutex_Lock(&bucket->mutex); + llist_insert_tail(&bucket->root, &brc->bucket_node); + PyMutex_Unlock(&bucket->mutex); +} + +void +_Py_brc_remove_thread(PyThreadState *tstate) +{ + struct _brc_thread_state *brc = &((_PyThreadStateImpl *)tstate)->brc; + struct _brc_bucket *bucket = get_bucket(tstate->interp, brc->tid); + + // We need to fully process any objects to merge before removing ourself + // from the hashtable. It is not safe to perform any refcount operations + // after we are removed. After that point, other threads treat our objects + // as abandoned and may merge the objects' refcounts directly. + bool empty = false; + while (!empty) { + // Process the local stack until it's empty + merge_queued_objects(&brc->local_objects_to_merge); + + PyMutex_Lock(&bucket->mutex); + empty = (brc->objects_to_merge.head == NULL); + if (empty) { + llist_remove(&brc->bucket_node); + } + else { + _PyObjectStack_Merge(&brc->local_objects_to_merge, + &brc->objects_to_merge); + } + PyMutex_Unlock(&bucket->mutex); + } + + assert(brc->local_objects_to_merge.head == NULL); + assert(brc->objects_to_merge.head == NULL); +} + +void +_Py_brc_after_fork(PyInterpreterState *interp) +{ + // Unlock all bucket mutexes. Some of the buckets may be locked because + // locks can be handed off to a parked thread (see lock.c). We don't have + // to worry about consistency here, becuase no thread can be actively + // modifying a bucket, but it might be paused (not yet woken up) on a + // PyMutex_Lock while holding that lock. + for (Py_ssize_t i = 0; i < _Py_BRC_NUM_BUCKETS; i++) { + _PyMutex_at_fork_reinit(&interp->brc.table[i].mutex); + } +} + +#endif /* Py_GIL_DISABLED */ diff --git a/Python/ceval_gil.c b/Python/ceval_gil.c index ad90359318761a..deb9741291fca7 100644 --- a/Python/ceval_gil.c +++ b/Python/ceval_gil.c @@ -980,6 +980,14 @@ _Py_HandlePending(PyThreadState *tstate) } } +#ifdef Py_GIL_DISABLED + /* Objects with refcounts to merge */ + if (_Py_eval_breaker_bit_is_set(interp, _PY_EVAL_EXPLICIT_MERGE_BIT)) { + _Py_set_eval_breaker_bit(interp, _PY_EVAL_EXPLICIT_MERGE_BIT, 0); + _Py_brc_merge_refcounts(tstate); + } +#endif + /* GC scheduled to run */ if (_Py_eval_breaker_bit_is_set(interp, _PY_GC_SCHEDULED_BIT)) { _Py_set_eval_breaker_bit(interp, _PY_GC_SCHEDULED_BIT, 0); diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c index 8fbcdb15109b76..5d3b097dee93e8 100644 --- a/Python/gc_free_threading.c +++ b/Python/gc_free_threading.c @@ -1,5 +1,6 @@ // Cyclic garbage collector implementation for free-threaded build. 
#include "Python.h" +#include "pycore_brc.h" // struct _brc_thread_state #include "pycore_ceval.h" // _Py_set_eval_breaker_bit() #include "pycore_context.h" #include "pycore_dict.h" // _PyDict_MaybeUntrack() @@ -152,8 +153,7 @@ gc_decref(PyObject *op) op->ob_tid -= 1; } -// Merge refcounts while the world is stopped. -static void +static Py_ssize_t merge_refcount(PyObject *op, Py_ssize_t extra) { assert(_PyInterpreterState_GET()->stoptheworld.world_stopped); @@ -169,6 +169,7 @@ merge_refcount(PyObject *op, Py_ssize_t extra) op->ob_tid = 0; op->ob_ref_local = 0; op->ob_ref_shared = _Py_REF_SHARED(refcount, _Py_REF_MERGED); + return refcount; } static void @@ -282,6 +283,41 @@ gc_visit_heaps(PyInterpreterState *interp, mi_block_visit_fun *visitor, return err; } +static void +merge_queued_objects(_PyThreadStateImpl *tstate, struct collection_state *state) +{ + struct _brc_thread_state *brc = &tstate->brc; + _PyObjectStack_Merge(&brc->local_objects_to_merge, &brc->objects_to_merge); + + PyObject *op; + while ((op = _PyObjectStack_Pop(&brc->local_objects_to_merge)) != NULL) { + // Subtract one when merging because the queue had a reference. + Py_ssize_t refcount = merge_refcount(op, -1); + + if (!_PyObject_GC_IS_TRACKED(op) && refcount == 0) { + // GC objects with zero refcount are handled subsequently by the + // GC as if they were cyclic trash, but we have to handle dead + // non-GC objects here. Add one to the refcount so that we can + // decref and deallocate the object once we start the world again. + op->ob_ref_shared += (1 << _Py_REF_SHARED_SHIFT); +#ifdef Py_REF_DEBUG + _Py_IncRefTotal(_PyInterpreterState_GET()); +#endif + worklist_push(&state->objs_to_decref, op); + } + } +} + +static void +merge_all_queued_objects(PyInterpreterState *interp, struct collection_state *state) +{ + HEAD_LOCK(&_PyRuntime); + for (PyThreadState *p = interp->threads.head; p != NULL; p = p->next) { + merge_queued_objects((_PyThreadStateImpl *)p, state); + } + HEAD_UNLOCK(&_PyRuntime); +} + // Subtract an incoming reference from the computed "gc_refs" refcount. static int visit_decref(PyObject *op, void *arg) @@ -927,6 +963,9 @@ static void gc_collect_internal(PyInterpreterState *interp, struct collection_state *state) { _PyEval_StopTheWorld(interp); + // merge refcounts for all queued objects + merge_all_queued_objects(interp, state); + // Find unreachable objects int err = deduce_unreachable_heap(interp, state); if (err < 0) { @@ -946,6 +985,9 @@ gc_collect_internal(PyInterpreterState *interp, struct collection_state *state) clear_weakrefs(state); _PyEval_StartTheWorld(interp); + // Deallocate any object from the refcount merge step + cleanup_worklist(&state->objs_to_decref); + // Call weakref callbacks and finalizers after unpausing other threads to // avoid potential deadlocks. 
call_weakref_callbacks(state); diff --git a/Python/object_stack.c b/Python/object_stack.c index 8544892eb71dcb..ced4460da00f44 100644 --- a/Python/object_stack.c +++ b/Python/object_stack.c @@ -67,6 +67,27 @@ _PyObjectStack_Clear(_PyObjectStack *queue) } } +void +_PyObjectStack_Merge(_PyObjectStack *dst, _PyObjectStack *src) +{ + if (src->head == NULL) { + return; + } + + if (dst->head != NULL) { + // First, append dst to the bottom of src + _PyObjectStackChunk *last = src->head; + while (last->prev != NULL) { + last = last->prev; + } + last->prev = dst->head; + } + + // Now that src has all the chunks, set dst to src + dst->head = src->head; + src->head = NULL; +} + void _PyObjectStackChunk_ClearFreeList(_PyFreeListState *free_lists, int is_finalization) { diff --git a/Python/pystate.c b/Python/pystate.c index e77e5bfa7e2df8..6cd034743ddf4c 100644 --- a/Python/pystate.c +++ b/Python/pystate.c @@ -611,6 +611,9 @@ init_interpreter(PyInterpreterState *interp, _PyGC_InitState(&interp->gc); PyConfig_InitPythonConfig(&interp->config); _PyType_InitCache(interp); +#ifdef Py_GIL_DISABLED + _Py_brc_init_state(interp); +#endif for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) { interp->monitors.tools[i] = 0; } @@ -1336,6 +1339,11 @@ init_threadstate(_PyThreadStateImpl *_tstate, tstate->datastack_limit = NULL; tstate->what_event = -1; +#ifdef Py_GIL_DISABLED + // Initialize biased reference counting inter-thread queue + _Py_brc_init_thread(tstate); +#endif + if (interp->stoptheworld.requested || _PyRuntime.stoptheworld.requested) { // Start in the suspended state if there is an ongoing stop-the-world. tstate->state = _Py_THREAD_SUSPENDED; @@ -1561,6 +1569,9 @@ PyThreadState_Clear(PyThreadState *tstate) _PyFreeListState *freelist_state = &((_PyThreadStateImpl*)tstate)->freelist_state; _Py_ClearFreeLists(freelist_state, 1); _PySlice_ClearCache(freelist_state); + + // Remove ourself from the biased reference counting table of threads. + _Py_brc_remove_thread(tstate); #endif _PyThreadState_ClearMimallocHeaps(tstate); From 564385612cdf72c2fa8e629a68225fb2cd3b3d99 Mon Sep 17 00:00:00 2001 From: dave-shawley Date: Fri, 9 Feb 2024 17:11:37 -0500 Subject: [PATCH 10/11] gh-115165: Fix `typing.Annotated` for immutable types (#115213) The return value from an annotated callable can raise any exception from __setattr__ for the `__orig_class__` property. 
--- Lib/test/test_typing.py | 21 +++++++++++++++++++ Lib/typing.py | 4 +++- ...-02-09-07-20-16.gh-issue-115165.yfJLXA.rst | 4 ++++ 3 files changed, 28 insertions(+), 1 deletion(-) create mode 100644 Misc/NEWS.d/next/Library/2024-02-09-07-20-16.gh-issue-115165.yfJLXA.rst diff --git a/Lib/test/test_typing.py b/Lib/test/test_typing.py index 58566c4bfc821c..c3a092f3af3009 100644 --- a/Lib/test/test_typing.py +++ b/Lib/test/test_typing.py @@ -4323,6 +4323,16 @@ class C(B[int]): c.bar = 'abc' self.assertEqual(c.__dict__, {'bar': 'abc'}) + def test_setattr_exceptions(self): + class Immutable[T]: + def __setattr__(self, key, value): + raise RuntimeError("immutable") + + # gh-115165: This used to cause RuntimeError to be raised + # when we tried to set `__orig_class__` on the `Immutable` instance + # returned by the `Immutable[int]()` call + self.assertIsInstance(Immutable[int](), Immutable) + def test_subscripted_generics_as_proxies(self): T = TypeVar('T') class C(Generic[T]): @@ -8561,6 +8571,17 @@ def test_instantiate_generic(self): self.assertEqual(MyCount([4, 4, 5]), {4: 2, 5: 1}) self.assertEqual(MyCount[int]([4, 4, 5]), {4: 2, 5: 1}) + def test_instantiate_immutable(self): + class C: + def __setattr__(self, key, value): + raise Exception("should be ignored") + + A = Annotated[C, "a decoration"] + # gh-115165: This used to cause RuntimeError to be raised + # when we tried to set `__orig_class__` on the `C` instance + # returned by the `A()` call + self.assertIsInstance(A(), C) + def test_cannot_instantiate_forward(self): A = Annotated["int", (5, 6)] with self.assertRaises(TypeError): diff --git a/Lib/typing.py b/Lib/typing.py index 347373f00956c7..914ddeaf504cd0 100644 --- a/Lib/typing.py +++ b/Lib/typing.py @@ -1127,7 +1127,9 @@ def __call__(self, *args, **kwargs): result = self.__origin__(*args, **kwargs) try: result.__orig_class__ = self - except AttributeError: + # Some objects raise TypeError (or something even more exotic) + # if you try to set attributes on them; we guard against that here + except Exception: pass return result diff --git a/Misc/NEWS.d/next/Library/2024-02-09-07-20-16.gh-issue-115165.yfJLXA.rst b/Misc/NEWS.d/next/Library/2024-02-09-07-20-16.gh-issue-115165.yfJLXA.rst new file mode 100644 index 00000000000000..73d3d001f07f3f --- /dev/null +++ b/Misc/NEWS.d/next/Library/2024-02-09-07-20-16.gh-issue-115165.yfJLXA.rst @@ -0,0 +1,4 @@ +Most exceptions are now ignored when attempting to set the ``__orig_class__`` +attribute on objects returned when calling :mod:`typing` generic aliases +(including generic aliases created using :data:`typing.Annotated`). +Previously only :exc:`AttributeError`` was ignored. Patch by Dave Shawley. 
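A rough sketch of the behaviour established by the patch above (gh-115165), mirroring its new ``test_setattr_exceptions`` and ``test_instantiate_immutable`` tests. The ``Frozen`` and ``Box`` classes are invented for illustration:

    from typing import Annotated, Generic, TypeVar

    T = TypeVar("T")

    class Frozen:
        def __setattr__(self, key, value):
            raise TypeError("immutable")

    # Calling the alias instantiates Frozen and then tries to set
    # __orig_class__ on the result; the TypeError is now swallowed.
    A = Annotated[Frozen, "metadata"]
    obj = A()
    assert isinstance(obj, Frozen)
    assert not hasattr(obj, "__orig_class__")

    class Box(Generic[T]):
        pass

    # The normal bookkeeping is unchanged for writable instances.
    assert Box[int]().__orig_class__ == Box[int]

Before this change only ``AttributeError`` was caught, so the ``A()`` call above propagated the ``TypeError`` raised by ``Frozen.__setattr__``.
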
From d4d5bae1471788b345155e8e93a2fe4ab92d09dc Mon Sep 17 00:00:00 2001 From: Donghee Na Date: Sat, 10 Feb 2024 09:57:04 +0900 Subject: [PATCH 11/11] gh-111968: Refactor _PyXXX_Fini to integrate with _PyObject_ClearFreeLists (gh-114899) --- Include/internal/pycore_context.h | 1 - Include/internal/pycore_floatobject.h | 1 - Include/internal/pycore_freelist.h | 10 ++++++++++ Include/internal/pycore_gc.h | 8 -------- Include/internal/pycore_genobject.h | 4 ---- Include/internal/pycore_list.h | 6 ------ Include/internal/pycore_object_stack.h | 3 --- Include/internal/pycore_sliceobject.h | 2 -- Include/internal/pycore_tuple.h | 1 - Objects/floatobject.c | 10 ---------- Objects/genobject.c | 11 ----------- Objects/listobject.c | 10 ---------- Objects/object.c | 15 +++++++++++++++ Objects/sliceobject.c | 12 ++++-------- Objects/tupleobject.c | 5 ----- Python/context.c | 11 ----------- Python/gc_free_threading.c | 2 +- Python/gc_gil.c | 2 +- Python/pylifecycle.c | 12 +++++------- Python/pystate.c | 19 ++----------------- 20 files changed, 38 insertions(+), 107 deletions(-) diff --git a/Include/internal/pycore_context.h b/Include/internal/pycore_context.h index 3284efba2b6f4c..ae5c47f195eb7f 100644 --- a/Include/internal/pycore_context.h +++ b/Include/internal/pycore_context.h @@ -14,7 +14,6 @@ extern PyTypeObject _PyContextTokenMissing_Type; /* runtime lifecycle */ PyStatus _PyContext_Init(PyInterpreterState *); -void _PyContext_Fini(_PyFreeListState *); /* other API */ diff --git a/Include/internal/pycore_floatobject.h b/Include/internal/pycore_floatobject.h index 038578e1f9680a..3767df5506d43f 100644 --- a/Include/internal/pycore_floatobject.h +++ b/Include/internal/pycore_floatobject.h @@ -15,7 +15,6 @@ extern "C" { extern void _PyFloat_InitState(PyInterpreterState *); extern PyStatus _PyFloat_InitTypes(PyInterpreterState *); -extern void _PyFloat_Fini(_PyFreeListState *); extern void _PyFloat_FiniType(PyInterpreterState *); diff --git a/Include/internal/pycore_freelist.h b/Include/internal/pycore_freelist.h index 82a42300991ecc..1bc551914794f0 100644 --- a/Include/internal/pycore_freelist.h +++ b/Include/internal/pycore_freelist.h @@ -125,6 +125,16 @@ typedef struct _Py_freelist_state { struct _Py_object_stack_state object_stacks; } _PyFreeListState; +extern void _PyObject_ClearFreeLists(_PyFreeListState *state, int is_finalization); +extern void _PyTuple_ClearFreeList(_PyFreeListState *state, int is_finalization); +extern void _PyFloat_ClearFreeList(_PyFreeListState *state, int is_finalization); +extern void _PyList_ClearFreeList(_PyFreeListState *state, int is_finalization); +extern void _PySlice_ClearFreeList(_PyFreeListState *state, int is_finalization); +extern void _PyDict_ClearFreeList(_PyFreeListState *state, int is_finalization); +extern void _PyAsyncGen_ClearFreeLists(_PyFreeListState *state, int is_finalization); +extern void _PyContext_ClearFreeList(_PyFreeListState *state, int is_finalization); +extern void _PyObjectStackChunk_ClearFreeList(_PyFreeListState *state, int is_finalization); + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_gc.h b/Include/internal/pycore_gc.h index 8d0bc2a218e48d..582a16bf5218ce 100644 --- a/Include/internal/pycore_gc.h +++ b/Include/internal/pycore_gc.h @@ -279,14 +279,6 @@ extern PyObject *_PyGC_GetReferrers(PyInterpreterState *interp, PyObject *objs); // Functions to clear types free lists extern void _PyGC_ClearAllFreeLists(PyInterpreterState *interp); -extern void _Py_ClearFreeLists(_PyFreeListState *state, int is_finalization); 
-extern void _PyTuple_ClearFreeList(_PyFreeListState *state, int is_finalization); -extern void _PyFloat_ClearFreeList(_PyFreeListState *state, int is_finalization); -extern void _PyList_ClearFreeList(_PyFreeListState *state, int is_finalization); -extern void _PySlice_ClearCache(_PyFreeListState *state); -extern void _PyDict_ClearFreeList(_PyFreeListState *state, int is_finalization); -extern void _PyAsyncGen_ClearFreeLists(_PyFreeListState *state, int is_finalization); -extern void _PyContext_ClearFreeList(_PyFreeListState *state, int is_finalization); extern void _Py_ScheduleGC(PyInterpreterState *interp); extern void _Py_RunGC(PyThreadState *tstate); diff --git a/Include/internal/pycore_genobject.h b/Include/internal/pycore_genobject.h index 5ad63658051e86..b2aa017598409f 100644 --- a/Include/internal/pycore_genobject.h +++ b/Include/internal/pycore_genobject.h @@ -26,10 +26,6 @@ extern PyTypeObject _PyCoroWrapper_Type; extern PyTypeObject _PyAsyncGenWrappedValue_Type; extern PyTypeObject _PyAsyncGenAThrow_Type; -/* runtime lifecycle */ - -extern void _PyAsyncGen_Fini(_PyFreeListState *); - #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_list.h b/Include/internal/pycore_list.h index 4536f90e414493..50dc13c4da4487 100644 --- a/Include/internal/pycore_list.h +++ b/Include/internal/pycore_list.h @@ -13,12 +13,6 @@ extern "C" { extern PyObject* _PyList_Extend(PyListObject *, PyObject *); extern void _PyList_DebugMallocStats(FILE *out); - -/* runtime lifecycle */ - -extern void _PyList_Fini(_PyFreeListState *); - - #define _PyList_ITEMS(op) _Py_RVALUE(_PyList_CAST(op)->ob_item) extern int diff --git a/Include/internal/pycore_object_stack.h b/Include/internal/pycore_object_stack.h index d042be2a98090a..fc130b1e9920b4 100644 --- a/Include/internal/pycore_object_stack.h +++ b/Include/internal/pycore_object_stack.h @@ -34,9 +34,6 @@ _PyObjectStackChunk_New(void); extern void _PyObjectStackChunk_Free(_PyObjectStackChunk *); -extern void -_PyObjectStackChunk_ClearFreeList(_PyFreeListState *state, int is_finalization); - // Push an item onto the stack. Return -1 on allocation failure, 0 on success. static inline int _PyObjectStack_Push(_PyObjectStack *stack, PyObject *obj) diff --git a/Include/internal/pycore_sliceobject.h b/Include/internal/pycore_sliceobject.h index 0c72d3ee6225c5..89086f67683a2f 100644 --- a/Include/internal/pycore_sliceobject.h +++ b/Include/internal/pycore_sliceobject.h @@ -11,8 +11,6 @@ extern "C" { /* runtime lifecycle */ -extern void _PySlice_Fini(_PyFreeListState *); - extern PyObject * _PyBuildSlice_ConsumeRefs(PyObject *start, PyObject *stop); diff --git a/Include/internal/pycore_tuple.h b/Include/internal/pycore_tuple.h index b348339a505b0f..4605f355ccbc38 100644 --- a/Include/internal/pycore_tuple.h +++ b/Include/internal/pycore_tuple.h @@ -14,7 +14,6 @@ extern void _PyTuple_DebugMallocStats(FILE *out); /* runtime lifecycle */ extern PyStatus _PyTuple_InitGlobalObjects(PyInterpreterState *); -extern void _PyTuple_Fini(_PyFreeListState *); /* other API */ diff --git a/Objects/floatobject.c b/Objects/floatobject.c index c440e0dab0e79f..9b322c52d4daea 100644 --- a/Objects/floatobject.c +++ b/Objects/floatobject.c @@ -2010,16 +2010,6 @@ _PyFloat_ClearFreeList(_PyFreeListState *freelist_state, int is_finalization) #endif } -void -_PyFloat_Fini(_PyFreeListState *state) -{ - // With Py_GIL_DISABLED: - // the freelists for the current thread state have already been cleared. 
-#ifndef Py_GIL_DISABLED - _PyFloat_ClearFreeList(state, 1); -#endif -} - void _PyFloat_FiniType(PyInterpreterState *interp) { diff --git a/Objects/genobject.c b/Objects/genobject.c index ab523e46cceaa3..59ab7abf6180bd 100644 --- a/Objects/genobject.c +++ b/Objects/genobject.c @@ -1682,17 +1682,6 @@ _PyAsyncGen_ClearFreeLists(_PyFreeListState *freelist_state, int is_finalization #endif } -void -_PyAsyncGen_Fini(_PyFreeListState *state) -{ - // With Py_GIL_DISABLED: - // the freelists for the current thread state have already been cleared. -#ifndef Py_GIL_DISABLED - _PyAsyncGen_ClearFreeLists(state, 1); -#endif -} - - static PyObject * async_gen_unwrap_value(PyAsyncGenObject *gen, PyObject *result) { diff --git a/Objects/listobject.c b/Objects/listobject.c index 307b8f1bd76cac..7fdb91eab890b5 100644 --- a/Objects/listobject.c +++ b/Objects/listobject.c @@ -135,16 +135,6 @@ _PyList_ClearFreeList(_PyFreeListState *freelist_state, int is_finalization) #endif } -void -_PyList_Fini(_PyFreeListState *state) -{ - // With Py_GIL_DISABLED: - // the freelists for the current thread state have already been cleared. -#ifndef Py_GIL_DISABLED - _PyList_ClearFreeList(state, 1); -#endif -} - /* Print summary info about the state of the optimized allocator */ void _PyList_DebugMallocStats(FILE *out) diff --git a/Objects/object.c b/Objects/object.c index 61e6131c6e99bb..275aa6713c8c21 100644 --- a/Objects/object.c +++ b/Objects/object.c @@ -793,6 +793,21 @@ PyObject_Bytes(PyObject *v) return PyBytes_FromObject(v); } +void +_PyObject_ClearFreeLists(_PyFreeListState *state, int is_finalization) +{ + // In the free-threaded build, freelists are per-PyThreadState and cleared in PyThreadState_Clear() + // In the default build, freelists are per-interpreter and cleared in finalize_interp_types() + _PyFloat_ClearFreeList(state, is_finalization); + _PyTuple_ClearFreeList(state, is_finalization); + _PyList_ClearFreeList(state, is_finalization); + _PyDict_ClearFreeList(state, is_finalization); + _PyContext_ClearFreeList(state, is_finalization); + _PyAsyncGen_ClearFreeLists(state, is_finalization); + // Only be cleared if is_finalization is true. + _PyObjectStackChunk_ClearFreeList(state, is_finalization); + _PySlice_ClearFreeList(state, is_finalization); +} /* def _PyObject_FunctionStr(x): diff --git a/Objects/sliceobject.c b/Objects/sliceobject.c index 8b9d6bbfd858b7..9880c123c80f95 100644 --- a/Objects/sliceobject.c +++ b/Objects/sliceobject.c @@ -103,8 +103,11 @@ PyObject _Py_EllipsisObject = _PyObject_HEAD_INIT(&PyEllipsis_Type); /* Slice object implementation */ -void _PySlice_ClearCache(_PyFreeListState *state) +void _PySlice_ClearFreeList(_PyFreeListState *state, int is_finalization) { + if (!is_finalization) { + return; + } #ifdef WITH_FREELISTS PySliceObject *obj = state->slices.slice_cache; if (obj != NULL) { @@ -114,13 +117,6 @@ void _PySlice_ClearCache(_PyFreeListState *state) #endif } -void _PySlice_Fini(_PyFreeListState *state) -{ -#ifdef WITH_FREELISTS - _PySlice_ClearCache(state); -#endif -} - /* start, stop, and step are python objects with None indicating no index is present. 
*/ diff --git a/Objects/tupleobject.c b/Objects/tupleobject.c index b9bf6cd48f6129..7d73c3fb0f7f2c 100644 --- a/Objects/tupleobject.c +++ b/Objects/tupleobject.c @@ -964,11 +964,6 @@ _PyTuple_Resize(PyObject **pv, Py_ssize_t newsize) static void maybe_freelist_clear(_PyFreeListState *, int); -void -_PyTuple_Fini(_PyFreeListState *state) -{ - maybe_freelist_clear(state, 1); -} void _PyTuple_ClearFreeList(_PyFreeListState *state, int is_finalization) diff --git a/Python/context.c b/Python/context.c index 793dfa2b72c7e3..e44fef705c36e0 100644 --- a/Python/context.c +++ b/Python/context.c @@ -1284,17 +1284,6 @@ _PyContext_ClearFreeList(_PyFreeListState *freelist_state, int is_finalization) } -void -_PyContext_Fini(_PyFreeListState *state) -{ - // With Py_GIL_DISABLED: - // the freelists for the current thread state have already been cleared. -#ifndef Py_GIL_DISABLED - _PyContext_ClearFreeList(state, 1); -#endif -} - - PyStatus _PyContext_Init(PyInterpreterState *interp) { diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c index 5d3b097dee93e8..93e1168002b6f7 100644 --- a/Python/gc_free_threading.c +++ b/Python/gc_free_threading.c @@ -1721,7 +1721,7 @@ _PyGC_ClearAllFreeLists(PyInterpreterState *interp) HEAD_LOCK(&_PyRuntime); _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)interp->threads.head; while (tstate != NULL) { - _Py_ClearFreeLists(&tstate->freelist_state, 0); + _PyObject_ClearFreeLists(&tstate->freelist_state, 0); tstate = (_PyThreadStateImpl *)tstate->base.next; } HEAD_UNLOCK(&_PyRuntime); diff --git a/Python/gc_gil.c b/Python/gc_gil.c index 4e2aa8f7af746c..5f1365f509deb0 100644 --- a/Python/gc_gil.c +++ b/Python/gc_gil.c @@ -11,7 +11,7 @@ void _PyGC_ClearAllFreeLists(PyInterpreterState *interp) { - _Py_ClearFreeLists(&interp->freelist_state, 0); + _PyObject_ClearFreeLists(&interp->freelist_state, 0); } #endif diff --git a/Python/pylifecycle.c b/Python/pylifecycle.c index 0cac7109340129..61c9d4f9ea9575 100644 --- a/Python/pylifecycle.c +++ b/Python/pylifecycle.c @@ -1790,16 +1790,14 @@ finalize_interp_types(PyInterpreterState *interp) // a dict internally. _PyUnicode_ClearInterned(interp); - _PyDict_Fini(interp); _PyUnicode_Fini(interp); +#ifndef Py_GIL_DISABLED + // With Py_GIL_DISABLED: + // the freelists for the current thread state have already been cleared. 
_PyFreeListState *state = _PyFreeListState_GET(); - _PyTuple_Fini(state); - _PyList_Fini(state); - _PyFloat_Fini(state); - _PySlice_Fini(state); - _PyContext_Fini(state); - _PyAsyncGen_Fini(state); + _PyObject_ClearFreeLists(state, 1); +#endif #ifdef Py_DEBUG _PyStaticObjects_CheckRefcnt(interp); diff --git a/Python/pystate.c b/Python/pystate.c index 6cd034743ddf4c..937c43033b068d 100644 --- a/Python/pystate.c +++ b/Python/pystate.c @@ -1468,20 +1468,6 @@ clear_datastack(PyThreadState *tstate) } } -void -_Py_ClearFreeLists(_PyFreeListState *state, int is_finalization) -{ - // In the free-threaded build, freelists are per-PyThreadState and cleared in PyThreadState_Clear() - // In the default build, freelists are per-interpreter and cleared in finalize_interp_types() - _PyFloat_ClearFreeList(state, is_finalization); - _PyTuple_ClearFreeList(state, is_finalization); - _PyList_ClearFreeList(state, is_finalization); - _PyDict_ClearFreeList(state, is_finalization); - _PyContext_ClearFreeList(state, is_finalization); - _PyAsyncGen_ClearFreeLists(state, is_finalization); - _PyObjectStackChunk_ClearFreeList(state, is_finalization); -} - void PyThreadState_Clear(PyThreadState *tstate) { @@ -1566,9 +1552,8 @@ PyThreadState_Clear(PyThreadState *tstate) } #ifdef Py_GIL_DISABLED // Each thread should clear own freelists in free-threading builds. - _PyFreeListState *freelist_state = &((_PyThreadStateImpl*)tstate)->freelist_state; - _Py_ClearFreeLists(freelist_state, 1); - _PySlice_ClearCache(freelist_state); + _PyFreeListState *freelist_state = _PyFreeListState_GET(); + _PyObject_ClearFreeLists(freelist_state, 1); // Remove ourself from the biased reference counting table of threads. _Py_brc_remove_thread(tstate);
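
The inter-thread queue introduced by the biased reference counting patch above (gh-110481) can be pictured with a toy, pure-Python model. Everything below (``Bucket``, ``queue_object`` and so on) is invented for illustration and is not the real implementation in ``Python/brc.c``; the actual merging of refcount fields is replaced by a ``merge`` callback so that only the queueing structure is shown:

    import threading

    NUM_BUCKETS = 257            # brc.c likewise uses a prime bucket count

    class Bucket:
        def __init__(self):
            self.lock = threading.Lock()
            self.queues = {}     # owning thread id -> queued objects

    table = [Bucket() for _ in range(NUM_BUCKETS)]

    def register_thread(tid):
        bucket = table[tid % NUM_BUCKETS]
        with bucket.lock:
            bucket.queues[tid] = []

    def queue_object(owner_tid, obj, merge):
        # A non-owning thread hands obj to its owner, or merges it directly
        # when the owner has already exited (its queue is gone).
        bucket = table[owner_tid % NUM_BUCKETS]
        with bucket.lock:
            queue = bucket.queues.get(owner_tid)
            if queue is not None:
                queue.append(obj)
                return
        merge(obj)

    def merge_my_objects(tid, merge):
        # The owning thread drains its queue under the bucket lock, then
        # performs the merges outside the lock.
        bucket = table[tid % NUM_BUCKETS]
        with bucket.lock:
            pending, bucket.queues[tid] = bucket.queues[tid], []
        for obj in pending:
            merge(obj)

The real code additionally sets the ``_PY_EVAL_EXPLICIT_MERGE_BIT`` eval-breaker bit to wake the owning thread and falls back to a stop-the-world merge when queueing an object fails.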