author     shadchin <shadchin@yandex-team.ru>    2022-02-10 16:44:39 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>    2022-02-10 16:44:39 +0300
commit     e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (patch)
tree       64175d5cadab313b3e7039ebaa06c5bc3295e274 /contrib/tools/python3/src/Modules/gcmodule.c
parent     2598ef1d0aee359b4b6d5fdd1758916d5907d04f (diff)
download   ydb-e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0.tar.gz

Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 2 of 2.

Diffstat (limited to 'contrib/tools/python3/src/Modules/gcmodule.c')

-rw-r--r--  contrib/tools/python3/src/Modules/gcmodule.c  2264

1 file changed, 1132 insertions(+), 1132 deletions(-)
diff --git a/contrib/tools/python3/src/Modules/gcmodule.c b/contrib/tools/python3/src/Modules/gcmodule.c index 2f80ff3189..3cf1a00b00 100644 --- a/contrib/tools/python3/src/Modules/gcmodule.c +++ b/contrib/tools/python3/src/Modules/gcmodule.c @@ -24,99 +24,99 @@ */ #include "Python.h" -#include "pycore_context.h" -#include "pycore_initconfig.h" -#include "pycore_interp.h" // PyInterpreterState.gc -#include "pycore_object.h" -#include "pycore_pyerrors.h" -#include "pycore_pystate.h" // _PyThreadState_GET() +#include "pycore_context.h" +#include "pycore_initconfig.h" +#include "pycore_interp.h" // PyInterpreterState.gc +#include "pycore_object.h" +#include "pycore_pyerrors.h" +#include "pycore_pystate.h" // _PyThreadState_GET() #include "pydtrace.h" -#include "pytime.h" // _PyTime_GetMonotonicClock() +#include "pytime.h" // _PyTime_GetMonotonicClock() + +typedef struct _gc_runtime_state GCState; -typedef struct _gc_runtime_state GCState; - /*[clinic input] module gc [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=b5c9690ecc842d79]*/ - -#ifdef Py_DEBUG -# define GC_DEBUG -#endif - -#define GC_NEXT _PyGCHead_NEXT -#define GC_PREV _PyGCHead_PREV - -// update_refs() set this bit for all objects in current generation. -// subtract_refs() and move_unreachable() uses this to distinguish -// visited object is in GCing or not. -// -// move_unreachable() removes this flag from reachable objects. -// Only unreachable objects have this flag. -// -// No objects in interpreter have this flag after GC ends. -#define PREV_MASK_COLLECTING _PyGC_PREV_MASK_COLLECTING - -// Lowest bit of _gc_next is used for UNREACHABLE flag. -// -// This flag represents the object is in unreachable list in move_unreachable() -// -// Although this flag is used only in move_unreachable(), move_unreachable() -// doesn't clear this flag to skip unnecessary iteration. -// move_legacy_finalizers() removes this flag instead. -// Between them, unreachable list is not normal list and we can not use -// most gc_list_* functions for it. -#define NEXT_MASK_UNREACHABLE (1) - + +#ifdef Py_DEBUG +# define GC_DEBUG +#endif + +#define GC_NEXT _PyGCHead_NEXT +#define GC_PREV _PyGCHead_PREV + +// update_refs() set this bit for all objects in current generation. +// subtract_refs() and move_unreachable() uses this to distinguish +// visited object is in GCing or not. +// +// move_unreachable() removes this flag from reachable objects. +// Only unreachable objects have this flag. +// +// No objects in interpreter have this flag after GC ends. +#define PREV_MASK_COLLECTING _PyGC_PREV_MASK_COLLECTING + +// Lowest bit of _gc_next is used for UNREACHABLE flag. +// +// This flag represents the object is in unreachable list in move_unreachable() +// +// Although this flag is used only in move_unreachable(), move_unreachable() +// doesn't clear this flag to skip unnecessary iteration. +// move_legacy_finalizers() removes this flag instead. +// Between them, unreachable list is not normal list and we can not use +// most gc_list_* functions for it. 
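The two flags documented above are squeezed into pointer-sized words: PREV_MASK_COLLECTING shares `_gc_prev` with a copied refcount, and NEXT_MASK_UNREACHABLE is the lowest bit of `_gc_next`. A minimal standalone model of that packing; the shift and mask values here are illustrative stand-ins for `_PyGC_PREV_SHIFT` and the `_PyGC_PREV_MASK_*` constants, not the real headers:

```c
/* Minimal standalone model of the flag packing described above.
 * PREV_SHIFT and the masks are illustrative stand-ins, not CPython's. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PREV_SHIFT       2                    /* low two bits of _gc_prev hold flags */
#define MASK_FINALIZED   ((uintptr_t)1 << 0)
#define MASK_COLLECTING  ((uintptr_t)1 << 1)
#define MASK_UNREACHABLE ((uintptr_t)1 << 0)  /* lowest bit of _gc_next */

int main(void)
{
    uintptr_t gc_prev = 0;

    /* gc_reset_refs: keep FINALIZED, set COLLECTING, and park a copy
     * of the refcount in the bits above the flags. */
    intptr_t refs = 3;
    gc_prev = (gc_prev & MASK_FINALIZED)
            | MASK_COLLECTING
            | ((uintptr_t)refs << PREV_SHIFT);
    assert((intptr_t)(gc_prev >> PREV_SHIFT) == 3);

    /* gc_decref: one subtraction; the flag bits are untouched. */
    gc_prev -= (uintptr_t)1 << PREV_SHIFT;
    assert((gc_prev & MASK_COLLECTING) != 0);
    assert((intptr_t)(gc_prev >> PREV_SHIFT) == 2);

    /* NEXT_MASK_UNREACHABLE rides in the low bit of _gc_next, so the
     * true next pointer must be recovered by masking it off. */
    uintptr_t next_ptr = (uintptr_t)&gc_prev;   /* any aligned address */
    uintptr_t gc_next = next_ptr | MASK_UNREACHABLE;
    assert((gc_next & ~MASK_UNREACHABLE) == next_ptr);

    printf("refs=%ld, collecting=%d, unreachable=%d\n",
           (long)(gc_prev >> PREV_SHIFT),
           (gc_prev & MASK_COLLECTING) != 0,
           (gc_next & MASK_UNREACHABLE) != 0);
    return 0;
}
```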
+#define NEXT_MASK_UNREACHABLE (1) + /* Get an object's GC head */ #define AS_GC(o) ((PyGC_Head *)(o)-1) /* Get the object given the GC head */ #define FROM_GC(g) ((PyObject *)(((PyGC_Head *)g)+1)) -static inline int -gc_is_collecting(PyGC_Head *g) -{ - return (g->_gc_prev & PREV_MASK_COLLECTING) != 0; -} - -static inline void -gc_clear_collecting(PyGC_Head *g) -{ - g->_gc_prev &= ~PREV_MASK_COLLECTING; -} - -static inline Py_ssize_t -gc_get_refs(PyGC_Head *g) -{ - return (Py_ssize_t)(g->_gc_prev >> _PyGC_PREV_SHIFT); -} - -static inline void -gc_set_refs(PyGC_Head *g, Py_ssize_t refs) -{ - g->_gc_prev = (g->_gc_prev & ~_PyGC_PREV_MASK) - | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT); -} - -static inline void -gc_reset_refs(PyGC_Head *g, Py_ssize_t refs) -{ - g->_gc_prev = (g->_gc_prev & _PyGC_PREV_MASK_FINALIZED) - | PREV_MASK_COLLECTING - | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT); -} - -static inline void -gc_decref(PyGC_Head *g) -{ - _PyObject_ASSERT_WITH_MSG(FROM_GC(g), - gc_get_refs(g) > 0, - "refcount is too small"); - g->_gc_prev -= 1 << _PyGC_PREV_SHIFT; -} - +static inline int +gc_is_collecting(PyGC_Head *g) +{ + return (g->_gc_prev & PREV_MASK_COLLECTING) != 0; +} + +static inline void +gc_clear_collecting(PyGC_Head *g) +{ + g->_gc_prev &= ~PREV_MASK_COLLECTING; +} + +static inline Py_ssize_t +gc_get_refs(PyGC_Head *g) +{ + return (Py_ssize_t)(g->_gc_prev >> _PyGC_PREV_SHIFT); +} + +static inline void +gc_set_refs(PyGC_Head *g, Py_ssize_t refs) +{ + g->_gc_prev = (g->_gc_prev & ~_PyGC_PREV_MASK) + | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT); +} + +static inline void +gc_reset_refs(PyGC_Head *g, Py_ssize_t refs) +{ + g->_gc_prev = (g->_gc_prev & _PyGC_PREV_MASK_FINALIZED) + | PREV_MASK_COLLECTING + | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT); +} + +static inline void +gc_decref(PyGC_Head *g) +{ + _PyObject_ASSERT_WITH_MSG(FROM_GC(g), + gc_get_refs(g) > 0, + "refcount is too small"); + g->_gc_prev -= 1 << _PyGC_PREV_SHIFT; +} + /* set for debugging information */ #define DEBUG_STATS (1<<0) /* print collection statistics */ #define DEBUG_COLLECTABLE (1<<1) /* print collectable objects */ @@ -126,140 +126,140 @@ gc_decref(PyGC_Head *g) DEBUG_UNCOLLECTABLE | \ DEBUG_SAVEALL -#define GEN_HEAD(gcstate, n) (&(gcstate)->generations[n].head) +#define GEN_HEAD(gcstate, n) (&(gcstate)->generations[n].head) void -_PyGC_InitState(GCState *gcstate) +_PyGC_InitState(GCState *gcstate) { - gcstate->enabled = 1; /* automatic collection enabled? */ + gcstate->enabled = 1; /* automatic collection enabled? 
*/ -#define _GEN_HEAD(n) GEN_HEAD(gcstate, n) +#define _GEN_HEAD(n) GEN_HEAD(gcstate, n) struct gc_generation generations[NUM_GENERATIONS] = { - /* PyGC_Head, threshold, count */ - {{(uintptr_t)_GEN_HEAD(0), (uintptr_t)_GEN_HEAD(0)}, 700, 0}, - {{(uintptr_t)_GEN_HEAD(1), (uintptr_t)_GEN_HEAD(1)}, 10, 0}, - {{(uintptr_t)_GEN_HEAD(2), (uintptr_t)_GEN_HEAD(2)}, 10, 0}, + /* PyGC_Head, threshold, count */ + {{(uintptr_t)_GEN_HEAD(0), (uintptr_t)_GEN_HEAD(0)}, 700, 0}, + {{(uintptr_t)_GEN_HEAD(1), (uintptr_t)_GEN_HEAD(1)}, 10, 0}, + {{(uintptr_t)_GEN_HEAD(2), (uintptr_t)_GEN_HEAD(2)}, 10, 0}, }; for (int i = 0; i < NUM_GENERATIONS; i++) { - gcstate->generations[i] = generations[i]; + gcstate->generations[i] = generations[i]; }; - gcstate->generation0 = GEN_HEAD(gcstate, 0); + gcstate->generation0 = GEN_HEAD(gcstate, 0); struct gc_generation permanent_generation = { - {(uintptr_t)&gcstate->permanent_generation.head, - (uintptr_t)&gcstate->permanent_generation.head}, 0, 0 + {(uintptr_t)&gcstate->permanent_generation.head, + (uintptr_t)&gcstate->permanent_generation.head}, 0, 0 }; - gcstate->permanent_generation = permanent_generation; -} - - -PyStatus -_PyGC_Init(PyThreadState *tstate) -{ - GCState *gcstate = &tstate->interp->gc; - if (gcstate->garbage == NULL) { - gcstate->garbage = PyList_New(0); - if (gcstate->garbage == NULL) { - return _PyStatus_NO_MEMORY(); - } - } - return _PyStatus_OK(); -} - - -/* -_gc_prev values ---------------- - -Between collections, _gc_prev is used for doubly linked list. - -Lowest two bits of _gc_prev are used for flags. -PREV_MASK_COLLECTING is used only while collecting and cleared before GC ends -or _PyObject_GC_UNTRACK() is called. - -During a collection, _gc_prev is temporary used for gc_refs, and the gc list -is singly linked until _gc_prev is restored. - -gc_refs + gcstate->permanent_generation = permanent_generation; +} + + +PyStatus +_PyGC_Init(PyThreadState *tstate) +{ + GCState *gcstate = &tstate->interp->gc; + if (gcstate->garbage == NULL) { + gcstate->garbage = PyList_New(0); + if (gcstate->garbage == NULL) { + return _PyStatus_NO_MEMORY(); + } + } + return _PyStatus_OK(); +} + + +/* +_gc_prev values +--------------- + +Between collections, _gc_prev is used for doubly linked list. + +Lowest two bits of _gc_prev are used for flags. +PREV_MASK_COLLECTING is used only while collecting and cleared before GC ends +or _PyObject_GC_UNTRACK() is called. + +During a collection, _gc_prev is temporary used for gc_refs, and the gc list +is singly linked until _gc_prev is restored. + +gc_refs At the start of a collection, update_refs() copies the true refcount to gc_refs, for each object in the generation being collected. subtract_refs() then adjusts gc_refs so that it equals the number of times an object is referenced directly from outside the generation being collected. -PREV_MASK_COLLECTING - Objects in generation being collected are marked PREV_MASK_COLLECTING in - update_refs(). - - -_gc_next values ---------------- - -_gc_next takes these values: - -0 - The object is not tracked - -!= 0 - Pointer to the next object in the GC list. - Additionally, lowest bit is used temporary for - NEXT_MASK_UNREACHABLE flag described below. - -NEXT_MASK_UNREACHABLE +PREV_MASK_COLLECTING + Objects in generation being collected are marked PREV_MASK_COLLECTING in + update_refs(). + + +_gc_next values +--------------- + +_gc_next takes these values: + +0 + The object is not tracked + +!= 0 + Pointer to the next object in the GC list. 
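The 700/10/10 thresholds initialized in `_PyGC_InitState` above drive when each generation is collected. A toy model of that counting scheme, under the assumption (matching `collect()` further down) that collecting generation g resets the counts of g and everything younger while bumping the next generation's count; this is an illustration, not the real scheduling code:

```c
/* Toy model of how the 700/10/10 thresholds schedule collections.
 * The oldest generation over its threshold wins. Illustrative only. */
#include <stdio.h>

#define NUM_GENS 3

int main(void)
{
    const int threshold[NUM_GENS] = {700, 10, 10};
    int count[NUM_GENS] = {0, 0, 0};
    long collections[NUM_GENS] = {0, 0, 0};

    for (long alloc = 0; alloc < 1000000; alloc++) {
        count[0]++;                          /* a tracked object was allocated */
        for (int g = NUM_GENS - 1; g >= 0; g--) {
            if (count[g] > threshold[g]) {
                collections[g]++;            /* collect g and younger gens */
                for (int i = 0; i <= g; i++)
                    count[i] = 0;
                if (g + 1 < NUM_GENS)
                    count[g + 1]++;
                break;
            }
        }
    }
    for (int g = 0; g < NUM_GENS; g++)
        printf("gen%d collected %ld times\n", g, collections[g]);
    return 0;
}
```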
+ Additionally, lowest bit is used temporary for + NEXT_MASK_UNREACHABLE flag described below. + +NEXT_MASK_UNREACHABLE move_unreachable() then moves objects not reachable (whether directly or - indirectly) from outside the generation into an "unreachable" set and - set this flag. - - Objects that are found to be reachable have gc_refs set to 1. - When this flag is set for the reachable object, the object must be in - "unreachable" set. - The flag is unset and the object is moved back to "reachable" set. - - move_legacy_finalizers() will remove this flag from "unreachable" set. + indirectly) from outside the generation into an "unreachable" set and + set this flag. + + Objects that are found to be reachable have gc_refs set to 1. + When this flag is set for the reachable object, the object must be in + "unreachable" set. + The flag is unset and the object is moved back to "reachable" set. + + move_legacy_finalizers() will remove this flag from "unreachable" set. */ /*** list functions ***/ -static inline void +static inline void gc_list_init(PyGC_Head *list) { - // List header must not have flags. - // We can assign pointer by simple cast. - list->_gc_prev = (uintptr_t)list; - list->_gc_next = (uintptr_t)list; + // List header must not have flags. + // We can assign pointer by simple cast. + list->_gc_prev = (uintptr_t)list; + list->_gc_next = (uintptr_t)list; } -static inline int +static inline int gc_list_is_empty(PyGC_Head *list) { - return (list->_gc_next == (uintptr_t)list); + return (list->_gc_next == (uintptr_t)list); } /* Append `node` to `list`. */ -static inline void +static inline void gc_list_append(PyGC_Head *node, PyGC_Head *list) { - PyGC_Head *last = (PyGC_Head *)list->_gc_prev; - - // last <-> node - _PyGCHead_SET_PREV(node, last); - _PyGCHead_SET_NEXT(last, node); - - // node <-> list - _PyGCHead_SET_NEXT(node, list); - list->_gc_prev = (uintptr_t)node; + PyGC_Head *last = (PyGC_Head *)list->_gc_prev; + + // last <-> node + _PyGCHead_SET_PREV(node, last); + _PyGCHead_SET_NEXT(last, node); + + // node <-> list + _PyGCHead_SET_NEXT(node, list); + list->_gc_prev = (uintptr_t)node; } /* Remove `node` from the gc list it's currently in. */ -static inline void +static inline void gc_list_remove(PyGC_Head *node) { - PyGC_Head *prev = GC_PREV(node); - PyGC_Head *next = GC_NEXT(node); - - _PyGCHead_SET_NEXT(prev, next); - _PyGCHead_SET_PREV(next, prev); - - node->_gc_next = 0; /* object is not currently tracked */ + PyGC_Head *prev = GC_PREV(node); + PyGC_Head *next = GC_NEXT(node); + + _PyGCHead_SET_NEXT(prev, next); + _PyGCHead_SET_PREV(next, prev); + + node->_gc_next = 0; /* object is not currently tracked */ } /* Move `node` from the gc list it's currently in (which is not explicitly @@ -270,18 +270,18 @@ static void gc_list_move(PyGC_Head *node, PyGC_Head *list) { /* Unlink from current list. */ - PyGC_Head *from_prev = GC_PREV(node); - PyGC_Head *from_next = GC_NEXT(node); - _PyGCHead_SET_NEXT(from_prev, from_next); - _PyGCHead_SET_PREV(from_next, from_prev); - + PyGC_Head *from_prev = GC_PREV(node); + PyGC_Head *from_next = GC_NEXT(node); + _PyGCHead_SET_NEXT(from_prev, from_next); + _PyGCHead_SET_PREV(from_next, from_prev); + /* Relink at end of new list. */ - // list must not have flags. So we can skip macros. - PyGC_Head *to_prev = (PyGC_Head*)list->_gc_prev; - _PyGCHead_SET_PREV(node, to_prev); - _PyGCHead_SET_NEXT(to_prev, node); - list->_gc_prev = (uintptr_t)node; - _PyGCHead_SET_NEXT(node, list); + // list must not have flags. So we can skip macros. 
+ PyGC_Head *to_prev = (PyGC_Head*)list->_gc_prev; + _PyGCHead_SET_PREV(node, to_prev); + _PyGCHead_SET_NEXT(to_prev, node); + list->_gc_prev = (uintptr_t)node; + _PyGCHead_SET_NEXT(node, list); } /* append list `from` onto list `to`; `from` becomes an empty list */ @@ -290,17 +290,17 @@ gc_list_merge(PyGC_Head *from, PyGC_Head *to) { assert(from != to); if (!gc_list_is_empty(from)) { - PyGC_Head *to_tail = GC_PREV(to); - PyGC_Head *from_head = GC_NEXT(from); - PyGC_Head *from_tail = GC_PREV(from); - assert(from_head != from); - assert(from_tail != from); - - _PyGCHead_SET_NEXT(to_tail, from_head); - _PyGCHead_SET_PREV(from_head, to_tail); - - _PyGCHead_SET_NEXT(from_tail, to); - _PyGCHead_SET_PREV(to, from_tail); + PyGC_Head *to_tail = GC_PREV(to); + PyGC_Head *from_head = GC_NEXT(from); + PyGC_Head *from_tail = GC_PREV(from); + assert(from_head != from); + assert(from_tail != from); + + _PyGCHead_SET_NEXT(to_tail, from_head); + _PyGCHead_SET_PREV(from_head, to_tail); + + _PyGCHead_SET_NEXT(from_tail, to); + _PyGCHead_SET_PREV(to, from_tail); } gc_list_init(from); } @@ -310,30 +310,30 @@ gc_list_size(PyGC_Head *list) { PyGC_Head *gc; Py_ssize_t n = 0; - for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) { + for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) { n++; } return n; } -/* Walk the list and mark all objects as non-collecting */ -static inline void -gc_list_clear_collecting(PyGC_Head *collectable) -{ - PyGC_Head *gc; - for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) { - gc_clear_collecting(gc); - } -} - +/* Walk the list and mark all objects as non-collecting */ +static inline void +gc_list_clear_collecting(PyGC_Head *collectable) +{ + PyGC_Head *gc; + for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) { + gc_clear_collecting(gc); + } +} + /* Append objects in a GC list to a Python list. - * Return 0 if all OK, < 0 if error (out of memory for list) + * Return 0 if all OK, < 0 if error (out of memory for list) */ static int append_objects(PyObject *py_list, PyGC_Head *gc_list) { PyGC_Head *gc; - for (gc = GC_NEXT(gc_list); gc != gc_list; gc = GC_NEXT(gc)) { + for (gc = GC_NEXT(gc_list); gc != gc_list; gc = GC_NEXT(gc)) { PyObject *op = FROM_GC(gc); if (op != py_list) { if (PyList_Append(py_list, op)) { @@ -344,74 +344,74 @@ append_objects(PyObject *py_list, PyGC_Head *gc_list) return 0; } -// Constants for validate_list's flags argument. -enum flagstates {collecting_clear_unreachable_clear, - collecting_clear_unreachable_set, - collecting_set_unreachable_clear, - collecting_set_unreachable_set}; - -#ifdef GC_DEBUG -// validate_list checks list consistency. And it works as document -// describing when flags are expected to be set / unset. -// `head` must be a doubly-linked gc list, although it's fine (expected!) if -// the prev and next pointers are "polluted" with flags. -// What's checked: -// - The `head` pointers are not polluted. -// - The objects' PREV_MASK_COLLECTING and NEXT_MASK_UNREACHABLE flags are all -// `set or clear, as specified by the 'flags' argument. -// - The prev and next pointers are mutually consistent. 
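`gc_list_init`, `gc_list_append`, `gc_list_remove`, `gc_list_move` and `gc_list_merge` above all operate on a circular, doubly linked list whose header is a sentinel node: an empty list points at itself, and merging is an O(1) splice. The same machinery in miniature, with simplified names and none of the flag bits (a sketch, not the CPython code):

```c
/* The gc list functions above in miniature: sentinel-headed circular
 * doubly linked list with O(1) append, remove and merge. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef struct node { struct node *prev, *next; } node;

static void list_init(node *list) { list->prev = list->next = list; }
static int  list_is_empty(const node *list) { return list->next == list; }

static void list_append(node *n, node *list)
{
    node *last = list->prev;
    last->next = n;              /* last <-> n */
    n->prev = last;
    n->next = list;              /* n <-> list */
    list->prev = n;
}

static void list_remove(node *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
    n->next = NULL;              /* "object is not currently tracked" */
}

static void list_merge(node *from, node *to)
{
    assert(from != to);
    if (!list_is_empty(from)) {
        node *to_tail   = to->prev;
        node *from_head = from->next;
        node *from_tail = from->prev;
        to_tail->next   = from_head;   /* to_tail <-> from_head */
        from_head->prev = to_tail;
        from_tail->next = to;          /* from_tail <-> to */
        to->prev        = from_tail;
    }
    list_init(from);                   /* `from` becomes an empty list */
}

int main(void)
{
    node young, old, a, b, c;
    list_init(&young);
    list_init(&old);
    list_append(&a, &young);
    list_append(&b, &young);
    list_append(&c, &old);
    list_remove(&b);                   /* untrack b */
    list_merge(&young, &old);          /* old is now: c, a */
    assert(list_is_empty(&young));
    assert(old.next == &c && c.next == &a && a.next == &old);
    printf("ok\n");
    return 0;
}
```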
-static void -validate_list(PyGC_Head *head, enum flagstates flags) -{ - assert((head->_gc_prev & PREV_MASK_COLLECTING) == 0); - assert((head->_gc_next & NEXT_MASK_UNREACHABLE) == 0); - uintptr_t prev_value = 0, next_value = 0; - switch (flags) { - case collecting_clear_unreachable_clear: - break; - case collecting_set_unreachable_clear: - prev_value = PREV_MASK_COLLECTING; - break; - case collecting_clear_unreachable_set: - next_value = NEXT_MASK_UNREACHABLE; - break; - case collecting_set_unreachable_set: - prev_value = PREV_MASK_COLLECTING; - next_value = NEXT_MASK_UNREACHABLE; - break; - default: - assert(! "bad internal flags argument"); - } - PyGC_Head *prev = head; - PyGC_Head *gc = GC_NEXT(head); - while (gc != head) { - PyGC_Head *trueprev = GC_PREV(gc); - PyGC_Head *truenext = (PyGC_Head *)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE); - assert(truenext != NULL); - assert(trueprev == prev); - assert((gc->_gc_prev & PREV_MASK_COLLECTING) == prev_value); - assert((gc->_gc_next & NEXT_MASK_UNREACHABLE) == next_value); - prev = gc; - gc = truenext; - } - assert(prev == GC_PREV(head)); -} -#else -#define validate_list(x, y) do{}while(0) -#endif - +// Constants for validate_list's flags argument. +enum flagstates {collecting_clear_unreachable_clear, + collecting_clear_unreachable_set, + collecting_set_unreachable_clear, + collecting_set_unreachable_set}; + +#ifdef GC_DEBUG +// validate_list checks list consistency. And it works as document +// describing when flags are expected to be set / unset. +// `head` must be a doubly-linked gc list, although it's fine (expected!) if +// the prev and next pointers are "polluted" with flags. +// What's checked: +// - The `head` pointers are not polluted. +// - The objects' PREV_MASK_COLLECTING and NEXT_MASK_UNREACHABLE flags are all +// `set or clear, as specified by the 'flags' argument. +// - The prev and next pointers are mutually consistent. +static void +validate_list(PyGC_Head *head, enum flagstates flags) +{ + assert((head->_gc_prev & PREV_MASK_COLLECTING) == 0); + assert((head->_gc_next & NEXT_MASK_UNREACHABLE) == 0); + uintptr_t prev_value = 0, next_value = 0; + switch (flags) { + case collecting_clear_unreachable_clear: + break; + case collecting_set_unreachable_clear: + prev_value = PREV_MASK_COLLECTING; + break; + case collecting_clear_unreachable_set: + next_value = NEXT_MASK_UNREACHABLE; + break; + case collecting_set_unreachable_set: + prev_value = PREV_MASK_COLLECTING; + next_value = NEXT_MASK_UNREACHABLE; + break; + default: + assert(! "bad internal flags argument"); + } + PyGC_Head *prev = head; + PyGC_Head *gc = GC_NEXT(head); + while (gc != head) { + PyGC_Head *trueprev = GC_PREV(gc); + PyGC_Head *truenext = (PyGC_Head *)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE); + assert(truenext != NULL); + assert(trueprev == prev); + assert((gc->_gc_prev & PREV_MASK_COLLECTING) == prev_value); + assert((gc->_gc_next & NEXT_MASK_UNREACHABLE) == next_value); + prev = gc; + gc = truenext; + } + assert(prev == GC_PREV(head)); +} +#else +#define validate_list(x, y) do{}while(0) +#endif + /*** end of list stuff ***/ -/* Set all gc_refs = ob_refcnt. After this, gc_refs is > 0 and - * PREV_MASK_COLLECTING bit is set for all objects in containers. +/* Set all gc_refs = ob_refcnt. After this, gc_refs is > 0 and + * PREV_MASK_COLLECTING bit is set for all objects in containers. 
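`update_refs()` and `subtract_refs()` together compute, for every object in the generation, how many references reach it from outside the set. A compact simulation of those two passes on a hypothetical three-object graph, with plain arrays standing in for refcounts and `tp_traverse`:

```c
/* The two passes above, simulated: obj0 is referenced once from outside
 * and refers to obj1; obj1 and obj2 refer to each other (a cycle).
 * Nothing here is the real CPython machinery. */
#include <stdio.h>

#define N 3

int main(void)
{
    int refcnt[N] = {1, 2, 1};       /* true refcounts, incl. the external ref */
    int edge[N][N] = {
        {0, 1, 0},                   /* obj0 -> obj1 */
        {0, 0, 1},                   /* obj1 -> obj2 */
        {0, 1, 0},                   /* obj2 -> obj1 */
    };
    int gc_refs[N];

    for (int i = 0; i < N; i++)      /* update_refs: copy the refcounts */
        gc_refs[i] = refcnt[i];

    for (int i = 0; i < N; i++)      /* subtract_refs: drop internal refs */
        for (int j = 0; j < N; j++)
            if (edge[i][j])
                gc_refs[j]--;        /* visit_decref */

    /* gc_refs > 0 now means "referenced directly from outside the set". */
    for (int i = 0; i < N; i++)
        printf("obj%d: gc_refs=%d%s\n", i, gc_refs[i],
               gc_refs[i] > 0 ? "  <- external reference" : "");
    return 0;
}
```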
*/ static void update_refs(PyGC_Head *containers) { - PyGC_Head *gc = GC_NEXT(containers); - for (; gc != containers; gc = GC_NEXT(gc)) { - gc_reset_refs(gc, Py_REFCNT(FROM_GC(gc))); + PyGC_Head *gc = GC_NEXT(containers); + for (; gc != containers; gc = GC_NEXT(gc)) { + gc_reset_refs(gc, Py_REFCNT(FROM_GC(gc))); /* Python's cyclic gc should never see an incoming refcount * of 0: if something decref'ed to 0, it should have been * deallocated immediately at that time. @@ -430,25 +430,25 @@ update_refs(PyGC_Head *containers) * so serious that maybe this should be a release-build * check instead of an assert? */ - _PyObject_ASSERT(FROM_GC(gc), gc_get_refs(gc) != 0); + _PyObject_ASSERT(FROM_GC(gc), gc_get_refs(gc) != 0); } } /* A traversal callback for subtract_refs. */ static int -visit_decref(PyObject *op, void *parent) +visit_decref(PyObject *op, void *parent) { - _PyObject_ASSERT(_PyObject_CAST(parent), !_PyObject_IsFreed(op)); - - if (_PyObject_IS_GC(op)) { + _PyObject_ASSERT(_PyObject_CAST(parent), !_PyObject_IsFreed(op)); + + if (_PyObject_IS_GC(op)) { PyGC_Head *gc = AS_GC(op); /* We're only interested in gc_refs for objects in the * generation being collected, which can be recognized * because only they have positive gc_refs. */ - if (gc_is_collecting(gc)) { - gc_decref(gc); - } + if (gc_is_collecting(gc)) { + gc_decref(gc); + } } return 0; } @@ -462,13 +462,13 @@ static void subtract_refs(PyGC_Head *containers) { traverseproc traverse; - PyGC_Head *gc = GC_NEXT(containers); - for (; gc != containers; gc = GC_NEXT(gc)) { - PyObject *op = FROM_GC(gc); - traverse = Py_TYPE(op)->tp_traverse; + PyGC_Head *gc = GC_NEXT(containers); + for (; gc != containers; gc = GC_NEXT(gc)) { + PyObject *op = FROM_GC(gc); + traverse = Py_TYPE(op)->tp_traverse; (void) traverse(FROM_GC(gc), (visitproc)visit_decref, - op); + op); } } @@ -476,93 +476,93 @@ subtract_refs(PyGC_Head *containers) static int visit_reachable(PyObject *op, PyGC_Head *reachable) { - if (!_PyObject_IS_GC(op)) { - return 0; - } - - PyGC_Head *gc = AS_GC(op); - const Py_ssize_t gc_refs = gc_get_refs(gc); - - // Ignore objects in other generation. - // This also skips objects "to the left" of the current position in - // move_unreachable's scan of the 'young' list - they've already been - // traversed, and no longer have the PREV_MASK_COLLECTING flag. - if (! gc_is_collecting(gc)) { - return 0; - } - // It would be a logic error elsewhere if the collecting flag were set on - // an untracked object. - assert(gc->_gc_next != 0); - - if (gc->_gc_next & NEXT_MASK_UNREACHABLE) { - /* This had gc_refs = 0 when move_unreachable got - * to it, but turns out it's reachable after all. - * Move it back to move_unreachable's 'young' list, - * and move_unreachable will eventually get to it - * again. + if (!_PyObject_IS_GC(op)) { + return 0; + } + + PyGC_Head *gc = AS_GC(op); + const Py_ssize_t gc_refs = gc_get_refs(gc); + + // Ignore objects in other generation. + // This also skips objects "to the left" of the current position in + // move_unreachable's scan of the 'young' list - they've already been + // traversed, and no longer have the PREV_MASK_COLLECTING flag. + if (! gc_is_collecting(gc)) { + return 0; + } + // It would be a logic error elsewhere if the collecting flag were set on + // an untracked object. + assert(gc->_gc_next != 0); + + if (gc->_gc_next & NEXT_MASK_UNREACHABLE) { + /* This had gc_refs = 0 when move_unreachable got + * to it, but turns out it's reachable after all. 
+ * Move it back to move_unreachable's 'young' list, + * and move_unreachable will eventually get to it + * again. + */ + // Manually unlink gc from unreachable list because the list functions + // don't work right in the presence of NEXT_MASK_UNREACHABLE flags. + PyGC_Head *prev = GC_PREV(gc); + PyGC_Head *next = (PyGC_Head*)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE); + _PyObject_ASSERT(FROM_GC(prev), + prev->_gc_next & NEXT_MASK_UNREACHABLE); + _PyObject_ASSERT(FROM_GC(next), + next->_gc_next & NEXT_MASK_UNREACHABLE); + prev->_gc_next = gc->_gc_next; // copy NEXT_MASK_UNREACHABLE + _PyGCHead_SET_PREV(next, prev); + + gc_list_append(gc, reachable); + gc_set_refs(gc, 1); + } + else if (gc_refs == 0) { + /* This is in move_unreachable's 'young' list, but + * the traversal hasn't yet gotten to it. All + * we need to do is tell move_unreachable that it's + * reachable. */ - // Manually unlink gc from unreachable list because the list functions - // don't work right in the presence of NEXT_MASK_UNREACHABLE flags. - PyGC_Head *prev = GC_PREV(gc); - PyGC_Head *next = (PyGC_Head*)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE); - _PyObject_ASSERT(FROM_GC(prev), - prev->_gc_next & NEXT_MASK_UNREACHABLE); - _PyObject_ASSERT(FROM_GC(next), - next->_gc_next & NEXT_MASK_UNREACHABLE); - prev->_gc_next = gc->_gc_next; // copy NEXT_MASK_UNREACHABLE - _PyGCHead_SET_PREV(next, prev); - - gc_list_append(gc, reachable); - gc_set_refs(gc, 1); - } - else if (gc_refs == 0) { - /* This is in move_unreachable's 'young' list, but - * the traversal hasn't yet gotten to it. All - * we need to do is tell move_unreachable that it's - * reachable. - */ - gc_set_refs(gc, 1); - } - /* Else there's nothing to do. - * If gc_refs > 0, it must be in move_unreachable's 'young' - * list, and move_unreachable will eventually get to it. - */ - else { - _PyObject_ASSERT_WITH_MSG(op, gc_refs > 0, "refcount is too small"); - } + gc_set_refs(gc, 1); + } + /* Else there's nothing to do. + * If gc_refs > 0, it must be in move_unreachable's 'young' + * list, and move_unreachable will eventually get to it. + */ + else { + _PyObject_ASSERT_WITH_MSG(op, gc_refs > 0, "refcount is too small"); + } return 0; } /* Move the unreachable objects from young to unreachable. After this, - * all objects in young don't have PREV_MASK_COLLECTING flag and - * unreachable have the flag. + * all objects in young don't have PREV_MASK_COLLECTING flag and + * unreachable have the flag. * All objects in young after this are directly or indirectly reachable * from outside the original young; and all objects in unreachable are * not. - * - * This function restores _gc_prev pointer. young and unreachable are - * doubly linked list after this function. - * But _gc_next in unreachable list has NEXT_MASK_UNREACHABLE flag. - * So we can not gc_list_* functions for unreachable until we remove the flag. + * + * This function restores _gc_prev pointer. young and unreachable are + * doubly linked list after this function. + * But _gc_next in unreachable list has NEXT_MASK_UNREACHABLE flag. + * So we can not gc_list_* functions for unreachable until we remove the flag. */ static void move_unreachable(PyGC_Head *young, PyGC_Head *unreachable) { - // previous elem in the young list, used for restore gc_prev. - PyGC_Head *prev = young; - PyGC_Head *gc = GC_NEXT(young); + // previous elem in the young list, used for restore gc_prev. 
+ PyGC_Head *prev = young; + PyGC_Head *gc = GC_NEXT(young); - /* Invariants: all objects "to the left" of us in young are reachable - * (directly or indirectly) from outside the young list as it was at entry. - * - * All other objects from the original young "to the left" of us are in - * unreachable now, and have NEXT_MASK_UNREACHABLE. All objects to the + /* Invariants: all objects "to the left" of us in young are reachable + * (directly or indirectly) from outside the young list as it was at entry. + * + * All other objects from the original young "to the left" of us are in + * unreachable now, and have NEXT_MASK_UNREACHABLE. All objects to the * left of us in 'young' now have been scanned, and no objects here * or to the right have been scanned yet. */ while (gc != young) { - if (gc_get_refs(gc)) { + if (gc_get_refs(gc)) { /* gc is definitely reachable from outside the * original 'young'. Mark it as such, and traverse * its pointers to find any other objects that may @@ -573,18 +573,18 @@ move_unreachable(PyGC_Head *young, PyGC_Head *unreachable) */ PyObject *op = FROM_GC(gc); traverseproc traverse = Py_TYPE(op)->tp_traverse; - _PyObject_ASSERT_WITH_MSG(op, gc_get_refs(gc) > 0, - "refcount is too small"); - // NOTE: visit_reachable may change gc->_gc_next when - // young->_gc_prev == gc. Don't do gc = GC_NEXT(gc) before! + _PyObject_ASSERT_WITH_MSG(op, gc_get_refs(gc) > 0, + "refcount is too small"); + // NOTE: visit_reachable may change gc->_gc_next when + // young->_gc_prev == gc. Don't do gc = GC_NEXT(gc) before! (void) traverse(op, - (visitproc)visit_reachable, - (void *)young); - // relink gc_prev to prev element. - _PyGCHead_SET_PREV(gc, prev); - // gc is not COLLECTING state after here. - gc_clear_collecting(gc); - prev = gc; + (visitproc)visit_reachable, + (void *)young); + // relink gc_prev to prev element. + _PyGCHead_SET_PREV(gc, prev); + // gc is not COLLECTING state after here. + gc_clear_collecting(gc); + prev = gc; } else { /* This *may* be unreachable. To make progress, @@ -594,41 +594,41 @@ move_unreachable(PyGC_Head *young, PyGC_Head *unreachable) * visit_reachable will eventually move gc back into * young if that's so, and we'll see it again. */ - // Move gc to unreachable. - // No need to gc->next->prev = prev because it is single linked. - prev->_gc_next = gc->_gc_next; - - // We can't use gc_list_append() here because we use - // NEXT_MASK_UNREACHABLE here. - PyGC_Head *last = GC_PREV(unreachable); - // NOTE: Since all objects in unreachable set has - // NEXT_MASK_UNREACHABLE flag, we set it unconditionally. - // But this may pollute the unreachable list head's 'next' pointer - // too. That's semantically senseless but expedient here - the - // damage is repaired when this function ends. - last->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)gc); - _PyGCHead_SET_PREV(gc, last); - gc->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)unreachable); - unreachable->_gc_prev = (uintptr_t)gc; + // Move gc to unreachable. + // No need to gc->next->prev = prev because it is single linked. + prev->_gc_next = gc->_gc_next; + + // We can't use gc_list_append() here because we use + // NEXT_MASK_UNREACHABLE here. + PyGC_Head *last = GC_PREV(unreachable); + // NOTE: Since all objects in unreachable set has + // NEXT_MASK_UNREACHABLE flag, we set it unconditionally. + // But this may pollute the unreachable list head's 'next' pointer + // too. That's semantically senseless but expedient here - the + // damage is repaired when this function ends. 
+ last->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)gc); + _PyGCHead_SET_PREV(gc, last); + gc->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)unreachable); + unreachable->_gc_prev = (uintptr_t)gc; + } + gc = (PyGC_Head*)prev->_gc_next; + } + // young->_gc_prev must be last element remained in the list. + young->_gc_prev = (uintptr_t)prev; + // don't let the pollution of the list head's next pointer leak + unreachable->_gc_next &= ~NEXT_MASK_UNREACHABLE; +} + +static void +untrack_tuples(PyGC_Head *head) +{ + PyGC_Head *next, *gc = GC_NEXT(head); + while (gc != head) { + PyObject *op = FROM_GC(gc); + next = GC_NEXT(gc); + if (PyTuple_CheckExact(op)) { + _PyTuple_MaybeUntrack(op); } - gc = (PyGC_Head*)prev->_gc_next; - } - // young->_gc_prev must be last element remained in the list. - young->_gc_prev = (uintptr_t)prev; - // don't let the pollution of the list head's next pointer leak - unreachable->_gc_next &= ~NEXT_MASK_UNREACHABLE; -} - -static void -untrack_tuples(PyGC_Head *head) -{ - PyGC_Head *next, *gc = GC_NEXT(head); - while (gc != head) { - PyObject *op = FROM_GC(gc); - next = GC_NEXT(gc); - if (PyTuple_CheckExact(op)) { - _PyTuple_MaybeUntrack(op); - } gc = next; } } @@ -637,13 +637,13 @@ untrack_tuples(PyGC_Head *head) static void untrack_dicts(PyGC_Head *head) { - PyGC_Head *next, *gc = GC_NEXT(head); + PyGC_Head *next, *gc = GC_NEXT(head); while (gc != head) { PyObject *op = FROM_GC(gc); - next = GC_NEXT(gc); - if (PyDict_CheckExact(op)) { + next = GC_NEXT(gc); + if (PyDict_CheckExact(op)) { _PyDict_MaybeUntrack(op); - } + } gc = next; } } @@ -652,62 +652,62 @@ untrack_dicts(PyGC_Head *head) static int has_legacy_finalizer(PyObject *op) { - return Py_TYPE(op)->tp_del != NULL; + return Py_TYPE(op)->tp_del != NULL; } /* Move the objects in unreachable with tp_del slots into `finalizers`. - * - * This function also removes NEXT_MASK_UNREACHABLE flag - * from _gc_next in unreachable. + * + * This function also removes NEXT_MASK_UNREACHABLE flag + * from _gc_next in unreachable. */ static void move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers) { - PyGC_Head *gc, *next; - assert((unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0); + PyGC_Head *gc, *next; + assert((unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0); /* March over unreachable. Move objects with finalizers into * `finalizers`. 
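`move_unreachable()` and `visit_reachable()` then propagate reachability: anything whose adjusted count stayed positive is kept, and every object it refers to is rescued back out of the tentative unreachable set. A simplified sequel to the gc_refs toy above, on the same illustrative graph (not the real list manipulation):

```c
/* Simplified reachability propagation: rescued objects get revisited,
 * like objects moved back into 'young' ahead of the scan position. */
#include <stdio.h>

#define N 3

int main(void)
{
    int edge[N][N] = { {0, 1, 0}, {0, 0, 1}, {0, 1, 0} };
    int gc_refs[N] = {1, 0, 0};      /* result of the subtract_refs toy */
    int reachable[N] = {0, 0, 0};

    int changed = 1;
    while (changed) {
        changed = 0;
        for (int i = 0; i < N; i++) {
            if (gc_refs[i] > 0 && !reachable[i]) {
                reachable[i] = 1;
                changed = 1;
                for (int j = 0; j < N; j++)
                    if (edge[i][j] && gc_refs[j] == 0)
                        gc_refs[j] = 1;   /* visit_reachable's rescue */
            }
        }
    }
    /* obj0 is externally rooted and drags the 1<->2 cycle back to life;
     * remove obj0's edge and both cycle members would stay unreachable. */
    for (int i = 0; i < N; i++)
        printf("obj%d: %s\n", i, reachable[i] ? "reachable" : "unreachable");
    return 0;
}
```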
*/ - for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) { + for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) { PyObject *op = FROM_GC(gc); - _PyObject_ASSERT(op, gc->_gc_next & NEXT_MASK_UNREACHABLE); - gc->_gc_next &= ~NEXT_MASK_UNREACHABLE; - next = (PyGC_Head*)gc->_gc_next; + _PyObject_ASSERT(op, gc->_gc_next & NEXT_MASK_UNREACHABLE); + gc->_gc_next &= ~NEXT_MASK_UNREACHABLE; + next = (PyGC_Head*)gc->_gc_next; if (has_legacy_finalizer(op)) { - gc_clear_collecting(gc); + gc_clear_collecting(gc); gc_list_move(gc, finalizers); } } } -static inline void -clear_unreachable_mask(PyGC_Head *unreachable) -{ - /* Check that the list head does not have the unreachable bit set */ - assert(((uintptr_t)unreachable & NEXT_MASK_UNREACHABLE) == 0); - - PyGC_Head *gc, *next; - assert((unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0); - for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) { - _PyObject_ASSERT((PyObject*)FROM_GC(gc), gc->_gc_next & NEXT_MASK_UNREACHABLE); - gc->_gc_next &= ~NEXT_MASK_UNREACHABLE; - next = (PyGC_Head*)gc->_gc_next; - } - validate_list(unreachable, collecting_set_unreachable_clear); -} - +static inline void +clear_unreachable_mask(PyGC_Head *unreachable) +{ + /* Check that the list head does not have the unreachable bit set */ + assert(((uintptr_t)unreachable & NEXT_MASK_UNREACHABLE) == 0); + + PyGC_Head *gc, *next; + assert((unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0); + for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) { + _PyObject_ASSERT((PyObject*)FROM_GC(gc), gc->_gc_next & NEXT_MASK_UNREACHABLE); + gc->_gc_next &= ~NEXT_MASK_UNREACHABLE; + next = (PyGC_Head*)gc->_gc_next; + } + validate_list(unreachable, collecting_set_unreachable_clear); +} + /* A traversal callback for move_legacy_finalizer_reachable. */ static int visit_move(PyObject *op, PyGC_Head *tolist) { - if (_PyObject_IS_GC(op)) { - PyGC_Head *gc = AS_GC(op); - if (gc_is_collecting(gc)) { + if (_PyObject_IS_GC(op)) { + PyGC_Head *gc = AS_GC(op); + if (gc_is_collecting(gc)) { gc_list_move(gc, tolist); - gc_clear_collecting(gc); + gc_clear_collecting(gc); } } return 0; @@ -720,8 +720,8 @@ static void move_legacy_finalizer_reachable(PyGC_Head *finalizers) { traverseproc traverse; - PyGC_Head *gc = GC_NEXT(finalizers); - for (; gc != finalizers; gc = GC_NEXT(gc)) { + PyGC_Head *gc = GC_NEXT(finalizers); + for (; gc != finalizers; gc = GC_NEXT(gc)) { /* Note that the finalizers list may grow during this. */ traverse = Py_TYPE(FROM_GC(gc))->tp_traverse; (void) traverse(FROM_GC(gc), @@ -761,33 +761,33 @@ handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old) * make another pass over wrcb_to_call, invoking callbacks, after this * pass completes. */ - for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) { + for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) { PyWeakReference **wrlist; op = FROM_GC(gc); - next = GC_NEXT(gc); - - if (PyWeakref_Check(op)) { - /* A weakref inside the unreachable set must be cleared. If we - * allow its callback to execute inside delete_garbage(), it - * could expose objects that have tp_clear already called on - * them. Or, it could resurrect unreachable objects. One way - * this can happen is if some container objects do not implement - * tp_traverse. Then, wr_object can be outside the unreachable - * set but can be deallocated as a result of breaking the - * reference cycle. If we don't clear the weakref, the callback - * will run and potentially cause a crash. See bpo-38006 for - * one example. 
- */ - _PyWeakref_ClearRef((PyWeakReference *)op); - } - + next = GC_NEXT(gc); + + if (PyWeakref_Check(op)) { + /* A weakref inside the unreachable set must be cleared. If we + * allow its callback to execute inside delete_garbage(), it + * could expose objects that have tp_clear already called on + * them. Or, it could resurrect unreachable objects. One way + * this can happen is if some container objects do not implement + * tp_traverse. Then, wr_object can be outside the unreachable + * set but can be deallocated as a result of breaking the + * reference cycle. If we don't clear the weakref, the callback + * will run and potentially cause a crash. See bpo-38006 for + * one example. + */ + _PyWeakref_ClearRef((PyWeakReference *)op); + } + if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op))) continue; /* It supports weakrefs. Does it have any? */ wrlist = (PyWeakReference **) - _PyObject_GET_WEAKREFS_LISTPTR(op); + _PyObject_GET_WEAKREFS_LISTPTR(op); /* `op` may have some weakrefs. March over the list, clear * all the weakrefs, and move the weakrefs with callbacks @@ -800,47 +800,47 @@ handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old) * the callback pointer intact. Obscure: it also * changes *wrlist. */ - _PyObject_ASSERT((PyObject *)wr, wr->wr_object == op); + _PyObject_ASSERT((PyObject *)wr, wr->wr_object == op); _PyWeakref_ClearRef(wr); - _PyObject_ASSERT((PyObject *)wr, wr->wr_object == Py_None); - if (wr->wr_callback == NULL) { - /* no callback */ - continue; - } - - /* Headache time. `op` is going away, and is weakly referenced by - * `wr`, which has a callback. Should the callback be invoked? If wr - * is also trash, no: - * - * 1. There's no need to call it. The object and the weakref are - * both going away, so it's legitimate to pretend the weakref is - * going away first. The user has to ensure a weakref outlives its - * referent if they want a guarantee that the wr callback will get - * invoked. - * - * 2. It may be catastrophic to call it. If the callback is also in - * cyclic trash (CT), then although the CT is unreachable from - * outside the current generation, CT may be reachable from the - * callback. Then the callback could resurrect insane objects. - * - * Since the callback is never needed and may be unsafe in this case, - * wr is simply left in the unreachable set. Note that because we - * already called _PyWeakref_ClearRef(wr), its callback will never - * trigger. - * - * OTOH, if wr isn't part of CT, we should invoke the callback: the - * weakref outlived the trash. Note that since wr isn't CT in this - * case, its callback can't be CT either -- wr acted as an external - * root to this generation, and therefore its callback did too. So - * nothing in CT is reachable from the callback either, so it's hard - * to imagine how calling it later could create a problem for us. wr - * is moved to wrcb_to_call in this case. - */ - if (gc_is_collecting(AS_GC(wr))) { - /* it should already have been cleared above */ - assert(wr->wr_object == Py_None); + _PyObject_ASSERT((PyObject *)wr, wr->wr_object == Py_None); + if (wr->wr_callback == NULL) { + /* no callback */ + continue; + } + + /* Headache time. `op` is going away, and is weakly referenced by + * `wr`, which has a callback. Should the callback be invoked? If wr + * is also trash, no: + * + * 1. There's no need to call it. The object and the weakref are + * both going away, so it's legitimate to pretend the weakref is + * going away first. 
The user has to ensure a weakref outlives its + * referent if they want a guarantee that the wr callback will get + * invoked. + * + * 2. It may be catastrophic to call it. If the callback is also in + * cyclic trash (CT), then although the CT is unreachable from + * outside the current generation, CT may be reachable from the + * callback. Then the callback could resurrect insane objects. + * + * Since the callback is never needed and may be unsafe in this case, + * wr is simply left in the unreachable set. Note that because we + * already called _PyWeakref_ClearRef(wr), its callback will never + * trigger. + * + * OTOH, if wr isn't part of CT, we should invoke the callback: the + * weakref outlived the trash. Note that since wr isn't CT in this + * case, its callback can't be CT either -- wr acted as an external + * root to this generation, and therefore its callback did too. So + * nothing in CT is reachable from the callback either, so it's hard + * to imagine how calling it later could create a problem for us. wr + * is moved to wrcb_to_call in this case. + */ + if (gc_is_collecting(AS_GC(wr))) { + /* it should already have been cleared above */ + assert(wr->wr_object == Py_None); continue; - } + } /* Create a new reference so that wr can't go away * before we can process it again. @@ -863,15 +863,15 @@ handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old) PyObject *temp; PyObject *callback; - gc = (PyGC_Head*)wrcb_to_call._gc_next; + gc = (PyGC_Head*)wrcb_to_call._gc_next; op = FROM_GC(gc); - _PyObject_ASSERT(op, PyWeakref_Check(op)); + _PyObject_ASSERT(op, PyWeakref_Check(op)); wr = (PyWeakReference *)op; callback = wr->wr_callback; - _PyObject_ASSERT(op, callback != NULL); + _PyObject_ASSERT(op, callback != NULL); /* copy-paste of weakrefobject.c's handle_callback() */ - temp = PyObject_CallOneArg(callback, (PyObject *)wr); + temp = PyObject_CallOneArg(callback, (PyObject *)wr); if (temp == NULL) PyErr_WriteUnraisable(callback); else @@ -889,13 +889,13 @@ handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old) * ours). */ Py_DECREF(op); - if (wrcb_to_call._gc_next == (uintptr_t)gc) { + if (wrcb_to_call._gc_next == (uintptr_t)gc) { /* object is still alive -- move it */ gc_list_move(gc, old); } - else { + else { ++num_freed; - } + } } return num_freed; @@ -916,22 +916,22 @@ debug_cycle(const char *msg, PyObject *op) * merged into the old list regardless. */ static void -handle_legacy_finalizers(PyThreadState *tstate, - GCState *gcstate, - PyGC_Head *finalizers, PyGC_Head *old) +handle_legacy_finalizers(PyThreadState *tstate, + GCState *gcstate, + PyGC_Head *finalizers, PyGC_Head *old) { - assert(!_PyErr_Occurred(tstate)); - assert(gcstate->garbage != NULL); + assert(!_PyErr_Occurred(tstate)); + assert(gcstate->garbage != NULL); - PyGC_Head *gc = GC_NEXT(finalizers); - for (; gc != finalizers; gc = GC_NEXT(gc)) { + PyGC_Head *gc = GC_NEXT(finalizers); + for (; gc != finalizers; gc = GC_NEXT(gc)) { PyObject *op = FROM_GC(gc); - if ((gcstate->debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) { - if (PyList_Append(gcstate->garbage, op) < 0) { - _PyErr_Clear(tstate); + if ((gcstate->debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) { + if (PyList_Append(gcstate->garbage, op) < 0) { + _PyErr_Clear(tstate); break; - } + } } } @@ -943,7 +943,7 @@ handle_legacy_finalizers(PyThreadState *tstate, * list, due to refcounts falling to 0. 
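The long "headache time" comment above boils the weakref-callback question down to one test: a callback is invoked only when the weakref itself survived outside the cyclic trash. That policy as a standalone predicate, with hypothetical types and field names:

```c
/* The weakref-callback policy described above as a predicate.
 * Types and fields here are hypothetical, for illustration only. */
#include <stdio.h>

typedef struct {
    int in_cyclic_trash;   /* the weakref object is itself unreachable */
    int has_callback;
} toy_weakref;

/* 1 => move to wrcb_to_call and invoke later; 0 => only clear the ref */
static int should_invoke_callback(const toy_weakref *wr)
{
    return wr->has_callback && !wr->in_cyclic_trash;
}

int main(void)
{
    toy_weakref outside = {0, 1};   /* outlived the trash: callback runs */
    toy_weakref inside  = {1, 1};   /* part of the trash: suppressed */
    printf("outside=%d inside=%d\n",
           should_invoke_callback(&outside),
           should_invoke_callback(&inside));
    return 0;
}
```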
*/ static void -finalize_garbage(PyThreadState *tstate, PyGC_Head *collectable) +finalize_garbage(PyThreadState *tstate, PyGC_Head *collectable) { destructor finalize; PyGC_Head seen; @@ -959,15 +959,15 @@ finalize_garbage(PyThreadState *tstate, PyGC_Head *collectable) gc_list_init(&seen); while (!gc_list_is_empty(collectable)) { - PyGC_Head *gc = GC_NEXT(collectable); + PyGC_Head *gc = GC_NEXT(collectable); PyObject *op = FROM_GC(gc); gc_list_move(gc, &seen); if (!_PyGCHead_FINALIZED(gc) && (finalize = Py_TYPE(op)->tp_finalize) != NULL) { - _PyGCHead_SET_FINALIZED(gc); + _PyGCHead_SET_FINALIZED(gc); Py_INCREF(op); finalize(op); - assert(!_PyErr_Occurred(tstate)); + assert(!_PyErr_Occurred(tstate)); Py_DECREF(op); } } @@ -979,39 +979,39 @@ finalize_garbage(PyThreadState *tstate, PyGC_Head *collectable) * objects may be freed. It is possible I screwed something up here. */ static void -delete_garbage(PyThreadState *tstate, GCState *gcstate, - PyGC_Head *collectable, PyGC_Head *old) +delete_garbage(PyThreadState *tstate, GCState *gcstate, + PyGC_Head *collectable, PyGC_Head *old) { - assert(!_PyErr_Occurred(tstate)); + assert(!_PyErr_Occurred(tstate)); while (!gc_list_is_empty(collectable)) { - PyGC_Head *gc = GC_NEXT(collectable); + PyGC_Head *gc = GC_NEXT(collectable); PyObject *op = FROM_GC(gc); - _PyObject_ASSERT_WITH_MSG(op, Py_REFCNT(op) > 0, - "refcount is too small"); - - if (gcstate->debug & DEBUG_SAVEALL) { - assert(gcstate->garbage != NULL); - if (PyList_Append(gcstate->garbage, op) < 0) { - _PyErr_Clear(tstate); - } + _PyObject_ASSERT_WITH_MSG(op, Py_REFCNT(op) > 0, + "refcount is too small"); + + if (gcstate->debug & DEBUG_SAVEALL) { + assert(gcstate->garbage != NULL); + if (PyList_Append(gcstate->garbage, op) < 0) { + _PyErr_Clear(tstate); + } } else { - inquiry clear; + inquiry clear; if ((clear = Py_TYPE(op)->tp_clear) != NULL) { Py_INCREF(op); - (void) clear(op); - if (_PyErr_Occurred(tstate)) { - _PyErr_WriteUnraisableMsg("in tp_clear of", - (PyObject*)Py_TYPE(op)); - } + (void) clear(op); + if (_PyErr_Occurred(tstate)) { + _PyErr_WriteUnraisableMsg("in tp_clear of", + (PyObject*)Py_TYPE(op)); + } Py_DECREF(op); } } - if (GC_NEXT(collectable) == gc) { + if (GC_NEXT(collectable) == gc) { /* object is still alive, move it, it may die later */ - gc_clear_collecting(gc); + gc_clear_collecting(gc); gc_list_move(gc, old); } } @@ -1025,150 +1025,150 @@ delete_garbage(PyThreadState *tstate, GCState *gcstate, static void clear_freelists(void) { - _PyFrame_ClearFreeList(); - _PyTuple_ClearFreeList(); - _PyFloat_ClearFreeList(); - _PyList_ClearFreeList(); - _PyDict_ClearFreeList(); - _PyAsyncGen_ClearFreeLists(); - _PyContext_ClearFreeList(); -} - -// Show stats for objects in each generations -static void -show_stats_each_generations(GCState *gcstate) -{ - char buf[100]; - size_t pos = 0; - - for (int i = 0; i < NUM_GENERATIONS && pos < sizeof(buf); i++) { - pos += PyOS_snprintf(buf+pos, sizeof(buf)-pos, - " %"PY_FORMAT_SIZE_T"d", - gc_list_size(GEN_HEAD(gcstate, i))); - } - - PySys_FormatStderr( - "gc: objects in each generation:%s\n" - "gc: objects in permanent generation: %zd\n", - buf, gc_list_size(&gcstate->permanent_generation.head)); -} - -/* Deduce which objects among "base" are unreachable from outside the list - and move them to 'unreachable'. The process consist in the following steps: - -1. Copy all reference counts to a different field (gc_prev is used to hold - this copy to save memory). -2. 
Traverse all objects in "base" and visit all referred objects using - "tp_traverse" and for every visited object, subtract 1 to the reference - count (the one that we copied in the previous step). After this step, all - objects that can be reached directly from outside must have strictly positive - reference count, while all unreachable objects must have a count of exactly 0. -3. Identify all unreachable objects (the ones with 0 reference count) and move - them to the "unreachable" list. This step also needs to move back to "base" all - objects that were initially marked as unreachable but are referred transitively - by the reachable objects (the ones with strictly positive reference count). - -Contracts: - - * The "base" has to be a valid list with no mask set. - - * The "unreachable" list must be uninitialized (this function calls - gc_list_init over 'unreachable'). - -IMPORTANT: This function leaves 'unreachable' with the NEXT_MASK_UNREACHABLE -flag set but it does not clear it to skip unnecessary iteration. Before the -flag is cleared (for example, by using 'clear_unreachable_mask' function or -by a call to 'move_legacy_finalizers'), the 'unreachable' list is not a normal -list and we can not use most gc_list_* functions for it. */ -static inline void -deduce_unreachable(PyGC_Head *base, PyGC_Head *unreachable) { - validate_list(base, collecting_clear_unreachable_clear); - /* Using ob_refcnt and gc_refs, calculate which objects in the - * container set are reachable from outside the set (i.e., have a - * refcount greater than 0 when all the references within the - * set are taken into account). - */ - update_refs(base); // gc_prev is used for gc_refs - subtract_refs(base); - - /* Leave everything reachable from outside base in base, and move - * everything else (in base) to unreachable. - * - * NOTE: This used to move the reachable objects into a reachable - * set instead. But most things usually turn out to be reachable, - * so it's more efficient to move the unreachable things. It "sounds slick" - * to move the unreachable objects, until you think about it - the reason it - * pays isn't actually obvious. - * - * Suppose we create objects A, B, C in that order. They appear in the young - * generation in the same order. If B points to A, and C to B, and C is - * reachable from outside, then the adjusted refcounts will be 0, 0, and 1 - * respectively. - * - * When move_unreachable finds A, A is moved to the unreachable list. The - * same for B when it's first encountered. Then C is traversed, B is moved - * _back_ to the reachable list. B is eventually traversed, and then A is - * moved back to the reachable list. - * - * So instead of not moving at all, the reachable objects B and A are moved - * twice each. Why is this a win? A straightforward algorithm to move the - * reachable objects instead would move A, B, and C once each. - * - * The key is that this dance leaves the objects in order C, B, A - it's - * reversed from the original order. On all _subsequent_ scans, none of - * them will move. Since most objects aren't in cycles, this can save an - * unbounded number of moves across an unbounded number of later collections. - * It can cost more only the first time the chain is scanned. - * - * Drawback: move_unreachable is also used to find out what's still trash - * after finalizers may resurrect objects. In _that_ case most unreachable - * objects will remain unreachable, so it would be more efficient to move - * the reachable objects instead. 
But this is a one-time cost, probably not - * worth complicating the code to speed just a little. - */ - gc_list_init(unreachable); - move_unreachable(base, unreachable); // gc_prev is pointer again - validate_list(base, collecting_clear_unreachable_clear); - validate_list(unreachable, collecting_set_unreachable_set); -} - -/* Handle objects that may have resurrected after a call to 'finalize_garbage', moving - them to 'old_generation' and placing the rest on 'still_unreachable'. - - Contracts: - * After this function 'unreachable' must not be used anymore and 'still_unreachable' - will contain the objects that did not resurrect. - - * The "still_unreachable" list must be uninitialized (this function calls - gc_list_init over 'still_unreachable'). - -IMPORTANT: After a call to this function, the 'still_unreachable' set will have the -PREV_MARK_COLLECTING set, but the objects in this set are going to be removed so -we can skip the expense of clearing the flag to avoid extra iteration. */ -static inline void -handle_resurrected_objects(PyGC_Head *unreachable, PyGC_Head* still_unreachable, - PyGC_Head *old_generation) -{ - // Remove the PREV_MASK_COLLECTING from unreachable - // to prepare it for a new call to 'deduce_unreachable' - gc_list_clear_collecting(unreachable); - - // After the call to deduce_unreachable, the 'still_unreachable' set will - // have the PREV_MARK_COLLECTING set, but the objects are going to be - // removed so we can skip the expense of clearing the flag. - PyGC_Head* resurrected = unreachable; - deduce_unreachable(resurrected, still_unreachable); - clear_unreachable_mask(still_unreachable); - - // Move the resurrected objects to the old generation for future collection. - gc_list_merge(resurrected, old_generation); -} - + _PyFrame_ClearFreeList(); + _PyTuple_ClearFreeList(); + _PyFloat_ClearFreeList(); + _PyList_ClearFreeList(); + _PyDict_ClearFreeList(); + _PyAsyncGen_ClearFreeLists(); + _PyContext_ClearFreeList(); +} + +// Show stats for objects in each generations +static void +show_stats_each_generations(GCState *gcstate) +{ + char buf[100]; + size_t pos = 0; + + for (int i = 0; i < NUM_GENERATIONS && pos < sizeof(buf); i++) { + pos += PyOS_snprintf(buf+pos, sizeof(buf)-pos, + " %"PY_FORMAT_SIZE_T"d", + gc_list_size(GEN_HEAD(gcstate, i))); + } + + PySys_FormatStderr( + "gc: objects in each generation:%s\n" + "gc: objects in permanent generation: %zd\n", + buf, gc_list_size(&gcstate->permanent_generation.head)); +} + +/* Deduce which objects among "base" are unreachable from outside the list + and move them to 'unreachable'. The process consist in the following steps: + +1. Copy all reference counts to a different field (gc_prev is used to hold + this copy to save memory). +2. Traverse all objects in "base" and visit all referred objects using + "tp_traverse" and for every visited object, subtract 1 to the reference + count (the one that we copied in the previous step). After this step, all + objects that can be reached directly from outside must have strictly positive + reference count, while all unreachable objects must have a count of exactly 0. +3. Identify all unreachable objects (the ones with 0 reference count) and move + them to the "unreachable" list. This step also needs to move back to "base" all + objects that were initially marked as unreachable but are referred transitively + by the reachable objects (the ones with strictly positive reference count). + +Contracts: + + * The "base" has to be a valid list with no mask set. 
+ + * The "unreachable" list must be uninitialized (this function calls + gc_list_init over 'unreachable'). + +IMPORTANT: This function leaves 'unreachable' with the NEXT_MASK_UNREACHABLE +flag set but it does not clear it to skip unnecessary iteration. Before the +flag is cleared (for example, by using 'clear_unreachable_mask' function or +by a call to 'move_legacy_finalizers'), the 'unreachable' list is not a normal +list and we can not use most gc_list_* functions for it. */ +static inline void +deduce_unreachable(PyGC_Head *base, PyGC_Head *unreachable) { + validate_list(base, collecting_clear_unreachable_clear); + /* Using ob_refcnt and gc_refs, calculate which objects in the + * container set are reachable from outside the set (i.e., have a + * refcount greater than 0 when all the references within the + * set are taken into account). + */ + update_refs(base); // gc_prev is used for gc_refs + subtract_refs(base); + + /* Leave everything reachable from outside base in base, and move + * everything else (in base) to unreachable. + * + * NOTE: This used to move the reachable objects into a reachable + * set instead. But most things usually turn out to be reachable, + * so it's more efficient to move the unreachable things. It "sounds slick" + * to move the unreachable objects, until you think about it - the reason it + * pays isn't actually obvious. + * + * Suppose we create objects A, B, C in that order. They appear in the young + * generation in the same order. If B points to A, and C to B, and C is + * reachable from outside, then the adjusted refcounts will be 0, 0, and 1 + * respectively. + * + * When move_unreachable finds A, A is moved to the unreachable list. The + * same for B when it's first encountered. Then C is traversed, B is moved + * _back_ to the reachable list. B is eventually traversed, and then A is + * moved back to the reachable list. + * + * So instead of not moving at all, the reachable objects B and A are moved + * twice each. Why is this a win? A straightforward algorithm to move the + * reachable objects instead would move A, B, and C once each. + * + * The key is that this dance leaves the objects in order C, B, A - it's + * reversed from the original order. On all _subsequent_ scans, none of + * them will move. Since most objects aren't in cycles, this can save an + * unbounded number of moves across an unbounded number of later collections. + * It can cost more only the first time the chain is scanned. + * + * Drawback: move_unreachable is also used to find out what's still trash + * after finalizers may resurrect objects. In _that_ case most unreachable + * objects will remain unreachable, so it would be more efficient to move + * the reachable objects instead. But this is a one-time cost, probably not + * worth complicating the code to speed just a little. + */ + gc_list_init(unreachable); + move_unreachable(base, unreachable); // gc_prev is pointer again + validate_list(base, collecting_clear_unreachable_clear); + validate_list(unreachable, collecting_set_unreachable_set); +} + +/* Handle objects that may have resurrected after a call to 'finalize_garbage', moving + them to 'old_generation' and placing the rest on 'still_unreachable'. + + Contracts: + * After this function 'unreachable' must not be used anymore and 'still_unreachable' + will contain the objects that did not resurrect. + + * The "still_unreachable" list must be uninitialized (this function calls + gc_list_init over 'still_unreachable'). 
+
+/* Handle objects that may have resurrected after a call to 'finalize_garbage', moving
+ them to 'old_generation' and placing the rest on 'still_unreachable'.
+
+ Contracts:
+ * After this function 'unreachable' must not be used anymore and 'still_unreachable'
+ will contain the objects that did not resurrect.
+
+ * The "still_unreachable" list must be uninitialized (this function calls
+ gc_list_init over 'still_unreachable').
+
+IMPORTANT: After a call to this function, the 'still_unreachable' set will have the
+PREV_MASK_COLLECTING flag set, but the objects in this set are going to be removed so
+we can skip the expense of clearing the flag to avoid extra iteration. */
+static inline void
+handle_resurrected_objects(PyGC_Head *unreachable, PyGC_Head* still_unreachable,
+ PyGC_Head *old_generation)
+{
+ // Remove the PREV_MASK_COLLECTING from unreachable
+ // to prepare it for a new call to 'deduce_unreachable'
+ gc_list_clear_collecting(unreachable);
+
+ // After the call to deduce_unreachable, the 'still_unreachable' set will
+ // have the PREV_MASK_COLLECTING flag set, but the objects are going to be
+ // removed so we can skip the expense of clearing the flag.
+ PyGC_Head* resurrected = unreachable;
+ deduce_unreachable(resurrected, still_unreachable);
+ clear_unreachable_mask(still_unreachable);
+
+ // Move the resurrected objects to the old generation for future collection.
+ gc_list_merge(resurrected, old_generation);
+}
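The resurrection path can be observed from the public API. A minimal sketch, assuming a Python 3.4+ build with PEP 442 finalizer semantics (a __del__ that stores self somewhere reachable makes the object survive the collection, which is exactly the case handle_resurrected_objects() deals with); the embedded script is illustrative:

#include <Python.h>

int main(void)
{
    Py_Initialize();
    /* The cycle {a <-> a.self_ref} is found unreachable, finalize_garbage()
     * runs __del__, and because __del__ re-roots the object in 'survivors',
     * handle_resurrected_objects() moves it to the old generation instead
     * of letting delete_garbage() clear it. */
    PyRun_SimpleString(
        "import gc\n"
        "survivors = []\n"
        "class Node:\n"
        "    def __del__(self):\n"
        "        survivors.append(self)   # resurrection\n"
        "a = Node(); a.self_ref = a       # reference cycle\n"
        "del a\n"
        "gc.collect()\n"
        "print('resurrected:', len(survivors))\n");
    return Py_FinalizeEx() < 0 ? 1 : 0;
}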
+
 /* This is the main function. Read this to understand how the
 * collection process works. */
static Py_ssize_t
-collect(PyThreadState *tstate, int generation,
- Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable, int nofail)
+collect(PyThreadState *tstate, int generation,
+ Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable, int nofail)
{
 int i;
 Py_ssize_t m = 0; /* # objects collected */
@@ -1179,11 +1179,11 @@ collect(PyThreadState *tstate, int generation,
 PyGC_Head finalizers; /* objects with, & reachable from, __del__ */
 PyGC_Head *gc;
 _PyTime_t t1 = 0; /* initialize to prevent a compiler warning */
- GCState *gcstate = &tstate->interp->gc;
+ GCState *gcstate = &tstate->interp->gc;

- if (gcstate->debug & DEBUG_STATS) {
- PySys_WriteStderr("gc: collecting generation %d...\n", generation);
- show_stats_each_generations(gcstate);
+ if (gcstate->debug & DEBUG_STATS) {
+ PySys_WriteStderr("gc: collecting generation %d...\n", generation);
+ show_stats_each_generations(gcstate);
 t1 = _PyTime_GetMonotonicClock();
 }

@@ -1192,47 +1192,47 @@ collect(PyThreadState *tstate, int generation,
 /* update collection and allocation counters */
 if (generation+1 < NUM_GENERATIONS)
- gcstate->generations[generation+1].count += 1;
+ gcstate->generations[generation+1].count += 1;
 for (i = 0; i <= generation; i++)
- gcstate->generations[i].count = 0;
+ gcstate->generations[i].count = 0;

 /* merge younger generations with one we are currently collecting */
 for (i = 0; i < generation; i++) {
- gc_list_merge(GEN_HEAD(gcstate, i), GEN_HEAD(gcstate, generation));
+ gc_list_merge(GEN_HEAD(gcstate, i), GEN_HEAD(gcstate, generation));
 }

 /* handy references */
- young = GEN_HEAD(gcstate, generation);
+ young = GEN_HEAD(gcstate, generation);
 if (generation < NUM_GENERATIONS-1)
- old = GEN_HEAD(gcstate, generation+1);
+ old = GEN_HEAD(gcstate, generation+1);
 else
 old = young;
- validate_list(old, collecting_clear_unreachable_clear);
+ validate_list(old, collecting_clear_unreachable_clear);

- deduce_unreachable(young, &unreachable);
+ deduce_unreachable(young, &unreachable);

- untrack_tuples(young);
+ untrack_tuples(young);
 /* Move reachable objects to next generation. */
 if (young != old) {
 if (generation == NUM_GENERATIONS - 2) {
- gcstate->long_lived_pending += gc_list_size(young);
+ gcstate->long_lived_pending += gc_list_size(young);
 }
 gc_list_merge(young, old);
 }
 else {
- /* We only un-track dicts in full collections, to avoid quadratic
 dict build-up. See issue #14775. */
 untrack_dicts(young);
- gcstate->long_lived_pending = 0;
- gcstate->long_lived_total = gc_list_size(young);
+ gcstate->long_lived_pending = 0;
+ gcstate->long_lived_total = gc_list_size(young);
 }

 /* All objects in unreachable are trash, but objects reachable from
 * legacy finalizers (e.g. tp_del) can't safely be deleted.
 */
 gc_list_init(&finalizers);
- // NEXT_MASK_UNREACHABLE is cleared here.
- // After move_legacy_finalizers(), unreachable is a normal list.
+ // NEXT_MASK_UNREACHABLE is cleared here.
+ // After move_legacy_finalizers(), unreachable is a normal list.
 move_legacy_finalizers(&unreachable, &finalizers);
 /* finalizers contains the unreachable objects with a legacy finalizer;
 * unreachable objects reachable *from* those are also uncollectable,
@@ -1240,12 +1240,12 @@ collect(PyThreadState *tstate, int generation,
 */
 move_legacy_finalizer_reachable(&finalizers);

- validate_list(&finalizers, collecting_clear_unreachable_clear);
- validate_list(&unreachable, collecting_set_unreachable_clear);
-
- /* Print debugging information. */
- if (gcstate->debug & DEBUG_COLLECTABLE) {
- for (gc = GC_NEXT(&unreachable); gc != &unreachable; gc = GC_NEXT(gc)) {
+ validate_list(&finalizers, collecting_clear_unreachable_clear);
+ validate_list(&unreachable, collecting_set_unreachable_clear);
+
+ /* Print debugging information. */
+ if (gcstate->debug & DEBUG_COLLECTABLE) {
+ for (gc = GC_NEXT(&unreachable); gc != &unreachable; gc = GC_NEXT(gc)) {
 debug_cycle("collectable", FROM_GC(gc));
 }
 }
@@ -1253,46 +1253,46 @@ collect(PyThreadState *tstate, int generation,
 /* Clear weakrefs and invoke callbacks as necessary. */
 m += handle_weakrefs(&unreachable, old);

- validate_list(old, collecting_clear_unreachable_clear);
- validate_list(&unreachable, collecting_set_unreachable_clear);
-
+ validate_list(old, collecting_clear_unreachable_clear);
+ validate_list(&unreachable, collecting_set_unreachable_clear);
+
 /* Call tp_finalize on objects which have one. */
- finalize_garbage(tstate, &unreachable);
-
- /* Handle any objects that may have resurrected after the call
- * to 'finalize_garbage' and continue the collection with the
- * objects that are still unreachable */
- PyGC_Head final_unreachable;
- handle_resurrected_objects(&unreachable, &final_unreachable, old);
-
- /* Call tp_clear on objects in the final_unreachable set. This will cause
- * the reference cycles to be broken. It may also cause some objects
- * in finalizers to be freed.
- */
- m += gc_list_size(&final_unreachable);
- delete_garbage(tstate, gcstate, &final_unreachable, old);
-
+ finalize_garbage(tstate, &unreachable);
+
+ /* Handle any objects that may have resurrected after the call
+ * to 'finalize_garbage' and continue the collection with the
+ * objects that are still unreachable */
+ PyGC_Head final_unreachable;
+ handle_resurrected_objects(&unreachable, &final_unreachable, old);
+
+ /* Call tp_clear on objects in the final_unreachable set. This will cause
+ * the reference cycles to be broken. It may also cause some objects
+ * in finalizers to be freed.
+ */
+ m += gc_list_size(&final_unreachable);
+ delete_garbage(tstate, gcstate, &final_unreachable, old);
+
 /* Collect statistics on uncollectable objects found and print
 * debugging information.
*/ - for (gc = GC_NEXT(&finalizers); gc != &finalizers; gc = GC_NEXT(gc)) { + for (gc = GC_NEXT(&finalizers); gc != &finalizers; gc = GC_NEXT(gc)) { n++; - if (gcstate->debug & DEBUG_UNCOLLECTABLE) + if (gcstate->debug & DEBUG_UNCOLLECTABLE) debug_cycle("uncollectable", FROM_GC(gc)); } - if (gcstate->debug & DEBUG_STATS) { - double d = _PyTime_AsSecondsDouble(_PyTime_GetMonotonicClock() - t1); - PySys_WriteStderr( - "gc: done, %" PY_FORMAT_SIZE_T "d unreachable, " - "%" PY_FORMAT_SIZE_T "d uncollectable, %.4fs elapsed\n", - n+m, n, d); + if (gcstate->debug & DEBUG_STATS) { + double d = _PyTime_AsSecondsDouble(_PyTime_GetMonotonicClock() - t1); + PySys_WriteStderr( + "gc: done, %" PY_FORMAT_SIZE_T "d unreachable, " + "%" PY_FORMAT_SIZE_T "d uncollectable, %.4fs elapsed\n", + n+m, n, d); } /* Append instances in the uncollectable set to a Python * reachable list of garbage. The programmer has to deal with * this if they insist on creating this type of structure. */ - handle_legacy_finalizers(tstate, gcstate, &finalizers, old); - validate_list(old, collecting_clear_unreachable_clear); + handle_legacy_finalizers(tstate, gcstate, &finalizers, old); + validate_list(old, collecting_clear_unreachable_clear); /* Clear free list only during the collection of the highest * generation */ @@ -1300,56 +1300,56 @@ collect(PyThreadState *tstate, int generation, clear_freelists(); } - if (_PyErr_Occurred(tstate)) { + if (_PyErr_Occurred(tstate)) { if (nofail) { - _PyErr_Clear(tstate); + _PyErr_Clear(tstate); } else { - _PyErr_WriteUnraisableMsg("in garbage collection", NULL); + _PyErr_WriteUnraisableMsg("in garbage collection", NULL); } } /* Update stats */ - if (n_collected) { + if (n_collected) { *n_collected = m; - } - if (n_uncollectable) { + } + if (n_uncollectable) { *n_uncollectable = n; - } - - struct gc_generation_stats *stats = &gcstate->generation_stats[generation]; + } + + struct gc_generation_stats *stats = &gcstate->generation_stats[generation]; stats->collections++; stats->collected += m; stats->uncollectable += n; - if (PyDTrace_GC_DONE_ENABLED()) { - PyDTrace_GC_DONE(n + m); - } + if (PyDTrace_GC_DONE_ENABLED()) { + PyDTrace_GC_DONE(n + m); + } - assert(!_PyErr_Occurred(tstate)); - return n + m; + assert(!_PyErr_Occurred(tstate)); + return n + m; } /* Invoke progress callbacks to notify clients that garbage collection * is starting or stopping */ static void -invoke_gc_callback(PyThreadState *tstate, const char *phase, - int generation, Py_ssize_t collected, - Py_ssize_t uncollectable) +invoke_gc_callback(PyThreadState *tstate, const char *phase, + int generation, Py_ssize_t collected, + Py_ssize_t uncollectable) { - assert(!_PyErr_Occurred(tstate)); + assert(!_PyErr_Occurred(tstate)); /* we may get called very early */ - GCState *gcstate = &tstate->interp->gc; - if (gcstate->callbacks == NULL) { + GCState *gcstate = &tstate->interp->gc; + if (gcstate->callbacks == NULL) { return; - } - + } + /* The local variable cannot be rebound, check it for sanity */ - assert(PyList_CheckExact(gcstate->callbacks)); - PyObject *info = NULL; - if (PyList_GET_SIZE(gcstate->callbacks) != 0) { + assert(PyList_CheckExact(gcstate->callbacks)); + PyObject *info = NULL; + if (PyList_GET_SIZE(gcstate->callbacks) != 0) { info = Py_BuildValue("{sisnsn}", "generation", generation, "collected", collected, @@ -1359,8 +1359,8 @@ invoke_gc_callback(PyThreadState *tstate, const char *phase, return; } } - for (Py_ssize_t i=0; i<PyList_GET_SIZE(gcstate->callbacks); i++) { - PyObject *r, *cb = 
PyList_GET_ITEM(gcstate->callbacks, i);
+ for (Py_ssize_t i=0; i<PyList_GET_SIZE(gcstate->callbacks); i++) {
+ PyObject *r, *cb = PyList_GET_ITEM(gcstate->callbacks, i);
 Py_INCREF(cb); /* make sure cb doesn't go away */
 r = PyObject_CallFunction(cb, "sO", phase, info);
 if (r == NULL) {
@@ -1372,74 +1372,74 @@ invoke_gc_callback(PyThreadState *tstate, const char *phase,
 Py_DECREF(cb);
 }
 Py_XDECREF(info);
- assert(!_PyErr_Occurred(tstate));
+ assert(!_PyErr_Occurred(tstate));
}

/* Perform garbage collection of a generation and invoke
 * progress callbacks.
 */
static Py_ssize_t
-collect_with_callback(PyThreadState *tstate, int generation)
+collect_with_callback(PyThreadState *tstate, int generation)
{
- assert(!_PyErr_Occurred(tstate));
+ assert(!_PyErr_Occurred(tstate));
 Py_ssize_t result, collected, uncollectable;
- invoke_gc_callback(tstate, "start", generation, 0, 0);
- result = collect(tstate, generation, &collected, &uncollectable, 0);
- invoke_gc_callback(tstate, "stop", generation, collected, uncollectable);
- assert(!_PyErr_Occurred(tstate));
+ invoke_gc_callback(tstate, "start", generation, 0, 0);
+ result = collect(tstate, generation, &collected, &uncollectable, 0);
+ invoke_gc_callback(tstate, "stop", generation, collected, uncollectable);
+ assert(!_PyErr_Occurred(tstate));
 return result;
}

static Py_ssize_t
-collect_generations(PyThreadState *tstate)
+collect_generations(PyThreadState *tstate)
{
- GCState *gcstate = &tstate->interp->gc;
+ GCState *gcstate = &tstate->interp->gc;
 /* Find the oldest generation (highest numbered) where the count
 * exceeds the threshold. Objects in that generation and
 * generations younger than it will be collected. */
- Py_ssize_t n = 0;
- for (int i = NUM_GENERATIONS-1; i >= 0; i--) {
- if (gcstate->generations[i].count > gcstate->generations[i].threshold) {
 /* Avoid quadratic performance degradation in number
- of tracked objects (see also issue #4074):
-
- To limit the cost of garbage collection, there are two strategies:
- - make each collection faster, e.g. by scanning fewer objects
- - do fewer collections
- This heuristic is about the latter strategy.
-
- In addition to the various configurable thresholds, we only trigger a
- full collection if the ratio
-
- long_lived_pending / long_lived_total
-
- is above a given value (hardwired to 25%).
-
- The reason is that, while "non-full" collections (i.e., collections of
- the young and middle generations) will always examine roughly the same
- number of objects -- determined by the aforementioned thresholds --,
- the cost of a full collection is proportional to the total number of
- long-lived objects, which is virtually unbounded.
-
- Indeed, it has been remarked that doing a full collection every
- <constant number> of object creations entails a dramatic performance
- degradation in workloads which consist of creating and storing lots of
- long-lived objects (e.g. building a large list of GC-tracked objects would
- show quadratic performance, instead of linear as expected: see issue #4074).
-
- Using the above ratio, instead, yields amortized linear performance in
- the total number of objects (the effect of which can be summarized
- thusly: "each full garbage collection is more and more costly as the
- number of objects grows, but we do fewer and fewer of them").
-
- This heuristic was suggested by Martin von Löwis on python-dev in
- June 2008. His original analysis and proposal can be found at:
- http://mail.python.org/pipermail/python-dev/2008-June/080579.html
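The decision rule this comment describes fits in a few lines. A sketch with invented names and arbitrary sample numbers, mirroring the loop in collect_generations() that follows:

#include <stdio.h>
#include <stddef.h>

#define NUM_GENS 3

struct toygen { ptrdiff_t count, threshold; };

/* Oldest generation whose counter tripped its threshold, or -1. The full
 * collection (i == NUM_GENS-1) is additionally postponed unless pending
 * long-lived objects exceed 25% of the long-lived total. */
static int
pick_generation(const struct toygen gens[NUM_GENS],
                ptrdiff_t long_lived_pending, ptrdiff_t long_lived_total)
{
    for (int i = NUM_GENS - 1; i >= 0; i--) {
        if (gens[i].count > gens[i].threshold) {
            if (i == NUM_GENS - 1 && long_lived_pending < long_lived_total / 4)
                continue;           /* skip the expensive full collection */
            return i;
        }
    }
    return -1;
}

int main(void)
{
    struct toygen gens[NUM_GENS] = {{800, 700}, {5, 10}, {200, 10}};
    /* Generation 2 is over its threshold, but only 10% of long-lived
     * objects are pending, so the cheap generation-0 run wins instead. */
    printf("%d\n", pick_generation(gens, 100, 1000));   /* prints 0 */
    return 0;
}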
+ of tracked objects (see also issue #4074):
+
+ To limit the cost of garbage collection, there are two strategies:
+ - make each collection faster, e.g. by scanning fewer objects
+ - do fewer collections
+ This heuristic is about the latter strategy.
+
+ In addition to the various configurable thresholds, we only trigger a
+ full collection if the ratio
+
+ long_lived_pending / long_lived_total
+
+ is above a given value (hardwired to 25%).
+
+ The reason is that, while "non-full" collections (i.e., collections of
+ the young and middle generations) will always examine roughly the same
+ number of objects -- determined by the aforementioned thresholds --,
+ the cost of a full collection is proportional to the total number of
+ long-lived objects, which is virtually unbounded.
+
+ Indeed, it has been remarked that doing a full collection every
+ <constant number> of object creations entails a dramatic performance
+ degradation in workloads which consist of creating and storing lots of
+ long-lived objects (e.g. building a large list of GC-tracked objects would
+ show quadratic performance, instead of linear as expected: see issue #4074).
+
+ Using the above ratio, instead, yields amortized linear performance in
+ the total number of objects (the effect of which can be summarized
+ thusly: "each full garbage collection is more and more costly as the
+ number of objects grows, but we do fewer and fewer of them").
+
+ This heuristic was suggested by Martin von Löwis on python-dev in
+ June 2008. His original analysis and proposal can be found at:
+ http://mail.python.org/pipermail/python-dev/2008-June/080579.html
 */
 if (i == NUM_GENERATIONS - 1
- && gcstate->long_lived_pending < gcstate->long_lived_total / 4)
+ && gcstate->long_lived_pending < gcstate->long_lived_total / 4)
 continue;
- n = collect_with_callback(tstate, i);
+ n = collect_with_callback(tstate, i);
 break;
 }
 }
@@ -1458,9 +1458,9 @@ static PyObject *
 gc_enable_impl(PyObject *module)
/*[clinic end generated code: output=45a427e9dce9155c input=81ac4940ca579707]*/
{
- PyThreadState *tstate = _PyThreadState_GET();
- GCState *gcstate = &tstate->interp->gc;
- gcstate->enabled = 1;
+ PyThreadState *tstate = _PyThreadState_GET();
+ GCState *gcstate = &tstate->interp->gc;
+ gcstate->enabled = 1;
 Py_RETURN_NONE;
}

@@ -1474,9 +1474,9 @@ static PyObject *
 gc_disable_impl(PyObject *module)
/*[clinic end generated code: output=97d1030f7aa9d279 input=8c2e5a14e800d83b]*/
{
- PyThreadState *tstate = _PyThreadState_GET();
- GCState *gcstate = &tstate->interp->gc;
- gcstate->enabled = 0;
+ PyThreadState *tstate = _PyThreadState_GET();
+ GCState *gcstate = &tstate->interp->gc;
+ gcstate->enabled = 0;
 Py_RETURN_NONE;
}

@@ -1490,9 +1490,9 @@ static int
 gc_isenabled_impl(PyObject *module)
/*[clinic end generated code: output=1874298331c49130 input=30005e0422373b31]*/
{
- PyThreadState *tstate = _PyThreadState_GET();
- GCState *gcstate = &tstate->interp->gc;
- return gcstate->enabled;
+ PyThreadState *tstate = _PyThreadState_GET();
+ GCState *gcstate = &tstate->interp->gc;
+ return gcstate->enabled;
}

/*[clinic input]
@@ -1513,23 +1513,23 @@ static Py_ssize_t
 gc_collect_impl(PyObject *module, int generation)
/*[clinic end generated code: output=b697e633043233c7 input=40720128b682d879]*/
{
- PyThreadState *tstate = _PyThreadState_GET();
+ PyThreadState *tstate = _PyThreadState_GET();
 if (generation < 0 || generation >= NUM_GENERATIONS) {
- _PyErr_SetString(tstate, PyExc_ValueError, "invalid generation"); + _PyErr_SetString(tstate, PyExc_ValueError, "invalid generation"); return -1; } - GCState *gcstate = &tstate->interp->gc; - Py_ssize_t n; - if (gcstate->collecting) { - /* already collecting, don't do anything */ - n = 0; - } + GCState *gcstate = &tstate->interp->gc; + Py_ssize_t n; + if (gcstate->collecting) { + /* already collecting, don't do anything */ + n = 0; + } else { - gcstate->collecting = 1; - n = collect_with_callback(tstate, generation); - gcstate->collecting = 0; + gcstate->collecting = 1; + n = collect_with_callback(tstate, generation); + gcstate->collecting = 0; } return n; } @@ -1556,9 +1556,9 @@ static PyObject * gc_set_debug_impl(PyObject *module, int flags) /*[clinic end generated code: output=7c8366575486b228 input=5e5ce15e84fbed15]*/ { - PyThreadState *tstate = _PyThreadState_GET(); - GCState *gcstate = &tstate->interp->gc; - gcstate->debug = flags; + PyThreadState *tstate = _PyThreadState_GET(); + GCState *gcstate = &tstate->interp->gc; + gcstate->debug = flags; Py_RETURN_NONE; } @@ -1572,9 +1572,9 @@ static int gc_get_debug_impl(PyObject *module) /*[clinic end generated code: output=91242f3506cd1e50 input=91a101e1c3b98366]*/ { - PyThreadState *tstate = _PyThreadState_GET(); - GCState *gcstate = &tstate->interp->gc; - return gcstate->debug; + PyThreadState *tstate = _PyThreadState_GET(); + GCState *gcstate = &tstate->interp->gc; + return gcstate->debug; } PyDoc_STRVAR(gc_set_thresh__doc__, @@ -1584,18 +1584,18 @@ PyDoc_STRVAR(gc_set_thresh__doc__, "collection.\n"); static PyObject * -gc_set_threshold(PyObject *self, PyObject *args) +gc_set_threshold(PyObject *self, PyObject *args) { - PyThreadState *tstate = _PyThreadState_GET(); - GCState *gcstate = &tstate->interp->gc; + PyThreadState *tstate = _PyThreadState_GET(); + GCState *gcstate = &tstate->interp->gc; if (!PyArg_ParseTuple(args, "i|ii:set_threshold", - &gcstate->generations[0].threshold, - &gcstate->generations[1].threshold, - &gcstate->generations[2].threshold)) + &gcstate->generations[0].threshold, + &gcstate->generations[1].threshold, + &gcstate->generations[2].threshold)) return NULL; - for (int i = 3; i < NUM_GENERATIONS; i++) { + for (int i = 3; i < NUM_GENERATIONS; i++) { /* generations higher than 2 get the same threshold */ - gcstate->generations[i].threshold = gcstate->generations[2].threshold; + gcstate->generations[i].threshold = gcstate->generations[2].threshold; } Py_RETURN_NONE; } @@ -1610,12 +1610,12 @@ static PyObject * gc_get_threshold_impl(PyObject *module) /*[clinic end generated code: output=7902bc9f41ecbbd8 input=286d79918034d6e6]*/ { - PyThreadState *tstate = _PyThreadState_GET(); - GCState *gcstate = &tstate->interp->gc; + PyThreadState *tstate = _PyThreadState_GET(); + GCState *gcstate = &tstate->interp->gc; return Py_BuildValue("(iii)", - gcstate->generations[0].threshold, - gcstate->generations[1].threshold, - gcstate->generations[2].threshold); + gcstate->generations[0].threshold, + gcstate->generations[1].threshold, + gcstate->generations[2].threshold); } /*[clinic input] @@ -1628,12 +1628,12 @@ static PyObject * gc_get_count_impl(PyObject *module) /*[clinic end generated code: output=354012e67b16398f input=a392794a08251751]*/ { - PyThreadState *tstate = _PyThreadState_GET(); - GCState *gcstate = &tstate->interp->gc; + PyThreadState *tstate = _PyThreadState_GET(); + GCState *gcstate = &tstate->interp->gc; return Py_BuildValue("(iii)", - gcstate->generations[0].count, - gcstate->generations[1].count, - 
gcstate->generations[2].count); + gcstate->generations[0].count, + gcstate->generations[1].count, + gcstate->generations[2].count); } static int @@ -1652,7 +1652,7 @@ gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist) PyGC_Head *gc; PyObject *obj; traverseproc traverse; - for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) { + for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) { obj = FROM_GC(gc); traverse = Py_TYPE(obj)->tp_traverse; if (obj == objs || obj == resultlist) @@ -1672,21 +1672,21 @@ Return the list of objects that directly refer to any of objs."); static PyObject * gc_get_referrers(PyObject *self, PyObject *args) { - PyThreadState *tstate = _PyThreadState_GET(); + PyThreadState *tstate = _PyThreadState_GET(); int i; - - if (PySys_Audit("gc.get_referrers", "(O)", args) < 0) { - return NULL; - } - + + if (PySys_Audit("gc.get_referrers", "(O)", args) < 0) { + return NULL; + } + PyObject *result = PyList_New(0); - if (!result) { - return NULL; - } + if (!result) { + return NULL; + } - GCState *gcstate = &tstate->interp->gc; + GCState *gcstate = &tstate->interp->gc; for (i = 0; i < NUM_GENERATIONS; i++) { - if (!(gc_referrers_for(args, GEN_HEAD(gcstate, i), result))) { + if (!(gc_referrers_for(args, GEN_HEAD(gcstate, i), result))) { Py_DECREF(result); return NULL; } @@ -1709,9 +1709,9 @@ static PyObject * gc_get_referents(PyObject *self, PyObject *args) { Py_ssize_t i; - if (PySys_Audit("gc.get_referents", "(O)", args) < 0) { - return NULL; - } + if (PySys_Audit("gc.get_referents", "(O)", args) < 0) { + return NULL; + } PyObject *result = PyList_New(0); if (result == NULL) @@ -1721,7 +1721,7 @@ gc_get_referents(PyObject *self, PyObject *args) traverseproc traverse; PyObject *obj = PyTuple_GET_ITEM(args, i); - if (!_PyObject_IS_GC(obj)) + if (!_PyObject_IS_GC(obj)) continue; traverse = Py_TYPE(obj)->tp_traverse; if (! traverse) @@ -1736,67 +1736,67 @@ gc_get_referents(PyObject *self, PyObject *args) /*[clinic input] gc.get_objects - generation: Py_ssize_t(accept={int, NoneType}, c_default="-1") = None - Generation to extract the objects from. + generation: Py_ssize_t(accept={int, NoneType}, c_default="-1") = None + Generation to extract the objects from. Return a list of objects tracked by the collector (excluding the list returned). - -If generation is not None, return only the objects tracked by the collector -that are in that generation. + +If generation is not None, return only the objects tracked by the collector +that are in that generation. 
[clinic start generated code]*/ static PyObject * -gc_get_objects_impl(PyObject *module, Py_ssize_t generation) -/*[clinic end generated code: output=48b35fea4ba6cb0e input=ef7da9df9806754c]*/ +gc_get_objects_impl(PyObject *module, Py_ssize_t generation) +/*[clinic end generated code: output=48b35fea4ba6cb0e input=ef7da9df9806754c]*/ { - PyThreadState *tstate = _PyThreadState_GET(); + PyThreadState *tstate = _PyThreadState_GET(); int i; PyObject* result; - GCState *gcstate = &tstate->interp->gc; + GCState *gcstate = &tstate->interp->gc; + + if (PySys_Audit("gc.get_objects", "n", generation) < 0) { + return NULL; + } - if (PySys_Audit("gc.get_objects", "n", generation) < 0) { - return NULL; - } - result = PyList_New(0); - if (result == NULL) { + if (result == NULL) { return NULL; - } - - /* If generation is passed, we extract only that generation */ - if (generation != -1) { - if (generation >= NUM_GENERATIONS) { - _PyErr_Format(tstate, PyExc_ValueError, - "generation parameter must be less than the number of " - "available generations (%i)", - NUM_GENERATIONS); - goto error; - } - - if (generation < 0) { - _PyErr_SetString(tstate, PyExc_ValueError, - "generation parameter cannot be negative"); - goto error; - } - - if (append_objects(result, GEN_HEAD(gcstate, generation))) { - goto error; - } - - return result; - } - - /* If generation is not passed or None, get all objects from all generations */ + } + + /* If generation is passed, we extract only that generation */ + if (generation != -1) { + if (generation >= NUM_GENERATIONS) { + _PyErr_Format(tstate, PyExc_ValueError, + "generation parameter must be less than the number of " + "available generations (%i)", + NUM_GENERATIONS); + goto error; + } + + if (generation < 0) { + _PyErr_SetString(tstate, PyExc_ValueError, + "generation parameter cannot be negative"); + goto error; + } + + if (append_objects(result, GEN_HEAD(gcstate, generation))) { + goto error; + } + + return result; + } + + /* If generation is not passed or None, get all objects from all generations */ for (i = 0; i < NUM_GENERATIONS; i++) { - if (append_objects(result, GEN_HEAD(gcstate, i))) { - goto error; + if (append_objects(result, GEN_HEAD(gcstate, i))) { + goto error; } } return result; - -error: - Py_DECREF(result); - return NULL; + +error: + Py_DECREF(result); + return NULL; } /*[clinic input] @@ -1811,16 +1811,16 @@ gc_get_stats_impl(PyObject *module) { int i; struct gc_generation_stats stats[NUM_GENERATIONS], *st; - PyThreadState *tstate = _PyThreadState_GET(); + PyThreadState *tstate = _PyThreadState_GET(); /* To get consistent values despite allocations while constructing the result list, we use a snapshot of the running stats. */ - GCState *gcstate = &tstate->interp->gc; + GCState *gcstate = &tstate->interp->gc; for (i = 0; i < NUM_GENERATIONS; i++) { - stats[i] = gcstate->generation_stats[i]; + stats[i] = gcstate->generation_stats[i]; } - PyObject *result = PyList_New(0); + PyObject *result = PyList_New(0); if (result == NULL) return NULL; @@ -1865,7 +1865,7 @@ gc_is_tracked(PyObject *module, PyObject *obj) { PyObject *result; - if (_PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj)) + if (_PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj)) result = Py_True; else result = Py_False; @@ -1874,25 +1874,25 @@ gc_is_tracked(PyObject *module, PyObject *obj) } /*[clinic input] -gc.is_finalized - - obj: object - / - -Returns true if the object has been already finalized by the GC. 
-[clinic start generated code]*/
-
-static PyObject *
-gc_is_finalized(PyObject *module, PyObject *obj)
-/*[clinic end generated code: output=e1516ac119a918ed input=201d0c58f69ae390]*/
-{
- if (_PyObject_IS_GC(obj) && _PyGCHead_FINALIZED(AS_GC(obj))) {
- Py_RETURN_TRUE;
- }
- Py_RETURN_FALSE;
-}
-
-/*[clinic input]
+gc.is_finalized
+
+ obj: object
+ /
+
+Returns true if the object has been already finalized by the GC.
+[clinic start generated code]*/
+
+static PyObject *
+gc_is_finalized(PyObject *module, PyObject *obj)
+/*[clinic end generated code: output=e1516ac119a918ed input=201d0c58f69ae390]*/
+{
+ if (_PyObject_IS_GC(obj) && _PyGCHead_FINALIZED(AS_GC(obj))) {
+ Py_RETURN_TRUE;
+ }
+ Py_RETURN_FALSE;
+}
+
+/*[clinic input]
 gc.freeze

Freeze all currently tracked objects and ignore them for future collections.
@@ -1906,11 +1906,11 @@ static PyObject *
 gc_freeze_impl(PyObject *module)
/*[clinic end generated code: output=502159d9cdc4c139 input=b602b16ac5febbe5]*/
{
- PyThreadState *tstate = _PyThreadState_GET();
- GCState *gcstate = &tstate->interp->gc;
+ PyThreadState *tstate = _PyThreadState_GET();
+ GCState *gcstate = &tstate->interp->gc;
 for (int i = 0; i < NUM_GENERATIONS; ++i) {
- gc_list_merge(GEN_HEAD(gcstate, i), &gcstate->permanent_generation.head);
- gcstate->generations[i].count = 0;
+ gc_list_merge(GEN_HEAD(gcstate, i), &gcstate->permanent_generation.head);
+ gcstate->generations[i].count = 0;
 }
 Py_RETURN_NONE;
}

@@ -1927,10 +1927,10 @@ static PyObject *
 gc_unfreeze_impl(PyObject *module)
/*[clinic end generated code: output=1c15f2043b25e169 input=2dd52b170f4cef6c]*/
{
- PyThreadState *tstate = _PyThreadState_GET();
- GCState *gcstate = &tstate->interp->gc;
- gc_list_merge(&gcstate->permanent_generation.head,
- GEN_HEAD(gcstate, NUM_GENERATIONS-1));
+ PyThreadState *tstate = _PyThreadState_GET();
+ GCState *gcstate = &tstate->interp->gc;
+ gc_list_merge(&gcstate->permanent_generation.head,
+ GEN_HEAD(gcstate, NUM_GENERATIONS-1));
 Py_RETURN_NONE;
}

@@ -1944,9 +1944,9 @@ static Py_ssize_t
 gc_get_freeze_count_impl(PyObject *module)
/*[clinic end generated code: output=61cbd9f43aa032e1 input=45ffbc65cfe2a6ed]*/
{
- PyThreadState *tstate = _PyThreadState_GET();
- GCState *gcstate = &tstate->interp->gc;
- return gc_list_size(&gcstate->permanent_generation.head);
+ PyThreadState *tstate = _PyThreadState_GET();
+ GCState *gcstate = &tstate->interp->gc;
+ return gc_list_size(&gcstate->permanent_generation.head);
}


@@ -1965,7 +1965,7 @@ PyDoc_STRVAR(gc__doc__,
"get_threshold() -- Return the current collection thresholds.\n"
"get_objects() -- Return a list of all objects tracked by the collector.\n"
"is_tracked() -- Returns true if a given object is tracked.\n"
-"is_finalized() -- Returns true if a given object has already been finalized.\n"
+"is_finalized() -- Returns true if a given object has already been finalized.\n"
"get_referrers() -- Return the list of objects that refer to an object.\n"
"get_referents() -- Return the list of objects that an object refers to.\n"
"freeze() -- Freeze all tracked objects and ignore them for future collections.\n"
@@ -1979,13 +1979,13 @@ static PyMethodDef GcMethods[] = {
 GC_SET_DEBUG_METHODDEF
 GC_GET_DEBUG_METHODDEF
 GC_GET_COUNT_METHODDEF
- {"set_threshold", gc_set_threshold, METH_VARARGS, gc_set_thresh__doc__},
+ {"set_threshold", gc_set_threshold, METH_VARARGS, gc_set_thresh__doc__},
 GC_GET_THRESHOLD_METHODDEF
 GC_COLLECT_METHODDEF
 GC_GET_OBJECTS_METHODDEF
 GC_GET_STATS_METHODDEF
 GC_IS_TRACKED_METHODDEF
- GC_IS_FINALIZED_METHODDEF
+
GC_IS_FINALIZED_METHODDEF {"get_referrers", gc_get_referrers, METH_VARARGS, gc_get_referrers__doc__}, {"get_referents", gc_get_referents, METH_VARARGS, @@ -2011,38 +2011,38 @@ static struct PyModuleDef gcmodule = { PyMODINIT_FUNC PyInit_gc(void) { - PyThreadState *tstate = _PyThreadState_GET(); - GCState *gcstate = &tstate->interp->gc; + PyThreadState *tstate = _PyThreadState_GET(); + GCState *gcstate = &tstate->interp->gc; - PyObject *m = PyModule_Create(&gcmodule); + PyObject *m = PyModule_Create(&gcmodule); - if (m == NULL) { + if (m == NULL) { return NULL; - } + } - if (gcstate->garbage == NULL) { - gcstate->garbage = PyList_New(0); - if (gcstate->garbage == NULL) { + if (gcstate->garbage == NULL) { + gcstate->garbage = PyList_New(0); + if (gcstate->garbage == NULL) { return NULL; - } + } } - Py_INCREF(gcstate->garbage); - if (PyModule_AddObject(m, "garbage", gcstate->garbage) < 0) { + Py_INCREF(gcstate->garbage); + if (PyModule_AddObject(m, "garbage", gcstate->garbage) < 0) { return NULL; - } + } - if (gcstate->callbacks == NULL) { - gcstate->callbacks = PyList_New(0); - if (gcstate->callbacks == NULL) { + if (gcstate->callbacks == NULL) { + gcstate->callbacks = PyList_New(0); + if (gcstate->callbacks == NULL) { return NULL; - } + } } - Py_INCREF(gcstate->callbacks); - if (PyModule_AddObject(m, "callbacks", gcstate->callbacks) < 0) { + Py_INCREF(gcstate->callbacks); + if (PyModule_AddObject(m, "callbacks", gcstate->callbacks) < 0) { return NULL; - } + } -#define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) { return NULL; } +#define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) { return NULL; } ADD_INT(DEBUG_STATS); ADD_INT(DEBUG_COLLECTABLE); ADD_INT(DEBUG_UNCOLLECTABLE); @@ -2056,25 +2056,25 @@ PyInit_gc(void) Py_ssize_t PyGC_Collect(void) { - PyThreadState *tstate = _PyThreadState_GET(); - GCState *gcstate = &tstate->interp->gc; - - if (!gcstate->enabled) { - return 0; - } - + PyThreadState *tstate = _PyThreadState_GET(); + GCState *gcstate = &tstate->interp->gc; + + if (!gcstate->enabled) { + return 0; + } + Py_ssize_t n; - if (gcstate->collecting) { - /* already collecting, don't do anything */ - n = 0; - } + if (gcstate->collecting) { + /* already collecting, don't do anything */ + n = 0; + } else { PyObject *exc, *value, *tb; - gcstate->collecting = 1; - _PyErr_Fetch(tstate, &exc, &value, &tb); - n = collect_with_callback(tstate, NUM_GENERATIONS - 1); - _PyErr_Restore(tstate, exc, value, tb); - gcstate->collecting = 0; + gcstate->collecting = 1; + _PyErr_Fetch(tstate, &exc, &value, &tb); + n = collect_with_callback(tstate, NUM_GENERATIONS - 1); + _PyErr_Restore(tstate, exc, value, tb); + gcstate->collecting = 0; } return n; @@ -2089,10 +2089,10 @@ _PyGC_CollectIfEnabled(void) Py_ssize_t _PyGC_CollectNoFail(void) { - PyThreadState *tstate = _PyThreadState_GET(); - assert(!_PyErr_Occurred(tstate)); - - GCState *gcstate = &tstate->interp->gc; + PyThreadState *tstate = _PyThreadState_GET(); + assert(!_PyErr_Occurred(tstate)); + + GCState *gcstate = &tstate->interp->gc; Py_ssize_t n; /* Ideally, this function is only called on interpreter shutdown, @@ -2101,25 +2101,25 @@ _PyGC_CollectNoFail(void) during interpreter shutdown (and then never finish it). See http://bugs.python.org/issue8713#msg195178 for an example. 
*/
- if (gcstate->collecting) {
+ if (gcstate->collecting) {
 n = 0;
- }
+ }
 else {
- gcstate->collecting = 1;
- n = collect(tstate, NUM_GENERATIONS - 1, NULL, NULL, 1);
- gcstate->collecting = 0;
+ gcstate->collecting = 1;
+ n = collect(tstate, NUM_GENERATIONS - 1, NULL, NULL, 1);
+ gcstate->collecting = 0;
 }
 return n;
}

void
-_PyGC_DumpShutdownStats(PyThreadState *tstate)
+_PyGC_DumpShutdownStats(PyThreadState *tstate)
{
- GCState *gcstate = &tstate->interp->gc;
- if (!(gcstate->debug & DEBUG_SAVEALL)
- && gcstate->garbage != NULL && PyList_GET_SIZE(gcstate->garbage) > 0) {
+ GCState *gcstate = &tstate->interp->gc;
+ if (!(gcstate->debug & DEBUG_SAVEALL)
+ && gcstate->garbage != NULL && PyList_GET_SIZE(gcstate->garbage) > 0) {
 const char *message;
- if (gcstate->debug & DEBUG_UNCOLLECTABLE)
+ if (gcstate->debug & DEBUG_UNCOLLECTABLE)
 message = "gc: %zd uncollectable objects at " \
 "shutdown";
 else
@@ -2130,13 +2130,13 @@ _PyGC_DumpShutdownStats(PyThreadState *tstate)
 already. */
 if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,
 "gc", NULL, message,
- PyList_GET_SIZE(gcstate->garbage)))
+ PyList_GET_SIZE(gcstate->garbage)))
 PyErr_WriteUnraisable(NULL);
- if (gcstate->debug & DEBUG_UNCOLLECTABLE) {
+ if (gcstate->debug & DEBUG_UNCOLLECTABLE) {
 PyObject *repr = NULL, *bytes = NULL;
- repr = PyObject_Repr(gcstate->garbage);
+ repr = PyObject_Repr(gcstate->garbage);
 if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr)))
- PyErr_WriteUnraisable(gcstate->garbage);
+ PyErr_WriteUnraisable(gcstate->garbage);
 else {
 PySys_WriteStderr(
 " %s\n",
@@ -2149,36 +2149,36 @@ _PyGC_DumpShutdownStats(PyThreadState *tstate)
 }
 }

-
-static void
-gc_fini_untrack(PyGC_Head *list)
-{
- PyGC_Head *gc;
- for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(list)) {
- PyObject *op = FROM_GC(gc);
- _PyObject_GC_UNTRACK(op);
- }
-}
-
-
+
+static void
+gc_fini_untrack(PyGC_Head *list)
+{
+ PyGC_Head *gc;
+ for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(list)) {
+ PyObject *op = FROM_GC(gc);
+ _PyObject_GC_UNTRACK(op);
+ }
+}
+
+
void
-_PyGC_Fini(PyThreadState *tstate)
-{
- GCState *gcstate = &tstate->interp->gc;
- Py_CLEAR(gcstate->garbage);
- Py_CLEAR(gcstate->callbacks);
-
- if (!_Py_IsMainInterpreter(tstate)) {
- // bpo-46070: Explicitly untrack all objects currently tracked by the
- // GC. Otherwise, if an object is used later by another interpreter,
- // calling PyObject_GC_UnTrack() on the object crashes if the previous
- // or the next object of the PyGC_Head structure became a dangling
- // pointer.
- for (int i = 0; i < NUM_GENERATIONS; i++) {
- PyGC_Head *gen = GEN_HEAD(gcstate, i);
- gc_fini_untrack(gen);
- }
- }
+_PyGC_Fini(PyThreadState *tstate)
+{
+ GCState *gcstate = &tstate->interp->gc;
+ Py_CLEAR(gcstate->garbage);
+ Py_CLEAR(gcstate->callbacks);
+
+ if (!_Py_IsMainInterpreter(tstate)) {
+ // bpo-46070: Explicitly untrack all objects currently tracked by the
+ // GC. Otherwise, if an object is used later by another interpreter,
+ // calling PyObject_GC_UnTrack() on the object crashes if the previous
+ // or the next object of the PyGC_Head structure became a dangling
+ // pointer.
+ for (int i = 0; i < NUM_GENERATIONS; i++) { + PyGC_Head *gen = GEN_HEAD(gcstate, i); + gc_fini_untrack(gen); + } + } } /* for debugging */ @@ -2188,97 +2188,97 @@ _PyGC_Dump(PyGC_Head *g) _PyObject_Dump(FROM_GC(g)); } - -#ifdef Py_DEBUG -static int -visit_validate(PyObject *op, void *parent_raw) -{ - PyObject *parent = _PyObject_CAST(parent_raw); - if (_PyObject_IsFreed(op)) { - _PyObject_ASSERT_FAILED_MSG(parent, - "PyObject_GC_Track() object is not valid"); - } - return 0; -} -#endif - - + +#ifdef Py_DEBUG +static int +visit_validate(PyObject *op, void *parent_raw) +{ + PyObject *parent = _PyObject_CAST(parent_raw); + if (_PyObject_IsFreed(op)) { + _PyObject_ASSERT_FAILED_MSG(parent, + "PyObject_GC_Track() object is not valid"); + } + return 0; +} +#endif + + /* extension modules might be compiled with GC support so these functions must always be available */ void -PyObject_GC_Track(void *op_raw) -{ - PyObject *op = _PyObject_CAST(op_raw); - if (_PyObject_GC_IS_TRACKED(op)) { - _PyObject_ASSERT_FAILED_MSG(op, - "object already tracked " - "by the garbage collector"); - } +PyObject_GC_Track(void *op_raw) +{ + PyObject *op = _PyObject_CAST(op_raw); + if (_PyObject_GC_IS_TRACKED(op)) { + _PyObject_ASSERT_FAILED_MSG(op, + "object already tracked " + "by the garbage collector"); + } _PyObject_GC_TRACK(op); - -#ifdef Py_DEBUG - /* Check that the object is valid: validate objects traversed - by tp_traverse() */ - traverseproc traverse = Py_TYPE(op)->tp_traverse; - (void)traverse(op, visit_validate, op); -#endif + +#ifdef Py_DEBUG + /* Check that the object is valid: validate objects traversed + by tp_traverse() */ + traverseproc traverse = Py_TYPE(op)->tp_traverse; + (void)traverse(op, visit_validate, op); +#endif } void -PyObject_GC_UnTrack(void *op_raw) +PyObject_GC_UnTrack(void *op_raw) { - PyObject *op = _PyObject_CAST(op_raw); + PyObject *op = _PyObject_CAST(op_raw); /* Obscure: the Py_TRASHCAN mechanism requires that we be able to * call PyObject_GC_UnTrack twice on an object. 
*/
- if (_PyObject_GC_IS_TRACKED(op)) {
+ if (_PyObject_GC_IS_TRACKED(op)) {
 _PyObject_GC_UNTRACK(op);
- }
+ }
+}
+
+int
+PyObject_IS_GC(PyObject *obj)
+{
+ return _PyObject_IS_GC(obj);
}

-int
-PyObject_IS_GC(PyObject *obj)
-{
- return _PyObject_IS_GC(obj);
-}
-
static PyObject *
_PyObject_GC_Alloc(int use_calloc, size_t basicsize)
{
- PyThreadState *tstate = _PyThreadState_GET();
- GCState *gcstate = &tstate->interp->gc;
- if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head)) {
- return _PyErr_NoMemory(tstate);
- }
- size_t size = sizeof(PyGC_Head) + basicsize;
-
+ PyThreadState *tstate = _PyThreadState_GET();
+ GCState *gcstate = &tstate->interp->gc;
+ if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head)) {
+ return _PyErr_NoMemory(tstate);
+ }
+ size_t size = sizeof(PyGC_Head) + basicsize;
+
 PyGC_Head *g;
- if (use_calloc) {
+ if (use_calloc) {
 g = (PyGC_Head *)PyObject_Calloc(1, size);
- }
- else {
+ }
+ else {
 g = (PyGC_Head *)PyObject_Malloc(size);
- }
- if (g == NULL) {
- return _PyErr_NoMemory(tstate);
- }
- assert(((uintptr_t)g & 3) == 0); // g must be aligned on a 4-byte boundary
-
- g->_gc_next = 0;
- g->_gc_prev = 0;
- gcstate->generations[0].count++; /* number of allocated GC objects */
- if (gcstate->generations[0].count > gcstate->generations[0].threshold &&
- gcstate->enabled &&
- gcstate->generations[0].threshold &&
- !gcstate->collecting &&
- !_PyErr_Occurred(tstate))
- {
- gcstate->collecting = 1;
- collect_generations(tstate);
- gcstate->collecting = 0;
- }
- PyObject *op = FROM_GC(g);
+ }
+ if (g == NULL) {
+ return _PyErr_NoMemory(tstate);
+ }
+ assert(((uintptr_t)g & 3) == 0); // g must be aligned on a 4-byte boundary
+
+ g->_gc_next = 0;
+ g->_gc_prev = 0;
+ gcstate->generations[0].count++; /* number of allocated GC objects */
+ if (gcstate->generations[0].count > gcstate->generations[0].threshold &&
+ gcstate->enabled &&
+ gcstate->generations[0].threshold &&
+ !gcstate->collecting &&
+ !_PyErr_Occurred(tstate))
+ {
+ gcstate->collecting = 1;
+ collect_generations(tstate);
+ gcstate->collecting = 0;
+ }
+ PyObject *op = FROM_GC(g);
 return op;
}

@@ -2324,17 +2324,17 @@ PyVarObject *
_PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
{
 const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
- _PyObject_ASSERT((PyObject *)op, !_PyObject_GC_IS_TRACKED(op));
- if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head)) {
- return (PyVarObject *)PyErr_NoMemory();
- }
-
+ _PyObject_ASSERT((PyObject *)op, !_PyObject_GC_IS_TRACKED(op));
+ if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head)) {
+ return (PyVarObject *)PyErr_NoMemory();
+ }
+
 PyGC_Head *g = AS_GC(op);
 g = (PyGC_Head *)PyObject_REALLOC(g, sizeof(PyGC_Head) + basicsize);
 if (g == NULL)
 return (PyVarObject *)PyErr_NoMemory();
 op = (PyVarObject *) FROM_GC(g);
- Py_SET_SIZE(op, nitems);
+ Py_SET_SIZE(op, nitems);
 return op;
}

@@ -2342,31 +2342,31 @@ void
PyObject_GC_Del(void *op)
{
 PyGC_Head *g = AS_GC(op);
- if (_PyObject_GC_IS_TRACKED(op)) {
+ if (_PyObject_GC_IS_TRACKED(op)) {
 gc_list_remove(g);
 }
- PyThreadState *tstate = _PyThreadState_GET();
- GCState *gcstate = &tstate->interp->gc;
- if (gcstate->generations[0].count > 0) {
- gcstate->generations[0].count--;
- }
+ PyThreadState *tstate = _PyThreadState_GET();
+ GCState *gcstate = &tstate->interp->gc;
+ if (gcstate->generations[0].count > 0) {
+ gcstate->generations[0].count--;
+ }
 PyObject_FREE(g);
}
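For extension authors, the public counterparts of these allocator and tracking hooks come together when defining a GC-aware type. A minimal sketch; the Pair type and its fields are invented for illustration, and PyType_Ready(&PairType) must have succeeded before the constructor is used:

#include <Python.h>

typedef struct {
    PyObject_HEAD
    PyObject *first;    /* owned references; may participate in cycles */
    PyObject *second;
} PairObject;

static int
Pair_traverse(PairObject *self, visitproc visit, void *arg)
{
    Py_VISIT(self->first);     /* lets subtract_refs()/move_unreachable() */
    Py_VISIT(self->second);    /* see our outgoing references */
    return 0;
}

static int
Pair_clear(PairObject *self)
{
    Py_CLEAR(self->first);     /* called by delete_garbage() to break cycles */
    Py_CLEAR(self->second);
    return 0;
}

static void
Pair_dealloc(PairObject *self)
{
    PyObject_GC_UnTrack(self);         /* must precede member teardown */
    Pair_clear(self);
    PyObject_GC_Del(self);
}

static PyTypeObject PairType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    .tp_name = "example.Pair",
    .tp_basicsize = sizeof(PairObject),
    .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
    .tp_traverse = (traverseproc)Pair_traverse,
    .tp_clear = (inquiry)Pair_clear,
    .tp_dealloc = (destructor)Pair_dealloc,
};

static PyObject *
Pair_new_empty(void)
{
    /* PyObject_GC_New goes through _PyObject_GC_Alloc above, bumping the
     * generation-0 counter and possibly triggering a collection. */
    PairObject *self = PyObject_GC_New(PairObject, &PairType);
    if (self == NULL)
        return NULL;
    self->first = NULL;
    self->second = NULL;
    PyObject_GC_Track((PyObject *)self);   /* hand the object to the GC */
    return (PyObject *)self;
}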
-PyObject_GC_IsFinalized(PyObject *obj) -{ - if (_PyObject_IS_GC(obj) && _PyGCHead_FINALIZED(AS_GC(obj))) { - return 1; - } - return 0; -} + +int +PyObject_GC_IsTracked(PyObject* obj) +{ + if (_PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj)) { + return 1; + } + return 0; +} + +int +PyObject_GC_IsFinalized(PyObject *obj) +{ + if (_PyObject_IS_GC(obj) && _PyGCHead_FINALIZED(AS_GC(obj))) { + return 1; + } + return 0; +} |