/*

  Reference Cycle Garbage Collection
  ==================================

  Neil Schemenauer <nas@arctrix.com>

  Based on a post on the python-dev list.  Ideas from Guido van Rossum,
  Eric Tiedemann, and various others.

  http://www.arctrix.com/nas/python/gc/

  The following mailing list threads provide a historical perspective on
  the design of this module.  Note that a fair amount of refinement has
  occurred since those discussions.

  http://mail.python.org/pipermail/python-dev/2000-March/002385.html
  http://mail.python.org/pipermail/python-dev/2000-March/002434.html
  http://mail.python.org/pipermail/python-dev/2000-March/002497.html

  For a high-level view of the collection process, read the collect
  function.

*/

#include "Python.h"
#include "internal/context.h"
#include "internal/mem.h"
#include "internal/pystate.h"
#include "frameobject.h"        /* for PyFrame_ClearFreeList */
#include "pydtrace.h"
#include "pytime.h"             /* for _PyTime_GetMonotonicClock() */

/*[clinic input]
module gc
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=b5c9690ecc842d79]*/

/* Get an object's GC head */
#define AS_GC(o) ((PyGC_Head *)(o)-1)

/* Get the object given the GC head */
#define FROM_GC(g) ((PyObject *)(((PyGC_Head *)g)+1))
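
/* These two macros rely on the PyGC_Head being allocated immediately
 * before the object it tracks, so they are exact inverses of each other.
 * A minimal sketch of that round trip (kept in #if 0; `layout_example`
 * is a hypothetical helper for illustration only):
 */
#if 0
static void
layout_example(PyObject *op)
{
    PyGC_Head *g = AS_GC(op);    /* step back over the GC header... */
    assert(FROM_GC(g) == op);    /* ...and forward again */
}
#endif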

/* Python string to use if unhandled exception occurs */
static PyObject *gc_str = NULL;

/* set for debugging information */
#define DEBUG_STATS             (1<<0) /* print collection statistics */
#define DEBUG_COLLECTABLE       (1<<1) /* print collectable objects */
#define DEBUG_UNCOLLECTABLE     (1<<2) /* print uncollectable objects */
#define DEBUG_SAVEALL           (1<<5) /* save all garbage in gc.garbage */
#define DEBUG_LEAK              (DEBUG_COLLECTABLE | \
                DEBUG_UNCOLLECTABLE | \
                DEBUG_SAVEALL)

#define GEN_HEAD(n) (&_PyRuntime.gc.generations[n].head)

void
_PyGC_Initialize(struct _gc_runtime_state *state)
{
    state->enabled = 1; /* automatic collection enabled? */

#define _GEN_HEAD(n) (&state->generations[n].head)
    struct gc_generation generations[NUM_GENERATIONS] = {
        /* PyGC_Head,                                 threshold,      count */
        {{{_GEN_HEAD(0), _GEN_HEAD(0), 0}},           700,            0},
        {{{_GEN_HEAD(1), _GEN_HEAD(1), 0}},           10,             0},
        {{{_GEN_HEAD(2), _GEN_HEAD(2), 0}},           10,             0},
    };
    for (int i = 0; i < NUM_GENERATIONS; i++) {
        state->generations[i] = generations[i];
    }
    state->generation0 = GEN_HEAD(0);
    struct gc_generation permanent_generation = {
          {{&state->permanent_generation.head, &state->permanent_generation.head, 0}}, 0, 0
    };
    state->permanent_generation = permanent_generation;
}

/*--------------------------------------------------------------------------
gc_refs values.

Between collections, every gc'ed object has one of two gc_refs values:

GC_UNTRACKED
    The initial state; objects returned by PyObject_GC_Malloc are in this
    state.  The object doesn't live in any generation list, and its
    tp_traverse slot must not be called.

GC_REACHABLE
    The object lives in some generation list, and its tp_traverse is safe to
    call.  An object transitions to GC_REACHABLE when PyObject_GC_Track
    is called.

During a collection, gc_refs can temporarily take on other states:

>= 0
    At the start of a collection, update_refs() copies the true refcount
    to gc_refs, for each object in the generation being collected.
    subtract_refs() then adjusts gc_refs so that it equals the number of
    times an object is referenced directly from outside the generation
    being collected.
    gc_refs remains >= 0 throughout these steps.

GC_TENTATIVELY_UNREACHABLE
    move_unreachable() then moves objects not reachable (whether directly or
    indirectly) from outside the generation into an "unreachable" set.
    Objects that are found to be reachable have gc_refs set to GC_REACHABLE
    again.  Objects that are found to be unreachable have gc_refs set to
    GC_TENTATIVELY_UNREACHABLE.  It's "tentatively" because the pass doing
    this can't be sure until it ends, and GC_TENTATIVELY_UNREACHABLE may
    transition back to GC_REACHABLE.

    Only objects with GC_TENTATIVELY_UNREACHABLE still set are candidates
    for collection.  If it's decided not to collect such an object (e.g.,
    it has a __del__ method), its gc_refs is restored to GC_REACHABLE again.
----------------------------------------------------------------------------
*/
#define GC_UNTRACKED                    _PyGC_REFS_UNTRACKED
#define GC_REACHABLE                    _PyGC_REFS_REACHABLE
#define GC_TENTATIVELY_UNREACHABLE      _PyGC_REFS_TENTATIVELY_UNREACHABLE

#define IS_TRACKED(o) (_PyGC_REFS(o) != GC_UNTRACKED)
#define IS_REACHABLE(o) (_PyGC_REFS(o) == GC_REACHABLE)
#define IS_TENTATIVELY_UNREACHABLE(o) ( \
    _PyGC_REFS(o) == GC_TENTATIVELY_UNREACHABLE)
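
/* Worked example (a sketch):  suppose objects A and B form a cycle
 * (A -> B -> A) and a local variable still references A.  update_refs()
 * copies the true refcounts: gc_refs(A) = 2, gc_refs(B) = 1.
 * subtract_refs() then removes the edges internal to the generation:
 * gc_refs(A) = 1 (only the external reference remains), gc_refs(B) = 0.
 * move_unreachable() keeps A, and B is rescued when A's tp_traverse
 * visits it.  Drop the local variable and both counts fall to 0, so the
 * whole cycle ends up GC_TENTATIVELY_UNREACHABLE.
 */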

/*** list functions ***/

static void
gc_list_init(PyGC_Head *list)
{
    list->gc.gc_prev = list;
    list->gc.gc_next = list;
}

static int
gc_list_is_empty(PyGC_Head *list)
{
    return (list->gc.gc_next == list);
}

#if 0
/* This became unused after gc_list_move() was introduced. */
/* Append `node` to `list`. */
static void
gc_list_append(PyGC_Head *node, PyGC_Head *list)
{
    node->gc.gc_next = list;
    node->gc.gc_prev = list->gc.gc_prev;
    node->gc.gc_prev->gc.gc_next = node;
    list->gc.gc_prev = node;
}
#endif

/* Remove `node` from the gc list it's currently in. */
static void
gc_list_remove(PyGC_Head *node)
{
    node->gc.gc_prev->gc.gc_next = node->gc.gc_next;
    node->gc.gc_next->gc.gc_prev = node->gc.gc_prev;
    node->gc.gc_next = NULL; /* object is not currently tracked */
}

/* Move `node` from the gc list it's currently in (which is not explicitly
 * named here) to the end of `list`.  This is semantically the same as
 * gc_list_remove(node) followed by gc_list_append(node, list).
 */
static void
gc_list_move(PyGC_Head *node, PyGC_Head *list)
{
    PyGC_Head *new_prev;
    PyGC_Head *current_prev = node->gc.gc_prev;
    PyGC_Head *current_next = node->gc.gc_next;
    /* Unlink from current list. */
    current_prev->gc.gc_next = current_next;
    current_next->gc.gc_prev = current_prev;
    /* Relink at end of new list. */
    new_prev = node->gc.gc_prev = list->gc.gc_prev;
    new_prev->gc.gc_next = list->gc.gc_prev = node;
    node->gc.gc_next = list;
}

/* append list `from` onto list `to`; `from` becomes an empty list */
static void
gc_list_merge(PyGC_Head *from, PyGC_Head *to)
{
    PyGC_Head *tail;
    assert(from != to);
    if (!gc_list_is_empty(from)) {
        tail = to->gc.gc_prev;
        tail->gc.gc_next = from->gc.gc_next;
        tail->gc.gc_next->gc.gc_prev = tail;
        to->gc.gc_prev = from->gc.gc_prev;
        to->gc.gc_prev->gc.gc_next = to;
    }
    gc_list_init(from);
}

static Py_ssize_t
gc_list_size(PyGC_Head *list)
{
    PyGC_Head *gc;
    Py_ssize_t n = 0;
    for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
        n++;
    }
    return n;
}

/* Append objects in a GC list to a Python list.
 * Return 0 if all OK, < 0 if error (out of memory for list).
 */
static int
append_objects(PyObject *py_list, PyGC_Head *gc_list)
{
    PyGC_Head *gc;
    for (gc = gc_list->gc.gc_next; gc != gc_list; gc = gc->gc.gc_next) {
        PyObject *op = FROM_GC(gc);
        if (op != py_list) {
            if (PyList_Append(py_list, op)) {
                return -1; /* exception */
            }
        }
    }
    return 0;
}

/*** end of list stuff ***/


/* Set all gc_refs = ob_refcnt.  After this, gc_refs is > 0 for all objects
 * in containers, and is GC_REACHABLE for all tracked gc objects not in
 * containers.
 */
static void
update_refs(PyGC_Head *containers)
{
    PyGC_Head *gc = containers->gc.gc_next;
    for (; gc != containers; gc = gc->gc.gc_next) {
        assert(_PyGCHead_REFS(gc) == GC_REACHABLE);
        _PyGCHead_SET_REFS(gc, Py_REFCNT(FROM_GC(gc)));
        /* Python's cyclic gc should never see an incoming refcount
         * of 0:  if something decref'ed to 0, it should have been
         * deallocated immediately at that time.
         * Possible cause (if the assert triggers):  a tp_dealloc
         * routine left a gc-aware object tracked during its teardown
         * phase, and did something -- or allowed something to happen --
         * that called back into Python.  gc can trigger then, and may
         * see the still-tracked dying object.  Before this assert
         * was added, such mistakes went on to allow gc to try to
         * delete the object again.  In a debug build, that caused
         * a mysterious segfault, when _Py_ForgetReference tried
         * to remove the object from the doubly-linked list of all
         * objects a second time.  In a release build, an actual
         * double deallocation occurred, which leads to corruption
         * of the allocator's internal bookkeeping pointers.  That's
         * so serious that maybe this should be a release-build
         * check instead of an assert?
         */
        assert(_PyGCHead_REFS(gc) != 0);
    }
}

/* A traversal callback for subtract_refs. */
static int
visit_decref(PyObject *op, void *data)
{
    assert(op != NULL);
    if (PyObject_IS_GC(op)) {
        PyGC_Head *gc = AS_GC(op);
        /* We're only interested in gc_refs for objects in the
         * generation being collected, which can be recognized
         * because only they have positive gc_refs.
         */
        assert(_PyGCHead_REFS(gc) != 0); /* else refcount was too small */
        if (_PyGCHead_REFS(gc) > 0)
            _PyGCHead_DECREF(gc);
    }
    return 0;
}

/* Subtract internal references from gc_refs.  After this, gc_refs is >= 0
 * for all objects in containers, and is GC_REACHABLE for all tracked gc
 * objects not in containers.  The ones with gc_refs > 0 are directly
 * reachable from outside containers, and so can't be collected.
 */
static void
subtract_refs(PyGC_Head *containers)
{
    traverseproc traverse;
    PyGC_Head *gc = containers->gc.gc_next;
    for (; gc != containers; gc=gc->gc.gc_next) {
        traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
        (void) traverse(FROM_GC(gc),
                       (visitproc)visit_decref,
                       NULL);
    }
}

/* A traversal callback for move_unreachable. */
static int
visit_reachable(PyObject *op, PyGC_Head *reachable)
{
    if (PyObject_IS_GC(op)) {
        PyGC_Head *gc = AS_GC(op);
        const Py_ssize_t gc_refs = _PyGCHead_REFS(gc);

        if (gc_refs == 0) {
            /* This is in move_unreachable's 'young' list, but
             * the traversal hasn't yet gotten to it.  All
             * we need to do is tell move_unreachable that it's
             * reachable.
             */
            _PyGCHead_SET_REFS(gc, 1);
        }
        else if (gc_refs == GC_TENTATIVELY_UNREACHABLE) {
            /* This had gc_refs = 0 when move_unreachable got
             * to it, but turns out it's reachable after all.
             * Move it back to move_unreachable's 'young' list,
             * and move_unreachable will eventually get to it
             * again.
             */
            gc_list_move(gc, reachable);
            _PyGCHead_SET_REFS(gc, 1);
        }
        /* Else there's nothing to do.
         * If gc_refs > 0, it must be in move_unreachable's 'young'
         * list, and move_unreachable will eventually get to it.
         * If gc_refs == GC_REACHABLE, it's either in some other
         * generation so we don't care about it, or move_unreachable
         * already dealt with it.
         * If gc_refs == GC_UNTRACKED, it must be ignored.
         */
        else {
            assert(gc_refs > 0
                   || gc_refs == GC_REACHABLE
                   || gc_refs == GC_UNTRACKED);
        }
    }
    return 0;
}

/* Move the unreachable objects from young to unreachable.  After this,
 * all objects in young have gc_refs = GC_REACHABLE, and all objects in
 * unreachable have gc_refs = GC_TENTATIVELY_UNREACHABLE.  All tracked
 * gc objects not in young or unreachable still have gc_refs = GC_REACHABLE.
 * All objects in young after this are directly or indirectly reachable
 * from outside the original young; and all objects in unreachable are
 * not.
 */
static void
move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
{
    PyGC_Head *gc = young->gc.gc_next;

    /* Invariants:  all objects "to the left" of us in young have gc_refs
     * = GC_REACHABLE, and are indeed reachable (directly or indirectly)
     * from outside the young list as it was at entry.  All other objects
     * from the original young "to the left" of us are in unreachable now,
     * and have gc_refs = GC_TENTATIVELY_UNREACHABLE.  All objects to the
     * left of us in 'young' now have been scanned, and no objects here
     * or to the right have been scanned yet.
     */

    while (gc != young) {
        PyGC_Head *next;

        if (_PyGCHead_REFS(gc)) {
            /* gc is definitely reachable from outside the
             * original 'young'.  Mark it as such, and traverse
             * its pointers to find any other objects that may
             * be directly reachable from it.  Note that the
             * call to tp_traverse may append objects to young,
             * so we have to wait until it returns to determine
             * the next object to visit.
             */
            PyObject *op = FROM_GC(gc);
            traverseproc traverse = Py_TYPE(op)->tp_traverse;
            assert(_PyGCHead_REFS(gc) > 0);
            _PyGCHead_SET_REFS(gc, GC_REACHABLE);
            (void) traverse(op,
                            (visitproc)visit_reachable,
                            (void *)young);
            next = gc->gc.gc_next;
            if (PyTuple_CheckExact(op)) {
                _PyTuple_MaybeUntrack(op);
            }
        }
        else {
            /* This *may* be unreachable.  To make progress,
             * assume it is.  gc isn't directly reachable from
             * any object we've already traversed, but may be
             * reachable from an object we haven't gotten to yet.
             * visit_reachable will eventually move gc back into
             * young if that's so, and we'll see it again.
             */
            next = gc->gc.gc_next;
            gc_list_move(gc, unreachable);
            _PyGCHead_SET_REFS(gc, GC_TENTATIVELY_UNREACHABLE);
        }
        gc = next;
    }
}

/* Try to untrack all currently tracked dictionaries */
static void
untrack_dicts(PyGC_Head *head)
{
    PyGC_Head *next, *gc = head->gc.gc_next;
    while (gc != head) {
        PyObject *op = FROM_GC(gc);
        next = gc->gc.gc_next;
        if (PyDict_CheckExact(op))
            _PyDict_MaybeUntrack(op);
        gc = next;
    }
}

/* Return true if object has a pre-PEP 442 finalization method. */
static int
has_legacy_finalizer(PyObject *op)
{
    return op->ob_type->tp_del != NULL;
}
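
/* Background note (hedged): since PEP 442 (CPython 3.4), a Python-level
 * __del__ is implemented via tp_finalize and handled by finalize_garbage()
 * below; tp_del survives mainly in legacy C extension types.  That is why
 * objects reachable from a tp_del cycle are parked in gc.garbage instead
 * of being collected.
 */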

/* Move the objects in unreachable with tp_del slots into `finalizers`.
 * Objects moved into `finalizers` have gc_refs set to GC_REACHABLE; the
 * objects remaining in unreachable are left at GC_TENTATIVELY_UNREACHABLE.
 */
static void
move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
{
    PyGC_Head *gc;
    PyGC_Head *next;

    /* March over unreachable.  Move objects with finalizers into
     * `finalizers`.
     */
    for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
        PyObject *op = FROM_GC(gc);

        assert(IS_TENTATIVELY_UNREACHABLE(op));
        next = gc->gc.gc_next;

        if (has_legacy_finalizer(op)) {
            gc_list_move(gc, finalizers);
            _PyGCHead_SET_REFS(gc, GC_REACHABLE);
        }
    }
}

/* A traversal callback for move_legacy_finalizer_reachable. */
static int
visit_move(PyObject *op, PyGC_Head *tolist)
{
    if (PyObject_IS_GC(op)) {
        if (IS_TENTATIVELY_UNREACHABLE(op)) {
            PyGC_Head *gc = AS_GC(op);
            gc_list_move(gc, tolist);
            _PyGCHead_SET_REFS(gc, GC_REACHABLE);
        }
    }
    return 0;
}

/* Move objects that are reachable from finalizers, from the unreachable set
 * into the finalizers set.
 */
static void
move_legacy_finalizer_reachable(PyGC_Head *finalizers)
{
    traverseproc traverse;
    PyGC_Head *gc = finalizers->gc.gc_next;
    for (; gc != finalizers; gc = gc->gc.gc_next) {
        /* Note that the finalizers list may grow during this. */
        traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
        (void) traverse(FROM_GC(gc),
                        (visitproc)visit_move,
                        (void *)finalizers);
    }
}

/* Clear all weakrefs to unreachable objects, and if such a weakref has a
 * callback, invoke it if necessary.  Note that it's possible for such
 * weakrefs to be outside the unreachable set -- indeed, those are precisely
 * the weakrefs whose callbacks must be invoked.  See gc_weakref.txt for
 * overview & some details.  Some weakrefs with callbacks may be reclaimed
 * directly by this routine; the number reclaimed is the return value.  Other
 * weakrefs with callbacks may be moved into the `old` generation.  Objects
 * moved into `old` have gc_refs set to GC_REACHABLE; the objects remaining in
 * unreachable are left at GC_TENTATIVELY_UNREACHABLE.  When this returns,
 * no object in `unreachable` is weakly referenced anymore.
 */
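
/* Example (a sketch):  a reachable WeakValueDictionary maps a key to an
 * object that just became cyclic trash.  The weakref lives in the dict,
 * outside the unreachable set, so its callback must run to purge the
 * stale entry.  If instead the weakref itself is part of the trash, it
 * is cleared without invoking the callback, per the rules below.
 */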
static int
handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
{
    PyGC_Head *gc;
    PyObject *op;               /* generally FROM_GC(gc) */
    PyWeakReference *wr;        /* generally a cast of op */
    PyGC_Head wrcb_to_call;     /* weakrefs with callbacks to call */
    PyGC_Head *next;
    int num_freed = 0;

    gc_list_init(&wrcb_to_call);

    /* Clear all weakrefs to the objects in unreachable.  If such a weakref
     * also has a callback, move it into `wrcb_to_call` if the callback
     * needs to be invoked.  Note that we cannot invoke any callbacks until
     * all weakrefs to unreachable objects are cleared, lest the callback
     * resurrect an unreachable object via a still-active weakref.  We
     * make another pass over wrcb_to_call, invoking callbacks, after this
     * pass completes.
     */
    for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
        PyWeakReference **wrlist;

        op = FROM_GC(gc);
        assert(IS_TENTATIVELY_UNREACHABLE(op));
        next = gc->gc.gc_next;

        if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op)))
            continue;

        /* It supports weakrefs.  Does it have any? */
        wrlist = (PyWeakReference **)
                                PyObject_GET_WEAKREFS_LISTPTR(op);

        /* `op` may have some weakrefs.  March over the list, clear
         * all the weakrefs, and move the weakrefs with callbacks
         * that must be called into wrcb_to_call.
         */
        for (wr = *wrlist; wr != NULL; wr = *wrlist) {
            PyGC_Head *wrasgc;                  /* AS_GC(wr) */

            /* _PyWeakref_ClearRef clears the weakref but leaves
             * the callback pointer intact.  Obscure:  it also
             * changes *wrlist.
             */
            assert(wr->wr_object == op);
            _PyWeakref_ClearRef(wr);
            assert(wr->wr_object == Py_None);
            if (wr->wr_callback == NULL)
                continue;                       /* no callback */

    /* Headache time.  `op` is going away, and is weakly referenced by
     * `wr`, which has a callback.  Should the callback be invoked?  If wr
     * is also trash, no:
     *
     * 1. There's no need to call it.  The object and the weakref are
     *    both going away, so it's legitimate to pretend the weakref is
     *    going away first.  The user has to ensure a weakref outlives its
     *    referent if they want a guarantee that the wr callback will get
     *    invoked.
     *
     * 2. It may be catastrophic to call it.  If the callback is also in
     *    cyclic trash (CT), then although the CT is unreachable from
     *    outside the current generation, CT may be reachable from the
     *    callback.  Then the callback could resurrect insane objects.
     *
     * Since the callback is never needed and may be unsafe in this case,
     * wr is simply left in the unreachable set.  Note that because we
     * already called _PyWeakref_ClearRef(wr), its callback will never
     * trigger.
     *
     * OTOH, if wr isn't part of CT, we should invoke the callback:  the
     * weakref outlived the trash.  Note that since wr isn't CT in this
     * case, its callback can't be CT either -- wr acted as an external
     * root to this generation, and therefore its callback did too.  So
     * nothing in CT is reachable from the callback either, so it's hard
     * to imagine how calling it later could create a problem for us.  wr
     * is moved to wrcb_to_call in this case.
     */
            if (IS_TENTATIVELY_UNREACHABLE(wr))
                continue;
            assert(IS_REACHABLE(wr));

            /* Create a new reference so that wr can't go away
             * before we can process it again.
             */
            Py_INCREF(wr);

            /* Move wr to wrcb_to_call, for the next pass. */
            wrasgc = AS_GC(wr);
            assert(wrasgc != next); /* wrasgc is reachable, but
                                       next isn't, so they can't
                                       be the same */
            gc_list_move(wrasgc, &wrcb_to_call);
        }
    }

    /* Invoke the callbacks we decided to honor.  It's safe to invoke them
     * because they can't reference unreachable objects.
     */
    while (! gc_list_is_empty(&wrcb_to_call)) {
        PyObject *temp;
        PyObject *callback;

        gc = wrcb_to_call.gc.gc_next;
        op = FROM_GC(gc);
        assert(IS_REACHABLE(op));
        assert(PyWeakref_Check(op));
        wr = (PyWeakReference *)op;
        callback = wr->wr_callback;
        assert(callback != NULL);

        /* copy-paste of weakrefobject.c's handle_callback() */
        temp = PyObject_CallFunctionObjArgs(callback, wr, NULL);
        if (temp == NULL)
            PyErr_WriteUnraisable(callback);
        else
            Py_DECREF(temp);

        /* Give up the reference we created in the first pass.  When
         * op's refcount hits 0 (which it may or may not do right now),
         * op's tp_dealloc will decref op->wr_callback too.  Note
         * that the refcount probably will hit 0 now, and because this
         * weakref was reachable to begin with, gc didn't already
         * add it to its count of freed objects.  Example:  a reachable
         * weak value dict maps some key to this reachable weakref.
         * The callback removes this key->weakref mapping from the
         * dict, leaving no other references to the weakref (excepting
         * ours).
         */
        Py_DECREF(op);
        if (wrcb_to_call.gc.gc_next == gc) {
            /* object is still alive -- move it */
            gc_list_move(gc, old);
        }
        else
            ++num_freed;
    }

    return num_freed;
}

static void
debug_cycle(const char *msg, PyObject *op)
{
    PySys_FormatStderr("gc: %s <%s %p>\n",
                       msg, Py_TYPE(op)->tp_name, op);
}

/* Handle uncollectable garbage (cycles with tp_del slots, and stuff reachable
 * only from such cycles).
 * If DEBUG_SAVEALL, all objects in finalizers are appended to the module
 * garbage list (a Python list), else only the objects in finalizers with
 * __del__ methods are appended to garbage.  All objects in finalizers are
 * merged into the old list regardless.
 */
static void
handle_legacy_finalizers(PyGC_Head *finalizers, PyGC_Head *old)
{
    PyGC_Head *gc = finalizers->gc.gc_next;

    if (_PyRuntime.gc.garbage == NULL) {
        _PyRuntime.gc.garbage = PyList_New(0);
        if (_PyRuntime.gc.garbage == NULL)
            Py_FatalError("gc couldn't create gc.garbage list");
    }
    for (; gc != finalizers; gc = gc->gc.gc_next) {
        PyObject *op = FROM_GC(gc);

        if ((_PyRuntime.gc.debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
            if (PyList_Append(_PyRuntime.gc.garbage, op) < 0)
                break;
        }
    }

    gc_list_merge(finalizers, old);
}

/* Run first-time finalizers (if any) on all the objects in collectable.
 * Note that this may remove some (or even all) of the objects from the
 * list, due to refcounts falling to 0.
 */
static void
finalize_garbage(PyGC_Head *collectable)
{
    destructor finalize;
    PyGC_Head seen;

    /* While we're going through the loop, `finalize(op)` may cause op, or
     * other objects, to be reclaimed via refcounts falling to zero.  So
     * there's little we can rely on about the structure of the input
     * `collectable` list across iterations.  For safety, we always take the
     * first object in that list and move it to a temporary `seen` list.
     * If objects vanish from the `collectable` and `seen` lists we don't
     * care.
     */
    gc_list_init(&seen);

    while (!gc_list_is_empty(collectable)) {
        PyGC_Head *gc = collectable->gc.gc_next;
        PyObject *op = FROM_GC(gc);
        gc_list_move(gc, &seen);
        if (!_PyGCHead_FINALIZED(gc) &&
                PyType_HasFeature(Py_TYPE(op), Py_TPFLAGS_HAVE_FINALIZE) &&
                (finalize = Py_TYPE(op)->tp_finalize) != NULL) {
            _PyGCHead_SET_FINALIZED(gc, 1);
            Py_INCREF(op);
            finalize(op);
            Py_DECREF(op);
        }
    }
    gc_list_merge(&seen, collectable);
}

/* Walk the collectable list and check that they are really unreachable
   from the outside (some objects could have been resurrected by a
   finalizer). */
static int
check_garbage(PyGC_Head *collectable)
{
    PyGC_Head *gc;
    for (gc = collectable->gc.gc_next; gc != collectable;
         gc = gc->gc.gc_next) {
        _PyGCHead_SET_REFS(gc, Py_REFCNT(FROM_GC(gc)));
        assert(_PyGCHead_REFS(gc) != 0);
    }
    subtract_refs(collectable);
    for (gc = collectable->gc.gc_next; gc != collectable;
         gc = gc->gc.gc_next) {
        assert(_PyGCHead_REFS(gc) >= 0);
        if (_PyGCHead_REFS(gc) != 0)
            return -1;
    }
    return 0;
}

static void
revive_garbage(PyGC_Head *collectable)
{
    PyGC_Head *gc;
    for (gc = collectable->gc.gc_next; gc != collectable;
         gc = gc->gc.gc_next) {
        _PyGCHead_SET_REFS(gc, GC_REACHABLE);
    }
}

/* Break reference cycles by clearing the containers involved.  This is
 * tricky business as the lists can be changing and we don't know which
 * objects may be freed.  It is possible I screwed something up here.
 */
static void
delete_garbage(PyGC_Head *collectable, PyGC_Head *old)
{
    inquiry clear;

    while (!gc_list_is_empty(collectable)) {
        PyGC_Head *gc = collectable->gc.gc_next;
        PyObject *op = FROM_GC(gc);

        if (_PyRuntime.gc.debug & DEBUG_SAVEALL) {
            PyList_Append(_PyRuntime.gc.garbage, op);
        }
        else {
            if ((clear = Py_TYPE(op)->tp_clear) != NULL) {
                Py_INCREF(op);
                clear(op);
                Py_DECREF(op);
            }
        }
        if (collectable->gc.gc_next == gc) {
            /* object is still alive, move it, it may die later */
            gc_list_move(gc, old);
            _PyGCHead_SET_REFS(gc, GC_REACHABLE);
        }
    }
}

/* Clear all free lists
 * All free lists are cleared during the collection of the highest generation.
 * Allocated items in the free list may keep a pymalloc arena occupied.
 * Clearing the free lists may give back memory to the OS earlier.
 */
static void
clear_freelists(void)
{
    (void)PyMethod_ClearFreeList();
    (void)PyFrame_ClearFreeList();
    (void)PyCFunction_ClearFreeList();
    (void)PyTuple_ClearFreeList();
    (void)PyUnicode_ClearFreeList();
    (void)PyFloat_ClearFreeList();
    (void)PyList_ClearFreeList();
    (void)PyDict_ClearFreeList();
    (void)PySet_ClearFreeList();
    (void)PyAsyncGen_ClearFreeLists();
    (void)PyContext_ClearFreeList();
}

/* This is the main function.  Read this to understand how the
 * collection process works. */
static Py_ssize_t
collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
        int nofail)
{
    int i;
    Py_ssize_t m = 0; /* # objects collected */
    Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */
    PyGC_Head *young; /* the generation we are examining */
    PyGC_Head *old; /* next older generation */
    PyGC_Head unreachable; /* non-problematic unreachable trash */
    PyGC_Head finalizers;  /* objects with, & reachable from, __del__ */
    PyGC_Head *gc;
    _PyTime_t t1 = 0;   /* initialize to prevent a compiler warning */

    struct gc_generation_stats *stats = &_PyRuntime.gc.generation_stats[generation];

    if (_PyRuntime.gc.debug & DEBUG_STATS) {
        PySys_WriteStderr("gc: collecting generation %d...\n",
                          generation);
        PySys_WriteStderr("gc: objects in each generation:");
        for (i = 0; i < NUM_GENERATIONS; i++)
            PySys_FormatStderr(" %zd",
                              gc_list_size(GEN_HEAD(i)));
        PySys_WriteStderr("\ngc: objects in permanent generation: %zd",
                         gc_list_size(&_PyRuntime.gc.permanent_generation.head));
        t1 = _PyTime_GetMonotonicClock();

        PySys_WriteStderr("\n");
    }

    if (PyDTrace_GC_START_ENABLED())
        PyDTrace_GC_START(generation);

    /* update collection and allocation counters */
    if (generation+1 < NUM_GENERATIONS)
        _PyRuntime.gc.generations[generation+1].count += 1;
    for (i = 0; i <= generation; i++)
        _PyRuntime.gc.generations[i].count = 0;

    /* merge younger generations with one we are currently collecting */
    for (i = 0; i < generation; i++) {
        gc_list_merge(GEN_HEAD(i), GEN_HEAD(generation));
    }

    /* handy references */
    young = GEN_HEAD(generation);
    if (generation < NUM_GENERATIONS-1)
        old = GEN_HEAD(generation+1);
    else
        old = young;

    /* Using ob_refcnt and gc_refs, calculate which objects in the
     * container set are reachable from outside the set (i.e., have a
     * refcount greater than 0 when all the references within the
     * set are taken into account).
     */
    update_refs(young);
    subtract_refs(young);

    /* Leave everything reachable from outside young in young, and move
     * everything else (in young) to unreachable.
     * NOTE:  This used to move the reachable objects into a reachable
     * set instead.  But most things usually turn out to be reachable,
     * so it's more efficient to move the unreachable things.
     */
    gc_list_init(&unreachable);
    move_unreachable(young, &unreachable);

    /* Move reachable objects to next generation. */
    if (young != old) {
        if (generation == NUM_GENERATIONS - 2) {
            _PyRuntime.gc.long_lived_pending += gc_list_size(young);
        }
        gc_list_merge(young, old);
    }
    else {
        /* We only untrack dicts in full collections, to avoid quadratic
           dict build-up. See issue #14775. */
        untrack_dicts(young);
        _PyRuntime.gc.long_lived_pending = 0;
        _PyRuntime.gc.long_lived_total = gc_list_size(young);
    }

    /* All objects in unreachable are trash, but objects reachable from
     * legacy finalizers (e.g. tp_del) can't safely be deleted.
     */
    gc_list_init(&finalizers);
    move_legacy_finalizers(&unreachable, &finalizers);
    /* finalizers contains the unreachable objects with a legacy finalizer;
     * unreachable objects reachable *from* those are also uncollectable,
     * and we move those into the finalizers list too.
     */
    move_legacy_finalizer_reachable(&finalizers);

    /* Print debugging information. */
    if (_PyRuntime.gc.debug & DEBUG_COLLECTABLE) {
        for (gc = unreachable.gc.gc_next; gc != &unreachable; gc = gc->gc.gc_next) {
            debug_cycle("collectable", FROM_GC(gc));
        }
    }

    /* Clear weakrefs and invoke callbacks as necessary. */
    m += handle_weakrefs(&unreachable, old);

    /* Call tp_finalize on objects which have one. */
    finalize_garbage(&unreachable);

    if (check_garbage(&unreachable)) {
        revive_garbage(&unreachable);
        gc_list_merge(&unreachable, old);
    }
    else {
        /* Call tp_clear on objects in the unreachable set.  This will cause
         * the reference cycles to be broken.  It may also cause some objects
         * in finalizers to be freed.
         */
        m += gc_list_size(&unreachable);
        delete_garbage(&unreachable, old);
    }

    /* Collect statistics on uncollectable objects found and print
     * debugging information. */
    for (gc = finalizers.gc.gc_next;
         gc != &finalizers;
         gc = gc->gc.gc_next) {
        n++;
        if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE)
            debug_cycle("uncollectable", FROM_GC(gc));
    }
    if (_PyRuntime.gc.debug & DEBUG_STATS) {
        _PyTime_t t2 = _PyTime_GetMonotonicClock();

        if (m == 0 && n == 0)
            PySys_WriteStderr("gc: done");
        else
            PySys_FormatStderr(
                "gc: done, %zd unreachable, %zd uncollectable",
                n+m, n);
        PySys_WriteStderr(", %.4fs elapsed\n",
                          _PyTime_AsSecondsDouble(t2 - t1));
    }

    /* Append instances in the uncollectable set to a Python
     * reachable list of garbage.  The programmer has to deal with
     * this if they insist on creating this type of structure.
     */
    handle_legacy_finalizers(&finalizers, old);

    /* Clear free list only during the collection of the highest
     * generation */
    if (generation == NUM_GENERATIONS-1) {
        clear_freelists();
    }

    if (PyErr_Occurred()) {
        if (nofail) {
            PyErr_Clear();
        }
        else {
            if (gc_str == NULL)
                gc_str = PyUnicode_FromString("garbage collection");
            PyErr_WriteUnraisable(gc_str);
            Py_FatalError("unexpected exception during garbage collection");
        }
    }

    /* Update stats */
    if (n_collected)
        *n_collected = m;
    if (n_uncollectable)
        *n_uncollectable = n;
    stats->collections++;
    stats->collected += m;
    stats->uncollectable += n;

    if (PyDTrace_GC_DONE_ENABLED())
        PyDTrace_GC_DONE(n+m);

    return n+m;
}

/* Invoke progress callbacks to notify clients that garbage collection
 * is starting or stopping
 */
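
/* Example (a sketch):  a callback registered from Python via
 *     gc.callbacks.append(lambda phase, info: print(phase, info))
 * is invoked as cb("start", info) before a collection and as
 * cb("stop", info) after it, where info is the dict built below:
 * {"generation": g, "collected": m, "uncollectable": n}.
 */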
static void
invoke_gc_callback(const char *phase, int generation,
                   Py_ssize_t collected, Py_ssize_t uncollectable)
{
    Py_ssize_t i;
    PyObject *info = NULL;

    /* we may get called very early */
    if (_PyRuntime.gc.callbacks == NULL)
        return;
    /* The local variable cannot be rebound, check it for sanity */
    assert(_PyRuntime.gc.callbacks != NULL && PyList_CheckExact(_PyRuntime.gc.callbacks));
    if (PyList_GET_SIZE(_PyRuntime.gc.callbacks) != 0) {
        info = Py_BuildValue("{sisnsn}",
            "generation", generation,
            "collected", collected,
            "uncollectable", uncollectable);
        if (info == NULL) {
            PyErr_WriteUnraisable(NULL);
            return;
        }
    }
    for (i=0; i<PyList_GET_SIZE(_PyRuntime.gc.callbacks); i++) {
        PyObject *r, *cb = PyList_GET_ITEM(_PyRuntime.gc.callbacks, i);
        Py_INCREF(cb); /* make sure cb doesn't go away */
        r = PyObject_CallFunction(cb, "sO", phase, info);
        if (r == NULL) {
            PyErr_WriteUnraisable(cb);
        }
        else {
            Py_DECREF(r);
        }
        Py_DECREF(cb);
    }
    Py_XDECREF(info);
}

/* Perform garbage collection of a generation and invoke
 * progress callbacks.
 */
static Py_ssize_t
collect_with_callback(int generation)
{
    Py_ssize_t result, collected, uncollectable;
    invoke_gc_callback("start", generation, 0, 0);
    result = collect(generation, &collected, &uncollectable, 0);
    invoke_gc_callback("stop", generation, collected, uncollectable);
    return result;
}

static Py_ssize_t
collect_generations(void)
{
    int i;
    Py_ssize_t n = 0;

    /* Find the oldest generation (highest numbered) where the count
     * exceeds the threshold.  Objects in that generation and
     * generations younger than it will be collected. */
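
    /* Example (a sketch, default thresholds 700/10/10):  generation 0 is
     * collected once the net count of container allocations exceeds 700;
     * each gen-0 collection bumps generations[1].count, so generation 1
     * is collected on roughly every 10th gen-0 pass, and generation 2 on
     * roughly every 10th gen-1 pass (subject to the long_lived_pending
     * check below).
     */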
    for (i = NUM_GENERATIONS-1; i >= 0; i--) {
        if (_PyRuntime.gc.generations[i].count > _PyRuntime.gc.generations[i].threshold) {
            /* Avoid quadratic performance degradation in number
               of tracked objects. See comments at the beginning
               of this file, and issue #4074.
            */
            if (i == NUM_GENERATIONS - 1
                && _PyRuntime.gc.long_lived_pending < _PyRuntime.gc.long_lived_total / 4)
                continue;
            n = collect_with_callback(i);
            break;
        }
    }
    return n;
}

#include "clinic/gcmodule.c.h"

/*[clinic input]
gc.enable

Enable automatic garbage collection.
[clinic start generated code]*/

static PyObject *
gc_enable_impl(PyObject *module)
/*[clinic end generated code: output=45a427e9dce9155c input=81ac4940ca579707]*/
{
    _PyRuntime.gc.enabled = 1;
    Py_RETURN_NONE;
}

/*[clinic input]
gc.disable

Disable automatic garbage collection.
[clinic start generated code]*/

static PyObject *
gc_disable_impl(PyObject *module)
/*[clinic end generated code: output=97d1030f7aa9d279 input=8c2e5a14e800d83b]*/
{
    _PyRuntime.gc.enabled = 0;
    Py_RETURN_NONE;
}

/*[clinic input]
gc.isenabled -> bool

Returns true if automatic garbage collection is enabled.
[clinic start generated code]*/

static int
gc_isenabled_impl(PyObject *module)
/*[clinic end generated code: output=1874298331c49130 input=30005e0422373b31]*/
{
    return _PyRuntime.gc.enabled;
}

/*[clinic input]
gc.collect -> Py_ssize_t

    generation: int(c_default="NUM_GENERATIONS - 1") = 2

Run the garbage collector.

With no arguments, run a full collection.  The optional argument
may be an integer specifying which generation to collect.  A ValueError
is raised if the generation number is invalid.

The number of unreachable objects is returned.
[clinic start generated code]*/

static Py_ssize_t
gc_collect_impl(PyObject *module, int generation)
/*[clinic end generated code: output=b697e633043233c7 input=40720128b682d879]*/
{
    Py_ssize_t n;

    if (generation < 0 || generation >= NUM_GENERATIONS) {
        PyErr_SetString(PyExc_ValueError, "invalid generation");
        return -1;
    }

    if (_PyRuntime.gc.collecting)
        n = 0; /* already collecting, don't do anything */
    else {
        _PyRuntime.gc.collecting = 1;
        n = collect_with_callback(generation);
        _PyRuntime.gc.collecting = 0;
    }

    return n;
}

/*[clinic input]
gc.set_debug

    flags: int
        An integer that can have the following bits turned on:
          DEBUG_STATS - Print statistics during collection.
          DEBUG_COLLECTABLE - Print collectable objects found.
          DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects
            found.
          DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.
          DEBUG_LEAK - Debug leaking programs (everything but STATS).
    /

Set the garbage collection debugging flags.

Debugging information is written to sys.stderr.
[clinic start generated code]*/

static PyObject *
gc_set_debug_impl(PyObject *module, int flags)
/*[clinic end generated code: output=7c8366575486b228 input=5e5ce15e84fbed15]*/
{
    _PyRuntime.gc.debug = flags;

    Py_RETURN_NONE;
}

/*[clinic input]
gc.get_debug -> int

Get the garbage collection debugging flags.
[clinic start generated code]*/

static int
gc_get_debug_impl(PyObject *module)
/*[clinic end generated code: output=91242f3506cd1e50 input=91a101e1c3b98366]*/
{
    return _PyRuntime.gc.debug;
}

PyDoc_STRVAR(gc_set_thresh__doc__,
"set_threshold(threshold0, [threshold1, threshold2]) -> None\n"
"\n"
"Sets the collection thresholds.  Setting threshold0 to zero disables\n"
"collection.\n");

static PyObject *
gc_set_thresh(PyObject *self, PyObject *args)
{
    int i;
    if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
                          &_PyRuntime.gc.generations[0].threshold,
                          &_PyRuntime.gc.generations[1].threshold,
                          &_PyRuntime.gc.generations[2].threshold))
        return NULL;
    for (i = 2; i < NUM_GENERATIONS; i++) {
        /* generations higher than 2 get the same threshold */
        _PyRuntime.gc.generations[i].threshold = _PyRuntime.gc.generations[2].threshold;
    }

    Py_RETURN_NONE;
}

/*[clinic input]
gc.get_threshold

Return the current collection thresholds.
[clinic start generated code]*/

static PyObject *
gc_get_threshold_impl(PyObject *module)
/*[clinic end generated code: output=7902bc9f41ecbbd8 input=286d79918034d6e6]*/
{
    return Py_BuildValue("(iii)",
                         _PyRuntime.gc.generations[0].threshold,
                         _PyRuntime.gc.generations[1].threshold,
                         _PyRuntime.gc.generations[2].threshold);
}

/*[clinic input]
gc.get_count

Return a three-tuple of the current collection counts.
[clinic start generated code]*/

static PyObject *
gc_get_count_impl(PyObject *module)
/*[clinic end generated code: output=354012e67b16398f input=a392794a08251751]*/
{
    return Py_BuildValue("(iii)",
                         _PyRuntime.gc.generations[0].count,
                         _PyRuntime.gc.generations[1].count,
                         _PyRuntime.gc.generations[2].count);
}

static int
referrersvisit(PyObject* obj, PyObject *objs)
{
    Py_ssize_t i;
    for (i = 0; i < PyTuple_GET_SIZE(objs); i++)
        if (PyTuple_GET_ITEM(objs, i) == obj)
            return 1;
    return 0;
}

static int
gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
{
    PyGC_Head *gc;
    PyObject *obj;
    traverseproc traverse;
    for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
        obj = FROM_GC(gc);
        traverse = Py_TYPE(obj)->tp_traverse;
        if (obj == objs || obj == resultlist)
            continue;
        if (traverse(obj, (visitproc)referrersvisit, objs)) {
            if (PyList_Append(resultlist, obj) < 0)
                return 0; /* error */
        }
    }
    return 1; /* no error */
}

PyDoc_STRVAR(gc_get_referrers__doc__,
"get_referrers(*objs) -> list\n\
Return the list of objects that directly refer to any of objs.");

static PyObject *
gc_get_referrers(PyObject *self, PyObject *args)
{
    int i;
    PyObject *result = PyList_New(0);
    if (!result) return NULL;

    for (i = 0; i < NUM_GENERATIONS; i++) {
        if (!(gc_referrers_for(args, GEN_HEAD(i), result))) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}

/* Append obj to list; return true if error (out of memory), false if OK. */
static int
referentsvisit(PyObject *obj, PyObject *list)
{
    return PyList_Append(list, obj) < 0;
}

PyDoc_STRVAR(gc_get_referents__doc__,
"get_referents(*objs) -> list\n\
Return the list of objects that are directly referred to by objs.");

static PyObject *
gc_get_referents(PyObject *self, PyObject *args)
{
    Py_ssize_t i;
    PyObject *result = PyList_New(0);

    if (result == NULL)
        return NULL;

    for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
        traverseproc traverse;
        PyObject *obj = PyTuple_GET_ITEM(args, i);

        if (! PyObject_IS_GC(obj))
            continue;
        traverse = Py_TYPE(obj)->tp_traverse;
        if (! traverse)
            continue;
        if (traverse(obj, (visitproc)referentsvisit, result)) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}

/*[clinic input]
gc.get_objects

Return a list of objects tracked by the collector (excluding the list returned).
[clinic start generated code]*/

static PyObject *
gc_get_objects_impl(PyObject *module)
/*[clinic end generated code: output=fcb95d2e23e1f750 input=9439fe8170bf35d8]*/
{
    int i;
    PyObject* result;

    result = PyList_New(0);
    if (result == NULL)
        return NULL;
    for (i = 0; i < NUM_GENERATIONS; i++) {
        if (append_objects(result, GEN_HEAD(i))) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}

/*[clinic input]
gc.get_stats

Return a list of dictionaries containing per-generation statistics.
[clinic start generated code]*/

static PyObject *
gc_get_stats_impl(PyObject *module)
/*[clinic end generated code: output=a8ab1d8a5d26f3ab input=1ef4ed9d17b1a470]*/
{
    int i;
    PyObject *result;
    struct gc_generation_stats stats[NUM_GENERATIONS], *st;

    /* To get consistent values despite allocations while constructing
       the result list, we use a snapshot of the running stats. */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        stats[i] = _PyRuntime.gc.generation_stats[i];
    }

    result = PyList_New(0);
    if (result == NULL)
        return NULL;

    for (i = 0; i < NUM_GENERATIONS; i++) {
        PyObject *dict;
        st = &stats[i];
        dict = Py_BuildValue("{snsnsn}",
                             "collections", st->collections,
                             "collected", st->collected,
                             "uncollectable", st->uncollectable
                            );
        if (dict == NULL)
            goto error;
        if (PyList_Append(result, dict)) {
            Py_DECREF(dict);
            goto error;
        }
        Py_DECREF(dict);
    }
    return result;

error:
    Py_XDECREF(result);
    return NULL;
}


/*[clinic input]
gc.is_tracked

    obj: object
    /

Returns true if the object is tracked by the garbage collector.

Simple atomic objects will return false.
[clinic start generated code]*/

static PyObject *
gc_is_tracked(PyObject *module, PyObject *obj)
/*[clinic end generated code: output=14f0103423b28e31 input=d83057f170ea2723]*/
{
    PyObject *result;

    if (PyObject_IS_GC(obj) && IS_TRACKED(obj))
        result = Py_True;
    else
        result = Py_False;
    Py_INCREF(result);
    return result;
}

/*[clinic input]
gc.freeze

Freeze all current tracked objects and ignore them for future collections.

This can be used before a POSIX fork() call to make the gc copy-on-write friendly.
Note: collection before a POSIX fork() call may free pages for future allocation
which can cause copy-on-write.
[clinic start generated code]*/

static PyObject *
gc_freeze_impl(PyObject *module)
/*[clinic end generated code: output=502159d9cdc4c139 input=b602b16ac5febbe5]*/
{
    for (int i = 0; i < NUM_GENERATIONS; ++i) {
        gc_list_merge(GEN_HEAD(i), &_PyRuntime.gc.permanent_generation.head);
        _PyRuntime.gc.generations[i].count = 0;
    }
    Py_RETURN_NONE;
}
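
/* Typical use (a sketch, from Python code):
 *     gc.collect()    # free what can be freed first
 *     gc.freeze()     # park survivors in the permanent generation
 *     os.fork()       # children share those pages copy-on-write
 */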

/*[clinic input]
gc.unfreeze

Unfreeze all objects in the permanent generation.

Put all objects in the permanent generation back into the oldest generation.
[clinic start generated code]*/

static PyObject *
gc_unfreeze_impl(PyObject *module)
/*[clinic end generated code: output=1c15f2043b25e169 input=2dd52b170f4cef6c]*/
{
    gc_list_merge(&_PyRuntime.gc.permanent_generation.head, GEN_HEAD(NUM_GENERATIONS-1));
    Py_RETURN_NONE;
}

/*[clinic input]
gc.get_freeze_count -> Py_ssize_t

Return the number of objects in the permanent generation.
[clinic start generated code]*/

static Py_ssize_t
gc_get_freeze_count_impl(PyObject *module)
/*[clinic end generated code: output=61cbd9f43aa032e1 input=45ffbc65cfe2a6ed]*/
{
    return gc_list_size(&_PyRuntime.gc.permanent_generation.head);
}


PyDoc_STRVAR(gc__doc__,
"This module provides access to the garbage collector for reference cycles.\n"
"\n"
"enable() -- Enable automatic garbage collection.\n"
"disable() -- Disable automatic garbage collection.\n"
"isenabled() -- Returns true if automatic collection is enabled.\n"
"collect() -- Do a full collection right now.\n"
"get_count() -- Return the current collection counts.\n"
"get_stats() -- Return list of dictionaries containing per-generation stats.\n"
"set_debug() -- Set debugging flags.\n"
"get_debug() -- Get debugging flags.\n"
"set_threshold() -- Set the collection thresholds.\n"
1476 "get_threshold() -- Return the current the collection thresholds.\n"
1477 "get_objects() -- Return a list of all objects tracked by the collector.\n"
1478 "is_tracked() -- Returns true if a given object is tracked.\n"
1479 "get_referrers() -- Return the list of objects that refer to an object.\n"
1480 "get_referents() -- Return the list of objects that an object refers to.\n"
1481 "freeze() -- Freeze all tracked objects and ignore them for future collections.\n"
1482 "unfreeze() -- Unfreeze all objects in the permanent generation.\n"
1483 "get_freeze_count() -- Return the number of objects in the permanent generation.\n");

static PyMethodDef GcMethods[] = {
    GC_ENABLE_METHODDEF
    GC_DISABLE_METHODDEF
    GC_ISENABLED_METHODDEF
    GC_SET_DEBUG_METHODDEF
    GC_GET_DEBUG_METHODDEF
    GC_GET_COUNT_METHODDEF
    {"set_threshold",  gc_set_thresh, METH_VARARGS, gc_set_thresh__doc__},
    GC_GET_THRESHOLD_METHODDEF
    GC_COLLECT_METHODDEF
    GC_GET_OBJECTS_METHODDEF
    GC_GET_STATS_METHODDEF
    GC_IS_TRACKED_METHODDEF
    {"get_referrers",  gc_get_referrers, METH_VARARGS,
        gc_get_referrers__doc__},
    {"get_referents",  gc_get_referents, METH_VARARGS,
        gc_get_referents__doc__},
    GC_FREEZE_METHODDEF
    GC_UNFREEZE_METHODDEF
    GC_GET_FREEZE_COUNT_METHODDEF
    {NULL,      NULL}           /* Sentinel */
};

static struct PyModuleDef gcmodule = {
    PyModuleDef_HEAD_INIT,
    "gc",              /* m_name */
    gc__doc__,         /* m_doc */
    -1,                /* m_size */
    GcMethods,         /* m_methods */
    NULL,              /* m_reload */
    NULL,              /* m_traverse */
    NULL,              /* m_clear */
    NULL               /* m_free */
};

PyMODINIT_FUNC
PyInit_gc(void)
{
    PyObject *m;

    m = PyModule_Create(&gcmodule);

    if (m == NULL)
        return NULL;

    if (_PyRuntime.gc.garbage == NULL) {
        _PyRuntime.gc.garbage = PyList_New(0);
        if (_PyRuntime.gc.garbage == NULL)
            return NULL;
    }
    Py_INCREF(_PyRuntime.gc.garbage);
    if (PyModule_AddObject(m, "garbage", _PyRuntime.gc.garbage) < 0)
        return NULL;

    if (_PyRuntime.gc.callbacks == NULL) {
        _PyRuntime.gc.callbacks = PyList_New(0);
        if (_PyRuntime.gc.callbacks == NULL)
            return NULL;
    }
    Py_INCREF(_PyRuntime.gc.callbacks);
    if (PyModule_AddObject(m, "callbacks", _PyRuntime.gc.callbacks) < 0)
        return NULL;

#define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) return NULL
    ADD_INT(DEBUG_STATS);
    ADD_INT(DEBUG_COLLECTABLE);
    ADD_INT(DEBUG_UNCOLLECTABLE);
    ADD_INT(DEBUG_SAVEALL);
    ADD_INT(DEBUG_LEAK);
#undef ADD_INT
    return m;
}

/* API to invoke gc.collect() from C */
Py_ssize_t
PyGC_Collect(void)
{
    Py_ssize_t n;

    if (_PyRuntime.gc.collecting)
        n = 0; /* already collecting, don't do anything */
    else {
        PyObject *exc, *value, *tb;
        _PyRuntime.gc.collecting = 1;
        PyErr_Fetch(&exc, &value, &tb);
        n = collect_with_callback(NUM_GENERATIONS - 1);
        PyErr_Restore(exc, value, tb);
        _PyRuntime.gc.collecting = 0;
    }

    return n;
}
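
/* Illustrative usage sketch (not part of this module): a host application
 * embedding Python can force a full collection through this entry point,
 * for example at a quiescent point before releasing C-side caches.  A
 * minimal sketch, assuming the interpreter is already initialized:
 *
 *     Py_ssize_t collected = PyGC_Collect();
 *     fprintf(stderr, "gc: collected %zd cyclic objects\n", collected);
 */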

Py_ssize_t
_PyGC_CollectIfEnabled(void)
{
    if (!_PyRuntime.gc.enabled)
        return 0;

    return PyGC_Collect();
}

Py_ssize_t
_PyGC_CollectNoFail(void)
{
    Py_ssize_t n;

    /* Ideally, this function is only called on interpreter shutdown,
       and therefore not recursively.  Unfortunately, when there are daemon
       threads, a daemon thread can start a cyclic garbage collection
       during interpreter shutdown (and then never finish it).
       See http://bugs.python.org/issue8713#msg195178 for an example.
       */
    if (_PyRuntime.gc.collecting)
        n = 0;
    else {
        _PyRuntime.gc.collecting = 1;
        n = collect(NUM_GENERATIONS - 1, NULL, NULL, 1);
        _PyRuntime.gc.collecting = 0;
    }
    return n;
}

void
_PyGC_DumpShutdownStats(void)
{
    if (!(_PyRuntime.gc.debug & DEBUG_SAVEALL)
        && _PyRuntime.gc.garbage != NULL && PyList_GET_SIZE(_PyRuntime.gc.garbage) > 0) {
        const char *message;
        if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE)
            message = "gc: %zd uncollectable objects at " \
                "shutdown";
        else
            message = "gc: %zd uncollectable objects at " \
                "shutdown; use gc.set_debug(gc.DEBUG_UNCOLLECTABLE) to list them";
        /* PyErr_WarnFormat does too many things and we are at shutdown:
           the warnings module's dependencies (e.g. linecache) may already
           be gone. */
        if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,
                                     "gc", NULL, message,
                                     PyList_GET_SIZE(_PyRuntime.gc.garbage)))
            PyErr_WriteUnraisable(NULL);
        if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE) {
            PyObject *repr = NULL, *bytes = NULL;
            repr = PyObject_Repr(_PyRuntime.gc.garbage);
            if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr)))
                PyErr_WriteUnraisable(_PyRuntime.gc.garbage);
            else {
                PySys_WriteStderr(
                    "      %s\n",
                    PyBytes_AS_STRING(bytes)
                    );
            }
            Py_XDECREF(repr);
            Py_XDECREF(bytes);
        }
    }
}

void
_PyGC_Fini(void)
{
    Py_CLEAR(_PyRuntime.gc.callbacks);
}

/* for debugging */
void
_PyGC_Dump(PyGC_Head *g)
{
    _PyObject_Dump(FROM_GC(g));
}

/* extension modules might be compiled with GC support so these
   functions must always be available */

#undef PyObject_GC_Track
#undef PyObject_GC_UnTrack
#undef PyObject_GC_Del
#undef _PyObject_GC_Malloc

void
PyObject_GC_Track(void *op)
{
    _PyObject_GC_TRACK(op);
}
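
/* Illustrative usage sketch (not part of this module): the canonical
 * pattern for a container type in an extension module is to allocate the
 * object, fully initialize every PyObject* field, and only then start
 * tracking it, so tp_traverse never sees uninitialized memory.  The
 * MyObject type and its payload field are hypothetical:
 *
 *     MyObject *op = PyObject_GC_New(MyObject, &MyObject_Type);
 *     if (op == NULL)
 *         return NULL;
 *     op->payload = NULL;       // initialize traversable fields first
 *     PyObject_GC_Track(op);    // only now may tp_traverse be called
 *     return (PyObject *)op;
 */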

void
PyObject_GC_UnTrack(void *op)
{
    /* Obscure:  the Py_TRASHCAN mechanism requires that we be able to
     * call PyObject_GC_UnTrack twice on an object.
     */
    if (IS_TRACKED(op))
        _PyObject_GC_UNTRACK(op);
}
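
/* Illustrative usage sketch (not part of this module): a matching
 * tp_dealloc untracks the object before tearing down its fields, so a
 * collection triggered during teardown cannot traverse a half-destroyed
 * object.  Because PyObject_GC_UnTrack tolerates a second call (see the
 * trashcan note above), this stays safe under Py_TRASHCAN deferral.
 * MyObject is hypothetical:
 *
 *     static void
 *     myobject_dealloc(MyObject *op)
 *     {
 *         PyObject_GC_UnTrack(op);
 *         Py_XDECREF(op->payload);
 *         PyObject_GC_Del(op);
 *     }
 */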

static PyObject *
_PyObject_GC_Alloc(int use_calloc, size_t basicsize)
{
    PyObject *op;
    PyGC_Head *g;
    size_t size;
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
        return PyErr_NoMemory();
    size = sizeof(PyGC_Head) + basicsize;
    if (use_calloc)
        g = (PyGC_Head *)PyObject_Calloc(1, size);
    else
        g = (PyGC_Head *)PyObject_Malloc(size);
    if (g == NULL)
        return PyErr_NoMemory();
    g->gc.gc_refs = 0;
    _PyGCHead_SET_REFS(g, GC_UNTRACKED);
    _PyRuntime.gc.generations[0].count++; /* number of allocated GC objects */
    if (_PyRuntime.gc.generations[0].count > _PyRuntime.gc.generations[0].threshold &&
        _PyRuntime.gc.enabled &&
        _PyRuntime.gc.generations[0].threshold &&
        !_PyRuntime.gc.collecting &&
        !PyErr_Occurred()) {
        _PyRuntime.gc.collecting = 1;
        collect_generations();
        _PyRuntime.gc.collecting = 0;
    }
    op = FROM_GC(g);
    return op;
}

PyObject *
_PyObject_GC_Malloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(0, basicsize);
}

PyObject *
_PyObject_GC_Calloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(1, basicsize);
}

PyObject *
_PyObject_GC_New(PyTypeObject *tp)
{
    PyObject *op = _PyObject_GC_Malloc(_PyObject_SIZE(tp));
    if (op != NULL)
        op = PyObject_INIT(op, tp);
    return op;
}

PyVarObject *
_PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems)
{
    size_t size;
    PyVarObject *op;

    if (nitems < 0) {
        PyErr_BadInternalCall();
        return NULL;
    }
    size = _PyObject_VAR_SIZE(tp, nitems);
    op = (PyVarObject *) _PyObject_GC_Malloc(size);
    if (op != NULL)
        op = PyObject_INIT_VAR(op, tp, nitems);
    return op;
}

PyVarObject *
_PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
{
    const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
    PyGC_Head *g = AS_GC(op);
    assert(!IS_TRACKED(op));
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
        return (PyVarObject *)PyErr_NoMemory();
    g = (PyGC_Head *)PyObject_REALLOC(g, sizeof(PyGC_Head) + basicsize);
    if (g == NULL)
        return (PyVarObject *)PyErr_NoMemory();
    op = (PyVarObject *) FROM_GC(g);
    Py_SIZE(op) = nitems;
    return op;
}

void
PyObject_GC_Del(void *op)
{
    PyGC_Head *g = AS_GC(op);
    if (IS_TRACKED(op))
        gc_list_remove(g);
    if (_PyRuntime.gc.generations[0].count > 0) {
        _PyRuntime.gc.generations[0].count--;
    }
    PyObject_FREE(g);
}