/*

  Reference Cycle Garbage Collection
  ==================================

  Neil Schemenauer <nas@arctrix.com>

  Based on a post on the python-dev list.  Ideas from Guido van Rossum,
  Eric Tiedemann, and various others.

  http://www.arctrix.com/nas/python/gc/
  http://www.python.org/pipermail/python-dev/2000-March/003869.html
  http://www.python.org/pipermail/python-dev/2000-March/004010.html
  http://www.python.org/pipermail/python-dev/2000-March/004022.html

  For a high-level view of the collection process, read the collect()
  function.

*/
#include "Python.h"
#include "frameobject.h"        /* for PyFrame_ClearFreeList */
/* Get an object's GC head */
#define AS_GC(o) ((PyGC_Head *)(o)-1)

/* Get the object given the GC head */
#define FROM_GC(g) ((PyObject *)(((PyGC_Head *)g)+1))
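
/* A minimal sketch (not compiled), illustrating the assumption behind the
 * two macros above: every GC'ed allocation places a PyGC_Head immediately
 * before the PyObject, so the conversions are plain pointer arithmetic:
 *
 *     [ PyGC_Head ][ PyObject ... ]
 *     ^             ^
 *     AS_GC(op)     op == FROM_GC(AS_GC(op))
 */
#if 0
static void
layout_example(PyObject *op)
{
    PyGC_Head *g = AS_GC(op);   /* step back over the header */
    assert(FROM_GC(g) == op);   /* and forward again */
}
#endif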
/*** Global GC state ***/

struct gc_generation {
    PyGC_Head head;
    int threshold;              /* collection threshold */
    int count;                  /* count of allocations or collections of
                                   younger generations */
};

#define NUM_GENERATIONS 3
#define GEN_HEAD(n) (&generations[n].head)
/* linked lists of container objects */
static struct gc_generation generations[NUM_GENERATIONS] = {
    /* PyGC_Head,                               threshold,      count */
    {{{GEN_HEAD(0), GEN_HEAD(0), 0}},           700,            0},
    {{{GEN_HEAD(1), GEN_HEAD(1), 0}},           10,             0},
    {{{GEN_HEAD(2), GEN_HEAD(2), 0}},           10,             0},
};

PyGC_Head *_PyGC_generation0 = GEN_HEAD(0);
static int enabled = 1; /* automatic collection enabled? */

/* true if we are currently running the collector */
static int collecting = 0;

/* list of uncollectable objects */
static PyObject *garbage = NULL;

/* Python string to use if unhandled exception occurs */
static PyObject *gc_str = NULL;

/* Python string used to look for __del__ attribute. */
static PyObject *delstr = NULL;
/* This is the number of objects that survived the last full collection.  It
   approximates the number of long-lived objects tracked by the GC.

   (By "full collection", we mean a collection of the oldest generation.)
*/
static Py_ssize_t long_lived_total = 0;

/* This is the number of objects that survived all "non-full" collections,
   and are awaiting their first full collection.
*/
static Py_ssize_t long_lived_pending = 0;
/*
   NOTE: about the counting of long-lived objects.

   To limit the cost of garbage collection, there are two strategies:
     - make each collection faster, e.g. by scanning fewer objects;
     - do fewer collections.
   This heuristic is about the latter strategy.

   In addition to the various configurable thresholds, we only trigger a
   full collection if the ratio
        long_lived_pending / long_lived_total
   is above a given value (hardwired to 25%).

   The reason is that, while "non-full" collections (i.e., collections of
   the young and middle generations) will always examine roughly the same
   number of objects -- determined by the aforementioned thresholds --,
   the cost of a full collection is proportional to the total number of
   long-lived objects, which is virtually unbounded.

   Indeed, it has been remarked that doing a full collection every
   <constant number> of object creations entails a dramatic performance
   degradation in workloads that consist of creating and storing lots of
   long-lived objects (e.g. building a large list of GC-tracked objects would
   show quadratic performance, instead of linear as expected: see issue #4074).

   Using the above ratio, instead, yields amortized linear performance in
   the total number of objects (the effect of which can be summarized
   thusly: "each full garbage collection is more and more costly as the
   number of objects grows, but we do fewer and fewer of them").

   This heuristic was suggested by Martin von Löwis on python-dev in
   June 2008.  His original analysis and proposal can be found at:
   http://mail.python.org/pipermail/python-dev/2008-June/080579.html
*/
/*
   NOTE: about untracking of mutable objects.

   Certain types of container cannot participate in a reference cycle, and
   so do not need to be tracked by the garbage collector.  Untracking these
   objects reduces the cost of garbage collections.  However, determining
   which objects may be untracked is not free, and the costs must be
   weighed against the benefits for garbage collection.

   There are two possible strategies for when to untrack a container:

   i) When the container is created.
   ii) When the container is examined by the garbage collector.

   Tuples containing only immutable objects (integers, strings etc, and
   recursively, tuples of immutable objects) do not need to be tracked.
   The interpreter creates a large number of tuples, many of which will
   not survive until garbage collection.  It is therefore not worthwhile
   to untrack eligible tuples at creation time.

   Instead, all tuples except the empty tuple are tracked when created.
   During garbage collection it is determined whether any surviving tuples
   can be untracked.  A tuple can be untracked if all of its contents are
   already not tracked.  Tuples are examined for untracking in all garbage
   collection cycles.  It may take more than one cycle to untrack a tuple.

   Dictionaries containing only immutable objects also do not need to be
   tracked.  Dictionaries are untracked when created.  If a tracked item is
   inserted into a dictionary (either as a key or value), the dictionary
   becomes tracked.  During a full garbage collection (all generations),
   the collector will untrack any dictionaries whose contents are not
   tracked.

   The module provides the Python function is_tracked(obj), which returns
   the CURRENT tracking status of the object.  Subsequent garbage
   collections may change the tracking status of the object.

   Untracking of certain containers was introduced in issue #4688, and
   the algorithm was refined in response to issue #14775.
*/
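
/* A minimal sketch (not compiled) of how the dict strategy above is
 * observable through the C API; IS_TRACKED is the file-local macro defined
 * below.  Lists are always tracked, so inserting one into a dict flips the
 * dict from untracked to tracked. */
#if 0
PyObject *d = PyDict_New();         /* dicts start untracked */
PyObject *lst = PyList_New(0);      /* lists are always tracked */
assert(!IS_TRACKED(d));
PyDict_SetItemString(d, "k", lst);  /* tracked value => dict is tracked */
assert(IS_TRACKED(d));
#endif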
/* set for debugging information */
#define DEBUG_STATS             (1<<0) /* print collection statistics */
#define DEBUG_COLLECTABLE       (1<<1) /* print collectable objects */
#define DEBUG_UNCOLLECTABLE     (1<<2) /* print uncollectable objects */
#define DEBUG_INSTANCES         (1<<3) /* print instances */
#define DEBUG_OBJECTS           (1<<4) /* print other objects */
#define DEBUG_SAVEALL           (1<<5) /* save all garbage in gc.garbage */
#define DEBUG_LEAK              DEBUG_COLLECTABLE | \
                                DEBUG_UNCOLLECTABLE | \
                                DEBUG_INSTANCES | \
                                DEBUG_OBJECTS | \
                                DEBUG_SAVEALL
static int debug;
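
/* A minimal sketch (not compiled): the flags combine with bitwise-or, so
 * the Python-level call gc.set_debug(gc.DEBUG_LEAK) amounts to setting
 * every flag except DEBUG_STATS. */
#if 0
debug = DEBUG_COLLECTABLE | DEBUG_UNCOLLECTABLE |
        DEBUG_INSTANCES | DEBUG_OBJECTS | DEBUG_SAVEALL;
#endif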
static PyObject *tmod = NULL;
/*--------------------------------------------------------------------------
gc_refs values.

Between collections, every gc'ed object has one of two gc_refs values:

GC_UNTRACKED
    The initial state; objects returned by PyObject_GC_Malloc are in this
    state.  The object doesn't live in any generation list, and its
    tp_traverse slot must not be called.

GC_REACHABLE
    The object lives in some generation list, and its tp_traverse is safe to
    call.  An object transitions to GC_REACHABLE when PyObject_GC_Track
    is called.

During a collection, gc_refs can temporarily take on other states:

>= 0
    At the start of a collection, update_refs() copies the true refcount
    to gc_refs, for each object in the generation being collected.
    subtract_refs() then adjusts gc_refs so that it equals the number of
    times an object is referenced directly from outside the generation
    being collected.
    gc_refs remains >= 0 throughout these steps.

GC_TENTATIVELY_UNREACHABLE
    move_unreachable() then moves objects not reachable (whether directly or
    indirectly) from outside the generation into an "unreachable" set.
    Objects that are found to be reachable have gc_refs set to GC_REACHABLE
    again.  Objects that are found to be unreachable have gc_refs set to
    GC_TENTATIVELY_UNREACHABLE.  It's "tentatively" because the pass doing
    this can't be sure until it ends, and GC_TENTATIVELY_UNREACHABLE may
    transition back to GC_REACHABLE.

Only objects with GC_TENTATIVELY_UNREACHABLE still set are candidates
for collection.  If it's decided not to collect such an object (e.g.,
it has a __del__ method), its gc_refs is restored to GC_REACHABLE again.
----------------------------------------------------------------------------
*/
#define GC_UNTRACKED                    _PyGC_REFS_UNTRACKED
#define GC_REACHABLE                    _PyGC_REFS_REACHABLE
#define GC_TENTATIVELY_UNREACHABLE      _PyGC_REFS_TENTATIVELY_UNREACHABLE

#define IS_TRACKED(o) ((AS_GC(o))->gc.gc_refs != GC_UNTRACKED)
#define IS_REACHABLE(o) ((AS_GC(o))->gc.gc_refs == GC_REACHABLE)
#define IS_TENTATIVELY_UNREACHABLE(o) ( \
    (AS_GC(o))->gc.gc_refs == GC_TENTATIVELY_UNREACHABLE)
/*** list functions ***/

static void
gc_list_init(PyGC_Head *list)
{
    list->gc.gc_prev = list;
    list->gc.gc_next = list;
}

static int
gc_list_is_empty(PyGC_Head *list)
{
    return (list->gc.gc_next == list);
}
#if 0
/* This became unused after gc_list_move() was introduced. */
/* Append `node` to `list`. */
static void
gc_list_append(PyGC_Head *node, PyGC_Head *list)
{
    node->gc.gc_next = list;
    node->gc.gc_prev = list->gc.gc_prev;
    node->gc.gc_prev->gc.gc_next = node;
    list->gc.gc_prev = node;
}
#endif
/* Remove `node` from the gc list it's currently in. */
static void
gc_list_remove(PyGC_Head *node)
{
    node->gc.gc_prev->gc.gc_next = node->gc.gc_next;
    node->gc.gc_next->gc.gc_prev = node->gc.gc_prev;
    node->gc.gc_next = NULL; /* object is not currently tracked */
}
/* Move `node` from the gc list it's currently in (which is not explicitly
 * named here) to the end of `list`.  This is semantically the same as
 * gc_list_remove(node) followed by gc_list_append(node, list).
 */
static void
gc_list_move(PyGC_Head *node, PyGC_Head *list)
{
    PyGC_Head *new_prev;
    PyGC_Head *current_prev = node->gc.gc_prev;
    PyGC_Head *current_next = node->gc.gc_next;
    /* Unlink from current list. */
    current_prev->gc.gc_next = current_next;
    current_next->gc.gc_prev = current_prev;
    /* Relink at end of new list. */
    new_prev = node->gc.gc_prev = list->gc.gc_prev;
    new_prev->gc.gc_next = list->gc.gc_prev = node;
    node->gc.gc_next = list;
}
/* append list `from` onto list `to`; `from` becomes an empty list */
static void
gc_list_merge(PyGC_Head *from, PyGC_Head *to)
{
    PyGC_Head *tail;
    assert(from != to);
    if (!gc_list_is_empty(from)) {
        tail = to->gc.gc_prev;
        tail->gc.gc_next = from->gc.gc_next;
        tail->gc.gc_next->gc.gc_prev = tail;
        to->gc.gc_prev = from->gc.gc_prev;
        to->gc.gc_prev->gc.gc_next = to;
    }
    gc_list_init(from);
}
static Py_ssize_t
gc_list_size(PyGC_Head *list)
{
    PyGC_Head *gc;
    Py_ssize_t n = 0;
    for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
        n++;
    }
    return n;
}
/* Append objects in a GC list to a Python list.
 * Return 0 if all OK, < 0 if error (out of memory for list).
 */
static int
append_objects(PyObject *py_list, PyGC_Head *gc_list)
{
    PyGC_Head *gc;
    for (gc = gc_list->gc.gc_next; gc != gc_list; gc = gc->gc.gc_next) {
        PyObject *op = FROM_GC(gc);
        if (op != py_list) {
            if (PyList_Append(py_list, op)) {
                return -1; /* exception */
            }
        }
    }
    return 0;
}

/*** end of list stuff ***/
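
/* A minimal sketch (not compiled) of the invariant behind the helpers above:
 * each generation is a circular doubly-linked list whose head sentinel is
 * itself a PyGC_Head, so an empty list points at itself in both directions. */
#if 0
PyGC_Head head;
gc_list_init(&head);
assert(gc_list_is_empty(&head));
assert(gc_list_size(&head) == 0);
assert(head.gc.gc_next == &head && head.gc.gc_prev == &head);
#endif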
/* Set all gc_refs = ob_refcnt.  After this, gc_refs is > 0 for all objects
 * in containers, and is GC_REACHABLE for all tracked gc objects not in
 * containers.
 */
static void
update_refs(PyGC_Head *containers)
{
    PyGC_Head *gc = containers->gc.gc_next;
    for (; gc != containers; gc = gc->gc.gc_next) {
        assert(gc->gc.gc_refs == GC_REACHABLE);
        gc->gc.gc_refs = Py_REFCNT(FROM_GC(gc));
        /* Python's cyclic gc should never see an incoming refcount
         * of 0:  if something decref'ed to 0, it should have been
         * deallocated immediately at that time.
         * Possible cause (if the assert triggers):  a tp_dealloc
         * routine left a gc-aware object tracked during its teardown
         * phase, and did something -- or allowed something to happen --
         * that called back into Python.  gc can trigger then, and may
         * see the still-tracked dying object.  Before this assert
         * was added, such mistakes went on to allow gc to try to
         * delete the object again.  In a debug build, that caused
         * a mysterious segfault, when _Py_ForgetReference tried
         * to remove the object from the doubly-linked list of all
         * objects a second time.  In a release build, an actual
         * double deallocation occurred, which leads to corruption
         * of the allocator's internal bookkeeping pointers.  That's
         * so serious that maybe this should be a release-build
         * check instead of an assert?
         */
        assert(gc->gc.gc_refs != 0);
    }
}
/* A traversal callback for subtract_refs. */
static int
visit_decref(PyObject *op, void *data)
{
    assert(op != NULL);
    if (PyObject_IS_GC(op)) {
        PyGC_Head *gc = AS_GC(op);
        /* We're only interested in gc_refs for objects in the
         * generation being collected, which can be recognized
         * because only they have positive gc_refs.
         */
        assert(gc->gc.gc_refs != 0); /* else refcount was too small */
        if (gc->gc.gc_refs > 0)
            gc->gc.gc_refs--;
    }
    return 0;
}

/* Subtract internal references from gc_refs.  After this, gc_refs is >= 0
 * for all objects in containers, and is GC_REACHABLE for all tracked gc
 * objects not in containers.  The ones with gc_refs > 0 are directly
 * reachable from outside containers, and so can't be collected.
 */
static void
subtract_refs(PyGC_Head *containers)
{
    traverseproc traverse;
    PyGC_Head *gc = containers->gc.gc_next;
    for (; gc != containers; gc = gc->gc.gc_next) {
        traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
        (void) traverse(FROM_GC(gc),
                        (visitproc)visit_decref,
                        NULL);
    }
}
/* A traversal callback for move_unreachable. */
static int
visit_reachable(PyObject *op, PyGC_Head *reachable)
{
    if (PyObject_IS_GC(op)) {
        PyGC_Head *gc = AS_GC(op);
        const Py_ssize_t gc_refs = gc->gc.gc_refs;

        if (gc_refs == 0) {
            /* This is in move_unreachable's 'young' list, but
             * the traversal hasn't yet gotten to it.  All
             * we need to do is tell move_unreachable that it's
             * reachable.
             */
            gc->gc.gc_refs = 1;
        }
        else if (gc_refs == GC_TENTATIVELY_UNREACHABLE) {
            /* This had gc_refs = 0 when move_unreachable got
             * to it, but turns out it's reachable after all.
             * Move it back to move_unreachable's 'young' list,
             * and move_unreachable will eventually get to it
             * again.
             */
            gc_list_move(gc, reachable);
            gc->gc.gc_refs = 1;
        }
        /* Else there's nothing to do.
         * If gc_refs > 0, it must be in move_unreachable's 'young'
         * list, and move_unreachable will eventually get to it.
         * If gc_refs == GC_REACHABLE, it's either in some other
         * generation so we don't care about it, or move_unreachable
         * already dealt with it.
         * If gc_refs == GC_UNTRACKED, it must be ignored.
         */
        else {
            assert(gc_refs > 0
                   || gc_refs == GC_REACHABLE
                   || gc_refs == GC_UNTRACKED);
        }
    }
    return 0;
}
/* Move the unreachable objects from young to unreachable.  After this,
 * all objects in young have gc_refs = GC_REACHABLE, and all objects in
 * unreachable have gc_refs = GC_TENTATIVELY_UNREACHABLE.  All tracked
 * gc objects not in young or unreachable still have gc_refs = GC_REACHABLE.
 * All objects in young after this are directly or indirectly reachable
 * from outside the original young; and all objects in unreachable are
 * not.
 */
static void
move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
{
    PyGC_Head *gc = young->gc.gc_next;

    /* Invariants:  all objects "to the left" of us in young have gc_refs
     * = GC_REACHABLE, and are indeed reachable (directly or indirectly)
     * from outside the young list as it was at entry.  All other objects
     * from the original young "to the left" of us are in unreachable now,
     * and have gc_refs = GC_TENTATIVELY_UNREACHABLE.  All objects to the
     * left of us in 'young' now have been scanned, and no objects here
     * or to the right have been scanned yet.
     */

    while (gc != young) {
        PyGC_Head *next;

        if (gc->gc.gc_refs) {
            /* gc is definitely reachable from outside the
             * original 'young'.  Mark it as such, and traverse
             * its pointers to find any other objects that may
             * be directly reachable from it.  Note that the
             * call to tp_traverse may append objects to young,
             * so we have to wait until it returns to determine
             * the next object to visit.
             */
            PyObject *op = FROM_GC(gc);
            traverseproc traverse = Py_TYPE(op)->tp_traverse;
            assert(gc->gc.gc_refs > 0);
            gc->gc.gc_refs = GC_REACHABLE;
            (void) traverse(op,
                            (visitproc)visit_reachable,
                            (void *)young);
            next = gc->gc.gc_next;
            if (PyTuple_CheckExact(op)) {
                _PyTuple_MaybeUntrack(op);
            }
        }
        else {
            /* This *may* be unreachable.  To make progress,
             * assume it is.  gc isn't directly reachable from
             * any object we've already traversed, but may be
             * reachable from an object we haven't gotten to yet.
             * visit_reachable will eventually move gc back into
             * young if that's so, and we'll see it again.
             */
            next = gc->gc.gc_next;
            gc_list_move(gc, unreachable);
            gc->gc.gc_refs = GC_TENTATIVELY_UNREACHABLE;
        }
        gc = next;
    }
}
/* Return true if object has a finalization method.
 * CAUTION:  An instance of an old-style class has to be checked for a
 * __del__ method, and earlier versions of this used to call PyObject_HasAttr,
 * which in turn could call the class's __getattr__ hook (if any).  That
 * could invoke arbitrary Python code, mutating the object graph in arbitrary
 * ways, and that was the source of some excruciatingly subtle bugs.
 */
static int
has_finalizer(PyObject *op)
{
    if (PyInstance_Check(op)) {
        assert(delstr != NULL);
        return _PyInstance_Lookup(op, delstr) != NULL;
    }
    else if (PyType_HasFeature(op->ob_type, Py_TPFLAGS_HEAPTYPE))
        return op->ob_type->tp_del != NULL;
    else if (PyGen_CheckExact(op))
        return PyGen_NeedsFinalizing((PyGenObject *)op);
    else
        return 0;
}
/* Try to untrack all currently tracked dictionaries */
static void
untrack_dicts(PyGC_Head *head)
{
    PyGC_Head *next, *gc = head->gc.gc_next;
    while (gc != head) {
        PyObject *op = FROM_GC(gc);
        next = gc->gc.gc_next;
        if (PyDict_CheckExact(op))
            _PyDict_MaybeUntrack(op);
        gc = next;
    }
}
/* Move the objects in unreachable with __del__ methods into `finalizers`.
 * Objects moved into `finalizers` have gc_refs set to GC_REACHABLE; the
 * objects remaining in unreachable are left at GC_TENTATIVELY_UNREACHABLE.
 */
static void
move_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
{
    PyGC_Head *gc;
    PyGC_Head *next;

    /* March over unreachable.  Move objects with finalizers into
     * `finalizers`.
     */
    for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
        PyObject *op = FROM_GC(gc);

        assert(IS_TENTATIVELY_UNREACHABLE(op));
        next = gc->gc.gc_next;

        if (has_finalizer(op)) {
            gc_list_move(gc, finalizers);
            gc->gc.gc_refs = GC_REACHABLE;
        }
    }
}
/* A traversal callback for move_finalizer_reachable. */
static int
visit_move(PyObject *op, PyGC_Head *tolist)
{
    if (PyObject_IS_GC(op)) {
        if (IS_TENTATIVELY_UNREACHABLE(op)) {
            PyGC_Head *gc = AS_GC(op);
            gc_list_move(gc, tolist);
            gc->gc.gc_refs = GC_REACHABLE;
        }
    }
    return 0;
}
/* Move objects that are reachable from finalizers, from the unreachable set
 * into the finalizers set.
 */
static void
move_finalizer_reachable(PyGC_Head *finalizers)
{
    traverseproc traverse;
    PyGC_Head *gc = finalizers->gc.gc_next;
    for (; gc != finalizers; gc = gc->gc.gc_next) {
        /* Note that the finalizers list may grow during this. */
        traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
        (void) traverse(FROM_GC(gc),
                        (visitproc)visit_move,
                        (void *)finalizers);
    }
}
/* Clear all weakrefs to unreachable objects, and if such a weakref has a
 * callback, invoke it if necessary.  Note that it's possible for such
 * weakrefs to be outside the unreachable set -- indeed, those are precisely
 * the weakrefs whose callbacks must be invoked.  See gc_weakref.txt for
 * overview & some details.  Some weakrefs with callbacks may be reclaimed
 * directly by this routine; the number reclaimed is the return value.  Other
 * weakrefs with callbacks may be moved into the `old` generation.  Objects
 * moved into `old` have gc_refs set to GC_REACHABLE; the objects remaining in
 * unreachable are left at GC_TENTATIVELY_UNREACHABLE.  When this returns,
 * no object in `unreachable` is weakly referenced anymore.
 */
static int
handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
{
    PyGC_Head *gc;
    PyObject *op;               /* generally FROM_GC(gc) */
    PyWeakReference *wr;        /* generally a cast of op */
    PyGC_Head wrcb_to_call;     /* weakrefs with callbacks to call */
    PyGC_Head *next;
    int num_freed = 0;

    gc_list_init(&wrcb_to_call);

    /* Clear all weakrefs to the objects in unreachable.  If such a weakref
     * also has a callback, move it into `wrcb_to_call` if the callback
     * needs to be invoked.  Note that we cannot invoke any callbacks until
     * all weakrefs to unreachable objects are cleared, lest the callback
     * resurrect an unreachable object via a still-active weakref.  We
     * make another pass over wrcb_to_call, invoking callbacks, after this
     * pass completes.
     */
    for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
        PyWeakReference **wrlist;

        op = FROM_GC(gc);
        assert(IS_TENTATIVELY_UNREACHABLE(op));
        next = gc->gc.gc_next;

        if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op)))
            continue;

        /* It supports weakrefs.  Does it have any? */
        wrlist = (PyWeakReference **)
                        PyObject_GET_WEAKREFS_LISTPTR(op);

        /* `op` may have some weakrefs.  March over the list, clear
         * all the weakrefs, and move the weakrefs with callbacks
         * that must be called into wrcb_to_call.
         */
        for (wr = *wrlist; wr != NULL; wr = *wrlist) {
            PyGC_Head *wrasgc;          /* AS_GC(wr) */

            /* _PyWeakref_ClearRef clears the weakref but leaves
             * the callback pointer intact.  Obscure:  it also
             * changes *wrlist.
             */
            assert(wr->wr_object == op);
            _PyWeakref_ClearRef(wr);
            assert(wr->wr_object == Py_None);
            if (wr->wr_callback == NULL)
                continue;               /* no callback */

    /* Headache time.  `op` is going away, and is weakly referenced by
     * `wr`, which has a callback.  Should the callback be invoked?  If wr
     * is also trash, no:
     *
     * 1. There's no need to call it.  The object and the weakref are
     *    both going away, so it's legitimate to pretend the weakref is
     *    going away first.  The user has to ensure a weakref outlives its
     *    referent if they want a guarantee that the wr callback will get
     *    invoked.
     *
     * 2. It may be catastrophic to call it.  If the callback is also in
     *    cyclic trash (CT), then although the CT is unreachable from
     *    outside the current generation, CT may be reachable from the
     *    callback.  Then the callback could resurrect insane objects.
     *
     * Since the callback is never needed and may be unsafe in this case,
     * wr is simply left in the unreachable set.  Note that because we
     * already called _PyWeakref_ClearRef(wr), its callback will never
     * trigger.
     *
     * OTOH, if wr isn't part of CT, we should invoke the callback:  the
     * weakref outlived the trash.  Note that since wr isn't CT in this
     * case, its callback can't be CT either -- wr acted as an external
     * root to this generation, and therefore its callback did too.  So
     * nothing in CT is reachable from the callback either, so it's hard
     * to imagine how calling it later could create a problem for us.  wr
     * is moved to wrcb_to_call in this case.
     */
            if (IS_TENTATIVELY_UNREACHABLE(wr))
                continue;
            assert(IS_REACHABLE(wr));

            /* Create a new reference so that wr can't go away
             * before we can process it again.
             */
            Py_INCREF(wr);

            /* Move wr to wrcb_to_call, for the next pass. */
            wrasgc = AS_GC(wr);
            assert(wrasgc != next); /* wrasgc is reachable, but
                                       next isn't, so they can't
                                       be the same */
            gc_list_move(wrasgc, &wrcb_to_call);
        }
    }

    /* Invoke the callbacks we decided to honor.  It's safe to invoke them
     * because they can't reference unreachable objects.
     */
    while (! gc_list_is_empty(&wrcb_to_call)) {
        PyObject *temp;
        PyObject *callback;

        gc = wrcb_to_call.gc.gc_next;
        op = FROM_GC(gc);
        assert(IS_REACHABLE(op));
        assert(PyWeakref_Check(op));
        wr = (PyWeakReference *)op;
        callback = wr->wr_callback;
        assert(callback != NULL);

        /* copy-paste of weakrefobject.c's handle_callback() */
        temp = PyObject_CallFunctionObjArgs(callback, wr, NULL);
        if (temp == NULL)
            PyErr_WriteUnraisable(callback);
        else
            Py_DECREF(temp);

        /* Give up the reference we created in the first pass.  When
         * op's refcount hits 0 (which it may or may not do right now),
         * op's tp_dealloc will decref op->wr_callback too.  Note
         * that the refcount probably will hit 0 now, and because this
         * weakref was reachable to begin with, gc didn't already
         * add it to its count of freed objects.  Example:  a reachable
         * weak value dict maps some key to this reachable weakref.
         * The callback removes this key->weakref mapping from the
         * dict, leaving no other references to the weakref (excepting
         * ours).
         */
        Py_DECREF(op);
        if (wrcb_to_call.gc.gc_next == gc) {
            /* object is still alive -- move it */
            gc_list_move(gc, old);
        }
        else
            ++num_freed;
    }

    return num_freed;
}
static void
debug_instance(char *msg, PyInstanceObject *inst)
{
    char *cname;
    /* simple version of instance_repr */
    PyObject *classname = inst->in_class->cl_name;
    if (classname != NULL && PyString_Check(classname))
        cname = PyString_AsString(classname);
    else
        cname = "?";
    PySys_WriteStderr("gc: %.100s <%.100s instance at %p>\n",
                      msg, cname, inst);
}

static void
debug_cycle(char *msg, PyObject *op)
{
    if ((debug & DEBUG_INSTANCES) && PyInstance_Check(op)) {
        debug_instance(msg, (PyInstanceObject *)op);
    }
    else if (debug & DEBUG_OBJECTS) {
        PySys_WriteStderr("gc: %.100s <%.100s %p>\n",
                          msg, Py_TYPE(op)->tp_name, op);
    }
}
/* Handle uncollectable garbage (cycles with finalizers, and stuff reachable
 * only from such cycles).
 * If DEBUG_SAVEALL, all objects in finalizers are appended to the module
 * garbage list (a Python list), else only the objects in finalizers with
 * __del__ methods are appended to garbage.  All objects in finalizers are
 * merged into the old list regardless.
 * Returns 0 if all OK, <0 on error (out of memory to grow the garbage list).
 * The finalizers list is made empty on a successful return.
 */
static int
handle_finalizers(PyGC_Head *finalizers, PyGC_Head *old)
{
    PyGC_Head *gc = finalizers->gc.gc_next;

    if (garbage == NULL) {
        garbage = PyList_New(0);
        if (garbage == NULL)
            Py_FatalError("gc couldn't create gc.garbage list");
    }
    for (; gc != finalizers; gc = gc->gc.gc_next) {
        PyObject *op = FROM_GC(gc);

        if ((debug & DEBUG_SAVEALL) || has_finalizer(op)) {
            if (PyList_Append(garbage, op) < 0)
                return -1;
        }
    }

    gc_list_merge(finalizers, old);
    return 0;
}
/* Break reference cycles by clearing the containers involved.  This is
 * tricky business as the lists can be changing and we don't know which
 * objects may be freed.  It is possible I screwed something up here.
 */
static void
delete_garbage(PyGC_Head *collectable, PyGC_Head *old)
{
    inquiry clear;

    while (!gc_list_is_empty(collectable)) {
        PyGC_Head *gc = collectable->gc.gc_next;
        PyObject *op = FROM_GC(gc);

        assert(IS_TENTATIVELY_UNREACHABLE(op));
        if (debug & DEBUG_SAVEALL) {
            PyList_Append(garbage, op);
        }
        else {
            if ((clear = Py_TYPE(op)->tp_clear) != NULL) {
                Py_INCREF(op);
                clear(op);
                Py_DECREF(op);
            }
        }
        if (collectable->gc.gc_next == gc) {
            /* object is still alive, move it, it may die later */
            gc_list_move(gc, old);
            gc->gc.gc_refs = GC_REACHABLE;
        }
    }
}
/* Clear all free lists.
 * All free lists are cleared during the collection of the highest generation.
 * Allocated items in the free list may keep a pymalloc arena occupied.
 * Clearing the free lists may give back memory to the OS earlier.
 */
static void
clear_freelists(void)
{
    (void)PyMethod_ClearFreeList();
    (void)PyFrame_ClearFreeList();
    (void)PyCFunction_ClearFreeList();
    (void)PyTuple_ClearFreeList();
#ifdef Py_USING_UNICODE
    (void)PyUnicode_ClearFreeList();
#endif
    (void)PyInt_ClearFreeList();
    (void)PyFloat_ClearFreeList();
}
static double
get_time(void)
{
    double result = 0;
    if (tmod != NULL) {
        PyObject *f = PyObject_CallMethod(tmod, "time", NULL);
        if (f == NULL) {
            PyErr_Clear();
        }
        else {
            if (PyFloat_Check(f))
                result = PyFloat_AsDouble(f);
            Py_DECREF(f);
        }
    }
    return result;
}
/* This is the main function.  Read this to understand how the
 * collection process works. */
static Py_ssize_t
collect(int generation)
{
    int i;
    Py_ssize_t m = 0; /* # objects collected */
    Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */
    PyGC_Head *young; /* the generation we are examining */
    PyGC_Head *old; /* next older generation */
    PyGC_Head unreachable; /* non-problematic unreachable trash */
    PyGC_Head finalizers;  /* objects with, & reachable from, __del__ */
    PyGC_Head *gc;
    double t1 = 0.0;

    if (delstr == NULL) {
        delstr = PyString_InternFromString("__del__");
        if (delstr == NULL)
            Py_FatalError("gc couldn't allocate \"__del__\"");
    }

    if (debug & DEBUG_STATS) {
        PySys_WriteStderr("gc: collecting generation %d...\n",
                          generation);
        PySys_WriteStderr("gc: objects in each generation:");
        for (i = 0; i < NUM_GENERATIONS; i++)
            PySys_WriteStderr(" %" PY_FORMAT_SIZE_T "d",
                              gc_list_size(GEN_HEAD(i)));
        t1 = get_time();
        PySys_WriteStderr("\n");
    }

    /* update collection and allocation counters */
    if (generation+1 < NUM_GENERATIONS)
        generations[generation+1].count += 1;
    for (i = 0; i <= generation; i++)
        generations[i].count = 0;

    /* merge younger generations with one we are currently collecting */
    for (i = 0; i < generation; i++) {
        gc_list_merge(GEN_HEAD(i), GEN_HEAD(generation));
    }

    /* handy references */
    young = GEN_HEAD(generation);
    if (generation < NUM_GENERATIONS-1)
        old = GEN_HEAD(generation+1);
    else
        old = young;

    /* Using ob_refcnt and gc_refs, calculate which objects in the
     * container set are reachable from outside the set (i.e., have a
     * refcount greater than 0 when all the references within the
     * set are taken into account).
     */
    update_refs(young);
    subtract_refs(young);

    /* Leave everything reachable from outside young in young, and move
     * everything else (in young) to unreachable.
     * NOTE:  This used to move the reachable objects into a reachable
     * set instead.  But most things usually turn out to be reachable,
     * so it's more efficient to move the unreachable things.
     */
    gc_list_init(&unreachable);
    move_unreachable(young, &unreachable);

    /* Move reachable objects to next generation. */
    if (young != old) {
        if (generation == NUM_GENERATIONS - 2) {
            long_lived_pending += gc_list_size(young);
        }
        gc_list_merge(young, old);
    }
    else {
        /* We only untrack dicts in full collections, to avoid quadratic
           dict build-up.  See issue #14775. */
        untrack_dicts(young);
        long_lived_pending = 0;
        long_lived_total = gc_list_size(young);
    }

    /* All objects in unreachable are trash, but objects reachable from
     * finalizers can't safely be deleted.  Python programmers should take
     * care not to create such things.  For Python, finalizers means
     * instance objects with __del__ methods.  Weakrefs with callbacks
     * can also call arbitrary Python code but they will be dealt with by
     * handle_weakrefs().
     */
    gc_list_init(&finalizers);
    move_finalizers(&unreachable, &finalizers);
    /* finalizers contains the unreachable objects with a finalizer;
     * unreachable objects reachable *from* those are also uncollectable,
     * and we move those into the finalizers list too.
     */
    move_finalizer_reachable(&finalizers);

    /* Collect statistics on collectable objects found and print
     * debugging information.
     */
    for (gc = unreachable.gc.gc_next; gc != &unreachable;
         gc = gc->gc.gc_next) {
        m++;
        if (debug & DEBUG_COLLECTABLE) {
            debug_cycle("collectable", FROM_GC(gc));
        }
    }

    /* Clear weakrefs and invoke callbacks as necessary. */
    m += handle_weakrefs(&unreachable, old);

    /* Call tp_clear on objects in the unreachable set.  This will cause
     * the reference cycles to be broken.  It may also cause some objects
     * in finalizers to be freed.
     */
    delete_garbage(&unreachable, old);

    /* Collect statistics on uncollectable objects found and print
     * debugging information. */
    for (gc = finalizers.gc.gc_next;
         gc != &finalizers;
         gc = gc->gc.gc_next) {
        n++;
        if (debug & DEBUG_UNCOLLECTABLE)
            debug_cycle("uncollectable", FROM_GC(gc));
    }
    if (debug & DEBUG_STATS) {
        double t2 = get_time();
        if (m == 0 && n == 0)
            PySys_WriteStderr("gc: done");
        else
            PySys_WriteStderr(
                "gc: done, "
                "%" PY_FORMAT_SIZE_T "d unreachable, "
                "%" PY_FORMAT_SIZE_T "d uncollectable",
                n+m, n);
        if (t1 && t2) {
            PySys_WriteStderr(", %.4fs elapsed", t2-t1);
        }
        PySys_WriteStderr(".\n");
    }

    /* Append instances in the uncollectable set to a Python
     * reachable list of garbage.  The programmer has to deal with
     * this if they insist on creating this type of structure.
     */
    (void)handle_finalizers(&finalizers, old);

    /* Clear free list only during the collection of the highest
     * generation */
    if (generation == NUM_GENERATIONS-1) {
        clear_freelists();
    }

    if (PyErr_Occurred()) {
        if (gc_str == NULL)
            gc_str = PyString_FromString("garbage collection");
        PyErr_WriteUnraisable(gc_str);
        Py_FatalError("unexpected exception during garbage collection");
    }
    return n+m;
}
static Py_ssize_t
collect_generations(void)
{
    int i;
    Py_ssize_t n = 0;

    /* Find the oldest generation (highest numbered) where the count
     * exceeds the threshold.  Objects in that generation and
     * generations younger than it will be collected. */
    for (i = NUM_GENERATIONS-1; i >= 0; i--) {
        if (generations[i].count > generations[i].threshold) {
            /* Avoid quadratic performance degradation in number
               of tracked objects.  See comments at the beginning
               of this file, and issue #4074.
            */
            if (i == NUM_GENERATIONS - 1
                && long_lived_pending < long_lived_total / 4)
                continue;
            n = collect(i);
            break;
        }
    }
    return n;
}
PyDoc_STRVAR(gc_enable__doc__,
"enable() -> None\n"
"\n"
"Enable automatic garbage collection.\n");

static PyObject *
gc_enable(PyObject *self, PyObject *noargs)
{
    enabled = 1;
    Py_INCREF(Py_None);
    return Py_None;
}

PyDoc_STRVAR(gc_disable__doc__,
"disable() -> None\n"
"\n"
"Disable automatic garbage collection.\n");

static PyObject *
gc_disable(PyObject *self, PyObject *noargs)
{
    enabled = 0;
    Py_INCREF(Py_None);
    return Py_None;
}

PyDoc_STRVAR(gc_isenabled__doc__,
"isenabled() -> status\n"
"\n"
"Returns true if automatic garbage collection is enabled.\n");

static PyObject *
gc_isenabled(PyObject *self, PyObject *noargs)
{
    return PyBool_FromLong((long)enabled);
}
PyDoc_STRVAR(gc_collect__doc__,
"collect([generation]) -> n\n"
"\n"
"With no arguments, run a full collection.  The optional argument\n"
"may be an integer specifying which generation to collect.  A ValueError\n"
"is raised if the generation number is invalid.\n\n"
"The number of unreachable objects is returned.\n");

static PyObject *
gc_collect(PyObject *self, PyObject *args, PyObject *kws)
{
    static char *keywords[] = {"generation", NULL};
    int genarg = NUM_GENERATIONS - 1;
    Py_ssize_t n;

    if (!PyArg_ParseTupleAndKeywords(args, kws, "|i", keywords, &genarg))
        return NULL;
    else if (genarg < 0 || genarg >= NUM_GENERATIONS) {
        PyErr_SetString(PyExc_ValueError, "invalid generation");
        return NULL;
    }

    if (collecting)
        n = 0; /* already collecting, don't do anything */
    else {
        collecting = 1;
        n = collect(genarg);
        collecting = 0;
    }

    return PyInt_FromSsize_t(n);
}
PyDoc_STRVAR(gc_set_debug__doc__,
"set_debug(flags) -> None\n"
"\n"
"Set the garbage collection debugging flags.  Debugging information is\n"
"written to sys.stderr.\n"
"\n"
"flags is an integer and can have the following bits turned on:\n"
"\n"
"  DEBUG_STATS - Print statistics during collection.\n"
"  DEBUG_COLLECTABLE - Print collectable objects found.\n"
"  DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects found.\n"
"  DEBUG_INSTANCES - Print instance objects.\n"
"  DEBUG_OBJECTS - Print objects other than instances.\n"
"  DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.\n"
"  DEBUG_LEAK - Debug leaking programs (everything but STATS).\n");

static PyObject *
gc_set_debug(PyObject *self, PyObject *args)
{
    if (!PyArg_ParseTuple(args, "i:set_debug", &debug))
        return NULL;

    Py_INCREF(Py_None);
    return Py_None;
}

PyDoc_STRVAR(gc_get_debug__doc__,
"get_debug() -> flags\n"
"\n"
"Get the garbage collection debugging flags.\n");

static PyObject *
gc_get_debug(PyObject *self, PyObject *noargs)
{
    return Py_BuildValue("i", debug);
}
PyDoc_STRVAR(gc_set_thresh__doc__,
"set_threshold(threshold0, [threshold1, threshold2]) -> None\n"
"\n"
"Sets the collection thresholds.  Setting threshold0 to zero disables\n"
"collection.\n");

static PyObject *
gc_set_thresh(PyObject *self, PyObject *args)
{
    int i;
    if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
                          &generations[0].threshold,
                          &generations[1].threshold,
                          &generations[2].threshold))
        return NULL;
    for (i = 2; i < NUM_GENERATIONS; i++) {
        /* generations higher than 2 get the same threshold */
        generations[i].threshold = generations[2].threshold;
    }

    Py_INCREF(Py_None);
    return Py_None;
}

PyDoc_STRVAR(gc_get_thresh__doc__,
"get_threshold() -> (threshold0, threshold1, threshold2)\n"
"\n"
"Return the current collection thresholds.\n");

static PyObject *
gc_get_thresh(PyObject *self, PyObject *noargs)
{
    return Py_BuildValue("(iii)",
                         generations[0].threshold,
                         generations[1].threshold,
                         generations[2].threshold);
}
PyDoc_STRVAR(gc_get_count__doc__,
"get_count() -> (count0, count1, count2)\n"
"\n"
"Return the current collection counts.\n");

static PyObject *
gc_get_count(PyObject *self, PyObject *noargs)
{
    return Py_BuildValue("(iii)",
                         generations[0].count,
                         generations[1].count,
                         generations[2].count);
}
static int
referrersvisit(PyObject *obj, PyObject *objs)
{
    Py_ssize_t i;
    for (i = 0; i < PyTuple_GET_SIZE(objs); i++)
        if (PyTuple_GET_ITEM(objs, i) == obj)
            return 1;
    return 0;
}

static int
gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
{
    PyGC_Head *gc;
    PyObject *obj;
    traverseproc traverse;
    for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
        obj = FROM_GC(gc);
        traverse = Py_TYPE(obj)->tp_traverse;
        if (obj == objs || obj == resultlist)
            continue;
        if (traverse(obj, (visitproc)referrersvisit, objs)) {
            if (PyList_Append(resultlist, obj) < 0)
                return 0; /* error */
        }
    }
    return 1; /* no error */
}
PyDoc_STRVAR(gc_get_referrers__doc__,
"get_referrers(*objs) -> list\n\
Return the list of objects that directly refer to any of objs.");

static PyObject *
gc_get_referrers(PyObject *self, PyObject *args)
{
    int i;
    PyObject *result = PyList_New(0);
    if (!result) return NULL;

    for (i = 0; i < NUM_GENERATIONS; i++) {
        if (!(gc_referrers_for(args, GEN_HEAD(i), result))) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}
/* Append obj to list; return true if error (out of memory), false if OK. */
static int
referentsvisit(PyObject *obj, PyObject *list)
{
    return PyList_Append(list, obj) < 0;
}

PyDoc_STRVAR(gc_get_referents__doc__,
"get_referents(*objs) -> list\n\
Return the list of objects that are directly referred to by objs.");

static PyObject *
gc_get_referents(PyObject *self, PyObject *args)
{
    Py_ssize_t i;
    PyObject *result = PyList_New(0);

    if (result == NULL)
        return NULL;

    for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
        traverseproc traverse;
        PyObject *obj = PyTuple_GET_ITEM(args, i);

        if (! PyObject_IS_GC(obj))
            continue;
        traverse = Py_TYPE(obj)->tp_traverse;
        if (! traverse)
            continue;
        if (traverse(obj, (visitproc)referentsvisit, result)) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}
PyDoc_STRVAR(gc_get_objects__doc__,
"get_objects() -> [...]\n"
"\n"
"Return a list of objects tracked by the collector (excluding the list\n"
"returned).\n");

static PyObject *
gc_get_objects(PyObject *self, PyObject *noargs)
{
    int i;
    PyObject *result;

    result = PyList_New(0);
    if (result == NULL)
        return NULL;
    for (i = 0; i < NUM_GENERATIONS; i++) {
        if (append_objects(result, GEN_HEAD(i))) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}
PyDoc_STRVAR(gc_is_tracked__doc__,
"is_tracked(obj) -> bool\n"
"\n"
"Returns true if the object is tracked by the garbage collector.\n"
"Simple atomic objects will return false.\n"
);

static PyObject *
gc_is_tracked(PyObject *self, PyObject *obj)
{
    PyObject *result;

    if (PyObject_IS_GC(obj) && IS_TRACKED(obj))
        result = Py_True;
    else
        result = Py_False;
    Py_INCREF(result);
    return result;
}
PyDoc_STRVAR(gc__doc__,
"This module provides access to the garbage collector for reference cycles.\n"
"\n"
"enable() -- Enable automatic garbage collection.\n"
"disable() -- Disable automatic garbage collection.\n"
"isenabled() -- Returns true if automatic collection is enabled.\n"
"collect() -- Do a full collection right now.\n"
"get_count() -- Return the current collection counts.\n"
"set_debug() -- Set debugging flags.\n"
"get_debug() -- Get debugging flags.\n"
"set_threshold() -- Set the collection thresholds.\n"
"get_threshold() -- Return the current collection thresholds.\n"
"get_objects() -- Return a list of all objects tracked by the collector.\n"
"is_tracked() -- Returns true if a given object is tracked.\n"
"get_referrers() -- Return the list of objects that refer to an object.\n"
"get_referents() -- Return the list of objects that an object refers to.\n");
static PyMethodDef GcMethods[] = {
    {"enable",         gc_enable,        METH_NOARGS,  gc_enable__doc__},
    {"disable",        gc_disable,       METH_NOARGS,  gc_disable__doc__},
    {"isenabled",      gc_isenabled,     METH_NOARGS,  gc_isenabled__doc__},
    {"set_debug",      gc_set_debug,     METH_VARARGS, gc_set_debug__doc__},
    {"get_debug",      gc_get_debug,     METH_NOARGS,  gc_get_debug__doc__},
    {"get_count",      gc_get_count,     METH_NOARGS,  gc_get_count__doc__},
    {"set_threshold",  gc_set_thresh,    METH_VARARGS, gc_set_thresh__doc__},
    {"get_threshold",  gc_get_thresh,    METH_NOARGS,  gc_get_thresh__doc__},
    {"collect",        (PyCFunction)gc_collect,
        METH_VARARGS | METH_KEYWORDS,                  gc_collect__doc__},
    {"get_objects",    gc_get_objects,   METH_NOARGS,  gc_get_objects__doc__},
    {"is_tracked",     gc_is_tracked,    METH_O,       gc_is_tracked__doc__},
    {"get_referrers",  gc_get_referrers, METH_VARARGS,
        gc_get_referrers__doc__},
    {"get_referents",  gc_get_referents, METH_VARARGS,
        gc_get_referents__doc__},
    {NULL,      NULL}           /* Sentinel */
};
PyMODINIT_FUNC
initgc(void)
{
    PyObject *m;

    m = Py_InitModule4("gc",
                       GcMethods,
                       gc__doc__,
                       NULL,
                       PYTHON_API_VERSION);
    if (m == NULL)
        return;

    if (garbage == NULL) {
        garbage = PyList_New(0);
        if (garbage == NULL)
            return;
    }
    Py_INCREF(garbage);
    if (PyModule_AddObject(m, "garbage", garbage) < 0)
        return;

    /* Importing can't be done in collect() because collect()
     * can be called via PyGC_Collect() in Py_Finalize().
     * This wouldn't be a problem, except that <initialized> is
     * reset to 0 before calling collect which trips up
     * the import and triggers an assertion.
     */
    if (tmod == NULL) {
        tmod = PyImport_ImportModuleNoBlock("time");
        if (tmod == NULL)
            PyErr_Clear();
    }

#define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) return
    ADD_INT(DEBUG_STATS);
    ADD_INT(DEBUG_COLLECTABLE);
    ADD_INT(DEBUG_UNCOLLECTABLE);
    ADD_INT(DEBUG_INSTANCES);
    ADD_INT(DEBUG_OBJECTS);
    ADD_INT(DEBUG_SAVEALL);
    ADD_INT(DEBUG_LEAK);
#undef ADD_INT
}
/* API to invoke gc.collect() from C */
Py_ssize_t
PyGC_Collect(void)
{
    Py_ssize_t n;

    if (collecting)
        n = 0; /* already collecting, don't do anything */
    else {
        collecting = 1;
        n = collect(NUM_GENERATIONS - 1);
        collecting = 0;
    }

    return n;
}
/* for debugging */
void
_PyGC_Dump(PyGC_Head *g)
{
    _PyObject_Dump(FROM_GC(g));
}
/* extension modules might be compiled with GC support so these
   functions must always be available */

#undef PyObject_GC_Track
#undef PyObject_GC_UnTrack
#undef PyObject_GC_Del
#undef _PyObject_GC_Malloc

void
PyObject_GC_Track(void *op)
{
    _PyObject_GC_TRACK(op);
}

/* for binary compatibility with 2.2 */
void
_PyObject_GC_Track(PyObject *op)
{
    PyObject_GC_Track(op);
}
void
PyObject_GC_UnTrack(void *op)
{
    /* Obscure:  the Py_TRASHCAN mechanism requires that we be able to
     * call PyObject_GC_UnTrack twice on an object.
     */
    if (IS_TRACKED(op))
        _PyObject_GC_UNTRACK(op);
}

/* for binary compatibility with 2.2 */
void
_PyObject_GC_UnTrack(PyObject *op)
{
    PyObject_GC_UnTrack(op);
}
PyObject *
_PyObject_GC_Malloc(size_t basicsize)
{
    PyObject *op;
    PyGC_Head *g;
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
        return PyErr_NoMemory();
    g = (PyGC_Head *)PyObject_MALLOC(
        sizeof(PyGC_Head) + basicsize);
    if (g == NULL)
        return PyErr_NoMemory();
    g->gc.gc_refs = GC_UNTRACKED;
    generations[0].count++; /* number of allocated GC objects */
    if (generations[0].count > generations[0].threshold &&
        enabled &&
        generations[0].threshold &&
        !collecting &&
        !PyErr_Occurred()) {
        collecting = 1;
        collect_generations();
        collecting = 0;
    }
    op = FROM_GC(g);
    return op;
}
PyObject *
_PyObject_GC_New(PyTypeObject *tp)
{
    PyObject *op = _PyObject_GC_Malloc(_PyObject_SIZE(tp));
    if (op != NULL)
        op = PyObject_INIT(op, tp);
    return op;
}

PyVarObject *
_PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems)
{
    const size_t size = _PyObject_VAR_SIZE(tp, nitems);
    PyVarObject *op = (PyVarObject *) _PyObject_GC_Malloc(size);
    if (op != NULL)
        op = PyObject_INIT_VAR(op, tp, nitems);
    return op;
}
PyVarObject *
_PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
{
    const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
    PyGC_Head *g = AS_GC(op);
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
        return (PyVarObject *)PyErr_NoMemory();
    g = (PyGC_Head *)PyObject_REALLOC(g, sizeof(PyGC_Head) + basicsize);
    if (g == NULL)
        return (PyVarObject *)PyErr_NoMemory();
    op = (PyVarObject *) FROM_GC(g);
    Py_SIZE(op) = nitems;
    return op;
}
void
PyObject_GC_Del(void *op)
{
    PyGC_Head *g = AS_GC(op);
    if (IS_TRACKED(op))
        gc_list_remove(g);
    if (generations[0].count > 0) {
        generations[0].count--;
    }
    PyObject_FREE(g);
}

/* for binary compatibility with 2.2 */
#undef _PyObject_GC_Del
void
_PyObject_GC_Del(PyObject *op)
{
    PyObject_GC_Del(op);
}