Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Revert GC changes to "work_to_do" logic.
This reverts parts of the GH-140262 change.  The changes that affect
tuple untracking are kept.  The changes to the calculation of the
increment size, based on the "work_to_do" variable, are reverted;
reverting them causes cyclic garbage to be collected more quickly.
The change to test_gc.py is also reverted, since it was only needed
because the expected GC collection was taking longer to happen.

With the tuple untrack change, the performance regression as reported by
bug GH-139951 is still resolved (work_to_do changes are not required).
  • Loading branch information
nascheme committed Nov 27, 2025
commit fef3eed829b8f69f38708059e75d25bd72825a6a
17 changes: 8 additions & 9 deletions Lib/test/test_gc.py
Original file line number Diff line number Diff line change
Expand Up @@ -1493,11 +1493,10 @@ def callback(ignored):
# The free-threaded build doesn't have multiple generations, so
# just trigger a GC manually.
gc.collect()
assert not detector.gc_happened
while not detector.gc_happened:
i += 1
if i > 100000:
self.fail("gc didn't happen after 100000 iterations")
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc

Expand Down Expand Up @@ -1569,8 +1568,8 @@ def __del__(self):
gc.collect()
while not detector.gc_happened:
i += 1
if i > 50000:
self.fail("gc didn't happen after 50000 iterations")
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc

Expand All @@ -1587,8 +1586,8 @@ def test_indirect_calls_with_gc_disabled(self):
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 100000:
self.fail("gc didn't happen after 100000 iterations")
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
junk.append([]) # this will eventually trigger gc

try:
Expand All @@ -1598,11 +1597,11 @@ def test_indirect_calls_with_gc_disabled(self):
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 100000:
if i > 10000:
break
junk.append([]) # this may eventually trigger gc (if it is enabled)

self.assertEqual(i, 100001)
self.assertEqual(i, 10001)
finally:
gc.enable()

Expand Down
6 changes: 2 additions & 4 deletions Python/gc.c
Original file line number Diff line number Diff line change
Expand Up @@ -1644,7 +1644,7 @@ assess_work_to_do(GCState *gcstate)
scale_factor = 2;
}
intptr_t new_objects = gcstate->young.count;
intptr_t max_heap_fraction = new_objects*2;
intptr_t max_heap_fraction = new_objects*3/2;
intptr_t heap_fraction = gcstate->heap_size / SCAN_RATE_DIVISOR / scale_factor;
if (heap_fraction > max_heap_fraction) {
heap_fraction = max_heap_fraction;
Expand All @@ -1659,9 +1659,6 @@ gc_collect_increment(PyThreadState *tstate, struct gc_collection_stats *stats)
GC_STAT_ADD(1, collections, 1);
GCState *gcstate = &tstate->interp->gc;
gcstate->work_to_do += assess_work_to_do(gcstate);
if (gcstate->work_to_do < 0) {
return;
}
untrack_tuples(&gcstate->young.head);
if (gcstate->phase == GC_PHASE_MARK) {
Py_ssize_t objects_marked = mark_at_start(tstate);
Expand Down Expand Up @@ -1705,6 +1702,7 @@ gc_collect_increment(PyThreadState *tstate, struct gc_collection_stats *stats)
gc_collect_region(tstate, &increment, &survivors, stats);
gc_list_merge(&survivors, visited);
assert(gc_list_is_empty(&increment));
gcstate->work_to_do += gcstate->heap_size / SCAN_RATE_DIVISOR / scale_factor;
gcstate->work_to_do -= increment_size;

if (gc_list_is_empty(not_visited)) {
Expand Down
Loading