1 /*
2  * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
3  *
4  * This code is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License version 2 only, as
6  * published by the Free Software Foundation.
7  *
8  * This code is distributed in the hope that it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
11  * version 2 for more details (a copy is included in the LICENSE file that
12  * accompanied this code).
13  *
14  * You should have received a copy of the GNU General Public License version
15  * 2 along with this work; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17  *
18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19  * or visit www.oracle.com if you need additional information or have any
20  * questions.
21  *
22  */
23 
24 #include "precompiled.hpp"
25 
26 #include "classfile/classLoaderData.hpp"
27 #include "classfile/classLoaderDataGraph.hpp"
28 #include "gc/shared/referenceProcessor.hpp"
29 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
30 #include "gc/shared/workgroup.hpp"
31 #include "gc/shared/weakProcessor.inline.hpp"
32 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
33 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
34 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
35 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
36 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
37 #include "gc/shenandoah/shenandoahFreeSet.hpp"
38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
39 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
40 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
41 #include "gc/shenandoah/shenandoahHeuristics.hpp"
42 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
43 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
44 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
45 #include "gc/shenandoah/shenandoahStringDedup.hpp"
46 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
47 #include "gc/shenandoah/shenandoahTimingTracker.hpp"
48 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
49 #include "gc/shenandoah/shenandoahUtils.hpp"
50 #include "gc/shenandoah/shenandoahVerifier.hpp"
51 
52 #include "memory/iterator.hpp"
53 #include "memory/metaspace.hpp"
54 #include "memory/resourceArea.hpp"
55 #include "memory/universe.hpp"
56 
57 /**
58  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
59  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
60  * is incremental-update-based.
61  *
62  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
63  * several reasons:
64  * - We will not reclaim them in this cycle anyway, because they are not in the
65  *   cset
66  * - It makes up for the bulk of work during final-pause
67  * - It also shortens the concurrent cycle because we don't need to
68  *   pointlessly traverse through newly allocated objects.
69  * - As a nice side-effect, it solves the I-U termination problem (mutators
70  *   cannot outrun the GC by allocating like crazy)
71  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
73  *   achieves the same, but without extra barriers. I think the effect of
74  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
75  *   particular, we will not see the head of a completely new long linked list
76  *   in final-pause and end up traversing huge chunks of the heap there.
77  * - We don't need to see/update the fields of new objects either, because they
78  *   are either still null, or anything that's been stored into them has been
79  *   evacuated+enqueued before (and will thus be treated later).
80  *
81  * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
83  *
84  * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
86  *   them alive. Otherwise the next cycle wouldn't pick them up and consider
87  *   them for cset. This means that we need to protect such regions from
 *   getting accidentally trashed at the end of traversal cycle. This is why I
89  *   keep track of alloc-regions and check is_alloc_region() in the trashing
90  *   code.
91  * - We *need* to traverse through evacuated objects. Those objects are
92  *   pre-existing, and any references in them point to interesting objects that
93  *   we need to see. We also want to count them as live, because we just
94  *   determined that they are alive :-) I achieve this by upping TAMS
95  *   concurrently for every gclab/gc-shared alloc before publishing the
96  *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and traverse through them as normal.
98  */
99 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
100 private:
101   ShenandoahObjToScanQueue* _queue;
102   ShenandoahTraversalGC* _traversal_gc;
103   ShenandoahHeap* const _heap;
104 
105 public:
ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue * q)106   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
107     _queue(q),
108     _heap(ShenandoahHeap::heap())
109  { }
110 
do_buffer(void ** buffer,size_t size)111   void do_buffer(void** buffer, size_t size) {
112     for (size_t i = 0; i < size; ++i) {
113       oop* p = (oop*) &buffer[i];
114       oop obj = RawAccess<>::oop_load(p);
115       shenandoah_assert_not_forwarded(p, obj);
116       if (_heap->marking_context()->mark(obj)) {
117         _queue->push(ShenandoahMarkTask(obj));
118       }
119     }
120   }
121 };
122 
123 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
124 private:
125   ShenandoahTraversalSATBBufferClosure* _satb_cl;
126 
127 public:
ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure * satb_cl)128   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
129     _satb_cl(satb_cl) {}
130 
do_thread(Thread * thread)131   void do_thread(Thread* thread) {
132     ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
133   }
134 };
135 
136 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
137 // and remark them later during final-traversal.
138 class ShenandoahMarkCLDClosure : public CLDClosure {
139 private:
140   OopClosure* _cl;
141 public:
ShenandoahMarkCLDClosure(OopClosure * cl)142   ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
do_cld(ClassLoaderData * cld)143   void do_cld(ClassLoaderData* cld) {
144     cld->oops_do(_cl, true, true);
145   }
146 };
147 
148 // Like CLDToOopClosure, but only process modified CLDs
149 class ShenandoahRemarkCLDClosure : public CLDClosure {
150 private:
151   OopClosure* _cl;
152 public:
ShenandoahRemarkCLDClosure(OopClosure * cl)153   ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
do_cld(ClassLoaderData * cld)154   void do_cld(ClassLoaderData* cld) {
155     if (cld->has_modified_oops()) {
156       cld->oops_do(_cl, true, true);
157     }
158   }
159 };
160 
161 class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
162 private:
163   ShenandoahCSetRootScanner* _rp;
164   ShenandoahHeap* _heap;
165   ShenandoahCsetCodeRootsIterator* _cset_coderoots;
166   ShenandoahStringDedupRoots       _dedup_roots;
167 
168 public:
ShenandoahInitTraversalCollectionTask(ShenandoahCSetRootScanner * rp)169   ShenandoahInitTraversalCollectionTask(ShenandoahCSetRootScanner* rp) :
170     AbstractGangTask("Shenandoah Init Traversal Collection"),
171     _rp(rp),
172     _heap(ShenandoahHeap::heap()) {}
173 
work(uint worker_id)174   void work(uint worker_id) {
175     ShenandoahParallelWorkerSession worker_session(worker_id);
176 
177     ShenandoahEvacOOMScope oom_evac_scope;
178     ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
179     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
180 
181     bool process_refs = _heap->process_references();
182     bool unload_classes = _heap->unload_classes();
183     ReferenceProcessor* rp = NULL;
184     if (process_refs) {
185       rp = _heap->ref_processor();
186     }
187 
188     // Step 1: Process ordinary GC roots.
189     {
190       ShenandoahTraversalClosure roots_cl(q, rp);
191       ShenandoahMarkCLDClosure cld_cl(&roots_cl);
192       MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
193       if (unload_classes) {
194         _rp->roots_do(worker_id, &roots_cl, NULL, &code_cl);
195       } else {
196         _rp->roots_do(worker_id, &roots_cl, &cld_cl, &code_cl);
197       }
198 
199       AlwaysTrueClosure is_alive;
200       _dedup_roots.oops_do(&is_alive, &roots_cl, worker_id);
201     }
202   }
203 };
204 
205 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
206 private:
207   ShenandoahTaskTerminator* _terminator;
208   ShenandoahHeap* _heap;
209 public:
ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator * terminator)210   ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
211     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
212     _terminator(terminator),
213     _heap(ShenandoahHeap::heap()) {}
214 
work(uint worker_id)215   void work(uint worker_id) {
216     ShenandoahConcurrentWorkerSession worker_session(worker_id);
217     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
218     ShenandoahEvacOOMScope oom_evac_scope;
219     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
220 
221     // Drain all outstanding work in queues.
222     traversal_gc->main_loop(worker_id, _terminator, true);
223   }
224 };
225 
// Final-traversal pause task: drains remaining SATB buffers, rescans GC roots
// (remarking modified CLDs when unloading classes), then finishes all queued
// marking work. Selects degen closure variants when a degenerated cycle is in
// progress.
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahAllRootScanner* _rp;
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahAllRootScanner* rp, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 0: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers below.
    }

    // Step 1: Process GC roots.
    // For oops in code roots, they are marked, evacuated, enqueued for further traversal,
    // and the references to the oops are updated during init pause. New nmethods are handled
    // in similar way during nmethod-register process. Therefore, we don't need to rescan code
    // roots here.
    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalClosure roots_cl(q, rp);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        // Only CLDs modified during concurrent traversal need remarking.
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, NULL, &tc);
      } else {
        CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc);
      }
    } else {
      // Degenerated cycle: same structure, with the degen closure variant.
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, NULL, &tc);
      } else {
        CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 3: Finally drain all outstanding work in queues.
      traversal_gc->main_loop(worker_id, _terminator, false);
    }

  }
};
301 
ShenandoahTraversalGC(ShenandoahHeap * heap,size_t num_regions)302 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
303   _heap(heap),
304   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
305   _traversal_set(ShenandoahHeapRegionSet()) {
306 
307   // Traversal does not support concurrent code root scanning
308   FLAG_SET_DEFAULT(ShenandoahConcurrentScanCodeRoots, false);
309 
310   uint num_queues = heap->max_workers();
311   for (uint i = 0; i < num_queues; ++i) {
312     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
313     task_queue->initialize();
314     _task_queues->register_queue(i, task_queue);
315   }
316 }
317 
// Intentionally empty: no resources are released here.
ShenandoahTraversalGC::~ShenandoahTraversalGC() {
}
320 
prepare_regions()321 void ShenandoahTraversalGC::prepare_regions() {
322   size_t num_regions = _heap->num_regions();
323   ShenandoahMarkingContext* const ctx = _heap->marking_context();
324   for (size_t i = 0; i < num_regions; i++) {
325     ShenandoahHeapRegion* region = _heap->get_region(i);
326     if (_heap->is_bitmap_slice_committed(region)) {
327       if (_traversal_set.is_in(i)) {
328         ctx->capture_top_at_mark_start(region);
329         region->clear_live_data();
330         assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
331       } else {
332         // Everything outside the traversal set is always considered live.
333         ctx->reset_top_at_mark_start(region);
334       }
335     } else {
336       // FreeSet may contain uncommitted empty regions, once they are recommitted,
337       // their TAMS may have old values, so reset them here.
338       ctx->reset_top_at_mark_start(region);
339     }
340   }
341 }
342 
// Prepares for a traversal cycle: clears the old collection set, makes the
// heap parsable, chooses the new collection set, sets up per-region TAMS,
// and rebuilds the free set. Called at a safepoint under the heap lock
// (see init_traversal_collection()).
void ShenandoahTraversalGC::prepare() {
  _heap->collection_set()->clear();
  assert(_heap->collection_set()->count() == 0, "collection set not clear");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
    _heap->make_parsable(true);
  }

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
    _heap->resize_tlabs();
  }

  // Marking has not started yet: bitmap must be clean, context incomplete.
  assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
  assert(!_heap->marking_context()->is_complete(), "should not be complete");

  ShenandoahFreeSet* free_set = _heap->free_set();
  ShenandoahCollectionSet* collection_set = _heap->collection_set();

  // Find collection set
  _heap->heuristics()->choose_collection_set(collection_set);
  prepare_regions();

  // Rebuild free set
  free_set->rebuild();

  log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
                     collection_set->garbage() / M, collection_set->live_data() / M, collection_set->count());
}
373 
// Init-traversal safepoint operation: optional verification, cycle
// preparation, reference-discovery setup, and the initial root scan that
// seeds the traversal queues.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    ShenandoahHeapLocker lock(_heap->lock());
    prepare();
  }

  _heap->set_concurrent_traversal_in_progress(true);

  // Enable reference discovery for this cycle, with the soft-ref policy
  // chosen by the heap.
  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    {
      // Parallel root scan seeds the per-worker traversal queues.
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahCSetRootScanner rp(nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);
      ShenandoahInitTraversalCollectionTask traversal_task(&rp);
      _heap->workers()->run_task(&traversal_task);
    }

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }
}
426 
// Per-worker driver for traversal marking. Selects the closure specialization
// matching the 2x2x2 combination of current cycle modes (degenerated /
// class-unload / string-dedup), then runs main_loop_work() with it.
//   w         - worker id (also selects the queue and liveness cache)
//   t         - terminator for this parallel phase
//   sts_yield - whether to yield to the suspendible thread set
void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = _heap->get_liveness_cache(w);

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  {
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    } else {
      // Degenerated variants of the same closure matrix.
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    }
  }

  // Publish this worker's liveness data back to the heap.
  _heap->flush_liveness_cache(w);
}
479 
// Core marking loop, specialized on the traversal closure type T.
// Phase 1: drain outstanding queues, claimed round-robin across workers.
// Phase 2: process own queue, steal from others, drain SATB buffers, and
//          offer termination when no work is found.
// Cancellation is checked once per stride, not per task, to keep the hot
// loop tight.
template <class T>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();

  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_gc_and_yield(sts_yield)) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
      } else {
        // Claimed queue exhausted; move to the next unclaimed one.
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

  // Normal loop.
  q = queues->queue(worker_id);

  ShenandoahTraversalSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  while (true) {
    if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

    // Drain completed SATB buffers into our queue first; they carry new
    // values of concurrent oop stores (see file header note).
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(task) ||
          queues->steal(worker_id, task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No more work, try to terminate
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(_heap);

      if (terminator->offer_termination(&tt)) return;
    }
  }
}
545 
check_and_handle_cancelled_gc(ShenandoahTaskTerminator * terminator,bool sts_yield)546 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
547   if (_heap->cancelled_gc()) {
548     return true;
549   }
550   return false;
551 }
552 
// Concurrent phase: drain traversal queues with all active workers, then
// optionally preclean weak references.
void ShenandoahTraversalGC::concurrent_traversal_collection() {
  ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
  if (!_heap->cancelled_gc()) {
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);
    ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentTraversalCollectionTask task(&terminator);
    _heap->workers()->run_task(&task);
  }

  // Precleaning is skipped if the cycle was cancelled or refs are not processed.
  if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
    preclean_weak_refs();
  }
}
569 
// Final-traversal safepoint operation: finish marking, process weak refs,
// fix up roots, unload classes, then trash fully-dead regions and rebuild
// the free set. Each stage is skipped if the cycle was cancelled.
void ShenandoahTraversalGC::final_traversal_collection() {
  _heap->make_parsable(true);

  if (!_heap->cancelled_gc()) {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahAllRootScanner rp(nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
    _heap->workers()->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (!_heap->cancelled_gc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_gc()) {
    fixup_roots();
    if (_heap->unload_classes()) {
      _heap->unload_classes_and_cleanup_tables(false);
    }
  }

  if (!_heap->cancelled_gc()) {
    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

    // No more marking expected
    _heap->mark_complete_marking_context();

    // Resize metaspace
    MetaspaceGC::compute_new_size();

    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      ShenandoahMarkingContext* const ctx = _heap->marking_context();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        // Region had no allocations past TAMS during this cycle; see the
        // file header note on protecting alloc regions from trashing.
        bool not_allocated = ctx->top_at_mark_start(r) == r->top();

        // Trash candidates: traversal regions with no live data and no new
        // allocations.
        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom();
          assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
          r->make_trash_immediate();
          // Also trash every continuation region of this humongous object.
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash_immediate();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash_immediate();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    _heap->set_concurrent_traversal_in_progress(false);
    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}
669 
670 class ShenandoahTraversalFixRootsClosure : public OopClosure {
671 private:
672   template <class T>
do_oop_work(T * p)673   inline void do_oop_work(T* p) {
674     T o = RawAccess<>::oop_load(p);
675     if (!CompressedOops::is_null(o)) {
676       oop obj = CompressedOops::decode_not_null(o);
677       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
678       if (!oopDesc::equals_raw(obj, forw)) {
679         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
680       }
681     }
682   }
683 
684 public:
do_oop(oop * p)685   inline void do_oop(oop* p) { do_oop_work(p); }
do_oop(narrowOop * p)686   inline void do_oop(narrowOop* p) { do_oop_work(p); }
687 };
688 
689 class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
690 private:
691   ShenandoahRootUpdater* _rp;
692 
693 public:
ShenandoahTraversalFixRootsTask(ShenandoahRootUpdater * rp)694   ShenandoahTraversalFixRootsTask(ShenandoahRootUpdater* rp) :
695     AbstractGangTask("Shenandoah traversal fix roots"),
696     _rp(rp) {
697     assert(ShenandoahHeap::heap()->has_forwarded_objects(), "Must be");
698   }
699 
work(uint worker_id)700   void work(uint worker_id) {
701     ShenandoahParallelWorkerSession worker_session(worker_id);
702     ShenandoahTraversalFixRootsClosure cl;
703     ShenandoahForwardedIsAliveClosure is_alive;
704     _rp->roots_do<ShenandoahForwardedIsAliveClosure, ShenandoahTraversalFixRootsClosure>(worker_id, &is_alive, &cl);
705   }
706 };
707 
// Updates all GC roots (including the code cache) to point at the forwarded
// copies of evacuated objects.
void ShenandoahTraversalGC::fixup_roots() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahRootUpdater rp(_heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots, true /* update code cache */);
  ShenandoahTraversalFixRootsTask update_roots_task(&rp);
  _heap->workers()->run_task(&update_roots_task);
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}
719 
// Drops any leftover tasks from the queues (e.g. after cycle end/cancel).
void ShenandoahTraversalGC::reset() {
  _task_queues->clear();
}
723 
// Accessor for the traversal task queue set (one queue per worker).
ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}
727 
728 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
729 private:
730   ShenandoahHeap* const _heap;
731 public:
ShenandoahTraversalCancelledGCYieldClosure()732   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
should_return()733   virtual bool should_return() { return _heap->cancelled_gc(); }
734 };
735 
// Completes reference-precleaning work by draining the traversal queues
// single-threaded (worker 0) with a one-worker terminator.
class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
    shenandoah_assert_rp_isalive_installed();
    traversal_gc->main_loop((uint) 0, &terminator, true);
  }
};
747 
// Keep-alive closure for reference processing: funnels referents through
// ShenandoahTraversalGC::process_oop. Normal (non-degenerated) variant,
// no string dedup — see the template flags on process_oop below.
class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};
769 
770 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
771 private:
772   ShenandoahObjToScanQueue* _queue;
773   Thread* _thread;
774   ShenandoahTraversalGC* _traversal_gc;
775   ShenandoahMarkingContext* const _mark_context;
776 
777   template <class T>
do_oop_work(T * p)778   inline void do_oop_work(T* p) {
779     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
780   }
781 
782 public:
ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue * q)783   ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
784           _queue(q), _thread(Thread::current()),
785           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
786           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
787 
do_oop(narrowOop * p)788   void do_oop(narrowOop* p) { do_oop_work(p); }
do_oop(oop * p)789   void do_oop(oop* p)       { do_oop_work(p); }
790 };
791 
792 class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
793 private:
794   ShenandoahObjToScanQueue* _queue;
795   Thread* _thread;
796   ShenandoahTraversalGC* _traversal_gc;
797   ShenandoahMarkingContext* const _mark_context;
798 
799   template <class T>
do_oop_work(T * p)800   inline void do_oop_work(T* p) {
801     ShenandoahEvacOOMScope evac_scope;
802     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
803   }
804 
805 public:
ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue * q)806   ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
807           _queue(q), _thread(Thread::current()),
808           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
809           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
810 
do_oop(narrowOop * p)811   void do_oop(narrowOop* p) { do_oop_work(p); }
do_oop(oop * p)812   void do_oop(oop* p)       { do_oop_work(p); }
813 };
814 
815 class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure {
816 private:
817   ShenandoahObjToScanQueue* _queue;
818   Thread* _thread;
819   ShenandoahTraversalGC* _traversal_gc;
820   ShenandoahMarkingContext* const _mark_context;
821 
822   template <class T>
do_oop_work(T * p)823   inline void do_oop_work(T* p) {
824     ShenandoahEvacOOMScope evac_scope;
825     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
826   }
827 
828 public:
ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue * q)829   ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
830           _queue(q), _thread(Thread::current()),
831           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
832           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
833 
do_oop(narrowOop * p)834   void do_oop(narrowOop* p) { do_oop_work(p); }
do_oop(oop * p)835   void do_oop(oop* p)       { do_oop_work(p); }
836 };
837 
838 class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
839 private:
840   ReferenceProcessor* _rp;
841 
842 public:
ShenandoahTraversalPrecleanTask(ReferenceProcessor * rp)843   ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
844           AbstractGangTask("Precleaning task"),
845           _rp(rp) {}
846 
work(uint worker_id)847   void work(uint worker_id) {
848     assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
849     ShenandoahParallelWorkerSession worker_session(worker_id);
850     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
851     ShenandoahEvacOOMScope oom_evac_scope;
852 
853     ShenandoahHeap* sh = ShenandoahHeap::heap();
854 
855     ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);
856 
857     ShenandoahForwardedIsAliveClosure is_alive;
858     ShenandoahTraversalCancelledGCYieldClosure yield;
859     ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
860     ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
861     ResourceMark rm;
862     _rp->preclean_discovered_references(&is_alive, &keep_alive,
863                                         &complete_gc, &yield,
864                                         NULL);
865   }
866 };
867 
void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");
  assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  // Precleaning runs on a single worker, so turn off MT discovery for its
  // duration (restored automatically when this mutator goes out of scope).
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  // Install the traversal is-alive closure into RP for the duration of this
  // method; the scoped mutator restores the previous closure on exit.
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  assert(task_queues()->is_empty(), "Should be empty");

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahTraversalPrecleanTask task(rp);
  workers->run_task(&task);

  // Queues should have been drained, unless the GC was cancelled mid-preclean.
  assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
}
908 
909 // Weak Reference Closures
910 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
911   uint _worker_id;
912   ShenandoahTaskTerminator* _terminator;
913   bool _reset_terminator;
914 
915 public:
ShenandoahTraversalDrainMarkingStackClosure(uint worker_id,ShenandoahTaskTerminator * t,bool reset_terminator=false)916   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
917     _worker_id(worker_id),
918     _terminator(t),
919     _reset_terminator(reset_terminator) {
920   }
921 
do_void()922   void do_void() {
923     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
924 
925     ShenandoahHeap* sh = ShenandoahHeap::heap();
926     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
927     assert(sh->process_references(), "why else would we be here?");
928     shenandoah_assert_rp_isalive_installed();
929 
930     traversal_gc->main_loop(_worker_id, _terminator, false);
931 
932     if (_reset_terminator) {
933       _terminator->reset_for_reuse();
934     }
935   }
936 };
937 
938 class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
939   uint _worker_id;
940   ShenandoahTaskTerminator* _terminator;
941   bool _reset_terminator;
942 
943 public:
ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id,ShenandoahTaskTerminator * t,bool reset_terminator=false)944   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
945           _worker_id(worker_id),
946           _terminator(t),
947           _reset_terminator(reset_terminator) {
948   }
949 
do_void()950   void do_void() {
951     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
952 
953     ShenandoahHeap* sh = ShenandoahHeap::heap();
954     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
955     assert(sh->process_references(), "why else would we be here?");
956     shenandoah_assert_rp_isalive_installed();
957 
958     ShenandoahEvacOOMScope evac_scope;
959     traversal_gc->main_loop(_worker_id, _terminator, false);
960 
961     if (_reset_terminator) {
962       _terminator->reset_for_reuse();
963     }
964   }
965 };
966 
weak_refs_work()967 void ShenandoahTraversalGC::weak_refs_work() {
968   assert(_heap->process_references(), "sanity");
969 
970   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
971 
972   ShenandoahGCPhase phase(phase_root);
973 
974   ReferenceProcessor* rp = _heap->ref_processor();
975 
976   // NOTE: We cannot shortcut on has_discovered_references() here, because
977   // we will miss marking JNI Weak refs then, see implementation in
978   // ReferenceProcessor::process_discovered_references.
979   weak_refs_work_doit();
980 
981   rp->verify_no_references_recorded();
982   assert(!rp->discovery_enabled(), "Post condition");
983 
984 }
985 
986 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
987 private:
988   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
989   ShenandoahTaskTerminator* _terminator;
990 
991 public:
ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask & proc_task,ShenandoahTaskTerminator * t)992   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
993                                       ShenandoahTaskTerminator* t) :
994     AbstractGangTask("Process reference objects in parallel"),
995     _proc_task(proc_task),
996     _terminator(t) {
997   }
998 
work(uint worker_id)999   void work(uint worker_id) {
1000     ShenandoahEvacOOMScope oom_evac_scope;
1001     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1002     ShenandoahHeap* heap = ShenandoahHeap::heap();
1003     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
1004 
1005     ShenandoahForwardedIsAliveClosure is_alive;
1006     if (!heap->is_degenerated_gc_in_progress()) {
1007       ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1008       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1009     } else {
1010       ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1011       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1012     }
1013   }
1014 };
1015 
1016 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1017 private:
1018   WorkGang* _workers;
1019 
1020 public:
ShenandoahTraversalRefProcTaskExecutor(WorkGang * workers)1021   ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {}
1022 
1023   // Executes a task using worker threads.
execute(ProcessTask & task,uint ergo_workers)1024   void execute(ProcessTask& task, uint ergo_workers) {
1025     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1026 
1027     ShenandoahHeap* heap = ShenandoahHeap::heap();
1028     ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
1029     ShenandoahPushWorkerQueuesScope scope(_workers,
1030                                           traversal_gc->task_queues(),
1031                                           ergo_workers,
1032                                           /* do_check = */ false);
1033     uint nworkers = _workers->active_workers();
1034     traversal_gc->task_queues()->reserve(nworkers);
1035     ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1036     ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1037     _workers->run_task(&proc_task_proxy);
1038   }
1039 };
1040 
// Performs the actual STW weak-reference processing: installs the is-alive
// closure into RP, sets MT degree, and invokes process_discovered_references
// with mode-appropriate (degen vs. concurrent) serial keep-alive closures and
// an MT executor. RAII scope ordering below is deliberate; do not reorder.
void ShenandoahTraversalGC::weak_refs_work_doit() {
  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE(review): phase_process is computed but not visibly used below —
  // presumably consumed by timing code removed or located elsewhere; confirm.
  ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;

  // Install the traversal is-alive closure for the duration of this method;
  // the scoped mutator restores the previous closure on exit.
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  // Honor the soft-ref clearing policy, and tell RP how many workers the
  // MT path may use.
  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // complete_gc and keep_alive closures instantiated here are only needed for
  // single-threaded path in RP. They share the queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
  ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);

  ShenandoahTraversalRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
  if (!_heap->is_degenerated_gc_in_progress()) {
    ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      &pt);
  } else {
    ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      &pt);
  }

  pt.print_all_references();
  // Queues should be drained, unless the GC was cancelled mid-processing.
  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
}
1085