/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahTimingTracker.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"

#include "memory/iterator.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"

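// Root-scanning closure for the init-mark pause: marks objects reachable from the
// roots and pushes them onto the worker's queue. UPDATE_REFS selects how a root slot
// is read: RESOLVE follows the forwarding pointer first, NONE takes the reference
// as-is (see mark_roots() below).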
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahAllRootScanner* _rp;
  bool _process_refs;
public:
  ShenandoahInitMarkRootsTask(ShenandoahAllRootScanner* rp, bool process_refs) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp),
    _process_refs(process_refs) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %u", worker_id);

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q);
    do_work(heap, &mark_cl, worker_id);
  }

private:
  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This will allow us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because there could be embedded class/oop references in the generated
    //      code, which we would never visit during marking. Without code cache invalidation, as
    //      in (a), we would risk executing such a code cache blob, and crashing.
    if (heap->unload_classes()) {
      _rp->strong_roots_do(worker_id, oops);
    } else {
      _rp->roots_do(worker_id, oops);
    }
  }
};

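// Task that updates all root slots to point to the current copies of their objects,
// e.g. after evacuation has moved them; every root is visited (AlwaysTrueClosure).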
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootUpdater*  _root_updater;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootUpdater* root_updater) :
    AbstractGangTask("Shenandoah update roots task"),
    _root_updater(root_updater) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    AlwaysTrueClosure always_true;
    _root_updater->roots_do<AlwaysTrueClosure, ShenandoahUpdateRefsClosure>(worker_id, &always_true, &cl);
  }
};

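// The concurrent marking task: each worker optionally scans code-cache roots, then
// runs the cancellable mark loop over its queue, stealing work from other queues.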
class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};

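// Claims each thread once and flushes its thread-local SATB mark queue into the
// given buffer closure, so entries recorded by the pre-write barrier get marked.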
class ShenandoahSATBThreadsClosure : public ThreadClosure {
private:
  ShenandoahSATBBufferClosure* _satb_cl;
  uintx _claim_token;

public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _claim_token(Threads::thread_claim_token()) {}

  void do_thread(Thread* thread) {
    if (thread->claim_threads_do(true, _claim_token)) {
      ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
    }
  }
};

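// Final marking at the pause: drains the remaining SATB buffers, re-scans code
// roots for degenerated cycles, and finishes the mark loop without cancellation.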
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;
  bool _dedup_string;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    ShenandoahParallelWorkerSession worker_session(worker_id);
    // First drain the remaining SATB buffers.
    // Notice that this is not strictly necessary for mark-compact. But since
    // it requires a StrongRootsScope around the task, we need to claim the
    // threads, and performance-wise it does not really matter: it adds about
    // 1ms to a full GC.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
      ShenandoahSATBBufferClosure cl(q);
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
      ShenandoahSATBThreadsClosure tc(&cl);
      Threads::threads_do(&tc);
    }

    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    if (heap->is_degenerated_gc_in_progress()) {
      // A degenerated cycle may bypass the concurrent cycle, so code roots might not
      // have been scanned yet; check and scan them here.
      _cm->concurrent_scan_code_roots(worker_id, rp);
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

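// Scans all roots at a safepoint (the init-mark pause), seeding the task queues
// that the concurrent marking phase drains afterwards.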
void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Just check");

  ShenandoahAllRootScanner root_proc(nworkers, root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  } else {
    // No need to update references: the heap is stable, so we can save time by
    // not walking through forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  }

  if (ShenandoahConcurrentScanCodeRoots) {
    clear_claim_codecache();
  }
}

void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  bool update_code_cache = true; // initialize to safer value
  switch (root_phase) {
    case ShenandoahPhaseTimings::update_roots:
    case ShenandoahPhaseTimings::final_update_refs_roots:
      update_code_cache = false;
      break;
    case ShenandoahPhaseTimings::full_gc_roots:
    case ShenandoahPhaseTimings::degen_gc_update_roots:
      update_code_cache = true;
      break;
    default:
      ShouldNotReachHere();
  }

  ShenandoahGCPhase phase(root_phase);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  uint nworkers = _heap->workers()->active_workers();

  ShenandoahRootUpdater root_updater(nworkers, root_phase, update_code_cache);
  ShenandoahUpdateRootsTask update_roots(&root_updater);
  _heap->workers()->run_task(&update_roots);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

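// Task that updates only the references held in thread (stack) roots, bracketing
// the work with worker start/end timing for the given phase.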
class ShenandoahUpdateThreadRootsTask : public AbstractGangTask {
private:
  ShenandoahThreadRoots           _thread_roots;
  ShenandoahPhaseTimings::Phase   _phase;
public:
  ShenandoahUpdateThreadRootsTask(bool is_par, ShenandoahPhaseTimings::Phase phase) :
    AbstractGangTask("Shenandoah Update Thread Roots"),
    _thread_roots(is_par),
    _phase(phase) {
    ShenandoahHeap::heap()->phase_timings()->record_workers_start(_phase);
  }

  ~ShenandoahUpdateThreadRootsTask() {
    ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
  }

  void work(uint worker_id) {
    ShenandoahUpdateRefsClosure cl;
    _thread_roots.oops_do(&cl, NULL, worker_id);
  }
};

void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase root_phase) {
  WorkGang* workers = _heap->workers();
  bool is_par = workers->active_workers() > 1;
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahUpdateThreadRootsTask task(is_par, root_phase);
  workers->run_task(&task);
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

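// Sets up one object-to-scan queue per worker; the queue count never drops below one.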
void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
}

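// Scans the code cache concurrently. The code cache is claimed once per cycle
// (claim_codecache() below), so only the first worker to arrive does the scan.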
void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
  if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) {
    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
    if (!_heap->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      // TODO: We cannot honor StringDeduplication here, due to lock ranking
      // inversion. So, we may miss some deduplication candidates.
      if (_heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      }
    }
  }
}

void ShenandoahConcurrentMark::mark_from_roots() {
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

  if (_heap->process_references()) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // enable ("weak") refs discovery
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask task(this, &terminator);
    workers->run_task(&task);
  }

  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
}

void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  // Finally, mark everything else we have got in our queues during the previous steps.
  // This does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by the initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it is shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    ShenandoahTerminationTracker termination_tracker(full_gc ?
                                                     ShenandoahPhaseTimings::full_gc_mark_termination :
                                                     ShenandoahPhaseTimings::termination);

    StrongRootsScope scope(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
    _heap->workers()->run_task(&task);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  }

  weak_roots_work();

  // And finally finish class unloading
  if (_heap->unload_classes()) {
    _heap->unload_classes_and_cleanup_tables(full_gc);
  } else if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahIsAliveSelector alive;
    BoolObjectClosure* is_alive = alive.is_alive_closure();
    ShenandoahStringDedup::unlink_or_oops_do(is_alive, NULL, false);
  }
  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());

  // Resize Metaspace
  MetaspaceGC::compute_new_size();
}

// Weak Reference Closures
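// Complete-GC closure for the reference processor: drains the marking queues so
// that objects kept alive during reference processing are marked through.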
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false,   // not cancellable
                   false);  // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

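// Keep-alive closures mark through references that the reference processor decides
// to keep. The NONE variant below assumes a stable heap; the SIMPLE variant also
// heals the slot with the forwarded object, for use when forwarded objects exist.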
class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

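// Updates a (weak) slot with the forwarded copy of its object, asserting that the
// result is either marked or NULL.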
class ShenandoahWeakUpdateClosure : public OopClosure {
private:
  ShenandoahHeap* const _heap;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o = _heap->maybe_update_with_forwarded(p);
    shenandoah_assert_marked_except(p, o, o == NULL);
  }

public:
  ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

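// Debug-only check that weak slots do not point at forwarded objects; this closure
// compiles to a no-op in product builds.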
class ShenandoahWeakAssertNotForwardedClosure : public OopClosure {
private:
  template <class T>
  inline void do_oop_work(T* p) {
#ifdef ASSERT
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      shenandoah_assert_not_forwarded(p, obj);
    }
#endif
  }

public:
  ShenandoahWeakAssertNotForwardedClosure() {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

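// Adapts a reference-processing ProcessTask to the worker gang: each worker picks
// the is-alive/keep-alive closure pair matching the current heap state.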
class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ShenandoahTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrent_mark();
    ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
    ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }
};

void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs :
          ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because we would
  // then miss marking JNI weak refs; see the implementation of
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

// Process leftover weak oops: update them if needed, or assert that they do not
// need updating otherwise.
// The weak processor API requires us to visit the oops, even if we are not doing
// anything to them.
void ShenandoahConcurrentMark::weak_roots_work() {
  WorkGang* workers = _heap->workers();
  OopClosure* keep_alive = &do_nothing_cl;
#ifdef ASSERT
  ShenandoahWeakAssertNotForwardedClosure verify_cl;
  keep_alive = &verify_cl;
#endif
  ShenandoahIsAliveClosure is_alive;
  WeakProcessor::weak_oops_do(workers, &is_alive, keep_alive, 1);
}

void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_process :
          ShenandoahPhaseTimings::weakrefs_process;

  ShenandoahPhaseTimings::Phase phase_process_termination =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_termination :
          ShenandoahPhaseTimings::weakrefs_termination;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // The complete_gc and keep_alive closures instantiated here are only needed for
  // the single-threaded path in the RP. They share queue 0 for tracking work, which
  // simplifies the implementation. Since the RP may decide to call complete_gc
  // several times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());

  {
    ShenandoahGCPhase phase(phase_process);
    ShenandoahTerminationTracker phase_term(phase_process_termination);

    if (_heap->has_forwarded_objects()) {
      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);
    } else {
      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);
    }

    pt.print_all_references();

    assert(task_queues()->is_empty(), "Should be empty");
  }
}

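// Yield closure for precleaning: asks to return as soon as the GC is cancelled,
// so precleaning bails out promptly.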
class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ShenandoahTaskTerminator terminator(1, scm->task_queues());

    ReferenceProcessor* rp = sh->ref_processor();
    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(0, &terminator, rp,
                   false, // not cancellable
                   false); // do not do strdedup
  }
};

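// Precleaning runs while mutators are running, hence the CONCURRENT update mode
// here, which heals slots with atomic updates rather than the plain stores that
// SIMPLE uses at safepoints.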
class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahPrecleanTask(ReferenceProcessor* rp) :
          AbstractGangTask("Precleaning task"),
          _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* sh = ShenandoahHeap::heap();

    ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);

    ShenandoahCancelledGCYieldClosure yield;
    ShenandoahPrecleanCompleteGCClosure complete_gc;

    if (sh->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahPrecleanKeepAliveUpdateClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL);
    }
  }
};

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into the STW pause makes sense at the
  // end of concurrent mark. This filters out the references whose referents are
  // alive. Note that the ReferenceProcessor already filters these out during
  // reference discovery, and the bulk of the work is done there. This phase processes
  // the leftovers that missed the initial filtering, i.e. references whose referents
  // were marked alive after the reference was discovered by the RP.

  assert(_heap->process_references(), "sanity");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  assert(task_queues()->is_empty(), "Should be empty");

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahPrecleanTask task(rp);
  workers->run_task(&task);

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %u", worker_id);
  return _task_queues->queue(worker_id);
}

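// Dispatches to a mark loop specialized along three axes: class unloading (metadata
// visiting), forwarded objects (reference updating), and string deduplication.
// CANCELLABLE selects whether the loop polls for GC cancellation.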
template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminator *t, ReferenceProcessor *rp,
                                                 bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  jushort* ld = _heap->get_liveness_cache(w);

  // TODO: We can clean this up once we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (_heap->unload_classes()) {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator *terminator) {
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}

bool ShenandoahConcurrentMark::claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  return _claimed_codecache.try_set();
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  _claimed_codecache.unset();
}