/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

25 #include "precompiled.hpp"
26 #include "aot/aotLoader.hpp"
27 #include "classfile/classLoaderDataGraph.hpp"
28 #include "classfile/stringTable.hpp"
29 #include "classfile/symbolTable.hpp"
30 #include "classfile/systemDictionary.hpp"
31 #include "code/codeCache.hpp"
32 #include "gc/parallel/parallelScavengeHeap.hpp"
33 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
34 #include "gc/parallel/psMarkSweep.hpp"
35 #include "gc/parallel/psMarkSweepDecorator.hpp"
36 #include "gc/parallel/psOldGen.hpp"
37 #include "gc/parallel/psScavenge.hpp"
38 #include "gc/parallel/psYoungGen.hpp"
39 #include "gc/serial/markSweep.hpp"
40 #include "gc/shared/gcCause.hpp"
41 #include "gc/shared/gcHeapSummary.hpp"
42 #include "gc/shared/gcId.hpp"
43 #include "gc/shared/gcLocker.hpp"
44 #include "gc/shared/gcTimer.hpp"
45 #include "gc/shared/gcTrace.hpp"
46 #include "gc/shared/gcTraceTime.inline.hpp"
47 #include "gc/shared/isGCActiveMark.hpp"
48 #include "gc/shared/referencePolicy.hpp"
49 #include "gc/shared/referenceProcessor.hpp"
50 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
51 #include "gc/shared/spaceDecorator.hpp"
52 #include "gc/shared/weakProcessor.hpp"
53 #include "memory/universe.hpp"
54 #include "logging/log.hpp"
55 #include "oops/oop.inline.hpp"
56 #include "runtime/biasedLocking.hpp"
57 #include "runtime/flags/flagSetting.hpp"
58 #include "runtime/handles.inline.hpp"
59 #include "runtime/safepoint.hpp"
60 #include "runtime/vmThread.hpp"
61 #include "services/management.hpp"
62 #include "services/memoryService.hpp"
63 #include "utilities/align.hpp"
64 #include "utilities/events.hpp"
65 #include "utilities/stack.inline.hpp"
66 #if INCLUDE_JVMCI
67 #include "jvmci/jvmci.hpp"
68 #endif
69 
elapsedTimer        PSMarkSweep::_accumulated_time;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

SpanSubjectToDiscoveryClosure PSMarkSweep::_span_based_discoverer;

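// One-time setup: reference discovery is confined to the heap's reserved
// region, and a plain single-threaded reference processor is used.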
void PSMarkSweep::initialize() {
  _span_based_discoverer.set_span(ParallelScavengeHeap::heap()->reserved_region());
  set_ref_processor(new ReferenceProcessor(&_span_based_discoverer));     // a vanilla ref proc
  _counters = new CollectorCounters("Serial full collection pauses", 1);
  MarkSweep::initialize();
}

// This method contains all heap-specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap; it does nothing further. Any policy-driven behavior, such as
// bailing out early or scavenging before the full gc, needs to be added
// here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the soft ref policy may
// already be true because this method can be called without intervening
// activity.  For example, when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

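  // When enabled, run a young collection first so that cheaply reclaimable
  // young objects reduce the amount of work left for the full collection.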
  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->soft_ref_policy()->should_clear_all_soft_refs();

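  // Force every space to be compacted when maximum compaction is requested
  // by temporarily setting MarkSweepAlwaysCompactCount to 1; the previous
  // flag value is restored when flag_setting goes out of scope.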
  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  GCIdMark gc_id_mark;
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // SoftRefPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->soft_ref_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause, true);

    heap->pre_full_gc_dump(_gc_timer);

    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause);

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

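    // Save the mark words of biased-locked objects; the collector stores
    // marking and forwarding state in object headers, so the saved marks
    // are restored once the compaction is complete.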
    BiasedLocking::preserve_marks();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceUtils::used_bytes();

    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

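    // The derived pointer table records (base, derived) interior-pointer
    // pairs found in compiled frames so the derived pointers can be
    // recomputed after their base objects have moved.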
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

#if COMPILER2_OR_JVMCI
    // Don't add any more derived pointers during phase3
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

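    // After compaction the card marks for the old gen are stale.  If the
    // young gen is completely empty there can be no old-to-young pointers,
    // so the cards can simply be cleared; otherwise all cards must be
    // invalidated (dirtied) so the next scavenge rescans the old gen.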
    PSCardTable* card_table = heap->card_table();
    MemRegion old_mr = heap->old_gen()->reserved();
    if (young_gen_empty) {
      card_table->clear(MemRegion(old_mr.start(), old_mr.end()));
    } else {
      card_table->invalidate(MemRegion(old_mr.start(), old_mr.end()));
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceUtils::verify_metrics();

    BiasedLocking::restore_marks();
    heap->prune_scavengable_nmethods();
    JvmtiExport::gc_epilogue();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    assert(!ref_processor()->discovery_enabled(), "Should have been disabled earlier");

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
      log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                          old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out of bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc */);

        size_policy->check_gc_overhead_limit(eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc */,
                                             gc_cause,
                                             heap->soft_ref_policy());

        size_policy->decay_supplemental_growth(true /* full gc */);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }
      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    young_gen->print_used_change(young_gen_prev_used);
    old_gen->print_used_change(old_gen_prev_used);
    MetaspaceUtils::print_metaspace_change(metadata_prev_used);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    heap->post_full_gc_dump(_gc_timer);
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("After GC");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

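  // Absorbing eden into the old gen works by moving the boundary between
  // the two generations' virtual spaces, which is only possible when
  // UseAdaptiveGCBoundary is enabled.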
  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  log_trace(gc, ergo, heap)(" absorbing " SIZE_FORMAT "K:  "
                            "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                            "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                            "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                            absorb_size / K,
                            eden_capacity / K, (eden_capacity - absorb_size) / K,
                            young_gen->from_space()->used_in_bytes() / K,
                            young_gen->to_space()->used_in_bytes() / K,
                            young_gen->capacity_in_bytes() / K, new_young_size / K);

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
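  // The unused portion of to-space serves as the buffer for preserved
  // marks; to-space holds no live objects during a full collection.  If
  // the buffer fills up, further marks are preserved on the
  // _preserved_mark_stack and _preserved_oop_stack instead.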
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}


void PSMarkSweep::deallocate_stacks() {
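  // clear(true) also frees the stacks' cached backing segments; the marking
  // stack uses plain clear() so its cache is kept for the next collection.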
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
    AOT_ONLY(AOTLoader::oops_do(mark_and_push_closure());)
    JVMCI_ONLY(JVMCI::oops_do(mark_and_push_closure());)
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);

    ref_processor()->setup_policy(clear_all_softrefs);
    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, &pt);
    gc_tracer()->report_gc_reference_stats(stats);
    pt.print_all_references();
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  {
    GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer);
    WeakProcessor::weak_oops_do(is_alive_closure(), &do_nothing_cl);
  }

  {
    GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer);

    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(_gc_timer);

    // Unload nmethods.
    CodeCache::do_unloading(is_alive_closure(), purged_class);

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(purged_class);

    // Clean JVMCI metadata handles.
    JVMCI_ONLY(JVMCI::do_unloading(purged_class));
  }

  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}


void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  Threads::oops_do(adjust_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  WeakProcessor::oops_do(adjust_pointer_closure());

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  AOT_ONLY(AOTLoader::oops_do(adjust_pointer_closure());)

  JVMCI_ONLY(JVMCI::oops_do(adjust_pointer_closure());)

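  // The discovered-reference lists of both the full-gc and the scavenger
  // reference processors may still point at moved objects; adjust them too.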
630   ref_processor()->weak_oops_do(adjust_pointer_closure());
631   PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
632 
633   adjust_marks();
634 
635   young_gen->adjust_pointers();
636   old_gen->adjust_pointers();
637 }
638 
mark_sweep_phase4()639 void PSMarkSweep::mark_sweep_phase4() {
640   EventMark m("4 compact heap");
641   GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
642 
643   // All pointers are now adjusted, move objects accordingly
644 
645   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
646   PSYoungGen* young_gen = heap->young_gen();
647   PSOldGen* old_gen = heap->old_gen();
648 
649   old_gen->compact();
650   young_gen->compact();
651 }
652 
millis_since_last_gc()653 jlong PSMarkSweep::millis_since_last_gc() {
654   // We need a monotonically non-decreasing time in ms but
655   // os::javaTimeMillis() does not guarantee monotonicity.
656   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
657   jlong ret_val = now - _time_of_last_gc;
658   // XXX See note in genCollectedHeap::millis_since_last_gc().
659   if (ret_val < 0) {
660     NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
661     return 0;
662   }
663   return ret_val;
664 }
665 
reset_millis_since_last_gc()666 void PSMarkSweep::reset_millis_since_last_gc() {
667   // We need a monotonically non-decreasing time in ms but
668   // os::javaTimeMillis() does not guarantee monotonicity.
669   _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
670 }
671