/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/serial/markSweep.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer        PSMarkSweep::_accumulated_time;
jlong               PSMarkSweep::_time_of_last_gc = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

SpanSubjectToDiscoveryClosure PSMarkSweep::_span_based_discoverer;

void PSMarkSweep::initialize() {
  _span_based_discoverer.set_span(ParallelScavengeHeap::heap()->reserved_region());
  set_ref_processor(new ReferenceProcessor(&_span_based_discoverer)); // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
  MarkSweep::initialize();
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the soft ref policy
// may be true because this method can be called without intervening
// activity. For example, when the heap space is tight and full measures
// are being taken to free space.
void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->soft_ref_policy()->should_clear_all_soft_refs();

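  // For a maximally compacting collection, force compaction on every
  // invocation (count == 1). UIntFlagSetting is an RAII guard that restores
  // MarkSweepAlwaysCompactCount when this scope exits.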
  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GCLocker::check_active_before_gc()) {
    return false;
  }
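  // Bailing out above means a JNI critical section currently holds the
  // GCLocker; the locker will request another collection once the last
  // critical section exits.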

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  GCIdMark gc_id_mark;
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // SoftRefPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->soft_ref_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->ensure_parsability(true); // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm; // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause, true);

    heap->pre_full_gc_dump(_gc_timer);

    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause);

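    // Accumulated collection time is only maintained if it will be reported
    // at VM exit (log tag gc+heap+exit).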
    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceUtils::used_bytes();

    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

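    // The DerivedPointerTable records interior pointers that compiled code
    // derives from object bases; clear it now so entries recorded while
    // scanning compiled frames can be re-based after objects move (see
    // DerivedPointerTable::update_pointers() below).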
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

#if COMPILER2_OR_JVMCI
    // Don't add any more derived pointers during phase3
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

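    // If eden is not empty, try to grow the old gen downward to absorb
    // eden's live data so that the young gen can be reported as empty.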
    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

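    // An empty young gen means no old->young pointers exist, so the card
    // table can simply be cleared; otherwise compaction has moved objects
    // without updating cards, so all old gen cards must be dirtied.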
    PSCardTable* card_table = heap->card_table();
    MemRegion old_mr = heap->old_gen()->reserved();
    if (young_gen_empty) {
      card_table->clear(MemRegion(old_mr.start(), old_mr.end()));
    } else {
      card_table->invalidate(MemRegion(old_mr.start(), old_mr.end()));
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceUtils::verify_metrics();

    BiasedLocking::restore_marks();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    assert(!ref_processor()->discovery_enabled(), "Should have been disabled earlier");

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
      log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                          old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

      // Don't check if the size_policy is ready here. Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
               young_gen->from_space()->capacity_in_bytes() +
               young_gen->to_space()->capacity_in_bytes(),
               "Sizes of space in young gen are out of bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
                               young_gen->from_space()->capacity_in_bytes() -
                               young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc */);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc */,
                                             gc_cause,
                                             heap->soft_ref_policy());

        size_policy->decay_supplemental_growth(true /* full gc */);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }
      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    young_gen->print_used_change(young_gen_prev_used);
    old_gen->print_used_change(old_gen_prev_used);
    MetaspaceUtils::print_metaspace_change(metadata_prev_used);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    heap->post_full_gc_dump(_gc_timer);
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm; // Discard invalid handles created during verification
    Universe::verify("After GC");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

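  // Absorbing eden requires moving the boundary between the young and old
  // gen virtual spaces, which is only possible with UseAdaptiveGCBoundary.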
  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden. Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  log_trace(gc, ergo, heap)(" absorbing " SIZE_FORMAT "K: "
                            "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                            "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                            "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                            absorb_size / K,
                            eden_capacity / K, (eden_capacity - absorb_size) / K,
                            young_gen->from_space()->used_in_bytes() / K,
                            young_gen->to_space()->used_in_bytes() / K,
                            young_gen->capacity_in_bytes() / K, new_young_size / K);

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false; // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top. (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear. Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
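  // Marks that do not fit into this scratch area overflow to
  // _preserved_mark_stack/_preserved_oop_stack (see MarkSweep::preserve_mark).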
}


void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure()); // Global (strong) JNI handles
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
    AOTLoader::oops_do(mark_and_push_closure());
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);

    ref_processor()->setup_policy(clear_all_softrefs);
    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, &pt);
    gc_tracer()->report_gc_reference_stats(stats);
    pt.print_all_references();
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  {
    GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer);
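    // Dead referents of weak roots are cleared here; live ones need no
    // keep-alive work since marking is complete, hence do_nothing_cl.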
    WeakProcessor::weak_oops_do(is_alive_closure(), &do_nothing_cl);
  }

  {
    GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer);

    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(_gc_timer);

    // Unload nmethods.
    CodeCache::do_unloading(is_alive_closure(), purged_class);

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(purged_class);
  }

  {
    GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer);
    // Delete entries for dead interned strings.
    StringTable::unlink(is_alive_closure());
  }

  {
    GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer);
    // Clean up unreferenced symbols in symbol table.
    SymbolTable::unlink();
  }

  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}


void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure()); // Global (strong) JNI handles
  Threads::oops_do(adjust_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  WeakProcessor::oops_do(adjust_pointer_closure());

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  AOTLoader::oops_do(adjust_pointer_closure());
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}