/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweepProxy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcWhen.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceCounters.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

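// One-time heap setup: reserve the full heap, install the card-table
// barrier set, create the adjoining young and old generations, and
// construct the adaptive size policy, policy counters, and GCTaskManager.
// Returns JNI_ENOMEM if the parallel old GC support cannot be initialized.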
jint ParallelScavengeHeap::initialize() {
  size_t heap_size = _collector_policy->heap_reserved_size_bytes();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());

  os::trace_page_sizes("Heap",
                       _collector_policy->min_heap_byte_size(),
                       heap_size,
                       generation_alignment(),
                       heap_rs.base(),
                       heap_rs.size());

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  PSCardTable* card_table = new PSCardTable(reserved_region());
  card_table->initialize();
  CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
  barrier_set->initialize();
  BarrierSet::set_barrier_set(barrier_set);

  // Make up the generations.
  //
  // Calculate the maximum size that a generation can grow. This
  // includes growth into the other generation. Note that the parameter
  // _max_gen_size is still kept as the maximum size of the generation
  // as the boundaries currently stand, and is used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = AdjoiningGenerations::create_adjoining_generations(heap_rs, _collector_policy, generation_alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             _collector_policy->gen_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(_collector_policy->is_hetero_heap() || !UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 2 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

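// Build the serviceability (java.lang.management) view of the heap:
// three memory pools and two GC memory managers. The old manager is
// given all three pools because a major collection works on the whole
// heap, while the young manager covers only eden and the survivor
// spaces.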
void ParallelScavengeHeap::initialize_serviceability() {

  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge", "end of minor GC");
  _old_manager = new GCMemoryManager("PS MarkSweep", "end of major GC");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);

}

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweepProxy::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

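// Reserved size minus one survivor space: to-space never holds live
// application data, so it is not counted as usable capacity. Under
// adaptive sizing the survivor size is an estimate from the size
// policy rather than the current to-space capacity.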
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
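//
// In outline, the loop below proceeds as follows:
//
//   while (result == NULL) {
//     { MutexLocker ml(Heap_lock);
//       gc_count = total_collections();   // read under the Heap_lock
//       retry young-gen, then old-gen allocation;
//       stall on the GCLocker if a JNI critical section blocks GC;
//     }
//     execute VM_ParallelGCFailedAllocation(size, gc_count);
//     return its result, bail out if the GC overhead limit was hit,
//     or loop and try again;
//   }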
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period in which it
    // reads the total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // a GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }
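
    // The Heap_lock has been released here. The gc_count read above is
    // passed to the VM operation so that its prologue can detect that
    // another thread's collection already completed and skip a
    // redundant GC (see the comment above the loop).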

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until timeout on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // will be thrown (return NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses). Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen. This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;  // death march has started
    }
  }
}

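// Old-gen allocation on the mutator path. The old gen is tried when the
// request is too large for eden, when the GCLocker is holding off a
// needed collection, or (a bounded number of times) while a death march
// is in progress; otherwise NULL is returned so the caller collects.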
HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweepProxy::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements policy for
// allocation flow, NOT collection policy; we do not check here whether
// gc time has exceeded its limit, as that is the responsibility of the
// heap-specific collection methods. This method decides where to
// attempt allocations and when to attempt collections, but contains no
// collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  // Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  // After mark sweep and young generation allocation failure,
  // allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  // More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  // After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

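// TLAB refill. Only the full requested_size is attempted; min_size is
// unused because an eden allocation either succeeds at the requested
// size or fails outright.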
HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  HeapWord* result = young_gen()->allocate(requested_size);
  if (result != NULL) {
    *actual_size = requested_size;
  }

  return result;
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  uint gc_count = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count = total_collections();
    full_gc_count = total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweepProxy::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceUtils::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
      UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweepProxy::accumulated_time()->seconds());
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(heap->kind() == CollectedHeap::Parallel, "Invalid name");
  return (ParallelScavengeHeap*)heap;
}

CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif

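// With Parallel GC, only young-generation objects move during a
// scavenge, so an object is scavengable exactly when it is in the
// young gen; nmethods holding such oops are kept on the code cache's
// scavenge-root list via register_nmethod() below.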
bool ParallelScavengeHeap::is_scavengable(oop obj) {
  return is_in_young(obj);
}

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  CodeCache::register_scavenge_root_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  CodeCache::verify_scavenge_root_nmethod(nm);
}

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}