/*
 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/autoRestore.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

GenCollectedHeap::GenCollectedHeap(Generation::Name young,
                                   Generation::Name old,
                                   const char* policy_counters_name) :
  CollectedHeap(),
  _young_gen(NULL),
  _old_gen(NULL),
  _young_gen_spec(new GenerationSpec(young,
                                     NewSize,
                                     MaxNewSize,
                                     GenAlignment)),
  _old_gen_spec(new GenerationSpec(old,
                                   OldSize,
                                   MaxOldSize,
                                   GenAlignment)),
  _rem_set(NULL),
  _soft_ref_gen_policy(),
  _size_policy(NULL),
  _gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)),
  _incremental_collection_failed(false),
  _full_collections_completed(0),
  _young_manager(NULL),
  _old_manager(NULL) {
}

jint GenCollectedHeap::initialize() {
  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // Allocate space for the heap.

  ReservedHeapSpace heap_rs = allocate(HeapAlignment);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region(heap_rs);

  _rem_set = create_rem_set(heap_rs.region());
  _rem_set->initialize();
  CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
  bs->initialize();
  BarrierSet::set_barrier_set(bs);

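  // The young generation occupies the low end of the single contiguous
  // reservation and the old generation sits directly above it.
  // is_in_young() depends on this address ordering: any heap address
  // below the old generation's reserved start is in the young generation.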
  ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size());
  _young_gen = _young_gen_spec->init(young_rs, rem_set());
  ReservedSpace old_rs = heap_rs.last_part(_young_gen_spec->max_size());

  old_rs = old_rs.first_part(_old_gen_spec->max_size());
  _old_gen = _old_gen_spec->init(old_rs, rem_set());

  GCInitLogger::print();

  return JNI_OK;
}

CardTableRS* GenCollectedHeap::create_rem_set(const MemRegion& reserved_region) {
  return new CardTableRS(reserved_region);
}

void GenCollectedHeap::initialize_size_policy(size_t init_eden_size,
                                              size_t init_promo_size,
                                              size_t init_survivor_size) {
  const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_pause_sec,
                                        GCTimeRatio);
}

ReservedHeapSpace GenCollectedHeap::allocate(size_t alignment) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  // Check for overflow.
  size_t total_reserved = _young_gen_spec->max_size() + _old_gen_spec->max_size();
  if (total_reserved < _young_gen_spec->max_size()) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
  size_t used_page_size = heap_rs.page_size();

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       total_reserved,
                       used_page_size,
                       heap_rs.base(),
                       heap_rs.size());

  return heap_rs;
}

class GenIsScavengable : public BoolObjectClosure {
public:
  bool do_object_b(oop obj) {
    return GenCollectedHeap::heap()->is_in_young(obj);
  }
};

static GenIsScavengable _is_scavengable;

void GenCollectedHeap::post_initialize() {
  CollectedHeap::post_initialize();
  ref_processing_init();

  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  initialize_size_policy(def_new_gen->eden()->capacity(),
                         _old_gen->capacity(),
                         def_new_gen->from()->capacity());

  MarkSweep::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

void GenCollectedHeap::ref_processing_init() {
  _young_gen->ref_processor_init();
  _old_gen->ref_processor_init();
}

PreGenGCValues GenCollectedHeap::get_pre_gc_values() const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  return PreGenGCValues(def_new_gen->used(),
                        def_new_gen->capacity(),
                        def_new_gen->eden()->used(),
                        def_new_gen->eden()->capacity(),
                        def_new_gen->from()->used(),
                        def_new_gen->from()->capacity(),
                        old_gen()->used(),
                        old_gen()->capacity());
}

GenerationSpec* GenCollectedHeap::young_gen_spec() const {
  return _young_gen_spec;
}

GenerationSpec* GenCollectedHeap::old_gen_spec() const {
  return _old_gen_spec;
}

size_t GenCollectedHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t GenCollectedHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

void GenCollectedHeap::save_used_regions() {
  _old_gen->save_used_region();
  _young_gen->save_used_region();
}

size_t GenCollectedHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Update the _full_collections_completed counter
// at the end of a stop-the-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  return _full_collections_completed;
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectedHeap::should_try_older_generation_allocation(size_t word_size) const {
  size_t young_capacity = _young_gen->capacity_before_gc();
  return    (word_size > heap_word_size(young_capacity))
         || GCLocker::is_active_and_needs_gc()
         || incremental_collection_failed();
}

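// Try to satisfy an allocation request by expanding the heap: first the
// old generation (if it would accept an allocation of this size), then
// the young generation.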
HeapWord* GenCollectedHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = NULL;
  if (_old_gen->should_allocate(size, is_tlab)) {
    result = _old_gen->expand_and_allocate(size, is_tlab);
  }
  if (result == NULL) {
    if (_young_gen->should_allocate(size, is_tlab)) {
      result = _young_gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || is_in_reserved(result), "result not in heap");
  return result;
}

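// Allocation slow path. Loop until the request is satisfied or found to be
// unsatisfiable: first a lock-free attempt in the young generation, then a
// locked attempt that may also consult the old generation, and finally a
// collection scheduled as a VM operation. Threads blocked by the GCLocker
// stall here and retry.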
HeapWord* GenCollectedHeap::mem_allocate_work(size_t size,
                                              bool is_tlab,
                                              bool* gc_overhead_limit_was_exceeded) {
  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    Generation* young = _young_gen;
    assert(young->supports_inline_contig_alloc(),
           "Otherwise, must do alloc within heap lock");
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    uint gc_count_before;  // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("GenCollectedHeap::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object.
        }
        if (!is_maximal_no_gc()) {
          // Try to expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
         assert(result == NULL, "must be NULL if gc_locked() is true");
         continue;  // Retry and/or stall as necessary.
      }

      // Allocation has failed and a collection has been done. If the
      // gc time limit was exceeded this time, return NULL so that an
      // out-of-memory error will be thrown. Clear gc_overhead_limit_exceeded
      // so that the overhead-exceeded state does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work has retried %u times,"
                            " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

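// Attempt an allocation without triggering a collection. The young
// generation is always tried first; the old generation is consulted only
// when first_only is false (see should_try_older_generation_allocation()).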
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res = NULL;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != NULL || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return mem_allocate_work(size,
                           false /* is_tlab */,
                           gc_overhead_limit_was_exceeded);
}

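// Soft references must be cleared unconditionally for collections requested
// to reclaim metadata and for WhiteBox-initiated full GCs, independent of
// the current soft reference policy.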
bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs,
                                          bool restore_marks_for_biased_locking) {
  FormatBuffer<> title("Collect gen: %s", gen->short_name());
  GCTraceTime(Trace, gc, phases) t1(title);
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause());

  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);

  if (run_verification && VerifyBeforeGC) {
    Universe::verify("Before GC");
  }
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());

  if (restore_marks_for_biased_locking) {
    // We perform this mark word preservation work lazily
    // because it's only at this point that we know whether we
    // absolutely have to do it; we want to avoid doing it for
    // scavenge-only collections where it's unnecessary.
    BiasedLocking::preserve_marks();
  }

  // Do collection work
  {
    // Note on ref discovery: For what appear to be historical reasons,
    // GCH enables and disables (by enqueueing) refs discovery.
    // In the future this should be moved into the generation's
    // collect method so that ref discovery and enqueueing concerns
    // are local to a generation. The collect method could return
    // an appropriate indication in the case that notification on
    // the ref lock was needed. This will make the treatment of
    // weak refs more uniform (and indeed remove such concerns
    // from GCH). XXX

    save_marks();   // save marks for all gens
    // We want to discover references, but not process them yet.
    // This mode is disabled in process_discovered_references if the
    // generation does some collection work, or in
    // enqueue_discovered_references if the generation returns
    // without doing any work.
    ReferenceProcessor* rp = gen->ref_processor();
    // If the discovery of ("weak") refs in this generation is
    // atomic wrt other collectors in this configuration, we
    // are guaranteed to have empty discovered ref lists.
    if (rp->discovery_is_atomic()) {
      rp->enable_discovery();
      rp->setup_policy(clear_soft_refs);
    } else {
      // collect() below will enable discovery as appropriate
    }
    gen->collect(full, clear_soft_refs, size, is_tlab);
    if (!rp->enqueuing_is_done()) {
      rp->disable_discovery();
    } else {
      rp->set_enqueuing_is_done(false);
    }
    rp->verify_no_references_recorded();
  }

  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

  update_gc_stats(gen, full);

  if (run_verification && VerifyAfterGC) {
    Universe::verify("After GC");
  }
}

void GenCollectedHeap::do_collection(bool           full,
                                     bool           clear_all_soft_refs,
                                     size_t         size,
                                     bool           is_tlab,
                                     GenerationType max_generation) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread(), "only VM thread");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          soft_ref_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());

  AutoModifyRestore<bool> temporarily(_is_gc_active, true);

  bool complete = full && (max_generation == OldGen);
  bool old_collects_young = complete && !ScavengeBeforeFullGC;
  bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);

  const PreGenGCValues pre_gc_values = get_pre_gc_values();

  bool run_verification = total_collections() >= VerifyGCStartAt;
  bool prepared_for_verification = false;
  bool do_full_collection = false;

  if (do_young_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t("Pause Young", NULL, gc_cause(), true);

    print_heap_before_gc();

    if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
      prepare_for_verify();
      prepared_for_verification = true;
    }

    gc_prologue(complete);
    increment_total_collections(complete);

    collect_generation(_young_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 0,
                       do_clear_all_soft_refs,
                       false);

    if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
        size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
      // Allocation request was met by young GC.
      size = 0;
    }

    // Ask if young collection is enough. If so, do the final steps for young collection,
    // and fall through to the end.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
    if (!do_full_collection) {
      // Adjust generation sizes.
      _young_gen->compute_new_size();

      print_heap_change(pre_gc_values);

      // Track memory usage and detect low memory after GC finishes
      MemoryService::track_memory_usage();

      gc_epilogue(complete);
    }

    print_heap_after_gc();

  } else {
    // No young collection, ask if we need to perform Full collection.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
  }

  if (do_full_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause(), true);

    print_heap_before_gc();

    if (!prepared_for_verification && run_verification &&
        VerifyGCLevel <= 1 && VerifyBeforeGC) {
      prepare_for_verify();
    }

    if (!do_young_collection) {
      gc_prologue(complete);
      increment_total_collections(complete);
    }

    // Accounting quirk: total full collections would be incremented when "complete"
    // is set, by calling increment_total_collections above. However, we also need to
    // account for Full collections that had "complete" unset.
    if (!complete) {
      increment_total_full_collections();
    }

    collect_generation(_old_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 1,
                       do_clear_all_soft_refs,
                       true);

    // Adjust generation sizes.
    _old_gen->compute_new_size();
    _young_gen->compute_new_size();

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge(/*at_safepoint*/true);
    DEBUG_ONLY(MetaspaceUtils::verify();)
    // Resize the metaspace capacity after full collections
    MetaspaceGC::compute_new_size();
    update_full_collections_completed();

    print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    // Need to tell the epilogue code we are done with Full GC, regardless what was
    // the initial value for "complete" flag.
    gc_epilogue(true);

    BiasedLocking::restore_marks();

    print_heap_after_gc();
  }
}

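// A full collection is warranted only when the request allows collecting up
// to the old generation and the old generation itself reports that it
// should be collected.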
bool GenCollectedHeap::should_do_full_collection(size_t size, bool full, bool is_tlab,
                                                 GenCollectedHeap::GenerationType max_gen) const {
  return max_gen == OldGen && _old_gen->should_collect(full, size, is_tlab);
}

void GenCollectedHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void GenCollectedHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void GenCollectedHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void GenCollectedHeap::flush_nmethod(nmethod* nm) {
  // Do nothing.
}

void GenCollectedHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods();
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  GCCauseSetter x(this, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GCLocker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // Could be null if we are out of space.
  } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    do_collection(false,                     // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  } else {
    log_trace(gc)(" :: Trying full because partial may fail :: ");
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    do_collection(true,                      // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    do_collection(true,                      // full
                  true,                      // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!soft_ref_policy()->should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
      "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

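// Apply the given closures to the strong roots: class loader data, thread
// stacks, and the strong OopStorages. Depending on the scanning option,
// part or all of the code cache is visited as well.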
void GenCollectedHeap::process_roots(ScanningOption so,
                                     OopClosure* strong_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobToOopClosure* code_roots) {
  // General roots.
  assert(code_roots != NULL, "code root closure should always be set");

  ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);

  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

  Threads::oops_do(strong_roots, roots_from_code_p);

  OopStorageSet::strong_oops_do(strong_roots);

  if (so & SO_ScavengeCodeCache) {
    assert(code_roots != NULL, "must supply closure for code cache");

    // We only visit parts of the CodeCache when scavenging.
    ScavengableNMethods::nmethods_do(code_roots);
  }
  if (so & SO_AllCodeCache) {
    assert(code_roots != NULL, "must supply closure for code cache");

    // CMSCollector uses this to do intermediate-strength collections.
    // We scan the entire code cache, since CodeCache::do_unloading is not called.
    CodeCache::blobs_do(code_roots);
  }
  // Verify that the code cache contents are not subject to
  // movement by a scavenging collection.
  DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
  DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
}

void GenCollectedHeap::full_process_roots(bool is_adjust_phase,
                                          ScanningOption so,
                                          bool only_strong_roots,
                                          OopClosure* root_closure,
                                          CLDClosure* cld_closure) {
  MarkingCodeBlobClosure mark_code_closure(root_closure, is_adjust_phase);
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

  process_roots(so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure);
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  WeakProcessor::oops_do(root_closure);
  _young_gen->ref_processor()->weak_oops_do(root_closure);
  _old_gen->ref_processor()->weak_oops_do(root_closure);
}

bool GenCollectedHeap::no_allocs_since_save_marks() {
  return _young_gen->no_allocs_since_save_marks() &&
         _old_gen->no_allocs_since_save_marks();
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _young_gen->supports_inline_contig_alloc();
}

HeapWord* volatile* GenCollectedHeap::top_addr() const {
  return _young_gen->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _young_gen->end_addr();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if ((cause == GCCause::_wb_young_gc) ||
      (cause == GCCause::_gc_locker)) {
    // Young collection for WhiteBox or GCLocker.
    collect(cause, YoungGen);
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // Young collection only.
      collect(cause, YoungGen);
    } else {
      // Stop-the-world full collection.
      collect(cause, OldGen);
    }
#else
    // Stop-the-world full collection.
    collect(cause, OldGen);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, GenerationType max_generation) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_generation);
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, OldGen);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, GenerationType max_generation) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();

  if (GCLocker::should_discard(cause, gc_count_before)) {
    return;
  }

  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);
  }
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          GenerationType last_generation) {
  do_collection(true,                   // full
                clear_all_soft_refs,    // clear_all_soft_refs
                0,                      // size
                false,                  // is_tlab
                last_generation);       // last_generation
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full.
  if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
    log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
    // This time allow the old gen to be collected as well.
    do_collection(true,                // full
                  clear_all_soft_refs, // clear_all_soft_refs
                  0,                   // size
                  false,               // is_tlab
                  OldGen);             // last_generation
  }
}

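// Fast inclusion test: because the young generation is reserved below the
// old generation (see initialize()), any heap address below the old
// generation's reserved start must belong to the young generation.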
bool GenCollectedHeap::is_in_young(oop p) {
  bool result = cast_from_oop<HeapWord*>(p) < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p));
  return result;
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
    "Does not work if address is non-null and outside of the heap");
  return p < _young_gen->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate(OopIterateClosure* cl) {
  _young_gen->oop_iterate(cl);
  _old_gen->oop_iterate(cl);
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  Space* res = _young_gen->space_containing(addr);
  if (res != NULL) {
    return res;
  }
  res = _old_gen->space_containing(addr);
  assert(res != NULL, "Could not find containing space");
  return res;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->block_is_obj(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  return _old_gen->block_is_obj(addr);
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_capacity();
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_used();
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->unsafe_max_tlab_alloc();
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t min_size,
                                              size_t requested_size,
                                              size_t* actual_size) {
  bool gc_overhead_limit_was_exceeded;
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */,
                                       &gc_overhead_limit_was_exceeded);
  if (result != NULL) {
    *actual_size = requested_size;
  }

  return result;
}

// Requires "*prev_ptr" to be non-NULL.  Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur     =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}

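// Gather scratch blocks donated by both generations, sorted into decreasing
// size order so the requestor sees the largest blocks first.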
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  _young_gen->contribute_scratch(res, requestor, max_alloc_words);
  _old_gen->contribute_scratch(res, requestor, max_alloc_words);
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  _young_gen->reset_scratch();
  _old_gen->reset_scratch();
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
}

void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    cl->do_generation(_old_gen);
    cl->do_generation(_young_gen);
  } else {
    cl->do_generation(_young_gen);
    cl->do_generation(_old_gen);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
}

void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}

GenCollectedHeap* GenCollectedHeap::heap() {
  // SerialHeap is the only subtype of GenCollectedHeap.
  return named_heap<GenCollectedHeap>(CollectedHeap::Serial);
}

#if INCLUDE_SERIALGC
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}
#endif // INCLUDE_SERIALGC

void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("%s", _old_gen->name());
  _old_gen->verify();

  log_debug(gc, verify)("%s", _young_gen->name());
  _young_gen->verify();

  log_debug(gc, verify)("RemSet");
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  if (_young_gen != NULL) {
    _young_gen->print_on(st);
  }
  if (_old_gen != NULL) {
    _old_gen->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
}

bool GenCollectedHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<GenCollectedHeap>::print_location(st, addr);
}

void GenCollectedHeap::print_tracing_info() const {
  if (log_is_enabled(Debug, gc, heap, exit)) {
    LogStreamHandle(Debug, gc, heap, exit) lsh;
    _young_gen->print_summary_info_on(&lsh);
    _old_gen->print_summary_info_on(&lsh);
  }
}

void GenCollectedHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(def_new_gen->short_name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             def_new_gen->used(),
                                             def_new_gen->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             def_new_gen->eden()->used(),
                                             def_new_gen->eden()->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             def_new_gen->from()->used(),
                                             def_new_gen->from()->capacity()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old_gen()->short_name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old_gen()->used(),
                                             old_gen()->capacity()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  // Fill TLAB's and such
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_epilogue(bool full) {
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
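  // Compiled code allocates inline by bumping the young generation's top
  // pointer; the guarantee below checks that the gap between the current
  // allocation end and the top of the address space exceeds
  // FastAllocateSizeLimit, so such a bump can never wrap around.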
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(!CompilerConfig::is_c2_or_jvmci_compiler_enabled() || actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif // COMPILER2_OR_JVMCI

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  MetaspaceCounters::update_performance_counters();
}

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}

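// Called when promoting an object out of the young generation fails because
// the old generation is full: try to expand the old generation and copy the
// object there. Returns the new copy, or a null oop if expansion failed too.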
oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                              oop obj,
                                              size_t obj_size) {
  guarantee(old_gen == _old_gen, "We only get here with an old generation");
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  result = old_gen->expand_and_allocate(obj_size, false);

  if (result != NULL) {
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), result, obj_size);
  }
  return cast_to_oop(result);
}