1 /*
2  * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4  *
5  * This code is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 only, as
7  * published by the Free Software Foundation.
8  *
9  * This code is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * version 2 for more details (a copy is included in the LICENSE file that
13  * accompanied this code).
14  *
15  * You should have received a copy of the GNU General Public License version
16  * 2 along with this work; if not, write to the Free Software Foundation,
17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20  * or visit www.oracle.com if you need additional information or have any
21  * questions.
22  *
23  */
24 
25 #include "precompiled.hpp"
26 #include "aot/aotLoader.hpp"
27 #include "classfile/symbolTable.hpp"
28 #include "classfile/stringTable.hpp"
29 #include "classfile/systemDictionary.hpp"
30 #include "classfile/vmSymbols.hpp"
31 #include "code/codeCache.hpp"
32 #include "code/icBuffer.hpp"
33 #include "gc/serial/defNewGeneration.hpp"
34 #include "gc/shared/adaptiveSizePolicy.hpp"
35 #include "gc/shared/cardTableBarrierSet.hpp"
36 #include "gc/shared/cardTableRS.hpp"
37 #include "gc/shared/collectedHeap.inline.hpp"
38 #include "gc/shared/collectorCounters.hpp"
39 #include "gc/shared/gcId.hpp"
40 #include "gc/shared/gcLocker.hpp"
41 #include "gc/shared/gcPolicyCounters.hpp"
42 #include "gc/shared/gcTrace.hpp"
43 #include "gc/shared/gcTraceTime.inline.hpp"
44 #include "gc/shared/genCollectedHeap.hpp"
45 #include "gc/shared/genOopClosures.inline.hpp"
46 #include "gc/shared/generationSpec.hpp"
47 #include "gc/shared/oopStorageParState.inline.hpp"
48 #include "gc/shared/space.hpp"
49 #include "gc/shared/strongRootsScope.hpp"
50 #include "gc/shared/vmGCOperations.hpp"
51 #include "gc/shared/weakProcessor.hpp"
52 #include "gc/shared/workgroup.hpp"
53 #include "memory/filemap.hpp"
54 #include "memory/iterator.hpp"
55 #include "memory/metaspaceCounters.hpp"
56 #include "memory/resourceArea.hpp"
57 #include "oops/oop.inline.hpp"
58 #include "runtime/biasedLocking.hpp"
59 #include "runtime/flags/flagSetting.hpp"
60 #include "runtime/handles.hpp"
61 #include "runtime/handles.inline.hpp"
62 #include "runtime/java.hpp"
63 #include "runtime/vmThread.hpp"
64 #include "services/management.hpp"
65 #include "services/memoryService.hpp"
66 #include "utilities/debug.hpp"
67 #include "utilities/formatBuffer.hpp"
68 #include "utilities/macros.hpp"
69 #include "utilities/stack.inline.hpp"
70 #include "utilities/vmError.hpp"
71 
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy,
                                   Generation::Name young,
                                   Generation::Name old,
                                   const char* policy_counters_name) :
76   CollectedHeap(),
77   _rem_set(NULL),
78   _young_gen_spec(new GenerationSpec(young,
79                                      policy->initial_young_size(),
80                                      policy->max_young_size(),
81                                      policy->gen_alignment())),
82   _old_gen_spec(new GenerationSpec(old,
83                                    policy->initial_old_size(),
84                                    policy->max_old_size(),
85                                    policy->gen_alignment())),
86   _gen_policy(policy),
87   _soft_ref_gen_policy(),
88   _gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)),
89   _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
90   _full_collections_completed(0) {
91 }
92 
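// Reserve the heap, create the card-table remembered set and barrier set,
// and construct the young and old generations within the reservation.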
jint GenCollectedHeap::initialize() {
94   // While there are no constraints in the GC code that HeapWordSize
95   // be any particular value, there are multiple other areas in the
96   // system which believe this to be true (e.g. oop->object_size in some
97   // cases incorrectly returns the size in wordSize units rather than
98   // HeapWordSize).
99   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
100 
101   // Allocate space for the heap.
102 
103   char* heap_address;
104   ReservedSpace heap_rs;
105 
106   size_t heap_alignment = collector_policy()->heap_alignment();
107 
108   heap_address = allocate(heap_alignment, &heap_rs);
109 
110   if (!heap_rs.is_reserved()) {
111     vm_shutdown_during_initialization(
112       "Could not reserve enough space for object heap");
113     return JNI_ENOMEM;
114   }
115 
116   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
117 
118   _rem_set = create_rem_set(reserved_region());
119   _rem_set->initialize();
120   CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
121   bs->initialize();
122   BarrierSet::set_barrier_set(bs);
123 
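  // Carve the contiguous reservation into the young generation (first part)
  // and the old generation (the remainder).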
124   ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size(), false, false);
125   _young_gen = _young_gen_spec->init(young_rs, rem_set());
126   heap_rs = heap_rs.last_part(_young_gen_spec->max_size());
127 
128   ReservedSpace old_rs = heap_rs.first_part(_old_gen_spec->max_size(), false, false);
129   _old_gen = _old_gen_spec->init(old_rs, rem_set());
130   clear_incremental_collection_failed();
131 
132   return JNI_OK;
133 }
134 
CardTableRS* GenCollectedHeap::create_rem_set(const MemRegion& reserved_region) {
136   return new CardTableRS(reserved_region, false /* scan_concurrently */);
137 }
138 
void GenCollectedHeap::initialize_size_policy(size_t init_eden_size,
                                              size_t init_promo_size,
                                              size_t init_survivor_size) {
142   const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
143   _size_policy = new AdaptiveSizePolicy(init_eden_size,
144                                         init_promo_size,
145                                         init_survivor_size,
146                                         max_gc_pause_sec,
147                                         GCTimeRatio);
148 }
149 
char* GenCollectedHeap::allocate(size_t alignment,
                                 ReservedSpace* heap_rs){
152   // Now figure out the total size.
153   const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
154   assert(alignment % pageSize == 0, "Must be");
155 
156   // Check for overflow.
157   size_t total_reserved = _young_gen_spec->max_size() + _old_gen_spec->max_size();
158   if (total_reserved < _young_gen_spec->max_size()) {
159     vm_exit_during_initialization("The size of the object heap + VM data exceeds "
160                                   "the maximum representable size");
161   }
162   assert(total_reserved % alignment == 0,
163          "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
164          SIZE_FORMAT, total_reserved, alignment);
165 
166   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
167 
168   os::trace_page_sizes("Heap",
169                        collector_policy()->min_heap_byte_size(),
170                        total_reserved,
171                        alignment,
172                        heap_rs->base(),
173                        heap_rs->size());
174 
175   return heap_rs->base();
176 }
177 
void GenCollectedHeap::post_initialize() {
179   CollectedHeap::post_initialize();
180   ref_processing_init();
181 
182   DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
183 
184   initialize_size_policy(def_new_gen->eden()->capacity(),
185                          _old_gen->capacity(),
186                          def_new_gen->from()->capacity());
187 
188   MarkSweep::initialize();
189 }
190 
void GenCollectedHeap::ref_processing_init() {
192   _young_gen->ref_processor_init();
193   _old_gen->ref_processor_init();
194 }
195 
GenerationSpec* GenCollectedHeap::young_gen_spec() const {
197   return _young_gen_spec;
198 }
199 
GenerationSpec* GenCollectedHeap::old_gen_spec() const {
201   return _old_gen_spec;
202 }
203 
size_t GenCollectedHeap::capacity() const {
205   return _young_gen->capacity() + _old_gen->capacity();
206 }
207 
size_t GenCollectedHeap::used() const {
209   return _young_gen->used() + _old_gen->used();
210 }
211 
void GenCollectedHeap::save_used_regions() {
213   _old_gen->save_used_region();
214   _young_gen->save_used_region();
215 }
216 
size_t GenCollectedHeap::max_capacity() const {
218   return _young_gen->max_capacity() + _old_gen->max_capacity();
219 }
220 
221 // Update the _full_collections_completed counter
// at the end of a stop-the-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
224   MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
225   assert(_full_collections_completed <= _total_full_collections,
226          "Can't complete more collections than were started");
227   _full_collections_completed = _total_full_collections;
228   ml.notify_all();
229   return _full_collections_completed;
230 }
231 
232 // Update the _full_collections_completed counter, as appropriate,
233 // at the end of a concurrent GC cycle. Note the conditional update
234 // below to allow this method to be called by a concurrent collector
235 // without synchronizing in any manner with the VM thread (which
236 // may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
238   MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
239   assert((_full_collections_completed <= _total_full_collections) &&
240          (count <= _total_full_collections),
241          "Can't complete more collections than were started");
242   if (count > _full_collections_completed) {
243     _full_collections_completed = count;
244     ml.notify_all();
245   }
246   return _full_collections_completed;
247 }
248 
249 // Return true if any of the following is true:
250 // . the allocation won't fit into the current young gen heap
251 // . gc locker is occupied (jni critical section)
252 // . heap memory is tight -- the most recent previous collection
253 //   was a full collection because a partial collection (would
254 //   have) failed and is likely to fail again
bool GenCollectedHeap::should_try_older_generation_allocation(size_t word_size) const {
256   size_t young_capacity = _young_gen->capacity_before_gc();
257   return    (word_size > heap_word_size(young_capacity))
258          || GCLocker::is_active_and_needs_gc()
259          || incremental_collection_failed();
260 }
261 
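// Attempt to satisfy the allocation by expanding a generation: the old
// generation is tried first, then the young generation.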
HeapWord* GenCollectedHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
263   HeapWord* result = NULL;
264   if (_old_gen->should_allocate(size, is_tlab)) {
265     result = _old_gen->expand_and_allocate(size, is_tlab);
266   }
267   if (result == NULL) {
268     if (_young_gen->should_allocate(size, is_tlab)) {
269       result = _young_gen->expand_and_allocate(size, is_tlab);
270     }
271   }
272   assert(result == NULL || is_in_reserved(result), "result not in heap");
273   return result;
274 }
275 
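// Slow-path allocation: loop over a lock-free attempt in the young
// generation, a locked attempt in either generation, and a VM operation
// that performs a collection, until the request succeeds or is given up on.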
HeapWord* GenCollectedHeap::mem_allocate_work(size_t size,
                                              bool is_tlab,
                                              bool* gc_overhead_limit_was_exceeded) {
  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and change it to true only if the GC time
  // limit is exceeded, as checked below.
282   *gc_overhead_limit_was_exceeded = false;
283 
284   HeapWord* result = NULL;
285 
286   // Loop until the allocation is satisfied, or unsatisfied after GC.
287   for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
288     HandleMark hm; // Discard any handles allocated in each iteration.
289 
290     // First allocation attempt is lock-free.
291     Generation *young = _young_gen;
292     assert(young->supports_inline_contig_alloc(),
293       "Otherwise, must do alloc within heap lock");
294     if (young->should_allocate(size, is_tlab)) {
295       result = young->par_allocate(size, is_tlab);
296       if (result != NULL) {
297         assert(is_in_reserved(result), "result not in heap");
298         return result;
299       }
300     }
301     uint gc_count_before;  // Read inside the Heap_lock locked region.
302     {
303       MutexLocker ml(Heap_lock);
304       log_trace(gc, alloc)("GenCollectedHeap::mem_allocate_work: attempting locked slow path allocation");
305       // Note that only large objects get a shot at being
306       // allocated in later generations.
307       bool first_only = !should_try_older_generation_allocation(size);
308 
309       result = attempt_allocation(size, is_tlab, first_only);
310       if (result != NULL) {
311         assert(is_in_reserved(result), "result not in heap");
312         return result;
313       }
314 
315       if (GCLocker::is_active_and_needs_gc()) {
316         if (is_tlab) {
317           return NULL;  // Caller will retry allocating individual object.
318         }
319         if (!is_maximal_no_gc()) {
320           // Try and expand heap to satisfy request.
321           result = expand_heap_and_allocate(size, is_tlab);
322           // Result could be null if we are out of space.
323           if (result != NULL) {
324             return result;
325           }
326         }
327 
328         if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
329           return NULL; // We didn't get to do a GC and we didn't get any memory.
330         }
331 
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
335         // initiated by the last thread exiting the critical section; so
336         // we retry the allocation sequence from the beginning of the loop,
337         // rather than causing more, now probably unnecessary, GC attempts.
338         JavaThread* jthr = JavaThread::current();
339         if (!jthr->in_critical()) {
340           MutexUnlocker mul(Heap_lock);
341           // Wait for JNI critical section to be exited
342           GCLocker::stall_until_clear();
343           gclocker_stalled_count += 1;
344           continue;
345         } else {
346           if (CheckJNICalls) {
347             fatal("Possible deadlock due to allocating while"
348                   " in jni critical section");
349           }
350           return NULL;
351         }
352       }
353 
354       // Read the gc count while the heap lock is held.
355       gc_count_before = total_collections();
356     }
357 
358     VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
359     VMThread::execute(&op);
360     if (op.prologue_succeeded()) {
361       result = op.result();
362       if (op.gc_locked()) {
363          assert(result == NULL, "must be NULL if gc_locked() is true");
364          continue;  // Retry and/or stall as necessary.
365       }
366 
      // Allocation has failed and a collection
      // has been done.  If the GC time limit was exceeded
      // this time, return NULL so that an out-of-memory error
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the overhead-exceeded state does not persist.
372 
373       const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
374       const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();
375 
376       if (limit_exceeded && softrefs_clear) {
377         *gc_overhead_limit_was_exceeded = true;
378         size_policy()->set_gc_overhead_limit_exceeded(false);
379         if (op.result() != NULL) {
380           CollectedHeap::fill_with_object(op.result(), size);
381         }
382         return NULL;
383       }
384       assert(result == NULL || is_in_reserved(result),
385              "result not in heap");
386       return result;
387     }
388 
389     // Give a warning if we seem to be looping forever.
390     if ((QueuedAllocationWarningCount > 0) &&
391         (try_count % QueuedAllocationWarningCount == 0)) {
392           log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work retries %d times,"
393                                 " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
394     }
395   }
396 }
397 
398 #ifndef PRODUCT
399 // Override of memory state checking method in CollectedHeap:
400 // Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
402 // CMS these words hold state used to synchronize between certain
403 // (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
406 // generational collectors) this means that those many words are
407 // skipped in each object, irrespective of the generation in which
408 // that object lives. The resultant loss of precision seems to be
409 // harmless and the pain of avoiding that imprecision appears somewhat
410 // higher than we are prepared to pay for such rudimentary debugging
411 // support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
414   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
415     // We are asked to check a size in HeapWords,
416     // but the memory is mangled in juint words.
417     juint* start = (juint*) (addr + skip_header_HeapWords());
418     juint* end   = (juint*) (addr + size);
419     for (juint* slot = start; slot < end; slot += 1) {
420       assert(*slot == badHeapWordVal,
421              "Found non badHeapWordValue in pre-allocation check");
422     }
423   }
424 }
425 #endif
426 
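// Try the young generation first; unless first_only is set, fall back to
// the old generation.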
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
430   HeapWord* res = NULL;
431 
432   if (_young_gen->should_allocate(size, is_tlab)) {
433     res = _young_gen->allocate(size, is_tlab);
434     if (res != NULL || first_only) {
435       return res;
436     }
437   }
438 
439   if (_old_gen->should_allocate(size, is_tlab)) {
440     res = _old_gen->allocate(size, is_tlab);
441   }
442 
443   return res;
444 }
445 
HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
448   return mem_allocate_work(size,
449                            false /* is_tlab */,
450                            gc_overhead_limit_was_exceeded);
451 }
452 
bool GenCollectedHeap::must_clear_all_soft_refs() {
454   return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
455          _gc_cause == GCCause::_wb_full_gc;
456 }
457 
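// Collect a single generation: run any requested verification, set up
// reference discovery, delegate to the generation's collect() method, and
// update timing and GC statistics.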
void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs,
                                          bool restore_marks_for_biased_locking) {
461   FormatBuffer<> title("Collect gen: %s", gen->short_name());
462   GCTraceTime(Trace, gc, phases) t1(title);
463   TraceCollectorStats tcs(gen->counters());
464   TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause());
465 
466   gen->stat_record()->invocations++;
467   gen->stat_record()->accumulated_time.start();
468 
469   // Must be done anew before each collection because
470   // a previous collection will do mangling and will
471   // change top of some spaces.
472   record_gen_tops_before_GC();
473 
474   log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);
475 
476   if (run_verification && VerifyBeforeGC) {
477     HandleMark hm;  // Discard invalid handles created during verification
478     Universe::verify("Before GC");
479   }
480   COMPILER2_PRESENT(DerivedPointerTable::clear());
481 
482   if (restore_marks_for_biased_locking) {
483     // We perform this mark word preservation work lazily
484     // because it's only at this point that we know whether we
485     // absolutely have to do it; we want to avoid doing it for
486     // scavenge-only collections where it's unnecessary
487     BiasedLocking::preserve_marks();
488   }
489 
490   // Do collection work
491   {
492     // Note on ref discovery: For what appear to be historical reasons,
    // GCH enables and disables (by enqueueing) refs discovery.
494     // In the future this should be moved into the generation's
495     // collect method so that ref discovery and enqueueing concerns
496     // are local to a generation. The collect method could return
497     // an appropriate indication in the case that notification on
498     // the ref lock was needed. This will make the treatment of
499     // weak refs more uniform (and indeed remove such concerns
500     // from GCH). XXX
501 
502     HandleMark hm;  // Discard invalid handles created during gc
503     save_marks();   // save marks for all gens
504     // We want to discover references, but not process them yet.
505     // This mode is disabled in process_discovered_references if the
506     // generation does some collection work, or in
507     // enqueue_discovered_references if the generation returns
508     // without doing any work.
509     ReferenceProcessor* rp = gen->ref_processor();
510     // If the discovery of ("weak") refs in this generation is
511     // atomic wrt other collectors in this configuration, we
512     // are guaranteed to have empty discovered ref lists.
513     if (rp->discovery_is_atomic()) {
514       rp->enable_discovery();
515       rp->setup_policy(clear_soft_refs);
516     } else {
517       // collect() below will enable discovery as appropriate
518     }
519     gen->collect(full, clear_soft_refs, size, is_tlab);
520     if (!rp->enqueuing_is_done()) {
521       rp->disable_discovery();
522     } else {
523       rp->set_enqueuing_is_done(false);
524     }
525     rp->verify_no_references_recorded();
526   }
527 
528   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
529 
530   gen->stat_record()->accumulated_time.stop();
531 
532   update_gc_stats(gen, full);
533 
534   if (run_verification && VerifyAfterGC) {
535     HandleMark hm;  // Discard invalid handles created during verification
536     Universe::verify("After GC");
537   }
538 }
539 
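// Perform a collection at a safepoint: collect the young generation if it
// wants to be collected and, if requested and needed, the old generation.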
void GenCollectedHeap::do_collection(bool           full,
                                     bool           clear_all_soft_refs,
                                     size_t         size,
                                     bool           is_tlab,
                                     GenerationType max_generation) {
545   ResourceMark rm;
546   DEBUG_ONLY(Thread* my_thread = Thread::current();)
547 
548   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
549   assert(my_thread->is_VM_thread() ||
550          my_thread->is_ConcurrentGC_thread(),
551          "incorrect thread type capability");
552   assert(Heap_lock->is_locked(),
553          "the requesting thread should have the Heap_lock");
554   guarantee(!is_gc_active(), "collection is not reentrant");
555 
556   if (GCLocker::check_active_before_gc()) {
557     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
558   }
559 
560   GCIdMark gc_id_mark;
561 
562   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
563                           soft_ref_policy()->should_clear_all_soft_refs();
564 
565   ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());
566 
567   const size_t metadata_prev_used = MetaspaceUtils::used_bytes();
568 
569   print_heap_before_gc();
570 
571   {
572     FlagSetting fl(_is_gc_active, true);
573 
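    // A "complete" collection covers the old generation. Unless
    // ScavengeBeforeFullGC is set, the old generation collection also takes
    // care of the young generation, so no separate young collection is run.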
574     bool complete = full && (max_generation == OldGen);
575     bool old_collects_young = complete && !ScavengeBeforeFullGC;
576     bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);
577 
578     FormatBuffer<> gc_string("%s", "Pause ");
579     if (do_young_collection) {
580       gc_string.append("Young");
581     } else {
582       gc_string.append("Full");
583     }
584 
585     GCTraceCPUTime tcpu;
586     GCTraceTime(Info, gc) t(gc_string, NULL, gc_cause(), true);
587 
588     gc_prologue(complete);
589     increment_total_collections(complete);
590 
591     size_t young_prev_used = _young_gen->used();
592     size_t old_prev_used = _old_gen->used();
593 
594     bool run_verification = total_collections() >= VerifyGCStartAt;
595 
596     bool prepared_for_verification = false;
597     bool collected_old = false;
598 
599     if (do_young_collection) {
600       if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
601         prepare_for_verify();
602         prepared_for_verification = true;
603       }
604 
605       collect_generation(_young_gen,
606                          full,
607                          size,
608                          is_tlab,
609                          run_verification && VerifyGCLevel <= 0,
610                          do_clear_all_soft_refs,
611                          false);
612 
613       if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
614           size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
615         // Allocation request was met by young GC.
616         size = 0;
617       }
618     }
619 
620     bool must_restore_marks_for_biased_locking = false;
621 
622     if (max_generation == OldGen && _old_gen->should_collect(full, size, is_tlab)) {
623       if (!complete) {
624         // The full_collections increment was missed above.
625         increment_total_full_collections();
626       }
627 
628       if (!prepared_for_verification && run_verification &&
629           VerifyGCLevel <= 1 && VerifyBeforeGC) {
630         prepare_for_verify();
631       }
632 
633       if (do_young_collection) {
634         // We did a young GC. Need a new GC id for the old GC.
635         GCIdMark gc_id_mark;
636         GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause(), true);
637         collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs, true);
638       } else {
639         // No young GC done. Use the same GC id as was set up earlier in this method.
640         collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs, true);
641       }
642 
643       must_restore_marks_for_biased_locking = true;
644       collected_old = true;
645     }
646 
647     // Update "complete" boolean wrt what actually transpired --
648     // for instance, a promotion failure could have led to
649     // a whole heap collection.
650     complete = complete || collected_old;
651 
652     print_heap_change(young_prev_used, old_prev_used);
653     MetaspaceUtils::print_metaspace_change(metadata_prev_used);
654 
655     // Adjust generation sizes.
656     if (collected_old) {
657       _old_gen->compute_new_size();
658     }
659     _young_gen->compute_new_size();
660 
661     if (complete) {
662       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
663       ClassLoaderDataGraph::purge();
664       MetaspaceUtils::verify_metrics();
665       // Resize the metaspace capacity after full collections
666       MetaspaceGC::compute_new_size();
667       update_full_collections_completed();
668     }
669 
670     // Track memory usage and detect low memory after GC finishes
671     MemoryService::track_memory_usage();
672 
673     gc_epilogue(complete);
674 
675     if (must_restore_marks_for_biased_locking) {
676       BiasedLocking::restore_marks();
677     }
678   }
679 
680   print_heap_after_gc();
681 
682 #ifdef TRACESPINNING
683   ParallelTaskTerminator::print_termination_counts();
684 #endif
685 }
686 
void GenCollectedHeap::register_nmethod(nmethod* nm) {
688   CodeCache::register_scavenge_root_nmethod(nm);
689 }
690 
void GenCollectedHeap::verify_nmethod(nmethod* nm) {
692   CodeCache::verify_scavenge_root_nmethod(nm);
693 }
694 
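// Called after a failed allocation request: escalate from heap expansion to
// an incremental collection, a full collection, and finally a maximally
// compacting full collection that clears soft references.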
HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
696   GCCauseSetter x(this, GCCause::_allocation_failure);
697   HeapWord* result = NULL;
698 
699   assert(size != 0, "Precondition violated");
700   if (GCLocker::is_active_and_needs_gc()) {
701     // GC locker is active; instead of a collection we will attempt
702     // to expand the heap, if there's room for expansion.
703     if (!is_maximal_no_gc()) {
704       result = expand_heap_and_allocate(size, is_tlab);
705     }
706     return result;   // Could be null if we are out of space.
707   } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
708     // Do an incremental collection.
709     do_collection(false,                     // full
710                   false,                     // clear_all_soft_refs
711                   size,                      // size
712                   is_tlab,                   // is_tlab
713                   GenCollectedHeap::OldGen); // max_generation
714   } else {
715     log_trace(gc)(" :: Trying full because partial may fail :: ");
716     // Try a full collection; see delta for bug id 6266275
717     // for the original code and why this has been simplified
718     // with from-space allocation criteria modified and
719     // such allocation moved out of the safepoint path.
720     do_collection(true,                      // full
721                   false,                     // clear_all_soft_refs
722                   size,                      // size
723                   is_tlab,                   // is_tlab
724                   GenCollectedHeap::OldGen); // max_generation
725   }
726 
727   result = attempt_allocation(size, is_tlab, false /*first_only*/);
728 
729   if (result != NULL) {
730     assert(is_in_reserved(result), "result not in heap");
731     return result;
732   }
733 
734   // OK, collection failed, try expansion.
735   result = expand_heap_and_allocate(size, is_tlab);
736   if (result != NULL) {
737     return result;
738   }
739 
740   // If we reach this point, we're really out of memory. Try every trick
741   // we can to reclaim memory. Force collection of soft references. Force
742   // a complete compaction of the heap. Any additional methods for finding
743   // free memory should be here, especially if they are expensive. If this
744   // attempt fails, an OOM exception will be thrown.
745   {
746     UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
747 
748     do_collection(true,                      // full
749                   true,                      // clear_all_soft_refs
750                   size,                      // size
751                   is_tlab,                   // is_tlab
752                   GenCollectedHeap::OldGen); // max_generation
753   }
754 
755   result = attempt_allocation(size, is_tlab, false /* first_only */);
756   if (result != NULL) {
757     assert(is_in_reserved(result), "result not in heap");
758     return result;
759   }
760 
761   assert(!soft_ref_policy()->should_clear_all_soft_refs(),
762     "Flag should have been handled and cleared prior to this point");
763 
764   // What else?  We might try synchronous finalization later.  If the total
765   // space available is large enough for the allocation, then a more
766   // complete compaction phase than we've tried so far might be
767   // appropriate.
768   return NULL;
769 }
770 
771 #ifdef ASSERT
772 class AssertNonScavengableClosure: public OopClosure {
773 public:
  virtual void do_oop(oop* p) {
775     assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
776       "Referent should not be scavengable.");  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
778 };
779 static AssertNonScavengableClosure assert_is_non_scavengable_closure;
780 #endif
781 
void GenCollectedHeap::process_roots(StrongRootsScope* scope,
                                     ScanningOption so,
                                     OopClosure* strong_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobToOopClosure* code_roots) {
788   // General roots.
789   assert(Threads::thread_claim_parity() != 0, "must have called prologue code");
790   assert(code_roots != NULL, "code root closure should always be set");
  // _n_termination for _process_strong_tasks should be set upstream
792   // in a method not running in a GC worker.  Otherwise the GC worker
793   // could be trying to change the termination condition while the task
794   // is executing in another GC worker.
795 
796   if (!_process_strong_tasks->is_task_claimed(GCH_PS_ClassLoaderDataGraph_oops_do)) {
797     ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
798   }
799 
800   // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
801   CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
802 
803   bool is_par = scope->n_threads() > 1;
804   Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_code_p);
805 
806   if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
807     Universe::oops_do(strong_roots);
808   }
809   // Global (strong) JNI handles
810   if (!_process_strong_tasks->is_task_claimed(GCH_PS_JNIHandles_oops_do)) {
811     JNIHandles::oops_do(strong_roots);
812   }
813 
814   if (!_process_strong_tasks->is_task_claimed(GCH_PS_ObjectSynchronizer_oops_do)) {
815     ObjectSynchronizer::oops_do(strong_roots);
816   }
817   if (!_process_strong_tasks->is_task_claimed(GCH_PS_Management_oops_do)) {
818     Management::oops_do(strong_roots);
819   }
820   if (!_process_strong_tasks->is_task_claimed(GCH_PS_jvmti_oops_do)) {
821     JvmtiExport::oops_do(strong_roots);
822   }
823   if (UseAOT && !_process_strong_tasks->is_task_claimed(GCH_PS_aot_oops_do)) {
824     AOTLoader::oops_do(strong_roots);
825   }
826 
827   if (!_process_strong_tasks->is_task_claimed(GCH_PS_SystemDictionary_oops_do)) {
828     SystemDictionary::oops_do(strong_roots);
829   }
830 
831   if (!_process_strong_tasks->is_task_claimed(GCH_PS_CodeCache_oops_do)) {
832     if (so & SO_ScavengeCodeCache) {
833       assert(code_roots != NULL, "must supply closure for code cache");
834 
835       // We only visit parts of the CodeCache when scavenging.
836       CodeCache::scavenge_root_nmethods_do(code_roots);
837     }
838     if (so & SO_AllCodeCache) {
839       assert(code_roots != NULL, "must supply closure for code cache");
840 
841       // CMSCollector uses this to do intermediate-strength collections.
842       // We scan the entire code cache, since CodeCache::do_unloading is not called.
843       CodeCache::blobs_do(code_roots);
844     }
845     // Verify that the code cache contents are not subject to
846     // movement by a scavenging collection.
847     DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
848     DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
849   }
850 }
851 
void GenCollectedHeap::process_string_table_roots(StrongRootsScope* scope,
                                                  OopClosure* root_closure,
                                                  OopStorage::ParState<false, false>* par_state_string) {
855   assert(root_closure != NULL, "Must be set");
856   // All threads execute the following. A specific chunk of buckets
857   // from the StringTable are the individual tasks.
858 
859   // Either we should be single threaded or have a ParState
860   assert((scope->n_threads() <= 1) || par_state_string != NULL, "Parallel but no ParState");
861 
862   if (scope->n_threads() > 1) {
863     StringTable::possibly_parallel_oops_do(par_state_string, root_closure);
864   } else {
865     StringTable::oops_do(root_closure);
866   }
867 }
868 
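// Process roots for a young collection: strong roots, the string table, and
// old-to-young references recorded in the remembered set.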
void GenCollectedHeap::young_process_roots(StrongRootsScope* scope,
                                           OopsInGenClosure* root_closure,
                                           OopsInGenClosure* old_gen_closure,
                                           CLDClosure* cld_closure,
                                           OopStorage::ParState<false, false>* par_state_string) {
874   MarkingCodeBlobClosure mark_code_closure(root_closure, CodeBlobToOopClosure::FixRelocations);
875 
876   process_roots(scope, SO_ScavengeCodeCache, root_closure,
877                 cld_closure, cld_closure, &mark_code_closure);
878   process_string_table_roots(scope, root_closure, par_state_string);
879 
880   if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
881     root_closure->reset_generation();
882   }
883 
884   // When collection is parallel, all threads get to cooperate to do
885   // old generation scanning.
886   old_gen_closure->set_generation(_old_gen);
887   rem_set()->younger_refs_iterate(_old_gen, old_gen_closure, scope->n_threads());
888   old_gen_closure->reset_generation();
889 
890   _process_strong_tasks->all_tasks_completed(scope->n_threads());
891 }
892 
void GenCollectedHeap::full_process_roots(StrongRootsScope* scope,
                                          bool is_adjust_phase,
                                          ScanningOption so,
                                          bool only_strong_roots,
                                          OopsInGenClosure* root_closure,
                                          CLDClosure* cld_closure,
                                          OopStorage::ParState<false, false>* par_state_string) {
900   MarkingCodeBlobClosure mark_code_closure(root_closure, is_adjust_phase);
901   CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
902 
903   process_roots(scope, so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure);
904   if (is_adjust_phase) {
905     // We never treat the string table as roots during marking
906     // for the full gc, so we only need to process it during
907     // the adjust phase.
908     process_string_table_roots(scope, root_closure, par_state_string);
909   }
910 
911   _process_strong_tasks->all_tasks_completed(scope->n_threads());
912 }
913 
void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
915   WeakProcessor::oops_do(root_closure);
916   _young_gen->ref_processor()->weak_oops_do(root_closure);
917   _old_gen->ref_processor()->weak_oops_do(root_closure);
918 }
919 
bool GenCollectedHeap::no_allocs_since_save_marks() {
921   return _young_gen->no_allocs_since_save_marks() &&
922          _old_gen->no_allocs_since_save_marks();
923 }
924 
bool GenCollectedHeap::supports_inline_contig_alloc() const {
926   return _young_gen->supports_inline_contig_alloc();
927 }
928 
HeapWord* volatile* GenCollectedHeap::top_addr() const {
930   return _young_gen->top_addr();
931 }
932 
HeapWord** GenCollectedHeap::end_addr() const {
934   return _young_gen->end_addr();
935 }
936 
937 // public collection interfaces
938 
void GenCollectedHeap::collect(GCCause::Cause cause) {
940   if ((cause == GCCause::_wb_young_gc) ||
941       (cause == GCCause::_gc_locker)) {
942     // Young collection for WhiteBox or GCLocker.
943     collect(cause, YoungGen);
944   } else {
945 #ifdef ASSERT
946   if (cause == GCCause::_scavenge_alot) {
947     // Young collection only.
948     collect(cause, YoungGen);
949   } else {
950     // Stop-the-world full collection.
951     collect(cause, OldGen);
952   }
953 #else
954     // Stop-the-world full collection.
955     collect(cause, OldGen);
956 #endif
957   }
958 }
959 
void GenCollectedHeap::collect(GCCause::Cause cause, GenerationType max_generation) {
961   // The caller doesn't have the Heap_lock
962   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
963   MutexLocker ml(Heap_lock);
964   collect_locked(cause, max_generation);
965 }
966 
void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
968   // The caller has the Heap_lock
969   assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
970   collect_locked(cause, OldGen);
971 }
972 
973 // this is the private collection interface
974 // The Heap_lock is expected to be held on entry.
975 
void GenCollectedHeap::collect_locked(GCCause::Cause cause, GenerationType max_generation) {
977   // Read the GC count while holding the Heap_lock
978   unsigned int gc_count_before      = total_collections();
979   unsigned int full_gc_count_before = total_full_collections();
980 
981   if (GCLocker::should_discard(cause, gc_count_before)) {
982     return;
983   }
984 
985   {
986     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
987     VM_GenCollectFull op(gc_count_before, full_gc_count_before,
988                          cause, max_generation);
989     VMThread::execute(&op);
990   }
991 }
992 
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}
996 
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          GenerationType last_generation) {
999   do_collection(true,                   // full
1000                 clear_all_soft_refs,    // clear_all_soft_refs
1001                 0,                      // size
1002                 false,                  // is_tlab
1003                 last_generation);       // last_generation
1004   // Hack XXX FIX ME !!!
1005   // A scavenge may not have been attempted, or may have
1006   // been attempted and failed, because the old gen was too full
1007   if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
1008     log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
1009     // This time allow the old gen to be collected as well
1010     do_collection(true,                // full
1011                   clear_all_soft_refs, // clear_all_soft_refs
1012                   0,                   // size
1013                   false,               // is_tlab
1014                   OldGen);             // last_generation
1015   }
1016 }
1017 
bool GenCollectedHeap::is_in_young(oop p) {
1019   bool result = ((HeapWord*)p) < _old_gen->reserved().start();
1020   assert(result == _young_gen->is_in_reserved(p),
1021          "incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p));
1022   return result;
1023 }
1024 
1025 // Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
1027   return _young_gen->is_in(p) || _old_gen->is_in(p);
1028 }
1029 
1030 #ifdef ASSERT
1031 // Don't implement this by using is_in_young().  This method is used
1032 // in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
1034   assert(is_in_reserved(p) || p == NULL,
1035     "Does not work if address is non-null and outside of the heap");
1036   return p < _young_gen->reserved().end() && p != NULL;
1037 }
1038 #endif
1039 
void GenCollectedHeap::oop_iterate(OopIterateClosure* cl) {
1041   _young_gen->oop_iterate(cl);
1042   _old_gen->oop_iterate(cl);
1043 }
1044 
void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
1046   _young_gen->object_iterate(cl);
1047   _old_gen->object_iterate(cl);
1048 }
1049 
void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
1051   _young_gen->safe_object_iterate(cl);
1052   _old_gen->safe_object_iterate(cl);
1053 }
1054 
Space* GenCollectedHeap::space_containing(const void* addr) const {
1056   Space* res = _young_gen->space_containing(addr);
1057   if (res != NULL) {
1058     return res;
1059   }
1060   res = _old_gen->space_containing(addr);
1061   assert(res != NULL, "Could not find containing space");
1062   return res;
1063 }
1064 
HeapWord* GenCollectedHeap::block_start(const void* addr) const {
1066   assert(is_in_reserved(addr), "block_start of address outside of heap");
1067   if (_young_gen->is_in_reserved(addr)) {
1068     assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
1069     return _young_gen->block_start(addr);
1070   }
1071 
1072   assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
1073   assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
1074   return _old_gen->block_start(addr);
1075 }
1076 
size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
1078   assert(is_in_reserved(addr), "block_size of address outside of heap");
1079   if (_young_gen->is_in_reserved(addr)) {
1080     assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
1081     return _young_gen->block_size(addr);
1082   }
1083 
1084   assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
1085   assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
1086   return _old_gen->block_size(addr);
1087 }
1088 
bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
1090   assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
1091   assert(block_start(addr) == addr, "addr must be a block start");
1092   if (_young_gen->is_in_reserved(addr)) {
1093     return _young_gen->block_is_obj(addr);
1094   }
1095 
1096   assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
1097   return _old_gen->block_is_obj(addr);
1098 }
1099 
bool GenCollectedHeap::supports_tlab_allocation() const {
1101   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
1102   return _young_gen->supports_tlab_allocation();
1103 }
1104 
size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
1106   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
1107   if (_young_gen->supports_tlab_allocation()) {
1108     return _young_gen->tlab_capacity();
1109   }
1110   return 0;
1111 }
1112 
size_t GenCollectedHeap::tlab_used(Thread* thr) const {
1114   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
1115   if (_young_gen->supports_tlab_allocation()) {
1116     return _young_gen->tlab_used();
1117   }
1118   return 0;
1119 }
1120 
size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
1122   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
1123   if (_young_gen->supports_tlab_allocation()) {
1124     return _young_gen->unsafe_max_tlab_alloc();
1125   }
1126   return 0;
1127 }
1128 
HeapWord* GenCollectedHeap::allocate_new_tlab(size_t min_size,
                                              size_t requested_size,
                                              size_t* actual_size) {
1132   bool gc_overhead_limit_was_exceeded;
1133   HeapWord* result = mem_allocate_work(requested_size /* size */,
1134                                        true /* is_tlab */,
1135                                        &gc_overhead_limit_was_exceeded);
1136   if (result != NULL) {
1137     *actual_size = requested_size;
1138   }
1139 
1140   return result;
1141 }
1142 
// Requires "*prev_ptr" to be non-NULL.  Deletes and returns a block of minimal size
// from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
1146   bool first = true;
1147   size_t min_size = 0;   // "first" makes this conceptually infinite.
1148   ScratchBlock **smallest_ptr, *smallest;
1149   ScratchBlock  *cur = *prev_ptr;
1150   while (cur) {
1151     assert(*prev_ptr == cur, "just checking");
1152     if (first || cur->num_words < min_size) {
1153       smallest_ptr = prev_ptr;
1154       smallest     = cur;
1155       min_size     = smallest->num_words;
1156       first        = false;
1157     }
1158     prev_ptr = &cur->next;
1159     cur     =  cur->next;
1160   }
1161   smallest      = *smallest_ptr;
1162   *smallest_ptr = smallest->next;
1163   return smallest;
1164 }
1165 
// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
1169   ScratchBlock* sorted = NULL;
1170   ScratchBlock* unsorted = list;
1171   while (unsorted) {
1172     ScratchBlock *smallest = removeSmallestScratch(&unsorted);
1173     smallest->next  = sorted;
1174     sorted          = smallest;
1175   }
1176   list = sorted;
1177 }
1178 
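// Gather scratch space offered by both generations and return it as a list
// sorted by decreasing block size.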
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
1181   ScratchBlock* res = NULL;
1182   _young_gen->contribute_scratch(res, requestor, max_alloc_words);
1183   _old_gen->contribute_scratch(res, requestor, max_alloc_words);
1184   sort_scratch_list(res);
1185   return res;
1186 }
1187 
void GenCollectedHeap::release_scratch() {
1189   _young_gen->reset_scratch();
1190   _old_gen->reset_scratch();
1191 }
1192 
1193 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
1195     gen->prepare_for_verify();
1196   }
1197 };
1198 
void GenCollectedHeap::prepare_for_verify() {
1200   ensure_parsability(false);        // no need to retire TLABs
1201   GenPrepareForVerifyClosure blk;
1202   generation_iterate(&blk, false);
1203 }
1204 
void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
1207   if (old_to_young) {
1208     cl->do_generation(_old_gen);
1209     cl->do_generation(_young_gen);
1210   } else {
1211     cl->do_generation(_young_gen);
1212     cl->do_generation(_old_gen);
1213   }
1214 }
1215 
bool GenCollectedHeap::is_maximal_no_gc() const {
1217   return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
1218 }
1219 
void GenCollectedHeap::save_marks() {
1221   _young_gen->save_marks();
1222   _old_gen->save_marks();
1223 }
1224 
GenCollectedHeap* GenCollectedHeap::heap() {
1226   CollectedHeap* heap = Universe::heap();
1227   assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1228   assert(heap->kind() == CollectedHeap::Serial ||
1229          heap->kind() == CollectedHeap::CMS, "Invalid name");
1230   return (GenCollectedHeap*) heap;
1231 }
1232 
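// Prepare for mark-compact: both generations compact toward the old
// generation, the young generation continuing from the old generation's
// compaction point.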
1233 #if INCLUDE_SERIALGC
void GenCollectedHeap::prepare_for_compaction() {
1235   // Start by compacting into same gen.
1236   CompactPoint cp(_old_gen);
1237   _old_gen->prepare_for_compaction(&cp);
1238   _young_gen->prepare_for_compaction(&cp);
1239 }
1240 #endif // INCLUDE_SERIALGC
1241 
void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
1243   log_debug(gc, verify)("%s", _old_gen->name());
1244   _old_gen->verify();
1245 
  log_debug(gc, verify)("%s", _young_gen->name());
1247   _young_gen->verify();
1248 
1249   log_debug(gc, verify)("RemSet");
1250   rem_set()->verify();
1251 }
1252 
void GenCollectedHeap::print_on(outputStream* st) const {
1254   _young_gen->print_on(st);
1255   _old_gen->print_on(st);
1256   MetaspaceUtils::print_on(st);
1257 }
1258 
void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
1260 }
1261 
void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
1263 }
1264 
void GenCollectedHeap::print_tracing_info() const {
1266   if (log_is_enabled(Debug, gc, heap, exit)) {
1267     LogStreamHandle(Debug, gc, heap, exit) lsh;
1268     _young_gen->print_summary_info_on(&lsh);
1269     _old_gen->print_summary_info_on(&lsh);
1270   }
1271 }
1272 
void GenCollectedHeap::print_heap_change(size_t young_prev_used, size_t old_prev_used) const {
1274   log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
1275                      _young_gen->short_name(), young_prev_used / K, _young_gen->used() /K, _young_gen->capacity() /K);
1276   log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
1277                      _old_gen->short_name(), old_prev_used / K, _old_gen->used() /K, _old_gen->capacity() /K);
1278 }
1279 
1280 class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
1281  private:
1282   bool _full;
1283  public:
  void do_generation(Generation* gen) {
1285     gen->gc_prologue(_full);
1286   }
  GenGCPrologueClosure(bool full) : _full(full) {};
1288 };
1289 
void GenCollectedHeap::gc_prologue(bool full) {
1291   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
1292 
1293   // Fill TLAB's and such
1294   CollectedHeap::accumulate_statistics_all_tlabs();
1295   ensure_parsability(true);   // retire TLABs
1296 
1297   // Walk generations
1298   GenGCPrologueClosure blk(full);
1299   generation_iterate(&blk, false);  // not old-to-young.
1300 };
1301 
1302 class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
1303  private:
1304   bool _full;
1305  public:
  void do_generation(Generation* gen) {
1307     gen->gc_epilogue(_full);
1308   }
  GenGCEpilogueClosure(bool full) : _full(full) {};
1310 };
1311 
void GenCollectedHeap::gc_epilogue(bool full) {
1313 #if COMPILER2_OR_JVMCI
1314   assert(DerivedPointerTable::is_empty(), "derived pointer present");
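  // Guard against compiled inline allocation wrapping around the end of the
  // address space; see the "inline allocation wraps" guarantee below.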
1315   size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
1316   guarantee(is_client_compilation_mode_vm() || actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
1317 #endif // COMPILER2_OR_JVMCI
1318 
1319   resize_all_tlabs();
1320 
1321   GenGCEpilogueClosure blk(full);
1322   generation_iterate(&blk, false);  // not old-to-young.
1323 
1324   if (!CleanChunkPoolAsync) {
1325     Chunk::clean_chunk_pool();
1326   }
1327 
1328   MetaspaceCounters::update_performance_counters();
1329   CompressedClassSpaceCounters::update_performance_counters();
1330 };
1331 
1332 #ifndef PRODUCT
1333 class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
1334  private:
1335  public:
  void do_generation(Generation* gen) {
1337     gen->record_spaces_top();
1338   }
1339 };
1340 
void GenCollectedHeap::record_gen_tops_before_GC() {
1342   if (ZapUnusedHeapArea) {
1343     GenGCSaveTopsBeforeGCClosure blk;
1344     generation_iterate(&blk, false);  // not old-to-young.
1345   }
1346 }
1347 #endif  // not PRODUCT
1348 
1349 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
1350  public:
  void do_generation(Generation* gen) {
1352     gen->ensure_parsability();
1353   }
1354 };
1355 
void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
1357   CollectedHeap::ensure_parsability(retire_tlabs);
1358   GenEnsureParsabilityClosure ep_cl;
1359   generation_iterate(&ep_cl, false);
1360 }
1361 
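// Promotion failed; as a last resort, try to expand the old generation and
// copy the object there.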
oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                              oop obj,
                                              size_t obj_size) {
1365   guarantee(old_gen == _old_gen, "We only get here with an old generation");
1366   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1367   HeapWord* result = NULL;
1368 
1369   result = old_gen->expand_and_allocate(obj_size, false);
1370 
1371   if (result != NULL) {
1372     Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
1373   }
1374   return oop(result);
1375 }
1376 
1377 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
1378   jlong _time;   // in ms
1379   jlong _now;    // in ms
1380 
1381  public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
1383 
  jlong time() { return _time; }
1385 
  void do_generation(Generation* gen) {
1387     _time = MIN2(_time, gen->time_of_last_gc(_now));
1388   }
1389 };
1390 
jlong GenCollectedHeap::millis_since_last_gc() {
1392   // javaTimeNanos() is guaranteed to be monotonically non-decreasing
1393   // provided the underlying platform provides such a time source
1394   // (and it is bug free). So we still have to guard against getting
1395   // back a time later than 'now'.
1396   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1397   GenTimeOfLastGCClosure tolgc_cl(now);
1398   // iterate over generations getting the oldest
1399   // time that a generation was collected
1400   generation_iterate(&tolgc_cl, false);
1401 
1402   jlong retVal = now - tolgc_cl.time();
1403   if (retVal < 0) {
1404     log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
1405        ". returning zero instead.", retVal);
1406     return 0;
1407   }
1408   return retVal;
1409 }
1410