/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/serial/serialHeap.inline.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
  assert(_young_gen->kind() == Generation::ParNew ||
         _young_gen->kind() == Generation::DefNew, "Expected the young generation here");
}

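// An object is considered alive if it lies at or above the end of the young
// generation's reserved region (i.e. outside the young generation, given the
// heap layout) or if it has already been forwarded during the current scavenge.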
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _young_gen->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  _rs = GenCollectedHeap::heap()->rem_set();
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(SerialHeap* heap,
                             FastScanClosure* cur,
                             FastScanClosure* older) :
  _heap(heap), _scan_cur_or_nonheap(cur), _scan_older(older)
{
}

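// Transitively evacuate the objects reachable from the objects copied so far:
// keep rescanning everything allocated since the last save_marks() until a
// full pass copies nothing new.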
void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _heap->oop_since_save_marks_iterate(_scan_cur_or_nonheap, _scan_older);
  } while (!_heap->no_allocs_since_save_marks());
  guarantee(_heap->young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void CLDScanClosure::do_cld(ClassLoaderData* cld) {
  NOT_PRODUCT(ResourceMark rm);
  log_develop_trace(gc, scavenge)("CLDScanClosure::do_cld " PTR_FORMAT ", %s, dirty: %s",
                                  p2i(cld),
                                  cld->loader_name_and_id(),
                                  cld->has_modified_oops() ? "true" : "false");

  // If the cld has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
  if (cld->has_modified_oops()) {
    if (_accumulate_modified_oops) {
      cld->accumulate_modified_oops();
    }

    // Tell the closure which CLD is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_cld(cld);

    // Clean the cld since we're going to scavenge all the metadata.
    cld->oops_do(_scavenge_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true);

    _scavenge_closure->set_scanned_cld(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  _boundary = _g->reserved().end();
}

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _preserved_marks_set(false /* in_c_heap */),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  gch->rem_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
    vm_exit_during_initialization("Could not allocate a new gen space");
  }

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = gch->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters
  GenCollectorPolicy* gcp = gch->gen_policy();

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
      gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    "Initialization of the survivor spaces assumes these are empty");

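  // The committed part of the virtual space is carved into three contiguous
  // spaces: eden at the low end, followed by the two equally sized survivor
  // spaces (from and to).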
  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned(eden_start), "checking alignment");
  assert(Space::is_aligned(from_start), "checking alignment");
  assert(Space::is_aligned(to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another
    // space, and a failure of the check may not correctly indicate
    // which space is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
        to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

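// Swap the roles of the survivor spaces (and their performance counters).
// After a successful scavenge the former to-space holds the survivors and
// becomes the new from-space, leaving an empty to-space for the next scavenge.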
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand to the reserve size.  The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary.  Also, a second expand-to-reserve call
  // can potentially cause an undue expansion.
  // For example, the first expand may fail for unknown reasons,
  // but the second may succeed and expand the heap to its maximum
  // value.
  if (GCLocker::is_active()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }

  return success;
}

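// Grow the candidate young generation size by NewSizeThreadIncrease bytes per
// non-daemon thread, falling back to 'new_size_before' if any of the
// intermediate computations would overflow. As an illustrative calculation
// only: with NewSizeThreadIncrease=16K and 100 non-daemon threads the
// candidate grows by 1600K before alignment.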
size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
                                                    size_t new_size_before,
                                                    size_t alignment) const {
  size_t desired_new_size = new_size_before;

  if (NewSizeThreadIncrease > 0) {
    int threads_count;
    size_t thread_increase_size = 0;

    // 1. Check an overflow at 'threads_count * NewSizeThreadIncrease'.
    threads_count = Threads::number_of_non_daemon_threads();
    if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
      thread_increase_size = threads_count * NewSizeThreadIncrease;

      // 2. Check an overflow at 'new_size_candidate + thread_increase_size'.
      if (new_size_candidate <= max_uintx - thread_increase_size) {
        new_size_candidate += thread_increase_size;

        // 3. Check an overflow at 'align_up'.
        size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
        if (new_size_candidate <= aligned_max) {
          desired_new_size = align_up(new_size_candidate, alignment);
        }
      }
    }
  }

  return desired_new_size;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = initial_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  int threads_count = 0;
  size_t thread_increase_size = 0;

  size_t new_size_candidate = old_size / NewRatio;
  // Compute desired new generation size based on NewRatio and NewSizeThreadIncrease,
  // reverting to the previous value if any overflow happens.
  size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
       changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    gch->rem_set()->resize_covered_region(cmr);

    log_debug(gc, ergo, heap)(
        "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, _virtual_space.committed_size()/K,
        eden()->capacity()/K, from()->capacity()/K);
    log_trace(gc, ergo, heap)(
        "  [allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
  }
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord* volatile* DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();

  // If the Heap_lock is not locked by this thread, this will be called
  // again later with the Heap_lock held.
  bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));

  HeapWord* result = NULL;
  if (do_alloc) {
    result = from()->allocate(size);
  }

  log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):  will_fail: %s  heap_lock: %s  free: " SIZE_FORMAT "%s%s returns %s",
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free(),
                        should_try_alloc ? "" : "  should_allocate_from_space: NOT",
                        do_alloc ? "  Heap_lock is not owned by self" : "",
                        result == NULL ? "NULL" : "object");

  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

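// Recompute the tenuring threshold after a scavenge: the desired survivor
// occupancy is TargetSurvivorRatio percent of the to-space capacity (in
// words), and the age table picks, roughly, the smallest age at which the
// cumulative size of surviving objects would exceed that target (capped at
// MaxTenuringThreshold).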
void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to TargetSurvivorRatio percent of the real
  // survivor space (half of it with the default TargetSurvivorRatio of 50).
  size_t const survivor_capacity = to()->capacity() / HeapWordSize;
  size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);

  _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);

  if (UsePerfData) {
    GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->counters();
    gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
    gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
  }

  age_table()->print_age_table(_tenuring_threshold);
}

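// A young-generation collection (scavenge): copy live objects reachable from
// the roots (and from dirty cards in the old generation) into to-space or,
// once old enough, into the old generation; then process discovered
// references and other weak roots, swap the survivor spaces, and recompute
// the tenuring threshold. On promotion failure the partially evacuated
// spaces are repaired using the preserved marks.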
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  SerialHeap* heap = SerialHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer->gc_start());

  _old_gen = heap->old_gen();

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    log_trace(gc)(":: Collection attempt not safe ::");
    heap->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, heap->gc_cause());

  heap->trace_heap_before_gc(&gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);
  // The preserved marks should be empty at the start of the GC.
  _preserved_marks_set.init(1);

  heap->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(heap->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  CLDScanClosure cld_scan_closure(&fsc_with_no_gc_barrier,
                                  heap->rem_set()->cld_rem_set()->accumulate_modified_oops());

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(heap,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(heap->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  {
    // DefNew needs to run with n_threads == 0, to make sure the serial
    // version of the card table scanning code is used.
    // See: CardTableRS::non_clean_card_iterate_possibly_parallel.
    StrongRootsScope srs(0);

    heap->young_process_roots(&srs,
                              &fsc_with_no_gc_barrier,
                              &fsc_with_gc_barrier,
                              &cld_scan_closure);
  }

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
  const ReferenceProcessorStats& stats =
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL, &pt);
  gc_tracer.report_gc_reference_stats(stats);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());
  pt.print_all_references();

  assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");

  WeakProcessor::weak_oops_do(&is_alive, &keep_alive);

  // Verify that the usage of keep_alive didn't copy any objects.
  assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a young collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = heap->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    assert(!heap->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_info(gc, promotion)("Promotion failed");
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    heap->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _old_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(heap->reset_promotion_should_fail();)
  }
  // We should have processed and cleared all the preserved marks.
  _preserved_marks_set.reclaim();
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  heap->trace_heap_after_gc(&gc_tracer);

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardedPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);
  restore_preserved_marks();
}

void DefNewGeneration::restore_preserved_marks() {
  SharedRestorePreservedMarksTaskExecutor task_executor(NULL);
  _preserved_marks_set.restore(&task_executor);
}

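// Called when the old generation could not accommodate a promotion. The
// original mark word is preserved if necessary and the object is forwarded
// to itself, which later lets remove_forwarding_pointers() and
// restore_preserved_marks() repair the partially evacuated heap.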
void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = %d) ", old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  _preserved_marks_set.get()->push_if_necessary(old, old->mark_raw());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

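// Copy 'old' either into to-space (if it is still below the tenuring
// threshold) or into the old generation, install a forwarding pointer in the
// old copy's mark word, and return the new location. If promotion fails the
// object is kept in place, self-forwarded, and 'old' itself is returned.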
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate_aligned(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _old_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
     oop obj = _promo_failure_scan_stack.pop();
     obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

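// The saved marks record the current top of each space; the scavenge uses
// them (via no_allocs_since_save_marks() and oop_since_save_marks_iterate())
// to find objects copied since the last scan.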
void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                         size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) {
    return;
  }
  assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    log_trace(gc)(":: to is not empty ::");
    return false;
  }
  if (_old_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _old_gen = gch->old_gen();
  }
  return _old_gen->promotion_attempt_is_safe(used());
}

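// Post-collection bookkeeping. If even a full collection could not make the
// next promotion safe and eden is still occupied, remember that incremental
// (young) collections are expected to fail and allow allocation directly
// from from-space as a last resort.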
void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GCLocker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  Generally, at a minimum, the young generation is
  // empty at the end of a collection.  If it is not, then the heap
  // is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
    to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    if (_old_gen != NULL) {
      _old_gen->sample_eden_chunk();
    }
  } else {
    // If the eden is full and the last collection bailed out, we are running
    // out of heap space, and we try to allocate from the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  HeapWord* res = eden()->par_allocate(word_size);
  if (_old_gen != NULL) {
    _old_gen->sample_eden_chunk();
  }
  return res;
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}
