/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/space.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/macros.hpp"

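// Set up the single contiguous space backing this generation and the
// performance counters that report on it.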
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size,
                                     size_t min_byte_size,
                                     size_t max_byte_size,
                                     CardTableRS* remset) :
  CardGeneration(rs, initial_byte_size, remset)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space  = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  _shrink_factor = 0;
  _capacity_at_prologue = 0;

  _gc_stats = new GCStats();

  // initialize performance counters

  const char* gen_name = "old";
  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      min_byte_size, max_byte_size, &_virtual_space);

  _gc_counters = new CollectorCounters("Serial full collection pauses", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}

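// Remember capacity and usage at the start of a collection; should_collect()
// and update_gc_stats() compare against these values later.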
void TenuredGeneration::gc_prologue(bool full) {
  _capacity_at_prologue = capacity();
  _used_at_prologue = used();
}

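// Decide whether this (old) generation itself should be collected.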
bool TenuredGeneration::should_collect(bool  full,
                                       size_t size,
                                       bool   is_tlab) {
  // This should be one big conditional or (||), but I want to be able to tell
  // why it returns what it returns (without re-evaluating the conditionals
  // in case they aren't idempotent), so I'm doing it this way.
  // DeMorgan says it's okay.
  if (full) {
    log_trace(gc)("TenuredGeneration::should_collect: because full");
    return true;
  }
  if (should_allocate(size, is_tlab)) {
    log_trace(gc)("TenuredGeneration::should_collect: because should_allocate(" SIZE_FORMAT ")", size);
    return true;
  }
  // If we don't have very much free space.
  // XXX: 10000 should be a percentage of the capacity!!!
  if (free() < 10000) {
    log_trace(gc)("TenuredGeneration::should_collect: because free(): " SIZE_FORMAT, free());
    return true;
  }
  // If we had to expand to accommodate promotions from the young generation
  if (_capacity_at_prologue < capacity()) {
    log_trace(gc)("TenuredGeneration::should_collect: because _capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
        _capacity_at_prologue, capacity());
    return true;
  }

  return false;
}

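// Resize after a full collection; the shrink/expand policy is implemented in
// CardGeneration::compute_new_size(), so only sanity-check the result here.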
void TenuredGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  CardGeneration::compute_new_size();

  assert(used() == used_after_gc && used_after_gc <= capacity(),
         "used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
         " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity());
}

void TenuredGeneration::update_gc_stats(Generation* current_generation,
                                        bool full) {
  // If the young generation has been collected, gather any statistics
  // that are of interest at this point.
  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
  if (!full && current_is_young) {
    // Calculate size of data promoted from the young generation
    // before doing the collection.
    size_t used_before_gc = used();

    // If the young gen collection was skipped, then the
    // number of promoted bytes will be 0 and adding it to the
    // average will incorrectly lessen the average.  It is, however,
    // also possible that no promotion was needed.
    if (used_before_gc >= _used_at_prologue) {
      size_t promoted_in_bytes = used_before_gc - _used_at_prologue;
      gc_stats()->avg_promoted()->sample(promoted_in_bytes);
    }
  }
}

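// Refresh the space and generation performance counters when perf data is enabled.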
void TenuredGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

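// A promotion attempt is safe if the contiguous available space can absorb
// either the padded average of past promotions or the given worst-case amount.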
bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_contiguous_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);

  log_trace(gc)("Tenured: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
    res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);

  return res;
}

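// Full collection of this generation: delegate to the serial mark-sweep
// (GenMarkSweep) at a safepoint, with reference discovery widened to the
// whole heap for the duration.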
void TenuredGeneration::collect(bool   full,
                                bool   clear_all_soft_refs,
                                size_t size,
                                bool   is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  ReferenceProcessorSpanMutator
    x(ref_processor(), gch->reserved_region());

  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  gch->pre_full_gc_dump(gc_timer);

  GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);

  gch->post_full_gc_dump(gc_timer);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
}

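// Try to satisfy an allocation by expanding the committed space. The parallel
// path serializes expansion on ParGCRareEvent_lock and retries until the
// allocation succeeds or no further expansion is possible.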
HeapWord*
TenuredGeneration::expand_and_allocate(size_t word_size,
                                       bool is_tlab,
                                       bool parallel) {
  assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        os::naked_sleep(GCExpandToAllocateDelayMillis);
      }
      result = _the_space->par_allocate(word_size);
      if (result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // else try again
      }
    }
  } else {
    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}

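// Expand the committed space while holding ExpandHeap_lock.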
bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}

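// Largest allocation that could be satisfied without a collection:
// the free space currently available in the tenured space.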
size_t TenuredGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}

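// Free space in the space plus memory still uncommitted in the reserved region.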
size_t TenuredGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}

void TenuredGeneration::assert_correct_size_change_locking() {
  assert_locked_or_safepoint(ExpandHeap_lock);
}

// Currently nothing to do.
void TenuredGeneration::prepare_for_verify() {}

void TenuredGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

void TenuredGeneration::save_marks() {
  _the_space->set_saved_mark();
}

void TenuredGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}

bool TenuredGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}

void TenuredGeneration::gc_epilogue(bool full) {
  // update the generation and space performance counters
  update_counters();
  if (ZapUnusedHeapArea) {
    _the_space->check_mangled_unused_area_complete();
  }
}

void TenuredGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  _the_space->set_top_for_allocations();
}

void TenuredGeneration::verify() {
  _the_space->verify();
}

void TenuredGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("   the");
  _the_space->print_on(st);
}