/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psFileBackedVirtualspace.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"

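// The generation name feeds GC logging and serviceability tools. In this
// vintage of the code base, UseParallelOldGC selects the parallel
// compacting full-GC path ("ParOldGen"); with it off, full collections
// fall back to the serial mark-sweep path ("PSOldGen").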
inline const char* PSOldGen::select_name() {
  return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
}

PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
                   size_t initial_size, size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, alignment, perf_data_name, level);
}

PSOldGen::PSOldGen(size_t initial_size,
                   size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{}

void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, alignment);
  initialize_work(perf_data_name, level);

  // The old gen can grow to gen_size_limit().  _reserve reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");

  initialize_performance_counters(perf_data_name, level);
}

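// Background on the heterogeneous-heap branch below (context, not taken
// from this file): AllocateOldGenAt names a file-system path -- typically
// on an alternate memory device such as NV-DIMM -- and when the collector
// policy reports a hetero heap, the old gen is mapped from a file-backed
// virtual space at that path rather than from ordinary anonymous memory.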
void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
  if (ParallelScavengeHeap::heap()->ps_collector_policy()->is_hetero_heap()) {
    _virtual_space = new PSFileBackedVirtualSpace(rs, alignment, AllocateOldGenAt);
    if (!(static_cast<PSFileBackedVirtualSpace*>(_virtual_space))->initialize()) {
      vm_exit_during_initialization("Could not map space for PSOldGen at given AllocateOldGenAt path");
    }
  } else {
    _virtual_space = new PSVirtualSpace(rs, alignment);
  }
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
                           heap_word_size(_max_gen_size));
  assert(limit_reserved.byte_size() == _max_gen_size,
         "word vs bytes confusion");
  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces.  Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSCardTable* ct = heap->card_table();
  ct->resize_covered_region(cmr);

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != heap->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }
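  // To make the guarantee concrete (illustrative numbers only): with the
  // default 512-byte card size, alignment means _reserved.start() and
  // _reserved.end() are multiples of 512.  If the end were start + 1280
  // bytes, the third card would cover 256 bytes of this generation and 256
  // bytes of its neighbor, so dirtying or clearing that card for one
  // generation would silently touch bookkeeping for the other.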

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace(virtual_space()->alignment());

  if (_object_space == NULL) {
    vm_exit_during_initialization("Could not allocate an old gen space");
  }

  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);

#if INCLUDE_SERIALGC
  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

  if (_object_mark_sweep == NULL) {
    vm_exit_during_initialization("Could not complete allocation of old generation");
  }
#endif // INCLUDE_SERIALGC

  // Update the start_array
  start_array()->set_covered_region(cmr);
}

void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1, _min_gen_size,
                                           _max_gen_size, virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}

#if INCLUDE_SERIALGC

void PSOldGen::precompact() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Reset start array first.
  start_array()->reset();

  object_mark_sweep()->precompact();

  // Now compact the young gen
  heap->young_gen()->precompact();
}

void PSOldGen::adjust_pointers() {
  object_mark_sweep()->adjust_pointers();
}

void PSOldGen::compact() {
  object_mark_sweep()->compact(ZapUnusedHeapArea);
}

#endif // INCLUDE_SERIALGC

size_t PSOldGen::contiguous_available() const {
  return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
}

// Allocation.  We report all successful allocations to the size policy.
// Note that the perm gen does not use this method, and should not!
HeapWord* PSOldGen::allocate(size_t word_size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = allocate_noexpand(word_size);

  if (res == NULL) {
    res = expand_and_allocate(word_size);
  }

  // Allocations in the old generation need to be reported
  if (res != NULL) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    heap->size_policy()->tenured_allocation(word_size * HeapWordSize);
  }

  return res;
}

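// GCExpandToAllocateDelayMillis in the two helpers below is a stress/test
// knob (0 by default, so a no-op in normal runs): it deliberately pauses
// between growing the virtual space and retrying the allocation, widening
// any window in which concurrent threads could observe the expanded but
// not-yet-allocated-from generation.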
HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return allocate_noexpand(word_size);
}

HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return cas_allocate_noexpand(word_size);
}

void PSOldGen::expand(size_t bytes) {
  if (bytes == 0) {
    return;
  }
  MutexLocker x(ExpandHeap_lock);
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_up(bytes, alignment);
  size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);

  if (UseNUMA) {
    // With NUMA we use round-robin page allocation for the old gen. Expand by at least
    // providing a page per lgroup. Alignment is larger or equal to the page size.
    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
  }
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = align_down(bytes, alignment);
  }

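  // Worked example of the arithmetic above (illustrative numbers only):
  // with alignment = 512K, a request of bytes = 700K rounds up to
  // aligned_bytes = 1024K.  If bytes were within 512K of SIZE_MAX, the
  // round-up would instead overflow to 0, and align_down(bytes, 512K)
  // recovers the largest expandable multiple as the best effort.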
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (success && GCLocker::is_active_and_needs_gc()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }
}
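// On the "expanded heap instead" message above: when a JNI critical section
// holds the GCLocker, the collection that would normally satisfy the failed
// allocation is deferred, so successfully growing the generation here lets
// mutators make progress without waiting for the critical section to exit.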

bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what virtual_space()->expand_by(0) would return
  }
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area. The memregion spans
      // end -> new_end, we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize();
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
             "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    log_debug(gc)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = true;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) log_warning(gc)("grow to reserve failed"));
  }
  return result;
}

void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  // Align down so we never uncommit more than the caller asked for, and
  // pass the aligned size on: the virtual space expects requests that are
  // a multiple of its alignment.
  size_t size = align_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    virtual_space()->shrink_by(size);
    post_resize();

    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size + size;
    log_debug(gc)("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, size/K, new_mem_size/K);
  }
}

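// Worked example for resize() below (illustrative numbers only): with
// used_in_bytes() = 300M and desired_free_space = 100M, new_size starts at
// 400M, is clamped into [min_gen_size(), gen_size_limit()], rounded up to
// the virtual space alignment, and only then compared with the committed
// capacity to choose between expand() and shrink().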
void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = gen_size_limit();
  }
  // Adjust according to our min and max
  new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());

  assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
  new_size = align_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: "
                      "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
                      " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
                      " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
                      desired_free_space, used_in_bytes(), new_size, current_size,
                      gen_size_limit(), min_gen_size());

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // shrink doesn't grab this lock, expand does. Is that right?
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: collection: %d (" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                      ParallelScavengeHeap::heap()->total_collections(),
                      size_before,
                      virtual_space()->committed_size());
}

// NOTE! We need to be careful about resizing. During a GC, multiple
// allocators may be active during heap expansion. If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
                          (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  ParallelScavengeHeap::heap()->card_table()->resize_covered_region(new_memregion);

  // ALWAYS do this last!!
  object_space()->initialize(new_memregion,
                             SpaceDecorator::DontClear,
                             SpaceDecorator::DontMangle);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
         "Sanity");
}
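// Why the object space initialization must come last in post_resize() (an
// editorial gloss on the NOTE above): updating the space's end() is what
// makes the new memory visible to allocators, so the start array and card
// table have to cover the enlarged region before that point; otherwise a
// racing allocation could land in memory those structures cannot describe.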

size_t PSOldGen::gen_size_limit() {
  return _max_gen_size;
}

void PSOldGen::reset_after_change() {
  ShouldNotReachHere();
  return;
}

size_t PSOldGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}

size_t PSOldGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}

void PSOldGen::print() const { print_on(tty); }

void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity_in_bytes()/K, used_in_bytes()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(virtual_space()->low_boundary()),
               p2i(virtual_space()->high()),
               p2i(virtual_space()->high_boundary()));

  st->print(" object"); object_space()->print_on(st);
}

void PSOldGen::print_used_change(size_t prev_used) const {
  log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
                     name(), prev_used / K, used_in_bytes() / K, capacity_in_bytes() / K);
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

#ifndef PRODUCT

void PSOldGen::space_invariants() {
  assert(object_space()->end() == (HeapWord*) virtual_space()->high(),
         "Space invariant");
  assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->low_boundary() <= virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->high_boundary() >= virtual_space()->high(),
         "Space invariant");
  assert(virtual_space()->low_boundary() == (char*) _reserved.start(),
         "Space invariant");
  assert(virtual_space()->high_boundary() == (char*) _reserved.end(),
         "Space invariant");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
         "Space invariant");
}
#endif

void PSOldGen::verify() {
  object_space()->verify();
}

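// About the probe in the closure below (explanatory gloss): do_object()
// asks the start array about obj + 1, an interior address, and checks that
// the answer maps back to the object's header.  That is the property card
// scanning depends on when a dirty card begins in the middle of an object.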
class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _old_gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* old_gen, ObjectStartArray* start_array) :
    _old_gen(old_gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = (HeapWord*)obj + 1;
    guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj, "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated((HeapWord*)obj), "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check(this, &_start_array);
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif