/*
 * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/pcTasks.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psCompactionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "services/memTracker.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif // INCLUDE_JFR

#include <math.h>

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// All sizes are in HeapWords.
const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words
const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;
const size_t ParallelCompactData::RegionSizeBytes =
  RegionSize << LogHeapWordSize;
const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
const size_t ParallelCompactData::RegionAddrMask       = ~RegionAddrOffsetMask;

const size_t ParallelCompactData::Log2BlockSize   = 7; // 128 words
const size_t ParallelCompactData::BlockSize       = (size_t)1 << Log2BlockSize;
const size_t ParallelCompactData::BlockSizeBytes  =
  BlockSize << LogHeapWordSize;
const size_t ParallelCompactData::BlockSizeOffsetMask = BlockSize - 1;
const size_t ParallelCompactData::BlockAddrOffsetMask = BlockSizeBytes - 1;
const size_t ParallelCompactData::BlockAddrMask       = ~BlockAddrOffsetMask;

const size_t ParallelCompactData::BlocksPerRegion = RegionSize / BlockSize;
const size_t ParallelCompactData::Log2BlocksPerRegion =
  Log2RegionSize - Log2BlockSize;
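
// A worked example (illustration only, not evaluated by the code): with
// Log2RegionSize = 16, RegionSize is 2^16 = 65536 HeapWords; on a 64-bit VM
// (LogHeapWordSize = 3), RegionSizeBytes = 65536 << 3 = 512K.  Likewise
// BlockSize is 2^7 = 128 words (1K bytes), giving
// BlocksPerRegion = 65536 / 128 = 512.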

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_shift = 27;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::los_mask = ~dc_mask;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
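
// A sketch of the resulting bit layout, assuming a 32-bit region_sz_t: the
// high five bits (dc_mask = 0xf8000000) hold the destination count and claim
// state, and the low 27 bits (los_mask = 0x07ffffff) hold the live-obj size:
//
//   dc_one       = 0x08000000   // one destination region
//   dc_claimed   = 0x40000000   // region claimed for filling
//   dc_completed = 0x60000000   // region completely filled
//
// e.g. a claimed region with 1000 live words would pack to
// dc_claimed | 1000 = 0x400003e8.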

SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
bool      PSParallelCompact::_print_phases = false;

ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
Klass*              PSParallelCompact::_updated_int_array_klass_obj = NULL;

double PSParallelCompact::_dwl_mean;
double PSParallelCompact::_dwl_std_dev;
double PSParallelCompact::_dwl_first_term;
double PSParallelCompact::_dwl_adjustment;
#ifdef  ASSERT
bool   PSParallelCompact::_dwl_initialized = false;
#endif  // #ifdef ASSERT

void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
                       HeapWord* destination)
{
  assert(src_region_idx != 0, "invalid src_region_idx");
  assert(partial_obj_size != 0, "invalid partial_obj_size argument");
  assert(destination != NULL, "invalid destination argument");

  _src_region_idx = src_region_idx;
  _partial_obj_size = partial_obj_size;
  _destination = destination;

  // These fields may not be updated below, so make sure they're clear.
  assert(_dest_region_addr == NULL, "should have been cleared");
  assert(_first_src_addr == NULL, "should have been cleared");

  // Determine the number of destination regions for the partial object.
  HeapWord* const last_word = destination + partial_obj_size - 1;
  const ParallelCompactData& sd = PSParallelCompact::summary_data();
  HeapWord* const beg_region_addr = sd.region_align_down(destination);
  HeapWord* const end_region_addr = sd.region_align_down(last_word);

  if (beg_region_addr == end_region_addr) {
    // One destination region.
    _destination_count = 1;
    if (end_region_addr == destination) {
      // The destination falls on a region boundary, thus the first word of the
      // partial object will be the first word copied to the destination region.
      _dest_region_addr = end_region_addr;
      _first_src_addr = sd.region_to_addr(src_region_idx);
    }
  } else {
    // Two destination regions.  When copied, the partial object will cross a
    // destination region boundary, so a word somewhere within the partial
    // object will be the first word copied to the second destination region.
    _destination_count = 2;
    _dest_region_addr = end_region_addr;
    const size_t ofs = pointer_delta(end_region_addr, destination);
    assert(ofs < _partial_obj_size, "sanity");
    _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
  }
}
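
// An illustrative record() call (hypothetical numbers): with RegionSize
// 0x10000 words, a partial object of 0xc000 words whose destination begins
// 0x8000 words past a region boundary crosses into a second destination
// region.  record() then sets _destination_count to 2, _dest_region_addr to
// destination + 0x8000 (the boundary), and _first_src_addr to the source
// region's start + 0x8000, the word that will be copied first into that
// second region.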

void SplitInfo::clear()
{
  _src_region_idx = 0;
  _partial_obj_size = 0;
  _destination = NULL;
  _destination_count = 0;
  _dest_region_addr = NULL;
  _first_src_addr = NULL;
  assert(!is_valid(), "sanity");
}

#ifdef  ASSERT
void SplitInfo::verify_clear()
{
  assert(_src_region_idx == 0, "not clear");
  assert(_partial_obj_size == 0, "not clear");
  assert(_destination == NULL, "not clear");
  assert(_destination_count == 0, "not clear");
  assert(_dest_region_addr == NULL, "not clear");
  assert(_first_src_addr == NULL, "not clear");
}
#endif  // #ifdef ASSERT


void PSParallelCompact::print_on_error(outputStream* st) {
  _mark_bitmap.print_on_error(st);
}

#ifndef PRODUCT
const char* PSParallelCompact::space_names[] = {
  "old ", "eden", "from", "to  "
};

void PSParallelCompact::print_region_ranges()
{
  tty->print_cr("space  bottom     top        end        new_top");
  tty->print_cr("------ ---------- ---------- ---------- ----------");

  for (unsigned int id = 0; id < last_space_id; ++id) {
    const MutableSpace* space = _space_info[id].space();
    tty->print_cr("%u %s "
                  SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
                  SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
                  id, space_names[id],
                  summary_data().addr_to_region_idx(space->bottom()),
                  summary_data().addr_to_region_idx(space->top()),
                  summary_data().addr_to_region_idx(space->end()),
                  summary_data().addr_to_region_idx(_space_info[id].new_top()));
  }
}

void
print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
{
#define REGION_IDX_FORMAT  SIZE_FORMAT_W(7)
#define REGION_DATA_FORMAT SIZE_FORMAT_W(5)

  ParallelCompactData& sd = PSParallelCompact::summary_data();
  size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
  tty->print_cr(REGION_IDX_FORMAT " " PTR_FORMAT " "
                REGION_IDX_FORMAT " " PTR_FORMAT " "
                REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
                REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
                i, c->data_location(), dci, c->destination(),
                c->partial_obj_size(), c->live_obj_size(),
                c->data_size(), c->source_region(), c->destination_count());

#undef  REGION_IDX_FORMAT
#undef  REGION_DATA_FORMAT
}

void
print_generic_summary_data(ParallelCompactData& summary_data,
                           HeapWord* const beg_addr,
                           HeapWord* const end_addr)
{
  size_t total_words = 0;
  size_t i = summary_data.addr_to_region_idx(beg_addr);
  const size_t last = summary_data.addr_to_region_idx(end_addr);
  HeapWord* pdest = 0;

  while (i <= last) {
    ParallelCompactData::RegionData* c = summary_data.region(i);
    if (c->data_size() != 0 || c->destination() != pdest) {
      print_generic_summary_region(i, c);
      total_words += c->data_size();
      pdest = c->destination();
    }
    ++i;
  }

  tty->print_cr("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
}

void
print_generic_summary_data(ParallelCompactData& summary_data,
                           SpaceInfo* space_info)
{
  for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
    const MutableSpace* space = space_info[id].space();
    print_generic_summary_data(summary_data, space->bottom(),
                               MAX2(space->top(), space_info[id].new_top()));
  }
}

void
print_initial_summary_region(size_t i,
                             const ParallelCompactData::RegionData* c,
                             bool newline = true)
{
  tty->print(SIZE_FORMAT_W(5) " " PTR_FORMAT " "
             SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " "
             SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
             i, c->destination(),
             c->partial_obj_size(), c->live_obj_size(),
             c->data_size(), c->source_region(), c->destination_count());
  if (newline) tty->cr();
}

void
print_initial_summary_data(ParallelCompactData& summary_data,
                           const MutableSpace* space) {
  if (space->top() == space->bottom()) {
    return;
  }

  const size_t region_size = ParallelCompactData::RegionSize;
  typedef ParallelCompactData::RegionData RegionData;
  HeapWord* const top_aligned_up = summary_data.region_align_up(space->top());
  const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up);
  const RegionData* c = summary_data.region(end_region - 1);
  HeapWord* end_addr = c->destination() + c->data_size();
  const size_t live_in_space = pointer_delta(end_addr, space->bottom());

  // Print (and count) the full regions at the beginning of the space.
  size_t full_region_count = 0;
  size_t i = summary_data.addr_to_region_idx(space->bottom());
  while (i < end_region && summary_data.region(i)->data_size() == region_size) {
    print_initial_summary_region(i, summary_data.region(i));
    ++full_region_count;
    ++i;
  }

  size_t live_to_right = live_in_space - full_region_count * region_size;

  double max_reclaimed_ratio = 0.0;
  size_t max_reclaimed_ratio_region = 0;
  size_t max_dead_to_right = 0;
  size_t max_live_to_right = 0;

  // Print the 'reclaimed ratio' for regions while there is something live in
  // the region or to the right of it.  The remaining regions are empty (and
  // uninteresting), and computing the ratio will result in division by 0.
  while (i < end_region && live_to_right > 0) {
    c = summary_data.region(i);
    HeapWord* const region_addr = summary_data.region_to_addr(i);
    const size_t used_to_right = pointer_delta(space->top(), region_addr);
    const size_t dead_to_right = used_to_right - live_to_right;
    const double reclaimed_ratio = double(dead_to_right) / live_to_right;

    if (reclaimed_ratio > max_reclaimed_ratio) {
      max_reclaimed_ratio = reclaimed_ratio;
      max_reclaimed_ratio_region = i;
      max_dead_to_right = dead_to_right;
      max_live_to_right = live_to_right;
    }

    print_initial_summary_region(i, c, false);
    tty->print_cr(" %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
                  reclaimed_ratio, dead_to_right, live_to_right);

    live_to_right -= c->data_size();
    ++i;
  }

  // Any remaining regions are empty.  Print one more if there is one.
  if (i < end_region) {
    print_initial_summary_region(i, summary_data.region(i));
  }

  tty->print_cr("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " "
                "l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
                max_reclaimed_ratio_region, max_dead_to_right,
                max_live_to_right, max_reclaimed_ratio);
}

void
print_initial_summary_data(ParallelCompactData& summary_data,
                           SpaceInfo* space_info) {
  unsigned int id = PSParallelCompact::old_space_id;
  const MutableSpace* space;
  do {
    space = space_info[id].space();
    print_initial_summary_data(summary_data, space);
  } while (++id < PSParallelCompact::eden_space_id);

  do {
    space = space_info[id].space();
    print_generic_summary_data(summary_data, space->bottom(), space->top());
  } while (++id < PSParallelCompact::last_space_id);
}
#endif  // #ifndef PRODUCT

#ifdef  ASSERT
size_t add_obj_count;
size_t add_obj_size;
size_t mark_bitmap_count;
size_t mark_bitmap_size;
#endif  // #ifdef ASSERT

ParallelCompactData::ParallelCompactData()
{
  _region_start = 0;

  _region_vspace = 0;
  _reserved_byte_size = 0;
  _region_data = 0;
  _region_count = 0;

  _block_vspace = 0;
  _block_data = 0;
  _block_count = 0;
}

bool ParallelCompactData::initialize(MemRegion covered_region)
{
  _region_start = covered_region.start();
  const size_t region_size = covered_region.word_size();
  DEBUG_ONLY(_region_end = _region_start + region_size;)

  assert(region_align_down(_region_start) == _region_start,
         "region start not aligned");
  assert((region_size & RegionSizeOffsetMask) == 0,
         "region size not a multiple of RegionSize");

  bool result = initialize_region_data(region_size) && initialize_block_data();
  return result;
}

PSVirtualSpace*
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
  const size_t raw_bytes = count * element_size;
  const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));

  const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_sz, granularity);
  ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
  os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
                       rs.size());

  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

  PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
  if (vspace != 0) {
    if (vspace->expand_by(_reserved_byte_size)) {
      return vspace;
    }
    delete vspace;
    // Release memory reserved in the space.
    rs.release();
  }

  return 0;
}
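
// A worked illustration of the sizing above (hypothetical numbers): for
// count = 1M elements of 24 bytes each, raw_bytes is 24M.  If page_sz comes
// back as the default 4K small page and the allocation granularity is 64K,
// _reserved_byte_size is 24M aligned up to 64K (unchanged here) and rs_align
// is 0; if 2M large pages are chosen instead, both the size and the
// ReservedSpace alignment round up to 2M.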

bool ParallelCompactData::initialize_region_data(size_t region_size)
{
  const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize;
  _region_vspace = create_vspace(count, sizeof(RegionData));
  if (_region_vspace != 0) {
    _region_data = (RegionData*)_region_vspace->reserved_low_addr();
    _region_count = count;
    return true;
  }
  return false;
}

bool ParallelCompactData::initialize_block_data()
{
  assert(_region_count != 0, "region data must be initialized first");
  const size_t count = _region_count << Log2BlocksPerRegion;
  _block_vspace = create_vspace(count, sizeof(BlockData));
  if (_block_vspace != 0) {
    _block_data = (BlockData*)_block_vspace->reserved_low_addr();
    _block_count = count;
    return true;
  }
  return false;
}

void ParallelCompactData::clear()
{
  memset(_region_data, 0, _region_vspace->committed_size());
  memset(_block_data, 0, _block_vspace->committed_size());
}

void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
  assert(beg_region <= _region_count, "beg_region out of range");
  assert(end_region <= _region_count, "end_region out of range");
  assert(RegionSize % BlockSize == 0, "RegionSize not a multiple of BlockSize");

  const size_t region_cnt = end_region - beg_region;
  memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));

  const size_t beg_block = beg_region * BlocksPerRegion;
  const size_t block_cnt = region_cnt * BlocksPerRegion;
  memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
}

HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
{
  const RegionData* cur_cp = region(region_idx);
  const RegionData* const end_cp = region(region_count() - 1);

  HeapWord* result = region_to_addr(region_idx);
  if (cur_cp < end_cp) {
    do {
      result += cur_cp->partial_obj_size();
    } while (cur_cp->partial_obj_size() == RegionSize && ++cur_cp < end_cp);
  }
  return result;
}

void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
{
  const size_t obj_ofs = pointer_delta(addr, _region_start);
  const size_t beg_region = obj_ofs >> Log2RegionSize;
  const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;

  DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
  DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)

  if (beg_region == end_region) {
    // All in one region.
    _region_data[beg_region].add_live_obj(len);
    return;
  }

  // First region.
  const size_t beg_ofs = region_offset(addr);
  _region_data[beg_region].add_live_obj(RegionSize - beg_ofs);

  Klass* klass = ((oop)addr)->klass();
  // Middle regions--completely spanned by this object.
  for (size_t region = beg_region + 1; region < end_region; ++region) {
    _region_data[region].set_partial_obj_size(RegionSize);
    _region_data[region].set_partial_obj_addr(addr);
  }

  // Last region.
  const size_t end_ofs = region_offset(addr + len - 1);
  _region_data[end_region].set_partial_obj_size(end_ofs + 1);
  _region_data[end_region].set_partial_obj_addr(addr);
}
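
// A worked add_obj() example (hypothetical numbers): with RegionSize =
// 0x10000 words, an object of 0x28000 words starting 0xc000 words into
// region 2 ends in region 5.  Region 2 gets add_live_obj(0x4000) for the
// words up to its boundary, regions 3 and 4 get partial_obj_size ==
// RegionSize, and region 5 gets partial_obj_size = 0x4000 (end_ofs 0x3fff
// plus 1), with regions 3-5 recording the object's start as
// partial_obj_addr.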

void
ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
{
  assert(region_offset(beg) == 0, "not RegionSize aligned");
  assert(region_offset(end) == 0, "not RegionSize aligned");

  size_t cur_region = addr_to_region_idx(beg);
  const size_t end_region = addr_to_region_idx(end);
  HeapWord* addr = beg;
  while (cur_region < end_region) {
    _region_data[cur_region].set_destination(addr);
    _region_data[cur_region].set_destination_count(0);
    _region_data[cur_region].set_source_region(cur_region);
    _region_data[cur_region].set_data_location(addr);

    // Update live_obj_size so the region appears completely full.
    size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
    _region_data[cur_region].set_live_obj_size(live_size);

    ++cur_region;
    addr += RegionSize;
  }
}

// Find the point at which a space can be split and, if necessary, record the
// split point.
//
// If the current src region (which overflowed the destination space) doesn't
// have a partial object, the split point is at the beginning of the current src
// region (an "easy" split, no extra bookkeeping required).
//
// If the current src region has a partial object, the split point is in the
// region where that partial object starts (call it the split_region).  If
// split_region has a partial object, then the split point is just after that
// partial object (a "hard" split where we have to record the split data and
// zero the partial_obj_size field).  With a "hard" split, we know that the
// partial_obj ends within split_region because the partial object that caused
// the overflow starts in split_region.  If split_region doesn't have a partial
// obj, then the split is at the beginning of split_region (another "easy"
// split).
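//
// For example (hypothetical numbers): if the object that overflowed the
// destination space starts in region 17, and region 17 itself begins with a
// 1000-word partial object spilling over from region 16, the split is "hard":
// the SplitInfo records region 17 with partial_obj_size 1000, region 17's
// partial_obj_size field is zeroed, and copying resumes 1000 words into
// region 17 (the returned source_next).  Had region 17 started with no
// partial object, the split would be "easy" and nothing would be recorded.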
HeapWord*
ParallelCompactData::summarize_split_space(size_t src_region,
                                           SplitInfo& split_info,
                                           HeapWord* destination,
                                           HeapWord* target_end,
                                           HeapWord** target_next)
{
  assert(destination <= target_end, "sanity");
  assert(destination + _region_data[src_region].data_size() > target_end,
         "region should not fit into target space");
  assert(is_region_aligned(target_end), "sanity");

  size_t split_region = src_region;
  HeapWord* split_destination = destination;
  size_t partial_obj_size = _region_data[src_region].partial_obj_size();

  if (destination + partial_obj_size > target_end) {
    // The split point is just after the partial object (if any) in the
    // src_region that contains the start of the object that overflowed the
    // destination space.
    //
    // Find the start of the "overflow" object and set split_region to the
    // region containing it.
    HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
    split_region = addr_to_region_idx(overflow_obj);

    // Clear the source_region field of all destination regions whose first word
    // came from data after the split point (a non-null source_region field
    // implies a region must be filled).
    //
    // An alternative to the simple loop below:  clear during post_compact(),
    // which uses memcpy instead of individual stores, and is easy to
    // parallelize.  (The downside is that it clears the entire RegionData
    // object as opposed to just one field.)
    //
    // post_compact() would have to clear the summary data up to the highest
    // address that was written during the summary phase, which would be
    //
    //         max(top, max(new_top, clear_top))
    //
    // where clear_top is a new field in SpaceInfo.  Would have to set clear_top
    // to target_end.
    const RegionData* const sr = region(split_region);
    const size_t beg_idx =
      addr_to_region_idx(region_align_up(sr->destination() +
                                         sr->partial_obj_size()));
    const size_t end_idx = addr_to_region_idx(target_end);

    if (TraceParallelOldGCSummaryPhase) {
      gclog_or_tty->print_cr("split:  clearing source_region field in ["
                             SIZE_FORMAT ", " SIZE_FORMAT ")",
                             beg_idx, end_idx);
    }
    for (size_t idx = beg_idx; idx < end_idx; ++idx) {
      _region_data[idx].set_source_region(0);
    }

    // Set split_destination and partial_obj_size to reflect the split region.
    split_destination = sr->destination();
    partial_obj_size = sr->partial_obj_size();
  }

  // The split is recorded only if a partial object extends onto the region.
  if (partial_obj_size != 0) {
    _region_data[split_region].set_partial_obj_size(0);
    split_info.record(split_region, partial_obj_size, split_destination);
  }

  // Setup the continuation addresses.
  *target_next = split_destination + partial_obj_size;
  HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;

  if (TraceParallelOldGCSummaryPhase) {
    const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
    gclog_or_tty->print_cr("%s split:  src=" PTR_FORMAT " src_c=" SIZE_FORMAT
                           " pos=" SIZE_FORMAT,
                           split_type, source_next, split_region,
                           partial_obj_size);
    gclog_or_tty->print_cr("%s split:  dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT
                           " tn=" PTR_FORMAT,
                           split_type, split_destination,
                           addr_to_region_idx(split_destination),
                           *target_next);

    if (partial_obj_size != 0) {
      HeapWord* const po_beg = split_info.destination();
      HeapWord* const po_end = po_beg + split_info.partial_obj_size();
      gclog_or_tty->print_cr("%s split:  "
                             "po_beg=" PTR_FORMAT " " SIZE_FORMAT " "
                             "po_end=" PTR_FORMAT " " SIZE_FORMAT,
                             split_type,
                             po_beg, addr_to_region_idx(po_beg),
                             po_end, addr_to_region_idx(po_end));
    }
  }

  return source_next;
}

bool ParallelCompactData::summarize(SplitInfo& split_info,
                                    HeapWord* source_beg, HeapWord* source_end,
                                    HeapWord** source_next,
                                    HeapWord* target_beg, HeapWord* target_end,
                                    HeapWord** target_next)
{
  if (TraceParallelOldGCSummaryPhase) {
    HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
    tty->print_cr("sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT " "
                  "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
                  source_beg, source_end, source_next_val,
                  target_beg, target_end, *target_next);
  }

  size_t cur_region = addr_to_region_idx(source_beg);
  const size_t end_region = addr_to_region_idx(region_align_up(source_end));

  HeapWord *dest_addr = target_beg;
  while (cur_region < end_region) {
    // The destination must be set even if the region has no data.
    _region_data[cur_region].set_destination(dest_addr);

    size_t words = _region_data[cur_region].data_size();
    if (words > 0) {
      // If cur_region does not fit entirely into the target space, find a point
      // at which the source space can be 'split' so that part is copied to the
      // target space and the rest is copied elsewhere.
      if (dest_addr + words > target_end) {
        assert(source_next != NULL, "source_next is NULL when splitting");
        *source_next = summarize_split_space(cur_region, split_info, dest_addr,
                                             target_end, target_next);
        return false;
      }

      // Compute the destination_count for cur_region, and if necessary, update
      // source_region for a destination region.  The source_region field is
      // updated if cur_region is the first (left-most) region to be copied to a
      // destination region.
      //
      // The destination_count calculation is a bit subtle.  A region that has
      // data that compacts into itself does not count itself as a destination.
      // This maintains the invariant that a zero count means the region is
      // available and can be claimed and then filled.
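      //
      // For instance (illustrative): if the live words of cur_region spill
      // across a destination region boundary, destination_count becomes 2
      // (or 1 when cur_region == dest_region_2, i.e. part of the data
      // compacts into cur_region itself), and dest_region_2 records
      // cur_region as its source_region since cur_region supplies its first
      // word.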
      uint destination_count = 0;
      if (split_info.is_split(cur_region)) {
        // The current region has been split:  the partial object will be copied
        // to one destination space and the remaining data will be copied to
        // another destination space.  Adjust the initial destination_count and,
        // if necessary, set the source_region field if the partial object will
        // cross a destination region boundary.
        destination_count = split_info.destination_count();
        if (destination_count == 2) {
          size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
          _region_data[dest_idx].set_source_region(cur_region);
        }
      }

      HeapWord* const last_addr = dest_addr + words - 1;
      const size_t dest_region_1 = addr_to_region_idx(dest_addr);
      const size_t dest_region_2 = addr_to_region_idx(last_addr);

      // Initially assume that the destination regions will be the same and
      // adjust the value below if necessary.  Under this assumption, if
      // cur_region == dest_region_2, then cur_region will be compacted
      // completely into itself.
      destination_count += cur_region == dest_region_2 ? 0 : 1;
      if (dest_region_1 != dest_region_2) {
        // Destination regions differ; adjust destination_count.
        destination_count += 1;
        // Data from cur_region will be copied to the start of dest_region_2.
        _region_data[dest_region_2].set_source_region(cur_region);
      } else if (region_offset(dest_addr) == 0) {
        // Data from cur_region will be copied to the start of the destination
        // region.
        _region_data[dest_region_1].set_source_region(cur_region);
      }

      _region_data[cur_region].set_destination_count(destination_count);
      _region_data[cur_region].set_data_location(region_to_addr(cur_region));
      dest_addr += words;
    }

    ++cur_region;
  }

  *target_next = dest_addr;
  return true;
}

HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
  assert(addr != NULL, "Should detect NULL oop earlier");
  assert(PSParallelCompact::gc_heap()->is_in(addr), "not in heap");
  assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");

  // Region covering the object.
  RegionData* const region_ptr = addr_to_region_ptr(addr);
  HeapWord* result = region_ptr->destination();

  // If the entire Region is live, the new location is region->destination + the
  // offset of the object within the Region.

  // Run some performance tests to determine if this special case pays off.  It
  // is worth it for pointers into the dense prefix.  If the optimization to
  // avoid pointer updates in regions that only point to the dense prefix is
  // ever implemented, this should be revisited.
  if (region_ptr->data_size() == RegionSize) {
    result += region_offset(addr);
    return result;
  }

  // Otherwise, the new location is region->destination + block offset + the
  // number of live words in the Block that are (a) to the left of addr and (b)
  // due to objects that start in the Block.

  // Fill in the block table if necessary.  This is unsynchronized, so multiple
  // threads may fill the block table for a region (harmless, since it is
  // idempotent).
  if (!region_ptr->blocks_filled()) {
    PSParallelCompact::fill_blocks(addr_to_region_idx(addr));
    region_ptr->set_blocks_filled();
  }

  HeapWord* const search_start = block_align_down(addr);
  const size_t block_offset = addr_to_block_ptr(addr)->offset();

  const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
  const size_t live = bitmap->live_words_in_range(search_start, oop(addr));
  result += block_offset + live;
  DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result));
  return result;
}
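
// An illustrative calc_new_pointer() trace (hypothetical values): for an addr
// in a partly-live region, the result is the region's destination, plus the
// offset() stored for addr's 128-word block, plus the live words counted in
// the mark bitmap between the block start and addr.  With destination 0x1000,
// a block offset of 96 and 10 live words before addr, the object moves to
// 0x1000 + 96 + 10 = 0x106a (HeapWord arithmetic).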

#ifdef  ASSERT
void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
{
  const size_t* const beg = (const size_t*)vspace->committed_low_addr();
  const size_t* const end = (const size_t*)vspace->committed_high_addr();
  for (const size_t* p = beg; p < end; ++p) {
    assert(*p == 0, "not zero");
  }
}

void ParallelCompactData::verify_clear()
{
  verify_clear(_region_vspace);
  verify_clear(_block_vspace);
}
#endif  // #ifdef ASSERT

STWGCTimer          PSParallelCompact::_gc_timer;
ParallelOldTracer   PSParallelCompact::_gc_tracer;
elapsedTimer        PSParallelCompact::_accumulated_time;
unsigned int        PSParallelCompact::_total_invocations = 0;
unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
jlong               PSParallelCompact::_time_of_last_gc = 0;
CollectorCounters*  PSParallelCompact::_counters = NULL;
ParMarkBitMap       PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;

bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }

void PSParallelCompact::KeepAliveClosure::do_oop(oop* p)       { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }

PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;

void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p); }
void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); }

void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }

void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) {
  mark_and_push(_compaction_manager, p);
}
void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }

void PSParallelCompact::FollowKlassClosure::do_klass(Klass* klass) {
  klass->oops_do(_mark_and_push_closure);
}
void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
  klass->oops_do(&PSParallelCompact::_adjust_pointer_closure);
}

void PSParallelCompact::post_initialize() {
  ParallelScavengeHeap* heap = gc_heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  MemRegion mr = heap->reserved_region();
  _ref_processor =
    new ReferenceProcessor(mr,            // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           (int) ParallelGCThreads, // mt processing degree
                           true,          // mt discovery
                           (int) ParallelGCThreads, // mt discovery degree
                           true,          // atomic_discovery
                           &_is_alive_closure); // non-header is alive closure
  _counters = new CollectorCounters("PSParallelCompact", 1);

  // Initialize static fields in ParCompactionManager.
  ParCompactionManager::initialize(mark_bitmap());
}

bool PSParallelCompact::initialize() {
  ParallelScavengeHeap* heap = gc_heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MemRegion mr = heap->reserved_region();
  // Was the old gen allocated successfully?
  if (!heap->old_gen()->is_allocated()) {
    return false;
  }

  initialize_space_info();
  initialize_dead_wood_limiter();

  if (!_mark_bitmap.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate " SIZE_FORMAT "KB bitmaps for parallel "
              "garbage collection for the requested " SIZE_FORMAT "KB heap.",
              _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  if (!_summary_data.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate " SIZE_FORMAT "KB card tables for parallel "
              "garbage collection for the requested " SIZE_FORMAT "KB heap.",
              _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  return true;
}

void PSParallelCompact::initialize_space_info()
{
  memset(&_space_info, 0, sizeof(_space_info));

  ParallelScavengeHeap* heap = gc_heap();
  PSYoungGen* young_gen = heap->young_gen();

  _space_info[old_space_id].set_space(heap->old_gen()->object_space());
  _space_info[eden_space_id].set_space(young_gen->eden_space());
  _space_info[from_space_id].set_space(young_gen->from_space());
  _space_info[to_space_id].set_space(young_gen->to_space());

  _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
}

void PSParallelCompact::initialize_dead_wood_limiter()
{
  const size_t max = 100;
  _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
  _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
  _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
  DEBUG_ONLY(_dwl_initialized = true;)
  _dwl_adjustment = normal_distribution(1.0);
}

// Simple class for storing info about the heap at the start of GC, to be used
// after GC for comparison/printing.
class PreGCValues {
public:
  PreGCValues() { }
  PreGCValues(ParallelScavengeHeap* heap) { fill(heap); }

  void fill(ParallelScavengeHeap* heap) {
    _heap_used      = heap->used();
    _young_gen_used = heap->young_gen()->used_in_bytes();
    _old_gen_used   = heap->old_gen()->used_in_bytes();
    _metadata_used  = MetaspaceAux::used_bytes();
  }

  size_t heap_used() const      { return _heap_used; }
  size_t young_gen_used() const { return _young_gen_used; }
  size_t old_gen_used() const   { return _old_gen_used; }
  size_t metadata_used() const  { return _metadata_used; }

private:
  size_t _heap_used;
  size_t _young_gen_used;
  size_t _old_gen_used;
  size_t _metadata_used;
};

void
PSParallelCompact::clear_data_covering_space(SpaceId id)
{
  // At this point, top is the value before GC, new_top() is the value that will
  // be set at the end of GC.  The marking bitmap is cleared to top; nothing
  // should be marked above top.  The summary data is cleared to the larger of
  // top & new_top.
  MutableSpace* const space = _space_info[id].space();
  HeapWord* const bot = space->bottom();
  HeapWord* const top = space->top();
  HeapWord* const max_top = MAX2(top, _space_info[id].new_top());

  const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
  const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
  _mark_bitmap.clear_range(beg_bit, end_bit);

  const size_t beg_region = _summary_data.addr_to_region_idx(bot);
  const size_t end_region =
    _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
  _summary_data.clear_range(beg_region, end_region);

  // Clear the data used to 'split' regions.
  SplitInfo& split_info = _space_info[id].split_info();
  if (split_info.is_valid()) {
    split_info.clear();
  }
  DEBUG_ONLY(split_info.verify_clear();)
}

void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
{
  // Update the from & to space pointers in space_info, since they are swapped
  // at each young gen gc.  Do the update unconditionally (even though a
  // promotion failure does not swap spaces) because an unknown number of minor
  // collections will have swapped the spaces an unknown number of times.
  GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  ParallelScavengeHeap* heap = gc_heap();
  _space_info[from_space_id].set_space(heap->young_gen()->from_space());
  _space_info[to_space_id].set_space(heap->young_gen()->to_space());

  pre_gc_values->fill(heap);

  DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
  DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)

  // Increment the invocation count
  heap->increment_total_collections(true);

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    heap->old_gen()->verify_object_start_array();
  }

  DEBUG_ONLY(mark_bitmap()->verify_clear();)
  DEBUG_ONLY(summary_data().verify_clear();)

  // Have worker threads release resources the next time they run a task.
  gc_task_manager()->release_all_resources();
}

void PSParallelCompact::post_compact()
{
  GCTraceTime tm("post compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    // Clear the marking bitmap, summary data and split info.
    clear_data_covering_space(SpaceId(id));
    // Update top().  Must be done after clearing the bitmap and summary data.
    _space_info[id].publish_new_top();
  }

  MutableSpace* const eden_space = _space_info[eden_space_id].space();
  MutableSpace* const from_space = _space_info[from_space_id].space();
  MutableSpace* const to_space   = _space_info[to_space_id].space();

  ParallelScavengeHeap* heap = gc_heap();
  bool eden_empty = eden_space->is_empty();
  if (!eden_empty) {
    eden_empty = absorb_live_data_from_eden(heap->size_policy(),
                                            heap->young_gen(), heap->old_gen());
  }

  // Update heap occupancy information which is used as input to the soft ref
  // clearing policy at the next gc.
  Universe::update_heap_info_at_gc();

  bool young_gen_empty = eden_empty && from_space->is_empty() &&
    to_space->is_empty();

  BarrierSet* bs = heap->barrier_set();
  if (bs->is_a(BarrierSet::ModRef)) {
    ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
    MemRegion old_mr = heap->old_gen()->reserved();

    if (young_gen_empty) {
      modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
    } else {
      modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
    }
  }

  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  ClassLoaderDataGraph::purge();
  MetaspaceAux::verify_metrics();

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  ref_processor()->enqueue_discovered_references(NULL);

  if (ZapUnusedHeapArea) {
    heap->gen_mangle_unused_area();
  }

  // Update time of last GC
  reset_millis_since_last_gc();
}

HeapWord*
PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
                                                    bool maximum_compaction)
{
  const size_t region_size = ParallelCompactData::RegionSize;
  const ParallelCompactData& sd = summary_data();

  const MutableSpace* const space = _space_info[id].space();
  HeapWord* const top_aligned_up = sd.region_align_up(space->top());
  const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom());
  const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up);

  // Skip full regions at the beginning of the space--they are necessarily part
  // of the dense prefix.
  size_t full_count = 0;
  const RegionData* cp;
  for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) {
    ++full_count;
  }

  assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
  const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
  const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
  if (maximum_compaction || cp == end_cp || interval_ended) {
    _maximum_compaction_gc_num = total_invocations();
    return sd.region_to_addr(cp);
  }

  HeapWord* const new_top = _space_info[id].new_top();
  const size_t space_live = pointer_delta(new_top, space->bottom());
  const size_t space_used = space->used_in_words();
  const size_t space_capacity = space->capacity_in_words();

  const double cur_density = double(space_live) / space_capacity;
  const double deadwood_density =
    (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
  const size_t deadwood_goal = size_t(space_capacity * deadwood_density);

  if (TraceParallelOldGCDensePrefix) {
    tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
                  cur_density, deadwood_density, deadwood_goal);
    tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
                  "space_cap=" SIZE_FORMAT,
                  space_live, space_used,
                  space_capacity);
  }

  // XXX - Use binary search?
  HeapWord* dense_prefix = sd.region_to_addr(cp);
  const RegionData* full_cp = cp;
  const RegionData* const top_cp = sd.addr_to_region_ptr(space->top() - 1);
  while (cp < end_cp) {
    HeapWord* region_destination = cp->destination();
    const size_t cur_deadwood = pointer_delta(dense_prefix, region_destination);
    if (TraceParallelOldGCDensePrefix && Verbose) {
      tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
                    "dp=" SIZE_FORMAT_W(8) " " "cdw=" SIZE_FORMAT_W(8),
                    sd.region(cp), region_destination,
                    dense_prefix, cur_deadwood);
    }

    if (cur_deadwood >= deadwood_goal) {
      // Found the region that has the correct amount of deadwood to the left.
      // This typically occurs after crossing a fairly sparse set of regions, so
      // iterate backwards over those sparse regions, looking for the region
      // that has the lowest density of live objects 'to the right.'
      size_t space_to_left = sd.region(cp) * region_size;
      size_t live_to_left = space_to_left - cur_deadwood;
      size_t space_to_right = space_capacity - space_to_left;
      size_t live_to_right = space_live - live_to_left;
      double density_to_right = double(live_to_right) / space_to_right;
      while (cp > full_cp) {
        --cp;
        const size_t prev_region_live_to_right = live_to_right -
          cp->data_size();
        const size_t prev_region_space_to_right = space_to_right + region_size;
        double prev_region_density_to_right =
          double(prev_region_live_to_right) / prev_region_space_to_right;
        if (density_to_right <= prev_region_density_to_right) {
          return dense_prefix;
        }
        if (TraceParallelOldGCDensePrefix && Verbose) {
          tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
                        "pc_d2r=%10.8f", sd.region(cp), density_to_right,
                        prev_region_density_to_right);
        }
        dense_prefix -= region_size;
        live_to_right = prev_region_live_to_right;
        space_to_right = prev_region_space_to_right;
        density_to_right = prev_region_density_to_right;
      }
      return dense_prefix;
    }

    dense_prefix += region_size;
    ++cp;
  }

  return dense_prefix;
}

#ifndef PRODUCT
void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
                                                 const SpaceId id,
                                                 const bool maximum_compaction,
                                                 HeapWord* const addr)
{
  const size_t region_idx = summary_data().addr_to_region_idx(addr);
  RegionData* const cp = summary_data().region(region_idx);
  const MutableSpace* const space = _space_info[id].space();
  HeapWord* const new_top = _space_info[id].new_top();

  const size_t space_live = pointer_delta(new_top, space->bottom());
  const size_t dead_to_left = pointer_delta(addr, cp->destination());
  const size_t space_cap = space->capacity_in_words();
  const double dead_to_left_pct = double(dead_to_left) / space_cap;
  const size_t live_to_right = new_top - cp->destination();
  const size_t dead_to_right = space->top() - addr - live_to_right;

  tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
                "spl=" SIZE_FORMAT " "
                "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
                "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
                " ratio=%10.8f",
                algorithm, addr, region_idx,
                space_live,
                dead_to_left, dead_to_left_pct,
                dead_to_right, live_to_right,
                double(dead_to_right) / live_to_right);
}
#endif  // #ifndef PRODUCT

// Return a fraction indicating how much of the generation can be treated as
// "dead wood" (i.e., not reclaimed).  The function uses a normal distribution
// based on the density of live objects in the generation to determine a limit,
// which is then adjusted so the return value is min_percent when the density is
// 1.
//
// The following table shows some return values for different values of the
// standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
// min_percent is 1.
//
//                          fraction allowed as dead wood
//         -----------------------------------------------------------------
// density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
// ------- ---------- ---------- ---------- ---------- ---------- ----------
// 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
// 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
// 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
// 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
// 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
// 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
// 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
// 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
// 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
// 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
// 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
// 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
// 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
// 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
// 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
// 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
// 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
// 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
// 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
// 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
// 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
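//
// In symbols (a sketch matching the code below, not a verbatim spec): with
// mean m = _dwl_mean and standard deviation s = _dwl_std_dev (both stored as
// fractions of 1), the raw limit at density x is the normal density
//
//   f(x) = 1 / (sqrt(2*pi) * s) * e^(-(x - m)^2 / (2 * s^2))
//
// and the value returned is max(f(x) - f(1.0) + min_percent/100, 0).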

double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
{
  assert(_dwl_initialized, "uninitialized");

  // The raw limit is the value of the normal distribution at x = density.
  const double raw_limit = normal_distribution(density);

  // Adjust the raw limit so it becomes the minimum when the density is 1.
  //
  // First subtract the adjustment value (which is simply the precomputed value
  // normal_distribution(1.0)); this yields a value of 0 when the density is 1.
  // Then add the minimum value, so the minimum is returned when the density is
  // 1.  Finally, prevent negative values, which occur when the mean is not 0.5.
  const double min = double(min_percent) / 100.0;
  const double limit = raw_limit - _dwl_adjustment + min;
  return MAX2(limit, 0.0);
}

ParallelCompactData::RegionData*
PSParallelCompact::first_dead_space_region(const RegionData* beg,
                                           const RegionData* end)
{
  const size_t region_size = ParallelCompactData::RegionSize;
  ParallelCompactData& sd = summary_data();
  size_t left = sd.region(beg);
  size_t right = end > beg ? sd.region(end) - 1 : left;

  // Binary search.
  while (left < right) {
    // Equivalent to (left + right) / 2, but does not overflow.
    const size_t middle = left + (right - left) / 2;
    RegionData* const middle_ptr = sd.region(middle);
    HeapWord* const dest = middle_ptr->destination();
    HeapWord* const addr = sd.region_to_addr(middle);
    assert(dest != NULL, "sanity");
    assert(dest <= addr, "must move left");

    if (middle > left && dest < addr) {
      right = middle - 1;
    } else if (middle < right && middle_ptr->data_size() == region_size) {
      left = middle + 1;
    } else {
      return middle_ptr;
    }
  }
  return sd.region(left);
}
1301
1302 ParallelCompactData::RegionData*
dead_wood_limit_region(const RegionData * beg,const RegionData * end,size_t dead_words)1303 PSParallelCompact::dead_wood_limit_region(const RegionData* beg,
1304 const RegionData* end,
1305 size_t dead_words)
1306 {
1307 ParallelCompactData& sd = summary_data();
1308 size_t left = sd.region(beg);
1309 size_t right = end > beg ? sd.region(end) - 1 : left;
1310
1311 // Binary search.
1312 while (left < right) {
1313 // Equivalent to (left + right) / 2, but does not overflow.
1314 const size_t middle = left + (right - left) / 2;
1315 RegionData* const middle_ptr = sd.region(middle);
1316 HeapWord* const dest = middle_ptr->destination();
1317 HeapWord* const addr = sd.region_to_addr(middle);
1318 assert(dest != NULL, "sanity");
1319 assert(dest <= addr, "must move left");
1320
1321 const size_t dead_to_left = pointer_delta(addr, dest);
1322 if (middle > left && dead_to_left > dead_words) {
1323 right = middle - 1;
1324 } else if (middle < right && dead_to_left < dead_words) {
1325 left = middle + 1;
1326 } else {
1327 return middle_ptr;
1328 }
1329 }
1330 return sd.region(left);
1331 }
1332
1333 // The result is valid during the summary phase, after the initial summarization
1334 // of each space into itself, and before final summarization.
1335 inline double
reclaimed_ratio(const RegionData * const cp,HeapWord * const bottom,HeapWord * const top,HeapWord * const new_top)1336 PSParallelCompact::reclaimed_ratio(const RegionData* const cp,
1337 HeapWord* const bottom,
1338 HeapWord* const top,
1339 HeapWord* const new_top)
1340 {
1341 ParallelCompactData& sd = summary_data();
1342
1343 assert(cp != NULL, "sanity");
1344 assert(bottom != NULL, "sanity");
1345 assert(top != NULL, "sanity");
1346 assert(new_top != NULL, "sanity");
1347 assert(top >= new_top, "summary data problem?");
1348 assert(new_top > bottom, "space is empty; should not be here");
1349 assert(new_top >= cp->destination(), "sanity");
1350 assert(top >= sd.region_to_addr(cp), "sanity");
1351
1352 HeapWord* const destination = cp->destination();
1353 const size_t dense_prefix_live = pointer_delta(destination, bottom);
1354 const size_t compacted_region_live = pointer_delta(new_top, destination);
1355 const size_t compacted_region_used = pointer_delta(top,
1356 sd.region_to_addr(cp));
1357 const size_t reclaimable = compacted_region_used - compacted_region_live;
1358
1359 const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
1360 return double(reclaimable) / divisor;
1361 }
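// Illustrative numbers (not from the source): with dense_prefix_live = 100
// words, compacted_region_live = 200 and compacted_region_used = 300, the
// ratio is (300 - 200) / (100 + 1.25 * 200) = 100 / 350, about 0.29. The
// 1.25 factor weights live words in the compacted region more heavily,
// biasing the choice against copying large amounts of live data.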
1362
1363 // Return the address of the end of the dense prefix, a.k.a. the start of the
1364 // compacted region. The address is always on a region boundary.
1365 //
1366 // Completely full regions at the left are skipped, since no compaction can
1367 // occur in those regions. Then the maximum amount of dead wood to allow is
1368 // computed, based on the density (amount live / capacity) of the generation;
1369 // the region with approximately that amount of dead space to the left is
1370 // identified as the limit region. Regions between the last completely full
1371 // region and the limit region are scanned and the one that has the best
1372 // (maximum) reclaimed_ratio() is selected.
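// Illustrative picture (not to scale):
//
//   bottom                                                    top
//   | full | full | mixed | mixed | mixed | ... | live tail |
//          ^full_cp                ^limit_cp
//
// The candidates scanned for the best reclaimed_ratio() lie in
// [full_cp, limit_cp); everything to the left of full_cp is already dense.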
1373 HeapWord*
1374 PSParallelCompact::compute_dense_prefix(const SpaceId id,
1375 bool maximum_compaction)
1376 {
1377 if (ParallelOldGCSplitALot) {
1378 if (_space_info[id].dense_prefix() != _space_info[id].space()->bottom()) {
1379 // The value was chosen to provoke splitting a young gen space; use it.
1380 return _space_info[id].dense_prefix();
1381 }
1382 }
1383
1384 const size_t region_size = ParallelCompactData::RegionSize;
1385 const ParallelCompactData& sd = summary_data();
1386
1387 const MutableSpace* const space = _space_info[id].space();
1388 HeapWord* const top = space->top();
1389 HeapWord* const top_aligned_up = sd.region_align_up(top);
1390 HeapWord* const new_top = _space_info[id].new_top();
1391 HeapWord* const new_top_aligned_up = sd.region_align_up(new_top);
1392 HeapWord* const bottom = space->bottom();
1393 const RegionData* const beg_cp = sd.addr_to_region_ptr(bottom);
1394 const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
1395 const RegionData* const new_top_cp =
1396 sd.addr_to_region_ptr(new_top_aligned_up);
1397
1398 // Skip full regions at the beginning of the space--they are necessarily part
1399 // of the dense prefix.
1400 const RegionData* const full_cp = first_dead_space_region(beg_cp, new_top_cp);
1401 assert(full_cp->destination() == sd.region_to_addr(full_cp) ||
1402 space->is_empty(), "no dead space allowed to the left");
1403 assert(full_cp->data_size() < region_size || full_cp == new_top_cp - 1,
1404 "region must have dead space");
1405
1406 // The gc number is saved whenever a maximum compaction is done, and used to
1407 // determine when the maximum compaction interval has expired. This avoids
1408 // successive max compactions for different reasons.
1409 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1410 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1411 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
1412 total_invocations() == HeapFirstMaximumCompactionCount;
1413 if (maximum_compaction || full_cp == top_cp || interval_ended) {
1414 _maximum_compaction_gc_num = total_invocations();
1415 return sd.region_to_addr(full_cp);
1416 }
1417
1418 const size_t space_live = pointer_delta(new_top, bottom);
1419 const size_t space_used = space->used_in_words();
1420 const size_t space_capacity = space->capacity_in_words();
1421
1422 const double density = double(space_live) / double(space_capacity);
1423 const size_t min_percent_free = MarkSweepDeadRatio;
1424 const double limiter = dead_wood_limiter(density, min_percent_free);
1425 const size_t dead_wood_max = space_used - space_live;
1426 const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
1427 dead_wood_max);
1428
1429 if (TraceParallelOldGCDensePrefix) {
1430 tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
1431 "space_cap=" SIZE_FORMAT,
1432 space_live, space_used,
1433 space_capacity);
1434 tty->print_cr("dead_wood_limiter(%6.4f, %d)=%6.4f "
1435 "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
1436 density, min_percent_free, limiter,
1437 dead_wood_max, dead_wood_limit);
1438 }
1439
1440 // Locate the region with the desired amount of dead space to the left.
1441 const RegionData* const limit_cp =
1442 dead_wood_limit_region(full_cp, top_cp, dead_wood_limit);
1443
1444 // Scan from the first region with dead space to the limit region and find the
1445 // one with the best (largest) reclaimed ratio.
1446 double best_ratio = 0.0;
1447 const RegionData* best_cp = full_cp;
1448 for (const RegionData* cp = full_cp; cp < limit_cp; ++cp) {
1449 double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
1450 if (tmp_ratio > best_ratio) {
1451 best_cp = cp;
1452 best_ratio = tmp_ratio;
1453 }
1454 }
1455
1456 #if 0
1457 // Something to consider: if the region with the best ratio is 'close to' the
1458 // first region w/free space, choose the first region with free space
1459 // ("first-free"). The first-free region is usually near the start of the
1460 // heap, which means we are copying most of the heap already, so copy a bit
1461 // more to get complete compaction.
1462 if (pointer_delta(best_cp, full_cp, sizeof(RegionData)) < 4) {
1463 _maximum_compaction_gc_num = total_invocations();
1464 best_cp = full_cp;
1465 }
1466 #endif // #if 0
1467
1468 return sd.region_to_addr(best_cp);
1469 }
1470
1471 #ifndef PRODUCT
1472 void
1473 PSParallelCompact::fill_with_live_objects(SpaceId id, HeapWord* const start,
1474 size_t words)
1475 {
1476 if (TraceParallelOldGCSummaryPhase) {
1477 tty->print_cr("fill_with_live_objects [" PTR_FORMAT " " PTR_FORMAT ") "
1478 SIZE_FORMAT, start, start + words, words);
1479 }
1480
1481 ObjectStartArray* const start_array = _space_info[id].start_array();
1482 CollectedHeap::fill_with_objects(start, words);
1483 for (HeapWord* p = start; p < start + words; p += oop(p)->size()) {
1484 _mark_bitmap.mark_obj(p, oop(p)->size());
1485 _summary_data.add_obj(p, oop(p)->size());
1486 start_array->allocate_block(p);
1487 }
1488 }
1489
1490 void
1491 PSParallelCompact::summarize_new_objects(SpaceId id, HeapWord* start)
1492 {
1493 ParallelCompactData& sd = summary_data();
1494 MutableSpace* space = _space_info[id].space();
1495
1496 // Find the source and destination start addresses.
1497 HeapWord* const src_addr = sd.region_align_down(start);
1498 HeapWord* dst_addr;
1499 if (src_addr < start) {
1500 dst_addr = sd.addr_to_region_ptr(src_addr)->destination();
1501 } else if (src_addr > space->bottom()) {
1502 // The start (the original top() value) is aligned to a region boundary so
1503 // the associated region does not have a destination. Compute the
1504 // destination from the previous region.
1505 RegionData* const cp = sd.addr_to_region_ptr(src_addr) - 1;
1506 dst_addr = cp->destination() + cp->data_size();
1507 } else {
1508 // Filling the entire space.
1509 dst_addr = space->bottom();
1510 }
1511 assert(dst_addr != NULL, "sanity");
1512
1513 // Update the summary data.
1514 bool result = _summary_data.summarize(_space_info[id].split_info(),
1515 src_addr, space->top(), NULL,
1516 dst_addr, space->end(),
1517 _space_info[id].new_top_addr());
1518 assert(result, "should not fail: bad filler object size");
1519 }
1520
1521 void
1522 PSParallelCompact::provoke_split_fill_survivor(SpaceId id)
1523 {
1524 if (total_invocations() % (ParallelOldGCSplitInterval * 3) != 0) {
1525 return;
1526 }
1527
1528 MutableSpace* const space = _space_info[id].space();
1529 if (space->is_empty()) {
1530 HeapWord* b = space->bottom();
1531 HeapWord* t = b + space->capacity_in_words() / 2;
1532 space->set_top(t);
1533 if (ZapUnusedHeapArea) {
1534 space->set_top_for_allocations();
1535 }
1536
1537 size_t min_size = CollectedHeap::min_fill_size();
1538 size_t obj_len = min_size;
1539 while (b + obj_len <= t) {
1540 CollectedHeap::fill_with_object(b, obj_len);
1541 mark_bitmap()->mark_obj(b, obj_len);
1542 summary_data().add_obj(b, obj_len);
1543 b += obj_len;
1544 obj_len = (obj_len & (min_size*3)) + min_size; // 8 16 24 32 8 16 24 32 ...
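      // With min_size == 8 (an illustrative value), the mask arithmetic
      // cycles: 8 & 24 = 8 -> 16, 16 & 24 = 16 -> 24, 24 & 24 = 24 -> 32,
      // 32 & 24 = 0 -> 8. The same m, 2m, 3m, 4m cycle holds for any
      // power-of-two min_size m.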
1545 }
1546 if (b < t) {
1547 // The loop didn't completely fill to t (top); adjust top downward.
1548 space->set_top(b);
1549 if (ZapUnusedHeapArea) {
1550 space->set_top_for_allocations();
1551 }
1552 }
1553
1554 HeapWord** nta = _space_info[id].new_top_addr();
1555 bool result = summary_data().summarize(_space_info[id].split_info(),
1556 space->bottom(), space->top(), NULL,
1557 space->bottom(), space->end(), nta);
1558 assert(result, "space must fit into itself");
1559 }
1560 }
1561
1562 void
1563 PSParallelCompact::provoke_split(bool & max_compaction)
1564 {
1565 if (total_invocations() % ParallelOldGCSplitInterval != 0) {
1566 return;
1567 }
1568
1569 const size_t region_size = ParallelCompactData::RegionSize;
1570 ParallelCompactData& sd = summary_data();
1571
1572 MutableSpace* const eden_space = _space_info[eden_space_id].space();
1573 MutableSpace* const from_space = _space_info[from_space_id].space();
1574 const size_t eden_live = pointer_delta(eden_space->top(),
1575 _space_info[eden_space_id].new_top());
1576 const size_t from_live = pointer_delta(from_space->top(),
1577 _space_info[from_space_id].new_top());
1578
1579 const size_t min_fill_size = CollectedHeap::min_fill_size();
1580 const size_t eden_free = pointer_delta(eden_space->end(), eden_space->top());
1581 const size_t eden_fillable = eden_free >= min_fill_size ? eden_free : 0;
1582 const size_t from_free = pointer_delta(from_space->end(), from_space->top());
1583 const size_t from_fillable = from_free >= min_fill_size ? from_free : 0;
1584
1585 // Choose the space to split; need at least 2 regions live (or fillable).
1586 SpaceId id;
1587 MutableSpace* space;
1588 size_t live_words;
1589 size_t fill_words;
1590 if (eden_live + eden_fillable >= region_size * 2) {
1591 id = eden_space_id;
1592 space = eden_space;
1593 live_words = eden_live;
1594 fill_words = eden_fillable;
1595 } else if (from_live + from_fillable >= region_size * 2) {
1596 id = from_space_id;
1597 space = from_space;
1598 live_words = from_live;
1599 fill_words = from_fillable;
1600 } else {
1601 return; // Give up.
1602 }
1603 assert(fill_words == 0 || fill_words >= min_fill_size, "sanity");
1604
1605 if (live_words < region_size * 2) {
1606 // Fill from top() to end() w/live objects of mixed sizes.
1607 HeapWord* const fill_start = space->top();
1608 live_words += fill_words;
1609
1610 space->set_top(fill_start + fill_words);
1611 if (ZapUnusedHeapArea) {
1612 space->set_top_for_allocations();
1613 }
1614
1615 HeapWord* cur_addr = fill_start;
1616 while (fill_words > 0) {
1617 const size_t r = (size_t)os::random() % (region_size / 2) + min_fill_size;
1618 size_t cur_size = MIN2(align_object_size_(r), fill_words);
1619 if (fill_words - cur_size < min_fill_size) {
1620 cur_size = fill_words; // Avoid leaving a fragment too small to fill.
1621 }
1622
1623 CollectedHeap::fill_with_object(cur_addr, cur_size);
1624 mark_bitmap()->mark_obj(cur_addr, cur_size);
1625 sd.add_obj(cur_addr, cur_size);
1626
1627 cur_addr += cur_size;
1628 fill_words -= cur_size;
1629 }
1630
1631 summarize_new_objects(id, fill_start);
1632 }
1633
1634 max_compaction = false;
1635
1636 // Manipulate the old gen so that it has room for about half of the live data
1637 // in the target young gen space (live_words / 2).
1638 id = old_space_id;
1639 space = _space_info[id].space();
1640 const size_t free_at_end = space->free_in_words();
1641 const size_t free_target = align_object_size(live_words / 2);
1642 const size_t dead = pointer_delta(space->top(), _space_info[id].new_top());
1643
1644 if (free_at_end >= free_target + min_fill_size) {
1645 // Fill space above top() and set the dense prefix so everything survives.
1646 HeapWord* const fill_start = space->top();
1647 const size_t fill_size = free_at_end - free_target;
1648 space->set_top(space->top() + fill_size);
1649 if (ZapUnusedHeapArea) {
1650 space->set_top_for_allocations();
1651 }
1652 fill_with_live_objects(id, fill_start, fill_size);
1653 summarize_new_objects(id, fill_start);
1654 _space_info[id].set_dense_prefix(sd.region_align_down(space->top()));
1655 } else if (dead + free_at_end > free_target) {
1656 // Find a dense prefix that makes the right amount of space available.
1657 HeapWord* cur = sd.region_align_down(space->top());
1658 HeapWord* cur_destination = sd.addr_to_region_ptr(cur)->destination();
1659 size_t dead_to_right = pointer_delta(space->end(), cur_destination);
1660 while (dead_to_right < free_target) {
1661 cur -= region_size;
1662 cur_destination = sd.addr_to_region_ptr(cur)->destination();
1663 dead_to_right = pointer_delta(space->end(), cur_destination);
1664 }
1665 _space_info[id].set_dense_prefix(cur);
1666 }
1667 }
1668 #endif // #ifndef PRODUCT
1669
1670 void PSParallelCompact::summarize_spaces_quick()
1671 {
1672 for (unsigned int i = 0; i < last_space_id; ++i) {
1673 const MutableSpace* space = _space_info[i].space();
1674 HeapWord** nta = _space_info[i].new_top_addr();
1675 bool result = _summary_data.summarize(_space_info[i].split_info(),
1676 space->bottom(), space->top(), NULL,
1677 space->bottom(), space->end(), nta);
1678 assert(result, "space must fit into itself");
1679 _space_info[i].set_dense_prefix(space->bottom());
1680 }
1681
1682 #ifndef PRODUCT
1683 if (ParallelOldGCSplitALot) {
1684 provoke_split_fill_survivor(to_space_id);
1685 }
1686 #endif // #ifndef PRODUCT
1687 }
1688
1689 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
1690 {
1691 HeapWord* const dense_prefix_end = dense_prefix(id);
1692 const RegionData* region = _summary_data.addr_to_region_ptr(dense_prefix_end);
1693 const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
1694 if (dead_space_crosses_boundary(region, dense_prefix_bit)) {
1695 // Only enough dead space is filled so that any remaining dead space to the
1696 // left is larger than the minimum filler object. (The remainder is filled
1697 // during the copy/update phase.)
1698 //
1699 // The size of the dead space to the right of the boundary is not a
1700 // concern, since compaction will be able to use whatever space is
1701 // available.
1702 //
1703 // Here '||' is the boundary, 'x' represents a don't care bit and a box
1704 // surrounds the space to be filled with an object.
1705 //
1706 // In the 32-bit VM, each bit represents two 32-bit words:
1707 // +---+
1708 // a) beg_bits: ... x x x | 0 | || 0 x x ...
1709 // end_bits: ... x x x | 0 | || 0 x x ...
1710 // +---+
1711 //
1712 // In the 64-bit VM, each bit represents one 64-bit word:
1713 // +------------+
1714 // b) beg_bits: ... x x x | 0 || 0 | x x ...
1715 // end_bits: ... x x 1 | 0 || 0 | x x ...
1716 // +------------+
1717 // +-------+
1718 // c) beg_bits: ... x x | 0 0 | || 0 x x ...
1719 // end_bits: ... x 1 | 0 0 | || 0 x x ...
1720 // +-------+
1721 // +-----------+
1722 // d) beg_bits: ... x | 0 0 0 | || 0 x x ...
1723 // end_bits: ... 1 | 0 0 0 | || 0 x x ...
1724 // +-----------+
1725 // +-------+
1726 // e) beg_bits: ... 0 0 | 0 0 | || 0 x x ...
1727 // end_bits: ... 0 0 | 0 0 | || 0 x x ...
1728 // +-------+
1729
1730 // Initially assume case a, c or e will apply.
1731 size_t obj_len = CollectedHeap::min_fill_size();
1732 HeapWord* obj_beg = dense_prefix_end - obj_len;
1733
1734 #ifdef _LP64
1735 if (MinObjAlignment > 1) { // object alignment > heap word size
1736 // Cases a, c or e.
1737 } else if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
1738 // Case b above.
1739 obj_beg = dense_prefix_end - 1;
1740 } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
1741 _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
1742 // Case d above.
1743 obj_beg = dense_prefix_end - 3;
1744 obj_len = 3;
1745 }
1746 #endif // #ifdef _LP64
1747
1748 CollectedHeap::fill_with_object(obj_beg, obj_len);
1749 _mark_bitmap.mark_obj(obj_beg, obj_len);
1750 _summary_data.add_obj(obj_beg, obj_len);
1751 assert(start_array(id) != NULL, "sanity");
1752 start_array(id)->allocate_block(obj_beg);
1753 }
1754 }
1755
1756 void
1757 PSParallelCompact::clear_source_region(HeapWord* beg_addr, HeapWord* end_addr)
1758 {
1759 RegionData* const beg_ptr = _summary_data.addr_to_region_ptr(beg_addr);
1760 HeapWord* const end_aligned_up = _summary_data.region_align_up(end_addr);
1761 RegionData* const end_ptr = _summary_data.addr_to_region_ptr(end_aligned_up);
1762 for (RegionData* cur = beg_ptr; cur < end_ptr; ++cur) {
1763 cur->set_source_region(0);
1764 }
1765 }
1766
1767 void
1768 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
1769 {
1770 assert(id < last_space_id, "id out of range");
1771 assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom() ||
1772 ParallelOldGCSplitALot && id == old_space_id,
1773 "should have been reset in summarize_spaces_quick()");
1774
1775 const MutableSpace* space = _space_info[id].space();
1776 if (_space_info[id].new_top() != space->bottom()) {
1777 HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
1778 _space_info[id].set_dense_prefix(dense_prefix_end);
1779
1780 #ifndef PRODUCT
1781 if (TraceParallelOldGCDensePrefix) {
1782 print_dense_prefix_stats("ratio", id, maximum_compaction,
1783 dense_prefix_end);
1784 HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
1785 print_dense_prefix_stats("density", id, maximum_compaction, addr);
1786 }
1787 #endif // #ifndef PRODUCT
1788
1789 // Recompute the summary data, taking into account the dense prefix. If
1790 // every last byte will be reclaimed, then the existing summary data which
1791 // compacts everything can be left in place.
1792 if (!maximum_compaction && dense_prefix_end != space->bottom()) {
1793 // If dead space crosses the dense prefix boundary, it is (at least
1794 // partially) filled with a dummy object, marked live and added to the
1795 // summary data. This simplifies the copy/update phase and must be done
1796 // before the final locations of objects are determined, to prevent
1797 // leaving a fragment of dead space that is too small to fill.
1798 fill_dense_prefix_end(id);
1799
1800 // Compute the destination of each Region, and thus each object.
1801 _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
1802 _summary_data.summarize(_space_info[id].split_info(),
1803 dense_prefix_end, space->top(), NULL,
1804 dense_prefix_end, space->end(),
1805 _space_info[id].new_top_addr());
1806 }
1807 }
1808
1809 if (TraceParallelOldGCSummaryPhase) {
1810 const size_t region_size = ParallelCompactData::RegionSize;
1811 HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
1812 const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
1813 const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
1814 HeapWord* const new_top = _space_info[id].new_top();
1815 const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
1816 const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
1817 tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
1818 "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
1819 "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
1820 id, space->capacity_in_words(), dense_prefix_end,
1821 dp_region, dp_words / region_size,
1822 cr_words / region_size, new_top);
1823 }
1824 }
1825
1826 #ifndef PRODUCT
1827 void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
1828 HeapWord* dst_beg, HeapWord* dst_end,
1829 SpaceId src_space_id,
1830 HeapWord* src_beg, HeapWord* src_end)
1831 {
1832 if (TraceParallelOldGCSummaryPhase) {
1833 tty->print_cr("summarizing %d [%s] into %d [%s]: "
1834 "src=" PTR_FORMAT "-" PTR_FORMAT " "
1835 SIZE_FORMAT "-" SIZE_FORMAT " "
1836 "dst=" PTR_FORMAT "-" PTR_FORMAT " "
1837 SIZE_FORMAT "-" SIZE_FORMAT,
1838 src_space_id, space_names[src_space_id],
1839 dst_space_id, space_names[dst_space_id],
1840 src_beg, src_end,
1841 _summary_data.addr_to_region_idx(src_beg),
1842 _summary_data.addr_to_region_idx(src_end),
1843 dst_beg, dst_end,
1844 _summary_data.addr_to_region_idx(dst_beg),
1845 _summary_data.addr_to_region_idx(dst_end));
1846 }
1847 }
1848 #endif // #ifndef PRODUCT
1849
1850 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
1851 bool maximum_compaction)
1852 {
1853 GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
1854 // trace("2");
1855
1856 #ifdef ASSERT
1857 if (TraceParallelOldGCMarkingPhase) {
1858 tty->print_cr("add_obj_count=" SIZE_FORMAT " "
1859 "add_obj_bytes=" SIZE_FORMAT,
1860 add_obj_count, add_obj_size * HeapWordSize);
1861 tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
1862 "mark_bitmap_bytes=" SIZE_FORMAT,
1863 mark_bitmap_count, mark_bitmap_size * HeapWordSize);
1864 }
1865 #endif // #ifdef ASSERT
1866
1867 // Quick summarization of each space into itself, to see how much is live.
1868 summarize_spaces_quick();
1869
1870 if (TraceParallelOldGCSummaryPhase) {
1871 tty->print_cr("summary_phase: after summarizing each space to self");
1872 Universe::print();
1873 NOT_PRODUCT(print_region_ranges());
1874 if (Verbose) {
1875 NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1876 }
1877 }
1878
1879 // The amount of live data that will end up in old space (assuming it fits).
1880 size_t old_space_total_live = 0;
1881 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1882 old_space_total_live += pointer_delta(_space_info[id].new_top(),
1883 _space_info[id].space()->bottom());
1884 }
1885
1886 MutableSpace* const old_space = _space_info[old_space_id].space();
1887 const size_t old_capacity = old_space->capacity_in_words();
1888 if (old_space_total_live > old_capacity) {
1889 // XXX - should also try to expand
1890 maximum_compaction = true;
1891 }
1892 #ifndef PRODUCT
1893 if (ParallelOldGCSplitALot && old_space_total_live < old_capacity) {
1894 provoke_split(maximum_compaction);
1895 }
1896 #endif // #ifndef PRODUCT
1897
1898 // Old generations.
1899 summarize_space(old_space_id, maximum_compaction);
1900
1901 // Summarize the remaining spaces in the young gen. The initial target space
1902 // is the old gen. If a space does not fit entirely into the target, then the
1903 // remainder is compacted into the space itself and that space becomes the new
1904 // target.
1905 SpaceId dst_space_id = old_space_id;
1906 HeapWord* dst_space_end = old_space->end();
1907 HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
1908 for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
1909 const MutableSpace* space = _space_info[id].space();
1910 const size_t live = pointer_delta(_space_info[id].new_top(),
1911 space->bottom());
1912 const size_t available = pointer_delta(dst_space_end, *new_top_addr);
1913
1914 NOT_PRODUCT(summary_phase_msg(dst_space_id, *new_top_addr, dst_space_end,
1915 SpaceId(id), space->bottom(), space->top());)
1916 if (live > 0 && live <= available) {
1917 // All the live data will fit.
1918 bool done = _summary_data.summarize(_space_info[id].split_info(),
1919 space->bottom(), space->top(),
1920 NULL,
1921 *new_top_addr, dst_space_end,
1922 new_top_addr);
1923 assert(done, "space must fit into old gen");
1924
1925 // Reset the new_top value for the space.
1926 _space_info[id].set_new_top(space->bottom());
1927 } else if (live > 0) {
1928 // Attempt to fit part of the source space into the target space.
1929 HeapWord* next_src_addr = NULL;
1930 bool done = _summary_data.summarize(_space_info[id].split_info(),
1931 space->bottom(), space->top(),
1932 &next_src_addr,
1933 *new_top_addr, dst_space_end,
1934 new_top_addr);
1935 assert(!done, "space should not fit into old gen");
1936 assert(next_src_addr != NULL, "sanity");
1937
1938 // The source space becomes the new target, so the remainder is compacted
1939 // within the space itself.
1940 dst_space_id = SpaceId(id);
1941 dst_space_end = space->end();
1942 new_top_addr = _space_info[id].new_top_addr();
1943 NOT_PRODUCT(summary_phase_msg(dst_space_id,
1944 space->bottom(), dst_space_end,
1945 SpaceId(id), next_src_addr, space->top());)
1946 done = _summary_data.summarize(_space_info[id].split_info(),
1947 next_src_addr, space->top(),
1948 NULL,
1949 space->bottom(), dst_space_end,
1950 new_top_addr);
1951 assert(done, "space must fit when compacted into itself");
1952 assert(*new_top_addr <= space->top(), "usage should not grow");
1953 }
1954 }
1955
1956 if (TraceParallelOldGCSummaryPhase) {
1957 tty->print_cr("summary_phase: after final summarization");
1958 Universe::print();
1959 NOT_PRODUCT(print_region_ranges());
1960 if (Verbose) {
1961 NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info));
1962 }
1963 }
1964 }
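// A sketch of the cascade above: if eden's live data does not fit in the
// old gen, the part that fits is summarized into the old gen, eden becomes
// the new target, and the remainder compacts into eden itself; the same
// rule is then applied to the from and to spaces in turn.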
1965
1966 // This method should contain all heap-specific policy for invoking a full
1967 // collection. invoke_no_policy() will only attempt to compact the heap; it
1968 // will do nothing further. If we need to bail out for policy reasons, scavenge
1969 // before full gc, or perform any other specialized behavior, it needs to be added here.
1970 //
1971 // Note that this method should only be called from the vm_thread while at a
1972 // safepoint.
1973 //
1974 // Note that the all_soft_refs_clear flag in the collector policy
1975 // may be true because this method can be called without intervening
1976 // activity. For example, when the heap space is tight and full measures
1977 // are being taken to free space.
1978 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1979 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1980 assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1981 "should be in vm thread");
1982
1983 ParallelScavengeHeap* heap = gc_heap();
1984 GCCause::Cause gc_cause = heap->gc_cause();
1985 assert(!heap->is_gc_active(), "not reentrant");
1986
1987 PSAdaptiveSizePolicy* policy = heap->size_policy();
1988 IsGCActiveMark mark;
1989
1990 if (ScavengeBeforeFullGC) {
1991 PSScavenge::invoke_no_policy();
1992 }
1993
1994 const bool clear_all_soft_refs =
1995 heap->collector_policy()->should_clear_all_soft_refs();
1996
1997 PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
1998 maximum_heap_compaction);
1999 }
2000
2001 // This method contains no policy. You should probably
2002 // be calling invoke() instead.
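// Outline of the steps below: marking_phase() traverses the live objects,
// summary_phase() chooses the dense prefix and computes per-region
// destinations, adjust_roots() rewrites root pointers, and compact() moves
// the objects; policy decisions and bookkeeping surround those four steps.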
2003 bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
2004 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
2005 assert(ref_processor() != NULL, "Sanity");
2006
2007 if (GC_locker::check_active_before_gc()) {
2008 return false;
2009 }
2010
2011 ParallelScavengeHeap* heap = gc_heap();
2012
2013 _gc_timer.register_gc_start();
2014 _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
2015
2016 TimeStamp marking_start;
2017 TimeStamp compaction_start;
2018 TimeStamp collection_exit;
2019
2020 GCCause::Cause gc_cause = heap->gc_cause();
2021 PSYoungGen* young_gen = heap->young_gen();
2022 PSOldGen* old_gen = heap->old_gen();
2023 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
2024
2025 // The scope of casr should end after code that can change
2026 // CollectorPolicy::_should_clear_all_soft_refs.
2027 ClearedAllSoftRefs casr(maximum_heap_compaction,
2028 heap->collector_policy());
2029
2030 if (ZapUnusedHeapArea) {
2031 // Save information needed to minimize mangling
2032 heap->record_gen_tops_before_GC();
2033 }
2034
2035 heap->pre_full_gc_dump(&_gc_timer);
2036
2037 _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
2038
2039 // Make sure data structures are sane, make the heap parsable, and do other
2040 // miscellaneous bookkeeping.
2041 PreGCValues pre_gc_values;
2042 pre_compact(&pre_gc_values);
2043
2044 // Get the compaction manager reserved for the VM thread.
2045 ParCompactionManager* const vmthread_cm =
2046 ParCompactionManager::manager_array(gc_task_manager()->workers());
2047
2048 // Place after pre_compact() where the number of invocations is incremented.
2049 AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
2050
2051 {
2052 ResourceMark rm;
2053 HandleMark hm;
2054
2055 // Set the number of GC threads to be used in this collection
2056 gc_task_manager()->set_active_gang();
2057 gc_task_manager()->task_idle_workers();
2058 heap->set_par_threads(gc_task_manager()->active_workers());
2059
2060 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
2061 GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
2062 TraceCollectorStats tcs(counters());
2063 TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
2064
2065 if (TraceGen1Time) accumulated_time()->start();
2066
2067 // Let the size policy know we're starting
2068 size_policy->major_collection_begin();
2069
2070 CodeCache::gc_prologue();
2071 Threads::gc_prologue();
2072
2073 COMPILER2_PRESENT(DerivedPointerTable::clear());
2074
2075 ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
2076 ref_processor()->setup_policy(maximum_heap_compaction);
2077
2078 bool marked_for_unloading = false;
2079
2080 marking_start.update();
2081 marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);
2082
2083 bool max_on_system_gc = UseMaximumCompactionOnSystemGC
2084 && gc_cause == GCCause::_java_lang_system_gc;
2085 summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
2086
2087 COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
2088 COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
2089
2090 // adjust_roots() updates Universe::_intArrayKlassObj which is
2091 // needed by the compaction for filling holes in the dense prefix.
2092 adjust_roots();
2093
2094 compaction_start.update();
2095 compact();
2096
2097 // Reset the mark bitmap, summary data, and do other bookkeeping. Must be
2098 // done before resizing.
2099 post_compact();
2100
2101 // Let the size policy know we're done
2102 size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
2103
2104 if (UseAdaptiveSizePolicy) {
2105 if (PrintAdaptiveSizePolicy) {
2106 gclog_or_tty->print("AdaptiveSizeStart: ");
2107 gclog_or_tty->stamp();
2108 gclog_or_tty->print_cr(" collection: %d ",
2109 heap->total_collections());
2110 if (Verbose) {
2111 gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d",
2112 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
2113 }
2114 }
2115
2116 // Don't check if the size_policy is ready here. Let
2117 // the size_policy check that internally.
2118 if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
2119 ((gc_cause != GCCause::_java_lang_system_gc) ||
2120 UseAdaptiveSizePolicyWithSystemGC)) {
2121 // Calculate optimal free space amounts
2122 assert(young_gen->max_size() >
2123 young_gen->from_space()->capacity_in_bytes() +
2124 young_gen->to_space()->capacity_in_bytes(),
2125 "Sizes of space in young gen are out-of-bounds");
2126
2127 size_t young_live = young_gen->used_in_bytes();
2128 size_t eden_live = young_gen->eden_space()->used_in_bytes();
2129 size_t old_live = old_gen->used_in_bytes();
2130 size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
2131 size_t max_old_gen_size = old_gen->max_gen_size();
2132 size_t max_eden_size = young_gen->max_size() -
2133 young_gen->from_space()->capacity_in_bytes() -
2134 young_gen->to_space()->capacity_in_bytes();
2135
2136 // Used for diagnostics
2137 size_policy->clear_generation_free_space_flags();
2138
2139 size_policy->compute_generations_free_space(young_live,
2140 eden_live,
2141 old_live,
2142 cur_eden,
2143 max_old_gen_size,
2144 max_eden_size,
2145 true /* full gc*/);
2146
2147 size_policy->check_gc_overhead_limit(young_live,
2148 eden_live,
2149 max_old_gen_size,
2150 max_eden_size,
2151 true /* full gc*/,
2152 gc_cause,
2153 heap->collector_policy());
2154
2155 size_policy->decay_supplemental_growth(true /* full gc*/);
2156
2157 heap->resize_old_gen(
2158 size_policy->calculated_old_free_size_in_bytes());
2159
2160 // Don't resize the young generation at a major collection. A
2161 // desired young generation size may have been calculated but
2162 // resizing the young generation complicates the code because the
2163 // resizing of the old generation may have moved the boundary
2164 // between the young generation and the old generation. Let the
2165 // young generation resizing happen at the minor collections.
2166 }
2167 if (PrintAdaptiveSizePolicy) {
2168 gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
2169 heap->total_collections());
2170 }
2171 }
2172
2173 if (UsePerfData) {
2174 PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
2175 counters->update_counters();
2176 counters->update_old_capacity(old_gen->capacity_in_bytes());
2177 counters->update_young_capacity(young_gen->capacity_in_bytes());
2178 }
2179
2180 heap->resize_all_tlabs();
2181
2182 // Resize the metaspace capacity after a collection
2183 MetaspaceGC::compute_new_size();
2184
2185 if (TraceGen1Time) accumulated_time()->stop();
2186
2187 if (PrintGC) {
2188 if (PrintGCDetails) {
2189 // No GC timestamp here. This is after GC so it would be confusing.
2190 young_gen->print_used_change(pre_gc_values.young_gen_used());
2191 old_gen->print_used_change(pre_gc_values.old_gen_used());
2192 heap->print_heap_change(pre_gc_values.heap_used());
2193 MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());
2194 } else {
2195 heap->print_heap_change(pre_gc_values.heap_used());
2196 }
2197 }
2198
2199 // Track memory usage and detect low memory
2200 MemoryService::track_memory_usage();
2201 heap->update_counters();
2202 gc_task_manager()->release_idle_workers();
2203 }
2204
2205 #ifdef ASSERT
2206 for (size_t i = 0; i < ParallelGCThreads + 1; ++i) {
2207 ParCompactionManager* const cm =
2208 ParCompactionManager::manager_array(int(i));
2209 assert(cm->marking_stack()->is_empty(), "should be empty");
2210 assert(ParCompactionManager::region_list(int(i))->is_empty(), "should be empty");
2211 }
2212 #endif // ASSERT
2213
2214 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
2215 HandleMark hm; // Discard invalid handles created during verification
2216 Universe::verify(" VerifyAfterGC:");
2217 }
2218
2219 // Re-verify object start arrays
2220 if (VerifyObjectStartArray &&
2221 VerifyAfterGC) {
2222 old_gen->verify_object_start_array();
2223 }
2224
2225 if (ZapUnusedHeapArea) {
2226 old_gen->object_space()->check_mangled_unused_area_complete();
2227 }
2228
2229 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
2230
2231 collection_exit.update();
2232
2233 heap->print_heap_after_gc();
2234 heap->trace_heap_after_gc(&_gc_tracer);
2235
2236 if (PrintGCTaskTimeStamps) {
2237 gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
2238 INT64_FORMAT,
2239 marking_start.ticks(), compaction_start.ticks(),
2240 collection_exit.ticks());
2241 gc_task_manager()->print_task_time_stamps();
2242 }
2243
2244 heap->post_full_gc_dump(&_gc_timer);
2245
2246 #ifdef TRACESPINNING
2247 ParallelTaskTerminator::print_termination_counts();
2248 #endif
2249
2250 _gc_timer.register_gc_end();
2251
2252 _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
2253 _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
2254
2255 return true;
2256 }
2257
2258 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
2259 PSYoungGen* young_gen,
2260 PSOldGen* old_gen) {
2261 MutableSpace* const eden_space = young_gen->eden_space();
2262 assert(!eden_space->is_empty(), "eden must be non-empty");
2263 assert(young_gen->virtual_space()->alignment() ==
2264 old_gen->virtual_space()->alignment(), "alignments do not match");
2265
2266 if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
2267 return false;
2268 }
2269
2270 // Both generations must be completely committed.
2271 if (young_gen->virtual_space()->uncommitted_size() != 0) {
2272 return false;
2273 }
2274 if (old_gen->virtual_space()->uncommitted_size() != 0) {
2275 return false;
2276 }
2277
2278 // Figure out how much to take from eden. Include the average amount promoted
2279 // in the total; otherwise the next young gen GC will simply bail out to a
2280 // full GC.
2281 const size_t alignment = old_gen->virtual_space()->alignment();
2282 const size_t eden_used = eden_space->used_in_bytes();
2283 const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
2284 const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
2285 const size_t eden_capacity = eden_space->capacity_in_bytes();
2286
2287 if (absorb_size >= eden_capacity) {
2288 return false; // Must leave some space in eden.
2289 }
2290
2291 const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
2292 if (new_young_size < young_gen->min_gen_size()) {
2293 return false; // Respect young gen minimum size.
2294 }
2295
2296 if (TraceAdaptiveGCBoundary && Verbose) {
2297 gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
2298 "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
2299 "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
2300 "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
2301 absorb_size / K,
2302 eden_capacity / K, (eden_capacity - absorb_size) / K,
2303 young_gen->from_space()->used_in_bytes() / K,
2304 young_gen->to_space()->used_in_bytes() / K,
2305 young_gen->capacity_in_bytes() / K, new_young_size / K);
2306 }
2307
2308 // Fill the unused part of the old gen.
2309 MutableSpace* const old_space = old_gen->object_space();
2310 HeapWord* const unused_start = old_space->top();
2311 size_t const unused_words = pointer_delta(old_space->end(), unused_start);
2312
2313 if (unused_words > 0) {
2314 if (unused_words < CollectedHeap::min_fill_size()) {
2315 return false; // If the old gen cannot be filled, must give up.
2316 }
2317 CollectedHeap::fill_with_objects(unused_start, unused_words);
2318 }
2319
2320 // Take the live data from eden and set both top and end in the old gen to
2321 // eden top. (Need to set end because reset_after_change() mangles the region
2322 // from end to virtual_space->high() in debug builds).
2323 HeapWord* const new_top = eden_space->top();
2324 old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
2325 absorb_size);
2326 young_gen->reset_after_change();
2327 old_space->set_top(new_top);
2328 old_space->set_end(new_top);
2329 old_gen->reset_after_change();
2330
2331 // Update the object start array for the filler object and the data from eden.
2332 ObjectStartArray* const start_array = old_gen->start_array();
2333 for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
2334 start_array->allocate_block(p);
2335 }
2336
2337 // Could update the promoted average here, but it is not typically updated at
2338 // full GCs and the value to use is unclear. Something like
2339 //
2340 // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2341
2342 size_policy->set_bytes_absorbed_from_eden(absorb_size);
2343 return true;
2344 }
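// Worked example (illustrative numbers only): with eden_used = 30M, a
// padded average promoted of 2M and a 64K generation alignment,
// absorb_size = align_size_up(32M, 64K) = 32M. The old gen boundary then
// moves up by 32M, provided that leaves some room in eden and the shrunken
// young gen stays at or above its minimum size.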
2345
2346 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2347 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2348 "shouldn't return NULL");
2349 return ParallelScavengeHeap::gc_task_manager();
2350 }
2351
2352 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2353 bool maximum_heap_compaction,
2354 ParallelOldTracer *gc_tracer) {
2355 // Recursively traverse all live objects and mark them
2356 GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2357
2358 ParallelScavengeHeap* heap = gc_heap();
2359 uint parallel_gc_threads = heap->gc_task_manager()->workers();
2360 uint active_gc_threads = heap->gc_task_manager()->active_workers();
2361 TaskQueueSetSuper* qset = ParCompactionManager::stack_array();
2362 ParallelTaskTerminator terminator(active_gc_threads, qset);
2363
2364 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
2365 PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
2366
2367 // Need new claim bits before marking starts.
2368 ClassLoaderDataGraph::clear_claimed_marks();
2369
2370 {
2371 GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2372
2373 ParallelScavengeHeap::ParStrongRootsScope psrs;
2374
2375 GCTaskQueue* q = GCTaskQueue::create();
2376
2377 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2378 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2379 // We scan the thread roots in parallel
2380 Threads::create_thread_roots_marking_tasks(q);
2381 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2382 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2383 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2384 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2385 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
2386 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2387 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
2388
2389 if (active_gc_threads > 1) {
2390 for (uint j = 0; j < active_gc_threads; j++) {
2391 q->enqueue(new StealMarkingTask(&terminator));
2392 }
2393 }
2394
2395 gc_task_manager()->execute_and_wait(q);
2396 }
2397
2398 // Process reference objects found during marking
2399 {
2400 GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2401
2402 ReferenceProcessorStats stats;
2403 if (ref_processor()->processing_is_mt()) {
2404 RefProcTaskExecutor task_executor;
2405 stats = ref_processor()->process_discovered_references(
2406 is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
2407 &task_executor, &_gc_timer, _gc_tracer.gc_id());
2408 } else {
2409 stats = ref_processor()->process_discovered_references(
2410 is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
2411 &_gc_timer, _gc_tracer.gc_id());
2412 }
2413
2414 gc_tracer->report_gc_reference_stats(stats);
2415 }
2416
2417 GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2418
2419 // This is the point where the entire marking should have completed.
2420 assert(cm->marking_stacks_empty(), "Marking should have completed");
2421
2422 // Follow system dictionary roots and unload classes.
2423 bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
2424
2425 // Unload nmethods.
2426 CodeCache::do_unloading(is_alive_closure(), purged_class);
2427
2428 // Prune dead klasses from subklass/sibling/implementor lists.
2429 Klass::clean_weak_klass_links(is_alive_closure());
2430
2431 // Delete entries for dead interned strings.
2432 StringTable::unlink(is_alive_closure());
2433
2434 // Clean up unreferenced symbols in symbol table.
2435 SymbolTable::unlink();
2436 _gc_tracer.report_object_count_after_gc(is_alive_closure());
2437 }
2438
2439 void PSParallelCompact::follow_class_loader(ParCompactionManager* cm,
2440 ClassLoaderData* cld) {
2441 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
2442 PSParallelCompact::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
2443
2444 cld->oops_do(&mark_and_push_closure, &follow_klass_closure, true);
2445 }
2446
2447 void PSParallelCompact::adjust_roots() {
2448 // Adjust the pointers to reflect the new locations
2449 GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2450
2451 // Need new claim bits when tracing through and adjusting pointers.
2452 ClassLoaderDataGraph::clear_claimed_marks();
2453
2454 // General strong roots.
2455 Universe::oops_do(adjust_pointer_closure());
2456 JNIHandles::oops_do(adjust_pointer_closure()); // Global (strong) JNI handles
2457 CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
2458 Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
2459 ObjectSynchronizer::oops_do(adjust_pointer_closure());
2460 FlatProfiler::oops_do(adjust_pointer_closure());
2461 Management::oops_do(adjust_pointer_closure());
2462 JvmtiExport::oops_do(adjust_pointer_closure());
2463 SystemDictionary::oops_do(adjust_pointer_closure());
2464 ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
2465
2466 // Now adjust pointers in remaining weak roots. (All of which should
2467 // have been cleared if they pointed to non-surviving objects.)
2468 // Global (weak) JNI handles
2469 JNIHandles::weak_oops_do(adjust_pointer_closure());
2470 JFR_ONLY(Jfr::weak_oops_do(adjust_pointer_closure()));
2471
2472 CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
2473 CodeCache::blobs_do(&adjust_from_blobs);
2474 StringTable::oops_do(adjust_pointer_closure());
2475 ref_processor()->weak_oops_do(adjust_pointer_closure());
2476 // Roots were visited so references into the young gen in roots
2477 // may have been scanned. Process them also.
2478 // Should the reference processor have a span that excludes
2479 // young gen objects?
2480 PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
2481 }
2482
2483 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
2484 uint parallel_gc_threads)
2485 {
2486 GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2487
2488 // Find the threads that are active
2489 unsigned int which = 0;
2490
2491 const uint task_count = MAX2(parallel_gc_threads, 1U);
2492 for (uint j = 0; j < task_count; j++) {
2493 q->enqueue(new DrainStacksCompactionTask(j));
2494 ParCompactionManager::verify_region_list_empty(j);
2495 // Set the region stack variables to "no region stack" values so
2496 // that these managers will be recognized as needing a region stack
2497 // in the stealing tasks if they do not get one by executing a
2498 // draining task.
2499 ParCompactionManager* cm = ParCompactionManager::manager_array(j);
2500 cm->set_region_stack(NULL);
2501 cm->set_region_stack_index((uint)max_uintx);
2502 }
2503 ParCompactionManager::reset_recycled_stack_index();
2504
2505 // Find all regions that are available (can be filled immediately) and
2506 // distribute them to the thread stacks. The iteration is done in reverse
2507 // order (high to low) so the regions will be removed in ascending order.
2508
2509 const ParallelCompactData& sd = PSParallelCompact::summary_data();
2510
2511 size_t fillable_regions = 0; // A count for diagnostic purposes.
2512 // A region index which corresponds to the tasks created above.
2513 // "which" must be 0 <= which < task_count
2514
2515 which = 0;
2516 // id + 1 is used in the termination test so that unsigned arithmetic
2517 // works even though old_space_id == 0.
2518 for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
2519 SpaceInfo* const space_info = _space_info + id;
2520 MutableSpace* const space = space_info->space();
2521 HeapWord* const new_top = space_info->new_top();
2522
2523 const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
2524 const size_t end_region =
2525 sd.addr_to_region_idx(sd.region_align_up(new_top));
2526
2527 for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
2528 if (sd.region(cur)->claim_unsafe()) {
2529 ParCompactionManager::region_list_push(which, cur);
2530
2531 if (TraceParallelOldGCCompactionPhase && Verbose) {
2532 const size_t count_mod_8 = fillable_regions & 7;
2533 if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
2534 gclog_or_tty->print(" " SIZE_FORMAT_W(7), cur);
2535 if (count_mod_8 == 7) gclog_or_tty->cr();
2536 }
2537
2538 NOT_PRODUCT(++fillable_regions;)
2539
2540 // Assign regions to tasks in round-robin fashion.
2541 if (++which == task_count) {
2542 assert(which <= parallel_gc_threads,
2543 "Inconsistent number of workers");
2544 which = 0;
2545 }
2546 }
2547 }
2548 }
2549
2550 if (TraceParallelOldGCCompactionPhase) {
2551 if (Verbose && (fillable_regions & 7) != 0) gclog_or_tty->cr();
2552 gclog_or_tty->print_cr("%u initially fillable regions", fillable_regions);
2553 }
2554 }
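// Example for the round-robin assignment above: with task_count == 4,
// claimed regions are pushed onto the per-task region lists in the order
// 0, 1, 2, 3, 0, 1, ... so each DrainStacksCompactionTask starts with a
// roughly equal share of the immediately fillable regions.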
2555
2556 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
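// Example for the partitioning below (illustrative): with 4 GC threads and
// the factor of 4, a dense prefix of more than 16 regions is split into 16
// UpdateDensePrefixTasks; with 16 or fewer regions, one task per thread is
// created instead so the work is not over-partitioned.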
2557
2558 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
2559 uint parallel_gc_threads) {
2560 GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2561
2562 ParallelCompactData& sd = PSParallelCompact::summary_data();
2563
2564 // Iterate over all the spaces adding tasks for updating
2565 // regions in the dense prefix. Assume that 1 gc thread
2566 // will work on opening the gaps and the remaining gc threads
2567 // will work on the dense prefix.
2568 unsigned int space_id;
2569 for (space_id = old_space_id; space_id < last_space_id; ++ space_id) {
2570 HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2571 const MutableSpace* const space = _space_info[space_id].space();
2572
2573 if (dense_prefix_end == space->bottom()) {
2574 // There is no dense prefix for this space.
2575 continue;
2576 }
2577
2578 // The dense prefix is before this region.
2579 size_t region_index_end_dense_prefix =
2580 sd.addr_to_region_idx(dense_prefix_end);
2581 RegionData* const dense_prefix_cp =
2582 sd.region(region_index_end_dense_prefix);
2583 assert(dense_prefix_end == space->end() ||
2584 dense_prefix_cp->available() ||
2585 dense_prefix_cp->claimed(),
2586 "The region after the dense prefix should always be ready to fill");
2587
2588 size_t region_index_start = sd.addr_to_region_idx(space->bottom());
2589
2590 // Is there dense prefix work?
2591 size_t total_dense_prefix_regions =
2592 region_index_end_dense_prefix - region_index_start;
2593 // How many regions of the dense prefix should be given to
2594 // each thread?
2595 if (total_dense_prefix_regions > 0) {
2596 uint tasks_for_dense_prefix = 1;
2597 if (total_dense_prefix_regions <=
2598 (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
2599 // Don't over partition. This assumes that
2600 // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
2601 // so there are not many regions to process.
2602 tasks_for_dense_prefix = parallel_gc_threads;
2603 } else {
2604 // Over partition
2605 tasks_for_dense_prefix = parallel_gc_threads *
2606 PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
2607 }
2608 size_t regions_per_thread = total_dense_prefix_regions /
2609 tasks_for_dense_prefix;
2610 // Give each thread at least 1 region.
2611 if (regions_per_thread == 0) {
2612 regions_per_thread = 1;
2613 }
2614
2615 for (uint k = 0; k < tasks_for_dense_prefix; k++) {
2616 if (region_index_start >= region_index_end_dense_prefix) {
2617 break;
2618 }
2619 // region_index_end is not processed
2620 size_t region_index_end = MIN2(region_index_start + regions_per_thread,
2621 region_index_end_dense_prefix);
2622 q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2623 region_index_start,
2624 region_index_end));
2625 region_index_start = region_index_end;
2626 }
2627 }
2628 // This gets any part of the dense prefix that did not
2629 // fit evenly.
2630 if (region_index_start < region_index_end_dense_prefix) {
2631 q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2632 region_index_start,
2633 region_index_end_dense_prefix));
2634 }
2635 }
2636 }
2637
2638 void PSParallelCompact::enqueue_region_stealing_tasks(
2639 GCTaskQueue* q,
2640 ParallelTaskTerminator* terminator_ptr,
2641 uint parallel_gc_threads) {
2642 GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2643
2644 // Once a thread has drained its stack, it should try to steal regions from
2645 // other threads.
2646 if (parallel_gc_threads > 1) {
2647 for (uint j = 0; j < parallel_gc_threads; j++) {
2648 q->enqueue(new StealRegionCompactionTask(terminator_ptr));
2649 }
2650 }
2651 }
2652
2653 #ifdef ASSERT
2654 // Write a histogram of the number of times the block table was filled for a
2655 // region.
2656 void PSParallelCompact::write_block_fill_histogram(outputStream* const out)
2657 {
2658 if (!TraceParallelOldGCCompactionPhase) return;
2659
2660 typedef ParallelCompactData::RegionData rd_t;
2661 ParallelCompactData& sd = summary_data();
2662
2663 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2664 MutableSpace* const spc = _space_info[id].space();
2665 if (spc->bottom() != spc->top()) {
2666 const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom());
2667 HeapWord* const top_aligned_up = sd.region_align_up(spc->top());
2668 const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
2669
2670 size_t histo[5] = { 0, 0, 0, 0, 0 };
2671 const size_t histo_len = sizeof(histo) / sizeof(size_t);
2672 const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2673
2674 for (const rd_t* cur = beg; cur < end; ++cur) {
2675 ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2676 }
2677 out->print("%u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2678 for (size_t i = 0; i < histo_len; ++i) {
2679 out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
2680 histo[i], 100.0 * histo[i] / region_cnt);
2681 }
2682 out->cr();
2683 }
2684 }
2685 }
2686 #endif // #ifdef ASSERT
2687
2688 void PSParallelCompact::compact() {
2689 // trace("5");
2690 GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2691
2692 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
2693 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
2694 PSOldGen* old_gen = heap->old_gen();
2695 old_gen->start_array()->reset();
2696 uint parallel_gc_threads = heap->gc_task_manager()->workers();
2697 uint active_gc_threads = heap->gc_task_manager()->active_workers();
2698 TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2699 ParallelTaskTerminator terminator(active_gc_threads, qset);
2700
2701 GCTaskQueue* q = GCTaskQueue::create();
2702 enqueue_region_draining_tasks(q, active_gc_threads);
2703 enqueue_dense_prefix_tasks(q, active_gc_threads);
2704 enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
2705
2706 {
2707 GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2708
2709 gc_task_manager()->execute_and_wait(q);
2710
2711 #ifdef ASSERT
2712 // Verify that all regions have been processed before the deferred updates.
2713 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2714 verify_complete(SpaceId(id));
2715 }
2716 #endif
2717 }
2718
2719 {
2720 // Update the deferred objects, if any. Any compaction manager can be used.
2721 GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2722 ParCompactionManager* cm = ParCompactionManager::manager_array(0);
2723 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2724 update_deferred_objects(cm, SpaceId(id));
2725 }
2726 }
2727
2728 DEBUG_ONLY(write_block_fill_histogram(gclog_or_tty));
2729 }
2730
2731 #ifdef ASSERT
2732 void PSParallelCompact::verify_complete(SpaceId space_id) {
2733 // All Regions between space bottom() to new_top() should be marked as filled
2734 // and all Regions between new_top() and top() should be available (i.e.,
2735 // should have been emptied).
  ParallelCompactData& sd = summary_data();
  SpaceInfo si = _space_info[space_id];
  HeapWord* new_top_addr = sd.region_align_up(si.new_top());
  HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
  const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
  const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
  const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);

  bool issued_a_warning = false;

  size_t cur_region;
  for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
    const RegionData* const c = sd.region(cur_region);
    if (!c->completed()) {
      warning("region " SIZE_FORMAT " not filled: "
              "destination_count=" SIZE_FORMAT,
              cur_region, c->destination_count());
      issued_a_warning = true;
    }
  }

  for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
    const RegionData* const c = sd.region(cur_region);
    if (!c->available()) {
      warning("region " SIZE_FORMAT " not empty: "
              "destination_count=" SIZE_FORMAT,
              cur_region, c->destination_count());
      issued_a_warning = true;
    }
  }

  if (issued_a_warning) {
    print_region_ranges();
  }
}
#endif // #ifdef ASSERT

// Update interior oops in the ranges of regions [beg_region, end_region).
void
PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
                                                       SpaceId space_id,
                                                       size_t beg_region,
                                                       size_t end_region) {
  ParallelCompactData& sd = summary_data();
  ParMarkBitMap* const mbm = mark_bitmap();

  HeapWord* beg_addr = sd.region_to_addr(beg_region);
  HeapWord* const end_addr = sd.region_to_addr(end_region);
  assert(beg_region <= end_region, "bad region range");
  assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");

#ifdef ASSERT
  // Claim the regions to avoid triggering an assert when they are marked as
  // filled.
  for (size_t claim_region = beg_region; claim_region < end_region; ++claim_region) {
    assert(sd.region(claim_region)->claim_unsafe(), "claim() failed");
  }
#endif // #ifdef ASSERT

  if (beg_addr != space(space_id)->bottom()) {
    // Find the first live object or block of dead space that *starts* in this
    // range of regions. If a partial object crosses onto the region, skip it;
    // it will be marked for 'deferred update' when the object head is
    // processed. If dead space crosses onto the region, it is also skipped; it
    // will be filled when the prior region is processed. If neither of those
    // apply, the first word in the region is the start of a live object or
    // dead space.
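    //
    // For illustration (hypothetical sizes, not an exhaustive case analysis):
    // if an object begins near the end of the prior region and extends 10
    // words onto this one, partial_obj_size() is non-zero and beg_addr is
    // advanced past those 10 words via partial_obj_end(). If instead the
    // prior region ends in dead space that spills over the boundary,
    // dead_space_crosses_boundary() triggers and beg_addr is advanced to the
    // first object that actually begins in [beg_addr, end_addr).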
    assert(beg_addr > space(space_id)->bottom(), "sanity");
    const RegionData* const cp = sd.region(beg_region);
    if (cp->partial_obj_size() != 0) {
      beg_addr = sd.partial_obj_end(beg_region);
    } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
      beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
    }
  }

  if (beg_addr < end_addr) {
    // A live object or block of dead space starts in this range of regions.
    HeapWord* const dense_prefix_end = dense_prefix(space_id);

    // Create closures and iterate.
    UpdateOnlyClosure update_closure(mbm, cm, space_id);
    FillClosure fill_closure(cm, space_id);
    ParMarkBitMap::IterationStatus status;
    status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
                          dense_prefix_end);
    if (status == ParMarkBitMap::incomplete) {
      update_closure.do_addr(update_closure.source());
    }
  }

  // Mark the regions as filled.
  RegionData* const beg_cp = sd.region(beg_region);
  RegionData* const end_cp = sd.region(end_region);
  for (RegionData* cp = beg_cp; cp < end_cp; ++cp) {
    cp->set_completed();
  }
}

// Return the SpaceId for the space containing addr. If addr is not in the
// heap, last_space_id is returned. In debug mode it expects the address to be
// in the heap and asserts such.
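//
// The lookup is a simple linear walk over the spaces in id order (the old
// space first, then the young-generation spaces), so e.g. an addr in the old
// generation returns on the first iteration.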
PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
  assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    if (_space_info[id].space()->contains(addr)) {
      return SpaceId(id);
    }
  }

  assert(false, "no space contains the addr");
  return last_space_id;
}

void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
                                                SpaceId id) {
  assert(id < last_space_id, "bad space id");

  ParallelCompactData& sd = summary_data();
  const SpaceInfo* const space_info = _space_info + id;
  ObjectStartArray* const start_array = space_info->start_array();

  const MutableSpace* const space = space_info->space();
  assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
  HeapWord* const beg_addr = space_info->dense_prefix();
  HeapWord* const end_addr = sd.region_align_up(space_info->new_top());

  const RegionData* const beg_region = sd.addr_to_region_ptr(beg_addr);
  const RegionData* const end_region = sd.addr_to_region_ptr(end_addr);
  const RegionData* cur_region;
  for (cur_region = beg_region; cur_region < end_region; ++cur_region) {
    HeapWord* const addr = cur_region->deferred_obj_addr();
    if (addr != NULL) {
      if (start_array != NULL) {
        start_array->allocate_block(addr);
      }
      oop(addr)->update_contents(cm);
      assert(oop(addr)->is_oop_or_null(), "should be an oop now");
    }
  }
}

// Skip over count live words starting from beg, and return the address of the
// next live word. Unless marked, the word corresponding to beg is assumed to
// be dead. Callers must either ensure beg does not correspond to the middle
// of an object, or account for those live words in some other way. Callers
// must also ensure that there are enough live words in the range [beg, end)
// to skip.
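//
// A worked example (hypothetical sizes, and one mark bit per heap word
// assumed so bit and word counts coincide): with two live objects of 3 and 5
// words in [beg, end) and count == 6, the loop consumes all 3 bits of the
// first object (bits_to_skip drops from 6 to 3), then returns an address 3
// words into the second object, since its 5 bits exceed the 3 still to skip.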
HeapWord*
PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
{
  assert(count > 0, "sanity");

  ParMarkBitMap* m = mark_bitmap();
  idx_t bits_to_skip = m->words_to_bits(count);
  idx_t cur_beg = m->addr_to_bit(beg);
  const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end));

  do {
    cur_beg = m->find_obj_beg(cur_beg, search_end);
    idx_t cur_end = m->find_obj_end(cur_beg, search_end);
    const size_t obj_bits = cur_end - cur_beg + 1;
    if (obj_bits > bits_to_skip) {
      return m->bit_to_addr(cur_beg + bits_to_skip);
    }
    bits_to_skip -= obj_bits;
    cur_beg = cur_end + 1;
  } while (bits_to_skip > 0);

  // Skipping the desired number of words landed just past the end of an
  // object. Find the start of the next object.
  cur_beg = m->find_obj_beg(cur_beg, search_end);
  assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
  return m->bit_to_addr(cur_beg);
}

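// Return the address of the first live word that will be copied to the
// region-aligned destination dest_addr, given that src_region_idx supplies
// the data. Handles a split region, a partial object entering the source
// region, and skipping live words already copied to prior destinations.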
HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
                                            SpaceId src_space_id,
                                            size_t src_region_idx)
{
  assert(summary_data().is_region_aligned(dest_addr), "not aligned");

  const SplitInfo& split_info = _space_info[src_space_id].split_info();
  if (split_info.dest_region_addr() == dest_addr) {
    // The partial object ending at the split point contains the first word to
    // be copied to dest_addr.
    return split_info.first_src_addr();
  }

  const ParallelCompactData& sd = summary_data();
  ParMarkBitMap* const bitmap = mark_bitmap();
  const size_t RegionSize = ParallelCompactData::RegionSize;

  assert(sd.is_region_aligned(dest_addr), "not aligned");
  const RegionData* const src_region_ptr = sd.region(src_region_idx);
  const size_t partial_obj_size = src_region_ptr->partial_obj_size();
  HeapWord* const src_region_destination = src_region_ptr->destination();

  assert(dest_addr >= src_region_destination, "wrong src region");
  assert(src_region_ptr->data_size() > 0, "src region cannot be empty");

  HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
  HeapWord* const src_region_end = src_region_beg + RegionSize;

  HeapWord* addr = src_region_beg;
  if (dest_addr == src_region_destination) {
    // Return the first live word in the source region.
    if (partial_obj_size == 0) {
      addr = bitmap->find_obj_beg(addr, src_region_end);
      assert(addr < src_region_end, "no objects start in src region");
    }
    return addr;
  }

  // Must skip some live data.
  size_t words_to_skip = dest_addr - src_region_destination;
  assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");

  if (partial_obj_size >= words_to_skip) {
    // All the live words to skip are part of the partial object.
    addr += words_to_skip;
    if (partial_obj_size == words_to_skip) {
      // Find the first live word past the partial object.
      addr = bitmap->find_obj_beg(addr, src_region_end);
      assert(addr < src_region_end, "wrong src region");
    }
    return addr;
  }

  // Skip over the partial object (if any).
  if (partial_obj_size != 0) {
    words_to_skip -= partial_obj_size;
    addr += partial_obj_size;
  }

  // Skip over live words due to objects that start in the region.
  addr = skip_live_words(addr, src_region_end, words_to_skip);
  assert(addr < src_region_end, "wrong src region");
  return addr;
}

void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
                                                     SpaceId src_space_id,
                                                     size_t beg_region,
                                                     HeapWord* end_addr)
{
  ParallelCompactData& sd = summary_data();

#ifdef ASSERT
  MutableSpace* const src_space = _space_info[src_space_id].space();
  HeapWord* const beg_addr = sd.region_to_addr(beg_region);
  assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
         "src_space_id does not match beg_addr");
  assert(src_space->contains(end_addr) || end_addr == src_space->end(),
         "src_space_id does not match end_addr");
#endif // #ifdef ASSERT

  RegionData* const beg = sd.region(beg_region);
  RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));

  // Regions up to new_top() are enqueued if they become available.
  HeapWord* const new_top = _space_info[src_space_id].new_top();
  RegionData* const enqueue_end =
    sd.addr_to_region_ptr(sd.region_align_up(new_top));

  for (RegionData* cur = beg; cur < end; ++cur) {
    assert(cur->data_size() > 0, "region must have live data");
    cur->decrement_destination_count();
    if (cur < enqueue_end && cur->available() && cur->claim()) {
      cm->push_region(sd.region(cur));
    }
  }
}

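// Find the next source region for the given closure, skipping regions with no
// live data. If the current space is exhausted, advance src_space_id and
// src_space_top to the next space that does not compact into itself. The
// closure's source address is updated to the start of the returned region.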
size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
                                          SpaceId& src_space_id,
                                          HeapWord*& src_space_top,
                                          HeapWord* end_addr)
{
  typedef ParallelCompactData::RegionData RegionData;

  ParallelCompactData& sd = PSParallelCompact::summary_data();
  const size_t region_size = ParallelCompactData::RegionSize;

  size_t src_region_idx = 0;

  // Skip empty regions (if any) up to the top of the space.
  HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
  RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
  HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
  const RegionData* const top_region_ptr =
    sd.addr_to_region_ptr(top_aligned_up);
  while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
    ++src_region_ptr;
  }

  if (src_region_ptr < top_region_ptr) {
    // The next source region is in the current space. Update src_region_idx
    // and the source address to match src_region_ptr.
    src_region_idx = sd.region(src_region_ptr);
    HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
    if (src_region_addr > closure.source()) {
      closure.set_source(src_region_addr);
    }
    return src_region_idx;
  }

  // Switch to a new source space and find the first non-empty region.
  unsigned int space_id = src_space_id + 1;
  assert(space_id < last_space_id, "not enough spaces");

  HeapWord* const destination = closure.destination();

  do {
    MutableSpace* space = _space_info[space_id].space();
    HeapWord* const bottom = space->bottom();
    const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);

    // Iterate over the spaces that do not compact into themselves.
    if (bottom_cp->destination() != bottom) {
      HeapWord* const top_aligned_up = sd.region_align_up(space->top());
      const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);

      for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
        if (src_cp->live_obj_size() > 0) {
          // Found it.
          assert(src_cp->destination() == destination,
                 "first live obj in the space must match the destination");
          assert(src_cp->partial_obj_size() == 0,
                 "a space cannot begin with a partial obj");

          src_space_id = SpaceId(space_id);
          src_space_top = space->top();
          const size_t src_region_idx = sd.region(src_cp);
          closure.set_source(sd.region_to_addr(src_region_idx));
          return src_region_idx;
        } else {
          assert(src_cp->data_size() == 0, "sanity");
        }
      }
    }
  } while (++space_id < last_space_id);

  assert(false, "no source region was found");
  return 0;
}

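// Fill the destination region with live words copied from one or more source
// regions, updating interior oops as objects are moved. A partial object
// entering the region is copied first; an object that does not fit has its
// interior updates deferred and is finished when the next destination region
// is filled.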
void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
{
  typedef ParMarkBitMap::IterationStatus IterationStatus;
  const size_t RegionSize = ParallelCompactData::RegionSize;
  ParMarkBitMap* const bitmap = mark_bitmap();
  ParallelCompactData& sd = summary_data();
  RegionData* const region_ptr = sd.region(region_idx);

  // Get the items needed to construct the closure.
  HeapWord* dest_addr = sd.region_to_addr(region_idx);
  SpaceId dest_space_id = space_id(dest_addr);
  ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
  HeapWord* new_top = _space_info[dest_space_id].new_top();
  assert(dest_addr < new_top, "sanity");
  const size_t words = MIN2(pointer_delta(new_top, dest_addr), RegionSize);

  // Get the source region and related info.
  size_t src_region_idx = region_ptr->source_region();
  SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
  HeapWord* src_space_top = _space_info[src_space_id].space()->top();

  MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
  closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));

  // Adjust src_region_idx to prepare for decrementing destination counts (the
  // destination count is not decremented when a region is copied to itself).
  if (src_region_idx == region_idx) {
    src_region_idx += 1;
  }

  if (bitmap->is_unmarked(closure.source())) {
    // The first source word is in the middle of an object; copy the remainder
    // of the object or as much as will fit. The fact that pointer updates
    // were deferred will be noted when the object header is processed.
    HeapWord* const old_src_addr = closure.source();
    closure.copy_partial_obj();
    if (closure.is_full()) {
      decrement_destination_counts(cm, src_space_id, src_region_idx,
                                   closure.source());
      region_ptr->set_deferred_obj_addr(NULL);
      region_ptr->set_completed();
      return;
    }

    HeapWord* const end_addr = sd.region_align_down(closure.source());
    if (sd.region_align_down(old_src_addr) != end_addr) {
      // The partial object was copied from more than one source region.
      decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);

      // Move to the next source region, possibly switching spaces as well. All
      // args except end_addr may be modified.
      src_region_idx = next_src_region(closure, src_space_id, src_space_top,
                                       end_addr);
    }
  }

  do {
    HeapWord* const cur_addr = closure.source();
    HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
                                    src_space_top);
    IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);

    if (status == ParMarkBitMap::incomplete) {
      // The last obj that starts in the source region does not end in the
      // region.
      assert(closure.source() < end_addr, "sanity");
      HeapWord* const obj_beg = closure.source();
      HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
                                       src_space_top);
      HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
      if (obj_end < range_end) {
        // The end was found; the entire object will fit.
        status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
        assert(status != ParMarkBitMap::would_overflow, "sanity");
      } else {
        // The end was not found; the object will not fit.
        assert(range_end < src_space_top, "obj cannot cross space boundary");
        status = ParMarkBitMap::would_overflow;
      }
    }

    if (status == ParMarkBitMap::would_overflow) {
      // The last object did not fit. Note that interior oop updates were
      // deferred, then copy enough of the object to fill the region.
      region_ptr->set_deferred_obj_addr(closure.destination());
      status = closure.copy_until_full(); // copies from closure.source()

      decrement_destination_counts(cm, src_space_id, src_region_idx,
                                   closure.source());
      region_ptr->set_completed();
      return;
    }

    if (status == ParMarkBitMap::full) {
      decrement_destination_counts(cm, src_space_id, src_region_idx,
                                   closure.source());
      region_ptr->set_deferred_obj_addr(NULL);
      region_ptr->set_completed();
      return;
    }

    decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);

    // Move to the next source region, possibly switching spaces as well. All
    // args except end_addr may be modified.
    src_region_idx = next_src_region(closure, src_space_id, src_space_top,
                                     end_addr);
  } while (true);
}

void PSParallelCompact::fill_blocks(size_t region_idx)
{
  // Fill in the block table elements for the specified region. Each block
  // table element holds the number of live words in the region that are to
  // the left of the first object that starts in the block. Thus only blocks
  // in which an object starts need to be filled.
  //
  // The algorithm scans the section of the bitmap that corresponds to the
  // region, keeping a running total of the live words. When an object start
  // is found, if it's the first to start in the block that contains it, the
  // current total is written to the block table element.
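  //
  // A small worked example (hypothetical numbers; the real sizes come from
  // ParallelCompactData, and word-granularity marking is assumed so bit and
  // word indices coincide): with 128-word blocks and a region that begins
  // with a 20-word partial object, the running total starts at 20. If the
  // first object to start in the region begins at word 130, it lies in block
  // 1 (130 / 128), so block 1's offset is set to 20. If that object is 70
  // words long and the next starts at word 210, block 1 is unchanged
  // (210 / 128 == 1); an object starting at word 300 would record offset 90
  // (20 + 70) in block 2.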
  const size_t Log2BlockSize = ParallelCompactData::Log2BlockSize;
  const size_t Log2RegionSize = ParallelCompactData::Log2RegionSize;
  const size_t RegionSize = ParallelCompactData::RegionSize;

  ParallelCompactData& sd = summary_data();
  const size_t partial_obj_size = sd.region(region_idx)->partial_obj_size();
  if (partial_obj_size >= RegionSize) {
    return; // No objects start in this region.
  }

  // Ensure the first loop iteration decides that the block has changed.
  size_t cur_block = sd.block_count();

  const ParMarkBitMap* const bitmap = mark_bitmap();

  const size_t Log2BitsPerBlock = Log2BlockSize - LogMinObjAlignment;
  assert((size_t)1 << Log2BitsPerBlock ==
         bitmap->words_to_bits(ParallelCompactData::BlockSize), "sanity");

  size_t beg_bit = bitmap->words_to_bits(region_idx << Log2RegionSize);
  const size_t range_end = beg_bit + bitmap->words_to_bits(RegionSize);
  size_t live_bits = bitmap->words_to_bits(partial_obj_size);
  beg_bit = bitmap->find_obj_beg(beg_bit + live_bits, range_end);
  while (beg_bit < range_end) {
    const size_t new_block = beg_bit >> Log2BitsPerBlock;
    if (new_block != cur_block) {
      cur_block = new_block;
      sd.block(cur_block)->set_offset(bitmap->bits_to_words(live_bits));
    }

    const size_t end_bit = bitmap->find_obj_end(beg_bit, range_end);
    if (end_bit < range_end - 1) {
      live_bits += end_bit - beg_bit + 1;
      beg_bit = bitmap->find_obj_beg(end_bit + 1, range_end);
    } else {
      return;
    }
  }
}

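// Update pointers in the dense prefix of the space in place, then move and
// update the objects above it in a single pass over the mark bitmap.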
void
PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
  const MutableSpace* sp = space(space_id);
  if (sp->is_empty()) {
    return;
  }

  ParallelCompactData& sd = PSParallelCompact::summary_data();
  ParMarkBitMap* const bitmap = mark_bitmap();
  HeapWord* const dp_addr = dense_prefix(space_id);
  HeapWord* beg_addr = sp->bottom();
  HeapWord* end_addr = sp->top();

  assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");

  const size_t beg_region = sd.addr_to_region_idx(beg_addr);
  const size_t dp_region = sd.addr_to_region_idx(dp_addr);
  if (beg_region < dp_region) {
    update_and_deadwood_in_dense_prefix(cm, space_id, beg_region, dp_region);
  }

  // The destination of the first live object that starts in the region is one
  // past the end of the partial object entering the region (if any).
  HeapWord* const dest_addr = sd.partial_obj_end(dp_region);
  HeapWord* const new_top = _space_info[space_id].new_top();
  assert(new_top >= dest_addr, "bad new_top value");
  const size_t words = pointer_delta(new_top, dest_addr);

  if (words > 0) {
    ObjectStartArray* start_array = _space_info[space_id].start_array();
    MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);

    ParMarkBitMap::IterationStatus status;
    status = bitmap->iterate(&closure, dest_addr, end_addr);
    assert(status == ParMarkBitMap::full, "iteration not complete");
    assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
           "live objects skipped because closure is full");
  }
}

jlong PSParallelCompact::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSParallelCompact::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}

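// Copy enough words from source() to fill the remaining destination space and
// mark the closure full. The caller records the destination as a deferred
// object, since interior oop updates are not performed here.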
ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
{
  if (source() != destination()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    Copy::aligned_conjoint_words(source(), destination(), words_remaining());
  }
  update_state(words_remaining());
  assert(is_full(), "sanity");
  return ParMarkBitMap::full;
}

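// Copy the remainder of a partial object that extends onto this closure's
// destination range, or as much of it as fits. words_remaining() bounds the
// copy; the bitmap supplies the object's end when it lies within range.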
void MoveAndUpdateClosure::copy_partial_obj()
{
  size_t words = words_remaining();

  HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
  HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
  if (end_addr < range_end) {
    words = bitmap()->obj_size(source(), end_addr);
  }

  // This test is necessary; if omitted, the pointer updates to a partial
  // object that crosses the dense prefix boundary could be overwritten.
  if (source() != destination()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    Copy::aligned_conjoint_words(source(), destination(), words);
  }
  update_state(words);
}

ParMarkBitMapClosure::IterationStatus
MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
  assert(destination() != NULL, "sanity");
  assert(bitmap()->obj_size(addr) == words, "bad size");

  _source = addr;
  assert(PSParallelCompact::summary_data().calc_new_pointer(source()) ==
         destination(), "wrong destination");

  if (words > words_remaining()) {
    return ParMarkBitMap::would_overflow;
  }

  // The start_array must be updated even if the object is not moving.
  if (_start_array != NULL) {
    _start_array->allocate_block(destination());
  }

  if (destination() != source()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    Copy::aligned_conjoint_words(source(), destination(), words);
  }

  oop moved_oop = (oop) destination();
  moved_oop->update_contents(compaction_manager());
  assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");

  update_state(words);
  assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
  return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
}

UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
                                     ParCompactionManager* cm,
                                     PSParallelCompact::SpaceId space_id) :
  ParMarkBitMapClosure(mbm, cm),
  _space_id(space_id),
  _start_array(PSParallelCompact::start_array(space_id))
{
}

// Updates the references in the object to their new values.
ParMarkBitMapClosure::IterationStatus
UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
  do_addr(addr);
  return ParMarkBitMap::incomplete;
}
