/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1ParScanThreadState.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "logging/logStream.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/quickSort.hpp"

G1CollectorState* G1CollectionSet::collector_state() const {
  return _g1h->collector_state();
}

G1GCPhaseTimes* G1CollectionSet::phase_times() {
  return _policy->phase_times();
}

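// Predict the time taken for a region by everything other than copying its live
// objects (e.g. remembered set work), depending on whether we are in the
// young-only or mixed phase.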
double G1CollectionSet::predict_region_non_copy_time_ms(HeapRegion* hr) const {
  return _policy->predict_region_non_copy_time_ms(hr, collector_state()->in_young_only_phase());
}

G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
  _g1h(g1h),
  _policy(policy),
  _candidates(NULL),
  _eden_region_length(0),
  _survivor_region_length(0),
  _old_region_length(0),
  _collection_set_regions(NULL),
  _collection_set_cur_length(0),
  _collection_set_max_length(0),
  _num_optional_regions(0),
  _bytes_used_before(0),
  _recorded_rs_length(0),
  _inc_build_state(Inactive),
  _inc_part_start(0),
  _inc_collection_set_stats(NULL),
  _inc_bytes_used_before(0),
  _inc_recorded_rs_length(0),
  _inc_recorded_rs_length_diff(0),
  _inc_predicted_non_copy_time_ms(0.0),
  _inc_predicted_non_copy_time_ms_diff(0.0) {
}

G1CollectionSet::~G1CollectionSet() {
  FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
  FREE_C_HEAP_ARRAY(IncCollectionSetRegionStat, _inc_collection_set_stats);
  free_optional_regions();
  clear_candidates();
}

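// Set the number of eden and survivor regions in the current collection set.
// Old and optional regions are chosen later, so their counts are reset here.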
void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
                                          uint survivor_cset_region_length) {
  assert_at_safepoint_on_vm_thread();

  _eden_region_length     = eden_cset_region_length;
  _survivor_region_length = survivor_cset_region_length;

  assert((size_t) young_region_length() == _collection_set_cur_length,
         "Young region length %u should match collection set length " SIZE_FORMAT, young_region_length(), _collection_set_cur_length);

  _old_region_length = 0;
  free_optional_regions();
}

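// Allocate the backing arrays for region indices and per-region statistics,
// sized for the maximum number of regions the collection set can ever contain.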
void G1CollectionSet::initialize(uint max_region_length) {
  guarantee(_collection_set_regions == NULL, "Must only initialize once.");
  _collection_set_max_length = max_region_length;
  _collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
  _inc_collection_set_stats = NEW_C_HEAP_ARRAY(IncCollectionSetRegionStat, max_region_length, mtGC);
}

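// Optional regions are tracked only by a count; the regions themselves stay in
// the candidate list, so releasing them just resets that count.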
void G1CollectionSet::free_optional_regions() {
  _num_optional_regions = 0;
}

void G1CollectionSet::clear_candidates() {
  delete _candidates;
  _candidates = NULL;
}

void G1CollectionSet::set_recorded_rs_length(size_t rs_length) {
  _recorded_rs_length = rs_length;
}

// Add an old heap region to the collection set.
void G1CollectionSet::add_old_region(HeapRegion* hr) {
  assert_at_safepoint_on_vm_thread();

  assert(_inc_build_state == Active,
         "Precondition, actively building cset or adding optional later on");
  assert(hr->is_old(), "the region should be old");

  assert(!hr->in_collection_set(), "should not already be in the collection set");
  _g1h->register_old_region_with_region_attr(hr);

  _collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
  assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");

  _bytes_used_before += hr->used();
  _recorded_rs_length += hr->rem_set()->occupied();
  _old_region_length++;

  _g1h->old_set_remove(hr);
}

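// Prepare an old region for possible evacuation as an optional region: register
// it with the region attribute table and record its position among the optional
// regions. It is not yet part of the collection set proper.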
void G1CollectionSet::add_optional_region(HeapRegion* hr) {
  assert(hr->is_old(), "the region should be old");
  assert(!hr->in_collection_set(), "should not already be in the CSet");

  _g1h->register_optional_region_with_region_attr(hr);

  hr->set_index_in_opt_cset(_num_optional_regions++);
}

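// Start building a new incremental collection set: reset the per-region
// statistics and the accumulated remembered set length and time predictions,
// and mark building as active.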
void G1CollectionSet::start_incremental_building() {
  assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set.");
  assert(_inc_build_state == Inactive, "Precondition");
#ifdef ASSERT
  for (size_t i = 0; i < _collection_set_max_length; i++) {
    _inc_collection_set_stats[i].reset();
  }
#endif

  _inc_bytes_used_before = 0;

  _inc_recorded_rs_length = 0;
  _inc_recorded_rs_length_diff = 0;
  _inc_predicted_non_copy_time_ms = 0.0;
  _inc_predicted_non_copy_time_ms_diff = 0.0;

  update_incremental_marker();
}

void G1CollectionSet::finalize_incremental_building() {
  assert(_inc_build_state == Active, "Precondition");
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");

  // The two "main" fields, _inc_recorded_rs_length and
  // _inc_predicted_non_copy_time_ms, are updated by the thread
  // that adds a new region to the CSet. Further updates by the
  // concurrent refinement thread that samples the young RSet lengths
  // are accumulated in the *_diff fields. Here we add the diffs to
  // the "main" fields.

  _inc_recorded_rs_length += _inc_recorded_rs_length_diff;
  _inc_predicted_non_copy_time_ms += _inc_predicted_non_copy_time_ms_diff;

  _inc_recorded_rs_length_diff = 0;
  _inc_predicted_non_copy_time_ms_diff = 0.0;
}

void G1CollectionSet::clear() {
  assert_at_safepoint_on_vm_thread();
  _collection_set_cur_length = 0;
}

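// Iterate over all regions currently in the collection set. The current length
// is read once up front; the loadload barrier pairs with the storestore in
// add_young_region_common() so that concurrent readers see fully initialized
// entries for every index below the observed length.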
void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
  size_t len = _collection_set_cur_length;
  OrderAccess::loadload();

  for (uint i = 0; i < len; i++) {
    HeapRegion* r = _g1h->region_at(_collection_set_regions[i]);
    bool result = cl->do_heap_region(r);
    if (result) {
      cl->set_incomplete();
      return;
    }
  }
}

void G1CollectionSet::par_iterate(HeapRegionClosure* cl,
                                  HeapRegionClaimer* hr_claimer,
                                  uint worker_id,
                                  uint total_workers) const {
  iterate_part_from(cl, hr_claimer, 0, cur_length(), worker_id, total_workers);
}

void G1CollectionSet::iterate_optional(HeapRegionClosure* cl) const {
  assert_at_safepoint();

  for (uint i = 0; i < _num_optional_regions; i++) {
    HeapRegion* r = _candidates->at(i);
    bool result = cl->do_heap_region(r);
    guarantee(!result, "Must not cancel iteration");
  }
}

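// Iterate over only the regions added to the collection set since the current
// increment was started (i.e. starting at _inc_part_start).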
void G1CollectionSet::iterate_incremental_part_from(HeapRegionClosure* cl,
                                                    HeapRegionClaimer* hr_claimer,
                                                    uint worker_id,
                                                    uint total_workers) const {
  iterate_part_from(cl, hr_claimer, _inc_part_start, increment_length(), worker_id, total_workers);
}

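// Iterate over the regions at [offset, offset + length) in the collection set.
// Each worker starts at a position proportional to its id and wraps around, so
// that claiming regions via hr_claimer spreads the work across workers.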
void G1CollectionSet::iterate_part_from(HeapRegionClosure* cl,
                                        HeapRegionClaimer* hr_claimer,
                                        size_t offset,
                                        size_t length,
                                        uint worker_id,
                                        uint total_workers) const {
  assert_at_safepoint();
  if (length == 0) {
    return;
  }

  size_t start_pos = (worker_id * length) / total_workers;
  size_t cur_pos = start_pos;

  do {
    uint region_idx = _collection_set_regions[cur_pos + offset];
    if (hr_claimer == NULL || hr_claimer->claim_region(region_idx)) {
      HeapRegion* r = _g1h->region_at(region_idx);
      bool result = cl->do_heap_region(r);
      guarantee(!result, "Must not cancel iteration");
    }

    cur_pos++;
    if (cur_pos == length) {
      cur_pos = 0;
    }
  } while (cur_pos != start_pos);
}

void G1CollectionSet::update_young_region_prediction(HeapRegion* hr,
                                                     size_t new_rs_length) {
  // Update the CSet information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at a safepoint");

  IncCollectionSetRegionStat* stat = &_inc_collection_set_stats[hr->hrm_index()];

  size_t old_rs_length = stat->_rs_length;
  assert(old_rs_length <= new_rs_length,
         "Remembered set decreased (changed from " SIZE_FORMAT " to " SIZE_FORMAT " region %u type %s)",
         old_rs_length, new_rs_length, hr->hrm_index(), hr->get_short_type_str());
  size_t rs_length_diff = new_rs_length - old_rs_length;
  stat->_rs_length = new_rs_length;
  _inc_recorded_rs_length_diff += rs_length_diff;

  double old_non_copy_time = stat->_non_copy_time_ms;
  assert(old_non_copy_time >= 0.0, "Non copy time for region %u not initialized yet, is %.3f", hr->hrm_index(), old_non_copy_time);
  double new_non_copy_time = predict_region_non_copy_time_ms(hr);
  double non_copy_time_ms_diff = new_non_copy_time - old_non_copy_time;

  stat->_non_copy_time_ms = new_non_copy_time;
  _inc_predicted_non_copy_time_ms_diff += non_copy_time_ms_diff;
}

void G1CollectionSet::add_young_region_common(HeapRegion* hr) {
  assert(hr->is_young(), "invariant");
  assert(_inc_build_state == Active, "Precondition");

  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause, or
  // * adding the current allocation region to the incremental cset
  //   when it is retired.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region).
  // We need to clear and set the cached recorded collection set
  // information in the heap region here (before the region gets added
  // to the collection set). An individual heap region's cached values
  // are calculated, aggregated with the policy collection set info,
  // and cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.
  // Ignore calls to this due to retirement during full gc.

  if (!_g1h->collector_state()->in_full_gc()) {
    size_t rs_length = hr->rem_set()->occupied();
    double non_copy_time = predict_region_non_copy_time_ms(hr);

    // Cache the values we have added to the aggregated information
    // in the heap region in case we have to remove this region from
    // the incremental collection set, or it is updated by the
    // rset sampling code

    IncCollectionSetRegionStat* stat = &_inc_collection_set_stats[hr->hrm_index()];
    stat->_rs_length = rs_length;
    stat->_non_copy_time_ms = non_copy_time;

    _inc_recorded_rs_length += rs_length;
    _inc_predicted_non_copy_time_ms += non_copy_time;
    _inc_bytes_used_before += hr->used();
  }

  assert(!hr->in_collection_set(), "invariant");
  _g1h->register_young_region_with_region_attr(hr);

  // We use UINT_MAX as "invalid" marker in verification.
  assert(_collection_set_cur_length < (UINT_MAX - 1),
         "Collection set is too large with " SIZE_FORMAT " entries", _collection_set_cur_length);
  hr->set_young_index_in_cset((uint)_collection_set_cur_length + 1);

  _collection_set_regions[_collection_set_cur_length] = hr->hrm_index();
  // Concurrent readers must observe the store of the value in the array before an
  // update to the length field.
  OrderAccess::storestore();
  _collection_set_cur_length++;
  assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set larger than maximum allowed.");
}

void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
  assert(hr->is_survivor(), "Must only add survivor regions, but is %s", hr->get_type_str());
  add_young_region_common(hr);
}

void G1CollectionSet::add_eden_region(HeapRegion* hr) {
  assert(hr->is_eden(), "Must only add eden regions, but is %s", hr->get_type_str());
  add_young_region_common(hr);
}

#ifndef PRODUCT
class G1VerifyYoungAgesClosure : public HeapRegionClosure {
public:
  bool _valid;

  G1VerifyYoungAgesClosure() : HeapRegionClosure(), _valid(true) { }

  virtual bool do_heap_region(HeapRegion* r) {
    guarantee(r->is_young(), "Region must be young but is %s", r->get_type_str());

    if (!r->has_surv_rate_group()) {
      log_error(gc, verify)("## encountered young region without surv_rate_group");
      _valid = false;
    }

    if (!r->has_valid_age_in_surv_rate()) {
      log_error(gc, verify)("## encountered invalid age in young region");
      _valid = false;
    }

    return false;
  }

  bool valid() const { return _valid; }
};

bool G1CollectionSet::verify_young_ages() {
  assert_at_safepoint_on_vm_thread();

  G1VerifyYoungAgesClosure cl;
  iterate(&cl);

  if (!cl.valid()) {
    LogStreamHandle(Error, gc, verify) log;
    print(&log);
  }

  return cl.valid();
}

class G1PrintCollectionSetDetailClosure : public HeapRegionClosure {
  outputStream* _st;
public:
  G1PrintCollectionSetDetailClosure(outputStream* st) : HeapRegionClosure(), _st(st) { }

  virtual bool do_heap_region(HeapRegion* r) {
    assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index());
    _st->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
                  HR_FORMAT_PARAMS(r),
                  p2i(r->prev_top_at_mark_start()),
                  p2i(r->next_top_at_mark_start()),
                  r->has_surv_rate_group() ? r->age_in_surv_rate_group() : -1);
    return false;
  }
};

void G1CollectionSet::print(outputStream* st) {
  st->print_cr("\nCollection_set:");

  G1PrintCollectionSetDetailClosure cl(st);
  iterate(&cl);
}
#endif // !PRODUCT

double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors) {
  Ticks start_time = Ticks::now();

  finalize_incremental_building();

  guarantee(target_pause_time_ms > 0.0,
            "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);

  size_t pending_cards = _policy->pending_cards_at_gc_start() + _g1h->hot_card_cache()->num_entries();

  log_trace(gc, ergo, cset)("Start choosing CSet. Pending cards: " SIZE_FORMAT " target pause time: %1.2fms",
                            pending_cards, target_pause_time_ms);

  // The young list is laid out with the survivor regions from the previous
  // pause appended to the RHS of the young list, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  uint eden_region_length = _g1h->eden_regions_count();
  uint survivor_region_length = survivors->length();
  init_region_lengths(eden_region_length, survivor_region_length);

  verify_young_cset_indices();

  // Clear the fields that point to the survivor list - they are all young now.
  survivors->convert_to_eden();

  _bytes_used_before = _inc_bytes_used_before;

  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_rs_length(_inc_recorded_rs_length);

  double predicted_base_time_ms = _policy->predict_base_elapsed_time_ms(pending_cards);
  double predicted_eden_time = _inc_predicted_non_copy_time_ms + _policy->predict_eden_copy_time_ms(eden_region_length);
  double remaining_time_ms = MAX2(target_pause_time_ms - (predicted_base_time_ms + predicted_eden_time), 0.0);

  log_trace(gc, ergo, cset)("Added young regions to CSet. Eden: %u regions, Survivors: %u regions, "
                            "predicted eden time: %1.2fms, predicted base time: %1.2fms, target pause time: %1.2fms, remaining time: %1.2fms",
                            eden_region_length, survivor_region_length,
                            predicted_eden_time, predicted_base_time_ms, target_pause_time_ms, remaining_time_ms);

  phase_times()->record_young_cset_choice_time_ms((Ticks::now() - start_time).seconds() * 1000.0);

  return remaining_time_ms;
}

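// Order region indices ascending for the QuickSort::sort() call below.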
static int compare_region_idx(const uint a, const uint b) {
  if (a > b) {
    return 1;
  } else if (a == b) {
    return 0;
  } else {
    return -1;
  }
}

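// Select old regions for the collection set: during the mixed phase, take the
// initial (mandatory) regions from the candidates and prepare further candidates
// as optional regions, then stop incremental building and sort the collection
// set by region index.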
void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
  double non_young_start_time_sec = os::elapsedTime();

  if (collector_state()->in_mixed_phase()) {
    candidates()->verify();

    uint num_initial_old_regions;
    uint num_optional_old_regions;

    _policy->calculate_old_collection_set_regions(candidates(),
                                                  time_remaining_ms,
                                                  num_initial_old_regions,
                                                  num_optional_old_regions);

    // Prepare initial old regions.
    move_candidates_to_collection_set(num_initial_old_regions);

    // Prepare optional old regions for evacuation.
    uint candidate_idx = candidates()->cur_idx();
    for (uint i = 0; i < num_optional_old_regions; i++) {
      add_optional_region(candidates()->at(candidate_idx + i));
    }

    candidates()->verify();
  }

  stop_incremental_building();

  double non_young_end_time_sec = os::elapsedTime();
  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);

  QuickSort::sort(_collection_set_regions, _collection_set_cur_length, compare_region_idx, true);
}

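// Move the first num_old_candidate_regions regions from the candidate list into
// the collection set proper, clearing any optional-region attribute first.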
void G1CollectionSet::move_candidates_to_collection_set(uint num_old_candidate_regions) {
  if (num_old_candidate_regions == 0) {
    return;
  }
  uint candidate_idx = candidates()->cur_idx();
  for (uint i = 0; i < num_old_candidate_regions; i++) {
    HeapRegion* r = candidates()->at(candidate_idx + i);
    // This potentially optional candidate region is going to be an actual collection
    // set region. Clear cset marker.
    _g1h->clear_region_attr(r);
    add_old_region(r);
  }
  candidates()->remove(num_old_candidate_regions);

  candidates()->verify();
}

void G1CollectionSet::finalize_initial_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) {
  double time_remaining_ms = finalize_young_part(target_pause_time_ms, survivor);
  finalize_old_part(time_remaining_ms);
}

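// Select as many of the currently prepared optional regions as fit into the
// remaining pause time and move them into the collection set. Returns whether
// any regions were selected for evacuation.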
bool G1CollectionSet::finalize_optional_for_evacuation(double remaining_pause_time) {
  update_incremental_marker();

  uint num_selected_regions;
  _policy->calculate_optional_collection_set_regions(candidates(),
                                                     _num_optional_regions,
                                                     remaining_pause_time,
                                                     num_selected_regions);

  move_candidates_to_collection_set(num_selected_regions);

  _num_optional_regions -= num_selected_regions;

  stop_incremental_building();

  _g1h->verify_region_attr_remset_update();

  return num_selected_regions > 0;
}

void G1CollectionSet::abandon_optional_collection_set(G1ParScanThreadStateSet* pss) {
  for (uint i = 0; i < _num_optional_regions; i++) {
    HeapRegion* r = candidates()->at(candidates()->cur_idx() + i);
    pss->record_unused_optional_region(r);
    // Clear collection set marker and make sure that the remembered set information
    // is correct as we still need it later.
    _g1h->clear_region_attr(r);
    _g1h->register_region_with_region_attr(r);
    r->clear_index_in_opt_cset();
  }
  free_optional_regions();

  _g1h->verify_region_attr_remset_update();
}

#ifdef ASSERT
class G1VerifyYoungCSetIndicesClosure : public HeapRegionClosure {
private:
  size_t _young_length;
  uint* _heap_region_indices;
public:
  G1VerifyYoungCSetIndicesClosure(size_t young_length) : HeapRegionClosure(), _young_length(young_length) {
    _heap_region_indices = NEW_C_HEAP_ARRAY(uint, young_length + 1, mtGC);
    for (size_t i = 0; i < young_length + 1; i++) {
      _heap_region_indices[i] = UINT_MAX;
    }
  }
  ~G1VerifyYoungCSetIndicesClosure() {
    FREE_C_HEAP_ARRAY(uint, _heap_region_indices);
  }

  virtual bool do_heap_region(HeapRegion* r) {
    const uint idx = r->young_index_in_cset();

    assert(idx > 0, "Young index must be set for all regions in the incremental collection set but is not for region %u.", r->hrm_index());
    assert(idx <= _young_length, "Young cset index %u too large for region %u", idx, r->hrm_index());

    assert(_heap_region_indices[idx] == UINT_MAX,
           "Index %d used by multiple regions, first use by region %u, second by region %u",
           idx, _heap_region_indices[idx], r->hrm_index());

    _heap_region_indices[idx] = r->hrm_index();

    return false;
  }
};

void G1CollectionSet::verify_young_cset_indices() const {
  assert_at_safepoint_on_vm_thread();

  G1VerifyYoungCSetIndicesClosure cl(_collection_set_cur_length);
  iterate(&cl);
}
#endif