/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTIONSET_HPP
#define SHARE_GC_G1_G1COLLECTIONSET_HPP

#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

class G1CollectedHeap;
class G1CollectionSetCandidates;
class G1CollectorState;
class G1GCPhaseTimes;
class G1ParScanThreadStateSet;
class G1Policy;
class G1SurvivorRegions;
class HeapRegion;
class HeapRegionClaimer;
class HeapRegionClosure;

// The collection set.
//
// The set of regions that are evacuated during an evacuation pause.
//
// At the end of a collection, before freeing the collection set, this set
// contains all regions that were evacuated during this collection:
//
// - survivor regions from the last collection (if any)
// - eden regions allocated by the mutator
// - old gen regions evacuated during mixed gc
//
// This set is built incrementally at mutator time as regions are retired. If
// this is a mixed gc, additional old regions from the collection set candidates
// built during the concurrent cycle are added incrementally during the gc.
//
// A more detailed overview of how the collection set changes over time follows:
//
// 0) at the end of GC the survivor regions are added to this collection set.
// 1) the mutator incrementally adds eden regions as they retire
//
// ----- gc starts
//
// 2) prepare (finalize) young regions of the collection set for collection
//    - relabel the survivors as eden
//    - finish up the incremental building that happened at mutator time
//
// iff this is a young-only collection:
//
// a3) evacuate the current collection set in one "initial evacuation" phase
//
// iff this is a mixed collection:
//
// b3) calculate the set of old gen regions we may be able to collect in this
//     collection from the list of collection set candidates.
//     - one part is added to the current collection set
//     - the remainder regions are labeled as optional, and NOT yet added to the
//       collection set.
// b4) evacuate the current collection set in the "initial evacuation" phase
// b5) evacuate the optional regions in the "optional evacuation" phase. This is
//     done in increments (or rounds).
//     b5-1) add a few of the optional regions to the current collection set
//     b5-2) evacuate only these newly added optional regions. For this mechanism we
//           reuse the incremental collection set building infrastructure (used also
//           at mutator time).
//     b5-3) repeat from b5-1 until the policy determines we are done
//
// all collections
//
// 6) free the collection set (contains all regions now; empties collection set
//    afterwards)
// 7) add survivors to this collection set
//
// ----- gc ends
//
// goto 1)
//
// Examples of how the collection set might look over time:
//
// Legend:
// S = survivor, E = eden, O = old.
// |xxxx| = increment (with increment markers), containing four regions
//
// |SSSS|               ... after step 0), with four survivor regions
// |SSSSEE|             ... at step 1), after retiring two eden regions
// |SSSSEEEE|           ... after step 1), after retiring four eden regions
// |EEEEEEEE|           ... after step 2)
//
// iff this is a young-only collection
//
// EEEEEEEE||           ... after step a3), after initial evacuation phase
// ||                   ... after step 6)
// |SS|                 ... after step 7), with two survivor regions
//
// iff this is a mixed collection
//
// |EEEEEEEEOOOO|       ... after step b3), added four regions to be
//                          evacuated in the "initial evacuation" phase
// EEEEEEEEOOOO||       ... after step b4), incremental part is empty
//                          after evacuation
// EEEEEEEEOOOO|OO|     ... after step b5.1), added two regions to be
//                          evacuated in the first round of the
//                          "optional evacuation" phase
// EEEEEEEEOOOOOO|O|    ... after step b5.1), added one region to be
//                          evacuated in the second round of the
//                          "optional evacuation" phase
// EEEEEEEEOOOOOOO||    ... after step b5), the complete collection set.
// ||                   ... after step 6)
// |SSS|                ... after step 7), with three survivor regions
//
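// The sketch below illustrates the flow above in terms of this class' public
// API. It is illustrative only: the surrounding helpers (retire_eden_region(),
// collect(), the evacuation steps) are assumed names, not declarations from
// this file, and locking and error handling are omitted.
//
//   // Mutator time: eden regions are added as they are retired (step 1).
//   void retire_eden_region(G1CollectionSet* cset, HeapRegion* hr) {
//     cset->add_eden_region(hr);
//   }
//
//   // Pause time: finalize the young (and possibly old) parts, evacuate,
//   // then run optional rounds for a mixed collection.
//   void collect(G1CollectionSet* cset, double target_pause_time_ms,
//                G1SurvivorRegions* survivors, double remaining_ms) {
//     cset->finalize_initial_collection_set(target_pause_time_ms, survivors); // steps 2) and b3)
//     // ... "initial evacuation" of the current collection set (a3 / b4) ...
//     while (cset->finalize_optional_for_evacuation(remaining_ms)) {          // step b5-1)
//       // ... evacuate the regions of the newly built increment (b5-2) ...
//     }
//     // ... free the collection set (6) and re-add survivors (7 / step 0 of the next cycle) ...
//   }
//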
class G1CollectionSet {
  G1CollectedHeap* _g1h;
  G1Policy* _policy;

  // All old gen collection set candidate regions for the current mixed phase.
  G1CollectionSetCandidates* _candidates;

  uint _eden_region_length;
  uint _survivor_region_length;
  uint _old_region_length;

  // The actual collection set as a set of region indices.
  // All entries in _collection_set_regions below _collection_set_cur_length are
  // assumed to be part of the collection set.
  // We assume that at any time there is at most one writer and (one or more)
  // concurrent readers. This means we are good with using storestore and loadload
  // barriers on the writer and reader respectively only.
  uint* _collection_set_regions;
  volatile size_t _collection_set_cur_length;
  size_t _collection_set_max_length;
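  // A sketch of the publication protocol assumed by the comment above. This is
  // not code from this file; it only illustrates the intended ordering, the
  // actual implementation lives in g1CollectionSet.cpp.
  //
  //   // Single writer: publish the new entry before publishing the new length.
  //   _collection_set_regions[_collection_set_cur_length] = hr->hrm_index();
  //   OrderAccess::storestore();              // entry visible before length
  //   _collection_set_cur_length += 1;
  //
  //   // Concurrent reader: read the length first, then only entries below it.
  //   size_t len = _collection_set_cur_length;
  //   OrderAccess::loadload();                // length read before entries
  //   for (size_t i = 0; i < len; i++) {
  //     uint region_idx = _collection_set_regions[i];
  //     // ... use region_idx ...
  //   }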

  // When doing mixed collections we can add old regions to the collection set, which
  // will be collected only if there is enough time. We call these optional regions.
  // This member records the current number of regions that are of that type that
  // correspond to the first x entries in the collection set candidates.
  uint _num_optional_regions;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause, and updated as more regions are added to the collection set.
  size_t _bytes_used_before;

  // The number of cards in the remembered set in the collection set. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause, and updated as more regions are added to the collection set.
  size_t _recorded_rs_length;

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_build_state;
  size_t _inc_part_start;

  // Information about eden regions in the incremental collection set.
  struct IncCollectionSetRegionStat {
    // The predicted non-copy time that was added to the total incremental value
    // for the collection set.
    double _non_copy_time_ms;
    // The remembered set length that was added to the total incremental value
    // for the collection set.
    size_t _rs_length;

#ifdef ASSERT
    // Resets members to "uninitialized" values.
    void reset() { _rs_length = ~(size_t)0; _non_copy_time_ms = -1.0; }
#endif
  };

  // The associated information that is maintained while the incremental
  // collection set is being built with *young* regions. Used to populate
  // the recorded info for the evacuation pause.
  IncCollectionSetRegionStat* _inc_collection_set_stats;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_bytes_used_before;

  // The RSet lengths recorded for regions in the CSet. It is updated
  // by the thread that adds a new region to the CSet. We assume that
  // only one thread can be allocating a new CSet region (currently,
  // it does so after taking the Heap_lock) hence no need to
  // synchronize updates to this field.
  size_t _inc_recorded_rs_length;

  // A concurrent refinement thread periodically samples the young
  // region RSets and needs to update _inc_recorded_rs_length as
  // the RSets grow. Instead of having to synchronize updates to that
  // field, we accumulate them in this field and add it to
  // _inc_recorded_rs_length at the start of a GC.
  size_t _inc_recorded_rs_length_diff;

  // The predicted elapsed time it will take to collect the regions in
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_recorded_rs_length about
  // MT-safety assumptions.
  double _inc_predicted_non_copy_time_ms;

  // See the comment for _inc_recorded_rs_length_diff.
  double _inc_predicted_non_copy_time_ms_diff;
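  // A sketch of how the *_diff fields above are assumed to be consumed,
  // presumably as part of finalize_incremental_building() below; it is shown
  // here only to make the accumulate-then-fold scheme concrete and is not code
  // from this file.
  //
  //   // Sampling thread: accumulate growth without touching the base field.
  //   _inc_recorded_rs_length_diff += rs_length_growth;
  //
  //   // At the start of a GC (single-threaded), fold the accumulated diffs
  //   // into the base fields and reset them.
  //   _inc_recorded_rs_length           += _inc_recorded_rs_length_diff;
  //   _inc_predicted_non_copy_time_ms   += _inc_predicted_non_copy_time_ms_diff;
  //   _inc_recorded_rs_length_diff       = 0;
  //   _inc_predicted_non_copy_time_ms_diff = 0.0;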

  void set_recorded_rs_length(size_t rs_length);

  G1CollectorState* collector_state() const;
  G1GCPhaseTimes* phase_times();

  void verify_young_cset_indices() const NOT_DEBUG_RETURN;

  double predict_region_non_copy_time_ms(HeapRegion* hr) const;

  // Update the incremental collection set information when adding a region.
  void add_young_region_common(HeapRegion* hr);

  // Add old region "hr" to the collection set.
  void add_old_region(HeapRegion* hr);
  void free_optional_regions();

  // Add old region "hr" to the optional collection set.
  void add_optional_region(HeapRegion* hr);

  void move_candidates_to_collection_set(uint num_regions);

  // Finalize the young part of the initial collection set. Relabel survivor regions
  // as Eden and calculate a prediction on how long the evacuation of all young regions
  // will take.
  double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
  // Perform any final calculations on the incremental collection set fields before we
  // can use them.
  void finalize_incremental_building();

  // Select the old regions of the initial collection set and determine how many optional
  // regions we might be able to evacuate in this pause.
  void finalize_old_part(double time_remaining_ms);

  // Iterate the part of the collection set given by the offset and length, applying the
  // given HeapRegionClosure. The worker_id determines where in that part to start the
  // iteration, to allow for more efficient parallel iteration.
  void iterate_part_from(HeapRegionClosure* cl,
                         HeapRegionClaimer* hr_claimer,
                         size_t offset,
                         size_t length,
                         uint worker_id,
                         uint total_workers) const;
public:
  G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
  ~G1CollectionSet();

  // Initializes the collection set, given the maximum possible length of the collection set.
  void initialize(uint max_region_length);

  void clear_candidates();

  void set_candidates(G1CollectionSetCandidates* candidates) {
    assert(_candidates == NULL, "Trying to replace collection set candidates.");
    _candidates = candidates;
  }
  G1CollectionSetCandidates* candidates() { return _candidates; }

  void init_region_lengths(uint eden_cset_region_length,
                           uint survivor_cset_region_length);

  uint region_length() const       { return young_region_length() + old_region_length(); }
  uint young_region_length() const { return eden_region_length() + survivor_region_length(); }

  uint eden_region_length() const     { return _eden_region_length; }
  uint survivor_region_length() const { return _survivor_region_length; }
  uint old_region_length() const      { return _old_region_length; }
  uint optional_region_length() const { return _num_optional_regions; }

  // Reset the contents of the collection set.
  void clear();

  // Incremental collection set support

  // Initialize incremental collection set info.
  void start_incremental_building();
  // Start a new collection set increment.
  void update_incremental_marker() { _inc_build_state = Active; _inc_part_start = _collection_set_cur_length; }
  // Stop adding regions to the current collection set increment.
  void stop_incremental_building() { _inc_build_state = Inactive; }

  // Iterate over the current collection set increment, applying the given HeapRegionClosure
  // from a starting position determined by the given worker id.
  void iterate_incremental_part_from(HeapRegionClosure* cl, HeapRegionClaimer* hr_claimer, uint worker_id, uint total_workers) const;
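  // A sketch of the start-offset scheme the iterators above refer to; the exact
  // formula lives in the implementation file, this only shows why worker_id and
  // total_workers are passed down. Hypothetical code, not declarations from
  // this file.
  //
  //   // Spread the workers' starting positions evenly across the part so they
  //   // do not all contend on the same first regions, then wrap around.
  //   size_t start = offset + (worker_id * length) / total_workers;
  //   for (size_t i = 0; i < length; i++) {
  //     size_t cur = offset + ((start - offset) + i) % length;   // wrap within the part
  //     uint region_idx = _collection_set_regions[cur];
  //     // ... claim region_idx via hr_claimer, then apply cl to that region ...
  //   }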

  // Returns the length of the current increment in number of regions.
  size_t increment_length() const { return _collection_set_cur_length - _inc_part_start; }
  // Returns the length of the whole current collection set in number of regions.
  size_t cur_length() const { return _collection_set_cur_length; }

  // Iterate over the entire collection set (all increments calculated so far), applying
  // the given HeapRegionClosure on all of them.
  void iterate(HeapRegionClosure* cl) const;
  void par_iterate(HeapRegionClosure* cl,
                   HeapRegionClaimer* hr_claimer,
                   uint worker_id,
                   uint total_workers) const;

  void iterate_optional(HeapRegionClosure* cl) const;

  size_t recorded_rs_length() { return _recorded_rs_length; }

  size_t bytes_used_before() const {
    return _bytes_used_before;
  }

  void reset_bytes_used_before() {
    _bytes_used_before = 0;
  }

  // Finalize the initial collection set, consisting of all young regions and potentially
  // a few old gen regions.
  void finalize_initial_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor);
  // Finalize the next collection set increment from the set of available optional old gen regions.
  bool finalize_optional_for_evacuation(double remaining_pause_time);
  // Abandon (clean up) optional collection set regions that were not evacuated in this
  // pause.
  void abandon_optional_collection_set(G1ParScanThreadStateSet* pss);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_young_region_prediction(HeapRegion* hr, size_t new_rs_length);

  // Add eden region to the collection set.
  void add_eden_region(HeapRegion* hr);

  // Add survivor region to the collection set.
  void add_survivor_regions(HeapRegion* hr);

#ifndef PRODUCT
  bool verify_young_ages();

  void print(outputStream* st);
#endif // !PRODUCT
};

#endif // SHARE_GC_G1_G1COLLECTIONSET_HPP