1 /*
2 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/g1Arguments.hpp"
27 #include "gc/g1/g1CollectedHeap.inline.hpp"
28 #include "gc/g1/g1ConcurrentRefine.hpp"
29 #include "gc/g1/g1CommittedRegionMap.inline.hpp"
30 #include "gc/g1/g1NUMAStats.hpp"
31 #include "gc/g1/heapRegion.hpp"
32 #include "gc/g1/heapRegionManager.inline.hpp"
33 #include "gc/g1/heapRegionSet.inline.hpp"
34 #include "jfr/jfrEvents.hpp"
35 #include "logging/logStream.hpp"
36 #include "memory/allocation.hpp"
37 #include "runtime/atomic.hpp"
38 #include "runtime/orderAccess.hpp"
39 #include "utilities/bitMap.inline.hpp"
40
41 class MasterFreeRegionListChecker : public HeapRegionSetChecker {
42 public:
check_mt_safety()43 void check_mt_safety() {
44 // Master Free List MT safety protocol:
45 // (a) If we're at a safepoint, operations on the master free list
46 // should be invoked by either the VM thread (which will serialize
47 // them) or by the GC workers while holding the
48 // FreeList_lock.
49 // (b) If we're not at a safepoint, operations on the master free
50 // list should be invoked while holding the Heap_lock.
51
52 if (SafepointSynchronize::is_at_safepoint()) {
53 guarantee(Thread::current()->is_VM_thread() ||
54 FreeList_lock->owned_by_self(), "master free list MT safety protocol at a safepoint");
55 } else {
56 guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint");
57 }
58 }
is_correct_type(HeapRegion * hr)59 bool is_correct_type(HeapRegion* hr) { return hr->is_free(); }
get_description()60 const char* get_description() { return "Free Regions"; }
61 };
62
HeapRegionManager()63 HeapRegionManager::HeapRegionManager() :
64 _bot_mapper(NULL),
65 _cardtable_mapper(NULL),
66 _card_counts_mapper(NULL),
67 _committed_map(),
68 _allocated_heapregions_length(0),
69 _regions(), _heap_mapper(NULL),
70 _prev_bitmap_mapper(NULL),
71 _next_bitmap_mapper(NULL),
72 _free_list("Free list", new MasterFreeRegionListChecker())
73 { }
74
initialize(G1RegionToSpaceMapper * heap_storage,G1RegionToSpaceMapper * prev_bitmap,G1RegionToSpaceMapper * next_bitmap,G1RegionToSpaceMapper * bot,G1RegionToSpaceMapper * cardtable,G1RegionToSpaceMapper * card_counts)75 void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
76 G1RegionToSpaceMapper* prev_bitmap,
77 G1RegionToSpaceMapper* next_bitmap,
78 G1RegionToSpaceMapper* bot,
79 G1RegionToSpaceMapper* cardtable,
80 G1RegionToSpaceMapper* card_counts) {
81 _allocated_heapregions_length = 0;
82
83 _heap_mapper = heap_storage;
84
85 _prev_bitmap_mapper = prev_bitmap;
86 _next_bitmap_mapper = next_bitmap;
87
88 _bot_mapper = bot;
89 _cardtable_mapper = cardtable;
90
91 _card_counts_mapper = card_counts;
92
93 _regions.initialize(heap_storage->reserved(), HeapRegion::GrainBytes);
94
95 _committed_map.initialize(reserved_length());
96 }
97
allocate_free_region(HeapRegionType type,uint requested_node_index)98 HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) {
99 HeapRegion* hr = NULL;
100 bool from_head = !type.is_young();
101 G1NUMA* numa = G1NUMA::numa();
102
103 if (requested_node_index != G1NUMA::AnyNodeIndex && numa->is_enabled()) {
104 // Try to allocate with requested node index.
105 hr = _free_list.remove_region_with_node_index(from_head, requested_node_index);
106 }
107
108 if (hr == NULL) {
109 // If there's a single active node or we did not get a region from our requested node,
110 // try without requested node index.
111 hr = _free_list.remove_region(from_head);
112 }
113
114 if (hr != NULL) {
115 assert(hr->next() == NULL, "Single region should not have next");
116 assert(is_available(hr->hrm_index()), "Must be committed");
117
118 if (numa->is_enabled() && hr->node_index() < numa->num_active_nodes()) {
119 numa->update_statistics(G1NUMAStats::NewRegionAlloc, requested_node_index, hr->node_index());
120 }
121 }
122
123 return hr;
124 }
125
allocate_humongous_from_free_list(uint num_regions)126 HeapRegion* HeapRegionManager::allocate_humongous_from_free_list(uint num_regions) {
127 uint candidate = find_contiguous_in_free_list(num_regions);
128 if (candidate == G1_NO_HRM_INDEX) {
129 return NULL;
130 }
131 return allocate_free_regions_starting_at(candidate, num_regions);
132 }
133
allocate_humongous_allow_expand(uint num_regions)134 HeapRegion* HeapRegionManager::allocate_humongous_allow_expand(uint num_regions) {
135 uint candidate = find_contiguous_allow_expand(num_regions);
136 if (candidate == G1_NO_HRM_INDEX) {
137 return NULL;
138 }
139 expand_exact(candidate, num_regions, G1CollectedHeap::heap()->workers());
140 return allocate_free_regions_starting_at(candidate, num_regions);
141 }
142
allocate_humongous(uint num_regions)143 HeapRegion* HeapRegionManager::allocate_humongous(uint num_regions) {
144 // Special case a single region to avoid expensive search.
145 if (num_regions == 1) {
146 return allocate_free_region(HeapRegionType::Humongous, G1NUMA::AnyNodeIndex);
147 }
148 return allocate_humongous_from_free_list(num_regions);
149 }
150
expand_and_allocate_humongous(uint num_regions)151 HeapRegion* HeapRegionManager::expand_and_allocate_humongous(uint num_regions) {
152 return allocate_humongous_allow_expand(num_regions);
153 }
154
#ifdef ASSERT
// Debug-only: a region is "free" iff it is on the master free list.
bool HeapRegionManager::is_free(HeapRegion* hr) const {
  return _free_list.contains(hr);
}
#endif
160
new_heap_region(uint hrm_index)161 HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
162 G1CollectedHeap* g1h = G1CollectedHeap::heap();
163 HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
164 MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
165 assert(reserved().contains(mr), "invariant");
166 return g1h->new_heap_region(hrm_index, mr);
167 }
168
expand(uint start,uint num_regions,WorkGang * pretouch_gang)169 void HeapRegionManager::expand(uint start, uint num_regions, WorkGang* pretouch_gang) {
170 commit_regions(start, num_regions, pretouch_gang);
171 for (uint i = start; i < start + num_regions; i++) {
172 HeapRegion* hr = _regions.get_by_index(i);
173 if (hr == NULL) {
174 hr = new_heap_region(i);
175 OrderAccess::storestore();
176 _regions.set_by_index(i, hr);
177 _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
178 }
179 G1CollectedHeap::heap()->hr_printer()->commit(hr);
180 }
181 activate_regions(start, num_regions);
182 }
183
commit_regions(uint index,size_t num_regions,WorkGang * pretouch_gang)184 void HeapRegionManager::commit_regions(uint index, size_t num_regions, WorkGang* pretouch_gang) {
185 guarantee(num_regions > 0, "Must commit more than zero regions");
186 guarantee(num_regions <= available(),
187 "Cannot commit more than the maximum amount of regions");
188
189 _heap_mapper->commit_regions(index, num_regions, pretouch_gang);
190
191 // Also commit auxiliary data
192 _prev_bitmap_mapper->commit_regions(index, num_regions, pretouch_gang);
193 _next_bitmap_mapper->commit_regions(index, num_regions, pretouch_gang);
194
195 _bot_mapper->commit_regions(index, num_regions, pretouch_gang);
196 _cardtable_mapper->commit_regions(index, num_regions, pretouch_gang);
197
198 _card_counts_mapper->commit_regions(index, num_regions, pretouch_gang);
199 }
200
uncommit_regions(uint start,uint num_regions)201 void HeapRegionManager::uncommit_regions(uint start, uint num_regions) {
202 guarantee(num_regions > 0, "No point in calling this for zero regions");
203
204 uint end = start + num_regions;
205 G1HRPrinter* printer = G1CollectedHeap::heap()->hr_printer();
206 if (printer->is_active()) {
207 for (uint i = start; i < end; i++) {
208 // Can't use at() here since region is no longer marked available.
209 HeapRegion* hr = _regions.get_by_index(i);
210 assert(hr != NULL, "Region should still be present");
211 printer->uncommit(hr);
212 }
213 }
214
215 // Uncommit heap memory
216 _heap_mapper->uncommit_regions(start, num_regions);
217
218 // Also uncommit auxiliary data
219 _prev_bitmap_mapper->uncommit_regions(start, num_regions);
220 _next_bitmap_mapper->uncommit_regions(start, num_regions);
221
222 _bot_mapper->uncommit_regions(start, num_regions);
223 _cardtable_mapper->uncommit_regions(start, num_regions);
224
225 _card_counts_mapper->uncommit_regions(start, num_regions);
226
227 _committed_map.uncommit(start, end);
228 }
229
initialize_regions(uint start,uint num_regions)230 void HeapRegionManager::initialize_regions(uint start, uint num_regions) {
231 for (uint i = start; i < start + num_regions; i++) {
232 assert(is_available(i), "Just made region %u available but is apparently not.", i);
233 HeapRegion* hr = at(i);
234
235 hr->initialize();
236 hr->set_node_index(G1NUMA::numa()->index_for_region(hr));
237 insert_into_free_list(hr);
238 G1CollectedHeap::heap()->hr_printer()->active(hr);
239 }
240 }
241
activate_regions(uint start,uint num_regions)242 void HeapRegionManager::activate_regions(uint start, uint num_regions) {
243 _committed_map.activate(start, start + num_regions);
244 initialize_regions(start, num_regions);
245 }
246
reactivate_regions(uint start,uint num_regions)247 void HeapRegionManager::reactivate_regions(uint start, uint num_regions) {
248 assert(num_regions > 0, "No point in calling this for zero regions");
249
250 clear_auxiliary_data_structures(start, num_regions);
251
252 _committed_map.reactivate(start, start + num_regions);
253 initialize_regions(start, num_regions);
254 }
255
deactivate_regions(uint start,uint num_regions)256 void HeapRegionManager::deactivate_regions(uint start, uint num_regions) {
257 assert(num_regions > 0, "Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start);
258 assert(length() >= num_regions, "pre-condition");
259
260 // Reset NUMA index to and print state change.
261 uint end = start + num_regions;
262 for (uint i = start; i < end; i++) {
263 HeapRegion* hr = at(i);
264 hr->set_node_index(G1NUMA::UnknownNodeIndex);
265 G1CollectedHeap::heap()->hr_printer()->inactive(hr);
266 }
267
268 _committed_map.deactivate(start, end);
269 }
270
clear_auxiliary_data_structures(uint start,uint num_regions)271 void HeapRegionManager::clear_auxiliary_data_structures(uint start, uint num_regions) {
272 // Signal marking bitmaps to clear the given regions.
273 _prev_bitmap_mapper->signal_mapping_changed(start, num_regions);
274 _next_bitmap_mapper->signal_mapping_changed(start, num_regions);
275 // Signal G1BlockOffsetTable to clear the given regions.
276 _bot_mapper->signal_mapping_changed(start, num_regions);
277 // Signal G1CardTable to clear the given regions.
278 _cardtable_mapper->signal_mapping_changed(start, num_regions);
279 // Signal G1CardCounts to clear the given regions.
280 _card_counts_mapper->signal_mapping_changed(start, num_regions);
281 }
282
get_auxiliary_data_memory_usage() const283 MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
284 size_t used_sz =
285 _prev_bitmap_mapper->committed_size() +
286 _next_bitmap_mapper->committed_size() +
287 _bot_mapper->committed_size() +
288 _cardtable_mapper->committed_size() +
289 _card_counts_mapper->committed_size();
290
291 size_t committed_sz =
292 _prev_bitmap_mapper->reserved_size() +
293 _next_bitmap_mapper->reserved_size() +
294 _bot_mapper->reserved_size() +
295 _cardtable_mapper->reserved_size() +
296 _card_counts_mapper->reserved_size();
297
298 return MemoryUsage(0, used_sz, committed_sz, committed_sz);
299 }
300
has_inactive_regions() const301 bool HeapRegionManager::has_inactive_regions() const {
302 return _committed_map.num_inactive() > 0;
303 }
304
uncommit_inactive_regions(uint limit)305 uint HeapRegionManager::uncommit_inactive_regions(uint limit) {
306 assert(limit > 0, "Need to specify at least one region to uncommit");
307
308 uint uncommitted = 0;
309 uint offset = 0;
310 do {
311 MutexLocker uc(Uncommit_lock, Mutex::_no_safepoint_check_flag);
312 HeapRegionRange range = _committed_map.next_inactive_range(offset);
313 // No more regions available for uncommit. Return the number of regions
314 // already uncommitted or 0 if there were no longer any inactive regions.
315 if (range.length() == 0) {
316 return uncommitted;
317 }
318
319 uint start = range.start();
320 uint num_regions = MIN2(range.length(), limit - uncommitted);
321 uncommitted += num_regions;
322 uncommit_regions(start, num_regions);
323 } while (uncommitted < limit);
324
325 assert(uncommitted == limit, "Invariant");
326 return uncommitted;
327 }
328
expand_inactive(uint num_regions)329 uint HeapRegionManager::expand_inactive(uint num_regions) {
330 uint offset = 0;
331 uint expanded = 0;
332
333 do {
334 HeapRegionRange regions = _committed_map.next_inactive_range(offset);
335 if (regions.length() == 0) {
336 // No more unavailable regions.
337 break;
338 }
339
340 uint to_expand = MIN2(num_regions - expanded, regions.length());
341 reactivate_regions(regions.start(), to_expand);
342 expanded += to_expand;
343 offset = regions.end();
344 } while (expanded < num_regions);
345
346 return expanded;
347 }
348
expand_any(uint num_regions,WorkGang * pretouch_workers)349 uint HeapRegionManager::expand_any(uint num_regions, WorkGang* pretouch_workers) {
350 assert(num_regions > 0, "Must expand at least 1 region");
351
352 uint offset = 0;
353 uint expanded = 0;
354
355 do {
356 HeapRegionRange regions = _committed_map.next_committable_range(offset);
357 if (regions.length() == 0) {
358 // No more unavailable regions.
359 break;
360 }
361
362 uint to_expand = MIN2(num_regions - expanded, regions.length());
363 expand(regions.start(), to_expand, pretouch_workers);
364 expanded += to_expand;
365 offset = regions.end();
366 } while (expanded < num_regions);
367
368 return expanded;
369 }
370
expand_by(uint num_regions,WorkGang * pretouch_workers)371 uint HeapRegionManager::expand_by(uint num_regions, WorkGang* pretouch_workers) {
372 assert(num_regions > 0, "Must expand at least 1 region");
373
374 // First "undo" any requests to uncommit memory concurrently by
375 // reverting such regions to being available.
376 uint expanded = expand_inactive(num_regions);
377
378 // Commit more regions if needed.
379 if (expanded < num_regions) {
380 expanded += expand_any(num_regions - expanded, pretouch_workers);
381 }
382
383 verify_optional();
384 return expanded;
385 }
386
expand_exact(uint start,uint num_regions,WorkGang * pretouch_workers)387 void HeapRegionManager::expand_exact(uint start, uint num_regions, WorkGang* pretouch_workers) {
388 assert(num_regions != 0, "Need to request at least one region");
389 uint end = start + num_regions;
390
391 for (uint i = start; i < end; i++) {
392 // First check inactive. If the regions is inactive, try to reactivate it
393 // before it get uncommitted by the G1SeriveThread.
394 if (_committed_map.inactive(i)) {
395 // Need to grab the lock since this can be called by a java thread
396 // doing humongous allocations.
397 MutexLocker uc(Uncommit_lock, Mutex::_no_safepoint_check_flag);
398 // State might change while getting the lock.
399 if (_committed_map.inactive(i)) {
400 reactivate_regions(i, 1);
401 }
402 }
403 // Not else-if to catch the case where the inactive region was uncommited
404 // while waiting to get the lock.
405 if (!_committed_map.active(i)) {
406 expand(i, 1, pretouch_workers);
407 }
408
409 assert(at(i)->is_free(), "Region must be free at this point");
410 }
411
412 verify_optional();
413 }
414
expand_on_preferred_node(uint preferred_index)415 uint HeapRegionManager::expand_on_preferred_node(uint preferred_index) {
416 uint expand_candidate = UINT_MAX;
417
418 if (available() >= 1) {
419 for (uint i = 0; i < reserved_length(); i++) {
420 if (is_available(i)) {
421 // Already in use continue
422 continue;
423 }
424 // Always save the candidate so we can expand later on.
425 expand_candidate = i;
426 if (is_on_preferred_index(expand_candidate, preferred_index)) {
427 // We have found a candidate on the preferred node, break.
428 break;
429 }
430 }
431 }
432
433 if (expand_candidate == UINT_MAX) {
434 // No regions left, expand failed.
435 return 0;
436 }
437
438 expand_exact(expand_candidate, 1, NULL);
439 return 1;
440 }
441
is_on_preferred_index(uint region_index,uint preferred_node_index)442 bool HeapRegionManager::is_on_preferred_index(uint region_index, uint preferred_node_index) {
443 uint region_node_index = G1NUMA::numa()->preferred_node_index_for_index(region_index);
444 return region_node_index == preferred_node_index;
445 }
446
#ifdef ASSERT
void HeapRegionManager::assert_contiguous_range(uint start, uint num_regions) {
  // General sanity check, regions found should either be available and empty
  // or not available so that we can make them available and use them.
  for (uint i = start; i < (start + num_regions); i++) {
    HeapRegion* hr = _regions.get_by_index(i);
    assert(!is_available(i) || hr->is_free(),
           "Found region sequence starting at " UINT32_FORMAT ", length " UINT32_FORMAT
           " that is not free at " UINT32_FORMAT ". Hr is " PTR_FORMAT ", type is %s",
           start, num_regions, i, p2i(hr), hr->get_type_str());
  }
}
#endif
460
find_contiguous_in_range(uint start,uint end,uint num_regions)461 uint HeapRegionManager::find_contiguous_in_range(uint start, uint end, uint num_regions) {
462 assert(start <= end, "precondition");
463 assert(num_regions >= 1, "precondition");
464 uint candidate = start; // First region in candidate sequence.
465 uint unchecked = candidate; // First unchecked region in candidate.
466 // While the candidate sequence fits in the range...
467 while (num_regions <= (end - candidate)) {
468 // Walk backward over the regions for the current candidate.
469 for (uint i = candidate + num_regions - 1; true; --i) {
470 if (is_available(i) && !at(i)->is_free()) {
471 // Region i can't be used, so restart with i+1 as the start
472 // of a new candidate sequence, and with the region after the
473 // old candidate sequence being the first unchecked region.
474 unchecked = candidate + num_regions;
475 candidate = i + 1;
476 break;
477 } else if (i == unchecked) {
478 // All regions of candidate sequence have passed check.
479 assert_contiguous_range(candidate, num_regions);
480 return candidate;
481 }
482 }
483 }
484 return G1_NO_HRM_INDEX;
485 }
486
find_contiguous_in_free_list(uint num_regions)487 uint HeapRegionManager::find_contiguous_in_free_list(uint num_regions) {
488 uint candidate = G1_NO_HRM_INDEX;
489 HeapRegionRange range(0,0);
490
491 do {
492 range = _committed_map.next_active_range(range.end());
493 candidate = find_contiguous_in_range(range.start(), range.end(), num_regions);
494 } while (candidate == G1_NO_HRM_INDEX && range.end() < reserved_length());
495
496 return candidate;
497 }
498
find_contiguous_allow_expand(uint num_regions)499 uint HeapRegionManager::find_contiguous_allow_expand(uint num_regions) {
500 // Check if we can actually satisfy the allocation.
501 if (num_regions > available()) {
502 return G1_NO_HRM_INDEX;
503 }
504 // Find any candidate.
505 return find_contiguous_in_range(0, reserved_length(), num_regions);
506 }
507
// Return the next available region after r in index order, or NULL if r is
// the last one.
HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
  guarantee(r != NULL, "Start region must be a valid region");
  guarantee(is_available(r->hrm_index()), "Trying to iterate starting from region %u which is not in the heap", r->hrm_index());
  for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
    HeapRegion* hr = _regions.get_by_index(i);
    if (is_available(i)) {
      return hr;
    }
  }
  return NULL;
}
519
iterate(HeapRegionClosure * blk) const520 void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
521 uint len = reserved_length();
522
523 for (uint i = 0; i < len; i++) {
524 if (!is_available(i)) {
525 continue;
526 }
527 guarantee(at(i) != NULL, "Tried to access region %u that has a NULL HeapRegion*", i);
528 bool res = blk->do_heap_region(at(i));
529 if (res) {
530 blk->set_incomplete();
531 return;
532 }
533 }
534 }
535
find_highest_free(bool * expanded)536 uint HeapRegionManager::find_highest_free(bool* expanded) {
537 // Loop downwards from the highest region index, looking for an
538 // entry which is either free or not yet committed. If not yet
539 // committed, expand at that index.
540 uint curr = reserved_length() - 1;
541 while (true) {
542 HeapRegion *hr = _regions.get_by_index(curr);
543 if (hr == NULL || !is_available(curr)) {
544 // Found uncommitted and free region, expand to make it available for use.
545 expand_exact(curr, 1, NULL);
546 assert(at(curr)->is_free(), "Region (%u) must be available and free after expand", curr);
547
548 *expanded = true;
549 return curr;
550 } else {
551 if (hr->is_free()) {
552 *expanded = false;
553 return curr;
554 }
555 }
556 if (curr == 0) {
557 return G1_NO_HRM_INDEX;
558 }
559 curr--;
560 }
561 }
562
allocate_containing_regions(MemRegion range,size_t * commit_count,WorkGang * pretouch_workers)563 bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count, WorkGang* pretouch_workers) {
564 size_t commits = 0;
565 uint start_index = (uint)_regions.get_index_by_address(range.start());
566 uint last_index = (uint)_regions.get_index_by_address(range.last());
567
568 // Ensure that each G1 region in the range is free, returning false if not.
569 // Commit those that are not yet available, and keep count.
570 for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
571 if (!is_available(curr_index)) {
572 commits++;
573 expand_exact(curr_index, 1, pretouch_workers);
574 }
575 HeapRegion* curr_region = _regions.get_by_index(curr_index);
576 if (!curr_region->is_free()) {
577 return false;
578 }
579 }
580
581 allocate_free_regions_starting_at(start_index, (last_index - start_index) + 1);
582 *commit_count = commits;
583 return true;
584 }
585
par_iterate(HeapRegionClosure * blk,HeapRegionClaimer * hrclaimer,const uint start_index) const586 void HeapRegionManager::par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* hrclaimer, const uint start_index) const {
587 // Every worker will actually look at all regions, skipping over regions that
588 // are currently not committed.
589 // This also (potentially) iterates over regions newly allocated during GC. This
590 // is no problem except for some extra work.
591 const uint n_regions = hrclaimer->n_regions();
592 for (uint count = 0; count < n_regions; count++) {
593 const uint index = (start_index + count) % n_regions;
594 assert(index < n_regions, "sanity");
595 // Skip over unavailable regions
596 if (!is_available(index)) {
597 continue;
598 }
599 HeapRegion* r = _regions.get_by_index(index);
600 // We'll ignore regions already claimed.
601 // However, if the iteration is specified as concurrent, the values for
602 // is_starts_humongous and is_continues_humongous can not be trusted,
603 // and we should just blindly iterate over regions regardless of their
604 // humongous status.
605 if (hrclaimer->is_region_claimed(index)) {
606 continue;
607 }
608 // OK, try to claim it
609 if (!hrclaimer->claim_region(index)) {
610 continue;
611 }
612 bool res = blk->do_heap_region(r);
613 if (res) {
614 return;
615 }
616 }
617 }
618
shrink_by(uint num_regions_to_remove)619 uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
620 assert(length() > 0, "the region sequence should not be empty");
621 assert(length() <= _allocated_heapregions_length, "invariant");
622 assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
623 assert(num_regions_to_remove < length(), "We should never remove all regions");
624
625 if (num_regions_to_remove == 0) {
626 return 0;
627 }
628
629 uint removed = 0;
630 uint cur = _allocated_heapregions_length - 1;
631 uint idx_last_found = 0;
632 uint num_last_found = 0;
633
634 while ((removed < num_regions_to_remove) &&
635 (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
636 uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);
637
638 shrink_at(idx_last_found + num_last_found - to_remove, to_remove);
639
640 cur = idx_last_found;
641 removed += to_remove;
642 }
643
644 verify_optional();
645
646 return removed;
647 }
648
shrink_at(uint index,size_t num_regions)649 void HeapRegionManager::shrink_at(uint index, size_t num_regions) {
650 #ifdef ASSERT
651 for (uint i = index; i < (index + num_regions); i++) {
652 assert(is_available(i), "Expected available region at index %u", i);
653 assert(at(i)->is_empty(), "Expected empty region at index %u", i);
654 assert(at(i)->is_free(), "Expected free region at index %u", i);
655 }
656 #endif
657 // Mark regions as inactive making them ready for uncommit.
658 deactivate_regions(index, (uint) num_regions);
659 }
660
find_empty_from_idx_reverse(uint start_idx,uint * res_idx) const661 uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
662 guarantee(start_idx < _allocated_heapregions_length, "checking");
663 guarantee(res_idx != NULL, "checking");
664
665 uint num_regions_found = 0;
666
667 jlong cur = start_idx;
668 while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
669 cur--;
670 }
671 if (cur == -1) {
672 return num_regions_found;
673 }
674 jlong old_cur = cur;
675 // cur indexes the first empty region
676 while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
677 cur--;
678 }
679 *res_idx = cur + 1;
680 num_regions_found = old_cur - cur;
681
682 #ifdef ASSERT
683 for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
684 assert(at(i)->is_empty(), "just checking");
685 }
686 #endif
687 return num_regions_found;
688 }
689
verify()690 void HeapRegionManager::verify() {
691 guarantee(length() <= _allocated_heapregions_length,
692 "invariant: _length: %u _allocated_length: %u",
693 length(), _allocated_heapregions_length);
694 guarantee(_allocated_heapregions_length <= reserved_length(),
695 "invariant: _allocated_length: %u _max_length: %u",
696 _allocated_heapregions_length, reserved_length());
697 guarantee(length() <= max_length(),
698 "invariant: committed regions: %u max_regions: %u",
699 length(), max_length());
700
701 bool prev_committed = true;
702 uint num_committed = 0;
703 HeapWord* prev_end = heap_bottom();
704 for (uint i = 0; i < _allocated_heapregions_length; i++) {
705 if (!is_available(i)) {
706 prev_committed = false;
707 continue;
708 }
709 num_committed++;
710 HeapRegion* hr = _regions.get_by_index(i);
711 guarantee(hr != NULL, "invariant: i: %u", i);
712 guarantee(!prev_committed || hr->bottom() == prev_end,
713 "invariant i: %u " HR_FORMAT " prev_end: " PTR_FORMAT,
714 i, HR_FORMAT_PARAMS(hr), p2i(prev_end));
715 guarantee(hr->hrm_index() == i,
716 "invariant: i: %u hrm_index(): %u", i, hr->hrm_index());
717 // Asserts will fire if i is >= _length
718 HeapWord* addr = hr->bottom();
719 guarantee(addr_to_region(addr) == hr, "sanity");
720 // We cannot check whether the region is part of a particular set: at the time
721 // this method may be called, we have only completed allocation of the regions,
722 // but not put into a region set.
723 prev_committed = true;
724 prev_end = hr->end();
725 }
726 for (uint i = _allocated_heapregions_length; i < reserved_length(); i++) {
727 guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i);
728 }
729
730 guarantee(num_committed == length(), "Found %u committed regions, but should be %u", num_committed, length());
731 _free_list.verify();
732 }
733
734 #ifndef PRODUCT
verify_optional()735 void HeapRegionManager::verify_optional() {
736 verify();
737 }
738 #endif // PRODUCT
739
HeapRegionClaimer(uint n_workers)740 HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
741 _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) {
742 assert(n_workers > 0, "Need at least one worker.");
743 uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
744 memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions);
745 _claims = new_claims;
746 }
747
~HeapRegionClaimer()748 HeapRegionClaimer::~HeapRegionClaimer() {
749 FREE_C_HEAP_ARRAY(uint, _claims);
750 }
751
offset_for_worker(uint worker_id) const752 uint HeapRegionClaimer::offset_for_worker(uint worker_id) const {
753 assert(worker_id < _n_workers, "Invalid worker_id.");
754 return _n_regions * worker_id / _n_workers;
755 }
756
is_region_claimed(uint region_index) const757 bool HeapRegionClaimer::is_region_claimed(uint region_index) const {
758 assert(region_index < _n_regions, "Invalid index.");
759 return _claims[region_index] == Claimed;
760 }
761
claim_region(uint region_index)762 bool HeapRegionClaimer::claim_region(uint region_index) {
763 assert(region_index < _n_regions, "Invalid index.");
764 uint old_val = Atomic::cmpxchg(&_claims[region_index], Unclaimed, Claimed);
765 return old_val == Unclaimed;
766 }
767
768 class G1RebuildFreeListTask : public AbstractGangTask {
769 HeapRegionManager* _hrm;
770 FreeRegionList* _worker_freelists;
771 uint _worker_chunk_size;
772 uint _num_workers;
773
774 public:
G1RebuildFreeListTask(HeapRegionManager * hrm,uint num_workers)775 G1RebuildFreeListTask(HeapRegionManager* hrm, uint num_workers) :
776 AbstractGangTask("G1 Rebuild Free List Task"),
777 _hrm(hrm),
778 _worker_freelists(NEW_C_HEAP_ARRAY(FreeRegionList, num_workers, mtGC)),
779 _worker_chunk_size((_hrm->reserved_length() + num_workers - 1) / num_workers),
780 _num_workers(num_workers) {
781 for (uint worker = 0; worker < _num_workers; worker++) {
782 ::new (&_worker_freelists[worker]) FreeRegionList("Appendable Worker Free List");
783 }
784 }
785
~G1RebuildFreeListTask()786 ~G1RebuildFreeListTask() {
787 for (uint worker = 0; worker < _num_workers; worker++) {
788 _worker_freelists[worker].~FreeRegionList();
789 }
790 FREE_C_HEAP_ARRAY(FreeRegionList, _worker_freelists);
791 }
792
worker_freelist(uint worker)793 FreeRegionList* worker_freelist(uint worker) {
794 return &_worker_freelists[worker];
795 }
796
797 // Each worker creates a free list for a chunk of the heap. The chunks won't
798 // be overlapping so we don't need to do any claiming.
work(uint worker_id)799 void work(uint worker_id) {
800 Ticks start_time = Ticks::now();
801 EventGCPhaseParallel event;
802
803 uint start = worker_id * _worker_chunk_size;
804 uint end = MIN2(start + _worker_chunk_size, _hrm->reserved_length());
805
806 // If start is outside the heap, this worker has nothing to do.
807 if (start > end) {
808 return;
809 }
810
811 FreeRegionList *free_list = worker_freelist(worker_id);
812 for (uint i = start; i < end; i++) {
813 HeapRegion *region = _hrm->at_or_null(i);
814 if (region != NULL && region->is_free()) {
815 // Need to clear old links to allow to be added to new freelist.
816 region->unlink_from_list();
817 free_list->add_to_tail(region);
818 }
819 }
820
821 event.commit(GCId::current(), worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::RebuildFreeList));
822 G1CollectedHeap::heap()->phase_times()->record_time_secs(G1GCPhaseTimes::RebuildFreeList, worker_id, (Ticks::now() - start_time).seconds());
823 }
824 };
825
rebuild_free_list(WorkGang * workers)826 void HeapRegionManager::rebuild_free_list(WorkGang* workers) {
827 // Abandon current free list to allow a rebuild.
828 _free_list.abandon();
829
830 uint const num_workers = clamp(max_length(), 1u, workers->active_workers());
831 G1RebuildFreeListTask task(this, num_workers);
832
833 log_debug(gc, ergo)("Running %s using %u workers for rebuilding free list of regions",
834 task.name(), num_workers);
835 workers->run_task(&task, num_workers);
836
837 // Link the partial free lists together.
838 Ticks serial_time = Ticks::now();
839 for (uint worker = 0; worker < num_workers; worker++) {
840 _free_list.append_ordered(task.worker_freelist(worker));
841 }
842 G1CollectedHeap::heap()->phase_times()->record_serial_rebuild_freelist_time_ms((Ticks::now() - serial_time).seconds() * 1000.0);
843 }
844