1 /*
2 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/metadataOnStackMark.hpp"
27 #include "code/codeCache.hpp"
28 #include "code/icBuffer.hpp"
29 #include "gc_implementation/g1/bufferingOopClosure.hpp"
30 #include "gc_implementation/g1/concurrentG1Refine.hpp"
31 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
32 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
33 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
34 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
35 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
36 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
37 #include "gc_implementation/g1/g1EvacFailure.hpp"
38 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
39 #include "gc_implementation/g1/g1Log.hpp"
40 #include "gc_implementation/g1/g1MarkSweep.hpp"
41 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
42 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
43 #include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
44 #include "gc_implementation/g1/g1RemSet.inline.hpp"
45 #include "gc_implementation/g1/g1RootProcessor.hpp"
46 #include "gc_implementation/g1/g1StringDedup.hpp"
47 #include "gc_implementation/g1/g1YCTypes.hpp"
48 #include "gc_implementation/g1/heapRegion.inline.hpp"
49 #include "gc_implementation/g1/heapRegionRemSet.hpp"
50 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
51 #include "gc_implementation/g1/vm_operations_g1.hpp"
52 #include "gc_implementation/shared/gcHeapSummary.hpp"
53 #include "gc_implementation/shared/gcTimer.hpp"
54 #include "gc_implementation/shared/gcTrace.hpp"
55 #include "gc_implementation/shared/gcTraceTime.hpp"
56 #include "gc_implementation/shared/isGCActiveMark.hpp"
57 #include "memory/allocation.hpp"
58 #include "memory/gcLocker.inline.hpp"
59 #include "memory/generationSpec.hpp"
60 #include "memory/iterator.hpp"
61 #include "memory/referenceProcessor.hpp"
62 #include "oops/oop.inline.hpp"
63 #include "oops/oop.pcgc.inline.hpp"
64 #include "runtime/orderAccess.inline.hpp"
65 #include "runtime/vmThread.hpp"
66
67 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
68
69 // turn it on so that the contents of the young list (scan-only /
70 // to-be-collected) are printed at "strategic" points before / during
71 // / after the collection --- this is useful for debugging
72 #define YOUNG_LIST_VERBOSE 0
73 // CURRENT STATUS
74 // This file is under construction. Search for "FIXME".
75
76 // INVARIANTS/NOTES
77 //
78 // All allocation activity covered by the G1CollectedHeap interface is
79 // serialized by acquiring the HeapLock. This happens in mem_allocate
80 // and allocate_new_tlab, which are the "entry" points to the
81 // allocation code from the rest of the JVM. (Note that this does not
82 // apply to TLAB allocation, which is not part of this interface: it
83 // is done by clients of this interface.)
84
85 // Local to this file.
86
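// Closure applied to each entry in a dirty card queue: the card is handed to
// G1RemSet::refine_card(). Returning false stops the iteration so that a
// concurrent refinement thread can yield at a safepoint request.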
87 class RefineCardTableEntryClosure: public CardTableEntryClosure {
88 bool _concurrent;
89 public:
90 RefineCardTableEntryClosure() : _concurrent(true) { }
91
92 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
93 bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
94 // This path is executed by the concurrent refine or mutator threads,
95 // concurrently, and so we do not care if card_ptr contains references
96 // that point into the collection set.
97 assert(!oops_into_cset, "should be");
98
99 if (_concurrent && SuspendibleThreadSet::should_yield()) {
100 // Caller will actually yield.
101 return false;
102 }
103 // Otherwise, we finished successfully; return true.
104 return true;
105 }
106
107 void set_concurrent(bool b) { _concurrent = b; }
108 };
109
110
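// Debug-only helper for check_ct_logs_at_safepoint(): resets every logged
// card to clean_card_val() while recording a histogram of the card values
// seen and a count of the entries processed.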
111 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
112 size_t _num_processed;
113 CardTableModRefBS* _ctbs;
114 int _histo[256];
115
116 public:
117 ClearLoggedCardTableEntryClosure() :
118 _num_processed(0), _ctbs(G1CollectedHeap::heap()->g1_barrier_set())
119 {
120 for (int i = 0; i < 256; i++) _histo[i] = 0;
121 }
122
123 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
124 unsigned char* ujb = (unsigned char*)card_ptr;
125 int ind = (int)(*ujb);
126 _histo[ind]++;
127
128 *card_ptr = (jbyte)CardTableModRefBS::clean_card_val();
129 _num_processed++;
130
131 return true;
132 }
133
134 size_t num_processed() { return _num_processed; }
135
136 void print_histo() {
137 gclog_or_tty->print_cr("Card table value histogram:");
138 for (int i = 0; i < 256; i++) {
139 if (_histo[i] != 0) {
140 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]);
141 }
142 }
143 }
144 };
145
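// Marks each logged card dirty again, counting the entries processed (see
// check_ct_logs_at_safepoint() below, which uses it to restore the card table).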
146 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
147 private:
148 size_t _num_processed;
149
150 public:
151 RedirtyLoggedCardTableEntryClosure() : CardTableEntryClosure(), _num_processed(0) { }
152
153 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
154 *card_ptr = CardTableModRefBS::dirty_card_val();
155 _num_processed++;
156 return true;
157 }
158
159 size_t num_processed() const { return _num_processed; }
160 };
161
162 YoungList::YoungList(G1CollectedHeap* g1h) :
163 _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
164 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
165 guarantee(check_list_empty(false), "just making sure...");
166 }
167
168 void YoungList::push_region(HeapRegion *hr) {
169 assert(!hr->is_young(), "should not already be young");
170 assert(hr->get_next_young_region() == NULL, "cause it should!");
171
172 hr->set_next_young_region(_head);
173 _head = hr;
174
175 _g1h->g1_policy()->set_region_eden(hr, (int) _length);
176 ++_length;
177 }
178
179 void YoungList::add_survivor_region(HeapRegion* hr) {
180 assert(hr->is_survivor(), "should be flagged as survivor region");
181 assert(hr->get_next_young_region() == NULL, "cause it should!");
182
183 hr->set_next_young_region(_survivor_head);
184 if (_survivor_head == NULL) {
185 _survivor_tail = hr;
186 }
187 _survivor_head = hr;
188 ++_survivor_length;
189 }
190
191 void YoungList::empty_list(HeapRegion* list) {
192 while (list != NULL) {
193 HeapRegion* next = list->get_next_young_region();
194 list->set_next_young_region(NULL);
195 list->uninstall_surv_rate_group();
196 // This is called before a Full GC and all the non-empty /
197 // non-humongous regions at the end of the Full GC will end up as
198 // old anyway.
199 list->set_old();
200 list = next;
201 }
202 }
203
204 void YoungList::empty_list() {
205 assert(check_list_well_formed(), "young list should be well formed");
206
207 empty_list(_head);
208 _head = NULL;
209 _length = 0;
210
211 empty_list(_survivor_head);
212 _survivor_head = NULL;
213 _survivor_tail = NULL;
214 _survivor_length = 0;
215
216 _last_sampled_rs_lengths = 0;
217
218 assert(check_list_empty(false), "just making sure...");
219 }
220
221 bool YoungList::check_list_well_formed() {
222 bool ret = true;
223
224 uint length = 0;
225 HeapRegion* curr = _head;
226 HeapRegion* last = NULL;
227 while (curr != NULL) {
228 if (!curr->is_young()) {
229 gclog_or_tty->print_cr("### YOUNG REGION " PTR_FORMAT "-" PTR_FORMAT " "
230 "incorrectly tagged (y: %d, surv: %d)",
231 p2i(curr->bottom()), p2i(curr->end()),
232 curr->is_young(), curr->is_survivor());
233 ret = false;
234 }
235 ++length;
236 last = curr;
237 curr = curr->get_next_young_region();
238 }
239 ret = ret && (length == _length);
240
241 if (!ret) {
242 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
243 gclog_or_tty->print_cr("### list has %u entries, _length is %u",
244 length, _length);
245 }
246
247 return ret;
248 }
249
250 bool YoungList::check_list_empty(bool check_sample) {
251 bool ret = true;
252
253 if (_length != 0) {
254 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
255 _length);
256 ret = false;
257 }
258 if (check_sample && _last_sampled_rs_lengths != 0) {
259 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
260 ret = false;
261 }
262 if (_head != NULL) {
263 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
264 ret = false;
265 }
266 if (!ret) {
267 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
268 }
269
270 return ret;
271 }
272
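// Remembered-set length sampling. Callers (e.g. the concurrent refinement
// thread's young-list sampling, outside this file) are expected to drive
// these as: rs_length_sampling_init(); while (rs_length_sampling_more())
// rs_length_sampling_next(); accumulating the occupied remembered-set sizes
// of the young regions.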
273 void
274 YoungList::rs_length_sampling_init() {
275 _sampled_rs_lengths = 0;
276 _curr = _head;
277 }
278
279 bool
280 YoungList::rs_length_sampling_more() {
281 return _curr != NULL;
282 }
283
284 void
285 YoungList::rs_length_sampling_next() {
286 assert( _curr != NULL, "invariant" );
287 size_t rs_length = _curr->rem_set()->occupied();
288
289 _sampled_rs_lengths += rs_length;
290
291 // The current region may not yet have been added to the
292 // incremental collection set (it gets added when it is
293 // retired as the current allocation region).
294 if (_curr->in_collection_set()) {
295 // Update the collection set policy information for this region
296 _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
297 }
298
299 _curr = _curr->get_next_young_region();
300 if (_curr == NULL) {
301 _last_sampled_rs_lengths = _sampled_rs_lengths;
302 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
303 }
304 }
305
306 void
307 YoungList::reset_auxilary_lists() {
308 guarantee( is_empty(), "young list should be empty" );
309 assert(check_list_well_formed(), "young list should be well formed");
310
311 // Add survivor regions to SurvRateGroup.
312 _g1h->g1_policy()->note_start_adding_survivor_regions();
313 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
314
315 int young_index_in_cset = 0;
316 for (HeapRegion* curr = _survivor_head;
317 curr != NULL;
318 curr = curr->get_next_young_region()) {
319 _g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);
320
321 // The region is a non-empty survivor so let's add it to
322 // the incremental collection set for the next evacuation
323 // pause.
324 _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
325 young_index_in_cset += 1;
326 }
327 assert((uint) young_index_in_cset == _survivor_length, "post-condition");
328 _g1h->g1_policy()->note_stop_adding_survivor_regions();
329
330 _head = _survivor_head;
331 _length = _survivor_length;
332 if (_survivor_head != NULL) {
333 assert(_survivor_tail != NULL, "cause it shouldn't be");
334 assert(_survivor_length > 0, "invariant");
335 _survivor_tail->set_next_young_region(NULL);
336 }
337
338 // Don't clear the survivor list handles until the start of
339 // the next evacuation pause - we need it in order to re-tag
340 // the survivor regions from this evacuation pause as 'young'
341 // at the start of the next.
342
343 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
344
345 assert(check_list_well_formed(), "young list should be well formed");
346 }
347
348 void YoungList::print() {
349 HeapRegion* lists[] = {_head, _survivor_head};
350 const char* names[] = {"YOUNG", "SURVIVOR"};
351
352 for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
353 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
354 HeapRegion *curr = lists[list];
355 if (curr == NULL)
356 gclog_or_tty->print_cr(" empty");
357 while (curr != NULL) {
358 gclog_or_tty->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
359 HR_FORMAT_PARAMS(curr),
360 p2i(curr->prev_top_at_mark_start()),
361 p2i(curr->next_top_at_mark_start()),
362 curr->age_in_surv_rate_group_cond());
363 curr = curr->get_next_young_region();
364 }
365 }
366
367 gclog_or_tty->cr();
368 }
369
370 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
371 OtherRegionsTable::invalidate(start_idx, num_regions);
372 }
373
374 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
375 // The from card cache is not the memory that is actually committed. So we cannot
376 // take advantage of the zero_filled parameter.
377 reset_from_card_cache(start_idx, num_regions);
378 }
379
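// Lock-free push of hr onto the global dirty-cards region list. A region
// first claims its slot by CAS-ing a self-pointer into its
// next_dirty_cards_region field, then CAS-es itself onto the list head. The
// last element of the list points to itself rather than to NULL, so a NULL
// next pointer means "not on the list".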
380 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
381 {
382 // Claim the right to put the region on the dirty cards region list
383 // by installing a self pointer.
384 HeapRegion* next = hr->get_next_dirty_cards_region();
385 if (next == NULL) {
386 HeapRegion* res = (HeapRegion*)
387 Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
388 NULL);
389 if (res == NULL) {
390 HeapRegion* head;
391 do {
392 // Put the region to the dirty cards region list.
393 head = _dirty_cards_region_list;
394 next = (HeapRegion*)
395 Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
396 if (next == head) {
397 assert(hr->get_next_dirty_cards_region() == hr,
398 "hr->get_next_dirty_cards_region() != hr");
399 if (next == NULL) {
400 // The last region in the list points to itself.
401 hr->set_next_dirty_cards_region(hr);
402 } else {
403 hr->set_next_dirty_cards_region(next);
404 }
405 }
406 } while (next != head);
407 }
408 }
409 }
410
411 HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
412 {
413 HeapRegion* head;
414 HeapRegion* hr;
415 do {
416 head = _dirty_cards_region_list;
417 if (head == NULL) {
418 return NULL;
419 }
420 HeapRegion* new_head = head->get_next_dirty_cards_region();
421 if (head == new_head) {
422 // The last region.
423 new_head = NULL;
424 }
425 hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
426 head);
427 } while (hr != head);
428 assert(hr != NULL, "invariant");
429 hr->set_next_dirty_cards_region(NULL);
430 return hr;
431 }
432
433 #ifdef ASSERT
434 // A region is added to the collection set as it is retired
435 // so an address p can point to a region which will be in the
436 // collection set but has not yet been retired. This method
437 // therefore is only accurate during a GC pause after all
438 // regions have been retired. It is used for debugging
439 // to check if an nmethod has references to objects that can
440 // be moved during a partial collection. Though it can be
441 // inaccurate, it is sufficient for G1 because the conservative
442 // implementation of is_scavengable() for G1 will indicate that
443 // all nmethods must be scanned during a partial collection.
444 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
445 if (p == NULL) {
446 return false;
447 }
448 return heap_region_containing(p)->in_collection_set();
449 }
450 #endif
451
452 // Returns true if the reference points to an object that
453 // can move in an incremental collection.
454 bool G1CollectedHeap::is_scavengable(const void* p) {
455 HeapRegion* hr = heap_region_containing(p);
456 return !hr->isHumongous();
457 }
458
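// Debug check, run at a safepoint: verifies that the dirty card queue logs
// exactly cover the dirty cards in the card table by clearing the logged
// cards, checking the table is then clean, and re-dirtying the same cards.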
459 void G1CollectedHeap::check_ct_logs_at_safepoint() {
460 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
461 CardTableModRefBS* ct_bs = g1_barrier_set();
462
463 // Count the dirty cards at the start.
464 CountNonCleanMemRegionClosure count1(this);
465 ct_bs->mod_card_iterate(&count1);
466 int orig_count = count1.n();
467
468 // First clear the logged cards.
469 ClearLoggedCardTableEntryClosure clear;
470 dcqs.apply_closure_to_all_completed_buffers(&clear);
471 dcqs.iterate_closure_all_threads(&clear, false);
472 clear.print_histo();
473
474 // Now ensure that there are no dirty cards.
475 CountNonCleanMemRegionClosure count2(this);
476 ct_bs->mod_card_iterate(&count2);
477 if (count2.n() != 0) {
478 gclog_or_tty->print_cr("Card table has %d entries; %d originally",
479 count2.n(), orig_count);
480 }
481 guarantee(count2.n() == 0, "Card table should be clean.");
482
483 RedirtyLoggedCardTableEntryClosure redirty;
484 dcqs.apply_closure_to_all_completed_buffers(&redirty);
485 dcqs.iterate_closure_all_threads(&redirty, false);
486 gclog_or_tty->print_cr("Log entries = " SIZE_FORMAT ", dirty cards = %d.",
487 clear.num_processed(), orig_count);
488 guarantee(redirty.num_processed() == clear.num_processed(),
489 err_msg("Redirtied " SIZE_FORMAT " cards, but cleared " SIZE_FORMAT,
490 redirty.num_processed(), clear.num_processed()));
491
492 CountNonCleanMemRegionClosure count3(this);
493 ct_bs->mod_card_iterate(&count3);
494 if (count3.n() != orig_count) {
495 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
496 orig_count, count3.n());
497 guarantee(count3.n() >= orig_count, "Should have restored them all.");
498 }
499 }
500
501 // Private class members.
502
503 G1CollectedHeap* G1CollectedHeap::_g1h;
504
505 // Private methods.
506
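// Allocates a free region out of the secondary free list (regions freed by
// the concurrent cleanup phase). Takes SecondaryFreeList_lock and waits as
// long as the cleanup phase reports that more free regions are coming;
// returns NULL if none become available.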
507 HeapRegion*
508 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
509 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
510 while (!_secondary_free_list.is_empty() || free_regions_coming()) {
511 if (!_secondary_free_list.is_empty()) {
512 if (G1ConcRegionFreeingVerbose) {
513 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
514 "secondary_free_list has %u entries",
515 _secondary_free_list.length());
516 }
517 // It looks as if there are free regions available on the
518 // secondary_free_list. Let's move them to the free_list and try
519 // again to allocate from it.
520 append_secondary_free_list();
521
522 assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
523 "empty we should have moved at least one entry to the free_list");
524 HeapRegion* res = _hrm.allocate_free_region(is_old);
525 if (G1ConcRegionFreeingVerbose) {
526 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
527 "allocated " HR_FORMAT " from secondary_free_list",
528 HR_FORMAT_PARAMS(res));
529 }
530 return res;
531 }
532
533 // Wait here until we get notified either when (a) there are no
534 // more free regions coming or (b) some regions have been moved on
535 // the secondary_free_list.
536 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
537 }
538
539 if (G1ConcRegionFreeingVerbose) {
540 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
541 "could not allocate from secondary_free_list");
542 }
543 return NULL;
544 }
545
546 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
547 assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
548 "the only time we use this to allocate a humongous region is "
549 "when we are allocating a single humongous region");
550
551 HeapRegion* res;
552 if (G1StressConcRegionFreeing) {
553 if (!_secondary_free_list.is_empty()) {
554 if (G1ConcRegionFreeingVerbose) {
555 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
556 "forced to look at the secondary_free_list");
557 }
558 res = new_region_try_secondary_free_list(is_old);
559 if (res != NULL) {
560 return res;
561 }
562 }
563 }
564
565 res = _hrm.allocate_free_region(is_old);
566
567 if (res == NULL) {
568 if (G1ConcRegionFreeingVerbose) {
569 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
570 "res == NULL, trying the secondary_free_list");
571 }
572 res = new_region_try_secondary_free_list(is_old);
573 }
574 if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
575 // Currently, only attempts to allocate GC alloc regions set
576 // do_expand to true. So, we should only reach here during a
577 // safepoint. If this assumption changes we might have to
578 // reconsider the use of _expand_heap_after_alloc_failure.
579 assert(SafepointSynchronize::is_at_safepoint(), "invariant");
580
581 ergo_verbose1(ErgoHeapSizing,
582 "attempt heap expansion",
583 ergo_format_reason("region allocation request failed")
584 ergo_format_byte("allocation request"),
585 word_size * HeapWordSize);
586 if (expand(word_size * HeapWordSize)) {
587 // Given that expand() succeeded in expanding the heap, and we
588 // always expand the heap by an amount aligned to the heap
589 // region size, the free list should in theory not be empty.
590 // In either case allocate_free_region() will check for NULL.
591 res = _hrm.allocate_free_region(is_old);
592 } else {
593 _expand_heap_after_alloc_failure = false;
594 }
595 }
596 return res;
597 }
598
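// Formats the regions [first, first + num_regions) as a single humongous
// object of word_size words: the first region becomes "starts humongous",
// the rest "continues humongous", their top fields are set so none of them
// looks empty, and the address of the new object (the bottom of the first
// region) is returned.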
599 HeapWord*
600 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
601 uint num_regions,
602 size_t word_size,
603 AllocationContext_t context) {
604 assert(first != G1_NO_HRM_INDEX, "pre-condition");
605 assert(isHumongous(word_size), "word_size should be humongous");
606 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
607
608 // Index of last region in the series + 1.
609 uint last = first + num_regions;
610
611 // We need to initialize the region(s) we just discovered. This is
612 // a bit tricky given that it can happen concurrently with
613 // refinement threads refining cards on these regions and
614 // potentially wanting to refine the BOT as they are scanning
615 // those cards (this can happen shortly after a cleanup; see CR
616 // 6991377). So we have to set up the region(s) carefully and in
617 // a specific order.
618
619 // The word size sum of all the regions we will allocate.
620 size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
621 assert(word_size <= word_size_sum, "sanity");
622
623 // This will be the "starts humongous" region.
624 HeapRegion* first_hr = region_at(first);
625 // The header of the new object will be placed at the bottom of
626 // the first region.
627 HeapWord* new_obj = first_hr->bottom();
628 // This will be the new end of the first region in the series that
629 // should also match the end of the last region in the series.
630 HeapWord* new_end = new_obj + word_size_sum;
631 // This will be the new top of the first region that will reflect
632 // this allocation.
633 HeapWord* new_top = new_obj + word_size;
634
635 // First, we need to zero the header of the space that we will be
636 // allocating. When we update top further down, some refinement
637 // threads might try to scan the region. By zeroing the header we
638 // ensure that any thread that will try to scan the region will
639 // come across the zero klass word and bail out.
640 //
641 // NOTE: It would not have been correct to have used
642 // CollectedHeap::fill_with_object() and make the space look like
643 // an int array. The thread that is doing the allocation will
644 // later update the object header to a potentially different array
645 // type and, for a very short period of time, the klass and length
646 // fields will be inconsistent. This could cause a refinement
647 // thread to calculate the object size incorrectly.
648 Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
649
650 // We will set up the first region as "starts humongous". This
651 // will also update the BOT covering all the regions to reflect
652 // that there is a single object that starts at the bottom of the
653 // first region.
654 first_hr->set_startsHumongous(new_top, new_end);
655 first_hr->set_allocation_context(context);
656 // Then, if there are any, we will set up the "continues
657 // humongous" regions.
658 HeapRegion* hr = NULL;
659 for (uint i = first + 1; i < last; ++i) {
660 hr = region_at(i);
661 hr->set_continuesHumongous(first_hr);
662 hr->set_allocation_context(context);
663 }
664 // If we have "continues humongous" regions (hr != NULL), then the
665 // end of the last one should match new_end.
666 assert(hr == NULL || hr->end() == new_end, "sanity");
667
668 // Up to this point no concurrent thread would have been able to
669 // do any scanning on any region in this series. All the top
670 // fields still point to bottom, so the intersection between
671 // [bottom,top] and [card_start,card_end] will be empty. Before we
672 // update the top fields, we'll do a storestore to make sure that
673 // no thread sees the update to top before the zeroing of the
674 // object header and the BOT initialization.
675 OrderAccess::storestore();
676
677 // Now that the BOT and the object header have been initialized,
678 // we can update top of the "starts humongous" region.
679 assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
680 "new_top should be in this region");
681 first_hr->set_top(new_top);
682 if (_hr_printer.is_active()) {
683 HeapWord* bottom = first_hr->bottom();
684 HeapWord* end = first_hr->orig_end();
685 if ((first + 1) == last) {
686 // the series has a single humongous region
687 _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
688 } else {
689 // the series has more than one humongous region
690 _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
691 }
692 }
693
694 // Now, we will update the top fields of the "continues humongous"
695 // regions. The reason we need to do this is that, otherwise,
696 // these regions would look empty and this will confuse parts of
697 // G1. For example, the code that looks for a consecutive number
698 // of empty regions will consider them empty and try to
699 // re-allocate them. We can extend is_empty() to also include
700 // !continuesHumongous(), but it is easier to just update the top
701 // fields here. The way we set top for all regions (i.e., top ==
702 // end for all regions but the last one, top == new_top for the
703 // last one) is actually used when we will free up the humongous
704 // region in free_humongous_region().
705 hr = NULL;
706 for (uint i = first + 1; i < last; ++i) {
707 hr = region_at(i);
708 if ((i + 1) == last) {
709 // last continues humongous region
710 assert(hr->bottom() < new_top && new_top <= hr->end(),
711 "new_top should fall on this region");
712 hr->set_top(new_top);
713 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
714 } else {
715 // not last one
716 assert(new_top > hr->end(), "new_top should be above this region");
717 hr->set_top(hr->end());
718 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
719 }
720 }
721 // If we have continues humongous regions (hr != NULL), then the
722 // end of the last one should match new_end and its top should
723 // match new_top.
724 assert(hr == NULL ||
725 (hr->end() == new_end && hr->top() == new_top), "sanity");
726 check_bitmaps("Humongous Region Allocation", first_hr);
727
728 assert(first_hr->used() == word_size * HeapWordSize, "invariant");
729 _allocator->increase_used(first_hr->used());
730 _humongous_set.add(first_hr);
731
732 return new_obj;
733 }
734
735 // If could fit into free regions w/o expansion, try.
736 // Otherwise, if can expand, do so.
737 // Otherwise, if using ex regions might help, try with ex given back.
738 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
739 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
740
741 verify_region_sets_optional();
742
743 uint first = G1_NO_HRM_INDEX;
744 uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
745
746 if (obj_regions == 1) {
747 // Only one region to allocate, try to use a fast path by directly allocating
748 // from the free lists. Do not try to expand here, we will potentially do that
749 // later.
750 HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
751 if (hr != NULL) {
752 first = hr->hrm_index();
753 }
754 } else {
755 // We can't allocate humongous regions spanning more than one region while
756 // cleanupComplete() is running, since some of the regions we find to be
757 // empty might not yet be added to the free list. It is not straightforward
758 // to know in which list they are on so that we can remove them. We only
759 // need to do this if we need to allocate more than one region to satisfy the
760 // current humongous allocation request. If we are only allocating one region
761 // we use the one-region region allocation code (see above), that already
762 // potentially waits for regions from the secondary free list.
763 wait_while_free_regions_coming();
764 append_secondary_free_list_if_not_empty_with_lock();
765
766 // Policy: Try only empty regions (i.e. already committed first). Maybe we
767 // are lucky enough to find some.
768 first = _hrm.find_contiguous_only_empty(obj_regions);
769 if (first != G1_NO_HRM_INDEX) {
770 _hrm.allocate_free_regions_starting_at(first, obj_regions);
771 }
772 }
773
774 if (first == G1_NO_HRM_INDEX) {
775 // Policy: We could not find enough regions for the humongous object in the
776 // free list. Look through the heap to find a mix of free and uncommitted regions.
777 // If so, try expansion.
778 first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
779 if (first != G1_NO_HRM_INDEX) {
780 // We found something. Make sure these regions are committed, i.e. expand
781 // the heap. Alternatively we could do a defragmentation GC.
782 ergo_verbose1(ErgoHeapSizing,
783 "attempt heap expansion",
784 ergo_format_reason("humongous allocation request failed")
785 ergo_format_byte("allocation request"),
786 word_size * HeapWordSize);
787
788 _hrm.expand_at(first, obj_regions);
789 g1_policy()->record_new_heap_size(num_regions());
790
791 #ifdef ASSERT
792 for (uint i = first; i < first + obj_regions; ++i) {
793 HeapRegion* hr = region_at(i);
794 assert(hr->is_free(), "sanity");
795 assert(hr->is_empty(), "sanity");
796 assert(is_on_master_free_list(hr), "sanity");
797 }
798 #endif
799 _hrm.allocate_free_regions_starting_at(first, obj_regions);
800 } else {
801 // Policy: Potentially trigger a defragmentation GC.
802 }
803 }
804
805 HeapWord* result = NULL;
806 if (first != G1_NO_HRM_INDEX) {
807 result = humongous_obj_allocate_initialize_regions(first, obj_regions,
808 word_size, context);
809 assert(result != NULL, "it should always return a valid result");
810
811 // A successful humongous object allocation changes the used space
812 // information of the old generation so we need to recalculate the
813 // sizes and update the jstat counters here.
814 g1mm()->update_sizes();
815 }
816
817 verify_region_sets_optional();
818
819 return result;
820 }
821
822 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
823 assert_heap_not_locked_and_not_at_safepoint();
824 assert(!isHumongous(word_size), "we do not allow humongous TLABs");
825
826 uint dummy_gc_count_before;
827 uint dummy_gclocker_retry_count = 0;
828 return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
829 }
830
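// General allocation entry point used by the rest of the VM. Retries the
// regular or humongous allocation path and, when that fails, schedules a
// VM_G1CollectForAllocation safepoint operation before trying again.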
831 HeapWord*
832 G1CollectedHeap::mem_allocate(size_t word_size,
833 bool* gc_overhead_limit_was_exceeded) {
834 assert_heap_not_locked_and_not_at_safepoint();
835
836 // Loop until the allocation is satisfied, or unsatisfied after GC.
837 for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
838 uint gc_count_before;
839
840 HeapWord* result = NULL;
841 if (!isHumongous(word_size)) {
842 result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
843 } else {
844 result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
845 }
846 if (result != NULL) {
847 return result;
848 }
849
850 // Create the garbage collection operation...
851 VM_G1CollectForAllocation op(gc_count_before, word_size);
852 op.set_allocation_context(AllocationContext::current());
853
854 // ...and get the VM thread to execute it.
855 VMThread::execute(&op);
856
857 if (op.prologue_succeeded() && op.pause_succeeded()) {
858 // If the operation was successful we'll return the result even
859 // if it is NULL. If the allocation attempt failed immediately
860 // after a Full GC, it's unlikely we'll be able to allocate now.
861 HeapWord* result = op.result();
862 if (result != NULL && !isHumongous(word_size)) {
863 // Allocations that take place on VM operations do not do any
864 // card dirtying and we have to do it here. We only have to do
865 // this for non-humongous allocations, though.
866 dirty_young_block(result, word_size);
867 }
868 return result;
869 } else {
870 if (gclocker_retry_count > GCLockerRetryAllocationCount) {
871 return NULL;
872 }
873 assert(op.result() == NULL,
874 "the result should be NULL if the VM op did not succeed");
875 }
876
877 // Give a warning if we seem to be looping forever.
878 if ((QueuedAllocationWarningCount > 0) &&
879 (try_count % QueuedAllocationWarningCount == 0)) {
880 warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
881 }
882 }
883
884 ShouldNotReachHere();
885 return NULL;
886 }
887
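// Second-level allocation path for regular (non-humongous) objects: loops
// taking the Heap_lock, retrying the mutator alloc region, optionally forcing
// expansion of the young list while the GC locker is active, and otherwise
// scheduling an evacuation pause before attempting the allocation again.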
888 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
889 AllocationContext_t context,
890 uint* gc_count_before_ret,
891 uint* gclocker_retry_count_ret) {
892 // Make sure you read the note in attempt_allocation_humongous().
893
894 assert_heap_not_locked_and_not_at_safepoint();
895 assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
896 "be called for humongous allocation requests");
897
898 // We should only get here after the first-level allocation attempt
899 // (attempt_allocation()) failed to allocate.
900
901 // We will loop until a) we manage to successfully perform the
902 // allocation or b) we successfully schedule a collection which
903 // fails to perform the allocation. b) is the only case when we'll
904 // return NULL.
905 HeapWord* result = NULL;
906 for (int try_count = 1; /* we'll return */; try_count += 1) {
907 bool should_try_gc;
908 uint gc_count_before;
909
910 {
911 MutexLockerEx x(Heap_lock);
912 result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
913 false /* bot_updates */);
914 if (result != NULL) {
915 return result;
916 }
917
918 // If we reach here, attempt_allocation_locked() above failed to
919 // allocate a new region. So the mutator alloc region should be NULL.
920 assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
921
922 if (GC_locker::is_active_and_needs_gc()) {
923 if (g1_policy()->can_expand_young_list()) {
924 // No need for an ergo verbose message here,
925 // can_expand_young_list() does this when it returns true.
926 result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
927 false /* bot_updates */);
928 if (result != NULL) {
929 return result;
930 }
931 }
932 should_try_gc = false;
933 } else {
934 // The GCLocker may not be active but the GCLocker initiated
935 // GC may not yet have been performed (GCLocker::needs_gc()
936 // returns true). In this case we do not try this GC and
937 // wait until the GCLocker initiated GC is performed, and
938 // then retry the allocation.
939 if (GC_locker::needs_gc()) {
940 should_try_gc = false;
941 } else {
942 // Read the GC count while still holding the Heap_lock.
943 gc_count_before = total_collections();
944 should_try_gc = true;
945 }
946 }
947 }
948
949 if (should_try_gc) {
950 bool succeeded;
951 result = do_collection_pause(word_size, gc_count_before, &succeeded,
952 GCCause::_g1_inc_collection_pause);
953 if (result != NULL) {
954 assert(succeeded, "only way to get back a non-NULL result");
955 return result;
956 }
957
958 if (succeeded) {
959 // If we get here we successfully scheduled a collection which
960 // failed to allocate. No point in trying to allocate
961 // further. We'll just return NULL.
962 MutexLockerEx x(Heap_lock);
963 *gc_count_before_ret = total_collections();
964 return NULL;
965 }
966 } else {
967 if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
968 MutexLockerEx x(Heap_lock);
969 *gc_count_before_ret = total_collections();
970 return NULL;
971 }
972 // The GCLocker is either active or the GCLocker initiated
973 // GC has not yet been performed. Stall until it is and
974 // then retry the allocation.
975 GC_locker::stall_until_clear();
976 (*gclocker_retry_count_ret) += 1;
977 }
978
979 // We can reach here if we were unsuccessful in scheduling a
980 // collection (because another thread beat us to it) or if we were
981 // stalled due to the GC locker. In either case we should retry the
982 // allocation attempt in case another thread successfully
983 // performed a collection and reclaimed enough space. We do the
984 // first attempt (without holding the Heap_lock) here and the
985 // follow-on attempt will be at the start of the next loop
986 // iteration (after taking the Heap_lock).
987 result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
988 false /* bot_updates */);
989 if (result != NULL) {
990 return result;
991 }
992
993 // Give a warning if we seem to be looping forever.
994 if ((QueuedAllocationWarningCount > 0) &&
995 (try_count % QueuedAllocationWarningCount == 0)) {
996 warning("G1CollectedHeap::attempt_allocation_slow() "
997 "retries %d times", try_count);
998 }
999 }
1000
1001 ShouldNotReachHere();
1002 return NULL;
1003 }
1004
1005 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
1006 uint* gc_count_before_ret,
1007 uint* gclocker_retry_count_ret) {
1008 // The structure of this method has a lot of similarities to
1009 // attempt_allocation_slow(). The reason these two were not merged
1010 // into a single one is that such a method would require several "if
1011 // allocation is not humongous do this, otherwise do that"
1012 // conditional paths which would obscure its flow. In fact, an early
1013 // version of this code did use a unified method which was harder to
1014 // follow and, as a result, it had subtle bugs that were hard to
1015 // track down. So keeping these two methods separate allows each to
1016 // be more readable. It will be good to keep these two in sync as
1017 // much as possible.
1018
1019 assert_heap_not_locked_and_not_at_safepoint();
1020 assert(isHumongous(word_size), "attempt_allocation_humongous() "
1021 "should only be called for humongous allocations");
1022
1023 // Humongous objects can exhaust the heap quickly, so we should check if we
1024 // need to start a marking cycle at each humongous object allocation. We do
1025 // the check before we do the actual allocation. The reason for doing it
1026 // before the allocation is that we avoid having to keep track of the newly
1027 // allocated memory while we do a GC.
1028 if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
1029 word_size)) {
1030 collect(GCCause::_g1_humongous_allocation);
1031 }
1032
1033 // We will loop until a) we manage to successfully perform the
1034 // allocation or b) we successfully schedule a collection which
1035 // fails to perform the allocation. b) is the only case when we'll
1036 // return NULL.
1037 HeapWord* result = NULL;
1038 for (int try_count = 1; /* we'll return */; try_count += 1) {
1039 bool should_try_gc;
1040 uint gc_count_before;
1041
1042 {
1043 MutexLockerEx x(Heap_lock);
1044
1045 // Given that humongous objects are not allocated in young
1046 // regions, we'll first try to do the allocation without doing a
1047 // collection hoping that there's enough space in the heap.
1048 result = humongous_obj_allocate(word_size, AllocationContext::current());
1049 if (result != NULL) {
1050 return result;
1051 }
1052
1053 if (GC_locker::is_active_and_needs_gc()) {
1054 should_try_gc = false;
1055 } else {
1056 // The GCLocker may not be active but the GCLocker initiated
1057 // GC may not yet have been performed (GCLocker::needs_gc()
1058 // returns true). In this case we do not try this GC and
1059 // wait until the GCLocker initiated GC is performed, and
1060 // then retry the allocation.
1061 if (GC_locker::needs_gc()) {
1062 should_try_gc = false;
1063 } else {
1064 // Read the GC count while still holding the Heap_lock.
1065 gc_count_before = total_collections();
1066 should_try_gc = true;
1067 }
1068 }
1069 }
1070
1071 if (should_try_gc) {
1072 // If we failed to allocate the humongous object, we should try to
1073 // do a collection pause (if we're allowed) in case it reclaims
1074 // enough space for the allocation to succeed after the pause.
1075
1076 bool succeeded;
1077 result = do_collection_pause(word_size, gc_count_before, &succeeded,
1078 GCCause::_g1_humongous_allocation);
1079 if (result != NULL) {
1080 assert(succeeded, "only way to get back a non-NULL result");
1081 return result;
1082 }
1083
1084 if (succeeded) {
1085 // If we get here we successfully scheduled a collection which
1086 // failed to allocate. No point in trying to allocate
1087 // further. We'll just return NULL.
1088 MutexLockerEx x(Heap_lock);
1089 *gc_count_before_ret = total_collections();
1090 return NULL;
1091 }
1092 } else {
1093 if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
1094 MutexLockerEx x(Heap_lock);
1095 *gc_count_before_ret = total_collections();
1096 return NULL;
1097 }
1098 // The GCLocker is either active or the GCLocker initiated
1099 // GC has not yet been performed. Stall until it is and
1100 // then retry the allocation.
1101 GC_locker::stall_until_clear();
1102 (*gclocker_retry_count_ret) += 1;
1103 }
1104
1105 // We can reach here if we were unsuccessful in scheduling a
1106 // collection (because another thread beat us to it) or if we were
1107 // stalled due to the GC locker. In either case we should retry the
1108 // allocation attempt in case another thread successfully
1109 // performed a collection and reclaimed enough space. Give a
1110 // warning if we seem to be looping forever.
1111
1112 if ((QueuedAllocationWarningCount > 0) &&
1113 (try_count % QueuedAllocationWarningCount == 0)) {
1114 warning("G1CollectedHeap::attempt_allocation_humongous() "
1115 "retries %d times", try_count);
1116 }
1117 }
1118
1119 ShouldNotReachHere();
1120 return NULL;
1121 }
1122
1123 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1124 AllocationContext_t context,
1125 bool expect_null_mutator_alloc_region) {
1126 assert_at_safepoint(true /* should_be_vm_thread */);
1127 assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
1128 !expect_null_mutator_alloc_region,
1129 "the current alloc region was unexpectedly found to be non-NULL");
1130
1131 if (!isHumongous(word_size)) {
1132 return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
1133 false /* bot_updates */);
1134 } else {
1135 HeapWord* result = humongous_obj_allocate(word_size, context);
1136 if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1137 g1_policy()->set_initiate_conc_mark_if_possible();
1138 }
1139 return result;
1140 }
1141
1142 ShouldNotReachHere();
1143 }
1144
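// After a full GC every object may have moved, so this closure discards each
// region's remembered set and clears its portion of the card table; the
// remembered sets are rebuilt later in do_collection().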
1145 class PostMCRemSetClearClosure: public HeapRegionClosure {
1146 G1CollectedHeap* _g1h;
1147 ModRefBarrierSet* _mr_bs;
1148 public:
1149 PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1150 _g1h(g1h), _mr_bs(mr_bs) {}
1151
1152 bool doHeapRegion(HeapRegion* r) {
1153 HeapRegionRemSet* hrrs = r->rem_set();
1154
1155 if (r->continuesHumongous()) {
1156 // We'll assert that the strong code root list and RSet is empty
1157 assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
1158 assert(hrrs->occupied() == 0, "RSet should be empty");
1159 return false;
1160 }
1161
1162 _g1h->reset_gc_time_stamps(r);
1163 hrrs->clear();
1164 // You might think here that we could clear just the cards
1165 // corresponding to the used region. But no: if we leave a dirty card
1166 // in a region we might allocate into, then it would prevent that card
1167 // from being enqueued, and cause it to be missed.
1168 // Re: the performance cost: we shouldn't be doing full GC anyway!
1169 _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1170
1171 return false;
1172 }
1173 };
1174
1175 void G1CollectedHeap::clear_rsets_post_compaction() {
1176 PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
1177 heap_region_iterate(&rs_clear);
1178 }
1179
1180 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1181 G1CollectedHeap* _g1h;
1182 UpdateRSOopClosure _cl;
1183 int _worker_i;
1184 public:
1185 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
1186 _cl(g1->g1_rem_set(), worker_i),
1187 _worker_i(worker_i),
1188 _g1h(g1)
1189 { }
1190
1191 bool doHeapRegion(HeapRegion* r) {
1192 if (!r->continuesHumongous()) {
1193 _cl.set_from(r);
1194 r->oop_iterate(&_cl);
1195 }
1196 return false;
1197 }
1198 };
1199
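// Gang task that rebuilds remembered sets in parallel after a full GC: each
// worker claims chunks of regions and applies RebuildRSOutOfRegionClosure to
// them.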
1200 class ParRebuildRSTask: public AbstractGangTask {
1201 G1CollectedHeap* _g1;
1202 public:
1203 ParRebuildRSTask(G1CollectedHeap* g1)
1204 : AbstractGangTask("ParRebuildRSTask"),
1205 _g1(g1)
1206 { }
1207
1208 void work(uint worker_id) {
1209 RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
1210 _g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
1211 _g1->workers()->active_workers(),
1212 HeapRegion::RebuildRSClaimValue);
1213 }
1214 };
1215
1216 class PostCompactionPrinterClosure: public HeapRegionClosure {
1217 private:
1218 G1HRPrinter* _hr_printer;
1219 public:
1220 bool doHeapRegion(HeapRegion* hr) {
1221 assert(!hr->is_young(), "not expecting to find young regions");
1222 if (hr->is_free()) {
1223 // We only generate output for non-empty regions.
1224 } else if (hr->startsHumongous()) {
1225 if (hr->region_num() == 1) {
1226 // single humongous region
1227 _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
1228 } else {
1229 _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1230 }
1231 } else if (hr->continuesHumongous()) {
1232 _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1233 } else if (hr->is_old()) {
1234 _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1235 } else {
1236 ShouldNotReachHere();
1237 }
1238 return false;
1239 }
1240
1241 PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1242 : _hr_printer(hr_printer) { }
1243 };
1244
1245 void G1CollectedHeap::print_hrm_post_compaction() {
1246 PostCompactionPrinterClosure cl(hr_printer());
1247 heap_region_iterate(&cl);
1248 }
1249
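// Performs a stop-the-world full collection using G1MarkSweep. Returns false
// if the GC locker is active and the collection has to be skipped.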
1250 bool G1CollectedHeap::do_collection(bool explicit_gc,
1251 bool clear_all_soft_refs,
1252 size_t word_size) {
1253 assert_at_safepoint(true /* should_be_vm_thread */);
1254
1255 if (GC_locker::check_active_before_gc()) {
1256 return false;
1257 }
1258
1259 STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1260 gc_timer->register_gc_start();
1261
1262 SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1263 gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1264
1265 SvcGCMarker sgcm(SvcGCMarker::FULL);
1266 ResourceMark rm;
1267
1268 print_heap_before_gc();
1269 trace_heap_before_gc(gc_tracer);
1270
1271 size_t metadata_prev_used = MetaspaceAux::used_bytes();
1272
1273 verify_region_sets_optional();
1274
1275 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1276 collector_policy()->should_clear_all_soft_refs();
1277
1278 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1279
1280 {
1281 IsGCActiveMark x;
1282
1283 // Timing
1284 assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
1285 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1286
1287 {
1288 GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
1289 TraceCollectorStats tcs(g1mm()->full_collection_counters());
1290 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1291
1292 double start = os::elapsedTime();
1293 g1_policy()->record_full_collection_start();
1294
1295 // Note: When we have a more flexible GC logging framework that
1296 // allows us to add optional attributes to a GC log record we
1297 // could consider timing and reporting how long we wait in the
1298 // following two methods.
1299 wait_while_free_regions_coming();
1300 // If we start the compaction before the CM threads finish
1301 // scanning the root regions we might trip them over as we'll
1302 // be moving objects / updating references. So let's wait until
1303 // they are done. By telling them to abort, they should complete
1304 // early.
1305 _cm->root_regions()->abort();
1306 _cm->root_regions()->wait_until_scan_finished();
1307 append_secondary_free_list_if_not_empty_with_lock();
1308
1309 gc_prologue(true);
1310 increment_total_collections(true /* full gc */);
1311 increment_old_marking_cycles_started();
1312
1313 assert(used() == recalculate_used(), "Should be equal");
1314
1315 verify_before_gc();
1316
1317 check_bitmaps("Full GC Start");
1318 pre_full_gc_dump(gc_timer);
1319
1320 COMPILER2_PRESENT(DerivedPointerTable::clear());
1321
1322 // Disable discovery and empty the discovered lists
1323 // for the CM ref processor.
1324 ref_processor_cm()->disable_discovery();
1325 ref_processor_cm()->abandon_partial_discovery();
1326 ref_processor_cm()->verify_no_references_recorded();
1327
1328 // Abandon current iterations of concurrent marking and concurrent
1329 // refinement, if any are in progress. We have to do this before
1330 // wait_until_scan_finished() below.
1331 concurrent_mark()->abort();
1332
1333 // Make sure we'll choose a new allocation region afterwards.
1334 _allocator->release_mutator_alloc_region();
1335 _allocator->abandon_gc_alloc_regions();
1336 g1_rem_set()->cleanupHRRS();
1337
1338 // We should call this after we retire any currently active alloc
1339 // regions so that all the ALLOC / RETIRE events are generated
1340 // before the start GC event.
1341 _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1342
1343 // We may have added regions to the current incremental collection
1344 // set between the last GC or pause and now. We need to clear the
1345 // incremental collection set and then start rebuilding it afresh
1346 // after this full GC.
1347 abandon_collection_set(g1_policy()->inc_cset_head());
1348 g1_policy()->clear_incremental_cset();
1349 g1_policy()->stop_incremental_cset_building();
1350
1351 tear_down_region_sets(false /* free_list_only */);
1352 g1_policy()->set_gcs_are_young(true);
1353
1354 // See the comments in g1CollectedHeap.hpp and
1355 // G1CollectedHeap::ref_processing_init() about
1356 // how reference processing currently works in G1.
1357
1358 // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1359 ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1360
1361 // Temporarily clear the STW ref processor's _is_alive_non_header field.
1362 ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1363
1364 ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
1365 ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1366
1367 // Do collection work
1368 {
1369 HandleMark hm; // Discard invalid handles created during gc
1370 G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1371 }
1372
1373 assert(num_free_regions() == 0, "we should not have added any free regions");
1374 rebuild_region_sets(false /* free_list_only */);
1375
1376 // Enqueue any discovered reference objects that have
1377 // not been removed from the discovered lists.
1378 ref_processor_stw()->enqueue_discovered_references();
1379
1380 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1381
1382 MemoryService::track_memory_usage();
1383
1384 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1385 ref_processor_stw()->verify_no_references_recorded();
1386
1387 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1388 ClassLoaderDataGraph::purge();
1389 MetaspaceAux::verify_metrics();
1390
1391 // Note: since we've just done a full GC, concurrent
1392 // marking is no longer active. Therefore we need not
1393 // re-enable reference discovery for the CM ref processor.
1394 // That will be done at the start of the next marking cycle.
1395 assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1396 ref_processor_cm()->verify_no_references_recorded();
1397
1398 reset_gc_time_stamp();
1399 // Since everything potentially moved, we will clear all remembered
1400 // sets, and clear all cards. Later we will rebuild remembered
1401 // sets. We will also reset the GC time stamps of the regions.
1402 clear_rsets_post_compaction();
1403 check_gc_time_stamps();
1404
1405 // Resize the heap if necessary.
1406 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1407
1408 if (_hr_printer.is_active()) {
1409 // We should do this after we potentially resize the heap so
1410 // that all the COMMIT / UNCOMMIT events are generated before
1411 // the end GC event.
1412
1413 print_hrm_post_compaction();
1414 _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1415 }
1416
1417 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1418 if (hot_card_cache->use_cache()) {
1419 hot_card_cache->reset_card_counts();
1420 hot_card_cache->reset_hot_cache();
1421 }
1422
1423 // Rebuild remembered sets of all regions.
1424 if (G1CollectedHeap::use_parallel_gc_threads()) {
1425 uint n_workers =
1426 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1427 workers()->active_workers(),
1428 Threads::number_of_non_daemon_threads());
1429 assert(UseDynamicNumberOfGCThreads ||
1430 n_workers == workers()->total_workers(),
1431 "If not dynamic should be using all the workers");
1432 workers()->set_active_workers(n_workers);
1433 // Set parallel threads in the heap (_n_par_threads) only
1434 // before a parallel phase and always reset it to 0 after
1435 // the phase so that the number of parallel threads does
1436 // not get carried forward to a serial phase where there
1437 // may be code that is "possibly_parallel".
1438 set_par_threads(n_workers);
1439
1440 ParRebuildRSTask rebuild_rs_task(this);
1441 assert(check_heap_region_claim_values(
1442 HeapRegion::InitialClaimValue), "sanity check");
1443 assert(UseDynamicNumberOfGCThreads ||
1444 workers()->active_workers() == workers()->total_workers(),
1445 "Unless dynamic should use total workers");
1446 // Use the most recent number of active workers
1447 assert(workers()->active_workers() > 0,
1448 "Active workers not properly set");
1449 set_par_threads(workers()->active_workers());
1450 workers()->run_task(&rebuild_rs_task);
1451 set_par_threads(0);
1452 assert(check_heap_region_claim_values(
1453 HeapRegion::RebuildRSClaimValue), "sanity check");
1454 reset_heap_region_claim_values();
1455 } else {
1456 RebuildRSOutOfRegionClosure rebuild_rs(this);
1457 heap_region_iterate(&rebuild_rs);
1458 }
1459
1460 // Rebuild the strong code root lists for each region
1461 rebuild_strong_code_roots();
1462
1463 // Purge code root memory
1464 purge_code_root_memory();
1465
1466 if (true) { // FIXME
1467 MetaspaceGC::compute_new_size();
1468 }
1469
1470 #ifdef TRACESPINNING
1471 ParallelTaskTerminator::print_termination_counts();
1472 #endif
1473
1474 // Discard all rset updates
1475 JavaThread::dirty_card_queue_set().abandon_logs();
1476 assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1477
1478 _young_list->reset_sampled_info();
1479 // At this point there should be no regions in the
1480 // entire heap tagged as young.
1481 assert(check_young_list_empty(true /* check_heap */),
1482 "young list should be empty at this point");
1483
1484 // Update the number of full collections that have been completed.
1485 increment_old_marking_cycles_completed(false /* concurrent */);
1486
1487 _hrm.verify_optional();
1488 verify_region_sets_optional();
1489
1490 verify_after_gc();
1491
1492 // Clear the previous marking bitmap, if needed for bitmap verification.
1493 // Note we cannot do this when we clear the next marking bitmap in
1494 // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1495 // objects marked during a full GC against the previous bitmap.
1496 // But we need to clear it before calling check_bitmaps below since
1497 // the full GC has compacted objects and updated TAMS but not updated
1498 // the prev bitmap.
1499 if (G1VerifyBitmaps) {
1500 ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1501 }
1502 check_bitmaps("Full GC End");
1503
1504 // Start a new incremental collection set for the next pause
1505 assert(g1_policy()->collection_set() == NULL, "must be");
1506 g1_policy()->start_incremental_cset_building();
1507
1508 clear_cset_fast_test();
1509
1510 _allocator->init_mutator_alloc_region();
1511
1512 double end = os::elapsedTime();
1513 g1_policy()->record_full_collection_end();
1514
1515 if (G1Log::fine()) {
1516 g1_policy()->print_heap_transition();
1517 }
1518
1519 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1520 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1521 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1522 // before any GC notifications are raised.
1523 g1mm()->update_sizes();
1524
1525 gc_epilogue(true);
1526 }
1527
1528 if (G1Log::finer()) {
1529 g1_policy()->print_detailed_heap_transition(true /* full */);
1530 }
1531
1532 print_heap_after_gc();
1533 trace_heap_after_gc(gc_tracer);
1534
1535 post_full_gc_dump(gc_timer);
1536
1537 gc_timer->register_gc_end();
1538 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1539 }
1540
1541 return true;
1542 }
1543
1544 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1545 // do_collection() will return whether it succeeded in performing
1546 // the GC. Currently, there is no facility on the
1547 // do_full_collection() API to notify the caller that the collection
1548 // did not succeed (e.g., because it was locked out by the GC
1549 // locker). So, right now, we'll ignore the return value.
1550 bool dummy = do_collection(true, /* explicit_gc */
1551 clear_all_soft_refs,
1552 0 /* word_size */);
1553 }
1554
1555 // This code is mostly copied from TenuredGeneration.
1556 void
1557 G1CollectedHeap::
1558 resize_if_necessary_after_full_collection(size_t word_size) {
1559 // Include the current allocation, if any, and bytes that will be
1560 // pre-allocated to support collections, as "used".
1561 const size_t used_after_gc = used();
1562 const size_t capacity_after_gc = capacity();
1563 const size_t free_after_gc = capacity_after_gc - used_after_gc;
1564
1565 // This is enforced in arguments.cpp.
1566 assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1567 "otherwise the code below doesn't make sense");
1568
1569 // We don't have floating point command-line arguments
1570 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1571 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1572 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1573 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1574
1575 const size_t min_heap_size = collector_policy()->min_heap_byte_size();
1576 const size_t max_heap_size = collector_policy()->max_heap_byte_size();
1577
1578 // We have to be careful here as these two calculations can overflow
1579 // 32-bit size_t's.
1580 double used_after_gc_d = (double) used_after_gc;
1581 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1582 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
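// Illustrative example: with MinHeapFreeRatio = 40 and MaxHeapFreeRatio = 70,
// maximum_used_percentage is 0.60 and minimum_used_percentage is 0.30, so a
// post-GC occupancy of 300 MB gives a minimum desired capacity of 500 MB and
// a maximum desired capacity of 1000 MB.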
1583
1584 // Let's make sure that they are both under the max heap size, which
1585 // by default will make them fit into a size_t.
1586 double desired_capacity_upper_bound = (double) max_heap_size;
1587 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1588 desired_capacity_upper_bound);
1589 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1590 desired_capacity_upper_bound);
1591
1592 // We can now safely turn them into size_t's.
1593 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1594 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1595
1596 // This assert only makes sense here, before we adjust them
1597 // with respect to the min and max heap size.
1598 assert(minimum_desired_capacity <= maximum_desired_capacity,
1599 err_msg("minimum_desired_capacity = " SIZE_FORMAT ", "
1600 "maximum_desired_capacity = " SIZE_FORMAT,
1601 minimum_desired_capacity, maximum_desired_capacity));
1602
1603 // Should not be greater than the heap max size. No need to adjust
1604 // it with respect to the heap min size as it's a lower bound (i.e.,
1605 // we'll try to make the capacity larger than it, not smaller).
1606 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1607 // Should not be less than the heap min size. No need to adjust it
1608 // with respect to the heap max size as it's an upper bound (i.e.,
1609 // we'll try to make the capacity smaller than it, not greater).
1610 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
1611
1612 if (capacity_after_gc < minimum_desired_capacity) {
1613 // Don't expand unless it's significant
1614 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1615 ergo_verbose4(ErgoHeapSizing,
1616 "attempt heap expansion",
1617 ergo_format_reason("capacity lower than "
1618 "min desired capacity after Full GC")
1619 ergo_format_byte("capacity")
1620 ergo_format_byte("occupancy")
1621 ergo_format_byte_perc("min desired capacity"),
1622 capacity_after_gc, used_after_gc,
1623 minimum_desired_capacity, (double) MinHeapFreeRatio);
1624 expand(expand_bytes);
1625
1626 // No expansion, now see if we want to shrink
1627 } else if (capacity_after_gc > maximum_desired_capacity) {
1628 // Capacity too large, compute shrinking size
1629 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1630 ergo_verbose4(ErgoHeapSizing,
1631 "attempt heap shrinking",
1632 ergo_format_reason("capacity higher than "
1633 "max desired capacity after Full GC")
1634 ergo_format_byte("capacity")
1635 ergo_format_byte("occupancy")
1636 ergo_format_byte_perc("max desired capacity"),
1637 capacity_after_gc, used_after_gc,
1638 maximum_desired_capacity, (double) MaxHeapFreeRatio);
1639 shrink(shrink_bytes);
1640 }
1641 }
1642
1643
1644 HeapWord*
1645 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1646 AllocationContext_t context,
1647 bool* succeeded) {
1648 assert_at_safepoint(true /* should_be_vm_thread */);
1649
1650 *succeeded = true;
1651 // Let's attempt the allocation first.
1652 HeapWord* result =
1653 attempt_allocation_at_safepoint(word_size,
1654 context,
1655 false /* expect_null_mutator_alloc_region */);
1656 if (result != NULL) {
1657 assert(*succeeded, "sanity");
1658 return result;
1659 }
1660
1661 // In a G1 heap, we're supposed to keep allocation from failing by
1662 // incremental pauses. Therefore, at least for now, we'll favor
1663 // expansion over collection. (This might change in the future if we can
1664 // do something smarter than full collection to satisfy a failed alloc.)
1665 result = expand_and_allocate(word_size, context);
1666 if (result != NULL) {
1667 assert(*succeeded, "sanity");
1668 return result;
1669 }
1670
1671 // Expansion didn't work, we'll try to do a Full GC.
1672 bool gc_succeeded = do_collection(false, /* explicit_gc */
1673 false, /* clear_all_soft_refs */
1674 word_size);
1675 if (!gc_succeeded) {
1676 *succeeded = false;
1677 return NULL;
1678 }
1679
1680 // Retry the allocation
1681 result = attempt_allocation_at_safepoint(word_size,
1682 context,
1683 true /* expect_null_mutator_alloc_region */);
1684 if (result != NULL) {
1685 assert(*succeeded, "sanity");
1686 return result;
1687 }
1688
1689 // Then, try a Full GC that will collect all soft references.
1690 gc_succeeded = do_collection(false, /* explicit_gc */
1691 true, /* clear_all_soft_refs */
1692 word_size);
1693 if (!gc_succeeded) {
1694 *succeeded = false;
1695 return NULL;
1696 }
1697
1698 // Retry the allocation once more
1699 result = attempt_allocation_at_safepoint(word_size,
1700 context,
1701 true /* expect_null_mutator_alloc_region */);
1702 if (result != NULL) {
1703 assert(*succeeded, "sanity");
1704 return result;
1705 }
1706
1707 assert(!collector_policy()->should_clear_all_soft_refs(),
1708 "Flag should have been handled and cleared prior to this point");
1709
1710 // What else? We might try synchronous finalization later. If the total
1711 // space available is large enough for the allocation, then a more
1712 // complete compaction phase than we've tried so far might be
1713 // appropriate.
1714 assert(*succeeded, "sanity");
1715 return NULL;
1716 }
1717
1718 // Attempt to expand the heap sufficiently
1719 // to support an allocation of the given "word_size". If
1720 // successful, perform the allocation and return the address of the
1721 // allocated block, or else "NULL".
1722
1723 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1724 assert_at_safepoint(true /* should_be_vm_thread */);
1725
1726 verify_region_sets_optional();
1727
1728 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1729 ergo_verbose1(ErgoHeapSizing,
1730 "attempt heap expansion",
1731 ergo_format_reason("allocation request failed")
1732 ergo_format_byte("allocation request"),
1733 word_size * HeapWordSize);
1734 if (expand(expand_bytes)) {
1735 _hrm.verify_optional();
1736 verify_region_sets_optional();
1737 return attempt_allocation_at_safepoint(word_size,
1738 context,
1739 false /* expect_null_mutator_alloc_region */);
1740 }
1741 return NULL;
1742 }
1743
1744 bool G1CollectedHeap::expand(size_t expand_bytes) {
1745 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1746 aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1747 HeapRegion::GrainBytes);
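// Both alignment steps only round up; e.g. (purely illustrative sizes) a 5 MB
// request with 4 MB regions is page-aligned and then rounded up to 8 MB,
// i.e. two regions.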
1748 ergo_verbose2(ErgoHeapSizing,
1749 "expand the heap",
1750 ergo_format_byte("requested expansion amount")
1751 ergo_format_byte("attempted expansion amount"),
1752 expand_bytes, aligned_expand_bytes);
1753
1754 if (is_maximal_no_gc()) {
1755 ergo_verbose0(ErgoHeapSizing,
1756 "did not expand the heap",
1757 ergo_format_reason("heap already fully expanded"));
1758 return false;
1759 }
1760
1761 uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1762 assert(regions_to_expand > 0, "Must expand by at least one region");
1763
1764 uint expanded_by = _hrm.expand_by(regions_to_expand);
1765
1766 if (expanded_by > 0) {
1767 size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1768 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1769 g1_policy()->record_new_heap_size(num_regions());
1770 } else {
1771 ergo_verbose0(ErgoHeapSizing,
1772 "did not expand the heap",
1773 ergo_format_reason("heap expansion operation failed"));
1774 // The expansion of the virtual storage space was unsuccessful.
1775 // Let's see if it was because we ran out of swap.
1776 if (G1ExitOnExpansionFailure &&
1777 _hrm.available() >= regions_to_expand) {
1778 // We had head room...
1779 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1780 }
1781 }
1782 return regions_to_expand > 0;
1783 }
1784
1785 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1786 size_t aligned_shrink_bytes =
1787 ReservedSpace::page_align_size_down(shrink_bytes);
1788 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1789 HeapRegion::GrainBytes);
1790 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1791
1792 uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1793 size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1794
1795 ergo_verbose3(ErgoHeapSizing,
1796 "shrink the heap",
1797 ergo_format_byte("requested shrinking amount")
1798 ergo_format_byte("aligned shrinking amount")
1799 ergo_format_byte("attempted shrinking amount"),
1800 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1801 if (num_regions_removed > 0) {
1802 g1_policy()->record_new_heap_size(num_regions());
1803 } else {
1804 ergo_verbose0(ErgoHeapSizing,
1805 "did not shrink the heap",
1806 ergo_format_reason("heap shrinking operation failed"));
1807 }
1808 }
1809
1810 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1811 verify_region_sets_optional();
1812
1813 // We should only reach here at the end of a Full GC which means we
1814 // should not be holding on to any GC alloc regions. The method
1815 // below will make sure of that and do any remaining clean up.
1816 _allocator->abandon_gc_alloc_regions();
1817
1818 // Instead of tearing down / rebuilding the free lists here, we
1819 // could instead use the remove_all_pending() method on free_list to
1820 // remove only the ones that we need to remove.
1821 tear_down_region_sets(true /* free_list_only */);
1822 shrink_helper(shrink_bytes);
1823 rebuild_region_sets(true /* free_list_only */);
1824
1825 _hrm.verify_optional();
1826 verify_region_sets_optional();
1827 }
1828
1829 // Public methods.
1830
1831 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1832 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1833 #endif // _MSC_VER
1834
1835
1836 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1837 SharedHeap(policy_),
1838 _g1_policy(policy_),
1839 _dirty_card_queue_set(false),
1840 _into_cset_dirty_card_queue_set(false),
1841 _is_alive_closure_cm(this),
1842 _is_alive_closure_stw(this),
1843 _ref_processor_cm(NULL),
1844 _ref_processor_stw(NULL),
1845 _bot_shared(NULL),
1846 _evac_failure_scan_stack(NULL),
1847 _mark_in_progress(false),
1848 _cg1r(NULL),
1849 _g1mm(NULL),
1850 _refine_cte_cl(NULL),
1851 _full_collection(false),
1852 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1853 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1854 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1855 _humongous_reclaim_candidates(),
1856 _has_humongous_reclaim_candidates(false),
1857 _free_regions_coming(false),
1858 _young_list(new YoungList(this)),
1859 _gc_time_stamp(0),
1860 _survivor_plab_stats(YoungPLABSize, PLABWeight),
1861 _old_plab_stats(OldPLABSize, PLABWeight),
1862 _expand_heap_after_alloc_failure(true),
1863 _surviving_young_words(NULL),
1864 _old_marking_cycles_started(0),
1865 _old_marking_cycles_completed(0),
1866 _concurrent_cycle_started(false),
1867 _heap_summary_sent(false),
1868 _in_cset_fast_test(),
1869 _dirty_cards_region_list(NULL),
1870 _worker_cset_start_region(NULL),
1871 _worker_cset_start_region_time_stamp(NULL),
1872 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1873 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1874 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1875 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1876
1877 _g1h = this;
1878
1879 _allocator = G1Allocator::create_allocator(_g1h);
1880 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
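// Objects of at least half a region are classified as humongous and are
// allocated in dedicated regions rather than through the mutator alloc region.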
1881
1882 int n_queues = MAX2((int)ParallelGCThreads, 1);
1883 _task_queues = new RefToScanQueueSet(n_queues);
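// One reference-to-scan queue per GC worker; MAX2 above guarantees at least
// one queue even when ParallelGCThreads is 0.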
1884
1885 uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1886 assert(n_rem_sets > 0, "Invariant.");
1887
1888 _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1889 _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
1890 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1891
1892 for (int i = 0; i < n_queues; i++) {
1893 RefToScanQueue* q = new RefToScanQueue();
1894 q->initialize();
1895 _task_queues->register_queue(i, q);
1896 ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1897 }
1898 clear_cset_start_regions();
1899
1900 // Initialize the G1EvacuationFailureALot counters and flags.
1901 NOT_PRODUCT(reset_evacuation_should_fail();)
1902
1903 guarantee(_task_queues != NULL, "task_queues allocation failure.");
1904 }
1905
1906 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1907 size_t size,
1908 size_t translation_factor) {
1909 size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1910 // Allocate a new reserved space, preferring to use large pages.
1911 ReservedSpace rs(size, preferred_page_size);
1912 G1RegionToSpaceMapper* result =
1913 G1RegionToSpaceMapper::create_mapper(rs,
1914 size,
1915 rs.alignment(),
1916 HeapRegion::GrainBytes,
1917 translation_factor,
1918 mtGC);
1919 if (TracePageSizes) {
1920 gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
1921 description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
1922 }
1923 return result;
1924 }
1925
1926 jint G1CollectedHeap::initialize() {
1927 CollectedHeap::pre_initialize();
1928 os::enable_vtime();
1929
1930 G1Log::init();
1931
1932 // Necessary to satisfy locking discipline assertions.
1933
1934 MutexLocker x(Heap_lock);
1935
1936 // We have to initialize the printer before committing the heap, as
1937 // it will be used then.
1938 _hr_printer.set_active(G1PrintHeapRegions);
1939
1940 // While there are no constraints in the GC code that HeapWordSize
1941 // be any particular value, there are multiple other areas in the
1942 // system which believe this to be true (e.g. oop->object_size in some
1943 // cases incorrectly returns the size in wordSize units rather than
1944 // HeapWordSize).
1945 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1946
1947 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1948 size_t max_byte_size = collector_policy()->max_heap_byte_size();
1949 size_t heap_alignment = collector_policy()->heap_alignment();
1950
1951 // Ensure that the sizes are properly aligned.
1952 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1953 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1954 Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1955
1956 _refine_cte_cl = new RefineCardTableEntryClosure();
1957
1958 _cg1r = new ConcurrentG1Refine(this, _refine_cte_cl);
1959
1960 // Reserve the maximum.
1961
1962 // When compressed oops are enabled, the preferred heap base
1963 // is calculated by subtracting the requested size from the
1964 // 32Gb boundary and using the result as the base address for
1965 // heap reservation. If the requested size is not aligned to
1966 // HeapRegion::GrainBytes (i.e. the alignment that is passed
1967 // into the ReservedHeapSpace constructor) then the actual
1968 // base of the reserved heap may end up differing from the
1969 // address that was requested (i.e. the preferred heap base).
1970 // If this happens then we could end up using a non-optimal
1971 // compressed oops mode.
1972
1973 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1974 heap_alignment);
1975
1976 // It is important to do this in a way such that concurrent readers can't
1977 // temporarily think something is in the heap. (I've actually seen this
1978 // happen in asserts: DLD.)
1979 _reserved.set_word_size(0);
1980 _reserved.set_start((HeapWord*)heap_rs.base());
1981 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
1982
1983 // Create the gen rem set (and barrier set) for the entire reserved region.
1984 _rem_set = collector_policy()->create_rem_set(_reserved, 2);
1985 set_barrier_set(rem_set()->bs());
1986 if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
1987 vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
1988 return JNI_ENOMEM;
1989 }
1990
1991 // Also create a G1 rem set.
1992 _g1_rem_set = new G1RemSet(this, g1_barrier_set());
1993
1994 // Carve out the G1 part of the heap.
1995
1996 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1997 G1RegionToSpaceMapper* heap_storage =
1998 G1RegionToSpaceMapper::create_mapper(g1_rs,
1999 g1_rs.size(),
2000 UseLargePages ? os::large_page_size() : os::vm_page_size(),
2001 HeapRegion::GrainBytes,
2002 1,
2003 mtJavaHeap);
2004 heap_storage->set_mapping_changed_listener(&_listener);
2005
2006 // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
2007 G1RegionToSpaceMapper* bot_storage =
2008 create_aux_memory_mapper("Block offset table",
2009 G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
2010 G1BlockOffsetSharedArray::N_bytes);
2011
2012 ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
2013 G1RegionToSpaceMapper* cardtable_storage =
2014 create_aux_memory_mapper("Card table",
2015 G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
2016 G1BlockOffsetSharedArray::N_bytes);
2017
2018 G1RegionToSpaceMapper* card_counts_storage =
2019 create_aux_memory_mapper("Card counts table",
2020 G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
2021 G1BlockOffsetSharedArray::N_bytes);
2022
2023 size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
2024 G1RegionToSpaceMapper* prev_bitmap_storage =
2025 create_aux_memory_mapper("Prev Bitmap", bitmap_size, CMBitMap::mark_distance());
2026 G1RegionToSpaceMapper* next_bitmap_storage =
2027 create_aux_memory_mapper("Next Bitmap", bitmap_size, CMBitMap::mark_distance());
2028
2029 _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
2030 g1_barrier_set()->initialize(cardtable_storage);
2031 // Do later initialization work for concurrent refinement.
2032 _cg1r->init(card_counts_storage);
2033
2034 // 6843694 - ensure that the maximum region index can fit
2035 // in the remembered set structures.
2036 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2037 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
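// For example, if RegionIdx_t were a 16-bit type (illustrative assumption;
// the actual typedef may differ), max_region_idx would be 2^15 - 1 = 32767.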
2038
2039 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2040 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2041 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2042 "too many cards per region");
2043
2044 FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
2045
2046 _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
2047
2048 _g1h = this;
2049
2050 {
2051 HeapWord* start = _hrm.reserved().start();
2052 HeapWord* end = _hrm.reserved().end();
2053 size_t granularity = HeapRegion::GrainBytes;
2054
2055 _in_cset_fast_test.initialize(start, end, granularity);
2056 _humongous_reclaim_candidates.initialize(start, end, granularity);
2057 }
2058
2059 // Create the ConcurrentMark data structure and thread.
2060 // (Must do this late, so that "max_regions" is defined.)
2061 _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
2062 if (_cm == NULL || !_cm->completed_initialization()) {
2063 vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
2064 return JNI_ENOMEM;
2065 }
2066 _cmThread = _cm->cmThread();
2067
2068 // Initialize the from_card cache structure of HeapRegionRemSet.
2069 HeapRegionRemSet::init_heap(max_regions());
2070
2071 // Now expand into the initial heap size.
2072 if (!expand(init_byte_size)) {
2073 vm_shutdown_during_initialization("Failed to allocate initial heap.");
2074 return JNI_ENOMEM;
2075 }
2076
2077 // Perform any initialization actions delegated to the policy.
2078 g1_policy()->init();
2079
2080 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
2081 SATB_Q_FL_lock,
2082 G1SATBProcessCompletedThreshold,
2083 Shared_SATB_Q_lock);
2084
2085 JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
2086 DirtyCardQ_CBL_mon,
2087 DirtyCardQ_FL_lock,
2088 concurrent_g1_refine()->yellow_zone(),
2089 concurrent_g1_refine()->red_zone(),
2090 Shared_DirtyCardQ_lock);
2091
2092 dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
2093 DirtyCardQ_CBL_mon,
2094 DirtyCardQ_FL_lock,
2095 -1, // never trigger processing
2096 -1, // no limit on length
2097 Shared_DirtyCardQ_lock,
2098 &JavaThread::dirty_card_queue_set());
2099
2100 // Initialize the card queue set used to hold cards containing
2101 // references into the collection set.
2102 _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
2103 DirtyCardQ_CBL_mon,
2104 DirtyCardQ_FL_lock,
2105 -1, // never trigger processing
2106 -1, // no limit on length
2107 Shared_DirtyCardQ_lock,
2108 &JavaThread::dirty_card_queue_set());
2109
2110 // In case we're keeping closure specialization stats, initialize those
2111 // counts and that mechanism.
2112 SpecializationStats::clear();
2113
2114 // Here we allocate the dummy HeapRegion that is required by the
2115 // G1AllocRegion class.
2116 HeapRegion* dummy_region = _hrm.get_dummy_region();
2117
2118 // We'll re-use the same region whether the alloc region will
2119 // require BOT updates or not and, if it doesn't, then a non-young
2120 // region will complain that it cannot support allocations without
2121 // BOT updates. So we'll tag the dummy region as eden to avoid that.
2122 dummy_region->set_eden();
2123 // Make sure it's full.
2124 dummy_region->set_top(dummy_region->end());
2125 G1AllocRegion::setup(this, dummy_region);
2126
2127 _allocator->init_mutator_alloc_region();
2128
2129 // Create the monitoring and management support now that
2130 // values in the heap have been properly initialized.
2131 _g1mm = new G1MonitoringSupport(this);
2132
2133 G1StringDedup::initialize();
2134
2135 return JNI_OK;
2136 }
2137
2138 void G1CollectedHeap::stop() {
2139 // Stop all concurrent threads. We do this to make sure these threads
2140 // do not continue to execute and access resources (e.g. gclog_or_tty)
2141 // that are destroyed during shutdown.
2142 _cg1r->stop();
2143 _cmThread->stop();
2144 if (G1StringDedup::is_enabled()) {
2145 G1StringDedup::stop();
2146 }
2147 }
2148
2149 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2150 return HeapRegion::max_region_size();
2151 }
2152
2153 void G1CollectedHeap::ref_processing_init() {
2154 // Reference processing in G1 currently works as follows:
2155 //
2156 // * There are two reference processor instances. One is
2157 // used to record and process discovered references
2158 // during concurrent marking; the other is used to
2159 // record and process references during STW pauses
2160 // (both full and incremental).
2161 // * Both ref processors need to 'span' the entire heap as
2162 // the regions in the collection set may be dotted around.
2163 //
2164 // * For the concurrent marking ref processor:
2165 // * Reference discovery is enabled at initial marking.
2166 // * Reference discovery is disabled and the discovered
2167 // references processed etc during remarking.
2168 // * Reference discovery is MT (see below).
2169 // * Reference discovery requires a barrier (see below).
2170 // * Reference processing may or may not be MT
2171 // (depending on the value of ParallelRefProcEnabled
2172 // and ParallelGCThreads).
2173 // * A full GC disables reference discovery by the CM
2174 // ref processor and abandons any entries on its
2175 // discovered lists.
2176 //
2177 // * For the STW processor:
2178 // * Non MT discovery is enabled at the start of a full GC.
2179 // * Processing and enqueueing during a full GC is non-MT.
2180 // * During a full GC, references are processed after marking.
2181 //
2182 // * Discovery (may or may not be MT) is enabled at the start
2183 // of an incremental evacuation pause.
2184 // * References are processed near the end of a STW evacuation pause.
2185 // * For both types of GC:
2186 // * Discovery is atomic - i.e. not concurrent.
2187 // * Reference discovery will not need a barrier.
2188
2189 SharedHeap::ref_processing_init();
2190 MemRegion mr = reserved_region();
2191
2192 // Concurrent Mark ref processor
2193 _ref_processor_cm =
2194 new ReferenceProcessor(mr, // span
2195 ParallelRefProcEnabled && (ParallelGCThreads > 1),
2196 // mt processing
2197 (int) ParallelGCThreads,
2198 // degree of mt processing
2199 (ParallelGCThreads > 1) || (ConcGCThreads > 1),
2200 // mt discovery
2201 (int) MAX2(ParallelGCThreads, ConcGCThreads),
2202 // degree of mt discovery
2203 false,
2204 // Reference discovery is not atomic
2205 &_is_alive_closure_cm);
2206 // is alive closure
2207 // (for efficiency/performance)
2208
2209 // STW ref processor
2210 _ref_processor_stw =
2211 new ReferenceProcessor(mr, // span
2212 ParallelRefProcEnabled && (ParallelGCThreads > 1),
2213 // mt processing
2214 MAX2((int)ParallelGCThreads, 1),
2215 // degree of mt processing
2216 (ParallelGCThreads > 1),
2217 // mt discovery
2218 MAX2((int)ParallelGCThreads, 1),
2219 // degree of mt discovery
2220 true,
2221 // Reference discovery is atomic
2222 &_is_alive_closure_stw);
2223 // is alive closure
2224 // (for efficiency/performance)
2225 }
2226
2227 size_t G1CollectedHeap::capacity() const {
2228 return _hrm.length() * HeapRegion::GrainBytes;
2229 }
2230
2231 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2232 assert(!hr->continuesHumongous(), "pre-condition");
2233 hr->reset_gc_time_stamp();
2234 if (hr->startsHumongous()) {
2235 uint first_index = hr->hrm_index() + 1;
2236 uint last_index = hr->last_hc_index();
2237 for (uint i = first_index; i < last_index; i += 1) {
2238 HeapRegion* chr = region_at(i);
2239 assert(chr->continuesHumongous(), "sanity");
2240 chr->reset_gc_time_stamp();
2241 }
2242 }
2243 }
2244
2245 #ifndef PRODUCT
2246 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2247 private:
2248 unsigned _gc_time_stamp;
2249 bool _failures;
2250
2251 public:
2252 CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2253 _gc_time_stamp(gc_time_stamp), _failures(false) { }
2254
2255 virtual bool doHeapRegion(HeapRegion* hr) {
2256 unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2257 if (_gc_time_stamp != region_gc_time_stamp) {
2258 gclog_or_tty->print_cr("Region " HR_FORMAT " has GC time stamp = %d, "
2259 "expected %d", HR_FORMAT_PARAMS(hr),
2260 region_gc_time_stamp, _gc_time_stamp);
2261 _failures = true;
2262 }
2263 return false;
2264 }
2265
2266 bool failures() { return _failures; }
2267 };
2268
2269 void G1CollectedHeap::check_gc_time_stamps() {
2270 CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2271 heap_region_iterate(&cl);
2272 guarantee(!cl.failures(), "all GC time stamps should have been reset");
2273 }
2274 #endif // PRODUCT
2275
2276 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2277 DirtyCardQueue* into_cset_dcq,
2278 bool concurrent,
2279 uint worker_i) {
2280 // Clean cards in the hot card cache
2281 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
2282 hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
2283
2284 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2285 size_t n_completed_buffers = 0;
2286 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2287 n_completed_buffers++;
2288 }
2289 g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
2290 dcqs.clear_n_completed_buffers();
2291 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2292 }
2293
2294
2295 // Computes the sum of the storage used by the various regions.
2296 size_t G1CollectedHeap::used() const {
2297 return _allocator->used();
2298 }
2299
2300 size_t G1CollectedHeap::used_unlocked() const {
2301 return _allocator->used_unlocked();
2302 }
2303
2304 class SumUsedClosure: public HeapRegionClosure {
2305 size_t _used;
2306 public:
2307 SumUsedClosure() : _used(0) {}
2308 bool doHeapRegion(HeapRegion* r) {
2309 if (!r->continuesHumongous()) {
2310 _used += r->used();
2311 }
2312 return false;
2313 }
2314 size_t result() { return _used; }
2315 };
2316
2317 size_t G1CollectedHeap::recalculate_used() const {
2318 double recalculate_used_start = os::elapsedTime();
2319
2320 SumUsedClosure blk;
2321 heap_region_iterate(&blk);
2322
2323 g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
2324 return blk.result();
2325 }
2326
2327 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2328 switch (cause) {
2329 case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
2330 case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
2331 case GCCause::_g1_humongous_allocation: return true;
2332 case GCCause::_update_allocation_context_stats_inc: return true;
2333 case GCCause::_wb_conc_mark: return true;
2334 default: return false;
2335 }
2336 }
2337
2338 #ifndef PRODUCT
2339 void G1CollectedHeap::allocate_dummy_regions() {
2340 // Let's fill up most of the region
2341 size_t word_size = HeapRegion::GrainWords - 1024;
2342 // And as a result the region we'll allocate will be humongous.
2343 guarantee(isHumongous(word_size), "sanity");
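// (GrainWords - 1024 is comfortably above the half-region humongous
// threshold, which is what the guarantee above verifies.)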
2344
2345 for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2346 // Let's use the existing mechanism for the allocation
2347 HeapWord* dummy_obj = humongous_obj_allocate(word_size,
2348 AllocationContext::system());
2349 if (dummy_obj != NULL) {
2350 MemRegion mr(dummy_obj, word_size);
2351 CollectedHeap::fill_with_object(mr);
2352 } else {
2353 // If we can't allocate once, we probably cannot allocate
2354 // again. Let's get out of the loop.
2355 break;
2356 }
2357 }
2358 }
2359 #endif // !PRODUCT
2360
2361 void G1CollectedHeap::increment_old_marking_cycles_started() {
2362 assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
2363 _old_marking_cycles_started == _old_marking_cycles_completed + 1,
2364 err_msg("Wrong marking cycle count (started: %d, completed: %d)",
2365 _old_marking_cycles_started, _old_marking_cycles_completed));
2366
2367 _old_marking_cycles_started++;
2368 }
2369
2370 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
2371 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
2372
2373 // We assume that if concurrent == true, then the caller is a
2374 // concurrent thread that has joined the Suspendible Thread
2375 // Set. If there's ever a cheap way to check this, we should add an
2376 // assert here.
2377
2378 // Given that this method is called at the end of a Full GC or of a
2379 // concurrent cycle, and those can be nested (i.e., a Full GC can
2380 // interrupt a concurrent cycle), the number of full collections
2381 // completed should be either one (in the case where there was no
2382 // nesting) or two (when a Full GC interrupted a concurrent cycle)
2383 // behind the number of full collections started.
2384
2385 // This is the case for the inner caller, i.e. a Full GC.
2386 assert(concurrent ||
2387 (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
2388 (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
2389 err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
2390 "is inconsistent with _old_marking_cycles_completed = %u",
2391 _old_marking_cycles_started, _old_marking_cycles_completed));
2392
2393 // This is the case for the outer caller, i.e. the concurrent cycle.
2394 assert(!concurrent ||
2395 (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
2396 err_msg("for outer caller (concurrent cycle): "
2397 "_old_marking_cycles_started = %u "
2398 "is inconsistent with _old_marking_cycles_completed = %u",
2399 _old_marking_cycles_started, _old_marking_cycles_completed));
2400
2401 _old_marking_cycles_completed += 1;
2402
2403 // We need to clear the "in_progress" flag in the CM thread before
2404 // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2405 // is set) so that if a waiter requests another System.gc() it doesn't
2406 // incorrectly see that a marking cycle is still in progress.
2407 if (concurrent) {
2408 _cmThread->set_idle();
2409 }
2410
2411 // This notify_all() will ensure that a thread that called
2412 // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2413 // and is waiting for a full GC to finish will be woken up. It is
2414 // waiting in VM_G1IncCollectionPause::doit_epilogue().
2415 FullGCCount_lock->notify_all();
2416 }
2417
2418 void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
2419 _concurrent_cycle_started = true;
2420 _gc_timer_cm->register_gc_start(start_time);
2421
2422 _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
2423 trace_heap_before_gc(_gc_tracer_cm);
2424 }
2425
2426 void G1CollectedHeap::register_concurrent_cycle_end() {
2427 if (_concurrent_cycle_started) {
2428 if (_cm->has_aborted()) {
2429 _gc_tracer_cm->report_concurrent_mode_failure();
2430 }
2431
2432 _gc_timer_cm->register_gc_end();
2433 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2434
2435 // Clear state variables to prepare for the next concurrent cycle.
2436 _concurrent_cycle_started = false;
2437 _heap_summary_sent = false;
2438 }
2439 }
2440
2441 void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
2442 if (_concurrent_cycle_started) {
2443 // This function can be called when:
2444 // the cleanup pause is run
2445 // the concurrent cycle is aborted before the cleanup pause.
2446 // the concurrent cycle is aborted after the cleanup pause,
2447 // but before the concurrent cycle end has been registered.
2448 // Make sure that we only send the heap information once.
2449 if (!_heap_summary_sent) {
2450 trace_heap_after_gc(_gc_tracer_cm);
2451 _heap_summary_sent = true;
2452 }
2453 }
2454 }
2455
2456 G1YCType G1CollectedHeap::yc_type() {
2457 bool is_young = g1_policy()->gcs_are_young();
2458 bool is_initial_mark = g1_policy()->during_initial_mark_pause();
2459 bool is_during_mark = mark_in_progress();
2460
2461 if (is_initial_mark) {
2462 return InitialMark;
2463 } else if (is_during_mark) {
2464 return DuringMark;
2465 } else if (is_young) {
2466 return Normal;
2467 } else {
2468 return Mixed;
2469 }
2470 }
2471
2472 void G1CollectedHeap::collect(GCCause::Cause cause) {
2473 assert_heap_not_locked();
2474
2475 uint gc_count_before;
2476 uint old_marking_count_before;
2477 uint full_gc_count_before;
2478 bool retry_gc;
2479
2480 do {
2481 retry_gc = false;
2482
2483 {
2484 MutexLocker ml(Heap_lock);
2485
2486 // Read the GC count while holding the Heap_lock
2487 gc_count_before = total_collections();
2488 full_gc_count_before = total_full_collections();
2489 old_marking_count_before = _old_marking_cycles_started;
2490 }
2491
2492 if (should_do_concurrent_full_gc(cause)) {
2493 // Schedule an initial-mark evacuation pause that will start a
2494 // concurrent cycle. We're setting word_size to 0 which means that
2495 // we are not requesting a post-GC allocation.
2496 VM_G1IncCollectionPause op(gc_count_before,
2497 0, /* word_size */
2498 true, /* should_initiate_conc_mark */
2499 g1_policy()->max_pause_time_ms(),
2500 cause);
2501 op.set_allocation_context(AllocationContext::current());
2502
2503 VMThread::execute(&op);
2504 if (!op.pause_succeeded()) {
2505 if (old_marking_count_before == _old_marking_cycles_started) {
2506 retry_gc = op.should_retry_gc();
2507 } else {
2508 // A Full GC happened while we were trying to schedule the
2509 // initial-mark GC. No point in starting a new cycle given
2510 // that the whole heap was collected anyway.
2511 }
2512
2513 if (retry_gc) {
2514 if (GC_locker::is_active_and_needs_gc()) {
2515 GC_locker::stall_until_clear();
2516 }
2517 }
2518 }
2519 } else if (GC_locker::should_discard(cause, gc_count_before)) {
2520 // Return to be consistent with VMOp failure due to another
2521 // collection slipping in after our gc_count but before our
2522 // request is processed. _gc_locker collections upgraded by
2523 // GCLockerInvokesConcurrent are handled above and never discarded.
2524 return;
2525 } else {
2526 if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
2527 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2528
2529 // Schedule a standard evacuation pause. We're setting word_size
2530 // to 0 which means that we are not requesting a post-GC allocation.
2531 VM_G1IncCollectionPause op(gc_count_before,
2532 0, /* word_size */
2533 false, /* should_initiate_conc_mark */
2534 g1_policy()->max_pause_time_ms(),
2535 cause);
2536 VMThread::execute(&op);
2537 } else {
2538 // Schedule a Full GC.
2539 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2540 VMThread::execute(&op);
2541 }
2542 }
2543 } while (retry_gc);
2544 }
2545
2546 bool G1CollectedHeap::is_in(const void* p) const {
2547 if (_hrm.reserved().contains(p)) {
2548 // Given that we know that p is in the reserved space,
2549 // heap_region_containing_raw() should successfully
2550 // return the containing region.
2551 HeapRegion* hr = heap_region_containing_raw(p);
2552 return hr->is_in(p);
2553 } else {
2554 return false;
2555 }
2556 }
2557
2558 #ifdef ASSERT
2559 bool G1CollectedHeap::is_in_exact(const void* p) const {
2560 bool contains = reserved_region().contains(p);
2561 bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
2562 if (contains && available) {
2563 return true;
2564 } else {
2565 return false;
2566 }
2567 }
2568 #endif
2569
2570 // Iteration functions.
2571
2572 // Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
2573
2574 class IterateOopClosureRegionClosure: public HeapRegionClosure {
2575 ExtendedOopClosure* _cl;
2576 public:
2577 IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
2578 bool doHeapRegion(HeapRegion* r) {
2579 if (!r->continuesHumongous()) {
2580 r->oop_iterate(_cl);
2581 }
2582 return false;
2583 }
2584 };
2585
2586 void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
2587 IterateOopClosureRegionClosure blk(cl);
2588 heap_region_iterate(&blk);
2589 }
2590
2591 // Iterates an ObjectClosure over all objects within a HeapRegion.
2592
2593 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
2594 ObjectClosure* _cl;
2595 public:
2596 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
2597 bool doHeapRegion(HeapRegion* r) {
2598 if (! r->continuesHumongous()) {
2599 r->object_iterate(_cl);
2600 }
2601 return false;
2602 }
2603 };
2604
2605 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
2606 IterateObjectClosureRegionClosure blk(cl);
2607 heap_region_iterate(&blk);
2608 }
2609
2610 // Calls a SpaceClosure on a HeapRegion.
2611
2612 class SpaceClosureRegionClosure: public HeapRegionClosure {
2613 SpaceClosure* _cl;
2614 public:
2615 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
2616 bool doHeapRegion(HeapRegion* r) {
2617 _cl->do_space(r);
2618 return false;
2619 }
2620 };
2621
2622 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
2623 SpaceClosureRegionClosure blk(cl);
2624 heap_region_iterate(&blk);
2625 }
2626
2627 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2628 _hrm.iterate(cl);
2629 }
2630
2631 void
2632 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
2633 uint worker_id,
2634 uint num_workers,
2635 jint claim_value) const {
2636 _hrm.par_iterate(cl, worker_id, num_workers, claim_value);
2637 }
2638
2639 class ResetClaimValuesClosure: public HeapRegionClosure {
2640 public:
2641 bool doHeapRegion(HeapRegion* r) {
2642 r->set_claim_value(HeapRegion::InitialClaimValue);
2643 return false;
2644 }
2645 };
2646
2647 void G1CollectedHeap::reset_heap_region_claim_values() {
2648 ResetClaimValuesClosure blk;
2649 heap_region_iterate(&blk);
2650 }
2651
2652 void G1CollectedHeap::reset_cset_heap_region_claim_values() {
2653 ResetClaimValuesClosure blk;
2654 collection_set_iterate(&blk);
2655 }
2656
2657 #ifdef ASSERT
2658 // This checks whether all regions in the heap have the correct claim
2659 // value. It also piggy-backs a check that the
2660 // humongous_start_region() information on "continues humongous"
2661 // regions is correct.
2662
2663 class CheckClaimValuesClosure : public HeapRegionClosure {
2664 private:
2665 jint _claim_value;
2666 uint _failures;
2667 HeapRegion* _sh_region;
2668
2669 public:
2670 CheckClaimValuesClosure(jint claim_value) :
2671 _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
2672 bool doHeapRegion(HeapRegion* r) {
2673 if (r->claim_value() != _claim_value) {
2674 gclog_or_tty->print_cr("Region " HR_FORMAT ", "
2675 "claim value = %d, should be %d",
2676 HR_FORMAT_PARAMS(r),
2677 r->claim_value(), _claim_value);
2678 ++_failures;
2679 }
2680 if (!r->isHumongous()) {
2681 _sh_region = NULL;
2682 } else if (r->startsHumongous()) {
2683 _sh_region = r;
2684 } else if (r->continuesHumongous()) {
2685 if (r->humongous_start_region() != _sh_region) {
2686 gclog_or_tty->print_cr("Region " HR_FORMAT ", "
2687 "HS = " PTR_FORMAT ", should be " PTR_FORMAT,
2688 HR_FORMAT_PARAMS(r),
2689 p2i(r->humongous_start_region()),
2690 p2i(_sh_region));
2691 ++_failures;
2692 }
2693 }
2694 return false;
2695 }
2696 uint failures() { return _failures; }
2697 };
2698
2699 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
2700 CheckClaimValuesClosure cl(claim_value);
2701 heap_region_iterate(&cl);
2702 return cl.failures() == 0;
2703 }
2704
2705 class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
2706 private:
2707 jint _claim_value;
2708 uint _failures;
2709
2710 public:
2711 CheckClaimValuesInCSetHRClosure(jint claim_value) :
2712 _claim_value(claim_value), _failures(0) { }
2713
2714 uint failures() { return _failures; }
2715
2716 bool doHeapRegion(HeapRegion* hr) {
2717 assert(hr->in_collection_set(), "how?");
2718 assert(!hr->isHumongous(), "H-region in CSet");
2719 if (hr->claim_value() != _claim_value) {
2720 gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
2721 "claim value = %d, should be %d",
2722 HR_FORMAT_PARAMS(hr),
2723 hr->claim_value(), _claim_value);
2724 _failures += 1;
2725 }
2726 return false;
2727 }
2728 };
2729
2730 bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {
2731 CheckClaimValuesInCSetHRClosure cl(claim_value);
2732 collection_set_iterate(&cl);
2733 return cl.failures() == 0;
2734 }
2735 #endif // ASSERT
2736
2737 // Clear the cached CSet starting regions and (more importantly)
2738 // the time stamps. Called when we reset the GC time stamp.
2739 void G1CollectedHeap::clear_cset_start_regions() {
2740 assert(_worker_cset_start_region != NULL, "sanity");
2741 assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
2742
2743 int n_queues = MAX2((int)ParallelGCThreads, 1);
2744 for (int i = 0; i < n_queues; i++) {
2745 _worker_cset_start_region[i] = NULL;
2746 _worker_cset_start_region_time_stamp[i] = 0;
2747 }
2748 }
2749
2750 // Given the id of a worker, obtain or calculate a suitable
2751 // starting region for iterating over the current collection set.
2752 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
2753 assert(get_gc_time_stamp() > 0, "should have been updated by now");
2754
2755 HeapRegion* result = NULL;
2756 unsigned gc_time_stamp = get_gc_time_stamp();
2757
2758 if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
2759 // Cached starting region for current worker was set
2760 // during the current pause - so it's valid.
2761 // Note: the cached starting heap region may be NULL
2762 // (when the collection set is empty).
2763 result = _worker_cset_start_region[worker_i];
2764 assert(result == NULL || result->in_collection_set(), "sanity");
2765 return result;
2766 }
2767
2768 // The cached entry was not valid so let's calculate
2769 // a suitable starting heap region for this worker.
2770
2771 // We want the parallel threads to start their collection
2772 // set iteration at different collection set regions to
2773 // avoid contention.
2774 // If we have:
2775 // n collection set regions
2776 // p threads
2777 // Then thread t will start at region floor ((t * n) / p)
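// For example, with n = 10 collection set regions and p = 4 threads the
// starting regions are 0, 2, 5 and 7.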
2778
2779 result = g1_policy()->collection_set();
2780 if (G1CollectedHeap::use_parallel_gc_threads()) {
2781 uint cs_size = g1_policy()->cset_region_length();
2782 uint active_workers = workers()->active_workers();
2783 assert(UseDynamicNumberOfGCThreads ||
2784 active_workers == workers()->total_workers(),
2785 "Unless dynamic should use total workers");
2786
2787 uint end_ind = (cs_size * worker_i) / active_workers;
2788 uint start_ind = 0;
2789
2790 if (worker_i > 0 &&
2791 _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
2792 // The previous worker's starting region is valid,
2793 // so let's iterate from there.
2794 start_ind = (cs_size * (worker_i - 1)) / active_workers;
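// The loadload below pairs with the storestore in the publishing code at
// the end of this method: if the previous worker's time stamp is visible,
// its published starting region must be visible too.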
2795 OrderAccess::loadload();
2796 result = _worker_cset_start_region[worker_i - 1];
2797 }
2798
2799 for (uint i = start_ind; i < end_ind; i++) {
2800 result = result->next_in_collection_set();
2801 }
2802 }
2803
2804 // Note: the calculated starting heap region may be NULL
2805 // (when the collection set is empty).
2806 assert(result == NULL || result->in_collection_set(), "sanity");
2807 assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
2808 "should be updated only once per pause");
2809 _worker_cset_start_region[worker_i] = result;
2810 OrderAccess::storestore();
2811 _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
2812 return result;
2813 }
2814
2815 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2816 HeapRegion* r = g1_policy()->collection_set();
2817 while (r != NULL) {
2818 HeapRegion* next = r->next_in_collection_set();
2819 if (cl->doHeapRegion(r)) {
2820 cl->incomplete();
2821 return;
2822 }
2823 r = next;
2824 }
2825 }
2826
2827 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
2828 HeapRegionClosure *cl) {
2829 if (r == NULL) {
2830 // The CSet is empty so there's nothing to do.
2831 return;
2832 }
2833
2834 assert(r->in_collection_set(),
2835 "Start region must be a member of the collection set.");
2836 HeapRegion* cur = r;
2837 while (cur != NULL) {
2838 HeapRegion* next = cur->next_in_collection_set();
2839 if (cl->doHeapRegion(cur) && false) {
2840 cl->incomplete();
2841 return;
2842 }
2843 cur = next;
2844 }
2845 cur = g1_policy()->collection_set();
2846 while (cur != r) {
2847 HeapRegion* next = cur->next_in_collection_set();
2848 if (cl->doHeapRegion(cur) && false) {
2849 cl->incomplete();
2850 return;
2851 }
2852 cur = next;
2853 }
2854 }
2855
2856 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
2857 HeapRegion* result = _hrm.next_region_in_heap(from);
2858 while (result != NULL && result->isHumongous()) {
2859 result = _hrm.next_region_in_heap(result);
2860 }
2861 return result;
2862 }
2863
2864 Space* G1CollectedHeap::space_containing(const void* addr) const {
2865 return heap_region_containing(addr);
2866 }
2867
2868 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2869 Space* sp = space_containing(addr);
2870 return sp->block_start(addr);
2871 }
2872
2873 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2874 Space* sp = space_containing(addr);
2875 return sp->block_size(addr);
2876 }
2877
2878 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2879 Space* sp = space_containing(addr);
2880 return sp->block_is_obj(addr);
2881 }
2882
2883 bool G1CollectedHeap::supports_tlab_allocation() const {
2884 return true;
2885 }
2886
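// Only eden regions can serve TLAB allocations, so the reported TLAB capacity
// is the young target length minus the survivor length, in bytes.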
2887 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2888 return (_g1_policy->young_list_target_length() - young_list()->survivor_length()) * HeapRegion::GrainBytes;
2889 }
2890
2891 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2892 return young_list()->eden_used_bytes();
2893 }
2894
2895 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
2896 // must be smaller than the humongous object limit.
2897 size_t G1CollectedHeap::max_tlab_size() const {
2898 return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);
2899 }
2900
2901 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2902 // Return the remaining space in the cur alloc region, but not less than
2903 // the min TLAB size.
2904
2905 // Also, this value can be at most the humongous object threshold,
2906 // since we can't allow tlabs to grow big enough to accommodate
2907 // humongous objects.
2908
2909 HeapRegion* hr = _allocator->mutator_alloc_region(AllocationContext::current())->get();
2910 size_t max_tlab = max_tlab_size() * wordSize;
2911 if (hr == NULL) {
2912 return max_tlab;
2913 } else {
2914 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
2915 }
2916 }
2917
2918 size_t G1CollectedHeap::max_capacity() const {
2919 return _hrm.reserved().byte_size();
2920 }
2921
2922 jlong G1CollectedHeap::millis_since_last_gc() {
2923 // assert(false, "NYI");
2924 return 0;
2925 }
2926
2927 void G1CollectedHeap::prepare_for_verify() {
2928 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
2929 ensure_parsability(false);
2930 }
2931 g1_rem_set()->prepare_for_verify();
2932 }
2933
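// Maps a VerifyOption to the matching "allocated since marking" test on the
// region; the mark-word option has no top-at-mark-start and always answers
// false.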
2934 bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
2935 VerifyOption vo) {
2936 switch (vo) {
2937 case VerifyOption_G1UsePrevMarking:
2938 return hr->obj_allocated_since_prev_marking(obj);
2939 case VerifyOption_G1UseNextMarking:
2940 return hr->obj_allocated_since_next_marking(obj);
2941 case VerifyOption_G1UseMarkWord:
2942 return false;
2943 default:
2944 ShouldNotReachHere();
2945 }
2946 return false; // keep some compilers happy
2947 }
2948
2949 HeapWord* G1CollectedHeap::top_at_mark_start(HeapRegion* hr, VerifyOption vo) {
2950 switch (vo) {
2951 case VerifyOption_G1UsePrevMarking: return hr->prev_top_at_mark_start();
2952 case VerifyOption_G1UseNextMarking: return hr->next_top_at_mark_start();
2953 case VerifyOption_G1UseMarkWord: return NULL;
2954 default: ShouldNotReachHere();
2955 }
2956 return NULL; // keep some compilers happy
2957 }
2958
2959 bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
2960 switch (vo) {
2961 case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
2962 case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
2963 case VerifyOption_G1UseMarkWord: return obj->is_gc_marked();
2964 default: ShouldNotReachHere();
2965 }
2966 return false; // keep some compilers happy
2967 }
2968
2969 const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
2970 switch (vo) {
2971 case VerifyOption_G1UsePrevMarking: return "PTAMS";
2972 case VerifyOption_G1UseNextMarking: return "NTAMS";
2973 case VerifyOption_G1UseMarkWord: return "NONE";
2974 default: ShouldNotReachHere();
2975 }
2976 return NULL; // keep some compilers happy
2977 }
2978
2979 class VerifyRootsClosure: public OopClosure {
2980 private:
2981 G1CollectedHeap* _g1h;
2982 VerifyOption _vo;
2983 bool _failures;
2984 public:
2985 // _vo == UsePrevMarking -> use "prev" marking information,
2986 // _vo == UseNextMarking -> use "next" marking information,
2987 // _vo == UseMarkWord -> use mark word from object header.
2988 VerifyRootsClosure(VerifyOption vo) :
2989 _g1h(G1CollectedHeap::heap()),
2990 _vo(vo),
2991 _failures(false) { }
2992
2993 bool failures() { return _failures; }
2994
2995 template <class T> void do_oop_nv(T* p) {
2996 T heap_oop = oopDesc::load_heap_oop(p);
2997 if (!oopDesc::is_null(heap_oop)) {
2998 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2999 if (_g1h->is_obj_dead_cond(obj, _vo)) {
3000 gclog_or_tty->print_cr("Root location " PTR_FORMAT " "
3001 "points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
3002 if (_vo == VerifyOption_G1UseMarkWord) {
3003 gclog_or_tty->print_cr(" Mark word: " INTPTR_FORMAT, (intptr_t)obj->mark());
3004 }
3005 obj->print_on(gclog_or_tty);
3006 _failures = true;
3007 }
3008 }
3009 }
3010
3011 void do_oop(oop* p) { do_oop_nv(p); }
3012 void do_oop(narrowOop* p) { do_oop_nv(p); }
3013 };
3014
3015 class G1VerifyCodeRootOopClosure: public OopClosure {
3016 G1CollectedHeap* _g1h;
3017 OopClosure* _root_cl;
3018 nmethod* _nm;
3019 VerifyOption _vo;
3020 bool _failures;
3021
3022 template <class T> void do_oop_work(T* p) {
3023 // First verify that this root is live
3024 _root_cl->do_oop(p);
3025
3026 if (!G1VerifyHeapRegionCodeRoots) {
3027 // We're not verifying the code roots attached to heap regions.
3028 return;
3029 }
3030
3031 // Don't check the code roots during marking verification in a full GC
3032 if (_vo == VerifyOption_G1UseMarkWord) {
3033 return;
3034 }
3035
3036 // Now verify that the current nmethod (which contains p) is
3037 // in the code root list of the heap region containing the
3038 // object referenced by p.
3039
3040 T heap_oop = oopDesc::load_heap_oop(p);
3041 if (!oopDesc::is_null(heap_oop)) {
3042 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
3043
3044 // Now fetch the region containing the object
3045 HeapRegion* hr = _g1h->heap_region_containing(obj);
3046 HeapRegionRemSet* hrrs = hr->rem_set();
3047 // Verify that the strong code root list for this region
3048 // contains the nmethod
3049 if (!hrrs->strong_code_roots_list_contains(_nm)) {
3050 gclog_or_tty->print_cr("Code root location " PTR_FORMAT " "
3051 "from nmethod " PTR_FORMAT " not in strong "
3052 "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
3053 p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
3054 _failures = true;
3055 }
3056 }
3057 }
3058
3059 public:
3060 G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
3061 _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
3062
3063 void do_oop(oop* p) { do_oop_work(p); }
3064 void do_oop(narrowOop* p) { do_oop_work(p); }
3065
3066 void set_nmethod(nmethod* nm) { _nm = nm; }
3067 bool failures() { return _failures; }
3068 };
3069
3070 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
3071 G1VerifyCodeRootOopClosure* _oop_cl;
3072
3073 public:
3074 G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
3075 _oop_cl(oop_cl) {}
3076
3077 void do_code_blob(CodeBlob* cb) {
3078 nmethod* nm = cb->as_nmethod_or_null();
3079 if (nm != NULL) {
3080 _oop_cl->set_nmethod(nm);
3081 nm->oops_do(_oop_cl);
3082 }
3083 }
3084 };
3085
3086 class YoungRefCounterClosure : public OopClosure {
3087 G1CollectedHeap* _g1h;
3088 int _count;
3089 public:
3090 YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
3091 void do_oop(oop* p) { if (_g1h->is_in_young(*p)) { _count++; } }
3092 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3093
3094 int count() { return _count; }
3095 void reset_count() { _count = 0; };
3096 };
3097
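// Verifies that any Klass still holding references into the young
// generation is marked as having modified oops (see the guarantee below).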
3098 class VerifyKlassClosure: public KlassClosure {
3099 YoungRefCounterClosure _young_ref_counter_closure;
3100 OopClosure *_oop_closure;
3101 public:
3102 VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
3103 void do_klass(Klass* k) {
3104 k->oops_do(_oop_closure);
3105
3106 _young_ref_counter_closure.reset_count();
3107 k->oops_do(&_young_ref_counter_closure);
3108 if (_young_ref_counter_closure.count() > 0) {
3109 guarantee(k->has_modified_oops(), err_msg("Klass " PTR_FORMAT ", has young refs but is not dirty.", p2i(k)));
3110 }
3111 }
3112 };
3113
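// Verifies the basic liveness invariant: a live object must not reference
// an object that is dead under the chosen VerifyOption.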
3114 class VerifyLivenessOopClosure: public OopClosure {
3115 G1CollectedHeap* _g1h;
3116 VerifyOption _vo;
3117 public:
3118 VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
3119 _g1h(g1h), _vo(vo)
3120 { }
3121 void do_oop(narrowOop *p) { do_oop_work(p); }
3122 void do_oop( oop *p) { do_oop_work(p); }
3123
3124 template <class T> void do_oop_work(T *p) {
3125 oop obj = oopDesc::load_decode_heap_oop(p);
3126 guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
3127 "Dead object referenced by a not dead object");
3128 }
3129 };
3130
3131 class VerifyObjsInRegionClosure: public ObjectClosure {
3132 private:
3133 G1CollectedHeap* _g1h;
3134 size_t _live_bytes;
3135 HeapRegion *_hr;
3136 VerifyOption _vo;
3137 public:
3138 // _vo == UsePrevMarking -> use "prev" marking information,
3139 // _vo == UseNextMarking -> use "next" marking information,
3140 // _vo == UseMarkWord -> use mark word from object header.
3141 VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
3142 : _live_bytes(0), _hr(hr), _vo(vo) {
3143 _g1h = G1CollectedHeap::heap();
3144 }
3145 void do_object(oop o) {
3146 VerifyLivenessOopClosure isLive(_g1h, _vo);
3147 assert(o != NULL, "Huh?");
3148 if (!_g1h->is_obj_dead_cond(o, _vo)) {
3149 // If the object is alive according to the mark word,
3150 // then verify that the marking information agrees.
3151 // Note we can't verify the contra-positive of the
3152 // above: if the object is dead (according to the mark
3153 // word), it may not be marked, or may have been marked
3154 // but has since become dead, or may have been allocated
3155 // since the last marking.
3156 if (_vo == VerifyOption_G1UseMarkWord) {
3157 guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
3158 }
3159
3160 o->oop_iterate_no_header(&isLive);
3161 if (!_hr->obj_allocated_since_prev_marking(o)) {
3162 size_t obj_size = o->size(); // Make sure we don't overflow
3163 _live_bytes += (obj_size * HeapWordSize);
3164 }
3165 }
3166 }
3167 size_t live_bytes() { return _live_bytes; }
3168 };
3169
3170 class PrintObjsInRegionClosure : public ObjectClosure {
3171 HeapRegion *_hr;
3172 G1CollectedHeap *_g1;
3173 public:
3174 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
3175 _g1 = G1CollectedHeap::heap();
3176 };
3177
3178 void do_object(oop o) {
3179 if (o != NULL) {
3180 HeapWord *start = (HeapWord *) o;
3181 size_t word_sz = o->size();
3182 gclog_or_tty->print("\nPrinting obj " PTR_FORMAT " of size " SIZE_FORMAT
3183 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
3184 p2i(o), word_sz,
3185 _g1->isMarkedPrev(o),
3186 _g1->isMarkedNext(o),
3187 _hr->obj_allocated_since_prev_marking(o));
3188 HeapWord *end = start + word_sz;
3189 HeapWord *cur;
3190 int *val;
3191 for (cur = start; cur < end; cur++) {
3192 val = (int *) cur;
3193 gclog_or_tty->print("\t " PTR_FORMAT ": %d\n", p2i(val), *val);
3194 }
3195 }
3196 }
3197 };
3198
3199 class VerifyRegionClosure: public HeapRegionClosure {
3200 private:
3201 bool _par;
3202 VerifyOption _vo;
3203 bool _failures;
3204 public:
3205 // _vo == UsePrevMarking -> use "prev" marking information,
3206 // _vo == UseNextMarking -> use "next" marking information,
3207 // _vo == UseMarkWord -> use mark word from object header.
3208 VerifyRegionClosure(bool par, VerifyOption vo)
3209 : _par(par),
3210 _vo(vo),
3211 _failures(false) {}
3212
3213 bool failures() {
3214 return _failures;
3215 }
3216
3217 bool doHeapRegion(HeapRegion* r) {
3218 if (!r->continuesHumongous()) {
3219 bool failures = false;
3220 r->verify(_vo, &failures);
3221 if (failures) {
3222 _failures = true;
3223 } else {
3224 VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
3225 r->object_iterate(&not_dead_yet_cl);
3226 if (_vo != VerifyOption_G1UseNextMarking) {
3227 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3228 gclog_or_tty->print_cr("[" PTR_FORMAT "," PTR_FORMAT "] "
3229 "max_live_bytes " SIZE_FORMAT " "
3230 "< calculated " SIZE_FORMAT,
3231 p2i(r->bottom()), p2i(r->end()),
3232 r->max_live_bytes(),
3233 not_dead_yet_cl.live_bytes());
3234 _failures = true;
3235 }
3236 } else {
3237 // When vo == UseNextMarking we cannot currently do a sanity
3238 // check on the live bytes as the calculation has not been
3239 // finalized yet.
3240 }
3241 }
3242 }
3243 return false; // do not stop the region iteration even if we hit a failure; failures are recorded in _failures
3244 }
3245 };
3246
3247 // This is the task used for parallel verification of the heap regions
3248
3249 class G1ParVerifyTask: public AbstractGangTask {
3250 private:
3251 G1CollectedHeap* _g1h;
3252 VerifyOption _vo;
3253 bool _failures;
3254
3255 public:
3256 // _vo == UsePrevMarking -> use "prev" marking information,
3257 // _vo == UseNextMarking -> use "next" marking information,
3258 // _vo == UseMarkWord -> use mark word from object header.
3259 G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
3260 AbstractGangTask("Parallel verify task"),
3261 _g1h(g1h),
3262 _vo(vo),
3263 _failures(false) { }
3264
3265 bool failures() {
3266 return _failures;
3267 }
3268
3269 void work(uint worker_id) {
3270 HandleMark hm;
3271 VerifyRegionClosure blk(true, _vo);
3272 _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
3273 _g1h->workers()->active_workers(),
3274 HeapRegion::ParVerifyClaimValue);
3275 if (blk.failures()) {
3276 _failures = true;
3277 }
3278 }
3279 };
3280
3281 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
3282 if (SafepointSynchronize::is_at_safepoint()) {
3283 assert(Thread::current()->is_VM_thread(),
3284 "Expected to be executed serially by the VM thread at this point");
3285
3286 if (!silent) { gclog_or_tty->print("Roots "); }
3287 VerifyRootsClosure rootsCl(vo);
3288 VerifyKlassClosure klassCl(this, &rootsCl);
3289 CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3290
3291 // We apply the relevant closures to all the oops in the
3292 // system dictionary, class loader data graph, the string table
3293 // and the nmethods in the code cache.
3294 G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3295 G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3296
3297 {
3298 G1RootProcessor root_processor(this);
3299 root_processor.process_all_roots(&rootsCl,
3300 &cldCl,
3301 &blobsCl);
3302 }
3303
3304 bool failures = rootsCl.failures() || codeRootsCl.failures();
3305
3306 if (vo != VerifyOption_G1UseMarkWord) {
3307 // If we're verifying during a full GC then the region sets
3308 // will have been torn down at the start of the GC. Therefore
3309 // verifying the region sets will fail. So we only verify
3310 // the region sets when not in a full GC.
3311 if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3312 verify_region_sets();
3313 }
3314
3315 if (!silent) { gclog_or_tty->print("HeapRegions "); }
3316 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3317 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3318 "sanity check");
3319
3320 G1ParVerifyTask task(this, vo);
3321 assert(UseDynamicNumberOfGCThreads ||
3322 workers()->active_workers() == workers()->total_workers(),
3323 "If not dynamic should be using all the workers");
3324 int n_workers = workers()->active_workers();
3325 set_par_threads(n_workers);
3326 workers()->run_task(&task);
3327 set_par_threads(0);
3328 if (task.failures()) {
3329 failures = true;
3330 }
3331
3332 // Checks that the expected amount of parallel work was done.
3333 // The implication is that n_workers is > 0.
3334 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
3335 "sanity check");
3336
3337 reset_heap_region_claim_values();
3338
3339 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3340 "sanity check");
3341 } else {
3342 VerifyRegionClosure blk(false, vo);
3343 heap_region_iterate(&blk);
3344 if (blk.failures()) {
3345 failures = true;
3346 }
3347 }
3348 if (!silent) gclog_or_tty->print("RemSet ");
3349 rem_set()->verify();
3350
3351 if (G1StringDedup::is_enabled()) {
3352 if (!silent) gclog_or_tty->print("StrDedup ");
3353 G1StringDedup::verify();
3354 }
3355
3356 if (failures) {
3357 gclog_or_tty->print_cr("Heap:");
3358 // It helps to have the per-region information in the output to
3359 // help us track down what went wrong. This is why we call
3360 // print_extended_on() instead of print_on().
3361 print_extended_on(gclog_or_tty);
3362 gclog_or_tty->cr();
3363 #ifndef PRODUCT
3364 if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
3365 concurrent_mark()->print_reachable("at-verification-failure",
3366 vo, false /* all */);
3367 }
3368 #endif
3369 gclog_or_tty->flush();
3370 }
3371 guarantee(!failures, "there should not have been any failures");
3372 } else {
3373 if (!silent) {
3374 gclog_or_tty->print("(SKIPPING Roots, HeapRegionSets, HeapRegions, RemSet");
3375 if (G1StringDedup::is_enabled()) {
3376 gclog_or_tty->print(", StrDedup");
3377 }
3378 gclog_or_tty->print(") ");
3379 }
3380 }
3381 }
3382
3383 void G1CollectedHeap::verify(bool silent) {
3384 verify(silent, VerifyOption_G1UsePrevMarking);
3385 }
3386
3387 double G1CollectedHeap::verify(bool guard, const char* msg) {
3388 double verify_time_ms = 0.0;
3389
3390 if (guard && total_collections() >= VerifyGCStartAt) {
3391 double verify_start = os::elapsedTime();
3392 HandleMark hm; // Discard invalid handles created during verification
3393 prepare_for_verify();
3394 Universe::verify(VerifyOption_G1UsePrevMarking, msg);
3395 verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
3396 }
3397
3398 return verify_time_ms;
3399 }
3400
3401 void G1CollectedHeap::verify_before_gc() {
3402 double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
3403 g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
3404 }
3405
3406 void G1CollectedHeap::verify_after_gc() {
3407 double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
3408 g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
3409 }
3410
3411 class PrintRegionClosure: public HeapRegionClosure {
3412 outputStream* _st;
3413 public:
3414 PrintRegionClosure(outputStream* st) : _st(st) {}
3415 bool doHeapRegion(HeapRegion* r) {
3416 r->print_on(_st);
3417 return false;
3418 }
3419 };
3420
3421 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3422 const HeapRegion* hr,
3423 const VerifyOption vo) const {
3424 switch (vo) {
3425 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
3426 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
3427 case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
3428 default: ShouldNotReachHere();
3429 }
3430 return false; // keep some compilers happy
3431 }
3432
3433 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3434 const VerifyOption vo) const {
3435 switch (vo) {
3436 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
3437 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
3438 case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
3439 default: ShouldNotReachHere();
3440 }
3441 return false; // keep some compilers happy
3442 }
3443
3444 void G1CollectedHeap::print_on(outputStream* st) const {
3445 st->print(" %-20s", "garbage-first heap");
3446 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3447 capacity()/K, used_unlocked()/K);
3448 st->print(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",
3449 p2i(_hrm.reserved().start()),
3450 p2i(_hrm.reserved().start() + _hrm.length() + HeapRegion::GrainWords),
3451 p2i(_hrm.reserved().end()));
3452 st->cr();
3453 st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
3454 uint young_regions = _young_list->length();
3455 st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
3456 (size_t) young_regions * HeapRegion::GrainBytes / K);
3457 uint survivor_regions = g1_policy()->recorded_survivor_regions();
3458 st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
3459 (size_t) survivor_regions * HeapRegion::GrainBytes / K);
3460 st->cr();
3461 MetaspaceAux::print_on(st);
3462 }
3463
3464 void G1CollectedHeap::print_extended_on(outputStream* st) const {
3465 print_on(st);
3466
3467 // Print the per-region information.
3468 st->cr();
3469 st->print_cr("Heap Regions: (E=young(eden), S=young(survivor), O=old, "
3470 "HS=humongous(starts), HC=humongous(continues), "
3471 "CS=collection set, F=free, TS=gc time stamp, "
3472 "PTAMS=previous top-at-mark-start, "
3473 "NTAMS=next top-at-mark-start)");
3474 PrintRegionClosure blk(st);
3475 heap_region_iterate(&blk);
3476 }
3477
3478 void G1CollectedHeap::print_on_error(outputStream* st) const {
3479 this->CollectedHeap::print_on_error(st);
3480
3481 if (_cm != NULL) {
3482 st->cr();
3483 _cm->print_on_error(st);
3484 }
3485 }
3486
3487 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
3488 if (G1CollectedHeap::use_parallel_gc_threads()) {
3489 workers()->print_worker_threads_on(st);
3490 }
3491 _cmThread->print_on(st);
3492 st->cr();
3493 _cm->print_worker_threads_on(st);
3494 _cg1r->print_worker_threads_on(st);
3495 if (G1StringDedup::is_enabled()) {
3496 G1StringDedup::print_worker_threads_on(st);
3497 }
3498 }
3499
3500 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3501 if (G1CollectedHeap::use_parallel_gc_threads()) {
3502 workers()->threads_do(tc);
3503 }
3504 tc->do_thread(_cmThread);
3505 _cg1r->threads_do(tc);
3506 if (G1StringDedup::is_enabled()) {
3507 G1StringDedup::threads_do(tc);
3508 }
3509 }
3510
3511 void G1CollectedHeap::print_tracing_info() const {
3512 // We'll overload this to mean "trace GC pause statistics."
3513 if (TraceGen0Time || TraceGen1Time) {
3514 // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3515 // to that.
3516 g1_policy()->print_tracing_info();
3517 }
3518 if (G1SummarizeRSetStats) {
3519 g1_rem_set()->print_summary_info();
3520 }
3521 if (G1SummarizeConcMark) {
3522 concurrent_mark()->print_summary_info();
3523 }
3524 g1_policy()->print_yg_surv_rate_info();
3525 SpecializationStats::print();
3526 }
3527
3528 #ifndef PRODUCT
3529 // Helpful for debugging RSet issues.
3530
3531 class PrintRSetsClosure : public HeapRegionClosure {
3532 private:
3533 const char* _msg;
3534 size_t _occupied_sum;
3535
3536 public:
3537 bool doHeapRegion(HeapRegion* r) {
3538 HeapRegionRemSet* hrrs = r->rem_set();
3539 size_t occupied = hrrs->occupied();
3540 _occupied_sum += occupied;
3541
3542 gclog_or_tty->print_cr("Printing RSet for region " HR_FORMAT,
3543 HR_FORMAT_PARAMS(r));
3544 if (occupied == 0) {
3545 gclog_or_tty->print_cr(" RSet is empty");
3546 } else {
3547 hrrs->print();
3548 }
3549 gclog_or_tty->print_cr("----------");
3550 return false;
3551 }
3552
3553 PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3554 gclog_or_tty->cr();
3555 gclog_or_tty->print_cr("========================================");
3556 gclog_or_tty->print_cr("%s", msg);
3557 gclog_or_tty->cr();
3558 }
3559
3560 ~PrintRSetsClosure() {
3561 gclog_or_tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
3562 gclog_or_tty->print_cr("========================================");
3563 gclog_or_tty->cr();
3564 }
3565 };
3566
3567 void G1CollectedHeap::print_cset_rsets() {
3568 PrintRSetsClosure cl("Printing CSet RSets");
3569 collection_set_iterate(&cl);
3570 }
3571
3572 void G1CollectedHeap::print_all_rsets() {
3573 PrintRSetsClosure cl("Printing All RSets");;
3574 heap_region_iterate(&cl);
3575 }
3576 #endif // PRODUCT
3577
3578 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
3579
3580 size_t eden_used_bytes = _young_list->eden_used_bytes();
3581 size_t survivor_used_bytes = _young_list->survivor_used_bytes();
3582 size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
3583
3584 size_t eden_capacity_bytes =
3585 (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
3586
3587 VirtualSpaceSummary heap_summary = create_heap_space_summary();
3588 return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
3589 eden_capacity_bytes, survivor_used_bytes, num_regions());
3590 }
3591
3592 void G1CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
3593 const G1HeapSummary& heap_summary = create_g1_heap_summary();
3594 gc_tracer->report_gc_heap_summary(when, heap_summary);
3595
3596 const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
3597 gc_tracer->report_metaspace_summary(when, metaspace_summary);
3598 }
3599
3600 G1CollectedHeap* G1CollectedHeap::heap() {
3601 assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
3602 "not a garbage-first heap");
3603 return _g1h;
3604 }
3605
3606 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3607 // always_do_update_barrier = false;
3608 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3609 // Fill TLAB's and such
3610 accumulate_statistics_all_tlabs();
3611 ensure_parsability(true);
3612
3613 if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
3614 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3615 g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
3616 }
3617 }
3618
3619 void G1CollectedHeap::gc_epilogue(bool full) {
3620
3621 if (G1SummarizeRSetStats &&
3622 (G1SummarizeRSetStatsPeriod > 0) &&
3623 // we are at the end of the GC. Total collections has already been increased.
3624 ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
3625 g1_rem_set()->print_periodic_summary_info("After GC RS summary");
3626 }
3627
3628 // FIXME: what is this about?
3629 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3630 // is set.
3631 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3632 "derived pointer present"));
3633 // always_do_update_barrier = true;
3634
3635 resize_all_tlabs();
3636 allocation_context_stats().update(full);
3637
3638 // We have just completed a GC. Update the soft reference
3639 // policy with the new heap occupancy
3640 Universe::update_heap_info_at_gc();
3641 }
3642
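// Schedules an incremental collection pause as a VM operation and returns
// the result of the associated allocation request. The result is NULL if
// the pause or its prologue did not succeed, as reported through *succeeded.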
3643 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3644 uint gc_count_before,
3645 bool* succeeded,
3646 GCCause::Cause gc_cause) {
3647 assert_heap_not_locked_and_not_at_safepoint();
3648 g1_policy()->record_stop_world_start();
3649 VM_G1IncCollectionPause op(gc_count_before,
3650 word_size,
3651 false, /* should_initiate_conc_mark */
3652 g1_policy()->max_pause_time_ms(),
3653 gc_cause);
3654
3655 op.set_allocation_context(AllocationContext::current());
3656 VMThread::execute(&op);
3657
3658 HeapWord* result = op.result();
3659 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3660 assert(result == NULL || ret_succeeded,
3661 "the result should be NULL if the VM did not succeed");
3662 *succeeded = ret_succeeded;
3663
3664 assert_heap_not_locked();
3665 return result;
3666 }
3667
3668 void
3669 G1CollectedHeap::doConcurrentMark() {
3670 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
3671 if (!_cmThread->in_progress()) {
3672 _cmThread->set_started();
3673 CGC_lock->notify();
3674 }
3675 }
3676
3677 size_t G1CollectedHeap::pending_card_num() {
3678 size_t extra_cards = 0;
3679 JavaThread *curr = Threads::first();
3680 while (curr != NULL) {
3681 DirtyCardQueue& dcq = curr->dirty_card_queue();
3682 extra_cards += dcq.size();
3683 curr = curr->next();
3684 }
3685 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
3686 size_t buffer_size = dcqs.buffer_size();
3687 size_t buffer_num = dcqs.completed_buffers_num();
3688
3689 // PtrQueueSet::buffer_size() and PtrQueue:size() return sizes
3690 // in bytes - not the number of 'entries'. We need to convert
3691 // into a number of cards.
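// For example (hypothetical numbers): with a 256 byte buffer size, four
// completed buffers and 64 bytes of thread-local entries, a 64-bit VM
// (oopSize == 8) would report (256 * 4 + 64) / 8 = 136 pending cards.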
3692 return (buffer_size * buffer_num + extra_cards) / oopSize;
3693 }
3694
3695 size_t G1CollectedHeap::cards_scanned() {
3696 return g1_rem_set()->cardsScanned();
3697 }
3698
3699 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
3700 private:
3701 size_t _total_humongous;
3702 size_t _candidate_humongous;
3703
3704 DirtyCardQueue _dcq;
3705
3706 // We don't nominate objects with many remembered set entries, on
3707 // the assumption that such objects are likely still live.
3708 bool is_remset_small(HeapRegion* region) const {
3709 HeapRegionRemSet* const rset = region->rem_set();
3710 return G1EagerReclaimHumongousObjectsWithStaleRefs
3711 ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
3712 : rset->is_empty();
3713 }
3714
3715 bool is_typeArray_region(HeapRegion* region) const {
3716 return oop(region->bottom())->is_typeArray();
3717 }
3718
3719 bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
3720 assert(region->startsHumongous(), "Must start a humongous object");
3721
3722 // Candidate selection must satisfy the following constraints
3723 // while concurrent marking is in progress:
3724 //
3725 // * In order to maintain SATB invariants, an object must not be
3726 // reclaimed if it was allocated before the start of marking and
3727 // has not had its references scanned. Such an object must have
3728 // its references (including type metadata) scanned to ensure no
3729 // live objects are missed by the marking process. Objects
3730 // allocated after the start of concurrent marking don't need to
3731 // be scanned.
3732 //
3733 // * An object must not be reclaimed if it is on the concurrent
3734 // mark stack. Objects allocated after the start of concurrent
3735 // marking are never pushed on the mark stack.
3736 //
3737 // Nominating only objects allocated after the start of concurrent
3738 // marking is sufficient to meet both constraints. This may miss
3739 // some objects that satisfy the constraints, but the marking data
3740 // structures don't support efficiently performing the needed
3741 // additional tests or scrubbing of the mark stack.
3742 //
3743 // However, we presently only nominate is_typeArray() objects.
3744 // A humongous object containing references induces remembered
3745 // set entries on other regions. In order to reclaim such an
3746 // object, those remembered sets would need to be cleaned up.
3747 //
3748 // We also treat is_typeArray() objects specially, allowing them
3749 // to be reclaimed even if allocated before the start of
3750 // concurrent mark. For this we rely on mark stack insertion to
3751 // exclude is_typeArray() objects, preventing reclaiming an object
3752 // that is in the mark stack. We also rely on the metadata for
3753 // such objects to be built-in and so ensured to be kept live.
3754 // Frequent allocation and drop of large binary blobs is an
3755 // important use case for eager reclaim, and this special handling
3756 // may reduce needed headroom.
3757
3758 return is_typeArray_region(region) && is_remset_small(region);
3759 }
3760
3761 public:
3762 RegisterHumongousWithInCSetFastTestClosure()
3763 : _total_humongous(0),
3764 _candidate_humongous(0),
3765 _dcq(&JavaThread::dirty_card_queue_set()) {
3766 }
3767
3768 virtual bool doHeapRegion(HeapRegion* r) {
3769 if (!r->startsHumongous()) {
3770 return false;
3771 }
3772 G1CollectedHeap* g1h = G1CollectedHeap::heap();
3773
3774 bool is_candidate = humongous_region_is_candidate(g1h, r);
3775 uint rindex = r->hrm_index();
3776 g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
3777 if (is_candidate) {
3778 _candidate_humongous++;
3779 g1h->register_humongous_region_with_in_cset_fast_test(rindex);
3780 // Is_candidate already filters out humongous objects with large remembered sets.
3781 // If we have a humongous object with a few remembered sets, we simply flush these
3782 // remembered set entries into the DCQS. That will result in automatic
3783 // re-evaluation of their remembered set entries during the following evacuation
3784 // phase.
3785 if (!r->rem_set()->is_empty()) {
3786 guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
3787 "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
3788 G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
3789 HeapRegionRemSetIterator hrrs(r->rem_set());
3790 size_t card_index;
3791 while (hrrs.has_next(card_index)) {
3792 jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
3793 if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
3794 *card_ptr = CardTableModRefBS::dirty_card_val();
3795 _dcq.enqueue(card_ptr);
3796 }
3797 }
3798 assert(hrrs.n_yielded() == r->rem_set()->occupied(),
3799 err_msg("Remembered set hash maps out of sync, cur: " SIZE_FORMAT " entries, next: " SIZE_FORMAT " entries",
3800 hrrs.n_yielded(), r->rem_set()->occupied()));
3801 r->rem_set()->clear_locked();
3802 }
3803 assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
3804 }
3805 _total_humongous++;
3806
3807 return false;
3808 }
3809
3810 size_t total_humongous() const { return _total_humongous; }
3811 size_t candidate_humongous() const { return _candidate_humongous; }
3812
3813 void flush_rem_set_entries() { _dcq.flush(); }
3814 };
3815
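// Walks all regions once per pause to nominate eagerly reclaimable humongous
// regions (currently type arrays with small remembered sets), registers them
// with the in-CSet fast test and records the related phase times.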
3816 void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
3817 if (!G1EagerReclaimHumongousObjects) {
3818 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
3819 return;
3820 }
3821 double time = os::elapsed_counter();
3822
3823 // Collect reclaim candidate information and register candidates with cset.
3824 RegisterHumongousWithInCSetFastTestClosure cl;
3825 heap_region_iterate(&cl);
3826
3827 time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
3828 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
3829 cl.total_humongous(),
3830 cl.candidate_humongous());
3831 _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
3832
3833 // Finally flush all remembered set entries to re-check into the global DCQS.
3834 cl.flush_rem_set_entries();
3835 }
3836
3837 void
3838 G1CollectedHeap::setup_surviving_young_words() {
3839 assert(_surviving_young_words == NULL, "pre-condition");
3840 uint array_length = g1_policy()->young_cset_region_length();
3841 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
3842 if (_surviving_young_words == NULL) {
3843 vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
3844 "Not enough space for young surv words summary.");
3845 }
3846 memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
3847 #ifdef ASSERT
3848 for (uint i = 0; i < array_length; ++i) {
3849 assert( _surviving_young_words[i] == 0, "memset above" );
3850 }
3851 #endif // ASSERT
3852 }
3853
3854 void
3855 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
3856 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
3857 uint array_length = g1_policy()->young_cset_region_length();
3858 for (uint i = 0; i < array_length; ++i) {
3859 _surviving_young_words[i] += surv_young_words[i];
3860 }
3861 }
3862
3863 void
3864 G1CollectedHeap::cleanup_surviving_young_words() {
3865 guarantee( _surviving_young_words != NULL, "pre-condition" );
3866 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words, mtGC);
3867 _surviving_young_words = NULL;
3868 }
3869
3870 class VerifyRegionRemSetClosure : public HeapRegionClosure {
3871 public:
3872 bool doHeapRegion(HeapRegion* hr) {
3873 if (!hr->continuesHumongous()) {
3874 hr->verify_rem_set();
3875 }
3876 return false;
3877 }
3878 };
3879
3880 #ifdef ASSERT
3881 class VerifyCSetClosure: public HeapRegionClosure {
3882 public:
3883 bool doHeapRegion(HeapRegion* hr) {
3884 // Here we check that the CSet region's RSet is ready for parallel
3885 // iteration. The fields that we'll verify are only manipulated
3886 // when the region is part of a CSet and is collected. Afterwards,
3887 // we reset these fields when we clear the region's RSet (when the
3888 // region is freed) so they are ready when the region is
3889 // re-allocated. The only exception to this is if there's an
3890 // evacuation failure and instead of freeing the region we leave
3891 // it in the heap. In that case, we reset these fields during
3892 // evacuation failure handling.
3893 guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3894
3895 // Here's a good place to add any other checks we'd like to
3896 // perform on CSet regions.
3897 return false;
3898 }
3899 };
3900 #endif // ASSERT
3901
3902 #if TASKQUEUE_STATS
3903 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3904 st->print_raw_cr("GC Task Stats");
3905 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3906 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3907 }
3908
3909 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3910 print_taskqueue_stats_hdr(st);
3911
3912 TaskQueueStats totals;
3913 const int n = workers() != NULL ? workers()->total_workers() : 1;
3914 for (int i = 0; i < n; ++i) {
3915 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
3916 totals += task_queue(i)->stats;
3917 }
3918 st->print_raw("tot "); totals.print(st); st->cr();
3919
3920 DEBUG_ONLY(totals.verify());
3921 }
3922
3923 void G1CollectedHeap::reset_taskqueue_stats() {
3924 const int n = workers() != NULL ? workers()->total_workers() : 1;
3925 for (int i = 0; i < n; ++i) {
3926 task_queue(i)->stats.reset();
3927 }
3928 }
3929 #endif // TASKQUEUE_STATS
3930
3931 void G1CollectedHeap::log_gc_header() {
3932 if (!G1Log::fine()) {
3933 return;
3934 }
3935
3936 gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
3937
3938 GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3939 .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
3940 .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
3941
3942 gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3943 }
3944
3945 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3946 if (!G1Log::fine()) {
3947 return;
3948 }
3949
3950 if (G1Log::finer()) {
3951 if (evacuation_failed()) {
3952 gclog_or_tty->print(" (to-space exhausted)");
3953 }
3954 gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3955 g1_policy()->phase_times()->note_gc_end();
3956 g1_policy()->phase_times()->print(pause_time_sec);
3957 g1_policy()->print_detailed_heap_transition();
3958 } else {
3959 if (evacuation_failed()) {
3960 gclog_or_tty->print("--");
3961 }
3962 g1_policy()->print_heap_transition();
3963 gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3964 }
3965 gclog_or_tty->flush();
3966 }
3967
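// Executes a single evacuation pause at a safepoint. Returns false if the
// GC locker is active and the pause had to be skipped, true otherwise.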
3968 bool
3969 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3970 assert_at_safepoint(true /* should_be_vm_thread */);
3971 guarantee(!is_gc_active(), "collection is not reentrant");
3972
3973 if (GC_locker::check_active_before_gc()) {
3974 return false;
3975 }
3976
3977 _gc_timer_stw->register_gc_start();
3978
3979 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3980
3981 SvcGCMarker sgcm(SvcGCMarker::MINOR);
3982 ResourceMark rm;
3983
3984 print_heap_before_gc();
3985 trace_heap_before_gc(_gc_tracer_stw);
3986
3987 verify_region_sets_optional();
3988 verify_dirty_young_regions();
3989
3990 // This call will decide whether this pause is an initial-mark
3991 // pause. If it is, during_initial_mark_pause() will return true
3992 // for the duration of this pause.
3993 g1_policy()->decide_on_conc_mark_initiation();
3994
3995 // We do not allow initial-mark to be piggy-backed on a mixed GC.
3996 assert(!g1_policy()->during_initial_mark_pause() ||
3997 g1_policy()->gcs_are_young(), "sanity");
3998
3999 // We also do not allow mixed GCs during marking.
4000 assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
4001
4002 // Record whether this pause is an initial mark. When the current
4003 // thread has completed its logging output and it's safe to signal
4004 // the CM thread, the flag's value in the policy has been reset.
4005 bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
4006
4007 // Inner scope for scope based logging, timers, and stats collection
4008 {
4009 EvacuationInfo evacuation_info;
4010
4011 if (g1_policy()->during_initial_mark_pause()) {
4012 // We are about to start a marking cycle, so we increment the
4013 // full collection counter.
4014 increment_old_marking_cycles_started();
4015 register_concurrent_cycle_start(_gc_timer_stw->gc_start());
4016 }
4017
4018 _gc_tracer_stw->report_yc_type(yc_type());
4019
4020 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
4021
4022 uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
4023 workers()->active_workers(),
4024 Threads::number_of_non_daemon_threads());
4025 assert(UseDynamicNumberOfGCThreads ||
4026 active_workers == workers()->total_workers(),
4027 "If not dynamic should be using all the workers");
4028 workers()->set_active_workers(active_workers);
4029
4030
4031 double pause_start_sec = os::elapsedTime();
4032 g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress());
4033 log_gc_header();
4034
4035 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
4036 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause(),
4037 yc_type() == Mixed /* allMemoryPoolsAffected */);
4038
4039 // If the secondary_free_list is not empty, append it to the
4040 // free_list. No need to wait for the cleanup operation to finish;
4041 // the region allocation code will check the secondary_free_list
4042 // and wait if necessary. If the G1StressConcRegionFreeing flag is
4043 // set, skip this step so that the region allocation code has to
4044 // get entries from the secondary_free_list.
4045 if (!G1StressConcRegionFreeing) {
4046 append_secondary_free_list_if_not_empty_with_lock();
4047 }
4048
4049 assert(check_young_list_well_formed(), "young list should be well formed");
4050 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
4051 "sanity check");
4052
4053 // Don't dynamically change the number of GC threads this early. A value of
4054 // 0 is used to indicate serial work. When parallel work is done,
4055 // it will be set.
4056
4057 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
4058 IsGCActiveMark x;
4059
4060 gc_prologue(false);
4061 increment_total_collections(false /* full gc */);
4062 increment_gc_time_stamp();
4063
4064 if (VerifyRememberedSets) {
4065 if (!VerifySilently) {
4066 gclog_or_tty->print_cr("[Verifying RemSets before GC]");
4067 }
4068 VerifyRegionRemSetClosure v_cl;
4069 heap_region_iterate(&v_cl);
4070 }
4071
4072 verify_before_gc();
4073 check_bitmaps("GC Start");
4074
4075 COMPILER2_PRESENT(DerivedPointerTable::clear());
4076
4077 // Please see comment in g1CollectedHeap.hpp and
4078 // G1CollectedHeap::ref_processing_init() to see how
4079 // reference processing currently works in G1.
4080
4081 // Enable discovery in the STW reference processor
4082 ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
4083 true /*verify_no_refs*/);
4084
4085 {
4086 // We want to temporarily turn off discovery by the
4087 // CM ref processor, if necessary, and turn it back on
4088 // on again later if we do. Using a scoped
4089 // NoRefDiscovery object will do this.
4090 NoRefDiscovery no_cm_discovery(ref_processor_cm());
4091
4092 // Forget the current alloc region (we might even choose it to be part
4093 // of the collection set!).
4094 _allocator->release_mutator_alloc_region();
4095
4096 // We should call this after we retire the mutator alloc
4097 // region(s) so that all the ALLOC / RETIRE events are generated
4098 // before the start GC event.
4099 _hr_printer.start_gc(false /* full */, (size_t) total_collections());
4100
4101 // This timing is only used by the ergonomics to handle our pause target.
4102 // It is unclear why this should not include the full pause. We will
4103 // investigate this in CR 7178365.
4104 //
4105 // Preserving the old comment here if that helps the investigation:
4106 //
4107 // The elapsed time induced by the start time below deliberately elides
4108 // the possible verification above.
4109 double sample_start_time_sec = os::elapsedTime();
4110
4111 #if YOUNG_LIST_VERBOSE
4112 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
4113 _young_list->print();
4114 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
4115 #endif // YOUNG_LIST_VERBOSE
4116
4117 g1_policy()->record_collection_pause_start(sample_start_time_sec, *_gc_tracer_stw);
4118
4119 double scan_wait_start = os::elapsedTime();
4120 // We have to wait until the CM threads finish scanning the
4121 // root regions as it's the only way to ensure that all the
4122 // objects on them have been correctly scanned before we start
4123 // moving them during the GC.
4124 bool waited = _cm->root_regions()->wait_until_scan_finished();
4125 double wait_time_ms = 0.0;
4126 if (waited) {
4127 double scan_wait_end = os::elapsedTime();
4128 wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
4129 }
4130 g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
4131
4132 #if YOUNG_LIST_VERBOSE
4133 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
4134 _young_list->print();
4135 #endif // YOUNG_LIST_VERBOSE
4136
4137 if (g1_policy()->during_initial_mark_pause()) {
4138 concurrent_mark()->checkpointRootsInitialPre();
4139 }
4140
4141 #if YOUNG_LIST_VERBOSE
4142 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
4143 _young_list->print();
4144 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
4145 #endif // YOUNG_LIST_VERBOSE
4146
4147 g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
4148
4149 // Make sure the remembered sets are up to date. This needs to be
4150 // done before register_humongous_regions_with_cset(), because the
4151 // remembered sets are used there to choose eager reclaim candidates.
4152 // If the remembered sets are not up to date we might miss some
4153 // entries that need to be handled.
4154 g1_rem_set()->cleanupHRRS();
4155
4156 register_humongous_regions_with_in_cset_fast_test();
4157
4158 assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
4159
4160 _cm->note_start_of_gc();
4161 // We call this after finalize_cset() to
4162 // ensure that the CSet has been finalized.
4163 _cm->verify_no_cset_oops();
4164
4165 if (_hr_printer.is_active()) {
4166 HeapRegion* hr = g1_policy()->collection_set();
4167 while (hr != NULL) {
4168 _hr_printer.cset(hr);
4169 hr = hr->next_in_collection_set();
4170 }
4171 }
4172
4173 #ifdef ASSERT
4174 VerifyCSetClosure cl;
4175 collection_set_iterate(&cl);
4176 #endif // ASSERT
4177
4178 setup_surviving_young_words();
4179
4180 // Initialize the GC alloc regions.
4181 _allocator->init_gc_alloc_regions(evacuation_info);
4182
4183 // Actually do the work...
4184 evacuate_collection_set(evacuation_info);
4185
4186 free_collection_set(g1_policy()->collection_set(), evacuation_info);
4187
4188 eagerly_reclaim_humongous_regions();
4189
4190 g1_policy()->clear_collection_set();
4191
4192 cleanup_surviving_young_words();
4193
4194 // Start a new incremental collection set for the next pause.
4195 g1_policy()->start_incremental_cset_building();
4196
4197 clear_cset_fast_test();
4198
4199 _young_list->reset_sampled_info();
4200
4201 // Don't check the whole heap at this point as the
4202 // GC alloc regions from this pause have been tagged
4203 // as survivors and moved on to the survivor list.
4204 // Survivor regions will fail the !is_young() check.
4205 assert(check_young_list_empty(false /* check_heap */),
4206 "young list should be empty");
4207
4208 #if YOUNG_LIST_VERBOSE
4209 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
4210 _young_list->print();
4211 #endif // YOUNG_LIST_VERBOSE
4212
4213 g1_policy()->record_survivor_regions(_young_list->survivor_length(),
4214 _young_list->first_survivor_region(),
4215 _young_list->last_survivor_region());
4216
4217 _young_list->reset_auxilary_lists();
4218
4219 if (evacuation_failed()) {
4220 _allocator->set_used(recalculate_used());
4221 uint n_queues = MAX2((int)ParallelGCThreads, 1);
4222 for (uint i = 0; i < n_queues; i++) {
4223 if (_evacuation_failed_info_array[i].has_failed()) {
4224 _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
4225 }
4226 }
4227 } else {
4228 // The "used" bytes of the collection set regions have already been subtracted
4229 // when they were freed. Add in the bytes evacuated.
4230 _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
4231 }
4232
4233 if (g1_policy()->during_initial_mark_pause()) {
4234 // We have to do this before we notify the CM threads that
4235 // they can start working to make sure that all the
4236 // appropriate initialization is done on the CM object.
4237 concurrent_mark()->checkpointRootsInitialPost();
4238 set_marking_started();
4239 // Note that we don't actually trigger the CM thread at
4240 // this point. We do that later when we're sure that
4241 // the current thread has completed its logging output.
4242 }
4243
4244 allocate_dummy_regions();
4245
4246 #if YOUNG_LIST_VERBOSE
4247 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
4248 _young_list->print();
4249 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
4250 #endif // YOUNG_LIST_VERBOSE
4251
4252 _allocator->init_mutator_alloc_region();
4253
4254 {
4255 size_t expand_bytes = g1_policy()->expansion_amount();
4256 if (expand_bytes > 0) {
4257 size_t bytes_before = capacity();
4258 // No need for an ergo verbose message here,
4259 // expansion_amount() does this when it returns a value > 0.
4260 if (!expand(expand_bytes)) {
4261 // We failed to expand the heap. Cannot do anything about it.
4262 }
4263 }
4264 }
4265
4266 // We redo the verification but now wrt to the new CSet which
4267 // has just got initialized after the previous CSet was freed.
4268 _cm->verify_no_cset_oops();
4269 _cm->note_end_of_gc();
4270
4271 // This timing is only used by the ergonomics to handle our pause target.
4272 // It is unclear why this should not include the full pause. We will
4273 // investigate this in CR 7178365.
4274 double sample_end_time_sec = os::elapsedTime();
4275 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
4276 g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);
4277
4278 MemoryService::track_memory_usage();
4279
4280 // In prepare_for_verify() below we'll need to scan the deferred
4281 // update buffers to bring the RSets up-to-date if
4282 // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
4283 // the update buffers we'll probably need to scan cards on the
4284 // regions we just allocated to (i.e., the GC alloc
4285 // regions). However, during the last GC we called
4286 // set_saved_mark() on all the GC alloc regions, so card
4287 // scanning might skip the [saved_mark_word()...top()] area of
4288 // those regions (i.e., the area we allocated objects into
4289 // during the last GC). But it shouldn't. Given that
4290 // saved_mark_word() is conditional on whether the GC time stamp
4291 // on the region is current or not, by incrementing the GC time
4292 // stamp here we invalidate all the GC time stamps on all the
4293 // regions and saved_mark_word() will simply return top() for
4294 // all the regions. This is a nicer way of ensuring this rather
4295 // than iterating over the regions and fixing them. In fact, the
4296 // GC time stamp increment here also ensures that
4297 // saved_mark_word() will return top() between pauses, i.e.,
4298 // during concurrent refinement. So we don't need the
4299 // is_gc_active() check to decide which top to use when
4300 // scanning cards (see CR 7039627).
4301 increment_gc_time_stamp();
4302
4303 if (VerifyRememberedSets) {
4304 if (!VerifySilently) {
4305 gclog_or_tty->print_cr("[Verifying RemSets after GC]");
4306 }
4307 VerifyRegionRemSetClosure v_cl;
4308 heap_region_iterate(&v_cl);
4309 }
4310
4311 verify_after_gc();
4312 check_bitmaps("GC End");
4313
4314 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
4315 ref_processor_stw()->verify_no_references_recorded();
4316
4317 // CM reference discovery will be re-enabled if necessary.
4318 }
4319
4320 // We should do this after we potentially expand the heap so
4321 // that all the COMMIT events are generated before the end GC
4322 // event, and after we retire the GC alloc regions so that all
4323 // RETIRE events are generated before the end GC event.
4324 _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4325
4326 #ifdef TRACESPINNING
4327 ParallelTaskTerminator::print_termination_counts();
4328 #endif
4329
4330 gc_epilogue(false);
4331 }
4332
4333 // Print the remainder of the GC log output.
4334 log_gc_footer(os::elapsedTime() - pause_start_sec);
4335
4336 // It is not yet safe to tell the concurrent mark to
4337 // start as we have some optional output below. We don't want the
4338 // output from the concurrent mark thread interfering with this
4339 // logging output either.
4340
4341 _hrm.verify_optional();
4342 verify_region_sets_optional();
4343
4344 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
4345 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4346
4347 print_heap_after_gc();
4348 trace_heap_after_gc(_gc_tracer_stw);
4349
4350 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4351 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4352 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4353 // before any GC notifications are raised.
4354 g1mm()->update_sizes();
4355
4356 _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4357 _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4358 _gc_timer_stw->register_gc_end();
4359 _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4360 }
4361 // It should now be safe to tell the concurrent mark thread to start
4362 // without its logging output interfering with the logging output
4363 // that came from the pause.
4364
4365 if (should_start_conc_mark) {
4366 // CAUTION: after the doConcurrentMark() call below,
4367 // the concurrent marking thread(s) could be running
4368 // concurrently with us. Make sure that anything after
4369 // this point does not assume that we are the only GC thread
4370 // running. Note: of course, the actual marking work will
4371 // not start until the safepoint itself is released in
4372 // SuspendibleThreadSet::desynchronize().
4373 doConcurrentMark();
4374 }
4375
4376 return true;
4377 }
4378
4379 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
4380 _drain_in_progress = false;
4381 set_evac_failure_closure(cl);
4382 _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
4383 }
4384
4385 void G1CollectedHeap::finalize_for_evac_failure() {
4386 assert(_evac_failure_scan_stack != NULL &&
4387 _evac_failure_scan_stack->length() == 0,
4388 "Postcondition");
4389 assert(!_drain_in_progress, "Postcondition");
4390 delete _evac_failure_scan_stack;
4391 _evac_failure_scan_stack = NULL;
4392 }
4393
4394 void G1CollectedHeap::remove_self_forwarding_pointers() {
4395 assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4396
4397 double remove_self_forwards_start = os::elapsedTime();
4398
4399 G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
4400
4401 if (G1CollectedHeap::use_parallel_gc_threads()) {
4402 set_par_threads();
4403 workers()->run_task(&rsfp_task);
4404 set_par_threads(0);
4405 } else {
4406 rsfp_task.work(0);
4407 }
4408
4409 assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity");
4410
4411 // Reset the claim values in the regions in the collection set.
4412 reset_cset_heap_region_claim_values();
4413
4414 assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4415
4416 // Now restore saved marks, if any.
4417 assert(_objs_with_preserved_marks.size() ==
4418 _preserved_marks_of_objs.size(), "Both or none.");
4419 while (!_objs_with_preserved_marks.is_empty()) {
4420 oop obj = _objs_with_preserved_marks.pop();
4421 markOop m = _preserved_marks_of_objs.pop();
4422 obj->set_mark(m);
4423 }
4424 _objs_with_preserved_marks.clear(true);
4425 _preserved_marks_of_objs.clear(true);
4426
4427 g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
4428 }
4429
4430 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
4431 _evac_failure_scan_stack->push(obj);
4432 }
4433
4434 void G1CollectedHeap::drain_evac_failure_scan_stack() {
4435 assert(_evac_failure_scan_stack != NULL, "precondition");
4436
4437 while (_evac_failure_scan_stack->length() > 0) {
4438 oop obj = _evac_failure_scan_stack->pop();
4439 _evac_failure_closure->set_region(heap_region_containing(obj));
4440 obj->oop_iterate_backwards(_evac_failure_closure);
4441 }
4442 }
4443
4444 oop
4445 G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state,
4446 oop old) {
4447 assert(obj_in_cs(old),
4448 err_msg("obj: " PTR_FORMAT " should still be in the CSet",
4449 p2i(old)));
4450 markOop m = old->mark();
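// Try to forward the object to itself. forward_to_atomic() installs the
// forwarding pointer with an atomic compare-and-swap on the mark word, so
// exactly one thread wins: a NULL return means this thread installed the
// self-forwarding pointer; any other value is the forwardee some other
// thread managed to install first.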
4451 oop forward_ptr = old->forward_to_atomic(old);
4452 if (forward_ptr == NULL) {
4453 // Forward-to-self succeeded.
4454 assert(_par_scan_state != NULL, "par scan state");
4455 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
4456 uint queue_num = _par_scan_state->queue_num();
4457
4458 _evacuation_failed = true;
4459 _evacuation_failed_info_array[queue_num].register_copy_failure(old->size());
4460 if (_evac_failure_closure != cl) {
4461 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
4462 assert(!_drain_in_progress,
4463 "Should only be true while someone holds the lock.");
4464 // Set the global evac-failure closure to the current thread's.
4465 assert(_evac_failure_closure == NULL, "Or locking has failed.");
4466 set_evac_failure_closure(cl);
4467 // Now do the common part.
4468 handle_evacuation_failure_common(old, m);
4469 // Reset to NULL.
4470 set_evac_failure_closure(NULL);
4471 } else {
4472 // The lock is already held, and this is recursive.
4473 assert(_drain_in_progress, "This should only be the recursive case.");
4474 handle_evacuation_failure_common(old, m);
4475 }
4476 return old;
4477 } else {
4478 // Forward-to-self failed. Either someone else managed to allocate
4479 // space for this object (old != forward_ptr) or they beat us in
4480 // self-forwarding it (old == forward_ptr).
4481 assert(old == forward_ptr || !obj_in_cs(forward_ptr),
4482 err_msg("obj: " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
4483 "should not be in the CSet",
4484 p2i(old), p2i(forward_ptr)));
4485 return forward_ptr;
4486 }
4487 }
4488
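// Common handling for an object that could not be evacuated and is now
// forwarded to itself: preserve its mark word if necessary, flag its
// region as having failed evacuation, and push the object on the scan
// stack so its fields are still visited - the object stays where it is,
// but the objects it references may still need to be copied or have
// their references updated.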
4489 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
4490 preserve_mark_if_necessary(old, m);
4491
4492 HeapRegion* r = heap_region_containing(old);
4493 if (!r->evacuation_failed()) {
4494 r->set_evacuation_failed(true);
4495 _hr_printer.evac_failure(r);
4496 }
4497
4498 push_on_evac_failure_scan_stack(old);
4499
4500 if (!_drain_in_progress) {
4501 // prevent recursion in copy_to_survivor_space()
4502 _drain_in_progress = true;
4503 drain_evac_failure_scan_stack();
4504 _drain_in_progress = false;
4505 }
4506 }
4507
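// Self-forwarding overwrites the mark word, so marks that carry state
// which cannot be recreated afterwards (e.g. an installed hash code or
// locking information) are saved here and restored later by
// remove_self_forwarding_pointers().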
4508 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
4509 assert(evacuation_failed(), "Oversaving!");
4510 // We want to call the "for_promotion_failure" version only in the
4511 // case of a promotion failure.
4512 if (m->must_be_preserved_for_promotion_failure(obj)) {
4513 _objs_with_preserved_marks.push(obj);
4514 _preserved_marks_of_objs.push(m);
4515 }
4516 }
4517
4518 void G1ParCopyHelper::mark_object(oop obj) {
4519 assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
4520
4521 // We know that the object is not moving so it's safe to read its size.
4522 _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4523 }
4524
4525 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
4526 assert(from_obj->is_forwarded(), "from obj should be forwarded");
4527 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4528 assert(from_obj != to_obj, "should not be self-forwarded");
4529
4530 assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
4531 assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
4532
4533 // The object might be in the process of being copied by another
4534 // worker so we cannot trust that its to-space image is
4535 // well-formed. So we have to read its size from its from-space
4536 // image which we know should not be changing.
4537 _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4538 }
4539
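// Klass barrier: if the copied object ended up in the young generation,
// record on the klass currently being scanned that it has oops pointing
// into young, so later young GCs know to re-scan this klass's oops.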
4540 template <class T>
4541 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4542 if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4543 _scanned_klass->record_modified_oops();
4544 }
4545 }
4546
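// The core copying closure applied to every reference found during root
// and remembered set scanning:
// - a reference into the collection set is redirected to the forwardee,
//   copying the object to a survivor/old region first if needed;
// - a live humongous object outside the collection set is recorded as live;
// - depending on the template parameters the target is also marked for
//   concurrent marking and/or a klass or remembered set barrier is applied.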
4547 template <G1Barrier barrier, G1Mark do_mark_object>
4548 template <class T>
4549 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
4550 T heap_oop = oopDesc::load_heap_oop(p);
4551
4552 if (oopDesc::is_null(heap_oop)) {
4553 return;
4554 }
4555
4556 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
4557
4558 assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4559
4560 const InCSetState state = _g1->in_cset_state(obj);
4561 if (state.is_in_cset()) {
4562 oop forwardee;
4563 markOop m = obj->mark();
4564 if (m->is_marked()) {
4565 forwardee = (oop) m->decode_pointer();
4566 } else {
4567 forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
4568 }
4569 assert(forwardee != NULL, "forwardee should not be NULL");
4570 oopDesc::encode_store_heap_oop(p, forwardee);
4571 if (do_mark_object != G1MarkNone && forwardee != obj) {
4572 // If the object is self-forwarded we don't need to explicitly
4573 // mark it, the evacuation failure protocol will do so.
4574 mark_forwarded_object(obj, forwardee);
4575 }
4576
4577 if (barrier == G1BarrierKlass) {
4578 do_klass_barrier(p, forwardee);
4579 }
4580 } else {
4581 if (state.is_humongous()) {
4582 _g1->set_humongous_is_live(obj);
4583 }
4584 // The object is not in collection set. If we're a root scanning
4585 // closure during an initial mark pause then attempt to mark the object.
4586 if (do_mark_object == G1MarkFromRoot) {
4587 mark_object(obj);
4588 }
4589 }
4590
4591 if (barrier == G1BarrierEvac) {
4592 _par_scan_state->update_rs(_from, p, _worker_id);
4593 }
4594 }
4595
4596 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
4597 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);
4598
4599 class G1ParEvacuateFollowersClosure : public VoidClosure {
4600 protected:
4601 G1CollectedHeap* _g1h;
4602 G1ParScanThreadState* _par_scan_state;
4603 RefToScanQueueSet* _queues;
4604 ParallelTaskTerminator* _terminator;
4605
4606 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
4607 RefToScanQueueSet* queues() { return _queues; }
4608 ParallelTaskTerminator* terminator() { return _terminator; }
4609
4610 public:
4611 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
4612 G1ParScanThreadState* par_scan_state,
4613 RefToScanQueueSet* queues,
4614 ParallelTaskTerminator* terminator)
4615 : _g1h(g1h), _par_scan_state(par_scan_state),
4616 _queues(queues), _terminator(terminator) {}
4617
4618 void do_void();
4619
4620 private:
4621 inline bool offer_termination();
4622 };
4623
4624 bool G1ParEvacuateFollowersClosure::offer_termination() {
4625 G1ParScanThreadState* const pss = par_scan_state();
4626 pss->start_term_time();
4627 const bool res = terminator()->offer_termination();
4628 pss->end_term_time();
4629 return res;
4630 }
4631
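// Drain the local queue, then repeatedly try to steal work from the other
// workers' queues. offer_termination() only returns true once the
// termination protocol decides that all queues are empty, so each worker
// keeps helping until no work is left anywhere.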
4632 void G1ParEvacuateFollowersClosure::do_void() {
4633 G1ParScanThreadState* const pss = par_scan_state();
4634 pss->trim_queue();
4635 do {
4636 pss->steal_and_trim_queue(queues());
4637 } while (!offer_termination());
4638 }
4639
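// Applies the given copying helper to the oops embedded in klasses. When
// process_only_dirty is set, only klasses whose "modified oops" flag is
// set are visited; the flag acts as a coarse remembered set for metadata
// that may reference the young generation.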
4640 class G1KlassScanClosure : public KlassClosure {
4641 G1ParCopyHelper* _closure;
4642 bool _process_only_dirty;
4643 int _count;
4644 public:
4645 G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
4646 : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
4647 void do_klass(Klass* klass) {
4648 // If the klass has not been dirtied we know that there are
4649 // no references into the young gen and we can skip it.
4650 if (!_process_only_dirty || klass->has_modified_oops()) {
4651 // Clean the klass since we're going to scavenge all the metadata.
4652 klass->clear_modified_oops();
4653
4654 // Tell the closure that this klass is the Klass to scavenge
4655 // and is the one to dirty if oops are left pointing into the young gen.
4656 _closure->set_scanned_klass(klass);
4657
4658 klass->oops_do(_closure);
4659
4660 _closure->set_scanned_klass(NULL);
4661 }
4662 _count++;
4663 }
4664 };
4665
4666 class G1ParTask : public AbstractGangTask {
4667 protected:
4668 G1CollectedHeap* _g1h;
4669 RefToScanQueueSet *_queues;
4670 G1RootProcessor* _root_processor;
4671 ParallelTaskTerminator _terminator;
4672 uint _n_workers;
4673
4674 Mutex _stats_lock;
4675 Mutex* stats_lock() { return &_stats_lock; }
4676
4677 public:
4678 G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor)
4679 : AbstractGangTask("G1 collection"),
4680 _g1h(g1h),
4681 _queues(task_queues),
4682 _root_processor(root_processor),
4683 _terminator(0, _queues),
4684 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4685 {}
4686
4687 RefToScanQueueSet* queues() { return _queues; }
4688
4689 RefToScanQueue *work_queue(int i) {
4690 return queues()->queue(i);
4691 }
4692
4693 ParallelTaskTerminator* terminator() { return &_terminator; }
4694
4695 virtual void set_for_termination(int active_workers) {
4696 _root_processor->set_num_workers(active_workers);
4697 terminator()->reset_for_reuse(active_workers);
4698 _n_workers = active_workers;
4699 }
4700
4701 // Helps out with CLD processing.
4702 //
4703 // During InitialMark we need to:
4704 // 1) Scavenge all CLDs for the young GC.
4705 // 2) Mark all objects directly reachable from strong CLDs.
4706 template <G1Mark do_mark_object>
4707 class G1CLDClosure : public CLDClosure {
4708 G1ParCopyClosure<G1BarrierNone, do_mark_object>* _oop_closure;
4709 G1ParCopyClosure<G1BarrierKlass, do_mark_object> _oop_in_klass_closure;
4710 G1KlassScanClosure _klass_in_cld_closure;
4711 bool _claim;
4712
4713 public:
4714 G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
4715 bool only_young, bool claim)
4716 : _oop_closure(oop_closure),
4717 _oop_in_klass_closure(oop_closure->g1(),
4718 oop_closure->pss(),
4719 oop_closure->rp()),
4720 _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
4721 _claim(claim) {
4722
4723 }
4724
4725 void do_cld(ClassLoaderData* cld) {
4726 cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
4727 }
4728 };
4729
4730 void work(uint worker_id) {
4731 if (worker_id >= _n_workers) return; // no work needed this round
4732
4733 _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, os::elapsedTime());
4734
4735 {
4736 ResourceMark rm;
4737 HandleMark hm;
4738
4739 ReferenceProcessor* rp = _g1h->ref_processor_stw();
4740
4741 G1ParScanThreadState pss(_g1h, worker_id, rp);
4742 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4743
4744 pss.set_evac_failure_closure(&evac_failure_cl);
4745
4746 bool only_young = _g1h->g1_policy()->gcs_are_young();
4747
4748 // Non-IM young GC.
4749 G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, &pss, rp);
4750 G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
4751 only_young, // Only process dirty klasses.
4752 false); // No need to claim CLDs.
4753 // IM young GC.
4754 // Strong roots closures.
4755 G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, &pss, rp);
4756 G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,
4757 false, // Process all klasses.
4758 true); // Need to claim CLDs.
4759 // Weak roots closures.
4760 G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
4761 G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
4762 false, // Process all klasses.
4763 true); // Need to claim CLDs.
4764
4765 OopClosure* strong_root_cl;
4766 OopClosure* weak_root_cl;
4767 CLDClosure* strong_cld_cl;
4768 CLDClosure* weak_cld_cl;
4769
4770 bool trace_metadata = false;
4771
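// Select the closures for this pause. During an initial-mark pause the
// strong roots must also mark what they reach; when class unloading with
// concurrent mark is enabled, roots reachable only through (potentially
// unloadable) class loader data use the weak variant, which only marks
// objects that were actually copied during this pause.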
4772 if (_g1h->g1_policy()->during_initial_mark_pause()) {
4773 // We also need to mark copied objects.
4774 strong_root_cl = &scan_mark_root_cl;
4775 strong_cld_cl = &scan_mark_cld_cl;
4776 if (ClassUnloadingWithConcurrentMark) {
4777 weak_root_cl = &scan_mark_weak_root_cl;
4778 weak_cld_cl = &scan_mark_weak_cld_cl;
4779 trace_metadata = true;
4780 } else {
4781 weak_root_cl = &scan_mark_root_cl;
4782 weak_cld_cl = &scan_mark_cld_cl;
4783 }
4784 } else {
4785 strong_root_cl = &scan_only_root_cl;
4786 weak_root_cl = &scan_only_root_cl;
4787 strong_cld_cl = &scan_only_cld_cl;
4788 weak_cld_cl = &scan_only_cld_cl;
4789 }
4790
4791 pss.start_strong_roots();
4792
4793 _root_processor->evacuate_roots(strong_root_cl,
4794 weak_root_cl,
4795 strong_cld_cl,
4796 weak_cld_cl,
4797 trace_metadata,
4798 worker_id);
4799
4800 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
4801 _root_processor->scan_remembered_sets(&push_heap_rs_cl,
4802 weak_root_cl,
4803 worker_id);
4804 pss.end_strong_roots();
4805
4806 {
4807 double start = os::elapsedTime();
4808 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4809 evac.do_void();
4810 double elapsed_sec = os::elapsedTime() - start;
4811 double term_sec = pss.term_time();
4812 _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4813 _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4814 _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, pss.term_attempts());
4815 }
4816 _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4817 _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4818
4819 if (ParallelGCVerbose) {
4820 MutexLocker x(stats_lock());
4821 pss.print_termination_stats(worker_id);
4822 }
4823
4824 assert(pss.queue_is_empty(), "should be empty");
4825
4826 // Close the inner scope so that the ResourceMark and HandleMark
4827 // destructors are executed here and are included as part of the
4828 // "GC Worker Time".
4829 }
4830 _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
4831 }
4832 };
4833
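// Unlinks entries for dead objects from the interned String table and the
// Symbol table. In the parallel case the workers claim chunks of table
// buckets through a shared claim index; the per-task counters are
// accumulated with atomic adds and reported when the task is destroyed.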
4834 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4835 private:
4836 BoolObjectClosure* _is_alive;
4837 int _initial_string_table_size;
4838 int _initial_symbol_table_size;
4839
4840 bool _process_strings;
4841 int _strings_processed;
4842 int _strings_removed;
4843
4844 bool _process_symbols;
4845 int _symbols_processed;
4846 int _symbols_removed;
4847
4848 bool _do_in_parallel;
4849 public:
4850 G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
4851 AbstractGangTask("String/Symbol Unlinking"),
4852 _is_alive(is_alive),
4853 _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
4854 _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
4855 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4856
4857 _initial_string_table_size = StringTable::the_table()->table_size();
4858 _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4859 if (process_strings) {
4860 StringTable::clear_parallel_claimed_index();
4861 }
4862 if (process_symbols) {
4863 SymbolTable::clear_parallel_claimed_index();
4864 }
4865 }
4866
4867 ~G1StringSymbolTableUnlinkTask() {
4868 guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4869 err_msg("claim value " INT32_FORMAT " after unlink less than initial string table size " INT32_FORMAT,
4870 StringTable::parallel_claimed_index(), _initial_string_table_size));
4871 guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4872 err_msg("claim value " INT32_FORMAT " after unlink less than initial symbol table size " INT32_FORMAT,
4873 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
4874
4875 if (G1TraceStringSymbolTableScrubbing) {
4876 gclog_or_tty->print_cr("Cleaned string and symbol table, "
4877 "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
4878 "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
4879 strings_processed(), strings_removed(),
4880 symbols_processed(), symbols_removed());
4881 }
4882 }
4883
4884 void work(uint worker_id) {
4885 if (_do_in_parallel) {
4886 int strings_processed = 0;
4887 int strings_removed = 0;
4888 int symbols_processed = 0;
4889 int symbols_removed = 0;
4890 if (_process_strings) {
4891 StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4892 Atomic::add(strings_processed, &_strings_processed);
4893 Atomic::add(strings_removed, &_strings_removed);
4894 }
4895 if (_process_symbols) {
4896 SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4897 Atomic::add(symbols_processed, &_symbols_processed);
4898 Atomic::add(symbols_removed, &_symbols_removed);
4899 }
4900 } else {
4901 if (_process_strings) {
4902 StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
4903 }
4904 if (_process_symbols) {
4905 SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
4906 }
4907 }
4908 }
4909
4910 size_t strings_processed() const { return (size_t)_strings_processed; }
4911 size_t strings_removed() const { return (size_t)_strings_removed; }
4912
4913 size_t symbols_processed() const { return (size_t)_symbols_processed; }
4914 size_t symbols_removed() const { return (size_t)_symbols_removed; }
4915 };
4916
4917 class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
4918 private:
4919 static Monitor* _lock;
4920
4921 BoolObjectClosure* const _is_alive;
4922 const bool _unloading_occurred;
4923 const uint _num_workers;
4924
4925 // Variables used to claim nmethods.
4926 nmethod* _first_nmethod;
4927 volatile nmethod* _claimed_nmethod;
4928
4929 // The list of nmethods that need to be processed by the second pass.
4930 volatile nmethod* _postponed_list;
4931 volatile uint _num_entered_barrier;
4932
4933 public:
4934 G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
4935 _is_alive(is_alive),
4936 _unloading_occurred(unloading_occurred),
4937 _num_workers(num_workers),
4938 _first_nmethod(NULL),
4939 _claimed_nmethod(NULL),
4940 _postponed_list(NULL),
4941 _num_entered_barrier(0)
4942 {
4943 nmethod::increase_unloading_clock();
4944 _first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
4945 _claimed_nmethod = (volatile nmethod*)_first_nmethod;
4946 }
4947
4948 ~G1CodeCacheUnloadingTask() {
4949 CodeCache::verify_clean_inline_caches();
4950
4951 CodeCache::set_needs_cache_clean(false);
4952 guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
4953
4954 CodeCache::verify_icholder_relocations();
4955 }
4956
4957 private:
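// Lock-free push onto the postponed list: link the nmethod to the current
// head and publish it with a CAS, retrying if another thread updated the
// head first (the matching pop is claim_postponed_nmethod() below).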
4958 void add_to_postponed_list(nmethod* nm) {
4959 nmethod* old;
4960 do {
4961 old = (nmethod*)_postponed_list;
4962 nm->set_unloading_next(old);
4963 } while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
4964 }
4965
4966 void clean_nmethod(nmethod* nm) {
4967 bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
4968
4969 if (postponed) {
4970 // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
4971 add_to_postponed_list(nm);
4972 }
4973
4974 // Mark that this nmethod has been cleaned/unloaded.
4975 // After this call, it will be safe to ask if this nmethod was unloaded or not.
4976 nm->set_unloading_clock(nmethod::global_unloading_clock());
4977 }
4978
4979 void clean_nmethod_postponed(nmethod* nm) {
4980 nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
4981 }
4982
4983 static const int MaxClaimNmethods = 16;
4984
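// Claim a batch of up to MaxClaimNmethods alive nmethods by walking
// forward from the shared _claimed_nmethod cursor and then advancing the
// cursor with a CAS. If the CAS fails, another worker claimed a batch in
// the meantime and the walk is retried from the new cursor position.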
4985 void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
4986 nmethod* first;
4987 nmethod* last;
4988
4989 do {
4990 *num_claimed_nmethods = 0;
4991
4992 first = last = (nmethod*)_claimed_nmethod;
4993
4994 if (first != NULL) {
4995 for (int i = 0; i < MaxClaimNmethods; i++) {
4996 last = CodeCache::alive_nmethod(CodeCache::next(last));
4997
4998 if (last == NULL) {
4999 break;
5000 }
5001
5002 claimed_nmethods[i] = last;
5003 (*num_claimed_nmethods)++;
5004 }
5005 }
5006
5007 } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
5008 }
5009
5010 nmethod* claim_postponed_nmethod() {
5011 nmethod* claim;
5012 nmethod* next;
5013
5014 do {
5015 claim = (nmethod*)_postponed_list;
5016 if (claim == NULL) {
5017 return NULL;
5018 }
5019
5020 next = claim->unloading_next();
5021
5022 } while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
5023
5024 return claim;
5025 }
5026
5027 public:
5028 // Mark that we're done with the first pass of nmethod cleaning.
5029 void barrier_mark(uint worker_id) {
5030 MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
5031 _num_entered_barrier++;
5032 if (_num_entered_barrier == _num_workers) {
5033 ml.notify_all();
5034 }
5035 }
5036
5037 // See if we have to wait for the other workers to
5038 // finish their first-pass nmethod cleaning work.
5039 void barrier_wait(uint worker_id) {
5040 if (_num_entered_barrier < _num_workers) {
5041 MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
5042 while (_num_entered_barrier < _num_workers) {
5043 ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
5044 }
5045 }
5046 }
5047
5048 // Cleaning and unloading of nmethods. Some work has to be postponed
5049 // to the second pass, when we know which nmethods survive.
5050 void work_first_pass(uint worker_id) {
5051 // The first nmethod is claimed by the first worker.
5052 if (worker_id == 0 && _first_nmethod != NULL) {
5053 clean_nmethod(_first_nmethod);
5054 _first_nmethod = NULL;
5055 }
5056
5057 int num_claimed_nmethods;
5058 nmethod* claimed_nmethods[MaxClaimNmethods];
5059
5060 while (true) {
5061 claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
5062
5063 if (num_claimed_nmethods == 0) {
5064 break;
5065 }
5066
5067 for (int i = 0; i < num_claimed_nmethods; i++) {
5068 clean_nmethod(claimed_nmethods[i]);
5069 }
5070 }
5071
5072 // The nmethod cleaning helps out and does the CodeCache part of MetadataOnStackMark.
5073 // Need to retire the buffers now that this thread has stopped cleaning nmethods.
5074 MetadataOnStackMark::retire_buffer_for_thread(Thread::current());
5075 }
5076
5077 void work_second_pass(uint worker_id) {
5078 nmethod* nm;
5079 // Take care of postponed nmethods.
5080 while ((nm = claim_postponed_nmethod()) != NULL) {
5081 clean_nmethod_postponed(nm);
5082 }
5083 }
5084 };
5085
5086 Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");
5087
5088 class G1KlassCleaningTask : public StackObj {
5089 BoolObjectClosure* _is_alive;
5090 volatile jint _clean_klass_tree_claimed;
5091 ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
5092
5093 public:
5094 G1KlassCleaningTask(BoolObjectClosure* is_alive) :
5095 _is_alive(is_alive),
5096 _clean_klass_tree_claimed(0),
5097 _klass_iterator() {
5098 }
5099
5100 private:
5101 bool claim_clean_klass_tree_task() {
5102 if (_clean_klass_tree_claimed) {
5103 return false;
5104 }
5105
5106 return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;
5107 }
5108
5109 InstanceKlass* claim_next_klass() {
5110 Klass* klass;
5111 do {
5112 klass =_klass_iterator.next_klass();
5113 } while (klass != NULL && !klass->oop_is_instance());
5114
5115 return (InstanceKlass*)klass;
5116 }
5117
5118 public:
5119
5120 void clean_klass(InstanceKlass* ik) {
5121 ik->clean_weak_instanceklass_links(_is_alive);
5122
5123 if (JvmtiExport::has_redefined_a_class()) {
5124 InstanceKlass::purge_previous_versions(ik);
5125 }
5126 }
5127
5128 void work() {
5129 ResourceMark rm;
5130
5131 // One worker will clean the subklass/sibling klass tree.
5132 if (claim_clean_klass_tree_task()) {
5133 Klass::clean_subklass_tree(_is_alive);
5134 }
5135
5136 // All workers will help clean the classes.
5137 InstanceKlass* klass;
5138 while ((klass = claim_next_klass()) != NULL) {
5139 clean_klass(klass);
5140 }
5141 }
5142 };
5143
5144 // To minimize the remark pause times, the tasks below are done in parallel.
5145 class G1ParallelCleaningTask : public AbstractGangTask {
5146 private:
5147 G1StringSymbolTableUnlinkTask _string_symbol_task;
5148 G1CodeCacheUnloadingTask _code_cache_task;
5149 G1KlassCleaningTask _klass_cleaning_task;
5150
5151 public:
5152 // The constructor is run in the VMThread.
5153 G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
5154 AbstractGangTask("Parallel Cleaning"),
5155 _string_symbol_task(is_alive, process_strings, process_symbols),
5156 _code_cache_task(num_workers, is_alive, unloading_occurred),
5157 _klass_cleaning_task(is_alive) {
5158 }
5159
5160 void pre_work_verification() {
5161 // The VM Thread will have registered Metadata during the single-threaded phase of MetadataOnStackMark.
5162 assert(Thread::current()->is_VM_thread()
5163 || !MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
5164 }
5165
5166 void post_work_verification() {
5167 assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
5168 }
5169
5170 // The parallel work done by all worker threads.
5171 void work(uint worker_id) {
5172 pre_work_verification();
5173
5174 // Do first pass of code cache cleaning.
5175 _code_cache_task.work_first_pass(worker_id);
5176
5177 // Let the threads mark that the first pass is done.
5178 _code_cache_task.barrier_mark(worker_id);
5179
5180 // Clean the Strings and Symbols.
5181 _string_symbol_task.work(worker_id);
5182
5183 // Wait for all workers to finish the first code cache cleaning pass.
5184 _code_cache_task.barrier_wait(worker_id);
5185
5186 // Do the second code cache cleaning work, which relies on
5187 // the liveness information gathered during the first pass.
5188 _code_cache_task.work_second_pass(worker_id);
5189
5190 // Clean all klasses that were not unloaded.
5191 _klass_cleaning_task.work();
5192
5193 post_work_verification();
5194 }
5195 };
5196
5197
5198 void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
5199 bool process_strings,
5200 bool process_symbols,
5201 bool class_unloading_occurred) {
5202 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5203 workers()->active_workers() : 1);
5204
5205 G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
5206 n_workers, class_unloading_occurred);
5207 if (G1CollectedHeap::use_parallel_gc_threads()) {
5208 set_par_threads(n_workers);
5209 workers()->run_task(&g1_unlink_task);
5210 set_par_threads(0);
5211 } else {
5212 g1_unlink_task.work(0);
5213 }
5214 }
5215
5216 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
5217 bool process_strings, bool process_symbols) {
5218 {
5219 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5220 _g1h->workers()->active_workers() : 1);
5221 G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
5222 if (G1CollectedHeap::use_parallel_gc_threads()) {
5223 set_par_threads(n_workers);
5224 workers()->run_task(&g1_unlink_task);
5225 set_par_threads(0);
5226 } else {
5227 g1_unlink_task.work(0);
5228 }
5229 }
5230
5231 if (G1StringDedup::is_enabled()) {
5232 G1StringDedup::unlink(is_alive);
5233 }
5234 }
5235
5236 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
5237 private:
5238 DirtyCardQueueSet* _queue;
5239 public:
5240 G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
5241
5242 virtual void work(uint worker_id) {
5243 G1GCPhaseTimes* phase_times = G1CollectedHeap::heap()->g1_policy()->phase_times();
5244 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
5245
5246 RedirtyLoggedCardTableEntryClosure cl;
5247 if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {
5248 _queue->par_apply_closure_to_all_completed_buffers(&cl);
5249 } else {
5250 _queue->apply_closure_to_all_completed_buffers(&cl);
5251 }
5252
5253 phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_processed());
5254 }
5255 };
5256
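// Cards logged into G1's private dirty card queue set during the pause are
// marked dirty again and the buffers are then merged into the global
// JavaThread dirty card queue set, so that concurrent refinement will
// process them and bring the remembered sets up to date after the pause.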
5257 void G1CollectedHeap::redirty_logged_cards() {
5258 double redirty_logged_cards_start = os::elapsedTime();
5259
5260 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5261 _g1h->workers()->active_workers() : 1);
5262
5263 G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
5264 dirty_card_queue_set().reset_for_par_iteration();
5265 if (use_parallel_gc_threads()) {
5266 set_par_threads(n_workers);
5267 workers()->run_task(&redirty_task);
5268 set_par_threads(0);
5269 } else {
5270 redirty_task.work(0);
5271 }
5272
5273 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5274 dcq.merge_bufferlists(&dirty_card_queue_set());
5275 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5276
5277 g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
5278 }
5279
5280 // Weak Reference Processing support
5281
5282 // An always "is_alive" closure that is used to preserve referents.
5283 // If the object is non-null then it's alive. Used in the preservation
5284 // of referent objects that are pointed to by reference objects
5285 // discovered by the CM ref processor.
5286 class G1AlwaysAliveClosure: public BoolObjectClosure {
5287 G1CollectedHeap* _g1;
5288 public:
5289 G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5290 bool do_object_b(oop p) {
5291 if (p != NULL) {
5292 return true;
5293 }
5294 return false;
5295 }
5296 };
5297
5298 bool G1STWIsAliveClosure::do_object_b(oop p) {
5299 // An object is reachable if it is outside the collection set,
5300 // or is inside and copied.
5301 return !_g1->obj_in_cs(p) || p->is_forwarded();
5302 }
5303
5304 // Non Copying Keep Alive closure
5305 class G1KeepAliveClosure: public OopClosure {
5306 G1CollectedHeap* _g1;
5307 public:
5308 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5309 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
5310 void do_oop(oop* p) {
5311 oop obj = *p;
5312 assert(obj != NULL, "the caller should have filtered out NULL values");
5313
5314 const InCSetState cset_state = _g1->in_cset_state(obj);
5315 if (!cset_state.is_in_cset_or_humongous()) {
5316 return;
5317 }
5318 if (cset_state.is_in_cset()) {
5319 assert( obj->is_forwarded(), "invariant" );
5320 *p = obj->forwardee();
5321 } else {
5322 assert(!obj->is_forwarded(), "invariant" );
5323 assert(cset_state.is_humongous(),
5324 err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state.value()));
5325 _g1->set_humongous_is_live(obj);
5326 }
5327 }
5328 };
5329
5330 // Copying Keep Alive closure - can be called from both
5331 // serial and parallel code as long as different worker
5332 // threads utilize different G1ParScanThreadState instances
5333 // and different queues.
5334
5335 class G1CopyingKeepAliveClosure: public OopClosure {
5336 G1CollectedHeap* _g1h;
5337 OopClosure* _copy_non_heap_obj_cl;
5338 G1ParScanThreadState* _par_scan_state;
5339
5340 public:
5341 G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
5342 OopClosure* non_heap_obj_cl,
5343 G1ParScanThreadState* pss):
5344 _g1h(g1h),
5345 _copy_non_heap_obj_cl(non_heap_obj_cl),
5346 _par_scan_state(pss)
5347 {}
5348
5349 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
5350 virtual void do_oop( oop* p) { do_oop_work(p); }
5351
5352 template <class T> void do_oop_work(T* p) {
5353 oop obj = oopDesc::load_decode_heap_oop(p);
5354
5355 if (_g1h->is_in_cset_or_humongous(obj)) {
5356 // If the referent object has been forwarded (either copied
5357 // to a new location or to itself in the event of an
5358 // evacuation failure) then we need to update the reference
5359 // field and, if both reference and referent are in the G1
5360 // heap, update the RSet for the referent.
5361 //
5362 // If the referent has not been forwarded then we have to keep
5363 // it alive by policy. Therefore we have to copy the referent.
5364 //
5365 // If the reference field is in the G1 heap then we can push
5366 // on the PSS queue. When the queue is drained (after each
5367 // phase of reference processing) the object and its followers
5368 // will be copied, the reference field set to point to the
5369 // new location, and the RSet updated. Otherwise we need to
5370 // use the non-heap or metadata closures directly to copy
5371 // the referent object and update the pointer, while avoiding
5372 // updating the RSet.
5373
5374 if (_g1h->is_in_g1_reserved(p)) {
5375 _par_scan_state->push_on_queue(p);
5376 } else {
5377 assert(!Metaspace::contains((const void*)p),
5378 err_msg("Unexpectedly found a pointer from metadata: " PTR_FORMAT, p2i(p)));
5379 _copy_non_heap_obj_cl->do_oop(p);
5380 }
5381 }
5382 }
5383 };
5384
5385 // Serial drain queue closure. Called as the 'complete_gc'
5386 // closure for each discovered list in some of the
5387 // reference processing phases.
5388
5389 class G1STWDrainQueueClosure: public VoidClosure {
5390 protected:
5391 G1CollectedHeap* _g1h;
5392 G1ParScanThreadState* _par_scan_state;
5393
5394 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
5395
5396 public:
5397 G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
5398 _g1h(g1h),
5399 _par_scan_state(pss)
5400 { }
5401
5402 void do_void() {
5403 G1ParScanThreadState* const pss = par_scan_state();
5404 pss->trim_queue();
5405 }
5406 };
5407
5408 // Parallel Reference Processing closures
5409
5410 // Implementation of AbstractRefProcTaskExecutor for parallel reference
5411 // processing during G1 evacuation pauses.
5412
5413 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
5414 private:
5415 G1CollectedHeap* _g1h;
5416 RefToScanQueueSet* _queues;
5417 FlexibleWorkGang* _workers;
5418 int _active_workers;
5419
5420 public:
5421 G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
5422 FlexibleWorkGang* workers,
5423 RefToScanQueueSet *task_queues,
5424 int n_workers) :
5425 _g1h(g1h),
5426 _queues(task_queues),
5427 _workers(workers),
5428 _active_workers(n_workers)
5429 {
5430 assert(n_workers > 0, "shouldn't call this otherwise");
5431 }
5432
5433 // Executes the given task using concurrent marking worker threads.
5434 virtual void execute(ProcessTask& task);
5435 virtual void execute(EnqueueTask& task);
5436 };
5437
5438 // Gang task for possibly parallel reference processing
5439
5440 class G1STWRefProcTaskProxy: public AbstractGangTask {
5441 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5442 ProcessTask& _proc_task;
5443 G1CollectedHeap* _g1h;
5444 RefToScanQueueSet *_task_queues;
5445 ParallelTaskTerminator* _terminator;
5446
5447 public:
5448 G1STWRefProcTaskProxy(ProcessTask& proc_task,
5449 G1CollectedHeap* g1h,
5450 RefToScanQueueSet *task_queues,
5451 ParallelTaskTerminator* terminator) :
5452 AbstractGangTask("Process reference objects in parallel"),
5453 _proc_task(proc_task),
5454 _g1h(g1h),
5455 _task_queues(task_queues),
5456 _terminator(terminator)
5457 {}
5458
5459 virtual void work(uint worker_id) {
5460 // The reference processing task executed by a single worker.
5461 ResourceMark rm;
5462 HandleMark hm;
5463
5464 G1STWIsAliveClosure is_alive(_g1h);
5465
5466 G1ParScanThreadState pss(_g1h, worker_id, NULL);
5467 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5468
5469 pss.set_evac_failure_closure(&evac_failure_cl);
5470
5471 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5472
5473 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5474
5475 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5476
5477 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5478 // We also need to mark copied objects.
5479 copy_non_heap_cl = &copy_mark_non_heap_cl;
5480 }
5481
5482 // Keep alive closure.
5483 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
5484
5485 // Complete GC closure
5486 G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
5487
5488 // Call the reference processing task's work routine.
5489 _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
5490
5491 // Note we cannot assert that the refs array is empty here as not all
5492 // of the processing tasks (specifically phase2 - pp2_work) execute
5493 // the complete_gc closure (which ordinarily would drain the queue) so
5494 // the queue may not be empty.
5495 }
5496 };
5497
5498 // Driver routine for parallel reference processing.
5499 // Creates an instance of the ref processing gang
5500 // task and has the worker threads execute it.
5501 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
5502 assert(_workers != NULL, "Need parallel worker threads.");
5503
5504 ParallelTaskTerminator terminator(_active_workers, _queues);
5505 G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);
5506
5507 _g1h->set_par_threads(_active_workers);
5508 _workers->run_task(&proc_task_proxy);
5509 _g1h->set_par_threads(0);
5510 }
5511
5512 // Gang task for parallel reference enqueueing.
5513
5514 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
5515 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5516 EnqueueTask& _enq_task;
5517
5518 public:
5519 G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
5520 AbstractGangTask("Enqueue reference objects in parallel"),
5521 _enq_task(enq_task)
5522 { }
5523
5524 virtual void work(uint worker_id) {
5525 _enq_task.work(worker_id);
5526 }
5527 };
5528
5529 // Driver routine for parallel reference enqueueing.
5530 // Creates an instance of the ref enqueueing gang
5531 // task and has the worker threads execute it.
5532
5533 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
5534 assert(_workers != NULL, "Need parallel worker threads.");
5535
5536 G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
5537
5538 _g1h->set_par_threads(_active_workers);
5539 _workers->run_task(&enq_task_proxy);
5540 _g1h->set_par_threads(0);
5541 }
5542
5543 // End of weak reference support closures
5544
5545 // Abstract task used to preserve (i.e. copy) any referent objects
5546 // that are in the collection set and are pointed to by reference
5547 // objects discovered by the CM ref processor.
5548
5549 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
5550 protected:
5551 G1CollectedHeap* _g1h;
5552 RefToScanQueueSet *_queues;
5553 ParallelTaskTerminator _terminator;
5554 uint _n_workers;
5555
5556 public:
5557 G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) :
5558 AbstractGangTask("ParPreserveCMReferents"),
5559 _g1h(g1h),
5560 _queues(task_queues),
5561 _terminator(workers, _queues),
5562 _n_workers(workers)
5563 { }
5564
5565 void work(uint worker_id) {
5566 ResourceMark rm;
5567 HandleMark hm;
5568
5569 G1ParScanThreadState pss(_g1h, worker_id, NULL);
5570 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5571
5572 pss.set_evac_failure_closure(&evac_failure_cl);
5573
5574 assert(pss.queue_is_empty(), "both queue and overflow should be empty");
5575
5576 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5577
5578 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5579
5580 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5581
5582 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5583 // We also need to mark copied objects.
5584 copy_non_heap_cl = &copy_mark_non_heap_cl;
5585 }
5586
5587 // Is alive closure
5588 G1AlwaysAliveClosure always_alive(_g1h);
5589
5590 // Copying keep alive closure. Applied to referent objects that need
5591 // to be copied.
5592 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
5593
5594 ReferenceProcessor* rp = _g1h->ref_processor_cm();
5595
5596 uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5597 uint stride = MIN2(MAX2(_n_workers, 1U), limit);
5598
5599 // limit is set using max_num_q() - which was set using ParallelGCThreads.
5600 // So this must be true - but assert just in case someone decides to
5601 // change the worker ids.
5602 assert(0 <= worker_id && worker_id < limit, "sanity");
5603 assert(!rp->discovery_is_atomic(), "check this code");
5604
5605 // Select discovered lists [i, i+stride, i+2*stride,...,limit)
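// For example, with 4 workers (stride 4) and limit 12, worker 1 visits
// lists 1, 5 and 9; this spreads the discovered lists evenly across the
// workers without any further synchronization.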
5606 for (uint idx = worker_id; idx < limit; idx += stride) {
5607 DiscoveredList& ref_list = rp->discovered_refs()[idx];
5608
5609 DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
5610 while (iter.has_next()) {
5611 // Since discovery is not atomic for the CM ref processor, we
5612 // can see some null referent objects.
5613 iter.load_ptrs(DEBUG_ONLY(true));
5614 oop ref = iter.obj();
5615
5616 // This will filter nulls.
5617 if (iter.is_referent_alive()) {
5618 iter.make_referent_alive();
5619 }
5620 iter.move_to_next();
5621 }
5622 }
5623
5624 // Drain the queue - which may cause stealing
5625 G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
5626 drain_queue.do_void();
5627 // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
5628 assert(pss.queue_is_empty(), "should be");
5629 }
5630 };
5631
5632 // Weak Reference processing during an evacuation pause (part 1).
5633 void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
5634 double ref_proc_start = os::elapsedTime();
5635
5636 ReferenceProcessor* rp = _ref_processor_stw;
5637 assert(rp->discovery_enabled(), "should have been enabled");
5638
5639 // Any reference objects, in the collection set, that were 'discovered'
5640 // by the CM ref processor should have already been copied (either by
5641 // applying the external root copy closure to the discovered lists, or
5642 // by following an RSet entry).
5643 //
5644 // But some of the referents, that are in the collection set, that these
5645 // reference objects point to may not have been copied: the STW ref
5646 // processor would have seen that the reference object had already
5647 // been 'discovered' and would have skipped discovering the reference,
5648 // but would not have treated the reference object as a regular oop.
5649 // As a result the copy closure would not have been applied to the
5650 // referent object.
5651 //
5652 // We need to explicitly copy these referent objects - the references
5653 // will be processed at the end of remarking.
5654 //
5655 // We also need to do this copying before we process the reference
5656 // objects discovered by the STW ref processor in case one of these
5657 // referents points to another object which is also referenced by an
5658 // object discovered by the STW ref processor.
5659
5660 assert(!G1CollectedHeap::use_parallel_gc_threads() ||
5661 no_of_gc_workers == workers()->active_workers(),
5662 "Need to reset active GC workers");
5663
5664 set_par_threads(no_of_gc_workers);
5665 G1ParPreserveCMReferentsTask keep_cm_referents(this,
5666 no_of_gc_workers,
5667 _task_queues);
5668
5669 if (G1CollectedHeap::use_parallel_gc_threads()) {
5670 workers()->run_task(&keep_cm_referents);
5671 } else {
5672 keep_cm_referents.work(0);
5673 }
5674
5675 set_par_threads(0);
5676
5677 // Closure to test whether a referent is alive.
5678 G1STWIsAliveClosure is_alive(this);
5679
5680 // Even when parallel reference processing is enabled, the processing
5681 // of JNI refs is serial and performed serially by the current thread
5682 // rather than by a worker. The following PSS will be used for processing
5683 // JNI refs.
5684
5685 // Use only a single queue for this PSS.
5686 G1ParScanThreadState pss(this, 0, NULL);
5687
5688 // We do not embed a reference processor in the copying/scanning
5689 // closures while we're actually processing the discovered
5690 // reference objects.
5691 G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5692
5693 pss.set_evac_failure_closure(&evac_failure_cl);
5694
5695 assert(pss.queue_is_empty(), "pre-condition");
5696
5697 G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL);
5698
5699 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5700
5701 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5702
5703 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5704 // We also need to mark copied objects.
5705 copy_non_heap_cl = &copy_mark_non_heap_cl;
5706 }
5707
5708 // Keep alive closure.
5709 G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, &pss);
5710
5711 // Serial Complete GC closure
5712 G1STWDrainQueueClosure drain_queue(this, &pss);
5713
5714 // Setup the soft refs policy...
5715 rp->setup_policy(false);
5716
5717 ReferenceProcessorStats stats;
5718 if (!rp->processing_is_mt()) {
5719 // Serial reference processing...
5720 stats = rp->process_discovered_references(&is_alive,
5721 &keep_alive,
5722 &drain_queue,
5723 NULL,
5724 _gc_timer_stw,
5725 _gc_tracer_stw->gc_id());
5726 } else {
5727 // Parallel reference processing
5728 assert(rp->num_q() == no_of_gc_workers, "sanity");
5729 assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5730
5731 G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
5732 stats = rp->process_discovered_references(&is_alive,
5733 &keep_alive,
5734 &drain_queue,
5735 &par_task_executor,
5736 _gc_timer_stw,
5737 _gc_tracer_stw->gc_id());
5738 }
5739
5740 _gc_tracer_stw->report_gc_reference_stats(stats);
5741
5742 // We have completed copying any necessary live referent objects.
5743 assert(pss.queue_is_empty(), "both queue and overflow should be empty");
5744
5745 double ref_proc_time = os::elapsedTime() - ref_proc_start;
5746 g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5747 }
5748
5749 // Weak Reference processing during an evacuation pause (part 2).
5750 void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
5751 double ref_enq_start = os::elapsedTime();
5752
5753 ReferenceProcessor* rp = _ref_processor_stw;
5754 assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5755
5756 // Now enqueue any remaining on the discovered lists on to
5757 // the pending list.
5758 if (!rp->processing_is_mt()) {
5759 // Serial reference processing...
5760 rp->enqueue_discovered_references();
5761 } else {
5762 // Parallel reference enqueueing
5763
5764 assert(no_of_gc_workers == workers()->active_workers(),
5765 "Need to reset active workers");
5766 assert(rp->num_q() == no_of_gc_workers, "sanity");
5767 assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5768
5769 G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
5770 rp->enqueue_discovered_references(&par_task_executor);
5771 }
5772
5773 rp->verify_no_references_recorded();
5774 assert(!rp->discovery_enabled(), "should have been disabled");
5775
5776 // FIXME
5777 // CM's reference processing also cleans up the string and symbol tables.
5778 // Should we do that here also? We could, but it is a serial operation
5779 // and could significantly increase the pause time.
5780
5781 double ref_enq_time = os::elapsedTime() - ref_enq_start;
5782 g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5783 }
5784
5785 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
5786 _expand_heap_after_alloc_failure = true;
5787 _evacuation_failed = false;
5788
5789 // Should G1EvacuationFailureALot be in effect for this GC?
5790 NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5791
5792 g1_rem_set()->prepare_for_oops_into_collection_set_do();
5793
5794 // Disable the hot card cache.
5795 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5796 hot_card_cache->reset_hot_cache_claimed_index();
5797 hot_card_cache->set_use_cache(false);
5798
5799 const uint n_workers = workers()->active_workers();
5800 assert(UseDynamicNumberOfGCThreads ||
5801 n_workers == workers()->total_workers(),
5802 "If not dynamic should be using all the workers");
5803 set_par_threads(n_workers);
5804
5805 init_for_evac_failure(NULL);
5806
5807 rem_set()->prepare_for_younger_refs_iterate(true);
5808
5809 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5810 double start_par_time_sec = os::elapsedTime();
5811 double end_par_time_sec;
5812
5813 {
5814 G1RootProcessor root_processor(this);
5815 G1ParTask g1_par_task(this, _task_queues, &root_processor);
5816 // InitialMark needs claim bits to keep track of the marked-through CLDs.
5817 if (g1_policy()->during_initial_mark_pause()) {
5818 ClassLoaderDataGraph::clear_claimed_marks();
5819 }
5820
5821 if (G1CollectedHeap::use_parallel_gc_threads()) {
5822 // The individual threads will set their evac-failure closures.
5823 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5824 // These tasks use ShareHeap::_process_strong_tasks
5825 assert(UseDynamicNumberOfGCThreads ||
5826 workers()->active_workers() == workers()->total_workers(),
5827 "If not dynamic should be using all the workers");
5828 workers()->run_task(&g1_par_task);
5829 } else {
5830 g1_par_task.set_for_termination(n_workers);
5831 g1_par_task.work(0);
5832 }
5833 end_par_time_sec = os::elapsedTime();
5834
5835 // Closing the inner scope will execute the destructor
5836 // for the G1RootProcessor object. We record the current
5837 // elapsed time before closing the scope so that time
5838 // taken for the destructor is NOT included in the
5839 // reported parallel time.
5840 }
5841
5842 G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
5843
5844 double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5845 phase_times->record_par_time(par_time_ms);
5846
5847 double code_root_fixup_time_ms =
5848 (os::elapsedTime() - end_par_time_sec) * 1000.0;
5849 phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
5850
5851 set_par_threads(0);
5852
5853 // Process any discovered reference objects - we have
5854 // to do this _before_ we retire the GC alloc regions
5855 // as we may have to copy some 'reachable' referent
5856 // objects (and their reachable sub-graphs) that were
5857 // not copied during the pause.
5858 process_discovered_references(n_workers);
5859
5860 if (G1StringDedup::is_enabled()) {
5861 double fixup_start = os::elapsedTime();
5862
5863 G1STWIsAliveClosure is_alive(this);
5864 G1KeepAliveClosure keep_alive(this);
5865 G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, phase_times);
5866
5867 double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
5868 phase_times->record_string_dedup_fixup_time(fixup_time_ms);
5869 }
5870
5871 _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
5872 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5873
5874 // Reset and re-enable the hot card cache.
5875 // Note the counts for the cards in the regions in the
5876 // collection set are reset when the collection set is freed.
5877 hot_card_cache->reset_hot_cache();
5878 hot_card_cache->set_use_cache(true);
5879
5880 purge_code_root_memory();
5881
5882 if (g1_policy()->during_initial_mark_pause()) {
5883 // Reset the claim values set during marking the strong code roots
5884 reset_heap_region_claim_values();
5885 }
5886
5887 finalize_for_evac_failure();
5888
5889 if (evacuation_failed()) {
5890 remove_self_forwarding_pointers();
5891
5892 // Reset the G1EvacuationFailureALot counters and flags
5893 // Note: the values are reset only when an actual
5894 // evacuation failure occurs.
5895 NOT_PRODUCT(reset_evacuation_should_fail();)
5896 }
5897
5898 // Enqueue any references remaining on the STW
5899 // reference processor's discovered lists. We need to do
5900 // this after the card table is cleaned (and verified) as
5901 // the act of enqueueing entries on to the pending list
5902 // will log these updates (and dirty their associated
5903 // cards). We need these updates logged to update any
5904 // RSets.
5905 enqueue_discovered_references(n_workers);
5906
5907 redirty_logged_cards();
5908 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
5909 }
5910
5911 void G1CollectedHeap::free_region(HeapRegion* hr,
5912 FreeRegionList* free_list,
5913 bool par,
5914 bool locked) {
5915 assert(!hr->is_free(), "the region should not be free");
5916 assert(!hr->is_empty(), "the region should not be empty");
5917 assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
5918 assert(free_list != NULL, "pre-condition");
5919
5920 if (G1VerifyBitmaps) {
5921 MemRegion mr(hr->bottom(), hr->end());
5922 concurrent_mark()->clearRangePrevBitmap(mr);
5923 }
5924
5925 // Clear the card counts for this region.
5926 // Note: we only need to do this if the region is not young
5927 // (since we don't refine cards in young regions).
5928 if (!hr->is_young()) {
5929 _cg1r->hot_card_cache()->reset_card_counts(hr);
5930 }
5931 hr->hr_clear(par, true /* clear_space */, locked /* locked */);
5932 free_list->add_ordered(hr);
5933 }
5934
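// Frees a humongous object: the "starts humongous" region passed in and all
// of its trailing "continues humongous" regions are cleared and added to the
// given free list, one region at a time.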
5935 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5936 FreeRegionList* free_list,
5937 bool par) {
5938 assert(hr->startsHumongous(), "this is only for starts humongous regions");
5939 assert(free_list != NULL, "pre-condition");
5940
5941 size_t hr_capacity = hr->capacity();
5942 // We need to read this before we make the region non-humongous,
5943 // otherwise the information will be gone.
5944 uint last_index = hr->last_hc_index();
5945 hr->clear_humongous();
5946 free_region(hr, free_list, par);
5947
5948 uint i = hr->hrm_index() + 1;
5949 while (i < last_index) {
5950 HeapRegion* curr_hr = region_at(i);
5951 assert(curr_hr->continuesHumongous(), "invariant");
5952 curr_hr->clear_humongous();
5953 free_region(curr_hr, free_list, par);
5954 i += 1;
5955 }
5956 }
5957
5958 void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
5959 const HeapRegionSetCount& humongous_regions_removed) {
5960 if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
5961 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5962 _old_set.bulk_remove(old_regions_removed);
5963 _humongous_set.bulk_remove(humongous_regions_removed);
5964 }
5965
5966 }
5967
5968 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
5969 assert(list != NULL, "list can't be null");
5970 if (!list->is_empty()) {
5971 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
5972 _hrm.insert_list_into_free_list(list);
5973 }
5974 }
5975
5976 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
5977 _allocator->decrease_used(bytes);
5978 }
5979
5980 class G1ParCleanupCTTask : public AbstractGangTask {
5981 G1SATBCardTableModRefBS* _ct_bs;
5982 G1CollectedHeap* _g1h;
5983 HeapRegion* volatile _su_head;
5984 public:
5985 G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
5986 G1CollectedHeap* g1h) :
5987 AbstractGangTask("G1 Par Cleanup CT Task"),
5988 _ct_bs(ct_bs), _g1h(g1h) { }
5989
5990 void work(uint worker_id) {
5991 HeapRegion* r;
5992 while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
5993 clear_cards(r);
5994 }
5995 }
5996
5997 void clear_cards(HeapRegion* r) {
5998 // Cards of the survivors should have already been dirtied.
5999 if (!r->is_survivor()) {
6000 _ct_bs->clear(MemRegion(r->bottom(), r->end()));
6001 }
6002 }
6003 };
6004
6005 #ifndef PRODUCT
6006 class G1VerifyCardTableCleanup: public HeapRegionClosure {
6007 G1CollectedHeap* _g1h;
6008 G1SATBCardTableModRefBS* _ct_bs;
6009 public:
6010 G1VerifyCardTableCleanup(G1CollectedHeap* g1h, G1SATBCardTableModRefBS* ct_bs)
6011 : _g1h(g1h), _ct_bs(ct_bs) { }
6012 virtual bool doHeapRegion(HeapRegion* r) {
6013 if (r->is_survivor()) {
6014 _g1h->verify_dirty_region(r);
6015 } else {
6016 _g1h->verify_not_dirty_region(r);
6017 }
6018 return false;
6019 }
6020 };
6021
6022 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
6023 // All of the region should be clean.
6024 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
6025 MemRegion mr(hr->bottom(), hr->end());
6026 ct_bs->verify_not_dirty_region(mr);
6027 }
6028
6029 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
6030 // We cannot guarantee that [bottom(),end()] is dirty. Threads
6031 // dirty allocated blocks as they allocate them. The thread that
6032 // retires each region and replaces it with a new one will do a
6033 // maximal allocation to fill in [pre_dummy_top(),end()] but will
6034 // not dirty that area (one less thing to have to do while holding
6035 // a lock). So we can only verify that [bottom(),pre_dummy_top()]
6036 // is dirty.
6037 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
6038 MemRegion mr(hr->bottom(), hr->pre_dummy_top());
6039 if (hr->is_young()) {
6040 ct_bs->verify_g1_young_region(mr);
6041 } else {
6042 ct_bs->verify_dirty_region(mr);
6043 }
6044 }
6045
6046 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
6047 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
6048 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
6049 verify_dirty_region(hr);
6050 }
6051 }
6052
6053 void G1CollectedHeap::verify_dirty_young_regions() {
6054 verify_dirty_young_list(_young_list->first_region());
6055 }
6056
6057 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
6058 HeapWord* tams, HeapWord* end) {
6059 guarantee(tams <= end,
6060 err_msg("tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end)));
6061 HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
6062 if (result < end) {
6063 gclog_or_tty->cr();
6064 gclog_or_tty->print_cr("## wrong marked address on %s bitmap: " PTR_FORMAT,
6065 bitmap_name, p2i(result));
6066 gclog_or_tty->print_cr("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT,
6067 bitmap_name, p2i(tams), p2i(end));
6068 return false;
6069 }
6070 return true;
6071 }
6072
6073 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
6074 CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
6075 CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
6076
6077 HeapWord* bottom = hr->bottom();
6078 HeapWord* ptams = hr->prev_top_at_mark_start();
6079 HeapWord* ntams = hr->next_top_at_mark_start();
6080 HeapWord* end = hr->end();
6081
6082 bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
6083
6084 bool res_n = true;
6085 // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
6086 // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
6087 // if we happen to be in that state.
6088 if (mark_in_progress() || !_cmThread->in_progress()) {
6089 res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
6090 }
6091 if (!res_p || !res_n) {
6092 gclog_or_tty->print_cr("#### Bitmap verification failed for " HR_FORMAT,
6093 HR_FORMAT_PARAMS(hr));
6094 gclog_or_tty->print_cr("#### Caller: %s", caller);
6095 return false;
6096 }
6097 return true;
6098 }
6099
6100 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
6101 if (!G1VerifyBitmaps) return;
6102
6103 guarantee(verify_bitmaps(caller, hr), "bitmap verification");
6104 }
6105
6106 class G1VerifyBitmapClosure : public HeapRegionClosure {
6107 private:
6108 const char* _caller;
6109 G1CollectedHeap* _g1h;
6110 bool _failures;
6111
6112 public:
6113 G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
6114 _caller(caller), _g1h(g1h), _failures(false) { }
6115
6116 bool failures() { return _failures; }
6117
6118 virtual bool doHeapRegion(HeapRegion* hr) {
6119 if (hr->continuesHumongous()) return false;
6120
6121 bool result = _g1h->verify_bitmaps(_caller, hr);
6122 if (!result) {
6123 _failures = true;
6124 }
6125 return false;
6126 }
6127 };
6128
6129 void G1CollectedHeap::check_bitmaps(const char* caller) {
6130 if (!G1VerifyBitmaps) return;
6131
6132 G1VerifyBitmapClosure cl(caller, this);
6133 heap_region_iterate(&cl);
6134 guarantee(!cl.failures(), "bitmap verification");
6135 }
6136
6137 class G1CheckCSetFastTableClosure : public HeapRegionClosure {
6138 private:
6139 bool _failures;
6140 public:
6141 G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
6142
6143 virtual bool doHeapRegion(HeapRegion* hr) {
6144 uint i = hr->hrm_index();
6145 InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
6146 if (hr->isHumongous()) {
6147 if (hr->in_collection_set()) {
6148 gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
6149 _failures = true;
6150 return true;
6151 }
6152 if (cset_state.is_in_cset()) {
6153 gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
6154 _failures = true;
6155 return true;
6156 }
6157 if (hr->continuesHumongous() && cset_state.is_humongous()) {
6158 gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
6159 _failures = true;
6160 return true;
6161 }
6162 } else {
6163 if (cset_state.is_humongous()) {
6164 gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
6165 _failures = true;
6166 return true;
6167 }
6168 if (hr->in_collection_set() != cset_state.is_in_cset()) {
6169 gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
6170 hr->in_collection_set(), cset_state.value(), i);
6171 _failures = true;
6172 return true;
6173 }
6174 if (cset_state.is_in_cset()) {
6175 if (hr->is_young() != (cset_state.is_young())) {
6176 gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
6177 hr->is_young(), cset_state.value(), i);
6178 _failures = true;
6179 return true;
6180 }
6181 if (hr->is_old() != (cset_state.is_old())) {
6182 gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
6183 hr->is_old(), cset_state.value(), i);
6184 _failures = true;
6185 return true;
6186 }
6187 }
6188 }
6189 return false;
6190 }
6191
6192 bool failures() const { return _failures; }
6193 };
6194
6195 bool G1CollectedHeap::check_cset_fast_test() {
6196 G1CheckCSetFastTableClosure cl;
6197 _hrm.iterate(&cl);
6198 return !cl.failures();
6199 }
6200 #endif // PRODUCT
6201
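// Clears the card table entries for the regions on the dirty cards region
// list, in parallel when parallel GC threads are in use. Survivor regions are
// skipped because their cards should already have been dirtied on purpose.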
6202 void G1CollectedHeap::cleanUpCardTable() {
6203 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
6204 double start = os::elapsedTime();
6205
6206 {
6207 // Iterate over the dirty cards region list.
6208 G1ParCleanupCTTask cleanup_task(ct_bs, this);
6209
6210 if (G1CollectedHeap::use_parallel_gc_threads()) {
6211 set_par_threads();
6212 workers()->run_task(&cleanup_task);
6213 set_par_threads(0);
6214 } else {
6215 while (_dirty_cards_region_list) {
6216 HeapRegion* r = _dirty_cards_region_list;
6217 cleanup_task.clear_cards(r);
6218 _dirty_cards_region_list = r->get_next_dirty_cards_region();
6219 if (_dirty_cards_region_list == r) {
6220 // The last region.
6221 _dirty_cards_region_list = NULL;
6222 }
6223 r->set_next_dirty_cards_region(NULL);
6224 }
6225 }
6226 #ifndef PRODUCT
6227 if (G1VerifyCTCleanup || VerifyAfterGC) {
6228 G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
6229 heap_region_iterate(&cleanup_verifier);
6230 }
6231 #endif
6232 }
6233
6234 double elapsed = os::elapsedTime() - start;
6235 g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
6236 }
6237
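// Walks the collection set list after evacuation: regions that evacuated
// successfully are freed onto a local free list (with the time spent split
// into young and non-young buckets), while regions that failed evacuation
// are retained, tagged as old and added to the old set.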
6238 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) {
6239 size_t pre_used = 0;
6240 FreeRegionList local_free_list("Local List for CSet Freeing");
6241
6242 double young_time_ms = 0.0;
6243 double non_young_time_ms = 0.0;
6244
6245 // Since the collection set is a superset of the young list,
6246 // all we need to do to clear the young list is clear its
6247 // head and length, and unlink any young regions in the code below
6248 _young_list->clear();
6249
6250 G1CollectorPolicy* policy = g1_policy();
6251
6252 double start_sec = os::elapsedTime();
6253 bool non_young = true;
6254
6255 HeapRegion* cur = cs_head;
6256 int age_bound = -1;
6257 size_t rs_lengths = 0;
6258
6259 while (cur != NULL) {
6260 assert(!is_on_master_free_list(cur), "sanity");
6261 if (non_young) {
6262 if (cur->is_young()) {
6263 double end_sec = os::elapsedTime();
6264 double elapsed_ms = (end_sec - start_sec) * 1000.0;
6265 non_young_time_ms += elapsed_ms;
6266
6267 start_sec = os::elapsedTime();
6268 non_young = false;
6269 }
6270 } else {
6271 if (!cur->is_young()) {
6272 double end_sec = os::elapsedTime();
6273 double elapsed_ms = (end_sec - start_sec) * 1000.0;
6274 young_time_ms += elapsed_ms;
6275
6276 start_sec = os::elapsedTime();
6277 non_young = true;
6278 }
6279 }
6280
6281 rs_lengths += cur->rem_set()->occupied_locked();
6282
6283 HeapRegion* next = cur->next_in_collection_set();
6284 assert(cur->in_collection_set(), "bad CS");
6285 cur->set_next_in_collection_set(NULL);
6286 cur->set_in_collection_set(false);
6287
6288 if (cur->is_young()) {
6289 int index = cur->young_index_in_cset();
6290 assert(index != -1, "invariant");
6291 assert((uint) index < policy->young_cset_region_length(), "invariant");
6292 size_t words_survived = _surviving_young_words[index];
6293 cur->record_surv_words_in_group(words_survived);
6294
6295 // At this point we have 'popped' cur from the collection set
6296 // (linked via next_in_collection_set()) but it is still in the
6297 // young list (linked via next_young_region()). Clear the
6298 // _next_young_region field.
6299 cur->set_next_young_region(NULL);
6300 } else {
6301 int index = cur->young_index_in_cset();
6302 assert(index == -1, "invariant");
6303 }
6304
6305 assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
6306 (!cur->is_young() && cur->young_index_in_cset() == -1),
6307 "invariant" );
6308
6309 if (!cur->evacuation_failed()) {
6310 MemRegion used_mr = cur->used_region();
6311
6312 // And the region should not be empty.
6313 assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
6314 pre_used += cur->used();
6315 free_region(cur, &local_free_list, false /* par */, true /* locked */);
6316 } else {
6317 cur->uninstall_surv_rate_group();
6318 if (cur->is_young()) {
6319 cur->set_young_index_in_cset(-1);
6320 }
6321 cur->set_evacuation_failed(false);
6322 // The region is now considered to be old.
6323 cur->set_old();
6324 _old_set.add(cur);
6325 evacuation_info.increment_collectionset_used_after(cur->used());
6326 }
6327 cur = next;
6328 }
6329
6330 evacuation_info.set_regions_freed(local_free_list.length());
6331 policy->record_max_rs_lengths(rs_lengths);
6332 policy->cset_regions_freed();
6333
6334 double end_sec = os::elapsedTime();
6335 double elapsed_ms = (end_sec - start_sec) * 1000.0;
6336
6337 if (non_young) {
6338 non_young_time_ms += elapsed_ms;
6339 } else {
6340 young_time_ms += elapsed_ms;
6341 }
6342
6343 prepend_to_freelist(&local_free_list);
6344 decrement_summary_bytes(pre_used);
6345 policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
6346 policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
6347 }
6348
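// Closure used by eager humongous reclamation: a candidate humongous object
// is considered dead if it has no remembered set entries (see the detailed
// reasoning in doHeapRegion() below); such objects are freed immediately
// during the pause instead of waiting for a concurrent cycle.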
6349 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
6350 private:
6351 FreeRegionList* _free_region_list;
6352 HeapRegionSet* _proxy_set;
6353 HeapRegionSetCount _humongous_regions_removed;
6354 size_t _freed_bytes;
6355 public:
6356
6357 G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
6358 _free_region_list(free_region_list), _humongous_regions_removed(), _freed_bytes(0) {
6359 }
6360
6361 virtual bool doHeapRegion(HeapRegion* r) {
6362 if (!r->startsHumongous()) {
6363 return false;
6364 }
6365
6366 G1CollectedHeap* g1h = G1CollectedHeap::heap();
6367
6368 oop obj = (oop)r->bottom();
6369 CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap();
6370
6371 // The following checks, which determine whether the humongous object is live, are sufficient.
6372 // The main additional check (in addition to having a reference from the roots
6373 // or the young gen) is whether the humongous object has a remembered set entry.
6374 //
6375 // A humongous object cannot be live if there is no remembered set for it
6376 // because:
6377 // - there can be no references from within humongous starts regions referencing
6378 // the object because we never allocate other objects into them.
6379 // (I.e. there are no intra-region references that may be missed by the
6380 // remembered set)
6381 // - as soon as there is a remembered set entry to the humongous starts region
6382 // (i.e. it has "escaped" to an old object) this remembered set entry will stay
6383 // until the end of a concurrent mark.
6384 //
6385 // It is not required to check whether the object has been found dead by marking
6386 // or not, in fact it would prevent reclamation within a concurrent cycle, as
6387 // all objects allocated during that time are considered live.
6388 // SATB marking is even more conservative than the remembered set.
6389 // So if at this point in the collection there is no remembered set entry,
6390 // nobody has a reference to it.
6391 // At the start of collection we flush all refinement logs, and remembered sets
6392 // are completely up-to-date with respect to references to the humongous object.
6393 //
6394 // Other implementation considerations:
6395 // - never consider object arrays at this time because they would require
6396 // considerable effort for cleaning up the remembered sets. This is
6397 // required because stale remembered sets might reference locations that
6398 // are currently allocated into.
6399 uint region_idx = r->hrm_index();
6400 if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
6401 !r->rem_set()->is_empty()) {
6402
6403 if (G1TraceEagerReclaimHumongousObjects) {
6404 gclog_or_tty->print_cr("Live humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
6405 region_idx,
6406 (size_t)obj->size()*HeapWordSize,
6407 p2i(r->bottom()),
6408 r->region_num(),
6409 r->rem_set()->occupied(),
6410 r->rem_set()->strong_code_roots_list_length(),
6411 next_bitmap->isMarked(r->bottom()),
6412 g1h->is_humongous_reclaim_candidate(region_idx),
6413 obj->is_typeArray()
6414 );
6415 }
6416
6417 return false;
6418 }
6419
6420 guarantee(obj->is_typeArray(),
6421 err_msg("Only eagerly reclaiming type arrays is supported, but the object "
6422 PTR_FORMAT " is not.",
6423 p2i(r->bottom())));
6424
6425 if (G1TraceEagerReclaimHumongousObjects) {
6426 gclog_or_tty->print_cr("Dead humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
6427 region_idx,
6428 (size_t)obj->size()*HeapWordSize,
6429 p2i(r->bottom()),
6430 r->region_num(),
6431 r->rem_set()->occupied(),
6432 r->rem_set()->strong_code_roots_list_length(),
6433 next_bitmap->isMarked(r->bottom()),
6434 g1h->is_humongous_reclaim_candidate(region_idx),
6435 obj->is_typeArray()
6436 );
6437 }
6438 // Need to clear mark bit of the humongous object if already set.
6439 if (next_bitmap->isMarked(r->bottom())) {
6440 next_bitmap->clear(r->bottom());
6441 }
6442 _freed_bytes += r->used();
6443 r->set_containing_set(NULL);
6444 _humongous_regions_removed.increment(1u, r->capacity());
6445 g1h->free_humongous_region(r, _free_region_list, false);
6446
6447 return false;
6448 }
6449
6450 HeapRegionSetCount& humongous_free_count() {
6451 return _humongous_regions_removed;
6452 }
6453
6454 size_t bytes_freed() const {
6455 return _freed_bytes;
6456 }
6457
6458 size_t humongous_reclaimed() const {
6459 return _humongous_regions_removed.length();
6460 }
6461 };
6462
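// Applies G1FreeHumongousRegionClosure to every region, removes the freed
// humongous regions from the humongous set, returns them to the free list
// and records the time taken in the phase times.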
6463 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
6464 assert_at_safepoint(true);
6465
6466 if (!G1EagerReclaimHumongousObjects ||
6467 (!_has_humongous_reclaim_candidates && !G1TraceEagerReclaimHumongousObjects)) {
6468 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
6469 return;
6470 }
6471
6472 double start_time = os::elapsedTime();
6473
6474 FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
6475
6476 G1FreeHumongousRegionClosure cl(&local_cleanup_list);
6477 heap_region_iterate(&cl);
6478
6479 HeapRegionSetCount empty_set;
6480 remove_from_old_sets(empty_set, cl.humongous_free_count());
6481
6482 G1HRPrinter* hr_printer = _g1h->hr_printer();
6483 if (hr_printer->is_active()) {
6484 FreeRegionListIterator iter(&local_cleanup_list);
6485 while (iter.more_available()) {
6486 HeapRegion* hr = iter.get_next();
6487 hr_printer->cleanup(hr);
6488 }
6489 }
6490
6491 prepend_to_freelist(&local_cleanup_list);
6492 decrement_summary_bytes(cl.bytes_freed());
6493
6494 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
6495 cl.humongous_reclaimed());
6496 }
6497
6498 // This routine is similar to the above but does not record
6499 // any policy statistics or update free lists; we are abandoning
6500 // the current incremental collection set in preparation for a
6501 // full collection. After the full GC we will start to build up
6502 // the incremental collection set again.
6503 // This is only called when we're doing a full collection
6504 // and is immediately followed by the tearing down of the young list.
6505
6506 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
6507 HeapRegion* cur = cs_head;
6508
6509 while (cur != NULL) {
6510 HeapRegion* next = cur->next_in_collection_set();
6511 assert(cur->in_collection_set(), "bad CS");
6512 cur->set_next_in_collection_set(NULL);
6513 cur->set_in_collection_set(false);
6514 cur->set_young_index_in_cset(-1);
6515 cur = next;
6516 }
6517 }
6518
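// The _free_regions_coming flag is a simple hand-shake with concurrent
// cleanup: it is set while freed regions are still being transferred to the
// secondary free list and reset (with a notify on SecondaryFreeList_lock)
// once the transfer is done; wait_while_free_regions_coming() lets other
// threads block until that point. A rough sketch of the expected pairing:
//
//   set_free_regions_coming();
//   // ... regions are appended to the secondary free list concurrently ...
//   reset_free_regions_coming();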
6519 void G1CollectedHeap::set_free_regions_coming() {
6520 if (G1ConcRegionFreeingVerbose) {
6521 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
6522 "setting free regions coming");
6523 }
6524
6525 assert(!free_regions_coming(), "pre-condition");
6526 _free_regions_coming = true;
6527 }
6528
6529 void G1CollectedHeap::reset_free_regions_coming() {
6530 assert(free_regions_coming(), "pre-condition");
6531
6532 {
6533 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6534 _free_regions_coming = false;
6535 SecondaryFreeList_lock->notify_all();
6536 }
6537
6538 if (G1ConcRegionFreeingVerbose) {
6539 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
6540 "reset free regions coming");
6541 }
6542 }
6543
6544 void G1CollectedHeap::wait_while_free_regions_coming() {
6545 // Most of the time we won't have to wait, so let's do a quick test
6546 // first before we take the lock.
6547 if (!free_regions_coming()) {
6548 return;
6549 }
6550
6551 if (G1ConcRegionFreeingVerbose) {
6552 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
6553 "waiting for free regions");
6554 }
6555
6556 {
6557 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6558 while (free_regions_coming()) {
6559 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
6560 }
6561 }
6562
6563 if (G1ConcRegionFreeingVerbose) {
6564 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
6565 "done waiting for free regions");
6566 }
6567 }
6568
6569 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
6570 assert(heap_lock_held_for_gc(),
6571 "the heap lock should already be held by or for this thread");
6572 _young_list->push_region(hr);
6573 }
6574
6575 class NoYoungRegionsClosure: public HeapRegionClosure {
6576 private:
6577 bool _success;
6578 public:
6579 NoYoungRegionsClosure() : _success(true) { }
6580 bool doHeapRegion(HeapRegion* r) {
6581 if (r->is_young()) {
6582 gclog_or_tty->print_cr("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
6583 p2i(r->bottom()), p2i(r->end()));
6584 _success = false;
6585 }
6586 return false;
6587 }
6588 bool success() { return _success; }
6589 };
6590
6591 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
6592 bool ret = _young_list->check_list_empty(check_sample);
6593
6594 if (check_heap) {
6595 NoYoungRegionsClosure closure;
6596 heap_region_iterate(&closure);
6597 ret = ret && closure.success();
6598 }
6599
6600 return ret;
6601 }
6602
6603 class TearDownRegionSetsClosure : public HeapRegionClosure {
6604 private:
6605 HeapRegionSet *_old_set;
6606
6607 public:
6608 TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
6609
6610 bool doHeapRegion(HeapRegion* r) {
6611 if (r->is_old()) {
6612 _old_set->remove(r);
6613 } else {
6614 // We ignore free regions, we'll empty the free list afterwards.
6615 // We ignore young regions, we'll empty the young list afterwards.
6616 // We ignore humongous regions, we're not tearing down the
6617 // humongous regions set.
6618 assert(r->is_free() || r->is_young() || r->isHumongous(),
6619 "it cannot be another type");
6620 }
6621 return false;
6622 }
6623
6624 ~TearDownRegionSetsClosure() {
6625 assert(_old_set->is_empty(), "post-condition");
6626 }
6627 };
6628
6629 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
6630 assert_at_safepoint(true /* should_be_vm_thread */);
6631
6632 if (!free_list_only) {
6633 TearDownRegionSetsClosure cl(&_old_set);
6634 heap_region_iterate(&cl);
6635
6636 // Note that emptying the _young_list is postponed and instead done as
6637 // the first step when rebuilding the regions sets again. The reason for
6638 // this is that during a full GC string deduplication needs to know if
6639 // a collected region was young or old when the full GC was initiated.
6640 }
6641 _hrm.remove_all_free_regions();
6642 }
6643
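// After a full GC the region sets are rebuilt from scratch: empty regions go
// back on the free list, every other non-humongous region is treated as old
// and added to the old set, and the total used bytes are re-accumulated.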
6644 class RebuildRegionSetsClosure : public HeapRegionClosure {
6645 private:
6646 bool _free_list_only;
6647 HeapRegionSet* _old_set;
6648 HeapRegionManager* _hrm;
6649 size_t _total_used;
6650
6651 public:
6652 RebuildRegionSetsClosure(bool free_list_only,
6653 HeapRegionSet* old_set, HeapRegionManager* hrm) :
6654 _free_list_only(free_list_only),
6655 _old_set(old_set), _hrm(hrm), _total_used(0) {
6656 assert(_hrm->num_free_regions() == 0, "pre-condition");
6657 if (!free_list_only) {
6658 assert(_old_set->is_empty(), "pre-condition");
6659 }
6660 }
6661
6662 bool doHeapRegion(HeapRegion* r) {
6663 if (r->continuesHumongous()) {
6664 return false;
6665 }
6666
6667 if (r->is_empty()) {
6668 // Add free regions to the free list
6669 r->set_free();
6670 r->set_allocation_context(AllocationContext::system());
6671 _hrm->insert_into_free_list(r);
6672 } else if (!_free_list_only) {
6673 assert(!r->is_young(), "we should not come across young regions");
6674
6675 if (r->isHumongous()) {
6676 // We ignore humongous regions, we left the humongous set unchanged
6677 } else {
6678 // Objects that were compacted would have ended up on regions
6679 // that were previously old or free.
6680 assert(r->is_free() || r->is_old(), "invariant");
6681 // We now consider them old, so register as such.
6682 r->set_old();
6683 _old_set->add(r);
6684 }
6685 _total_used += r->used();
6686 }
6687
6688 return false;
6689 }
6690
6691 size_t total_used() {
6692 return _total_used;
6693 }
6694 };
6695
6696 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6697 assert_at_safepoint(true /* should_be_vm_thread */);
6698
6699 if (!free_list_only) {
6700 _young_list->empty_list();
6701 }
6702
6703 RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
6704 heap_region_iterate(&cl);
6705
6706 if (!free_list_only) {
6707 _allocator->set_used(cl.total_used());
6708 }
6709 assert(_allocator->used_unlocked() == recalculate_used(),
6710 err_msg("inconsistent _allocator->used_unlocked(), "
6711 "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
6712 _allocator->used_unlocked(), recalculate_used()));
6713 }
6714
6715 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6716 _refine_cte_cl->set_concurrent(concurrent);
6717 }
6718
6719 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6720 HeapRegion* hr = heap_region_containing(p);
6721 return hr->is_in(p);
6722 }
6723
6724 // Methods for the mutator alloc region
6725
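// A new mutator (eden) region is handed out only while the young list is not
// yet full, unless the caller forces an expansion of the young list; retiring
// such a region adds it to the incremental collection set and accounts its
// used bytes.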
6726 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6727 bool force) {
6728 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6729 assert(!force || g1_policy()->can_expand_young_list(),
6730 "if force is true we should be able to expand the young list");
6731 bool young_list_full = g1_policy()->is_young_list_full();
6732 if (force || !young_list_full) {
6733 HeapRegion* new_alloc_region = new_region(word_size,
6734 false /* is_old */,
6735 false /* do_expand */);
6736 if (new_alloc_region != NULL) {
6737 set_region_short_lived_locked(new_alloc_region);
6738 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
6739 check_bitmaps("Mutator Region Allocation", new_alloc_region);
6740 return new_alloc_region;
6741 }
6742 }
6743 return NULL;
6744 }
6745
6746 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
6747 size_t allocated_bytes) {
6748 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6749 assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
6750
6751 g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
6752 _allocator->increase_used(allocated_bytes);
6753 _hr_printer.retire(alloc_region);
6754 // We update the eden sizes here, when the region is retired,
6755 // instead of when it's allocated, since this is the point that its
6756 // used space has been recorded in _summary_bytes_used.
6757 g1mm()->update_eden_size();
6758 }
6759
6760 void G1CollectedHeap::set_par_threads() {
6761 // Don't change the number of workers. Use the value previously set
6762 // in the workgroup.
6763 assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
6764 uint n_workers = workers()->active_workers();
6765 assert(UseDynamicNumberOfGCThreads ||
6766 n_workers == workers()->total_workers(),
6767 "Otherwise should be using the total number of workers");
6768 if (n_workers == 0) {
6769 assert(false, "Should have been set in prior evacuation pause.");
6770 n_workers = ParallelGCThreads;
6771 workers()->set_active_workers(n_workers);
6772 }
6773 set_par_threads(n_workers);
6774 }
6775
6776 // Methods for the GC alloc regions
6777
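// GC alloc regions are the destinations used while evacuating: survivor
// regions for objects staying in the young generation and old regions for
// tenured objects. A new region is handed out only while the per-destination
// region count is below the policy limit (max_regions(dest)).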
6778 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6779 uint count,
6780 InCSetState dest) {
6781 assert(FreeList_lock->owned_by_self(), "pre-condition");
6782
6783 if (count < g1_policy()->max_regions(dest)) {
6784 const bool is_survivor = (dest.is_young());
6785 HeapRegion* new_alloc_region = new_region(word_size,
6786 !is_survivor,
6787 true /* do_expand */);
6788 if (new_alloc_region != NULL) {
6789 // We really only need to do this for old regions given that we
6790 // should never scan survivors. But it doesn't hurt to do it
6791 // for survivors too.
6792 new_alloc_region->record_timestamp();
6793 if (is_survivor) {
6794 new_alloc_region->set_survivor();
6795 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6796 check_bitmaps("Survivor Region Allocation", new_alloc_region);
6797 } else {
6798 new_alloc_region->set_old();
6799 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
6800 check_bitmaps("Old Region Allocation", new_alloc_region);
6801 }
6802 bool during_im = g1_policy()->during_initial_mark_pause();
6803 new_alloc_region->note_start_of_copying(during_im);
6804 return new_alloc_region;
6805 }
6806 }
6807 return NULL;
6808 }
6809
6810 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6811 size_t allocated_bytes,
6812 InCSetState dest) {
6813 bool during_im = g1_policy()->during_initial_mark_pause();
6814 alloc_region->note_end_of_copying(during_im);
6815 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6816 if (dest.is_young()) {
6817 young_list()->add_survivor_region(alloc_region);
6818 } else {
6819 _old_set.add(alloc_region);
6820 }
6821 _hr_printer.retire(alloc_region);
6822 }
6823
6824 // Heap region set verification
6825
6826 class VerifyRegionListsClosure : public HeapRegionClosure {
6827 private:
6828 HeapRegionSet* _old_set;
6829 HeapRegionSet* _humongous_set;
6830 HeapRegionManager* _hrm;
6831
6832 public:
6833 HeapRegionSetCount _old_count;
6834 HeapRegionSetCount _humongous_count;
6835 HeapRegionSetCount _free_count;
6836
6837 VerifyRegionListsClosure(HeapRegionSet* old_set,
6838 HeapRegionSet* humongous_set,
6839 HeapRegionManager* hrm) :
6840 _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
6841 _old_count(), _humongous_count(), _free_count(){ }
6842
6843 bool doHeapRegion(HeapRegion* hr) {
6844 if (hr->continuesHumongous()) {
6845 return false;
6846 }
6847
6848 if (hr->is_young()) {
6849 // TODO
6850 } else if (hr->startsHumongous()) {
6851 assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index()));
6852 _humongous_count.increment(1u, hr->capacity());
6853 } else if (hr->is_empty()) {
6854 assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index()));
6855 _free_count.increment(1u, hr->capacity());
6856 } else if (hr->is_old()) {
6857 assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
6858 _old_count.increment(1u, hr->capacity());
6859 } else {
6860 ShouldNotReachHere();
6861 }
6862 return false;
6863 }
6864
6865 void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
6866 guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
6867 guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6868 old_set->total_capacity_bytes(), _old_count.capacity()));
6869
6870 guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));
6871 guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6872 humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
6873
6874 guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()));
6875 guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6876 free_list->total_capacity_bytes(), _free_count.capacity()));
6877 }
6878 };
6879
6880 void G1CollectedHeap::verify_region_sets() {
6881 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6882
6883 // First, check the explicit lists.
6884 _hrm.verify();
6885 {
6886 // Given that a concurrent operation might be adding regions to
6887 // the secondary free list we have to take the lock before
6888 // verifying it.
6889 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6890 _secondary_free_list.verify_list();
6891 }
6892
6893 // If a concurrent region freeing operation is in progress it will
6894 // be difficult to correctly attribute any free regions we come
6895 // across to the correct free list given that they might belong to
6896 // one of several (free_list, secondary_free_list, any local lists,
6897 // etc.). So, if that's the case we will skip the rest of the
6898 // verification operation. Alternatively, waiting for the concurrent
6899 // operation to complete will have a non-trivial effect on the GC's
6900 // operation (no concurrent operation will last longer than the
6901 // interval between two calls to verification) and it might hide
6902 // any issues that we would like to catch during testing.
6903 if (free_regions_coming()) {
6904 return;
6905 }
6906
6907 // Make sure we append the secondary_free_list on the free_list so
6908 // that all free regions we will come across can be safely
6909 // attributed to the free_list.
6910 append_secondary_free_list_if_not_empty_with_lock();
6911
6912 // Finally, make sure that the region accounting in the lists is
6913 // consistent with what we see in the heap.
6914
6915 VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm);
6916 heap_region_iterate(&cl);
6917 cl.verify_counts(&_old_set, &_humongous_set, &_hrm);
6918 }
6919
6920 // Optimized nmethod scanning
6921
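// register_nmethod()/unregister_nmethod() walk the oops embedded in an
// nmethod and add or remove the nmethod as a "strong code root" on every
// region one of those oops points into, so that the remembered-set code can
// scan only the nmethods that actually reference a given region.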
6922 class RegisterNMethodOopClosure: public OopClosure {
6923 G1CollectedHeap* _g1h;
6924 nmethod* _nm;
6925
6926 template <class T> void do_oop_work(T* p) {
6927 T heap_oop = oopDesc::load_heap_oop(p);
6928 if (!oopDesc::is_null(heap_oop)) {
6929 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6930 HeapRegion* hr = _g1h->heap_region_containing(obj);
6931 assert(!hr->continuesHumongous(),
6932 err_msg("trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
6933 " starting at " HR_FORMAT,
6934 p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
6935
6936 // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
6937 hr->add_strong_code_root_locked(_nm);
6938 }
6939 }
6940
6941 public:
6942 RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6943 _g1h(g1h), _nm(nm) {}
6944
6945 void do_oop(oop* p) { do_oop_work(p); }
6946 void do_oop(narrowOop* p) { do_oop_work(p); }
6947 };
6948
6949 class UnregisterNMethodOopClosure: public OopClosure {
6950 G1CollectedHeap* _g1h;
6951 nmethod* _nm;
6952
6953 template <class T> void do_oop_work(T* p) {
6954 T heap_oop = oopDesc::load_heap_oop(p);
6955 if (!oopDesc::is_null(heap_oop)) {
6956 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6957 HeapRegion* hr = _g1h->heap_region_containing(obj);
6958 assert(!hr->continuesHumongous(),
6959 err_msg("trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
6960 " starting at " HR_FORMAT,
6961 p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
6962
6963 hr->remove_strong_code_root(_nm);
6964 }
6965 }
6966
6967 public:
6968 UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6969 _g1h(g1h), _nm(nm) {}
6970
6971 void do_oop(oop* p) { do_oop_work(p); }
6972 void do_oop(narrowOop* p) { do_oop_work(p); }
6973 };
6974
6975 void G1CollectedHeap::register_nmethod(nmethod* nm) {
6976 CollectedHeap::register_nmethod(nm);
6977
6978 guarantee(nm != NULL, "sanity");
6979 RegisterNMethodOopClosure reg_cl(this, nm);
6980 nm->oops_do(&reg_cl);
6981 }
6982
6983 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
6984 CollectedHeap::unregister_nmethod(nm);
6985
6986 guarantee(nm != NULL, "sanity");
6987 UnregisterNMethodOopClosure reg_cl(this, nm);
6988 nm->oops_do(&reg_cl, true);
6989 }
6990
6991 void G1CollectedHeap::purge_code_root_memory() {
6992 double purge_start = os::elapsedTime();
6993 G1CodeRootSet::purge();
6994 double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
6995 g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
6996 }
6997
6998 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
6999 G1CollectedHeap* _g1h;
7000
7001 public:
7002 RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
7003 _g1h(g1h) {}
7004
7005 void do_code_blob(CodeBlob* cb) {
7006 nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
7007 if (nm == NULL) {
7008 return;
7009 }
7010
7011 if (ScavengeRootsInCode) {
7012 _g1h->register_nmethod(nm);
7013 }
7014 }
7015 };
7016
7017 void G1CollectedHeap::rebuild_strong_code_roots() {
7018 RebuildStrongCodeRootClosure blob_cl(this);
7019 CodeCache::blobs_do(&blob_cl);
7020 }
7021