1 /*
2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/nmethod.hpp"
27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
29 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
30 #include "gc_implementation/g1/heapRegion.inline.hpp"
31 #include "gc_implementation/g1/heapRegionBounds.inline.hpp"
32 #include "gc_implementation/g1/heapRegionRemSet.hpp"
33 #include "gc_implementation/g1/heapRegionManager.inline.hpp"
34 #include "gc_implementation/shared/liveRange.hpp"
35 #include "memory/genOopClosures.inline.hpp"
36 #include "memory/iterator.hpp"
37 #include "memory/space.inline.hpp"
38 #include "oops/oop.inline.hpp"
39 #include "runtime/orderAccess.inline.hpp"
40 #include "gc_implementation/g1/heapRegionTracer.hpp"
41
42 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
43
// Region sizing constants, shared by all regions. All remain zero until
// setup_heap_region_size() computes them exactly once at VM startup.
int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;
49
// Dirty-card-to-oop closure bound to a single region: oops found on
// dirty cards within hr are forwarded to the RS-scan closure cl.
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr,
                                 G1ParPushHeapRSClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision) :
  DirtyCardToOopClosure(hr, cl, precision, NULL),
  _hr(hr), _rs_scan(cl), _g1(g1) { }
56
// Filtering closure that caches the region's [bottom, end) bounds so
// that only references pointing outside r are passed on to oc.
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
60
// Walk the objects in [bottom, top), applying _rs_scan to references in
// live objects. The first and last objects are iterated with the
// mr-bounded variant (they may straddle mr's edges); interior objects
// lie fully inside and use the unbounded variant. Dead objects are
// stepped over via block_size() without being scanned.
void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  G1CollectedHeap* g1h = _g1;
  size_t oop_size;
  HeapWord* cur = bottom;

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(cur), _hr)) {
    oop_size = oop(cur)->oop_iterate(_rs_scan, mr);
  } else {
    oop_size = _hr->block_size(cur);
  }

  cur += oop_size;

  if (cur < top) {
    oop cur_oop = oop(cur);
    oop_size = _hr->block_size(cur);
    HeapWord* next_obj = cur + oop_size;
    while (next_obj < top) {
      // Keep filtering the remembered set.
      if (!g1h->is_obj_dead(cur_oop, _hr)) {
        // Bottom lies entirely below top, so we can call the
        // non-memRegion version of oop_iterate below.
        cur_oop->oop_iterate(_rs_scan);
      }
      cur = next_obj;
      cur_oop = oop(cur);
      oop_size = _hr->block_size(cur);
      next_obj = cur + oop_size;
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(cur), _hr)) {
      oop(cur)->oop_iterate(_rs_scan, mr);
    }
  }
}
103
// Upper bound on a single region's size, delegated to HeapRegionBounds.
size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}
107
// One-time computation of all region sizing globals from the heap
// bounds (or the explicit G1HeapRegionSize flag). Must run exactly once:
// each guarantee below checks its global is still at its initial zero.
void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    // No explicit size requested: aim for target_number() regions in a
    // heap of the average of initial and max size, but never below the
    // minimum region size.
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
                       (uintx) HeapRegionBounds::min_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < HeapRegionBounds::min_size()) {
    region_size = HeapRegionBounds::min_size();
  } else if (region_size > HeapRegionBounds::max_size()) {
    region_size = HeapRegionBounds::max_size();
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (size_t)region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}
151
// Post-compaction reset: clears marking byte counts and re-initializes
// the top-at-mark-start pointers, on top of the superclass reset.
void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}
159
// Reset this (non-humongous) region to a clean Free state.
//   par         - true if called during a parallel phase; remembered set
//                 clearing and the claim value reset are then deferred.
//   clear_space - also clear (and optionally mangle) the space contents.
//   locked      - use the locked variant of remembered-set clearing.
void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_allocation_context(AllocationContext::system());
  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_free();
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (locked) {
      hrrs->clear_locked();
    } else {
      hrrs->clear();
    }
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();

  // Restore the offset table to full region size (humongous handling
  // may have shrunk it) and reset marking state.
  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}
190
// Deferred part of clearing for the parallel case: clears the
// remembered set and the card-table entries covering this region.
// Precondition: the region itself was already emptied.
void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
                   (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}
200
// Recompute _gc_efficiency: reclaimable bytes per predicted millisecond
// of evacuation time. Used to rank old regions for mixed collections.
void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}
214
// Transition this region's type to Free, reporting the change first.
void HeapRegion::set_free() {
  report_region_type_change(G1HeapRegionTraceType::Free);
  _type.set_free();
}
219
// Transition this region's type to Eden, reporting the change first.
void HeapRegion::set_eden() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden();
}
224
// Pre-GC variant of the Eden transition (same trace type, different
// internal type-state change).
void HeapRegion::set_eden_pre_gc() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden_pre_gc();
}
229
// Transition this region's type to Survivor, reporting the change first.
void HeapRegion::set_survivor() {
  report_region_type_change(G1HeapRegionTraceType::Survivor);
  _type.set_survivor();
}
234
// Transition this region's type to Old, reporting the change first.
void HeapRegion::set_old() {
  report_region_type_change(G1HeapRegionTraceType::Old);
  _type.set_old();
}
239
// Turn this empty region into the first region of a humongous object.
//   new_top - address one past the humongous object's last word.
//   new_end - end of the last region spanned by the object (the region's
//             _end is stretched to cover the whole object).
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  report_region_type_change(G1HeapRegionTraceType::StartsHumongous);
  _type.set_starts_humongous();
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}
254
// Turn this empty region into a continuation region of the humongous
// object that starts in first_hr.
void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  report_region_type_change(G1HeapRegionTraceType::ContinuesHumongous);
  _type.set_continues_humongous();
  _humongous_start_region = first_hr;
}
266
// Undo the humongous designation. For a "starts humongous" region this
// also shrinks _end (and top, if needed) back to the original region
// boundary; continuation regions only drop the start-region link.
void HeapRegion::clear_humongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;
}
285
claimHeapRegion(jint claimValue)286 bool HeapRegion::claimHeapRegion(jint claimValue) {
287 jint current = _claimed;
288 if (current != claimValue) {
289 jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
290 if (res == current) {
291 return true;
292 }
293 }
294 return false;
295 }
296
// Construct a region covering mr at index hrm_index in the region
// manager. Allocates the region's remembered set, then delegates the
// rest of the state setup to initialize().
HeapRegion::HeapRegion(uint hrm_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrm_index(hrm_index),
    _allocation_context(AllocationContext::system()),
    _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
     _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");

  initialize(mr);
}
322
// (Re)initialize the region over mr: records the original end, resets
// all region state via hr_clear, empties the space and stamps the GC
// timestamp. Requires an already-empty remembered set.
void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);

  _orig_end = mr.end();
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  record_timestamp();
}
333
report_region_type_change(G1HeapRegionTraceType::Type to)334 void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
335 HeapRegionTracer::send_region_type_change(_hrm_index,
336 get_trace_type(),
337 to,
338 (uintptr_t)bottom(),
339 used());
340 }
341
// Compaction order is determined by the heap, not by the region itself.
CompactibleSpace* HeapRegion::next_compaction_space() const {
  return G1CollectedHeap::heap()->next_compaction_region(this);
}
345
// Prepare marking state before removing self-forwarding pointers left
// by an evacuation failure in this region.
void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}
367
// Finish self-forwarding removal: record the bytes re-marked live and
// set PTAMS to top so the whole region counts as below PTAMS.
void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(0 <= marked_bytes && marked_bytes <= used(),
         err_msg("marked: " SIZE_FORMAT " used: " SIZE_FORMAT,
                 marked_bytes, used()));
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}
377
// Apply cl to all live objects intersecting mr, stopping early at
// unparseable points or when cl aborts. Returns NULL on a complete
// walk, otherwise the address where iteration stopped.
HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                                 ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += block_size(cur);
  }
  return NULL;
}
409
// Humongous objects are allocated directly in the old-gen.  Need
// special handling for concurrent processing encountering an
// in-progress allocation.
//
// Process the single humongous object (starting in hr's start region)
// for a card covered by mr, applying cl to its references. Returns
// false only when a concurrent caller observes an unpublished object
// (klass not yet set), in which case the card must be redirtied.
static bool do_oops_on_card_in_humongous(MemRegion mr,
                                         FilterOutOfRegionClosure* cl,
                                         HeapRegion* hr,
                                         G1CollectedHeap* g1h) {
  assert(hr->isHumongous(), "precondition");
  HeapRegion* sr = hr->humongous_start_region();
  oop obj = oop(sr->bottom());

  // If concurrent and klass_or_null is NULL, then space has been
  // allocated but the object has not yet been published by setting
  // the klass.  That can only happen if the card is stale.  However,
  // we've already set the card clean, so we must return failure,
  // since the allocating thread could have performed a write to the
  // card that might be missed otherwise.
  if (!g1h->is_gc_active() && (obj->klass_or_null_acquire() == NULL)) {
    return false;
  }

  // Only filler objects follow a humongous object in the containing
  // regions, and we can ignore those.  So only process the one
  // humongous object.
  if (!g1h->is_obj_dead(obj, sr)) {
    if (obj->is_objArray() || (sr->bottom() < mr.start())) {
      // objArrays are always marked precisely, so limit processing
      // with mr.  Non-objArrays might be precisely marked, and since
      // it's humongous it's worthwhile avoiding full processing.
      // However, the card could be stale and only cover filler
      // objects.  That should be rare, so not worth checking for;
      // instead let it fall out from the bounded iteration.
      obj->oop_iterate(cl, mr);
    } else {
      // If obj is not an objArray and mr contains the start of the
      // obj, then this could be an imprecise mark, and we need to
      // process the entire object.
      obj->oop_iterate(cl);
    }
  }
  return true;
}
452
// Scan the objects on one card (mr) of this region, applying cl to
// their references. Cleans the card before scanning (with a storeload
// barrier between the two). Returns false only when the humongous path
// detects a concurrently-in-progress allocation; true otherwise,
// including the trivial cases (empty intersection, young region).
bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
                                                  FilterOutOfRegionClosure* cl,
                                                  jbyte* card_ptr) {
  assert(card_ptr != NULL, "pre-condition");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "scan_top" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(MemRegion(bottom(), scan_top()));
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) {
    return true;
  }

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young()) {
    return true;
  }

  // We can only clean the card here, after we make the decision that
  // the card is not young.
  *card_ptr = CardTableModRefBS::clean_card_val();
  // We must complete this write before we do any of the reads below.
  OrderAccess::storeload();

  // Special handling for humongous regions.
  if (isHumongous()) {
    return do_oops_on_card_in_humongous(mr, cl, this, g1h);
  }

  // During GC we limit mr by scan_top. So we never get here with an
  // mr covering objects allocated during GC.  Non-humongous objects
  // are only allocated in the old-gen during GC.  So the parts of the
  // heap that may be examined here are always parsable; there's no
  // need to use klass_or_null here to detect in-progress allocations.

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // Find the obj that extends onto mr.start().
  // Update BOT as needed while finding start of (possibly dead)
  // object containing the start of the region.
  HeapWord* cur = block_start(start);

#ifdef ASSERT
  {
    assert(cur <= start,
           err_msg("cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start)));
    HeapWord* next = cur + block_size(cur);
    assert(start < next,
           err_msg("start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next)));
  }
#endif

  do {
    oop obj = oop(cur);
    assert(obj->is_oop(true), err_msg("Not an oop at " PTR_FORMAT, p2i(cur)));
    assert(obj->klass_or_null() != NULL,
           err_msg("Unparsable heap at " PTR_FORMAT, p2i(cur)));

    if (g1h->is_obj_dead(obj, this)) {
      // Carefully step over dead object.
      cur += block_size(cur);
    } else {
      // Step over live object, and process its references.
      cur += obj->size();
      // Non-objArrays are usually marked imprecise at the object
      // start, in which case we need to iterate over them in full.
      // objArrays are precisely marked, but can still be iterated
      // over in full if completely covered.
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
      }
    }
  } while (cur < end);

  return true;
}
543
544 // Code roots support
545
// Record nm as a strong code root of this region (delegates to the
// region's remembered set).
void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}
550
// Locked variant of add_strong_code_root; caller must hold the
// CodeCache lock or be at a safepoint.
void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root_locked(nm);
}
556
// Remove nm from this region's strong code root list.
void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}
561
// Apply blk to every code blob registered as a strong code root of
// this region.
void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}
566
567 class VerifyStrongCodeRootOopClosure: public OopClosure {
568 const HeapRegion* _hr;
569 nmethod* _nm;
570 bool _failures;
571 bool _has_oops_in_region;
572
do_oop_work(T * p)573 template <class T> void do_oop_work(T* p) {
574 T heap_oop = oopDesc::load_heap_oop(p);
575 if (!oopDesc::is_null(heap_oop)) {
576 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
577
578 // Note: not all the oops embedded in the nmethod are in the
579 // current region. We only look at those which are.
580 if (_hr->is_in(obj)) {
581 // Object is in the region. Check that its less than top
582 if (_hr->top() <= (HeapWord*)obj) {
583 // Object is above top
584 gclog_or_tty->print_cr("Object " PTR_FORMAT " in region "
585 "[" PTR_FORMAT ", " PTR_FORMAT ") is above "
586 "top " PTR_FORMAT,
587 (void *)obj, _hr->bottom(), _hr->end(), _hr->top());
588 _failures = true;
589 return;
590 }
591 // Nmethod has at least one oop in the current region
592 _has_oops_in_region = true;
593 }
594 }
595 }
596
597 public:
VerifyStrongCodeRootOopClosure(const HeapRegion * hr,nmethod * nm)598 VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
599 _hr(hr), _failures(false), _has_oops_in_region(false) {}
600
do_oop(narrowOop * p)601 void do_oop(narrowOop* p) { do_oop_work(p); }
do_oop(oop * p)602 void do_oop(oop* p) { do_oop_work(p); }
603
failures()604 bool failures() { return _failures; }
has_oops_in_region()605 bool has_oops_in_region() { return _has_oops_in_region; }
606 };
607
// Verification closure applied to each code blob on a region's strong
// code root list: the nmethod must be alive and must actually contain
// at least one oop pointing into the region.
class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod "
                               PTR_FORMAT " in its strong code roots",
                               _hr->bottom(), _hr->end(), nm);
        _failures = true;
      } else {
        // Walk the nmethod's embedded oops checking region membership
        // and the below-top invariant.
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod "
                                 PTR_FORMAT " in its strong code roots "
                                 "with no pointers into region",
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        } else if (oop_cl.failures()) {
          gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has other "
                                 "failures for nmethod " PTR_FORMAT,
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        }
      }
    }
  }

  bool failures()       { return _failures; }
};
645
// Verify this region's strong code root list: empty and continuation
// regions must have no entries; otherwise every entry is checked with
// VerifyStrongCodeRootCodeBlobClosure. Sets *failures on any problem.
// Skipped entirely when the flag is off or during mark-word (full GC)
// verification, when the lists are known to be inconsistent.
void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots in this particular
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // if this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] is empty "
                             "but has " SIZE_FORMAT " code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  if (continuesHumongous()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region " HR_FORMAT " is a continuation of a humongous "
                             "region but has " SIZE_FORMAT " code root entries",
                             HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}
694
// Print a one-line summary of this region: allocation context, short
// type string, collection-set membership, GC timestamp and the two
// top-at-mark-start pointers, then the superclass details.
void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  st->print("AC%4u", allocation_context());
  st->print(" %2s", get_short_type_str());
  if (in_collection_set())
    st->print(" CS");
  else
    st->print("   ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS " PTR_FORMAT " NTAMS " PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}
708
// Common base for the heap-verification oop closures below: caches the
// heap, the card-table barrier set (if any), the object currently being
// scanned, and failure counters.
class G1VerificationClosure : public OopClosure {
protected:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;    // object whose fields are being verified
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _vo(vo)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  // In product builds only the class name is printed; otherwise the
  // full object is dumped.
  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = InstanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }
};
747
748 class VerifyLiveClosure : public G1VerificationClosure {
749 public:
VerifyLiveClosure(G1CollectedHeap * g1h,VerifyOption vo)750 VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
do_oop(narrowOop * p)751 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
do_oop(oop * p)752 virtual void do_oop(oop* p) { do_oop_work(p); }
753
754 template <class T>
do_oop_work(T * p)755 void do_oop_work(T* p) {
756 assert(_containing_obj != NULL, "Precondition");
757 assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
758 "Precondition");
759 verify_liveness(p);
760 }
761
762 template <class T>
verify_liveness(T * p)763 void verify_liveness(T* p) {
764 T heap_oop = oopDesc::load_heap_oop(p);
765 if (!oopDesc::is_null(heap_oop)) {
766 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
767 bool failed = false;
768 if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
769 MutexLockerEx x(ParGCRareEvent_lock,
770 Mutex::_no_safepoint_check_flag);
771
772 if (!_failures) {
773 gclog_or_tty->cr();
774 gclog_or_tty->print_cr("----------");
775 }
776 if (!_g1h->is_in_closed_subset(obj)) {
777 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
778 gclog_or_tty->print_cr("Field " PTR_FORMAT
779 " of live obj " PTR_FORMAT " in region "
780 "[" PTR_FORMAT ", " PTR_FORMAT ")",
781 p, (void*) _containing_obj,
782 from->bottom(), from->end());
783 print_object(gclog_or_tty, _containing_obj);
784 gclog_or_tty->print_cr("points to obj " PTR_FORMAT " not in the heap",
785 (void*) obj);
786 } else {
787 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
788 HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
789 gclog_or_tty->print_cr("Field " PTR_FORMAT
790 " of live obj " PTR_FORMAT " in region "
791 "[" PTR_FORMAT ", " PTR_FORMAT ")",
792 p, (void*) _containing_obj,
793 from->bottom(), from->end());
794 print_object(gclog_or_tty, _containing_obj);
795 gclog_or_tty->print_cr("points to dead obj " PTR_FORMAT " in region "
796 "[" PTR_FORMAT ", " PTR_FORMAT ")",
797 (void*) obj, to->bottom(), to->end());
798 print_object(gclog_or_tty, obj);
799 }
800 gclog_or_tty->print_cr("----------");
801 gclog_or_tty->flush();
802 _failures = true;
803 failed = true;
804 _n_failures++;
805 }
806 }
807 }
808 };
809
810 class VerifyRemSetClosure : public G1VerificationClosure {
811 public:
VerifyRemSetClosure(G1CollectedHeap * g1h,VerifyOption vo)812 VerifyRemSetClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
do_oop(narrowOop * p)813 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
do_oop(oop * p)814 virtual void do_oop(oop* p) { do_oop_work(p); }
815
816 template <class T>
do_oop_work(T * p)817 void do_oop_work(T* p) {
818 assert(_containing_obj != NULL, "Precondition");
819 assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
820 "Precondition");
821 verify_remembered_set(p);
822 }
823
824 template <class T>
verify_remembered_set(T * p)825 void verify_remembered_set(T* p) {
826 T heap_oop = oopDesc::load_heap_oop(p);
827 if (!oopDesc::is_null(heap_oop)) {
828 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
829 bool failed = false;
830 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
831 HeapRegion* to = _g1h->heap_region_containing(obj);
832 if (from != NULL && to != NULL &&
833 from != to &&
834 !to->isHumongous()) {
835 jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
836 jbyte cv_field = *_bs->byte_for_const(p);
837 const jbyte dirty = CardTableModRefBS::dirty_card_val();
838
839 bool is_bad = !(from->is_young()
840 || to->rem_set()->contains_reference(p)
841 || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
842 (_containing_obj->is_objArray() ?
843 cv_field == dirty
844 : cv_obj == dirty || cv_field == dirty));
845 if (is_bad) {
846 MutexLockerEx x(ParGCRareEvent_lock,
847 Mutex::_no_safepoint_check_flag);
848
849 if (!_failures) {
850 gclog_or_tty->cr();
851 gclog_or_tty->print_cr("----------");
852 }
853 gclog_or_tty->print_cr("Missing rem set entry:");
854 gclog_or_tty->print_cr("Field " PTR_FORMAT " "
855 "of obj " PTR_FORMAT ", "
856 "in region " HR_FORMAT,
857 p, (void*) _containing_obj,
858 HR_FORMAT_PARAMS(from));
859 _containing_obj->print_on(gclog_or_tty);
860 gclog_or_tty->print_cr("points to obj " PTR_FORMAT " "
861 "in region " HR_FORMAT,
862 (void*) obj,
863 HR_FORMAT_PARAMS(to));
864 if (obj->is_oop()) {
865 obj->print_on(gclog_or_tty);
866 }
867 gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
868 cv_obj, cv_field);
869 gclog_or_tty->print_cr("----------");
870 gclog_or_tty->flush();
871 _failures = true;
872 if (!failed) _n_failures++;
873 }
874 }
875 }
876 }
877 };
878
879 // This really ought to be commoned up into OffsetTableContigSpace somehow.
880 // We would need a mechanism to make that code skip dead objects.
881
// Full verification of this region under VerifyOption vo.
// For each object in [bottom, top) it checks that:
//  - the object's humongous-ness matches the region's (live objects only),
//  - the block offset table maps the object's range to its start
//    (non-young regions only),
//  - the klass pointer is valid metadata and an actual Klass,
//  - the references of live objects are sane (VerifyLiveClosure), and,
//    unless this is a full collection without G1VerifyRSetsDuringFullGC,
//    remembered-set entries exist (VerifyRemSetClosure).
// Afterwards it does extra BOT consistency look-ups in [top, end),
// checks that a humongous region holds at most one object, and verifies
// the strong code roots. On the first problem *failures is set to true
// and the method returns early (closure-detected problems are bounded by
// G1MaxVerifyFailures).
void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  // prev_p is only written, never read, in this method — presumably
  // retained for debugger inspection; TODO confirm before removing.
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  VerifyRemSetClosure vr_cl(g1, vo);
  bool is_humongous = isHumongous();
  // BOT verification is skipped for young regions.
  bool do_bot_verify = !is_young();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);
    object_num += 1;

    if (is_humongous != g1->isHumongous(obj_size) &&
        !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
      gclog_or_tty->print_cr("obj " PTR_FORMAT " is of %shumongous size ("
                             SIZE_FORMAT " words) in a %shumongous region",
                             p, g1->isHumongous(obj_size) ? "" : "non-",
                             obj_size, is_humongous ? "" : "non-");
      *failures = true;
      return;
    }

    // If it returns false, verify_for_object() will output the
    // appropriate message.
    if (do_bot_verify &&
        !g1->is_obj_dead(obj, this) &&
        !_offsets.verify_for_object(p, obj_size)) {
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        Klass* klass = obj->klass();
        // During class unloading (prev marking) the klass may already be
        // on the unload list instead of in metaspace proper.
        bool is_metaspace_object = Metaspace::contains(klass) ||
                                   (vo == VerifyOption_G1UsePrevMarking &&
                                   ClassLoaderDataGraph::unload_list_contains(klass));
        if (!is_metaspace_object) {
          gclog_or_tty->print_cr("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                 "not metadata", klass, (void *)obj);
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          gclog_or_tty->print_cr("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                 "not a klass", klass, (void *)obj);
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          if (!g1->full_collection() || G1VerifyRSetsDuringFullGC) {
            // verify liveness and rem_set
            vr_cl.set_containing_obj(obj);
            // G1Mux2Closure applies both closures in a single object walk.
            G1Mux2Closure mux(&vl_cl, &vr_cl);
            obj->oop_iterate_no_header(&mux);

            if (vr_cl.failures()) {
              *failures = true;
            }
            if (G1MaxVerifyFailures >= 0 &&
                vr_cl.n_failures() >= G1MaxVerifyFailures) {
              return;
            }
          } else {
            // verify only liveness
            obj->oop_iterate_no_header(&vl_cl);
          }
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT " not an oop", (void *)obj);
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  // The object walk must land exactly on top; otherwise the region is
  // not parsable.
  if (p != top()) {
    gclog_or_tty->print_cr("end of last object " PTR_FORMAT " "
                           "does not match top " PTR_FORMAT, p, top());
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: " PTR_FORMAT " "
                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                             addr_1, b_start_1, p);
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: " PTR_FORMAT " "
                               " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                               addr_2, b_start_2, p);
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: " PTR_FORMAT " "
                               " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                               addr_3, b_start_3, p);
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: " PTR_FORMAT " "
                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                             addr_4, b_start_4, p);
      *failures = true;
      return;
    }
  }

  // A humongous region must contain exactly one (start of a) humongous
  // object.
  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] is humongous "
                           "but has " SIZE_FORMAT ", objects",
                           bottom(), end(), object_num);
    *failures = true;
    return;
  }

  verify_strong_code_roots(vo, failures);
}
1043
verify() const1044 void HeapRegion::verify() const {
1045 bool dummy = false;
1046 verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
1047 }
1048
verify_rem_set(VerifyOption vo,bool * failures) const1049 void HeapRegion::verify_rem_set(VerifyOption vo, bool* failures) const {
1050 G1CollectedHeap* g1 = G1CollectedHeap::heap();
1051 *failures = false;
1052 HeapWord* p = bottom();
1053 HeapWord* prev_p = NULL;
1054 VerifyRemSetClosure vr_cl(g1, vo);
1055 while (p < top()) {
1056 oop obj = oop(p);
1057 size_t obj_size = block_size(p);
1058
1059 if (!g1->is_obj_dead_cond(obj, this, vo)) {
1060 if (obj->is_oop()) {
1061 vr_cl.set_containing_obj(obj);
1062 obj->oop_iterate_no_header(&vr_cl);
1063
1064 if (vr_cl.failures()) {
1065 *failures = true;
1066 }
1067 if (G1MaxVerifyFailures >= 0 &&
1068 vr_cl.n_failures() >= G1MaxVerifyFailures) {
1069 return;
1070 }
1071 } else {
1072 gclog_or_tty->print_cr(PTR_FORMAT " not an oop", p2i(obj));
1073 *failures = true;
1074 return;
1075 }
1076 }
1077
1078 prev_p = p;
1079 p += obj_size;
1080 }
1081 }
1082
verify_rem_set() const1083 void HeapRegion::verify_rem_set() const {
1084 bool failures = false;
1085 verify_rem_set(VerifyOption_G1UsePrevMarking, &failures);
1086 guarantee(!failures, "HeapRegion RemSet verification failed");
1087 }
1088
1089 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
1090 // away eventually.
1091
// Empty the space: reset top and _scan_top to bottom, clear the
// underlying CompactibleSpace (optionally mangling freed memory), and
// reset the block offset table for the now-empty region.
void G1OffsetTableContigSpace::clear(bool mangle_space) {
  set_top(bottom());
  _scan_top = bottom();
  CompactibleSpace::clear(mangle_space);
  reset_bot();
}
1098
// Move the bottom of the space, keeping the block offset table's notion
// of bottom in sync.
void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}
1103
// Move the end of the space and resize the block offset table to cover
// the new [bottom, end) range (size in HeapWords).
void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}
1108
// Print a one-line summary of the space: short description followed by
// [bottom, top, BOT threshold, end).
void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), _offsets.threshold(), end());
}
1115
// Delegate initialization of the BOT update threshold to the offset
// table; returns the first address whose allocation must update the BOT.
HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}
1119
// Record the newly allocated block [start, end) in the block offset
// table and return the updated threshold for the next BOT update.
HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}
1125
// Return the maximum address up to which it is safe to scan this space.
// NOTE(review): the loadload barrier forces top() to be read before
// _gc_time_stamp; this presumably pairs with the publication order in
// record_timestamp() so a reader never combines a new top with a stale
// stamp — confirm against record_timestamp()'s callers.
HeapWord* G1OffsetTableContigSpace::scan_top() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* local_top = top();
  OrderAccess::loadload();
  const unsigned local_time_stamp = _gc_time_stamp;
  assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
  if (local_time_stamp < g1h->get_gc_time_stamp()) {
    // Region not yet stamped for the current GC: everything below the
    // top we read was allocated earlier and is safe to scan.
    return local_top;
  } else {
    // Region stamped for the current GC: only [bottom, _scan_top) is safe.
    return _scan_top;
  }
}
1138
// Stamp this space with the heap's current GC time stamp (if not already
// stamped). Once stamped, concurrent readers of scan_top() switch from
// using top to using _scan_top as the scan limit.
void G1OffsetTableContigSpace::record_timestamp() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // Setting the time stamp here tells concurrent readers to look at
    // scan_top to know the maximum allowed address to look at.

    // scan_top should be bottom for all regions except for the
    // retained old alloc region which should have scan_top == top
    HeapWord* st = _scan_top;
    guarantee(st == _bottom || st == _top, "invariant");

    _gc_time_stamp = curr_gc_time_stamp;
  }
}
1155
// Mark everything currently allocated in this (retained) region as safe
// for the next GC to scan by advancing _scan_top to top.
void G1OffsetTableContigSpace::record_retained_region() {
  // scan_top is the maximum address where it's safe for the next gc to
  // scan this region.
  _scan_top = top();
}
1161
// "Safe" object iteration simply delegates to object_iterate() here —
// no extra filtering is performed for this space type.
void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}
1165
object_iterate(ObjectClosure * blk)1166 void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
1167 HeapWord* p = bottom();
1168 while (p < top()) {
1169 if (block_is_obj(p)) {
1170 blk->do_object(oop(p));
1171 }
1172 p += block_size(p);
1173 }
1174 }
1175
// Forwarding phase of a full compaction. block_is_always_obj is a
// temporary macro (always true) plugged into the shared SCAN_AND_FORWARD
// macro so that every block in the space is treated as an object;
// it is #undef'd immediately afterwards to avoid leaking into the rest
// of the file.
#define block_is_always_obj(q) true
void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
}
#undef block_is_always_obj
1181
// Construct the space over mr, backed by the given shared block offset
// array. The GC time stamp starts at 0 (never stamped), and the offset
// table is pointed back at this space.
G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
}
1191
// (Re)initialize the space over mr: initialize the underlying
// CompactibleSpace, reset top and _scan_top to bottom, clear the saved
// mark word, and reset the block offset table.
void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  _top = bottom();
  _scan_top = bottom();
  set_saved_mark_word(NULL);
  reset_bot();
}
1199
1200