/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

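// Per-worker evacuation state. Each GC worker thread owns one instance,
// bundling its task queue, PLAB allocator, age table and surviving-words
// statistics so the hot copying path needs no synchronization.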
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
                                           uint worker_id,
                                           size_t young_cset_length,
                                           size_t optional_cset_length)
  : _g1h(g1h),
    _refs(g1h->task_queue(worker_id)),
    _dcq(&g1h->dirty_card_queue_set()),
    _ct(g1h->card_table()),
    _closures(NULL),
    _plab_allocator(NULL),
    _age_table(false),
    _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
    _scanner(g1h, this),
    _worker_id(worker_id),
    _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
    _stack_trim_lower_threshold(GCDrainStackTargetSize),
    _trim_ticks(),
    _old_gen_is_full(false),
    _num_optional_regions(optional_cset_length)
{
  // We allocate young_cset_length + 1 entries, since entry 0 is
  // "sacrificed" to keep track of surviving bytes for non-young
  // regions (where the age is -1).
  // We also add a few elements at the beginning and at the end in
  // an attempt to eliminate cache contention.
  size_t real_length = 1 + young_cset_length;
  size_t array_length = PADDING_ELEM_NUM +
                        real_length +
                        PADDING_ELEM_NUM;
  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  if (_surviving_young_words_base == NULL)
    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
                          "Not enough space for young surv histo.");
  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  memset(_surviving_young_words, 0, real_length * sizeof(size_t));

  _plab_allocator = new G1PLABAllocator(_g1h->allocator());

  _dest[InCSetState::NotInCSet] = InCSetState::NotInCSet;
  // The dest for Young is used when the objects are aged enough to
  // need to be moved to the next space.
  _dest[InCSetState::Young] = InCSetState::Old;
  _dest[InCSetState::Old] = InCSetState::Old;

  _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);

  _oops_into_optional_regions = new G1OopStarChunkedList[_num_optional_regions];
}

// Pass locally gathered statistics to global state.
void G1ParScanThreadState::flush(size_t* surviving_young_words) {
  _dcq.flush();
  // Update allocation statistics.
  _plab_allocator->flush_and_retire_stats();
  _g1h->g1_policy()->record_age_table(&_age_table);

  uint length = _g1h->collection_set()->young_region_length();
  for (uint region_index = 0; region_index < length; region_index++) {
    surviving_young_words[region_index] += _surviving_young_words[region_index];
  }
}

G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;
  delete _closures;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  delete[] _oops_into_optional_regions;
}

void G1ParScanThreadState::waste(size_t& wasted, size_t& undo_wasted) {
  _plab_allocator->waste(wasted, undo_wasted);
}

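// Debug-only sanity checks for references taken from the task queues: a
// queued entry either carries the partial-array mask (and must then point
// into the collection set, since the array has already been copied) or it
// must reference an object inside the G1 reserved heap. Compiled away in
// product builds.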
#ifdef ASSERT
bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  assert(ref != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  assert(!has_partial_array_mask(ref), "ref=" PTR_FORMAT, p2i(ref));
  oop p = RawAccess<>::oop_load(ref);
  assert(_g1h->is_in_g1_reserved(p),
         "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  return true;
}

bool G1ParScanThreadState::verify_ref(oop* ref) const {
  assert(ref != NULL, "invariant");
  if (has_partial_array_mask(ref)) {
    // Must be in the collection set--it's already been copied.
    oop p = clear_partial_array_mask(ref);
    assert(_g1h->is_in_cset(p),
           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  } else {
    oop p = RawAccess<>::oop_load(ref);
    assert(_g1h->is_in_g1_reserved(p),
           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  }
  return true;
}

bool G1ParScanThreadState::verify_task(StarTask ref) const {
  if (ref.is_narrow()) {
    return verify_ref((narrowOop*) ref);
  } else {
    return verify_ref((oop*) ref);
  }
}
#endif // ASSERT

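// Fully drain this worker's task queue. Processing an entry may push more
// work (the fields of a just-copied object), so keep trimming until the
// queue stays empty.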
void G1ParScanThreadState::trim_queue() {
  StarTask ref;
  do {
    // Fully drain the queue.
    trim_queue_to_threshold(0);
  } while (!_refs->is_empty());
}

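// Fallback allocation when the PLAB for the intended destination could not
// satisfy the request: try the next space in young -> old order. On success
// *dest is updated to the space actually used; returns NULL when no space
// is left, which the caller treats as an evacuation failure.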
HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
                                                      InCSetState* dest,
                                                      size_t word_sz,
                                                      bool previous_plab_refill_failed) {
  assert(state.is_in_cset_or_humongous(), "Unexpected state: " CSETSTATE_FORMAT, state.value());
  assert(dest->is_in_cset_or_humongous(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
                                                        word_sz,
                                                        &plab_refill_in_old_failed);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.
    // Only consider failed PLAB refill here: failed inline allocations are
    // typically large, so not indicative of remaining space.
    if (previous_plab_refill_failed) {
      _tenuring_threshold = 0;
    }

    if (obj_ptr != NULL) {
      dest->set_old();
    } else {
      // We just failed to allocate in old gen. The same idea as explained
      // above for making the survivor space unavailable for allocation
      // applies to old gen.
      _old_gen_is_full = plab_refill_in_old_failed;
    }
    return obj_ptr;
  } else {
    _old_gen_is_full = previous_plab_refill_failed;
    assert(dest->is_old(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());
    // No other space to try.
    return NULL;
  }
}

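// Decide where an object should be copied to: young objects below the
// tenuring threshold stay young (survivor), everything else goes to the
// destination recorded in _dest, i.e. old. For young objects this also
// extracts the age from the (possibly displaced) mark word.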
InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
  if (state.is_young()) {
    age = !m->has_displaced_mark_helper() ? m->age()
                                          : m->displaced_mark_helper()->age();
    if (age < _tenuring_threshold) {
      return state;
    }
  }
  return dest(state);
}

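// Report a promotion event to the STW GC tracer, distinguishing objects
// allocated inside a (new) PLAB from objects allocated directly outside
// of any PLAB.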
void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
                                                  oop const old, size_t word_sz, uint age,
                                                  HeapWord * const obj_ptr) const {
  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state);
  if (alloc_buf->contains(obj_ptr)) {
    _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                             dest_state.value() == InCSetState::Old,
                                                             alloc_buf->word_sz() * HeapWordSize);
  } else {
    _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                              dest_state.value() == InCSetState::Old);
  }
}

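// Copy an object in the collection set to its destination space. Multiple
// workers may race to copy the same object: the winner installs the
// forwarding pointer with a CAS (forward_to_atomic) and completes the copy;
// a loser undoes its PLAB allocation and returns the winner's copy.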
oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
                                                 oop const old,
                                                 markOop const old_mark) {
  const size_t word_sz = old->size();
  HeapRegion* const from_region = _g1h->heap_region_containing(old);
  // +1 to make the -1 index used for non-young regions valid.
  const int young_index = from_region->young_index_in_cset()+1;
  assert( (from_region->is_young() && young_index >  0) ||
         (!from_region->is_young() && young_index == 0), "invariant" );

  uint age = 0;
  InCSetState dest_state = next_state(state, old_mark, age);
  // The second clause is to prevent premature evacuation failure in case there
  // is still space in survivor, but old gen is full.
  if (_old_gen_is_full && dest_state.is_old()) {
    return handle_evacuation_failure_par(old, old_mark);
  }
  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
  if (obj_ptr == NULL) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, &plab_refill_failed);
    if (obj_ptr == NULL) {
      obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, plab_refill_failed);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark);
      }
    }
    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit.
      report_promotion_event(dest_state, old, word_sz, age, obj_ptr);
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method too.
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

  const oop obj = oop(obj_ptr);
  const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

    if (dest_state.is_young()) {
      if (age < markOopDesc::max_age) {
        age++;
      }
      if (old_mark->has_displaced_mark_helper()) {
        // In this case, we have to install the mark word first,
        // otherwise obj looks to be forwarded (the old mark word,
        // which contains the forward pointer, was copied).
        obj->set_mark_raw(old_mark);
        markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
        old_mark->set_displaced_mark_helper(new_mark);
      } else {
        obj->set_mark_raw(old_mark->set_age(age));
      }
      _age_table.add(age, word_sz);
    } else {
      obj->set_mark_raw(old_mark);
    }

    if (G1StringDedup::is_enabled()) {
      const bool is_from_young = state.is_young();
      const bool is_to_young = dest_state.is_young();
      assert(is_from_young == _g1h->heap_region_containing(old)->is_young(),
             "sanity");
      assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
             "sanity");
      G1StringDedup::enqueue_from_evacuation(is_from_young,
                                             is_to_young,
                                             _worker_id,
                                             obj);
    }

    _surviving_young_words[young_index] += word_sz;

    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
      // We keep track of the next start index in the length field of
      // the to-space object. The actual length can be found in the
      // length field of the from-space object.
      arrayOop(obj)->set_length(0);
      oop* old_p = set_partial_array_mask(old);
      do_oop_partial_array(old_p);
    } else {
      G1ScanInYoungSetter x(&_scanner, dest_state.is_young());
      obj->oop_iterate_backwards(&_scanner);
    }
    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz);
    return forward_ptr;
  }
}

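// Per-worker states are created lazily, on the first request from each
// worker, so workers that never participate in this pause do not pay for
// an instance.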
G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
  assert(worker_id < _n_workers, "out of bounds access");
  if (_states[worker_id] == NULL) {
    _states[worker_id] =
      new G1ParScanThreadState(_g1h, worker_id, _young_cset_length, _optional_cset_length);
  }
  return _states[worker_id];
}

const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  return _surviving_young_words_total;
}

void G1ParScanThreadStateSet::flush() {
  assert(!_flushed, "thread local state from the per thread states should be flushed once");

  for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
    G1ParScanThreadState* pss = _states[worker_index];

    if (pss == NULL) {
      continue;
    }

    pss->flush(_surviving_young_words_total);
    delete pss;
    _states[worker_index] = NULL;
  }
  _flushed = true;
}

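// An optional region ended up not being evacuated; account the memory the
// workers used to buffer oops pointing into it as work items of the
// optional scan-RS phase.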
void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
  for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
    G1ParScanThreadState* pss = _states[worker_index];

    if (pss == NULL) {
      continue;
    }

    size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
    _g1h->g1_policy()->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_index, used_memory, G1GCPhaseTimes::OptCSetUsedMemory);
  }
}

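// Called when no space could be allocated for "old": try to install a
// self-forwarding pointer with a CAS. The winner of that race marks the
// region as having failed evacuation, preserves the mark word for later
// restoration, and scans the object in place; a loser simply returns the
// forwardee installed by the winner.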
oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
  assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));

  oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
  if (forward_ptr == NULL) {
    // Forward-to-self succeeded. We are the "owner" of the object.
    HeapRegion* r = _g1h->heap_region_containing(old);

    if (!r->evacuation_failed()) {
      r->set_evacuation_failed(true);
      _g1h->hr_printer()->evac_failure(r);
    }

    _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);

    G1ScanInYoungSetter x(&_scanner, r->is_young());
    old->oop_iterate_backwards(&_scanner);

    return old;
  } else {
    // Forward-to-self failed. Either someone else managed to allocate
    // space for this object (old != forward_ptr) or they beat us in
    // self-forwarding it (old == forward_ptr).
    assert(old == forward_ptr || !_g1h->is_in_cset(forward_ptr),
           "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
           "should not be in the CSet",
           p2i(old), p2i(forward_ptr));
    return forward_ptr;
  }
}

G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                                                 uint n_workers,
                                                 size_t young_cset_length,
                                                 size_t optional_cset_length) :
    _g1h(g1h),
    _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
    _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length, mtGC)),
    _young_cset_length(young_cset_length),
    _optional_cset_length(optional_cset_length),
    _n_workers(n_workers),
    _flushed(false) {
  for (uint i = 0; i < n_workers; ++i) {
    _states[i] = NULL;
  }
  memset(_surviving_young_words_total, 0, young_cset_length * sizeof(size_t));
}

G1ParScanThreadStateSet::~G1ParScanThreadStateSet() {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states);
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total);
}