/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "runtime/orderAccess.hpp"

G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
  return _policy->phase_times();
}

G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return &_survivor_evac_stats;
    case G1HeapRegionAttr::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

size_t G1CollectedHeap::desired_plab_sz(G1HeapRegionAttr dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(workers()->active_workers());
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using paths similar to those for oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
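  //
  // For example, with 1 MiB regions (and the usual half-region humongous
  // threshold) the PLAB size is capped at 64K words on a 64-bit VM, no matter
  // how large the per-destination allocation statistics would like it to be.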
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm->at(index); }

// Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm->at_or_null(index); }

inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
  return _hrm->next_region_in_humongous(hr);
}

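// Return the index of the region containing the given address. The index is
// the byte offset of addr from the start of the reserved heap, shifted right
// by the log of the region size; e.g. with 1 MiB regions
// (LogOfHRGrainBytes == 20), an address 5 MiB past the reserved start maps to
// region index 5. It assumes the address is within the reserved heap.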
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end()));
  return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

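// Return the bottom (lowest) address of the region with the given index,
// i.e. the start of the reserved heap plus index * GrainWords words.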
inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm->reserved().start() + index * HeapRegion::GrainWords;
}

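// Return the region containing the given address. The address must be
// non-NULL and within the reserved heap; the _or_null variant below instead
// returns NULL if the containing region is unmapped.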
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
  return _hrm->addr_to_region((HeapWord*) addr);
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_or_null(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
  uint const region_idx = addr_to_region(addr);
  return region_at_or_null(region_idx);
}

inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
  _old_set.add(hr);
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) {
  _archive_set.add(hr);
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
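// Marking the covering cards with the "young" value lets the post write
// barrier's card filter skip the enqueue for stores into this block.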
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->is_humongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  card_table()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
  return _task_queues->queue(i);
}

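// Return whether obj is marked in the next (in-progress) concurrent marking
// bitmap.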
inline bool G1CollectedHeap::is_marked_next(oop obj) const {
  return _cm->next_mark_bitmap()->is_marked((HeapWord*)obj);
}

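// Collection set membership is answered from the per-region attribute table
// (_region_attr) rather than by touching the HeapRegion itself; these checks
// sit on hot paths during evacuation.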
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  return is_in_cset((HeapWord*)obj);
}

inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) {
  return _region_attr.is_in_cset(addr);
}

bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
  return _region_attr.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _region_attr.is_in_cset_or_humongous((HeapWord*)obj);
}

G1HeapRegionAttr G1CollectedHeap::region_attr(const void* addr) {
  return _region_attr.at((HeapWord*)addr);
}

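// The register_* helpers below seed the region attribute table before
// evacuation; each records the appropriate attribute for the region along
// with whether its remembered set is tracked.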
void G1CollectedHeap::register_humongous_region_with_region_attr(uint index) {
  _region_attr.set_humongous(index, region_at(index)->rem_set()->is_tracked());
}

void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
}

void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
  _rem_set->prepare_for_scan_rem_set(r->hrm_index());
}

void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot
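//
// These non-product flags inject artificial evacuation failures to exercise
// the failure handling paths: roughly once every
// G1EvacuationFailureALotInterval collections (and only for the GC types
// selected by the flags below), every G1EvacuationFailureALotCount-th copy
// attempt is made to report failure.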

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                     bool during_initial_mark,
                                                     bool mark_or_rebuild_in_progress) {
  bool res = false;
  if (mark_or_rebuild_in_progress) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (for_young_gc) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note that we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. fewer than G1EvacuationFailureALotCount)
    // to trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool in_young_only_phase = collector_state()->in_young_only_phase();
    const bool in_initial_mark_gc = collector_state()->in_initial_mark_gc();
    const bool mark_or_rebuild_in_progress = collector_state()->mark_or_rebuild_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(in_young_only_phase,
                                          in_initial_mark_gc,
                                          mark_or_rebuild_in_progress);
  }
}

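// Queried from the evacuation copy path; when this returns true the caller
// acts as if the copy had failed, so the evacuation failure handling code
// gets exercised.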
inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for the current GC.
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif  // #ifndef PRODUCT

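// The convenience overloads below treat a NULL oop as neither young, dead,
// nor ill; otherwise they look up the containing region and perform the
// check against it.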
inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

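// For full GC purposes an object is dead iff it is not marked in the next
// bitmap and is not in an archive region (archive objects are always treated
// as live).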
inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
  return !is_marked_next(obj) && !hr->is_archive();
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
  return is_obj_dead_full(obj, heap_region_containing(obj));
}

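// Eager reclaim bookkeeping: a region that starts a humongous object may be
// flagged as a reclaim candidate when the pause starts; finding a reference
// into that object during the pause clears the flag again via
// set_humongous_is_live() below.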
inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // Clear the flag in the humongous_reclaim_candidates table.  Also
  // reset the entry in the region attribute table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may enter here at the same time, but that
  // is benign:
  // During collection we only ever clear the "candidate" flag and only ever
  // clear the entry in the region attribute table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _region_attr.clear_humongous(region);
  }
}

#endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP