/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectionSetChooser.hpp"
#include "gc/g1/g1RemSetTrackingPolicy.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "runtime/safepoint.hpp"

bool G1RemSetTrackingPolicy::needs_scan_for_rebuild(HeapRegion* r) const {
  // All non-free, non-young, non-closed archive regions need to be scanned for references:
  // at every gc we gather references to other regions in young regions, and closed archive
  // regions by definition do not have references going outside the closed archive.
  // Free regions trivially do not need scanning because they do not contain live
  // objects.
  return !(r->is_young() || r->is_closed_archive() || r->is_free());
}

void G1RemSetTrackingPolicy::update_at_allocate(HeapRegion* r) {
  if (r->is_young()) {
    // Always collect remembered set for young regions.
    r->rem_set()->set_state_complete();
  } else if (r->is_humongous()) {
    // Collect remembered sets for humongous regions by default to allow eager reclaim.
    r->rem_set()->set_state_complete();
  } else if (r->is_archive()) {
    // Archive regions never move. So never build remembered sets for them.
    r->rem_set()->set_state_empty();
  } else if (r->is_old()) {
    // By default, do not create remembered set for new old regions.
    r->rem_set()->set_state_empty();
  } else {
    guarantee(false, "Unhandled region %u with heap region type %s", r->hrm_index(), r->get_type_str());
  }
}

void G1RemSetTrackingPolicy::update_at_free(HeapRegion* r) {
  /* nothing to do */
}

static void print_before_rebuild(HeapRegion* r, bool selected_for_rebuild, size_t total_live_bytes, size_t live_bytes) {
  log_trace(gc, remset, tracking)("Before rebuild region %u "
                                  "(ntams: " PTR_FORMAT ") "
                                  "total_live_bytes " SIZE_FORMAT " "
                                  "selected %s "
                                  "(live_bytes " SIZE_FORMAT " "
                                  "next_marked " SIZE_FORMAT " "
                                  "marked " SIZE_FORMAT " "
                                  "type %s)",
                                  r->hrm_index(),
                                  p2i(r->next_top_at_mark_start()),
                                  total_live_bytes,
                                  BOOL_TO_STR(selected_for_rebuild),
                                  live_bytes,
                                  r->next_marked_bytes(),
                                  r->marked_bytes(),
                                  r->get_type_str());
}

bool G1RemSetTrackingPolicy::update_humongous_before_rebuild(HeapRegion* r, bool is_live) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(r->is_humongous(), "Region %u should be humongous", r->hrm_index());

  if (r->is_archive()) {
    return false;
  }

  assert(!r->rem_set()->is_updating(), "Remembered set of region %u is updating before rebuild", r->hrm_index());

  bool selected_for_rebuild = false;
  // For humongous regions, to be of interest for rebuilding the remembered set the following must apply:
  // - We always try to update the remembered sets of humongous regions containing
  // type arrays as they might have been reset after full gc.
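  // - The remembered set must not already be tracked; regions that are already
  //   tracked keep their current state.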
  if (is_live && cast_to_oop(r->humongous_start_region()->bottom())->is_typeArray() && !r->rem_set()->is_tracked()) {
    r->rem_set()->set_state_updating();
    selected_for_rebuild = true;
  }

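  // For logging, approximate the live data of a live humongous region with a full
  // region's worth of bytes; a dead one contributes nothing.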
  size_t const live_bytes = is_live ? HeapRegion::GrainBytes : 0;
  print_before_rebuild(r, selected_for_rebuild, live_bytes, live_bytes);

  return selected_for_rebuild;
}

bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_bytes) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(!r->is_humongous(), "Region %u is humongous", r->hrm_index());

  // Only consider updating the remembered set for old gen regions - excluding archive regions
  // which never move (but are "Old" regions).
  if (!r->is_old() || r->is_archive()) {
    return false;
  }

  assert(!r->rem_set()->is_updating(), "Remembered set of region %u is updating before rebuild", r->hrm_index());

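  // Objects allocated between NTAMS and top are not covered by the marking bitmap
  // and are considered implicitly live, so count those bytes in addition to the
  // marked live bytes passed in.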
  size_t between_ntams_and_top = (r->top() - r->next_top_at_mark_start()) * HeapWordSize;
  size_t total_live_bytes = live_bytes + between_ntams_and_top;

  bool selected_for_rebuild = false;
  // For old regions, to be of interest for rebuilding the remembered set the following must apply:
  // - They must contain some live data in them.
  // - Only need to rebuild non-complete remembered sets.
  // - Otherwise only add those old gen regions whose occupancy is low enough that there
  // is a chance that we will ever evacuate them in the mixed gcs.
  if ((total_live_bytes > 0) &&
      G1CollectionSetChooser::region_occupancy_low_enough_for_evac(total_live_bytes) &&
      !r->rem_set()->is_tracked()) {

    r->rem_set()->set_state_updating();
    selected_for_rebuild = true;
  }

  print_before_rebuild(r, selected_for_rebuild, total_live_bytes, live_bytes);

  return selected_for_rebuild;
}

void G1RemSetTrackingPolicy::update_after_rebuild(HeapRegion* r) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");

  if (r->is_old_or_humongous_or_archive()) {
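    // A remembered set that was selected for rebuild has now been completely rebuilt,
    // so mark it as complete; all other tracking states are left untouched.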
    if (r->rem_set()->is_updating()) {
      assert(!r->is_archive(), "Archive region %u with remembered set", r->hrm_index());
      r->rem_set()->set_state_complete();
    }
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    // We can drop remembered sets of humongous regions that have too large a remembered set:
    // we will never try to eagerly reclaim or move them until the next concurrent
    // cycle anyway, as e.g. remembered set entries will always be added.
    if (r->is_starts_humongous() && !g1h->is_potential_eager_reclaim_candidate(r)) {
      // Handle HC regions with the HS region.
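      // The humongous object starting at this region spans size_in_regions consecutive
      // regions; clear the card based remembered set of each of them.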
      uint const size_in_regions = (uint)g1h->humongous_obj_size_in_regions(cast_to_oop(r->bottom())->size());
      uint const region_idx = r->hrm_index();
      for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
        HeapRegion* const cur = g1h->region_at(j);
        assert(!cur->is_continues_humongous() || cur->rem_set()->is_empty(),
               "Continues humongous region %u remset should be empty", j);
        cur->rem_set()->clear_locked(true /* only_cardset */);
      }
    }
    G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark();
    log_trace(gc, remset, tracking)("After rebuild region %u "
                                    "(ntams " PTR_FORMAT " "
                                    "liveness " SIZE_FORMAT " "
                                    "next_marked_bytes " SIZE_FORMAT " "
                                    "remset occ " SIZE_FORMAT " "
                                    "size " SIZE_FORMAT ")",
                                    r->hrm_index(),
                                    p2i(r->next_top_at_mark_start()),
                                    cm->live_bytes(r->hrm_index()),
                                    r->next_marked_bytes(),
                                    r->rem_set()->occupied(),
                                    r->rem_set()->mem_size());
  }
}