1 /*
2 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/codeCache.hpp"
27 #include "code/nmethod.hpp"
28 #include "gc/g1/g1CodeRootSetTable.hpp"
29 #include "gc/g1/g1CodeCacheRemSet.hpp"
30 #include "gc/g1/heapRegion.hpp"
31 #include "memory/heap.hpp"
32 #include "memory/iterator.hpp"
33 #include "oops/access.inline.hpp"
34 #include "oops/oop.inline.hpp"
35 #include "runtime/atomic.hpp"
36 #include "utilities/hashtable.inline.hpp"
37 #include "utilities/stack.inline.hpp"
38
// Head of the global list of retired tables awaiting deletion in purge().
G1CodeRootSetTable* volatile G1CodeRootSetTable::_purge_list = NULL;
40
mem_size()41 size_t G1CodeRootSetTable::mem_size() {
42 return sizeof(G1CodeRootSetTable) + (entry_size() * number_of_entries()) + (sizeof(HashtableBucket<mtGC>) * table_size());
43 }
44
new_entry(nmethod * nm)45 G1CodeRootSetTable::Entry* G1CodeRootSetTable::new_entry(nmethod* nm) {
46 unsigned int hash = compute_hash(nm);
47 Entry* entry = (Entry*) new_entry_free_list();
48 if (entry == NULL) {
49 entry = (Entry*) NEW_C_HEAP_ARRAY2(char, entry_size(), mtGC, CURRENT_PC);
50 }
51 entry->set_next(NULL);
52 entry->set_hash(hash);
53 entry->set_literal(nm);
54 return entry;
55 }
56
remove_entry(Entry * e,Entry * previous)57 void G1CodeRootSetTable::remove_entry(Entry* e, Entry* previous) {
58 int index = hash_to_index(e->hash());
59 assert((e == bucket(index)) == (previous == NULL), "if e is the first entry then previous should be null");
60
61 if (previous == NULL) {
62 set_entry(index, e->next());
63 } else {
64 previous->set_next(e->next());
65 }
66 free_entry(e);
67 }
68
~G1CodeRootSetTable()69 G1CodeRootSetTable::~G1CodeRootSetTable() {
70 for (int index = 0; index < table_size(); ++index) {
71 for (Entry* e = bucket(index); e != NULL; ) {
72 Entry* to_remove = e;
73 // read next before freeing.
74 e = e->next();
75 unlink_entry(to_remove);
76 FREE_C_HEAP_ARRAY(char, to_remove);
77 }
78 }
79 assert(number_of_entries() == 0, "should have removed all entries");
80 // Each of the entries in new_entry_free_list() have been allocated in
81 // G1CodeRootSetTable::new_entry(). We never call the block allocator
82 // in BasicHashtable::new_entry().
83 for (BasicHashtableEntry<mtGC>* e = new_entry_free_list(); e != NULL; e = new_entry_free_list()) {
84 FREE_C_HEAP_ARRAY(char, e);
85 }
86 }
87
add(nmethod * nm)88 bool G1CodeRootSetTable::add(nmethod* nm) {
89 if (!contains(nm)) {
90 Entry* e = new_entry(nm);
91 int index = hash_to_index(e->hash());
92 add_entry(index, e);
93 return true;
94 }
95 return false;
96 }
97
contains(nmethod * nm)98 bool G1CodeRootSetTable::contains(nmethod* nm) {
99 int index = hash_to_index(compute_hash(nm));
100 for (Entry* e = bucket(index); e != NULL; e = e->next()) {
101 if (e->literal() == nm) {
102 return true;
103 }
104 }
105 return false;
106 }
107
remove(nmethod * nm)108 bool G1CodeRootSetTable::remove(nmethod* nm) {
109 int index = hash_to_index(compute_hash(nm));
110 Entry* previous = NULL;
111 for (Entry* e = bucket(index); e != NULL; previous = e, e = e->next()) {
112 if (e->literal() == nm) {
113 remove_entry(e, previous);
114 return true;
115 }
116 }
117 return false;
118 }
119
copy_to(G1CodeRootSetTable * new_table)120 void G1CodeRootSetTable::copy_to(G1CodeRootSetTable* new_table) {
121 for (int index = 0; index < table_size(); ++index) {
122 for (Entry* e = bucket(index); e != NULL; e = e->next()) {
123 new_table->add(e->literal());
124 }
125 }
126 new_table->copy_freelist(this);
127 }
128
nmethods_do(CodeBlobClosure * blk)129 void G1CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
130 for (int index = 0; index < table_size(); ++index) {
131 for (Entry* e = bucket(index); e != NULL; e = e->next()) {
132 blk->do_code_blob(e->literal());
133 }
134 }
135 }
136
// Removes every entry whose nmethod satisfies the should_remove predicate.
// Returns the number of entries removed.
template<typename CB>
int G1CodeRootSetTable::remove_if(CB& should_remove) {
  int num_removed = 0;
  for (int index = 0; index < table_size(); ++index) {
    Entry* previous = NULL;
    Entry* e = bucket(index);
    while (e != NULL) {
      // Read the successor first: remove_entry() frees e.
      Entry* next = e->next();
      if (should_remove(e->literal())) {
        remove_entry(e, previous);
        ++num_removed;
      } else {
        // Only advance the predecessor when e stays in the chain.
        previous = e;
      }
      e = next;
    }
  }
  return num_removed;
}
156
G1CodeRootSet::~G1CodeRootSet() {
  // Deleting a NULL _table is a no-op.
  delete _table;
}
160
G1CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
  // Acquire load pairs with the release_store of _table in
  // allocate_small_table() and move_to_large(), so a reader that sees
  // the pointer also sees the fully constructed table behind it.
  return Atomic::load_acquire(&_table);
}
164
void G1CodeRootSet::allocate_small_table() {
  G1CodeRootSetTable* temp = new G1CodeRootSetTable(SmallSize);

  // Publish with release semantics so concurrent readers using
  // load_acquire_table() observe a fully constructed table.
  Atomic::release_store(&_table, temp);
}
170
// Pushes table onto the global purge list, a lock-free singly linked stack.
void G1CodeRootSetTable::purge_list_append(G1CodeRootSetTable* table) {
  for (;;) {
    // Snapshot the current head as our successor, then try to swing the
    // head to the new table. Retry if another thread changed the head
    // between the read and the CAS.
    table->_purge_next = _purge_list;
    G1CodeRootSetTable* old = Atomic::cmpxchg(&_purge_list, table->_purge_next, table);
    if (old == table->_purge_next) {
      break;
    }
  }
}
180
purge()181 void G1CodeRootSetTable::purge() {
182 G1CodeRootSetTable* table = _purge_list;
183 _purge_list = NULL;
184 while (table != NULL) {
185 G1CodeRootSetTable* to_purge = table;
186 table = table->_purge_next;
187 delete to_purge;
188 }
189 }
190
void G1CodeRootSet::move_to_large() {
  G1CodeRootSetTable* temp = new G1CodeRootSetTable(LargeSize);

  _table->copy_to(temp);

  // The old table is not freed here: concurrent readers obtained via
  // load_acquire_table() may still be scanning it. Queue it for a
  // later purge() instead.
  G1CodeRootSetTable::purge_list_append(_table);

  // Publish the large table with release semantics, pairing with
  // load_acquire_table().
  Atomic::release_store(&_table, temp);
}
200
void G1CodeRootSet::purge() {
  // Frees all tables queued for deletion by move_to_large().
  G1CodeRootSetTable::purge();
}
204
size_t G1CodeRootSet::static_mem_size() {
  // Delegates to the table's static (per-class) memory accounting.
  return G1CodeRootSetTable::static_mem_size();
}
208
add(nmethod * method)209 void G1CodeRootSet::add(nmethod* method) {
210 bool added = false;
211 if (is_empty()) {
212 allocate_small_table();
213 }
214 added = _table->add(method);
215 if (added) {
216 if (_length == Threshold) {
217 move_to_large();
218 }
219 ++_length;
220 }
221 assert(_length == (size_t)_table->number_of_entries(), "sizes should match");
222 }
223
remove(nmethod * method)224 bool G1CodeRootSet::remove(nmethod* method) {
225 bool removed = false;
226 if (_table != NULL) {
227 removed = _table->remove(method);
228 }
229 if (removed) {
230 _length--;
231 if (_length == 0) {
232 clear();
233 }
234 }
235 assert((_length == 0 && _table == NULL) ||
236 (_length == (size_t)_table->number_of_entries()), "sizes should match");
237 return removed;
238 }
239
contains(nmethod * method)240 bool G1CodeRootSet::contains(nmethod* method) {
241 G1CodeRootSetTable* table = load_acquire_table(); // contains() may be called outside of lock, so ensure mem sync.
242 if (table != NULL) {
243 return table->contains(method);
244 }
245 return false;
246 }
247
void G1CodeRootSet::clear() {
  // Release the table and reset the set to its initial empty state.
  delete _table;
  _table = NULL;
  _length = 0;
}
253
mem_size()254 size_t G1CodeRootSet::mem_size() {
255 return sizeof(*this) + (_table != NULL ? _table->mem_size() : 0);
256 }
257
nmethods_do(CodeBlobClosure * blk) const258 void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
259 if (_table != NULL) {
260 _table->nmethods_do(blk);
261 }
262 }
263
// Predicate for G1CodeRootSet::clean(): operator() returns true for an
// nmethod that has no oop pointing into the given region, i.e. one that
// may be removed from the region's code root set.
class CleanCallback : public StackObj {
  // Oop closure that records whether any visited oop points into _hr.
  class PointsIntoHRDetectionClosure : public OopClosure {
    HeapRegion* _hr;
   public:
    bool _points_into;  // sticky result flag; reset by the caller
    PointsIntoHRDetectionClosure(HeapRegion* hr) : _hr(hr), _points_into(false) {}

    void do_oop(narrowOop* o) {
      do_oop_work(o);
    }

    void do_oop(oop* o) {
      do_oop_work(o);
    }

    template <typename T>
    void do_oop_work(T* p) {
      if (_hr->is_in(RawAccess<>::oop_load(p))) {
        _points_into = true;
      }
    }
  };

  PointsIntoHRDetectionClosure _detector;
  // Drives _detector over the oops of a code blob; relocations are not fixed.
  CodeBlobToOopClosure _blobs;

 public:
  CleanCallback(HeapRegion* hr) : _detector(hr), _blobs(&_detector, !CodeBlobToOopClosure::FixRelocations) {}

  bool operator() (nmethod* nm) {
    // Reset the flag, scan all oops in nm, then report whether none of
    // them pointed into the region.
    _detector._points_into = false;
    _blobs.do_code_blob(nm);
    return !_detector._points_into;
  }
};
299
// Removes every code root that no longer has an oop pointing into owner,
// then drops the table entirely if the set became empty.
void G1CodeRootSet::clean(HeapRegion* owner) {
  CleanCallback should_clean(owner);
  if (_table != NULL) {
    int removed = _table->remove_if(should_clean);
    assert((size_t)removed <= _length, "impossible");
    _length -= removed;
  }
  if (_length == 0) {
    clear();
  }
}
311