1 /*
2 * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
3 *
4 * This code is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 only, as
6 * published by the Free Software Foundation.
7 *
8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "code/codeCache.hpp"
26 #include "code/nmethod.hpp"
27 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
28 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
29 #include "memory/resourceArea.hpp"
30
ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap * > * heaps)31 ShenandoahParallelCodeCacheIterator::ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps) {
32 _length = heaps->length();
33 _iters = NEW_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _length, mtGC);
34 for (int h = 0; h < _length; h++) {
35 _iters[h] = ShenandoahParallelCodeHeapIterator(heaps->at(h));
36 }
37 }
38
~ShenandoahParallelCodeCacheIterator()39 ShenandoahParallelCodeCacheIterator::~ShenandoahParallelCodeCacheIterator() {
40 FREE_C_HEAP_ARRAY(ParallelCodeHeapIterator, _iters);
41 }
42
parallel_blobs_do(CodeBlobClosure * f)43 void ShenandoahParallelCodeCacheIterator::parallel_blobs_do(CodeBlobClosure* f) {
44 for (int c = 0; c < _length; c++) {
45 _iters[c].parallel_blobs_do(f);
46 }
47 }
48
// Per-code-heap parallel iterator: starts with no strides claimed and the
// walk not yet finished.
ShenandoahParallelCodeHeapIterator::ShenandoahParallelCodeHeapIterator(CodeHeap* heap) :
        _heap(heap), _claimed_idx(0), _finished(false) {
}
52
void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");

  /*
   * Parallel code heap walk.
   *
   * This code makes all threads scan all code heaps, but only one thread would execute the
   * closure on given blob. This is achieved by recording the "claimed" blocks: if a thread
   * had claimed the block, it can process all blobs in it. Others have to fast-forward to
   * next attempt without processing.
   *
   * Late threads would return immediately if iterator is finished.
   */

  if (_finished) {
    return;
  }

  int stride = 256; // educated guess
  int stride_mask = stride - 1;
  assert (is_power_of_2(stride), "sanity");

  int count = 0;
  bool process_block = true;

  for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != NULL; cb = CodeCache::next_blob(_heap, cb)) {
    int current = count++;
    // At each stride boundary, attempt to claim the next stride of blobs.
    // The thread whose CAS moves _claimed_idx from `current` to
    // `current + stride` owns this stride; everyone else skips it.
    if ((current & stride_mask) == 0) {
      process_block = (current >= _claimed_idx) &&
                      (Atomic::cmpxchg(current + stride, &_claimed_idx, current) == current);
    }
    if (process_block) {
      // Only alive blobs are handed to the closure.
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod())
          Universe::heap()->verify_nmethod((nmethod*)cb);
#endif
      }
    }
  }

  // Any thread reaching the end marks the iterator finished so late-arriving
  // threads bail out at the top without re-scanning.
  _finished = true;
}
97
98 class ShenandoahNMethodOopDetector : public OopClosure {
99 private:
100 ResourceMark rm; // For growable array allocation below.
101 GrowableArray<oop*> _oops;
102
103 public:
ShenandoahNMethodOopDetector()104 ShenandoahNMethodOopDetector() : _oops(10) {};
105
do_oop(oop * o)106 void do_oop(oop* o) {
107 _oops.append(o);
108 }
do_oop(narrowOop * o)109 void do_oop(narrowOop* o) {
110 fatal("NMethods should not have compressed oops embedded.");
111 }
112
oops()113 GrowableArray<oop*>* oops() {
114 return &_oops;
115 }
116
has_oops()117 bool has_oops() {
118 return !_oops.is_empty();
119 }
120 };
121
// Oop closure that updates oops embedded in an nmethod through the Shenandoah
// write barrier, storing back the (possibly forwarded) to-space reference.
class ShenandoahNMethodOopInitializer : public OopClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahNMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {};

private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (! CompressedOops::is_null(o)) {
      oop obj1 = CompressedOops::decode_not_null(o);
      // Resolve through the write barrier; may return a forwarded copy of obj1.
      oop obj2 = ShenandoahBarrierSet::barrier_set()->write_barrier(obj1);
      if (! oopDesc::equals_raw(obj1, obj2)) {
        // The resolved object must not be in the collection set anymore.
        shenandoah_assert_not_in_cset(NULL, obj2);
        // Store the updated reference back into the nmethod's oop slot.
        RawAccess<IS_NOT_NULL>::oop_store(p, obj2);
        if (_heap->is_concurrent_traversal_in_progress()) {
          // Traversal GC must also mark through the newly stored reference.
          ShenandoahBarrierSet::barrier_set()->enqueue(obj2);
        }
      }
    }
  }

public:
  void do_oop(oop* o) {
    do_oop_work(o);
  }
  void do_oop(narrowOop* o) {
    do_oop_work(o);
  }
};
154
// Global registry of nmethods that embed oops (used when ShenandoahCodeRootsStyle == 2),
// guarded by the padded lock below.
ShenandoahCodeRoots::PaddedLock ShenandoahCodeRoots::_recorded_nms_lock;
GrowableArray<ShenandoahNMethod*>* ShenandoahCodeRoots::_recorded_nms;
157
// One-time setup: reset the registry lock and allocate the C-heap-backed
// list of recorded nmethods.
void ShenandoahCodeRoots::initialize() {
  _recorded_nms_lock._lock = 0;
  _recorded_nms = new (ResourceObj::C_HEAP, mtGC) GrowableArray<ShenandoahNMethod*>(100, true, mtGC);
}
162
add_nmethod(nmethod * nm)163 void ShenandoahCodeRoots::add_nmethod(nmethod* nm) {
164 switch (ShenandoahCodeRootsStyle) {
165 case 0:
166 case 1: {
167 ShenandoahNMethodOopInitializer init;
168 nm->oops_do(&init);
169 nm->fix_oop_relocations();
170 break;
171 }
172 case 2: {
173 ShenandoahNMethodOopDetector detector;
174 nm->oops_do(&detector);
175
176 if (detector.has_oops()) {
177 ShenandoahNMethodOopInitializer init;
178 nm->oops_do(&init);
179 nm->fix_oop_relocations();
180
181 ShenandoahNMethod* nmr = new ShenandoahNMethod(nm, detector.oops());
182 nmr->assert_alive_and_correct();
183
184 ShenandoahCodeRootsLock lock(true);
185
186 int idx = _recorded_nms->find(nm, ShenandoahNMethod::find_with_nmethod);
187 if (idx != -1) {
188 ShenandoahNMethod* old = _recorded_nms->at(idx);
189 _recorded_nms->at_put(idx, nmr);
190 delete old;
191 } else {
192 _recorded_nms->append(nmr);
193 }
194 }
195 break;
196 }
197 default:
198 ShouldNotReachHere();
199 }
200 };
201
// Hook called when an nmethod is unregistered from the code cache. In registry
// mode (style 2), removes the matching record from the global registry.
void ShenandoahCodeRoots::remove_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // Nothing is recorded in these modes; nothing to remove.
      break;
    }
    case 2: {
      ShenandoahNMethodOopDetector detector;
      // allow_zombie: the nmethod may already be dying when it is unregistered.
      nm->oops_do(&detector, /* allow_zombie = */ true);

      // Only nmethods with oops were recorded by add_nmethod.
      if (detector.has_oops()) {
        // Writer lock: we mutate the shared registry below.
        ShenandoahCodeRootsLock lock(true);

        int idx = _recorded_nms->find(nm, ShenandoahNMethod::find_with_nmethod);
        assert(idx != -1, "nmethod " PTR_FORMAT " should be registered", p2i(nm));
        ShenandoahNMethod* old = _recorded_nms->at(idx);
        // Sanity: the record must still describe the same oop locations.
        old->assert_same_oops(detector.oops());
        _recorded_nms->delete_at(idx);
        delete old;
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }
}
228
// Safepoint-only iterator over code roots. In registry mode (style 2) it
// holds the registry lock (reader side) for its whole lifetime, so workers
// can walk the recorded nmethods without racing register/unregister.
ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
        _heap(ShenandoahHeap::heap()),
        _par_iterator(CodeCache::heaps()),
        _claimed(0) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  // Constructed by the VM/coordinator thread, not by GC workers.
  assert(!Thread::current()->is_Worker_thread(), "Should not be acquired by workers");
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // No need to do anything here
      break;
    }
    case 2: {
      // Reader lock, released in the destructor.
      ShenandoahCodeRoots::acquire_lock(false);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}
249
~ShenandoahCodeRootsIterator()250 ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() {
251 switch (ShenandoahCodeRootsStyle) {
252 case 0:
253 case 1: {
254 // No need to do anything here
255 break;
256 }
257 case 2: {
258 ShenandoahCodeRoots::release_lock(false);
259 break;
260 }
261 default:
262 ShouldNotReachHere();
263 }
264 }
265
266 template<bool CSET_FILTER>
dispatch_parallel_blobs_do(CodeBlobClosure * f)267 void ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do(CodeBlobClosure *f) {
268 switch (ShenandoahCodeRootsStyle) {
269 case 0: {
270 if (_seq_claimed.try_set()) {
271 CodeCache::blobs_do(f);
272 }
273 break;
274 }
275 case 1: {
276 _par_iterator.parallel_blobs_do(f);
277 break;
278 }
279 case 2: {
280 ShenandoahCodeRootsIterator::fast_parallel_blobs_do<CSET_FILTER>(f);
281 break;
282 }
283 default:
284 ShouldNotReachHere();
285 }
286 }
287
// Factory for an iterator over all recorded code roots (no cset filtering).
ShenandoahAllCodeRootsIterator ShenandoahCodeRoots::iterator() {
  return ShenandoahAllCodeRootsIterator();
}
291
// Factory for an iterator that visits only code roots referencing the collection set.
ShenandoahCsetCodeRootsIterator ShenandoahCodeRoots::cset_iterator() {
  return ShenandoahCsetCodeRootsIterator();
}
295
// Walk all code roots; CSET_FILTER = false means every recorded blob is visited.
void ShenandoahAllCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<false>(f);
}
299
// Walk code roots; CSET_FILTER = true skips nmethods without collection-set oops
// (filtering only applies in registry mode, style 2).
void ShenandoahCsetCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<true>(f);
}
303
304 template <bool CSET_FILTER>
fast_parallel_blobs_do(CodeBlobClosure * f)305 void ShenandoahCodeRootsIterator::fast_parallel_blobs_do(CodeBlobClosure *f) {
306 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
307
308 size_t stride = 256; // educated guess
309
310 GrowableArray<ShenandoahNMethod*>* list = ShenandoahCodeRoots::_recorded_nms;
311
312 size_t max = (size_t)list->length();
313 while (_claimed < max) {
314 size_t cur = Atomic::add(stride, &_claimed) - stride;
315 size_t start = cur;
316 size_t end = MIN2(cur + stride, max);
317 if (start >= max) break;
318
319 for (size_t idx = start; idx < end; idx++) {
320 ShenandoahNMethod* nmr = list->at((int) idx);
321 nmr->assert_alive_and_correct();
322
323 if (CSET_FILTER && !nmr->has_cset_oops(_heap)) {
324 continue;
325 }
326
327 f->do_code_blob(nmr->nm());
328 }
329 }
330 }
331
ShenandoahNMethod(nmethod * nm,GrowableArray<oop * > * oops)332 ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>* oops) {
333 _nm = nm;
334 _oops = NEW_C_HEAP_ARRAY(oop*, oops->length(), mtGC);
335 _oops_count = oops->length();
336 for (int c = 0; c < _oops_count; c++) {
337 _oops[c] = oops->at(c);
338 }
339 }
340
~ShenandoahNMethod()341 ShenandoahNMethod::~ShenandoahNMethod() {
342 if (_oops != NULL) {
343 FREE_C_HEAP_ARRAY(oop*, _oops);
344 }
345 }
346
has_cset_oops(ShenandoahHeap * heap)347 bool ShenandoahNMethod::has_cset_oops(ShenandoahHeap *heap) {
348 for (int c = 0; c < _oops_count; c++) {
349 oop o = RawAccess<>::oop_load(_oops[c]);
350 if (heap->in_collection_set(o)) {
351 return true;
352 }
353 }
354 return false;
355 }
356
357 #ifdef ASSERT
// Debug-only: verifies the recorded nmethod is alive, has at least one oop,
// and that each recorded location is inside the nmethod and holds a sane oop.
void ShenandoahNMethod::assert_alive_and_correct() {
  assert(_nm->is_alive(), "only alive nmethods here");
  assert(_oops_count > 0, "should have filtered nmethods without oops before");
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  for (int c = 0; c < _oops_count; c++) {
    oop *loc = _oops[c];
    // The location must belong to the nmethod's code or its oops section.
    assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*");
    oop o = RawAccess<>::oop_load(loc);
    // During full-GC moves, stale references are tolerated temporarily.
    shenandoah_assert_correct_except(loc, o, o == NULL || heap->is_full_gc_move_in_progress());
  }
}
369
assert_same_oops(GrowableArray<oop * > * oops)370 void ShenandoahNMethod::assert_same_oops(GrowableArray<oop*>* oops) {
371 assert(_oops_count == oops->length(), "should have the same number of oop*");
372 for (int c = 0; c < _oops_count; c++) {
373 assert(_oops[c] == oops->at(c), "should be the same oop*");
374 }
375 }
376 #endif
377