1 /*
2 * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 */
23
24 #include "precompiled.hpp"
25 #include "classfile/classLoaderData.hpp"
26 #include "gc/z/zAddress.inline.hpp"
27 #include "gc/z/zHeap.inline.hpp"
28 #include "gc/z/zNMethod.hpp"
29 #include "gc/z/zOop.hpp"
30 #include "gc/z/zPageAllocator.hpp"
31 #include "gc/z/zResurrection.hpp"
32 #include "gc/z/zRootsIterator.hpp"
33 #include "gc/z/zStackWatermark.hpp"
34 #include "gc/z/zStat.hpp"
35 #include "gc/z/zVerify.hpp"
36 #include "memory/iterator.inline.hpp"
37 #include "memory/resourceArea.hpp"
38 #include "oops/oop.hpp"
39 #include "runtime/frame.inline.hpp"
40 #include "runtime/globals.hpp"
41 #include "runtime/handles.hpp"
42 #include "runtime/safepoint.hpp"
43 #include "runtime/stackWatermark.inline.hpp"
44 #include "runtime/stackWatermarkSet.inline.hpp"
45 #include "runtime/thread.hpp"
46 #include "utilities/debug.hpp"
47 #include "utilities/globalDefinitions.hpp"
48 #include "utilities/preserveException.hpp"
49
50 #define BAD_OOP_ARG(o, p) "Bad oop " PTR_FORMAT " found at " PTR_FORMAT, p2i(o), p2i(p)
51
z_verify_oop(oop * p)52 static void z_verify_oop(oop* p) {
53 const oop o = RawAccess<>::oop_load(p);
54 if (o != NULL) {
55 const uintptr_t addr = ZOop::to_address(o);
56 guarantee(ZAddress::is_good(addr), BAD_OOP_ARG(o, p));
57 guarantee(oopDesc::is_oop(ZOop::from_address(addr)), BAD_OOP_ARG(o, p));
58 }
59 }
60
z_verify_possibly_weak_oop(oop * p)61 static void z_verify_possibly_weak_oop(oop* p) {
62 const oop o = RawAccess<>::oop_load(p);
63 if (o != NULL) {
64 const uintptr_t addr = ZOop::to_address(o);
65 guarantee(ZAddress::is_good(addr) || ZAddress::is_finalizable_good(addr), BAD_OOP_ARG(o, p));
66 guarantee(oopDesc::is_oop(ZOop::from_address(ZAddress::good(addr))), BAD_OOP_ARG(o, p));
67 }
68 }
69
70 class ZVerifyRootClosure : public OopClosure {
71 private:
72 const bool _verify_fixed;
73
74 public:
ZVerifyRootClosure(bool verify_fixed)75 ZVerifyRootClosure(bool verify_fixed) :
76 _verify_fixed(verify_fixed) {}
77
do_oop(oop * p)78 virtual void do_oop(oop* p) {
79 if (_verify_fixed) {
80 z_verify_oop(p);
81 } else {
82 // Don't know the state of the oop.
83 oop obj = *p;
84 obj = NativeAccess<AS_NO_KEEPALIVE>::oop_load(&obj);
85 z_verify_oop(&obj);
86 }
87 }
88
do_oop(narrowOop *)89 virtual void do_oop(narrowOop*) {
90 ShouldNotReachHere();
91 }
92
verify_fixed() const93 bool verify_fixed() const {
94 return _verify_fixed;
95 }
96 };
97
98 class ZVerifyCodeBlobClosure : public CodeBlobToOopClosure {
99 public:
ZVerifyCodeBlobClosure(ZVerifyRootClosure * _cl)100 ZVerifyCodeBlobClosure(ZVerifyRootClosure* _cl) :
101 CodeBlobToOopClosure(_cl, false /* fix_relocations */) {}
102
do_code_blob(CodeBlob * cb)103 virtual void do_code_blob(CodeBlob* cb) {
104 CodeBlobToOopClosure::do_code_blob(cb);
105 }
106 };
107
// Verifies the oops on a JavaThread's stack, taking the thread's stack
// watermark state into account. With verify_fixed, all frames must already
// have been processed and contain only good oops. Otherwise, frames are
// expected to be good down to the last processed frame, and bad (not yet
// processed) beyond it.
class ZVerifyStack : public OopClosure {
private:
  ZVerifyRootClosure* const _cl;
  JavaThread* const _jt;
  uint64_t _last_good;          // sp of the last processed frame (0 if none)
  bool _verifying_bad_frames;   // true once we are past the watermark

public:
  ZVerifyStack(ZVerifyRootClosure* cl, JavaThread* jt) :
      _cl(cl),
      _jt(jt),
      _last_good(0),
      _verifying_bad_frames(false) {
    ZStackWatermark* const stack_watermark = StackWatermarkSet::get<ZStackWatermark>(jt, StackWatermarkKind::gc);

    if (_cl->verify_fixed()) {
      assert(stack_watermark->processing_started(), "Should already have been fixed");
      assert(stack_watermark->processing_completed(), "Should already have been fixed");
    } else {
      // We don't really know the state of the stack, verify watermark.
      if (!stack_watermark->processing_started()) {
        // No frame has been processed yet; the entire stack should be bad.
        _verifying_bad_frames = true;
      } else {
        // Not time yet to verify bad frames
        _last_good = stack_watermark->last_processed();
      }
    }
  }

  void do_oop(oop* p) {
    if (_verifying_bad_frames) {
      // Frames below the watermark must not contain good oops.
      const oop obj = *p;
      guarantee(!ZAddress::is_good(ZOop::to_address(obj)), BAD_OOP_ARG(obj, p));
    }
    _cl->do_oop(p);
  }

  void do_oop(narrowOop* p) {
    // ZGC does not use compressed oops
    ShouldNotReachHere();
  }

  // Called after visiting a frame, to decide the expectation (good vs bad)
  // for the next, older, frame on the stack.
  void prepare_next_frame(frame& frame) {
    if (_cl->verify_fixed()) {
      // All frames need to be good
      return;
    }

    // The verification has two modes, depending on whether we have reached the
    // last processed frame or not. Before it is reached, we expect everything to
    // be good. After reaching it, we expect everything to be bad.
    const uintptr_t sp = reinterpret_cast<uintptr_t>(frame.sp());

    if (!_verifying_bad_frames && sp == _last_good) {
      // Found the last good frame, now verify the bad ones
      _verifying_bad_frames = true;
    }
  }

  // Walk all frames of the thread's stack, verifying both frame-embedded
  // oops (via this closure) and nmethod oops (via the code blob closure).
  void verify_frames() {
    ZVerifyCodeBlobClosure cb_cl(_cl);
    for (StackFrameStream frames(_jt, true /* update */, false /* process_frames */);
         !frames.is_done();
         frames.next()) {
      frame& frame = *frames.current();
      frame.oops_do(this, &cb_cl, frames.register_map(), DerivedPointerIterationMode::_ignore);
      prepare_next_frame(frame);
    }
  }
};
177
178 class ZVerifyOopClosure : public ClaimMetadataVisitingOopIterateClosure {
179 private:
180 const bool _verify_weaks;
181
182 public:
ZVerifyOopClosure(bool verify_weaks)183 ZVerifyOopClosure(bool verify_weaks) :
184 ClaimMetadataVisitingOopIterateClosure(ClassLoaderData::_claim_other),
185 _verify_weaks(verify_weaks) {}
186
do_oop(oop * p)187 virtual void do_oop(oop* p) {
188 if (_verify_weaks) {
189 z_verify_possibly_weak_oop(p);
190 } else {
191 // We should never encounter finalizable oops through strong
192 // paths. This assumes we have only visited strong roots.
193 z_verify_oop(p);
194 }
195 }
196
do_oop(narrowOop * p)197 virtual void do_oop(narrowOop* p) {
198 ShouldNotReachHere();
199 }
200
reference_iteration_mode()201 virtual ReferenceIterationMode reference_iteration_mode() {
202 return _verify_weaks ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
203 }
204 };
205
206 typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_none> ZVerifyCLDClosure;
207
208 class ZVerifyThreadClosure : public ThreadClosure {
209 private:
210 ZVerifyRootClosure* const _cl;
211
212 public:
ZVerifyThreadClosure(ZVerifyRootClosure * cl)213 ZVerifyThreadClosure(ZVerifyRootClosure* cl) :
214 _cl(cl) {}
215
do_thread(Thread * thread)216 virtual void do_thread(Thread* thread) {
217 thread->oops_do_no_frames(_cl, NULL);
218
219 JavaThread* const jt = thread->as_Java_thread();
220 if (!jt->has_last_Java_frame()) {
221 return;
222 }
223
224 ZVerifyStack verify_stack(_cl, jt);
225 verify_stack.verify_frames();
226 }
227 };
228
// Verifies the oops embedded in nmethods. When the nmethod entry-barrier
// state can be trusted (see trust_nmethod_state()), also asserts that no
// armed (not yet processed) nmethods are encountered.
class ZVerifyNMethodClosure : public NMethodClosure {
private:
  OopClosure* const _cl;
  BarrierSetNMethod* const _bs_nm;
  const bool _verify_fixed;

  bool trust_nmethod_state() const {
    // The root iterator will visit non-processed
    // nmethods when class unloading is turned off.
    return ClassUnloading || _verify_fixed;
  }

public:
  ZVerifyNMethodClosure(OopClosure* cl, bool verify_fixed) :
      _cl(cl),
      _bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()),
      _verify_fixed(verify_fixed) {}

  virtual void do_nmethod(nmethod* nm) {
    assert(!trust_nmethod_state() || !_bs_nm->is_armed(nm), "Should not encounter any armed nmethods");

    ZNMethod::nmethod_oops_do(nm, _cl);
  }
};
253
roots_strong(bool verify_fixed)254 void ZVerify::roots_strong(bool verify_fixed) {
255 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
256 assert(!ZResurrection::is_blocked(), "Invalid phase");
257
258 ZVerifyRootClosure cl(verify_fixed);
259 ZVerifyCLDClosure cld_cl(&cl);
260 ZVerifyThreadClosure thread_cl(&cl);
261 ZVerifyNMethodClosure nm_cl(&cl, verify_fixed);
262
263 ZRootsIterator iter(ClassLoaderData::_claim_none);
264 iter.apply(&cl,
265 &cld_cl,
266 &thread_cl,
267 &nm_cl);
268 }
269
roots_weak()270 void ZVerify::roots_weak() {
271 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
272 assert(!ZResurrection::is_blocked(), "Invalid phase");
273
274 ZVerifyRootClosure cl(true /* verify_fixed */);
275 ZWeakRootsIterator iter;
276 iter.apply(&cl);
277 }
278
objects(bool verify_weaks)279 void ZVerify::objects(bool verify_weaks) {
280 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
281 assert(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");
282 assert(!ZResurrection::is_blocked(), "Invalid phase");
283
284 ZVerifyOopClosure cl(verify_weaks);
285 ObjectToOopClosure object_cl(&cl);
286 ZHeap::heap()->object_iterate(&object_cl, verify_weaks);
287 }
288
before_zoperation()289 void ZVerify::before_zoperation() {
290 // Verify strong roots
291 ZStatTimerDisable disable;
292 if (ZVerifyRoots) {
293 roots_strong(false /* verify_fixed */);
294 }
295 }
296
after_mark()297 void ZVerify::after_mark() {
298 // Verify all strong roots and strong references
299 ZStatTimerDisable disable;
300 if (ZVerifyRoots) {
301 roots_strong(true /* verify_fixed */);
302 }
303 if (ZVerifyObjects) {
304 objects(false /* verify_weaks */);
305 }
306 }
307
after_weak_processing()308 void ZVerify::after_weak_processing() {
309 // Verify all roots and all references
310 ZStatTimerDisable disable;
311 if (ZVerifyRoots) {
312 roots_strong(true /* verify_fixed */);
313 roots_weak();
314 }
315 if (ZVerifyObjects) {
316 objects(true /* verify_weaks */);
317 }
318 }
319
320 template <bool Map>
321 class ZPageDebugMapOrUnmapClosure : public ZPageClosure {
322 private:
323 const ZPageAllocator* const _allocator;
324
325 public:
ZPageDebugMapOrUnmapClosure(const ZPageAllocator * allocator)326 ZPageDebugMapOrUnmapClosure(const ZPageAllocator* allocator) :
327 _allocator(allocator) {}
328
do_page(const ZPage * page)329 void do_page(const ZPage* page) {
330 if (Map) {
331 _allocator->debug_map_page(page);
332 } else {
333 _allocator->debug_unmap_page(page);
334 }
335 }
336 };
337
ZVerifyViewsFlip(const ZPageAllocator * allocator)338 ZVerifyViewsFlip::ZVerifyViewsFlip(const ZPageAllocator* allocator) :
339 _allocator(allocator) {
340 if (ZVerifyViews) {
341 // Unmap all pages
342 ZPageDebugMapOrUnmapClosure<false /* Map */> cl(_allocator);
343 ZHeap::heap()->pages_do(&cl);
344 }
345 }
346
~ZVerifyViewsFlip()347 ZVerifyViewsFlip::~ZVerifyViewsFlip() {
348 if (ZVerifyViews) {
349 // Map all pages
350 ZPageDebugMapOrUnmapClosure<true /* Map */> cl(_allocator);
351 ZHeap::heap()->pages_do(&cl);
352 }
353 }
354
355 #ifdef ASSERT
356
357 class ZVerifyBadOopClosure : public OopClosure {
358 public:
do_oop(oop * p)359 virtual void do_oop(oop* p) {
360 const oop o = *p;
361 assert(!ZAddress::is_good(ZOop::to_address(o)), "Should not be good: " PTR_FORMAT, p2i(o));
362 }
363
do_oop(narrowOop * p)364 virtual void do_oop(narrowOop* p) {
365 ShouldNotReachHere();
366 }
367 };
368
// This class encapsulates various marks we need to deal with calling the
// frame iteration code from arbitrary points in the runtime. It is mostly
// due to problems that we might want to eventually clean up inside of the
// frame iteration code, such as creating random handles even though there
// is no safepoint to protect against, and fiddling around with exceptions.
class StackWatermarkProcessingMark {
  ResetNoHandleMark _rnhm;     // re-allow handle creation inside a NoHandleMark scope
  HandleMark _hm;              // bound the lifetime of handles created during iteration
  PreserveExceptionMark _pem;  // save and restore any pending exception
  ResourceMark _rm;            // bound resource-area allocations

public:
  StackWatermarkProcessingMark(Thread* thread) :
      _rnhm(),
      _hm(thread),
      _pem(thread),
      _rm(thread) {}
};
387
verify_frame_bad(const frame & fr,RegisterMap & register_map)388 void ZVerify::verify_frame_bad(const frame& fr, RegisterMap& register_map) {
389 ZVerifyBadOopClosure verify_cl;
390 fr.oops_do(&verify_cl, NULL, ®ister_map, DerivedPointerIterationMode::_ignore);
391 }
392
verify_thread_head_bad(JavaThread * jt)393 void ZVerify::verify_thread_head_bad(JavaThread* jt) {
394 ZVerifyBadOopClosure verify_cl;
395 jt->oops_do_no_frames(&verify_cl, NULL);
396 }
397
verify_thread_frames_bad(JavaThread * jt)398 void ZVerify::verify_thread_frames_bad(JavaThread* jt) {
399 if (jt->has_last_Java_frame()) {
400 ZVerifyBadOopClosure verify_cl;
401 StackWatermarkProcessingMark swpm(Thread::current());
402 // Traverse the execution stack
403 for (StackFrameStream fst(jt, true /* update */, false /* process_frames */); !fst.is_done(); fst.next()) {
404 fst.current()->oops_do(&verify_cl, NULL /* code_cl */, fst.register_map(), DerivedPointerIterationMode::_ignore);
405 }
406 }
407 }
408
409 #endif // ASSERT
410