/*
 * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleWriter.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/support/jfrKlassUnloading.hpp"
#include "jfr/support/jfrMethodLookup.hpp"
#include "jfr/utilities/jfrHashtable.hpp"
#include "jfr/utilities/jfrPredicate.hpp"
#include "jfr/utilities/jfrRelation.hpp"
#include "memory/resourceArea.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"

const int initial_array_size = 64;

template <typename T>
static GrowableArray<T>* c_heap_allocate_array(int size = initial_array_size) {
  return new (ResourceObj::C_HEAP, mtTracing) GrowableArray<T>(size, mtTracing);
}

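// Sorted set of ids for threads that have exited while the leak profiler was running.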
static GrowableArray<traceid>* unloaded_thread_id_set = NULL;

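// RAII helper serializing access to the unloaded thread id set with a binary semaphore.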
class ThreadIdExclusiveAccess : public StackObj {
 private:
  static Semaphore _mutex_semaphore;
 public:
  ThreadIdExclusiveAccess() { _mutex_semaphore.wait(); }
  ~ThreadIdExclusiveAccess() { _mutex_semaphore.signal(); }
};

Semaphore ThreadIdExclusiveAccess::_mutex_semaphore(1);

static bool has_thread_exited(traceid tid) {
  assert(tid != 0, "invariant");
  return unloaded_thread_id_set != NULL && JfrPredicate<traceid, compare_traceid>::test(unloaded_thread_id_set, tid);
}

static bool add(GrowableArray<traceid>* set, traceid id) {
  assert(set != NULL, "invariant");
  return JfrMutablePredicate<traceid, compare_traceid>::test(set, id);
}

static void add_to_unloaded_thread_set(traceid tid) {
  ThreadIdExclusiveAccess lock;
  if (unloaded_thread_id_set == NULL) {
    unloaded_thread_id_set = c_heap_allocate_array<traceid>();
  }
  add(unloaded_thread_id_set, tid);
}

void ObjectSampleCheckpoint::on_thread_exit(JavaThread* jt) {
  assert(jt != NULL, "invariant");
  if (LeakProfiler::is_running()) {
    add_to_unloaded_thread_set(jt->jfr_thread_local()->thread_id());
  }
}

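// Apply the processor to each sample in [sample, end). iterate_samples() starts from the
// most recently added sample; unless 'all' is specified, it stops at the last resolved sample.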
template <typename Processor>
static void do_samples(ObjectSample* sample, const ObjectSample* end, Processor& processor) {
  assert(sample != NULL, "invariant");
  while (sample != end) {
    processor.sample_do(sample);
    sample = sample->next();
  }
}

template <typename Processor>
static void iterate_samples(Processor& processor, bool all = false) {
  ObjectSampler* const sampler = ObjectSampler::sampler();
  assert(sampler != NULL, "invariant");
  ObjectSample* const last = sampler->last();
  assert(last != NULL, "invariant");
  do_samples(last, all ? NULL : sampler->last_resolved(), processor);
}

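// Marks the sampled object of every live sample older than the last sweep and counts the marked samples.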
class SampleMarker {
 private:
  ObjectSampleMarker& _marker;
  jlong _last_sweep;
  int _count;
 public:
  SampleMarker(ObjectSampleMarker& marker, jlong last_sweep) : _marker(marker), _last_sweep(last_sweep), _count(0) {}
  void sample_do(ObjectSample* sample) {
    if (sample->is_alive_and_older_than(_last_sweep)) {
      _marker.mark(sample->object());
      ++_count;
    }
  }
  int count() const {
    return _count;
  }
};

int ObjectSampleCheckpoint::save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all) {
  assert(sampler != NULL, "invariant");
  if (sampler->last() == NULL) {
    return 0;
  }
  SampleMarker sample_marker(marker, emit_all ? max_jlong : ObjectSampler::last_sweep());
  iterate_samples(sample_marker, true);
  return sample_marker.count();
}

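// Cache of serialized stack trace blobs, looked up by stack trace hash with the trace id
// used as the identity check, so samples with identical stack traces can share one blob.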
class BlobCache {
  typedef HashTableHost<JfrBlobHandle, traceid, JfrHashtableEntry, BlobCache> BlobTable;
  typedef BlobTable::HashEntry BlobEntry;
 private:
  BlobTable _table;
  traceid _lookup_id;
 public:
  BlobCache(size_t size) : _table(this, size), _lookup_id(0) {}
  JfrBlobHandle get(const ObjectSample* sample);
  void put(const ObjectSample* sample, const JfrBlobHandle& blob);
  // Hash table callbacks
  void on_link(const BlobEntry* entry) const;
  bool on_equals(uintptr_t hash, const BlobEntry* entry) const;
  void on_unlink(BlobEntry* entry) const;
};

JfrBlobHandle BlobCache::get(const ObjectSample* sample) {
  assert(sample != NULL, "invariant");
  _lookup_id = sample->stack_trace_id();
  assert(_lookup_id != 0, "invariant");
  BlobEntry* const entry = _table.lookup_only(sample->stack_trace_hash());
  return entry != NULL ? entry->literal() : JfrBlobHandle();
}

void BlobCache::put(const ObjectSample* sample, const JfrBlobHandle& blob) {
  assert(sample != NULL, "invariant");
  assert(_table.lookup_only(sample->stack_trace_hash()) == NULL, "invariant");
  _lookup_id = sample->stack_trace_id();
  assert(_lookup_id != 0, "invariant");
  _table.put(sample->stack_trace_hash(), blob);
}

inline void BlobCache::on_link(const BlobEntry* entry) const {
  assert(entry != NULL, "invariant");
  assert(entry->id() == 0, "invariant");
  entry->set_id(_lookup_id);
}

inline bool BlobCache::on_equals(uintptr_t hash, const BlobEntry* entry) const {
  assert(entry != NULL, "invariant");
  assert(entry->hash() == hash, "invariant");
  return entry->id() == _lookup_id;
}

inline void BlobCache::on_unlink(BlobEntry* entry) const {
  assert(entry != NULL, "invariant");
}

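// Resource-allocated set of method ids already processed during the current resolution pass,
// used to avoid tagging the same method more than once.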
static GrowableArray<traceid>* id_set = NULL;

static void prepare_for_resolution() {
  id_set = new GrowableArray<traceid>(JfrOptionSet::old_object_queue_size());
}

static bool stack_trace_precondition(const ObjectSample* sample) {
  assert(sample != NULL, "invariant");
  return sample->has_stack_trace_id() && !sample->is_dead();
}

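// Resolves the stack trace for each live sample that has a stack trace id and installs it
// as a blob, reusing a cached blob when an identical stack trace was already serialized.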
class StackTraceBlobInstaller {
 private:
  BlobCache _cache;
  void install(ObjectSample* sample);
  const JfrStackTrace* resolve(const ObjectSample* sample) const;
 public:
  StackTraceBlobInstaller() : _cache(JfrOptionSet::old_object_queue_size()) {
    prepare_for_resolution();
  }
  ~StackTraceBlobInstaller() {
    JfrStackTraceRepository::clear_leak_profiler();
  }
  void sample_do(ObjectSample* sample) {
    if (stack_trace_precondition(sample)) {
      install(sample);
    }
  }
};

#ifdef ASSERT
static void validate_stack_trace(const ObjectSample* sample, const JfrStackTrace* stack_trace) {
  assert(!sample->has_stacktrace(), "invariant");
  assert(stack_trace != NULL, "invariant");
  assert(stack_trace->hash() == sample->stack_trace_hash(), "invariant");
  assert(stack_trace->id() == sample->stack_trace_id(), "invariant");
}
#endif

inline const JfrStackTrace* StackTraceBlobInstaller::resolve(const ObjectSample* sample) const {
  return JfrStackTraceRepository::lookup_for_leak_profiler(sample->stack_trace_hash(), sample->stack_trace_id());
}

void StackTraceBlobInstaller::install(ObjectSample* sample) {
  JfrBlobHandle blob = _cache.get(sample);
  if (blob.valid()) {
    sample->set_stacktrace(blob);
    return;
  }
  const JfrStackTrace* const stack_trace = resolve(sample);
  DEBUG_ONLY(validate_stack_trace(sample, stack_trace));
  JfrCheckpointWriter writer;
  writer.write_type(TYPE_STACKTRACE);
  writer.write_count(1);
  ObjectSampleCheckpoint::write_stacktrace(stack_trace, writer);
  blob = writer.copy();
  _cache.put(sample, blob);
  sample->set_stacktrace(blob);
}

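// Resolve and install stack trace blobs for samples added since the last resolution.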
static void install_stack_traces(const ObjectSampler* sampler) {
  assert(sampler != NULL, "invariant");
  const ObjectSample* const last = sampler->last();
  if (last != sampler->last_resolved()) {
    ResourceMark rm;
    JfrKlassUnloading::sort();
    StackTraceBlobInstaller installer;
    iterate_samples(installer);
  }
}

void ObjectSampleCheckpoint::on_rotation(const ObjectSampler* sampler) {
  assert(sampler != NULL, "invariant");
  assert(LeakProfiler::is_running(), "invariant");
  JavaThread* const thread = JavaThread::current();
  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(thread);)
  // can safepoint here
  ThreadInVMfromNative transition(thread);
  MutexLocker lock(ClassLoaderDataGraph_lock);
  // the lock is needed to ensure the unload lists do not grow in the middle of inspection.
  install_stack_traces(sampler);
}

static bool is_klass_unloaded(traceid klass_id) {
  assert(ClassLoaderDataGraph_lock->owned_by_self(), "invariant");
  return JfrKlassUnloading::is_unloaded(klass_id);
}

static bool is_processed(traceid method_id) {
  assert(method_id != 0, "invariant");
  assert(id_set != NULL, "invariant");
  return JfrMutablePredicate<traceid, compare_traceid>::test(id_set, method_id);
}

void ObjectSampleCheckpoint::add_to_leakp_set(const InstanceKlass* ik, traceid method_id) {
  assert(ik != NULL, "invariant");
  if (is_processed(method_id) || is_klass_unloaded(JfrMethodLookup::klass_id(method_id))) {
    return;
  }
  const Method* const method = JfrMethodLookup::lookup(ik, method_id);
  assert(method != NULL, "invariant");
  assert(method->method_holder() == ik, "invariant");
  JfrTraceId::load_leakp(ik, method);
}

void ObjectSampleCheckpoint::write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer) {
  assert(trace != NULL, "invariant");
  // JfrStackTrace
  writer.write(trace->id());
  writer.write((u1)!trace->_reached_root);
  writer.write(trace->_nr_of_frames);
  // JfrStackFrames
  for (u4 i = 0; i < trace->_nr_of_frames; ++i) {
    const JfrStackFrame& frame = trace->_frames[i];
    frame.write(writer);
    add_to_leakp_set(frame._klass, frame._methodid);
  }
}

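// Write the blob, unless reset is requested, in which case only the blob's write state
// is cleared so it can be written again by a later pass.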
static void write_blob(const JfrBlobHandle& blob, JfrCheckpointWriter& writer, bool reset) {
  if (reset) {
    blob->reset_write_state();
    return;
  }
  blob->exclusive_write(writer);
}

static void write_type_set_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
  if (sample->has_type_set()) {
    write_blob(sample->type_set(), writer, reset);
  }
}

static void write_thread_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
  assert(sample->has_thread(), "invariant");
  if (has_thread_exited(sample->thread_id())) {
    write_blob(sample->thread(), writer, reset);
  }
}

static void write_stacktrace_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
  if (sample->has_stacktrace()) {
    write_blob(sample->stacktrace(), writer, reset);
  }
}

static void write_blobs(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
  assert(sample != NULL, "invariant");
  write_stacktrace_blob(sample, writer, reset);
  write_thread_blob(sample, writer, reset);
  write_type_set_blob(sample, writer, reset);
}

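// Writes the blobs (stack trace, thread, type set) associated with each live sample older
// than the last sweep; a second pass with _reset set clears the blob write states instead.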
class BlobWriter {
 private:
  const ObjectSampler* _sampler;
  JfrCheckpointWriter& _writer;
  const jlong _last_sweep;
  bool _reset;
 public:
  BlobWriter(const ObjectSampler* sampler, JfrCheckpointWriter& writer, jlong last_sweep) :
    _sampler(sampler), _writer(writer), _last_sweep(last_sweep), _reset(false) {}
  void sample_do(ObjectSample* sample) {
    if (sample->is_alive_and_older_than(_last_sweep)) {
      write_blobs(sample, _writer, _reset);
    }
  }
  void set_reset() {
    _reset = true;
  }
};

static void write_sample_blobs(const ObjectSampler* sampler, bool emit_all, Thread* thread) {
  // sample set is predicated on time of last sweep
  const jlong last_sweep = emit_all ? max_jlong : ObjectSampler::last_sweep();
  JfrCheckpointWriter writer(thread, false);
  BlobWriter cbw(sampler, writer, last_sweep);
  iterate_samples(cbw, true);
  // reset blob write states
  cbw.set_reset();
  iterate_samples(cbw, true);
}

void ObjectSampleCheckpoint::write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
  assert(sampler != NULL, "invariant");
  assert(edge_store != NULL, "invariant");
  assert(thread != NULL, "invariant");
  write_sample_blobs(sampler, emit_all, thread);
  // write reference chains
  if (!edge_store->is_empty()) {
    JfrCheckpointWriter writer(thread);
    ObjectSampleWriter osw(writer, edge_store);
    edge_store->iterate(osw);
  }
}

// A linked list of saved type set blobs for the epoch.
// The links consist of reference counted handles.
static JfrBlobHandle saved_type_set_blobs;

static void release_state_for_previous_epoch() {
  // decrements the reference count and the list is reinitialized
  saved_type_set_blobs = JfrBlobHandle();
}

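// Installs the saved type set blobs into all live samples; the destructor releases the
// blob list retained for the previous epoch.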
class BlobInstaller {
 public:
  ~BlobInstaller() {
    release_state_for_previous_epoch();
  }
  void sample_do(ObjectSample* sample) {
    if (!sample->is_dead()) {
      sample->set_type_set(saved_type_set_blobs);
    }
  }
};

static void install_type_set_blobs() {
  BlobInstaller installer;
  iterate_samples(installer);
}

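// Save the type set data in the writer as a blob, copied or moved, and link it onto the
// list retained for this epoch.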
static void save_type_set_blob(JfrCheckpointWriter& writer, bool copy = false) {
  assert(writer.has_data(), "invariant");
  const JfrBlobHandle blob = copy ? writer.copy() : writer.move();
  if (saved_type_set_blobs.valid()) {
    saved_type_set_blobs->set_next(blob);
  } else {
    saved_type_set_blobs = blob;
  }
}

void ObjectSampleCheckpoint::on_type_set(JfrCheckpointWriter& writer) {
  assert(LeakProfiler::is_running(), "invariant");
  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(Thread::current());)
  const ObjectSample* last = ObjectSampler::sampler()->last();
  if (writer.has_data() && last != NULL) {
    save_type_set_blob(writer);
    install_type_set_blobs();
    ObjectSampler::sampler()->set_last_resolved(last);
  }
}

void ObjectSampleCheckpoint::on_type_set_unload(JfrCheckpointWriter& writer) {
  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
  assert(LeakProfiler::is_running(), "invariant");
  if (writer.has_data() && ObjectSampler::sampler()->last() != NULL) {
    save_type_set_blob(writer, true);
  }
}