/*
 * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24 
25 #include "precompiled.hpp"
26 #include "gc/shared/copyFailedInfo.hpp"
27 #include "gc/shared/gcHeapSummary.hpp"
28 #include "gc/shared/gcTimer.hpp"
29 #include "gc/shared/gcTrace.hpp"
30 #include "gc/shared/gcWhen.hpp"
31 #include "jfr/jfrEvents.hpp"
32 #include "runtime/os.hpp"
33 #include "utilities/macros.hpp"
34 
// All GC dependencies against the trace framework are contained within this file.
36 
// Integer type wide enough to carry a heap address in a JFR event payload.
using TraceAddress = uintptr_t;
38 
send_garbage_collection_event() const39 void GCTracer::send_garbage_collection_event() const {
40   EventGarbageCollection event(UNTIMED);
41   if (event.should_commit()) {
42     event.set_gcId(GCId::current());
43     event.set_name(_shared_gc_info.name());
44     event.set_cause((u2) _shared_gc_info.cause());
45     event.set_sumOfPauses(_shared_gc_info.sum_of_pauses());
46     event.set_longestPause(_shared_gc_info.longest_pause());
47     event.set_starttime(_shared_gc_info.start_timestamp());
48     event.set_endtime(_shared_gc_info.end_timestamp());
49     event.commit();
50   }
51 }
52 
send_reference_stats_event(ReferenceType type,size_t count) const53 void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const {
54   EventGCReferenceStatistics e;
55   if (e.should_commit()) {
56       e.set_gcId(GCId::current());
57       e.set_type((u1)type);
58       e.set_count(count);
59       e.commit();
60   }
61 }
62 
send_metaspace_chunk_free_list_summary(GCWhen::Type when,Metaspace::MetadataType mdtype,const MetaspaceChunkFreeListSummary & summary) const63 void GCTracer::send_metaspace_chunk_free_list_summary(GCWhen::Type when, Metaspace::MetadataType mdtype,
64                                                       const MetaspaceChunkFreeListSummary& summary) const {
65   EventMetaspaceChunkFreeListSummary e;
66   if (e.should_commit()) {
67     e.set_gcId(GCId::current());
68     e.set_when(when);
69     e.set_metadataType(mdtype);
70 
71     e.set_specializedChunks(summary.num_specialized_chunks());
72     e.set_specializedChunksTotalSize(summary.specialized_chunks_size_in_bytes());
73 
74     e.set_smallChunks(summary.num_small_chunks());
75     e.set_smallChunksTotalSize(summary.small_chunks_size_in_bytes());
76 
77     e.set_mediumChunks(summary.num_medium_chunks());
78     e.set_mediumChunksTotalSize(summary.medium_chunks_size_in_bytes());
79 
80     e.set_humongousChunks(summary.num_humongous_chunks());
81     e.set_humongousChunksTotalSize(summary.humongous_chunks_size_in_bytes());
82 
83     e.commit();
84   }
85 }
86 
send_parallel_old_event() const87 void ParallelOldTracer::send_parallel_old_event() const {
88   EventParallelOldGarbageCollection e(UNTIMED);
89   if (e.should_commit()) {
90     e.set_gcId(GCId::current());
91     e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix());
92     e.set_starttime(_shared_gc_info.start_timestamp());
93     e.set_endtime(_shared_gc_info.end_timestamp());
94     e.commit();
95   }
96 }
97 
send_young_gc_event() const98 void YoungGCTracer::send_young_gc_event() const {
99   EventYoungGarbageCollection e(UNTIMED);
100   if (e.should_commit()) {
101     e.set_gcId(GCId::current());
102     e.set_tenuringThreshold(_tenuring_threshold);
103     e.set_starttime(_shared_gc_info.start_timestamp());
104     e.set_endtime(_shared_gc_info.end_timestamp());
105     e.commit();
106   }
107 }
108 
should_send_promotion_in_new_plab_event() const109 bool YoungGCTracer::should_send_promotion_in_new_plab_event() const {
110   return EventPromoteObjectInNewPLAB::is_enabled();
111 }
112 
should_send_promotion_outside_plab_event() const113 bool YoungGCTracer::should_send_promotion_outside_plab_event() const {
114   return EventPromoteObjectOutsidePLAB::is_enabled();
115 }
116 
send_promotion_in_new_plab_event(Klass * klass,size_t obj_size,uint age,bool tenured,size_t plab_size) const117 void YoungGCTracer::send_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
118                                                      uint age, bool tenured,
119                                                      size_t plab_size) const {
120 
121   EventPromoteObjectInNewPLAB event;
122   if (event.should_commit()) {
123     event.set_gcId(GCId::current());
124     event.set_objectClass(klass);
125     event.set_objectSize(obj_size);
126     event.set_tenured(tenured);
127     event.set_tenuringAge(age);
128     event.set_plabSize(plab_size);
129     event.commit();
130   }
131 }
132 
send_promotion_outside_plab_event(Klass * klass,size_t obj_size,uint age,bool tenured) const133 void YoungGCTracer::send_promotion_outside_plab_event(Klass* klass, size_t obj_size,
134                                                       uint age, bool tenured) const {
135 
136   EventPromoteObjectOutsidePLAB event;
137   if (event.should_commit()) {
138     event.set_gcId(GCId::current());
139     event.set_objectClass(klass);
140     event.set_objectSize(obj_size);
141     event.set_tenured(tenured);
142     event.set_tenuringAge(age);
143     event.commit();
144   }
145 }
146 
send_old_gc_event() const147 void OldGCTracer::send_old_gc_event() const {
148   EventOldGarbageCollection e(UNTIMED);
149   if (e.should_commit()) {
150     e.set_gcId(GCId::current());
151     e.set_starttime(_shared_gc_info.start_timestamp());
152     e.set_endtime(_shared_gc_info.end_timestamp());
153     e.commit();
154   }
155 }
156 
to_struct(const CopyFailedInfo & cf_info)157 static JfrStructCopyFailed to_struct(const CopyFailedInfo& cf_info) {
158   JfrStructCopyFailed failed_info;
159   failed_info.set_objectCount(cf_info.failed_count());
160   failed_info.set_firstSize(cf_info.first_size());
161   failed_info.set_smallestSize(cf_info.smallest_size());
162   failed_info.set_totalSize(cf_info.total_size());
163   return failed_info;
164 }
165 
send_promotion_failed_event(const PromotionFailedInfo & pf_info) const166 void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
167   EventPromotionFailed e;
168   if (e.should_commit()) {
169     e.set_gcId(GCId::current());
170     e.set_promotionFailed(to_struct(pf_info));
171     e.set_thread(pf_info.thread_trace_id());
172     e.commit();
173   }
174 }
175 
// G1
send_concurrent_mode_failure_event()177 void OldGCTracer::send_concurrent_mode_failure_event() {
178   EventConcurrentModeFailure e;
179   if (e.should_commit()) {
180     e.set_gcId(GCId::current());
181     e.commit();
182   }
183 }
184 
to_struct(const VirtualSpaceSummary & summary)185 static JfrStructVirtualSpace to_struct(const VirtualSpaceSummary& summary) {
186   JfrStructVirtualSpace space;
187   space.set_start((TraceAddress)summary.start());
188   space.set_committedEnd((TraceAddress)summary.committed_end());
189   space.set_committedSize(summary.committed_size());
190   space.set_reservedEnd((TraceAddress)summary.reserved_end());
191   space.set_reservedSize(summary.reserved_size());
192   return space;
193 }
194 
to_struct(const SpaceSummary & summary)195 static JfrStructObjectSpace to_struct(const SpaceSummary& summary) {
196   JfrStructObjectSpace space;
197   space.set_start((TraceAddress)summary.start());
198   space.set_end((TraceAddress)summary.end());
199   space.set_used(summary.used());
200   space.set_size(summary.size());
201   return space;
202 }
203 
204 class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
205   GCWhen::Type _when;
206  public:
GCHeapSummaryEventSender(GCWhen::Type when)207   GCHeapSummaryEventSender(GCWhen::Type when) : _when(when) {}
208 
visit(const GCHeapSummary * heap_summary) const209   void visit(const GCHeapSummary* heap_summary) const {
210     const VirtualSpaceSummary& heap_space = heap_summary->heap();
211 
212     EventGCHeapSummary e;
213     if (e.should_commit()) {
214       e.set_gcId(GCId::current());
215       e.set_when((u1)_when);
216       e.set_heapSpace(to_struct(heap_space));
217       e.set_heapUsed(heap_summary->used());
218       e.commit();
219     }
220   }
221 
visit(const G1HeapSummary * g1_heap_summary) const222   void visit(const G1HeapSummary* g1_heap_summary) const {
223     visit((GCHeapSummary*)g1_heap_summary);
224 
225     EventG1HeapSummary e;
226     if (e.should_commit()) {
227       e.set_gcId(GCId::current());
228       e.set_when((u1)_when);
229       e.set_edenUsedSize(g1_heap_summary->edenUsed());
230       e.set_edenTotalSize(g1_heap_summary->edenCapacity());
231       e.set_survivorUsedSize(g1_heap_summary->survivorUsed());
232       e.set_numberOfRegions(g1_heap_summary->numberOfRegions());
233       e.commit();
234     }
235   }
236 
visit(const PSHeapSummary * ps_heap_summary) const237   void visit(const PSHeapSummary* ps_heap_summary) const {
238     visit((GCHeapSummary*)ps_heap_summary);
239 
240     const VirtualSpaceSummary& old_summary = ps_heap_summary->old();
241     const SpaceSummary& old_space = ps_heap_summary->old_space();
242     const VirtualSpaceSummary& young_summary = ps_heap_summary->young();
243     const SpaceSummary& eden_space = ps_heap_summary->eden();
244     const SpaceSummary& from_space = ps_heap_summary->from();
245     const SpaceSummary& to_space = ps_heap_summary->to();
246 
247     EventPSHeapSummary e;
248     if (e.should_commit()) {
249       e.set_gcId(GCId::current());
250       e.set_when((u1)_when);
251 
252       e.set_oldSpace(to_struct(ps_heap_summary->old()));
253       e.set_oldObjectSpace(to_struct(ps_heap_summary->old_space()));
254       e.set_youngSpace(to_struct(ps_heap_summary->young()));
255       e.set_edenSpace(to_struct(ps_heap_summary->eden()));
256       e.set_fromSpace(to_struct(ps_heap_summary->from()));
257       e.set_toSpace(to_struct(ps_heap_summary->to()));
258       e.commit();
259     }
260   }
261 };
262 
send_gc_heap_summary_event(GCWhen::Type when,const GCHeapSummary & heap_summary) const263 void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
264   GCHeapSummaryEventSender visitor(when);
265   heap_summary.accept(&visitor);
266 }
267 
to_struct(const MetaspaceStats & sizes)268 static JfrStructMetaspaceSizes to_struct(const MetaspaceStats& sizes) {
269   JfrStructMetaspaceSizes meta_sizes;
270   meta_sizes.set_committed(sizes.committed());
271   meta_sizes.set_used(sizes.used());
272   meta_sizes.set_reserved(sizes.reserved());
273   return meta_sizes;
274 }
275 
send_meta_space_summary_event(GCWhen::Type when,const MetaspaceSummary & meta_space_summary) const276 void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
277   EventMetaspaceSummary e;
278   if (e.should_commit()) {
279     e.set_gcId(GCId::current());
280     e.set_when((u1) when);
281     e.set_gcThreshold(meta_space_summary.capacity_until_GC());
282     e.set_metaspace(to_struct(meta_space_summary.stats())); // total stats (class + nonclass)
283     e.set_dataSpace(to_struct(meta_space_summary.stats().non_class_space_stats())); // "dataspace" aka non-class space
284     e.set_classSpace(to_struct(meta_space_summary.stats().class_space_stats()));
285     e.commit();
286   }
287 }
288 
289 class PhaseSender : public PhaseVisitor {
visit_pause(GCPhase * phase)290   void visit_pause(GCPhase* phase) {
291     assert(phase->level() < PhasesStack::PHASE_LEVELS, "Need more event types for PausePhase");
292 
293     switch (phase->level()) {
294       case 0: send_phase<EventGCPhasePause>(phase); break;
295       case 1: send_phase<EventGCPhasePauseLevel1>(phase); break;
296       case 2: send_phase<EventGCPhasePauseLevel2>(phase); break;
297       case 3: send_phase<EventGCPhasePauseLevel3>(phase); break;
298       case 4: send_phase<EventGCPhasePauseLevel4>(phase); break;
299       default: /* Ignore sending this phase */ break;
300     }
301   }
302 
visit_concurrent(GCPhase * phase)303   void visit_concurrent(GCPhase* phase) {
304     assert(phase->level() < 2, "There is only two levels for ConcurrentPhase");
305 
306     switch (phase->level()) {
307       case 0: send_phase<EventGCPhaseConcurrent>(phase); break;
308       case 1: send_phase<EventGCPhaseConcurrentLevel1>(phase); break;
309       default: /* Ignore sending this phase */ break;
310     }
311   }
312 
313  public:
314   template<typename T>
send_phase(GCPhase * phase)315   void send_phase(GCPhase* phase) {
316     T event(UNTIMED);
317     if (event.should_commit()) {
318       event.set_gcId(GCId::current());
319       event.set_name(phase->name());
320       event.set_starttime(phase->start());
321       event.set_endtime(phase->end());
322       event.commit();
323     }
324   }
325 
visit(GCPhase * phase)326   void visit(GCPhase* phase) {
327     if (phase->type() == GCPhase::PausePhaseType) {
328       visit_pause(phase);
329     } else {
330       assert(phase->type() == GCPhase::ConcurrentPhaseType, "Should be ConcurrentPhaseType");
331       visit_concurrent(phase);
332     }
333   }
334 };
335 
send_phase_events(TimePartitions * time_partitions) const336 void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
337   PhaseSender phase_reporter;
338 
339   TimePartitionPhasesIterator iter(time_partitions);
340   while (iter.has_next()) {
341     GCPhase* phase = iter.next();
342     phase->accept(&phase_reporter);
343   }
344 }
345 
346 #if INCLUDE_JFR
347 Ticks GCLockerTracer::_needs_gc_start_timestamp;
348 volatile jint GCLockerTracer::_jni_lock_count = 0;
349 volatile jint GCLockerTracer::_stall_count = 0;
350 
is_started()351 bool GCLockerTracer::is_started() {
352   return _needs_gc_start_timestamp != Ticks();
353 }
354 
start_gc_locker(const jint jni_lock_count)355 void GCLockerTracer::start_gc_locker(const jint jni_lock_count) {
356   assert(SafepointSynchronize::is_at_safepoint(), "sanity");
357   assert(!is_started(), "sanity");
358   assert(_jni_lock_count == 0, "sanity");
359   assert(_stall_count == 0, "sanity");
360   if (EventGCLocker::is_enabled()) {
361     _needs_gc_start_timestamp.stamp();
362     _jni_lock_count = jni_lock_count;
363   }
364 }
365 
inc_stall_count()366 void GCLockerTracer::inc_stall_count() {
367   if (is_started()) {
368     _stall_count++;
369   }
370 }
371 
report_gc_locker()372 void GCLockerTracer::report_gc_locker() {
373   if (is_started()) {
374     EventGCLocker event(UNTIMED);
375     if (event.should_commit()) {
376       event.set_starttime(_needs_gc_start_timestamp);
377       event.set_lockCount(_jni_lock_count);
378       event.set_stallCount(_stall_count);
379       event.commit();
380     }
381     // reset
382     _needs_gc_start_timestamp = Ticks();
383     _jni_lock_count = 0;
384     _stall_count = 0;
385 
386     assert(!is_started(), "sanity");
387   }
388 }
389 #endif
390