/*
 * log.c: mono log profiler
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Alex Rønne Petersen (alexrp@xamarin.com)
 *
 * Copyright 2010 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include <config.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/class-internals.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/metadata-internals.h>
#include <mono/metadata/mono-config.h>
#include <mono/metadata/mono-gc.h>
#include <mono/metadata/mono-perfcounters.h>
#include <mono/metadata/tabledefs.h>
#include <mono/mini/jit.h>
#include <mono/utils/atomic.h>
#include <mono/utils/hazard-pointer.h>
#include <mono/utils/lock-free-alloc.h>
#include <mono/utils/lock-free-queue.h>
#include <mono/utils/mono-conc-hashtable.h>
#include <mono/utils/mono-coop-mutex.h>
#include <mono/utils/mono-counters.h>
#include <mono/utils/mono-logger-internals.h>
#include <mono/utils/mono-linked-list-set.h>
#include <mono/utils/mono-membar.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-os-mutex.h>
#include <mono/utils/mono-os-semaphore.h>
#include <mono/utils/mono-threads.h>
#include <mono/utils/mono-threads-api.h>
#include "log.h"

#ifdef HAVE_DLFCN_H
#include <dlfcn.h>
#endif
#include <fcntl.h>
#ifdef HAVE_LINK_H
#include <link.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#if defined(__APPLE__)
#include <mach/mach_time.h>
#endif
#include <netinet/in.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#include <sys/socket.h>
#if defined (HAVE_SYS_ZLIB)
#include <zlib.h>
#endif

// Statistics for internal profiler data structures.
static gint32 sample_allocations_ctr,
    buffer_allocations_ctr;

// Statistics for profiler events.
static gint32 sync_points_ctr,
    heap_objects_ctr,
    heap_starts_ctr,
    heap_ends_ctr,
    heap_roots_ctr,
    gc_events_ctr,
    gc_resizes_ctr,
    gc_allocs_ctr,
    gc_moves_ctr,
    gc_handle_creations_ctr,
    gc_handle_deletions_ctr,
    finalize_begins_ctr,
    finalize_ends_ctr,
    finalize_object_begins_ctr,
    finalize_object_ends_ctr,
    image_loads_ctr,
    image_unloads_ctr,
    assembly_loads_ctr,
    assembly_unloads_ctr,
    class_loads_ctr,
    class_unloads_ctr,
    method_entries_ctr,
    method_exits_ctr,
    method_exception_exits_ctr,
    method_jits_ctr,
    code_buffers_ctr,
    exception_throws_ctr,
    exception_clauses_ctr,
    monitor_events_ctr,
    thread_starts_ctr,
    thread_ends_ctr,
    thread_names_ctr,
    domain_loads_ctr,
    domain_unloads_ctr,
    domain_names_ctr,
    context_loads_ctr,
    context_unloads_ctr,
    sample_ubins_ctr,
    sample_usyms_ctr,
    sample_hits_ctr,
    counter_descriptors_ctr,
    counter_samples_ctr,
    perfcounter_descriptors_ctr,
    perfcounter_samples_ctr,
    coverage_methods_ctr,
    coverage_statements_ctr,
    coverage_classes_ctr,
    coverage_assemblies_ctr;
// Pending data to be written to the log, for a single thread.
// Threads periodically flush their own LogBuffers by calling send_log_unsafe ().
typedef struct _LogBuffer LogBuffer;
struct _LogBuffer {
    // Next (older) LogBuffer in processing queue
    LogBuffer *next;

    uint64_t time_base;
    uint64_t last_time;
    uintptr_t ptr_base;
    uintptr_t method_base;
    uintptr_t last_method;
    uintptr_t obj_base;
    uintptr_t thread_id;

    // Bytes allocated for this LogBuffer
    int size;

    // Start of currently unused space in buffer
    unsigned char* cursor;

    // Pointer to start-of-structure-plus-size (for convenience)
    unsigned char* buf_end;

    // Start of data in buffer. Contents follow "buffer format" described above.
    unsigned char buf [1];
};

typedef struct {
    MonoLinkedListSetNode node;

    // Was this thread added to the LLS?
    gboolean attached;

    // The current log buffer for this thread.
    LogBuffer *buffer;

    // Methods referenced by events in `buffer`, see `MethodInfo`.
    GPtrArray *methods;

    // Current call depth for enter/leave events.
    int call_depth;

    // Indicates whether this thread is currently writing to its `buffer`.
    gboolean busy;

    // Has this thread written a thread end event to `buffer`?
    gboolean ended;

    // Stored in `buffer_lock_state` to take the exclusive lock.
    int small_id;
} MonoProfilerThread;

// Default value in `profiler_tls` for new threads.
#define MONO_PROFILER_THREAD_ZERO ((MonoProfilerThread *) NULL)

// This is written to `profiler_tls` to indicate that a thread has stopped.
#define MONO_PROFILER_THREAD_DEAD ((MonoProfilerThread *) -1)

// Do not use these TLS macros directly unless you know what you're doing.

#ifdef HOST_WIN32

#define PROF_TLS_SET(VAL) (TlsSetValue (profiler_tls, (VAL)))
#define PROF_TLS_GET() ((MonoProfilerThread *) TlsGetValue (profiler_tls))
#define PROF_TLS_INIT() (profiler_tls = TlsAlloc ())
#define PROF_TLS_FREE() (TlsFree (profiler_tls))

static DWORD profiler_tls;

#elif HAVE_KW_THREAD

#define PROF_TLS_SET(VAL) (profiler_tls = (VAL))
#define PROF_TLS_GET() (profiler_tls)
#define PROF_TLS_INIT()
#define PROF_TLS_FREE()

static __thread MonoProfilerThread *profiler_tls;

#else

#define PROF_TLS_SET(VAL) (pthread_setspecific (profiler_tls, (VAL)))
#define PROF_TLS_GET() ((MonoProfilerThread *) pthread_getspecific (profiler_tls))
#define PROF_TLS_INIT() (pthread_key_create (&profiler_tls, NULL))
#define PROF_TLS_FREE() (pthread_key_delete (profiler_tls))

static pthread_key_t profiler_tls;

#endif
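
/*
 * Note on backend selection (a summary of the #ifdef chain above): the Win32
 * TLS API is used on Windows, a native `__thread` variable where the
 * toolchain supports it (HAVE_KW_THREAD), and pthread keys as the portable
 * fallback. Code elsewhere in this file goes through init_thread () and
 * get_thread () rather than these macros, e.g. (illustrative sketch only):
 *
 *     MonoProfilerThread *t = get_thread (); // attaches the thread if needed
 *
 * Per the warning above, direct PROF_TLS_* use is reserved for the attach and
 * detach paths themselves.
 */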

static uintptr_t
thread_id (void)
{
    return (uintptr_t) mono_native_thread_id_get ();
}

static uintptr_t
process_id (void)
{
#ifdef HOST_WIN32
    return (uintptr_t) GetCurrentProcessId ();
#else
    return (uintptr_t) getpid ();
#endif
}

#define ENABLED(EVT) (!!(log_config.effective_mask & (EVT)))
#define ENABLE(EVT) do { log_config.effective_mask |= (EVT); } while (0)
#define DISABLE(EVT) do { log_config.effective_mask &= ~(EVT); } while (0)

/*
 * These macros should be used when writing an event to a log buffer. They
 * take care of a bunch of stuff that can be repetitive and error-prone, such
 * as attaching the current thread, acquiring/releasing the buffer lock,
 * incrementing the event counter, expanding the log buffer, etc. They also
 * create a scope so that it's harder to leak the LogBuffer pointer, which can
 * be problematic as the pointer is unstable when the buffer lock isn't
 * acquired.
 *
 * If the calling thread is already attached, these macros will not alter its
 * attach mode (i.e. whether it's added to the LLS). If the thread is not
 * attached, init_thread () will be called with add_to_lls = TRUE.
 */

#define ENTER_LOG(COUNTER, BUFFER, SIZE) \
    do { \
        MonoProfilerThread *thread__ = get_thread (); \
        g_assert (!thread__->busy && "Why are we trying to write a new event while already writing one?"); \
        thread__->busy = TRUE; \
        mono_atomic_inc_i32 ((COUNTER)); \
        if (thread__->attached) \
            buffer_lock (); \
        LogBuffer *BUFFER = ensure_logbuf_unsafe (thread__, (SIZE))

#define EXIT_LOG_EXPLICIT(SEND) \
        if ((SEND)) \
            send_log_unsafe (TRUE); \
        if (thread__->attached) \
            buffer_unlock (); \
        thread__->busy = FALSE; \
    } while (0)

// Pass these to EXIT_LOG_EXPLICIT () for easier reading.
#define DO_SEND TRUE
#define NO_SEND FALSE

#define EXIT_LOG EXIT_LOG_EXPLICIT (DO_SEND)
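
/*
 * A minimal usage sketch (illustrative only; `my_events_ctr` and the payload
 * are placeholders, not names defined in this file):
 *
 *     ENTER_LOG (&my_events_ctr, logbuffer,
 *         EVENT_SIZE // event code + timestamp
 *         + LEB128_SIZE // one integer payload value
 *     );
 *
 *     emit_event (logbuffer, TYPE_META | TYPE_SYNC_POINT);
 *     emit_value (logbuffer, 42);
 *
 *     EXIT_LOG;
 *
 * The SIZE argument is a worst-case byte count: ensure_logbuf_unsafe () grows
 * the buffer up front so the subsequent emit_* calls cannot run past buf_end.
 */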

typedef struct _BinaryObject BinaryObject;
struct _BinaryObject {
    BinaryObject *next;
    void *addr;
    char *name;
};

typedef struct MonoCounterAgent {
    MonoCounter *counter;
    // MonoCounterAgent-specific data:
    void *value;
    size_t value_size;
    guint32 index;
    gboolean emitted;
    struct MonoCounterAgent *next;
} MonoCounterAgent;

typedef struct _PerfCounterAgent PerfCounterAgent;
struct _PerfCounterAgent {
    PerfCounterAgent *next;
    guint32 index;
    char *category_name;
    char *name;
    gint64 value;
    gboolean emitted;
    gboolean updated;
    gboolean deleted;
};

struct _MonoProfiler {
    MonoProfilerHandle handle;

    FILE* file;
#if defined (HAVE_SYS_ZLIB)
    gzFile gzfile;
#endif

    char *args;
    uint64_t startup_time;
    int timer_overhead;

#ifdef __APPLE__
    mach_timebase_info_data_t timebase_info;
#elif defined (HOST_WIN32)
    LARGE_INTEGER pcounter_freq;
#endif

    int pipe_output;
    int command_port;
    int server_socket;
    int pipes [2];

    MonoLinkedListSet profiler_thread_list;
    volatile gint32 buffer_lock_state;
    volatile gint32 buffer_lock_exclusive_intent;

    volatile gint32 runtime_inited;
    volatile gint32 in_shutdown;

    MonoNativeThreadId helper_thread;

    MonoNativeThreadId writer_thread;
    volatile gint32 run_writer_thread;
    MonoLockFreeQueue writer_queue;
    MonoSemType writer_queue_sem;

    MonoLockFreeAllocSizeClass writer_entry_size_class;
    MonoLockFreeAllocator writer_entry_allocator;

    MonoConcurrentHashTable *method_table;
    mono_mutex_t method_table_mutex;

    MonoNativeThreadId dumper_thread;
    volatile gint32 run_dumper_thread;
    MonoLockFreeQueue dumper_queue;
    MonoSemType dumper_queue_sem;

    MonoLockFreeAllocSizeClass sample_size_class;
    MonoLockFreeAllocator sample_allocator;
    MonoLockFreeQueue sample_reuse_queue;

    BinaryObject *binary_objects;

    volatile gint32 heapshot_requested;
    guint64 gc_count;
    guint64 last_hs_time;
    gboolean do_heap_walk;

    mono_mutex_t counters_mutex;
    MonoCounterAgent *counters;
    PerfCounterAgent *perfcounters;
    guint32 counters_index;

    mono_mutex_t coverage_mutex;
    GPtrArray *coverage_data;

    GPtrArray *coverage_filters;
    MonoConcurrentHashTable *coverage_filtered_classes;
    MonoConcurrentHashTable *coverage_suppressed_assemblies;

    MonoConcurrentHashTable *coverage_methods;
    MonoConcurrentHashTable *coverage_assemblies;
    MonoConcurrentHashTable *coverage_classes;

    MonoConcurrentHashTable *coverage_image_to_methods;

    guint32 coverage_previous_offset;
    guint32 coverage_method_id;

    MonoCoopMutex api_mutex;
};

static ProfilerConfig log_config;
static struct _MonoProfiler log_profiler;

typedef struct {
    MonoLockFreeQueueNode node;
    GPtrArray *methods;
    LogBuffer *buffer;
} WriterQueueEntry;

#define WRITER_ENTRY_BLOCK_SIZE (mono_pagesize ())

typedef struct {
    MonoMethod *method;
    MonoJitInfo *ji;
    uint64_t time;
} MethodInfo;

#define TICKS_PER_SEC 1000000000LL

static uint64_t
current_time (void)
{
#ifdef __APPLE__
    uint64_t time = mach_absolute_time ();

    time *= log_profiler.timebase_info.numer;
    time /= log_profiler.timebase_info.denom;

    return time;
#elif defined (HOST_WIN32)
    LARGE_INTEGER value;

    QueryPerformanceCounter (&value);

    return value.QuadPart * TICKS_PER_SEC / log_profiler.pcounter_freq.QuadPart;
#elif defined (CLOCK_MONOTONIC)
    struct timespec tspec;

    clock_gettime (CLOCK_MONOTONIC, &tspec);

    return ((uint64_t) tspec.tv_sec * TICKS_PER_SEC + tspec.tv_nsec);
#else
    struct timeval tv;

    gettimeofday (&tv, NULL);

    return ((uint64_t) tv.tv_sec * TICKS_PER_SEC + tv.tv_usec * 1000);
#endif
}

static void
init_time (void)
{
#ifdef __APPLE__
    mach_timebase_info (&log_profiler.timebase_info);
#elif defined (HOST_WIN32)
    QueryPerformanceFrequency (&log_profiler.pcounter_freq);
#endif

    uint64_t time_start = current_time ();

    for (int i = 0; i < 256; ++i)
        current_time ();

    uint64_t time_end = current_time ();

    log_profiler.timer_overhead = (time_end - time_start) / 256;
}

static char*
pstrdup (const char *s)
{
    int len = strlen (s) + 1;
    char *p = (char *) g_malloc (len);
    memcpy (p, s, len);
    return p;
}

#define BUFFER_SIZE (4096 * 16)

/* Worst-case size in bytes of a 64-bit value encoded with LEB128. */
#define LEB128_SIZE 10

/* Size of a value encoded as a single byte. */
#undef BYTE_SIZE // mach/i386/vm_param.h on OS X defines this to 8, but it isn't used for anything.
#define BYTE_SIZE 1

/* Size in bytes of the event prefix (ID + time). */
#define EVENT_SIZE (BYTE_SIZE + LEB128_SIZE)
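
/*
 * Example: an event carrying one pointer and one NUL-terminated string of
 * nlen bytes would reserve
 *
 *     EVENT_SIZE    // event code + time delta
 *     + LEB128_SIZE // the pointer
 *     + nlen        // the string bytes
 *
 * i.e. always the worst case; the actual LEB128 encodings written are
 * usually much shorter than LEB128_SIZE.
 */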

static void *
alloc_buffer (int size)
{
    return mono_valloc (NULL, size, MONO_MMAP_READ | MONO_MMAP_WRITE | MONO_MMAP_ANON | MONO_MMAP_PRIVATE, MONO_MEM_ACCOUNT_PROFILER);
}

static void
free_buffer (void *buf, int size)
{
    mono_vfree (buf, size, MONO_MEM_ACCOUNT_PROFILER);
}

static LogBuffer*
create_buffer (uintptr_t tid, int bytes)
{
    LogBuffer* buf = (LogBuffer *) alloc_buffer (MAX (BUFFER_SIZE, bytes));

    mono_atomic_inc_i32 (&buffer_allocations_ctr);
    // Record the actual allocation size so buf_end matches what was mapped.
    buf->size = MAX (BUFFER_SIZE, bytes);
    buf->time_base = current_time ();
    buf->last_time = buf->time_base;
    buf->buf_end = (unsigned char *) buf + buf->size;
    buf->cursor = buf->buf;
    buf->thread_id = tid;

    return buf;
}

/*
 * Must be called with the reader lock held if thread is the current thread, or
 * the exclusive lock if thread is a different thread. However, if thread is
 * the current thread, and init_thread () was called with add_to_lls = FALSE,
 * then no locking is necessary.
 */
static void
init_buffer_state (MonoProfilerThread *thread)
{
    thread->buffer = create_buffer (thread->node.key, 0);
    thread->methods = NULL;
}

static void
clear_hazard_pointers (MonoThreadHazardPointers *hp)
{
    mono_hazard_pointer_clear (hp, 0);
    mono_hazard_pointer_clear (hp, 1);
    mono_hazard_pointer_clear (hp, 2);
}

static MonoProfilerThread *
init_thread (gboolean add_to_lls)
{
    MonoProfilerThread *thread = PROF_TLS_GET ();

    g_assert (thread != MONO_PROFILER_THREAD_DEAD && "Why are we trying to resurrect a stopped thread?");

    /*
     * Sometimes we may try to initialize a thread twice. One example is the
     * main thread: We initialize it when setting up the profiler, but we will
     * also get a thread_start () callback for it. Another example is when
     * attaching new threads to the runtime: We may get a gc_alloc () callback
     * for that thread's thread object (where we initialize it), soon followed
     * by a thread_start () callback.
     *
     * These cases are harmless anyhow. Just return if we've already done the
     * initialization work.
     */
    if (thread != MONO_PROFILER_THREAD_ZERO)
        return thread;

    thread = g_malloc (sizeof (MonoProfilerThread));
    thread->node.key = thread_id ();
    thread->attached = add_to_lls;
    thread->call_depth = 0;
    thread->busy = FALSE;
    thread->ended = FALSE;

    init_buffer_state (thread);

    thread->small_id = mono_thread_info_register_small_id ();

    /*
     * Some internal profiler threads don't need to be cleaned up
     * by the main thread on shutdown.
     */
    if (add_to_lls) {
        MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
        g_assert (mono_lls_insert (&log_profiler.profiler_thread_list, hp, &thread->node) && "Why can't we insert the thread in the LLS?");
        clear_hazard_pointers (hp);
    }

    PROF_TLS_SET (thread);

    return thread;
}

// Only valid if init_thread () was called with add_to_lls = FALSE.
static void
deinit_thread (MonoProfilerThread *thread)
{
    g_assert (!thread->attached && "Why are we manually freeing an attached thread?");

    g_free (thread);
    PROF_TLS_SET (MONO_PROFILER_THREAD_DEAD);
}

static MonoProfilerThread *
get_thread (void)
{
    return init_thread (TRUE);
}

// Only valid if init_thread () was called with add_to_lls = FALSE.
static LogBuffer *
ensure_logbuf_unsafe (MonoProfilerThread *thread, int bytes)
{
    LogBuffer *old = thread->buffer;

    if (old->cursor + bytes < old->buf_end)
        return old;

    LogBuffer *new_ = create_buffer (thread->node.key, bytes);
    new_->next = old;
    thread->buffer = new_;

    return new_;
}

/*
 * This is a reader/writer spin lock of sorts used to protect log buffers.
 * When a thread modifies its own log buffer, it increments the reader
 * count. When a thread wants to access log buffers of other threads, it
 * takes the exclusive lock.
 *
 * `buffer_lock_state` holds the reader count in its lower 16 bits, and
 * the small ID of the thread currently holding the exclusive (writer)
 * lock in its upper 16 bits. Both can be zero. It's important that the
 * whole lock state is a single word that can be read/written atomically
 * to avoid race conditions where there could end up being readers while
 * the writer lock is held.
 *
 * The lock is writer-biased. When a thread wants to take the exclusive
 * lock, it increments `buffer_lock_exclusive_intent` which will make new
 * readers spin until it's back to zero, then takes the exclusive lock
 * once the reader count has reached zero. After releasing the exclusive
 * lock, it decrements `buffer_lock_exclusive_intent`, which, when it
 * reaches zero again, allows readers to increment the reader count.
 *
 * The writer bias is necessary because we take the exclusive lock in
 * `gc_event ()` during STW. If the writer bias was not there, and a
 * program had a large number of threads, STW-induced pauses could be
 * significantly longer than they have to be. Also, we emit periodic
 * sync points from the helper thread, which requires taking the
 * exclusive lock, and we need those to arrive with a reasonably
 * consistent frequency so that readers don't have to queue up too many
 * events between sync points.
 *
 * The lock does not support recursion.
 */
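
/*
 * Lock state layout, as a sketch of the 32-bit word described above:
 *
 *     bits 31..16: small ID of the exclusive holder (0 = not held)
 *     bits 15..0:  reader count
 *
 * For example, a state of (5 << 16) means the thread with small ID 5 holds
 * the exclusive lock and no readers are active, which is exactly the state
 * buffer_lock_excl () installs via CAS below.
 */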

static void
buffer_lock (void)
{
    /*
     * If the thread holding the exclusive lock tries to modify the
     * reader count, just make it a no-op. This way, we also avoid
     * invoking the GC safe point macros below, which could break if
     * done from a thread that is currently the initiator of STW.
     *
     * In other words, we rely on the fact that the GC thread takes
     * the exclusive lock in the gc_event () callback when the world
     * is about to stop.
     */
    if (mono_atomic_load_i32 (&log_profiler.buffer_lock_state) != get_thread ()->small_id << 16) {
        MONO_ENTER_GC_SAFE;

        gint32 old, new_;

        do {
        restart:
            // Hold off if a thread wants to take the exclusive lock.
            while (mono_atomic_load_i32 (&log_profiler.buffer_lock_exclusive_intent))
                mono_thread_info_yield ();

            old = mono_atomic_load_i32 (&log_profiler.buffer_lock_state);

            // Is a thread holding the exclusive lock?
            if (old >> 16) {
                mono_thread_info_yield ();
                goto restart;
            }

            new_ = old + 1;
        } while (mono_atomic_cas_i32 (&log_profiler.buffer_lock_state, new_, old) != old);

        MONO_EXIT_GC_SAFE;
    }

    mono_memory_barrier ();
}

static void
buffer_unlock (void)
{
    mono_memory_barrier ();

    gint32 state = mono_atomic_load_i32 (&log_profiler.buffer_lock_state);

    // See the comment in buffer_lock ().
    if (state == get_thread ()->small_id << 16)
        return;

    g_assert (state && "Why are we decrementing a zero reader count?");
    g_assert (!(state >> 16) && "Why is the exclusive lock held?");

    mono_atomic_dec_i32 (&log_profiler.buffer_lock_state);
}

static void
buffer_lock_excl (void)
{
    gint32 new_ = get_thread ()->small_id << 16;

    g_assert (mono_atomic_load_i32 (&log_profiler.buffer_lock_state) != new_ && "Why are we taking the exclusive lock twice?");

    mono_atomic_inc_i32 (&log_profiler.buffer_lock_exclusive_intent);

    MONO_ENTER_GC_SAFE;

    while (mono_atomic_cas_i32 (&log_profiler.buffer_lock_state, new_, 0))
        mono_thread_info_yield ();

    MONO_EXIT_GC_SAFE;

    mono_memory_barrier ();
}

static void
buffer_unlock_excl (void)
{
    mono_memory_barrier ();

    gint32 state = mono_atomic_load_i32 (&log_profiler.buffer_lock_state);
    gint32 excl = state >> 16;

    g_assert (excl && "Why is the exclusive lock not held?");
    g_assert (excl == get_thread ()->small_id && "Why does another thread hold the exclusive lock?");
    g_assert (!(state & 0xFFFF) && "Why are there readers when the exclusive lock is held?");

    mono_atomic_store_i32 (&log_profiler.buffer_lock_state, 0);
    mono_atomic_dec_i32 (&log_profiler.buffer_lock_exclusive_intent);
}

static void
encode_uleb128 (uint64_t value, uint8_t *buf, uint8_t **endbuf)
{
    uint8_t *p = buf;

    do {
        uint8_t b = value & 0x7f;
        value >>= 7;

        if (value != 0) /* more bytes to come */
            b |= 0x80;

        *p++ = b;
    } while (value);

    *endbuf = p;
}
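
/*
 * Worked example: 300 (binary 100101100) encodes as the two bytes 0xAC 0x02;
 * the low seven bits (0101100) are emitted first with the continuation bit
 * (0x80) set, then the remaining bits (0000010) in a final byte.
 */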

static void
encode_sleb128 (intptr_t value, uint8_t *buf, uint8_t **endbuf)
{
    int more = 1;
    int negative = (value < 0);
    unsigned int size = sizeof (intptr_t) * 8;
    uint8_t byte;
    uint8_t *p = buf;

    while (more) {
        byte = value & 0x7f;
        value >>= 7;

        /* the following is unnecessary if the
         * implementation of >>= uses an arithmetic rather
         * than logical shift for a signed left operand
         */
        if (negative)
            /* sign extend */
            value |= - ((intptr_t) 1 << (size - 7));

        /* sign bit of byte is second high order bit (0x40) */
        if ((value == 0 && !(byte & 0x40)) ||
            (value == -1 && (byte & 0x40)))
            more = 0;
        else
            byte |= 0x80;

        *p++ = byte;
    }

    *endbuf = p;
}
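
/*
 * Worked example: -2 encodes as the single byte 0x7E. After emitting the low
 * seven bits (1111110), the arithmetic shift leaves value at -1 and bit 0x40
 * of the byte is set, so the termination test above stops the loop.
 */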

static void
emit_byte (LogBuffer *logbuffer, int value)
{
    logbuffer->cursor [0] = value;
    logbuffer->cursor++;

    g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

static void
emit_value (LogBuffer *logbuffer, int value)
{
    encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);

    g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

static void
emit_time (LogBuffer *logbuffer, uint64_t value)
{
    uint64_t tdiff = value - logbuffer->last_time;
    encode_uleb128 (tdiff, logbuffer->cursor, &logbuffer->cursor);
    logbuffer->last_time = value;

    g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

static void
emit_event_time (LogBuffer *logbuffer, int event, uint64_t time)
{
    emit_byte (logbuffer, event);
    emit_time (logbuffer, time);
}

static void
emit_event (LogBuffer *logbuffer, int event)
{
    emit_event_time (logbuffer, event, current_time ());
}

static void
emit_svalue (LogBuffer *logbuffer, int64_t value)
{
    encode_sleb128 (value, logbuffer->cursor, &logbuffer->cursor);

    g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

static void
emit_uvalue (LogBuffer *logbuffer, uint64_t value)
{
    encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);

    g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

static void
emit_ptr (LogBuffer *logbuffer, const void *ptr)
{
    if (!logbuffer->ptr_base)
        logbuffer->ptr_base = (uintptr_t) ptr;

    emit_svalue (logbuffer, (intptr_t) ptr - logbuffer->ptr_base);

    g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

static void
emit_method_inner (LogBuffer *logbuffer, void *method)
{
    if (!logbuffer->method_base) {
        logbuffer->method_base = (intptr_t) method;
        logbuffer->last_method = (intptr_t) method;
    }

    encode_sleb128 ((intptr_t) ((char *) method - (char *) logbuffer->last_method), logbuffer->cursor, &logbuffer->cursor);
    logbuffer->last_method = (intptr_t) method;

    g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}
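
/*
 * Like emit_ptr () and emit_obj (), this relies on delta encoding: the first
 * method pointer written to a buffer establishes method_base, and every
 * subsequent method is written as a signed delta from the previously written
 * one. Since JIT-compiled method descriptors tend to sit close together in
 * memory, the deltas are usually one or two LEB128 bytes instead of a full
 * 8-byte pointer.
 */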

// The reader lock must be held.
static void
register_method_local (MonoMethod *method, MonoJitInfo *ji)
{
    MonoProfilerThread *thread = get_thread ();

    if (!mono_conc_hashtable_lookup (log_profiler.method_table, method)) {
        MethodInfo *info = (MethodInfo *) g_malloc (sizeof (MethodInfo));

        info->method = method;
        info->ji = ji;
        info->time = current_time ();

        GPtrArray *arr = thread->methods ? thread->methods : (thread->methods = g_ptr_array_new ());
        g_ptr_array_add (arr, info);
    }
}

static void
emit_method (LogBuffer *logbuffer, MonoMethod *method)
{
    register_method_local (method, NULL);
    emit_method_inner (logbuffer, method);
}

static void
emit_obj (LogBuffer *logbuffer, void *ptr)
{
    if (!logbuffer->obj_base)
        logbuffer->obj_base = (uintptr_t) ptr >> 3;

    emit_svalue (logbuffer, ((uintptr_t) ptr >> 3) - logbuffer->obj_base);

    g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

static void
emit_string (LogBuffer *logbuffer, const char *str, size_t size)
{
    size_t i = 0;
    if (str) {
        for (; i < size; i++) {
            if (str [i] == '\0')
                break;
            emit_byte (logbuffer, str [i]);
        }
    }
    emit_byte (logbuffer, '\0');
}

static void
emit_double (LogBuffer *logbuffer, double value)
{
    int i;
    unsigned char buffer [8];
    memcpy (buffer, &value, 8);
#if G_BYTE_ORDER == G_BIG_ENDIAN
    for (i = 7; i >= 0; i--)
#else
    for (i = 0; i < 8; i++)
#endif
        emit_byte (logbuffer, buffer [i]);
}

static char*
write_int16 (char *buf, int32_t value)
{
    int i;
    for (i = 0; i < 2; ++i) {
        buf [i] = value;
        value >>= 8;
    }
    return buf + 2;
}

static char*
write_int32 (char *buf, int32_t value)
{
    int i;
    for (i = 0; i < 4; ++i) {
        buf [i] = value;
        value >>= 8;
    }
    return buf + 4;
}

static char*
write_int64 (char *buf, int64_t value)
{
    int i;
    for (i = 0; i < 8; ++i) {
        buf [i] = value;
        value >>= 8;
    }
    return buf + 8;
}

static char *
write_header_string (char *p, const char *str)
{
    size_t len = strlen (str) + 1;

    p = write_int32 (p, len);
    strcpy (p, str);

    return p + len;
}

static void
dump_header (void)
{
    const char *args = log_profiler.args;
    const char *arch = mono_config_get_cpu ();
    const char *os = mono_config_get_os ();

    char *hbuf = g_malloc (
        sizeof (gint32) /* header id */ +
        sizeof (gint8) /* major version */ +
        sizeof (gint8) /* minor version */ +
        sizeof (gint8) /* data version */ +
        sizeof (gint8) /* word size */ +
        sizeof (gint64) /* startup time */ +
        sizeof (gint32) /* timer overhead */ +
        sizeof (gint32) /* flags */ +
        sizeof (gint32) /* process id */ +
        sizeof (gint16) /* command port */ +
        sizeof (gint32) + strlen (args) + 1 /* arguments */ +
        sizeof (gint32) + strlen (arch) + 1 /* architecture */ +
        sizeof (gint32) + strlen (os) + 1 /* operating system */
    );
    char *p = hbuf;

    p = write_int32 (p, LOG_HEADER_ID);
    *p++ = LOG_VERSION_MAJOR;
    *p++ = LOG_VERSION_MINOR;
    *p++ = LOG_DATA_VERSION;
    *p++ = sizeof (void *);
    p = write_int64 (p, ((uint64_t) time (NULL)) * 1000);
    p = write_int32 (p, log_profiler.timer_overhead);
    p = write_int32 (p, 0); /* flags */
    p = write_int32 (p, process_id ());
    p = write_int16 (p, log_profiler.command_port);
    p = write_header_string (p, args);
    p = write_header_string (p, arch);
    p = write_header_string (p, os);

#if defined (HAVE_SYS_ZLIB)
    if (log_profiler.gzfile) {
        gzwrite (log_profiler.gzfile, hbuf, p - hbuf);
    } else
#endif
    {
        fwrite (hbuf, p - hbuf, 1, log_profiler.file);
        fflush (log_profiler.file);
    }

    g_free (hbuf);
}

/*
 * Must be called with the reader lock held if thread is the current thread, or
 * the exclusive lock if thread is a different thread. However, if thread is
 * the current thread, and init_thread () was called with add_to_lls = FALSE,
 * then no locking is necessary.
 */
static void
send_buffer (MonoProfilerThread *thread)
{
    WriterQueueEntry *entry = mono_lock_free_alloc (&log_profiler.writer_entry_allocator);
    entry->methods = thread->methods;
    entry->buffer = thread->buffer;

    mono_lock_free_queue_node_init (&entry->node, FALSE);

    mono_lock_free_queue_enqueue (&log_profiler.writer_queue, &entry->node);
    mono_os_sem_post (&log_profiler.writer_queue_sem);
}
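
/*
 * This is the producer side of the write-out pipeline: entries are pushed
 * onto the lock-free writer_queue and the writer thread is woken via
 * writer_queue_sem to drain them, so event-emitting threads hand off their
 * buffers without ever blocking on file I/O themselves.
 */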

static void
free_thread (gpointer p)
{
    MonoProfilerThread *thread = p;

    if (!thread->ended) {
        /*
         * The thread is being cleaned up by the main thread during
         * shutdown. This typically happens for internal runtime
         * threads. We need to synthesize a thread end event.
         */

        mono_atomic_inc_i32 (&thread_ends_ctr);

        LogBuffer *buf = ensure_logbuf_unsafe (thread,
            EVENT_SIZE /* event */ +
            BYTE_SIZE /* type */ +
            LEB128_SIZE /* tid */
        );

        emit_event (buf, TYPE_END_UNLOAD | TYPE_METADATA);
        emit_byte (buf, TYPE_THREAD);
        emit_ptr (buf, (void *) thread->node.key);
    }

    send_buffer (thread);

    g_free (thread);
}

static void
remove_thread (MonoProfilerThread *thread)
{
    MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();

    if (mono_lls_remove (&log_profiler.profiler_thread_list, hp, &thread->node))
        mono_thread_hazardous_try_free (thread, free_thread);

    clear_hazard_pointers (hp);
}

static void
dump_buffer (LogBuffer *buf)
{
    char hbuf [128];
    char *p = hbuf;

    if (buf->next)
        dump_buffer (buf->next);

    if (buf->cursor - buf->buf) {
        p = write_int32 (p, BUF_ID);
        p = write_int32 (p, buf->cursor - buf->buf);
        p = write_int64 (p, buf->time_base);
        p = write_int64 (p, buf->ptr_base);
        p = write_int64 (p, buf->obj_base);
        p = write_int64 (p, buf->thread_id);
        p = write_int64 (p, buf->method_base);

#if defined (HAVE_SYS_ZLIB)
        if (log_profiler.gzfile) {
            gzwrite (log_profiler.gzfile, hbuf, p - hbuf);
            gzwrite (log_profiler.gzfile, buf->buf, buf->cursor - buf->buf);
        } else
#endif
        {
            fwrite (hbuf, p - hbuf, 1, log_profiler.file);
            fwrite (buf->buf, buf->cursor - buf->buf, 1, log_profiler.file);
            fflush (log_profiler.file);
        }
    }

    free_buffer (buf, buf->size);
}

static void
dump_buffer_threadless (LogBuffer *buf)
{
    for (LogBuffer *iter = buf; iter; iter = iter->next)
        iter->thread_id = 0;

    dump_buffer (buf);
}

// Only valid if init_thread () was called with add_to_lls = FALSE.
static void
send_log_unsafe (gboolean if_needed)
{
    MonoProfilerThread *thread = get_thread ();

    if (!if_needed || thread->buffer->next) {
        if (!thread->attached)
            for (LogBuffer *iter = thread->buffer; iter; iter = iter->next)
                iter->thread_id = 0;

        send_buffer (thread);
        init_buffer_state (thread);
    }
}

// Assumes that the exclusive lock is held.
static void
sync_point_flush (void)
{
    g_assert (mono_atomic_load_i32 (&log_profiler.buffer_lock_state) == get_thread ()->small_id << 16 && "Why don't we hold the exclusive lock?");

    MONO_LLS_FOREACH_SAFE (&log_profiler.profiler_thread_list, MonoProfilerThread, thread) {
        g_assert (thread->attached && "Why is a thread in the LLS not attached?");

        send_buffer (thread);
        init_buffer_state (thread);
    } MONO_LLS_FOREACH_SAFE_END
}

// Assumes that the exclusive lock is held.
static void
sync_point_mark (MonoProfilerSyncPointType type)
{
    g_assert (mono_atomic_load_i32 (&log_profiler.buffer_lock_state) == get_thread ()->small_id << 16 && "Why don't we hold the exclusive lock?");

    ENTER_LOG (&sync_points_ctr, logbuffer,
        EVENT_SIZE /* event */ +
        BYTE_SIZE /* type */
    );

    emit_event (logbuffer, TYPE_META | TYPE_SYNC_POINT);
    emit_byte (logbuffer, type);

    EXIT_LOG_EXPLICIT (NO_SEND);

    send_log_unsafe (FALSE);
}

// Assumes that the exclusive lock is held.
static void
sync_point (MonoProfilerSyncPointType type)
{
    sync_point_flush ();
    sync_point_mark (type);
}

static int
gc_reference (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
{
    /* account for object alignment in the heap */
    size += 7;
    size &= ~7;

    ENTER_LOG (&heap_objects_ctr, logbuffer,
        EVENT_SIZE /* event */ +
        LEB128_SIZE /* obj */ +
        LEB128_SIZE /* klass */ +
        LEB128_SIZE /* size */ +
        LEB128_SIZE /* num */ +
        num * (
            LEB128_SIZE /* offset */ +
            LEB128_SIZE /* ref */
        )
    );

    emit_event (logbuffer, TYPE_HEAP_OBJECT | TYPE_HEAP);
    emit_obj (logbuffer, obj);
    emit_ptr (logbuffer, klass);
    emit_value (logbuffer, size);
    emit_value (logbuffer, num);

    uintptr_t last_offset = 0;

    for (int i = 0; i < num; ++i) {
        emit_value (logbuffer, offsets [i] - last_offset);
        last_offset = offsets [i];
        emit_obj (logbuffer, refs [i]);
    }

    EXIT_LOG;

    return 0;
}

static void
gc_roots (MonoProfiler *prof, MonoObject *const *objects, const MonoProfilerGCRootType *root_types, const uintptr_t *extra_info, uint64_t num)
{
    ENTER_LOG (&heap_roots_ctr, logbuffer,
        EVENT_SIZE /* event */ +
        LEB128_SIZE /* num */ +
        LEB128_SIZE /* collections */ +
        num * (
            LEB128_SIZE /* object */ +
            LEB128_SIZE /* root type */ +
            LEB128_SIZE /* extra info */
        )
    );

    emit_event (logbuffer, TYPE_HEAP_ROOT | TYPE_HEAP);
    emit_value (logbuffer, num);
    emit_value (logbuffer, mono_gc_collection_count (mono_gc_max_generation ()));

    for (int i = 0; i < num; ++i) {
        emit_obj (logbuffer, objects [i]);
        emit_value (logbuffer, root_types [i]);
        emit_value (logbuffer, extra_info [i]);
    }

    EXIT_LOG;
}

static void
trigger_heapshot (void)
{
    // Rely on the finalization callback triggering a GC.
    mono_atomic_store_i32 (&log_profiler.heapshot_requested, 1);
    mono_gc_finalize_notify ();
}

static void
process_heapshot (void)
{
    if (mono_atomic_load_i32 (&log_profiler.heapshot_requested))
        mono_gc_collect (mono_gc_max_generation ());
}
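
/*
 * Heapshot flow, in short: trigger_heapshot () sets heapshot_requested and
 * pokes the finalizer thread; finalize_end () later calls process_heapshot (),
 * which forces a full GC; gc_event () below then sees the flag and performs
 * the actual heap walk between the HEAP_START and HEAP_END events.
 */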

#define ALL_GC_EVENTS_MASK (PROFLOG_GC_EVENTS | PROFLOG_GC_MOVE_EVENTS | PROFLOG_GC_ROOT_EVENTS)

static void
gc_event (MonoProfiler *profiler, MonoProfilerGCEvent ev, uint32_t generation)
{
    if (ENABLED (PROFLOG_GC_EVENTS)) {
        ENTER_LOG (&gc_events_ctr, logbuffer,
            EVENT_SIZE /* event */ +
            BYTE_SIZE /* gc event */ +
            BYTE_SIZE /* generation */
        );

        emit_event (logbuffer, TYPE_GC_EVENT | TYPE_GC);
        emit_byte (logbuffer, ev);
        emit_byte (logbuffer, generation);

        EXIT_LOG;
    }

    switch (ev) {
    case MONO_GC_EVENT_PRE_STOP_WORLD_LOCKED:
        switch (log_config.hs_mode) {
        case MONO_PROFILER_HEAPSHOT_NONE:
            log_profiler.do_heap_walk = FALSE;
            break;
        case MONO_PROFILER_HEAPSHOT_MAJOR:
            log_profiler.do_heap_walk = generation == mono_gc_max_generation ();
            break;
        case MONO_PROFILER_HEAPSHOT_ON_DEMAND:
            // Handled below.
            break;
        case MONO_PROFILER_HEAPSHOT_X_GC:
            log_profiler.do_heap_walk = !(log_profiler.gc_count % log_config.hs_freq_gc);
            break;
        case MONO_PROFILER_HEAPSHOT_X_MS:
            // Convert the elapsed time from nanoseconds to milliseconds.
            log_profiler.do_heap_walk = (current_time () - log_profiler.last_hs_time) / 1000 / 1000 >= log_config.hs_freq_ms;
1289 break;
1290 default:
1291 g_assert_not_reached ();
1292 }
1293
1294 /*
1295 * heapshot_requested is set either because a heapshot was triggered
1296 * manually (through the API or command server) or because we're doing
1297 * a shutdown heapshot. Either way, a manually triggered heapshot
1298 * overrides any decision we made in the switch above.
1299 */
1300 if (mono_atomic_load_i32 (&log_profiler.heapshot_requested))
1301 log_profiler.do_heap_walk = TRUE;
1302
1303 if (ENABLED (PROFLOG_GC_ROOT_EVENTS) &&
1304 (log_config.always_do_root_report || log_profiler.do_heap_walk))
1305 mono_profiler_set_gc_roots_callback (log_profiler.handle, gc_roots);
1306
1307 /*
1308 * Ensure that no thread can be in the middle of writing to
1309 * a buffer when the world stops...
1310 */
1311 buffer_lock_excl ();
1312
1313 break;
1314 case MONO_GC_EVENT_POST_STOP_WORLD:
1315 /*
1316 * ... So that we now have a consistent view of all buffers.
1317 * This allows us to flush them. We need to do this because
1318 * they may contain object allocation events that need to be
1319 * committed to the log file before any object move events
1320 * that will be produced during this GC.
1321 */
1322 if (ENABLED (ALL_GC_EVENTS_MASK))
1323 sync_point (SYNC_POINT_WORLD_STOP);
1324
1325 // Surround heapshots with HEAP_START/HEAP_END events.
1326 if (log_profiler.do_heap_walk) {
1327 ENTER_LOG (&heap_starts_ctr, logbuffer,
1328 EVENT_SIZE /* event */
1329 );
1330
1331 emit_event (logbuffer, TYPE_HEAP_START | TYPE_HEAP);
1332
1333 EXIT_LOG;
1334 }
1335
1336 break;
1337 case MONO_GC_EVENT_START:
1338 if (generation == mono_gc_max_generation ())
1339 log_profiler.gc_count++;
1340
1341 break;
1342 case MONO_GC_EVENT_PRE_START_WORLD:
1343 mono_profiler_set_gc_roots_callback (log_profiler.handle, NULL);
1344
1345 if (log_profiler.do_heap_walk) {
1346 mono_gc_walk_heap (0, gc_reference, NULL);
1347
1348 ENTER_LOG (&heap_ends_ctr, logbuffer,
1349 EVENT_SIZE /* event */
1350 );
1351
1352 emit_event (logbuffer, TYPE_HEAP_END | TYPE_HEAP);
1353
1354 EXIT_LOG;
1355
1356 log_profiler.do_heap_walk = FALSE;
1357 log_profiler.last_hs_time = current_time ();
1358
1359 mono_atomic_store_i32 (&log_profiler.heapshot_requested, 0);
1360 }
1361
1362 /*
1363 * Similarly, we must now make sure that any object moves
1364 * written to the GC thread's buffer are flushed. Otherwise,
1365 * object allocation events for certain addresses could come
1366 * after the move events that made those addresses available.
1367 */
1368 if (ENABLED (ALL_GC_EVENTS_MASK))
1369 sync_point_mark (SYNC_POINT_WORLD_START);
1370
1371 break;
1372 case MONO_GC_EVENT_POST_START_WORLD_UNLOCKED:
1373 /*
1374 * Finally, it is safe to allow other threads to write to
1375 * their buffers again.
1376 */
1377 buffer_unlock_excl ();
1378
1379 break;
1380 default:
1381 break;
1382 }
1383 }
1384
1385 static void
gc_resize(MonoProfiler * profiler,uintptr_t new_size)1386 gc_resize (MonoProfiler *profiler, uintptr_t new_size)
1387 {
1388 ENTER_LOG (&gc_resizes_ctr, logbuffer,
1389 EVENT_SIZE /* event */ +
1390 LEB128_SIZE /* new size */
1391 );
1392
1393 emit_event (logbuffer, TYPE_GC_RESIZE | TYPE_GC);
1394 emit_value (logbuffer, new_size);
1395
1396 EXIT_LOG;
1397 }
1398
1399 typedef struct {
1400 int count;
1401 MonoMethod* methods [MAX_FRAMES];
1402 int32_t il_offsets [MAX_FRAMES];
1403 int32_t native_offsets [MAX_FRAMES];
1404 } FrameData;
1405
1406 static mono_bool
walk_stack(MonoMethod * method,int32_t native_offset,int32_t il_offset,mono_bool managed,void * data)1407 walk_stack (MonoMethod *method, int32_t native_offset, int32_t il_offset, mono_bool managed, void* data)
1408 {
1409 FrameData *frame = (FrameData *)data;
1410 if (method && frame->count < log_config.num_frames) {
1411 frame->il_offsets [frame->count] = il_offset;
1412 frame->native_offsets [frame->count] = native_offset;
1413 frame->methods [frame->count++] = method;
1414 }
1415 return frame->count == log_config.num_frames;
1416 }
1417
1418 /*
1419 * a note about stack walks: they can cause more profiler events to fire,
1420 * so we need to make sure they don't happen after we started emitting an
1421 * event, hence the collect_bt/emit_bt split.
1422 */
1423 static void
collect_bt(FrameData * data)1424 collect_bt (FrameData *data)
1425 {
1426 data->count = 0;
1427 mono_stack_walk_no_il (walk_stack, data);
1428 }
1429
1430 static void
emit_bt(LogBuffer * logbuffer,FrameData * data)1431 emit_bt (LogBuffer *logbuffer, FrameData *data)
1432 {
1433 emit_value (logbuffer, data->count);
1434
1435 while (data->count)
1436 emit_method (logbuffer, data->methods [--data->count]);
1437 }
1438
1439 static void
gc_alloc(MonoProfiler * prof,MonoObject * obj)1440 gc_alloc (MonoProfiler *prof, MonoObject *obj)
1441 {
1442 int do_bt = (!log_config.enter_leave && mono_atomic_load_i32 (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_ALLOC_BT : 0;
1443 FrameData data;
1444 uintptr_t len = mono_object_get_size (obj);
1445 /* account for object alignment in the heap */
1446 len += 7;
1447 len &= ~7;
1448
1449 if (do_bt)
1450 collect_bt (&data);
1451
1452 ENTER_LOG (&gc_allocs_ctr, logbuffer,
1453 EVENT_SIZE /* event */ +
1454 LEB128_SIZE /* klass */ +
1455 LEB128_SIZE /* obj */ +
1456 LEB128_SIZE /* size */ +
1457 (do_bt ? (
1458 LEB128_SIZE /* count */ +
1459 data.count * (
1460 LEB128_SIZE /* method */
1461 )
1462 ) : 0)
1463 );
1464
1465 emit_event (logbuffer, do_bt | TYPE_ALLOC);
1466 emit_ptr (logbuffer, mono_object_get_class (obj));
1467 emit_obj (logbuffer, obj);
1468 emit_value (logbuffer, len);
1469
1470 if (do_bt)
1471 emit_bt (logbuffer, &data);
1472
1473 EXIT_LOG;
1474 }
1475
1476 static void
gc_moves(MonoProfiler * prof,MonoObject * const * objects,uint64_t num)1477 gc_moves (MonoProfiler *prof, MonoObject *const *objects, uint64_t num)
1478 {
1479 ENTER_LOG (&gc_moves_ctr, logbuffer,
1480 EVENT_SIZE /* event */ +
1481 LEB128_SIZE /* num */ +
1482 num * (
1483 LEB128_SIZE /* object */
1484 )
1485 );
1486
1487 emit_event (logbuffer, TYPE_GC_MOVE | TYPE_GC);
1488 emit_value (logbuffer, num);
1489
1490 for (int i = 0; i < num; ++i)
1491 emit_obj (logbuffer, objects [i]);
1492
1493 EXIT_LOG;
1494 }
1495
1496 static void
gc_handle(MonoProfiler * prof,int op,MonoGCHandleType type,uint32_t handle,MonoObject * obj)1497 gc_handle (MonoProfiler *prof, int op, MonoGCHandleType type, uint32_t handle, MonoObject *obj)
1498 {
1499 int do_bt = !log_config.enter_leave && mono_atomic_load_i32 (&log_profiler.runtime_inited) && log_config.num_frames;
1500 FrameData data;
1501
1502 if (do_bt)
1503 collect_bt (&data);
1504
1505 gint32 *ctr = op == MONO_PROFILER_GC_HANDLE_CREATED ? &gc_handle_creations_ctr : &gc_handle_deletions_ctr;
1506
1507 ENTER_LOG (ctr, logbuffer,
1508 EVENT_SIZE /* event */ +
1509 LEB128_SIZE /* type */ +
1510 LEB128_SIZE /* handle */ +
1511 (op == MONO_PROFILER_GC_HANDLE_CREATED ? (
1512 LEB128_SIZE /* obj */
1513 ) : 0) +
1514 (do_bt ? (
1515 LEB128_SIZE /* count */ +
1516 data.count * (
1517 LEB128_SIZE /* method */
1518 )
1519 ) : 0)
1520 );
1521
1522 if (op == MONO_PROFILER_GC_HANDLE_CREATED)
1523 emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_CREATED_BT : TYPE_GC_HANDLE_CREATED) | TYPE_GC);
1524 else if (op == MONO_PROFILER_GC_HANDLE_DESTROYED)
1525 emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_DESTROYED_BT : TYPE_GC_HANDLE_DESTROYED) | TYPE_GC);
1526 else
1527 g_assert_not_reached ();
1528
1529 emit_value (logbuffer, type);
1530 emit_value (logbuffer, handle);
1531
1532 if (op == MONO_PROFILER_GC_HANDLE_CREATED)
1533 emit_obj (logbuffer, obj);
1534
1535 if (do_bt)
1536 emit_bt (logbuffer, &data);
1537
1538 EXIT_LOG;
1539 }
1540
1541 static void
gc_handle_created(MonoProfiler * prof,uint32_t handle,MonoGCHandleType type,MonoObject * obj)1542 gc_handle_created (MonoProfiler *prof, uint32_t handle, MonoGCHandleType type, MonoObject *obj)
1543 {
1544 gc_handle (prof, MONO_PROFILER_GC_HANDLE_CREATED, type, handle, obj);
1545 }
1546
1547 static void
gc_handle_deleted(MonoProfiler * prof,uint32_t handle,MonoGCHandleType type)1548 gc_handle_deleted (MonoProfiler *prof, uint32_t handle, MonoGCHandleType type)
1549 {
1550 gc_handle (prof, MONO_PROFILER_GC_HANDLE_DESTROYED, type, handle, NULL);
1551 }
1552
1553 static void
finalize_begin(MonoProfiler * prof)1554 finalize_begin (MonoProfiler *prof)
1555 {
1556 ENTER_LOG (&finalize_begins_ctr, buf,
1557 EVENT_SIZE /* event */
1558 );
1559
1560 emit_event (buf, TYPE_GC_FINALIZE_START | TYPE_GC);
1561
1562 EXIT_LOG;
1563 }
1564
1565 static void
finalize_end(MonoProfiler * prof)1566 finalize_end (MonoProfiler *prof)
1567 {
1568 process_heapshot ();
1569
1570 if (ENABLED (PROFLOG_GC_FINALIZATION_EVENTS)) {
1571 ENTER_LOG (&finalize_ends_ctr, buf,
1572 EVENT_SIZE /* event */
1573 );
1574
1575 emit_event (buf, TYPE_GC_FINALIZE_END | TYPE_GC);
1576
1577 EXIT_LOG;
1578 }
1579 }
1580
1581 static void
finalize_object_begin(MonoProfiler * prof,MonoObject * obj)1582 finalize_object_begin (MonoProfiler *prof, MonoObject *obj)
1583 {
1584 ENTER_LOG (&finalize_object_begins_ctr, buf,
1585 EVENT_SIZE /* event */ +
1586 LEB128_SIZE /* obj */
1587 );
1588
1589 emit_event (buf, TYPE_GC_FINALIZE_OBJECT_START | TYPE_GC);
1590 emit_obj (buf, obj);
1591
1592 EXIT_LOG;
1593 }
1594
1595 static void
finalize_object_end(MonoProfiler * prof,MonoObject * obj)1596 finalize_object_end (MonoProfiler *prof, MonoObject *obj)
1597 {
1598 ENTER_LOG (&finalize_object_ends_ctr, buf,
1599 EVENT_SIZE /* event */ +
1600 LEB128_SIZE /* obj */
1601 );
1602
1603 emit_event (buf, TYPE_GC_FINALIZE_OBJECT_END | TYPE_GC);
1604 emit_obj (buf, obj);
1605
1606 EXIT_LOG;
1607 }
1608
1609 static char*
push_nesting(char * p,MonoClass * klass)1610 push_nesting (char *p, MonoClass *klass)
1611 {
1612 MonoClass *nesting;
1613 const char *name;
1614 const char *nspace;
1615 nesting = mono_class_get_nesting_type (klass);
1616 if (nesting) {
1617 p = push_nesting (p, nesting);
1618 *p++ = '/';
1619 *p = 0;
1620 }
1621 name = mono_class_get_name (klass);
1622 nspace = mono_class_get_namespace (klass);
1623 if (*nspace) {
1624 strcpy (p, nspace);
1625 p += strlen (nspace);
1626 *p++ = '.';
1627 *p = 0;
1628 }
1629 strcpy (p, name);
1630 p += strlen (name);
1631 return p;
1632 }
1633
1634 static char*
type_name(MonoClass * klass)1635 type_name (MonoClass *klass)
1636 {
1637 char buf [1024];
1638 char *p;
1639 push_nesting (buf, klass);
1640 p = (char *) g_malloc (strlen (buf) + 1);
1641 strcpy (p, buf);
1642 return p;
1643 }
1644
1645 static void
image_loaded(MonoProfiler * prof,MonoImage * image)1646 image_loaded (MonoProfiler *prof, MonoImage *image)
1647 {
1648 const char *name = mono_image_get_filename (image);
1649 int nlen = strlen (name) + 1;
1650
1651 ENTER_LOG (&image_loads_ctr, logbuffer,
1652 EVENT_SIZE /* event */ +
1653 BYTE_SIZE /* type */ +
1654 LEB128_SIZE /* image */ +
1655 nlen /* name */
1656 );
1657
1658 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1659 emit_byte (logbuffer, TYPE_IMAGE);
1660 emit_ptr (logbuffer, image);
1661 memcpy (logbuffer->cursor, name, nlen);
1662 logbuffer->cursor += nlen;
1663
1664 EXIT_LOG;
1665 }
1666
1667 static void
image_unloaded(MonoProfiler * prof,MonoImage * image)1668 image_unloaded (MonoProfiler *prof, MonoImage *image)
1669 {
1670 const char *name = mono_image_get_filename (image);
1671 int nlen = strlen (name) + 1;
1672
1673 ENTER_LOG (&image_unloads_ctr, logbuffer,
1674 EVENT_SIZE /* event */ +
1675 BYTE_SIZE /* type */ +
1676 LEB128_SIZE /* image */ +
1677 nlen /* name */
1678 );
1679
1680 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1681 emit_byte (logbuffer, TYPE_IMAGE);
1682 emit_ptr (logbuffer, image);
1683 memcpy (logbuffer->cursor, name, nlen);
1684 logbuffer->cursor += nlen;
1685
1686 EXIT_LOG;
1687 }
1688
1689 static void
assembly_loaded(MonoProfiler * prof,MonoAssembly * assembly)1690 assembly_loaded (MonoProfiler *prof, MonoAssembly *assembly)
1691 {
1692 char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
1693 int nlen = strlen (name) + 1;
1694 MonoImage *image = mono_assembly_get_image (assembly);
1695
1696 ENTER_LOG (&assembly_loads_ctr, logbuffer,
1697 EVENT_SIZE /* event */ +
1698 BYTE_SIZE /* type */ +
1699 LEB128_SIZE /* assembly */ +
1700 LEB128_SIZE /* image */ +
1701 nlen /* name */
1702 );
1703
1704 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1705 emit_byte (logbuffer, TYPE_ASSEMBLY);
1706 emit_ptr (logbuffer, assembly);
1707 emit_ptr (logbuffer, image);
1708 memcpy (logbuffer->cursor, name, nlen);
1709 logbuffer->cursor += nlen;
1710
1711 EXIT_LOG;
1712
1713 mono_free (name);
1714 }
1715
1716 static void
assembly_unloaded(MonoProfiler * prof,MonoAssembly * assembly)1717 assembly_unloaded (MonoProfiler *prof, MonoAssembly *assembly)
1718 {
1719 char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
1720 int nlen = strlen (name) + 1;
1721 MonoImage *image = mono_assembly_get_image (assembly);
1722
1723 ENTER_LOG (&assembly_unloads_ctr, logbuffer,
1724 EVENT_SIZE /* event */ +
1725 BYTE_SIZE /* type */ +
1726 LEB128_SIZE /* assembly */ +
1727 LEB128_SIZE /* image */ +
1728 nlen /* name */
1729 );
1730
1731 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1732 emit_byte (logbuffer, TYPE_ASSEMBLY);
1733 emit_ptr (logbuffer, assembly);
1734 emit_ptr (logbuffer, image);
1735 memcpy (logbuffer->cursor, name, nlen);
1736 logbuffer->cursor += nlen;
1737
1738 EXIT_LOG;
1739
1740 mono_free (name);
1741 }
1742
1743 static void
class_loaded(MonoProfiler * prof,MonoClass * klass)1744 class_loaded (MonoProfiler *prof, MonoClass *klass)
1745 {
1746 char *name;
1747
1748 if (mono_atomic_load_i32 (&log_profiler.runtime_inited))
1749 name = mono_type_get_name (mono_class_get_type (klass));
1750 else
1751 name = type_name (klass);
1752
1753 int nlen = strlen (name) + 1;
1754 MonoImage *image = mono_class_get_image (klass);
1755
1756 ENTER_LOG (&class_loads_ctr, logbuffer,
1757 EVENT_SIZE /* event */ +
1758 BYTE_SIZE /* type */ +
1759 LEB128_SIZE /* klass */ +
1760 LEB128_SIZE /* image */ +
1761 nlen /* name */
1762 );
1763
1764 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1765 emit_byte (logbuffer, TYPE_CLASS);
1766 emit_ptr (logbuffer, klass);
1767 emit_ptr (logbuffer, image);
1768 memcpy (logbuffer->cursor, name, nlen);
1769 logbuffer->cursor += nlen;
1770
1771 EXIT_LOG;
1772
1773 if (mono_atomic_load_i32 (&log_profiler.runtime_inited))
1774 mono_free (name);
1775 else
1776 g_free (name);
1777 }
1778
1779 static void
method_enter(MonoProfiler * prof,MonoMethod * method,MonoProfilerCallContext * ctx)1780 method_enter (MonoProfiler *prof, MonoMethod *method, MonoProfilerCallContext *ctx)
1781 {
1782 if (get_thread ()->call_depth++ <= log_config.max_call_depth) {
1783 ENTER_LOG (&method_entries_ctr, logbuffer,
1784 EVENT_SIZE /* event */ +
1785 LEB128_SIZE /* method */
1786 );
1787
1788 emit_event (logbuffer, TYPE_ENTER | TYPE_METHOD);
1789 emit_method (logbuffer, method);
1790
1791 EXIT_LOG;
1792 }
1793 }
1794
1795 static void
method_leave(MonoProfiler * prof,MonoMethod * method,MonoProfilerCallContext * ctx)1796 method_leave (MonoProfiler *prof, MonoMethod *method, MonoProfilerCallContext *ctx)
1797 {
1798 if (--get_thread ()->call_depth <= log_config.max_call_depth) {
1799 ENTER_LOG (&method_exits_ctr, logbuffer,
1800 EVENT_SIZE /* event */ +
1801 LEB128_SIZE /* method */
1802 );
1803
1804 emit_event (logbuffer, TYPE_LEAVE | TYPE_METHOD);
1805 emit_method (logbuffer, method);
1806
1807 EXIT_LOG;
1808 }
1809 }
1810
1811 static void
tail_call(MonoProfiler * prof,MonoMethod * method,MonoMethod * target)1812 tail_call (MonoProfiler *prof, MonoMethod *method, MonoMethod *target)
1813 {
1814 method_leave (prof, method, NULL);
1815 }
1816
1817 static void
method_exc_leave(MonoProfiler * prof,MonoMethod * method,MonoObject * exc)1818 method_exc_leave (MonoProfiler *prof, MonoMethod *method, MonoObject *exc)
1819 {
1820 if (--get_thread ()->call_depth <= log_config.max_call_depth) {
1821 ENTER_LOG (&method_exception_exits_ctr, logbuffer,
1822 EVENT_SIZE /* event */ +
1823 LEB128_SIZE /* method */
1824 );
1825
1826 emit_event (logbuffer, TYPE_EXC_LEAVE | TYPE_METHOD);
1827 emit_method (logbuffer, method);
1828
1829 EXIT_LOG;
1830 }
1831 }
1832
1833 static MonoProfilerCallInstrumentationFlags
method_filter(MonoProfiler * prof,MonoMethod * method)1834 method_filter (MonoProfiler *prof, MonoMethod *method)
1835 {
1836 if (log_config.callspec.len > 0 &&
1837 !mono_callspec_eval (method, &log_config.callspec))
1838 return MONO_PROFILER_CALL_INSTRUMENTATION_NONE;
1839
1840 return MONO_PROFILER_CALL_INSTRUMENTATION_ENTER |
1841 MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE |
1842 MONO_PROFILER_CALL_INSTRUMENTATION_TAIL_CALL |
1843 MONO_PROFILER_CALL_INSTRUMENTATION_EXCEPTION_LEAVE;
1844 }
1845
1846 static void
method_jitted(MonoProfiler * prof,MonoMethod * method,MonoJitInfo * ji)1847 method_jitted (MonoProfiler *prof, MonoMethod *method, MonoJitInfo *ji)
1848 {
1849 buffer_lock ();
1850
1851 register_method_local (method, ji);
1852
1853 buffer_unlock ();
1854 }
1855
1856 static void
code_buffer_new(MonoProfiler * prof,const mono_byte * buffer,uint64_t size,MonoProfilerCodeBufferType type,const void * data)1857 code_buffer_new (MonoProfiler *prof, const mono_byte *buffer, uint64_t size, MonoProfilerCodeBufferType type, const void *data)
1858 {
1859 const char *name;
1860 int nlen;
1861
1862 if (type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE) {
1863 name = (const char *) data;
1864 nlen = strlen (name) + 1;
1865 } else {
1866 name = NULL;
1867 nlen = 0;
1868 }
1869
1870 ENTER_LOG (&code_buffers_ctr, logbuffer,
1871 EVENT_SIZE /* event */ +
1872 BYTE_SIZE /* type */ +
1873 LEB128_SIZE /* buffer */ +
1874 LEB128_SIZE /* size */ +
1875 (name ? (
1876 nlen /* name */
1877 ) : 0)
1878 );
1879
1880 emit_event (logbuffer, TYPE_JITHELPER | TYPE_RUNTIME);
1881 emit_byte (logbuffer, type);
1882 emit_ptr (logbuffer, buffer);
1883 emit_value (logbuffer, size);
1884
1885 if (name) {
1886 memcpy (logbuffer->cursor, name, nlen);
1887 logbuffer->cursor += nlen;
1888 }
1889
1890 EXIT_LOG;
1891 }
1892
1893 static void
1894 throw_exc (MonoProfiler *prof, MonoObject *object)
1895 {
1896 int do_bt = (!log_config.enter_leave && mono_atomic_load_i32 (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_THROW_BT : 0;
1897 FrameData data;
1898
1899 if (do_bt)
1900 collect_bt (&data);
1901
1902 ENTER_LOG (&exception_throws_ctr, logbuffer,
1903 EVENT_SIZE /* event */ +
1904 LEB128_SIZE /* object */ +
1905 (do_bt ? (
1906 LEB128_SIZE /* count */ +
1907 data.count * (
1908 LEB128_SIZE /* method */
1909 )
1910 ) : 0)
1911 );
1912
1913 emit_event (logbuffer, do_bt | TYPE_EXCEPTION);
1914 emit_obj (logbuffer, object);
1915
1916 if (do_bt)
1917 emit_bt (logbuffer, &data);
1918
1919 EXIT_LOG;
1920 }
1921
1922 static void
1923 clause_exc (MonoProfiler *prof, MonoMethod *method, uint32_t clause_num, MonoExceptionEnum clause_type, MonoObject *exc)
1924 {
1925 ENTER_LOG (&exception_clauses_ctr, logbuffer,
1926 EVENT_SIZE /* event */ +
1927 BYTE_SIZE /* clause type */ +
1928 LEB128_SIZE /* clause num */ +
1929 LEB128_SIZE /* method */ +
1930 LEB128_SIZE /* exc */
1931 );
1932
1933 emit_event (logbuffer, TYPE_EXCEPTION | TYPE_CLAUSE);
1934 emit_byte (logbuffer, clause_type);
1935 emit_value (logbuffer, clause_num);
1936 emit_method (logbuffer, method);
1937 emit_obj (logbuffer, exc);
1938
1939 EXIT_LOG;
1940 }
1941
1942 static void
1943 monitor_event (MonoProfiler *profiler, MonoObject *object, MonoProfilerMonitorEvent ev)
1944 {
1945 int do_bt = (!log_config.enter_leave && mono_atomic_load_i32 (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_MONITOR_BT : 0;
1946 FrameData data;
1947
1948 if (do_bt)
1949 collect_bt (&data);
1950
1951 ENTER_LOG (&monitor_events_ctr, logbuffer,
1952 EVENT_SIZE /* event */ +
1953 BYTE_SIZE /* ev */ +
1954 LEB128_SIZE /* object */ +
1955 (do_bt ? (
1956 LEB128_SIZE /* count */ +
1957 data.count * (
1958 LEB128_SIZE /* method */
1959 )
1960 ) : 0)
1961 );
1962
1963 emit_event (logbuffer, do_bt | TYPE_MONITOR);
1964 emit_byte (logbuffer, ev);
1965 emit_obj (logbuffer, object);
1966
1967 if (do_bt)
1968 emit_bt (logbuffer, &data);
1969
1970 EXIT_LOG;
1971 }
1972
1973 static void
1974 monitor_contention (MonoProfiler *prof, MonoObject *object)
1975 {
1976 monitor_event (prof, object, MONO_PROFILER_MONITOR_CONTENTION);
1977 }
1978
1979 static void
1980 monitor_acquired (MonoProfiler *prof, MonoObject *object)
1981 {
1982 monitor_event (prof, object, MONO_PROFILER_MONITOR_DONE);
1983 }
1984
1985 static void
1986 monitor_failed (MonoProfiler *prof, MonoObject *object)
1987 {
1988 monitor_event (prof, object, MONO_PROFILER_MONITOR_FAIL);
1989 }
1990
1991 static void
1992 thread_start (MonoProfiler *prof, uintptr_t tid)
1993 {
1994 ENTER_LOG (&thread_starts_ctr, logbuffer,
1995 EVENT_SIZE /* event */ +
1996 BYTE_SIZE /* type */ +
1997 LEB128_SIZE /* tid */
1998 );
1999
2000 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2001 emit_byte (logbuffer, TYPE_THREAD);
2002 emit_ptr (logbuffer, (void*) tid);
2003
2004 EXIT_LOG;
2005 }
2006
2007 static void
2008 thread_end (MonoProfiler *prof, uintptr_t tid)
2009 {
2010 ENTER_LOG (&thread_ends_ctr, logbuffer,
2011 EVENT_SIZE /* event */ +
2012 BYTE_SIZE /* type */ +
2013 LEB128_SIZE /* tid */
2014 );
2015
2016 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2017 emit_byte (logbuffer, TYPE_THREAD);
2018 emit_ptr (logbuffer, (void*) tid);
2019
2020 EXIT_LOG_EXPLICIT (NO_SEND);
2021
2022 MonoProfilerThread *thread = get_thread ();
2023
2024 thread->ended = TRUE;
2025 remove_thread (thread);
2026
2027 PROF_TLS_SET (MONO_PROFILER_THREAD_DEAD);
2028 }
2029
2030 static void
2031 thread_name (MonoProfiler *prof, uintptr_t tid, const char *name)
2032 {
2033 int len = strlen (name) + 1;
2034
2035 ENTER_LOG (&thread_names_ctr, logbuffer,
2036 EVENT_SIZE /* event */ +
2037 BYTE_SIZE /* type */ +
2038 LEB128_SIZE /* tid */ +
2039 len /* name */
2040 );
2041
2042 emit_event (logbuffer, TYPE_METADATA);
2043 emit_byte (logbuffer, TYPE_THREAD);
2044 emit_ptr (logbuffer, (void*)tid);
2045 memcpy (logbuffer->cursor, name, len);
2046 logbuffer->cursor += len;
2047
2048 EXIT_LOG;
2049 }
2050
2051 static void
2052 domain_loaded (MonoProfiler *prof, MonoDomain *domain)
2053 {
2054 ENTER_LOG (&domain_loads_ctr, logbuffer,
2055 EVENT_SIZE /* event */ +
2056 BYTE_SIZE /* type */ +
2057 LEB128_SIZE /* domain id */
2058 );
2059
2060 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2061 emit_byte (logbuffer, TYPE_DOMAIN);
2062 emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2063
2064 EXIT_LOG;
2065 }
2066
2067 static void
2068 domain_unloaded (MonoProfiler *prof, MonoDomain *domain)
2069 {
2070 ENTER_LOG (&domain_unloads_ctr, logbuffer,
2071 EVENT_SIZE /* event */ +
2072 BYTE_SIZE /* type */ +
2073 LEB128_SIZE /* domain id */
2074 );
2075
2076 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2077 emit_byte (logbuffer, TYPE_DOMAIN);
2078 emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2079
2080 EXIT_LOG;
2081 }
2082
2083 static void
2084 domain_name (MonoProfiler *prof, MonoDomain *domain, const char *name)
2085 {
2086 int nlen = strlen (name) + 1;
2087
2088 ENTER_LOG (&domain_names_ctr, logbuffer,
2089 EVENT_SIZE /* event */ +
2090 BYTE_SIZE /* type */ +
2091 LEB128_SIZE /* domain id */ +
2092 nlen /* name */
2093 );
2094
2095 emit_event (logbuffer, TYPE_METADATA);
2096 emit_byte (logbuffer, TYPE_DOMAIN);
2097 emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2098 memcpy (logbuffer->cursor, name, nlen);
2099 logbuffer->cursor += nlen;
2100
2101 EXIT_LOG;
2102 }
2103
2104 static void
2105 context_loaded (MonoProfiler *prof, MonoAppContext *context)
2106 {
2107 ENTER_LOG (&context_loads_ctr, logbuffer,
2108 EVENT_SIZE /* event */ +
2109 BYTE_SIZE /* type */ +
2110 LEB128_SIZE /* context id */ +
2111 LEB128_SIZE /* domain id */
2112 );
2113
2114 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2115 emit_byte (logbuffer, TYPE_CONTEXT);
2116 emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
2117 emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
2118
2119 EXIT_LOG;
2120 }
2121
2122 static void
2123 context_unloaded (MonoProfiler *prof, MonoAppContext *context)
2124 {
2125 ENTER_LOG (&context_unloads_ctr, logbuffer,
2126 EVENT_SIZE /* event */ +
2127 BYTE_SIZE /* type */ +
2128 LEB128_SIZE /* context id */ +
2129 LEB128_SIZE /* domain id */
2130 );
2131
2132 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2133 emit_byte (logbuffer, TYPE_CONTEXT);
2134 emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
2135 emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
2136
2137 EXIT_LOG;
2138 }
2139
2140 typedef struct {
2141 MonoMethod *method;
2142 MonoDomain *domain;
2143 void *base_address;
2144 int offset;
2145 } AsyncFrameInfo;
2146
2147 typedef struct {
2148 MonoLockFreeQueueNode node;
2149 uint64_t time;
2150 uintptr_t tid;
2151 const void *ip;
2152 int count;
2153 AsyncFrameInfo frames [MONO_ZERO_LEN_ARRAY];
2154 } SampleHit;
2155
2156 static mono_bool
2157 async_walk_stack (MonoMethod *method, MonoDomain *domain, void *base_address, int offset, void *data)
2158 {
2159 SampleHit *sample = (SampleHit *) data;
2160
2161 if (sample->count < log_config.num_frames) {
2162 int i = sample->count;
2163
2164 sample->frames [i].method = method;
2165 sample->frames [i].domain = domain;
2166 sample->frames [i].base_address = base_address;
2167 sample->frames [i].offset = offset;
2168
2169 sample->count++;
2170 }
2171
2172 return sample->count == log_config.num_frames;
2173 }
2174
2175 #define SAMPLE_SLOT_SIZE(FRAMES) (sizeof (SampleHit) + sizeof (AsyncFrameInfo) * (FRAMES - MONO_ZERO_LEN_ARRAY))
2176 #define SAMPLE_BLOCK_SIZE (mono_pagesize ())
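/*
 * SAMPLE_SLOT_SIZE accounts for the flexible frames [] array: the struct
 * already reserves MONO_ZERO_LEN_ARRAY elements, so only the remainder is
 * added. A worked example, assuming a 64-bit target where
 * sizeof (AsyncFrameInfo) == 32 and MONO_ZERO_LEN_ARRAY == 1:
 *
 *   SAMPLE_SLOT_SIZE (8) == sizeof (SampleHit) + 32 * (8 - 1)
 *                        == sizeof (SampleHit) + 224
 *
 * i.e. room for 8 captured frames per sample slot.
 */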
2177
2178 static void
2179 enqueue_sample_hit (gpointer p)
2180 {
2181 SampleHit *sample = p;
2182
2183 mono_lock_free_queue_node_unpoison (&sample->node);
2184 mono_lock_free_queue_enqueue (&log_profiler.dumper_queue, &sample->node);
2185 mono_os_sem_post (&log_profiler.dumper_queue_sem);
2186 }
2187
2188 static void
2189 mono_sample_hit (MonoProfiler *profiler, const mono_byte *ip, const void *context)
2190 {
2191 /*
2192 * Please note: We rely on the runtime loading the profiler with
2193 * MONO_DL_EAGER (RTLD_NOW) so that references to runtime functions within
2194 * this function (and its siblings) are resolved when the profiler is
2195 * loaded. Otherwise, we would potentially invoke the dynamic linker when
2196 * invoking runtime functions, which is not async-signal-safe.
2197 */
2198
2199 if (mono_atomic_load_i32 (&log_profiler.in_shutdown))
2200 return;
2201
2202 SampleHit *sample = (SampleHit *) mono_lock_free_queue_dequeue (&profiler->sample_reuse_queue);
2203
2204 if (!sample) {
2205 /*
2206 * If we're out of reusable sample events and we're not allowed to
2207 * allocate more, we have no choice but to drop the event.
2208 */
2209 if (mono_atomic_load_i32 (&sample_allocations_ctr) >= log_config.max_allocated_sample_hits)
2210 return;
2211
2212 sample = mono_lock_free_alloc (&profiler->sample_allocator);
2213 mono_lock_free_queue_node_init (&sample->node, TRUE);
2214
2215 mono_atomic_inc_i32 (&sample_allocations_ctr);
2216 }
2217
2218 sample->count = 0;
2219 mono_stack_walk_async_safe (&async_walk_stack, (void *) context, sample);
2220
2221 sample->time = current_time ();
2222 sample->tid = thread_id ();
2223 sample->ip = ip;
2224
2225 mono_thread_hazardous_try_free (sample, enqueue_sample_hit);
2226 }
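/*
 * Note that the sample is not handed to the dumper thread directly:
 * mono_thread_hazardous_try_free () defers the enqueue_sample_hit ()
 * callback until no hazard pointer can still reference the node, so a
 * slot pulled from the reuse queue is never recycled while another
 * thread may still be reading it.
 */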
2227
2228 static uintptr_t *code_pages = 0;
2229 static int num_code_pages = 0;
2230 static int size_code_pages = 0;
2231 #define CPAGE_SHIFT (9)
2232 #define CPAGE_SIZE (1 << CPAGE_SHIFT)
2233 #define CPAGE_MASK (~(CPAGE_SIZE - 1))
2234 #define CPAGE_ADDR(p) ((p) & CPAGE_MASK)
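/*
 * With CPAGE_SHIFT == 9, code addresses are grouped into 512-byte
 * "pages". For example, CPAGE_ADDR (0x12345) == 0x12200, since the low
 * 9 bits are masked off; the low bit of a stored entry is later reused
 * as a "dumped" flag (see dump_unmanaged_coderefs).
 */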
2235
2236 static uintptr_t
2237 add_code_page (uintptr_t *hash, uintptr_t hsize, uintptr_t page)
2238 {
2239 uintptr_t i;
2240 uintptr_t start_pos;
2241 start_pos = (page >> CPAGE_SHIFT) % hsize;
2242 i = start_pos;
2243 do {
2244 if (hash [i] && CPAGE_ADDR (hash [i]) == CPAGE_ADDR (page)) {
2245 return 0;
2246 } else if (!hash [i]) {
2247 hash [i] = page;
2248 return 1;
2249 }
2250 /* wrap around */
2251 if (++i == hsize)
2252 i = 0;
2253 } while (i != start_pos);
2254 g_assert_not_reached ();
2255 return 0;
2256 }
2257
2258 static void
2259 add_code_pointer (uintptr_t ip)
2260 {
2261 uintptr_t i;
2262 if (num_code_pages * 2 >= size_code_pages) {
2263 uintptr_t *n;
2264 uintptr_t old_size = size_code_pages;
2265 size_code_pages *= 2;
2266 if (size_code_pages == 0)
2267 size_code_pages = 16;
2268 n = (uintptr_t *) g_calloc (sizeof (uintptr_t) * size_code_pages, 1);
2269 for (i = 0; i < old_size; ++i) {
2270 if (code_pages [i])
2271 add_code_page (n, size_code_pages, code_pages [i]);
2272 }
2273 if (code_pages)
2274 g_free (code_pages);
2275 code_pages = n;
2276 }
2277 num_code_pages += add_code_page (code_pages, size_code_pages, ip & CPAGE_MASK);
2278 }
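/*
 * The table is grown whenever the load factor would reach 1/2
 * (num_code_pages * 2 >= size_code_pages), which keeps the linear probing
 * in add_code_page cheap: e.g. the initial table of 16 slots is doubled
 * to 32 right before the ninth distinct page would be inserted.
 */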
2279
2280 /* ELF code crashes on some systems. */
2281 //#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
2282 #if 0
2283 static void
2284 dump_ubin (const char *filename, uintptr_t load_addr, uint64_t offset, uintptr_t size)
2285 {
2286 int len = strlen (filename) + 1;
2287
2288 ENTER_LOG (&sample_ubins_ctr, logbuffer,
2289 EVENT_SIZE /* event */ +
2290 LEB128_SIZE /* load address */ +
2291 LEB128_SIZE /* offset */ +
2292 LEB128_SIZE /* size */ +
2293 len /* file name */
2294 );
2295
2296 emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_UBIN);
2297 emit_ptr (logbuffer, load_addr);
2298 emit_uvalue (logbuffer, offset);
2299 emit_uvalue (logbuffer, size);
2300 memcpy (logbuffer->cursor, filename, len);
2301 logbuffer->cursor += len;
2302
2303 EXIT_LOG;
2304 }
2305 #endif
2306
2307 static void
dump_usym(const char * name,uintptr_t value,uintptr_t size)2308 dump_usym (const char *name, uintptr_t value, uintptr_t size)
2309 {
2310 int len = strlen (name) + 1;
2311
2312 ENTER_LOG (&sample_usyms_ctr, logbuffer,
2313 EVENT_SIZE /* event */ +
2314 LEB128_SIZE /* value */ +
2315 LEB128_SIZE /* size */ +
2316 len /* name */
2317 );
2318
2319 emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_USYM);
2320 emit_ptr (logbuffer, (void*)value);
2321 emit_value (logbuffer, size);
2322 memcpy (logbuffer->cursor, name, len);
2323 logbuffer->cursor += len;
2324
2325 EXIT_LOG;
2326 }
2327
2328 /* ELF code crashes on some systems. */
2329 //#if defined(ELFMAG0)
2330 #if 0
2331
2332 #if SIZEOF_VOID_P == 4
2333 #define ELF_WSIZE 32
2334 #else
2335 #define ELF_WSIZE 64
2336 #endif
2337 #ifndef ElfW
2338 #define ElfW(type) _ElfW (Elf, ELF_WSIZE, type)
2339 #define _ElfW(e,w,t) _ElfW_1 (e, w, _##t)
2340 #define _ElfW_1(e,w,t) e##w##t
2341 #endif
2342
2343 static void
2344 dump_elf_symbols (ElfW(Sym) *symbols, int num_symbols, const char *strtab, void *load_addr)
2345 {
2346 int i;
2347 for (i = 0; i < num_symbols; ++i) {
2348 const char* sym;
2349 sym = strtab + symbols [i].st_name;
2350 if (!symbols [i].st_name || !symbols [i].st_size || (symbols [i].st_info & 0xf) != STT_FUNC)
2351 continue;
2352 dump_usym (sym, (uintptr_t)load_addr + symbols [i].st_value, symbols [i].st_size);
2353 }
2354 }
2355
2356 static int
2357 read_elf_symbols (const char *filename, void *load_addr)
2358 {
2359 int fd, i;
2360 void *data;
2361 struct stat statb;
2362 uint64_t file_size;
2363 ElfW(Ehdr) *header;
2364 ElfW(Shdr) *sheader;
2365 ElfW(Shdr) *shstrtabh;
2366 ElfW(Shdr) *symtabh = NULL;
2367 ElfW(Shdr) *strtabh = NULL;
2368 ElfW(Sym) *symbols = NULL;
2369 const char *strtab;
2370 int num_symbols;
2371
2372 fd = open (filename, O_RDONLY);
2373 if (fd < 0)
2374 return 0;
2375 if (fstat (fd, &statb) != 0) {
2376 close (fd);
2377 return 0;
2378 }
2379 file_size = statb.st_size;
2380 data = mmap (NULL, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
2381 close (fd);
2382 if (data == MAP_FAILED)
2383 return 0;
2384 header = data;
2385 if (header->e_ident [EI_MAG0] != ELFMAG0 ||
2386 header->e_ident [EI_MAG1] != ELFMAG1 ||
2387 header->e_ident [EI_MAG2] != ELFMAG2 ||
2388 header->e_ident [EI_MAG3] != ELFMAG3 ) {
2389 munmap (data, file_size);
2390 return 0;
2391 }
2392 sheader = (void*)((char*)data + header->e_shoff);
2393 shstrtabh = (void*)((char*)sheader + (header->e_shentsize * header->e_shstrndx));
2394 strtab = (const char*)data + shstrtabh->sh_offset;
2395 for (i = 0; i < header->e_shnum; ++i) {
2396 if (sheader->sh_type == SHT_SYMTAB) {
2397 symtabh = sheader;
2398 strtabh = (void*)((char*)data + header->e_shoff + sheader->sh_link * header->e_shentsize);
2399 break;
2400 }
2401 sheader = (void*)((char*)sheader + header->e_shentsize);
2402 }
2403 if (!symtabh || !strtabh) {
2404 munmap (data, file_size);
2405 return 0;
2406 }
2407 strtab = (const char*)data + strtabh->sh_offset;
2408 num_symbols = symtabh->sh_size / symtabh->sh_entsize;
2409 symbols = (void*)((char*)data + symtabh->sh_offset);
2410 dump_elf_symbols (symbols, num_symbols, strtab, load_addr);
2411 munmap (data, file_size);
2412 return 1;
2413 }
2414 #endif
2415
2416 /* ELF code crashes on some systems. */
2417 //#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
2418 #if 0
2419 static int
2420 elf_dl_callback (struct dl_phdr_info *info, size_t size, void *data)
2421 {
2422 char buf [256];
2423 const char *filename;
2424 BinaryObject *obj;
2425 char *a = (void*)info->dlpi_addr;
2426 int i, num_sym;
2427 ElfW(Dyn) *dyn = NULL;
2428 ElfW(Sym) *symtab = NULL;
2429 ElfW(Word) *hash_table = NULL;
2430 ElfW(Ehdr) *header = NULL;
2431 const char* strtab = NULL;
2432 for (obj = log_profiler.binary_objects; obj; obj = obj->next) {
2433 if (obj->addr == a)
2434 return 0;
2435 }
2436 filename = info->dlpi_name;
2437 if (!filename)
2438 return 0;
2439 if (!info->dlpi_addr && !filename [0]) {
2440 int l = readlink ("/proc/self/exe", buf, sizeof (buf) - 1);
2441 if (l > 0) {
2442 buf [l] = 0;
2443 filename = buf;
2444 }
2445 }
2446 obj = g_calloc (sizeof (BinaryObject), 1);
2447 obj->addr = (void*)info->dlpi_addr;
2448 obj->name = pstrdup (filename);
2449 obj->next = log_profiler.binary_objects;
2450 log_profiler.binary_objects = obj;
2451 a = NULL;
2452 for (i = 0; i < info->dlpi_phnum; ++i) {
2453 if (info->dlpi_phdr[i].p_type == PT_LOAD && !header) {
2454 header = (ElfW(Ehdr)*)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
2455 if (header->e_ident [EI_MAG0] != ELFMAG0 ||
2456 header->e_ident [EI_MAG1] != ELFMAG1 ||
2457 header->e_ident [EI_MAG2] != ELFMAG2 ||
2458 header->e_ident [EI_MAG3] != ELFMAG3 ) {
2459 header = NULL;
2460 }
2461 dump_ubin (filename, info->dlpi_addr + info->dlpi_phdr[i].p_vaddr, info->dlpi_phdr[i].p_offset, info->dlpi_phdr[i].p_memsz);
2462 } else if (info->dlpi_phdr[i].p_type == PT_DYNAMIC) {
2463 dyn = (ElfW(Dyn) *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
2464 }
2465 }
2466 if (read_elf_symbols (filename, (void*)info->dlpi_addr))
2467 return 0;
2468 if (!info->dlpi_name || !info->dlpi_name[0])
2469 return 0;
2470 if (!dyn)
2471 return 0;
2472 for (i = 0; dyn [i].d_tag != DT_NULL; ++i) {
2473 if (dyn [i].d_tag == DT_SYMTAB) {
2474 symtab = (ElfW(Sym) *)(a + dyn [i].d_un.d_ptr);
2475 } else if (dyn [i].d_tag == DT_HASH) {
2476 hash_table = (ElfW(Word) *)(a + dyn [i].d_un.d_ptr);
2477 } else if (dyn [i].d_tag == DT_STRTAB) {
2478 strtab = (const char*)(a + dyn [i].d_un.d_ptr);
2479 }
2480 }
2481 if (!hash_table)
2482 return 0;
2483 num_sym = hash_table [1];
2484 dump_elf_symbols (symtab, num_sym, strtab, (void*)info->dlpi_addr);
2485 return 0;
2486 }
2487
2488 static int
2489 load_binaries (void)
2490 {
2491 dl_iterate_phdr (elf_dl_callback, NULL);
2492 return 1;
2493 }
2494 #else
2495 static int
2496 load_binaries (void)
2497 {
2498 return 0;
2499 }
2500 #endif
2501
2502 static const char*
2503 symbol_for (uintptr_t code)
2504 {
2505 #ifdef HAVE_DLADDR
2506 void *ip = (void*)code;
2507 Dl_info di;
2508 if (dladdr (ip, &di)) {
2509 if (di.dli_sname)
2510 return di.dli_sname;
2511 } else {
2512 /* char **names;
2513 names = backtrace_symbols (&ip, 1);
2514 if (names) {
2515 const char* p = names [0];
2516 g_free (names);
2517 return p;
2518 }
2519 */
2520 }
2521 #endif
2522 return NULL;
2523 }
2524
2525 static void
2526 dump_unmanaged_coderefs (void)
2527 {
2528 int i;
2529 const char* last_symbol;
2530 uintptr_t addr, page_end;
2531
2532 if (load_binaries ())
2533 return;
2534 for (i = 0; i < size_code_pages; ++i) {
2535 const char* sym;
2536 if (!code_pages [i] || code_pages [i] & 1)
2537 continue;
2538 last_symbol = NULL;
2539 addr = CPAGE_ADDR (code_pages [i]);
2540 page_end = addr + CPAGE_SIZE;
2541 code_pages [i] |= 1;
2542 /* we dump the symbols for the whole page */
2543 for (; addr < page_end; addr += 16) {
2544 sym = symbol_for (addr);
2545 if (sym && sym == last_symbol)
2546 continue;
2547 last_symbol = sym;
2548 if (!sym)
2549 continue;
2550 dump_usym (sym, addr, 0); /* let's not guess the size */
2551 }
2552 }
2553 }
2554
2555 static void
2556 counters_add_agent (MonoCounter *counter)
2557 {
2558 if (mono_atomic_load_i32 (&log_profiler.in_shutdown))
2559 return;
2560
2561 MonoCounterAgent *agent, *item;
2562
2563 mono_os_mutex_lock (&log_profiler.counters_mutex);
2564
2565 for (agent = log_profiler.counters; agent; agent = agent->next) {
2566 if (agent->counter == counter) {
2567 agent->value_size = 0;
2568 if (agent->value) {
2569 g_free (agent->value);
2570 agent->value = NULL;
2571 }
2572 goto done;
2573 }
2574 }
2575
2576 agent = (MonoCounterAgent *) g_malloc (sizeof (MonoCounterAgent));
2577 agent->counter = counter;
2578 agent->value = NULL;
2579 agent->value_size = 0;
2580 agent->index = log_profiler.counters_index++;
2581 agent->emitted = FALSE;
2582 agent->next = NULL;
2583
2584 if (!log_profiler.counters) {
2585 log_profiler.counters = agent;
2586 } else {
2587 item = log_profiler.counters;
2588 while (item->next)
2589 item = item->next;
2590 item->next = agent;
2591 }
2592
2593 done:
2594 mono_os_mutex_unlock (&log_profiler.counters_mutex);
2595 }
2596
2597 static mono_bool
2598 counters_init_foreach_callback (MonoCounter *counter, gpointer data)
2599 {
2600 counters_add_agent (counter);
2601 return TRUE;
2602 }
2603
2604 static void
2605 counters_init (void)
2606 {
2607 mono_os_mutex_init (&log_profiler.counters_mutex);
2608
2609 log_profiler.counters_index = 1;
2610
2611 mono_counters_on_register (&counters_add_agent);
2612 mono_counters_foreach (counters_init_foreach_callback, NULL);
2613 }
2614
2615 static void
2616 counters_emit (void)
2617 {
2618 MonoCounterAgent *agent;
2619 int len = 0;
2620 int size =
2621 EVENT_SIZE /* event */ +
2622 LEB128_SIZE /* len */
2623 ;
2624
2625 mono_os_mutex_lock (&log_profiler.counters_mutex);
2626
2627 for (agent = log_profiler.counters; agent; agent = agent->next) {
2628 if (agent->emitted)
2629 continue;
2630
2631 size +=
2632 LEB128_SIZE /* section */ +
2633 strlen (mono_counter_get_name (agent->counter)) + 1 /* name */ +
2634 BYTE_SIZE /* type */ +
2635 BYTE_SIZE /* unit */ +
2636 BYTE_SIZE /* variance */ +
2637 LEB128_SIZE /* index */
2638 ;
2639
2640 len++;
2641 }
2642
2643 if (!len)
2644 goto done;
2645
2646 ENTER_LOG (&counter_descriptors_ctr, logbuffer, size);
2647
2648 emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
2649 emit_value (logbuffer, len);
2650
2651 for (agent = log_profiler.counters; agent; agent = agent->next) {
2652 const char *name;
2653
2654 if (agent->emitted)
2655 continue;
2656
2657 name = mono_counter_get_name (agent->counter);
2658 emit_value (logbuffer, mono_counter_get_section (agent->counter));
2659 emit_string (logbuffer, name, strlen (name) + 1);
2660 emit_byte (logbuffer, mono_counter_get_type (agent->counter));
2661 emit_byte (logbuffer, mono_counter_get_unit (agent->counter));
2662 emit_byte (logbuffer, mono_counter_get_variance (agent->counter));
2663 emit_value (logbuffer, agent->index);
2664
2665 agent->emitted = TRUE;
2666 }
2667
2668 EXIT_LOG;
2669
2670 done:
2671 mono_os_mutex_unlock (&log_profiler.counters_mutex);
2672 }
2673
2674 static void
2675 counters_sample (uint64_t timestamp)
2676 {
2677 MonoCounterAgent *agent;
2678 MonoCounter *counter;
2679 int type;
2680 int buffer_size;
2681 void *buffer;
2682 int size;
2683
2684 counters_emit ();
2685
2686 buffer_size = 8;
2687 buffer = g_calloc (1, buffer_size);
2688
2689 mono_os_mutex_lock (&log_profiler.counters_mutex);
2690
2691 size =
2692 EVENT_SIZE /* event */
2693 ;
2694
2695 for (agent = log_profiler.counters; agent; agent = agent->next) {
2696 /*
2697 * FIXME: This calculation is incorrect for string counters since
2698 * mono_counter_get_size () just returns 0 in that case. We should
2699 * address this if we ever actually add any string counters to Mono.
2700 */
2701
2702 size +=
2703 LEB128_SIZE /* index */ +
2704 BYTE_SIZE /* type */ +
2705 mono_counter_get_size (agent->counter) /* value */
2706 ;
2707 }
2708
2709 size +=
2710 LEB128_SIZE /* stop marker */
2711 ;
2712
2713 ENTER_LOG (&counter_samples_ctr, logbuffer, size);
2714
2715 emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
2716
2717 for (agent = log_profiler.counters; agent; agent = agent->next) {
2718 size_t size;
2719
2720 counter = agent->counter;
2721
2722 size = mono_counter_get_size (counter);
2723
2724 if (size > buffer_size) {
2725 buffer_size = size;
2726 buffer = g_realloc (buffer, buffer_size);
2727 }
2728
2729 memset (buffer, 0, buffer_size);
2730
2731 g_assert (mono_counters_sample (counter, buffer, size));
2732
2733 type = mono_counter_get_type (counter);
2734
2735 if (!agent->value) {
2736 agent->value = g_calloc (1, size);
2737 agent->value_size = size;
2738 } else {
2739 if (type == MONO_COUNTER_STRING) {
2740 if (strcmp (agent->value, buffer) == 0)
2741 continue;
2742 } else {
2743 if (agent->value_size == size && memcmp (agent->value, buffer, size) == 0)
2744 continue;
2745 }
2746 }
2747
2748 emit_uvalue (logbuffer, agent->index);
2749 emit_byte (logbuffer, type);
2750 switch (type) {
2751 case MONO_COUNTER_INT:
2752 #if SIZEOF_VOID_P == 4
2753 case MONO_COUNTER_WORD:
2754 #endif
2755 emit_svalue (logbuffer, *(int*)buffer - *(int*)agent->value);
2756 break;
2757 case MONO_COUNTER_UINT:
2758 emit_uvalue (logbuffer, *(guint*)buffer - *(guint*)agent->value);
2759 break;
2760 case MONO_COUNTER_TIME_INTERVAL:
2761 case MONO_COUNTER_LONG:
2762 #if SIZEOF_VOID_P == 8
2763 case MONO_COUNTER_WORD:
2764 #endif
2765 emit_svalue (logbuffer, *(gint64*)buffer - *(gint64*)agent->value);
2766 break;
2767 case MONO_COUNTER_ULONG:
2768 emit_uvalue (logbuffer, *(guint64*)buffer - *(guint64*)agent->value);
2769 break;
2770 case MONO_COUNTER_DOUBLE:
2771 emit_double (logbuffer, *(double*)buffer);
2772 break;
2773 case MONO_COUNTER_STRING:
2774 if (size == 0) {
2775 emit_byte (logbuffer, 0);
2776 } else {
2777 emit_byte (logbuffer, 1);
2778 emit_string (logbuffer, (char*)buffer, size);
2779 }
2780 break;
2781 default:
2782 g_assert_not_reached ();
2783 }
2784
2785 if (type == MONO_COUNTER_STRING && size > agent->value_size) {
2786 agent->value = g_realloc (agent->value, size);
2787 agent->value_size = size;
2788 }
2789
2790 if (size > 0)
2791 memcpy (agent->value, buffer, size);
2792 }
2793 g_free (buffer);
2794
2795 emit_value (logbuffer, 0);
2796
2797 EXIT_LOG;
2798
2799 mono_os_mutex_unlock (&log_profiler.counters_mutex);
2800 }
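/*
 * Integer counters are emitted as deltas against the previously sampled
 * value, which keeps the LEB128 encoding short for slow-moving counters.
 * A minimal sketch of the idea (illustrative only; the real logic is in
 * counters_sample above, with per-agent state in agent->value):
 */
#if 0
static int64_t previous_value; /* per-counter state, like agent->value */

static void
emit_counter_delta_sketch (LogBuffer *logbuffer, int64_t current_value)
{
	/* A counter that moved from 1000000 to 1000003 emits just "3",
	 * a single LEB128 byte, instead of the full absolute value. */
	emit_svalue (logbuffer, current_value - previous_value);
	previous_value = current_value;
}
#endif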
2801
2802 static void
2803 perfcounters_emit (void)
2804 {
2805 PerfCounterAgent *pcagent;
2806 int len = 0;
2807 int size =
2808 EVENT_SIZE /* event */ +
2809 LEB128_SIZE /* len */
2810 ;
2811
2812 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
2813 if (pcagent->emitted)
2814 continue;
2815
2816 size +=
2817 LEB128_SIZE /* section */ +
2818 strlen (pcagent->category_name) + 1 /* category name */ +
2819 strlen (pcagent->name) + 1 /* name */ +
2820 BYTE_SIZE /* type */ +
2821 BYTE_SIZE /* unit */ +
2822 BYTE_SIZE /* variance */ +
2823 LEB128_SIZE /* index */
2824 ;
2825
2826 len++;
2827 }
2828
2829 if (!len)
2830 return;
2831
2832 ENTER_LOG (&perfcounter_descriptors_ctr, logbuffer, size);
2833
2834 emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
2835 emit_value (logbuffer, len);
2836
2837 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
2838 if (pcagent->emitted)
2839 continue;
2840
2841 emit_value (logbuffer, MONO_COUNTER_PERFCOUNTERS);
2842 emit_string (logbuffer, pcagent->category_name, strlen (pcagent->category_name) + 1);
2843 emit_string (logbuffer, pcagent->name, strlen (pcagent->name) + 1);
2844 emit_byte (logbuffer, MONO_COUNTER_LONG);
2845 emit_byte (logbuffer, MONO_COUNTER_RAW);
2846 emit_byte (logbuffer, MONO_COUNTER_VARIABLE);
2847 emit_value (logbuffer, pcagent->index);
2848
2849 pcagent->emitted = TRUE;
2850 }
2851
2852 EXIT_LOG;
2853 }
2854
2855 static gboolean
2856 perfcounters_foreach (char *category_name, char *name, unsigned char type, gint64 value, gpointer user_data)
2857 {
2858 PerfCounterAgent *pcagent;
2859
2860 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
2861 if (strcmp (pcagent->category_name, category_name) != 0 || strcmp (pcagent->name, name) != 0)
2862 continue;
2863 if (pcagent->value == value)
2864 return TRUE;
2865
2866 pcagent->value = value;
2867 pcagent->updated = TRUE;
2868 pcagent->deleted = FALSE;
2869 return TRUE;
2870 }
2871
2872 pcagent = g_new0 (PerfCounterAgent, 1);
2873 pcagent->next = log_profiler.perfcounters;
2874 pcagent->index = log_profiler.counters_index++;
2875 pcagent->category_name = g_strdup (category_name);
2876 pcagent->name = g_strdup (name);
2877 pcagent->value = value;
2878 pcagent->emitted = FALSE;
2879 pcagent->updated = TRUE;
2880 pcagent->deleted = FALSE;
2881
2882 log_profiler.perfcounters = pcagent;
2883
2884 return TRUE;
2885 }
2886
2887 static void
2888 perfcounters_sample (uint64_t timestamp)
2889 {
2890 PerfCounterAgent *pcagent;
2891 int len = 0;
2892 int size;
2893
2894 mono_os_mutex_lock (&log_profiler.counters_mutex);
2895
2896 /* mark all perfcounters as deleted, foreach will unmark them as necessary */
2897 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next)
2898 pcagent->deleted = TRUE;
2899
2900 mono_perfcounter_foreach (perfcounters_foreach, NULL);
2901
2902 perfcounters_emit ();
2903
2904 size =
2905 EVENT_SIZE /* event */
2906 ;
2907
2908 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
2909 if (pcagent->deleted || !pcagent->updated)
2910 continue;
2911
2912 size +=
2913 LEB128_SIZE /* index */ +
2914 BYTE_SIZE /* type */ +
2915 LEB128_SIZE /* value */
2916 ;
2917
2918 len++;
2919 }
2920
2921 if (!len)
2922 goto done;
2923
2924 size +=
2925 LEB128_SIZE /* stop marker */
2926 ;
2927
2928 ENTER_LOG (&perfcounter_samples_ctr, logbuffer, size);
2929
2930 emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
2931
2932 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
2933 if (pcagent->deleted || !pcagent->updated)
2934 continue;
2935 emit_uvalue (logbuffer, pcagent->index);
2936 emit_byte (logbuffer, MONO_COUNTER_LONG);
2937 emit_svalue (logbuffer, pcagent->value);
2938
2939 pcagent->updated = FALSE;
2940 }
2941
2942 emit_value (logbuffer, 0);
2943
2944 EXIT_LOG;
2945
2946 done:
2947 mono_os_mutex_unlock (&log_profiler.counters_mutex);
2948 }
2949
2950 static void
2951 counters_and_perfcounters_sample (void)
2952 {
2953 uint64_t now = current_time ();
2954
2955 counters_sample (now);
2956 perfcounters_sample (now);
2957 }
2958
2959 typedef struct {
2960 MonoLockFreeQueueNode node;
2961 MonoMethod *method;
2962 } MethodNode;
2963
2964 typedef struct {
2965 int offset;
2966 int counter;
2967 char *filename;
2968 int line;
2969 int column;
2970 } CoverageEntry;
2971
2972 static void
2973 free_coverage_entry (gpointer data, gpointer userdata)
2974 {
2975 CoverageEntry *entry = (CoverageEntry *)data;
2976 g_free (entry->filename);
2977 g_free (entry);
2978 }
2979
2980 static void
2981 obtain_coverage_for_method (MonoProfiler *prof, const MonoProfilerCoverageData *entry)
2982 {
2983 int offset = entry->il_offset - log_profiler.coverage_previous_offset;
2984 CoverageEntry *e = g_new (CoverageEntry, 1);
2985
2986 log_profiler.coverage_previous_offset = entry->il_offset;
2987
2988 e->offset = offset;
2989 e->counter = entry->counter;
2990 e->filename = g_strdup(entry->file_name ? entry->file_name : "");
2991 e->line = entry->line;
2992 e->column = entry->column;
2993
2994 g_ptr_array_add (log_profiler.coverage_data, e);
2995 }
2996
2997 static char *
2998 parse_generic_type_names (char *name)
2999 {
3000 char *new_name, *ret;
3001 int within_generic_declaration = 0, generic_members = 1;
3002
3003 if (name == NULL || *name == '\0')
3004 return g_strdup ("");
3005
3006 if (!(ret = new_name = (char *) g_calloc (strlen (name) * 4 + 1, sizeof (char))))
3007 return NULL;
3008
3009 do {
3010 switch (*name) {
3011 case '<':
3012 within_generic_declaration = 1;
3013 break;
3014
3015 case '>':
3016 within_generic_declaration = 0;
3017
3018 if (*(name - 1) != '<') {
3019 *new_name++ = '`';
3020 *new_name++ = '0' + generic_members;
3021 } else {
3022 				memcpy (new_name, "&lt;&gt;", 8); /* HTML-escape an empty "<>" */
3023 				new_name += 8;
3024 }
3025
3026 generic_members = 0;
3027 break;
3028
3029 case ',':
3030 generic_members++;
3031 break;
3032
3033 default:
3034 if (!within_generic_declaration)
3035 *new_name++ = *name;
3036
3037 break;
3038 }
3039 } while (*name++);
3040
3041 return ret;
3042 }
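/*
 * Examples of the rewriting performed above:
 *
 *   "Dictionary<TKey,TValue>" -> "Dictionary`2"
 *   "List<int>"               -> "List`1"
 *   "Foo<>"                   -> "Foo&lt;&gt;"
 *
 * i.e. generic argument lists are collapsed into the CLR-style arity
 * suffix, while an empty "<>" is HTML-escaped, presumably for downstream
 * XML/HTML consumers of the coverage output.
 */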
3043
3044 static void
3045 build_method_buffer (gpointer key, gpointer value, gpointer userdata)
3046 {
3047 MonoMethod *method = (MonoMethod *)value;
3048 MonoClass *klass;
3049 MonoImage *image;
3050 char *class_name;
3051 const char *image_name, *method_name, *sig, *first_filename;
3052 guint i;
3053
3054 log_profiler.coverage_previous_offset = 0;
3055 log_profiler.coverage_data = g_ptr_array_new ();
3056
3057 mono_profiler_get_coverage_data (log_profiler.handle, method, obtain_coverage_for_method);
3058
3059 klass = mono_method_get_class (method);
3060 image = mono_class_get_image (klass);
3061 image_name = mono_image_get_name (image);
3062
3063 sig = mono_signature_get_desc (mono_method_signature (method), TRUE);
3064 class_name = parse_generic_type_names (mono_type_get_name (mono_class_get_type (klass)));
3065 method_name = mono_method_get_name (method);
3066
3067 if (log_profiler.coverage_data->len != 0) {
3068 CoverageEntry *entry = (CoverageEntry *)log_profiler.coverage_data->pdata[0];
3069 first_filename = entry->filename ? entry->filename : "";
3070 } else
3071 first_filename = "";
3072
3073 image_name = image_name ? image_name : "";
3074 sig = sig ? sig : "";
3075 method_name = method_name ? method_name : "";
3076
3077 ENTER_LOG (&coverage_methods_ctr, logbuffer,
3078 EVENT_SIZE /* event */ +
3079 strlen (image_name) + 1 /* image name */ +
3080 strlen (class_name) + 1 /* class name */ +
3081 strlen (method_name) + 1 /* method name */ +
3082 strlen (sig) + 1 /* signature */ +
3083 strlen (first_filename) + 1 /* first file name */ +
3084 LEB128_SIZE /* token */ +
3085 LEB128_SIZE /* method id */ +
3086 LEB128_SIZE /* entries */
3087 );
3088
3089 emit_event (logbuffer, TYPE_COVERAGE_METHOD | TYPE_COVERAGE);
3090 emit_string (logbuffer, image_name, strlen (image_name) + 1);
3091 emit_string (logbuffer, class_name, strlen (class_name) + 1);
3092 emit_string (logbuffer, method_name, strlen (method_name) + 1);
3093 emit_string (logbuffer, sig, strlen (sig) + 1);
3094 emit_string (logbuffer, first_filename, strlen (first_filename) + 1);
3095
3096 emit_uvalue (logbuffer, mono_method_get_token (method));
3097 emit_uvalue (logbuffer, log_profiler.coverage_method_id);
3098 emit_value (logbuffer, log_profiler.coverage_data->len);
3099
3100 EXIT_LOG;
3101
3102 for (i = 0; i < log_profiler.coverage_data->len; i++) {
3103 CoverageEntry *entry = (CoverageEntry *)log_profiler.coverage_data->pdata[i];
3104
3105 ENTER_LOG (&coverage_statements_ctr, logbuffer,
3106 EVENT_SIZE /* event */ +
3107 LEB128_SIZE /* method id */ +
3108 LEB128_SIZE /* offset */ +
3109 LEB128_SIZE /* counter */ +
3110 LEB128_SIZE /* line */ +
3111 LEB128_SIZE /* column */
3112 );
3113
3114 emit_event (logbuffer, TYPE_COVERAGE_STATEMENT | TYPE_COVERAGE);
3115 emit_uvalue (logbuffer, log_profiler.coverage_method_id);
3116 emit_uvalue (logbuffer, entry->offset);
3117 emit_uvalue (logbuffer, entry->counter);
3118 emit_uvalue (logbuffer, entry->line);
3119 emit_uvalue (logbuffer, entry->column);
3120
3121 EXIT_LOG;
3122 }
3123
3124 log_profiler.coverage_method_id++;
3125
3126 g_free (class_name);
3127
3128 g_ptr_array_foreach (log_profiler.coverage_data, free_coverage_entry, NULL);
3129 g_ptr_array_free (log_profiler.coverage_data, TRUE);
3130 }
3131
3132 /* This empties the queue */
3133 static guint
3134 count_queue (MonoLockFreeQueue *queue)
3135 {
3136 MonoLockFreeQueueNode *node;
3137 guint count = 0;
3138
3139 while ((node = mono_lock_free_queue_dequeue (queue))) {
3140 count++;
3141 mono_thread_hazardous_try_free (node, g_free);
3142 }
3143
3144 return count;
3145 }
3146
3147 static void
3148 build_class_buffer (gpointer key, gpointer value, gpointer userdata)
3149 {
3150 MonoClass *klass = (MonoClass *)key;
3151 MonoLockFreeQueue *class_methods = (MonoLockFreeQueue *)value;
3152 MonoImage *image;
3153 char *class_name;
3154 const char *assembly_name;
3155 int number_of_methods, partially_covered;
3156 guint fully_covered;
3157
3158 image = mono_class_get_image (klass);
3159 assembly_name = mono_image_get_name (image);
3160 class_name = mono_type_get_name (mono_class_get_type (klass));
3161
3162 assembly_name = assembly_name ? assembly_name : "";
3163 number_of_methods = mono_class_num_methods (klass);
3164 fully_covered = count_queue (class_methods);
3165 	/* We don't handle partially covered yet */
3166 partially_covered = 0;
3167
3168 ENTER_LOG (&coverage_classes_ctr, logbuffer,
3169 EVENT_SIZE /* event */ +
3170 strlen (assembly_name) + 1 /* assembly name */ +
3171 strlen (class_name) + 1 /* class name */ +
3172 LEB128_SIZE /* no. methods */ +
3173 LEB128_SIZE /* fully covered */ +
3174 LEB128_SIZE /* partially covered */
3175 );
3176
3177 emit_event (logbuffer, TYPE_COVERAGE_CLASS | TYPE_COVERAGE);
3178 emit_string (logbuffer, assembly_name, strlen (assembly_name) + 1);
3179 emit_string (logbuffer, class_name, strlen (class_name) + 1);
3180 emit_uvalue (logbuffer, number_of_methods);
3181 emit_uvalue (logbuffer, fully_covered);
3182 emit_uvalue (logbuffer, partially_covered);
3183
3184 EXIT_LOG;
3185
3186 g_free (class_name);
3187 }
3188
3189 static void
3190 get_coverage_for_image (MonoImage *image, int *number_of_methods, guint *fully_covered, int *partially_covered)
3191 {
3192 MonoLockFreeQueue *image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_image_to_methods, image);
3193
3194 *number_of_methods = mono_image_get_table_rows (image, MONO_TABLE_METHOD);
3195 if (image_methods)
3196 *fully_covered = count_queue (image_methods);
3197 else
3198 *fully_covered = 0;
3199
3200 // FIXME: We don't handle partially covered yet.
3201 *partially_covered = 0;
3202 }
3203
3204 static void
3205 build_assembly_buffer (gpointer key, gpointer value, gpointer userdata)
3206 {
3207 MonoAssembly *assembly = (MonoAssembly *)value;
3208 MonoImage *image = mono_assembly_get_image (assembly);
3209 const char *name, *guid, *filename;
3210 int number_of_methods = 0, partially_covered = 0;
3211 guint fully_covered = 0;
3212
3213 name = mono_image_get_name (image);
3214 guid = mono_image_get_guid (image);
3215 filename = mono_image_get_filename (image);
3216
3217 name = name ? name : "";
3218 guid = guid ? guid : "";
3219 filename = filename ? filename : "";
3220
3221 get_coverage_for_image (image, &number_of_methods, &fully_covered, &partially_covered);
3222
3223 ENTER_LOG (&coverage_assemblies_ctr, logbuffer,
3224 EVENT_SIZE /* event */ +
3225 strlen (name) + 1 /* name */ +
3226 strlen (guid) + 1 /* guid */ +
3227 strlen (filename) + 1 /* file name */ +
3228 LEB128_SIZE /* no. methods */ +
3229 LEB128_SIZE /* fully covered */ +
3230 LEB128_SIZE /* partially covered */
3231 );
3232
3233 emit_event (logbuffer, TYPE_COVERAGE_ASSEMBLY | TYPE_COVERAGE);
3234 emit_string (logbuffer, name, strlen (name) + 1);
3235 emit_string (logbuffer, guid, strlen (guid) + 1);
3236 emit_string (logbuffer, filename, strlen (filename) + 1);
3237 emit_uvalue (logbuffer, number_of_methods);
3238 emit_uvalue (logbuffer, fully_covered);
3239 emit_uvalue (logbuffer, partially_covered);
3240
3241 EXIT_LOG;
3242 }
3243
3244 static void
3245 dump_coverage (void)
3246 {
3247 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3248 mono_conc_hashtable_foreach (log_profiler.coverage_assemblies, build_assembly_buffer, NULL);
3249 mono_conc_hashtable_foreach (log_profiler.coverage_classes, build_class_buffer, NULL);
3250 mono_conc_hashtable_foreach (log_profiler.coverage_methods, build_method_buffer, NULL);
3251 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3252 }
3253
3254 static MonoLockFreeQueueNode *
3255 create_method_node (MonoMethod *method)
3256 {
3257 MethodNode *node = (MethodNode *) g_malloc (sizeof (MethodNode));
3258 mono_lock_free_queue_node_init ((MonoLockFreeQueueNode *) node, FALSE);
3259 node->method = method;
3260
3261 return (MonoLockFreeQueueNode *) node;
3262 }
3263
3264 static gboolean
3265 coverage_filter (MonoProfiler *prof, MonoMethod *method)
3266 {
3267 MonoError error;
3268 MonoClass *klass;
3269 MonoImage *image;
3270 MonoAssembly *assembly;
3271 MonoMethodHeader *header;
3272 guint32 iflags, flags, code_size;
3273 char *fqn, *classname;
3274 gboolean has_positive, found;
3275 MonoLockFreeQueue *image_methods, *class_methods;
3276 MonoLockFreeQueueNode *node;
3277
3278 flags = mono_method_get_flags (method, &iflags);
3279 if ((iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3280 (flags & METHOD_ATTRIBUTE_PINVOKE_IMPL))
3281 return FALSE;
3282
3283 // Don't need to do anything else if we're already tracking this method
3284 if (mono_conc_hashtable_lookup (log_profiler.coverage_methods, method))
3285 return TRUE;
3286
3287 klass = mono_method_get_class (method);
3288 image = mono_class_get_image (klass);
3289
3290 // Don't handle coverage for the core assemblies
3291 if (mono_conc_hashtable_lookup (log_profiler.coverage_suppressed_assemblies, (gpointer) mono_image_get_name (image)) != NULL)
3292 return FALSE;
3293
3294 if (prof->coverage_filters) {
3295 /* Check already filtered classes first */
3296 if (mono_conc_hashtable_lookup (log_profiler.coverage_filtered_classes, klass))
3297 return FALSE;
3298
3299 classname = mono_type_get_name (mono_class_get_type (klass));
3300
3301 fqn = g_strdup_printf ("[%s]%s", mono_image_get_name (image), classname);
3302
3303 // Check positive filters first
3304 has_positive = FALSE;
3305 found = FALSE;
3306 for (guint i = 0; i < prof->coverage_filters->len; ++i) {
3307 char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);
3308
3309 if (filter [0] == '+') {
3310 filter = &filter [1];
3311
3312 if (strstr (fqn, filter) != NULL)
3313 found = TRUE;
3314
3315 has_positive = TRUE;
3316 }
3317 }
3318
3319 if (has_positive && !found) {
3320 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3321 mono_conc_hashtable_insert (log_profiler.coverage_filtered_classes, klass, klass);
3322 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3323 g_free (fqn);
3324 g_free (classname);
3325
3326 return FALSE;
3327 }
3328
3329 for (guint i = 0; i < prof->coverage_filters->len; ++i) {
3330 // FIXME: Is substring search sufficient?
3331 char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);
3332 if (filter [0] == '+')
3333 continue;
3334
3335 // Skip '-'
3336 filter = &filter [1];
3337
3338 if (strstr (fqn, filter) != NULL) {
3339 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3340 mono_conc_hashtable_insert (log_profiler.coverage_filtered_classes, klass, klass);
3341 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3342 g_free (fqn);
3343 g_free (classname);
3344
3345 return FALSE;
3346 }
3347 }
3348
3349 g_free (fqn);
3350 g_free (classname);
3351 }
3352
3353 header = mono_method_get_header_checked (method, &error);
3354 mono_error_cleanup (&error);
3355
3356 mono_method_header_get_code (header, &code_size, NULL);
3357
3358 assembly = mono_image_get_assembly (image);
3359
3360 	// Need to keep the assemblies around for as long as they are kept in the hashtable.
3361 	// NUnit, for example, has a habit of unloading them before the coverage statistics
3362 	// are generated, causing a crash. See https://bugzilla.xamarin.com/show_bug.cgi?id=39325
3363 mono_assembly_addref (assembly);
3364
3365 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3366 mono_conc_hashtable_insert (log_profiler.coverage_methods, method, method);
3367 mono_conc_hashtable_insert (log_profiler.coverage_assemblies, assembly, assembly);
3368 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3369
3370 image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_image_to_methods, image);
3371
3372 if (image_methods == NULL) {
3373 image_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
3374 mono_lock_free_queue_init (image_methods);
3375 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3376 mono_conc_hashtable_insert (log_profiler.coverage_image_to_methods, image, image_methods);
3377 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3378 }
3379
3380 node = create_method_node (method);
3381 mono_lock_free_queue_enqueue (image_methods, node);
3382
3383 class_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_classes, klass);
3384
3385 if (class_methods == NULL) {
3386 class_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
3387 mono_lock_free_queue_init (class_methods);
3388 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3389 mono_conc_hashtable_insert (log_profiler.coverage_classes, klass, class_methods);
3390 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3391 }
3392
3393 node = create_method_node (method);
3394 mono_lock_free_queue_enqueue (class_methods, node);
3395
3396 return TRUE;
3397 }
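/*
 * Filter strings are matched by substring against "[image]Class" names.
 * For example, with filters { "+[MyApp]", "-[MyApp]MyApp.Generated" }:
 *
 *   "[MyApp]MyApp.Core.Parser"   -> covered (positive match, no negative)
 *   "[MyApp]MyApp.Generated.Foo" -> filtered out (negative match)
 *   "[mscorlib]System.String"    -> filtered out (a positive filter
 *                                   exists but does not match)
 */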
3398
3399 #define LINE_BUFFER_SIZE 4096
3400 /* Max file limit of 128KB */
3401 #define MAX_FILE_SIZE (128 * 1024)
3402 static char *
3403 get_file_content (FILE *stream)
3404 {
3405 char *buffer;
3406 ssize_t bytes_read;
3407 long filesize;
3408 int res, offset = 0;
3409
3410 res = fseek (stream, 0, SEEK_END);
3411 if (res < 0)
3412 return NULL;
3413
3414 filesize = ftell (stream);
3415 if (filesize < 0)
3416 return NULL;
3417
3418 res = fseek (stream, 0, SEEK_SET);
3419 if (res < 0)
3420 return NULL;
3421
3422 if (filesize > MAX_FILE_SIZE)
3423 return NULL;
3424
3425 buffer = (char *) g_malloc ((filesize + 1) * sizeof (char));
3426 while ((bytes_read = fread (buffer + offset, 1, LINE_BUFFER_SIZE, stream)) > 0)
3427 offset += bytes_read;
3428
3429 /* NULL terminate our buffer */
3430 buffer[filesize] = '\0';
3431 return buffer;
3432 }
3433
3434 static char *
3435 get_next_line (char *contents, char **next_start)
3436 {
3437 char *p = contents;
3438
3439 if (p == NULL || *p == '\0') {
3440 *next_start = NULL;
3441 return NULL;
3442 }
3443
3444 while (*p != '\n' && *p != '\0')
3445 p++;
3446
3447 if (*p == '\n') {
3448 *p = '\0';
3449 *next_start = p + 1;
3450 } else
3451 *next_start = NULL;
3452
3453 return contents;
3454 }
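/*
 * get_next_line () is destructive: it overwrites each '\n' with '\0' and
 * returns pointers into the original buffer, so the buffer must outlive
 * the lines. A minimal usage sketch (assuming an already-opened stream):
 */
#if 0
char *content = get_file_content (stream); /* e.g. "a\nb\n" */
char *line;

while ((line = get_next_line (content, &content)))
	puts (line); /* prints "a", then "b" */
#endif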
3455
3456 static void
3457 init_suppressed_assemblies (void)
3458 {
3459 char *content;
3460 char *line;
3461 FILE *sa_file;
3462
3463 log_profiler.coverage_suppressed_assemblies = mono_conc_hashtable_new (g_str_hash, g_str_equal);
3464 sa_file = fopen (SUPPRESSION_DIR "/mono-profiler-log.suppression", "r");
3465 if (sa_file == NULL)
3466 return;
3467
3468 /* Don't need to free @content as it is referred to by the lines stored in @suppressed_assemblies */
3469 content = get_file_content (sa_file);
3470 if (content == NULL)
3471 g_error ("mono-profiler-log.suppression is greater than 128kb - aborting.");
3472
3473 while ((line = get_next_line (content, &content))) {
3474 line = g_strchomp (g_strchug (line));
3475 /* No locking needed as we're doing initialization */
3476 mono_conc_hashtable_insert (log_profiler.coverage_suppressed_assemblies, line, line);
3477 }
3478
3479 fclose (sa_file);
3480 }
3481
3482 static void
3483 parse_cov_filter_file (GPtrArray *filters, const char *file)
3484 {
3485 FILE *filter_file = fopen (file, "r");
3486
3487 if (filter_file == NULL) {
3488 mono_profiler_printf_err ("Could not open coverage filter file '%s'.", file);
3489 return;
3490 }
3491
3492 /* Don't need to free content as it is referred to by the lines stored in @filters */
3493 char *content = get_file_content (filter_file);
3494
3495 if (content == NULL)
3496 mono_profiler_printf_err ("Coverage filter file '%s' is larger than 128kb - ignoring.", file);
3497
3498 char *line;
3499
3500 while ((line = get_next_line (content, &content)))
3501 g_ptr_array_add (filters, g_strchug (g_strchomp (line)));
3502
3503 fclose (filter_file);
3504 }
3505
3506 static void
3507 coverage_init (void)
3508 {
3509 mono_os_mutex_init (&log_profiler.coverage_mutex);
3510 log_profiler.coverage_methods = mono_conc_hashtable_new (NULL, NULL);
3511 log_profiler.coverage_assemblies = mono_conc_hashtable_new (NULL, NULL);
3512 log_profiler.coverage_classes = mono_conc_hashtable_new (NULL, NULL);
3513 log_profiler.coverage_filtered_classes = mono_conc_hashtable_new (NULL, NULL);
3514 log_profiler.coverage_image_to_methods = mono_conc_hashtable_new (NULL, NULL);
3515 init_suppressed_assemblies ();
3516 }
3517
3518 static void
3519 unref_coverage_assemblies (gpointer key, gpointer value, gpointer userdata)
3520 {
3521 MonoAssembly *assembly = (MonoAssembly *)value;
3522 mono_assembly_close (assembly);
3523 }
3524
3525 static void
3526 free_sample_hit (gpointer p)
3527 {
3528 mono_lock_free_free (p, SAMPLE_BLOCK_SIZE);
3529 }
3530
3531 static void
3532 cleanup_reusable_samples (void)
3533 {
3534 SampleHit *sample;
3535
3536 while ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&log_profiler.sample_reuse_queue)))
3537 mono_thread_hazardous_try_free (sample, free_sample_hit);
3538 }
3539
3540 static void
3541 log_early_shutdown (MonoProfiler *prof)
3542 {
3543 if (log_config.hs_on_shutdown) {
3544 mono_atomic_store_i32 (&log_profiler.heapshot_requested, 1);
3545 mono_gc_collect (mono_gc_max_generation ());
3546 }
3547 }
3548
3549 static void
3550 log_shutdown (MonoProfiler *prof)
3551 {
3552 mono_atomic_store_i32 (&log_profiler.in_shutdown, 1);
3553
3554 if (ENABLED (PROFLOG_COUNTER_EVENTS))
3555 counters_and_perfcounters_sample ();
3556
3557 if (log_config.collect_coverage)
3558 dump_coverage ();
3559
3560 char c = 1;
3561
3562 if (write (prof->pipes [1], &c, 1) != 1) {
3563 mono_profiler_printf_err ("Could not write to log profiler pipe: %s", g_strerror (errno));
3564 exit (1);
3565 }
3566
3567 mono_native_thread_join (prof->helper_thread);
3568
3569 mono_os_mutex_destroy (&log_profiler.counters_mutex);
3570
3571 MonoCounterAgent *mc_next;
3572
3573 for (MonoCounterAgent *cur = log_profiler.counters; cur; cur = mc_next) {
3574 mc_next = cur->next;
3575 g_free (cur);
3576 }
3577
3578 PerfCounterAgent *pc_next;
3579
3580 for (PerfCounterAgent *cur = log_profiler.perfcounters; cur; cur = pc_next) {
3581 pc_next = cur->next;
3582 g_free (cur);
3583 }
3584
3585 /*
3586 * Ensure that we empty the LLS completely, even if some nodes are
3587 * not immediately removed upon calling mono_lls_remove (), by
3588 * iterating until the head is NULL.
3589 */
3590 while (log_profiler.profiler_thread_list.head) {
3591 MONO_LLS_FOREACH_SAFE (&log_profiler.profiler_thread_list, MonoProfilerThread, thread) {
3592 g_assert (thread->attached && "Why is a thread in the LLS not attached?");
3593
3594 remove_thread (thread);
3595 } MONO_LLS_FOREACH_SAFE_END
3596 }
3597
3598 /*
3599 * Ensure that all threads have been freed, so that we don't miss any
3600 * buffers when we shut down the writer thread below.
3601 */
3602 mono_thread_hazardous_try_free_all ();
3603
3604 mono_atomic_store_i32 (&prof->run_dumper_thread, 0);
3605 mono_os_sem_post (&prof->dumper_queue_sem);
3606 mono_native_thread_join (prof->dumper_thread);
3607 mono_os_sem_destroy (&prof->dumper_queue_sem);
3608
3609 mono_atomic_store_i32 (&prof->run_writer_thread, 0);
3610 mono_os_sem_post (&prof->writer_queue_sem);
3611 mono_native_thread_join (prof->writer_thread);
3612 mono_os_sem_destroy (&prof->writer_queue_sem);
3613
3614 /*
3615 * Free all writer queue entries, and ensure that all sample hits will be
3616 * added to the sample reuse queue.
3617 */
3618 mono_thread_hazardous_try_free_all ();
3619
3620 cleanup_reusable_samples ();
3621
3622 /*
3623 * Finally, make sure that all sample hits are freed. This should cover all
3624 * hazardous data from the profiler. We can now be sure that the runtime
3625 * won't later invoke free functions in the profiler library after it has
3626 * been unloaded.
3627 */
3628 mono_thread_hazardous_try_free_all ();
3629
3630 gint32 state = mono_atomic_load_i32 (&log_profiler.buffer_lock_state);
3631
3632 g_assert (!(state & 0xFFFF) && "Why is the reader count still non-zero?");
3633 g_assert (!(state >> 16) && "Why is the exclusive lock still held?");
3634
3635 #if defined (HAVE_SYS_ZLIB)
3636 if (prof->gzfile)
3637 gzclose (prof->gzfile);
3638 #endif
3639 if (prof->pipe_output)
3640 pclose (prof->file);
3641 else
3642 fclose (prof->file);
3643
3644 mono_conc_hashtable_destroy (prof->method_table);
3645 mono_os_mutex_destroy (&prof->method_table_mutex);
3646
3647 if (log_config.collect_coverage) {
3648 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3649 mono_conc_hashtable_foreach (log_profiler.coverage_assemblies, unref_coverage_assemblies, NULL);
3650 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3651
3652 mono_conc_hashtable_destroy (log_profiler.coverage_methods);
3653 mono_conc_hashtable_destroy (log_profiler.coverage_assemblies);
3654 mono_conc_hashtable_destroy (log_profiler.coverage_classes);
3655 mono_conc_hashtable_destroy (log_profiler.coverage_filtered_classes);
3656
3657 mono_conc_hashtable_destroy (log_profiler.coverage_image_to_methods);
3658 mono_conc_hashtable_destroy (log_profiler.coverage_suppressed_assemblies);
3659 mono_os_mutex_destroy (&log_profiler.coverage_mutex);
3660 }
3661
3662 mono_coop_mutex_destroy (&log_profiler.api_mutex);
3663
3664 PROF_TLS_FREE ();
3665
3666 g_free (prof->args);
3667 }
3668
3669 static char*
3670 new_filename (const char* filename)
3671 {
3672 time_t t = time (NULL);
3673 int pid = process_id ();
3674 char pid_buf [16];
3675 char time_buf [16];
3676 char *res, *d;
3677 const char *p;
3678 int count_dates = 0;
3679 int count_pids = 0;
3680 int s_date, s_pid;
3681 struct tm *ts;
3682 for (p = filename; *p; p++) {
3683 if (*p != '%')
3684 continue;
3685 p++;
3686 if (*p == 't')
3687 count_dates++;
3688 else if (*p == 'p')
3689 count_pids++;
3690 else if (*p == 0)
3691 break;
3692 }
3693 if (!count_dates && !count_pids)
3694 return pstrdup (filename);
3695 snprintf (pid_buf, sizeof (pid_buf), "%d", pid);
3696 ts = gmtime (&t);
3697 snprintf (time_buf, sizeof (time_buf), "%d%02d%02d%02d%02d%02d",
3698 1900 + ts->tm_year, 1 + ts->tm_mon, ts->tm_mday, ts->tm_hour, ts->tm_min, ts->tm_sec);
3699 s_date = strlen (time_buf);
3700 s_pid = strlen (pid_buf);
3701 d = res = (char *) g_malloc (strlen (filename) + s_date * count_dates + s_pid * count_pids);
3702 for (p = filename; *p; p++) {
3703 if (*p != '%') {
3704 *d++ = *p;
3705 continue;
3706 }
3707 p++;
3708 if (*p == 't') {
3709 strcpy (d, time_buf);
3710 d += s_date;
3711 continue;
3712 } else if (*p == 'p') {
3713 strcpy (d, pid_buf);
3714 d += s_pid;
3715 continue;
3716 } else if (*p == '%') {
3717 *d++ = '%';
3718 continue;
3719 } else if (*p == 0)
3720 break;
3721 *d++ = '%';
3722 *d++ = *p;
3723 }
3724 *d = 0;
3725 return res;
3726 }
3727
3728 static void
3729 add_to_fd_set (fd_set *set, int fd, int *max_fd)
3730 {
3731 /*
3732 * This should only trigger for the basic FDs (server socket, pipes) at
3733 * startup if for some mysterious reason they're too large. In this case,
3734 * the profiler really can't function, and we're better off printing an
3735 * error and exiting.
3736 */
3737 if (fd >= FD_SETSIZE) {
3738 mono_profiler_printf_err ("File descriptor is out of bounds for fd_set: %d", fd);
3739 exit (1);
3740 }
3741
3742 FD_SET (fd, set);
3743
3744 if (*max_fd < fd)
3745 *max_fd = fd;
3746 }
3747
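/*
 * The helper thread multiplexes, via select (), the shutdown pipe, the
 * command server socket, and any accepted command connections. It wakes up
 * at least once per second to sample (perf)counters when counter events are
 * enabled and to emit a periodic sync point. The only command currently
 * understood on a command connection is the text line "heapshot\n", which
 * triggers an on-demand heapshot.
 */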
3748 static void *
3749 helper_thread (void *arg)
3750 {
3751 mono_threads_attach_tools_thread ();
3752 mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler helper");
3753
3754 MonoProfilerThread *thread = init_thread (FALSE);
3755
3756 GArray *command_sockets = g_array_new (FALSE, FALSE, sizeof (int));
3757
3758 while (1) {
3759 fd_set rfds;
3760 int max_fd = -1;
3761
3762 FD_ZERO (&rfds);
3763
3764 add_to_fd_set (&rfds, log_profiler.server_socket, &max_fd);
3765 add_to_fd_set (&rfds, log_profiler.pipes [0], &max_fd);
3766
3767 for (gint i = 0; i < command_sockets->len; i++)
3768 add_to_fd_set (&rfds, g_array_index (command_sockets, int, i), &max_fd);
3769
3770 struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
3771
3772 // Sleep for up to one second, or wake early when a file descriptor has data.
3773 if (select (max_fd + 1, &rfds, NULL, NULL, &tv) == -1) {
3774 if (errno == EINTR)
3775 continue;
3776
3777 mono_profiler_printf_err ("Could not poll in log profiler helper thread: %s", g_strerror (errno));
3778 exit (1);
3779 }
3780
3781 if (ENABLED (PROFLOG_COUNTER_EVENTS))
3782 counters_and_perfcounters_sample ();
3783
3784 buffer_lock_excl ();
3785
3786 sync_point (SYNC_POINT_PERIODIC);
3787
3788 buffer_unlock_excl ();
3789
3790 // Are we shutting down?
3791 if (FD_ISSET (log_profiler.pipes [0], &rfds)) {
3792 char c;
3793 read (log_profiler.pipes [0], &c, 1);
3794 break;
3795 }
3796
3797 for (gint i = 0; i < command_sockets->len; i++) {
3798 int fd = g_array_index (command_sockets, int, i);
3799
3800 if (!FD_ISSET (fd, &rfds))
3801 continue;
3802
3803 char buf [64];
3804 int len = read (fd, buf, sizeof (buf) - 1);
3805
3806 if (len == -1)
3807 continue;
3808
3809 if (!len) {
3810 // The other end disconnected.
3811 g_array_remove_index (command_sockets, i);
3812 close (fd);
3813
3814 continue;
3815 }
3816
3817 buf [len] = 0;
3818
3819 if (!strcmp (buf, "heapshot\n"))
3820 trigger_heapshot ();
3821 }
3822
3823 if (FD_ISSET (log_profiler.server_socket, &rfds)) {
3824 int fd = accept (log_profiler.server_socket, NULL, NULL);
3825
3826 if (fd != -1) {
3827 if (fd >= FD_SETSIZE)
3828 close (fd);
3829 else
3830 g_array_append_val (command_sockets, fd);
3831 }
3832 }
3833 }
3834
3835 for (gint i = 0; i < command_sockets->len; i++)
3836 close (g_array_index (command_sockets, int, i));
3837
3838 g_array_free (command_sockets, TRUE);
3839
3840 send_log_unsafe (FALSE);
3841 deinit_thread (thread);
3842
3843 mono_thread_info_detach ();
3844
3845 return NULL;
3846 }
3847
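/*
 * Set up the channels the helper thread listens on: a pipe used to signal
 * shutdown, and a TCP server socket for the command port. Binding with a
 * command_port of 0 makes the kernel assign an ephemeral port, which is why
 * the bound address is read back with getsockname () below.
 */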
3848 static void
3849 start_helper_thread (void)
3850 {
3851 if (pipe (log_profiler.pipes) == -1) {
3852 mono_profiler_printf_err ("Could not create log profiler pipe: %s", g_strerror (errno));
3853 exit (1);
3854 }
3855
3856 log_profiler.server_socket = socket (PF_INET, SOCK_STREAM, 0);
3857
3858 if (log_profiler.server_socket == -1) {
3859 mono_profiler_printf_err ("Could not create log profiler server socket: %s", g_strerror (errno));
3860 exit (1);
3861 }
3862
3863 struct sockaddr_in server_address;
3864
3865 memset (&server_address, 0, sizeof (server_address));
3866 server_address.sin_family = AF_INET;
3867 server_address.sin_addr.s_addr = INADDR_ANY;
3868 server_address.sin_port = htons (log_profiler.command_port);
3869
3870 if (bind (log_profiler.server_socket, (struct sockaddr *) &server_address, sizeof (server_address)) == -1) {
3871 mono_profiler_printf_err ("Could not bind log profiler server socket on port %d: %s", log_profiler.command_port, g_strerror (errno));
3872 close (log_profiler.server_socket);
3873 exit (1);
3874 }
3875
3876 if (listen (log_profiler.server_socket, 1) == -1) {
3877 mono_profiler_printf_err ("Could not listen on log profiler server socket: %s", g_strerror (errno));
3878 close (log_profiler.server_socket);
3879 exit (1);
3880 }
3881
3882 socklen_t slen = sizeof (server_address);
3883
3884 if (getsockname (log_profiler.server_socket, (struct sockaddr *) &server_address, &slen)) {
3885 mono_profiler_printf_err ("Could not retrieve assigned port for log profiler server socket: %s", g_strerror (errno));
3886 close (log_profiler.server_socket);
3887 exit (1);
3888 }
3889
3890 log_profiler.command_port = ntohs (server_address.sin_port);
3891
3892 if (!mono_native_thread_create (&log_profiler.helper_thread, helper_thread, NULL)) {
3893 mono_profiler_printf_err ("Could not start log profiler helper thread");
3894 close (log_profiler.server_socket);
3895 exit (1);
3896 }
3897 }
3898
3899 static void
3900 free_writer_entry (gpointer p)
3901 {
3902 mono_lock_free_free (p, WRITER_ENTRY_BLOCK_SIZE);
3903 }
3904
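/*
 * Process one entry from the writer queue, if any. Method JIT metadata is
 * written out (via a temporary thread-local buffer) before the entry's main
 * buffer, so that method references in the stream always follow their
 * metadata. Returns TRUE if an entry was processed, which lets the shutdown
 * path drain the queue with a simple loop.
 */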
3905 static gboolean
3906 handle_writer_queue_entry (void)
3907 {
3908 WriterQueueEntry *entry;
3909
3910 if ((entry = (WriterQueueEntry *) mono_lock_free_queue_dequeue (&log_profiler.writer_queue))) {
3911 if (!entry->methods)
3912 goto no_methods;
3913
3914 gboolean wrote_methods = FALSE;
3915
3916 /*
3917 * Encode the method events in a temporary log buffer that we
3918 * flush to disk before the main buffer, ensuring that all
3919 * methods have metadata emitted before they're referenced.
3920 *
3921 * We use a 'proper' thread-local buffer for this as opposed
3922 * to allocating and freeing a buffer by hand because the call
3923 * to mono_method_full_name () below may trigger class load
3924 * events when it retrieves the signature of the method. So a
3925 * thread-local buffer needs to exist when such events occur.
3926 */
3927 for (guint i = 0; i < entry->methods->len; i++) {
3928 MethodInfo *info = (MethodInfo *) g_ptr_array_index (entry->methods, i);
3929
3930 if (mono_conc_hashtable_lookup (log_profiler.method_table, info->method))
3931 goto free_info; // This method already has metadata emitted.
3932
3933 /*
3934 * Other threads use this hash table to get a general
3935 * idea of whether a method has already been emitted to
3936 * the stream. Due to the way we add to this table, it
3937 * can easily happen that multiple threads queue up the
3938 * same methods, but that's OK since eventually all
3939 * methods will be in this table and the thread-local
3940 * method lists will just be empty for the rest of the
3941 * app's lifetime.
3942 */
3943 mono_os_mutex_lock (&log_profiler.method_table_mutex);
3944 mono_conc_hashtable_insert (log_profiler.method_table, info->method, info->method);
3945 mono_os_mutex_unlock (&log_profiler.method_table_mutex);
3946
3947 char *name = mono_method_full_name (info->method, 1);
3948 int nlen = strlen (name) + 1;
3949 void *cstart = info->ji ? mono_jit_info_get_code_start (info->ji) : NULL;
3950 int csize = info->ji ? mono_jit_info_get_code_size (info->ji) : 0;
3951
3952 ENTER_LOG (&method_jits_ctr, logbuffer,
3953 EVENT_SIZE /* event */ +
3954 LEB128_SIZE /* method */ +
3955 LEB128_SIZE /* start */ +
3956 LEB128_SIZE /* size */ +
3957 nlen /* name */
3958 );
3959
3960 emit_event_time (logbuffer, TYPE_JIT | TYPE_METHOD, info->time);
3961 emit_method_inner (logbuffer, info->method);
3962 emit_ptr (logbuffer, cstart);
3963 emit_value (logbuffer, csize);
3964
3965 memcpy (logbuffer->cursor, name, nlen);
3966 logbuffer->cursor += nlen;
3967
3968 EXIT_LOG_EXPLICIT (NO_SEND);
3969
3970 mono_free (name);
3971
3972 wrote_methods = TRUE;
3973
3974 free_info:
3975 g_free (info);
3976 }
3977
3978 g_ptr_array_free (entry->methods, TRUE);
3979
3980 if (wrote_methods) {
3981 MonoProfilerThread *thread = get_thread ();
3982
3983 dump_buffer_threadless (thread->buffer);
3984 init_buffer_state (thread);
3985 }
3986
3987 no_methods:
3988 dump_buffer (entry->buffer);
3989
3990 mono_thread_hazardous_try_free (entry, free_writer_entry);
3991
3992 return TRUE;
3993 }
3994
3995 return FALSE;
3996 }
3997
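/*
 * The writer thread emits the log header, then processes writer queue
 * entries as they are posted to writer_queue_sem, draining whatever is left
 * once shutdown is requested.
 */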
3998 static void *
3999 writer_thread (void *arg)
4000 {
4001 mono_threads_attach_tools_thread ();
4002 mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler writer");
4003
4004 dump_header ();
4005
4006 MonoProfilerThread *thread = init_thread (FALSE);
4007
4008 while (mono_atomic_load_i32 (&log_profiler.run_writer_thread)) {
4009 mono_os_sem_wait (&log_profiler.writer_queue_sem, MONO_SEM_FLAGS_NONE);
4010 handle_writer_queue_entry ();
4011 }
4012
4013 /* Drain any remaining entries on shutdown. */
4014 while (handle_writer_queue_entry ());
4015
4016 free_buffer (thread->buffer, thread->buffer->size);
4017 deinit_thread (thread);
4018
4019 mono_thread_info_detach ();
4020
4021 return NULL;
4022 }
4023
4024 static void
4025 start_writer_thread (void)
4026 {
4027 mono_atomic_store_i32 (&log_profiler.run_writer_thread, 1);
4028
4029 if (!mono_native_thread_create (&log_profiler.writer_thread, writer_thread, NULL)) {
4030 mono_profiler_printf_err ("Could not start log profiler writer thread");
4031 exit (1);
4032 }
4033 }
4034
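/*
 * Hazard-pointer free function for sample hits: instead of releasing the
 * memory, recycle the node into the sample reuse queue so the sampling hot
 * path can avoid fresh allocations.
 */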
4035 static void
4036 reuse_sample_hit (gpointer p)
4037 {
4038 SampleHit *sample = p;
4039
4040 mono_lock_free_queue_node_unpoison (&sample->node);
4041 mono_lock_free_queue_enqueue (&log_profiler.sample_reuse_queue, &sample->node);
4042 }
4043
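/*
 * Process one sample from the dumper queue, if any: resolve each frame's
 * MonoMethod from its instruction pointer where necessary, emit the sample
 * hit event, and hand the sample back to the reuse queue.
 */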
4044 static gboolean
4045 handle_dumper_queue_entry (void)
4046 {
4047 SampleHit *sample;
4048
4049 if ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&log_profiler.dumper_queue))) {
4050 for (int i = 0; i < sample->count; ++i) {
4051 MonoMethod *method = sample->frames [i].method;
4052 MonoDomain *domain = sample->frames [i].domain;
4053 void *address = sample->frames [i].base_address;
4054
4055 if (!method) {
4056 g_assert (domain && "What happened to the domain pointer?");
4057 g_assert (address && "What happened to the instruction pointer?");
4058
4059 MonoJitInfo *ji = mono_jit_info_table_find (domain, (char *) address);
4060
4061 if (ji)
4062 sample->frames [i].method = mono_jit_info_get_method (ji);
4063 }
4064 }
4065
4066 ENTER_LOG (&sample_hits_ctr, logbuffer,
4067 EVENT_SIZE /* event */ +
4068 LEB128_SIZE /* tid */ +
4069 LEB128_SIZE /* count */ +
4070 1 * (
4071 LEB128_SIZE /* ip */
4072 ) +
4073 LEB128_SIZE /* managed count */ +
4074 sample->count * (
4075 LEB128_SIZE /* method */
4076 )
4077 );
4078
4079 emit_event_time (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_HIT, sample->time);
4080 emit_ptr (logbuffer, (void *) sample->tid);
4081 emit_value (logbuffer, 1);
4082
4083 // TODO: Actual native unwinding.
4084 for (int i = 0; i < 1; ++i) {
4085 emit_ptr (logbuffer, sample->ip);
4086 add_code_pointer ((uintptr_t) sample->ip);
4087 }
4088
4089 /* new in data version 6 */
4090 emit_uvalue (logbuffer, sample->count);
4091
4092 for (int i = 0; i < sample->count; ++i)
4093 emit_method (logbuffer, sample->frames [i].method);
4094
4095 EXIT_LOG;
4096
4097 mono_thread_hazardous_try_free (sample, reuse_sample_hit);
4098
4099 dump_unmanaged_coderefs ();

/*
 * Mirror handle_writer_queue_entry (): report that an entry was processed
 * so the shutdown drain loop keeps going until the queue is empty.
 */
return TRUE;
4100 }
4101
4102 return FALSE;
4103 }
4104
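/*
 * The dumper thread turns queued sample hits into log events. It waits on
 * dumper_queue_sem with a one-second timeout so buffered data still gets
 * flushed while the program is idle.
 */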
4105 static void *
4106 dumper_thread (void *arg)
4107 {
4108 mono_threads_attach_tools_thread ();
4109 mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler dumper");
4110
4111 MonoProfilerThread *thread = init_thread (FALSE);
4112
4113 while (mono_atomic_load_i32 (&log_profiler.run_dumper_thread)) {
4114 /*
4115 * Flush buffered samples every second so the profiler doesn't appear to
4116 * be stalled while the program is mostly idle.
4117 */
4118 if (mono_os_sem_timedwait (&log_profiler.dumper_queue_sem, 1000, MONO_SEM_FLAGS_NONE) == MONO_SEM_TIMEDWAIT_RET_TIMEDOUT)
4119 send_log_unsafe (FALSE);
4120
4121 handle_dumper_queue_entry ();
4122 }
4123
4124 /* Drain any remaining entries on shutdown. */
4125 while (handle_dumper_queue_entry ());
4126
4127 send_log_unsafe (FALSE);
4128 deinit_thread (thread);
4129
4130 mono_thread_info_detach ();
4131
4132 return NULL;
4133 }
4134
4135 static void
4136 start_dumper_thread (void)
4137 {
4138 mono_atomic_store_i32 (&log_profiler.run_dumper_thread, 1);
4139
4140 if (!mono_native_thread_create (&log_profiler.dumper_thread, dumper_thread, NULL)) {
4141 mono_profiler_printf_err ("Could not start log profiler dumper thread");
4142 exit (1);
4143 }
4144 }
4145
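/*
 * Thin wrapper: register a monotonically increasing, profiler-owned counter
 * with the runtime's counters infrastructure.
 */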
4146 static void
4147 register_counter (const char *name, gint32 *counter)
4148 {
4149 mono_counters_register (name, MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, counter);
4150 }
4151
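/*
 * The proflog_icall_* functions below are exposed to managed code as
 * internal calls on the Mono.Profiler.Log.LogProfiler class; see the
 * ADD_ICALL registrations in runtime_initialized (). Setters that install
 * or remove event callbacks do so under api_mutex, so concurrent managed
 * callers can't interleave the ENABLE/DISABLE flag and callback updates.
 */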
4152 ICALL_EXPORT gint32
4153 proflog_icall_GetMaxStackTraceFrames (void)
4154 {
4155 return MAX_FRAMES;
4156 }
4157
4158 ICALL_EXPORT gint32
4159 proflog_icall_GetStackTraceFrames (void)
4160 {
4161 return log_config.num_frames;
4162 }
4163
4164 ICALL_EXPORT void
4165 proflog_icall_SetStackTraceFrames (gint32 value)
4166 {
4167 log_config.num_frames = value;
4168 }
4169
4170 ICALL_EXPORT MonoProfilerHeapshotMode
4171 proflog_icall_GetHeapshotMode (void)
4172 {
4173 return log_config.hs_mode;
4174 }
4175
4176 ICALL_EXPORT void
4177 proflog_icall_SetHeapshotMode (MonoProfilerHeapshotMode value)
4178 {
4179 log_config.hs_mode = value;
4180 }
4181
4182 ICALL_EXPORT gint32
4183 proflog_icall_GetHeapshotMillisecondsFrequency (void)
4184 {
4185 return log_config.hs_freq_ms;
4186 }
4187
4188 ICALL_EXPORT void
4189 proflog_icall_SetHeapshotMillisecondsFrequency (gint32 value)
4190 {
4191 log_config.hs_freq_ms = value;
4192 }
4193
4194 ICALL_EXPORT gint32
4195 proflog_icall_GetHeapshotCollectionsFrequency (void)
4196 {
4197 return log_config.hs_freq_gc;
4198 }
4199
4200 ICALL_EXPORT void
4201 proflog_icall_SetHeapshotCollectionsFrequency (gint32 value)
4202 {
4203 log_config.hs_freq_gc = value;
4204 }
4205
4206 ICALL_EXPORT void
4207 proflog_icall_TriggerHeapshot (void)
4208 {
4209 trigger_heapshot ();
4210 }
4211
4212 ICALL_EXPORT gint32
4213 proflog_icall_GetCallDepth (void)
4214 {
4215 return log_config.max_call_depth;
4216 }
4217
4218 ICALL_EXPORT void
4219 proflog_icall_SetCallDepth (gint32 value)
4220 {
4221 log_config.max_call_depth = value;
4222 }
4223
4224 ICALL_EXPORT void
4225 proflog_icall_GetSampleMode (MonoProfilerSampleMode *mode, gint32 *frequency)
4226 {
4227 uint32_t freq;
4228
4229 mono_profiler_get_sample_mode (log_profiler.handle, mode, &freq);
4230
4231 *frequency = freq;
4232 }
4233
4234 ICALL_EXPORT MonoBoolean
4235 proflog_icall_SetSampleMode (MonoProfilerSampleMode mode, gint32 frequency)
4236 {
4237 mono_coop_mutex_lock (&log_profiler.api_mutex);
4238
4239 mono_bool result = mono_profiler_set_sample_mode (log_profiler.handle, mode, frequency);
4240
4241 if (mode != MONO_PROFILER_SAMPLE_MODE_NONE) {
4242 ENABLE (PROFLOG_SAMPLE_EVENTS);
4243 mono_profiler_set_sample_hit_callback (log_profiler.handle, mono_sample_hit);
4244 } else {
4245 DISABLE (PROFLOG_SAMPLE_EVENTS);
4246 mono_profiler_set_sample_hit_callback (log_profiler.handle, NULL);
4247 }
4248
4249 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4250
4251 return result;
4252 }
4253
4254 ICALL_EXPORT MonoBoolean
4255 proflog_icall_GetExceptionEvents (void)
4256 {
4257 return ENABLED (PROFLOG_EXCEPTION_EVENTS);
4258 }
4259
4260 ICALL_EXPORT void
4261 proflog_icall_SetExceptionEvents (MonoBoolean value)
4262 {
4263 mono_coop_mutex_lock (&log_profiler.api_mutex);
4264
4265 if (value) {
4266 ENABLE (PROFLOG_EXCEPTION_EVENTS);
4267 mono_profiler_set_exception_throw_callback (log_profiler.handle, throw_exc);
4268 mono_profiler_set_exception_clause_callback (log_profiler.handle, clause_exc);
4269 } else {
4270 DISABLE (PROFLOG_EXCEPTION_EVENTS);
4271 mono_profiler_set_exception_throw_callback (log_profiler.handle, NULL);
4272 mono_profiler_set_exception_clause_callback (log_profiler.handle, NULL);
4273 }
4274
4275 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4276 }
4277
4278 ICALL_EXPORT MonoBoolean
4279 proflog_icall_GetMonitorEvents (void)
4280 {
4281 return ENABLED (PROFLOG_MONITOR_EVENTS);
4282 }
4283
4284 ICALL_EXPORT void
4285 proflog_icall_SetMonitorEvents (MonoBoolean value)
4286 {
4287 mono_coop_mutex_lock (&log_profiler.api_mutex);
4288
4289 if (value) {
4290 ENABLE (PROFLOG_MONITOR_EVENTS);
4291 mono_profiler_set_monitor_contention_callback (log_profiler.handle, monitor_contention);
4292 mono_profiler_set_monitor_acquired_callback (log_profiler.handle, monitor_acquired);
4293 mono_profiler_set_monitor_failed_callback (log_profiler.handle, monitor_failed);
4294 } else {
4295 DISABLE (PROFLOG_MONITOR_EVENTS);
4296 mono_profiler_set_monitor_contention_callback (log_profiler.handle, NULL);
4297 mono_profiler_set_monitor_acquired_callback (log_profiler.handle, NULL);
4298 mono_profiler_set_monitor_failed_callback (log_profiler.handle, NULL);
4299 }
4300
4301 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4302 }
4303
4304 ICALL_EXPORT MonoBoolean
4305 proflog_icall_GetGCEvents (void)
4306 {
4307 return ENABLED (PROFLOG_GC_EVENTS);
4308 }
4309
4310 ICALL_EXPORT void
4311 proflog_icall_SetGCEvents (MonoBoolean value)
4312 {
4313 mono_coop_mutex_lock (&log_profiler.api_mutex);
4314
4315 if (value)
4316 ENABLE (PROFLOG_GC_EVENTS);
4317 else
4318 DISABLE (PROFLOG_GC_EVENTS);
4319
4320 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4321 }
4322
4323 ICALL_EXPORT MonoBoolean
4324 proflog_icall_GetGCAllocationEvents (void)
4325 {
4326 return ENABLED (PROFLOG_GC_ALLOCATION_EVENTS);
4327 }
4328
4329 ICALL_EXPORT void
4330 proflog_icall_SetGCAllocationEvents (MonoBoolean value)
4331 {
4332 mono_coop_mutex_lock (&log_profiler.api_mutex);
4333
4334 if (value) {
4335 ENABLE (PROFLOG_GC_ALLOCATION_EVENTS);
4336 mono_profiler_set_gc_allocation_callback (log_profiler.handle, gc_alloc);
4337 } else {
4338 DISABLE (PROFLOG_GC_ALLOCATION_EVENTS);
4339 mono_profiler_set_gc_allocation_callback (log_profiler.handle, NULL);
4340 }
4341
4342 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4343 }
4344
4345 ICALL_EXPORT MonoBoolean
4346 proflog_icall_GetGCMoveEvents (void)
4347 {
4348 return ENABLED (PROFLOG_GC_MOVE_EVENTS);
4349 }
4350
4351 ICALL_EXPORT void
4352 proflog_icall_SetGCMoveEvents (MonoBoolean value)
4353 {
4354 mono_coop_mutex_lock (&log_profiler.api_mutex);
4355
4356 if (value) {
4357 ENABLE (PROFLOG_GC_MOVE_EVENTS);
4358 mono_profiler_set_gc_moves_callback (log_profiler.handle, gc_moves);
4359 } else {
4360 DISABLE (PROFLOG_GC_MOVE_EVENTS);
4361 mono_profiler_set_gc_moves_callback (log_profiler.handle, NULL);
4362 }
4363
4364 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4365 }
4366
4367 ICALL_EXPORT MonoBoolean
4368 proflog_icall_GetGCRootEvents (void)
4369 {
4370 return ENABLED (PROFLOG_GC_ROOT_EVENTS);
4371 }
4372
4373 ICALL_EXPORT void
4374 proflog_icall_SetGCRootEvents (MonoBoolean value)
4375 {
4376 mono_coop_mutex_lock (&log_profiler.api_mutex);
4377
4378 if (value)
4379 ENABLE (PROFLOG_GC_ROOT_EVENTS);
4380 else
4381 DISABLE (PROFLOG_GC_ROOT_EVENTS);
4382
4383 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4384 }
4385
4386 ICALL_EXPORT MonoBoolean
4387 proflog_icall_GetGCHandleEvents (void)
4388 {
4389 return ENABLED (PROFLOG_GC_HANDLE_EVENTS);
4390 }
4391
4392 ICALL_EXPORT void
4393 proflog_icall_SetGCHandleEvents (MonoBoolean value)
4394 {
4395 mono_coop_mutex_lock (&log_profiler.api_mutex);
4396
4397 if (value) {
4398 ENABLE (PROFLOG_GC_HANDLE_EVENTS);
4399 mono_profiler_set_gc_handle_created_callback (log_profiler.handle, gc_handle_created);
4400 mono_profiler_set_gc_handle_deleted_callback (log_profiler.handle, gc_handle_deleted);
4401 } else {
4402 DISABLE (PROFLOG_GC_HANDLE_EVENTS);
4403 mono_profiler_set_gc_handle_created_callback (log_profiler.handle, NULL);
4404 mono_profiler_set_gc_handle_deleted_callback (log_profiler.handle, NULL);
4405 }
4406
4407 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4408 }
4409
4410 ICALL_EXPORT MonoBoolean
4411 proflog_icall_GetGCFinalizationEvents (void)
4412 {
4413 return ENABLED (PROFLOG_GC_FINALIZATION_EVENTS);
4414 }
4415
4416 ICALL_EXPORT void
4417 proflog_icall_SetGCFinalizationEvents (MonoBoolean value)
4418 {
4419 mono_coop_mutex_lock (&log_profiler.api_mutex);
4420
4421 if (value) {
4422 ENABLE (PROFLOG_GC_FINALIZATION_EVENTS);
4423 mono_profiler_set_gc_finalizing_callback (log_profiler.handle, finalize_begin);
4424 mono_profiler_set_gc_finalizing_object_callback (log_profiler.handle, finalize_object_begin);
4425 mono_profiler_set_gc_finalized_object_callback (log_profiler.handle, finalize_object_end);
4426 } else {
4427 DISABLE (PROFLOG_GC_FINALIZATION_EVENTS);
4428 mono_profiler_set_gc_finalizing_callback (log_profiler.handle, NULL);
4429 mono_profiler_set_gc_finalizing_object_callback (log_profiler.handle, NULL);
4430 mono_profiler_set_gc_finalized_object_callback (log_profiler.handle, NULL);
4431 }
4432
4433 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4434 }
4435
4436 ICALL_EXPORT MonoBoolean
4437 proflog_icall_GetCounterEvents (void)
4438 {
4439 return ENABLED (PROFLOG_COUNTER_EVENTS);
4440 }
4441
4442 ICALL_EXPORT void
4443 proflog_icall_SetCounterEvents (MonoBoolean value)
4444 {
4445 mono_coop_mutex_lock (&log_profiler.api_mutex);
4446
4447 if (value)
4448 ENABLE (PROFLOG_COUNTER_EVENTS);
4449 else
4450 DISABLE (PROFLOG_COUNTER_EVENTS);
4451
4452 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4453 }
4454
4455 ICALL_EXPORT MonoBoolean
4456 proflog_icall_GetJitEvents (void)
4457 {
4458 return ENABLED (PROFLOG_JIT_EVENTS);
4459 }
4460
4461 ICALL_EXPORT void
4462 proflog_icall_SetJitEvents (MonoBoolean value)
4463 {
4464 mono_coop_mutex_lock (&log_profiler.api_mutex);
4465
4466 if (value) {
4467 ENABLE (PROFLOG_JIT_EVENTS);
4468 mono_profiler_set_jit_code_buffer_callback (log_profiler.handle, code_buffer_new);
4469 } else {
4470 DISABLE (PROFLOG_JIT_EVENTS);
4471 mono_profiler_set_jit_code_buffer_callback (log_profiler.handle, NULL);
4472 }
4473
4474 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4475 }
4476
4477 static void
4478 runtime_initialized (MonoProfiler *profiler)
4479 {
4480 mono_atomic_store_i32 (&log_profiler.runtime_inited, 1);
4481
4482 register_counter ("Sample events allocated", &sample_allocations_ctr);
4483 register_counter ("Log buffers allocated", &buffer_allocations_ctr);
4484
4485 register_counter ("Event: Sync points", &sync_points_ctr);
4486 register_counter ("Event: Heap objects", &heap_objects_ctr);
4487 register_counter ("Event: Heap starts", &heap_starts_ctr);
4488 register_counter ("Event: Heap ends", &heap_ends_ctr);
4489 register_counter ("Event: Heap roots", &heap_roots_ctr);
4490 register_counter ("Event: GC events", &gc_events_ctr);
4491 register_counter ("Event: GC resizes", &gc_resizes_ctr);
4492 register_counter ("Event: GC allocations", &gc_allocs_ctr);
4493 register_counter ("Event: GC moves", &gc_moves_ctr);
4494 register_counter ("Event: GC handle creations", &gc_handle_creations_ctr);
4495 register_counter ("Event: GC handle deletions", &gc_handle_deletions_ctr);
4496 register_counter ("Event: GC finalize starts", &finalize_begins_ctr);
4497 register_counter ("Event: GC finalize ends", &finalize_ends_ctr);
4498 register_counter ("Event: GC finalize object starts", &finalize_object_begins_ctr);
4499 register_counter ("Event: GC finalize object ends", &finalize_object_ends_ctr);
4500 register_counter ("Event: Image loads", &image_loads_ctr);
4501 register_counter ("Event: Image unloads", &image_unloads_ctr);
4502 register_counter ("Event: Assembly loads", &assembly_loads_ctr);
4503 register_counter ("Event: Assembly unloads", &assembly_unloads_ctr);
4504 register_counter ("Event: Class loads", &class_loads_ctr);
4505 register_counter ("Event: Class unloads", &class_unloads_ctr);
4506 register_counter ("Event: Method entries", &method_entries_ctr);
4507 register_counter ("Event: Method exits", &method_exits_ctr);
4508 register_counter ("Event: Method exception leaves", &method_exception_exits_ctr);
4509 register_counter ("Event: Method JITs", &method_jits_ctr);
4510 register_counter ("Event: Code buffers", &code_buffers_ctr);
4511 register_counter ("Event: Exception throws", &exception_throws_ctr);
4512 register_counter ("Event: Exception clauses", &exception_clauses_ctr);
4513 register_counter ("Event: Monitor events", &monitor_events_ctr);
4514 register_counter ("Event: Thread starts", &thread_starts_ctr);
4515 register_counter ("Event: Thread ends", &thread_ends_ctr);
4516 register_counter ("Event: Thread names", &thread_names_ctr);
4517 register_counter ("Event: Domain loads", &domain_loads_ctr);
4518 register_counter ("Event: Domain unloads", &domain_unloads_ctr);
4519 register_counter ("Event: Domain names", &domain_names_ctr);
4520 register_counter ("Event: Context loads", &context_loads_ctr);
4521 register_counter ("Event: Context unloads", &context_unloads_ctr);
4522 register_counter ("Event: Sample binaries", &sample_ubins_ctr);
4523 register_counter ("Event: Sample symbols", &sample_usyms_ctr);
4524 register_counter ("Event: Sample hits", &sample_hits_ctr);
4525 register_counter ("Event: Counter descriptors", &counter_descriptors_ctr);
4526 register_counter ("Event: Counter samples", &counter_samples_ctr);
4527 register_counter ("Event: Performance counter descriptors", &perfcounter_descriptors_ctr);
4528 register_counter ("Event: Performance counter samples", &perfcounter_samples_ctr);
4529 register_counter ("Event: Coverage methods", &coverage_methods_ctr);
4530 register_counter ("Event: Coverage statements", &coverage_statements_ctr);
4531 register_counter ("Event: Coverage classes", &coverage_classes_ctr);
4532 register_counter ("Event: Coverage assemblies", &coverage_assemblies_ctr);
4533
4534 counters_init ();
4535
4536 /*
4537 * We must start the helper thread before the writer thread. This is
4538 * because the helper thread sets up the command port which is written to
4539 * the log header by the writer thread.
4540 */
4541 start_helper_thread ();
4542 start_writer_thread ();
4543 start_dumper_thread ();
4544
4545 mono_coop_mutex_init (&log_profiler.api_mutex);
4546
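/*
 * Register each icall under its managed name. For instance,
 * ADD_ICALL (GetCallDepth) expands to:
 *
 *     mono_add_internal_call ("Mono.Profiler.Log.LogProfiler::GetCallDepth",
 *                             proflog_icall_GetCallDepth);
 */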
4547 #define ADD_ICALL(NAME) \
4548 mono_add_internal_call ("Mono.Profiler.Log.LogProfiler::" EGLIB_STRINGIFY (NAME), proflog_icall_ ## NAME);
4549
4550 ADD_ICALL (GetMaxStackTraceFrames);
4551 ADD_ICALL (GetStackTraceFrames);
4552 ADD_ICALL (SetStackTraceFrames);
4553 ADD_ICALL (GetHeapshotMode);
4554 ADD_ICALL (SetHeapshotMode);
4555 ADD_ICALL (GetHeapshotMillisecondsFrequency);
4556 ADD_ICALL (SetHeapshotMillisecondsFrequency);
4557 ADD_ICALL (GetHeapshotCollectionsFrequency);
4558 ADD_ICALL (SetHeapshotCollectionsFrequency);
4559 ADD_ICALL (TriggerHeapshot);
4560 ADD_ICALL (GetCallDepth);
4561 ADD_ICALL (SetCallDepth);
4562 ADD_ICALL (GetSampleMode);
4563 ADD_ICALL (SetSampleMode);
4564 ADD_ICALL (GetExceptionEvents);
4565 ADD_ICALL (SetExceptionEvents);
4566 ADD_ICALL (GetMonitorEvents);
4567 ADD_ICALL (SetMonitorEvents);
4568 ADD_ICALL (GetGCEvents);
4569 ADD_ICALL (SetGCEvents);
4570 ADD_ICALL (GetGCAllocationEvents);
4571 ADD_ICALL (SetGCAllocationEvents);
4572 ADD_ICALL (GetGCMoveEvents);
4573 ADD_ICALL (SetGCMoveEvents);
4574 ADD_ICALL (GetGCRootEvents);
4575 ADD_ICALL (SetGCRootEvents);
4576 ADD_ICALL (GetGCHandleEvents);
4577 ADD_ICALL (SetGCHandleEvents);
4578 ADD_ICALL (GetGCFinalizationEvents);
4579 ADD_ICALL (SetGCFinalizationEvents);
4580 ADD_ICALL (GetCounterEvents);
4581 ADD_ICALL (SetCounterEvents);
4582 ADD_ICALL (GetJitEvents);
4583 ADD_ICALL (SetJitEvents);
4584
4585 #undef ADD_ICALL
4586 }
4587
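/*
 * Open the output target and initialize the profiler's allocators, queues,
 * and mutexes. The output name determines the target kind: a leading '|'
 * pipes the log into the named command, a leading '#' appends to an already
 * open file descriptor (e.g. "#3" for an inherited fd 3), and anything else
 * is treated as a filename template for new_filename (). With no name at
 * all, the log goes to "output.mlpd", or is piped straight into
 * mprof-report when report mode is requested.
 */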
4588 static void
4589 create_profiler (const char *args, const char *filename, GPtrArray *filters)
4590 {
4591 char *nf;
4592
4593 log_profiler.args = pstrdup (args);
4594 log_profiler.command_port = log_config.command_port;
4595
4596 // If the filename begins with '+', append the pid to the end.
4597 if (filename && *filename == '+')
4598 filename = g_strdup_printf ("%s.%d", filename + 1, getpid ());
4599
4600 if (!filename) {
4601 if (log_config.do_report)
4602 filename = "|mprof-report -";
4603 else
4604 filename = "output.mlpd";
4605 nf = (char*)filename;
4606 } else {
4607 nf = new_filename (filename);
4608 if (log_config.do_report) {
4609 int s = strlen (nf) + 32;
4610 char *p = (char *) g_malloc (s);
4611 snprintf (p, s, "|mprof-report '--out=%s' -", nf);
4612 g_free (nf);
4613 nf = p;
4614 }
4615 }
4616 if (*nf == '|') {
4617 log_profiler.file = popen (nf + 1, "w");
4618 log_profiler.pipe_output = 1;
4619 } else if (*nf == '#') {
4620 int fd = strtol (nf + 1, NULL, 10);
4621 log_profiler.file = fdopen (fd, "a");
4622 } else
4623 log_profiler.file = fopen (nf, "wb");
4624
4625 if (!log_profiler.file) {
4626 mono_profiler_printf_err ("Could not create log profiler output file '%s': %s", nf, g_strerror (errno));
4627 exit (1);
4628 }
4629
4630 #if defined (HAVE_SYS_ZLIB)
4631 if (log_config.use_zip)
4632 log_profiler.gzfile = gzdopen (fileno (log_profiler.file), "wb");
4633 #endif
4634
4635 // FIXME: We should free this stuff too.
4636 mono_lock_free_allocator_init_size_class (&log_profiler.sample_size_class, SAMPLE_SLOT_SIZE (log_config.num_frames), SAMPLE_BLOCK_SIZE);
4637 mono_lock_free_allocator_init_allocator (&log_profiler.sample_allocator, &log_profiler.sample_size_class, MONO_MEM_ACCOUNT_PROFILER);
4638
4639 mono_lock_free_queue_init (&log_profiler.sample_reuse_queue);
4640
4641 // FIXME: We should free this stuff too.
4642 mono_lock_free_allocator_init_size_class (&log_profiler.writer_entry_size_class, sizeof (WriterQueueEntry), WRITER_ENTRY_BLOCK_SIZE);
4643 mono_lock_free_allocator_init_allocator (&log_profiler.writer_entry_allocator, &log_profiler.writer_entry_size_class, MONO_MEM_ACCOUNT_PROFILER);
4644
4645 mono_lock_free_queue_init (&log_profiler.writer_queue);
4646 mono_os_sem_init (&log_profiler.writer_queue_sem, 0);
4647
4648 mono_lock_free_queue_init (&log_profiler.dumper_queue);
4649 mono_os_sem_init (&log_profiler.dumper_queue_sem, 0);
4650
4651 mono_os_mutex_init (&log_profiler.method_table_mutex);
4652 log_profiler.method_table = mono_conc_hashtable_new (NULL, NULL);
4653
4654 if (log_config.collect_coverage)
4655 coverage_init ();
4656
4657 log_profiler.coverage_filters = filters;
4658
4659 log_profiler.startup_time = current_time ();
4660 }
4661
4662 MONO_API void
4663 mono_profiler_init_log (const char *desc);
4664
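/*
 * Entry point invoked by the runtime when it is started with the log
 * profiler enabled, e.g. with a descriptor such as
 * "log:heapshot,output=out.mlpd" (the option names here are illustrative).
 * Everything after the "log:" prefix, if present, is handed to the option
 * parser.
 */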
4665 void
4666 mono_profiler_init_log (const char *desc)
4667 {
4668 /*
4669 * If you hit this assert while increasing MAX_FRAMES, you need to increase
4670 * SAMPLE_BLOCK_SIZE as well.
4671 */
4672 g_assert (SAMPLE_SLOT_SIZE (MAX_FRAMES) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (SAMPLE_BLOCK_SIZE));
4673 g_assert (sizeof (WriterQueueEntry) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (WRITER_ENTRY_BLOCK_SIZE));
4674
4675 GPtrArray *filters = NULL;
4676
4677 proflog_parse_args (&log_config, desc [3] == ':' ? desc + 4 : "");
4678
4679 if (log_config.cov_filter_files) {
4680 filters = g_ptr_array_new ();
4681 int i;
4682 for (i = 0; i < log_config.cov_filter_files->len; ++i) {
4683 const char *name = log_config.cov_filter_files->pdata [i];
4684 parse_cov_filter_file (filters, name);
4685 }
4686 }
4687
4688 MonoProfilerHandle handle = log_profiler.handle = mono_profiler_create (&log_profiler);
4689
4690 if (log_config.enter_leave)
4691 mono_profiler_set_call_instrumentation_filter_callback (handle, method_filter);
4692
4693 /*
4694 * If the runtime was invoked for the purpose of AOT compilation only, the
4695 * only thing we want to do is install the call instrumentation filter.
4696 */
4697 if (mono_jit_aot_compiling ())
4698 goto done;
4699
4700 init_time ();
4701
4702 PROF_TLS_INIT ();
4703
4704 create_profiler (desc, log_config.output_filename, filters);
4705
4706 mono_lls_init (&log_profiler.profiler_thread_list, NULL);
4707
4708 /*
4709 * Required callbacks. These are either necessary for the profiler itself
4710 * to function, or provide metadata that's needed if other events (e.g.
4711 * allocations, exceptions) are dynamically enabled/disabled.
4712 */
4713
4714 mono_profiler_set_runtime_shutdown_begin_callback (handle, log_early_shutdown);
4715 mono_profiler_set_runtime_shutdown_end_callback (handle, log_shutdown);
4716 mono_profiler_set_runtime_initialized_callback (handle, runtime_initialized);
4717
4718 mono_profiler_set_gc_event_callback (handle, gc_event);
4719
4720 mono_profiler_set_thread_started_callback (handle, thread_start);
4721 mono_profiler_set_thread_exited_callback (handle, thread_end);
4722 mono_profiler_set_thread_name_callback (handle, thread_name);
4723
4724 mono_profiler_set_domain_loaded_callback (handle, domain_loaded);
4725 mono_profiler_set_domain_unloading_callback (handle, domain_unloaded);
4726 mono_profiler_set_domain_name_callback (handle, domain_name);
4727
4728 mono_profiler_set_context_loaded_callback (handle, context_loaded);
4729 mono_profiler_set_context_unloaded_callback (handle, context_unloaded);
4730
4731 mono_profiler_set_assembly_loaded_callback (handle, assembly_loaded);
4732 mono_profiler_set_assembly_unloading_callback (handle, assembly_unloaded);
4733
4734 mono_profiler_set_image_loaded_callback (handle, image_loaded);
4735 mono_profiler_set_image_unloading_callback (handle, image_unloaded);
4736
4737 mono_profiler_set_class_loaded_callback (handle, class_loaded);
4738
4739 mono_profiler_set_jit_done_callback (handle, method_jitted);
4740
4741 if (ENABLED (PROFLOG_EXCEPTION_EVENTS)) {
4742 mono_profiler_set_exception_throw_callback (handle, throw_exc);
4743 mono_profiler_set_exception_clause_callback (handle, clause_exc);
4744 }
4745
4746 if (ENABLED (PROFLOG_MONITOR_EVENTS)) {
4747 mono_profiler_set_monitor_contention_callback (handle, monitor_contention);
4748 mono_profiler_set_monitor_acquired_callback (handle, monitor_acquired);
4749 mono_profiler_set_monitor_failed_callback (handle, monitor_failed);
4750 }
4751
4752 if (ENABLED (PROFLOG_GC_EVENTS))
4753 mono_profiler_set_gc_resize_callback (handle, gc_resize);
4754
4755 if (ENABLED (PROFLOG_GC_ALLOCATION_EVENTS))
4756 mono_profiler_set_gc_allocation_callback (handle, gc_alloc);
4757
4758 if (ENABLED (PROFLOG_GC_MOVE_EVENTS))
4759 mono_profiler_set_gc_moves_callback (handle, gc_moves);
4760
4761 if (ENABLED (PROFLOG_GC_HANDLE_EVENTS)) {
4762 mono_profiler_set_gc_handle_created_callback (handle, gc_handle_created);
4763 mono_profiler_set_gc_handle_deleted_callback (handle, gc_handle_deleted);
4764 }
4765
4766 if (ENABLED (PROFLOG_GC_FINALIZATION_EVENTS)) {
4767 mono_profiler_set_gc_finalizing_callback (handle, finalize_begin);
4768 mono_profiler_set_gc_finalized_callback (handle, finalize_end);
4769 mono_profiler_set_gc_finalizing_object_callback (handle, finalize_object_begin);
4770 }
4771
4772 // On-demand heapshots use the finalizer thread to force a collection and thus a heapshot.
4773 mono_profiler_set_gc_finalized_callback (handle, finalize_end);
4774
4775 if (ENABLED (PROFLOG_SAMPLE_EVENTS))
4776 mono_profiler_set_sample_hit_callback (handle, mono_sample_hit);
4777
4778 if (ENABLED (PROFLOG_JIT_EVENTS))
4779 mono_profiler_set_jit_code_buffer_callback (handle, code_buffer_new);
4780
4781 if (log_config.enter_leave) {
4782 mono_profiler_set_method_enter_callback (handle, method_enter);
4783 mono_profiler_set_method_leave_callback (handle, method_leave);
4784 mono_profiler_set_method_tail_call_callback (handle, tail_call);
4785 mono_profiler_set_method_exception_leave_callback (handle, method_exc_leave);
4786 }
4787
4788 if (log_config.collect_coverage) {
4789 mono_profiler_enable_coverage ();
4790 mono_profiler_set_coverage_filter_callback (handle, coverage_filter);
4791 }
4792
4793 mono_profiler_enable_allocations ();
4794 mono_profiler_enable_sampling (handle);
4795
4796 /*
4797 * If no sample option was given by the user, this just leaves the sampling
4798 * thread in idle mode. We do this even if no option was given so that we
4799 * can warn if another profiler controls sampling parameters.
4800 */
4801 if (!mono_profiler_set_sample_mode (handle, log_config.sampling_mode, log_config.sample_freq))
4802 mono_profiler_printf_err ("Another profiler controls sampling parameters; the log profiler will not be able to modify them.");
4803
4804 done:
4805 ;
4806 }
4807