1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 2006-2009
4 *
5 * Debug and performance tracing
6 *
7 * ---------------------------------------------------------------------------*/
8
9 // external headers
10 #include "Rts.h"
11
12 // internal headers
13 #include "Trace.h"
14
15 #if defined(TRACING)
16
17 #include "GetTime.h"
18 #include "GetEnv.h"
19 #include "Stats.h"
20 #include "eventlog/EventLog.h"
21 #include "rts/EventLogWriter.h"
22 #include "Threads.h"
23 #include "Printer.h"
24 #include "RtsFlags.h"
25
26 #if defined(HAVE_UNISTD_H)
27 #include <unistd.h>
28 #endif
29
// events
// Per-class trace flags: each is nonzero when events of that class should
// be emitted. They are computed once in initTracing() from the -l
// (TraceFlags) and -D (DebugFlags) RTS options and read by the trace*()
// wrappers in Trace.h.
int TRACE_sched;
int TRACE_gc;
int TRACE_nonmoving_gc;
int TRACE_spark_sampled;
int TRACE_spark_full;
int TRACE_user;
int TRACE_cap;

#if defined(THREADED_RTS)
// Serialises debug-trace output to stderr so lines from different OS
// threads do not interleave.
static Mutex trace_utx;
#endif
42
43 /* ---------------------------------------------------------------------------
44 Starting up / shutting down the tracing facilities
45 --------------------------------------------------------------------------- */
46
/*
 * Initialise the tracing subsystem: derive the TRACE_* class flags from
 * the RTS trace (-l) and debug (-D) flags, initialise the eventlog
 * machinery, and start eventlog output if it was requested.
 *
 * Must be called once during RTS startup before any trace event is
 * emitted.
 */
void initTracing (void)
{
#if defined(THREADED_RTS)
    initMutex(&trace_utx);
#endif

    // -Ds turns on scheduler tracing too
    TRACE_sched =
        RtsFlags.TraceFlags.scheduler ||
        RtsFlags.DebugFlags.scheduler;

    // -Dg turns on gc tracing too
    TRACE_gc =
        RtsFlags.TraceFlags.gc ||
        RtsFlags.DebugFlags.gc ||
        RtsFlags.DebugFlags.scheduler;
    // GC tracing needs GC statistics to be collected, so force at least
    // COLLECT_GC_STATS if the user didn't ask for stats explicitly.
    if (TRACE_gc && RtsFlags.GcFlags.giveStats == NO_GC_STATS) {
        RtsFlags.GcFlags.giveStats = COLLECT_GC_STATS;
    }

    TRACE_nonmoving_gc =
        RtsFlags.TraceFlags.nonmoving_gc;

    TRACE_spark_sampled =
        RtsFlags.TraceFlags.sparks_sampled;

    // -Dr turns on full spark tracing
    TRACE_spark_full =
        RtsFlags.TraceFlags.sparks_full ||
        RtsFlags.DebugFlags.sparks;

    TRACE_user =
        RtsFlags.TraceFlags.user;

    // We trace cap events if we're tracing anything else
    TRACE_cap =
        TRACE_sched ||
        TRACE_gc ||
        TRACE_spark_sampled ||
        TRACE_spark_full ||
        TRACE_user;

    /* Note: we can have any of the TRACE_* flags turned on even when
       eventlog_enabled is off. In the DEBUG way we may be tracing to stderr.
     */
    initEventLogging();

    if (RtsFlags.TraceFlags.tracing == TRACE_EVENTLOG
        && rtsConfig.eventlog_writer != NULL) {
        startEventLogging(rtsConfig.eventlog_writer);
    }
}
99
endTracing(void)100 void endTracing (void)
101 {
102 if (eventlog_enabled) {
103 endEventLogging();
104 }
105 }
106
freeTracing(void)107 void freeTracing (void)
108 {
109 if (eventlog_enabled) {
110 freeEventLogging();
111 }
112 }
113
// Used to reset tracing in a forked child: restarts the eventlog so the
// child writes a fresh log rather than appending to the parent's.
void resetTracing (void)
{
    restartEventLogging();
}
119
flushTrace(void)120 void flushTrace (void)
121 {
122 if (eventlog_enabled) {
123 flushEventLog();
124 }
125 }
126
/*
 * Allocate event buffers for newly added capabilities [from..to).
 *
 * NOTE(review): "Capapilities" is a long-standing misspelling, kept
 * because the name is part of the external interface.
 */
void tracingAddCapapilities (uint32_t from, uint32_t to)
{
    if (!eventlog_enabled) {
        return;
    }
    moreCapEventBufs(from, to);
}
133
134 /* ---------------------------------------------------------------------------
135 Emitting trace messages/events
136 --------------------------------------------------------------------------- */
137
#if defined(DEBUG)
/*
 * Print the per-line preface for stderr tracing: the current OS thread
 * id (threaded RTS only) and, when timestamping is enabled, the elapsed
 * time since RTS start. Caller must hold trace_utx.
 */
static void tracePreface (void)
{
#if defined(THREADED_RTS)
    debugBelch("%12lx: ", (unsigned long)osThreadId());
#endif
    if (RtsFlags.TraceFlags.timestamp) {
        debugBelch("%9" FMT_Word64 ": ", stat_getElapsedTime());
    }
}
#endif
149
#if defined(DEBUG)
/*
 * Human-readable names for the status word of EVENT_STOP_THREAD.
 * Small indices are the scheduler stop codes (HeapOverflow,
 * StackOverflow, ...); "blocked on" reasons are encoded with a bias of
 * 6 added to the block-reason value, hence the "6 +" designators.
 */
static char *thread_stop_reasons[] = {
    [HeapOverflow] = "heap overflow",
    [StackOverflow] = "stack overflow",
    [ThreadYielding] = "yielding",
    [ThreadBlocked] = "blocked",
    [ThreadFinished] = "finished",
    [THREAD_SUSPENDED_FOREIGN_CALL] = "suspended while making a foreign call",
    [6 + BlockedOnMVar] = "blocked on an MVar",
    [6 + BlockedOnMVarRead] = "blocked on an atomic MVar read",
    [6 + BlockedOnBlackHole] = "blocked on a black hole",
    [6 + BlockedOnRead] = "blocked on a read operation",
    [6 + BlockedOnWrite] = "blocked on a write operation",
    [6 + BlockedOnDelay] = "blocked on a delay operation",
    [6 + BlockedOnSTM] = "blocked on STM",
    [6 + BlockedOnDoProc] = "blocked on asyncDoProc",
    [6 + BlockedOnCCall] = "blocked on a foreign call",
    [6 + BlockedOnCCall_Interruptible] = "blocked on a foreign call (interruptible)",
    [6 + BlockedOnMsgThrowTo] = "blocked on throwTo",
    [6 + ThreadMigrating] = "migrating"
};
#endif
172
#if defined(DEBUG)
/*
 * Pretty-print a scheduler event to stderr (TRACE_STDERR mode).
 *
 * info1/info2 carry event-specific payload: the destination capability
 * for migrate/wakeup events, or the stop status (info1) plus extra
 * detail (info2) for EVENT_STOP_THREAD.
 *
 * Fix: the stack-overflow message previously read "thead" instead of
 * "thread".
 */
static void traceSchedEvent_stderr (Capability *cap, EventTypeNum tag,
                                    StgTSO *tso,
                                    StgWord info1 STG_UNUSED,
                                    StgWord info2 STG_UNUSED)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    switch (tag) {
    case EVENT_CREATE_THREAD:   // (cap, thread)
        debugBelch("cap %d: created thread %" FMT_Word "\n",
                   cap->no, (W_)tso->id);
        break;
    case EVENT_RUN_THREAD:      // (cap, thread)
        debugBelch("cap %d: running thread %" FMT_Word " (%s)\n",
                   cap->no, (W_)tso->id, what_next_strs[tso->what_next]);
        break;
    case EVENT_THREAD_RUNNABLE: // (cap, thread)
        debugBelch("cap %d: thread %" FMT_Word " appended to run queue\n",
                   cap->no, (W_)tso->id);
        break;
    case EVENT_MIGRATE_THREAD:  // (cap, thread, new_cap)
        debugBelch("cap %d: thread %" FMT_Word " migrating to cap %d\n",
                   cap->no, (W_)tso->id, (int)info1);
        break;
    case EVENT_THREAD_WAKEUP:   // (cap, thread, info1_cap)
        debugBelch("cap %d: waking up thread %" FMT_Word " on cap %d\n",
                   cap->no, (W_)tso->id, (int)info1);
        break;

    case EVENT_STOP_THREAD:     // (cap, thread, status)
        if (info1 == 6 + BlockedOnBlackHole) {
            // info2 is the id of the thread owning the black hole
            debugBelch("cap %d: thread %" FMT_Word " stopped (blocked on black hole owned by thread %lu)\n",
                       cap->no, (W_)tso->id, (long)info2);
        } else if (info1 == StackOverflow) {
            debugBelch("cap %d: thread %" FMT_Word
                       " stopped (stack overflow, size %lu)\n",
                       cap->no, (W_)tso->id, (long)info2);

        } else {
            debugBelch("cap %d: thread %" FMT_Word " stopped (%s)\n",
                       cap->no, (W_)tso->id, thread_stop_reasons[info1]);
        }
        break;
    default:
        debugBelch("cap %d: thread %" FMT_Word ": event %d\n\n",
                   cap->no, (W_)tso->id, tag);
        break;
    }

    RELEASE_LOCK(&trace_utx);
}
#endif
227
/*
 * Emit a scheduler event: pretty-printed to stderr in DEBUG/TRACE_STDERR
 * mode, otherwise posted to the binary eventlog. tso may be NULL, in
 * which case thread id 0 is logged.
 */
void traceSchedEvent_ (Capability *cap, EventTypeNum tag,
                       StgTSO *tso, StgWord info1, StgWord info2)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        traceSchedEvent_stderr(cap, tag, tso, info1, info2);
    } else
#endif
    {
        postSchedEvent(cap,tag,tso ? tso->id : 0, info1, info2);
    }
}
240
#if defined(DEBUG)
/*
 * Pretty-print a (nullary) GC event to stderr. barfs on an unknown tag
 * so that new GC events cannot silently go unformatted.
 */
static void traceGcEvent_stderr (Capability *cap, EventTypeNum tag)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    switch (tag) {
    case EVENT_REQUEST_SEQ_GC:  // (cap)
        debugBelch("cap %d: requesting sequential GC\n", cap->no);
        break;
    case EVENT_REQUEST_PAR_GC:  // (cap)
        debugBelch("cap %d: requesting parallel GC\n", cap->no);
        break;
    case EVENT_GC_START:        // (cap)
        debugBelch("cap %d: starting GC\n", cap->no);
        break;
    case EVENT_GC_END:          // (cap)
        debugBelch("cap %d: finished GC\n", cap->no);
        break;
    case EVENT_GC_IDLE:         // (cap)
        debugBelch("cap %d: GC idle\n", cap->no);
        break;
    case EVENT_GC_WORK:         // (cap)
        debugBelch("cap %d: GC working\n", cap->no);
        break;
    case EVENT_GC_DONE:         // (cap)
        debugBelch("cap %d: GC done\n", cap->no);
        break;
    case EVENT_GC_GLOBAL_SYNC:  // (cap)
        debugBelch("cap %d: all caps stopped for GC\n", cap->no);
        break;
    default:
        barf("traceGcEvent: unknown event tag %d", tag);
        break;
    }

    RELEASE_LOCK(&trace_utx);
}
#endif
280
/*
 * Emit a GC event: to stderr in DEBUG/TRACE_STDERR mode, otherwise to
 * the eventlog.
 */
void traceGcEvent_ (Capability *cap, EventTypeNum tag)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        traceGcEvent_stderr(cap, tag);
    } else
#endif
    {
        /* currently all GC events are nullary events */
        postEvent(cap, tag);
    }
}
293
/*
 * Like traceGcEvent_, but stamp the eventlog record with the caller's
 * timestamp 'ts' rather than the current time. The stderr path ignores
 * 'ts' and prints with the normal preface.
 */
void traceGcEventAtT_ (Capability *cap, StgWord64 ts, EventTypeNum tag)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        traceGcEvent_stderr(cap, tag);
    } else
#endif
    {
        /* assuming nullary events and explicitly inserting a timestamp */
        postEventAtTimestamp(cap, ts, tag);
    }
}
306
/*
 * Emit a heap event (eventlog only — heap events have no stderr
 * rendering, so TRACE_STDERR mode drops them silently).
 */
void traceHeapEvent_ (Capability   *cap,
                      EventTypeNum  tag,
                      CapsetID      heap_capset,
                      W_            info1)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* no stderr equivalent for these ones */
    } else
#endif
    {
        postHeapEvent(cap, tag, heap_capset, info1);
    }
}
321
/*
 * Emit the static heap configuration (generations, heap sizing, block
 * sizes) for 'heap_capset'. Eventlog only; dropped in TRACE_STDERR mode.
 */
void traceEventHeapInfo_ (CapsetID    heap_capset,
                          uint32_t  gens,
                          W_        maxHeapSize,
                          W_        allocAreaSize,
                          W_        mblockSize,
                          W_        blockSize)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* no stderr equivalent for these ones */
    } else
#endif
    {
        postEventHeapInfo(heap_capset, gens,
                          maxHeapSize, allocAreaSize,
                          mblockSize, blockSize);
    }
}
340
/*
 * Emit per-collection GC statistics (copied bytes, slop, fragmentation,
 * and parallel-copy figures) for generation 'gen'. Eventlog only;
 * dropped in TRACE_STDERR mode.
 */
void traceEventGcStats_  (Capability *cap,
                          CapsetID    heap_capset,
                          uint32_t  gen,
                          W_        copied,
                          W_        slop,
                          W_        fragmentation,
                          uint32_t  par_n_threads,
                          W_        par_max_copied,
                          W_        par_tot_copied,
                          W_        par_balanced_copied)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* no stderr equivalent for these ones */
    } else
#endif
    {
        postEventGcStats(cap, heap_capset, gen,
                         copied, slop, fragmentation,
                         par_n_threads, par_max_copied,
                         par_tot_copied, par_balanced_copied);
    }
}
364
/*
 * Emit a capability lifecycle event (create/delete/enable/disable):
 * pretty-printed to stderr in DEBUG/TRACE_STDERR mode, otherwise posted
 * to the eventlog. Unknown tags are silently ignored on the stderr
 * path (no default case).
 */
void traceCapEvent_ (Capability   *cap,
                     EventTypeNum  tag)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        ACQUIRE_LOCK(&trace_utx);

        tracePreface();
        switch (tag) {
        case EVENT_CAP_CREATE:   // (cap)
            debugBelch("cap %d: initialised\n", cap->no);
            break;
        case EVENT_CAP_DELETE:   // (cap)
            debugBelch("cap %d: shutting down\n", cap->no);
            break;
        case EVENT_CAP_ENABLE:   // (cap)
            debugBelch("cap %d: enabling capability\n", cap->no);
            break;
        case EVENT_CAP_DISABLE:  // (cap)
            debugBelch("cap %d: disabling capability\n", cap->no);
            break;
        }
        RELEASE_LOCK(&trace_utx);
    } else
#endif
    {
        if (eventlog_enabled) {
            postCapEvent(tag, (EventCapNo)cap->no);
        }
    }
}
396
/*
 * Emit a capset event (create/delete/assign-cap/remove-cap). 'info'
 * carries the event-specific payload: the capset type for CREATE, or
 * the capability number for ASSIGN/REMOVE.
 */
void traceCapsetEvent_ (EventTypeNum tag,
                        CapsetID     capset,
                        StgWord      info)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_sched)
        // When events go to stderr, it is annoying to see the capset
        // events every time, so we only emit them with -Ds.
    {
        ACQUIRE_LOCK(&trace_utx);

        tracePreface();
        switch (tag) {
        case EVENT_CAPSET_CREATE:   // (capset, capset_type)
            debugBelch("created capset %" FMT_Word32 " of type %d\n", capset,
                       (int)info);
            break;
        case EVENT_CAPSET_DELETE:   // (capset)
            debugBelch("deleted capset %" FMT_Word32 "\n", capset);
            break;
        case EVENT_CAPSET_ASSIGN_CAP:  // (capset, capno)
            debugBelch("assigned cap %" FMT_Word " to capset %" FMT_Word32 "\n",
                       info, capset);
            break;
        case EVENT_CAPSET_REMOVE_CAP:  // (capset, capno)
            debugBelch("removed cap %" FMT_Word " from capset %" FMT_Word32
                       "\n", info, capset);
            break;
        }
        RELEASE_LOCK(&trace_utx);
    } else
#endif
    {
        if (eventlog_enabled) {
            postCapsetEvent(tag, capset, info);
        }
    }
}
435
traceWallClockTime_(void)436 void traceWallClockTime_(void) {
437 if (eventlog_enabled) {
438 postWallClockTime(CAPSET_CLOCKDOMAIN_DEFAULT);
439 }
440 }
441
/*
 * Emit OS process identification into the eventlog: pid, parent pid
 * (POSIX only), an RTS identifier string, and the full program argv.
 * No-op when the eventlog is not enabled.
 */
void traceOSProcessInfo_(void) {
    if (eventlog_enabled) {
        postCapsetEvent(EVENT_OSPROCESS_PID,
                        CAPSET_OSPROCESS_DEFAULT,
                        getpid());

#if !defined(mingw32_HOST_OS)
        /* Windows has no strong concept of process hierarchy, so no getppid().
         * In any case, this trace event is mainly useful for tracing programs
         * that use 'forkProcess' which Windows doesn't support anyway.
         */
        postCapsetEvent(EVENT_OSPROCESS_PPID,
                        CAPSET_OSPROCESS_DEFAULT,
                        getppid());
#endif
        {
            // e.g. "GHC-9.x.y rts_v" — identifies the RTS build in the log
            char buf[256];
            snprintf(buf, sizeof(buf), "GHC-%s %s", ProjectVersion, RtsWay);
            postCapsetStrEvent(EVENT_RTS_IDENTIFIER,
                               CAPSET_OSPROCESS_DEFAULT,
                               buf);
        }
        {
            int argc = 0; char **argv;
            getFullProgArgv(&argc, &argv);
            if (argc != 0) {
                postCapsetVecEvent(EVENT_PROGRAM_ARGS,
                                   CAPSET_OSPROCESS_DEFAULT,
                                   argc, argv);
            }
        }
    }
}
475
#if defined(DEBUG)
/*
 * Pretty-print a spark event to stderr. info1 carries event-specific
 * payload: the spark thread id for CREATE_SPARK_THREAD, or the victim
 * capability for SPARK_STEAL. barfs on an unknown tag.
 */
static void traceSparkEvent_stderr (Capability *cap, EventTypeNum tag,
                                    StgWord info1)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    switch (tag) {

    case EVENT_CREATE_SPARK_THREAD: // (cap, spark_thread)
        debugBelch("cap %d: creating spark thread %lu\n",
                   cap->no, (long)info1);
        break;
    case EVENT_SPARK_CREATE:        // (cap)
        debugBelch("cap %d: added spark to pool\n",
                   cap->no);
        break;
    case EVENT_SPARK_DUD:           // (cap)
        debugBelch("cap %d: discarded dud spark\n",
                   cap->no);
        break;
    case EVENT_SPARK_OVERFLOW:      // (cap)
        debugBelch("cap %d: discarded overflowed spark\n",
                   cap->no);
        break;
    case EVENT_SPARK_RUN:           // (cap)
        debugBelch("cap %d: running a spark\n",
                   cap->no);
        break;
    case EVENT_SPARK_STEAL:         // (cap, victim_cap)
        debugBelch("cap %d: stealing a spark from cap %d\n",
                   cap->no, (int)info1);
        break;
    case EVENT_SPARK_FIZZLE:        // (cap)
        debugBelch("cap %d: fizzled spark removed from pool\n",
                   cap->no);
        break;
    case EVENT_SPARK_GC:            // (cap)
        debugBelch("cap %d: GCd spark removed from pool\n",
                   cap->no);
        break;
    default:
        barf("traceSparkEvent: unknown event tag %d", tag);
        break;
    }

    RELEASE_LOCK(&trace_utx);
}
#endif
525
/*
 * Emit a spark event: to stderr in DEBUG/TRACE_STDERR mode, otherwise
 * to the eventlog.
 */
void traceSparkEvent_ (Capability *cap, EventTypeNum tag, StgWord info1)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        traceSparkEvent_stderr(cap, tag, info1);
    } else
#endif
    {
        postSparkEvent(cap,tag,info1);
    }
}
537
/*
 * Emit sampled spark-pool counters for 'cap'. Eventlog only; the
 * TRACE_STDERR branch intentionally does nothing (see comment below).
 */
void traceSparkCounters_ (Capability *cap,
                          SparkCounters counters,
                          StgWord remaining)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* we currently don't do debug tracing of spark stats but we must
           test for TRACE_STDERR because of the !eventlog_enabled case. */
    } else
#endif
    {
        postSparkCountersEvent(cap, counters, remaining);
    }
}
552
/*
 * Emit a task-create event: the task's serialisable id, its capability,
 * and the OS kernel thread id it runs on. Eventlog only.
 */
void traceTaskCreate_ (Task       *task,
                       Capability *cap)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* We currently don't do debug tracing of tasks but we must
           test for TRACE_STDERR because of the !eventlog_enabled case. */
    } else
#endif
    {
        EventTaskId         taskid = serialisableTaskId(task);
        EventKernelThreadId tid    = kernelThreadId();
        postTaskCreateEvent(taskid, cap->no, tid);
    }
}
568
/*
 * Emit a task-migrate event recording the move of 'task' from 'cap' to
 * 'new_cap'. Eventlog only.
 */
void traceTaskMigrate_ (Task       *task,
                        Capability *cap,
                        Capability *new_cap)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* We currently don't do debug tracing of tasks but we must
           test for TRACE_STDERR because of the !eventlog_enabled case. */
    } else
#endif
    {
        EventTaskId taskid = serialisableTaskId(task);
        postTaskMigrateEvent(taskid, cap->no, new_cap->no);
    }
}
584
/*
 * Emit a task-delete event for 'task'. Eventlog only.
 */
void traceTaskDelete_ (Task *task)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* We currently don't do debug tracing of tasks but we must
           test for TRACE_STDERR because of the !eventlog_enabled case. */
    } else
#endif
    {
        EventTaskId taskid = serialisableTaskId(task);
        postTaskDeleteEvent(taskid);
    }
}
598
/* Mark the start of heap profiling for 'profile_id' in the eventlog. */
void traceHeapProfBegin(StgWord8 profile_id)
{
    if (!eventlog_enabled) {
        return;
    }
    postHeapProfBegin(profile_id);
}
/* Mark the start of a biographical heap-profile sample for 'era',
 * stamped with the given sample time. */
void traceHeapBioProfSampleBegin(StgInt era, StgWord64 time)
{
    if (!eventlog_enabled) {
        return;
    }
    postHeapBioProfSampleBegin(era, time);
}
611
/* Mark the start of a heap-profile sample for 'era' in the eventlog. */
void traceHeapProfSampleBegin(StgInt era)
{
    if (!eventlog_enabled) {
        return;
    }
    postHeapProfSampleBegin(era);
}
618
/* Mark the end of a heap-profile sample for 'era' in the eventlog. */
void traceHeapProfSampleEnd(StgInt era)
{
    if (!eventlog_enabled) {
        return;
    }
    postHeapProfSampleEnd(era);
}
625
/* Record one string-keyed heap-profile sample: 'residency' bytes
 * attributed to 'label' in profile 'profile_id'. */
void traceHeapProfSampleString(StgWord8 profile_id,
                               const char *label, StgWord residency)
{
    if (!eventlog_enabled) {
        return;
    }
    postHeapProfSampleString(profile_id, label, residency);
}
633
634 #if defined(PROFILING)
/*
 * Register a cost centre's metadata (label, module, source location,
 * CAF-ness) in the eventlog so later samples can refer to it by ccID.
 */
void traceHeapProfCostCentre(StgWord32 ccID,
                             const char *label,
                             const char *module,
                             const char *srcloc,
                             StgBool is_caf)
{
    if (eventlog_enabled) {
        postHeapProfCostCentre(ccID, label, module, srcloc, is_caf);
    }
}
645
// This one is for .hp samples
/*
 * Record one cost-centre-stack heap-profile sample: 'residency' bytes
 * attributed to 'stack' in profile 'profile_id'.
 */
void traceHeapProfSampleCostCentre(StgWord8 profile_id,
                                   CostCentreStack *stack, StgWord residency)
{
    if (eventlog_enabled) {
        postHeapProfSampleCostCentre(profile_id, stack, residency);
    }
}
654
// This one is for .prof samples
/*
 * Record one time-profile sample: 'tick' ticks attributed to 'stack'
 * on capability 'cap'.
 */
void traceProfSampleCostCentre(Capability *cap,
                               CostCentreStack *stack, StgWord tick)
{
    if (eventlog_enabled) {
        postProfSampleCostCentre(cap, stack, tick);
    }
}
/* Mark the start of time profiling in the eventlog. */
void traceProfBegin(void)
{
    if (eventlog_enabled) {
        postProfBegin();
    }
}
669 #endif
670
671 #if defined(DEBUG)
/*
 * Print a formatted, capability-prefixed message to stderr under the
 * trace lock. The caller owns 'ap' (this function does not va_end it).
 */
static void vtraceCap_stderr(Capability *cap, char *msg, va_list ap)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    debugBelch("cap %d: ", cap->no);
    vdebugBelch(msg,ap);
    debugBelch("\n");

    RELEASE_LOCK(&trace_utx);
}
683
/* Varargs convenience wrapper around vtraceCap_stderr. */
static void traceCap_stderr(Capability *cap, char *msg, ...)
{
  va_list ap;
  va_start(ap,msg);
  vtraceCap_stderr(cap, msg, ap);
  va_end(ap);
}
691 #endif
692
/*
 * Emit a formatted, capability-tagged trace message: to stderr in
 * DEBUG/TRACE_STDERR mode, otherwise to the eventlog.
 */
void traceCap_(Capability *cap, char *msg, ...)
{
    va_list ap;
    va_start(ap,msg);

#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        vtraceCap_stderr(cap, msg, ap);
    } else
#endif
    {
        postCapMsg(cap, msg, ap);
    }

    va_end(ap);
}
709
710 #if defined(DEBUG)
/*
 * Print a formatted trace message (no capability prefix) to stderr
 * under the trace lock. The caller owns 'ap'.
 */
static void vtrace_stderr(char *msg, va_list ap)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    vdebugBelch(msg,ap);
    debugBelch("\n");

    RELEASE_LOCK(&trace_utx);
}
721 #endif
722
/*
 * Emit a formatted trace message not tied to a capability: to stderr in
 * DEBUG/TRACE_STDERR mode, otherwise to the eventlog.
 */
void trace_(char *msg, ...)
{
    va_list ap;
    va_start(ap,msg);

#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        vtrace_stderr(msg, ap);
    } else
#endif
    {
        postMsg(msg, ap);
    }

    va_end(ap);
}
739
/*
 * Emit a user trace message (Debug.Trace.traceEvent) and fire the
 * corresponding dtrace probe unconditionally.
 */
void traceUserMsg(Capability *cap, char *msg)
{
    /* Note: normally we don't check the TRACE_* flags here as they're checked
       by the wrappers in Trace.h. But traceUserMsg is special since it has no
       wrapper (it's called from cmm code), so we check TRACE_user here
     */
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_user) {
        // Use "%s" as format string to ignore format specifiers in msg (#3874).
        traceCap_stderr(cap, "%s", msg);
    } else
#endif
    {
        if (eventlog_enabled && TRACE_user) {
            postUserEvent(cap, EVENT_USER_MSG, msg);
        }
    }
    dtraceUserMsg(cap->no, msg);
}
759
/*
 * Emit a binary user event of 'size' bytes. Eventlog only — there is no
 * stderr rendering for binary payloads.
 */
void traceUserBinaryMsg(Capability *cap, uint8_t *msg, size_t size)
{
    /* Note: normally we don't check the TRACE_* flags here as they're checked
       by the wrappers in Trace.h. But traceUserBinaryMsg is special since it
       has no wrapper (it's called from cmm code), so we check TRACE_user here
     */
    if (eventlog_enabled && TRACE_user) {
        postUserBinaryEvent(cap, EVENT_USER_BINARY_MSG, msg, size);
    }
}
770
/*
 * Emit a user marker event (Debug.Trace.traceMarker) and fire the
 * corresponding dtrace probe unconditionally.
 */
void traceUserMarker(Capability *cap, char *markername)
{
    /* Note: traceUserMarker is special since it has no wrapper (it's called
       from cmm code), so we check eventlog_enabled and TRACE_user here.
     */
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_user) {
        traceCap_stderr(cap, "User marker: %s", markername);
    } else
#endif
    {
        if (eventlog_enabled && TRACE_user) {
            postUserEvent(cap, EVENT_USER_MARKER, markername);
        }
    }
    dtraceUserMarker(cap->no, markername);
}
788
789
/*
 * Record a user-supplied label for thread 'tso' (GHC.Conc.labelThread):
 * printed to stderr in DEBUG/TRACE_STDERR mode, otherwise posted to the
 * eventlog.
 */
void traceThreadLabel_(Capability *cap,
                       StgTSO     *tso,
                       char       *label)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        ACQUIRE_LOCK(&trace_utx);
        tracePreface();
        debugBelch("cap %d: thread %" FMT_Word " has label %s\n",
                   cap->no, (W_)tso->id, label);
        RELEASE_LOCK(&trace_utx);
    } else
#endif
    {
        postThreadLabel(cap, tso->id, label);
    }
}
807
traceConcMarkBegin()808 void traceConcMarkBegin()
809 {
810 if (eventlog_enabled)
811 postEventNoCap(EVENT_CONC_MARK_BEGIN);
812 }
813
/* Mark the end of a nonmoving-GC concurrent mark phase, recording how
 * many objects were marked. */
void traceConcMarkEnd(StgWord32 marked_obj_count)
{
    if (!eventlog_enabled) {
        return;
    }
    postConcMarkEnd(marked_obj_count);
}
819
traceConcSyncBegin()820 void traceConcSyncBegin()
821 {
822 if (eventlog_enabled)
823 postEventNoCap(EVENT_CONC_SYNC_BEGIN);
824 }
825
traceConcSyncEnd()826 void traceConcSyncEnd()
827 {
828 if (eventlog_enabled)
829 postEventNoCap(EVENT_CONC_SYNC_END);
830 }
831
traceConcSweepBegin()832 void traceConcSweepBegin()
833 {
834 if (eventlog_enabled)
835 postEventNoCap(EVENT_CONC_SWEEP_BEGIN);
836 }
837
traceConcSweepEnd()838 void traceConcSweepEnd()
839 {
840 if (eventlog_enabled)
841 postEventNoCap(EVENT_CONC_SWEEP_END);
842 }
843
/* Record that capability 'cap' flushed its update remembered set
 * (nonmoving GC). */
void traceConcUpdRemSetFlush(Capability *cap)
{
    if (eventlog_enabled)
        postConcUpdRemSetFlush(cap);
}
849
/* Record a census of the nonmoving heap's allocator for the given
 * block-size class. Requires both the eventlog and -lg (nonmoving GC
 * tracing) to be enabled. */
void traceNonmovingHeapCensus(uint32_t log_blk_size,
                              const struct NonmovingAllocCensus *census)
{
    if (!eventlog_enabled || !TRACE_nonmoving_gc) {
        return;
    }
    postNonmovingHeapCensus(log_blk_size, census);
}
856
/*
 * Dump the status of 'tso' to stderr in DEBUG/TRACE_STDERR mode.
 * There is currently no eventlog equivalent, so otherwise this is a
 * no-op.
 */
void traceThreadStatus_ (StgTSO *tso USED_IF_DEBUG)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        printThreadStatus(tso);
    } else
#endif
    {
        /* nothing - no event for this one yet */
    }
}
868
#if defined(DEBUG)
/*
 * Begin a multi-part stderr trace line: acquires the trace lock, prints
 * the preface and the formatted message, but no trailing newline.
 * Must be paired with traceEnd(), which emits the newline and releases
 * the lock.
 */
void traceBegin (const char *str, ...)
{
    va_list ap;
    va_start(ap,str);

    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    vdebugBelch(str,ap);
    va_end(ap);
}
881
/* Finish a trace line started with traceBegin(): emit the newline and
 * release the trace lock. */
void traceEnd (void)
{
    debugBelch("\n");
    RELEASE_LOCK(&trace_utx);
}
887 #endif /* DEBUG */
888
889 #endif /* TRACING */
890
891 // If DTRACE is enabled, but neither DEBUG nor TRACING, we need a C land
892 // wrapper for the user-msg probe (as we can't expand that in PrimOps.cmm)
893 //
894 #if !defined(DEBUG) && !defined(TRACING) && defined(DTRACE)
895
/* C-land wrapper for the dtrace user-msg probe; exists because the
 * probe macro cannot be expanded directly from PrimOps.cmm. */
void dtraceUserMsgWrapper(Capability *cap, char *msg)
{
    dtraceUserMsg(cap->no, msg);
}
900
/* C-land wrapper for the dtrace user-marker probe; see note above. */
void dtraceUserMarkerWrapper(Capability *cap, char *msg)
{
    dtraceUserMarker(cap->no, msg);
}
905
906 #endif /* !defined(DEBUG) && !defined(TRACING) && defined(DTRACE) */
907