1 /*
2 * %CopyrightBegin%
3 *
4 * Copyright Ericsson AB 1999-2020. All Rights Reserved.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * %CopyrightEnd%
19 */
20
21 /*
22 * Support functions for tracing.
23 *
24 * Ideas for future speed improvements in tracing framework:
25 * * Move ErtsTracerNif into ErtsTracer
26 * + Removes need for locking
27 * + Removes hash lookup overhead
28 * + Use a refc on the ErtsTracerNif to know when it can
29 * be freed. We don't want to allocate a separate
30 * ErtsTracerNif for each module used.
31 * * Optimize GenericBp for cache locality by reusing equivalent
32 * GenericBp and GenericBpData in multiple tracer points.
33 * + Possibly we want to use specialized instructions for different
34 * types of trace so that the knowledge of which struct is used
35 * can be in the instruction.
36 */
37
38 #ifdef HAVE_CONFIG_H
39 # include "config.h"
40 #endif
41
42 #include "sys.h"
43 #include "erl_vm.h"
44 #include "global.h"
45 #include "erl_process.h"
46 #include "big.h"
47 #include "bif.h"
48 #include "dist.h"
49 #include "beam_bp.h"
50 #include "error.h"
51 #include "erl_binary.h"
52 #include "erl_bits.h"
53 #include "erl_thr_progress.h"
54 #include "erl_bif_unique.h"
55 #include "erl_map.h"
56
57 #if 0
58 #define DEBUG_PRINTOUTS
59 #else
60 #undef DEBUG_PRINTOUTS
61 #endif
62
63 /* Pseudo export entries. Never filled in with data, only used to
64 yield unique pointers of the correct type. */
65 Export exp_send, exp_receive, exp_timeout;
66
67 static ErtsTracer system_seq_tracer;
68 static Uint default_proc_trace_flags;
69 static ErtsTracer default_proc_tracer;
70 static Uint default_port_trace_flags;
71 static ErtsTracer default_port_tracer;
72
73 static Eterm system_monitor;
74 static Eterm system_profile;
75 static erts_atomic_t system_logger;
76
77 #ifdef HAVE_ERTS_NOW_CPU
78 int erts_cpu_timestamp;
79 #endif
80
81 static erts_mtx_t smq_mtx;
82 static erts_rwmtx_t sys_trace_rwmtx;
83
/* Classifies entries on the internal system message queue. */
enum ErtsSysMsgType {
    SYS_MSG_TYPE_UNDEFINED,
    SYS_MSG_TYPE_SYSMON,    /* system_monitor message */
    SYS_MSG_TYPE_ERRLGR,    /* error-logger message */
    SYS_MSG_TYPE_PROC_MSG,  /* plain message to a process */
    SYS_MSG_TYPE_SYSPROF    /* system_profile message */
};

/* Maximum number of heap words each timestamp format may need when
 * written to the heap by write_timestamp(). */
#define ERTS_TRACE_TS_NOW_MAX_SIZE \
    4                                   /* 3-tuple {Ms, S, Us} */
#define ERTS_TRACE_TS_MONOTONIC_MAX_SIZE \
    ERTS_MAX_SINT64_HEAP_SIZE
#define ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE \
    (3 + ERTS_MAX_SINT64_HEAP_SIZE \
     + ERTS_MAX_UINT64_HEAP_SIZE)       /* 2-tuple + both integers */

/* Worst case over all timestamp formats, plus one word (the extra
 * tuple arity slot patched in by patch_ts()). */
#define ERTS_TRACE_PATCH_TS_MAX_SIZE \
    (1 + ((ERTS_TRACE_TS_NOW_MAX_SIZE \
	   > ERTS_TRACE_TS_MONOTONIC_MAX_SIZE) \
	  ? ((ERTS_TRACE_TS_NOW_MAX_SIZE \
	      > ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE) \
	     ? ERTS_TRACE_TS_NOW_MAX_SIZE \
	     : ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE) \
	  : ((ERTS_TRACE_TS_MONOTONIC_MAX_SIZE \
	      > ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE) \
	     ? ERTS_TRACE_TS_MONOTONIC_MAX_SIZE \
	     : ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE)))

/* Timestamp type selected by a process'/port's trace flags. */
#define TFLGS_TS_TYPE(p) ERTS_TFLGS2TSTYPE(ERTS_TRACE_FLAGS((p)))
113
114 /*
115 * FUTURE CHANGES:
116 *
117 * The timestamp functionality has intentionally been
118 * split in two parts for future use even though it
119 * is not used like this today. take_timestamp() takes
120 * the timestamp and calculate heap need for it (which
121 * is not constant). write_timestamp() writes the
122 * timestamp to the allocated heap. That is, one typically
123 * want to take the timestamp before allocating the heap
124 * and then write it to the heap.
125 *
126 * The trace output functionality now use patch_ts_size(),
127 * write_ts(), and patch_ts(). write_ts() both takes the
128 * timestamp and writes it. Since we don't know the
129 * heap need when allocating the heap area we need to
130 * over allocate (maximum size from patch_ts_size()) and
131 * then potentially (often) shrink the heap area after the
132 * timestamp has been written. The only reason it is
133 * currently done this way is because we do not want to
134 * make major changes of the trace behavior in a patch.
135 * This is planned to be changed in next major release.
136 */
137
/* Snapshot of a taken timestamp.  Filled in by take_timestamp() and
 * later rendered onto a heap by write_timestamp(). */
typedef struct {
    int ts_type_flag;               /* ERTS_TRACE_FLG_*_TIMESTAMP flag, or 0 */
    union {
	struct {                    /* ERTS_TRACE_FLG_NOW_TIMESTAMP */
	    Uint ms;
	    Uint s;
	    Uint us;
	} now;
	struct {                    /* (strict) monotonic timestamp */
	    ErtsMonotonicTime time;
	    Sint64 raw_unique;      /* only set for strict monotonic */
	} monotonic;
    } u;
} ErtsTraceTimeStamp;
152
/*
 * Take a timestamp of the type selected by the least significant
 * timestamp flag in 'ts_type' and store it in *tsp.  Returns the
 * number of heap words needed to render it with write_timestamp()
 * (0 when no timestamp is requested or a small fits).
 */
static ERTS_INLINE Uint
take_timestamp(ErtsTraceTimeStamp *tsp, int ts_type)
{
    int ts_type_flag = ts_type & -ts_type; /* least significant flag */

    ASSERT(ts_type_flag == ERTS_TRACE_FLG_NOW_TIMESTAMP
	   || ts_type_flag == ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP
	   || ts_type_flag == ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP
	   || ts_type_flag == 0);

    tsp->ts_type_flag = ts_type_flag;
    switch (ts_type_flag) {
    case 0:
	/* No timestamp requested. */
	return (Uint) 0;
    case ERTS_TRACE_FLG_NOW_TIMESTAMP:
#ifdef HAVE_ERTS_NOW_CPU
	if (erts_cpu_timestamp)
	    erts_get_now_cpu(&tsp->u.now.ms, &tsp->u.now.s, &tsp->u.now.us);
	else
#endif
	    get_now(&tsp->u.now.ms, &tsp->u.now.s, &tsp->u.now.us);
	return (Uint) 4; /* 3-tuple {Ms, S, Us} */
    case ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP:
    case ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP: {
	Uint hsz = 0;
	ErtsMonotonicTime mtime = erts_get_monotonic_time(NULL);
	mtime = ERTS_MONOTONIC_TO_NSEC(mtime);
	mtime += ERTS_MONOTONIC_OFFSET_NSEC;
	/* A small needs no heap; a bignum needs its heap size. */
	hsz = (IS_SSMALL(mtime) ?
	       (Uint) 0
	       : ERTS_SINT64_HEAP_SIZE((Sint64) mtime));
	tsp->u.monotonic.time = mtime;
	if (ts_type_flag == ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP) {
	    Sint64 raw_unique;
	    hsz += 3; /* 2-tuple */
	    raw_unique = erts_raw_get_unique_monotonic_integer();
	    tsp->u.monotonic.raw_unique = raw_unique;
	    hsz += erts_raw_unique_monotonic_integer_heap_size(raw_unique, 0);
	}
	return hsz;
    }
    default:
	ERTS_INTERNAL_ERROR("invalid timestamp type");
	return 0;
    }
}
199
/*
 * Render the timestamp previously taken into *tsp onto the heap at
 * *hpp, advancing *hpp by the words actually used (which is what
 * take_timestamp() returned).  Returns the timestamp term, or NIL
 * when no timestamp was taken.
 */
static ERTS_INLINE Eterm
write_timestamp(ErtsTraceTimeStamp *tsp, Eterm **hpp)
{
    int ts_type_flag = tsp->ts_type_flag;
    Eterm res;

    switch (ts_type_flag) {
    case 0:
	return NIL;
    case ERTS_TRACE_FLG_NOW_TIMESTAMP:
	/* {Ms, S, Us} as produced by erlang:now/0. */
	res = TUPLE3(*hpp,
		     make_small(tsp->u.now.ms),
		     make_small(tsp->u.now.s),
		     make_small(tsp->u.now.us));
	*hpp += 4;
	return res;
    case ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP:
    case ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP: {
	Sint64 mtime, raw;
	Eterm unique, emtime;

	mtime = (Sint64) tsp->u.monotonic.time;
	emtime = (IS_SSMALL(mtime)
		  ? make_small((Sint64) mtime)
		  : erts_sint64_to_big((Sint64) mtime, hpp));

	if (ts_type_flag == ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP)
	    return emtime;

	/* Strict monotonic: pair the time with a unique integer. */
	raw = tsp->u.monotonic.raw_unique;
	unique = erts_raw_make_unique_monotonic_integer_value(hpp, raw, 0);
	res = TUPLE2(*hpp, emtime, unique);
	*hpp += 3;
	return res;
    }
    default:
	ERTS_INTERNAL_ERROR("invalid timestamp type");
	return THE_NON_VALUE;
    }
}
240
241
242 static ERTS_INLINE Uint
patch_ts_size(int ts_type)243 patch_ts_size(int ts_type)
244 {
245 int ts_type_flag = ts_type & -ts_type; /* least significant flag */
246 switch (ts_type_flag) {
247 case 0:
248 return 0;
249 case ERTS_TRACE_FLG_NOW_TIMESTAMP:
250 return 1 + ERTS_TRACE_TS_NOW_MAX_SIZE;
251 case ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP:
252 return 1 + ERTS_TRACE_TS_MONOTONIC_MAX_SIZE;
253 case ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP:
254 return 1 + ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE;
255 default:
256 ERTS_INTERNAL_ERROR("invalid timestamp type");
257 return 0;
258 }
259 }
260
261 /*
262 * Write a timestamp. The timestamp MUST be the last
263 * thing built on the heap. This since write_ts() might
264 * adjust the size of the used area.
265 */
266 static Eterm
write_ts(int ts_type,Eterm * hp,ErlHeapFragment * bp,Process * tracer)267 write_ts(int ts_type, Eterm *hp, ErlHeapFragment *bp, Process *tracer)
268 {
269 ErtsTraceTimeStamp ts;
270 Sint shrink;
271 Eterm res, *ts_hp = hp;
272 Uint hsz;
273
274 ASSERT(ts_type);
275
276 hsz = take_timestamp(&ts, ts_type);
277
278 res = write_timestamp(&ts, &ts_hp);
279
280 ASSERT(ts_hp == hp + hsz);
281
282 switch (ts.ts_type_flag) {
283 case ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP:
284 shrink = ERTS_TRACE_TS_MONOTONIC_MAX_SIZE;
285 break;
286 case ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP:
287 shrink = ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE;
288 break;
289 default:
290 return res;
291 }
292
293 shrink -= hsz;
294
295 ASSERT(shrink >= 0);
296
297 if (shrink) {
298 if (bp)
299 bp->used_size -= shrink;
300 }
301
302 return res;
303 }
304
/* System message queue management (served by the sys msg dispatcher). */
static void enqueue_sys_msg_unlocked(enum ErtsSysMsgType type,
				     Eterm from,
				     Eterm to,
				     Eterm msg,
				     ErlHeapFragment *bp);
static void enqueue_sys_msg(enum ErtsSysMsgType type,
			    Eterm from,
			    Eterm to,
			    Eterm msg,
			    ErlHeapFragment *bp);
static void init_sys_msg_dispatcher(void);

/* Tracer-NIF registry and its hash-table callbacks. */
static void init_tracer_nif(void);
static int tracer_cmp_fun(void*, void*);
static HashValue tracer_hash_fun(void*);
static void *tracer_alloc_fun(void*);
static void tracer_free_fun(void*);

typedef struct ErtsTracerNif_ ErtsTracerNif;
324
erts_init_trace(void)325 void erts_init_trace(void) {
326 erts_rwmtx_opt_t rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER;
327 rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
328 rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED;
329
330 erts_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers", NIL,
331 ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
332
333 #ifdef HAVE_ERTS_NOW_CPU
334 erts_cpu_timestamp = 0;
335 #endif
336 erts_bif_trace_init();
337 erts_system_monitor_clear(NULL);
338 erts_system_profile_clear(NULL);
339 default_proc_trace_flags = F_INITIAL_TRACE_FLAGS;
340 default_proc_tracer = erts_tracer_nil;
341 default_port_trace_flags = F_INITIAL_TRACE_FLAGS;
342 default_port_tracer = erts_tracer_nil;
343 system_seq_tracer = erts_tracer_nil;
344 erts_atomic_init_nob(&system_logger, am_logger);
345 init_sys_msg_dispatcher();
346 init_tracer_nif();
347 }
348
/* Allocate a heap fragment of SZ words for a system message.  Sets
   *BPP to the fragment, *OHPP to its off-heap list, and evaluates to
   the fragment's heap pointer. */
#define ERTS_ALLOC_SYSMSG_HEAP(SZ, BPP, OHPP, UNUSED) \
  (*(BPP) = new_message_buffer((SZ)), \
   *(OHPP) = &(*(BPP))->off_heap, \
   (*(BPP))->mem)

/* Identifies which tracer-NIF callback to use.  The TRACE_FUN_E_*
   values check whether a trace event is enabled; the TRACE_FUN_T_*
   values emit the corresponding trace message. */
enum ErtsTracerOpt {
    TRACE_FUN_DEFAULT   = 0,
    TRACE_FUN_ENABLED   = 1,
    TRACE_FUN_T_SEND    = 2,
    TRACE_FUN_T_RECEIVE = 3,
    TRACE_FUN_T_CALL    = 4,
    TRACE_FUN_T_SCHED_PROC = 5,
    TRACE_FUN_T_SCHED_PORT = 6,
    TRACE_FUN_T_GC      = 7,
    TRACE_FUN_T_PROCS   = 8,
    TRACE_FUN_T_PORTS   = 9,
    TRACE_FUN_E_SEND    = 10,
    TRACE_FUN_E_RECEIVE = 11,
    TRACE_FUN_E_CALL    = 12,
    TRACE_FUN_E_SCHED_PROC = 13,
    TRACE_FUN_E_SCHED_PORT = 14,
    TRACE_FUN_E_GC      = 15,
    TRACE_FUN_E_PROCS   = 16,
    TRACE_FUN_E_PORTS   = 17
};

/* Number of entries in enum ErtsTracerOpt. */
#define NIF_TRACER_TYPES (18)


/* Helpers for invoking tracer NIFs; defined further down. */
static ERTS_INLINE int
send_to_tracer_nif_raw(Process *c_p, Process *tracee, const ErtsTracer tracer,
                       Uint trace_flags, Eterm t_p_id, ErtsTracerNif *tnif,
                       enum ErtsTracerOpt topt,
                       Eterm tag, Eterm msg, Eterm extra, Eterm pam_result);
static ERTS_INLINE int
send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p,
                   Eterm t_p_id, ErtsTracerNif *tnif,
                   enum ErtsTracerOpt topt,
                   Eterm tag, Eterm msg, Eterm extra,
                   Eterm pam_result);
static ERTS_INLINE Eterm
call_enabled_tracer(const ErtsTracer tracer,
                    ErtsTracerNif **tnif_ref,
                    enum ErtsTracerOpt topt,
                    Eterm tag, Eterm t_p_id);
static int
is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks,
                  ErtsPTabElementCommon *t_p,
                  ErtsTracerNif **tnif_ret,
                  enum ErtsTracerOpt topt, Eterm tag);

/* Scheduler count snapshot used by system profiling. */
static Uint active_sched;
401
/*
 * Sample the current number of active schedulers for system
 * profiling.  Must be called with thread progress blocked, as the
 * assertion checks.
 */
void
erts_system_profile_setup_active_schedulers(void)
{
    ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
    active_sched = erts_active_schedulers();
}
408
/*
 * Drop the system monitor and/or system profile receiver when the
 * process 'exiting' was registered as either.  Flag clearing is left
 * to the trace message dispatcher.
 */
static void
exiting_reset(Eterm exiting)
{
    erts_rwmtx_rwlock(&sys_trace_rwmtx);
    if (exiting == system_monitor) {
	system_monitor = NIL;
	/* Let the trace message dispatcher clear flags, etc */
    }
    if (exiting == system_profile) {
	system_profile = NIL;
	/* Let the trace message dispatcher clear flags, etc */
    }
    erts_rwmtx_rwunlock(&sys_trace_rwmtx);
}
423
424 void
erts_trace_check_exiting(Eterm exiting)425 erts_trace_check_exiting(Eterm exiting)
426 {
427 int reset = 0;
428 erts_rwmtx_rlock(&sys_trace_rwmtx);
429 if (exiting == system_monitor)
430 reset = 1;
431 else if (exiting == system_profile)
432 reset = 1;
433 erts_rwmtx_runlock(&sys_trace_rwmtx);
434 if (reset)
435 exiting_reset(exiting);
436 }
437
/*
 * Install 'new' as the system sequential tracer and return the
 * previous one (caller owns the returned reference).  Returns
 * THE_NON_VALUE if the new tracer's 'enabled' callback does not
 * report am_trace.  c_p/c_p_locks are unused here.
 */
ErtsTracer
erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, ErtsTracer new)
{
    ErtsTracer old;

    if (!ERTS_TRACER_IS_NIL(new)) {
        /* Ask the tracer whether it accepts being installed. */
        Eterm nif_result = call_enabled_tracer(
            new, NULL, TRACE_FUN_ENABLED, am_trace_status, am_undefined);
        switch (nif_result) {
        case am_trace: break;
        default:
            return THE_NON_VALUE;
        }
    }

    erts_rwmtx_rwlock(&sys_trace_rwmtx);
    old = system_seq_tracer;
    /* Reset to nil before updating so erts_tracer_update() does not
       release the old tracer (it is handed back to the caller). */
    system_seq_tracer = erts_tracer_nil;
    erts_tracer_update(&system_seq_tracer, new);

#ifdef DEBUG_PRINTOUTS
    erts_fprintf(stderr, "set seq tracer new=%T old=%T\n", new, old);
#endif
    erts_rwmtx_rwunlock(&sys_trace_rwmtx);
    return old;
}
464
/*
 * Return the current system sequential tracer.  If the tracer's
 * 'enabled' callback asks to be removed (am_remove), clear it and
 * return the now-nil tracer.
 */
ErtsTracer
erts_get_system_seq_tracer(void)
{
    ErtsTracer st;
    erts_rwmtx_rlock(&sys_trace_rwmtx);
    st = system_seq_tracer;
#ifdef DEBUG_PRINTOUTS
    erts_fprintf(stderr, "get seq tracer %T\n", st);
#endif
    erts_rwmtx_runlock(&sys_trace_rwmtx);

    if (st != erts_tracer_nil &&
        call_enabled_tracer(st, NULL, TRACE_FUN_ENABLED,
                            am_trace_status, am_undefined) == am_remove) {
        /* Tracer asked to be removed; uninstall and drop our copy. */
        st = erts_set_system_seq_tracer(NULL, 0, erts_tracer_nil);
        ERTS_TRACER_CLEAR(&st);
    }

    return st;
}
485
/*
 * Read the default tracing settings into *flagsp / *tracerp (either
 * may be NULL), validating the stored default tracer on the way: if
 * its 'enabled' callback no longer reports am_trace, the default is
 * cleared.
 *
 * Locking: when tracerp != NULL the caller holds sys_trace_rwmtx for
 * reading; clearing the default then requires a temporary upgrade to
 * a write lock (unlock + rwlock, with a re-check for concurrent
 * changes).  When tracerp == NULL the caller already holds the write
 * lock and no upgrade is needed.
 */
static ERTS_INLINE void
get_default_tracing(Uint *flagsp, ErtsTracer *tracerp,
                    Uint *default_trace_flags,
                    ErtsTracer *default_tracer)
{
    if (!(*default_trace_flags & TRACEE_FLAGS))
        ERTS_TRACER_CLEAR(default_tracer);

    if (ERTS_TRACER_IS_NIL(*default_tracer)) {
        *default_trace_flags &= ~TRACEE_FLAGS;
    } else {
        Eterm nif_res;
        nif_res = call_enabled_tracer(*default_tracer,
                                      NULL, TRACE_FUN_ENABLED,
                                      am_trace_status, am_undefined);
        switch (nif_res) {
        case am_trace: break;
        default: {
            ErtsTracer curr_default_tracer = *default_tracer;
            if (tracerp) {
                /* we only have a rlock, so we have to unlock and then rwlock */
                erts_rwmtx_runlock(&sys_trace_rwmtx);
                erts_rwmtx_rwlock(&sys_trace_rwmtx);
            }
            /* check if someone else changed default tracer
               while we got the write lock, if so we don't do
               anything. */
            if (curr_default_tracer == *default_tracer) {
                *default_trace_flags &= ~TRACEE_FLAGS;
                ERTS_TRACER_CLEAR(default_tracer);
            }
            if (tracerp) {
                /* Downgrade back to the read lock the caller expects. */
                erts_rwmtx_rwunlock(&sys_trace_rwmtx);
                erts_rwmtx_rlock(&sys_trace_rwmtx);
            }
        }
        }
    }

    if (flagsp)
        *flagsp = *default_trace_flags;
    if (tracerp) {
        /* Hand the caller its own reference to the tracer. */
        erts_tracer_update(tracerp,*default_tracer);
    }
}
531
532 static ERTS_INLINE void
erts_change_default_tracing(int setflags,Uint flags,const ErtsTracer tracer,Uint * default_trace_flags,ErtsTracer * default_tracer)533 erts_change_default_tracing(int setflags, Uint flags,
534 const ErtsTracer tracer,
535 Uint *default_trace_flags,
536 ErtsTracer *default_tracer)
537 {
538 if (setflags)
539 *default_trace_flags |= flags;
540 else
541 *default_trace_flags &= ~flags;
542
543 erts_tracer_update(default_tracer, tracer);
544
545 get_default_tracing(NULL, NULL, default_trace_flags, default_tracer);
546 }
547
/*
 * Change the default tracing applied to newly spawned processes.
 * Takes the sys tracer write lock around the update.
 */
void
erts_change_default_proc_tracing(int setflags, Uint flagsp,
                                 const ErtsTracer tracer)
{
    erts_rwmtx_rwlock(&sys_trace_rwmtx);
    erts_change_default_tracing(
        setflags, flagsp, tracer,
        &default_proc_trace_flags,
        &default_proc_tracer);
    erts_rwmtx_rwunlock(&sys_trace_rwmtx);
}
559
/*
 * Change the default tracing applied to newly opened ports.
 * Takes the sys tracer write lock around the update.
 */
void
erts_change_default_port_tracing(int setflags, Uint flagsp,
                                 const ErtsTracer tracer)
{
    erts_rwmtx_rwlock(&sys_trace_rwmtx);
    erts_change_default_tracing(
        setflags, flagsp, tracer,
        &default_port_trace_flags,
        &default_port_tracer);
    erts_rwmtx_rwunlock(&sys_trace_rwmtx);
}
571
/*
 * Read the default process tracing into *flagsp / *tracerp.  The
 * caller receives its own tracer reference (via erts_tracer_update
 * inside get_default_tracing) and must release it when done.
 */
void
erts_get_default_proc_tracing(Uint *flagsp, ErtsTracer *tracerp)
{
    erts_rwmtx_rlock(&sys_trace_rwmtx);
    *tracerp = erts_tracer_nil; /* initialize */
    get_default_tracing(
        flagsp, tracerp,
        &default_proc_trace_flags,
        &default_proc_tracer);
    erts_rwmtx_runlock(&sys_trace_rwmtx);
}
583
/*
 * Read the default port tracing into *flagsp / *tracerp.  The caller
 * receives its own tracer reference and must release it when done.
 */
void
erts_get_default_port_tracing(Uint *flagsp, ErtsTracer *tracerp)
{
    erts_rwmtx_rlock(&sys_trace_rwmtx);
    *tracerp = erts_tracer_nil; /* initialize */
    get_default_tracing(
        flagsp, tracerp,
        &default_port_trace_flags,
        &default_port_tracer);
    erts_rwmtx_runlock(&sys_trace_rwmtx);
}
595
/* Install 'monitor' as the receiver of system_monitor messages. */
void
erts_set_system_monitor(Eterm monitor)
{
    erts_rwmtx_rwlock(&sys_trace_rwmtx);
    system_monitor = monitor;
    erts_rwmtx_rwunlock(&sys_trace_rwmtx);
}
603
/* Return the current receiver of system_monitor messages (NIL if none). */
Eterm
erts_get_system_monitor(void)
{
    Eterm monitor;
    erts_rwmtx_rlock(&sys_trace_rwmtx);
    monitor = system_monitor;
    erts_rwmtx_runlock(&sys_trace_rwmtx);
    return monitor;
}
613
/* Performance monitoring */
/* Install 'profile' as the receiver of system_profile messages. */
void erts_set_system_profile(Eterm profile) {
    erts_rwmtx_rwlock(&sys_trace_rwmtx);
    system_profile = profile;
    erts_rwmtx_rwunlock(&sys_trace_rwmtx);
}
620
/* Return the current receiver of system_profile messages (NIL if none). */
Eterm
erts_get_system_profile(void) {
    Eterm profile;
    erts_rwmtx_rlock(&sys_trace_rwmtx);
    profile = system_profile;
    erts_rwmtx_runlock(&sys_trace_rwmtx);
    return profile;
}
629
630 static void
write_sys_msg_to_port(Eterm unused_to,Port * trace_port,Eterm unused_from,enum ErtsSysMsgType unused_type,Eterm message)631 write_sys_msg_to_port(Eterm unused_to,
632 Port* trace_port,
633 Eterm unused_from,
634 enum ErtsSysMsgType unused_type,
635 Eterm message) {
636 byte *buffer;
637 byte *ptr;
638 Uint size;
639
640 if (erts_encode_ext_size(message, &size) != ERTS_EXT_SZ_OK)
641 erts_exit(ERTS_ERROR_EXIT, "Internal error: System limit\n");
642
643 buffer = (byte *) erts_alloc(ERTS_ALC_T_TMP, size);
644
645 ptr = buffer;
646
647 erts_encode_ext(message, &ptr);
648 if (!(ptr <= buffer+size)) {
649 erts_exit(ERTS_ERROR_EXIT, "Internal error in do_send_to_port: %d\n", ptr-buffer);
650 }
651
652 erts_raw_port_command(trace_port, buffer, ptr-buffer);
653
654 erts_free(ERTS_ALC_T_TMP, (void *) buffer);
655 }
656
657
/*
 * Emit a scheduling trace message {What, CurrentMFA} for process p,
 * where What is one of am_in, am_out, am_in_exiting, am_out_exiting
 * or am_out_exited.  CurrentMFA is {Mod,Fun,Arity} of the current
 * function, or the small integer 0 when it cannot be determined
 * (e.g. for an exiting process).
 */
static void
trace_sched_aux(Process *p, ErtsProcLocks locks, Eterm what)
{
    Eterm tmp, *hp;
    int curr_func;
    ErtsTracerNif *tnif = NULL;

    if (ERTS_TRACER_IS_NIL(ERTS_TRACER(p)))
	return;

    /* Debug-time sanity check of the event atom. */
    switch (what) {
    case am_out:
    case am_out_exiting:
    case am_out_exited:
    case am_in:
    case am_in_exiting:
	break;
    default:
	ASSERT(0);
	break;
    }

    if (!is_tracer_enabled(p, locks, &p->common, &tnif, TRACE_FUN_E_SCHED_PROC, what))
	return;

    if (ERTS_PROC_IS_EXITING(p))
	curr_func = 0;
    else {
	/* Lazily resolve the current function from the instruction
	   pointer if it has not been cached yet. */
	if (!p->current)
	    p->current = erts_find_function_from_pc(p->i);
	curr_func = p->current != NULL;
    }

    if (!curr_func) {
	tmp = make_small(0);
    } else {
	hp = HAlloc(p, 4);
	tmp = TUPLE3(hp,p->current->module,p->current->function,
		     make_small(p->current->arity));
	hp += 4;
    }

    send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_SCHED_PROC,
		       what, tmp, THE_NON_VALUE, am_true);
}
703
/* Send {trace_ts, Pid, What, {Mod, Func, Arity}, Timestamp}
 * or   {trace, Pid, What, {Mod, Func, Arity}}
 *
 * where 'What' is supposed to be 'in', 'out', 'in_exiting',
 * 'out_exiting', or 'out_exited'.
 */
void
trace_sched(Process *p, ErtsProcLocks locks, Eterm what)
{
    /* Thin public wrapper; all work is done in trace_sched_aux(). */
    trace_sched_aux(p, locks, what);
}
715
716 /* Send {trace_ts, Pid, Send, Msg, DestPid, PamResult, Timestamp}
717 * or {trace_ts, Pid, Send, Msg, DestPid, Timestamp}
718 * or {trace, Pid, Send, Msg, DestPid, PamResult}
719 * or {trace, Pid, Send, Msg, DestPid}
720 *
721 * where 'Send' is 'send' or 'send_to_non_existing_process'.
722 */
/*
 * Emit a 'send' trace event for process p sending 'msg' to 'to'.
 * The operation atom becomes am_send_to_non_existing_process when
 * the destination is a dead local pid (or an external pid that maps
 * to this node).  An optional match spec on the event may suppress
 * the message or contribute a PAM result.
 */
void
trace_send(Process *p, Eterm to, Eterm msg)
{
    Eterm operation = am_send;
    ErtsTracerNif *tnif = NULL;
    ErtsTracingEvent* te;
    Eterm pam_result;
    ErtsThrPrgrDelayHandle dhndl;

    ASSERT(ARE_TRACE_FLAGS_ON(p, F_TRACE_SEND));

    /* Event settings are staged per breakpoint index. */
    te = &erts_send_tracing[erts_active_bp_ix()];
    if (!te->on) {
	return;
    }
    if (te->match_spec) {
	Eterm args[2];
	Uint32 return_flags;
	args[0] = to;
	args[1] = msg;
	pam_result = erts_match_set_run_trace(p, p,
					      te->match_spec, args, 2,
					      ERTS_PAM_TMP_RESULT, &return_flags);
	if (pam_result == am_false)
	    return;
	if (ERTS_TRACE_FLAGS(p) & F_TRACE_SILENT) {
	    /* Matched, but tracee is silenced: release and skip. */
	    erts_match_set_release_result_trace(p, pam_result);
	    return;
	}
    } else
	pam_result = am_true;

    /* Keep unmanaged threads from progressing while we look up the
       destination process; paired with the _continue below. */
    dhndl = erts_thr_progress_unmanaged_delay();

    if (is_internal_pid(to)) {
	if (!erts_proc_lookup(to))
	    goto send_to_non_existing_process;
    }
    else if(is_external_pid(to)
	    && external_pid_dist_entry(to) == erts_this_dist_entry) {
    send_to_non_existing_process:
	operation = am_send_to_non_existing_process;
    }

    if (is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif,
			  TRACE_FUN_E_SEND, operation)) {
	send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_SEND,
			   operation, msg, to, pam_result);
    }

    erts_thr_progress_unmanaged_continue(dhndl);

    erts_match_set_release_result_trace(p, pam_result);
}
777
778 /* Send {trace_ts, Pid, receive, Msg, PamResult, Timestamp}
779 * or {trace_ts, Pid, receive, Msg, Timestamp}
780 * or {trace, Pid, receive, Msg, PamResult}
781 * or {trace, Pid, receive, Msg}
782 */
/*
 * Emit a 'receive' trace event for 'receiver' getting 'msg' from
 * 'from' (a pid, or an atom such as a node name or 'system').
 * 'te' may be passed pre-resolved by the caller; when NULL the
 * active receive-tracing event settings are looked up here.  An
 * optional match spec runs over {SenderNode, Sender, Msg} and may
 * suppress the event or contribute a PAM result.
 */
void
trace_receive(Process* receiver,
              Eterm from,
              Eterm msg, ErtsTracingEvent* te)
{
    ErtsTracerNif *tnif = NULL;
    Eterm pam_result;

    if (!te) {
        te = &erts_receive_tracing[erts_active_bp_ix()];
        if (!te->on)
            return;
    }
    else ASSERT(te->on);

    if (te->match_spec) {
        Eterm args[3];
        Uint32 return_flags;
        if (is_pid(from)) {
            args[0] = pid_node_name(from);
            args[1] = from;
        }
        else {
            ASSERT(is_atom(from));
            args[0] = from; /* node name or other atom (e.g 'system') */
            args[1] = am_undefined;
        }
        args[2] = msg;
        pam_result = erts_match_set_run_trace(NULL, receiver,
                                              te->match_spec, args, 3,
                                              ERTS_PAM_TMP_RESULT, &return_flags);
        if (pam_result == am_false)
            return;
        if (ERTS_TRACE_FLAGS(receiver) & F_TRACE_SILENT) {
            /* Matched, but tracee is silenced: release and skip. */
            erts_match_set_release_result_trace(NULL, pam_result);
            return;
        }
    } else
        pam_result = am_true;

    if (is_tracer_enabled(NULL, 0, &receiver->common, &tnif,
                          TRACE_FUN_E_RECEIVE, am_receive)) {
        send_to_tracer_nif(NULL, &receiver->common, receiver->common.id,
                           tnif, TRACE_FUN_T_RECEIVE,
                           am_receive, msg, THE_NON_VALUE, pam_result);
    }
    erts_match_set_release_result_trace(NULL, pam_result);
}
831
/*
 * Advance process p's seq_trace token before a send: stamp p as the
 * sender, bump the serial from the process' seq_trace clock and copy
 * the last-count.  Returns 1 when the token was updated, 0 when
 * there is nothing to do (no token, or the system seq tracer's
 * 'enabled' callback declines).
 */
int
seq_trace_update_serial(Process *p)
{
    ErtsTracer seq_tracer = erts_get_system_seq_tracer();
    ASSERT((is_tuple(SEQ_TRACE_TOKEN(p)) || is_nil(SEQ_TRACE_TOKEN(p))));
    if (have_no_seqtrace(SEQ_TRACE_TOKEN(p)) ||
        (seq_tracer != NIL &&
         call_enabled_tracer(seq_tracer, NULL,
                             TRACE_FUN_ENABLED, am_seq_trace,
                             p ? p->common.id : am_undefined) != am_trace)
#ifdef USE_VM_PROBES
        /* A dtrace utag placeholder is not a real token. */
        || (SEQ_TRACE_TOKEN(p) == am_have_dt_utag)
#endif
        ) {
        return 0;
    }
    SEQ_TRACE_TOKEN_SENDER(p) = p->common.id;
    SEQ_TRACE_TOKEN_SERIAL(p) =
        make_small(++(p -> seq_trace_clock));
    SEQ_TRACE_TOKEN_LASTCNT(p) =
        make_small(p -> seq_trace_lastcnt);
    return 1;
}
855
856 void
erts_seq_trace_update_node_token(Eterm token)857 erts_seq_trace_update_node_token(Eterm token)
858 {
859 Eterm serial;
860 Uint serial_num;
861 SEQ_TRACE_T_SENDER(token) = erts_this_dist_entry->sysname;
862 serial = SEQ_TRACE_T_SERIAL(token);
863 serial_num = unsigned_val(serial);
864 serial_num++;
865 SEQ_TRACE_T_SERIAL(token) = make_small(serial_num);
866 }
867
868
869 /* Send a sequential trace message to the sequential tracer.
870 * p is the caller (which contains the trace token),
871 * msg is the original message, type is trace type (SEQ_TRACE_SEND etc),
872 * and receiver is the receiver of the message.
873 *
874 * The message to be received by the sequential tracer is:
875 *
876 * TraceMsg =
877 * {seq_trace, Label, {Type, {Lastcnt, Serial}, Sender, Receiver, Msg} [,Timestamp] }
878 *
879 */
/*
 * Build and deliver a seq_trace message to the system sequential
 * tracer.  'token' carries label/serial/sender; 'type' is one of
 * SEQ_TRACE_SEND/PRINT/RECEIVE; 'exitfrom' != NIL wraps msg as
 * {'EXIT', ExitFrom, Msg}.  Suppressed when there is no tracer, the
 * sending process is sensitive, or the token's flags do not include
 * 'type'.  The message is built on a temporary stack heap.
 */
void
seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
			 Eterm receiver, Process *process, Eterm exitfrom)
{
    Eterm mess;
    Eterm* hp;
    Eterm label;
    Eterm lastcnt_serial;
    Eterm type_atom;
    ErtsTracer seq_tracer;
    int seq_tracer_flags = 0;
#define LOCAL_HEAP_SIZE (64)
    DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);

    seq_tracer = erts_get_system_seq_tracer();

    ASSERT(is_tuple(token) || is_nil(token));
    if (token == NIL || (process && ERTS_TRACE_FLAGS(process) & F_SENSITIVE) ||
	ERTS_TRACER_IS_NIL(seq_tracer) ||
	call_enabled_tracer(seq_tracer,
			    NULL, TRACE_FUN_ENABLED,
			    am_seq_trace,
			    process ? process->common.id : am_undefined) != am_trace) {
	return;
    }

    if ((unsigned_val(SEQ_TRACE_T_FLAGS(token)) & type) == 0) {
	/* No flags set, nothing to do */
	return;
    }

    switch (type) {
    case SEQ_TRACE_SEND:    type_atom = am_send; break;
    case SEQ_TRACE_PRINT:   type_atom = am_print; break;
    case SEQ_TRACE_RECEIVE: type_atom = am_receive; break;
    default:
	erts_exit(ERTS_ERROR_EXIT, "invalid type in seq_trace_output_generic: %d:\n", type);
	return;			/* To avoid warning */
    }

    UseTmpHeapNoproc(LOCAL_HEAP_SIZE);

    /* Build {Type, {Lastcnt, Serial}, Sender, Receiver, Msg} on the
       temporary heap (worst case fits well within LOCAL_HEAP_SIZE). */
    hp = local_heap;
    label = SEQ_TRACE_T_LABEL(token);
    lastcnt_serial = TUPLE2(hp, SEQ_TRACE_T_LASTCNT(token),
			    SEQ_TRACE_T_SERIAL(token));
    hp += 3;
    if (exitfrom != NIL) {
	msg = TUPLE3(hp, am_EXIT, exitfrom, msg);
	hp += 4;
    }
    mess = TUPLE5(hp, type_atom, lastcnt_serial, SEQ_TRACE_T_SENDER(token), receiver, msg);
    hp += 6;

    /* Translate the token's seq-trace flags to trace flags
       (e.g. timestamp selection). */
    seq_tracer_flags |= ERTS_SEQTFLGS2TFLGS(unsigned_val(SEQ_TRACE_T_FLAGS(token)));

    send_to_tracer_nif_raw(NULL, process, seq_tracer, seq_tracer_flags,
			   label, NULL, TRACE_FUN_DEFAULT, am_seq_trace, mess,
			   THE_NON_VALUE, am_true);

    UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
}
943
944
945
946
947 /* Send {trace_ts, Pid, return_to, {Mod, Func, Arity}, Timestamp}
948 * or {trace, Pid, return_to, {Mod, Func, Arity}}
949 */
950 void
erts_trace_return_to(Process * p,ErtsCodePtr pc)951 erts_trace_return_to(Process *p, ErtsCodePtr pc)
952 {
953 const ErtsCodeMFA *cmfa = erts_find_function_from_pc(pc);
954 Eterm mfa;
955
956 if (!cmfa) {
957 mfa = am_undefined;
958 } else {
959 Eterm *hp = HAlloc(p, 4);
960 mfa = TUPLE3(hp, cmfa->module, cmfa->function,
961 make_small(cmfa->arity));
962 }
963
964 send_to_tracer_nif(p, &p->common, p->common.id, NULL, TRACE_FUN_T_CALL,
965 am_return_to, mfa, THE_NON_VALUE, am_true);
966 }
967
968
969 /* Send {trace_ts, Pid, return_from, {Mod, Name, Arity}, Retval, Timestamp}
970 * or {trace, Pid, return_from, {Mod, Name, Arity}, Retval}
971 */
/*
 * Emit a 'return_from' trace event: {Mod, Name, Arity} plus the
 * return value.  'tracer' is the breakpoint tracer: erts_tracer_true
 * means "use the process' own tracer and flags"; any other non-nil
 * tracer is a meta trace with a fixed flag set.
 */
void
erts_trace_return(Process* p, ErtsCodeMFA *mfa,
                  Eterm retval, ErtsTracer *tracer)
{
    Eterm* hp;
    Eterm mfa_tuple;
    Uint meta_flags, *tracee_flags;

    ASSERT(tracer);
    if (ERTS_TRACER_COMPARE(*tracer, erts_tracer_true)) {
        /* Breakpoint trace enabled without specifying tracer =>
	 *   use process tracer and flags
	 */
        tracer = &ERTS_TRACER(p);
    }
    if (ERTS_TRACER_IS_NIL(*tracer)) {
	/* Trace disabled */
	return;
    }
    ASSERT(IS_TRACER_VALID(*tracer));
    if (tracer == &ERTS_TRACER(p)) {
	/* Tracer specified in process structure =>
	 *   non-breakpoint trace =>
	 *     use process flags
	 */
	tracee_flags = &ERTS_TRACE_FLAGS(p);
        if (! (*tracee_flags & F_TRACE_CALLS)) {
            return;
        }
    } else {
	/* Tracer not specified in process structure =>
	 *   tracer specified in breakpoint =>
	 *     meta trace =>
	 *       use fixed flag set instead of process flags
	 */
	meta_flags = F_TRACE_CALLS | F_NOW_TS;
	tracee_flags = &meta_flags;
    }

    /* Build the {Mod, Func, Arity} payload on the process heap. */
    hp = HAlloc(p, 4);
    mfa_tuple = TUPLE3(hp, mfa->module, mfa->function,
                       make_small(mfa->arity));
    hp += 4;
    send_to_tracer_nif_raw(p, NULL, *tracer, *tracee_flags, p->common.id,
                           NULL, TRACE_FUN_T_CALL, am_return_from, mfa_tuple,
                           retval, am_true);
}
1019
1020 /* Send {trace_ts, Pid, exception_from, {Mod, Name, Arity}, {Class,Value},
1021 * Timestamp}
1022 * or {trace, Pid, exception_from, {Mod, Name, Arity}, {Class,Value},
1023 * Timestamp}
1024 *
1025 * Where Class is atomic but Value is any term.
1026 */
1027 void
erts_trace_exception(Process * p,ErtsCodeMFA * mfa,Eterm class,Eterm value,ErtsTracer * tracer)1028 erts_trace_exception(Process* p, ErtsCodeMFA *mfa, Eterm class, Eterm value,
1029 ErtsTracer *tracer)
1030 {
1031 Eterm* hp;
1032 Eterm mfa_tuple, cv;
1033 Uint meta_flags, *tracee_flags;
1034
1035 ASSERT(tracer);
1036 if (ERTS_TRACER_COMPARE(*tracer, erts_tracer_true)) {
1037 /* Breakpoint trace enabled without specifying tracer =>
1038 * use process tracer and flags
1039 */
1040 tracer = &ERTS_TRACER(p);
1041 }
1042 if (ERTS_TRACER_IS_NIL(*tracer)) {
1043 /* Trace disabled */
1044 return;
1045 }
1046 ASSERT(IS_TRACER_VALID(*tracer));
1047 if (tracer == &ERTS_TRACER(p)) {
1048 /* Tracer specified in process structure =>
1049 * non-breakpoint trace =>
1050 * use process flags
1051 */
1052 tracee_flags = &ERTS_TRACE_FLAGS(p);
1053 if (! (*tracee_flags & F_TRACE_CALLS)) {
1054 return;
1055 }
1056 } else {
1057 /* Tracer not specified in process structure =>
1058 * tracer specified in breakpoint =>
1059 * meta trace =>
1060 * use fixed flag set instead of process flags
1061 */
1062 meta_flags = F_TRACE_CALLS | F_NOW_TS;
1063 tracee_flags = &meta_flags;
1064 }
1065
1066 hp = HAlloc(p, 7);;
1067 mfa_tuple = TUPLE3(hp, mfa->module, mfa->function, make_small(mfa->arity));
1068 hp += 4;
1069 cv = TUPLE2(hp, class, value);
1070 hp += 3;
1071 send_to_tracer_nif_raw(p, NULL, *tracer, *tracee_flags, p->common.id,
1072 NULL, TRACE_FUN_T_CALL, am_exception_from, mfa_tuple, cv, am_true);
1073 }
1074
1075 /*
1076 * This function implements the new call trace.
1077 *
1078 * Send {trace_ts, Pid, call, {Mod, Func, A}, PamResult, Timestamp}
1079 * or {trace_ts, Pid, call, {Mod, Func, A}, Timestamp}
1080 * or {trace, Pid, call, {Mod, Func, A}, PamResult}
1081 * or {trace, Pid, call, {Mod, Func, A}
1082 *
1083 * where 'A' is arity or argument list depending on trace flag 'arity'.
1084 *
1085 * If *tracer_pid is am_true, it is a breakpoint trace that shall use
1086 * the process tracer, if it is NIL no trace message is generated,
1087 * if it is a pid or port we do a meta trace.
1088 */
/* Implements call tracing for one call event: resolves the effective
 * tracer, runs the match spec (PAM program) if any, builds the {M,F,A}
 * term and sends a 'call' trace message. Returns a bitset of
 * MATCH_SET_* flags that tells the caller whether return_trace /
 * return_to tracing should be armed for this call (0 = nothing sent).
 */
Uint32
erts_call_trace(Process* p, ErtsCodeInfo *info, Binary *match_spec,
                Eterm* args, int local, ErtsTracer *tracer)
{
    Eterm* hp;
    Eterm mfa_tuple;
    int arity;
    int i;
    Uint32 return_flags;
    Eterm pam_result = am_true;
    Uint meta_flags, *tracee_flags;
    ErtsTracerNif *tnif = NULL;
    Eterm transformed_args[MAX_ARG];
    ErtsTracer pre_ms_tracer = erts_tracer_nil;

    ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN);

    ASSERT(tracer);
    if (ERTS_TRACER_COMPARE(*tracer, erts_tracer_true)) {
        /* Breakpoint trace enabled without specifying tracer =>
         *   use process tracer and flags
         */
        tracer = &ERTS_TRACER(p);
    }
    if (ERTS_TRACER_IS_NIL(*tracer)) {
        /* Trace disabled */
        return 0;
    }
    ASSERT(IS_TRACER_VALID(*tracer));
    if (tracer == &ERTS_TRACER(p)) {
        /* Tracer specified in process structure =>
         *   non-breakpoint trace =>
         *     use process flags
         */
        tracee_flags = &ERTS_TRACE_FLAGS(p);
        /* It is not ideal at all to call this check twice,
           it should be optimized so that only one call is made. */
        if (!is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif,
                               TRACE_FUN_ENABLED, am_trace_status)
            || !is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif,
                                  TRACE_FUN_E_CALL, am_call)) {
            return 0;
        }
    } else {
        /* Tracer not specified in process structure =>
         *   tracer specified in breakpoint =>
         *     meta trace =>
         *       use fixed flag set instead of process flags
         */
        if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) {
            /* No trace messages for sensitive processes. */
            return 0;
        }
        meta_flags = F_TRACE_CALLS | F_NOW_TS;
        tracee_flags = &meta_flags;
        /* Ask the tracer whether it is still alive/interested. */
        switch (call_enabled_tracer(*tracer,
                                    &tnif, TRACE_FUN_ENABLED,
                                    am_trace_status, p->common.id)) {
        default:
        case am_remove: *tracer = erts_tracer_nil; /* fall through */
        case am_discard: return 0;
        case am_trace:
            switch (call_enabled_tracer(*tracer,
                                        &tnif, TRACE_FUN_T_CALL,
                                        am_call, p->common.id)) {
            default:
            case am_discard: return 0;
            case am_trace: break;
            }
            break;
        }
    }

    /*
     * Because of the delayed sub-binary creation optimization introduced in
     * R12B, (at most) one of arguments can be a match context instead of
     * a binary. Since we don't want to handle match contexts in utility functions
     * such as size_object() and copy_struct(), we must make sure that we
     * temporarily convert any match contexts to sub binaries.
     */
    arity = info->mfa.arity;
    for (i = 0; i < arity; i++) {
        Eterm arg = args[i];
        if (is_boxed(arg) && header_is_bin_matchstate(*boxed_val(arg))) {
            /* Materialize the remaining bits of the match context as
             * a sub binary allocated on the process heap. */
            ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(arg);
            ErlBinMatchBuffer* mb = &ms->mb;
            Uint bit_size;
            ErlSubBin *sub_bin_heap = (ErlSubBin *)HAlloc(p, ERL_SUB_BIN_SIZE);

            bit_size = mb->size - mb->offset;
            sub_bin_heap->thing_word = HEADER_SUB_BIN;
            sub_bin_heap->size = BYTE_OFFSET(bit_size);
            sub_bin_heap->bitsize = BIT_OFFSET(bit_size);
            sub_bin_heap->offs = BYTE_OFFSET(mb->offset);
            sub_bin_heap->bitoffs = BIT_OFFSET(mb->offset);
            sub_bin_heap->is_writable = 0;
            sub_bin_heap->orig = mb->orig;

            arg = make_binary(sub_bin_heap);
        }
        transformed_args[i] = arg;
    }
    args = transformed_args;

    /*
     * If there is a PAM program, run it. Return if it fails.
     *
     * Some precedence rules:
     *
     * - No proc flags, e.g 'silent' or 'return_to'
     *   has any effect on meta trace.
     * - The 'silent' process trace flag silences all call
     *   related messages, e.g 'call', 'return_to' and 'return_from'.
     * - The {message,_} PAM function does not affect {return_trace}.
     * - The {message,false} PAM function shall give the same
     *   'call' trace message as no PAM match.
     * - The {message,true} PAM function shall give the same
     *   'call' trace message as a nonexistent PAM program.
     */

    return_flags = 0;
    if (match_spec) {
        /* we have to make a copy of the tracer here as the match spec
           may remove it, and we still want to generate a trace message */
        erts_tracer_update(&pre_ms_tracer, *tracer);
        tracer = &pre_ms_tracer;
        pam_result = erts_match_set_run_trace(p, p,
                                              match_spec, args, arity,
                                              ERTS_PAM_TMP_RESULT, &return_flags);
    }

    if (tracee_flags == &meta_flags) {
        /* Meta trace */
        if (pam_result == am_false) {
            UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
            ERTS_TRACER_CLEAR(&pre_ms_tracer);
            return return_flags;
        }
    } else {
        /* Non-meta trace */
        if (*tracee_flags & F_TRACE_SILENT) {
            /* 'silent' suppresses the message but PAM side effects
             * (e.g. enabling/disabling trace) have already happened. */
            erts_match_set_release_result_trace(p, pam_result);
            UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
            ERTS_TRACER_CLEAR(&pre_ms_tracer);
            return 0;
        }
        if (pam_result == am_false) {
            UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
            ERTS_TRACER_CLEAR(&pre_ms_tracer);
            return return_flags;
        }
        if (local && (*tracee_flags & F_TRACE_RETURN_TO)) {
            return_flags |= MATCH_SET_RETURN_TO_TRACE;
        }
    }

    ASSERT(!ERTS_TRACER_IS_NIL(*tracer));

    /*
     * Build the the {M,F,A} tuple in the local heap.
     * (A is arguments or arity.)
     */


    if (*tracee_flags & F_TRACE_ARITY_ONLY) {
        hp = HAlloc(p, 4);
        mfa_tuple = make_small(arity);
    } else {
        /* 4 words for the 3-tuple + 2 words per cons cell of the arg list. */
        hp = HAlloc(p, 4 + arity * 2);
        mfa_tuple = NIL;
        for (i = arity-1; i >= 0; i--) {
            mfa_tuple = CONS(hp, args[i], mfa_tuple);
            hp += 2;
        }
    }
    mfa_tuple = TUPLE3(hp, info->mfa.module, info->mfa.function, mfa_tuple);
    hp += 4;

    /*
     * Build the trace tuple and send it to the port.
     */
    send_to_tracer_nif_raw(p, NULL, *tracer, *tracee_flags, p->common.id,
                           tnif, TRACE_FUN_T_CALL, am_call, mfa_tuple,
                           THE_NON_VALUE, pam_result);

    if (match_spec) {
        erts_match_set_release_result_trace(p, pam_result);
        if (tracer == &pre_ms_tracer)
            ERTS_TRACER_CLEAR(&pre_ms_tracer);
    }

    return return_flags;
}
1282
1283 /* Sends trace message:
1284 * {trace_ts, ProcessPid, What, Data, Timestamp}
1285 * or {trace, ProcessPid, What, Data}
1286 *
1287 * 'what' must be atomic, 'data' may be a deep term.
1288 * 'c_p' is the currently executing process, may be NULL.
1289 * 't_p' is the traced process.
1290 */
1291 void
trace_proc(Process * c_p,ErtsProcLocks c_p_locks,Process * t_p,Eterm what,Eterm data)1292 trace_proc(Process *c_p, ErtsProcLocks c_p_locks,
1293 Process *t_p, Eterm what, Eterm data)
1294 {
1295 ErtsTracerNif *tnif = NULL;
1296 if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif,
1297 TRACE_FUN_E_PROCS, what))
1298 send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_PROCS,
1299 what, data, THE_NON_VALUE, am_true);
1300 }
1301
1302
1303 /* Sends trace message:
1304 * {trace_ts, ParentPid, spawn, ChildPid, {Mod, Func, Args}, Timestamp}
1305 * or {trace, ParentPid, spawn, ChildPid, {Mod, Func, Args}}
1306 *
1307 * 'pid' is the ChildPid, 'mod' and 'func' must be atomic,
1308 * and 'args' may be a deep term.
1309 */
1310 void
trace_proc_spawn(Process * p,Eterm what,Eterm pid,Eterm mod,Eterm func,Eterm args)1311 trace_proc_spawn(Process *p, Eterm what, Eterm pid,
1312 Eterm mod, Eterm func, Eterm args)
1313 {
1314 ErtsTracerNif *tnif = NULL;
1315 if (is_tracer_enabled(NULL, 0,
1316 &p->common, &tnif, TRACE_FUN_E_PROCS, what)) {
1317 Eterm mfa;
1318 Eterm* hp;
1319
1320 hp = HAlloc(p, 4);
1321 mfa = TUPLE3(hp, mod, func, args);
1322 hp += 4;
1323 send_to_tracer_nif(NULL, &p->common, p->common.id, tnif, TRACE_FUN_T_PROCS,
1324 what, pid, mfa, am_true);
1325 }
1326 }
1327
/* Records export entry 'e' in the process' circular saved-calls
 * buffer (the 'save_calls' feature), unless the process is marked
 * sensitive or has no buffer allocated.
 */
void save_calls(Process *p, Export *e)
{
    struct saved_calls *scb;

    /* Sensitive processes never record their calls. */
    if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE)
        return;

    scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
    if (scb == NULL)
        return;

    /* Store at the cursor, wrap around at capacity, and let the
     * element count grow until the buffer is full. */
    scb->ct[scb->cur] = e;
    scb->cur++;
    if (scb->cur >= scb->len)
        scb->cur = 0;
    if (scb->n < scb->len)
        scb->n++;
}
1344
1345 /* Sends trace message:
1346 * {trace_ts, Pid, What, Msg, Timestamp}
1347 * or {trace, Pid, What, Msg}
1348 *
1349 * where 'What' must be atomic and 'Msg' is:
1350 * [{heap_size, HeapSize}, {old_heap_size, OldHeapSize},
1351 * {stack_size, StackSize}, {recent_size, RecentSize},
1352 * {mbuf_size, MbufSize}]
1353 *
1354 * where 'HeapSize', 'OldHeapSize', 'StackSize', 'RecentSize and 'MbufSize'
1355 * are all small (atomic) integers.
1356 */
/* Sends a GC trace message {trace[_ts], Pid, What, Info[, Timestamp]}
 * for process 'p'. If 'msg' is THE_NON_VALUE the info list is built
 * here from erts_process_gc_info() plus a {wordsize, Size} entry;
 * otherwise the caller-supplied 'msg' is sent as-is.
 */
void
trace_gc(Process *p, Eterm what, Uint size, Eterm msg)
{
    ErtsTracerNif *tnif = NULL;
    Eterm* o_hp = NULL;
    Eterm* hp;
    Uint sz = 0;
    Eterm tup;
    /* Delay thread progress while we may call into the tracer nif;
     * released again before returning. */
    ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();

    if (is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif,
                          TRACE_FUN_E_GC, what)) {

        if (is_non_value(msg)) {

            /* First call sizes the info term; the temporary buffer gets
             * 3 extra words for the {wordsize,_} tuple and 2 for its cons. */
            (void) erts_process_gc_info(p, &sz, NULL, 0, 0);
            o_hp = hp = erts_alloc(ERTS_ALC_T_TMP, (sz + 3 + 2) * sizeof(Eterm));

            msg = erts_process_gc_info(p, NULL, &hp, 0, 0);
            tup = TUPLE2(hp, am_wordsize, make_small(size)); hp += 3;
            msg = CONS(hp, tup, msg); hp += 2;
        }

        send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_GC,
                           what, msg, THE_NON_VALUE, am_true);
        if (o_hp)
            erts_free(ERTS_ALC_T_TMP, o_hp);
    }
    erts_thr_progress_unmanaged_continue(dhndl);
}
1387
/* Enqueues a system_monitor message
 *   {monitor, Pid, long_schedule, [{timeout,T},{in,{M,F,A}},{out,{M,F,A}}]}
 * for a process that was scheduled in for longer than the configured
 * limit. 'in_fp'/'out_fp' may be NULL, in which case the corresponding
 * MFA is the atom 'undefined'.
 */
void
monitor_long_schedule_proc(Process *p, const ErtsCodeMFA *in_fp,
                           const ErtsCodeMFA *out_fp, Uint time)
{
    ErlHeapFragment *bp;
    ErlOffHeap *off_heap;
    Uint hsz;
    Eterm *hp, list, in_mfa = am_undefined, out_mfa = am_undefined;
    Eterm in_tpl, out_tpl, tmo_tpl, tmo, msg;


    /*
     * Size: {monitor, pid, long_schedule, [{timeout, T}, {in, {M,F,A}},{out,{M,F,A}}]} ->
     * 5 (top tuple of 4), (3 (elements) * 2 (cons)) + 3 (timeout tuple of 2) + size of Timeout +
     * (2 * 3 (in/out tuple of 2)) +
     * 0 (unknown) or 4 (MFA tuple of 3) + 0 (unknown) or 4 (MFA tuple of 3)
     * = 20 + ((in_fp != NULL) ? 4 : 0) + ((out_fp != NULL) ? 4 : 0) + size of Timeout
     */
    hsz = 20 + ((in_fp != NULL) ? 4 : 0) + ((out_fp != NULL) ? 4 : 0);
    /* First pass only accumulates the size needed for the timeout value. */
    (void) erts_bld_uint(NULL, &hsz, time);
    hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, monitor_p);
    tmo = erts_bld_uint(&hp, NULL, time);
    if (in_fp != NULL) {
        in_mfa = TUPLE3(hp, in_fp->module, in_fp->function,
                        make_small(in_fp->arity));
        hp +=4;
    }
    if (out_fp != NULL) {
        out_mfa = TUPLE3(hp, out_fp->module, out_fp->function,
                         make_small(out_fp->arity));
        hp +=4;
    }
    tmo_tpl = TUPLE2(hp,am_timeout, tmo);
    hp += 3;
    in_tpl = TUPLE2(hp,am_in,in_mfa);
    hp += 3;
    out_tpl = TUPLE2(hp,am_out,out_mfa);
    hp += 3;
    /* Build the list back-to-front: [{timeout,_},{in,_},{out,_}] */
    list = CONS(hp,out_tpl,NIL);
    hp += 2;
    list = CONS(hp,in_tpl,list);
    hp += 2;
    list = CONS(hp,tmo_tpl,list);
    hp += 2;
    msg = TUPLE4(hp, am_monitor, p->common.id, am_long_schedule, list);
    hp += 5;
    enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
}
/* Enqueues a system_monitor message
 *   {monitor, Port, long_schedule, [{timeout,T},{port_op,Op}]}
 * for a port task that ran longer than the configured limit.
 * 'type' selects the Op atom (input/output/timeout/...).
 */
void
monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time)
{
    ErlHeapFragment *bp;
    ErlOffHeap *off_heap;
    Uint hsz;
    Eterm *hp, list, op;
    Eterm op_tpl, tmo_tpl, tmo, msg;


    /*
     * Size: {monitor, port, long_schedule, [{timeout, T}, {op, Operation}]} ->
     * 5 (top tuple of 4), (2 (elements) * 2 (cons)) + 3 (timeout tuple of 2)
     * + size of Timeout + 3 (op tuple of 2 atoms)
     * = 15 + size of Timeout
     */
    hsz = 15;
    /* First pass only accumulates the size needed for the timeout value. */
    (void) erts_bld_uint(NULL, &hsz, time);

    hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, monitor_p);

    /* Map the port task type to its reporting atom. */
    switch (type) {
    case ERTS_PORT_TASK_PROC_SIG: op = am_proc_sig; break;
    case ERTS_PORT_TASK_TIMEOUT: op = am_timeout; break;
    case ERTS_PORT_TASK_INPUT: op = am_input; break;
    case ERTS_PORT_TASK_OUTPUT: op = am_output; break;
    case ERTS_PORT_TASK_DIST_CMD: op = am_dist_cmd; break;
    default: op = am_undefined; break;
    }

    tmo = erts_bld_uint(&hp, NULL, time);

    op_tpl = TUPLE2(hp,am_port_op,op);
    hp += 3;

    tmo_tpl = TUPLE2(hp,am_timeout, tmo);
    hp += 3;

    /* [{timeout,_},{port_op,_}] built back-to-front. */
    list = CONS(hp,op_tpl,NIL);
    hp += 2;
    list = CONS(hp,tmo_tpl,list);
    hp += 2;
    msg = TUPLE4(hp, am_monitor, pp->common.id, am_long_schedule, list);
    hp += 5;
    enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, pp->common.id, NIL, msg, bp);
}
1482
/* Enqueues a system_monitor message
 *   {monitor, Pid, long_gc, [{timeout,T}, {heap_size,_}, ...]}
 * after a garbage collection that took longer than the configured
 * limit. The info list pairs each tag in tags[] with the matching
 * value in values[] (same order).
 */
void
monitor_long_gc(Process *p, Uint time) {
    ErlHeapFragment *bp;
    ErlOffHeap *off_heap;
    Uint hsz;
    Eterm *hp, list, msg;
    Eterm tags[] = {
        am_timeout,
        am_old_heap_block_size,
        am_heap_block_size,
        am_mbuf_size,
        am_stack_size,
        am_old_heap_size,
        am_heap_size
    };
    /* values[i] corresponds to tags[i]; sizes are in words. */
    UWord values[] = {
        time,
        OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0,
        HEAP_SIZE(p),
        MBUF_SIZE(p),
        STACK_START(p) - p->stop,
        OLD_HEAP(p) ? OLD_HTOP(p) - OLD_HEAP(p) : 0,
        HEAP_TOP(p) - HEAP_START(p)
    };
#ifdef DEBUG
    Eterm *hp_end;
#endif


    /* Sizing pass (NULL heap pointer) followed by the build pass below. */
    hsz = 0;
    (void) erts_bld_atom_uword_2tup_list(NULL,
                                         &hsz,
                                         sizeof(values)/sizeof(*values),
                                         tags,
                                         values);
    hsz += 5 /* 4-tuple */;

    hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, monitor_p);

#ifdef DEBUG
    hp_end = hp + hsz;
#endif

    list = erts_bld_atom_uword_2tup_list(&hp,
                                         NULL,
                                         sizeof(values)/sizeof(*values),
                                         tags,
                                         values);
    msg = TUPLE4(hp, am_monitor, p->common.id, am_long_gc, list);

#ifdef DEBUG
    hp += 5 /* 4-tuple */;
    ASSERT(hp == hp_end);
#endif

    enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
}
1540
/* Enqueues a system_monitor message
 *   {monitor, Pid, large_heap, [{heap_size,_}, ...]}
 * when the process heap has grown past the configured limit.
 * Same structure as monitor_long_gc() but without a timeout entry.
 */
void
monitor_large_heap(Process *p) {
    ErlHeapFragment *bp;
    ErlOffHeap *off_heap;
    Uint hsz;
    Eterm *hp, list, msg;
    Eterm tags[] = {
        am_old_heap_block_size,
        am_heap_block_size,
        am_mbuf_size,
        am_stack_size,
        am_old_heap_size,
        am_heap_size
    };
    /* values[i] corresponds to tags[i]; sizes are in words. */
    UWord values[] = {
        OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0,
        HEAP_SIZE(p),
        MBUF_SIZE(p),
        STACK_START(p) - p->stop,
        OLD_HEAP(p) ? OLD_HTOP(p) - OLD_HEAP(p) : 0,
        HEAP_TOP(p) - HEAP_START(p)
    };
#ifdef DEBUG
    Eterm *hp_end;
#endif



    /* Sizing pass (NULL heap pointer) followed by the build pass below. */
    hsz = 0;
    (void) erts_bld_atom_uword_2tup_list(NULL,
                                         &hsz,
                                         sizeof(values)/sizeof(*values),
                                         tags,
                                         values);
    hsz += 5 /* 4-tuple */;

    hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, monitor_p);

#ifdef DEBUG
    hp_end = hp + hsz;
#endif

    list = erts_bld_atom_uword_2tup_list(&hp,
                                         NULL,
                                         sizeof(values)/sizeof(*values),
                                         tags,
                                         values);
    msg = TUPLE4(hp, am_monitor, p->common.id, am_large_heap, list);

#ifdef DEBUG
    hp += 5 /* 4-tuple */;
    ASSERT(hp == hp_end);
#endif

    enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
}
1597
1598 void
monitor_generic(Process * p,Eterm type,Eterm spec)1599 monitor_generic(Process *p, Eterm type, Eterm spec) {
1600 ErlHeapFragment *bp;
1601 ErlOffHeap *off_heap;
1602 Eterm *hp, msg;
1603
1604
1605 hp = ERTS_ALLOC_SYSMSG_HEAP(5, &bp, &off_heap, monitor_p);
1606
1607 msg = TUPLE4(hp, am_monitor, p->common.id, type, spec);
1608 hp += 5;
1609
1610 enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
1611
1612 }
1613
1614
1615 /* Begin system_profile tracing */
1616 /* Scheduler profiling */
1617
/* Enqueues a system_profile message
 *   {profile, scheduler, Id, State, ActiveCount, Timestamp}
 * and maintains the global count of active schedulers. 'state' must
 * be am_active or am_inactive.
 */
void
profile_scheduler(Eterm scheduler_id, Eterm state) {
    Eterm *hp, msg;
    ErlHeapFragment *bp = NULL;

    Uint hsz;

    /* 7 words for the 6-tuple plus the timestamp's extra words
     * (patch_ts_size() includes one word already counted). */
    hsz = 7 + patch_ts_size(erts_system_profile_ts_type)-1;

    bp = new_message_buffer(hsz);
    hp = bp->mem;

    /* smq_mtx also guards active_sched. */
    erts_mtx_lock(&smq_mtx);

    switch (state) {
    case am_active:
        active_sched++;
        break;
    case am_inactive:
        active_sched--;
        break;
    default:
        ASSERT(!"Invalid state");
        break;
    }

    msg = TUPLE6(hp, am_profile, am_scheduler, scheduler_id,
                 state, make_small(active_sched),
                 NIL /* Will be overwritten by timestamp */);
    hp += 7;

    /* Write timestamp in element 6 of the 'msg' tuple */
    hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL);

    enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp);
    erts_mtx_unlock(&smq_mtx);

}
1656
1657 /* Port profiling */
1658
1659 void
trace_port_open(Port * p,Eterm calling_pid,Eterm drv_name)1660 trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
1661 ErtsTracerNif *tnif = NULL;
1662 ERTS_CHK_NO_PROC_LOCKS;
1663 if (is_tracer_enabled(NULL, 0, &p->common, &tnif, TRACE_FUN_E_PORTS, am_open))
1664 send_to_tracer_nif(NULL, &p->common, p->common.id, tnif, TRACE_FUN_T_PORTS,
1665 am_open, calling_pid, drv_name, am_true);
1666 }
1667
1668 /* Sends trace message:
1669 * {trace_ts, PortPid, What, Data, Timestamp}
1670 * or {trace, PortPid, What, Data}
1671 *
1672 * 'what' must be atomic, 'data' must be atomic.
1673 * 't_p' is the traced port.
1674 */
1675 void
trace_port(Port * t_p,Eterm what,Eterm data)1676 trace_port(Port *t_p, Eterm what, Eterm data) {
1677
1678 ErtsTracerNif *tnif = NULL;
1679 ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p)
1680 || erts_thr_progress_is_blocking());
1681 ERTS_CHK_NO_PROC_LOCKS;
1682 if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_PORTS, what))
1683 send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_PORTS,
1684 what, data, THE_NON_VALUE, am_true);
1685 }
1686
1687
1688 static Eterm
trace_port_tmp_binary(char * bin,Sint sz,Binary ** bptrp,Eterm ** hp)1689 trace_port_tmp_binary(char *bin, Sint sz, Binary **bptrp, Eterm **hp)
1690 {
1691 if (sz <= ERL_ONHEAP_BIN_LIMIT) {
1692 ErlHeapBin *hb = (ErlHeapBin *)*hp;
1693 hb->thing_word = header_heap_bin(sz);
1694 hb->size = sz;
1695 sys_memcpy(hb->data, bin, sz);
1696 *hp += heap_bin_size(sz);
1697 return make_binary(hb);
1698 } else {
1699 ProcBin* pb = (ProcBin *)*hp;
1700 Binary *bptr = erts_bin_nrml_alloc(sz);
1701 sys_memcpy(bptr->orig_bytes, bin, sz);
1702 pb->thing_word = HEADER_PROC_BIN;
1703 pb->size = sz;
1704 pb->next = NULL;
1705 pb->val = bptr;
1706 pb->bytes = (byte*) bptr->orig_bytes;
1707 pb->flags = 0;
1708 *bptrp = bptr;
1709 *hp += PROC_BIN_SIZE;
1710 return make_binary(pb);
1711 }
1712 }
1713
1714 /* Sends trace message:
1715 * {trace, PortPid, 'receive', {pid(), {command, iolist()}}}
1716 * {trace, PortPid, 'receive', {pid(), {control, pid()}}}
1717 * {trace, PortPid, 'receive', {pid(), exit}}
1718 *
1719 */
/* Sends a 'receive' trace message for port 't_p':
 *   {trace, PortPid, 'receive', {Caller, {command, iolist()}}}
 *   {trace, PortPid, 'receive', {Caller, {control, _}}}
 *   {trace, PortPid, 'receive', {Caller, exit}}
 *
 * The trailing varargs depend on 'what':
 *   am_close              -> no extra args
 *   am_command            -> char *bin, Sint sz
 *   am_call / am_control  -> unsigned int command, char *bin, Sint sz
 *   am_commandv           -> ErlIOVec *
 *   anything else         -> a single Eterm argument
 */
void
trace_port_receive(Port *t_p, Eterm caller, Eterm what, ...)
{
    ErtsTracerNif *tnif = NULL;
    ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p)
                   || erts_thr_progress_is_blocking());
    ERTS_CHK_NO_PROC_LOCKS;
    if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_RECEIVE, am_receive)) {
        /* We can use a stack heap here, as the nif is called in the
           context of a port */
#define LOCAL_HEAP_SIZE (3 + 3 + heap_bin_size(ERL_ONHEAP_BIN_LIMIT) + 3)
        DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);

        Eterm *hp, data, *orig_hp = NULL;
        Binary *bptr = NULL;
        va_list args;
        UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
        hp = local_heap;

        if (what == am_close) {
            data = what;
        } else {
            Eterm arg;
            va_start(args, what);
            if (what == am_command) {
                char *bin = va_arg(args, char *);
                Sint sz = va_arg(args, Sint);
                va_end(args);
                arg = trace_port_tmp_binary(bin, sz, &bptr, &hp);
            } else if (what == am_call || what == am_control) {
                unsigned int command = va_arg(args, unsigned int);
                char *bin = va_arg(args, char *);
                Sint sz = va_arg(args, Sint);
                Eterm cmd;
                va_end(args);
                arg = trace_port_tmp_binary(bin, sz, &bptr, &hp);
#if defined(ARCH_32)
                /* On 32-bit, the command number may not fit a small. */
                if (!IS_USMALL(0, command)) {
                    *hp = make_pos_bignum_header(1);
                    BIG_DIGIT(hp, 0) = (Uint)command;
                    cmd = make_big(hp);
                    hp += 2;
                } else
#endif
                {
                    cmd = make_small((Sint)command);
                }
                arg = TUPLE2(hp, cmd, arg);
                hp += 3;
            } else if (what == am_commandv) {
                ErlIOVec *evp = va_arg(args, ErlIOVec*);
                int i;
                va_end(args);
                /* The iovec may need more heap than the stack buffer
                 * provides; fall back to a temporary allocation. */
                if ((6 + evp->vsize * (2+PROC_BIN_SIZE+ERL_SUB_BIN_SIZE)) > LOCAL_HEAP_SIZE) {
                    hp = erts_alloc(ERTS_ALC_T_TMP,
                                    (6 + evp->vsize * (2+PROC_BIN_SIZE+ERL_SUB_BIN_SIZE)) * sizeof(Eterm));
                    orig_hp = hp;
                }
                arg = NIL;
                /* Convert each element in the ErlIOVec to a sub bin that points
                   to a procbin. We don't have to increment the proc bin refc as
                   the port task keeps the reference alive. */
                for (i = evp->vsize-1; i >= 0; i--) {
                    if (evp->iov[i].iov_len) {
                        ProcBin* pb = (ProcBin*)hp;
                        ErlSubBin *sb;
                        ASSERT(evp->binv[i]);
                        pb->thing_word = HEADER_PROC_BIN;
                        pb->val = ErlDrvBinary2Binary(evp->binv[i]);
                        pb->size = pb->val->orig_size;
                        pb->next = NULL;
                        pb->bytes = (byte*) pb->val->orig_bytes;
                        pb->flags = 0;
                        hp += PROC_BIN_SIZE;

                        sb = (ErlSubBin*) hp;
                        sb->thing_word = HEADER_SUB_BIN;
                        sb->size = evp->iov[i].iov_len;
                        sb->offs = (byte*)(evp->iov[i].iov_base) - pb->bytes;
                        sb->orig = make_binary(pb);
                        sb->bitoffs = 0;
                        sb->bitsize = 0;
                        sb->is_writable = 0;
                        hp += ERL_SUB_BIN_SIZE;

                        arg = CONS(hp, make_binary(sb), arg);
                        hp += 2;
                    }
                }
                /* Reported to the tracer as a plain 'command'. */
                what = am_command;
            } else {
                arg = va_arg(args, Eterm);
                va_end(args);
            }
            data = TUPLE2(hp, what, arg);
            hp += 3;
        }

        data = TUPLE2(hp, caller, data);
        hp += 3;
        ASSERT(hp <= (local_heap + LOCAL_HEAP_SIZE) || orig_hp);
        send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif,
                           TRACE_FUN_T_RECEIVE,
                           am_receive, data, THE_NON_VALUE, am_true);

        /* Release the refc binary created by trace_port_tmp_binary(), if any. */
        if (bptr)
            erts_bin_release(bptr);

        if (orig_hp)
            erts_free(ERTS_ALC_T_TMP, orig_hp);

        UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
    }
#undef LOCAL_HEAP_SIZE
}
1835
1836 void
trace_port_send(Port * t_p,Eterm receiver,Eterm msg,int exists)1837 trace_port_send(Port *t_p, Eterm receiver, Eterm msg, int exists)
1838 {
1839 ErtsTracerNif *tnif = NULL;
1840 Eterm op = exists ? am_send : am_send_to_non_existing_process;
1841 ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p)
1842 || erts_thr_progress_is_blocking());
1843 ERTS_CHK_NO_PROC_LOCKS;
1844 if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SEND, op))
1845 send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SEND,
1846 op, msg, receiver, am_true);
1847 }
1848
/* Sends a 'send' trace message for port 't_p' where the payload is a
 * raw byte buffer: {PortPid, {What, Binary}} is built on a temporary
 * stack heap (or as a refc binary for large payloads) and sent to 'to'.
 */
void trace_port_send_binary(Port *t_p, Eterm to, Eterm what, char *bin, Sint sz)
{
    ErtsTracerNif *tnif = NULL;
    ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p)
                   || erts_thr_progress_is_blocking());
    ERTS_CHK_NO_PROC_LOCKS;
    if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SEND, am_send)) {
        Eterm msg;
        Binary* bptr = NULL;
        /* Two 2-tuples plus the largest possible on-heap binary. */
#define LOCAL_HEAP_SIZE (3 + 3 + heap_bin_size(ERL_ONHEAP_BIN_LIMIT))
        DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);

        Eterm *hp;

        /* A ProcBin must also fit in the slot sized for a heap binary. */
        ERTS_CT_ASSERT(heap_bin_size(ERL_ONHEAP_BIN_LIMIT) >= PROC_BIN_SIZE);
        UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
        hp = local_heap;

        msg = trace_port_tmp_binary(bin, sz, &bptr, &hp);

        msg = TUPLE2(hp, what, msg);
        hp += 3;
        msg = TUPLE2(hp, t_p->common.id, msg);
        hp += 3;

        send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SEND,
                           am_send, msg, to, am_true);
        /* Release the refc binary created for a large payload, if any. */
        if (bptr)
            erts_bin_release(bptr);

        UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
    }
}
1883
1884 /* Send {trace_ts, Pid, What, {Mod, Func, Arity}, Timestamp}
1885 * or {trace, Pid, What, {Mod, Func, Arity}}
1886 *
1887 * where 'What' is supposed to be 'in' or 'out' and
1888 * where 'where' is supposed to be location (callback)
1889 * for the port.
1890 */
1891
/* Convenience wrapper: scheduling trace for a port with no specific
 * location; make_small(0) stands in for "no callback". */
void
trace_sched_ports(Port *p, Eterm what) {
    trace_sched_ports_where(p, what, make_small(0));
}
1896
1897 void
trace_sched_ports_where(Port * t_p,Eterm what,Eterm where)1898 trace_sched_ports_where(Port *t_p, Eterm what, Eterm where) {
1899 ErtsTracerNif *tnif = NULL;
1900 ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p)
1901 || erts_thr_progress_is_blocking());
1902 ERTS_CHK_NO_PROC_LOCKS;
1903 if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SCHED_PORT, what))
1904 send_to_tracer_nif(NULL, &t_p->common, t_p->common.id,
1905 tnif, TRACE_FUN_T_SCHED_PORT,
1906 what, where, THE_NON_VALUE, am_true);
1907 }
1908
1909 /* Port profiling */
1910
/* Enqueues a system_profile message
 *   {profile, PortPid, Status, 0, Timestamp}
 * for a port becoming runnable/not runnable. The count element is
 * always 0 for ports.
 */
void
profile_runnable_port(Port *p, Eterm status) {
    Eterm *hp, msg;
    ErlHeapFragment *bp = NULL;
    Eterm count = make_small(0);

    Uint hsz;

    /* 6 words for the 5-tuple plus the timestamp's extra words
     * (patch_ts_size() includes one word already counted). */
    hsz = 6 + patch_ts_size(erts_system_profile_ts_type)-1;

    bp = new_message_buffer(hsz);
    hp = bp->mem;

    erts_mtx_lock(&smq_mtx);

    msg = TUPLE5(hp, am_profile, p->common.id, status, count,
                 NIL /* Will be overwritten by timestamp */);
    hp += 6;

    /* Write timestamp in element 5 of the 'msg' tuple */
    hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL);

    enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp);
    erts_mtx_unlock(&smq_mtx);
}
1936
1937 /* Process profiling */
/* Enqueues a system_profile message
 *   {profile, Pid, Status, Where, Timestamp}
 * for a process becoming runnable/not runnable. 'Where' is the current
 * {M,F,A} when it can be determined, otherwise 0 (for exiting
 * processes or unknown code locations).
 */
void
profile_runnable_proc(Process *p, Eterm status){
    Eterm *hp, msg;
    Eterm where = am_undefined;
    ErlHeapFragment *bp = NULL;
    const ErtsCodeMFA *cmfa = NULL;

    ErtsThrPrgrDelayHandle dhndl;
    /* 4 words for the MFA 3-tuple (dropped below if unknown) +
     * 6 for the 5-tuple + timestamp extra words. */
    Uint hsz = 4 + 6 + patch_ts_size(erts_system_profile_ts_type)-1;
    /* Assumptions:
     * We possibly don't have the MAIN_LOCK for the process p here.
     * We assume that we can read from p->current and p->i atomically
     */
    dhndl = erts_thr_progress_unmanaged_delay(); /* suspend purge operations */

    if (!ERTS_PROC_IS_EXITING(p)) {
        if (p->current) {
            cmfa = p->current;
        } else {
            cmfa = erts_find_function_from_pc(p->i);
        }
    }

    if (!cmfa) {
        /* No MFA tuple will be built; shrink the buffer accordingly. */
        hsz -= 4;
    }

    bp = new_message_buffer(hsz);
    hp = bp->mem;

    if (cmfa) {
        where = TUPLE3(hp, cmfa->module, cmfa->function,
                       make_small(cmfa->arity));
        hp += 4;
    } else {
        where = make_small(0);
    }

    erts_thr_progress_unmanaged_continue(dhndl);

    erts_mtx_lock(&smq_mtx);

    msg = TUPLE5(hp, am_profile, p->common.id, status, where,
                 NIL /* Will be overwritten by timestamp */);
    hp += 6;

    /* Write timestamp in element 5 of the 'msg' tuple */
    hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL);

    enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp);
    erts_mtx_unlock(&smq_mtx);
}
1990 /* End system_profile tracing */
1991
1992
1993
1994
/* One queued system message (system_monitor / system_profile / logger
 * traffic), consumed by the sys msg dispatcher (see
 * sys_msg_dispatcher_tid below). */
typedef struct ErtsSysMsgQ_ ErtsSysMsgQ;
struct ErtsSysMsgQ_ {
    ErtsSysMsgQ *next;        /* next element; NULL at the tail */
    enum ErtsSysMsgType type; /* SYS_MSG_TYPE_* discriminator */
    Eterm from;               /* sender id, or NIL */
    Eterm to;                 /* receiver id, or NIL */
    Eterm msg;                /* the message term, rooted in bp */
    ErlHeapFragment *bp;      /* heap fragment holding msg */
};

/* FIFO of pending system messages: head and tail pointers, both NULL
 * when empty. Guarded by smq_mtx (see enqueue_sys_msg). */
static ErtsSysMsgQ *sys_message_queue;
static ErtsSysMsgQ *sys_message_queue_end;

static erts_tid_t sys_msg_dispatcher_tid; /* dispatcher thread id */
static erts_cnd_t smq_cnd; /* signalled when an element is enqueued */

ERTS_QUALLOC_IMPL(smq_element, ErtsSysMsgQ, 20, ERTS_ALC_T_SYS_MSG_Q)
2012
2013 static void
enqueue_sys_msg_unlocked(enum ErtsSysMsgType type,Eterm from,Eterm to,Eterm msg,ErlHeapFragment * bp)2014 enqueue_sys_msg_unlocked(enum ErtsSysMsgType type,
2015 Eterm from,
2016 Eterm to,
2017 Eterm msg,
2018 ErlHeapFragment *bp)
2019 {
2020 ErtsSysMsgQ *smqp;
2021
2022 smqp = smq_element_alloc();
2023 smqp->next = NULL;
2024 smqp->type = type;
2025 smqp->from = from;
2026 smqp->to = to;
2027 smqp->msg = msg;
2028 smqp->bp = bp;
2029
2030 if (sys_message_queue_end) {
2031 ASSERT(sys_message_queue);
2032 sys_message_queue_end->next = smqp;
2033 }
2034 else {
2035 ASSERT(!sys_message_queue);
2036 sys_message_queue = smqp;
2037 }
2038 sys_message_queue_end = smqp;
2039 erts_cnd_signal(&smq_cnd);
2040 }
2041
/* Locked wrapper around enqueue_sys_msg_unlocked(): takes smq_mtx,
 * enqueues the message, and releases the mutex. */
static void
enqueue_sys_msg(enum ErtsSysMsgType type,
                Eterm from,
                Eterm to,
                Eterm msg,
                ErlHeapFragment *bp)
{
    erts_mtx_lock(&smq_mtx);
    enqueue_sys_msg_unlocked(type, from, to, msg, bp);
    erts_mtx_unlock(&smq_mtx);
}
2053
/* Returns the current system logger ('logger', 'undefined', or a
 * local pid), read without memory-order constraints. */
Eterm
erts_get_system_logger(void)
{
    return (Eterm)erts_atomic_read_nob(&system_logger);
}
2059
2060 Eterm
erts_set_system_logger(Eterm logger)2061 erts_set_system_logger(Eterm logger)
2062 {
2063 if (logger != am_logger && logger != am_undefined && !is_internal_pid(logger))
2064 return THE_NON_VALUE;
2065 return (Eterm)erts_atomic_xchg_nob(&system_logger, logger);
2066 }
2067
/* Queues 'msg' (rooted in heap fragment 'bp') for delivery to the
 * currently installed system logger. */
void
erts_queue_error_logger_message(Eterm from, Eterm msg, ErlHeapFragment *bp)
{
    enqueue_sys_msg(SYS_MSG_TYPE_ERRLGR, from, erts_get_system_logger(), msg, bp);
}
2073
/* Queues 'msg' (rooted in heap fragment 'bp') for delivery to the
 * local process 'to' via the system message dispatcher. */
void
erts_send_sys_msg_proc(Eterm from, Eterm to, Eterm msg, ErlHeapFragment *bp)
{
    ASSERT(is_internal_pid(to));
    enqueue_sys_msg(SYS_MSG_TYPE_PROC_MSG, from, to, msg, bp);
}
2080
#ifdef DEBUG_PRINTOUTS
/* Debug aid: print a short tag for the type of a queued system message. */
static void
print_msg_type(ErtsSysMsgQ *smqp)
{
    const char *label;

    switch (smqp->type) {
    case SYS_MSG_TYPE_SYSMON:
        label = "SYSMON ";
        break;
    case SYS_MSG_TYPE_SYSPROF:
        label = "SYSPROF ";
        break;
    case SYS_MSG_TYPE_ERRLGR:
        label = "ERRLGR ";
        break;
    case SYS_MSG_TYPE_PROC_MSG:
        label = "PROC_MSG ";
        break;
    default:
        label = "??? ";
        break;
    }
    erts_fprintf(stderr, "%s", label);
}
#endif
2104
/*
 * Called by the dispatcher thread when a system message could not be
 * delivered to 'receiver'. For monitor/profile messages the
 * corresponding settings are cleared (under blocked thread progress)
 * so that we stop producing messages nobody receives. For logger
 * messages the message is printed on stderr as a last resort.
 */
static void
sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
{
    switch (smqp->type) {
    case SYS_MSG_TYPE_SYSMON:
        if (receiver == NIL
            && !erts_system_monitor_long_gc
            && !erts_system_monitor_long_schedule
            && !erts_system_monitor_large_heap
            && !erts_system_monitor_flags.busy_port
            && !erts_system_monitor_flags.busy_dist_port)
            break; /* Everything is disabled */
        erts_thr_progress_block();
        if (system_monitor == receiver || receiver == NIL)
            erts_system_monitor_clear(NULL);
        erts_thr_progress_unblock();
        break;
    case SYS_MSG_TYPE_SYSPROF:
        if (receiver == NIL
            && !erts_system_profile_flags.runnable_procs
            && !erts_system_profile_flags.runnable_ports
            && !erts_system_profile_flags.exclusive
            && !erts_system_profile_flags.scheduler)
            break;
        /* Block system to clear flags */
        erts_thr_progress_block();
        if (system_profile == receiver || receiver == NIL) {
            erts_system_profile_clear(NULL);
        }
        erts_thr_progress_unblock();
        break;
    case SYS_MSG_TYPE_ERRLGR: {
        Eterm *tp;
        Eterm tag;

        /* Walk the logger message to pull out a tag and the first
         * element of the report list for the stderr fallback print.
         * NOTE(review): the exact nesting appears to mirror the term
         * built by the logger message producers — confirm against
         * the senders if the shape checks below ever start failing. */
        if (is_not_tuple(smqp->msg)) {
            goto unexpected_error_msg;
        }
        tp = tuple_val(smqp->msg);
        if (arityval(tp[0]) != 2) {
            goto unexpected_error_msg;
        }
        if (is_not_tuple(tp[2])) {
            goto unexpected_error_msg;
        }
        tp = tuple_val(tp[2]);
        if (arityval(tp[0]) != 3) {
            goto unexpected_error_msg;
        }
        tag = tp[1];
        if (is_not_tuple(tp[3])) {
            goto unexpected_error_msg;
        }
        tp = tuple_val(tp[3]);
        if (arityval(tp[0]) != 3) {
            goto unexpected_error_msg;
        }
        if (is_not_list(tp[3])) {
            goto unexpected_error_msg;
        }

        {
            static const char *no_logger = "(no logger present)";
            /* no_error_logger: */
            erts_fprintf(stderr, "%s %T: %T\n",
                         no_logger, tag, CAR(list_val(tp[3])));
            break;
        unexpected_error_msg:
            erts_fprintf(stderr,
                         "%s unexpected logger message: %T\n",
                         no_logger,
                         smqp->msg);
            break;
        }
        /* Both paths above break out of the switch; this is unreachable. */
        ASSERT(0);
    }
    case SYS_MSG_TYPE_PROC_MSG:
        break;
    default:
        ASSERT(0);
    }
}
2187
/*
 * Thread-progress wakeup callback for the dispatcher thread:
 * clear the wait flag and signal the queue condition variable so
 * the dispatcher leaves its wait loop.
 */
static void
sys_msg_dispatcher_wakeup(void *vwait_p)
{
    int *wait_p = (int *) vwait_p;
    erts_mtx_lock(&smq_mtx);
    *wait_p = 0;
    erts_cnd_signal(&smq_cnd);
    erts_mtx_unlock(&smq_mtx);
}
2197
/*
 * Thread-progress prepare-wait callback: mark the dispatcher as
 * about to sleep (the flag is read under smq_mtx by the wait loop).
 */
static void
sys_msg_dispatcher_prep_wait(void *vwait_p)
{
    int *wait_p = (int *) vwait_p;
    erts_mtx_lock(&smq_mtx);
    *wait_p = 1;
    erts_mtx_unlock(&smq_mtx);
}
2206
/*
 * Thread-progress finalize-wait callback: clear the wait flag
 * (the dispatcher is no longer sleeping).
 */
static void
sys_msg_dispatcher_fin_wait(void *vwait_p)
{
    int *wait_p = (int *) vwait_p;
    erts_mtx_lock(&smq_mtx);
    *wait_p = 0;
    erts_mtx_unlock(&smq_mtx);
}
2215
/*
 * Thread-progress wait callback: block on the queue condition
 * variable until sys_msg_dispatcher_wakeup() clears the flag.
 * The predicate loop guards against spurious wakeups.
 */
static void
sys_msg_dispatcher_wait(void *vwait_p)
{
    int *wait_p = (int *) vwait_p;
    erts_mtx_lock(&smq_mtx);
    while (*wait_p)
        erts_cnd_wait(&smq_cnd, &smq_mtx);
    erts_mtx_unlock(&smq_mtx);
}
2225
/* Messages grabbed from the global queue, owned by the dispatcher
   thread; only inspected from outside while thread progress is
   blocked (see erts_debug_foreach_sys_msg_in_q()). */
static ErtsSysMsgQ *local_sys_message_queue = NULL;

/*
 * Main loop of the system message dispatcher thread. Repeatedly:
 *   1. frees the previously delivered batch,
 *   2. waits (as a managed thread-progress thread) for new messages,
 *   3. detaches the whole global queue under smq_mtx, and
 *   4. delivers each message to its receiver (process, the 'logger'
 *      process, or a port), falling back to sys_msg_disp_failure()
 *      on undeliverable messages.
 */
static void *
sys_msg_dispatcher_func(void *unused)
{
    ErtsThrPrgrCallbacks callbacks;
    ErtsThrPrgrData *tpd;
    int wait = 0;

#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_set_thread_name("system message dispatcher");
#endif

    local_sys_message_queue = NULL;

    /* Register as a managed thread so thread progress can both wake
       us up and account for us while we sleep. */
    callbacks.arg = (void *) &wait;
    callbacks.wakeup = sys_msg_dispatcher_wakeup;
    callbacks.prepare_wait = sys_msg_dispatcher_prep_wait;
    callbacks.wait = sys_msg_dispatcher_wait;
    callbacks.finalize_wait = sys_msg_dispatcher_fin_wait;

    tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 0, 0);

    while (1) {
        int end_wait = 0;
        ErtsSysMsgQ *smqp;

        ERTS_LC_ASSERT(!erts_thr_progress_is_blocking());

        erts_mtx_lock(&smq_mtx);

        /* Free previously used queue ... */
        while (local_sys_message_queue) {
            smqp = local_sys_message_queue;
            local_sys_message_queue = smqp->next;
            smq_element_free(smqp);
        }

        /* Fetch current trace message queue ... */
        if (!sys_message_queue) {
            /* Nothing to do: go inactive w.r.t. thread progress
               before sleeping so we do not stall other threads. */
            wait = 1;
            erts_mtx_unlock(&smq_mtx);
            end_wait = 1;
            erts_thr_progress_active(tpd, 0);
            erts_thr_progress_prepare_wait(tpd);
            erts_mtx_lock(&smq_mtx);
        }

        while (!sys_message_queue) {
            if (wait)
                erts_cnd_wait(&smq_cnd, &smq_mtx);
            if (sys_message_queue)
                break;
            wait = 1;
            erts_mtx_unlock(&smq_mtx);
            /*
             * Ensure thread progress continue. We might have
             * been the last thread to go to sleep. In that case
             * erts_thr_progress_finalize_wait() will take care
             * of it...
             */
            erts_thr_progress_finalize_wait(tpd);
            erts_thr_progress_prepare_wait(tpd);
            erts_mtx_lock(&smq_mtx);
        }

        /* Detach the whole queue; producers start a fresh one. */
        local_sys_message_queue = sys_message_queue;
        sys_message_queue = NULL;
        sys_message_queue_end = NULL;

        erts_mtx_unlock(&smq_mtx);

        if (end_wait) {
            erts_thr_progress_finalize_wait(tpd);
            erts_thr_progress_active(tpd, 1);
        }

        /* Send trace messages ... */

        ASSERT(local_sys_message_queue);

        for (smqp = local_sys_message_queue; smqp; smqp = smqp->next) {
            Eterm receiver;
            ErtsProcLocks proc_locks = ERTS_PROC_LOCKS_MSG_SEND;
            Process *proc = NULL;
            Port *port = NULL;

            ASSERT(is_value(smqp->msg));

            if (erts_thr_progress_update(tpd))
                erts_thr_progress_leader_update(tpd);

#ifdef DEBUG_PRINTOUTS
            print_msg_type(smqp);
#endif
            /* Resolve the receiver for this message type. Messages
               that the receiver sent about itself are dropped. */
            switch (smqp->type) {
            case SYS_MSG_TYPE_PROC_MSG:
                receiver = smqp->to;
                break;
            case SYS_MSG_TYPE_SYSMON:
                receiver = erts_get_system_monitor();
                if (smqp->from == receiver) {
#ifdef DEBUG_PRINTOUTS
                    erts_fprintf(stderr, "MSG=%T to %T... ",
                                 smqp->msg, receiver);
#endif
                    goto drop_sys_msg;
                }
                break;
            case SYS_MSG_TYPE_SYSPROF:
                receiver = erts_get_system_profile();
                if (smqp->from == receiver) {
#ifdef DEBUG_PRINTOUTS
                    erts_fprintf(stderr, "MSG=%T to %T... ",
                                 smqp->msg, receiver);
#endif
                    goto drop_sys_msg;
                }
                break;
            case SYS_MSG_TYPE_ERRLGR:
                receiver = smqp->to;
                break;
            default:
                receiver = NIL;
                break;
            }

#ifdef DEBUG_PRINTOUTS
            erts_fprintf(stderr, "MSG=%T to %T... ", smqp->msg, receiver);
#endif

            if (is_internal_pid(receiver)) {
                proc = erts_pid2proc(NULL, 0, receiver, proc_locks);
                if (!proc) {
                    if (smqp->type == SYS_MSG_TYPE_ERRLGR) {
                        /* Bad logger process, send to kernel 'logger' process */
                        erts_set_system_logger(am_logger);
                        receiver = erts_get_system_logger();
                        goto logger;
                    } else {
                        /* Bad tracer */
                        goto failure;
                    }
                }
                else {
                    ErtsMessage *mp;
                queue_proc_msg:
                    /* Heap fragment ownership moves to the message. */
                    mp = erts_alloc_message(0, NULL);
                    mp->data.heap_frag = smqp->bp;
                    erts_queue_message(proc,proc_locks,mp,smqp->msg,am_system);
#ifdef DEBUG_PRINTOUTS
                    erts_fprintf(stderr, "delivered\n");
#endif
                    erts_proc_unlock(proc, proc_locks);
                }
            } else if (receiver == am_logger) {
            logger:
                proc = erts_whereis_process(NULL,0,am_logger,proc_locks,0);
                if (!proc)
                    goto failure;
                else if (smqp->from == proc->common.id)
                    goto drop_sys_msg;
                else
                    goto queue_proc_msg;
            }
            else if (receiver == am_undefined) {
                goto drop_sys_msg;
            }
            else if (is_internal_port(receiver)) {
                port = erts_thr_id2port_sflgs(receiver,
                                              ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
                if (!port)
                    goto failure;
                else {
                    write_sys_msg_to_port(receiver,
                                          port,
                                          smqp->from,
                                          smqp->type,
                                          smqp->msg);
                    if (port->control_flags & PORT_CONTROL_FLAG_HEAVY)
                        port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY;
#ifdef DEBUG_PRINTOUTS
                    erts_fprintf(stderr, "delivered\n");
#endif
                    erts_thr_port_release(port);
                    if (smqp->bp)
                        free_message_buffer(smqp->bp);
                }
            }
            else {
            failure:
                sys_msg_disp_failure(smqp, receiver);
            drop_sys_msg:
                if (proc)
                    erts_proc_unlock(proc, proc_locks);
                if (smqp->bp)
                    free_message_buffer(smqp->bp);
#ifdef DEBUG_PRINTOUTS
                erts_fprintf(stderr, "dropped\n");
#endif
            }
            smqp->msg = THE_NON_VALUE;
        }
    }

    /* Unreachable; the loop above never terminates. */
    return NULL;
}
2433
2434 void
erts_debug_foreach_sys_msg_in_q(void (* func)(Eterm,Eterm,Eterm,ErlHeapFragment *))2435 erts_debug_foreach_sys_msg_in_q(void (*func)(Eterm,
2436 Eterm,
2437 Eterm,
2438 ErlHeapFragment *))
2439 {
2440 ErtsSysMsgQ *smq[] = {sys_message_queue, local_sys_message_queue};
2441 int i;
2442
2443 ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
2444
2445 for (i = 0; i < sizeof(smq)/sizeof(smq[0]); i++) {
2446 ErtsSysMsgQ *sm;
2447 for (sm = smq[i]; sm; sm = sm->next) {
2448 Eterm to;
2449 switch (sm->type) {
2450 case SYS_MSG_TYPE_SYSMON:
2451 to = erts_get_system_monitor();
2452 break;
2453 case SYS_MSG_TYPE_SYSPROF:
2454 to = erts_get_system_profile();
2455 break;
2456 case SYS_MSG_TYPE_ERRLGR:
2457 to = erts_get_system_logger();
2458 break;
2459 default:
2460 to = NIL;
2461 break;
2462 }
2463 if (is_value(sm->msg))
2464 (*func)(sm->from, to, sm->msg, sm->bp);
2465 }
2466 }
2467 }
2468
2469
/*
 * One-time startup of the system message dispatcher: initialize the
 * element allocator, the queue, its mutex/condition variable, and
 * spawn the detached dispatcher thread. The synchronization objects
 * must be initialized before the thread is created.
 */
static void
init_sys_msg_dispatcher(void)
{
    erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
    thr_opts.detached = 1;
    thr_opts.name = "sys_msg_dispatcher";
    init_smq_element_alloc();
    sys_message_queue = NULL;
    sys_message_queue_end = NULL;
    erts_cnd_init(&smq_cnd);
    erts_mtx_init(&smq_mtx, "sys_msg_q", NIL,
        ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
    erts_thr_create(&sys_msg_dispatcher_tid,
                    sys_msg_dispatcher_func,
                    NULL,
                    &thr_opts);
}
2487
2488
2489 #include "erl_nif.h"
2490
/* One tracer callback slot: the NIF function name and arity to look
   for in the tracer module, and the resolved callback (NULL until
   matched in load_tracer_nif()). */
typedef struct {
    char *name;
    Uint arity;
    ErlNifFunc *cb;
} ErtsTracerType;

/* Cached information about one loaded tracer NIF module; stored in
   tracer_hash keyed on 'module' (see tracer_hash_fun/tracer_cmp_fun). */
struct ErtsTracerNif_ {
    HashBucket hb;                            /* hash bucket header; must be first */
    Eterm module;                             /* tracer module atom (hash key) */
    struct erl_module_nif* nif_mod;           /* the module's NIF library */
    ErtsTracerType tracers[NIF_TRACER_TYPES]; /* callback slots, see init_tracer_template() */
};
2503
/*
 * Fill in the tracer callback slot table of 'tnif' with the expected
 * NIF function name and arity for each slot, and no resolved callback
 * (cb = NULL). load_tracer_nif() later matches these against the
 * functions actually exported by the tracer module.
 */
static void init_tracer_template(ErtsTracerNif *tnif) {

    static const struct {
        int slot;
        char *name;
        Uint arity;
    } slot_spec[] = {
        /* default tracer functions */
        { TRACE_FUN_DEFAULT,      "trace",                       5 },
        { TRACE_FUN_ENABLED,      "enabled",                     3 },
        /* specific tracer functions */
        { TRACE_FUN_T_SEND,       "trace_send",                  5 },
        { TRACE_FUN_T_RECEIVE,    "trace_receive",               5 },
        { TRACE_FUN_T_CALL,       "trace_call",                  5 },
        { TRACE_FUN_T_SCHED_PROC, "trace_running_procs",         5 },
        { TRACE_FUN_T_SCHED_PORT, "trace_running_ports",         5 },
        { TRACE_FUN_T_GC,         "trace_garbage_collection",    5 },
        { TRACE_FUN_T_PROCS,      "trace_procs",                 5 },
        { TRACE_FUN_T_PORTS,      "trace_ports",                 5 },
        /* specific enabled functions */
        { TRACE_FUN_E_SEND,       "enabled_send",                3 },
        { TRACE_FUN_E_RECEIVE,    "enabled_receive",             3 },
        { TRACE_FUN_E_CALL,       "enabled_call",                3 },
        { TRACE_FUN_E_SCHED_PROC, "enabled_running_procs",       3 },
        { TRACE_FUN_E_SCHED_PORT, "enabled_running_ports",       3 },
        { TRACE_FUN_E_GC,         "enabled_garbage_collection",  3 },
        { TRACE_FUN_E_PROCS,      "enabled_procs",               3 },
        { TRACE_FUN_E_PORTS,      "enabled_ports",               3 }
    };
    size_t ix;

    for (ix = 0; ix < sizeof(slot_spec)/sizeof(slot_spec[0]); ix++) {
        ErtsTracerType *tt = &tnif->tracers[slot_spec[ix].slot];
        tt->name = slot_spec[ix].name;
        tt->arity = slot_spec[ix].arity;
        tt->cb = NULL;
    }
}
2581
/* Cache of loaded tracer NIF modules, keyed on module atom. */
static Hash *tracer_hash = NULL;
/* Protects tracer_hash; read-mostly (see lookup_tracer_nif()). */
static erts_rwmtx_t tracer_mtx;

/*
 * Resolve the tracer module of 'tracer' into an ErtsTracerNif entry
 * and cache it in tracer_hash. Returns NULL if the module is not
 * loaded, has no current NIF library, or does not export both the
 * mandatory 'trace'/5 and 'enabled'/3 callbacks.
 */
static ErtsTracerNif *
load_tracer_nif(const ErtsTracer tracer)
{
    Module* mod = erts_get_module(ERTS_TRACER_MODULE(tracer),
                                  erts_active_code_ix());
    struct erl_module_instance *instance;
    ErlNifFunc *funcs;
    int num_of_funcs;
    ErtsTracerNif tnif_tmpl, *tnif;
    ErtsTracerType *tracers;
    int i,j;

    if (!mod || !mod->curr.nif) {
        return NULL;
    }

    instance = &mod->curr;

    /* Build a template entry on the stack; hash_put() copies it via
       tracer_alloc_fun(). */
    init_tracer_template(&tnif_tmpl);
    tnif_tmpl.nif_mod = instance->nif;
    tnif_tmpl.module = ERTS_TRACER_MODULE(tracer);
    tracers = tnif_tmpl.tracers;

    num_of_funcs = erts_nif_get_funcs(instance->nif, &funcs);

    /* Match exported NIF functions against the callback slots by
       name and arity. */
    for(i = 0; i < num_of_funcs; i++) {
        for (j = 0; j < NIF_TRACER_TYPES; j++) {
            if (sys_strcmp(tracers[j].name, funcs[i].name) == 0 && tracers[j].arity == funcs[i].arity) {
                tracers[j].cb = &(funcs[i]);
                break;
            }
        }
    }

    /* The default trace and enabled callbacks are mandatory. */
    if (tracers[TRACE_FUN_DEFAULT].cb == NULL || tracers[TRACE_FUN_ENABLED].cb == NULL ) {
        return NULL;
    }

    erts_rwmtx_rwlock(&tracer_mtx);
    tnif = hash_put(tracer_hash, &tnif_tmpl);
    erts_rwmtx_rwunlock(&tracer_mtx);

    return tnif;
}
2629
/*
 * Look up the cached ErtsTracerNif entry for the tracer's module,
 * loading and caching it on a miss. Returns NULL if the module
 * cannot be used as a tracer (see load_tracer_nif()).
 *
 * Note: the read lock is dropped before load_tracer_nif() takes the
 * write lock, so two threads may race to insert; hash_put() makes
 * that benign.
 */
static ERTS_INLINE ErtsTracerNif *
lookup_tracer_nif(const ErtsTracer tracer)
{
    ErtsTracerNif tnif_tmpl;
    ErtsTracerNif *tnif;
    tnif_tmpl.module = ERTS_TRACER_MODULE(tracer);
    ERTS_LC_ASSERT(erts_thr_progress_lc_is_delaying() || erts_get_scheduler_id() > 0);
    erts_rwmtx_rlock(&tracer_mtx);
    if ((tnif = hash_get(tracer_hash, &tnif_tmpl)) == NULL) {
        erts_rwmtx_runlock(&tracer_mtx);
        tnif = load_tracer_nif(tracer);
        ASSERT(!tnif || tnif->nif_mod);
        return tnif;
    }
    erts_rwmtx_runlock(&tracer_mtx);
    ASSERT(tnif->nif_mod);
    return tnif;
}
2648
/* This function converts an Erlang tracer term to ErtsTracer.
   It returns THE_NON_VALUE if an invalid tracer term was given.
   Accepted input is:
     pid() || port() || {prefix, pid()} || {prefix, port()} ||
     {prefix, atom(), term()} || {atom(), term()}
   where 'prefix' only applies when the 'prefix' argument is a
   valid atom (i.e. not THE_NON_VALUE). */
ErtsTracer
erts_term_to_tracer(Eterm prefix, Eterm t)
{
    ErtsTracer tracer = erts_tracer_nil;
    ASSERT(is_atom(prefix) || prefix == THE_NON_VALUE);
    if (!is_nil(t)) {
        /* Default to the built-in erl_tracer module unless a
           {Module, State} form overrides it below. */
        Eterm module = am_erl_tracer, state = THE_NON_VALUE;
        Eterm hp[2];
        if (is_tuple(t)) {
            Eterm *tp = tuple_val(t);
            if (prefix != THE_NON_VALUE) {
                if (arityval(tp[0]) == 2 && tp[1] == prefix)
                    t = tp[2];
                else if (arityval(tp[0]) == 3 && tp[1] == prefix && is_atom(tp[2])) {
                    module = tp[2];
                    state = tp[3];
                }
            } else {
                if (arityval(tp[0]) == 2 && is_atom(tp[1])) {
                    module = tp[1];
                    state = tp[2];
                }
            }
        }
        /* Plain pid/port (possibly unwrapped from {prefix, ...}). */
        if (state == THE_NON_VALUE && (is_internal_pid(t) || is_internal_port(t)))
            state = t;
        if (state == THE_NON_VALUE)
            return THE_NON_VALUE;
        /* erts_tracer_update() copies the [Module | State] cons into
           tracer-owned memory; 'hp' only needs to live for this call. */
        erts_tracer_update(&tracer, CONS(hp, module, state));
    }
    /* Reject modules that cannot act as tracers. */
    if (!lookup_tracer_nif(tracer)) {
        ASSERT(ERTS_TRACER_MODULE(tracer) != am_erl_tracer);
        ERTS_TRACER_CLEAR(&tracer);
        return THE_NON_VALUE;
    }
    return tracer;
}
2692
/*
 * Convert an ErtsTracer back to the Erlang term representation:
 * 'false' for no tracer, the bare state (pid/port) for the built-in
 * erl_tracer module, or {Module, State} otherwise. The state is
 * copied onto the heap of 'p' when a tuple is built.
 */
Eterm
erts_tracer_to_term(Process *p, ErtsTracer tracer)
{
    if (ERTS_TRACER_IS_NIL(tracer))
        return am_false;
    if (ERTS_TRACER_MODULE(tracer) == am_erl_tracer)
        /* Have to manage these specifically in order to be
           backwards compatible */
        return ERTS_TRACER_STATE(tracer);
    else {
        Eterm *hp = HAlloc(p, 3);
        return TUPLE2(hp, ERTS_TRACER_MODULE(tracer),
                      copy_object(ERTS_TRACER_STATE(tracer), p));
    }
}
2708
/*
 * Heap-fragment variant of erts_tracer_to_term(), using the usual
 * two-phase size/build protocol: with only 'szp' set it accumulates
 * the words needed; with 'hpp' set it builds the term at *hpp
 * (advancing it) and returns the result. Returns 'false' for no
 * tracer, the bare state for erl_tracer, or {Module, State}.
 */
Eterm
erts_build_tracer_to_term(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, ErtsTracer tracer)
{
    Eterm res;
    Eterm state;
    Uint sz;

    if (ERTS_TRACER_IS_NIL(tracer))
        return am_false;

    state = ERTS_TRACER_STATE(tracer);
    sz = is_immed(state) ? 0 : size_object(state);

    if (szp)
        *szp += sz;

    if (hpp)
        res = is_immed(state) ? state : copy_struct(state, sz, hpp, ohp);
    else
        res = THE_NON_VALUE;

    /* Non-default tracer modules need a wrapping 2-tuple (3 words). */
    if (ERTS_TRACER_MODULE(tracer) != am_erl_tracer) {
        if (szp)
            *szp += 3;
        if (hpp) {
            res = TUPLE2(*hpp, ERTS_TRACER_MODULE(tracer), res);
            *hpp += 3;
        }
    }

    return res;
}
2741
/*
 * Invoke the tracer NIF callback for trace event 'tag' with message
 * 'msg'. Builds the callback's options map (extra, match spec
 * result, scheduler id, timestamp flavor) in a small on-stack heap.
 * Falls back to the TRACE_FUN_DEFAULT callback when the specific
 * 'topt' callback is not implemented by the tracer module.
 *
 * Always returns 1 (also when the tracer NIF cannot be looked up,
 * in which case the event is silently skipped).
 */
static ERTS_INLINE int
send_to_tracer_nif_raw(Process *c_p, Process *tracee,
                       const ErtsTracer tracer, Uint tracee_flags,
                       Eterm t_p_id, ErtsTracerNif *tnif,
                       enum ErtsTracerOpt topt,
                       Eterm tag, Eterm msg, Eterm extra, Eterm pam_result)
{
    if (tnif || (tnif = lookup_tracer_nif(tracer)) != NULL) {
#define MAP_SIZE 4
        /* local_heap layout: keys tuple (arityval + MAP_SIZE keys),
           then the flatmap header (3 words) and MAP_SIZE values. */
        Eterm argv[5], local_heap[3+MAP_SIZE /* values */ + (MAP_SIZE+1 /* keys */)];
        flatmap_t *map = (flatmap_t*)(local_heap+(MAP_SIZE+1));
        Eterm *map_values = flatmap_get_values(map);
        Eterm *map_keys = local_heap + 1;
        Uint map_elem_count = 0;

        /* Fall back to the generic trace/5 callback if the specific
           one is not implemented. */
        topt = (tnif->tracers[topt].cb) ? topt : TRACE_FUN_DEFAULT;
        ASSERT(topt < NIF_TRACER_TYPES);

        argv[0] = tag;
        argv[1] = ERTS_TRACER_STATE(tracer);
        argv[2] = t_p_id;
        argv[3] = msg;
        argv[4] = make_flatmap(map);

        map->thing_word = MAP_HEADER_FLATMAP;

        if (extra != THE_NON_VALUE) {
            map_keys[map_elem_count] = am_extra;
            map_values[map_elem_count++] = extra;
        }

        if (pam_result != am_true) {
            map_keys[map_elem_count] = am_match_spec_result;
            map_values[map_elem_count++] = pam_result;
        }

        if (tracee_flags & F_TRACE_SCHED_NO) {
            map_keys[map_elem_count] = am_scheduler_id;
            map_values[map_elem_count++] = make_small(erts_get_scheduler_id());
        }
        /* The timestamp key is written unconditionally, but only
           counted (via map_elem_count) when a timestamp flag is set;
           an unused slot is ignored since map->size == count. */
        map_keys[map_elem_count] = am_timestamp;
        if (tracee_flags & F_NOW_TS)
#ifdef HAVE_ERTS_NOW_CPU
            if (erts_cpu_timestamp)
                map_values[map_elem_count++] = am_cpu_timestamp;
            else
#endif
                map_values[map_elem_count++] = am_timestamp;
        else if (tracee_flags & F_STRICT_MON_TS)
            map_values[map_elem_count++] = am_strict_monotonic;
        else if (tracee_flags & F_MON_TS)
            map_values[map_elem_count++] = am_monotonic;

        map->size = map_elem_count;
        map->keys = make_tuple(local_heap);
        local_heap[0] = make_arityval(map_elem_count);

#undef MAP_SIZE
        erts_nif_call_function(c_p, tracee ? tracee : c_p,
                               tnif->nif_mod,
                               tnif->tracers[topt].cb,
                               tnif->tracers[topt].arity,
                               argv);
    }
    return 1;
}
2808
/*
 * Convenience wrapper around send_to_tracer_nif_raw() that extracts
 * the tracer and trace flags from the traced entity 't_p' (process
 * or port). Lock-checks that the required locks are held.
 */
static ERTS_INLINE int
send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p,
                   Eterm t_p_id, ErtsTracerNif *tnif, enum ErtsTracerOpt topt,
                   Eterm tag, Eterm msg, Eterm extra, Eterm pam_result)
{
#if defined(ERTS_ENABLE_LOCK_CHECK)
    if (c_p) {
        /* We have to hold the main lock of the currently executing process */
        erts_proc_lc_chk_have_proc_locks(c_p, ERTS_PROC_LOCK_MAIN);
    }
    if (is_internal_pid(t_p->id)) {
        /* We have to have at least one lock */
        ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL);
    } else {
        ASSERT(is_internal_port(t_p->id));
        ERTS_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p));
    }
#endif

    return send_to_tracer_nif_raw(c_p,
                                  is_internal_pid(t_p->id) ? (Process*)t_p : NULL,
                                  t_p->tracer, t_p->trace_flags,
                                  t_p_id, tnif, topt, tag, msg, extra,
                                  pam_result);
}
2834
/*
 * Call the tracer's 'enabled' callback (or the event-specific
 * enabled_* callback if implemented) for event 'tag' on entity
 * 't_p_id'. Returns the callback's verdict: am_trace, am_discard,
 * or am_remove. For tags other than am_trace_status any am_remove
 * answer is normalized to am_trace. If the tracer NIF cannot be
 * looked up, am_remove is returned for trace_status queries and
 * am_discard otherwise. Optionally passes back the looked-up tnif
 * through 'tnif_ret'.
 */
static ERTS_INLINE Eterm
call_enabled_tracer(const ErtsTracer tracer,
                    ErtsTracerNif **tnif_ret,
                    enum ErtsTracerOpt topt,
                    Eterm tag, Eterm t_p_id) {
    ErtsTracerNif *tnif = lookup_tracer_nif(tracer);
    if (tnif) {
        Eterm argv[] = {tag, ERTS_TRACER_STATE(tracer), t_p_id},
            ret;
        /* Fall back to the generic enabled/3 callback if the
           specific one is not implemented. */
        topt = (tnif->tracers[topt].cb) ? topt : TRACE_FUN_ENABLED;
        ASSERT(topt < NIF_TRACER_TYPES);
        ASSERT(tnif->tracers[topt].cb != NULL);
        if (tnif_ret) *tnif_ret = tnif;
        ret = erts_nif_call_function(NULL, NULL, tnif->nif_mod,
                                     tnif->tracers[topt].cb,
                                     tnif->tracers[topt].arity,
                                     argv);
        if (tag == am_trace_status && ret != am_remove)
            return am_trace;
        ASSERT(tag == am_trace_status || ret != am_remove);
        return ret;
    }
    return tag == am_trace_status ? am_remove : am_discard;
}
2859
/*
 * Ask the tracer of 't_p' whether event 'tag' should be traced.
 * Returns 1 to trace, 0 to discard. If the tracer answers am_remove
 * (or anything unexpected), the tracer is detached from 't_p' —
 * but only when it is safe to do so: for ports, or for the calling
 * process itself, and only on a normal (non-dirty) scheduler. The
 * removal may temporarily escalate to all process locks.
 */
static int
is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks,
                  ErtsPTabElementCommon *t_p,
                  ErtsTracerNif **tnif_ret,
                  enum ErtsTracerOpt topt, Eterm tag) {
    Eterm nif_result;

    ASSERT(t_p);

#if defined(ERTS_ENABLE_LOCK_CHECK)
    if (c_p)
        ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == c_p_locks
                       || erts_thr_progress_is_blocking());
    if (is_internal_pid(t_p->id)) {
        /* We have to have at least one lock */
        ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL
                       || erts_thr_progress_is_blocking());
    } else {
        ASSERT(is_internal_port(t_p->id));
        ERTS_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p)
                       || erts_thr_progress_is_blocking());
    }
#endif

    nif_result = call_enabled_tracer(t_p->tracer, tnif_ret, topt, tag, t_p->id);
    switch (nif_result) {
    case am_discard: return 0;
    case am_trace: return 1;
    case THE_NON_VALUE:
    case am_remove: ASSERT(tag == am_trace_status); break;
    default:
        /* only am_remove should be returned, but if
           something else is returned we fall-through
           and remove the tracer. */
        ASSERT(0);
    }

    /* Only remove tracer on (self() or ports) AND we are on a normal scheduler */
    if (is_internal_port(t_p->id) || (c_p && c_p->common.id == t_p->id)) {
        ErtsSchedulerData *esdp = erts_get_scheduler_data();
        ErtsProcLocks c_p_xlocks = 0;
        if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
            if (is_internal_pid(t_p->id)) {
                ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN);
                /* erts_tracer_replace() requires all process locks;
                   acquire the ones we are missing. */
                if (c_p_locks != ERTS_PROC_LOCKS_ALL) {
                    c_p_xlocks = ~c_p_locks & ERTS_PROC_LOCKS_ALL;
                    if (erts_proc_trylock(c_p, c_p_xlocks) == EBUSY) {
                        /* Avoid deadlock: drop minor locks and
                           reacquire in order while keeping MAIN. */
                        erts_proc_unlock(c_p, c_p_locks & ~ERTS_PROC_LOCK_MAIN);
                        erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
                    }
                }
            }

            erts_tracer_replace(t_p, erts_tracer_nil);
            t_p->trace_flags &= ~TRACEE_FLAGS;

            if (c_p_xlocks)
                erts_proc_unlock(c_p, c_p_xlocks);
        }
    }

    return 0;
}
2923
/*
 * Query whether 'tracer' still considers itself enabled for entity
 * 't_p' (trace_status probe). Returns 1 if the tracer NIF exists
 * and answers am_trace or am_discard; 0 otherwise. Never detaches
 * the tracer (contrast is_tracer_enabled()).
 */
int erts_is_tracer_enabled(const ErtsTracer tracer, ErtsPTabElementCommon *t_p)
{
    ErtsTracerNif *tnif = lookup_tracer_nif(tracer);
    Eterm verdict;

    if (!tnif)
        return 0;

    verdict = call_enabled_tracer(tracer, &tnif,
                                  TRACE_FUN_ENABLED,
                                  am_trace_status,
                                  t_p->id);
    return (verdict == am_discard || verdict == am_trace) ? 1 : 0;
}
2941
/*
 * trace_status probe of the tracer attached to 't_p'; may detach
 * a dead tracer as a side effect (see is_tracer_enabled()).
 */
int erts_is_tracer_proc_enabled(Process* c_p, ErtsProcLocks c_p_locks,
                                ErtsPTabElementCommon *t_p)
{
    return is_tracer_enabled(c_p, c_p_locks, t_p, NULL, TRACE_FUN_ENABLED,
                             am_trace_status);
}
2948
/*
 * Ask the tracer attached to 't_p' whether 'send' events should be
 * traced (uses the enabled_send callback when implemented).
 */
int erts_is_tracer_proc_enabled_send(Process* c_p, ErtsProcLocks c_p_locks,
                                     ErtsPTabElementCommon *t_p)
{
    return is_tracer_enabled(c_p, c_p_locks, t_p, NULL, TRACE_FUN_T_SEND, am_send);
}
2954
2955
/*
 * Replace the tracer of process/port 't_p' with 'tracer'. Requires
 * all process locks (or port lock, or blocked thread progress).
 * No-op when the new tracer equals the current one.
 */
void erts_tracer_replace(ErtsPTabElementCommon *t_p, const ErtsTracer tracer)
{
#if defined(ERTS_ENABLE_LOCK_CHECK)
    if (is_internal_pid(t_p->id) && !erts_thr_progress_is_blocking()) {
        erts_proc_lc_chk_have_proc_locks((Process*)t_p, ERTS_PROC_LOCKS_ALL);
    } else if (is_internal_port(t_p->id)) {
        ERTS_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p)
                       || erts_thr_progress_is_blocking());
    }
#endif
    if (ERTS_TRACER_COMPARE(t_p->tracer, tracer))
        return;

    erts_tracer_update(&t_p->tracer, tracer);
}
2971
/*
 * Deferred destructor for an ErtsTracer (scheduled via thread
 * progress from erts_tracer_update()). Immediate-state tracers were
 * allocated as a bare heap-fragment buffer; complex-state tracers
 * live inside an ErlHeapFragment whose 'mem' field the tracer cons
 * cell points at (see the comment above erts_tracer_update()).
 */
static void free_tracer(void *p)
{
    ErtsTracer tracer = (ErtsTracer)p;

    if (is_immed(ERTS_TRACER_STATE(tracer))) {
        erts_free(ERTS_ALC_T_HEAP_FRAG, ptr_val(tracer));
    } else {
        ErlHeapFragment *hf = (void*)((char*)(ptr_val(tracer)) - offsetof(ErlHeapFragment, mem));
        free_message_buffer(hf);
    }
}
2983
/* un-define erts_tracer_update before implementation */
#ifdef erts_tracer_update
#undef erts_tracer_update
#endif

/*
 * ErtsTracer is either NIL, 'true' or [Mod | State]
 *
 * - If State is immediate then the memory for
 *   the cons cell is just two words + sizeof(ErtsThrPrgrLaterOp) large.
 * - If State is a complex term then the cons cell
 *   is allocated in an ErlHeapFragment where the cons
 *   ptr points to the mem field. So in order to get the
 *   ptr to the fragment you do this:
 *   (char*)(ptr_val(tracer)) - offsetof(ErlHeapFragment, mem)
 *   Normally you shouldn't have to care about this though
 *   as erts_tracer_update takes care of it for you.
 *
 *   When ErtsTracer is stored in the stack as part of a
 *   return trace, the cons cell is stored on the heap of
 *   the process.
 *
 *   The cons cell is not always stored on the heap as:
 *     1) for port/meta tracing there is no heap
 *     2) we would need the main lock in order to
 *        read the tracer which is undesirable.
 *
 * One way to optimize this (memory wise) is to keep an refc and only bump
 * the refc when *tracer is NIL.
 */
void
erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer)
{
    ErlHeapFragment *hf;

    /* Release the old tracer (if any): locate the ErtsThrPrgrLaterOp
       storage that was co-allocated after the tracer term and
       schedule the free after a full round of thread progress. */
    if (is_not_nil(*tracer)) {
        Uint offs = 2;
        UWord size = 2 * sizeof(Eterm) + sizeof(ErtsThrPrgrLaterOp);
        ErtsThrPrgrLaterOp *lop;
        ASSERT(is_list(*tracer));
        if (is_not_immed(ERTS_TRACER_STATE(*tracer))) {
            hf = (void*)(((char*)(ptr_val(*tracer)) - offsetof(ErlHeapFragment, mem)));
            offs = hf->used_size;
            size = hf->alloc_size * sizeof(Eterm) + sizeof(ErlHeapFragment);
            ASSERT(offs == size_object(*tracer));
        }

        /* sparc assumes that all structs are double word aligned, so we
           have to align the ErtsThrPrgrLaterOp struct otherwise it may
           segfault.*/
        if ((UWord)(ptr_val(*tracer) + offs) % (sizeof(UWord)*2) == sizeof(UWord))
            offs += 1;

        lop = (ErtsThrPrgrLaterOp*)(ptr_val(*tracer) + offs);
        ASSERT((UWord)lop % (sizeof(UWord)*2) == 0);

        /* We schedule the free:ing of the tracer until after a thread progress
           has been made so that we know that no schedulers have any references
           to it. Because we do this, it is possible to release all locks of a
           process/port and still use the ErtsTracer of that port/process
           without having to worry if it is free'd.
        */
        erts_schedule_thr_prgr_later_cleanup_op(
            free_tracer, (void*)(*tracer), lop, size);
    }

    /* Install the new tracer, copying it into tracer-owned memory. */
    if (is_nil(new_tracer)) {
        *tracer = new_tracer;
    } else if (is_immed(ERTS_TRACER_STATE(new_tracer))) {
        /* If tracer state is an immediate we only allocate a 2 Eterm heap.
           Not sure if it is worth it, we save 4 words (sizeof(ErlHeapFragment))
           per tracer. */
        Eterm *hp = erts_alloc(ERTS_ALC_T_HEAP_FRAG,
                               3*sizeof(Eterm) + sizeof(ErtsThrPrgrLaterOp));
        *tracer = CONS(hp, ERTS_TRACER_MODULE(new_tracer),
                       ERTS_TRACER_STATE(new_tracer));
    } else {
        Eterm *hp, tracer_state = ERTS_TRACER_STATE(new_tracer),
            tracer_module = ERTS_TRACER_MODULE(new_tracer);
        Uint sz = size_object(tracer_state);
        /* Reserve room for the later-op struct (rounded up to whole
           Eterms, plus one word of alignment slack) after the term. */
        hf = new_message_buffer(sz + 2 /* cons cell */ +
                                (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm) + 1);
        hp = hf->mem + 2;
        hf->used_size -= (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm) + 1;
        *tracer = copy_struct(tracer_state, sz, &hp, &hf->off_heap);
        *tracer = CONS(hf->mem, tracer_module, *tracer);
        ASSERT((void*)(((char*)(ptr_val(*tracer)) - offsetof(ErlHeapFragment, mem))) == hf);
    }
}
3073
init_tracer_nif()3074 static void init_tracer_nif()
3075 {
3076 erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
3077 rwmtx_opt.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
3078 rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED;
3079
3080 erts_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx", NIL,
3081 ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
3082
3083 erts_tracer_nif_clear();
3084
3085 }
3086
erts_tracer_nif_clear()3087 int erts_tracer_nif_clear()
3088 {
3089
3090 erts_rwmtx_rlock(&tracer_mtx);
3091 if (!tracer_hash || tracer_hash->nobjs) {
3092
3093 HashFunctions hf;
3094 hf.hash = tracer_hash_fun;
3095 hf.cmp = tracer_cmp_fun;
3096 hf.alloc = tracer_alloc_fun;
3097 hf.free = tracer_free_fun;
3098 hf.meta_alloc = (HMALLOC_FUN) erts_alloc;
3099 hf.meta_free = (HMFREE_FUN) erts_free;
3100 hf.meta_print = (HMPRINT_FUN) erts_print;
3101
3102 erts_rwmtx_runlock(&tracer_mtx);
3103 erts_rwmtx_rwlock(&tracer_mtx);
3104
3105 if (tracer_hash)
3106 hash_delete(tracer_hash);
3107
3108 tracer_hash = hash_new(ERTS_ALC_T_TRACER_NIF, "tracer_hash", 10, hf);
3109
3110 erts_rwmtx_rwunlock(&tracer_mtx);
3111 return 1;
3112 }
3113
3114 erts_rwmtx_runlock(&tracer_mtx);
3115 return 0;
3116 }
3117
tracer_cmp_fun(void * a,void * b)3118 static int tracer_cmp_fun(void* a, void* b)
3119 {
3120 return ((ErtsTracerNif*)a)->module != ((ErtsTracerNif*)b)->module;
3121 }
3122
/* Hash callback for tracer_hash: hash on the tracer module atom. */
static HashValue tracer_hash_fun(void* obj)
{
    return make_internal_hash(((ErtsTracerNif*)obj)->module, 0);
}
3127
tracer_alloc_fun(void * tmpl)3128 static void *tracer_alloc_fun(void* tmpl)
3129 {
3130 ErtsTracerNif *obj = erts_alloc(ERTS_ALC_T_TRACER_NIF,
3131 sizeof(ErtsTracerNif) +
3132 sizeof(ErtsThrPrgrLaterOp));
3133 sys_memcpy(obj, tmpl, sizeof(*obj));
3134 return obj;
3135 }
3136
/* Thread-progress callback: actually release a tracer hash entry. */
static void tracer_free_fun_cb(void* obj)
{
    erts_free(ERTS_ALC_T_TRACER_NIF, obj);
}
3141
/* Hash free callback: defer the actual deallocation until a round of
   thread progress has passed, so no scheduler can still hold a
   reference. The later-op storage was co-allocated right after the
   struct by tracer_alloc_fun(). */
static void tracer_free_fun(void* obj)
{
    ErtsTracerNif *tnif = obj;
    erts_schedule_thr_prgr_later_op(
        tracer_free_fun_cb, obj,
        (ErtsThrPrgrLaterOp*)(tnif + 1));

}
3150