1 /*
2  * %CopyrightBegin%
3  *
4  * Copyright Ericsson AB 1999-2020. All Rights Reserved.
5  *
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  *     http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  *
18  * %CopyrightEnd%
19  */
20 
21 /*
22  * Trace BIFs.
23  */
24 
25 #ifdef HAVE_CONFIG_H
26 #  include "config.h"
27 #endif
28 
29 #include "sys.h"
30 #include "erl_vm.h"
31 #include "global.h"
32 #include "erl_process.h"
33 #include "error.h"
34 #include "erl_driver.h"
35 #include "bif.h"
36 #include "big.h"
37 #include "dist.h"
38 #include "erl_version.h"
39 #include "beam_bp.h"
40 #include "erl_binary.h"
41 #include "erl_thr_progress.h"
42 #include "erl_bif_unique.h"
43 #include "erl_proc_sig_queue.h"
44 
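/*
 * Note (added for readability): DECL_AM(foo) declares a local Eterm AM_foo
 * holding the atom 'foo'; am_atom_put() interns the atom if it does not
 * already exist.
 */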
45 #define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
46 
47 const struct trace_pattern_flags   erts_trace_pattern_flags_off = {0, 0, 0, 0, 0};
48 
49 /*
50  * The following variables are protected by code write permission.
51  */
52 static int                         erts_default_trace_pattern_is_on;
53 static Binary                     *erts_default_match_spec;
54 static Binary                     *erts_default_meta_match_spec;
55 static struct trace_pattern_flags  erts_default_trace_pattern_flags;
56 static ErtsTracer                  erts_default_meta_tracer;
57 
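/*
 * Staging state for an ongoing breakpoint update (descriptive note):
 * trace_pattern() fills in this struct and the update is then completed in
 * steps by erts_finish_breakpointing(), with 'current' tracking the step
 * and 'stager' being the process kept suspended meanwhile.
 */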
58 static struct {			/* Protected by code write permission */
59     int current;
60     int install;
61     int local;
62     BpFunctions f;		/* Local functions */
63     BpFunctions e;		/* Export entries */
64     Process* stager;
65     ErtsThrPrgrLaterOp lop;
66 } finish_bp;
67 
68 static Eterm
69 trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist);
70 static int
71 erts_set_tracing_event_pattern(Eterm event, Binary*, int on);
72 
73 static void smp_bp_finisher(void* arg);
74 static BIF_RETTYPE
75 system_monitor(Process *p, Eterm monitor_pid, Eterm list);
76 
77 static void new_seq_trace_token(Process* p, int); /* helper for seq_trace_2 */
78 static Eterm trace_info_pid(Process* p, Eterm pid_spec, Eterm key);
79 static Eterm trace_info_func(Process* p, Eterm pid_spec, Eterm key);
80 static Eterm trace_info_on_load(Process* p, Eterm key);
81 static Eterm trace_info_event(Process* p, Eterm event, Eterm key);
82 
83 
84 static void reset_bif_trace(void);
85 static void setup_bif_trace(void);
86 static void install_exp_breakpoints(BpFunctions* f);
87 static void uninstall_exp_breakpoints(BpFunctions* f);
88 static void clean_export_entries(BpFunctions* f);
89 
90 ErtsTracingEvent erts_send_tracing[ERTS_NUM_BP_IX];
91 ErtsTracingEvent erts_receive_tracing[ERTS_NUM_BP_IX];
92 
93 void
94 erts_bif_trace_init(void)
95 {
96     int i;
97 
98     erts_default_trace_pattern_is_on = 0;
99     erts_default_match_spec = NULL;
100     erts_default_meta_match_spec = NULL;
101     erts_default_trace_pattern_flags = erts_trace_pattern_flags_off;
102     erts_default_meta_tracer = erts_tracer_nil;
103 
104     for (i=0; i<ERTS_NUM_BP_IX; i++) {
105         erts_send_tracing[i].on = 1;
106         erts_send_tracing[i].match_spec = NULL;
107 	erts_receive_tracing[i].on = 1;
108 	erts_receive_tracing[i].match_spec = NULL;
109     }
110 }
111 
112 /*
113  * Turn on/off call tracing for the given function(s).
114  */
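/*
 * Illustrative Erlang-level usage (a sketch, not part of this file):
 *
 *   %% Local call trace on lists:seq/2 with return tracing:
 *   erlang:trace_pattern({lists, seq, 2},
 *                        [{'_', [], [{return_trace}]}],
 *                        [local]).
 *
 * In recent OTP releases erlang:trace_pattern/3 calls
 * erts_internal:trace_pattern/3, which is implemented by
 * erts_internal_trace_pattern_3() below.
 */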
115 
116 Eterm
117 erts_internal_trace_pattern_3(BIF_ALIST_3)
118 {
119     return trace_pattern(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
120 }
121 
122 static Eterm
123 trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
124 {
125     int i;
126     int matches = -1;
127     int specified = 0;
128     enum erts_break_op on;
129     Binary* match_prog_set;
130     Eterm l;
131     struct trace_pattern_flags flags = erts_trace_pattern_flags_off;
132     int is_global;
133     ErtsTracer meta_tracer = erts_tracer_nil;
134 
135     if (!erts_try_seize_code_write_permission(p)) {
136 	ERTS_BIF_YIELD3(bif_export[BIF_erts_internal_trace_pattern_3], p, MFA, Pattern, flaglist);
137     }
138     finish_bp.current = -1;
139 
140     UseTmpHeap(3,p);
141     /*
142      * Check and compile the match specification.
143      */
144 
145     if (Pattern == am_false) {
146 	match_prog_set = NULL;
147 	on = 0;
148     } else if (is_nil(Pattern) || Pattern == am_true) {
149 	match_prog_set = NULL;
150 	on = 1;
151     } else if (Pattern == am_restart) {
152 	match_prog_set = NULL;
153 	on = ERTS_BREAK_RESTART;
154     } else if (Pattern == am_pause) {
155 	match_prog_set = NULL;
156 	on = ERTS_BREAK_PAUSE;
157     } else {
158 	match_prog_set = erts_match_set_compile(p, Pattern, MFA);
159 	if (match_prog_set) {
160 	    MatchSetRef(match_prog_set);
161 	    on = 1;
162 	} else{
163 	    goto error;
164 	}
165     }
166 
167     is_global = 0;
168     for(l = flaglist; is_list(l); l = CDR(list_val(l))) {
169 	if (is_tuple(CAR(list_val(l)))) {
170             meta_tracer = erts_term_to_tracer(am_meta, CAR(list_val(l)));
171             if (meta_tracer == THE_NON_VALUE) {
172                 meta_tracer = erts_tracer_nil;
173                 goto error;
174             }
175 	    flags.breakpoint = 1;
176 	    flags.meta       = 1;
177 	} else {
178 	    switch (CAR(list_val(l))) {
179 	    case am_local:
180 		if (is_global) {
181 		    goto error;
182 		}
183 		flags.breakpoint = 1;
184 		flags.local      = 1;
185 		break;
186 	    case am_meta:
187 		if (is_global) {
188 		    goto error;
189 		}
190 		flags.breakpoint = 1;
191 		flags.meta       = 1;
192                 if (ERTS_TRACER_IS_NIL(meta_tracer))
193                     meta_tracer = erts_term_to_tracer(THE_NON_VALUE, p->common.id);
194 		break;
195 	    case am_global:
196 		if (flags.breakpoint) {
197 		    goto error;
198 		}
199 		is_global = !0;
200 		break;
201 	    case am_call_count:
202 		if (is_global) {
203 		    goto error;
204 		}
205 		flags.breakpoint = 1;
206 		flags.call_count = 1;
207 		break;
208 	    case am_call_time:
209 		if (is_global) {
210 		    goto error;
211 		}
212 		flags.breakpoint = 1;
213 		flags.call_time = 1;
214 		break;
215 
216 	    default:
217 		goto error;
218 	    }
219 	}
220     }
221     if (l != NIL) {
222 	goto error;
223     }
224 
225     if (match_prog_set && !flags.local && !flags.meta && (flags.call_count || flags.call_time)) {
226 	/* A match prog is not allowed with just call_count or call_time */
227 	goto error;
228     }
229 
230     /*
231      * Check the MFA specification.
232      */
233 
234     if (MFA == am_on_load) {
235 	if (flags.local || (! flags.breakpoint)) {
236 	    MatchSetUnref(erts_default_match_spec);
237 	    erts_default_match_spec = match_prog_set;
238 	    MatchSetRef(erts_default_match_spec);
239 	}
240 	if (flags.meta) {
241 	    MatchSetUnref(erts_default_meta_match_spec);
242 	    erts_default_meta_match_spec = match_prog_set;
243 	    MatchSetRef(erts_default_meta_match_spec);
244             erts_tracer_update(&erts_default_meta_tracer, meta_tracer);
245 	} else if (! flags.breakpoint) {
246 	    MatchSetUnref(erts_default_meta_match_spec);
247 	    erts_default_meta_match_spec = NULL;
248 	    ERTS_TRACER_CLEAR(&erts_default_meta_tracer);
249 	}
250 	if (erts_default_trace_pattern_flags.breakpoint &&
251 	    flags.breakpoint) {
252 	    /* Breakpoint trace -> breakpoint trace */
253 	    ASSERT(erts_default_trace_pattern_is_on);
254 	    if (on) {
255 		erts_default_trace_pattern_flags.local
256 		    |= flags.local;
257 		erts_default_trace_pattern_flags.meta
258 		    |= flags.meta;
259 		erts_default_trace_pattern_flags.call_count
260 		    |= (on == 1) ? flags.call_count : 0;
261 		erts_default_trace_pattern_flags.call_time
262 		    |= (on == 1) ? flags.call_time : 0;
263 	    } else {
264 		erts_default_trace_pattern_flags.local
265 		    &= ~flags.local;
266 		erts_default_trace_pattern_flags.meta
267 		    &= ~flags.meta;
268 		erts_default_trace_pattern_flags.call_count
269 		    &= ~flags.call_count;
270 		erts_default_trace_pattern_flags.call_time
271 		    &= ~flags.call_time;
272 		if (! (erts_default_trace_pattern_flags.breakpoint =
273 		       erts_default_trace_pattern_flags.local |
274 		       erts_default_trace_pattern_flags.meta |
275 		       erts_default_trace_pattern_flags.call_count |
276 		       erts_default_trace_pattern_flags.call_time)) {
277 		    erts_default_trace_pattern_is_on = !!on; /* i.e. off */
278 		}
279 	    }
280 	} else if (! erts_default_trace_pattern_flags.breakpoint &&
281 		   ! flags.breakpoint) {
282 	    /* Global call trace -> global call trace */
283 	    erts_default_trace_pattern_is_on = !!on;
284 	} else if (erts_default_trace_pattern_flags.breakpoint &&
285 		   ! flags.breakpoint) {
286 	    /* Breakpoint trace -> global call trace */
287 	    if (on) {
288 		erts_default_trace_pattern_flags = flags; /* Struct copy */
289 		erts_default_trace_pattern_is_on = !!on;
290 	    }
291 	} else {
292 	    ASSERT(! erts_default_trace_pattern_flags.breakpoint &&
293 		   flags.breakpoint);
294 	    /* Global call trace -> breakpoint trace */
295 	    if (on) {
296 		if (on != 1) {
297 		    flags.call_count = 0;
298 		    flags.call_time  = 0;
299 		}
300 		flags.breakpoint = flags.local | flags.meta | flags.call_count | flags.call_time;
301 		erts_default_trace_pattern_flags = flags; /* Struct copy */
302 		erts_default_trace_pattern_is_on = !!flags.breakpoint;
303 	    }
304 	}
305 	matches = 0;
306     } else if (is_tuple(MFA)) {
307         ErtsCodeMFA mfa;
308 	Eterm *tp = tuple_val(MFA);
309 	if (tp[0] != make_arityval(3)) {
310 	    goto error;
311 	}
312 	if (!is_atom(tp[1]) || !is_atom(tp[2]) ||
313 	    (!is_small(tp[3]) && tp[3] != am_Underscore)) {
314 	    goto error;
315 	}
316 	for (i = 0; i < 3 && tp[i+1] != am_Underscore; i++, specified++) {
317 	    /* Empty loop body */
318 	}
319 	for (i = specified; i < 3; i++) {
320 	    if (tp[i+1] != am_Underscore) {
321 		goto error;
322 	    }
323 	}
324 	mfa.module   = tp[1];
325 	mfa.function = tp[2];
326 	if (specified == 3) {
327             mfa.arity = signed_val(tp[3]);
328 	}
329 
330 	matches = erts_set_trace_pattern(p, &mfa, specified,
331 					 match_prog_set, match_prog_set,
332 					 on, flags, meta_tracer, 0);
333     } else if (is_atom(MFA)) {
334         if (is_global || flags.breakpoint || on > ERTS_BREAK_SET) {
335             goto error;
336         }
337         matches = erts_set_tracing_event_pattern(MFA, match_prog_set, on);
338     }
339 
340  error:
341     MatchSetUnref(match_prog_set);
342 
343     ERTS_TRACER_CLEAR(&meta_tracer);
344 
345     if (finish_bp.current >= 0) {
346 	ASSERT(matches >= 0);
347 	ASSERT(finish_bp.stager == NULL);
348 	finish_bp.stager = p;
349 	erts_schedule_thr_prgr_later_op(smp_bp_finisher, NULL, &finish_bp.lop);
350 	erts_proc_inc_refc(p);
351 	erts_suspend(p, ERTS_PROC_LOCK_MAIN, NULL);
352 	ERTS_BIF_YIELD_RETURN(p, make_small(matches));
353     }
354 
355     erts_release_code_write_permission();
356 
357     if (matches >= 0) {
358 	return make_small(matches);
359     }
360     else {
361 	BIF_ERROR(p, BADARG);
362     }
363 }
364 
365 static void smp_bp_finisher(void* null)
366 {
367     if (erts_finish_breakpointing()) { /* Not done */
368 	/* Arrange for being called again */
369 	erts_schedule_thr_prgr_later_op(smp_bp_finisher, NULL, &finish_bp.lop);
370     }
371     else {			/* Done */
372 	Process* p = finish_bp.stager;
373 #ifdef DEBUG
374 	finish_bp.stager = NULL;
375 #endif
376 	erts_release_code_write_permission();
377 	erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
378 	if (!ERTS_PROC_IS_EXITING(p)) {
379 	    erts_resume(p, ERTS_PROC_LOCK_STATUS);
380 	}
381 	erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
382 	erts_proc_dec_refc(p);
383     }
384 }
385 
386 void
387 erts_get_default_trace_pattern(int *trace_pattern_is_on,
388 			       Binary **match_spec,
389 			       Binary **meta_match_spec,
390 			       struct trace_pattern_flags *trace_pattern_flags,
391 			       ErtsTracer *meta_tracer)
392 {
393     ERTS_LC_ASSERT(erts_has_code_write_permission() ||
394 		       erts_thr_progress_is_blocking());
395     if (trace_pattern_is_on)
396 	*trace_pattern_is_on = erts_default_trace_pattern_is_on;
397     if (match_spec)
398 	*match_spec = erts_default_match_spec;
399     if (meta_match_spec)
400 	*meta_match_spec = erts_default_meta_match_spec;
401     if (trace_pattern_flags)
402 	*trace_pattern_flags = erts_default_trace_pattern_flags;
403     if (meta_tracer)
404 	*meta_tracer = erts_default_meta_tracer;
405 }
406 
407 int erts_is_default_trace_enabled(void)
408 {
409     ERTS_LC_ASSERT(erts_has_code_write_permission() ||
410 		       erts_thr_progress_is_blocking());
411     return erts_default_trace_pattern_is_on;
412 }
413 
414 Uint
415 erts_trace_flag2bit(Eterm flag)
416 {
417     switch (flag) {
418     case am_timestamp: return F_NOW_TS;
419     case am_strict_monotonic_timestamp: return F_STRICT_MON_TS;
420     case am_monotonic_timestamp: return F_MON_TS;
421     case am_all: return TRACEE_FLAGS;
422     case am_send: return F_TRACE_SEND;
423     case am_receive: return F_TRACE_RECEIVE;
424     case am_set_on_spawn: return F_TRACE_SOS;
425     case am_procs: return F_TRACE_PROCS;
426     case am_set_on_first_spawn: return F_TRACE_SOS1;
427     case am_set_on_link: return F_TRACE_SOL;
428     case am_set_on_first_link: return F_TRACE_SOL1;
429     case am_running: return F_TRACE_SCHED;
430     case am_exiting: return F_TRACE_SCHED_EXIT;
431     case am_garbage_collection: return F_TRACE_GC;
432     case am_call: return  F_TRACE_CALLS;
433     case am_arity: return F_TRACE_ARITY_ONLY;
434     case am_return_to: return F_TRACE_RETURN_TO;
435     case am_silent: return F_TRACE_SILENT;
436     case am_scheduler_id: return F_TRACE_SCHED_NO;
437     case am_running_ports: return F_TRACE_SCHED_PORTS;
438     case am_running_procs: return F_TRACE_SCHED_PROCS;
439     case am_ports: return F_TRACE_PORTS;
440     default: return 0;
441     }
442 }
443 
444 /* Scan the argument list and sort out the trace flags.
445 **
446 ** Returns !0 on success, 0 on failure.
447 **
448 ** Sets the result variables on success, if their flags have
449 ** occurred in the argument list.
450 */
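/*
 * Illustrative flag list (a sketch): [call, timestamp, {tracer, TracerPid}]
 * would set F_TRACE_CALLS | F_NOW_TS in *pMask and set *pTracer to TracerPid.
 */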
451 int
452 erts_trace_flags(Eterm List,
453                  Uint *pMask, ErtsTracer *pTracer, int *pCpuTimestamp)
454 {
455     Eterm list = List;
456     Uint mask = 0;
457     ErtsTracer tracer = erts_tracer_nil;
458     int cpu_timestamp = 0;
459 
460     while (is_list(list)) {
461 	Uint bit;
462 	Eterm item = CAR(list_val(list));
463 	if (is_atom(item) && (bit = erts_trace_flag2bit(item))) {
464 	    mask |= bit;
465 #ifdef HAVE_ERTS_NOW_CPU
466 	} else if (item == am_cpu_timestamp) {
467 	    cpu_timestamp = !0;
468 #endif
469 	} else if (is_tuple(item)) {
470             tracer = erts_term_to_tracer(am_tracer, item);
471             if (tracer == THE_NON_VALUE)
472                 goto error;
473 	} else goto error;
474 	list = CDR(list_val(list));
475     }
476     if (is_not_nil(list)) goto error;
477 
478     if (pMask && mask)                           *pMask         = mask;
479     if (pTracer && !ERTS_TRACER_IS_NIL(tracer))  *pTracer       = tracer;
480     if (pCpuTimestamp && cpu_timestamp)          *pCpuTimestamp = cpu_timestamp;
481     return !0;
482  error:
483     return 0;
484 }
485 
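/*
 * Returns 0 on success and !0 if the tracee already has a different,
 * still-alive tracer (the caller then reports "already traced").
 */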
486 static ERTS_INLINE int
487 start_trace(Process *c_p, ErtsTracer tracer,
488             ErtsPTabElementCommon *common,
489             int on, int mask)
490 {
491     /* We can use the common part of both port and proc without checking
492        which it is. In the code below, "port" is used for both. */
493     Port *port = (Port*)common;
494 
495     /*
496      * SMP build assumes that either system is blocked or:
497      * * main lock is held on c_p
498      * * all locks are held on port common
499      */
500 
501     if (!ERTS_TRACER_IS_NIL(tracer)) {
502         if ((ERTS_TRACE_FLAGS(port) & TRACEE_FLAGS)
503             && !ERTS_TRACER_COMPARE(ERTS_TRACER(port), tracer)) {
504             /* This tracee is already being traced, and not by the
505              * tracer to be */
506             if (erts_is_tracer_enabled(ERTS_TRACER(port), common)) {
507                 /* The tracer is still in use */
508                 return 1;
509             }
510             /* Current tracer now invalid */
511         }
512     }
513 
514     if (on)
515         ERTS_TRACE_FLAGS(port) |= mask;
516     else
517         ERTS_TRACE_FLAGS(port) &= ~mask;
518 
519     if ((ERTS_TRACE_FLAGS(port) & TRACEE_FLAGS) == 0) {
520         tracer = erts_tracer_nil;
521         erts_tracer_replace(common, erts_tracer_nil);
522     } else if (!ERTS_TRACER_IS_NIL(tracer))
523         erts_tracer_replace(common, tracer);
524 
525     return 0;
526 }
527 
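/*
 * Illustrative Erlang-level usage (a sketch): erlang:trace/3 is the
 * documented entry point and, in recent OTP releases, calls down to this BIF:
 *
 *   %% Trace message sends and receives of Pid, reporting to self():
 *   erlang:trace(Pid, true, [send, 'receive', {tracer, self()}]).
 */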
528 Eterm erts_internal_trace_3(BIF_ALIST_3)
529 {
530     Process* p = BIF_P;
531     Eterm pid_spec = BIF_ARG_1;
532     Eterm how = BIF_ARG_2;
533     Eterm list = BIF_ARG_3;
534     int on;
535     ErtsTracer tracer = erts_tracer_nil;
536     int matches = 0;
537     Uint mask = 0;
538     int cpu_ts = 0;
539     int system_blocked = 0;
540 
541     if (! erts_trace_flags(list, &mask, &tracer, &cpu_ts)) {
542 	BIF_ERROR(p, BADARG);
543     }
544 
545     if (!erts_try_seize_code_write_permission(BIF_P)) {
546 	ERTS_BIF_YIELD3(bif_export[BIF_erts_internal_trace_3],
547                         BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
548     }
549 
550     switch (how) {
551     case am_false:
552 	on = 0;
553 	break;
554     case am_true:
555 	on = 1;
556         if (ERTS_TRACER_IS_NIL(tracer))
557             tracer = erts_term_to_tracer(am_tracer, p->common.id);
558 
559         if (tracer == THE_NON_VALUE) {
560             tracer = erts_tracer_nil;
561             goto error;
562         }
563 
564 	break;
565     default:
566 	goto error;
567     }
568 
569     /*
570      * Set/reset the call trace flag for the given Pids.
571      */
572 
573     if (is_port(pid_spec)) {
574 	Port *tracee_port;
575 
576 #ifdef HAVE_ERTS_NOW_CPU
577 	if (cpu_ts) {
578 	    goto error;
579 	}
580 #endif
581 
582 	tracee_port = erts_id2port_sflgs(pid_spec,
583 					 p,
584 					 ERTS_PROC_LOCK_MAIN,
585 					 ERTS_PORT_SFLGS_INVALID_LOOKUP);
586 
587 	if (!tracee_port)
588 	    goto error;
589 
590         if (start_trace(p, tracer, &tracee_port->common, on, mask)) {
591 	    erts_port_release(tracee_port);
592 	    goto already_traced;
593         }
594         erts_port_release(tracee_port);
595         matches = 1;
596     } else if (is_pid(pid_spec)) {
597 	Process *tracee_p;
598 
599 #ifdef HAVE_ERTS_NOW_CPU
600 	if (cpu_ts) {
601 	    goto error;
602 	}
603 #endif
604 	/* Check that the tracee is not dead, not tracing
605 	 * and not about to be tracing.
606 	 */
607 
608 	tracee_p = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
609 				 pid_spec, ERTS_PROC_LOCKS_ALL);
610 	if (!tracee_p)
611 	    goto error;
612 
613         if (start_trace(tracee_p, tracer, &tracee_p->common, on, mask)) {
614 	    erts_proc_unlock(tracee_p,
615 				 (tracee_p == p
616 				  ? ERTS_PROC_LOCKS_ALL_MINOR
617 				  : ERTS_PROC_LOCKS_ALL));
618 	    goto already_traced;
619         }
620         erts_proc_unlock(tracee_p,
621 			     (tracee_p == p
622 			      ? ERTS_PROC_LOCKS_ALL_MINOR
623 			      : ERTS_PROC_LOCKS_ALL));
624 
625 	matches = 1;
626     } else {
627 	int ok = 0;
628 
629 #ifdef HAVE_ERTS_NOW_CPU
630 	if (cpu_ts) {
631 	    if (pid_spec == am_all) {
632 		if (on) {
633 		    if (!erts_cpu_timestamp) {
634 #ifdef HAVE_CLOCK_GETTIME_CPU_TIME
635 			/*
636 			   Perhaps clock_gettime was found during config
637 			   on a different machine than this one. We check
638 			   that it works here and now, and then do not
639 			   bother checking its return value for errors later.
640 			*/
641 			{
642 			    SysCpuTime start, stop;
643 			    SysTimespec tp;
644 			    int i;
645 
646 			    if (sys_get_cputime(start, tp) < 0)
647 				goto error;
648 			    start = ((SysCpuTime)tp.tv_sec * 1000000000LL) +
649 				    (SysCpuTime)tp.tv_nsec;
650 			    for (i = 0; i < 100; i++)
651 				sys_get_cputime(stop, tp);
652 			    stop = ((SysCpuTime)tp.tv_sec * 1000000000LL) +
653 				   (SysCpuTime)tp.tv_nsec;
654 			    if (start == 0) goto error;
655 			    if (start == stop) goto error;
656 			}
657 #else /* HAVE_GETHRVTIME */
658 			if (erts_start_now_cpu() < 0) {
659 			    goto error;
660 			}
661 #endif /* HAVE_CLOCK_GETTIME_CPU_TIME */
662 			erts_cpu_timestamp = !0;
663 		    }
664 		}
665 	    } else {
666 		goto error;
667 	    }
668 	}
669 #endif
670 
671 	if (pid_spec == am_all || pid_spec == am_existing ||
672             pid_spec == am_ports || pid_spec == am_processes ||
673             pid_spec == am_existing_ports || pid_spec == am_existing_processes
674             ) {
675 	    int i;
676 	    int procs = 0;
677 	    int ports = 0;
678 	    int mods = 0;
679 
680 	    if (mask & (ERTS_PROC_TRACEE_FLAGS & ~ERTS_TRACEE_MODIFIER_FLAGS))
681 		procs = pid_spec != am_ports && pid_spec != am_existing_ports;
682 	    if (mask & (ERTS_PORT_TRACEE_FLAGS & ~ERTS_TRACEE_MODIFIER_FLAGS))
683 		ports = pid_spec != am_processes && pid_spec != am_existing_processes;
684 	    if (mask & ERTS_TRACEE_MODIFIER_FLAGS) {
685                 if (pid_spec == am_ports || pid_spec == am_existing_ports)
686                     ports = 1;
687                 else if (pid_spec == am_processes || pid_spec == am_existing_processes)
688                     procs = 1;
689                 else
690                     mods = 1;
691             }
692 
693 	    erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
694 	    erts_thr_progress_block();
695 	    system_blocked = 1;
696 
697 	    ok = 1;
698 	    if (procs || mods) {
699 		int max = erts_ptab_max(&erts_proc);
700 		/* tracing of processes */
701 		for (i = 0; i < max; i++) {
702 		    Process* tracee_p = erts_pix2proc(i);
703 		    if (! tracee_p)
704 			continue;
705                     if (!start_trace(p, tracer, &tracee_p->common, on, mask))
706                         matches++;
707 		}
708 	    }
709 	    if (ports || mods) {
710 		int max = erts_ptab_max(&erts_port);
711 		/* tracing of ports */
712 		for (i = 0; i < max; i++) {
713 		    erts_aint32_t state;
714 		    Port *tracee_port = erts_pix2port(i);
715 		    if (!tracee_port)
716 			continue;
717 		    state = erts_atomic32_read_nob(&tracee_port->state);
718 		    if (state & ERTS_PORT_SFLGS_DEAD)
719 			continue;
720                     if (!start_trace(p, tracer, &tracee_port->common, on, mask))
721                         matches++;
722 		}
723 	    }
724 	}
725 
726 	if (pid_spec == am_all || pid_spec == am_new
727             || pid_spec == am_ports || pid_spec == am_processes
728             || pid_spec == am_new_ports || pid_spec == am_new_processes
729             ) {
730 
731 	    ok = 1;
732             if (mask & ERTS_PROC_TRACEE_FLAGS &&
733                 pid_spec != am_ports && pid_spec != am_new_ports)
734                 erts_change_default_proc_tracing(
735                     on, mask & ERTS_PROC_TRACEE_FLAGS, tracer);
736             if (mask & ERTS_PORT_TRACEE_FLAGS &&
737                 pid_spec != am_processes && pid_spec != am_new_processes)
738                 erts_change_default_port_tracing(
739                     on, mask & ERTS_PORT_TRACEE_FLAGS, tracer);
740 
741 #ifdef HAVE_ERTS_NOW_CPU
742 	    if (cpu_ts && !on) {
743 		/* cpu_ts => pid_spec == am_all */
744 		if (erts_cpu_timestamp) {
745 #ifdef HAVE_GETHRVTIME
746 		    erts_stop_now_cpu();
747 #endif
748 		    erts_cpu_timestamp = 0;
749 		}
750 	    }
751 #endif
752 	}
753 
754 	if (!ok)
755 	    goto error;
756     }
757 
758     if (system_blocked) {
759 	erts_thr_progress_unblock();
760 	erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
761     }
762     erts_release_code_write_permission();
763     ERTS_TRACER_CLEAR(&tracer);
764 
765     BIF_RET(make_small(matches));
766 
767  already_traced:
768     erts_send_error_to_logger_str(p->group_leader,
769 				  "** can only have one tracer per process\n");
770 
771  error:
772 
773     ERTS_TRACER_CLEAR(&tracer);
774 
775     if (system_blocked) {
776 	erts_thr_progress_unblock();
777 	erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
778     }
779     erts_release_code_write_permission();
780 
781     BIF_ERROR(p, BADARG);
782 }
783 
784 /*
785  * Return information about a process or an external function being traced.
786  */
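/*
 * Illustrative usage (a sketch):
 *
 *   erlang:trace_info(Pid, flags),           %% e.g. {flags, [send, 'receive']}
 *   erlang:trace_info({lists, seq, 2}, all)  %% {all, [...]} or {all, false}
 */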
787 
788 Eterm trace_info_2(BIF_ALIST_2)
789 {
790     Process* p = BIF_P;
791     Eterm What = BIF_ARG_1;
792     Eterm Key = BIF_ARG_2;
793     Eterm res;
794 
795     if (!erts_try_seize_code_write_permission(p)) {
796 	ERTS_BIF_YIELD2(bif_export[BIF_trace_info_2], p, What, Key);
797     }
798 
799     if (What == am_on_load) {
800 	res = trace_info_on_load(p, Key);
801     } else if (What == am_send || What == am_receive) {
802         res = trace_info_event(p, What, Key);
803     } else if (is_atom(What) || is_pid(What) || is_port(What)) {
804 	res = trace_info_pid(p, What, Key);
805     } else if (is_tuple(What)) {
806 	res = trace_info_func(p, What, Key);
807     } else {
808 	erts_release_code_write_permission();
809 	BIF_ERROR(p, BADARG);
810     }
811     erts_release_code_write_permission();
812 
813     if (is_value(res) && is_internal_ref(res))
814         BIF_TRAP1(erts_await_result, BIF_P, res);
815 
816     BIF_RET(res);
817 }
818 
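/*
 * Builds the list of trace flag atoms set in trace_flags. Callers use the
 * usual two-pass scheme: first with szp only to size the heap, then with
 * hpp to build the actual list.
 */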
819 static Eterm
820 build_trace_flags_term(Eterm **hpp, Uint *szp, Uint trace_flags)
821 {
822 
823 #define ERTS_TFLAG__(F, FN)                             \
824     if (trace_flags & F) {                              \
825         if (szp)                                        \
826             sz += 2;                                    \
827         if (hp) {                                       \
828             res = CONS(hp, FN, res);                    \
829             hp += 2;                                    \
830         }                                               \
831     }
832 
833     Eterm res;
834     Uint sz = 0;
835     Eterm *hp;
836 
837     if (hpp) {
838         hp = *hpp;
839         res = NIL;
840     }
841     else {
842         hp = NULL;
843         res = THE_NON_VALUE;
844     }
845 
846     ERTS_TFLAG__(F_NOW_TS, am_timestamp);
847     ERTS_TFLAG__(F_STRICT_MON_TS, am_strict_monotonic_timestamp);
848     ERTS_TFLAG__(F_MON_TS, am_monotonic_timestamp);
849     ERTS_TFLAG__(F_TRACE_SEND, am_send);
850     ERTS_TFLAG__(F_TRACE_RECEIVE, am_receive);
851     ERTS_TFLAG__(F_TRACE_SOS, am_set_on_spawn);
852     ERTS_TFLAG__(F_TRACE_CALLS, am_call);
853     ERTS_TFLAG__(F_TRACE_PROCS, am_procs);
854     ERTS_TFLAG__(F_TRACE_SOS1, am_set_on_first_spawn);
855     ERTS_TFLAG__(F_TRACE_SOL, am_set_on_link);
856     ERTS_TFLAG__(F_TRACE_SOL1, am_set_on_first_link);
857     ERTS_TFLAG__(F_TRACE_SCHED, am_running);
858     ERTS_TFLAG__(F_TRACE_SCHED_EXIT, am_exiting);
859     ERTS_TFLAG__(F_TRACE_GC, am_garbage_collection);
860     ERTS_TFLAG__(F_TRACE_ARITY_ONLY, am_arity);
861     ERTS_TFLAG__(F_TRACE_RETURN_TO, am_return_to);
862     ERTS_TFLAG__(F_TRACE_SILENT, am_silent);
863     ERTS_TFLAG__(F_TRACE_SCHED_NO, am_scheduler_id);
864     ERTS_TFLAG__(F_TRACE_PORTS, am_ports);
865     ERTS_TFLAG__(F_TRACE_SCHED_PORTS, am_running_ports);
866     ERTS_TFLAG__(F_TRACE_SCHED_PROCS, am_running_procs);
867 
868     if (szp)
869         *szp += sz;
870 
871     if (hpp)
872         *hpp = hp;
873 
874     return res;
875 
876 #undef ERTS_TFLAG__
877 }
878 
879 static Eterm
880 trace_info_tracee(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
881 {
882     ErlHeapFragment *bp;
883     Eterm *hp, res, key;
884     Uint sz;
885 
886     *redsp = 1;
887 
888     if (ERTS_PROC_IS_EXITING(c_p))
889         return am_undefined;
890 
891     key = (Eterm) arg;
892     sz = 3;
893 
894     if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(c_p)))
895         erts_is_tracer_proc_enabled(c_p, ERTS_PROC_LOCK_MAIN,
896                                     &c_p->common);
897 
898     switch (key) {
899     case am_tracer:
900 
901         erts_build_tracer_to_term(NULL, NULL, &sz, ERTS_TRACER(c_p));
902         bp = new_message_buffer(sz);
903         hp = bp->mem;
904         res = erts_build_tracer_to_term(&hp, &bp->off_heap,
905                                         NULL, ERTS_TRACER(c_p));
906         if (res == am_false)
907             res = NIL;
908         break;
909 
910     case am_flags:
911 
912         build_trace_flags_term(NULL, &sz, ERTS_TRACE_FLAGS(c_p));
913         bp = new_message_buffer(sz);
914         hp = bp->mem;
915         res = build_trace_flags_term(&hp, NULL, ERTS_TRACE_FLAGS(c_p));
916         break;
917 
918     default:
919 
920         ERTS_INTERNAL_ERROR("Key not supported");
921         res = NIL;
922         bp = NULL;
923         hp = NULL;
924         break;
925     }
926 
927     *redsp += 2;
928 
929     res = TUPLE2(hp, key, res);
930     *bpp = bp;
931     return res;
932 }
933 
934 static Eterm
935 trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
936 {
937     Eterm tracer;
938     Uint trace_flags = am_false;
939     Eterm* hp;
940 
941     if (pid_spec == am_new || pid_spec == am_new_processes) {
942         ErtsTracer def_tracer;
943 	erts_get_default_proc_tracing(&trace_flags, &def_tracer);
944         tracer = erts_tracer_to_term(p, def_tracer);
945         ERTS_TRACER_CLEAR(&def_tracer);
946     } else if (pid_spec == am_new_ports) {
947         ErtsTracer def_tracer;
948 	erts_get_default_port_tracing(&trace_flags, &def_tracer);
949         tracer = erts_tracer_to_term(p, def_tracer);
950         ERTS_TRACER_CLEAR(&def_tracer);
951     } else if (is_internal_port(pid_spec)) {
952         Port *tracee;
953         tracee = erts_id2port_sflgs(pid_spec, p, ERTS_PROC_LOCK_MAIN,
954                                     ERTS_PORT_SFLGS_INVALID_LOOKUP);
955 
956         if (!tracee)
957             return am_undefined;
958 
959         if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(tracee)))
960             erts_is_tracer_proc_enabled(NULL, 0, &tracee->common);
961 
962         tracer = erts_tracer_to_term(p, ERTS_TRACER(tracee));
963         trace_flags = ERTS_TRACE_FLAGS(tracee);
964 
965         erts_port_release(tracee);
966 
967     } else if (is_internal_pid(pid_spec)) {
968         Eterm ref;
969 
970         if (key != am_flags && key != am_tracer)
971             goto error;
972 
973         ref = erts_proc_sig_send_rpc_request(p, pid_spec, !0,
974                                              trace_info_tracee,
975                                              (void *) key);
976 
977         if (is_non_value(ref))
978             return am_undefined;
979 
980         return ref;
981     } else if (is_external_pid(pid_spec)
982 	       && external_pid_dist_entry(pid_spec) == erts_this_dist_entry) {
983 	    return am_undefined;
984     } else {
985     error:
986 	BIF_ERROR(p, BADARG);
987     }
988 
989     if (key == am_flags) {
990 	Eterm flag_list;
991         Uint sz = 3;
992         Eterm *hp;
993 
994         build_trace_flags_term(NULL, &sz, trace_flags);
995 
996         hp = HAlloc(p, sz);
997 
998         flag_list = build_trace_flags_term(&hp, NULL, trace_flags);
999 
1000 	return TUPLE2(hp, key, flag_list);
1001     } else if (key == am_tracer) {
1002         if (tracer == am_false)
1003             tracer = NIL;
1004         hp = HAlloc(p, 3);
1005         return TUPLE2(hp, key, tracer);
1006     } else {
1007 	goto error;
1008     }
1009 }
1010 
1011 #define FUNC_TRACE_NOEXIST      0
1012 #define FUNC_TRACE_UNTRACED     (1<<0)
1013 #define FUNC_TRACE_GLOBAL_TRACE (1<<1)
1014 #define FUNC_TRACE_LOCAL_TRACE  (1<<2)
1015 #define FUNC_TRACE_META_TRACE   (1<<3)
1016 #define FUNC_TRACE_COUNT_TRACE  (1<<4)
1017 #define FUNC_TRACE_TIME_TRACE   (1<<5)
1018 /*
1019  * Returns either FUNC_TRACE_NOEXIST, FUNC_TRACE_UNTRACED,
1020  * FUNC_TRACE_GLOBAL_TRACE, or,
1021  * an or'ed combination of at least one of FUNC_TRACE_LOCAL_TRACE,
1022  * FUNC_TRACE_META_TRACE, FUNC_TRACE_COUNT_TRACE.
1023  *
1024  * If the return value contains FUNC_TRACE_GLOBAL_TRACE
1025  * or FUNC_TRACE_LOCAL_TRACE *ms is set.
1026  *
1027  * If the return value contains FUNC_TRACE_META_TRACE,
1028  * *ms_meta or *tracer_pid_meta is set.
1029  *
1030  * If the return value contains FUNC_TRACE_COUNT_TRACE, *count is set.
1031  */
1032 static int function_is_traced(Process *p,
1033 			      Eterm mfa[3],
1034 			      Binary    **ms,              /* out */
1035 			      Binary    **ms_meta,         /* out */
1036 			      ErtsTracer *tracer_pid_meta, /* out */
1037 			      Uint       *count,           /* out */
1038 			      Eterm      *call_time)       /* out */
1039 {
1040     Export e;
1041     Export* ep;
1042     BeamInstr* pc;
1043     ErtsCodeInfo *ci;
1044 
1045     /* First look for an export entry */
1046     e.info.mfa.module = mfa[0];
1047     e.info.mfa.function = mfa[1];
1048     e.info.mfa.arity = mfa[2];
1049     if ((ep = export_get(&e)) != NULL) {
1050 	pc = ep->beam;
1051 	if (ep->addressv[erts_active_code_ix()] == pc &&
1052 	    ! BeamIsOpCode(*pc, op_call_error_handler)) {
1053 
1054 	    int r = 0;
1055 
1056 	    ASSERT(BeamIsOpCode(*pc, op_apply_bif) ||
1057 		   BeamIsOpCode(*pc, op_i_generic_breakpoint));
1058 
1059 	    if (erts_is_trace_break(&ep->info, ms, 0)) {
1060 		return FUNC_TRACE_GLOBAL_TRACE;
1061 	    }
1062 
1063 	    if (erts_is_trace_break(&ep->info, ms, 1)) {
1064 		r |= FUNC_TRACE_LOCAL_TRACE;
1065 	    }
1066 	    if (erts_is_mtrace_break(&ep->info, ms_meta, tracer_pid_meta)) {
1067 		r |= FUNC_TRACE_META_TRACE;
1068 	    }
1069 	    if (erts_is_time_break(p, &ep->info, call_time)) {
1070 		r |= FUNC_TRACE_TIME_TRACE;
1071 	    }
1072 	    return r ? r : FUNC_TRACE_UNTRACED;
1073 	}
1074     }
1075 
1076     /* OK, now look for breakpoint tracing */
1077     if ((ci = erts_find_local_func(&e.info.mfa)) != NULL) {
1078 	int r =
1079 	    (erts_is_trace_break(ci, ms, 1)
1080 	     ? FUNC_TRACE_LOCAL_TRACE : 0)
1081 	    | (erts_is_mtrace_break(ci, ms_meta, tracer_pid_meta)
1082 	       ? FUNC_TRACE_META_TRACE : 0)
1083 	    | (erts_is_count_break(ci, count)
1084 	       ? FUNC_TRACE_COUNT_TRACE : 0)
1085 	    | (erts_is_time_break(p, ci, call_time)
1086 	       ? FUNC_TRACE_TIME_TRACE : 0);
1087 
1088 	return r ? r : FUNC_TRACE_UNTRACED;
1089     }
1090     return FUNC_TRACE_NOEXIST;
1091 }
1092 
1093 static Eterm
1094 trace_info_func(Process* p, Eterm func_spec, Eterm key)
1095 {
1096     Eterm* tp;
1097     Eterm* hp;
1098     DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */
1099     Binary *ms = NULL, *ms_meta = NULL;
1100     Uint count = 0;
1101     Eterm traced = am_false;
1102     Eterm match_spec = am_false;
1103     Eterm retval = am_false;
1104     ErtsTracer meta = erts_tracer_nil;
1105     Eterm call_time = NIL;
1106     int r;
1107 
1108 
1109     UseTmpHeap(3,p);
1110 
1111     if (!is_tuple(func_spec)) {
1112 	goto error;
1113     }
1114     tp = tuple_val(func_spec);
1115     if (tp[0] != make_arityval(3)) {
1116 	goto error;
1117     }
1118     if (!is_atom(tp[1]) || !is_atom(tp[2]) || !is_small(tp[3])) {
1119 	goto error;
1120     }
1121     mfa[0] = tp[1];
1122     mfa[1] = tp[2];
1123     mfa[2] = signed_val(tp[3]);
1124 
1125     if ( (key == am_call_time) || (key == am_all)) {
1126 	erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
1127 	erts_thr_progress_block();
1128         erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
1129     }
1130     erts_mtx_lock(&erts_dirty_bp_ix_mtx);
1131 
1132 
1133     r = function_is_traced(p, mfa, &ms, &ms_meta, &meta, &count, &call_time);
1134 
1135     erts_mtx_unlock(&erts_dirty_bp_ix_mtx);
1136     if ( (key == am_call_time) || (key == am_all)) {
1137 	erts_thr_progress_unblock();
1138     }
1139 
1140     switch (r) {
1141     case FUNC_TRACE_NOEXIST:
1142 	UnUseTmpHeap(3,p);
1143 	hp = HAlloc(p, 3);
1144 	return TUPLE2(hp, key, am_undefined);
1145     case FUNC_TRACE_UNTRACED:
1146 	UnUseTmpHeap(3,p);
1147 	hp = HAlloc(p, 3);
1148 	return TUPLE2(hp, key, am_false);
1149     case FUNC_TRACE_GLOBAL_TRACE:
1150 	traced = am_global;
1151 	match_spec = NIL; /* Fix up later if it's asked for */
1152 	break;
1153     default:
1154 	if (r & FUNC_TRACE_LOCAL_TRACE) {
1155 	    traced = am_local;
1156 	    match_spec = NIL; /* Fix up later if it's asked for */
1157 	}
1158 	break;
1159     }
1160 
1161     switch (key) {
1162     case am_traced:
1163 	retval = traced;
1164 	break;
1165     case am_match_spec:
1166 	if (ms) {
1167 	    match_spec = MatchSetGetSource(ms);
1168 	    match_spec = copy_object(match_spec, p);
1169 	}
1170 	retval = match_spec;
1171 	break;
1172     case am_meta:
1173         retval = erts_tracer_to_term(p, meta);
1174         if (retval == am_false)
1175             /* backwards compatibility */
1176             retval = NIL;
1177 	break;
1178     case am_meta_match_spec:
1179 	if (r & FUNC_TRACE_META_TRACE) {
1180 	    if (ms_meta) {
1181 		retval = MatchSetGetSource(ms_meta);
1182 		retval = copy_object(retval, p);
1183 	    } else {
1184 		retval = NIL;
1185 	    }
1186 	}
1187 	break;
1188     case am_call_count:
1189 	if (r & FUNC_TRACE_COUNT_TRACE) {
1190 	    retval = erts_make_integer(count, p);
1191 	}
1192 	break;
1193     case am_call_time:
1194 	if (r & FUNC_TRACE_TIME_TRACE) {
1195 	    retval = call_time;
1196 	}
1197 	break;
1198     case am_all: {
1199 	Eterm match_spec_meta = am_false, c = am_false, t, ct = am_false,
1200             m = am_false;
1201 
1202 	if (ms) {
1203 	    match_spec = MatchSetGetSource(ms);
1204 	    match_spec = copy_object(match_spec, p);
1205 	}
1206 	if (r & FUNC_TRACE_META_TRACE) {
1207 	    if (ms_meta) {
1208 		match_spec_meta = MatchSetGetSource(ms_meta);
1209 		match_spec_meta = copy_object(match_spec_meta, p);
1210 	    } else
1211 		match_spec_meta = NIL;
1212 	}
1213 	if (r & FUNC_TRACE_COUNT_TRACE) {
1214 	    c = erts_make_integer(count, p);
1215 	}
1216 	if (r & FUNC_TRACE_TIME_TRACE) {
1217 	    ct = call_time;
1218 	}
1219 
1220         m = erts_tracer_to_term(p, meta);
1221 
1222 	hp = HAlloc(p, (3+2)*6);
1223 	retval = NIL;
1224 	t = TUPLE2(hp, am_call_count, c); hp += 3;
1225 	retval = CONS(hp, t, retval); hp += 2;
1226 	t = TUPLE2(hp, am_call_time, ct); hp += 3;
1227 	retval = CONS(hp, t, retval); hp += 2;
1228 	t = TUPLE2(hp, am_meta_match_spec, match_spec_meta); hp += 3;
1229 	retval = CONS(hp, t, retval); hp += 2;
1230 	t = TUPLE2(hp, am_meta, m); hp += 3;
1231 	retval = CONS(hp, t, retval); hp += 2;
1232 	t = TUPLE2(hp, am_match_spec, match_spec); hp += 3;
1233 	retval = CONS(hp, t, retval); hp += 2;
1234 	t = TUPLE2(hp, am_traced, traced); hp += 3;
1235 	retval = CONS(hp, t, retval); hp += 2;
1236     }   break;
1237     default:
1238 	goto error;
1239     }
1240     UnUseTmpHeap(3,p);
1241     hp = HAlloc(p, 3);
1242     return TUPLE2(hp, key, retval);
1243 
1244  error:
1245     UnUseTmpHeap(3,p);
1246     BIF_ERROR(p, BADARG);
1247 }
1248 
1249 static Eterm
1250 trace_info_on_load(Process* p, Eterm key)
1251 {
1252     Eterm* hp;
1253 
1254     if (! erts_default_trace_pattern_is_on) {
1255 	hp = HAlloc(p, 3);
1256 	return TUPLE2(hp, key, am_false);
1257     }
1258     switch (key) {
1259     case am_traced:
1260 	{
1261 	    Eterm traced = am_false;
1262 
1263 	    if (! erts_default_trace_pattern_flags.breakpoint) {
1264 		traced = am_global;
1265 	    } else if (erts_default_trace_pattern_flags.local) {
1266 		traced = am_local;
1267 	    }
1268 	    hp = HAlloc(p, 3);
1269 	    return TUPLE2(hp, key, traced);
1270 	}
1271     case am_match_spec:
1272 	{
1273 	    Eterm match_spec = am_false;
1274 
1275 	    if ((! erts_default_trace_pattern_flags.breakpoint) ||
1276 		erts_default_trace_pattern_flags.local) {
1277 		if (erts_default_match_spec) {
1278 		    match_spec = MatchSetGetSource(erts_default_match_spec);
1279 		    match_spec = copy_object(match_spec, p);
1280 		    hp = HAlloc(p, 3);
1281 		} else {
1282 		    match_spec = NIL;
1283 		    hp = HAlloc(p, 3);
1284 		}
1285 	    } else {
1286 		hp = HAlloc(p, 3);
1287 	    }
1288 	    return TUPLE2(hp, key, match_spec);
1289 	}
1290     case am_meta:
1291 	hp = HAlloc(p, 3);
1292 	if (erts_default_trace_pattern_flags.meta) {
1293             ASSERT(!ERTS_TRACER_IS_NIL(erts_default_meta_tracer));
1294 	    return TUPLE2(hp, key, erts_tracer_to_term(p, erts_default_meta_tracer));
1295 	} else {
1296 	    return TUPLE2(hp, key, am_false);
1297 	}
1298     case am_meta_match_spec:
1299 	{
1300 	    Eterm match_spec = am_false;
1301 
1302 	    if (erts_default_trace_pattern_flags.meta) {
1303 		if (erts_default_meta_match_spec) {
1304 		    match_spec =
1305 			MatchSetGetSource(erts_default_meta_match_spec);
1306 		    match_spec = copy_object(match_spec, p);
1307 		    hp = HAlloc(p, 3);
1308 		} else {
1309 		    match_spec = NIL;
1310 		    hp = HAlloc(p, 3);
1311 		}
1312 	    } else {
1313 		hp = HAlloc(p, 3);
1314 	    }
1315 	    return TUPLE2(hp, key, match_spec);
1316 	}
1317     case am_call_count:
1318 	hp = HAlloc(p, 3);
1319 	if (erts_default_trace_pattern_flags.call_count) {
1320 	    return TUPLE2(hp, key, am_true);
1321 	} else {
1322 	    return TUPLE2(hp, key, am_false);
1323 	}
1324     case am_call_time:
1325 	hp = HAlloc(p, 3);
1326 	if (erts_default_trace_pattern_flags.call_time) {
1327 	    return TUPLE2(hp, key, am_true);
1328 	} else {
1329 	    return TUPLE2(hp, key, am_false);
1330 	}
1331     case am_all:
1332 	{
1333 	    Eterm match_spec = am_false, meta_match_spec = am_false, r = NIL, t, m;
1334 
1335 	    if (erts_default_trace_pattern_flags.local ||
1336 		(! erts_default_trace_pattern_flags.breakpoint)) {
1337 		match_spec = NIL;
1338 	    }
1339 	    if (erts_default_match_spec) {
1340 		match_spec = MatchSetGetSource(erts_default_match_spec);
1341 		match_spec = copy_object(match_spec, p);
1342 	    }
1343 	    if (erts_default_trace_pattern_flags.meta) {
1344 		meta_match_spec = NIL;
1345 	    }
1346 	    if (erts_default_meta_match_spec) {
1347 		meta_match_spec =
1348 		    MatchSetGetSource(erts_default_meta_match_spec);
1349 		meta_match_spec = copy_object(meta_match_spec, p);
1350 	    }
1351             m = (erts_default_trace_pattern_flags.meta
1352                  ? erts_tracer_to_term(p, erts_default_meta_tracer) : am_false);
1353 	    hp = HAlloc(p, (3+2)*5 + 3);
1354 	    t = TUPLE2(hp, am_call_count,
1355 		       (erts_default_trace_pattern_flags.call_count
1356 			? am_true : am_false)); hp += 3;
1357 	    r = CONS(hp, t, r); hp += 2;
1358 	    t = TUPLE2(hp, am_meta_match_spec, meta_match_spec); hp += 3;
1359 	    r = CONS(hp, t, r); hp += 2;
1360 	    t = TUPLE2(hp, am_meta, m); hp += 3;
1361 	    r = CONS(hp, t, r); hp += 2;
1362 	    t = TUPLE2(hp, am_match_spec, match_spec); hp += 3;
1363 	    r = CONS(hp, t, r); hp += 2;
1364 	    t = TUPLE2(hp, am_traced,
1365 		       (! erts_default_trace_pattern_flags.breakpoint ?
1366 			am_global : (erts_default_trace_pattern_flags.local ?
1367 				     am_local : am_false))); hp += 3;
1368 	    r = CONS(hp, t, r); hp += 2;
1369 	    return TUPLE2(hp, key, r);
1370 	}
1371     default:
1372 	BIF_ERROR(p, BADARG);
1373     }
1374 }
1375 
1376 static Eterm
1377 trace_info_event(Process* p, Eterm event, Eterm key)
1378 {
1379     ErtsTracingEvent* te;
1380     Eterm retval;
1381     Eterm* hp;
1382 
1383     switch (event) {
1384     case am_send:    te = erts_send_tracing;    break;
1385     case am_receive: te = erts_receive_tracing; break;
1386     default:
1387         goto error;
1388     }
1389 
1390     if (key != am_match_spec)
1391         goto error;
1392 
1393     te = &te[erts_active_bp_ix()];
1394 
1395     if (te->on) {
1396         if (!te->match_spec)
1397             retval = am_true;
1398         else
1399             retval = copy_object(MatchSetGetSource(te->match_spec), p);
1400     }
1401     else
1402         retval = am_false;
1403 
1404     hp = HAlloc(p, 3);
1405     return TUPLE2(hp, key, retval);
1406 
1407  error:
1408     BIF_ERROR(p, BADARG);
1409 }
1410 
1411 
1412 #undef FUNC_TRACE_NOEXIST
1413 #undef FUNC_TRACE_UNTRACED
1414 #undef FUNC_TRACE_GLOBAL_TRACE
1415 #undef FUNC_TRACE_LOCAL_TRACE
1416 
1417 int
1418 erts_set_trace_pattern(Process *p, ErtsCodeMFA *mfa, int specified,
1419 		       Binary* match_prog_set, Binary *meta_match_prog_set,
1420 		       int on, struct trace_pattern_flags flags,
1421 		       ErtsTracer meta_tracer, int is_blocking)
1422 {
1423     const ErtsCodeIndex code_ix = erts_active_code_ix();
1424     int matches = 0;
1425     int i;
1426     int n;
1427     BpFunction* fp;
1428 
1429     /*
1430      * First work on normal functions (not real BIFs).
1431      */
1432 
1433     erts_bp_match_export(&finish_bp.e, mfa, specified);
1434     fp = finish_bp.e.matching;
1435     n = finish_bp.e.matched;
1436 
1437     for (i = 0; i < n; i++) {
1438         ErtsCodeInfo *ci = fp[i].ci;
1439 	BeamInstr* pc = erts_codeinfo_to_code(ci);
1440 	Export* ep = ErtsContainerStruct(ci, Export, info);
1441 
1442 	if (on && !flags.breakpoint) {
1443 	    /* Turn on global call tracing */
1444 	    if (ep->addressv[code_ix] != pc) {
1445 		fp[i].mod->curr.num_traced_exports++;
1446 #ifdef DEBUG
1447 		ep->info.op = BeamOpCodeAddr(op_i_func_info_IaaI);
1448 #endif
1449                 ep->beam[0] = BeamOpCodeAddr(op_trace_jump_W);
1450 		ep->beam[1] = (BeamInstr) ep->addressv[code_ix];
1451 	    }
1452 	    erts_set_call_trace_bif(ci, match_prog_set, 0);
1453 	    if (ep->addressv[code_ix] != pc) {
1454 		ep->beam[0] = BeamOpCodeAddr(op_i_generic_breakpoint);
1455 	    }
1456 	} else if (!on && flags.breakpoint) {
1457 	    /* Turn off breakpoint tracing -- nothing to do here. */
1458 	} else {
1459 	    /*
1460 	     * Turn off global tracing, either explicitly or implicitly
1461 	     * before turning on breakpoint tracing.
1462 	     */
1463 	    erts_clear_call_trace_bif(ci, 0);
1464 	    if (BeamIsOpCode(ep->beam[0], op_i_generic_breakpoint)) {
1465 		ep->beam[0] = BeamOpCodeAddr(op_trace_jump_W);
1466 	    }
1467 	}
1468     }
1469 
1470     /*
1471     ** OK, now for the BIFs
1472     */
1473     for (i = 0; i < BIF_SIZE; ++i) {
1474 	Export *ep = bif_export[i];
1475 
1476 	if (!ExportIsBuiltIn(ep)) {
1477 	    continue;
1478 	}
1479 
1480 	if (bif_table[i].f == bif_table[i].traced) {
1481 	    /* Trace wrapper same as regular function - untraceable */
1482 	    continue;
1483 	}
1484 
1485         switch (specified) {
1486         case 3:
1487             if (mfa->arity != ep->info.mfa.arity)
1488                 continue;
1489         case 2:
1490             if (mfa->function != ep->info.mfa.function)
1491                 continue;
1492         case 1:
1493             if (mfa->module != ep->info.mfa.module)
1494                 continue;
1495         case 0:
1496             break;
1497         default:
1498             ASSERT(0);
1499         }
1500 
1501         if (! flags.breakpoint) { /* Export entry call trace */
1502             if (on) {
1503                 erts_clear_call_trace_bif(&ep->info, 1);
1504                 erts_clear_mtrace_bif(&ep->info);
1505                 erts_set_call_trace_bif(&ep->info, match_prog_set, 0);
1506             } else { /* off */
1507                 erts_clear_call_trace_bif(&ep->info, 0);
1508             }
1509             matches++;
1510         } else { /* Breakpoint call trace */
1511             int m = 0;
1512 
1513             if (on) {
1514                 if (flags.local) {
1515                     erts_clear_call_trace_bif(&ep->info, 0);
1516                     erts_set_call_trace_bif(&ep->info, match_prog_set, 1);
1517                     m = 1;
1518                 }
1519                 if (flags.meta) {
1520                     erts_set_mtrace_bif(&ep->info, meta_match_prog_set,
1521                                         meta_tracer);
1522                     m = 1;
1523                 }
1524                 if (flags.call_time) {
1525                     erts_set_time_trace_bif(&ep->info, on);
1526                     /* I don't want to remove any other tracers */
1527                     m = 1;
1528                 }
1529             } else { /* off */
1530                 if (flags.local) {
1531                     erts_clear_call_trace_bif(&ep->info, 1);
1532                     m = 1;
1533                 }
1534                 if (flags.meta) {
1535                     erts_clear_mtrace_bif(&ep->info);
1536                     m = 1;
1537                 }
1538                 if (flags.call_time) {
1539                     erts_clear_time_trace_bif(&ep->info);
1540                     m = 1;
1541                 }
1542             }
1543             matches += m;
1544         }
1545     }
1546 
1547     /*
1548     ** So, now for breakpoint tracing
1549     */
1550     erts_bp_match_functions(&finish_bp.f, mfa, specified);
1551     if (on) {
1552 	if (! flags.breakpoint) {
1553 	    erts_clear_all_breaks(&finish_bp.f);
1554 	} else {
1555 	    if (flags.local) {
1556 		erts_set_trace_break(&finish_bp.f, match_prog_set);
1557 	    }
1558 	    if (flags.meta) {
1559 		erts_set_mtrace_break(&finish_bp.f, meta_match_prog_set,
1560 				      meta_tracer);
1561 	    }
1562 	    if (flags.call_count) {
1563 		erts_set_count_break(&finish_bp.f, on);
1564 	    }
1565 	    if (flags.call_time) {
1566 		erts_set_time_break(&finish_bp.f, on);
1567 	    }
1568 	}
1569     } else {
1570 	if (flags.local) {
1571 	    erts_clear_trace_break(&finish_bp.f);
1572 	}
1573 	if (flags.meta) {
1574 	    erts_clear_mtrace_break(&finish_bp.f);
1575 	}
1576 	if (flags.call_count) {
1577 	    erts_clear_count_break(&finish_bp.f);
1578 	}
1579 	if (flags.call_time) {
1580 	    erts_clear_time_break(&finish_bp.f);
1581 	}
1582     }
1583 
1584     finish_bp.current = 0;
1585     finish_bp.install = on;
1586     finish_bp.local = flags.breakpoint;
1587 
1588     if (is_blocking) {
1589 	ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
1590 	while (erts_finish_breakpointing()) {
1591 	    /* Empty loop body */
1592 	}
1593 	finish_bp.current = -1;
1594     }
1595 
1596     if (flags.breakpoint) {
1597 	matches += finish_bp.f.matched;
1598     } else {
1599 	matches += finish_bp.e.matched;
1600     }
1601     return matches;
1602 }
1603 
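/*
 * Handles the 'send' and 'receive' trace events of trace_pattern, e.g.
 * (illustrative) erlang:trace_pattern(send, [{['_', '_'], [], []}], []).
 * The new match spec is staged here and activated via
 * erts_finish_breakpointing().
 */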
1604 int
1605 erts_set_tracing_event_pattern(Eterm event, Binary* match_spec, int on)
1606 {
1607     ErtsBpIndex ix = erts_staging_bp_ix();
1608     ErtsTracingEvent* st;
1609 
1610     switch (event) {
1611     case am_send: st = &erts_send_tracing[ix]; break;
1612     case am_receive: st = &erts_receive_tracing[ix]; break;
1613     default: return -1;
1614     }
1615 
1616     MatchSetUnref(st->match_spec);
1617 
1618     st->on = on;
1619     st->match_spec = match_spec;
1620     MatchSetRef(match_spec);
1621 
1622     finish_bp.current = 1;  /* prepare phase not needed for event trace */
1623     finish_bp.install = on;
1624     finish_bp.e.matched = 0;
1625     finish_bp.e.matching = NULL;
1626     finish_bp.f.matched = 0;
1627     finish_bp.f.matching = NULL;
1628 
1629     return 1;
1630 }
1631 
1632 static void
1633 consolidate_event_tracing(ErtsTracingEvent te[])
1634 {
1635     ErtsTracingEvent* src = &te[erts_active_bp_ix()];
1636     ErtsTracingEvent* dst = &te[erts_staging_bp_ix()];
1637 
1638     MatchSetUnref(dst->match_spec);
1639     dst->on = src->on;
1640     dst->match_spec = src->match_spec;
1641     MatchSetRef(dst->match_spec);
1642 }
1643 
1644 int
1645 erts_finish_breakpointing(void)
1646 {
1647     ERTS_LC_ASSERT(erts_has_code_write_permission());
1648 
1649     /*
1650      * Memory barriers will be issued for all schedulers *before*
1651      * each of the stages below. (Unless the other schedulers
1652      * are blocked, in which case memory barriers will be issued
1653      * when they are awakened.)
1654      */
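    /*
     * Phase summary (a reading aid, derived from the cases below):
     *   0: install new breakpoint structs and redirect traced BIFs
     *   1: commit the staged breakpoint data (erts_commit_staged_bp)
     *   2: remove breakpoint instructions that are no longer needed
     *      and reset untraced BIFs
     *   3: consolidate breakpoint data and free the match lists; done
     */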
1655     switch (finish_bp.current++) {
1656     case 0:
1657 	/*
1658 	 * At this point, in all functions that are to be breakpointed,
1659 	 * a pointer to a GenericBp struct has already been added.
1660 	 *
1661 	 * Insert the new breakpoints (if any) into the
1662 	 * code. Different schedulers may see the breakpoint instructions
1663 	 * at different times, but it does not matter since the newly
1664 	 * added breakpoints are disabled.
1665 	 */
1666 	if (finish_bp.install) {
1667 	    if (finish_bp.local) {
1668 		erts_install_breakpoints(&finish_bp.f);
1669 	    } else {
1670 		install_exp_breakpoints(&finish_bp.e);
1671 	    }
1672 	}
1673 	setup_bif_trace();
1674 	return 1;
1675     case 1:
1676 	/*
1677 	 * Switch index for the breakpoint data, activating the staged
1678 	 * data. (Depending on the changes in the breakpoint data,
1679 	 * that could either activate breakpoints or disable
1680 	 * breakpoints.)
1681 	 */
1682 	erts_commit_staged_bp();
1683 	return 1;
1684     case 2:
1685 	/*
1686 	 * Remove breakpoint instructions for disabled breakpoints
1687 	 * (if any).
1688 	 */
1689 	if (finish_bp.install) {
1690 	    if (finish_bp.local) {
1691 		uninstall_exp_breakpoints(&finish_bp.e);
1692 	    } else {
1693 		erts_uninstall_breakpoints(&finish_bp.f);
1694 	    }
1695 	} else {
1696 	    if (finish_bp.local) {
1697 		erts_uninstall_breakpoints(&finish_bp.f);
1698 	    } else {
1699 		uninstall_exp_breakpoints(&finish_bp.e);
1700 	    }
1701 	}
1702 	reset_bif_trace();
1703 	return 1;
1704     case 3:
1705 	/*
1706 	 * Now all breakpoints have either been inserted or removed.
1707 	 * For all updated breakpoints, copy the active breakpoint
1708 	 * data to the staged breakpoint data to make them equal
1709 	 * (simplifying for the next time breakpoints are to be
1710 	 * updated).  If any breakpoints have been totally disabled,
1711 	 * deallocate the GenericBp structs for them.
1712 	 */
1713 	erts_consolidate_bif_bp_data();
1714 	clean_export_entries(&finish_bp.e);
1715 	erts_consolidate_bp_data(&finish_bp.e, 0);
1716 	erts_consolidate_bp_data(&finish_bp.f, 1);
1717 	erts_bp_free_matched_functions(&finish_bp.e);
1718 	erts_bp_free_matched_functions(&finish_bp.f);
1719         consolidate_event_tracing(erts_send_tracing);
1720 	consolidate_event_tracing(erts_receive_tracing);
1721 	return 0;
1722     default:
1723 	ASSERT(0);
1724     }
1725     return 0;
1726 }
1727 
1728 static void
1729 install_exp_breakpoints(BpFunctions* f)
1730 {
1731     const ErtsCodeIndex code_ix = erts_active_code_ix();
1732     BpFunction* fp = f->matching;
1733     Uint ne = f->matched;
1734     Uint i;
1735 
1736     for (i = 0; i < ne; i++) {
1737 	Export* ep = ErtsContainerStruct(fp[i].ci, Export, info);
1738 
1739 	ep->addressv[code_ix] = ep->beam;
1740     }
1741 }
1742 
1743 static void
1744 uninstall_exp_breakpoints(BpFunctions* f)
1745 {
1746     const ErtsCodeIndex code_ix = erts_active_code_ix();
1747     BpFunction* fp = f->matching;
1748     Uint ne = f->matched;
1749     Uint i;
1750 
1751     for (i = 0; i < ne; i++) {
1752 	Export* ep = ErtsContainerStruct(fp[i].ci, Export, info);
1753 
1754 	if (ep->addressv[code_ix] != ep->beam) {
1755 	    continue;
1756 	}
1757 	ASSERT(BeamIsOpCode(ep->beam[0], op_trace_jump_W));
1758 	ep->addressv[code_ix] = (BeamInstr *) ep->beam[1];
1759     }
1760 }
1761 
1762 static void
1763 clean_export_entries(BpFunctions* f)
1764 {
1765     const ErtsCodeIndex code_ix = erts_active_code_ix();
1766     BpFunction* fp = f->matching;
1767     Uint ne = f->matched;
1768     Uint i;
1769 
1770     for (i = 0; i < ne; i++) {
1771 	Export* ep = ErtsContainerStruct(fp[i].ci, Export, info);
1772 
1773 	if (ep->addressv[code_ix] == ep->beam) {
1774 	    continue;
1775 	}
1776 	if (BeamIsOpCode(ep->beam[0], op_trace_jump_W)) {
1777 	    ep->beam[0] = (BeamInstr) 0;
1778 	    ep->beam[1] = (BeamInstr) 0;
1779 	}
1780     }
1781 }
1782 
1783 static void
1784 setup_bif_trace(void)
1785 {
1786     int i;
1787 
1788     for (i = 0; i < BIF_SIZE; ++i) {
1789 	Export *ep = bif_export[i];
1790 	GenericBp* g = ep->info.u.gen_bp;
1791 	if (g) {
1792 	    if (ExportIsBuiltIn(ep)) {
1793 		ASSERT(ep->beam[1]);
1794 		ep->beam[1] = (BeamInstr) bif_table[i].traced;
1795 	    }
1796 	}
1797     }
1798 }
1799 
1800 static void
1801 reset_bif_trace(void)
1802 {
1803     int i;
1804     ErtsBpIndex active = erts_active_bp_ix();
1805 
1806     for (i = 0; i < BIF_SIZE; ++i) {
1807 	Export *ep = bif_export[i];
1808 	GenericBp* g = ep->info.u.gen_bp;
1809 	if (g && g->data[active].flags == 0) {
1810 	    if (ExportIsBuiltIn(ep)) {
1811 		ASSERT(ep->beam[1]);
1812 		ep->beam[1] = (BeamInstr) bif_table[i].f;
1813 	    }
1814 	}
1815     }
1816 }
1817 
1818 /*
1819  * Sequential tracing
1820  *
1821  * The sequential trace token is internally implemented as
1822  * a tuple
1823  *         {Flags, Label, Serial, Sender, LastSerial}
1824  *
1825  * where
1826  *       - Flags is an integer (using masks 1, 2, and 4, for send,
1827  *         receive and print, respectively),
1828  *       - Label is any term, Serial is for now an integer (XXX: it
1829  *         should be a list reflecting split traces), and
1830  *       - Sender is the pid of the sender (i.e. the current process,
1831  *         except immediately after a message reception, in which case
1832  *         it is the pid of the process that sent the message).
1833  *
1834  */
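/*
 * Illustrative example (values invented for this comment): a token for
 * a process <0.42.0> with the send and print flags set (1 | 4 = 5),
 * label 17, current serial 5 and last serial 4 would look like
 *
 *         {5, 17, 5, <0.42.0>, 4}
 *
 * The accessors used below (SEQ_TRACE_TOKEN_FLAGS(), _LABEL(),
 * _SERIAL() and _LASTCNT()) pick out the Flags, Label, Serial and
 * LastSerial positions of this tuple, matching the layout built by
 * new_seq_trace_token().
 */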
1835 
1836 BIF_RETTYPE seq_trace_2(BIF_ALIST_2)
1837 {
1838     Eterm res;
1839     res = erts_seq_trace(BIF_P, BIF_ARG_1, BIF_ARG_2, 1);
1840     if (is_non_value(res)) {
1841 	BIF_ERROR(BIF_P, BADARG);
1842     }
1843     BIF_RET(res);
1844 }
1845 
1846 Eterm erts_seq_trace(Process *p, Eterm arg1, Eterm arg2,
1847 			  int build_result)
1848 {
1849     Eterm flags;
1850     Eterm old_value = am_true;
1851     Eterm* hp;
1852     int current_flag;
1853 
1854     if (!is_atom(arg1)) {
1855 	return THE_NON_VALUE;
1856     }
1857 
1858 
1859     if (arg1 == am_send) {
1860 	current_flag = SEQ_TRACE_SEND;
1861     } else if (arg1 == am_receive) {
1862 	current_flag = SEQ_TRACE_RECEIVE;
1863     } else if (arg1 == am_print) {
1864 	current_flag = SEQ_TRACE_PRINT;
1865     } else if (arg1 == am_timestamp) {
1866 	current_flag = SEQ_TRACE_NOW_TS;
1867     } else if (arg1 == am_strict_monotonic_timestamp) {
1868 	current_flag = SEQ_TRACE_STRICT_MON_TS;
1869     } else if (arg1 == am_monotonic_timestamp) {
1870 	current_flag = SEQ_TRACE_MON_TS;
1871     }
1872     else
1873 	current_flag = 0;
1874 
1875     if (current_flag && ( (arg2 == am_true) || (arg2 == am_false)) ) {
1876 	/* Flags */
1877         new_seq_trace_token(p, 0);
1878         flags = unsigned_val(SEQ_TRACE_TOKEN_FLAGS(p));
1879 	if (build_result) {
1880 	    old_value = flags & current_flag ? am_true : am_false;
1881 	}
1882 	if (arg2 == am_true)
1883 	    SEQ_TRACE_TOKEN_FLAGS(p) = make_small(flags|current_flag);
1884 	else if (arg2 == am_false)
1885 	    SEQ_TRACE_TOKEN_FLAGS(p) = make_small(flags&~current_flag);
1886 	else {
1887 	    return THE_NON_VALUE;
1888 	}
1889 	return old_value;
1890     }
1891     else if (arg1 == am_label) {
1892         new_seq_trace_token(p, is_not_immed(arg2));
1893 	if (build_result) {
1894 	    old_value = SEQ_TRACE_TOKEN_LABEL(p);
1895 	}
1896         SEQ_TRACE_TOKEN_LABEL(p) = arg2;
1897     	return old_value;
1898     }
1899     else if (arg1 == am_serial) {
1900 	Eterm* tp;
1901 	if (is_not_tuple(arg2)) {
1902 	    return THE_NON_VALUE;
1903 	}
1904 	tp = tuple_val(arg2);
1905 	if ((*tp != make_arityval(2)) || is_not_small(*(tp+1)) || is_not_small(*(tp+2))) {
1906 	    return THE_NON_VALUE;
1907         }
1908         new_seq_trace_token(p, 0);
1909 	if (build_result) {
1910 	    hp = HAlloc(p,3);
1911 	    old_value = TUPLE2(hp, SEQ_TRACE_TOKEN_LASTCNT(p),
1912 			       SEQ_TRACE_TOKEN_SERIAL(p));
1913 	}
1914 	SEQ_TRACE_TOKEN_LASTCNT(p) = *(tp+1);
1915  	SEQ_TRACE_TOKEN_SERIAL(p) = *(tp+2);
1916 	p->seq_trace_clock = unsigned_val(*(tp+2));
1917 	p->seq_trace_lastcnt = unsigned_val(*(tp+1));
1918     	return old_value;
1919     }
1920     else if (arg1 == am_sequential_trace_token) {
1921 	if (is_not_nil(arg2)) {
1922 	    return THE_NON_VALUE;
1923         }
1924 	if (build_result) {
1925 #ifdef USE_VM_PROBES
1926 	    old_value = (SEQ_TRACE_TOKEN(p) == am_have_dt_utag) ? NIL : SEQ_TRACE_TOKEN(p);
1927 #else
1928 	    old_value = SEQ_TRACE_TOKEN(p);
1929 #endif
1930 	}
1931 #ifdef USE_VM_PROBES
1932         SEQ_TRACE_TOKEN(p) = (DT_UTAG(p) != NIL) ? am_have_dt_utag : NIL;
1933 #else
1934         SEQ_TRACE_TOKEN(p) = NIL;
1935 #endif
1936         return old_value;
1937     }
1938     else {
1939 	return THE_NON_VALUE;
1940     }
1941 }
1942 
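/*
 * The ensure_new_heap argument is used by the am_label case above: a
 * non-immediate label lives on the young heap, so a token tuple that
 * resides on the old heap must first be copied back to the young heap
 * before its label slot is overwritten, because the old heap must
 * never contain pointers into the young heap.
 */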
1943 static void
1944 new_seq_trace_token(Process* p, int ensure_new_heap)
1945 {
1946     Eterm* hp;
1947 
1948     if (have_no_seqtrace(SEQ_TRACE_TOKEN(p))) {
1949 	hp = HAlloc(p, 6);
1950 	SEQ_TRACE_TOKEN(p) = TUPLE5(hp, make_small(0),		/* Flags  */
1951 				    make_small(0),		/* Label  */
1952 				    make_small(0),		/* Serial */
1953 				    p->common.id, /* Internal pid */	/* From   */
1954 				    make_small(p->seq_trace_lastcnt));
1955     }
1956     else if (ensure_new_heap) {
1957         Eterm* tpl = tuple_val(SEQ_TRACE_TOKEN(p));
1958         ASSERT(arityval(tpl[0]) == 5);
1959         if (ErtsInArea(tpl, OLD_HEAP(p),
1960                        (OLD_HEND(p) - OLD_HEAP(p))*sizeof(Eterm))) {
1961             hp = HAlloc(p, 6);
1962             sys_memcpy(hp, tpl, 6*sizeof(Eterm));
1963             SEQ_TRACE_TOKEN(p) = make_tuple(hp);
1964         }
1965     }
1966 }
1967 
1968 BIF_RETTYPE erl_seq_trace_info(Process *p, Eterm item)
1969 {
1970     Eterm res;
1971     Eterm* hp;
1972     Uint current_flag;
1973 
1974     if (is_not_atom(item)) {
1975 	BIF_ERROR(p, BADARG);
1976     }
1977 
1978     if (have_no_seqtrace(SEQ_TRACE_TOKEN(p))) {
1979 	if ((item == am_send)  || (item == am_receive) ||
1980 	    (item == am_print) || (item == am_timestamp)
1981 	    || (item == am_monotonic_timestamp)
1982 	    || (item == am_strict_monotonic_timestamp)) {
1983 	    hp = HAlloc(p,3);
1984 	    res = TUPLE2(hp, item, am_false);
1985 	    BIF_RET(res);
1986 	} else if ((item == am_label) || (item == am_serial)) {
1987 	    BIF_RET(NIL);
1988 	} else {
1989 	    goto error;
1990 	}
1991     }
1992 
1993     if (item == am_send) {
1994 	current_flag = SEQ_TRACE_SEND;
1995     } else if (item == am_receive) {
1996 	current_flag = SEQ_TRACE_RECEIVE;
1997     } else if (item == am_print) {
1998 	current_flag = SEQ_TRACE_PRINT;
1999     } else if (item == am_timestamp) {
2000 	current_flag = SEQ_TRACE_NOW_TS;
2001     } else if (item == am_strict_monotonic_timestamp) {
2002 	current_flag = SEQ_TRACE_STRICT_MON_TS;
2003     } else if (item == am_monotonic_timestamp) {
2004 	current_flag = SEQ_TRACE_MON_TS;
2005     } else {
2006 	current_flag = 0;
2007     }
2008 
2009     if (current_flag) {
2010 	res = unsigned_val(SEQ_TRACE_TOKEN_FLAGS(p)) & current_flag ?
2011 	    am_true : am_false;
2012     } else if (item == am_label) {
2013 	res = SEQ_TRACE_TOKEN_LABEL(p);
2014     } else if (item  == am_serial) {
2015 	hp = HAlloc(p, 3);
2016 	res = TUPLE2(hp, SEQ_TRACE_TOKEN_LASTCNT(p), SEQ_TRACE_TOKEN_SERIAL(p));
2017     } else {
2018     error:
2019 	BIF_ERROR(p, BADARG);
2020     }
2021     hp = HAlloc(p, 3);
2022     res = TUPLE2(hp, item, res);
2023     BIF_RET(res);
2024 }
2025 
2026 BIF_RETTYPE seq_trace_info_1(BIF_ALIST_1)
2027 {
2028     BIF_RET(erl_seq_trace_info(BIF_P, BIF_ARG_1));
2029 }
2030 
2031 /*
2032    seq_trace_print(Message) -> true | false
2033    This function passes Message to the system_tracer
2034    if the trace_token is not NIL.
2035    Returns true if Message is passed, otherwise false.
2036    Note that true is returned whenever the conditions for passing Message
2037    are fulfilled, even though nothing is passed if no system_seq_tracer is set.
2038  */
2039 BIF_RETTYPE seq_trace_print_1(BIF_ALIST_1)
2040 {
2041     if (have_no_seqtrace(SEQ_TRACE_TOKEN(BIF_P))) {
2042 	BIF_RET(am_false);
2043     }
2044     seq_trace_update_send(BIF_P);
2045     seq_trace_output(SEQ_TRACE_TOKEN(BIF_P), BIF_ARG_1,
2046 		     SEQ_TRACE_PRINT, NIL, BIF_P);
2047     BIF_RET(am_true);
2048 }
2049 
2050 /*
2051    seq_trace_print(Label,Message) -> true | false
2052    This function passes Message to the system_tracer
2053    if the trace_token is not NIL and the trace_token label is equal to
2054    Label. Returns true if Message is passed, otherwise false.
2055    Note that true is returned whenever the conditions for passing Message
2056    are fulfilled, even though nothing is passed if no system_seq_tracer is set.
2057  */
2058 BIF_RETTYPE seq_trace_print_2(BIF_ALIST_2)
2059 {
2060     if (have_no_seqtrace(SEQ_TRACE_TOKEN(BIF_P))) {
2061 	BIF_RET(am_false);
2062     }
2063     if (!EQ(BIF_ARG_1, SEQ_TRACE_TOKEN_LABEL(BIF_P)))
2064 	BIF_RET(am_false);
2065     seq_trace_update_send(BIF_P);
2066     seq_trace_output(SEQ_TRACE_TOKEN(BIF_P), BIF_ARG_2,
2067 		     SEQ_TRACE_PRINT, NIL, BIF_P);
2068     BIF_RET(am_true);
2069 }
2070 
2071 void erts_system_monitor_clear(Process *c_p) {
2072     if (c_p) {
2073 	erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
2074 	erts_thr_progress_block();
2075     }
2076     erts_set_system_monitor(NIL);
2077     erts_system_monitor_long_gc = 0;
2078     erts_system_monitor_long_schedule = 0;
2079     erts_system_monitor_large_heap = 0;
2080     erts_system_monitor_flags.busy_port = 0;
2081     erts_system_monitor_flags.busy_dist_port = 0;
2082     if (c_p) {
2083 	erts_thr_progress_unblock();
2084 	erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
2085     }
2086 }
2087 
2088 
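/*
 * system_monitor_get() builds its result in two passes: first it
 * computes the number of heap words needed (hsz), then it allocates
 * once and constructs the term.  Each list cell costs 2 words and each
 * 2-tuple 3 words, which is where the "2" and "2+3" increments come
 * from; erts_bld_uint() with a NULL heap pointer only adds to hsz (and
 * adds nothing for values that fit in an immediate small).
 * Illustrative example (settings invented for this comment): with
 * long_gc = 500 and busy_port enabled, hsz = 3 + (2+3) + 2 = 10 words
 * and the result is {MonitorPid, [busy_port, {long_gc, 500}]}.
 */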
2089 static Eterm system_monitor_get(Process *p)
2090 {
2091     Eterm *hp;
2092     Eterm system_monitor = erts_get_system_monitor();
2093 
2094     if (system_monitor == NIL) {
2095 	return am_undefined;
2096     } else {
2097 	Eterm res;
2098 	Uint hsz = 3 + (erts_system_monitor_flags.busy_dist_port ? 2 : 0) +
2099 	    (erts_system_monitor_flags.busy_port ? 2 : 0);
2100 	Eterm long_gc = NIL;
2101 	Eterm long_schedule = NIL;
2102 	Eterm large_heap = NIL;
2103 
2104 	if (erts_system_monitor_long_gc != 0) {
2105 	    hsz += 2+3;
2106 	    (void) erts_bld_uint(NULL, &hsz, erts_system_monitor_long_gc);
2107 	}
2108 	if (erts_system_monitor_long_schedule != 0) {
2109 	    hsz += 2+3;
2110 	    (void) erts_bld_uint(NULL, &hsz, erts_system_monitor_long_schedule);
2111 	}
2112 	if (erts_system_monitor_large_heap != 0) {
2113 	    hsz += 2+3;
2114 	    (void) erts_bld_uint(NULL, &hsz, erts_system_monitor_large_heap);
2115 	}
2116 
2117 	hp = HAlloc(p, hsz);
2118 	if (erts_system_monitor_long_gc != 0) {
2119 	    long_gc = erts_bld_uint(&hp, NULL, erts_system_monitor_long_gc);
2120 	}
2121 	if (erts_system_monitor_long_schedule != 0) {
2122 	    long_schedule = erts_bld_uint(&hp, NULL,
2123 					  erts_system_monitor_long_schedule);
2124 	}
2125 	if (erts_system_monitor_large_heap != 0) {
2126 	    large_heap = erts_bld_uint(&hp, NULL, erts_system_monitor_large_heap);
2127 	}
2128 	res = NIL;
2129 	if (long_gc != NIL) {
2130 	    Eterm t = TUPLE2(hp, am_long_gc, long_gc); hp += 3;
2131 	    res = CONS(hp, t, res); hp += 2;
2132 	}
2133 	if (long_schedule != NIL) {
2134 	    Eterm t = TUPLE2(hp, am_long_schedule, long_schedule); hp += 3;
2135 	    res = CONS(hp, t, res); hp += 2;
2136 	}
2137 	if (large_heap != NIL) {
2138 	    Eterm t = TUPLE2(hp, am_large_heap, large_heap); hp += 3;
2139 	    res = CONS(hp, t, res); hp += 2;
2140 	}
2141 	if (erts_system_monitor_flags.busy_port) {
2142 	    res = CONS(hp, am_busy_port, res); hp += 2;
2143 	}
2144 	if (erts_system_monitor_flags.busy_dist_port) {
2145 	    res = CONS(hp, am_busy_dist_port, res); hp += 2;
2146 	}
2147 	return TUPLE2(hp, system_monitor, res);
2148     }
2149 }
2150 
2151 
2152 BIF_RETTYPE system_monitor_0(BIF_ALIST_0)
2153 {
2154     BIF_RET(system_monitor_get(BIF_P));
2155 }
2156 
2157 BIF_RETTYPE system_monitor_1(BIF_ALIST_1)
2158 {
2159     Process* p = BIF_P;
2160     Eterm spec = BIF_ARG_1;
2161 
2162     if (spec == am_undefined) {
2163 	BIF_RET(system_monitor(p, spec, NIL));
2164     } else if (is_tuple(spec)) {
2165 	Eterm *tp = tuple_val(spec);
2166 	if (tp[0] != make_arityval(2)) goto error;
2167 	BIF_RET(system_monitor(p, tp[1], tp[2]));
2168     }
2169  error:
2170     BIF_ERROR(p, BADARG);
2171 }
2172 
2173 BIF_RETTYPE system_monitor_2(BIF_ALIST_2)
2174 {
2175     return system_monitor(BIF_P, BIF_ARG_1, BIF_ARG_2);
2176 }
2177 
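/*
 * The settings list parsed below may contain {long_gc, Time},
 * {long_schedule, Time}, {large_heap, Size}, busy_port and
 * busy_dist_port, in any order.  Illustrative call with invented
 * values: erlang:system_monitor(MonitorPid, [{long_gc, 500},
 * busy_port]).  Values below 1 (or below 16384 words for large_heap)
 * are silently raised to those minimums, as the parsing loop shows.
 */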
2178 static BIF_RETTYPE
2179 system_monitor(Process *p, Eterm monitor_pid, Eterm list)
2180 {
2181     Eterm prev;
2182     int system_blocked = 0;
2183 
2184     if (monitor_pid == am_undefined || list == NIL) {
2185 	prev = system_monitor_get(p);
2186 	erts_system_monitor_clear(p);
2187 	BIF_RET(prev);
2188     }
2189     if (is_not_list(list)) goto error;
2190     else {
2191 	Uint long_gc, long_schedule, large_heap;
2192 	int busy_port, busy_dist_port;
2193 
2194 	system_blocked = 1;
2195 	erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
2196 	erts_thr_progress_block();
2197         erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
2198 
2199 	if (!erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, monitor_pid, 0))
2200 	    goto error;
2201 
2202 	for (long_gc = 0, long_schedule = 0, large_heap = 0,
2203 		 busy_port = 0, busy_dist_port = 0;
2204 	     is_list(list);
2205 	     list = CDR(list_val(list))) {
2206 	    Eterm t = CAR(list_val(list));
2207 	    if (is_tuple(t)) {
2208 		Eterm *tp = tuple_val(t);
2209 		if (arityval(tp[0]) != 2) goto error;
2210 		if (tp[1] == am_long_gc) {
2211 		    if (! term_to_Uint(tp[2], &long_gc)) goto error;
2212 		    if (long_gc < 1) long_gc = 1;
2213 		} else if (tp[1] == am_long_schedule) {
2214 		    if (! term_to_Uint(tp[2], &long_schedule)) goto error;
2215 		    if (long_schedule < 1) long_schedule = 1;
2216 		} else if (tp[1] == am_large_heap) {
2217 		    if (! term_to_Uint(tp[2], &large_heap)) goto error;
2218 		    if (large_heap < 16384) large_heap = 16384;
2219 		    /* 16 Kword is not an unnatural heap size */
2220 		} else goto error;
2221 	    } else if (t == am_busy_port) {
2222 		busy_port = !0;
2223 	    } else if (t == am_busy_dist_port) {
2224 		busy_dist_port = !0;
2225 	    } else goto error;
2226 	}
2227 	if (is_not_nil(list)) goto error;
2228 	prev = system_monitor_get(p);
2229 	erts_set_system_monitor(monitor_pid);
2230 	erts_system_monitor_long_gc = long_gc;
2231 	erts_system_monitor_long_schedule = long_schedule;
2232 	erts_system_monitor_large_heap = large_heap;
2233 	erts_system_monitor_flags.busy_port = !!busy_port;
2234 	erts_system_monitor_flags.busy_dist_port = !!busy_dist_port;
2235 
2236 	erts_thr_progress_unblock();
2237 	BIF_RET(prev);
2238     }
2239 
2240  error:
2241 
2242     if (system_blocked) {
2243 	erts_thr_progress_unblock();
2244     }
2245 
2246     BIF_ERROR(p, BADARG);
2247 }
2248 
2249 /* Begin: Trace for System Profiling */
2250 
2251 void erts_system_profile_clear(Process *c_p) {
2252     if (c_p) {
2253 	erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
2254 	erts_thr_progress_block();
2255     }
2256     erts_set_system_profile(NIL);
2257     erts_system_profile_flags.scheduler = 0;
2258     erts_system_profile_flags.runnable_procs = 0;
2259     erts_system_profile_flags.runnable_ports = 0;
2260     erts_system_profile_flags.exclusive = 0;
2261     if (c_p) {
2262 	erts_thr_progress_unblock();
2263 	erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
2264     }
2265 }
2266 
2267 static Eterm system_profile_get(Process *p) {
2268     Eterm *hp;
2269     Eterm system_profile = erts_get_system_profile();
2270     if (system_profile == NIL) {
2271     	return am_undefined;
2272     } else {
2273     	Eterm res;
2274 	Uint hsz = 3
2275 		 + (erts_system_profile_flags.scheduler ? 2 : 0)
2276 		 + (erts_system_profile_flags.runnable_ports ? 2 : 0)
2277 		 + (erts_system_profile_flags.exclusive ? 2 : 0)
2278 		 + (erts_system_profile_flags.runnable_procs ? 2 : 0);
2279 
2280 	hp = HAlloc(p, hsz);
2281 	res = NIL;
2282 	if (erts_system_profile_flags.runnable_ports) {
2283 	    res = CONS(hp, am_runnable_ports, res); hp += 2;
2284 	}
2285 	if (erts_system_profile_flags.runnable_procs) {
2286 	    res = CONS(hp, am_runnable_procs, res); hp += 2;
2287 	}
2288 	if (erts_system_profile_flags.scheduler) {
2289 	    res = CONS(hp, am_scheduler, res); hp += 2;
2290 	}
2291 	if (erts_system_profile_flags.exclusive) {
2292 	    res = CONS(hp, am_exclusive, res); hp += 2;
2293 	}
2294 
2295     	return TUPLE2(hp, system_profile, res);
2296     }
2297 }
2298 
2299 BIF_RETTYPE system_profile_0(BIF_ALIST_0)
2300 {
2301     BIF_RET(system_profile_get(BIF_P));
2302 }
2303 
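/*
 * The option list parsed below may contain runnable_procs,
 * runnable_ports, scheduler, exclusive and one of the timestamp
 * options (timestamp, monotonic_timestamp or
 * strict_monotonic_timestamp); plain timestamp is the default.
 * Illustrative call with invented values:
 * erlang:system_profile(ProfilerPidOrPort, [runnable_procs,
 * monotonic_timestamp]).
 */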
2304 BIF_RETTYPE system_profile_2(BIF_ALIST_2)
2305 {
2306     Process *p = BIF_P;
2307     Eterm profiler = BIF_ARG_1;
2308     Eterm list = BIF_ARG_2;
2309     Eterm prev;
2310     int system_blocked = 0;
2311     Process *profiler_p = NULL;
2312     Port *profiler_port = NULL;
2313     int ts;
2314 
2315     if (profiler == am_undefined || list == NIL) {
2316 	prev = system_profile_get(p);
2317 	erts_system_profile_clear(p);
2318 	BIF_RET(prev);
2319     }
2320     if (is_not_list(list)) {
2321 	goto error;
2322     } else {
2323 	int scheduler, runnable_procs, runnable_ports, exclusive;
2324 	system_blocked = 1;
2325 
2326 	erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
2327 	erts_thr_progress_block();
2328 
2329 	/* Check if valid process, no locks are taken */
2330 
2331 	if (is_internal_pid(profiler)) {
2332 	    profiler_p = erts_proc_lookup(profiler);
2333 	    if (!profiler_p)
2334 		goto error;
2335 	} else if (is_internal_port(profiler)) {
2336 	    profiler_port = (erts_port_lookup(
2337 				 profiler,
2338 				 ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP));
2339 	    if (!profiler_port)
2340 		goto error;
2341 	} else {
2342 	    goto error;
2343 	}
2344 
2345 	for (ts = ERTS_TRACE_FLG_NOW_TIMESTAMP, scheduler = 0,
2346 		 runnable_ports = 0, runnable_procs = 0, exclusive = 0;
2347 	    is_list(list);
2348 	    list = CDR(list_val(list))) {
2349 
2350 	    Eterm t = CAR(list_val(list));
2351 	    if (t == am_runnable_procs) {
2352 	   	 runnable_procs = !0;
2353 	    } else if (t == am_runnable_ports) {
2354 		runnable_ports = !0;
2355 	    } else if (t == am_exclusive) {
2356 		exclusive = !0;
2357 	    } else if (t == am_scheduler) {
2358 		scheduler = !0;
2359 	    } else if (t == am_timestamp) {
2360 		ts = ERTS_TRACE_FLG_NOW_TIMESTAMP;
2361 	    } else if (t == am_strict_monotonic_timestamp) {
2362 		ts = ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP;
2363 	    } else if (t == am_monotonic_timestamp) {
2364 		ts = ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP;
2365 	    } else goto error;
2366 	}
2367 	if (is_not_nil(list)) goto error;
2368 	prev = system_profile_get(p);
2369 	erts_set_system_profile(profiler);
2370 
2371 	erts_system_profile_flags.scheduler = !!scheduler;
2372 	if (erts_system_profile_flags.scheduler)
2373 	    erts_system_profile_setup_active_schedulers();
2374 	erts_system_profile_flags.runnable_ports = !!runnable_ports;
2375 	erts_system_profile_flags.runnable_procs = !!runnable_procs;
2376 	erts_system_profile_flags.exclusive = !!exclusive;
2377 	erts_system_profile_ts_type = ts;
2378 	erts_thr_progress_unblock();
2379 	erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
2380 
2381 	BIF_RET(prev);
2382 
2383     }
2384 
2385     error:
2386 	if (system_blocked) {
2387 	    erts_thr_progress_unblock();
2388 	    erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
2389     	}
2390 
2391     BIF_ERROR(p, BADARG);
2392 }
2393 /* End: Trace for System Profiling */
2394 
2395 /* trace_delivered/1 sends an aux work message to all schedulers,
2396    and when all schedulers have acknowledged that they have seen
2397    the message, the trace_delivered message is sent to the requesting process.
2398 
2399    IMPORTANT: We have to make sure that all messages sent
2400    using enif_send have been delivered before we send the message
2401    to the caller.
2402 
2403    There used to be a separate implementation for when only a pid
2404    is passed in, but since this is not performance-critical code
2405    we now use the same approach for both.
2406 */
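/*
 * Concretely: trace_delivered_1() allocates an ErtsTraceDeliveredAll
 * with refc set to the number of schedulers and schedules
 * reply_trace_delivered_all() as misc aux work on every scheduler.
 * The scheduler that decrements refc to zero builds the message
 * {trace_delivered, Target, Ref} and sends it to the requesting
 * process, which can simply wait for that tuple after the BIF has
 * returned Ref.
 */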
2407 
2408 typedef struct {
2409     Process *proc;
2410     Eterm ref;
2411     Eterm ref_heap[ERTS_REF_THING_SIZE];
2412     Eterm target;
2413     erts_atomic32_t refc;
2414 } ErtsTraceDeliveredAll;
2415 
2416 static void
2417 reply_trace_delivered_all(void *vtdarp)
2418 {
2419     ErtsTraceDeliveredAll *tdarp = (ErtsTraceDeliveredAll *) vtdarp;
2420 
2421     if (erts_atomic32_dec_read_nob(&tdarp->refc) == 0) {
2422         Eterm ref_copy, msg;
2423         Process *rp = tdarp->proc;
2424         Eterm *hp = NULL;
2425         ErlOffHeap *ohp;
2426         ErlHeapFragment *bp;
2427         bp = new_message_buffer(4 + NC_HEAP_SIZE(tdarp->ref));
2428         hp = &bp->mem[0];
2429         ohp = &bp->off_heap;
2430 
2431         ref_copy = STORE_NC(&hp, ohp, tdarp->ref);
2432         msg = TUPLE3(hp, am_trace_delivered, tdarp->target, ref_copy);
2433 
2434         erts_send_sys_msg_proc(rp->common.id, rp->common.id, msg, bp);
2435 
2436 	erts_free(ERTS_ALC_T_MISC_AUX_WORK, vtdarp);
2437         erts_proc_dec_refc(rp);
2438     }
2439 }
2440 
2441 BIF_RETTYPE
2442 trace_delivered_1(BIF_ALIST_1)
2443 {
2444 
2445     if (BIF_ARG_1 == am_all || is_internal_pid(BIF_ARG_1)) {
2446         Eterm *hp, ref;
2447         ErtsTraceDeliveredAll *tdarp =
2448             erts_alloc(ERTS_ALC_T_MISC_AUX_WORK, sizeof(ErtsTraceDeliveredAll));
2449 
2450         tdarp->proc = BIF_P;
2451         ref = erts_make_ref(BIF_P);
2452         hp = &tdarp->ref_heap[0];
2453         tdarp->ref = STORE_NC(&hp, NULL, ref);
2454         tdarp->target = BIF_ARG_1;
2455         erts_atomic32_init_nob(&tdarp->refc,
2456                                    (erts_aint32_t) erts_no_schedulers);
2457         erts_proc_add_refc(BIF_P, 1);
2458         erts_schedule_multi_misc_aux_work(0,
2459                                           erts_no_schedulers,
2460                                           reply_trace_delivered_all,
2461                                           (void *) tdarp);
2462         BIF_RET(ref);
2463     } else {
2464         BIF_ERROR(BIF_P, BADARG);
2465     }
2466 }
2467