/*
 * %CopyrightBegin%
 *
 * Copyright Ericsson AB 2000-2020. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * %CopyrightEnd%
 */

#ifdef HAVE_CONFIG_H
#  include "config.h"
#endif

#include "sys.h"
#include "erl_vm.h"
#include "global.h"
#include "erl_process.h"
#include "beam_load.h"
#include "bif.h"
#include "error.h"
#include "erl_binary.h"
#include "beam_bp.h"
#include "erl_term.h"
#include "erl_nfunc_sched.h"

/* *************************************************************************
** Macros
*/

/*
** Memory allocation macros
*/
/* Breakpoint data */
#define Alloc(SZ)		erts_alloc(ERTS_ALC_T_BPD, (SZ))
#define ReAlloc(P, SZ)		erts_realloc(ERTS_ALC_T_BPD, (P), (SZ))
#define Free(P)			erts_free(ERTS_ALC_T_BPD, (P))

#if defined(ERTS_ENABLE_LOCK_CHECK)
#  define ERTS_REQ_PROC_MAIN_LOCK(P) \
      if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN, \
                                         __FILE__, __LINE__)
#  define ERTS_UNREQ_PROC_MAIN_LOCK(P) \
      if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
#else
#  define ERTS_REQ_PROC_MAIN_LOCK(P)
#  define ERTS_UNREQ_PROC_MAIN_LOCK(P)
#endif

#define ERTS_BPF_LOCAL_TRACE       0x01
#define ERTS_BPF_META_TRACE        0x02
#define ERTS_BPF_COUNT             0x04
#define ERTS_BPF_COUNT_ACTIVE      0x08
#define ERTS_BPF_DEBUG             0x10
#define ERTS_BPF_TIME_TRACE        0x20
#define ERTS_BPF_TIME_TRACE_ACTIVE 0x40
#define ERTS_BPF_GLOBAL_TRACE      0x80

#define ERTS_BPF_ALL               0xFF
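
/*
 * Note that only count and time tracing have a separate *_ACTIVE flag:
 * pausing such a breakpoint clears the ACTIVE flag while the base flag
 * and its data stay allocated, so it can later be restarted (see
 * set_function_break() below).
 */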

extern BeamInstr beam_return_to_trace[1];   /* OpCode(i_return_to_trace) */
extern BeamInstr beam_return_trace[1];      /* OpCode(i_return_trace) */
extern BeamInstr beam_exception_trace[1];   /* OpCode(i_exception_trace) */
extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */

erts_atomic32_t erts_active_bp_index;
erts_atomic32_t erts_staging_bp_index;
erts_mtx_t erts_dirty_bp_ix_mtx;

/*
 * Inlined helpers
 */

static ERTS_INLINE ErtsMonotonicTime
get_mtime(Process *c_p)
{
    return erts_get_monotonic_time(erts_proc_sched_data(c_p));
}

static ERTS_INLINE Uint32
acquire_bp_sched_ix(Process *c_p)
{
    ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
    ASSERT(esdp);
    if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
        erts_mtx_lock(&erts_dirty_bp_ix_mtx);
        return (Uint32) erts_no_schedulers;
    }
    return (Uint32) esdp->no - 1;
}

static ERTS_INLINE void
release_bp_sched_ix(Uint32 ix)
{
    if (ix == (Uint32) erts_no_schedulers)
        erts_mtx_unlock(&erts_dirty_bp_ix_mtx);
}
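
/*
 * A usage sketch for the two helpers above (this is the pattern used
 * by erts_trace_time_call() and friends below): each normal scheduler
 * owns slot esdp->no - 1 and needs no locking, while all dirty
 * schedulers share the extra slot erts_no_schedulers, serialized by
 * erts_dirty_bp_ix_mtx.
 *
 *     Uint32 six = acquire_bp_sched_ix(c_p);
 *     ... update bdt->hash[six] ...
 *     release_bp_sched_ix(six);
 */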

/* *************************************************************************
** Local prototypes
*/

/*
** Helpers
*/
static ErtsTracer do_call_trace(Process* c_p, ErtsCodeInfo *info, Eterm* reg,
                                int local, Binary* ms, ErtsTracer tracer);
static void set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
                      enum erts_break_op count_op, ErtsTracer tracer);
static void set_function_break(ErtsCodeInfo *ci,
                               Binary *match_spec,
                               Uint break_flags,
                               enum erts_break_op count_op,
                               ErtsTracer tracer);

static void clear_break(BpFunctions* f, Uint break_flags);
static int clear_function_break(ErtsCodeInfo *ci, Uint break_flags);

static BpDataTime* get_time_break(ErtsCodeInfo *ci);
static GenericBpData* check_break(ErtsCodeInfo *ci, Uint break_flags);

static void bp_meta_unref(BpMetaTracer *bmt);
static void bp_count_unref(BpCount *bcp);
static void bp_time_unref(BpDataTime *bdt);
static void consolidate_bp_data(Module *modp, ErtsCodeInfo *ci, int local);
static void uninstall_breakpoint(ErtsCodeInfo *ci);

/* bp_hash */
#define BP_TIME_ADD(pi0, pi1)                       \
    do {                                            \
        (pi0)->count   += (pi1)->count;             \
        (pi0)->time    += (pi1)->time;              \
    } while(0)

static void bp_hash_init(bp_time_hash_t *hash, Uint n);
static void bp_hash_rehash(bp_time_hash_t *hash, Uint n);
static ERTS_INLINE bp_data_time_item_t * bp_hash_get(bp_time_hash_t *hash, bp_data_time_item_t *sitem);
static ERTS_INLINE bp_data_time_item_t * bp_hash_put(bp_time_hash_t *hash, bp_data_time_item_t *sitem);
static void bp_hash_delete(bp_time_hash_t *hash);

/* *************************************************************************
** External interfaces
*/

void
erts_bp_init(void) {
    erts_atomic32_init_nob(&erts_active_bp_index, 0);
    erts_atomic32_init_nob(&erts_staging_bp_index, 1);
    erts_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index", NIL,
        ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
}


void
erts_bp_match_functions(BpFunctions* f, ErtsCodeMFA *mfa, int specified)
{
    ErtsCodeIndex code_ix = erts_active_code_ix();
    Uint max_funcs = 0;
    int current;
    int max_modules = module_code_size(code_ix);
    int num_modules = 0;
    Module* modp;
    Module** module;
    Uint i;

    module = (Module **) Alloc(max_modules*sizeof(Module *));
    num_modules = 0;
    for (current = 0; current < max_modules; current++) {
        modp = module_code(current, code_ix);
        if (modp->curr.code_hdr) {
            max_funcs += modp->curr.code_hdr->num_functions;
            module[num_modules++] = modp;
        }
    }

    f->matching = (BpFunction *) Alloc(max_funcs*sizeof(BpFunction));
    i = 0;
    for (current = 0; current < num_modules; current++) {
        BeamCodeHeader* code_hdr = module[current]->curr.code_hdr;
        ErtsCodeInfo* ci;
        Uint num_functions = (Uint)(UWord) code_hdr->num_functions;
        Uint fi;

        if (specified > 0) {
            if (mfa->module != make_atom(module[current]->module)) {
                /* Wrong module name */
                continue;
            }
        }

        for (fi = 0; fi < num_functions; fi++) {

            ci = code_hdr->functions[fi];
            ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
            if (erts_is_function_native(ci)) {
                continue;
            }
            switch (specified) {
            case 3:
                if (ci->mfa.arity != mfa->arity)
                    continue;
                /* Fall through */
            case 2:
                if (ci->mfa.function != mfa->function)
                    continue;
                /* Fall through */
            case 1:
                if (ci->mfa.module != mfa->module)
                    continue;
                /* Fall through */
            case 0:
                break;
            }
            /* Store match */
            f->matching[i].ci = ci;
            f->matching[i].mod = module[current];
            i++;
        }
    }
    f->matched = i;
    Free(module);
}

void
erts_bp_match_export(BpFunctions* f, ErtsCodeMFA *mfa, int specified)
{
    ErtsCodeIndex code_ix = erts_active_code_ix();
    int i;
    int num_exps = export_list_size(code_ix);
    int ne;

    f->matching = (BpFunction *) Alloc(num_exps*sizeof(BpFunction));
    ne = 0;
    for (i = 0; i < num_exps; i++) {
        BeamInstr *func;
        Export* ep;

        ep = export_list(i, code_ix);

        switch (specified) {
        case 3:
            if (mfa->arity != ep->info.mfa.arity)
                continue;
            /* Fall through */
        case 2:
            if (mfa->function != ep->info.mfa.function)
                continue;
            /* Fall through */
        case 1:
            if (mfa->module != ep->info.mfa.module)
                continue;
            /* Fall through */
        case 0:
            break;
        default:
            ASSERT(0);
        }

        func = ep->addressv[code_ix];

        if (func == ep->trampoline.raw) {
            if (BeamIsOpCode(*func, op_call_error_handler)) {
                continue;
            }
            ASSERT(BeamIsOpCode(*func, op_i_generic_breakpoint));
        } else if (erts_is_function_native(erts_code_to_codeinfo(func))) {
            continue;
        }

        f->matching[ne].ci = &ep->info;
        f->matching[ne].mod = erts_get_module(ep->info.mfa.module, code_ix);

        ne++;
    }
    f->matched = ne;
}

void
erts_bp_free_matched_functions(BpFunctions* f)
{
    if (f->matching) {
        Free(f->matching);
    }
    else ASSERT(f->matched == 0);
}

void
erts_consolidate_bp_data(BpFunctions* f, int local)
{
    BpFunction* fs = f->matching;
    Uint i;
    Uint n = f->matched;

    ERTS_LC_ASSERT(erts_has_code_write_permission());

    for (i = 0; i < n; i++) {
        consolidate_bp_data(fs[i].mod, fs[i].ci, local);
    }
}

static void
consolidate_bp_data(Module* modp, ErtsCodeInfo *ci, int local)
{
    GenericBp* g = ci->u.gen_bp;
    GenericBpData* src;
    GenericBpData* dst;
    Uint flags;

    if (g == 0) {
        return;
    }

    src = &g->data[erts_active_bp_ix()];
    dst = &g->data[erts_staging_bp_ix()];

    /*
     * The contents of the staging area may be out of date.
     * Decrement all reference counts.
     */

    flags = dst->flags;
    if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
        MatchSetUnref(dst->local_ms);
    }
    if (flags & ERTS_BPF_META_TRACE) {
        bp_meta_unref(dst->meta_tracer);
        MatchSetUnref(dst->meta_ms);
    }
    if (flags & ERTS_BPF_COUNT) {
        bp_count_unref(dst->count);
    }
    if (flags & ERTS_BPF_TIME_TRACE) {
        bp_time_unref(dst->time);
    }

    /*
     * If all flags are zero, deallocate all breakpoint data.
     */

    flags = dst->flags = src->flags;
    if (flags == 0) {
        if (modp) {
            if (local) {
                modp->curr.num_breakpoints--;
            } else {
                modp->curr.num_traced_exports--;
            }
            ASSERT(modp->curr.num_breakpoints >= 0);
            ASSERT(modp->curr.num_traced_exports >= 0);
            ASSERT(! BeamIsOpCode(*erts_codeinfo_to_code(ci),
                                  op_i_generic_breakpoint));
        }
        ci->u.gen_bp = NULL;
        Free(g);
        return;
    }

    /*
     * Copy the active data to the staging area (making it ready
     * for the next time it will be used).
     */

    if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
        dst->local_ms = src->local_ms;
        MatchSetRef(dst->local_ms);
    }
    if (flags & ERTS_BPF_META_TRACE) {
        dst->meta_tracer = src->meta_tracer;
        erts_refc_inc(&dst->meta_tracer->refc, 1);
        dst->meta_ms = src->meta_ms;
        MatchSetRef(dst->meta_ms);
    }
    if (flags & ERTS_BPF_COUNT) {
        dst->count = src->count;
        erts_refc_inc(&dst->count->refc, 1);
    }
    if (flags & ERTS_BPF_TIME_TRACE) {
        dst->time = src->time;
        erts_refc_inc(&dst->time->refc, 1);
        ASSERT(dst->time->hash);
    }
}

void
erts_commit_staged_bp(void)
{
    ErtsBpIndex staging = erts_staging_bp_ix();
    ErtsBpIndex active = erts_active_bp_ix();

    erts_atomic32_set_nob(&erts_active_bp_index, staging);
    erts_atomic32_set_nob(&erts_staging_bp_index, active);
}
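
/*
 * A sketch of the typical update sequence, simplified from how the
 * tracing BIFs drive this module (error handling and taking the code
 * write permission omitted):
 *
 *     erts_bp_match_functions(&f, mfa, specified);
 *     erts_set_trace_break(&f, ms);      -- stage the new breakpoint data
 *     erts_install_breakpoints(&f);      -- patch in op_i_generic_breakpoint
 *     erts_commit_staged_bp();           -- swap active and staging indices
 *     erts_consolidate_bp_data(&f, 1);   -- make staging a copy of active
 *     erts_bp_free_matched_functions(&f);
 */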

void
erts_install_breakpoints(BpFunctions* f)
{
    Uint i;
    Uint n = f->matched;
    BeamInstr br = BeamOpCodeAddr(op_i_generic_breakpoint);

    for (i = 0; i < n; i++) {
        ErtsCodeInfo* ci = f->matching[i].ci;
        GenericBp* g = ci->u.gen_bp;
        BeamInstr volatile *pc = erts_codeinfo_to_code(ci);
        BeamInstr instr = *pc;

        if (!BeamIsOpCode(instr, op_i_generic_breakpoint) && g) {
            Module* modp = f->matching[i].mod;

            /*
             * The breakpoint must be disabled in the active data
             * (it will be enabled later by switching breakpoint
             * indices), and enabled in the staging data.
             */
            ASSERT(g->data[erts_active_bp_ix()].flags == 0);
            ASSERT(g->data[erts_staging_bp_ix()].flags != 0);

            /*
             * The following write is not protected by any lock. We
             * assume that the hardware guarantees that a write of an
             * aligned word is atomic (i.e. that other processes
             * executing this code will not see a half-written
             * pointer).
             *
             * The contents of *pc are marked 'volatile' to ensure that
             * the compiler will do a single full-word write, and not
             * try any fancy optimizations to write a half word.
             */
            instr = BeamSetCodeAddr(instr, br);
            *pc = instr;
            modp->curr.num_breakpoints++;
        }
    }
}

void
erts_uninstall_breakpoints(BpFunctions* f)
{
    Uint i;
    Uint n = f->matched;

    for (i = 0; i < n; i++) {
        uninstall_breakpoint(f->matching[i].ci);
    }
}

static void
uninstall_breakpoint(ErtsCodeInfo *ci)
{
    BeamInstr *pc = erts_codeinfo_to_code(ci);
    if (BeamIsOpCode(*pc, op_i_generic_breakpoint)) {
        GenericBp* g = ci->u.gen_bp;
        if (g->data[erts_active_bp_ix()].flags == 0) {
            /*
             * The following write is not protected by any lock. We
             * assume that the hardware guarantees that a write of an
             * aligned word (or half-word) is atomic (i.e. that other
             * processes executing this code will not see a
             * half-written pointer).
             */
            *pc = g->orig_instr;
        }
    }
}

void
erts_set_trace_break(BpFunctions* f, Binary *match_spec)
{
    set_break(f, match_spec, ERTS_BPF_LOCAL_TRACE, 0, erts_tracer_true);
}

void
erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, ErtsTracer tracer)
{
    set_break(f, match_spec, ERTS_BPF_META_TRACE, 0, tracer);
}

void
erts_set_export_trace(ErtsCodeInfo *ci, Binary *match_spec, int local)
{
    Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;

    set_function_break(ci, match_spec, flags, 0, erts_tracer_nil);
}

void
erts_set_debug_break(BpFunctions* f) {
    set_break(f, NULL, ERTS_BPF_DEBUG, 0, erts_tracer_nil);
}

void
erts_set_count_break(BpFunctions* f, enum erts_break_op count_op)
{
    set_break(f, 0, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE,
              count_op, erts_tracer_nil);
}

void
erts_set_time_break(BpFunctions* f, enum erts_break_op count_op)
{
    set_break(f, 0, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
              count_op, erts_tracer_nil);
}

void
erts_clear_trace_break(BpFunctions* f)
{
    clear_break(f, ERTS_BPF_LOCAL_TRACE);
}

void
erts_clear_export_trace(ErtsCodeInfo *ci, int local)
{
    GenericBp* g = ci->u.gen_bp;

    if (g) {
        Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
        if (g->data[erts_staging_bp_ix()].flags & flags) {
            clear_function_break(ci, flags);
        }
    }
}

void
erts_clear_mtrace_break(BpFunctions* f)
{
    clear_break(f, ERTS_BPF_META_TRACE);
}

void
erts_clear_debug_break(BpFunctions* f)
{
    ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
    clear_break(f, ERTS_BPF_DEBUG);
}

void
erts_clear_count_break(BpFunctions* f)
{
    clear_break(f, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE);
}

void
erts_clear_time_break(BpFunctions* f)
{
    clear_break(f, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
}

void
erts_clear_all_breaks(BpFunctions* f)
{
    clear_break(f, ERTS_BPF_ALL);
}

int
erts_clear_module_break(Module *modp) {
    BeamCodeHeader* code_hdr;
    Uint n;
    Uint i;

    ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
    ASSERT(modp);
    code_hdr = modp->curr.code_hdr;
    if (!code_hdr) {
        return 0;
    }
    n = (Uint)(UWord) code_hdr->num_functions;
    for (i = 0; i < n; ++i) {
        ErtsCodeInfo *ci = code_hdr->functions[i];
        if (erts_is_function_native(ci))
            continue;
        clear_function_break(ci, ERTS_BPF_ALL);
    }

    erts_commit_staged_bp();

    for (i = 0; i < n; ++i) {
        ErtsCodeInfo *ci = code_hdr->functions[i];
        if (erts_is_function_native(ci))
            continue;
        uninstall_breakpoint(ci);
        consolidate_bp_data(modp, ci, 1);
        ASSERT(ci->u.gen_bp == NULL);
    }
    return n;
}

void
erts_clear_export_break(Module* modp, Export *ep)
{
    ErtsCodeInfo *ci;

    ERTS_LC_ASSERT(erts_thr_progress_is_blocking());

    ci = &ep->info;

    ASSERT(erts_codeinfo_to_code(ci) == ep->trampoline.raw);

    ASSERT(BeamIsOpCode(ep->trampoline.op, op_i_generic_breakpoint));
    ep->trampoline.op = 0;

    clear_function_break(ci, ERTS_BPF_ALL);
    erts_commit_staged_bp();

    consolidate_bp_data(modp, ci, 0);
    ASSERT(ci->u.gen_bp == NULL);
}

/*
 * If the topmost continuation pointer on the stack is a trace return
 * instruction, we modify it to be the address where we will actually
 * resume execution.
 *
 * This continuation pointer is used by match spec {caller} to get the
 * calling function, and if we don't do this fixup it will be
 * 'undefined'. This has the odd side effect of {caller} not really
 * being the function that made the call, but rather the function we
 * are about to return to.
 */
static void fixup_cp_before_trace(Process *c_p, int *return_to_trace)
{
    Eterm *cpp = c_p->stop;

    for (;;) {
        BeamInstr w = *cp_val(*cpp);
        if (BeamIsOpCode(w, op_return_trace)) {
            cpp += 3;
        } else if (BeamIsOpCode(w, op_i_return_to_trace)) {
            *return_to_trace = 1;
            cpp += 1;
        } else if (BeamIsOpCode(w, op_i_return_time_trace)) {
            cpp += 2;
        } else {
            break;
        }
    }
    c_p->stop[0] = (Eterm) cp_val(*cpp);
    ASSERT(is_CP(*cpp));
}
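
/*
 * The frame sizes skipped above match what this file pushes: a
 * return_trace frame is 3 words (instruction, MFA continuation
 * pointer, tracer; see do_call_trace()), an i_return_to_trace frame
 * is 1 word, and an i_return_time_trace frame is 2 words (instruction,
 * previous ErtsCodeInfo; see erts_generic_breakpoint()).
 */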

BeamInstr
erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *info, Eterm* reg)
{
    GenericBp* g;
    GenericBpData* bp;
    Uint bp_flags;
    ErtsBpIndex ix = erts_active_bp_ix();

    ASSERT(BeamIsOpCode(info->op, op_i_func_info_IaaI));

    g = info->u.gen_bp;
    bp = &g->data[ix];
    bp_flags = bp->flags;
    ASSERT((bp_flags & ~ERTS_BPF_ALL) == 0);
    if (bp_flags & (ERTS_BPF_LOCAL_TRACE|
                    ERTS_BPF_GLOBAL_TRACE|
                    ERTS_BPF_TIME_TRACE_ACTIVE) &&
        !IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
        bp_flags &= ~(ERTS_BPF_LOCAL_TRACE|
                      ERTS_BPF_GLOBAL_TRACE|
                      ERTS_BPF_TIME_TRACE|
                      ERTS_BPF_TIME_TRACE_ACTIVE);
        if (bp_flags == 0) { /* Quick exit */
            return g->orig_instr;
        }
    }

    if (bp_flags & ERTS_BPF_LOCAL_TRACE) {
        ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0);
        (void) do_call_trace(c_p, info, reg, 1, bp->local_ms, erts_tracer_true);
    } else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) {
        (void) do_call_trace(c_p, info, reg, 0, bp->local_ms, erts_tracer_true);
    }

    if (bp_flags & ERTS_BPF_META_TRACE) {
        ErtsTracer old_tracer, new_tracer;

        old_tracer = erts_atomic_read_nob(&bp->meta_tracer->tracer);

        new_tracer = do_call_trace(c_p, info, reg, 1, bp->meta_ms, old_tracer);

        if (!ERTS_TRACER_COMPARE(new_tracer, old_tracer)) {
            if (old_tracer == erts_atomic_cmpxchg_acqb(
                    &bp->meta_tracer->tracer,
                    (erts_aint_t)new_tracer,
                    (erts_aint_t)old_tracer)) {
                ERTS_TRACER_CLEAR(&old_tracer);
            } else {
                ERTS_TRACER_CLEAR(&new_tracer);
            }
        }
    }

    if (bp_flags & ERTS_BPF_COUNT_ACTIVE) {
        erts_atomic_inc_nob(&bp->count->acount);
    }

    if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) {
        BeamInstr w;
        Eterm* E;
        ErtsCodeInfo* prev_info = erts_trace_time_call(c_p, info, bp->time);
        E = c_p->stop;
        w = *(BeamInstr*) E[0];
        if (! (BeamIsOpCode(w, op_i_return_time_trace) ||
               BeamIsOpCode(w, op_return_trace) ||
               BeamIsOpCode(w, op_i_return_to_trace)) ) {
            ASSERT(c_p->htop <= E && E <= c_p->hend);
            if (E - 2 < c_p->htop) {
                (void) erts_garbage_collect(c_p, 2, reg, info->mfa.arity);
                ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
            }
            E = c_p->stop;

            ASSERT(c_p->htop <= E && E <= c_p->hend);

            E -= 2;
            E[1] = prev_info ? make_cp(erts_codeinfo_to_code(prev_info)) : NIL;
            E[0] = (Eterm) beam_return_time_trace;
            c_p->stop = E;
        }
    }

    if (bp_flags & ERTS_BPF_DEBUG) {
        return BeamOpCodeAddr(op_i_debug_breakpoint);
    } else {
        return g->orig_instr;
    }
}
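
/*
 * Note that the value returned above is an instruction word: the
 * op_i_generic_breakpoint instruction continues by executing whatever
 * we return, so returning g->orig_instr resumes the traced function,
 * while returning the i_debug_breakpoint address diverts into the
 * debug handler.
 */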

static ErtsTracer
do_call_trace(Process* c_p, ErtsCodeInfo* info, Eterm* reg,
              int local, Binary* ms, ErtsTracer tracer)
{
    int return_to_trace = 0;
    Uint32 flags;
    Uint need = 0;
    Eterm cp_save;
    Eterm* E = c_p->stop;

    cp_save = E[0];

    fixup_cp_before_trace(c_p, &return_to_trace);
    ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
    flags = erts_call_trace(c_p, info, ms, reg, local, &tracer);
    ERTS_REQ_PROC_MAIN_LOCK(c_p);

    E[0] = cp_save;

    ASSERT(!ERTS_PROC_IS_EXITING(c_p));
    if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
        need += 1;
    }
    if (flags & MATCH_SET_RX_TRACE) {
        need += 3 + size_object(tracer);
    }
    if (need) {
        ASSERT(c_p->htop <= E && E <= c_p->hend);
        if (E - need < c_p->htop) {
            (void) erts_garbage_collect(c_p, need, reg, info->mfa.arity);
            ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
            E = c_p->stop;
        }
    }
    if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
        E -= 1;
        ASSERT(c_p->htop <= E && E <= c_p->hend);
        E[0] = (Eterm) beam_return_to_trace;
        c_p->stop = E;
    }
    if (flags & MATCH_SET_RX_TRACE) {
        E -= 3;
        c_p->stop = E;
        ASSERT(c_p->htop <= E && E <= c_p->hend);
        ASSERT(is_CP((Eterm) (UWord) (&info->mfa.module)));
        ASSERT(IS_TRACER_VALID(tracer));
        E[2] = copy_object(tracer, c_p);
        E[1] = make_cp(&info->mfa.module);
        E[0] = (Eterm) ((flags & MATCH_SET_EXCEPTION_TRACE) ?
                        beam_exception_trace : beam_return_trace);
        erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
        ERTS_TRACE_FLAGS(c_p) |= F_EXCEPTION_TRACE;
        erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
    }
    return tracer;
}

ErtsCodeInfo*
erts_trace_time_call(Process* c_p, ErtsCodeInfo *info, BpDataTime* bdt)
{
    ErtsMonotonicTime time;
    process_breakpoint_time_t *pbt = NULL;
    bp_data_time_item_t sitem, *item = NULL;
    bp_time_hash_t *h = NULL;
    BpDataTime *pbdt = NULL;
    Uint32 six = acquire_bp_sched_ix(c_p);
    ErtsCodeInfo* prev_info;

    ASSERT(c_p);
    ASSERT(erts_atomic32_read_acqb(&c_p->state) & (ERTS_PSFLG_RUNNING
                                                   | ERTS_PSFLG_DIRTY_RUNNING));

    /* get previous timestamp and breakpoint
     * from the process psd */

    pbt = ERTS_PROC_GET_CALL_TIME(c_p);
    time = get_mtime(c_p);

    /* get pbt
     * timestamp = t0
     * lookup bdt from code
     * set ts0 to pbt
     * add call count here?
     */
    if (pbt == 0) {
        /* First call of process to instrumented function */
        pbt = Alloc(sizeof(process_breakpoint_time_t));
        (void) ERTS_PROC_SET_CALL_TIME(c_p, pbt);
        pbt->ci = NULL;
    }
    else if (pbt->ci) {
        /* add time to previous code */
        sitem.time = time - pbt->time;
        sitem.pid = c_p->common.id;
        sitem.count = 0;

        /* previous breakpoint */
        pbdt = get_time_break(pbt->ci);

        /* if null then the breakpoint was removed */
        if (pbdt) {
            h = &(pbdt->hash[six]);

            ASSERT(h);
            ASSERT(h->item);

            item = bp_hash_get(h, &sitem);
            if (!item) {
                item = bp_hash_put(h, &sitem);
            } else {
                BP_TIME_ADD(item, &sitem);
            }
        }
    }
    /* else caller is not call_time traced */

    /* Add count to this code */
    sitem.pid     = c_p->common.id;
    sitem.count   = 1;
    sitem.time    = 0;

    /* this breakpoint */
    ASSERT(bdt);
    h = &(bdt->hash[six]);

    ASSERT(h);
    ASSERT(h->item);

    item = bp_hash_get(h, &sitem);
    if (!item) {
        item = bp_hash_put(h, &sitem);
    } else {
        BP_TIME_ADD(item, &sitem);
    }

    prev_info = pbt->ci;
    pbt->ci = info;
    pbt->time = time;

    release_bp_sched_ix(six);
    return prev_info;
}
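
/*
 * A worked example (hypothetical functions): if f() calls g() and both
 * are call_time traced, entering g() runs the code above, which
 * charges the time elapsed since pbt->time to f() (the previous
 * breakpoint), bumps g()'s call count, and restarts the clock;
 * erts_trace_time_return() below later charges g()'s own time and
 * resumes timing f().
 */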

void
erts_trace_time_return(Process *p, ErtsCodeInfo *prev_info)
{
    ErtsMonotonicTime time;
    process_breakpoint_time_t *pbt = NULL;
    bp_data_time_item_t sitem, *item = NULL;
    bp_time_hash_t *h = NULL;
    BpDataTime *pbdt = NULL;
    Uint32 six = acquire_bp_sched_ix(p);

    ASSERT(p);
    ASSERT(erts_atomic32_read_acqb(&p->state) & (ERTS_PSFLG_RUNNING
                                                 | ERTS_PSFLG_DIRTY_RUNNING));

    /* get previous timestamp and breakpoint
     * from the process psd */

    pbt = ERTS_PROC_GET_CALL_TIME(p);
    time = get_mtime(p);

    /* get pbt
     * lookup bdt from code
     * timestamp = t1
     * get ts0 from pbt
     * get item from bdt->hash[bp_hash(p->id)]
     * accumulate the diff (t1 - t0) into the item
     */

    if (pbt) {

        /* might have been removed due to
         * trace_pattern(false)
         */
        ASSERT(pbt->ci);

        sitem.time = time - pbt->time;
        sitem.pid   = p->common.id;
        sitem.count = 0;

        /* previous breakpoint */
        pbdt = get_time_break(pbt->ci);

        /* beware, the trace_pattern might have been removed */
        if (pbdt) {

            h = &(pbdt->hash[six]);

            ASSERT(h);
            ASSERT(h->item);

            item = bp_hash_get(h, &sitem);
            if (!item) {
                item = bp_hash_put(h, &sitem);
            } else {
                BP_TIME_ADD(item, &sitem);
            }

        }

        pbt->ci = prev_info;
        pbt->time = time;

    }

    release_bp_sched_ix(six);
}

int
erts_is_trace_break(ErtsCodeInfo *ci, Binary **match_spec_ret, int local)
{
    Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
    GenericBpData* bp = check_break(ci, flags);

    if (bp) {
        if (match_spec_ret) {
            *match_spec_ret = bp->local_ms;
        }
        return 1;
    }
    return 0;
}

int
erts_is_mtrace_break(ErtsCodeInfo *ci, Binary **match_spec_ret,
                     ErtsTracer *tracer_ret)
{
    GenericBpData* bp = check_break(ci, ERTS_BPF_META_TRACE);

    if (bp) {
        if (match_spec_ret) {
            *match_spec_ret = bp->meta_ms;
        }
        if (tracer_ret) {
            *tracer_ret = erts_atomic_read_nob(&bp->meta_tracer->tracer);
        }
        return 1;
    }
    return 0;
}

int
erts_is_count_break(ErtsCodeInfo *ci, Uint *count_ret)
{
    GenericBpData* bp = check_break(ci, ERTS_BPF_COUNT);

    if (bp) {
        if (count_ret) {
            *count_ret = (Uint) erts_atomic_read_nob(&bp->count->acount);
        }
        return 1;
    }
    return 0;
}

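/*
 * Collects the call_time data for one function into a list of
 * {Pid, Count, S, Us} tuples prepended to *retval; this is the format
 * reported by erlang:trace_info/2 for the call_time item.
 */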
int erts_is_time_break(Process *p, ErtsCodeInfo *ci, Eterm *retval) {
    Uint i, ix;
    bp_time_hash_t hash;
    Uint size;
    Eterm *hp, t;
    bp_data_time_item_t *item = NULL;
    BpDataTime *bdt = get_time_break(ci);

    if (bdt) {
        if (retval) {
            /* collect all hashes into one hash */
            bp_hash_init(&hash, 64);
            /* foreach scheduler-specific hash */
            for (i = 0; i < bdt->n; i++) {
                bp_data_time_item_t *sitem;

                /* foreach hash bucket not NIL */
                for(ix = 0; ix < bdt->hash[i].n; ix++) {
                    item = &(bdt->hash[i].item[ix]);
                    if (item->pid != NIL) {
                        sitem = bp_hash_get(&hash, item);
                        if (sitem) {
                            BP_TIME_ADD(sitem, item);
                        } else {
                            bp_hash_put(&hash, item);
                        }
                    }
                }
            }
            /* *retval should be NIL or a term from a previous bif
             * in the export entry */

            if (hash.used > 0) {
                size = (5 + 2)*hash.used;
                hp   = HAlloc(p, size);

                for(ix = 0; ix < hash.n; ix++) {
                    item = &(hash.item[ix]);
                    if (item->pid != NIL) {
                        ErtsMonotonicTime sec, usec;
                        usec = ERTS_MONOTONIC_TO_USEC(item->time);
                        sec = usec / 1000000;
                        usec = usec - sec*1000000;
                        t = TUPLE4(hp, item->pid,
                                   make_small(item->count),
                                   make_small((Uint) sec),
                                   make_small((Uint) usec));
                        hp += 5;
                        *retval = CONS(hp, t, *retval); hp += 2;
                    }
                }
            }
            bp_hash_delete(&hash);
        }
        return 1;
    }

    return 0;
}


ErtsCodeInfo *
erts_find_local_func(ErtsCodeMFA *mfa) {
    Module *modp;
    BeamCodeHeader* code_hdr;
    ErtsCodeInfo* ci;
    Uint i,n;

    if ((modp = erts_get_module(mfa->module, erts_active_code_ix())) == NULL)
        return NULL;
    if ((code_hdr = modp->curr.code_hdr) == NULL)
        return NULL;
    n = (BeamInstr) code_hdr->num_functions;
    for (i = 0; i < n; ++i) {
        ci = code_hdr->functions[i];
        ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
        ASSERT(mfa->module == ci->mfa.module || is_nil(ci->mfa.module));
        if (mfa->function == ci->mfa.function &&
            mfa->arity == ci->mfa.arity) {
            return ci;
        }
    }
    return NULL;
}

static void bp_hash_init(bp_time_hash_t *hash, Uint n) {
    Uint size = sizeof(bp_data_time_item_t)*n;
    Uint i;

    hash->n    = n;
    hash->used = 0;

    hash->item = (bp_data_time_item_t *)Alloc(size);
    sys_memzero(hash->item, size);

    for(i = 0; i < n; ++i) {
        hash->item[i].pid = NIL;
    }
}

static void bp_hash_rehash(bp_time_hash_t *hash, Uint n) {
    bp_data_time_item_t *item = NULL;
    Uint size = sizeof(bp_data_time_item_t)*n;
    Uint ix;
    Uint hval;

    item = (bp_data_time_item_t *)Alloc(size);
    sys_memzero(item, size);

    for( ix = 0; ix < n; ++ix) {
        item[ix].pid = NIL;
    }

    /* rehash, old hash -> new hash */

    for( ix = 0; ix < hash->n; ix++) {
        if (hash->item[ix].pid != NIL) {

            hval = ((hash->item[ix].pid) >> 4) % n; /* new n */

            while (item[hval].pid != NIL) {
                hval = (hval + 1) % n;
            }
            item[hval].pid     = hash->item[ix].pid;
            item[hval].count   = hash->item[ix].count;
            item[hval].time    = hash->item[ix].time;
        }
    }

    Free(hash->item);
    hash->n = n;
    hash->item = item;
}

static ERTS_INLINE bp_data_time_item_t * bp_hash_get(bp_time_hash_t *hash, bp_data_time_item_t *sitem) {
    Eterm pid = sitem->pid;
    Uint hval = (pid >> 4) % hash->n;
    bp_data_time_item_t *item = NULL;

    item = hash->item;

    while (item[hval].pid != pid) {
        if (item[hval].pid == NIL) return NULL;
        hval = (hval + 1) % hash->n;
    }

    return &(item[hval]);
}

static ERTS_INLINE bp_data_time_item_t * bp_hash_put(bp_time_hash_t *hash, bp_data_time_item_t* sitem) {
    Uint hval;
    float r = 0.0;
    bp_data_time_item_t *item;

    /* make sure that the hash is not saturated */
    /* if saturated, rehash it */

    r = hash->used / (float) hash->n;

    if (r > 0.7f) {
        bp_hash_rehash(hash, hash->n * 2);
    }
    /* Do hval after rehash */
    hval = (sitem->pid >> 4) % hash->n;

    /* find free slot */
    item = hash->item;

    while (item[hval].pid != NIL) {
        hval = (hval + 1) % hash->n;
    }
    item = &(hash->item[hval]);

    item->pid     = sitem->pid;
    item->time    = sitem->time;
    item->count   = sitem->count;
    hash->used++;

    return item;
}
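
/*
 * The table above is simple open addressing with linear probing: the
 * key is the pid word shifted right by 4 (dropping the immediate tag
 * bits), slots are scanned modulo n until the pid or a NIL slot is
 * found, and the table doubles in size once the load factor exceeds
 * 0.7. Note that bp_hash_put() assumes the caller has already checked
 * with bp_hash_get() that the pid is not present.
 */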

static void bp_hash_delete(bp_time_hash_t *hash) {
    hash->n = 0;
    hash->used = 0;
    Free(hash->item);
    hash->item = NULL;
}

void erts_schedule_time_break(Process *p, Uint schedule) {
    process_breakpoint_time_t *pbt = NULL;
    bp_data_time_item_t sitem, *item = NULL;
    bp_time_hash_t *h = NULL;
    BpDataTime *pbdt = NULL;
    Uint32 six = acquire_bp_sched_ix(p);

    ASSERT(p);

    pbt = ERTS_PROC_GET_CALL_TIME(p);

    if (pbt) {

        switch (schedule) {
        case ERTS_BP_CALL_TIME_SCHEDULE_EXITING :
            break;
        case ERTS_BP_CALL_TIME_SCHEDULE_OUT :
            /* When a process is scheduled _out_,
             * timestamp it and add its delta to
             * the previous breakpoint.
             */

            if (pbt->ci) {
                pbdt = get_time_break(pbt->ci);
                if (pbdt) {
                    sitem.time = get_mtime(p) - pbt->time;
                    sitem.pid   = p->common.id;
                    sitem.count = 0;

                    h = &(pbdt->hash[six]);

                    ASSERT(h);
                    ASSERT(h->item);

                    item = bp_hash_get(h, &sitem);
                    if (!item) {
                        item = bp_hash_put(h, &sitem);
                    } else {
                        BP_TIME_ADD(item, &sitem);
                    }
                }
            }
            break;
        case ERTS_BP_CALL_TIME_SCHEDULE_IN :
            /* When a process is scheduled _in_,
             * timestamp it and remove the previous
             * timestamp in the psd.
             */
            pbt->time = get_mtime(p);
            break;
        default :
            ASSERT(0); /* will never happen */
            break;
        }
    } /* pbt */

    release_bp_sched_ix(six);
}
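
/*
 * The net effect of the two cases above is that the time a process
 * spends scheduled out is never charged to the breakpoint it is
 * sitting in: the delta up to the schedule-out is banked, and the
 * clock restarts at schedule-in.
 */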

/* *************************************************************************
** Local helpers
*/


static void
set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
          enum erts_break_op count_op, ErtsTracer tracer)
{
    Uint i;
    Uint n;

    n = f->matched;
    for (i = 0; i < n; i++) {
        set_function_break(f->matching[i].ci,
                           match_spec, break_flags,
                           count_op, tracer);
    }
}

static void
set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags,
                   enum erts_break_op count_op, ErtsTracer tracer)
{
    GenericBp* g;
    GenericBpData* bp;
    Uint common;
    ErtsBpIndex ix = erts_staging_bp_ix();

    ERTS_LC_ASSERT(erts_has_code_write_permission());
    g = ci->u.gen_bp;
    if (g == 0) {
        int i;
        if (count_op == ERTS_BREAK_RESTART || count_op == ERTS_BREAK_PAUSE) {
            /* Do not insert a new breakpoint */
            return;
        }
        g = Alloc(sizeof(GenericBp));
        g->orig_instr = *erts_codeinfo_to_code(ci);
        for (i = 0; i < ERTS_NUM_BP_IX; i++) {
            g->data[i].flags = 0;
        }
        ci->u.gen_bp = g;
    }
    bp = &g->data[ix];

    /*
     * If we are changing an existing breakpoint, clean up old data.
     */

    common = break_flags & bp->flags;
    if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
        MatchSetUnref(bp->local_ms);
    } else if (common & ERTS_BPF_META_TRACE) {
        MatchSetUnref(bp->meta_ms);
        bp_meta_unref(bp->meta_tracer);
    } else if (common & ERTS_BPF_COUNT) {
        if (count_op == ERTS_BREAK_PAUSE) {
            bp->flags &= ~ERTS_BPF_COUNT_ACTIVE;
        } else {
            bp->flags |= ERTS_BPF_COUNT_ACTIVE;
            erts_atomic_set_nob(&bp->count->acount, 0);
        }
        ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
        return;
    } else if (common & ERTS_BPF_TIME_TRACE) {
        BpDataTime* bdt = bp->time;
        Uint i = 0;

        if (count_op == ERTS_BREAK_PAUSE) {
            bp->flags &= ~ERTS_BPF_TIME_TRACE_ACTIVE;
        } else {
            bp->flags |= ERTS_BPF_TIME_TRACE_ACTIVE;
            for (i = 0; i < bdt->n; i++) {
                bp_hash_delete(&(bdt->hash[i]));
                bp_hash_init(&(bdt->hash[i]), 32);
            }
        }
        ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
        return;
    }

    /*
     * Initialize the new breakpoint data.
     */

    if (break_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
        MatchSetRef(match_spec);
        bp->local_ms = match_spec;
    } else if (break_flags & ERTS_BPF_META_TRACE) {
        BpMetaTracer* bmt;
        ErtsTracer meta_tracer = erts_tracer_nil;
        MatchSetRef(match_spec);
        bp->meta_ms = match_spec;
        bmt = Alloc(sizeof(BpMetaTracer));
        erts_refc_init(&bmt->refc, 1);
        erts_tracer_update(&meta_tracer, tracer); /* copy tracer */
        erts_atomic_init_nob(&bmt->tracer, (erts_aint_t)meta_tracer);
        bp->meta_tracer = bmt;
    } else if (break_flags & ERTS_BPF_COUNT) {
        BpCount* bcp;

        ASSERT((bp->flags & ERTS_BPF_COUNT) == 0);
        bcp = Alloc(sizeof(BpCount));
        erts_refc_init(&bcp->refc, 1);
        erts_atomic_init_nob(&bcp->acount, 0);
        bp->count = bcp;
    } else if (break_flags & ERTS_BPF_TIME_TRACE) {
        BpDataTime* bdt;
        int i;

        ASSERT((bp->flags & ERTS_BPF_TIME_TRACE) == 0);
        bdt = Alloc(sizeof(BpDataTime));
        erts_refc_init(&bdt->refc, 1);
        bdt->n = erts_no_schedulers + 1;
        bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
        for (i = 0; i < bdt->n; i++) {
            bp_hash_init(&(bdt->hash[i]), 32);
        }
        bp->time = bdt;
    }

    bp->flags |= break_flags;
    ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
}
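
/*
 * Summary of the count_op handling above: ERTS_BREAK_PAUSE merely
 * clears the *_ACTIVE flag (the data is kept so a later restart can
 * resume with fresh counters), ERTS_BREAK_RESTART re-activates and
 * zeroes the counters, and neither of them creates a breakpoint where
 * none exists.
 */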

static void
clear_break(BpFunctions* f, Uint break_flags)
{
    Uint i;
    Uint n;

    n = f->matched;
    for (i = 0; i < n; i++) {
        clear_function_break(f->matching[i].ci, break_flags);
    }
}

static int
clear_function_break(ErtsCodeInfo *ci, Uint break_flags)
{
    GenericBp* g;
    GenericBpData* bp;
    Uint common;
    ErtsBpIndex ix = erts_staging_bp_ix();

    ERTS_LC_ASSERT(erts_has_code_write_permission());

    if ((g = ci->u.gen_bp) == NULL) {
        return 1;
    }

    bp = &g->data[ix];
    ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
    common = bp->flags & break_flags;
    bp->flags &= ~break_flags;
    if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
        MatchSetUnref(bp->local_ms);
    }
    if (common & ERTS_BPF_META_TRACE) {
        MatchSetUnref(bp->meta_ms);
        bp_meta_unref(bp->meta_tracer);
    }
    if (common & ERTS_BPF_COUNT) {
        ASSERT((bp->flags & ERTS_BPF_COUNT_ACTIVE) == 0);
        bp_count_unref(bp->count);
    }
    if (common & ERTS_BPF_TIME_TRACE) {
        ASSERT((bp->flags & ERTS_BPF_TIME_TRACE_ACTIVE) == 0);
        bp_time_unref(bp->time);
    }

    ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
    return 1;
}

static void
bp_meta_unref(BpMetaTracer* bmt)
{
    if (erts_refc_dectest(&bmt->refc, 0) <= 0) {
        ErtsTracer trc = erts_atomic_read_nob(&bmt->tracer);
        ERTS_TRACER_CLEAR(&trc);
        Free(bmt);
    }
}

static void
bp_count_unref(BpCount* bcp)
{
    if (erts_refc_dectest(&bcp->refc, 0) <= 0) {
        Free(bcp);
    }
}

static void
bp_time_unref(BpDataTime* bdt)
{
    if (erts_refc_dectest(&bdt->refc, 0) <= 0) {
        Uint i = 0;

        for (i = 0; i < bdt->n; ++i) {
            bp_hash_delete(&(bdt->hash[i]));
        }
        Free(bdt->hash);
        Free(bdt);
    }
}

static BpDataTime*
get_time_break(ErtsCodeInfo *ci)
{
    GenericBpData* bp = check_break(ci, ERTS_BPF_TIME_TRACE);
    return bp ? bp->time : 0;
}

static GenericBpData*
check_break(ErtsCodeInfo *ci, Uint break_flags)
{
    GenericBp* g = ci->u.gen_bp;

    ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
    if (erts_is_function_native(ci)) {
        return 0;
    }
    if (g) {
        GenericBpData* bp = &g->data[erts_active_bp_ix()];
        ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
        if (bp->flags & break_flags) {
            return bp;
        }
    }
    return 0;
}