1 /*
2 * %CopyrightBegin%
3 *
4 * Copyright Ericsson AB 2000-2020. All Rights Reserved.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * %CopyrightEnd%
19 */
20
21 #ifdef HAVE_CONFIG_H
22 # include "config.h"
23 #endif
24
25 #include "sys.h"
26 #include "erl_vm.h"
27 #include "global.h"
28 #include "erl_process.h"
29 #include "beam_code.h"
30 #include "bif.h"
31 #include "error.h"
32 #include "erl_binary.h"
33 #include "beam_bp.h"
34 #include "erl_term.h"
35 #include "erl_nfunc_sched.h"
36
37 #include "beam_common.h"
38 #include "jit/beam_asm.h"
39
40
41 /* *************************************************************************
42 ** Macros
43 */
44
45 /*
46 ** Memory allocation macros
47 */
/* Breakpoint data is allocated from the dedicated BPD allocator type. */
#define Alloc(SZ)       erts_alloc(ERTS_ALC_T_BPD, (SZ))
/* BUGFIX: the expansion previously referenced (SZ), which is not a
 * parameter of this macro; any use of ReAlloc() failed to compile. */
#define ReAlloc(P, SIZ) erts_realloc(ERTS_ALC_T_BPD, (P), (SIZ))
#define Free(P)         erts_free(ERTS_ALC_T_BPD, (P))
52
/* Breakpoint flag bits kept in GenericBpData.flags; each bit enables one
 * kind of instrumentation on the function the breakpoint is set on. */
#define ERTS_BPF_LOCAL_TRACE       0x01  /* Local (module-internal) call trace */
#define ERTS_BPF_META_TRACE        0x02  /* Meta trace with its own tracer */
#define ERTS_BPF_COUNT             0x04  /* Call-count data is present */
#define ERTS_BPF_COUNT_ACTIVE      0x08  /* Call counting is switched on */
#define ERTS_BPF_DEBUG             0x10  /* Debug breakpoint */
#define ERTS_BPF_TIME_TRACE        0x20  /* Call-time data is present */
#define ERTS_BPF_TIME_TRACE_ACTIVE 0x40  /* Call-time measuring is switched on */
#define ERTS_BPF_GLOBAL_TRACE      0x80  /* Global (exported) call trace */

#define ERTS_BPF_ALL               0xFF  /* Mask covering every flag above */

/* Double-buffered breakpoint generation indices: changes are prepared in
 * the staging slot and made visible by swapping the two indices. */
erts_atomic32_t erts_active_bp_index;
erts_atomic32_t erts_staging_bp_index;
/* Serializes the shared time-trace hash slot used by dirty schedulers. */
erts_mtx_t erts_dirty_bp_ix_mtx;
67
68 /*
69 * Inlined helpers
70 */
71
72 static ERTS_INLINE ErtsMonotonicTime
get_mtime(Process * c_p)73 get_mtime(Process *c_p)
74 {
75 return erts_get_monotonic_time(erts_proc_sched_data(c_p));
76 }
77
78 static ERTS_INLINE Uint32
acquire_bp_sched_ix(Process * c_p)79 acquire_bp_sched_ix(Process *c_p)
80 {
81 ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
82 ASSERT(esdp);
83 if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
84 erts_mtx_lock(&erts_dirty_bp_ix_mtx);
85 return (Uint32) erts_no_schedulers;
86 }
87 return (Uint32) esdp->no - 1;
88 }
89
90 static ERTS_INLINE void
release_bp_sched_ix(Uint32 ix)91 release_bp_sched_ix(Uint32 ix)
92 {
93 if (ix == (Uint32) erts_no_schedulers)
94 erts_mtx_unlock(&erts_dirty_bp_ix_mtx);
95 }
96
97
98
99 /* *************************************************************************
100 ** Local prototypes
101 */
102
103 /*
104 ** Helpers
105 */
106 static ErtsTracer do_call_trace(Process* c_p, ErtsCodeInfo *info, Eterm* reg,
107 int local, Binary* ms, ErtsTracer tracer);
108 static void set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
109 enum erts_break_op count_op, ErtsTracer tracer);
110 static void set_function_break(ErtsCodeInfo *ci,
111 Binary *match_spec,
112 Uint break_flags,
113 enum erts_break_op count_op,
114 ErtsTracer tracer);
115
116 static void clear_break(BpFunctions* f, Uint break_flags);
117 static int clear_function_break(const ErtsCodeInfo *ci, Uint break_flags);
118
119 static BpDataTime* get_time_break(const ErtsCodeInfo *ci);
120 static GenericBpData* check_break(const ErtsCodeInfo *ci, Uint break_flags);
121
122 static void bp_meta_unref(BpMetaTracer *bmt);
123 static void bp_count_unref(BpCount *bcp);
124 static void bp_time_unref(BpDataTime *bdt);
125 static void consolidate_bp_data(Module *modp, ErtsCodeInfo *ci, int local);
126 static void uninstall_breakpoint(ErtsCodeInfo *ci);
127
128 /* bp_hash */
129 #define BP_TIME_ADD(pi0, pi1) \
130 do { \
131 (pi0)->count += (pi1)->count; \
132 (pi0)->time += (pi1)->time; \
133 } while(0)
134
135 static void bp_hash_init(bp_time_hash_t *hash, Uint n);
136 static void bp_hash_rehash(bp_time_hash_t *hash, Uint n);
137 static ERTS_INLINE bp_data_time_item_t * bp_hash_get(bp_time_hash_t *hash, bp_data_time_item_t *sitem);
138 static ERTS_INLINE bp_data_time_item_t * bp_hash_put(bp_time_hash_t *hash, bp_data_time_item_t *sitem);
139 static void bp_hash_delete(bp_time_hash_t *hash);
140
141 /* *************************************************************************
142 ** External interfaces
143 */
144
145 void
erts_bp_init(void)146 erts_bp_init(void) {
147 erts_atomic32_init_nob(&erts_active_bp_index, 0);
148 erts_atomic32_init_nob(&erts_staging_bp_index, 1);
149 erts_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index", NIL,
150 ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
151 }
152
153
/*
 * Collect into f every local function matching mfa. 'specified' says how
 * many leading fields of mfa are significant: 0 = match everything,
 * 1 = module, 2 = module+function, 3 = full MFA. The result array must be
 * released with erts_bp_free_matched_functions().
 */
void
erts_bp_match_functions(BpFunctions* f, ErtsCodeMFA *mfa, int specified)
{
    ErtsCodeIndex code_ix = erts_active_code_ix();
    Uint max_funcs = 0;
    int current;
    int max_modules = module_code_size(code_ix);
    int num_modules = 0;
    Module* modp;
    Module** module;
    Uint i;

    /* First pass: gather all loaded modules and count their functions
     * to size the match array (an upper bound). */
    module = (Module **) Alloc(max_modules*sizeof(Module *));
    num_modules = 0;
    for (current = 0; current < max_modules; current++) {
        modp = module_code(current, code_ix);
        if (modp->curr.code_hdr) {
            max_funcs += modp->curr.code_hdr->num_functions;
            module[num_modules++] = modp;
        }
    }

    f->matching = (BpFunction *) Alloc(max_funcs*sizeof(BpFunction));
    i = 0;
    for (current = 0; current < num_modules; current++) {
        const BeamCodeHeader* code_hdr = module[current]->curr.code_hdr;
        Uint num_functions = (Uint)(UWord) code_hdr->num_functions;
        Uint fi;

        if (specified > 0) {
            if (mfa->module != make_atom(module[current]->module)) {
                /* Wrong module name */
                continue;
            }
        }

        for (fi = 0; fi < num_functions; fi++) {
            ErtsCodeInfo* ci;
            void *w_ptr;

            /* Get a writable alias for the (normally read-only) code. */
            w_ptr = erts_writable_code_ptr(&module[current]->curr,
                                           code_hdr->functions[fi]);
            ci = (ErtsCodeInfo*)w_ptr;

#ifndef BEAMASM
            ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
#endif
            /* Check only as many MFA fields as 'specified' covers; each
             * case deliberately falls through to the next, weaker test. */
            switch (specified) {
            case 3:
                if (ci->mfa.arity != mfa->arity)
                    continue;
                /* Fall through */
            case 2:
                if (ci->mfa.function != mfa->function)
                    continue;
                /* Fall through */
            case 1:
                if (ci->mfa.module != mfa->module)
                    continue;
                /* Fall through */
            case 0:
                break;
            }
            /* Store match */
            f->matching[i].ci = ci;
            f->matching[i].mod = module[current];
            i++;
        }
    }
    f->matched = i;
    Free(module);
}
223
/*
 * Collect into f every export entry matching mfa; 'specified' has the
 * same meaning as in erts_bp_match_functions(). Entries whose trampoline
 * currently points at the error handler (not-yet-loaded code) are skipped.
 */
void
erts_bp_match_export(BpFunctions* f, ErtsCodeMFA *mfa, int specified)
{
    ErtsCodeIndex code_ix = erts_active_code_ix();
    int i;
    int num_exps = export_list_size(code_ix);
    int ne;

    f->matching = (BpFunction *) Alloc(num_exps*sizeof(BpFunction));
    ne = 0;
    for (i = 0; i < num_exps; i++) {
        Export* ep;

        ep = export_list(i, code_ix);

        /* Each case deliberately falls through to the weaker check. */
        switch (specified) {
        case 3:
            if (mfa->arity != ep->info.mfa.arity)
                continue;
            /* Fall through */
        case 2:
            if (mfa->function != ep->info.mfa.function)
                continue;
            /* Fall through */
        case 1:
            if (mfa->module != ep->info.mfa.module)
                continue;
            /* Fall through */
        case 0:
            break;
        default:
            ASSERT(0);
        }

        if (erts_is_export_trampoline_active(ep, code_ix)) {
            /* Skip stubs that would call the error handler. */
            if (BeamIsOpCode(ep->trampoline.common.op, op_call_error_handler)) {
                continue;
            }

            ASSERT(BeamIsOpCode(ep->trampoline.common.op, op_i_generic_breakpoint));
        }

        f->matching[ne].ci = &ep->info;
        f->matching[ne].mod = erts_get_module(ep->info.mfa.module, code_ix);

        ne++;

    }
    f->matched = ne;
}
271
272 void
erts_bp_free_matched_functions(BpFunctions * f)273 erts_bp_free_matched_functions(BpFunctions* f)
274 {
275 if (f->matching) {
276 Free(f->matching);
277 }
278 else ASSERT(f->matched == 0);
279 }
280
281 void
erts_consolidate_bp_data(BpFunctions * f,int local)282 erts_consolidate_bp_data(BpFunctions* f, int local)
283 {
284 BpFunction* fs = f->matching;
285 Uint i;
286 Uint n = f->matched;
287
288 ERTS_LC_ASSERT(erts_has_code_write_permission());
289
290 for (i = 0; i < n; i++) {
291 consolidate_bp_data(fs[i].mod, fs[i].ci, local);
292 }
293 }
294
/*
 * Bring the staging breakpoint area of ci in sync with the active one.
 * First the references held by the (stale) staging data are dropped;
 * then, if the active flags are zero, the whole GenericBp is freed and
 * the module's breakpoint/traced-export counter is decremented, else
 * the active data is copied into the staging slot with new references
 * taken. 'local' selects which module counter applies on removal.
 */
static void
consolidate_bp_data(Module* modp, ErtsCodeInfo *ci, int local)
{
    GenericBp* g = ci->u.gen_bp;
    GenericBpData* src;
    GenericBpData* dst;
    Uint flags;

    if (g == 0) {
        return;
    }

    src = &g->data[erts_active_bp_ix()];
    dst = &g->data[erts_staging_bp_ix()];

    /*
     * The contents of the staging area may be out of date.
     * Decrement all reference pointers.
     */

    flags = dst->flags;
    if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
        MatchSetUnref(dst->local_ms);
    }
    if (flags & ERTS_BPF_META_TRACE) {
        bp_meta_unref(dst->meta_tracer);
        MatchSetUnref(dst->meta_ms);
    }
    if (flags & ERTS_BPF_COUNT) {
        bp_count_unref(dst->count);
    }
    if (flags & ERTS_BPF_TIME_TRACE) {
        bp_time_unref(dst->time);
    }

    /*
     * If all flags are zero, deallocate all breakpoint data.
     */

    flags = dst->flags = src->flags;
    if (flags == 0) {
        if (modp) {
            if (local) {
                modp->curr.num_breakpoints--;
            } else {
                modp->curr.num_traced_exports--;
            }
            ASSERT(modp->curr.num_breakpoints >= 0);
            ASSERT(modp->curr.num_traced_exports >= 0);
#if !defined(BEAMASM) && defined(DEBUG)
            {
                /* The breakpoint instruction must already have been
                 * removed from the code (uninstall_breakpoint). */
                BeamInstr instr = *(const BeamInstr*)erts_codeinfo_to_code(ci);
                ASSERT(!BeamIsOpCode(instr, op_i_generic_breakpoint));
            }
#endif
        }
        ci->u.gen_bp = NULL;
        Free(g);
        return;
    }

    /*
     * Copy the active data to the staging area (making it ready
     * for the next time it will be used).
     */

    if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
        dst->local_ms = src->local_ms;
        MatchSetRef(dst->local_ms);
    }
    if (flags & ERTS_BPF_META_TRACE) {
        dst->meta_tracer = src->meta_tracer;
        erts_refc_inc(&dst->meta_tracer->refc, 1);
        dst->meta_ms = src->meta_ms;
        MatchSetRef(dst->meta_ms);
    }
    if (flags & ERTS_BPF_COUNT) {
        dst->count = src->count;
        erts_refc_inc(&dst->count->refc, 1);
    }
    if (flags & ERTS_BPF_TIME_TRACE) {
        dst->time = src->time;
        erts_refc_inc(&dst->time->refc, 1);
        ASSERT(dst->time->hash);
    }
}
381
382 void
erts_commit_staged_bp(void)383 erts_commit_staged_bp(void)
384 {
385 ErtsBpIndex staging = erts_staging_bp_ix();
386 ErtsBpIndex active = erts_active_bp_ix();
387
388 erts_atomic32_set_nob(&erts_active_bp_index, staging);
389 erts_atomic32_set_nob(&erts_staging_bp_index, active);
390 }
391
/*
 * Patch the breakpoint instruction (or set the BEAMASM breakpoint flag)
 * into every matched function that has breakpoint data but no installed
 * breakpoint yet, bumping the module's breakpoint counter.
 */
void
erts_install_breakpoints(BpFunctions* f)
{
    Uint i;
    Uint n = f->matched;

    for (i = 0; i < n; i++) {
        ErtsCodeInfo* ci = f->matching[i].ci;
        GenericBp* g = ci->u.gen_bp;
        Module* modp = f->matching[i].mod;
#ifdef BEAMASM
        if ((erts_asm_bp_get_flags(ci) & ERTS_ASM_BP_FLAG_BP) == 0 && g) {
            /*
             * The breakpoint must be disabled in the active data
             * (it will enabled later by switching bp indices),
             * and enabled in the staging data.
             */
            ASSERT(g->data[erts_active_bp_ix()].flags == 0);
            ASSERT(g->data[erts_staging_bp_ix()].flags != 0);

            erts_asm_bp_set_flag(ci, ERTS_ASM_BP_FLAG_BP);
            modp->curr.num_breakpoints++;
        }
#else
        BeamInstr volatile *pc = (BeamInstr*)erts_codeinfo_to_code(ci);
        BeamInstr instr = *pc;
        if (!BeamIsOpCode(instr, op_i_generic_breakpoint) && g) {
            BeamInstr br = BeamOpCodeAddr(op_i_generic_breakpoint);

            /*
             * The breakpoint must be disabled in the active data
             * (it will enabled later by switching bp indices),
             * and enabled in the staging data.
             */
            ASSERT(g->data[erts_active_bp_ix()].flags == 0);
            ASSERT(g->data[erts_staging_bp_ix()].flags != 0);

            /*
             * The following write is not protected by any lock. We
             * assume that the hardware guarantees that a write of an
             * aligned word-size writes is atomic (i.e. that other
             * processes executing this code will not see a half
             * pointer).
             *
             * The contents of *pc is marked 'volatile' to ensure that
             * the compiler will do a single full-word write, and not
             * try any fancy optimizations to write a half word.
             */
            instr = BeamSetCodeAddr(instr, br);
            *pc = instr;
            modp->curr.num_breakpoints++;
        }
#endif
    }
}
447
448 void
erts_uninstall_breakpoints(BpFunctions * f)449 erts_uninstall_breakpoints(BpFunctions* f)
450 {
451 Uint i;
452 Uint n = f->matched;
453
454 for (i = 0; i < n; i++) {
455 uninstall_breakpoint(f->matching[i].ci);
456 }
457 }
458
#ifdef BEAMASM
/* Clear the breakpoint flag for ci when the active breakpoint data no
 * longer has any flags set (i.e. the breakpoint is unused). */
static void
uninstall_breakpoint(ErtsCodeInfo *ci)
{
    if (erts_asm_bp_get_flags(ci) & ERTS_ASM_BP_FLAG_BP) {
        GenericBp* g = ci->u.gen_bp;

        if (g->data[erts_active_bp_ix()].flags == 0) {
            erts_asm_bp_unset_flag(ci, ERTS_ASM_BP_FLAG_BP);
        }
    }
}
#else
/* Restore the original instruction over the breakpoint opcode when the
 * active breakpoint data no longer has any flags set. */
static void
uninstall_breakpoint(ErtsCodeInfo *ci)
{
    BeamInstr *pc = (BeamInstr*)erts_codeinfo_to_code(ci);

    if (BeamIsOpCode(*pc, op_i_generic_breakpoint)) {
        GenericBp* g = ci->u.gen_bp;

        if (g->data[erts_active_bp_ix()].flags == 0) {
            /*
             * The following write is not protected by any lock. We
             * assume that the hardware guarantees that a write of an
             * aligned word-size (or half-word) writes is atomic
             * (i.e. that other processes executing this code will not
             * see a half pointer).
             */
            *pc = g->orig_instr;
        }
    }
}
#endif
493
/* Stage a local call-trace breakpoint on all matched functions. */
void
erts_set_trace_break(BpFunctions* f, Binary *match_spec)
{
    set_break(f, match_spec, ERTS_BPF_LOCAL_TRACE, 0, erts_tracer_true);
}

/* Stage a meta-trace breakpoint (with its own tracer) on all matched
 * functions. */
void
erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, ErtsTracer tracer)
{
    set_break(f, match_spec, ERTS_BPF_META_TRACE, 0, tracer);
}

/* Stage a local or global call-trace breakpoint on one export entry. */
void
erts_set_export_trace(ErtsCodeInfo *ci, Binary *match_spec, int local)
{
    Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;

    set_function_break(ci, match_spec, flags, 0, erts_tracer_nil);
}

/* Stage a debug breakpoint on all matched functions. */
void
erts_set_debug_break(BpFunctions* f) {
    set_break(f, NULL, ERTS_BPF_DEBUG, 0, erts_tracer_nil);
}

/* Stage a call-count breakpoint (created and activated) on all matched
 * functions; count_op tells set_break whether to restart or pause. */
void
erts_set_count_break(BpFunctions* f, enum erts_break_op count_op)
{
    set_break(f, 0, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE,
              count_op, erts_tracer_nil);
}

/* Stage a call-time breakpoint (created and activated) on all matched
 * functions. */
void
erts_set_time_break(BpFunctions* f, enum erts_break_op count_op)
{
    set_break(f, 0, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
              count_op, erts_tracer_nil);
}
532
/* Stage removal of the local call-trace flag on all matched functions. */
void
erts_clear_trace_break(BpFunctions* f)
{
    clear_break(f, ERTS_BPF_LOCAL_TRACE);
}

/* Stage removal of the local/global trace flag on one export entry, but
 * only if that flag is actually set in the staging data. */
void
erts_clear_export_trace(ErtsCodeInfo *ci, int local)
{
    GenericBp* g = ci->u.gen_bp;

    if (g) {
        Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;

        if (g->data[erts_staging_bp_ix()].flags & flags) {
            clear_function_break(ci, flags);
        }
    }
}

/* Stage removal of meta tracing on all matched functions. */
void
erts_clear_mtrace_break(BpFunctions* f)
{
    clear_break(f, ERTS_BPF_META_TRACE);
}

/* Stage removal of debug breakpoints; requires blocked thread progress. */
void
erts_clear_debug_break(BpFunctions* f)
{
    ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
    clear_break(f, ERTS_BPF_DEBUG);
}

/* Stage removal of call counting on all matched functions. */
void
erts_clear_count_break(BpFunctions* f)
{
    clear_break(f, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE);
}

/* Stage removal of call-time tracing on all matched functions. */
void
erts_clear_time_break(BpFunctions* f)
{
    clear_break(f, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
}

/* Stage removal of every kind of breakpoint on all matched functions. */
void
erts_clear_all_breaks(BpFunctions* f)
{
    clear_break(f, ERTS_BPF_ALL);
}
583
/*
 * Clear and fully remove every breakpoint in a module (used when the
 * module is purged). Must be called with thread progress blocked.
 * Returns the number of functions in the module (0 if no code loaded).
 */
int
erts_clear_module_break(Module *modp) {
    const BeamCodeHeader* code_hdr;
    Uint n;
    Uint i;

    ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
    ASSERT(modp);

    code_hdr = modp->curr.code_hdr;
    if (!code_hdr) {
        return 0;
    }

    /* Stage removal of all breakpoint flags in every function... */
    n = (Uint)code_hdr->num_functions;
    for (i = 0; i < n; ++i) {
        const ErtsCodeInfo *ci = code_hdr->functions[i];

        clear_function_break(ci, ERTS_BPF_ALL);
    }

    /* ...make the cleared state active... */
    erts_commit_staged_bp();

    /* ...then unpatch the code and free the breakpoint data. */
    for (i = 0; i < n; ++i) {
        ErtsCodeInfo* ci;
        void *w_ptr;

        w_ptr = erts_writable_code_ptr(&modp->curr,
                                       code_hdr->functions[i]);
        ci = (ErtsCodeInfo*)w_ptr;

        uninstall_breakpoint(ci);
        consolidate_bp_data(modp, ci, 1);
        ASSERT(ci->u.gen_bp == NULL);
    }

    return n;
}
622
/*
 * Clear and fully remove the breakpoint on an export entry's trampoline.
 * Must be called with thread progress blocked.
 */
void
erts_clear_export_break(Module* modp, Export *ep)
{
    ErtsCodeInfo *ci;

    ERTS_LC_ASSERT(erts_thr_progress_is_blocking());

    ci = &ep->info;

    ASSERT(erts_codeinfo_to_code(ci) == ep->trampoline.raw);
#ifndef BEAMASM
    ASSERT(BeamIsOpCode(ep->trampoline.common.op, op_i_generic_breakpoint));
#endif
    /* Deactivate the trampoline before tearing down the breakpoint. */
    ep->trampoline.common.op = 0;

    clear_function_break(ci, ERTS_BPF_ALL);
    erts_commit_staged_bp();

    consolidate_bp_data(modp, ci, 0);
    ASSERT(ci->u.gen_bp == NULL);
}
644
645 /*
646 * If the topmost continuation pointer on the stack is a trace return
647 * instruction, we modify it to be the place where we again start to
648 * execute code.
649 *
650 * This continuation pointer is used by match spec {caller} to get the
651 * calling function, and if we don't do this fixup it will be
652 * 'undefined'. This has the odd side effect of {caller} not really
653 * being the function which is the caller, but rather the function
654 * which we are about to return to.
655 */
fixup_cp_before_trace(Process * c_p,int * return_to_trace)656 static void fixup_cp_before_trace(Process *c_p, int *return_to_trace)
657 {
658 Eterm *cpp = c_p->stop;
659
660 for (;;) {
661 ErtsCodePtr w = cp_val(*cpp);
662 if (BeamIsReturnTrace(w)) {
663 cpp += 3;
664 } else if (BeamIsReturnToTrace(w)) {
665 *return_to_trace = 1;
666 cpp += 1;
667 } else if (BeamIsReturnTimeTrace(w)) {
668 cpp += 2;
669 } else {
670 break;
671 }
672 }
673 c_p->stop[0] = (Eterm) cp_val(*cpp);
674 ASSERT(is_CP(*cpp));
675 }
676
/*
 * Execute the generic breakpoint for function 'info' in process c_p.
 * Dispatches on the active breakpoint flags (local/global call trace,
 * meta trace, call count, call time) and returns the instruction to
 * resume with: the saved original instruction, or the debug-breakpoint
 * opcode when ERTS_BPF_DEBUG is set.
 */
BeamInstr
erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *info, Eterm* reg)
{
    GenericBp* g;
    GenericBpData* bp;
    Uint bp_flags;
    ErtsBpIndex ix = erts_active_bp_ix();

#ifndef BEAMASM
    ASSERT(BeamIsOpCode(info->op, op_i_func_info_IaaI));
#endif

    g = info->u.gen_bp;
    bp = &g->data[ix];
    bp_flags = bp->flags;
    ASSERT((bp_flags & ~ERTS_BPF_ALL) == 0);
    /* If this process is not being traced for calls, the tracing flags
     * do not apply to it; drop them (locally) and take a quick exit if
     * nothing else remains. */
    if (bp_flags & (ERTS_BPF_LOCAL_TRACE|
                    ERTS_BPF_GLOBAL_TRACE|
                    ERTS_BPF_TIME_TRACE_ACTIVE) &&
        !IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
        bp_flags &= ~(ERTS_BPF_LOCAL_TRACE|
                      ERTS_BPF_GLOBAL_TRACE|
                      ERTS_BPF_TIME_TRACE|
                      ERTS_BPF_TIME_TRACE_ACTIVE);
        if (bp_flags == 0) {	/* Quick exit */
            return g->orig_instr;
        }
    }

    if (bp_flags & ERTS_BPF_LOCAL_TRACE) {
        ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0);
        (void) do_call_trace(c_p, info, reg, 1, bp->local_ms, erts_tracer_true);
    } else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) {
        (void) do_call_trace(c_p, info, reg, 0, bp->local_ms, erts_tracer_true);
    }

    if (bp_flags & ERTS_BPF_META_TRACE) {
        ErtsTracer old_tracer, new_tracer;

        old_tracer = erts_atomic_read_nob(&bp->meta_tracer->tracer);

        new_tracer = do_call_trace(c_p, info, reg, 1, bp->meta_ms, old_tracer);

        if (!ERTS_TRACER_COMPARE(new_tracer, old_tracer)) {
            /* The tracer changed; try to install the new one. We may
             * race with other schedulers here, so exactly one of the
             * two tracer references is released. */
            if ((erts_aint_t)old_tracer == erts_atomic_cmpxchg_acqb(
                    &bp->meta_tracer->tracer,
                    (erts_aint_t)new_tracer,
                    (erts_aint_t)old_tracer)) {
                ERTS_TRACER_CLEAR(&old_tracer);
            } else {
                ERTS_TRACER_CLEAR(&new_tracer);
            }
        }
    }

    if (bp_flags & ERTS_BPF_COUNT_ACTIVE) {
        erts_atomic_inc_nob(&bp->count->acount);
    }

    if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) {
        const ErtsCodeInfo* prev_info;
        ErtsCodePtr w;
        Eterm* E;

        prev_info = erts_trace_time_call(c_p, info, bp->time);
        E = c_p->stop;
        w = (ErtsCodePtr) E[0];
        /* Push a 2-word return_time_trace frame unless the return
         * address is already a trace-return instruction. */
        if (!(BeamIsReturnTrace(w) || BeamIsReturnToTrace(w) || BeamIsReturnTimeTrace(w))) {
            ASSERT(c_p->htop <= E && E <= c_p->hend);
            if (HeapWordsLeft(c_p) < 2) {
                (void) erts_garbage_collect(c_p, 2, reg, info->mfa.arity);
                ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
            }
            E = c_p->stop;

            ASSERT(c_p->htop <= E && E <= c_p->hend);

            E -= 2;
            E[1] = prev_info ? make_cp(erts_codeinfo_to_code(prev_info)) : NIL;
            E[0] = (Eterm) beam_return_time_trace;
            c_p->stop = E;
        }
    }

    if (bp_flags & ERTS_BPF_DEBUG) {
        return BeamOpCodeAddr(op_i_debug_breakpoint);
    } else {
        return g->orig_instr;
    }
}
767
/*
 * Run call tracing for the current call. The top continuation pointer
 * is saved and restored around erts_call_trace() (which sees the
 * fixed-up CP, see fixup_cp_before_trace()), and return_to_trace /
 * return_trace (or exception_trace) frames are pushed on the stack as
 * requested by the match-spec result flags. Returns the (possibly
 * updated) tracer.
 */
static ErtsTracer
do_call_trace(Process* c_p, ErtsCodeInfo* info, Eterm* reg,
              int local, Binary* ms, ErtsTracer tracer)
{
    int return_to_trace = 0;
    Uint32 flags;
    Uint need = 0;
    Eterm cp_save;
    Eterm* E = c_p->stop;

    cp_save = E[0];

    fixup_cp_before_trace(c_p, &return_to_trace);
    /* The main lock is released while the trace message is generated. */
    ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
    flags = erts_call_trace(c_p, info, ms, reg, local, &tracer);
    ERTS_REQ_PROC_MAIN_LOCK(c_p);

    /* Undo the CP fixup. */
    E[0] = cp_save;

    ASSERT(!ERTS_PROC_IS_EXITING(c_p));
    /* Work out how many words the requested trace frames need so a
     * single GC can make room for all of them. */
    if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
        need += 1;
    }
    if (flags & MATCH_SET_RX_TRACE) {
        need += 3 + size_object(tracer);
    }
    if (need) {
        ASSERT(c_p->htop <= E && E <= c_p->hend);
        if (HeapWordsLeft(c_p) < need) {
            (void) erts_garbage_collect(c_p, need, reg, info->mfa.arity);
            ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
            E = c_p->stop;
        }
    }
    if (flags & MATCH_SET_RETURN_TO_TRACE && !return_to_trace) {
        /* 1-word return_to_trace frame. */
        E -= 1;
        ASSERT(c_p->htop <= E && E <= c_p->hend);
        E[0] = (Eterm) beam_return_to_trace;
        c_p->stop = E;
    }
    if (flags & MATCH_SET_RX_TRACE) {
        /* 3-word return_trace/exception_trace frame:
         * [0] trace-return instruction, [1] CP to the traced MFA,
         * [2] copy of the tracer term. */
        E -= 3;
        c_p->stop = E;
        ASSERT(c_p->htop <= E && E <= c_p->hend);
        ASSERT(is_CP((Eterm) (UWord) (&info->mfa.module)));
        ASSERT(IS_TRACER_VALID(tracer));
        E[2] = copy_object(tracer, c_p);
        E[1] = make_cp(&info->mfa.module);
        E[0] = (Eterm) ((flags & MATCH_SET_EXCEPTION_TRACE) ?
                        beam_exception_trace : beam_return_trace);
        erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
        ERTS_TRACE_FLAGS(c_p) |= F_EXCEPTION_TRACE;
        erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
    }
    return tracer;
}
824
/*
 * Account a call into a call-time-traced function: charge the elapsed
 * time since the previous timestamp to the previously executing traced
 * function (if any), then record one call to 'info' in bdt's
 * per-scheduler hash. Returns the previously executing traced function
 * (NULL on the first instrumented call), which the caller uses for the
 * return_time_trace frame.
 */
const ErtsCodeInfo*
erts_trace_time_call(Process* c_p, const ErtsCodeInfo *info, BpDataTime* bdt)
{
    ErtsMonotonicTime time;
    process_breakpoint_time_t *pbt = NULL;
    bp_data_time_item_t sitem, *item = NULL;
    bp_time_hash_t *h = NULL;
    BpDataTime *pbdt = NULL;
    Uint32 six = acquire_bp_sched_ix(c_p);
    const ErtsCodeInfo* prev_info;

    ASSERT(c_p);
    ASSERT(erts_atomic32_read_acqb(&c_p->state) & (ERTS_PSFLG_RUNNING
                                                   | ERTS_PSFLG_DIRTY_RUNNING));

    /* get previous timestamp and breakpoint
     * from the process psd  */

    pbt = ERTS_PROC_GET_CALL_TIME(c_p);
    time = get_mtime(c_p);

    /* get pbt
     * timestamp = t0
     * lookup bdt from code
     * set ts0 to pbt
     * add call count here?
     */
    if (pbt == 0) {
        /* First call of process to instrumented function */
        pbt = Alloc(sizeof(process_breakpoint_time_t));
        (void) ERTS_PROC_SET_CALL_TIME(c_p, pbt);
        pbt->ci = NULL;
    }
    else if (pbt->ci) {
        /* add time to previous code */
        sitem.time = time - pbt->time;
        sitem.pid = c_p->common.id;
        sitem.count = 0;

        /* previous breakpoint */
        pbdt = get_time_break(pbt->ci);

        /* if null then the breakpoint was removed */
        if (pbdt) {
            h = &(pbdt->hash[six]);

            ASSERT(h);
            ASSERT(h->item);

            item = bp_hash_get(h, &sitem);
            if (!item) {
                item = bp_hash_put(h, &sitem);
            } else {
                BP_TIME_ADD(item, &sitem);
            }
        }
    }
    /*else caller is not call_time traced */

    /* Add count to this code */
    sitem.pid = c_p->common.id;
    sitem.count = 1;
    sitem.time = 0;

    /* this breakpoint */
    ASSERT(bdt);
    h = &(bdt->hash[six]);

    ASSERT(h);
    ASSERT(h->item);

    item = bp_hash_get(h, &sitem);
    if (!item) {
        item = bp_hash_put(h, &sitem);
    } else {
        BP_TIME_ADD(item, &sitem);
    }

    /* Remember where we are and when we got here, for the next
     * call/return to charge against. */
    prev_info = pbt->ci;
    pbt->ci = info;
    pbt->time = time;

    release_bp_sched_ix(six);
    return prev_info;
}
910
/*
 * Account a return from a call-time-traced function: charge the elapsed
 * time to the function we are returning from, then make prev_info (the
 * function we return into, saved in the return_time_trace frame) the
 * currently-charged function.
 */
void
erts_trace_time_return(Process *p, const ErtsCodeInfo *prev_info)
{
    ErtsMonotonicTime time;
    process_breakpoint_time_t *pbt = NULL;
    bp_data_time_item_t sitem, *item = NULL;
    bp_time_hash_t *h = NULL;
    BpDataTime *pbdt = NULL;
    Uint32 six = acquire_bp_sched_ix(p);

    ASSERT(p);
    ASSERT(erts_atomic32_read_acqb(&p->state) & (ERTS_PSFLG_RUNNING
                                                 | ERTS_PSFLG_DIRTY_RUNNING));

    /* get previous timestamp and breakpoint
     * from the process psd  */

    pbt = ERTS_PROC_GET_CALL_TIME(p);
    time = get_mtime(p);

    /* get pbt
     * lookup bdt from code
     * timestamp = t1
     * get ts0 from pbt
     * get item from bdt->hash[bp_hash(p->id)]
     * ack diff (t1, t0) to item
     */

    if (pbt) {

        /* might have been removed due to
         * trace_pattern(false)
         */
        ASSERT(pbt->ci);

        sitem.time = time - pbt->time;
        sitem.pid = p->common.id;
        sitem.count = 0;

        /* previous breakpoint */
        pbdt = get_time_break(pbt->ci);

        /* beware, the trace_pattern might have been removed */
        if (pbdt) {

            h = &(pbdt->hash[six]);

            ASSERT(h);
            ASSERT(h->item);

            item = bp_hash_get(h, &sitem);
            if (!item) {
                item = bp_hash_put(h, &sitem);
            } else {
                BP_TIME_ADD(item, &sitem);
            }

        }

        pbt->ci = prev_info;
        pbt->time = time;

    }

    release_bp_sched_ix(six);
}
977
978 int
erts_is_trace_break(const ErtsCodeInfo * ci,Binary ** match_spec_ret,int local)979 erts_is_trace_break(const ErtsCodeInfo *ci, Binary **match_spec_ret, int local)
980 {
981 Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
982 GenericBpData* bp = check_break(ci, flags);
983
984 if (bp) {
985 if (match_spec_ret) {
986 *match_spec_ret = bp->local_ms;
987 }
988 return 1;
989 }
990 return 0;
991 }
992
993 int
erts_is_mtrace_break(const ErtsCodeInfo * ci,Binary ** match_spec_ret,ErtsTracer * tracer_ret)994 erts_is_mtrace_break(const ErtsCodeInfo *ci, Binary **match_spec_ret,
995 ErtsTracer *tracer_ret)
996 {
997 GenericBpData* bp = check_break(ci, ERTS_BPF_META_TRACE);
998
999 if (bp) {
1000 if (match_spec_ret) {
1001 *match_spec_ret = bp->meta_ms;
1002 }
1003 if (tracer_ret) {
1004 *tracer_ret = erts_atomic_read_nob(&bp->meta_tracer->tracer);
1005 }
1006 return 1;
1007 }
1008 return 0;
1009 }
1010
1011 int
erts_is_count_break(const ErtsCodeInfo * ci,Uint * count_ret)1012 erts_is_count_break(const ErtsCodeInfo *ci, Uint *count_ret)
1013 {
1014 GenericBpData* bp = check_break(ci, ERTS_BPF_COUNT);
1015
1016 if (bp) {
1017 if (count_ret) {
1018 *count_ret = (Uint) erts_atomic_read_nob(&bp->count->acount);
1019 }
1020 return 1;
1021 }
1022 return 0;
1023 }
1024
erts_is_time_break(Process * p,const ErtsCodeInfo * ci,Eterm * retval)1025 int erts_is_time_break(Process *p, const ErtsCodeInfo *ci, Eterm *retval) {
1026 Uint i, ix;
1027 bp_time_hash_t hash;
1028 Uint size;
1029 Eterm *hp, t;
1030 bp_data_time_item_t *item = NULL;
1031 BpDataTime *bdt = get_time_break(ci);
1032
1033 if (bdt) {
1034 if (retval) {
1035 /* collect all hashes to one hash */
1036 bp_hash_init(&hash, 64);
1037 /* foreach threadspecific hash */
1038 for (i = 0; i < bdt->n; i++) {
1039 bp_data_time_item_t *sitem;
1040
1041 /* foreach hash bucket not NIL*/
1042 for(ix = 0; ix < bdt->hash[i].n; ix++) {
1043 item = &(bdt->hash[i].item[ix]);
1044 if (item->pid != NIL) {
1045 sitem = bp_hash_get(&hash, item);
1046 if (sitem) {
1047 BP_TIME_ADD(sitem, item);
1048 } else {
1049 bp_hash_put(&hash, item);
1050 }
1051 }
1052 }
1053 }
1054 /* *retval should be NIL or term from previous bif in export entry */
1055
1056 if (hash.used > 0) {
1057 size = (5 + 2)*hash.used;
1058 hp = HAlloc(p, size);
1059
1060 for(ix = 0; ix < hash.n; ix++) {
1061 item = &(hash.item[ix]);
1062 if (item->pid != NIL) {
1063 ErtsMonotonicTime sec, usec;
1064 usec = ERTS_MONOTONIC_TO_USEC(item->time);
1065 sec = usec / 1000000;
1066 usec = usec - sec*1000000;
1067 t = TUPLE4(hp, item->pid,
1068 make_small(item->count),
1069 make_small((Uint) sec),
1070 make_small((Uint) usec));
1071 hp += 5;
1072 *retval = CONS(hp, t, *retval); hp += 2;
1073 }
1074 }
1075 }
1076 bp_hash_delete(&hash);
1077 }
1078 return 1;
1079 }
1080
1081 return 0;
1082 }
1083
1084
/*
 * Look up a function by MFA in the current code of its module via a
 * linear scan of the module's function table. Returns the function's
 * code-info record, or NULL if the module is not loaded or has no
 * matching function.
 */
const ErtsCodeInfo *
erts_find_local_func(const ErtsCodeMFA *mfa) {
    const BeamCodeHeader *code_hdr;
    const ErtsCodeInfo *ci;
    Module *modp;
    Uint i,n;

    if ((modp = erts_get_module(mfa->module, erts_active_code_ix())) == NULL)
        return NULL;
    if ((code_hdr = modp->curr.code_hdr) == NULL)
        return NULL;
    n = (BeamInstr) code_hdr->num_functions;
    for (i = 0; i < n; ++i) {
        ci = code_hdr->functions[i];
#ifndef BEAMASM
        ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
#endif
        ASSERT(mfa->module == ci->mfa.module || is_nil(ci->mfa.module));
        if (mfa->function == ci->mfa.function &&
            mfa->arity == ci->mfa.arity) {
            return ci;
        }
    }
    return NULL;
}
1110
bp_hash_init(bp_time_hash_t * hash,Uint n)1111 static void bp_hash_init(bp_time_hash_t *hash, Uint n) {
1112 Uint size = sizeof(bp_data_time_item_t)*n;
1113 Uint i;
1114
1115 hash->n = n;
1116 hash->used = 0;
1117
1118 hash->item = (bp_data_time_item_t *)Alloc(size);
1119 sys_memzero(hash->item, size);
1120
1121 for(i = 0; i < n; ++i) {
1122 hash->item[i].pid = NIL;
1123 }
1124 }
1125
bp_hash_rehash(bp_time_hash_t * hash,Uint n)1126 static void bp_hash_rehash(bp_time_hash_t *hash, Uint n) {
1127 bp_data_time_item_t *item = NULL;
1128 Uint size = sizeof(bp_data_time_item_t)*n;
1129 Uint ix;
1130 Uint hval;
1131
1132 ASSERT(n > 0);
1133
1134 item = (bp_data_time_item_t *)Alloc(size);
1135 sys_memzero(item, size);
1136
1137 for( ix = 0; ix < n; ++ix) {
1138 item[ix].pid = NIL;
1139 }
1140
1141
1142 /* rehash, old hash -> new hash */
1143
1144 for( ix = 0; ix < hash->n; ix++) {
1145 if (hash->item[ix].pid != NIL) {
1146
1147 hval = ((hash->item[ix].pid) >> 4) % n; /* new n */
1148
1149 while (item[hval].pid != NIL) {
1150 hval = (hval + 1) % n;
1151 }
1152 item[hval].pid = hash->item[ix].pid;
1153 item[hval].count = hash->item[ix].count;
1154 item[hval].time = hash->item[ix].time;
1155 }
1156 }
1157
1158 Free(hash->item);
1159 hash->n = n;
1160 hash->item = item;
1161 }
bp_hash_get(bp_time_hash_t * hash,bp_data_time_item_t * sitem)1162 static ERTS_INLINE bp_data_time_item_t * bp_hash_get(bp_time_hash_t *hash, bp_data_time_item_t *sitem) {
1163 Eterm pid = sitem->pid;
1164 Uint hval = (pid >> 4) % hash->n;
1165 bp_data_time_item_t *item = NULL;
1166
1167 item = hash->item;
1168
1169 while (item[hval].pid != pid) {
1170 if (item[hval].pid == NIL) return NULL;
1171 hval = (hval + 1) % hash->n;
1172 }
1173
1174 return &(item[hval]);
1175 }
1176
bp_hash_put(bp_time_hash_t * hash,bp_data_time_item_t * sitem)1177 static ERTS_INLINE bp_data_time_item_t * bp_hash_put(bp_time_hash_t *hash, bp_data_time_item_t* sitem) {
1178 Uint hval;
1179 float r = 0.0;
1180 bp_data_time_item_t *item;
1181
1182 /* make sure that the hash is not saturated */
1183 /* if saturated, rehash it */
1184
1185 r = hash->used / (float) hash->n;
1186
1187 if (r > 0.7f) {
1188 bp_hash_rehash(hash, hash->n * 2);
1189 }
1190 /* Do hval after rehash */
1191 hval = (sitem->pid >> 4) % hash->n;
1192
1193 /* find free slot */
1194 item = hash->item;
1195
1196 while (item[hval].pid != NIL) {
1197 hval = (hval + 1) % hash->n;
1198 }
1199 item = &(hash->item[hval]);
1200
1201 item->pid = sitem->pid;
1202 item->time = sitem->time;
1203 item->count = sitem->count;
1204 hash->used++;
1205
1206 return item;
1207 }
1208
/* Release the slot array and reset the table to an empty state.
 * bp_hash_init() must be called again before the table is reused. */
static void bp_hash_delete(bp_time_hash_t *hash) {
    Free(hash->item);
    hash->item = NULL;
    hash->n = 0;
    hash->used = 0;
}
1215
/*
 * Maintain call_time trace accounting when a process is scheduled
 * in, out, or is exiting. Only processes that carry call_time state
 * in their PSD (set when a time breakpoint was hit) are affected.
 * The per-scheduler hash slot is bracketed by
 * acquire_bp_sched_ix()/release_bp_sched_ix().
 */
void erts_schedule_time_break(Process *p, Uint schedule) {
    process_breakpoint_time_t *pbt = NULL;
    bp_data_time_item_t sitem, *item = NULL;
    bp_time_hash_t *h = NULL;
    BpDataTime *pbdt = NULL;
    Uint32 six = acquire_bp_sched_ix(p);

    ASSERT(p);

    pbt = ERTS_PROC_GET_CALL_TIME(p);

    if (pbt) {

	switch(schedule) {
	case ERTS_BP_CALL_TIME_SCHEDULE_EXITING :
            /* Exiting: nothing to account here. */
	    break;
	case ERTS_BP_CALL_TIME_SCHEDULE_OUT :
	    /* When a process is scheduled _out_,
	     * timestamp it and add its delta to
	     * the previous breakpoint.
	     */

	    if (pbt->ci) {
		pbdt = get_time_break(pbt->ci);
		if (pbdt) {
                    /* Elapsed monotonic time since the process was
                     * last timestamped (scheduled in / hit the bp). */
		    sitem.time = get_mtime(p) - pbt->time;
		    sitem.pid = p->common.id;
		    sitem.count = 0;

                    /* Per-scheduler hash slot; no other scheduler
                     * touches index six while we hold it. */
		    h = &(pbdt->hash[six]);

		    ASSERT(h);
		    ASSERT(h->item);

		    item = bp_hash_get(h, &sitem);
		    if (!item) {
			item = bp_hash_put(h, &sitem);
		    } else {
			BP_TIME_ADD(item, &sitem);
		    }
		}
	    }
	    break;
	case ERTS_BP_CALL_TIME_SCHEDULE_IN :
	    /* When a process is scheduled _in_,
	     * timestamp it and remove the previous
	     * timestamp in the psd.
	     */
	    pbt->time = get_mtime(p);
	    break;
	default :
	    ASSERT(0);
	    /* will never happen */
	    break;
	}
    } /* pbt */

    release_bp_sched_ix(six);
}
1275
1276 /* *************************************************************************
1277 ** Local helpers
1278 */
1279
1280
1281 static void
set_break(BpFunctions * f,Binary * match_spec,Uint break_flags,enum erts_break_op count_op,ErtsTracer tracer)1282 set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
1283 enum erts_break_op count_op, ErtsTracer tracer)
1284 {
1285 Uint i;
1286 Uint n;
1287
1288 n = f->matched;
1289 for (i = 0; i < n; i++) {
1290 set_function_break(f->matching[i].ci,
1291 match_spec, break_flags,
1292 count_op, tracer);
1293 }
1294 }
1295
/*
 * Stage breakpoint data of the requested kind(s) on one function.
 * Operates on the staging breakpoint index; the change takes effect
 * when the staging index is later committed. For RESTART/PAUSE
 * operations on count/time breakpoints, only the active flag and
 * counters are touched; no new breakpoint is created.
 */
static void
set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags,
		   enum erts_break_op count_op, ErtsTracer tracer)
{
    GenericBp* g;
    GenericBpData* bp;
    Uint common;
    ErtsBpIndex ix = erts_staging_bp_ix();

    ERTS_LC_ASSERT(erts_has_code_write_permission());
    g = ci->u.gen_bp;
    if (g == 0) {
	int i;
	if (count_op == ERTS_BREAK_RESTART || count_op == ERTS_BREAK_PAUSE) {
	    /* Do not insert a new breakpoint */
	    return;
	}
	g = Alloc(sizeof(GenericBp));
        {
            const UWord *instr_word = (const UWord *)erts_codeinfo_to_code(ci);

#ifdef BEAMASM
            /* The orig_instr is only used in global tracing for BEAMASM and
             * there the address is located within the trampoline in the export
             * entry so we read it from there.
             *
             * For local tracing this value is patched in
             * erts_set_trace_pattern. */
            g->orig_instr = instr_word[2];
#else
            ERTS_CT_ASSERT(sizeof(UWord) == sizeof(BeamInstr));
            g->orig_instr = instr_word[0];
#endif
        }

	for (i = 0; i < ERTS_NUM_BP_IX; i++) {
	    g->data[i].flags = 0;
	}
	ci->u.gen_bp = g;
    }
    bp = &g->data[ix];

    /*
     * If we are changing an existing breakpoint, clean up old data.
     */

    common = break_flags & bp->flags;
    if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
	MatchSetUnref(bp->local_ms);
    } else if (common & ERTS_BPF_META_TRACE) {
	MatchSetUnref(bp->meta_ms);
	bp_meta_unref(bp->meta_tracer);
    } else if (common & ERTS_BPF_COUNT) {
        /* Existing count breakpoint: PAUSE deactivates it, anything
         * else reactivates it and resets the counter to zero. */
	if (count_op == ERTS_BREAK_PAUSE) {
	    bp->flags &= ~ERTS_BPF_COUNT_ACTIVE;
	} else {
	    bp->flags |= ERTS_BPF_COUNT_ACTIVE;
	    erts_atomic_set_nob(&bp->count->acount, 0);
	}
	ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
	return;
    } else if (common & ERTS_BPF_TIME_TRACE) {
	BpDataTime* bdt = bp->time;
	Uint i = 0;

        /* Existing time breakpoint: PAUSE deactivates it, anything
         * else reactivates it and clears all accumulated data by
         * rebuilding every per-scheduler hash table. */
	if (count_op == ERTS_BREAK_PAUSE) {
	    bp->flags &= ~ERTS_BPF_TIME_TRACE_ACTIVE;
	} else {
	    bp->flags |= ERTS_BPF_TIME_TRACE_ACTIVE;
	    for (i = 0; i < bdt->n; i++) {
		bp_hash_delete(&(bdt->hash[i]));
		bp_hash_init(&(bdt->hash[i]), 32);
	    }
	}
	ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
	return;
    }

    /*
     * Initialize the new breakpoint data.
     */

    if (break_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
	MatchSetRef(match_spec);
	bp->local_ms = match_spec;
    } else if (break_flags & ERTS_BPF_META_TRACE) {
	BpMetaTracer* bmt;
        ErtsTracer meta_tracer = erts_tracer_nil;
	MatchSetRef(match_spec);
	bp->meta_ms = match_spec;
	bmt = Alloc(sizeof(BpMetaTracer));
	erts_refc_init(&bmt->refc, 1);
        erts_tracer_update(&meta_tracer, tracer); /* copy tracer */
	erts_atomic_init_nob(&bmt->tracer, (erts_aint_t)meta_tracer);
	bp->meta_tracer = bmt;
    } else if (break_flags & ERTS_BPF_COUNT) {
	BpCount* bcp;

	ASSERT((bp->flags & ERTS_BPF_COUNT) == 0);
	bcp = Alloc(sizeof(BpCount));
	erts_refc_init(&bcp->refc, 1);
	erts_atomic_init_nob(&bcp->acount, 0);
	bp->count = bcp;
    } else if (break_flags & ERTS_BPF_TIME_TRACE) {
	BpDataTime* bdt;
	Uint i;

	ASSERT((bp->flags & ERTS_BPF_TIME_TRACE) == 0);
	bdt = Alloc(sizeof(BpDataTime));
	erts_refc_init(&bdt->refc, 1);
        /* One hash table per normal scheduler plus one shared slot
         * for dirty schedulers (protected by erts_dirty_bp_ix_mtx). */
	bdt->n = erts_no_schedulers + 1;
	bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
	for (i = 0; i < bdt->n; i++) {
	    bp_hash_init(&(bdt->hash[i]), 32);
	}
	bp->time = bdt;
    }

    bp->flags |= break_flags;
    ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
}
1417
1418 static void
clear_break(BpFunctions * f,Uint break_flags)1419 clear_break(BpFunctions* f, Uint break_flags)
1420 {
1421 Uint i;
1422 Uint n;
1423
1424 n = f->matched;
1425 for (i = 0; i < n; i++) {
1426 clear_function_break(f->matching[i].ci, break_flags);
1427 }
1428 }
1429
1430 static int
clear_function_break(const ErtsCodeInfo * ci,Uint break_flags)1431 clear_function_break(const ErtsCodeInfo *ci, Uint break_flags)
1432 {
1433 GenericBp* g;
1434 GenericBpData* bp;
1435 Uint common;
1436 ErtsBpIndex ix = erts_staging_bp_ix();
1437
1438 ERTS_LC_ASSERT(erts_has_code_write_permission());
1439
1440 if ((g = ci->u.gen_bp) == NULL) {
1441 return 1;
1442 }
1443
1444 bp = &g->data[ix];
1445 ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
1446 common = bp->flags & break_flags;
1447 bp->flags &= ~break_flags;
1448 if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
1449 MatchSetUnref(bp->local_ms);
1450 }
1451 if (common & ERTS_BPF_META_TRACE) {
1452 MatchSetUnref(bp->meta_ms);
1453 bp_meta_unref(bp->meta_tracer);
1454 }
1455 if (common & ERTS_BPF_COUNT) {
1456 ASSERT((bp->flags & ERTS_BPF_COUNT_ACTIVE) == 0);
1457 bp_count_unref(bp->count);
1458 }
1459 if (common & ERTS_BPF_TIME_TRACE) {
1460 ASSERT((bp->flags & ERTS_BPF_TIME_TRACE_ACTIVE) == 0);
1461 bp_time_unref(bp->time);
1462 }
1463
1464 ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
1465 return 1;
1466 }
1467
1468 static void
bp_meta_unref(BpMetaTracer * bmt)1469 bp_meta_unref(BpMetaTracer* bmt)
1470 {
1471 if (erts_refc_dectest(&bmt->refc, 0) <= 0) {
1472 ErtsTracer trc = erts_atomic_read_nob(&bmt->tracer);
1473 ERTS_TRACER_CLEAR(&trc);
1474 Free(bmt);
1475 }
1476 }
1477
1478 static void
bp_count_unref(BpCount * bcp)1479 bp_count_unref(BpCount* bcp)
1480 {
1481 if (erts_refc_dectest(&bcp->refc, 0) <= 0) {
1482 Free(bcp);
1483 }
1484 }
1485
1486 static void
bp_time_unref(BpDataTime * bdt)1487 bp_time_unref(BpDataTime* bdt)
1488 {
1489 if (erts_refc_dectest(&bdt->refc, 0) <= 0) {
1490 Uint i = 0;
1491
1492 for (i = 0; i < bdt->n; ++i) {
1493 bp_hash_delete(&(bdt->hash[i]));
1494 }
1495 Free(bdt->hash);
1496 Free(bdt);
1497 }
1498 }
1499
1500 static BpDataTime*
get_time_break(const ErtsCodeInfo * ci)1501 get_time_break(const ErtsCodeInfo *ci)
1502 {
1503 GenericBpData* bp = check_break(ci, ERTS_BPF_TIME_TRACE);
1504 return bp ? bp->time : 0;
1505 }
1506
1507 static GenericBpData*
check_break(const ErtsCodeInfo * ci,Uint break_flags)1508 check_break(const ErtsCodeInfo *ci, Uint break_flags)
1509 {
1510 GenericBp* g = ci->u.gen_bp;
1511
1512 #ifndef BEAMASM
1513 ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
1514 #endif
1515
1516 if (g) {
1517 GenericBpData* bp = &g->data[erts_active_bp_ix()];
1518
1519 ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
1520 if (bp->flags & break_flags) {
1521 return bp;
1522 }
1523 }
1524
1525 return 0;
1526 }
1527