1 /*
2 * %CopyrightBegin%
3 *
4 * Copyright Ericsson AB 1998-2020. All Rights Reserved.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * %CopyrightEnd%
19 */
20
21 /*
22 * Purpose: Basic debugging support.
23 */
24
25 #ifdef HAVE_CONFIG_H
26 # include "config.h"
27 #endif
28
29 #include "sys.h"
30 #include "erl_vm.h"
31 #include "global.h"
32 #include "erl_process.h"
33 #include "error.h"
34 #include "erl_driver.h"
35 #include "bif.h"
36 #include "big.h"
37 #include "external.h"
38 #include "beam_load.h"
39 #include "beam_bp.h"
40 #include "erl_binary.h"
41 #include "erl_thr_progress.h"
42 #include "erl_nfunc_sched.h"
43 #include "beam_catches.h"
44
45 #ifdef ARCH_64
46 # define HEXF "%016bpX"
47 #else
48 # define HEXF "%08bpX"
49 #endif
50 #define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
51
52 void dbg_bt(Process* p, Eterm* sp);
53 void dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg);
54
55 static int print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr);
56 static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif);
57 static BeamInstr* f_to_addr(BeamInstr* base, int op, BeamInstr* ap);
58 static BeamInstr* f_to_addr_packed(BeamInstr* base, int op, Sint32* ap);
59 static void print_byte_string(fmtfn_t to, void *to_arg, byte* str, Uint bytes);
60
61 BIF_RETTYPE
erts_debug_same_2(BIF_ALIST_2)62 erts_debug_same_2(BIF_ALIST_2)
63 {
64 return (BIF_ARG_1 == BIF_ARG_2) ? am_true : am_false;
65 }
66
67 BIF_RETTYPE
erts_debug_flat_size_1(BIF_ALIST_1)68 erts_debug_flat_size_1(BIF_ALIST_1)
69 {
70 Process* p = BIF_P;
71 Eterm term = BIF_ARG_1;
72 Uint size = size_object(term);
73
74 if (IS_USMALL(0, size)) {
75 BIF_RET(make_small(size));
76 } else {
77 Eterm* hp = HAlloc(p, BIG_UINT_HEAP_SIZE);
78 BIF_RET(uint_to_big(size, hp));
79 }
80 }
81
82 BIF_RETTYPE
erts_debug_size_shared_1(BIF_ALIST_1)83 erts_debug_size_shared_1(BIF_ALIST_1)
84 {
85 Process* p = BIF_P;
86 Eterm term = BIF_ARG_1;
87 Uint size = size_shared(term);
88
89 if (IS_USMALL(0, size)) {
90 BIF_RET(make_small(size));
91 } else {
92 Eterm* hp = HAlloc(p, BIG_UINT_HEAP_SIZE);
93 BIF_RET(uint_to_big(size, hp));
94 }
95 }
96
97 BIF_RETTYPE
erts_debug_copy_shared_1(BIF_ALIST_1)98 erts_debug_copy_shared_1(BIF_ALIST_1)
99 {
100 Process* p = BIF_P;
101 Eterm term = BIF_ARG_1;
102 Uint size;
103 Eterm* hp;
104 Eterm copy;
105 erts_shcopy_t info;
106 INITIALIZE_SHCOPY(info);
107
108 size = copy_shared_calculate(term, &info);
109 if (size > 0) {
110 hp = HAlloc(p, size);
111 }
112 copy = copy_shared_perform(term, size, &info, &hp, &p->off_heap);
113 DESTROY_SHCOPY(info);
114 BIF_RET(copy);
115 }
116
BIF_RETTYPE
erts_debug_breakpoint_2(BIF_ALIST_2)
{
    /*
     * Set (true) or clear (false) debug breakpoints on the functions
     * matching the {M,F,A} pattern in BIF_ARG_1, where any position may
     * be the wildcard '_'.  Returns the number of functions matched.
     */
    Process* p = BIF_P;
    Eterm MFA = BIF_ARG_1;
    Eterm boolean = BIF_ARG_2;
    Eterm* tp;
    ErtsCodeMFA mfa;
    int i;
    int specified = 0;          /* Number of leading non-wildcard positions. */
    Eterm res;
    BpFunctions f;

    if (boolean != am_true && boolean != am_false)
        goto error;

    if (is_not_tuple(MFA)) {
        goto error;
    }
    tp = tuple_val(MFA);
    if (*tp != make_arityval(3)) {
        goto error;
    }
    if (!is_atom(tp[1]) || !is_atom(tp[2]) ||
        (!is_small(tp[3]) && tp[3] != am_Underscore)) {
        goto error;
    }
    /* Count how many positions are specified before the first '_'. */
    for (i = 0; i < 3 && tp[i+1] != am_Underscore; i++, specified++) {
        /* Empty loop body */
    }
    /* Everything after the first wildcard must also be a wildcard
     * (e.g. {m, '_', 1} is rejected). */
    for (i = specified; i < 3; i++) {
        if (tp[i+1] != am_Underscore) {
            goto error;
        }
    }

    mfa.module = tp[1];
    mfa.function = tp[2];

    /* mfa.arity is deliberately left unset for a wildcard arity; it is
     * only consulted when specified == 3. */
    if (is_small(tp[3])) {
        mfa.arity = signed_val(tp[3]);
    }

    /* Breakpoints modify loaded code; serialize against other code
     * writers, yielding (and retrying this BIF) if someone else holds
     * the permission. */
    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_breakpoint_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }
    /* Block all other schedulers while patching code.  The main lock
     * must be released first to let thread progress happen. */
    erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
    erts_thr_progress_block();

    erts_bp_match_functions(&f, &mfa, specified);
    if (boolean == am_true) {
        erts_set_debug_break(&f);
        erts_install_breakpoints(&f);
        erts_commit_staged_bp();
    } else {
        erts_clear_debug_break(&f);
        erts_commit_staged_bp();
        erts_uninstall_breakpoints(&f);
    }
    erts_consolidate_bp_data(&f, 1);
    res = make_small(f.matched);
    erts_bp_free_matched_functions(&f);

    /* Undo the lock dance in reverse order. */
    erts_thr_progress_unblock();
    erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    return res;

 error:
    BIF_ERROR(p, BADARG);
}
189
#if 0 /* Kept for convenience when hard debugging. */
/*
 * Disassemble 'num' code words starting at 'I' and dump the result to
 * stderr.  Compiled out by default; enable the #if above to use it.
 */
void debug_dump_code(BeamInstr *I, int num)
{
    BeamInstr *code_ptr = I;
    BeamInstr *end = code_ptr + num;
    erts_dsprintf_buf_t *dsbufp;
    BeamInstr instr;
    int i;

    dsbufp = erts_create_tmp_dsbuf(0);
    while (code_ptr < end) {
	erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp, HEXF ": ", code_ptr);
	instr = (BeamInstr) code_ptr[0];
        /* Find the specific opcode this word encodes; print_op returns
         * the instruction's operand count, so step past it. */
	for (i = 0; i < NUM_SPECIFIC_OPS; i++) {
	    if (BeamIsOpCode(instr, i) && opc[i].name[0] != '\0') {
		code_ptr += print_op(ERTS_PRINT_DSBUF, (void *) dsbufp,
				     i, opc[i].sz-1, code_ptr) + 1;
		break;
	    }
	}
	if (i >= NUM_SPECIFIC_OPS) {
	    erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp,
		       "unknown " HEXF "\n", instr);
	    code_ptr++;
	}
    }
    dsbufp->str[dsbufp->str_len] = 0;
    erts_fprintf(stderr,"%s", dsbufp->str);
    erts_destroy_tmp_dsbuf(dsbufp);
}
#endif
221
222 BIF_RETTYPE
erts_debug_instructions_0(BIF_ALIST_0)223 erts_debug_instructions_0(BIF_ALIST_0)
224 {
225 int i = 0;
226 Uint needed = num_instructions * 2;
227 Eterm* hp;
228 Eterm res = NIL;
229
230 for (i = 0; i < num_instructions; i++) {
231 needed += 2*sys_strlen(opc[i].name);
232 }
233 hp = HAlloc(BIF_P, needed);
234 for (i = num_instructions-1; i >= 0; i--) {
235 Eterm s = erts_bld_string_n(&hp, 0, opc[i].name, sys_strlen(opc[i].name));
236 res = erts_bld_cons(&hp, 0, s, res);
237 }
238 return res;
239 }
240
BIF_RETTYPE
erts_debug_disassemble_1(BIF_ALIST_1)
{
    /*
     * Disassemble one instruction.  The argument is either an integer
     * code address or an {M,F,A} tuple.  Returns {NextAddr, Bin, MFA}
     * where Bin is the printed disassembly, 'false' when an address
     * does not fall inside any function, or 'undef' when an {M,F,A}
     * cannot be found.
     */
    Process* p = BIF_P;
    Eterm addr = BIF_ARG_1;
    erts_dsprintf_buf_t *dsbufp;
    Eterm* hp;
    Eterm* tp;
    Eterm bin;
    Eterm mfa;
    ErtsCodeMFA *cmfa = NULL;
    BeamCodeHeader* code_hdr;
    BeamInstr *code_ptr;
    BeamInstr instr;
    BeamInstr uaddr;
    Uint hsz;
    int i;

    if (term_to_UWord(addr, &uaddr)) {
        /* Raw code address: make sure it lies inside a loaded function. */
	code_ptr = (BeamInstr *) uaddr;
	if ((cmfa = find_function_from_pc(code_ptr)) == NULL) {
	    BIF_RET(am_false);
	}
    } else if (is_tuple(addr)) {
	ErtsCodeIndex code_ix;
	Module* modp;
	Eterm mod;
	Eterm name;
	Export* ep;
	Sint arity;
	int n;

	tp = tuple_val(addr);
	if (tp[0] != make_arityval(3)) {
	error:
	    BIF_ERROR(p, BADARG);
	}
	mod = tp[1];
	name = tp[2];
	if (!is_atom(mod) || !is_atom(name) || !is_small(tp[3])) {
	    goto error;
	}
	arity = signed_val(tp[3]);
	code_ix = erts_active_code_ix();
	modp = erts_get_module(mod, code_ix);

	/*
	 * Try the export entry first to allow disassembly of special functions
	 * such as erts_debug:apply/4. Then search for it in the module.
	 */
	if ((ep = erts_find_function(mod, name, arity, code_ix)) != NULL) {
	    /* XXX: add "&& ep->address != ep->code" condition?
	     * Consider a traced function.
	     * Its ep will have ep->address == ep->code.
	     * erts_find_function() will return the non-NULL ep.
	     * Below we'll try to derive a code_ptr from ep->address.
	     * But this code_ptr will point to the start of the Export,
	     * not the function's func_info instruction. BOOM !?
	     */
	    cmfa = erts_code_to_codemfa(ep->addressv[code_ix]);
	} else if (modp == NULL || (code_hdr = modp->curr.code_hdr) == NULL) {
	    BIF_RET(am_undef);
	} else {
            /* Linear search through the module's function table. */
	    n = code_hdr->num_functions;
	    for (i = 0; i < n; i++) {
		cmfa = &code_hdr->functions[i]->mfa;
		if (cmfa->function == name && cmfa->arity == arity) {
		    break;
		}
	    }
	    if (i == n) {
		BIF_RET(am_undef);
	    }
	}
        code_ptr = (BeamInstr*)erts_code_to_codeinfo(erts_codemfa_to_code(cmfa));
    } else {
	goto error;
    }

    /* Disassemble exactly one instruction into a dynamic string buffer. */
    dsbufp = erts_create_tmp_dsbuf(0);
    erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp, HEXF ": ", code_ptr);
    instr = (BeamInstr) code_ptr[0];
    for (i = 0; i < NUM_SPECIFIC_OPS; i++) {
	if (BeamIsOpCode(instr, i) && opc[i].name[0] != '\0') {
	    code_ptr += print_op(ERTS_PRINT_DSBUF, (void *) dsbufp,
				 i, opc[i].sz-1, code_ptr) + 1;
	    break;
	}
    }
    if (i >= NUM_SPECIFIC_OPS) {
	erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp,
		   "unknown " HEXF "\n", instr);
	code_ptr++;
    }
    if (i == op_call_nif) {
        /*
         * The rest of the code will not be executed. Don't disassemble any
         * more code in this function.
         */
        code_ptr = 0;
    }
    bin = new_binary(p, (byte *) dsbufp->str, dsbufp->str_len);
    erts_destroy_tmp_dsbuf(dsbufp);
    hsz = 4+4;                  /* Outer 3-tuple + MFA 3-tuple. */
    (void) erts_bld_uword(NULL, &hsz, (BeamInstr) code_ptr);
    hp = HAlloc(p, hsz);
    addr = erts_bld_uword(&hp, NULL, (BeamInstr) code_ptr);
    ASSERT(is_atom(cmfa->module) || is_nil(cmfa->module));
    ASSERT(is_atom(cmfa->function) || is_nil(cmfa->function));
    mfa = TUPLE3(hp, cmfa->module, cmfa->function,
                 make_small(cmfa->arity));
    hp += 4;
    return TUPLE3(hp, addr, bin, mfa);
}
355
356 BIF_RETTYPE
erts_debug_interpreter_size_0(BIF_ALIST_0)357 erts_debug_interpreter_size_0(BIF_ALIST_0)
358 {
359 int i;
360 BeamInstr low, high;
361
362 low = high = (BeamInstr) process_main;
363 for (i = 0; i < NUM_SPECIFIC_OPS; i++) {
364 BeamInstr a = BeamOpCodeAddr(i);
365 if (a > high) {
366 high = a;
367 }
368 }
369 return erts_make_integer(high - low, BIF_P);
370 }
371
372 void
dbg_bt(Process * p,Eterm * sp)373 dbg_bt(Process* p, Eterm* sp)
374 {
375 Eterm* stack = STACK_START(p);
376
377 while (sp < stack) {
378 if (is_CP(*sp)) {
379 ErtsCodeMFA* cmfa = find_function_from_pc(cp_val(*sp));
380 if (cmfa)
381 erts_fprintf(stderr,
382 HEXF ": %T:%T/%bpu\n",
383 &cmfa->module, cmfa->module,
384 cmfa->function, cmfa->arity);
385 }
386 sp++;
387 }
388 }
389
390 void
dbg_where(BeamInstr * addr,Eterm x0,Eterm * reg)391 dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg)
392 {
393 ErtsCodeMFA* cmfa = find_function_from_pc(addr);
394
395 if (cmfa == NULL) {
396 erts_fprintf(stderr, "???\n");
397 } else {
398 int arity;
399 int i;
400
401 arity = cmfa->arity;
402 erts_fprintf(stderr, HEXF ": %T:%T(", addr,
403 cmfa->module, cmfa->function);
404 for (i = 0; i < arity; i++)
405 erts_fprintf(stderr, i ? ", %T" : "%T", i ? reg[i] : x0);
406 erts_fprintf(stderr, ")\n");
407 }
408 }
409
/*
 * Print one instruction (opcode 'op' with 'size' operand words at
 * 'addr') to the given formatter.  Returns the instruction's operand
 * count, possibly grown by trailing variable-length data (jump tables,
 * map/tuple element lists) consumed below.
 */
static int
print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr)
{
    int i;
    BeamInstr tag;
    char* sign;
    char* start_prog; /* Start of program for packer. */
    char* prog; /* Current position in packer program. */
    BeamInstr stack[8]; /* Stack for packer. */
    BeamInstr* sp = stack; /* Points to next free position. */
    BeamInstr packed = 0; /* Accumulator for packed operations. */
    BeamInstr args[8]; /* Arguments for this instruction. */
    BeamInstr* ap; /* Pointer to arguments. */
    BeamInstr* unpacked; /* Unpacked arguments */
    BeamInstr* first_arg; /* First argument */

    start_prog = opc[op].pack;

    if (start_prog[0] == '\0') {
	/*
	 * There is no pack program.
	 * Avoid copying because instructions containing bignum operands
	 * are bigger than actually declared.
	 */
        addr++;
        ap = addr;
    } else {
#if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
        /* The opcode word itself may carry extra packed data. */
        BeamInstr instr_word = addr[0];
#endif
        addr++;

	/*
	 * Copy all arguments to a local buffer for the unpacking.
	 */

	ASSERT(size <= sizeof(args)/sizeof(args[0]));
	ap = args;
	for (i = 0; i < size; i++) {
	    *ap++ = addr[i];
	}

	/*
	 * Undo any packing done by the loader. This is easily done by running
	 * the packing program backwards and in reverse.
	 */

	prog = start_prog + sys_strlen(start_prog);
	while (start_prog < prog) {
	    prog--;
	    switch (*prog) {
	    case 'f':
	    case 'g':
	    case 'q':
		*ap++ = *--sp;
		break;
#ifdef ARCH_64
	    case '1':		/* Tightest shift */
		*ap++ = (packed & BEAM_TIGHTEST_MASK) << 3;
		packed >>= BEAM_TIGHTEST_SHIFT;
		break;
#endif
	    case '2':		/* Tight shift */
		*ap++ = packed & BEAM_TIGHT_MASK;
		packed >>= BEAM_TIGHT_SHIFT;
		break;
	    case '3':		/* Loose shift */
		*ap++ = packed & BEAM_LOOSE_MASK;
		packed >>= BEAM_LOOSE_SHIFT;
		break;
#ifdef ARCH_64
	    case '4':		/* Shift 32 steps */
		*ap++ = packed & BEAM_WIDE_MASK;
		packed >>= BEAM_WIDE_SHIFT;
		break;
#endif
	    case 'p':
		*sp++ = *--ap;
		break;
	    case 'P':
		packed = *--sp;
		break;
#if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
            case '#':       /* -1 */
            case '$':       /* -2 */
            case '%':       /* -3 */
            case '&':       /* -4 */
            case '\'':      /* -5 */
            case '(':       /* -6 */
                packed = (packed << BEAM_WIDE_SHIFT) | BeamExtraData(instr_word);
                break;
#endif
	    default:
		erts_exit(ERTS_ERROR_EXIT, "beam_debug: invalid packing op: %c\n", *prog);
	    }
	}
	ap = args;
    }

    first_arg = ap;

    /*
     * Print the name and all operands of the instructions.
     */

    erts_print(to, to_arg, "%s ", opc[op].name);
    sign = opc[op].sign;
    while (*sign) {
	switch (*sign) {
	case 'r':		/* x(0) */
	    erts_print(to, to_arg, "r(0)");
	    break;
	case 'x':		/* x(N) */
	    {
                /* Operand is a byte offset into the X register array. */
		Uint n = ap[0] / sizeof(Eterm);
		erts_print(to, to_arg, "x(%d)", n);
		ap++;
	    }
	    break;
	case 'y':		/* y(N) */
	    {
                /* Byte offset into the stack; skip the saved CP slot(s). */
		Uint n = ap[0] / sizeof(Eterm) - CP_SIZE;
		erts_print(to, to_arg, "y(%d)", n);
		ap++;
	    }
	    break;
	case 'n':		/* Nil */
	    erts_print(to, to_arg, "[]");
	    break;
	case 'S':               /* Register */
	    {
                /* Low bit distinguishes Y (stack) from X registers. */
		Uint reg_type = (*ap & 1) ? 'y' : 'x';
		Uint n = ap[0] / sizeof(Eterm);
		erts_print(to, to_arg, "%c(%d)", reg_type, n);
		ap++;
		break;
	    }
	case 's':		/* Any source (tagged constant or register) */
	    tag = loader_tag(*ap);
	    if (tag == LOADER_X_REG) {
		erts_print(to, to_arg, "x(%d)", loader_x_reg_index(*ap));
		ap++;
		break;
	    } else if (tag == LOADER_Y_REG) {
		erts_print(to, to_arg, "y(%d)", loader_y_reg_index(*ap) - CP_SIZE);
		ap++;
		break;
	    }
	    /*FALLTHROUGH*/
	case 'a':		/* Tagged atom */
	case 'i':		/* Tagged integer */
	case 'c':		/* Tagged constant */
	case 'q':		/* Tagged literal */
	    erts_print(to, to_arg, "%T", (Eterm) *ap);
	    ap++;
	    break;
	case 'A':
	    erts_print(to, to_arg, "%d", arityval( (Eterm) ap[0]));
	    ap++;
	    break;
	case 'd':		/* Destination (x(0), x(N), y(N)) */
	    if (*ap & 1) {
		erts_print(to, to_arg, "y(%d)",
			   *ap / sizeof(Eterm) - CP_SIZE);
	    } else {
		erts_print(to, to_arg, "x(%d)",
			   *ap / sizeof(Eterm));
	    }
	    ap++;
	    break;
	case 't':               /* Untagged integers */
	case 'I':
	case 'W':
            /* Some instructions embed pointers (fun entries, byte
             * strings) in untagged operands; special-case them so the
             * output is readable. */
	    switch (op) {
	    case op_i_make_fun_Wt:
                if (*sign == 'W') {
                    ErlFunEntry* fe = (ErlFunEntry *) *ap;
                    ErtsCodeMFA* cmfa = find_function_from_pc(fe->address);
		    erts_print(to, to_arg, "%T:%T/%bpu", cmfa->module,
                               cmfa->function, cmfa->arity);
                } else {
                    erts_print(to, to_arg, "%d", *ap);
                }
                break;
	    case op_i_bs_match_string_xfWW:
	    case op_i_bs_match_string_yfWW:
                if (ap - first_arg < 3) {
                    erts_print(to, to_arg, "%d", *ap);
                } else {
                    Uint bits = ap[-1];
                    Uint bytes = (bits+7)/8;
                    byte* str = (byte *) *ap;
                    print_byte_string(to, to_arg, str, bytes);
                }
                break;
	    case op_bs_put_string_WW:
                if (ap - first_arg == 0) {
                    erts_print(to, to_arg, "%d", *ap);
                } else {
                    Uint bytes = ap[-1];
                    byte* str = (byte *) ap[0];
                    print_byte_string(to, to_arg, str, bytes);
                }
                break;
	    default:
		erts_print(to, to_arg, "%d", *ap);
	    }
	    ap++;
	    break;
	case 'f':		/* Destination label */
            switch (op) {
            case op_catch_yf:
                erts_print(to, to_arg, "f(" HEXF ")", catch_pc((BeamInstr)*ap));
                break;
            default:
                {
                    BeamInstr* target = f_to_addr(addr, op, ap);
                    ErtsCodeMFA* cmfa = find_function_from_pc(target);
                    /* Prefer a symbolic M:F/A when the label is a
                     * function entry point. */
                    if (!cmfa || erts_codemfa_to_code(cmfa) != target) {
                        erts_print(to, to_arg, "f(" HEXF ")", target);
                    } else {
                        erts_print(to, to_arg, "%T:%T/%bpu", cmfa->module,
                                   cmfa->function, cmfa->arity);
                    }
                    ap++;
                }
                break;
            }
            break;
	case 'p':		/* Pointer (to label) */
	    {
                BeamInstr* target = f_to_addr(addr, op, ap);
                erts_print(to, to_arg, "p(" HEXF ")", target);
		ap++;
	    }
	    break;
	case 'j':		/* Pointer (to label) */
            if (*ap == 0) {
                erts_print(to, to_arg, "j(0)");
            } else {
                BeamInstr* target = f_to_addr(addr, op, ap);
                erts_print(to, to_arg, "j(" HEXF ")", target);
            }
	    ap++;
	    break;
	case 'e':		/* Export entry */
	    {
		Export* ex = (Export *) *ap;
		erts_print(to, to_arg,
			   "%T:%T/%bpu", (Eterm) ex->info.mfa.module,
                           (Eterm) ex->info.mfa.function,
                           ex->info.mfa.arity);
		ap++;
	    }
	    break;
	case 'F':		/* Function definition */
	    break;
	case 'b':
	    print_bif_name(to, to_arg, (BifFunction) *ap);
	    ap++;
	    break;
	case 'P':	/* Byte offset into tuple (see beam_load.c) */
	case 'Q':	/* Like 'P', but packable */
	    erts_print(to, to_arg, "%d", (*ap / sizeof(Eterm)) - 1);
	    ap++;
	    break;
	case 'l':		/* fr(N) */
	    erts_print(to, to_arg, "fr(%d)", ap[0] / sizeof(FloatDef));
	    ap++;
	    break;
	default:
	    erts_print(to, to_arg, "???");
	    ap++;
	    break;
	}
	erts_print(to, to_arg, " ");
	sign++;
    }

    /*
     * Print more information about certain instructions.
     */

    unpacked = ap;
    ap = addr + size;

    /*
     * In the code below, never use ap[-1], ap[-2], ...
     * (will not work if the arguments have been packed).
     *
     * Instead use unpacked[-1], unpacked[-2], ...
     */
    switch (op) {
    case op_i_select_val_lins_xfI:
    case op_i_select_val_lins_yfI:
    case op_i_select_val_bins_xfI:
    case op_i_select_val_bins_yfI:
	{
            /* n values followed by a table of n 32-bit jump offsets. */
	    int n = unpacked[-1];
	    int ix = n;
            Sint32* jump_tab = (Sint32 *)(ap + n);

	    while (ix--) {
		erts_print(to, to_arg, "%T ", (Eterm) ap[0]);
		ap++;
		size++;
	    }
	    ix = n;
	    while (ix--) {
                BeamInstr* target = f_to_addr_packed(addr, op, jump_tab);
                erts_print(to, to_arg, "f(" HEXF ") ", target);
                jump_tab++;
	    }
            /* Two 32-bit offsets per code word. */
            size += (n+1) / 2;
	}
	break;
    case op_i_select_tuple_arity_xfI:
    case op_i_select_tuple_arity_yfI:
        {
	    int n = unpacked[-1];
	    int ix = n - 1; /* without sentinel */
            Sint32* jump_tab = (Sint32 *)(ap + n);

	    while (ix--) {
		Uint arity = arityval(ap[0]);
		erts_print(to, to_arg, "{%d} ", arity, ap[1]);
		ap++;
		size++;
	    }
	    /* print sentinel */
	    erts_print(to, to_arg, "{%T} ", ap[0], ap[1]);
	    ap++;
	    size++;
	    ix = n;
	    while (ix--) {
                BeamInstr* target = f_to_addr_packed(addr, op, jump_tab);
                erts_print(to, to_arg, "f(" HEXF ") ", target);
                jump_tab++;
	    }
            size += (n+1) / 2;
        }
        break;
    case op_i_select_val2_xfcc:
    case op_i_select_val2_yfcc:
    case op_i_select_tuple_arity2_xfAA:
    case op_i_select_tuple_arity2_yfAA:
        {
            Sint32* jump_tab = (Sint32 *) ap;
            BeamInstr* target;
            int i;

            for (i = 0; i < 2; i++) {
                target = f_to_addr_packed(addr, op, jump_tab++);
                erts_print(to, to_arg, "f(" HEXF ") ", target);
            }
            size += 1;
        }
        break;
    case op_i_jump_on_val_xfIW:
    case op_i_jump_on_val_yfIW:
	{
	    int n = unpacked[-2];
            Sint32* jump_tab = (Sint32 *) ap;

            size += (n+1) / 2;
            while (n-- > 0) {
                BeamInstr* target = f_to_addr_packed(addr, op, jump_tab);
                erts_print(to, to_arg, "f(" HEXF ") ", target);
                jump_tab++;
	    }
	}
	break;
    case op_i_jump_on_val_zero_xfI:
    case op_i_jump_on_val_zero_yfI:
	{
	    int n = unpacked[-1];
            Sint32* jump_tab = (Sint32 *) ap;

            size += (n+1) / 2;
            while (n-- > 0) {
                BeamInstr* target = f_to_addr_packed(addr, op, jump_tab);
                erts_print(to, to_arg, "f(" HEXF ") ", target);
                jump_tab++;
	    }
	}
	break;
    case op_put_tuple2_xI:
    case op_put_tuple2_yI:
    case op_new_map_dtI:
    case op_update_map_assoc_xdtI:
    case op_update_map_assoc_ydtI:
    case op_update_map_assoc_cdtI:
    case op_update_map_exact_xjdtI:
    case op_update_map_exact_yjdtI:
	{
            /* n trailing source operands (registers or constants). */
	    int n = unpacked[-1];

	    while (n > 0) {
		switch (loader_tag(ap[0])) {
		case LOADER_X_REG:
		    erts_print(to, to_arg, " x(%d)", loader_x_reg_index(ap[0]));
		    break;
		case LOADER_Y_REG:
		    erts_print(to, to_arg, " y(%d)", loader_y_reg_index(ap[0]) - CP_SIZE);
		    break;
		default:
		    erts_print(to, to_arg, " %T", (Eterm) ap[0]);
		    break;
		}
		ap++, size++, n--;
	    }
	}
	break;
    case op_i_new_small_map_lit_dtq:
        {
            /* Element count comes from the arity of the keys tuple. */
            Eterm *tp = tuple_val(unpacked[-1]);
            int n = arityval(*tp);

            while (n > 0) {
                switch (loader_tag(ap[0])) {
                case LOADER_X_REG:
                    erts_print(to, to_arg, " x(%d)", loader_x_reg_index(ap[0]));
                    break;
                case LOADER_Y_REG:
		    erts_print(to, to_arg, " y(%d)", loader_y_reg_index(ap[0]) - CP_SIZE);
                    break;
                default:
		    erts_print(to, to_arg, " %T", (Eterm) ap[0]);
                    break;
                }
                ap++, size++, n--;
            }
        }
        break;
    case op_i_get_map_elements_fsI:
	{
	    int n = unpacked[-1];

	    while (n > 0) {
                /* Triples of (key-hash, key, destination); the hash is
                 * printed raw, the other two symbolically. */
		if (n % 3 == 1) {
		    erts_print(to, to_arg, " %X", ap[0]);
		} else {
		    switch (loader_tag(ap[0])) {
		    case LOADER_X_REG:
			erts_print(to, to_arg, " x(%d)", loader_x_reg_index(ap[0]));
			break;
		    case LOADER_Y_REG:
			erts_print(to, to_arg, " y(%d)", loader_y_reg_index(ap[0]) - CP_SIZE);
			break;
		    default:
			erts_print(to, to_arg, " %T", (Eterm) ap[0]);
			break;
		    }
		}
		ap++, size++, n--;
	    }
	}
	break;
    }
    erts_print(to, to_arg, "\n");

    return size;
}
873
print_bif_name(fmtfn_t to,void * to_arg,BifFunction bif)874 static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif)
875 {
876 int i;
877
878 for (i = 0; i < BIF_SIZE; i++) {
879 if (bif == bif_table[i].f) {
880 break;
881 }
882 }
883 if (i == BIF_SIZE) {
884 erts_print(to, to_arg, "b(%d)", (Uint) bif);
885 } else {
886 Eterm name = bif_table[i].name;
887 unsigned arity = bif_table[i].arity;
888 erts_print(to, to_arg, "%T/%u", name, arity);
889 }
890 }
891
f_to_addr(BeamInstr * base,int op,BeamInstr * ap)892 static BeamInstr* f_to_addr(BeamInstr* base, int op, BeamInstr* ap)
893 {
894 return base - 1 + opc[op].adjust + (Sint32) *ap;
895 }
896
f_to_addr_packed(BeamInstr * base,int op,Sint32 * ap)897 static BeamInstr* f_to_addr_packed(BeamInstr* base, int op, Sint32* ap)
898 {
899 return base - 1 + opc[op].adjust + *ap;
900 }
901
print_byte_string(fmtfn_t to,void * to_arg,byte * str,Uint bytes)902 static void print_byte_string(fmtfn_t to, void *to_arg, byte* str, Uint bytes)
903 {
904 Uint i;
905
906 for (i = 0; i < bytes; i++) {
907 erts_print(to, to_arg, "%02X", str[i]);
908 }
909 }
910
911 /*
912 * Dirty BIF testing.
913 *
914 * The erts_debug:dirty_cpu/2, erts_debug:dirty_io/1, and
915 * erts_debug:dirty/3 BIFs are used by the dirty_bif_SUITE
916 * test suite.
917 */
918
919 static int ms_wait(Process *c_p, Eterm etimeout, int busy);
920 static int dirty_send_message(Process *c_p, Eterm to, Eterm tag);
921 static BIF_RETTYPE dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I);
922
923 /*
924 * erts_debug:dirty_cpu/2 is statically determined to execute on
925 * a dirty CPU scheduler (see erts_dirty_bif.tab).
926 */
BIF_RETTYPE
erts_debug_dirty_cpu_2(BIF_ALIST_2)
{
    /* Thin wrapper; all behavior lives in dirty_test(). */
    return dirty_test(BIF_P, am_dirty_cpu, BIF_ARG_1, BIF_ARG_2, BIF_I);
}
932
933 /*
934 * erts_debug:dirty_io/2 is statically determined to execute on
935 * a dirty I/O scheduler (see erts_dirty_bif.tab).
936 */
BIF_RETTYPE
erts_debug_dirty_io_2(BIF_ALIST_2)
{
    /* Thin wrapper; all behavior lives in dirty_test(). */
    return dirty_test(BIF_P, am_dirty_io, BIF_ARG_1, BIF_ARG_2, BIF_I);
}
942
943 /*
944 * erts_debug:dirty/3 executes on a normal scheduler.
945 */
BIF_RETTYPE
erts_debug_dirty_3(BIF_ALIST_3)
{
    /*
     * Dispatch the dirty test on the scheduler type given by BIF_ARG_1:
     * run inline for 'normal', or reschedule onto the corresponding
     * dirty scheduler for 'dirty_cpu'/'dirty_io'.
     */
    Eterm argv[2];
    switch (BIF_ARG_1) {
    case am_normal:
	return dirty_test(BIF_P, am_normal, BIF_ARG_2, BIF_ARG_3, BIF_I);
    case am_dirty_cpu:
	argv[0] = BIF_ARG_2;
	argv[1] = BIF_ARG_3;
	return erts_schedule_bif(BIF_P,
				 argv,
				 BIF_I,
				 erts_debug_dirty_cpu_2,
				 ERTS_SCHED_DIRTY_CPU,
				 am_erts_debug,
				 am_dirty_cpu,
				 2);
    case am_dirty_io:
	argv[0] = BIF_ARG_2;
	argv[1] = BIF_ARG_3;
	return erts_schedule_bif(BIF_P,
				 argv,
				 BIF_I,
				 erts_debug_dirty_io_2,
				 ERTS_SCHED_DIRTY_IO,
				 am_erts_debug,
				 am_dirty_io,
				 2);
    default:
	BIF_ERROR(BIF_P, EXC_BADARG);
    }
}
979
980
/*
 * Common implementation of the erts_debug dirty-scheduler test BIFs.
 * 'type' tells which scheduler class we are (supposed to be) running
 * on; 'arg1' selects the test operation and 'arg2' is its operand.
 * Used only by the dirty_bif_SUITE test suite.
 */
static BIF_RETTYPE
dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I)
{
    BIF_RETTYPE ret;
    if (am_scheduler == arg1) {
        /* {scheduler, type}: report which scheduler class we run on. */
	ErtsSchedulerData *esdp;
	if (arg2 != am_type) 
	    goto badarg;
	esdp = erts_proc_sched_data(c_p);
	if (!esdp)
            goto scheduler_type_error;
      
        switch (esdp->type) {
        case ERTS_SCHED_NORMAL:
	    ERTS_BIF_PREP_RET(ret, am_normal);
            break;
        case ERTS_SCHED_DIRTY_CPU:
	    ERTS_BIF_PREP_RET(ret, am_dirty_cpu);
            break;
        case ERTS_SCHED_DIRTY_IO:
	    ERTS_BIF_PREP_RET(ret, am_dirty_io);
            break;
        default:
        scheduler_type_error:
	    ERTS_BIF_PREP_RET(ret, am_error);
            break;
        }
    }
    else if (am_error == arg1) {
        /* {error, Reason}: raise the requested exception class. */
	switch (arg2) {
	case am_notsup:
	    ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOTSUP);
	    break;
	case am_undef:
	    ERTS_BIF_PREP_ERROR(ret, c_p, EXC_UNDEF);
	    break;
	case am_badarith:
	    ERTS_BIF_PREP_ERROR(ret, c_p, EXC_BADARITH);
	    break;
	case am_noproc:
	    ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOPROC);
	    break;
	case am_system_limit:
	    ERTS_BIF_PREP_ERROR(ret, c_p, SYSTEM_LIMIT);
	    break;
	case am_badarg:
	default:
	    goto badarg;
	}
    }
    else if (am_copy == arg1) {
        /* {copy, Term}: stress heap allocation by copying the term
         * 1000 times and consing the copies into a list. */
	int i;
	Eterm res;

	for (res = NIL, i = 0; i < 1000; i++) {
	    Eterm *hp, sz;
	    Eterm cpy;
	    /* We do not want this to be optimized,
	       but rather the opposite... */
	    sz = size_object(arg2);
	    hp = HAlloc(c_p, sz);
	    cpy = copy_struct(arg2, sz, &hp, &c_p->off_heap);
	    hp = HAlloc(c_p, 2);
	    res = CONS(hp, cpy, res);
	}

	ERTS_BIF_PREP_RET(ret, res);
    }
    else if (am_send == arg1) {
        /* {send, Pid}: send 'ok' to the given process. */
	dirty_send_message(c_p, arg2, am_ok);
	ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("wait", arg1)) {
        /* {wait, Ms}: sleep (busy-wait on dirty CPU schedulers). */
	if (!ms_wait(c_p, arg2, type == am_dirty_cpu))
	    goto badarg;
	ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("reschedule", arg1)) {
	/*
	 * Reschedule operation after decrement of two until we reach
	 * zero. Switch between dirty scheduler types when 'n' is
	 * evenly divided by 4. If the initial value wasn't evenly
	 * dividable by 2, throw badarg exception.
	 */
	Eterm next_type;
	Sint n;
	if (!term_to_Sint(arg2, &n) || n < 0)
	    goto badarg;
	if (n == 0)
	    ERTS_BIF_PREP_RET(ret, am_ok);
	else {
	    Eterm argv[3];
	    Eterm eint = erts_make_integer((Uint) (n - 2), c_p);
	    if (n % 4 != 0)
		next_type = type;
	    else {
		switch (type) {
		case am_dirty_cpu: next_type = am_dirty_io; break;
		case am_dirty_io: next_type = am_normal; break;
		case am_normal: next_type = am_dirty_cpu; break;
		default: goto badarg;
		}
	    }
	    switch (next_type) {
	    case am_dirty_io:
		argv[0] = arg1;
		argv[1] = eint;
		ret = erts_schedule_bif(c_p,
					argv,
					I,
					erts_debug_dirty_io_2,
					ERTS_SCHED_DIRTY_IO,
					am_erts_debug,
					am_dirty_io,
					2);
		break;
	    case am_dirty_cpu:
		argv[0] = arg1;
		argv[1] = eint;
		ret = erts_schedule_bif(c_p,
					argv,
					I,
					erts_debug_dirty_cpu_2,
					ERTS_SCHED_DIRTY_CPU,
					am_erts_debug,
					am_dirty_cpu,
					2);
		break;
	    case am_normal:
		argv[0] = am_normal;
		argv[1] = arg1;
		argv[2] = eint;
		ret = erts_schedule_bif(c_p,
					argv,
					I,
					erts_debug_dirty_3,
					ERTS_SCHED_NORMAL,
					am_erts_debug,
					am_dirty,
					3);
		break;
	    default:
		goto badarg;
	    }
	}
    }
    else if (ERTS_IS_ATOM_STR("ready_wait6_done", arg1)) {
        /* Handshake: send 'ready', sleep six seconds, send 'done'. */
	ERTS_DECL_AM(ready);
	ERTS_DECL_AM(done);
	dirty_send_message(c_p, arg2, AM_ready);
	ms_wait(c_p, make_small(6000), 0);
	dirty_send_message(c_p, arg2, AM_done);
	ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("alive_waitexiting", arg1)) {
        /* Stay on the dirty scheduler while the (real) process is being
         * killed, then verify that heap allocation still works. */
	Process *real_c_p = erts_proc_shadow2real(c_p);
	Eterm *hp, *hp2;
	Uint sz;
	int i;
	ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
        int dirty_io = esdp->type == ERTS_SCHED_DIRTY_IO;

	if (ERTS_PROC_IS_EXITING(real_c_p))
	    goto badarg;
	dirty_send_message(c_p, arg2, am_alive);

	/* Wait until dead */
	while (!ERTS_PROC_IS_EXITING(real_c_p)) {
            if (dirty_io)
                ms_wait(c_p, make_small(100), 0);
            else
                erts_thr_yield();
        }

	ms_wait(c_p, make_small(1000), 0);

	/* Should still be able to allocate memory */
	hp = HAlloc(c_p, 3); /* Likely on heap */
	sz = 10000;
	hp2 = HAlloc(c_p, sz); /* Likely in heap fragment */
	*hp2 = make_pos_bignum_header(sz);
	for (i = 1; i < sz; i++)
	    hp2[i] = (Eterm) 4711;
	ERTS_BIF_PREP_RET(ret, TUPLE2(hp, am_ok, make_big(hp2)));
    }
    else {
    badarg:
	ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
    }
    return ret;
}
1172
1173
/*
 * Send the message {Tag, self()} to process 'to' from a (possibly
 * dirty) scheduler.  'tag' must be an immediate term.  Returns 1 on
 * success and 0 when the receiver does not exist.
 */
static int
dirty_send_message(Process *c_p, Eterm to, Eterm tag)
{
    ErtsProcLocks c_p_locks, rp_locks;
    Process *rp, *real_c_p;
    Eterm msg, *hp;
    ErlOffHeap *ohp;
    ErtsMessage *mp;

    ASSERT(is_immed(tag));

    /* On a dirty scheduler c_p may be a shadow process that holds no
     * locks; only the real process holds its main lock. */
    real_c_p = erts_proc_shadow2real(c_p);
    if (real_c_p != c_p)
	c_p_locks = 0;
    else
	c_p_locks = ERTS_PROC_LOCK_MAIN;

    ASSERT(real_c_p->common.id == c_p->common.id);

    rp = erts_pid2proc_opt(real_c_p, c_p_locks,
			   to, 0,
			   ERTS_P2P_FLG_INC_REFC);

    if (!rp)
	return 0;

    rp_locks = 0;
    mp = erts_alloc_message_heap(rp, &rp_locks, 3, &hp, &ohp);

    msg = TUPLE2(hp, tag, c_p->common.id);
    ERL_MESSAGE_TOKEN(mp) = am_undefined;
    erts_queue_proc_message(c_p, rp, rp_locks, mp, msg);

    /* Don't release the main lock when sending to ourselves. */
    if (rp == real_c_p)
	rp_locks &= ~c_p_locks;
    if (rp_locks)
	erts_proc_unlock(rp, rp_locks);

    erts_proc_dec_refc(rp);

    return 1;
}
1216
/*
 * Wait 'etimeout' milliseconds: busy-spinning when 'busy' is set,
 * otherwise blocking in the OS (Sleep/select).  Returns 0 when
 * 'etimeout' is not an integer, otherwise 1.
 */
static int
ms_wait(Process *c_p, Eterm etimeout, int busy)
{
    ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
    ErtsMonotonicTime time, timeout_time;
    Sint64 ms;

    if (!term_to_Sint64(etimeout, &ms))
	return 0;

    time = erts_get_monotonic_time(esdp);

    /* A negative timeout degenerates to "no wait". */
    if (ms < 0)
	timeout_time = time;
    else
	timeout_time = time + ERTS_MSEC_TO_MONOTONIC(ms);

    while (time < timeout_time) {
	if (busy)
	    erts_thr_yield();
	else {
	    ErtsMonotonicTime timeout = timeout_time - time;

#ifdef __WIN32__
	    Sleep((DWORD) ERTS_MONOTONIC_TO_MSEC(timeout));
#else
	    {
		ErtsMonotonicTime to = ERTS_MONOTONIC_TO_USEC(timeout);
		struct timeval tv;

		tv.tv_sec = (long) to / (1000*1000);
		tv.tv_usec = (long) to % (1000*1000);

                /* select() with no fds is a portable sub-second sleep. */
		select(0, NULL, NULL, NULL, &tv);
	    }
#endif
	}

	time = erts_get_monotonic_time(esdp);
    }
    return 1;
}
1259
1260
1261 # define ERTS_STACK_LIMIT ((char *) ethr_get_stacklimit())
1262
1263 /*
1264 * The below functions is for testing of the stack
1265 * limit functionality. They are intentionally
1266 * written body recursive in order to prevent
1267 * last call optimization...
1268 */
1269
UWord
erts_check_stack_recursion_downwards(char *start_c)
{
    /* Recurse until the stack grows down to within 1 KB of the limit;
     * returns the stack depth consumed, in bytes.  Intentionally body
     * recursive (see comment above); erts_ptr_id() keeps the compiler
     * from applying last-call optimization. */
    char *limit = ERTS_STACK_LIMIT;
    char c;
    UWord res;
    if (erts_check_below_limit(&c, limit + 1024))
        return (char *) erts_ptr_id(start_c) - (char *) erts_ptr_id(&c);
    res = erts_check_stack_recursion_downwards(start_c);
    erts_ptr_id(&c);
    return res;
}
1282
UWord
erts_check_stack_recursion_upwards(char *start_c)
{
    /* Mirror of erts_check_stack_recursion_downwards() for platforms
     * where the stack grows upwards.  Intentionally body recursive;
     * erts_ptr_id() defeats last-call optimization. */
    char *limit = ERTS_STACK_LIMIT;
    char c;
    UWord res;
    if (erts_check_above_limit(&c, limit - 1024))
        return (char *) erts_ptr_id(&c) - (char *) erts_ptr_id(start_c);
    res = erts_check_stack_recursion_upwards(start_c);
    erts_ptr_id(&c);
    return res;
}
1295
1296 int
erts_is_above_stack_limit(char * ptr)1297 erts_is_above_stack_limit(char *ptr)
1298 {
1299 return (char *) ptr > ERTS_STACK_LIMIT;
1300 }
1301