1 /*
2 * %CopyrightBegin%
3 *
4 * Copyright Ericsson AB 1999-2020. All Rights Reserved.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * %CopyrightEnd%
19 */
20
21 #ifdef HAVE_CONFIG_H
22 # include "config.h"
23 #endif
24
25 #define ERTS_WANT_MEM_MAPPERS
26 #include "sys.h"
27 #include "erl_vm.h"
28 #include "global.h"
29 #include "erl_process.h"
30 #include "error.h"
31 #include "erl_driver.h"
32 #include "erl_nif.h"
33 #include "bif.h"
34 #include "big.h"
35 #include "erl_version.h"
36 #include "erl_compile_flags.h"
37 #include "erl_db_util.h"
38 #include "erl_message.h"
39 #include "erl_binary.h"
40 #include "erl_db.h"
41 #include "erl_mtrace.h"
42 #include "dist.h"
43 #include "erl_gc.h"
44 #include "erl_cpu_topology.h"
45 #include "erl_async.h"
46 #include "erl_thr_progress.h"
47 #include "erl_bif_unique.h"
48 #include "erl_map.h"
49 #include "erl_check_io.h"
50 #define ERTS_PTAB_WANT_DEBUG_FUNCS__
51 #include "erl_ptab.h"
52 #include "erl_time.h"
53 #include "erl_proc_sig_queue.h"
54 #include "erl_alloc_util.h"
55 #include "erl_global_literals.h"
56
57 #ifdef ERTS_ENABLE_LOCK_COUNT
58 #include "erl_lock_count.h"
59 #endif
60
61 #ifdef VALGRIND
62 # include <valgrind/valgrind.h>
63 # include <valgrind/memcheck.h>
64 #endif
65 #ifdef ADDRESS_SANITIZER
66 # include <sanitizer/lsan_interface.h>
67 #endif
68
69 static Export* alloc_info_trap = NULL;
70 static Export* alloc_sizes_trap = NULL;
71 static Export* gather_io_bytes_trap = NULL;
72
73 static Export *gather_sched_wall_time_res_trap;
74 static Export *gather_msacc_res_trap;
75 static Export *gather_gc_info_res_trap;
76 static Export *gather_system_check_res_trap;
77
78 static Export *is_process_alive_trap;
79 static Export *get_internal_state_blocked;
80
81 #define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
82
83 static char otp_version[] = ERLANG_OTP_VERSION;
84 /* Keep erts_system_version as a global variable for easy access from a core */
85 static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
86 "%s"
87 " [erts-" ERLANG_VERSION "]"
88 #ifndef OTP_RELEASE
89 #ifdef ERLANG_GIT_VERSION
90 " [source-" ERLANG_GIT_VERSION "]"
91 #else
92 " [source]"
93 #endif
94 #endif
95 #ifdef ARCH_64
96 " [64-bit]"
97 #endif
98 " [smp:%beu:%beu]"
99 " [ds:%beu:%beu:%beu]"
100 #if defined(ERTS_DIRTY_SCHEDULERS_TEST)
101 " [dirty-schedulers-TEST]"
102 #endif
103 " [async-threads:%d]"
104 #ifdef BEAMASM
105 #ifdef NATIVE_ERLANG_STACK
106 " [jit]"
107 #else
108 " [jit:no-native-stack]"
109 #endif
110 #endif
111 #ifdef ET_DEBUG
112 #if ET_DEBUG
113 " [type-assertions]"
114 #endif
115 #endif
116 #ifdef DEBUG
117 " [debug-compiled]"
118 #endif
119 #ifdef ERTS_ENABLE_LOCK_CHECK
120 " [lock-checking]"
121 #endif
122 #ifdef ERTS_ENABLE_LOCK_COUNT
123 " [lock-counting]"
124 #endif
125 #ifdef ERTS_OPCODE_COUNTER_SUPPORT
126 " [instruction-counting]"
127 #endif
128 #ifdef VALGRIND
129 " [valgrind-compiled]"
130 #endif
131 #ifdef ADDRESS_SANITIZER
132 " [address-sanitizer]"
133 #endif
134 #ifdef ERTS_FRMPTR
135 " [frame-pointer]"
136 #endif
137 #ifdef USE_LTTNG
138 " [lttng]"
139 #endif
140 #ifdef USE_DTRACE
141 " [dtrace]"
142 #endif
143 #ifdef USE_SYSTEMTAP
144 " [systemtap]"
145 #endif
146 #ifdef SHCOPY
147 " [sharing-preserving]"
148 #endif
149 "\n");
150
151 #define ASIZE(a) (sizeof(a)/sizeof(a[0]))
152
153 #if defined(HAVE_SOLARIS_SPARC_PERFMON)
154 # include <sys/ioccom.h>
155 # define PERFMON_SETPCR _IOW('P', 1, unsigned long long)
156 # define PERFMON_GETPCR _IOR('P', 2, unsigned long long)
157 #endif
158
159 static Eterm
160 current_function(Process* p, ErtsHeapFactory *hfact, Process* rp,
161 int full_info, Uint reserve_size, int flags);
162 static Eterm
163 current_stacktrace(Process* p, ErtsHeapFactory *hfact, Process* rp,
164 Uint reserve_size, int flags);
165
/*
 * Build a list of {BinAddress, BinSize, BinRefc} 3-tuples, one per
 * off-heap proc-bin found in 'oh', prepended onto 'tail'.
 *
 * Follows the usual two-pass erts_bld_*() convention: when 'szp' is
 * non-NULL the required heap size is accumulated into *szp; when
 * 'hpp' is non-NULL the terms are actually built at *hpp, which is
 * advanced past the words consumed.
 */
Eterm
erts_bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh, Eterm tail)
{
    union erl_off_heap_ptr u;
    Eterm res = tail;
    Eterm tuple;
    struct erts_tmp_aligned_offheap tmp;

    for (u.hdr = oh->first; u.hdr; u.hdr = u.hdr->next) {
        erts_align_offheap(&u, &tmp);
        if (u.hdr->thing_word == HEADER_PROC_BIN) {
            /* Address of the referenced binary and its original byte size */
            Eterm val = erts_bld_uword(hpp, szp, (UWord) u.pb->val);
            Eterm orig_size = erts_bld_uint(hpp, szp, u.pb->val->orig_size);

            if (szp)
                *szp += 4+2; /* 3-tuple (4 words) + cons cell (2 words) */
            if (hpp) {
                Uint refc = (Uint) erts_refc_read(&u.pb->val->intern.refc, 1);
                tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
                /* cons cell is laid out directly after the 4-word tuple */
                res = CONS(*hpp + 4, tuple, res);
                *hpp += 4+2;
            }
        }
    }
    return res;
}
192
/*
 * Build a list of {MagicBinAddress, MagicBinSize, MagicBinRefc}
 * 3-tuples, one per magic-ref thing found in 'oh'.
 *
 * Same two-pass erts_bld_*() convention as erts_bld_bin_list():
 * 'szp' accumulates the heap need, 'hpp' builds and is advanced.
 */
static Eterm
bld_magic_ref_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
{
    struct erl_off_heap_header* ohh;
    Eterm res = NIL;
    Eterm tuple;

    for (ohh = oh->first; ohh; ohh = ohh->next) {
        if (is_ref_thing_header((*((Eterm *) ohh)))) {
            ErtsMRefThing *mrtp = (ErtsMRefThing *) ohh;
            /* Address of the magic binary and its original byte size */
            Eterm val = erts_bld_uword(hpp, szp, (UWord) mrtp->mb);
            Eterm orig_size = erts_bld_uint(hpp, szp, mrtp->mb->orig_size);

            if (szp)
                *szp += 4+2; /* 3-tuple (4 words) + cons cell (2 words) */
            if (hpp) {
                Uint refc = (Uint) erts_refc_read(&mrtp->mb->intern.refc, 1);
                tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
                /* cons cell is laid out directly after the 4-word tuple */
                res = CONS(*hpp + 4, tuple, res);
                *hpp += 4+2;
            }
        }
    }
    return res;
}
218
219
220 /*
221 make_monitor_list:
222 returns a list of records..
223 -record(erl_monitor, {
224 type, % process | port | time_offset | dist_process | resource
225 % | node | nodes | suspend
226 dir, % origin | target
227 ref, % reference or []
228 pid, % Process or nodename
229 extra % registered name, integer or []
230 }).
231 */
232
/*
 * Size pass for make_monitor_list(): accumulate into the Uint pointed
 * to by 'vpsz' the number of heap words needed to represent 'mon' as
 * one #erl_monitor{} list element. Always returns 1 (keep iterating).
 */
static int do_calc_mon_size(ErtsMonitor *mon, void *vpsz, Sint reds)
{
    Uint *size = (Uint *) vpsz;
    ErtsMonitorData *mdp = erts_monitor_to_data(mon);

    /* Monitor reference, unless it is an immediate */
    if (!is_immed(mdp->ref))
        *size += NC_HEAP_SIZE(mdp->ref);

    /* Monitored entity: NIF resources need a special reference */
    if (mon->type == ERTS_MON_TYPE_RESOURCE && erts_monitor_is_target(mon)) {
        *size += erts_resource_ref_size(mon->other.ptr);
    }
    else if (!is_immed(mon->other.item)) {
        *size += NC_HEAP_SIZE(mon->other.item);
    }

    *size += 9; /* cons cell (2 words) + 6-tuple (7 words) */
    return 1;
}
247
248 typedef struct {
249 Process *p;
250 Eterm *hp;
251 Eterm res;
252 Eterm tag;
253 } MonListContext;
254
/*
 * Build pass for make_monitor_list(): create one
 * {Tag, Type, Dir, Ref, Pid, Extra} 6-tuple for 'mon' and cons it
 * onto the result list in the MonListContext. The heap space was
 * reserved by do_calc_mon_size(), so building cannot fail here.
 * Always returns 1 (keep iterating).
 */
static int do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc, Sint reds)
{
    ErtsMonitorData *mdp = erts_monitor_to_data(mon);
    MonListContext *pmlc = vpmlc;
    Eterm tup, t, d, r, p, x;

    /* 'ref' field: the monitor reference (or [] for e.g. node monitors) */
    r = is_immed(mdp->ref) ? mdp->ref : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mdp->ref);
    /* 'pid' field: monitored entity; NIF resources are referenced specially */
    if (mon->type == ERTS_MON_TYPE_RESOURCE && erts_monitor_is_target(mon))
        p = erts_bld_resource_ref(&(pmlc->hp), &MSO(pmlc->p), mon->other.ptr);
    else
        p = (is_immed(mon->other.item)
             ? mon->other.item
             : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->other.item));

    /* 'extra' field: registered name, node-monitor refcount, or [] */
    if (mon->flags & ERTS_ML_FLG_NAME)
        x = ((ErtsMonitorDataExtended *) mdp)->u.name;
    else if (erts_monitor_is_target(mon))
        x = NIL;
    else if (mon->type == ERTS_MON_TYPE_NODE || mon->type == ERTS_MON_TYPE_NODES)
        x = make_small(((ErtsMonitorDataExtended *) mdp)->u.refc);
    else
        x = NIL;

    /* 'type' field */
    switch (mon->type) {
    case ERTS_MON_TYPE_PROC:
        t = am_process;
        break;
    case ERTS_MON_TYPE_PORT:
        t = am_port;
        break;
    case ERTS_MON_TYPE_TIME_OFFSET:
        t = am_time_offset;
        break;
    case ERTS_MON_TYPE_DIST_PROC: {
        ERTS_DECL_AM(dist_process);
        t = AM_dist_process;
        break;
    }
    case ERTS_MON_TYPE_RESOURCE: {
        ERTS_DECL_AM(resource);
        t = AM_resource;
        break;
    }
    case ERTS_MON_TYPE_NODE:
        t = am_node;
        break;
    case ERTS_MON_TYPE_NODES: {
        ERTS_DECL_AM(nodes);
        t = AM_nodes;
        break;
    }
    case ERTS_MON_TYPE_SUSPEND:
        t = am_suspend;
        break;
    default:
        ERTS_INTERNAL_ERROR("Unknown monitor type");
        t = am_error;
        break;
    }
    /* 'dir' field: which end of the monitor this process is on */
    if (erts_monitor_is_target(mon)) {
        ERTS_DECL_AM(target);
        d = AM_target;
    }
    else {
        ERTS_DECL_AM(origin);
        d = AM_origin;
    }
    tup = TUPLE6(pmlc->hp, pmlc->tag, t, d, r, p, x);
    pmlc->hp += 7; /* 6-tuple */
    pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
    pmlc->hp += 2; /* cons cell */
    return 1;
}
328
329 static Eterm
make_monitor_list(Process * p,int tree,ErtsMonitor * root,Eterm tail)330 make_monitor_list(Process *p, int tree, ErtsMonitor *root, Eterm tail)
331 {
332 DECL_AM(erl_monitor);
333 Uint sz = 0;
334 MonListContext mlc;
335 void (*foreach)(ErtsMonitor *,
336 ErtsMonitorFunc,
337 void *);
338
339 foreach = tree ? erts_monitor_tree_foreach : erts_monitor_list_foreach;
340
341 (*foreach)(root, do_calc_mon_size, &sz);
342 if (sz == 0)
343 return tail;
344 mlc.p = p;
345 mlc.hp = HAlloc(p,sz);
346 mlc.res = tail;
347 mlc.tag = AM_erl_monitor;
348 (*foreach)(root, do_make_one_mon_element, &mlc);
349 return mlc.res;
350 }
351
352 /*
353 make_link_list:
354 returns a list of records..
355 -record(erl_link, {
356 type, % process | port | dist_process
357 pid, % Process or port
358 state, % linked | unlinking
359 id % (address)
360 }).
361 */
362
/*
 * Size pass for make_link_list(): accumulate into the Uint pointed to
 * by 'vpsz' the number of heap words needed to represent 'lnk' as one
 * #erl_link{} list element. Always returns 1 (keep iterating).
 */
static int calc_lnk_size(ErtsLink *lnk, void *vpsz, Sint reds)
{
    Uint *accum = (Uint *) vpsz;
    Uint id_sz = 0;
    UWord addr;

    /* The 'id' field is the address of the link structure itself */
    addr = (lnk->type == ERTS_LNK_TYPE_DIST_PROC
            ? (UWord) erts_link_to_elink(lnk)
            : (UWord) lnk);

    /* Size-only build of the id term (may be a bignum) */
    (void) erts_bld_uword(NULL, &id_sz, (UWord) addr);
    *accum += id_sz;

    if (!is_immed(lnk->other.item))
        *accum += size_object(lnk->other.item);

    *accum += 8; /* cons cell (2 words) + 5-tuple (6 words) */
    return 1;
}
381
382 typedef struct {
383 Process *p;
384 Eterm *hp;
385 Eterm res;
386 Eterm tag;
387 } LnkListContext;
388
/*
 * Build pass for make_link_list(): create one
 * {Tag, Type, Pid, State, Id} 5-tuple for 'lnk' and cons it onto the
 * result list in the LnkListContext. Heap space was reserved by
 * calc_lnk_size(). Always returns 1 (keep iterating).
 */
static int make_one_lnk_element(ErtsLink *lnk, void * vpllc, Sint reds)
{
    LnkListContext *pllc = vpllc;
    Eterm tup, t, pid, id, state;
    UWord addr;
    ERTS_DECL_AM(linked);
    ERTS_DECL_AM(unlinking);

    /* 'state'/'id' fields: the address and unlink flag live in
     * different wrapper structs for dist vs. local links */
    if (lnk->type == ERTS_LNK_TYPE_DIST_PROC) {
        ErtsELink *elnk = erts_link_to_elink(lnk);
        state = elnk->unlinking ? AM_unlinking : AM_linked;
        addr = (UWord) elnk;
    }
    else {
        ErtsILink *ilnk = (ErtsILink *) lnk;
        state = ilnk->unlinking ? AM_unlinking : AM_linked;
        addr = (UWord) ilnk;
    }

    id = erts_bld_uword(&pllc->hp, NULL, (UWord) addr);

    /* 'pid' field: the linked entity (pid, port, ...) */
    if (is_immed(lnk->other.item))
        pid = lnk->other.item;
    else {
        Uint sz = size_object(lnk->other.item);
        pid = copy_struct(lnk->other.item, sz, &(pllc->hp), &MSO(pllc->p));
    }

    /* 'type' field */
    switch (lnk->type) {
    case ERTS_LNK_TYPE_PROC:
        t = am_process;
        break;
    case ERTS_LNK_TYPE_PORT:
        t = am_port;
        break;
    case ERTS_LNK_TYPE_DIST_PROC: {
        ERTS_DECL_AM(dist_process);
        t = AM_dist_process;
        break;
    }
    default:
        ERTS_INTERNAL_ERROR("Unkown link type");
        t = am_undefined;
        break;
    }

    tup = TUPLE5(pllc->hp, pllc->tag, t, pid, state, id);
    pllc->hp += 6; /* 5-tuple */
    pllc->res = CONS(pllc->hp, tup, pllc->res);
    pllc->hp += 2; /* cons cell */
    return 1;
}
441
442 static Eterm
make_link_list(Process * p,int tree,ErtsLink * root,Eterm tail)443 make_link_list(Process *p, int tree, ErtsLink *root, Eterm tail)
444 {
445 DECL_AM(erl_link);
446 Uint sz = 0;
447 LnkListContext llc;
448 void (*foreach)(ErtsLink *,
449 ErtsLinkFunc,
450 void *);
451
452 foreach = tree ? erts_link_tree_foreach : erts_link_list_foreach;
453
454 (*foreach)(root, calc_lnk_size, (void *) &sz);
455 if (sz == 0) {
456 return tail;
457 }
458 llc.p = p;
459 llc.hp = HAlloc(p,sz);
460 llc.res = tail;
461 llc.tag = AM_erl_link;
462 (*foreach)(root, make_one_lnk_element, (void *) &llc);
463 return llc.res;
464 }
465
/*
 * Print the system version banner (the erts_system_version format
 * string above) to 'to'/'arg'. Scans otp_version for a "-rc<N>"
 * suffix and, if found, injects " [DEVELOPMENT]" (rc 0) or
 * " [RELEASE CANDIDATE <N>]". Returns the result of erts_print().
 */
int
erts_print_system_version(fmtfn_t to, void *arg, Process *c_p)
{
    int i, rc = -1; /* rc stays -1 when no "-rc" marker is found */
    char *rc_str = "";
    char rc_buf[100];
    char *ov = otp_version;
    Uint total, online, active;
    Uint dirty_cpu, dirty_cpu_onln, dirty_io;

    erts_schedulers_state(&total, &online, &active,
                          &dirty_cpu, &dirty_cpu_onln, NULL,
                          &dirty_io, NULL);
    /* Look for "-rc"; the -4 bound leaves room for "-rc" plus at
     * least one digit (and the terminating NUL). */
    for (i = 0; i < sizeof(otp_version)-4; i++) {
        if (ov[i] == '-' && ov[i+1] == 'r' && ov[i+2] == 'c')
            rc = atoi(&ov[i+3]);
    }
    if (rc >= 0) {
        if (rc == 0)
            rc_str = " [DEVELOPMENT]";
        else {
            erts_snprintf(rc_buf, sizeof(rc_buf), " [RELEASE CANDIDATE %d]", rc);
            rc_str = rc_buf;
        }
    }
    /* Argument order must match the %-directives in erts_system_version */
    return erts_print(to, arg, erts_system_version,
                      rc_str
                      , total, online
                      , dirty_cpu, dirty_cpu_onln, dirty_io
                      , erts_async_max_threads
        );
}
498
499 typedef struct {
500 /* {Entity,Node} = {monitor.Name,monitor.Pid} for external by name
501 * {Entity,Node} = {monitor.Pid,NIL} for external/external by pid
502 * {Entity,Node} = {monitor.Name,erlang:node()} for internal by name
503 * {Entity,Node} = {monitor.resource,MON_NIF_TARGET}*/
504 union {
505 Eterm term;
506 ErtsResource* resource;
507 }entity;
508 int named;
509 Uint16 type;
510 Eterm node;
511 /* pid is actual target being monitored, no matter pid/port or name */
512 Eterm pid;
513 } MonitorInfo;
514
515 typedef struct {
516 MonitorInfo *mi;
517 Uint mi_i;
518 Uint mi_max;
519 int sz;
520 } MonitorInfoCollection;
521
522 #define INIT_MONITOR_INFOS(MIC) do { \
523 (MIC).mi = NULL; \
524 (MIC).mi_i = (MIC).mi_max = 0; \
525 (MIC).sz = 0; \
526 } while(0)
527
528 #define MI_INC 50
529 #define EXTEND_MONITOR_INFOS(MICP) \
530 do { \
531 if ((MICP)->mi_i >= (MICP)->mi_max) { \
532 (MICP)->mi = ((MICP)->mi ? erts_realloc(ERTS_ALC_T_TMP, \
533 (MICP)->mi, \
534 ((MICP)->mi_max+MI_INC) \
535 * sizeof(MonitorInfo)) \
536 : erts_alloc(ERTS_ALC_T_TMP, \
537 MI_INC*sizeof(MonitorInfo))); \
538 (MICP)->mi_max += MI_INC; \
539 } \
540 } while (0)
541 #define DESTROY_MONITOR_INFOS(MIC) \
542 do { \
543 if ((MIC).mi != NULL) { \
544 erts_free(ERTS_ALC_T_TMP, (void *) (MIC).mi); \
545 } \
546 } while (0)
547
/*
 * Iterator callback: record one (still active) link in the
 * MonitorInfoCollection passed via 'vmicp'. Links that are being
 * unlinked are skipped. Always returns 1 (keep iterating).
 */
static int collect_one_link(ErtsLink *lnk, void *vmicp, Sint reds)
{
    MonitorInfoCollection *micp = vmicp;

    /* Skip links for which an unlink operation is in progress. */
    if (lnk->type == ERTS_LNK_TYPE_DIST_PROC) {
        if (erts_link_to_elink(lnk)->unlinking)
            return 1;
    }
    else if (((ErtsILink *) lnk)->unlinking) {
        return 1;
    }

    EXTEND_MONITOR_INFOS(micp);
    micp->mi[micp->mi_i].entity.term = lnk->other.item;
    micp->sz += 2 + NC_HEAP_SIZE(lnk->other.item); /* cons + linked entity */
    micp->mi_i++;
    return 1;
}
566
/*
 * Iterator callback: record one origin-direction monitor (a monitor
 * set up by the inspected process) in the MonitorInfoCollection
 * passed via 'vmicp'. Used for process_info(_, monitors).
 * Always returns 1 (keep iterating).
 */
static int collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp, Sint reds)
{
    if (erts_monitor_is_origin(mon)) {
        MonitorInfoCollection *micp = vmicp;

        EXTEND_MONITOR_INFOS(micp);

        micp->mi[micp->mi_i].type = mon->type;

        switch (mon->type) {
        case ERTS_MON_TYPE_PROC:
        case ERTS_MON_TYPE_PORT:
        case ERTS_MON_TYPE_DIST_PROC:
        case ERTS_MON_TYPE_TIME_OFFSET:
            if (mon->flags & ERTS_ML_FLG_SPAWN_PENDING)
                break; /* Not an active monitor... */
            if (!(mon->flags & ERTS_ML_FLG_NAME)) {
                /* Monitor by pid/port: entity is the monitored id */
                micp->mi[micp->mi_i].named = 0;
                micp->mi[micp->mi_i].entity.term = mon->other.item;
                micp->mi[micp->mi_i].node = NIL;
                if (is_not_atom(mon->other.item))
                    micp->sz += NC_HEAP_SIZE(mon->other.item);
            }
            else {
                /* Monitor by name: entity is the registered name and
                 * node is the (possibly remote) node name */
                ErtsMonitorDataExtended *mdep;
                micp->mi[micp->mi_i].named = !0;
                mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(mon);
                micp->mi[micp->mi_i].entity.term = mdep->u.name;
                if (mdep->dist)
                    micp->mi[micp->mi_i].node = mdep->dist->nodename;
                else
                    micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
                micp->sz += 3; /* need one 2-tuple */
            }

            /* have always pid at hand, to assist with figuring out if its a port or
             * a process, when we monitored by name and process_info is requested.
             * See: erl_bif_info.c:process_info_aux section for am_monitors */
            micp->mi[micp->mi_i].pid = mon->other.item;

            micp->mi_i++;
            micp->sz += 2 + 3; /* For a cons cell and a 2-tuple */
            break;
        default:
            /* Other monitor types are not reported */
            break;
        }
    }
    return 1;
}
616
/*
 * Iterator callback: record one target-direction monitor (someone
 * monitoring the inspected process) in the MonitorInfoCollection
 * passed via 'vmicp'. Used for process_info(_, monitored_by).
 * Always returns 1 (keep iterating).
 */
static int collect_one_target_monitor(ErtsMonitor *mon, void *vmicp, Sint reds)
{
    MonitorInfoCollection *micp = vmicp;

    if (erts_monitor_is_target(mon)) {

        EXTEND_MONITOR_INFOS(micp);

        micp->mi[micp->mi_i].type = mon->type;
        micp->mi[micp->mi_i].named = !!(mon->flags & ERTS_ML_FLG_NAME);
        switch (mon->type) {

        case ERTS_MON_TYPE_PROC:
        case ERTS_MON_TYPE_PORT:
        case ERTS_MON_TYPE_DIST_PROC:

            /* Monitoring entity is a pid/port term */
            micp->mi[micp->mi_i].entity.term = mon->other.item;
            micp->mi[micp->mi_i].node = NIL;
            micp->sz += NC_HEAP_SIZE(mon->other.item);

            micp->sz += 2; /* cons */;
            micp->mi_i++;
            break;

        case ERTS_MON_TYPE_RESOURCE:

            /* Monitoring entity is a NIF resource */
            micp->mi[micp->mi_i].entity.resource = mon->other.ptr;
            micp->mi[micp->mi_i].node = NIL;
            micp->sz += erts_resource_ref_size(mon->other.ptr);

            micp->sz += 2; /* cons */;
            micp->mi_i++;
            break;

        default:
            /* Other monitor types are not reported */
            break;
        }
    }
    return 1;
}
657
658 typedef struct {
659 ErtsMonitorSuspend **smi;
660 Uint smi_i;
661 Uint smi_max;
662 Uint sz;
663 } ErtsSuspendMonitorInfoCollection;
664
665 #define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC) do { \
666 (SMIC).smi = NULL; \
667 (SMIC).smi_i = (SMIC).smi_max = 0; \
668 (SMIC).sz = 0; \
669 } while(0)
670
671 #define ERTS_SMI_INC 50
672 #define ERTS_EXTEND_SUSPEND_MONITOR_INFOS(SMICP) \
673 do { \
674 if ((SMICP)->smi_i >= (SMICP)->smi_max) { \
675 (SMICP)->smi = ((SMICP)->smi \
676 ? erts_realloc(ERTS_ALC_T_TMP, \
677 (SMICP)->smi, \
678 ((SMICP)->smi_max \
679 + ERTS_SMI_INC) \
680 * sizeof(ErtsMonitorSuspend *)) \
681 : erts_alloc(ERTS_ALC_T_TMP, \
682 ERTS_SMI_INC \
683 * sizeof(ErtsMonitorSuspend *))); \
684 (SMICP)->smi_max += ERTS_SMI_INC; \
685 } \
686 } while (0)
687
688 #define ERTS_DESTROY_SUSPEND_MONITOR_INFOS(SMIC) \
689 do { \
690 if ((SMIC).smi != NULL) { \
691 erts_free(ERTS_ALC_T_TMP, (void *) (SMIC).smi); \
692 } \
693 } while (0)
694
695 static int
collect_one_suspend_monitor(ErtsMonitor * mon,void * vsmicp,Sint reds)696 collect_one_suspend_monitor(ErtsMonitor *mon, void *vsmicp, Sint reds)
697 {
698 if (mon->type == ERTS_MON_TYPE_SUSPEND) {
699 Sint count;
700 erts_aint_t mstate;
701 ErtsMonitorSuspend *msp;
702 ErtsSuspendMonitorInfoCollection *smicp;
703
704 msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
705 smicp = vsmicp;
706
707 ERTS_EXTEND_SUSPEND_MONITOR_INFOS(smicp);
708
709 smicp->smi[smicp->smi_i] = msp;
710 smicp->sz += 2 /* cons */ + 4 /* 3-tuple */;
711
712 mstate = erts_atomic_read_nob(&msp->state);
713
714 count = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK);
715 if (!IS_SSMALL(count))
716 smicp->sz += BIG_UINT_HEAP_SIZE;
717
718 smicp->smi_i++;
719 }
720 return 1;
721 }
722
723 /*
724 * process_info/[1,2]
725 */
726
727 /*
728 * All valid process_info arguments.
729 */
730
731 #define ERTS_PI_IX_REGISTERED_NAME 0
732 #define ERTS_PI_IX_CURRENT_FUNCTION 1
733 #define ERTS_PI_IX_INITIAL_CALL 2
734 #define ERTS_PI_IX_STATUS 3
735 #define ERTS_PI_IX_MESSAGES 4
736 #define ERTS_PI_IX_MESSAGE_QUEUE_LEN 5
737 #define ERTS_PI_IX_LINKS 6
738 #define ERTS_PI_IX_MONITORS 7
739 #define ERTS_PI_IX_MONITORED_BY 8
740 #define ERTS_PI_IX_DICTIONARY 9
741 #define ERTS_PI_IX_TRAP_EXIT 10
742 #define ERTS_PI_IX_ERROR_HANDLER 11
743 #define ERTS_PI_IX_HEAP_SIZE 12
744 #define ERTS_PI_IX_STACK_SIZE 13
745 #define ERTS_PI_IX_MEMORY 14
746 #define ERTS_PI_IX_GARBAGE_COLLECTION 15
747 #define ERTS_PI_IX_GROUP_LEADER 16
748 #define ERTS_PI_IX_REDUCTIONS 17
749 #define ERTS_PI_IX_PRIORITY 18
750 #define ERTS_PI_IX_TRACE 19
751 #define ERTS_PI_IX_BINARY 20
752 #define ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN 21
753 #define ERTS_PI_IX_CATCHLEVEL 22
754 #define ERTS_PI_IX_BACKTRACE 23
755 #define ERTS_PI_IX_LAST_CALLS 24
756 #define ERTS_PI_IX_TOTAL_HEAP_SIZE 25
757 #define ERTS_PI_IX_SUSPENDING 26
758 #define ERTS_PI_IX_MIN_HEAP_SIZE 27
759 #define ERTS_PI_IX_MIN_BIN_VHEAP_SIZE 28
760 #define ERTS_PI_IX_MAX_HEAP_SIZE 29
761 #define ERTS_PI_IX_CURRENT_LOCATION 30
762 #define ERTS_PI_IX_CURRENT_STACKTRACE 31
763 #define ERTS_PI_IX_MESSAGE_QUEUE_DATA 32
764 #define ERTS_PI_IX_GARBAGE_COLLECTION_INFO 33
765 #define ERTS_PI_IX_MAGIC_REF 34
766 #define ERTS_PI_IX_FULLSWEEP_AFTER 35
767
768 #define ERTS_PI_FLAG_SINGELTON (1 << 0)
769 #define ERTS_PI_FLAG_ALWAYS_WRAP (1 << 1)
770 #define ERTS_PI_FLAG_WANT_MSGS (1 << 2)
771 #define ERTS_PI_FLAG_NEED_MSGQ_LEN (1 << 3)
772 #define ERTS_PI_FLAG_FORCE_SIG_SEND (1 << 4)
773 #define ERTS_PI_FLAG_REQUEST_FOR_OTHER (1 << 5)
774
775 #define ERTS_PI_UNRESERVE(RS, SZ) \
776 (ASSERT((RS) >= (SZ)), (RS) -= (SZ))
777
778
779 typedef struct {
780 Eterm name;
781 Uint reserve_size;
782 int flags;
783 ErtsProcLocks locks;
784 } ErtsProcessInfoArgs;
785
/*
 * Per-item metadata for process_info/2: argument atom, heap words to
 * reserve for the result, ERTS_PI_FLAG_* flags, and the locks needed
 * on the inspected process.
 *
 * NOTE: entries must appear in ERTS_PI_IX_* order; the mapping is
 * verified against pi_arg2ix() in process_info_init() (DEBUG only).
 */
static ErtsProcessInfoArgs pi_args[] = {
    {am_registered_name, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_current_function, 4, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_initial_call, 4, 0, ERTS_PROC_LOCK_MAIN},
    {am_status, 0, 0, 0},
    {am_messages, 0, ERTS_PI_FLAG_WANT_MSGS|ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_message_queue_len, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN, ERTS_PROC_LOCK_MAIN},
    {am_links, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_monitors, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_monitored_by, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_dictionary, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_trap_exit, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_error_handler, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_stack_size, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_memory, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    /* garbage_collection: five {Key,Value} 2-tuples + max_heap_size map */
    {am_garbage_collection, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + ERTS_MAX_HEAP_SIZE_MAP_SZ, 0, ERTS_PROC_LOCK_MAIN},
    {am_group_leader, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_reductions, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_priority, 0, 0, 0},
    {am_trace, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_binary, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_sequential_trace_token, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_catchlevel, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_backtrace, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_last_calls, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_total_heap_size, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_suspending, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, 0},
    {am_min_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_min_bin_vheap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_max_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_current_location, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_current_stacktrace, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_message_queue_data, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_garbage_collection_info, ERTS_PROCESS_GC_INFO_MAX_SIZE, 0, ERTS_PROC_LOCK_MAIN},
    {am_magic_ref, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_fullsweep_after, 0, 0, ERTS_PROC_LOCK_MAIN}
};
824
825 #define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(pi_args[0])))
826
827 #ifdef DEBUG
828 # define ERTS_PI_DEF_ARR_SZ 2
829 #else
830 # define ERTS_PI_DEF_ARR_SZ ERTS_PI_ARGS
831 #endif
832
833 static ERTS_INLINE Eterm
pi_ix2arg(int ix)834 pi_ix2arg(int ix)
835 {
836 if (ix < 0 || ERTS_PI_ARGS <= ix)
837 return am_undefined;
838 return pi_args[ix].name;
839 }
840
841 static ERTS_INLINE int
pi_ix2flags(int ix)842 pi_ix2flags(int ix)
843 {
844 if (ix < 0 || ERTS_PI_ARGS <= ix)
845 return 0;
846 return pi_args[ix].flags;
847 }
848
849 static ERTS_INLINE Uint
pi_ix2rsz(int ix)850 pi_ix2rsz(int ix)
851 {
852 if (ix < 0 || ERTS_PI_ARGS <= ix)
853 return 0;
854 return pi_args[ix].reserve_size;
855 }
856
857 static ERTS_INLINE ErtsProcLocks
pi_ix2locks(int ix)858 pi_ix2locks(int ix)
859 {
860 if (ix < 0 || ERTS_PI_ARGS <= ix)
861 return 0;
862 return pi_args[ix].locks;
863 }
864
/*
 * Map a process_info/2 argument atom to its ERTS_PI_IX_* index, or
 * -1 if the atom is not a valid item. Inverse of pi_ix2arg();
 * consistency is asserted in process_info_init() (DEBUG builds).
 */
static ERTS_INLINE int
pi_arg2ix(Eterm arg)
{
    switch (arg) {
    case am_registered_name:
        return ERTS_PI_IX_REGISTERED_NAME;
    case am_current_function:
        return ERTS_PI_IX_CURRENT_FUNCTION;
    case am_initial_call:
        return ERTS_PI_IX_INITIAL_CALL;
    case am_status:
        return ERTS_PI_IX_STATUS;
    case am_messages:
        return ERTS_PI_IX_MESSAGES;
    case am_message_queue_len:
        return ERTS_PI_IX_MESSAGE_QUEUE_LEN;
    case am_links:
        return ERTS_PI_IX_LINKS;
    case am_monitors:
        return ERTS_PI_IX_MONITORS;
    case am_monitored_by:
        return ERTS_PI_IX_MONITORED_BY;
    case am_dictionary:
        return ERTS_PI_IX_DICTIONARY;
    case am_trap_exit:
        return ERTS_PI_IX_TRAP_EXIT;
    case am_error_handler:
        return ERTS_PI_IX_ERROR_HANDLER;
    case am_heap_size:
        return ERTS_PI_IX_HEAP_SIZE;
    case am_stack_size:
        return ERTS_PI_IX_STACK_SIZE;
    case am_memory:
        return ERTS_PI_IX_MEMORY;
    case am_garbage_collection:
        return ERTS_PI_IX_GARBAGE_COLLECTION;
    case am_group_leader:
        return ERTS_PI_IX_GROUP_LEADER;
    case am_reductions:
        return ERTS_PI_IX_REDUCTIONS;
    case am_priority:
        return ERTS_PI_IX_PRIORITY;
    case am_trace:
        return ERTS_PI_IX_TRACE;
    case am_binary:
        return ERTS_PI_IX_BINARY;
    case am_sequential_trace_token:
        return ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN;
    case am_catchlevel:
        return ERTS_PI_IX_CATCHLEVEL;
    case am_backtrace:
        return ERTS_PI_IX_BACKTRACE;
    case am_last_calls:
        return ERTS_PI_IX_LAST_CALLS;
    case am_total_heap_size:
        return ERTS_PI_IX_TOTAL_HEAP_SIZE;
    case am_suspending:
        return ERTS_PI_IX_SUSPENDING;
    case am_min_heap_size:
        return ERTS_PI_IX_MIN_HEAP_SIZE;
    case am_min_bin_vheap_size:
        return ERTS_PI_IX_MIN_BIN_VHEAP_SIZE;
    case am_max_heap_size:
        return ERTS_PI_IX_MAX_HEAP_SIZE;
    case am_current_location:
        return ERTS_PI_IX_CURRENT_LOCATION;
    case am_current_stacktrace:
        return ERTS_PI_IX_CURRENT_STACKTRACE;
    case am_message_queue_data:
        return ERTS_PI_IX_MESSAGE_QUEUE_DATA;
    case am_garbage_collection_info:
        return ERTS_PI_IX_GARBAGE_COLLECTION_INFO;
    case am_magic_ref:
        return ERTS_PI_IX_MAGIC_REF;
    case am_fullsweep_after:
        return ERTS_PI_IX_FULLSWEEP_AFTER;
    default:
        /* Not a valid process_info item */
        return -1;
    }
}
945
946 static Eterm pi_1_keys[] = {
947 am_registered_name,
948 am_current_function,
949 am_initial_call,
950 am_status,
951 am_message_queue_len,
952 am_links,
953 am_dictionary,
954 am_trap_exit,
955 am_error_handler,
956 am_priority,
957 am_group_leader,
958 am_total_heap_size,
959 am_heap_size,
960 am_stack_size,
961 am_reductions,
962 am_garbage_collection,
963 am_suspending
964 };
965
966 #define ERTS_PI_1_NO_OF_KEYS (sizeof(pi_1_keys)/sizeof(Eterm))
967
968 static Eterm pi_1_keys_list;
969 static Eterm pi_1_keys_list_heap[2*ERTS_PI_1_NO_OF_KEYS];
970
971 static void
process_info_init(void)972 process_info_init(void)
973 {
974 Eterm *hp = &pi_1_keys_list_heap[0];
975 int i;
976
977 pi_1_keys_list = NIL;
978
979 for (i = ERTS_PI_1_NO_OF_KEYS-1; i >= 0; i--) {
980 pi_1_keys_list = CONS(hp, pi_1_keys[i], pi_1_keys_list);
981 hp += 2;
982 }
983
984 #ifdef DEBUG
985 { /* Make sure the process_info argument mappings are consistent */
986 int ix;
987 for (ix = 0; ix < ERTS_PI_ARGS; ix++) {
988 ASSERT(pi_arg2ix(pi_ix2arg(ix)) == ix);
989 }
990 }
991 #endif
992
993 }
994
995 static BIF_RETTYPE
996 process_info_aux(Process *c_p,
997 ErtsHeapFactory *hfact,
998 Process *rp,
999 ErtsProcLocks rp_locks,
1000 int item_ix,
1001 int flags,
1002 Uint *reserve_sizep,
1003 Uint *reds);
1004
/*
 * Build the result of process_info/2 for process 'rp' on behalf of
 * caller 'c_p'. 'item_ix' holds 'item_ix_len' ERTS_PI_IX_* indices
 * for the requested items; terms are produced via 'hfact' and work
 * is metered through 'reds'. With ERTS_PI_FLAG_SINGELTON set, a
 * single item's value is returned directly instead of a list of
 * {Item, Value} tuples.
 */
Eterm
erts_process_info(Process *c_p,
                  ErtsHeapFactory *hfact,
                  Process *rp,
                  ErtsProcLocks rp_locks,
                  int *item_ix,
                  int item_ix_len,
                  int flags,
                  Uint reserve_size,
                  Uint *reds)
{
    Eterm res;
    Eterm part_res[ERTS_PI_ARGS]; /* per-item results, indexed by ERTS_PI_IX_* */
    int item_ix_ix, ix;

    if (ERTS_PI_FLAG_SINGELTON & flags) {
        ASSERT(item_ix_len == 1);
        res = process_info_aux(c_p, hfact, rp, rp_locks, item_ix[0],
                               flags, &reserve_size, reds);
        return res;
    }

    for (ix = 0; ix < ERTS_PI_ARGS; ix++)
        part_res[ix] = THE_NON_VALUE;

    /*
     * We always handle 'messages' first if it should be part
     * of the result. This since if both 'messages' and
     * 'message_queue_len' are wanted, 'messages' may
     * change the result of 'message_queue_len' (in case
     * the queue contain bad distribution messages).
     */
    if (flags & ERTS_PI_FLAG_WANT_MSGS) {
        ix = pi_arg2ix(am_messages);
        ASSERT(part_res[ix] == THE_NON_VALUE);
        res = process_info_aux(c_p, hfact, rp, rp_locks, ix,
                               flags, &reserve_size, reds);
        ASSERT(res != am_undefined);
        ASSERT(res != THE_NON_VALUE);
        part_res[ix] = res;
    }

    /* Compute remaining items (each distinct item only once, even if
     * requested multiple times). */
    for (item_ix_ix = item_ix_len - 1; item_ix_ix >= 0; item_ix_ix--) {
        ix = item_ix[item_ix_ix];
        if (part_res[ix] == THE_NON_VALUE) {
            res = process_info_aux(c_p, hfact, rp, rp_locks, ix,
                                   flags, &reserve_size, reds);
            ASSERT(res != am_undefined);
            ASSERT(res != THE_NON_VALUE);
            part_res[ix] = res;
        }
    }

    res = NIL;

    /* Assemble the result list back-to-front so it comes out in the
     * order the items were requested. */
    for (item_ix_ix = item_ix_len - 1; item_ix_ix >= 0; item_ix_ix--) {
        ix = item_ix[item_ix_ix];
        ASSERT(part_res[ix] != THE_NON_VALUE);
        /*
         * If we should ignore the value of registered_name,
         * its value is nil. For more info, see comment in the
         * beginning of process_info_aux().
         */
        if (is_nil(part_res[ix])) {
            ASSERT(!(flags & ERTS_PI_FLAG_ALWAYS_WRAP));
            ASSERT(pi_ix2arg(ix) == am_registered_name);
        }
        else {
            Eterm *hp;
            ERTS_PI_UNRESERVE(reserve_size, 2);
            hp = erts_produce_heap(hfact, 2, reserve_size);
            res = CONS(hp, part_res[ix], res);
        }
    }

    return res;
}
1082
1083 static void
1084 pi_setup_grow(int **arr, int *def_arr, Uint *sz, int ix);
1085
1086 static BIF_RETTYPE
process_info_bif(Process * c_p,Eterm pid,Eterm opt,int always_wrap,int pi2)1087 process_info_bif(Process *c_p, Eterm pid, Eterm opt, int always_wrap, int pi2)
1088 {
1089 ErtsHeapFactory hfact;
1090 int def_arr[ERTS_PI_DEF_ARR_SZ];
1091 int *item_ix = &def_arr[0];
1092 Process *rp = NULL;
1093 erts_aint32_t state;
1094 BIF_RETTYPE ret;
1095 Uint reds = 0;
1096 ErtsProcLocks locks = 0;
1097 int flags;
1098 Uint reserve_size;
1099 int len;
1100 Eterm res;
1101
1102 ERTS_CT_ASSERT(ERTS_PI_DEF_ARR_SZ > 0);
1103
1104 if (c_p->common.id == pid) {
1105 int local_only = c_p->sig_qs.flags & FS_LOCAL_SIGS_ONLY;
1106 int sres, sreds, reds_left;
1107
1108 reds_left = ERTS_BIF_REDS_LEFT(c_p);
1109 sreds = reds_left;
1110
1111 if (!local_only) {
1112 erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
1113 erts_proc_sig_fetch(c_p);
1114 erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
1115 }
1116
1117 sres = erts_proc_sig_handle_incoming(c_p, &state, &sreds, sreds, !0);
1118
1119 BUMP_REDS(c_p, (int) sreds);
1120 reds_left -= sreds;
1121
1122 if (state & ERTS_PSFLG_EXITING) {
1123 c_p->sig_qs.flags &= ~FS_LOCAL_SIGS_ONLY;
1124 goto exited;
1125 }
1126 if (!sres | (reds_left <= 0)) {
1127 /*
1128 * More signals to handle or out of reds; need
1129 * to yield and continue. Prevent fetching of
1130 * more signals by setting local-sigs-only flag.
1131 */
1132 c_p->sig_qs.flags |= FS_LOCAL_SIGS_ONLY;
1133 goto yield;
1134 }
1135
1136 c_p->sig_qs.flags &= ~FS_LOCAL_SIGS_ONLY;
1137 }
1138
1139 if (is_atom(opt)) {
1140 int ix = pi_arg2ix(opt);
1141 item_ix[0] = ix;
1142 len = 1;
1143 locks = pi_ix2locks(ix);
1144 reserve_size = 3 + pi_ix2rsz(ix);
1145 flags = ERTS_PI_FLAG_SINGELTON;
1146 flags |= pi_ix2flags(ix);
1147 if (ix < 0)
1148 goto badarg;
1149 }
1150 else {
1151 Eterm list = opt;
1152 Uint size = ERTS_PI_DEF_ARR_SZ;
1153
1154 len = 0;
1155 reserve_size = 0;
1156 locks = 0;
1157 flags = 0;
1158
1159 while (is_list(list)) {
1160 Eterm *consp = list_val(list);
1161 Eterm arg = CAR(consp);
1162 int ix = pi_arg2ix(arg);
1163 if (ix < 0)
1164 goto badarg;
1165
1166 if (len >= size)
1167 pi_setup_grow(&item_ix, def_arr, &size, len);
1168
1169 item_ix[len++] = ix;
1170
1171 locks |= pi_ix2locks(ix);
1172 flags |= pi_ix2flags(ix);
1173 reserve_size += pi_ix2rsz(ix);
1174 reserve_size += 3; /* 2-tuple */
1175 reserve_size += 2; /* cons */
1176
1177 list = CDR(consp);
1178 }
1179
1180 if (is_not_nil(list))
1181 goto badarg;
1182 }
1183
1184 if (is_not_internal_pid(pid)) {
1185 if (is_external_pid(pid)
1186 && external_pid_dist_entry(pid) == erts_this_dist_entry)
1187 goto undefined;
1188 goto badarg;
1189 }
1190
1191 if (always_wrap)
1192 flags |= ERTS_PI_FLAG_ALWAYS_WRAP;
1193
1194 if (c_p->common.id == pid) {
1195 rp = c_p;
1196 if (locks & ~ERTS_PROC_LOCK_MAIN)
1197 erts_proc_lock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
1198 locks |= ERTS_PROC_LOCK_MAIN;
1199 }
1200 else {
1201 if (flags & ERTS_PI_FLAG_FORCE_SIG_SEND)
1202 goto send_signal;
1203 state = ERTS_PSFLG_RUNNING; /* fail state... */
1204 rp = erts_try_lock_sig_free_proc(pid, locks, &state);
1205 if (!rp)
1206 goto undefined;
1207 if (rp == ERTS_PROC_LOCK_BUSY) {
1208 rp = NULL;
1209 goto send_signal;
1210 }
1211 if (state & ERTS_PSFLG_EXITING) {
1212 if (locks)
1213 erts_proc_unlock(rp, locks);
1214 locks = 0;
1215 /* wait for it to terminate properly... */
1216 goto send_signal;
1217 }
1218 if (flags & ERTS_PI_FLAG_NEED_MSGQ_LEN) {
1219 ASSERT(locks & ERTS_PROC_LOCK_MAIN);
1220 erts_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
1221 erts_proc_sig_fetch(rp);
1222 if (c_p->sig_qs.cont) {
1223 erts_proc_unlock(rp, locks|ERTS_PROC_LOCK_MSGQ);
1224 locks = 0;
1225 goto send_signal;
1226 }
1227 erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
1228 }
1229 }
1230
1231 erts_factory_proc_init(&hfact, c_p);
1232
1233 res = erts_process_info(c_p, &hfact, rp, locks, item_ix, len,
1234 flags, reserve_size, &reds);
1235
1236 erts_factory_close(&hfact);
1237
1238 if (reds > INT_MAX/2)
1239 reds = INT_MAX/2;
1240 BUMP_REDS(c_p, (int) reds);
1241
1242 state = erts_atomic32_read_acqb(&rp->state);
1243 if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE)) {
1244 if (state & ERTS_PSFLG_FREE) {
1245 ASSERT(!locks);
1246 goto undefined;
1247 }
1248 if (locks)
1249 erts_proc_unlock(rp, locks);
1250 locks = 0;
1251 /* wait for it to terminate properly... */
1252 goto send_signal;
1253 }
1254
1255 if (c_p == rp || !ERTS_PROC_HAS_INCOMING_SIGNALS(c_p))
1256 ERTS_BIF_PREP_RET(ret, res);
1257 else
1258 ERTS_BIF_PREP_HANDLE_SIGNALS_RETURN(ret, c_p, res);
1259
1260 done:
1261
1262 if (c_p == rp)
1263 locks &= ~ERTS_PROC_LOCK_MAIN;
1264
1265 if (locks && rp)
1266 erts_proc_unlock(rp, locks);
1267
1268 if (item_ix != def_arr)
1269 erts_free(ERTS_ALC_T_TMP, item_ix);
1270
1271 return ret;
1272
1273 badarg:
1274 ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
1275 goto done;
1276
1277 undefined:
1278 ERTS_BIF_PREP_RET(ret, am_undefined);
1279 goto done;
1280
1281 exited:
1282 ERTS_BIF_PREP_EXITED(ret, c_p);
1283 goto done;
1284
1285 yield:
1286 if (pi2)
1287 ERTS_BIF_PREP_YIELD2(ret, BIF_TRAP_EXPORT(BIF_process_info_2), c_p, pid, opt);
1288 else
1289 ERTS_BIF_PREP_YIELD1(ret, BIF_TRAP_EXPORT(BIF_process_info_1), c_p, pid);
1290 goto done;
1291
1292 send_signal: {
1293 Eterm ref = erts_make_ref(c_p);
1294 int enqueued, need_msgq_len;
1295 flags |= ERTS_PI_FLAG_REQUEST_FOR_OTHER;
1296 need_msgq_len = (flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
1297 /*
1298 * Set save pointer to the end of the message queue so we wont
1299 * have to scan the whole* message queue for the result. Note
1300 * that caller unconditionally has to enter a receive only
1301 * matching messages containing 'ref', or restore save pointer.
1302 */
1303 erts_msgq_set_save_end(c_p);
1304 enqueued = erts_proc_sig_send_process_info_request(c_p, pid, item_ix,
1305 len, need_msgq_len,
1306 flags, reserve_size,
1307 ref);
1308 if (!enqueued) {
1309 /* Restore save pointer... */
1310 erts_msgq_set_save_first(c_p);
1311 goto undefined;
1312 }
1313 ERTS_BIF_PREP_TRAP1(ret, erts_await_result, c_p, ref);
1314 goto done;
1315 }
1316 }
1317
1318 static void
pi_setup_grow(int ** arr,int * def_arr,Uint * sz,int ix)1319 pi_setup_grow(int **arr, int *def_arr, Uint *sz, int ix)
1320 {
1321 *sz = (ix+1) + ERTS_PI_DEF_ARR_SZ;
1322 if (*arr != def_arr)
1323 *arr = erts_realloc(ERTS_ALC_T_TMP, *arr, (*sz)*sizeof(int));
1324 else {
1325 int *new_arr = erts_alloc(ERTS_ALC_T_TMP, (*sz)*sizeof(int));
1326 sys_memcpy((void *) new_arr, (void *) def_arr,
1327 sizeof(int)*ERTS_PI_DEF_ARR_SZ);
1328 *arr = new_arr;
1329 }
1330 }
1331
1332
process_info_2(BIF_ALIST_2)1333 BIF_RETTYPE process_info_2(BIF_ALIST_2)
1334 {
1335 return process_info_bif(BIF_P, BIF_ARG_1, BIF_ARG_2, !is_atom(BIF_ARG_2), !0);
1336 }
1337
process_info_1(BIF_ALIST_1)1338 BIF_RETTYPE process_info_1(BIF_ALIST_1)
1339 {
1340 return process_info_bif(BIF_P, BIF_ARG_1, pi_1_keys_list, 0, 0);
1341 }
1342
/*
 * Build the value for a single process_info item ('item_ix') of process
 * 'rp', on behalf of the requesting process 'c_p' (c_p == rp for a local
 * request without ERTS_PI_FLAG_REQUEST_FOR_OTHER). Heap is produced via
 * 'hfact'; '*reserve_sizep' tracks heap words reserved for items not yet
 * built and is decremented (via ERTS_PI_UNRESERVE) as this item consumes
 * its share. '*reds' is incremented with an estimate of the work done.
 *
 * Returns a {Key, Value} 2-tuple, except:
 *   - bare NIL for 'registered_name' when the process has no registered
 *     name and ERTS_PI_FLAG_ALWAYS_WRAP is unset (see Q/A comment below);
 *   - am_undefined for 'priority' when the process is exiting;
 *   - THE_NON_VALUE for an unknown item index (caller produces badarg).
 */
Eterm
process_info_aux(Process *c_p,
                 ErtsHeapFactory *hfact,
                 Process *rp,
                 ErtsProcLocks rp_locks,
                 int item_ix,
                 int flags,
                 Uint *reserve_sizep,
                 Uint *reds)
{
    Eterm *hp;
    Eterm res = NIL;
    Uint reserved;
    Uint reserve_size = *reserve_sizep;

#ifdef ERTS_ENABLE_LOCK_CHECK
    ErtsProcLocks locks = erts_proc_lc_my_proc_locks(rp);

    /* Most items require the main lock on 'rp'; status/priority/
     * suspending may be read with no lock beyond (at most) main. */
    switch (item_ix) {
    case ERTS_PI_IX_STATUS:
    case ERTS_PI_IX_PRIORITY:
    case ERTS_PI_IX_SUSPENDING:
        ERTS_LC_ASSERT((locks & ~ERTS_PROC_LOCK_MAIN) == 0);
        break;
    default:
        ERTS_LC_ASSERT(locks == ERTS_PROC_LOCK_MAIN);
        break;
    }
#endif

    /* Release the words pre-reserved for this item; produced below. */
    reserved = pi_ix2rsz(item_ix);
    ERTS_PI_UNRESERVE(reserve_size, reserved);

    (*reds)++;

    ASSERT(rp);

    /*
     * Q: Why this ERTS_PI_FLAG_ALWAYS_WRAP flag?
     *
     * A: registered_name is strange. If process has no registered name,
     *    process_info(Pid, registered_name) returns [], and
     *    the result of process_info(Pid) has no {registered_name, Name}
     *    tuple in the resulting list. This is inconsistent with all other
     *    options, but we do not dare to change it.
     *
     *    When process_info/2 is called with a list as second argument,
     *    registered_name behaves as it should, i.e. a
     *    {registered_name, []} will appear in the resulting list.
     *
     *    If ERTS_PI_FLAG_ALWAYS_WRAP is set, process_info_aux() always
     *    wrap the result in a key two tuple.
     */

    switch (item_ix) {

    case ERTS_PI_IX_REGISTERED_NAME:
        if (rp->common.u.alive.reg)
            res = rp->common.u.alive.reg->name;
        else {
            if (flags & ERTS_PI_FLAG_ALWAYS_WRAP)
                res = NIL;
            else
                return NIL;
        }
        break;

    case ERTS_PI_IX_CURRENT_FUNCTION:
        res = current_function(c_p, hfact, rp, 0,
                               reserve_size, flags);
        break;

    case ERTS_PI_IX_CURRENT_LOCATION:
        res = current_function(c_p, hfact, rp, 1,
                               reserve_size, flags);
        break;

    case ERTS_PI_IX_CURRENT_STACKTRACE:
        res = current_stacktrace(c_p, hfact, rp, reserve_size, flags);
        break;

    case ERTS_PI_IX_INITIAL_CALL:
        hp = erts_produce_heap(hfact, 4, reserve_size);
        res = TUPLE3(hp,
                     rp->u.initial.module,
                     rp->u.initial.function,
                     make_small(rp->u.initial.arity));
        hp += 4;
        break;

    case ERTS_PI_IX_STATUS: {
        erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
        res = erts_process_state2status(state);
        if (res == am_running && (state & ERTS_PSFLG_RUNNING_SYS)) {
            ASSERT(c_p == rp);
            ASSERT(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER);
            if (!(state & (ERTS_PSFLG_ACTIVE
                           | ERTS_PSFLG_SIG_Q
                           | ERTS_PSFLG_SIG_IN_Q))) {
                int sys_tasks = 0;
                if (state & ERTS_PSFLG_SYS_TASKS)
                    sys_tasks = erts_have_non_prio_elev_sys_tasks(rp,
                                                                  rp_locks);
                if (!sys_tasks) {
                    /*
                     * We are servicing a process-info request from
                     * another process. If that other process could
                     * have inspected our state itself, we would have
                     * been in the 'waiting' state.
                     */
                    res = am_waiting;
                }
            }
        }
        break;
    }

    case ERTS_PI_IX_MESSAGES: {
        ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
        /* Sensitive processes never expose their message contents. */
        if (rp->sig_qs.len == 0 || (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE))
            res = NIL;
        else {
            int info_on_self = !(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER);
            ErtsMessageInfo *mip;
            Sint i, len;
            Uint heap_need;

            mip = erts_alloc(ERTS_ALC_T_TMP,
                             rp->sig_qs.len*sizeof(ErtsMessageInfo));

            /*
             * Note that message queue may shrink when calling
             * erts_proc_sig_prep_msgq_for_inspection() since it removes
             * corrupt distribution messages.
             */
            heap_need = erts_proc_sig_prep_msgq_for_inspection(c_p, rp,
                                                               rp_locks,
                                                               info_on_self,
                                                               mip);
            len = rp->sig_qs.len;

            heap_need += len*2; /* Cons cells */

            reserve_size += heap_need;

            /* Build list of messages... */
            for (i = len - 1, res = NIL; i >= 0; i--) {
                Eterm msg = ERL_MESSAGE_TERM(mip[i].msgp);
                Uint sz = mip[i].size;

                ERTS_PI_UNRESERVE(reserve_size, sz+2);
                hp = erts_produce_heap(hfact, sz+2, reserve_size);

                /* Size 0 means the message already lives on rp's heap. */
                if (sz != 0)
                    msg = copy_struct(msg, sz, &hp, hfact->off_heap);

                res = CONS(hp, msg, res);
                hp += 2;
            }

            *reds += (Uint) len / 4;

            erts_free(ERTS_ALC_T_TMP, mip);
        }
        break;
    }

    case ERTS_PI_IX_MESSAGE_QUEUE_LEN: {
        Sint len = rp->sig_qs.len;
        ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
        ASSERT(len >= 0);
        if (len <= MAX_SMALL)
            res = make_small(len);
        else {
            hp = erts_produce_heap(hfact, BIG_UINT_HEAP_SIZE, reserve_size);
            res = uint_to_big((Uint) len, hp);
        }
        break;
    }

    case ERTS_PI_IX_LINKS: {
        MonitorInfoCollection mic;
        int i;
        Eterm item;

        INIT_MONITOR_INFOS(mic);

        erts_link_tree_foreach(ERTS_P_LINKS(rp), collect_one_link, (void *) &mic);

        reserve_size += mic.sz;
        res = NIL;
        for (i = 0; i < mic.mi_i; i++) {
            Eterm item_src = mic.mi[i].entity.term;
            Uint sz = NC_HEAP_SIZE(item_src) + 2;
            ERTS_PI_UNRESERVE(reserve_size, sz);
            hp = erts_produce_heap(hfact, sz, reserve_size);
            item = STORE_NC(&hp, hfact->off_heap, item_src);
            res = CONS(hp, item, res);
        }

        *reds += (Uint) mic.mi_i / 4;

        DESTROY_MONITOR_INFOS(mic);
        break;
    }

    case ERTS_PI_IX_MONITORS: {
        MonitorInfoCollection mic;
        int i;

        INIT_MONITOR_INFOS(mic);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
                                  collect_one_origin_monitor,
                                  (void *) &mic);

        reserve_size += mic.sz;
        res = NIL;
        for (i = 0; i < mic.mi_i; i++) {
            if (mic.mi[i].named) {
                /* Monitor by name.
                 * Build {process|port, {Name, Node}} and cons it.
                 */
                Eterm t1, t2;
                /* If pid is an atom, then it is a remote named monitor, which
                   has to be a process */
                Eterm m_type = is_port(mic.mi[i].pid) ? am_port : am_process;
                ASSERT(is_pid(mic.mi[i].pid)
                       || is_port(mic.mi[i].pid)
                       || is_atom(mic.mi[i].pid));

                ERTS_PI_UNRESERVE(reserve_size, 3+3+2);
                hp = erts_produce_heap(hfact, 3+3+2, reserve_size);

                t1 = TUPLE2(hp, mic.mi[i].entity.term, mic.mi[i].node);
                hp += 3;
                t2 = TUPLE2(hp, m_type, t1);
                hp += 3;
                res = CONS(hp, t2, res);
            }
            else {
                /* Build {process|port|time_offset, Pid|clock_service} and cons it. */
                Eterm t;
                Eterm pid;
                Eterm m_type;
                Eterm pid_src = mic.mi[i].entity.term;
                Uint sz = is_atom(pid_src) ? 0 : NC_HEAP_SIZE(pid_src);
                sz += 3 + 2;

                ERTS_PI_UNRESERVE(reserve_size, sz);
                hp = erts_produce_heap(hfact, sz, reserve_size);

                pid = (is_atom(pid_src)
                       ? pid_src
                       : STORE_NC(&hp, hfact->off_heap, pid_src));

                switch (mic.mi[i].type) {
                case ERTS_MON_TYPE_PORT:
                    m_type = am_port;
                    break;
                case ERTS_MON_TYPE_TIME_OFFSET:
                    m_type = am_time_offset;
                    break;
                default:
                    m_type = am_process;
                    break;
                }

                ASSERT(is_pid(mic.mi[i].pid)
                       || is_port(mic.mi[i].pid));

                t = TUPLE2(hp, m_type, pid);
                hp += 3;
                res = CONS(hp, t, res);
            }
        }

        *reds += (Uint) mic.mi_i / 4;

        DESTROY_MONITOR_INFOS(mic);
        break;
    }

    case ERTS_PI_IX_MONITORED_BY: {
        MonitorInfoCollection mic;
        int i;
        Eterm item;

        INIT_MONITOR_INFOS(mic);
        /* Target monitors live both in the local-target list and the
         * monitor tree; collect from both. */
        erts_monitor_list_foreach(ERTS_P_LT_MONITORS(rp),
                                  collect_one_target_monitor,
                                  (void *) &mic);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
                                  collect_one_target_monitor,
                                  (void *) &mic);

        reserve_size += mic.sz;

        res = NIL;
        for (i = 0; i < mic.mi_i; ++i) {
            Uint sz = 2;

            if (mic.mi[i].type == ERTS_MON_TYPE_RESOURCE)
                sz += erts_resource_ref_size(mic.mi[i].entity.resource);
            else
                sz += NC_HEAP_SIZE(mic.mi[i].entity.term);

            ERTS_PI_UNRESERVE(reserve_size, sz);
            hp = erts_produce_heap(hfact, sz, reserve_size);

            if (mic.mi[i].type == ERTS_MON_TYPE_RESOURCE)
                item = erts_bld_resource_ref(&hp,
                                             hfact->off_heap,
                                             mic.mi[i].entity.resource);
            else
                item = STORE_NC(&hp,
                                hfact->off_heap,
                                mic.mi[i].entity.term);
            res = CONS(hp, item, res);
        }

        *reds += (Uint) mic.mi_i / 4;

        DESTROY_MONITOR_INFOS(mic);
        break;
    }

    case ERTS_PI_IX_SUSPENDING: {
        ErtsSuspendMonitorInfoCollection smic;
        int i;

        ERTS_INIT_SUSPEND_MONITOR_INFOS(smic);

        erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
                                  collect_one_suspend_monitor,
                                  (void *) &smic);

        reserve_size += smic.sz;

        res = NIL;
        for (i = 0; i < smic.smi_i; i++) {
            ErtsMonitorSuspend *msp;
            erts_aint_t mstate;
            Sint ci;
            Eterm ct, active, pending, item;
            Uint sz = 4 + 2; /* 3-tuple + cons cell */

            msp = smic.smi[i];
            mstate = erts_atomic_read_nob(&msp->state);

            ci = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK);
            if (!IS_SSMALL(ci))
                sz += BIG_UINT_HEAP_SIZE;

            ERTS_PI_UNRESERVE(reserve_size, sz);
            hp = erts_produce_heap(hfact, sz, reserve_size);

            if (IS_SSMALL(ci))
                ct = make_small(ci);
            else {
                ct = small_to_big(ci, hp);
                hp += BIG_UINT_HEAP_SIZE;
            }

            /* The counter counts either active or pending suspends,
             * depending on the ACTIVE flag; the other is zero. */
            if (mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE) {
                active = ct;
                pending = make_small(0);
            }
            else {
                active = make_small(0);
                pending = ct;
            }

            ASSERT(is_internal_pid(msp->md.origin.other.item));

            item = TUPLE3(hp, msp->md.origin.other.item, active, pending);
            hp += 4;
            res = CONS(hp, item, res);
        }

        *reds += (Uint) smic.smi_i / 4;

        ERTS_DESTROY_SUSPEND_MONITOR_INFOS(smic);

        break;
    }

    case ERTS_PI_IX_DICTIONARY:
        /* Sensitive processes never expose their dictionary. */
        if (!rp->dictionary || (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE)) {
            res = NIL;
        } else {
            Uint num = rp->dictionary->numElements;
            res = erts_dictionary_copy(hfact, rp->dictionary, reserve_size);
            *reds += (Uint) num / 4;
        }

        break;

    case ERTS_PI_IX_TRAP_EXIT:
        res = (rp->flags & F_TRAP_EXIT) ? am_true : am_false;
        break;

    case ERTS_PI_IX_ERROR_HANDLER:
        res = erts_proc_get_error_handler(rp);
        break;

    case ERTS_PI_IX_HEAP_SIZE: {
        Uint hsz = 0;
        /* First call sizes the term, second call builds it. */
        (void) erts_bld_uint(NULL, &hsz, HEAP_SIZE(rp));
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, HEAP_SIZE(rp));
        break;
    }

    case ERTS_PI_IX_FULLSWEEP_AFTER: {
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, MAX_GEN_GCS(rp));
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, MAX_GEN_GCS(rp));
        break;
    }

    case ERTS_PI_IX_MIN_HEAP_SIZE: {
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, MIN_HEAP_SIZE(rp));
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, MIN_HEAP_SIZE(rp));
        break;
    }

    case ERTS_PI_IX_MIN_BIN_VHEAP_SIZE: {
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, MIN_VHEAP_SIZE(rp));
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, MIN_VHEAP_SIZE(rp));
        break;
    }

    case ERTS_PI_IX_MAX_HEAP_SIZE: {
        Uint hsz = 0;
        (void) erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
                                      MAX_HEAP_SIZE_FLAGS_GET(rp),
                                      NULL, &hsz);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
                                     MAX_HEAP_SIZE_FLAGS_GET(rp),
                                     &hp, NULL);
        break;
    }

    case ERTS_PI_IX_TOTAL_HEAP_SIZE: {
        Uint total_heap_size;
        Uint hsz = 0;

        total_heap_size = rp->heap_sz;
        if (rp->old_hend && rp->old_heap)
            total_heap_size += rp->old_hend - rp->old_heap;

        total_heap_size += rp->mbuf_sz;

        /* On-heap message queue data counts toward the total as well. */
        if (rp->sig_qs.flags & FS_ON_HEAP_MSGQ) {
            ErtsMessage *mp;
            ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
            for (mp = rp->sig_qs.first; mp; mp = mp->next) {
                if (ERTS_SIG_IS_RECV_MARKER(mp))
                    continue;
                ASSERT(ERTS_SIG_IS_MSG(mp));
                if (mp->data.attached)
                    total_heap_size += erts_msg_attached_data_size(mp);
            }
            *reds += (Uint) rp->sig_qs.len / 4;
        }

        (void) erts_bld_uint(NULL, &hsz, total_heap_size);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, total_heap_size);
        break;
    }

    case ERTS_PI_IX_STACK_SIZE: {
        Uint stack_size = STACK_START(rp) - rp->stop;
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, stack_size);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, stack_size);
        break;
    }

    case ERTS_PI_IX_MEMORY: { /* Memory consumed in bytes */
        Uint hsz = 0;
        Uint size = erts_process_memory(rp, 0);
        (void) erts_bld_uint(NULL, &hsz, size);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, size);

        ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
        *reds += (Uint) rp->sig_qs.len / 4;

        break;
    }

    case ERTS_PI_IX_GARBAGE_COLLECTION: {
        DECL_AM(minor_gcs);
        Eterm t;
        Uint map_sz = 0;

        erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), MAX_HEAP_SIZE_FLAGS_GET(rp), NULL, &map_sz);

        /* Five {Key, Value} 2-tuples and cons cells, plus the
         * max_heap_size map. */
        hp = erts_produce_heap(hfact, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + map_sz, reserve_size);

        t = TUPLE2(hp, AM_minor_gcs, make_small(GEN_GCS(rp))); hp += 3;
        res = CONS(hp, t, NIL); hp += 2;
        t = TUPLE2(hp, am_fullsweep_after, make_small(MAX_GEN_GCS(rp))); hp += 3;
        res = CONS(hp, t, res); hp += 2;

        t = TUPLE2(hp, am_min_heap_size, make_small(MIN_HEAP_SIZE(rp))); hp += 3;
        res = CONS(hp, t, res); hp += 2;
        t = TUPLE2(hp, am_min_bin_vheap_size, make_small(MIN_VHEAP_SIZE(rp))); hp += 3;
        res = CONS(hp, t, res); hp += 2;

        t = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), MAX_HEAP_SIZE_FLAGS_GET(rp), &hp, NULL);

        t = TUPLE2(hp, am_max_heap_size, t); hp += 3;
        res = CONS(hp, t, res); hp += 2;
        break;
    }

    case ERTS_PI_IX_GARBAGE_COLLECTION_INFO: {
        Uint sz = 0, actual_sz = 0;

        /* Size pass, then build pass. */
        erts_process_gc_info(rp, &sz, NULL, 0, 0);

        hp = erts_produce_heap(hfact, sz, reserve_size);
        res = erts_process_gc_info(rp, &actual_sz, &hp, 0, 0);

        break;
    }

    case ERTS_PI_IX_GROUP_LEADER: {
        int sz = NC_HEAP_SIZE(rp->group_leader);
        hp = erts_produce_heap(hfact, sz, reserve_size);
        res = STORE_NC(&hp, hfact->off_heap, rp->group_leader);
        break;
    }

    case ERTS_PI_IX_REDUCTIONS: {
        Uint reds = rp->reds + erts_current_reductions(c_p, rp);
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, reds);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, reds);
        break;
    }

    case ERTS_PI_IX_PRIORITY: {
        erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
        if (ERTS_PSFLG_EXITING & state)
            return am_undefined;
        res = erts_get_process_priority(state);
        break;
    }

    case ERTS_PI_IX_TRACE:
        res = make_small(ERTS_TRACE_FLAGS(rp) & TRACEE_FLAGS);
        break;

    case ERTS_PI_IX_BINARY: {
        ErlHeapFragment *hfrag;
        Uint sz;

        res = NIL;
        sz = 0;

        /* Size pass over the main off-heap list and every heap
         * fragment, then build pass in the same order. */
        (void)erts_bld_bin_list(NULL, &sz, &MSO(rp), NIL);
        for (hfrag = rp->mbuf; hfrag != NULL; hfrag = hfrag->next) {
            (void)erts_bld_bin_list(NULL, &sz, &hfrag->off_heap, NIL);
        }

        hp = erts_produce_heap(hfact, sz, reserve_size);

        res = erts_bld_bin_list(&hp, NULL, &MSO(rp), NIL);
        for (hfrag = rp->mbuf; hfrag != NULL; hfrag = hfrag->next) {
            res = erts_bld_bin_list(&hp, NULL, &hfrag->off_heap, res);
        }

        break;
    }

    case ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN: {
        Uint sz = size_object(rp->seq_trace_token);
        hp = erts_produce_heap(hfact, sz, reserve_size);
        res = copy_struct(rp->seq_trace_token, sz, &hp, hfact->off_heap);
        break;
    }

    case ERTS_PI_IX_CATCHLEVEL:
        res = make_small(catchlevel(rp));
        break;

    case ERTS_PI_IX_BACKTRACE: {
        erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
        erts_stack_dump(ERTS_PRINT_DSBUF, (void *) dsbufp, rp);
        res = erts_heap_factory_new_binary(hfact, (byte *) dsbufp->str,
                                           dsbufp->str_len, reserve_size);
        erts_destroy_tmp_dsbuf(dsbufp);
        break;
    }

    case ERTS_PI_IX_LAST_CALLS: {
        struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(rp);
        if (!scb) {
            res = am_false;
        } else {
            /*
             * One cons cell and a 3-struct, and a 2-tuple.
             * Might be less than that, if there are sends, receives or timeouts,
             * so we must do a HRelease() to avoid creating holes.
             */
            Sint needed = scb->n*(2+4);
            Eterm term, list;
            int i, j;
            Export *exp;

            reserve_size += needed;

            /* Walk the circular buffer newest-to-oldest, building the
             * list back-to-front. */
            list = NIL;
            for (i = 0; i < scb->n; i++) {
                Uint sz;
                j = scb->cur - i - 1;
                if (j < 0)
                    j += scb->len;

                sz = 2;
                exp = scb->ct[j];
                if (exp != &exp_send && exp != &exp_receive && exp != &exp_timeout)
                    sz += 4;

                needed -= sz;
                ERTS_PI_UNRESERVE(reserve_size, sz);
                hp = erts_produce_heap(hfact, sz, reserve_size);

                if (exp == &exp_send)
                    term = am_send;
                else if (exp == &exp_receive)
                    term = am_receive;
                else if (exp == &exp_timeout)
                    term = am_timeout;
                else {
                    term = TUPLE3(hp,
                                  scb->ct[j]->info.mfa.module,
                                  scb->ct[j]->info.mfa.function,
                                  make_small(scb->ct[j]->info.mfa.arity));
                    hp += 4;
                }
                list = CONS(hp, term, list);
            }

            ASSERT(needed >= 0);
            if (needed > 0)
                reserve_size -= needed;

            res = list;
        }
        break;
    }

    case ERTS_PI_IX_MESSAGE_QUEUE_DATA:
        switch (rp->sig_qs.flags & (FS_OFF_HEAP_MSGQ|FS_ON_HEAP_MSGQ)) {
        case FS_OFF_HEAP_MSGQ:
            res = am_off_heap;
            break;
        case FS_ON_HEAP_MSGQ:
            res = am_on_heap;
            break;
        default:
            res = am_error;
            ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
            break;
        }
        break;

    case ERTS_PI_IX_MAGIC_REF: {
        Uint sz = 0;
        (void) bld_magic_ref_bin_list(NULL, &sz, &MSO(rp));
        hp = erts_produce_heap(hfact, sz, 0);
        res = bld_magic_ref_bin_list(&hp, NULL, &MSO(rp));

        *reds += (Uint) 10;
        break;
    }

    default:
        return THE_NON_VALUE; /* will produce badarg */

    }

    /* Wrap the result as {Key, Value}. */
    ERTS_PI_UNRESERVE(reserve_size, 3);
    *reserve_sizep = reserve_size;
    hp = erts_produce_heap(hfact, 3, reserve_size);

    return TUPLE2(hp, pi_ix2arg(item_ix), res);
}
2044 #undef MI_INC
2045
/*
 * Build the 'current_function' (full_info == 0) or 'current_location'
 * (full_info != 0) value for 'rp'. May update rp->current, which acts
 * as a cache of the looked-up MFA. For a request on the calling process
 * itself (c_p == rp without ERTS_PI_FLAG_REQUEST_FOR_OTHER), the
 * *caller* of process_info is reported instead of process_info itself;
 * see the comment in the body.
 */
static Eterm
current_function(Process *c_p, ErtsHeapFactory *hfact, Process* rp,
                 int full_info, Uint reserve_size, int flags)
{
    Eterm* hp;
    Eterm res;
    FunctionInfo fi;

    if (rp->current == NULL) {
        erts_lookup_function_info(&fi, rp->i, full_info);
        rp->current = fi.mfa;
    } else if (full_info) {
        erts_lookup_function_info(&fi, rp->i, full_info);
        if (fi.mfa == NULL) {
            /* Use the current function without location info */
            erts_set_current_function(&fi, rp->current);
        }
    }
    /* NOTE: when rp->current != NULL and !full_info, 'fi' stays
     * uninitialized; it is only read below under 'full_info'. */

    if (c_p == rp && !(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER)) {
        ErtsCodePtr return_address;
        FunctionInfo caller_fi;

        /*
         * The current function is erlang:process_info/{1,2}, and we've
         * historically returned the *calling* function in that case. We
         * therefore use the continuation pointer stored at the top of the
         * stack instead, which is safe since process_info is a "heavy" BIF
         * that is only called through its export entry.
         */
        return_address = erts_printable_return_address(rp, STACK_TOP(rp));

        erts_lookup_function_info(&caller_fi, return_address, full_info);
        if (caller_fi.mfa) {
            fi = caller_fi;
            rp->current = caller_fi.mfa;
        }
    }

    /*
     * Return the result.
     */
    if (rp->current == NULL) {
        res = am_undefined;
    } else if (full_info) {
        hp = erts_produce_heap(hfact, fi.needed, reserve_size);
        erts_build_mfa_item(&fi, hp, am_true, &res, NIL);
    } else {
        /* Plain {Module, Function, Arity} 3-tuple. */
        hp = erts_produce_heap(hfact, 4, reserve_size);
        res = TUPLE3(hp, rp->current->module,
                     rp->current->function,
                     make_small(rp->current->arity));
    }
    return res;
}
2101
2102 static Eterm
current_stacktrace(Process * p,ErtsHeapFactory * hfact,Process * rp,Uint reserve_size,int flags)2103 current_stacktrace(Process *p, ErtsHeapFactory *hfact, Process* rp,
2104 Uint reserve_size, int flags)
2105 {
2106 Uint sz;
2107 struct StackTrace* s;
2108 int depth;
2109 FunctionInfo* stk;
2110 FunctionInfo* stkp;
2111 Uint heap_size;
2112 int i;
2113 Eterm* hp;
2114 Eterm mfa;
2115 Eterm res = NIL;
2116
2117 depth = erts_backtrace_depth;
2118 sz = offsetof(struct StackTrace, trace) + sizeof(ErtsCodePtr) * depth;
2119 s = (struct StackTrace *) erts_alloc(ERTS_ALC_T_TMP, sz);
2120 s->depth = 0;
2121 s->pc = NULL;
2122
2123 /* We skip current pc when requesting our own stack trace since it will
2124 * inevitably point to process_info/1,2 */
2125 if ((p != rp || (flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER)) &&
2126 depth > 0 && rp->i) {
2127 s->trace[s->depth++] = rp->i;
2128 depth--;
2129 }
2130 erts_save_stacktrace(rp, s, depth);
2131
2132 depth = s->depth;
2133 stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
2134 depth*sizeof(FunctionInfo));
2135 heap_size = 3;
2136 for (i = 0; i < depth; i++) {
2137 erts_lookup_function_info(stkp, s->trace[i], 1);
2138 if (stkp->mfa) {
2139 heap_size += stkp->needed + 2;
2140 stkp++;
2141 }
2142 }
2143
2144 reserve_size += heap_size;
2145
2146 /*
2147 * We intentionally produce heap in small chunks
2148 * (for more info see process_info_aux()).
2149 */
2150 while (stkp > stk) {
2151 stkp--;
2152 sz = stkp->needed + 2;
2153 ERTS_PI_UNRESERVE(reserve_size, sz);
2154 hp = erts_produce_heap(hfact, sz, reserve_size);
2155 hp = erts_build_mfa_item(stkp, hp, am_true, &mfa, NIL);
2156 res = CONS(hp, mfa, res);
2157 }
2158
2159 erts_free(ERTS_ALC_T_TMP, stk);
2160 erts_free(ERTS_ALC_T_TMP, s);
2161 return res;
2162 }
2163
2164 #if defined(VALGRIND) || defined(ADDRESS_SANITIZER)
/*
 * Flatten 'iolist' into a freshly allocated, NUL-terminated
 * ERTS_ALC_T_TMP buffer. On success *bufp receives the buffer (caller
 * frees it with erts_free(ERTS_ALC_T_TMP, ...)) and 1 is returned.
 * Returns 0 if 'iolist' is not a valid iolist; nothing is allocated in
 * that case.
 *
 * Note: erts_iolist_to_buf() returns the number of bytes *left* in the
 * destination buffer on success (the NUL placement below relies on
 * exactly that).
 */
static int iolist_to_tmp_buf(Eterm iolist, char** bufp)
{
    ErlDrvSizeT buf_size = 1024; /* Try with 1KB first */
    char *buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
    ErlDrvSizeT r = erts_iolist_to_buf(iolist, (char*) buf, buf_size - 1);
    if (ERTS_IOLIST_TO_BUF_FAILED(r)) {
        erts_free(ERTS_ALC_T_TMP, (void *) buf);
        if (erts_iolist_size(iolist, &buf_size)) {
            return 0; /* type error: not an iolist */
        }
        buf_size++; /* room for the terminating NUL */
        buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
        r = erts_iolist_to_buf(iolist, (char*) buf, buf_size - 1);
        /* The buffer was sized exactly, so no bytes can remain unused. */
        ASSERT(r == 0);
    }
    buf[buf_size - 1 - r] = '\0';
    *bufp = buf;
    return 1;
}
2184 #endif
2185
2186 /*
2187 * This function takes care of calls to erlang:system_info/1 when the argument
2188 * is a tuple.
2189 */
2190 static BIF_RETTYPE
info_1_tuple(Process * BIF_P,Eterm * tp,int arity)2191 info_1_tuple(Process* BIF_P, /* Pointer to current process. */
2192 Eterm* tp, /* Pointer to first element in tuple */
2193 int arity) /* Arity of tuple (untagged). */
2194 {
2195 Eterm ret;
2196 Eterm sel;
2197
2198 sel = *tp++;
2199
2200 if (sel == am_memory_internal) {
2201 switch (arity) {
2202 case 3:
2203 if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1, 1))
2204 return am_true;
2205 default:
2206 goto badarg;
2207 }
2208 }
2209 else if (sel == am_allocator_sizes) {
2210 switch (arity) {
2211 case 2:
2212 ERTS_BIF_PREP_TRAP1(ret, alloc_sizes_trap, BIF_P, *tp);
2213 return ret;
2214 case 3:
2215 if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1, 0))
2216 return am_true;
2217 default:
2218 goto badarg;
2219 }
2220 }
2221 else if (sel == am_wordsize && arity == 2) {
2222 if (tp[0] == am_internal) {
2223 return make_small(sizeof(Eterm));
2224 }
2225 if (tp[0] == am_external) {
2226 return make_small(sizeof(UWord));
2227 }
2228 goto badarg;
2229 } else if (sel == am_allocator) {
2230 switch (arity) {
2231 case 2:
2232 ERTS_BIF_PREP_TRAP1(ret, alloc_info_trap, BIF_P, *tp);
2233 return ret;
2234 case 3:
2235 if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 0, 0))
2236 return am_true;
2237 default:
2238 goto badarg;
2239 }
2240 } else if (ERTS_IS_ATOM_STR("internal_cpu_topology", sel) && arity == 2) {
2241 return erts_get_cpu_topology_term(BIF_P, *tp);
2242 } else if (ERTS_IS_ATOM_STR("cpu_topology", sel) && arity == 2) {
2243 Eterm res = erts_get_cpu_topology_term(BIF_P, *tp);
2244 if (res == THE_NON_VALUE)
2245 goto badarg;
2246 ERTS_BIF_PREP_TRAP1(ret, erts_format_cpu_topology_trap, BIF_P, res);
2247 return ret;
2248 } else if (ERTS_IS_ATOM_STR("memory_checker", sel)) {
2249 if (arity == 2 && ERTS_IS_ATOM_STR("test_leak", *tp)) {
2250 #if defined(VALGRIND) || defined(ADDRESS_SANITIZER)
2251 erts_alloc(ERTS_ALC_T_HEAP , 100);
2252 #endif
2253 BIF_RET(am_ok);
2254 }
2255 else if (arity == 2 && ERTS_IS_ATOM_STR("test_overflow", *tp)) {
2256 static int test[2];
2257 BIF_RET(make_small(test[2]));
2258 }
2259 #if defined(VALGRIND) || defined(ADDRESS_SANITIZER)
2260 if (arity == 2 && *tp == am_running) {
2261 # if defined(VALGRIND)
2262 if (RUNNING_ON_VALGRIND)
2263 BIF_RET(ERTS_MAKE_AM("valgrind"));
2264 # elif defined(ADDRESS_SANITIZER)
2265 BIF_RET(ERTS_MAKE_AM("asan"));
2266 # endif
2267 }
2268 else if (arity == 2 && ERTS_IS_ATOM_STR("check_leaks", *tp)) {
2269 # if defined(VALGRIND)
2270 # ifdef VALGRIND_DO_ADDED_LEAK_CHECK
2271 VALGRIND_DO_ADDED_LEAK_CHECK;
2272 # else
2273 VALGRIND_DO_LEAK_CHECK;
2274 # endif
2275 BIF_RET(am_ok);
2276 # elif defined(ADDRESS_SANITIZER)
2277 __lsan_do_recoverable_leak_check();
2278 BIF_RET(am_ok);
2279 # endif
2280 }
2281 # if defined(VALGRIND)
2282 if (arity == 3 && tp[0] == am_print && is_list(tp[1])) {
2283 char* buf;
2284 if (!iolist_to_tmp_buf(tp[1], &buf))
2285 goto badarg;
2286 VALGRIND_PRINTF("%s\n", buf);
2287 erts_free(ERTS_ALC_T_TMP, (void *) buf);
2288 BIF_RET(am_true);
2289 }
2290 # endif
2291 # if defined(ADDRESS_SANITIZER)
2292 if (arity == 3 && ERTS_IS_ATOM_STR("log",tp[0]) && is_list(tp[1])) {
2293 static char *active_log = NULL;
2294 static int active_log_len;
2295 Eterm ret = NIL;
2296 char* buf;
2297 if (!iolist_to_tmp_buf(tp[1], &buf))
2298 goto badarg;
2299 erts_rwmtx_rwlock(&erts_dist_table_rwmtx); /* random lock abuse */
2300 __sanitizer_set_report_path(buf);
2301 if (active_log) {
2302 Eterm *hp = HAlloc(BIF_P, 2 * active_log_len);
2303 ret = erts_bld_string_n(&hp, 0, active_log, active_log_len);
2304 erts_free(ERTS_ALC_T_DEBUG, active_log);
2305 }
2306 active_log_len = sys_strlen(buf);
2307 active_log = erts_alloc(ERTS_ALC_T_DEBUG, active_log_len + 1);
2308 sys_memcpy(active_log, buf, active_log_len + 1);
2309 erts_rwmtx_rwunlock(&erts_dist_table_rwmtx);
2310 erts_free(ERTS_ALC_T_TMP, (void *) buf);
2311 BIF_RET(ret);
2312 }
2313 # endif
2314 #endif
2315 #if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
2316 } else if (ERTS_IS_ATOM_STR("ultrasparc_set_pcr", sel)) {
2317 unsigned long long tmp;
2318 int fd;
2319 int rc;
2320
2321 if (arity != 2 || !is_small(*tp)) {
2322 goto badarg;
2323 }
2324 tmp = signed_val(*tp);
2325 if ((fd = open("/dev/perfmon", O_RDONLY)) == -1) {
2326 BIF_RET(am_false);
2327 }
2328 rc = ioctl(fd, PERFMON_SETPCR, &tmp);
2329 close(fd);
2330 if (rc < 0) {
2331 BIF_RET(am_false);
2332 }
2333 BIF_RET(am_true);
2334 #endif
2335 }
2336
2337 badarg:
2338 ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
2339
2340 return ret;
2341 }
2342
2343 #define INFO_DSBUF_INC_SZ 256
2344
2345 static erts_dsprintf_buf_t *
grow_info_dsbuf(erts_dsprintf_buf_t * dsbufp,size_t need)2346 grow_info_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
2347 {
2348 size_t size;
2349 size_t free_size = dsbufp->size - dsbufp->str_len;
2350
2351 ASSERT(dsbufp);
2352
2353 if (need <= free_size)
2354 return dsbufp;
2355 size = need - free_size + INFO_DSBUF_INC_SZ;
2356 size = ((size + INFO_DSBUF_INC_SZ - 1)/INFO_DSBUF_INC_SZ)*INFO_DSBUF_INC_SZ;
2357 size += dsbufp->size;
2358 ASSERT(dsbufp->str_len + need <= size);
2359 dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_INFO_DSBUF,
2360 (void *) dsbufp->str,
2361 size);
2362 dsbufp->size = size;
2363 return dsbufp;
2364 }
2365
2366 static erts_dsprintf_buf_t *
erts_create_info_dsbuf(Uint size)2367 erts_create_info_dsbuf(Uint size)
2368 {
2369 Uint init_size = size ? size : INFO_DSBUF_INC_SZ;
2370 erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_info_dsbuf);
2371 erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_INFO_DSBUF,
2372 sizeof(erts_dsprintf_buf_t));
2373 sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
2374 dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_INFO_DSBUF, init_size);
2375 dsbufp->str[0] = '\0';
2376 dsbufp->size = init_size;
2377 return dsbufp;
2378 }
2379
2380 static void
erts_destroy_info_dsbuf(erts_dsprintf_buf_t * dsbufp)2381 erts_destroy_info_dsbuf(erts_dsprintf_buf_t *dsbufp)
2382 {
2383 if (dsbufp->str)
2384 erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp->str);
2385 erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp);
2386 }
2387
/*
 * Build a {Compiler, Version} 2-tuple describing the C compiler that
 * built the emulator, using the usual two-pass erts_bld_* protocol:
 * called once with szp to size the heap, then with hpp to build.
 *
 * For GCC, Version is either a bare integer (only __GNUC__ known) or a
 * 2-/3-tuple of major/minor/patchlevel depending on which predefined
 * macros exist; the nested #if/#ifdef blocks below splice the matching
 * argument list into a single erts_bld_tuple call, so the parentheses
 * only balance once the preprocessor has run.  For MSVC the version is
 * the _MSC_VER integer; otherwise {undefined, undefined} is returned.
 */
static Eterm
c_compiler_used(Eterm **hpp, Uint *szp)
{

#if defined(__GNUC__)
#  if defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#    define ERTS_GNUC_VSN_NUMS 3
#  elif defined(__GNUC_MINOR__)
#    define ERTS_GNUC_VSN_NUMS 2
#  else
#    define ERTS_GNUC_VSN_NUMS 1
#  endif
    /* {gnuc, Major} or {gnuc, {Major, Minor[, Patch]}} — the inner
     * tuple (and its closing parenthesis) is only present when more
     * than one version number is available. */
    return erts_bld_tuple(hpp,
                          szp,
                          2,
                          erts_bld_atom(hpp, szp, "gnuc"),
#if ERTS_GNUC_VSN_NUMS > 1
                          erts_bld_tuple(hpp,
                                         szp,
                                         ERTS_GNUC_VSN_NUMS,
#endif
                          erts_bld_uint(hpp, szp,
                                        (Uint) __GNUC__)
#ifdef __GNUC_MINOR__
                          ,
                          erts_bld_uint(hpp, szp,
                                        (Uint) __GNUC_MINOR__)
#ifdef __GNUC_PATCHLEVEL__
                          ,
                          erts_bld_uint(hpp, szp,
                                        (Uint) __GNUC_PATCHLEVEL__)
#endif
#endif
#if ERTS_GNUC_VSN_NUMS > 1
                          )
#endif
        );

#elif defined(_MSC_VER)
    /* {msc, _MSC_VER} */
    return erts_bld_tuple(hpp,
                          szp,
                          2,
                          erts_bld_atom(hpp, szp, "msc"),
                          erts_bld_uint(hpp, szp, (Uint) _MSC_VER));

#else
    /* Unknown compiler. */
    return erts_bld_tuple(hpp,
                          szp,
                          2,
                          am_undefined,
                          am_undefined);
#endif

}
2442
/*
 * Return nonzero iff `module_atom` names a module eligible for the
 * snif listing.  A '.' anywhere in the atom text marks an old-style
 * package module, which is considered bogus and excluded.
 */
static int is_snif_term(Eterm module_atom) {
    Atom *atom_entry = atom_tab(atom_val(module_atom));
    char *text = (char *) atom_entry->name;
    int pos = 0;

    while (pos < atom_entry->len) {
        if (text[pos] == '.')
            return 0; /* package-style name => not a snif */
        pos++;
    }

    return 1;
}
2456
/*
 * Cons a {Module, Name, Arity} tuple for bif_table[ix] onto the
 * accumulator list `res`.  Follows the two-pass erts_bld_* protocol
 * (szp pass sizes, hpp pass builds).  Returns the new list head.
 */
static Eterm build_snif_term(Eterm **hpp, Uint *szp, int ix, Eterm res) {
    Eterm mfa;

    mfa = erts_bld_tuple(hpp, szp, 3,
                         bif_table[ix].module,
                         bif_table[ix].name,
                         make_small(bif_table[ix].arity));
    return erts_bld_cons(hpp, szp, mfa, res);
}
2463
/*
 * Build the full snif list: one {Module, Name, Arity} entry for each
 * BIF whose module passes is_snif_term, consed onto `res`.  Two-pass
 * erts_bld_* protocol.  Iteration order (0..BIF_SIZE-1) is preserved
 * so the resulting list order matches the original.
 */
static Eterm build_snifs_term(Eterm **hpp, Uint *szp, Eterm res) {
    int ix = 0;

    while (ix < BIF_SIZE) {
        if (is_snif_term(bif_table[ix].module))
            res = build_snif_term(hpp, szp, ix, res);
        ix++;
    }
    return res;
}
2473
/*
 * erlang:system_info/1 BIF: one long dispatch over the requested item.
 *
 * Tuple arguments are parameterized queries and are delegated to
 * info_1_tuple.  Atom arguments are matched either directly against
 * pre-interned am_* atoms or by text via ERTS_IS_ATOM_STR.  Most
 * branches return immediately with BIF_RET; a few trap to helper BIFs
 * (cpu_topology) and the info/procs/loaded/dist branch blocks all
 * other schedulers while it runs.  Falls through to badarg for
 * unrecognized items.
 *
 * Several branches are guarded by build-configuration macros
 * (ERTS_OPCODE_COUNTER_SUPPORT, HAVE_SOLARIS_SPARC_PERFMON); note that
 * some `} else if` links of the chain live *inside* those #if blocks,
 * so brace balance depends on the preprocessor output.
 */
BIF_RETTYPE system_info_1(BIF_ALIST_1)
{
    Eterm res;
    Eterm* hp;
    Eterm val;
    int i;

    /* {Selector, ...} tuples are handled by info_1_tuple. */
    if (is_tuple(BIF_ARG_1)) {
        Eterm* tp = tuple_val(BIF_ARG_1);
        Uint arity = *tp++;
        return info_1_tuple(BIF_P, tp, arityval(arity));
    } else if (BIF_ARG_1 == am_scheduler_id) {
        ErtsSchedulerData *esdp = erts_proc_sched_data(BIF_P);
        BIF_RET(make_small(esdp->no));
    } else if (BIF_ARG_1 == am_compat_rel) {
        ASSERT(erts_compat_rel > 0);
        BIF_RET(make_small(erts_compat_rel));
    } else if (BIF_ARG_1 == am_multi_scheduling) {
        {
            /* msb: 0 = not blocked, >0 = blocked, <0 = normal-blocked */
            int msb = erts_is_multi_scheduling_blocked();
            BIF_RET(!msb
                    ? am_enabled
                    : (msb > 0
                       ? am_blocked
                       : am_blocked_normal));
        }
    } else if (BIF_ARG_1 == am_build_type || BIF_ARG_1 == am_emu_type) {
        /* Build flavor, decided entirely at compile time. */
#if defined(DEBUG)
        ERTS_DECL_AM(debug);
        BIF_RET(AM_debug);
#elif defined(ERTS_GCOV)
        ERTS_DECL_AM(gcov);
        BIF_RET(AM_gcov);
#elif defined(VALGRIND)
        ERTS_DECL_AM(valgrind);
        BIF_RET(AM_valgrind);
#elif defined(ADDRESS_SANITIZER)
        ERTS_DECL_AM(asan);
        BIF_RET(AM_asan);
#elif defined(GPROF)
        ERTS_DECL_AM(gprof);
        BIF_RET(AM_gprof);
#elif defined(ERTS_ENABLE_LOCK_COUNT)
        ERTS_DECL_AM(lcnt);
        BIF_RET(AM_lcnt);
#elif defined(ERTS_FRMPTR)
        ERTS_DECL_AM(frmptr);
        BIF_RET(AM_frmptr);
#else
        BIF_RET(am_opt);
#endif
    } else if (BIF_ARG_1 == am_emu_flavor) {
#if defined(BEAMASM)
        ERTS_DECL_AM(jit);
        BIF_RET(AM_jit);
#else
        ERTS_DECL_AM(emu);
        BIF_RET(AM_emu);
#endif
    } else if (BIF_ARG_1 == am_time_offset) {
        switch (erts_time_offset_state()) {
        case ERTS_TIME_OFFSET_PRELIMINARY: {
            ERTS_DECL_AM(preliminary);
            BIF_RET(AM_preliminary);
        }
        case ERTS_TIME_OFFSET_FINAL: {
            ERTS_DECL_AM(final);
            BIF_RET(AM_final);
        }
        case ERTS_TIME_OFFSET_VOLATILE: {
            ERTS_DECL_AM(volatile);
            BIF_RET(AM_volatile);
        }
        default:
            ERTS_INTERNAL_ERROR("Invalid time offset state");
        }
    } else if (ERTS_IS_ATOM_STR("os_monotonic_time_source", BIF_ARG_1)) {
        BIF_RET(erts_monotonic_time_source(BIF_P));
    } else if (ERTS_IS_ATOM_STR("os_system_time_source", BIF_ARG_1)) {
        BIF_RET(erts_system_time_source(BIF_P));
    } else if (ERTS_IS_ATOM_STR("time_correction", BIF_ARG_1)) {
        BIF_RET(erts_has_time_correction() ? am_true : am_false);
    } else if (ERTS_IS_ATOM_STR("start_time", BIF_ARG_1)) {
        BIF_RET(erts_get_monotonic_start_time(BIF_P));
    } else if (ERTS_IS_ATOM_STR("end_time", BIF_ARG_1)) {
        BIF_RET(erts_get_monotonic_end_time(BIF_P));
    } else if (ERTS_IS_ATOM_STR("time_warp_mode", BIF_ARG_1)) {
        switch (erts_time_warp_mode()) {
        case ERTS_NO_TIME_WARP_MODE: {
            ERTS_DECL_AM(no_time_warp);
            BIF_RET(AM_no_time_warp);
        }
        case ERTS_SINGLE_TIME_WARP_MODE: {
            ERTS_DECL_AM(single_time_warp);
            BIF_RET(AM_single_time_warp);
        }
        case ERTS_MULTI_TIME_WARP_MODE: {
            ERTS_DECL_AM(multi_time_warp);
            BIF_RET(AM_multi_time_warp);
        }
        default:
            ERTS_INTERNAL_ERROR("Invalid time warp mode");
        }
    } else if (BIF_ARG_1 == am_allocated_areas) {
        res = erts_allocated_areas(NULL, NULL, BIF_P);
        BIF_RET(res);
    } else if (ERTS_IS_ATOM_STR("hipe_architecture", BIF_ARG_1)) {
        /* HiPE support was removed; always undefined. */
        BIF_RET(am_undefined);
    } else if (BIF_ARG_1 == am_trace_control_word) {
        BIF_RET(db_get_trace_control_word(BIF_P));
    } else if (ERTS_IS_ATOM_STR("ets_realloc_moves", BIF_ARG_1)) {
        BIF_RET((erts_ets_realloc_always_moves) ? am_true : am_false);
    } else if (ERTS_IS_ATOM_STR("ets_always_compress", BIF_ARG_1)) {
        BIF_RET((erts_ets_always_compress) ? am_true : am_false);
    } else if (ERTS_IS_ATOM_STR("snifs", BIF_ARG_1)) {
        Uint size = 0;
        Uint *szp;

        /* Two-pass build: first size, then construct on the heap. */
        szp = &size;
        build_snifs_term(NULL, szp, NIL);
        hp = HAlloc(BIF_P, size);
        res = build_snifs_term(&hp, NULL, NIL);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_sequential_tracer) {
        ErtsTracer seq_tracer = erts_get_system_seq_tracer();
        val = erts_tracer_to_term(BIF_P, seq_tracer);
        hp = HAlloc(BIF_P, 3);
        res = TUPLE2(hp, am_sequential_tracer, val);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_garbage_collection){
        /* List of default GC settings: four {Key, Value} tuples. */
        Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs);
        Eterm tup;
        hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2);

        tup = TUPLE2(hp, am_fullsweep_after, make_small(val)); hp += 3;
        res = CONS(hp, tup, NIL); hp += 2;

        tup = TUPLE2(hp, am_min_heap_size, make_small(H_MIN_SIZE)); hp += 3;
        res = CONS(hp, tup, res); hp += 2;

        tup = TUPLE2(hp, am_min_bin_vheap_size, make_small(BIN_VH_MIN_SIZE)); hp += 3;
        res = CONS(hp, tup, res); hp += 2;

        tup = TUPLE2(hp, am_max_heap_size, make_small(H_MAX_SIZE)); hp += 3;
        res = CONS(hp, tup, res); hp += 2;

        BIF_RET(res);
    } else if (BIF_ARG_1 == am_fullsweep_after){
        Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs);
        hp = HAlloc(BIF_P, 3);
        res = TUPLE2(hp, am_fullsweep_after, make_small(val));
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_min_heap_size) {
        hp = HAlloc(BIF_P, 3);
        res = TUPLE2(hp, am_min_heap_size,make_small(H_MIN_SIZE));
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_max_heap_size) {
        Uint sz = 0;
        erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, NULL, &sz);
        hp = HAlloc(BIF_P, sz);
        res = erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, &hp, NULL);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_min_bin_vheap_size) {
        hp = HAlloc(BIF_P, 3);
        res = TUPLE2(hp, am_min_bin_vheap_size,make_small(BIN_VH_MIN_SIZE));
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_process_count) {
        BIF_RET(make_small(erts_ptab_count(&erts_proc)));
    } else if (BIF_ARG_1 == am_process_limit) {
        BIF_RET(make_small(erts_ptab_max(&erts_proc)));
    } else if (BIF_ARG_1 == am_port_count) {
        BIF_RET(make_small(erts_ptab_count(&erts_port)));
    } else if (BIF_ARG_1 == am_port_limit) {
        BIF_RET(make_small(erts_ptab_max(&erts_port)));
    } else if (BIF_ARG_1 == am_info
               || BIF_ARG_1 == am_procs
               || BIF_ARG_1 == am_loaded
               || BIF_ARG_1 == am_dist) {
        erts_dsprintf_buf_t *dsbufp = erts_create_info_dsbuf(0);

        /* Need to be the only thread running... */
        erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
        BIF_P->scheduler_data->current_process = NULL;
        erts_thr_progress_block();

        if (BIF_ARG_1 == am_info)
            info(ERTS_PRINT_DSBUF, (void *) dsbufp);
        else if (BIF_ARG_1 == am_procs)
            process_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
        else if (BIF_ARG_1 == am_loaded)
            loaded(ERTS_PRINT_DSBUF, (void *) dsbufp);
        else
            distribution_info(ERTS_PRINT_DSBUF, (void *) dsbufp);

        erts_thr_progress_unblock();
        erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
        BIF_P->scheduler_data->current_process = BIF_P;

        ASSERT(dsbufp && dsbufp->str);
        res = new_binary(BIF_P, (byte *) dsbufp->str, dsbufp->str_len);
        erts_destroy_info_dsbuf(dsbufp);
        BIF_RET(res);
    } else if (ERTS_IS_ATOM_STR("dist_ctrl", BIF_ARG_1)) {
        DistEntry *dep;
        i = 0;
        erts_rwmtx_rlock(&erts_dist_table_rwmtx);
        /* First pass: count entries to size the heap allocation. */
        for (dep = erts_visible_dist_entries; dep; dep = dep->next)
            ++i;
        for (dep = erts_hidden_dist_entries; dep; dep = dep->next)
            ++i;
        hp = HAlloc(BIF_P,i*(3+2));
        res = NIL;
        for (dep = erts_hidden_dist_entries; dep; dep = dep->next) {
            Eterm tpl;
            ASSERT(is_immed(dep->cid));
            tpl = TUPLE2(hp, dep->sysname, dep->cid);
            hp +=3;
            res = CONS(hp, tpl, res);
            hp += 2;
        }
        for (dep = erts_visible_dist_entries; dep; dep = dep->next) {
            Eterm tpl;
            ASSERT(is_immed(dep->cid));
            tpl = TUPLE2(hp, dep->sysname, dep->cid);
            hp +=3;
            res = CONS(hp, tpl, res);
            hp += 2;
        }
        erts_rwmtx_runlock(&erts_dist_table_rwmtx);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_system_version) {
        erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
        erts_print_system_version(ERTS_PRINT_DSBUF, (void *) dsbufp, BIF_P);
        hp = HAlloc(BIF_P, dsbufp->str_len*2);
        res = buf_to_intlist(&hp, dsbufp->str, dsbufp->str_len, NIL);
        erts_destroy_tmp_dsbuf(dsbufp);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_system_architecture) {
        hp = HAlloc(BIF_P, 2*(sizeof(ERLANG_ARCHITECTURE)-1));
        BIF_RET(buf_to_intlist(&hp,
                               ERLANG_ARCHITECTURE,
                               sizeof(ERLANG_ARCHITECTURE)-1,
                               NIL));
    }
    else if (BIF_ARG_1 == am_os_type) {
        BIF_RET(erts_get_global_literal(ERTS_LIT_OS_TYPE));
    }
    else if (BIF_ARG_1 == am_allocator) {
        BIF_RET(erts_allocator_options((void *) BIF_P));
    }
    else if (BIF_ARG_1 == am_thread_pool_size) {
        extern int erts_async_max_threads;
        int n;

        n = erts_async_max_threads;
        BIF_RET(make_small(n));
    }
    else if (BIF_ARG_1 == am_alloc_util_allocators) {
        BIF_RET(erts_alloc_util_allocators((void *) BIF_P));
    }
    else if (BIF_ARG_1 == am_os_version) {
        BIF_RET(erts_get_global_literal(ERTS_LIT_OS_VERSION));
    }
    else if (BIF_ARG_1 == am_version) {
        int n = sys_strlen(ERLANG_VERSION);
        hp = HAlloc(BIF_P, ((sizeof ERLANG_VERSION)-1) * 2);
        BIF_RET(buf_to_intlist(&hp, ERLANG_VERSION, n, NIL));
    }
    else if (BIF_ARG_1 == am_machine) {
        int n = sys_strlen(EMULATOR);
        hp = HAlloc(BIF_P, n*2);
        BIF_RET(buf_to_intlist(&hp, EMULATOR, n, NIL));
    }
    else if (BIF_ARG_1 == am_garbage_collection) {
        /* NOTE(review): unreachable — am_garbage_collection is already
         * matched by the branch much earlier in this chain. */
        BIF_RET(am_generational);
#ifdef ERTS_OPCODE_COUNTER_SUPPORT
    } else if (BIF_ARG_1 == am_instruction_counts) {
#ifdef DEBUG
        Eterm *endp;
#endif
        Eterm *hp, **hpp;
        Uint hsz, *hszp;
        int i;

        /* Two-pass build via goto: sizing pass (hpp == NULL) first,
         * then the construction pass after allocating hsz words. */
        hpp = NULL;
        hsz = 0;
        hszp = &hsz;

    bld_instruction_counts:

        res = NIL;
        for (i = num_instructions-1; i >= 0; i--) {
            res = erts_bld_cons(hpp, hszp,
                                erts_bld_tuple(hpp, hszp, 2,
                                               erts_atom_put((byte *)opc[i].name,
                                                             sys_strlen(opc[i].name),
                                                             ERTS_ATOM_ENC_LATIN1,
                                                             1),
                                               erts_bld_uint(hpp, hszp,
                                                             erts_instr_count[i])),
                                res);
        }

        if (!hpp) {
            hp = HAlloc(BIF_P, hsz);
            hpp = &hp;
#ifdef DEBUG
            endp = hp + hsz;
#endif
            hszp = NULL;
            goto bld_instruction_counts;
        }

        ASSERT(endp == hp);

        BIF_RET(res);
#endif /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */
    } else if (BIF_ARG_1 == am_wordsize) {
        return make_small(sizeof(Eterm));
    } else if (BIF_ARG_1 == am_endian) {
#if defined(WORDS_BIGENDIAN)
        return am_big;
#else
        return am_little;
#endif
    } else if (BIF_ARG_1 == am_heap_sizes) {
        return erts_heap_sizes(BIF_P);
    } else if (BIF_ARG_1 == am_heap_type) {
        return am_private;
    } else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) {
        /* Traps to format the raw topology term for the user. */
        res = erts_get_cpu_topology_term(BIF_P, am_used);
        BIF_TRAP1(erts_format_cpu_topology_trap, BIF_P, res);
    } else if (ERTS_IS_ATOM_STR("update_cpu_info", BIF_ARG_1)) {
        if (erts_update_cpu_info()) {
            ERTS_DECL_AM(changed);
            BIF_RET(AM_changed);
        }
        else {
            ERTS_DECL_AM(unchanged);
            BIF_RET(AM_unchanged);
        }
#if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
    } else if (ERTS_IS_ATOM_STR("ultrasparc_read_tick1", BIF_ARG_1)) {
        register unsigned high asm("%l0");
        register unsigned low asm("%l1");

        hp = HAlloc(BIF_P, 5);
        asm volatile (".word 0xa3410000;" /* rd %tick, %l1 */
                      ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
                      : "=r" (high), "=r" (low));
        res = TUPLE4(hp, make_small(high >> 16),
                     make_small(high & 0xFFFF),
                     make_small(low >> 16),
                     make_small(low & 0xFFFF));
        BIF_RET(res);
    } else if (ERTS_IS_ATOM_STR("ultrasparc_read_tick2", BIF_ARG_1)) {
        register unsigned high asm("%l0");
        register unsigned low asm("%l1");

        asm volatile (".word 0xa3410000;" /* rd %tick, %l1 */
                      ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
                      : "=r" (high), "=r" (low));
        hp = HAlloc(BIF_P, 5);
        res = TUPLE4(hp, make_small(high >> 16),
                     make_small(high & 0xFFFF),
                     make_small(low >> 16),
                     make_small(low & 0xFFFF));
        BIF_RET(res);
    } else if (ERTS_IS_ATOM_STR("ultrasparc_read_pic1", BIF_ARG_1)) {
        register unsigned high asm("%l0");
        register unsigned low asm("%l1");

        hp = HAlloc(BIF_P, 5);
        asm volatile (".word 0xa3444000;" /* rd %asr17, %l1 */
                      ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
                      : "=r" (high), "=r" (low));
        res = TUPLE4(hp, make_small(high >> 16),
                     make_small(high & 0xFFFF),
                     make_small(low >> 16),
                     make_small(low & 0xFFFF));
        BIF_RET(res);
    } else if (ERTS_IS_ATOM_STR("ultrasparc_read_pic2", BIF_ARG_1)) {
        register unsigned high asm("%l0");
        register unsigned low asm("%l1");

        asm volatile (".word 0xa3444000;" /* rd %asr17, %l1 */
                      ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
                      : "=r" (high), "=r" (low));
        hp = HAlloc(BIF_P, 5);
        res = TUPLE4(hp, make_small(high >> 16),
                     make_small(high & 0xFFFF),
                     make_small(low >> 16),
                     make_small(low & 0xFFFF));
        BIF_RET(res);
#endif
    } else if (BIF_ARG_1 == am_threads) {
        return am_true;
    } else if (BIF_ARG_1 == am_creation) {
        Uint hsz = 0;
        erts_bld_uint(NULL, &hsz, erts_this_node->creation);
        hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
        BIF_RET(erts_bld_uint(&hp, NULL, erts_this_node->creation));
    } else if (BIF_ARG_1 == am_break_ignored) {
        extern int ignore_break;
        if (ignore_break)
            return am_true;
        else
            return am_false;
    }
    /* Arguments that are unusual follow ... */
    else if (ERTS_IS_ATOM_STR("logical_processors", BIF_ARG_1)) {
        int no;
        erts_get_logical_processors(&no, NULL, NULL, NULL);
        if (no > 0)
            BIF_RET(make_small((Uint) no));
        else {
            DECL_AM(unknown);
            BIF_RET(AM_unknown);
        }
    }
    else if (ERTS_IS_ATOM_STR("logical_processors_online", BIF_ARG_1)) {
        int no;
        erts_get_logical_processors(NULL, &no, NULL, NULL);
        if (no > 0)
            BIF_RET(make_small((Uint) no));
        else {
            DECL_AM(unknown);
            BIF_RET(AM_unknown);
        }
    }
    else if (ERTS_IS_ATOM_STR("logical_processors_available", BIF_ARG_1)) {
        int no;
        erts_get_logical_processors(NULL, NULL, &no, NULL);
        if (no > 0)
            BIF_RET(make_small((Uint) no));
        else {
            DECL_AM(unknown);
            BIF_RET(AM_unknown);
        }
    }
    else if (ERTS_IS_ATOM_STR("cpu_quota", BIF_ARG_1)) {
        int no;
        erts_get_logical_processors(NULL, NULL, NULL, &no);
        if (no > 0)
            BIF_RET(make_small((Uint) no));
        else {
            DECL_AM(unknown);
            BIF_RET(AM_unknown);
        }
    } else if (ERTS_IS_ATOM_STR("otp_release", BIF_ARG_1)) {
        int n = sizeof(ERLANG_OTP_RELEASE)-1;
        hp = HAlloc(BIF_P, 2*n);
        BIF_RET(buf_to_intlist(&hp, ERLANG_OTP_RELEASE, n, NIL));
    } else if (ERTS_IS_ATOM_STR("driver_version", BIF_ARG_1)) {
        char buf[42];
        int n = erts_snprintf(buf, 42, "%d.%d",
                              ERL_DRV_EXTENDED_MAJOR_VERSION,
                              ERL_DRV_EXTENDED_MINOR_VERSION);
        hp = HAlloc(BIF_P, 2*n);
        BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
    } else if (ERTS_IS_ATOM_STR("nif_version", BIF_ARG_1)) {
        char buf[42];
        int n = erts_snprintf(buf, 42, "%d.%d",
                              ERL_NIF_MAJOR_VERSION,
                              ERL_NIF_MINOR_VERSION);
        hp = HAlloc(BIF_P, 2*n);
        BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
    } else if (ERTS_IS_ATOM_STR("smp_support", BIF_ARG_1)) {
        BIF_RET(am_true);
    } else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
        BIF_RET(erts_bound_schedulers_term(BIF_P));
    } else if (ERTS_IS_ATOM_STR("scheduler_bindings", BIF_ARG_1)) {
        BIF_RET(erts_get_schedulers_binds(BIF_P));
    } else if (ERTS_IS_ATOM_STR("constant_pool_support", BIF_ARG_1)) {
        BIF_RET(am_true);
    } else if (ERTS_IS_ATOM_STR("schedulers", BIF_ARG_1)
               || ERTS_IS_ATOM_STR("schedulers_total", BIF_ARG_1)) {
        res = make_small(erts_no_schedulers);
        BIF_RET(res);
    } else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
        Eterm *hp;
        Uint total, online, active;
        erts_schedulers_state(&total, &online, &active,
                              NULL, NULL, NULL, NULL, NULL);
        hp = HAlloc(BIF_P, 4);
        res = TUPLE3(hp,
                     make_small(total),
                     make_small(online),
                     make_small(active));
        BIF_RET(res);
    } else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
        /* NOTE(review): exact duplicate of the previous branch —
         * unreachable dead code; candidate for removal. */
        Eterm *hp;
        Uint total, online, active;
        erts_schedulers_state(&total, &online, &active,
                              NULL, NULL, NULL, NULL, NULL);
        hp = HAlloc(BIF_P, 4);
        res = TUPLE3(hp,
                     make_small(total),
                     make_small(online),
                     make_small(active));
        BIF_RET(res);
    } else if (ERTS_IS_ATOM_STR("all_schedulers_state", BIF_ARG_1)) {
        Eterm *hp, tpl;
        Uint sz, total, online, active,
            dirty_cpu_total, dirty_cpu_online, dirty_cpu_active,
            dirty_io_total, dirty_io_active;
        erts_schedulers_state(&total, &online, &active,
                              &dirty_cpu_total, &dirty_cpu_online, &dirty_cpu_active,
                              &dirty_io_total, &dirty_io_active);

        /* 5 words per 4-tuple + 2 per cons cell; dirty entries only
         * when the corresponding scheduler pool exists. */
        sz = 2+5;
        if (dirty_cpu_total)
            sz += 2+5;
        if (dirty_io_total)
            sz += 2+5;

        hp = HAlloc(BIF_P, sz);

        res = NIL;
        if (dirty_io_total) {
            /* dirty_io_total is used for the "online" slot as well —
             * erts_schedulers_state exposes no separate dirty-io
             * online count (presumably all are always online). */
            tpl = TUPLE4(hp,
                         am_dirty_io,
                         make_small(dirty_io_total),
                         make_small(dirty_io_total),
                         make_small(dirty_io_active));
            hp += 5;
            res = CONS(hp, tpl, res);
            hp += 2;
        }
        if (dirty_cpu_total) {
            tpl = TUPLE4(hp,
                         am_dirty_cpu,
                         make_small(dirty_cpu_total),
                         make_small(dirty_cpu_online),
                         make_small(dirty_cpu_active));
            hp += 5;
            res = CONS(hp, tpl, res);
            hp += 2;
        }
        tpl = TUPLE4(hp,
                     am_normal,
                     make_small(total),
                     make_small(online),
                     make_small(active));
        hp += 5;
        res = CONS(hp, tpl, res);
        BIF_RET(res);
    } else if (ERTS_IS_ATOM_STR("schedulers_online", BIF_ARG_1)) {
        Uint online;
        erts_schedulers_state(NULL, &online, NULL, NULL, NULL, NULL, NULL, NULL);
        BIF_RET(make_small(online));
    } else if (ERTS_IS_ATOM_STR("schedulers_active", BIF_ARG_1)) {
        Uint active;
        erts_schedulers_state(NULL, NULL, &active, NULL, NULL, NULL, NULL, NULL);
        BIF_RET(make_small(active));
    } else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers", BIF_ARG_1)) {
        Uint dirty_cpu;
        erts_schedulers_state(NULL, NULL, NULL, &dirty_cpu, NULL, NULL, NULL, NULL);
        BIF_RET(make_small(dirty_cpu));
    } else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers_online", BIF_ARG_1)) {
        Uint dirty_cpu_onln;
        erts_schedulers_state(NULL, NULL, NULL, NULL, &dirty_cpu_onln, NULL, NULL, NULL);
        BIF_RET(make_small(dirty_cpu_onln));
    } else if (ERTS_IS_ATOM_STR("dirty_io_schedulers", BIF_ARG_1)) {
        Uint dirty_io;
        erts_schedulers_state(NULL, NULL, NULL, NULL, NULL, NULL, &dirty_io, NULL);
        BIF_RET(make_small(dirty_io));
    } else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
        res = make_small(erts_no_run_queues);
        BIF_RET(res);
    } else if (ERTS_IS_ATOM_STR("port_parallelism", BIF_ARG_1)) {
        res = erts_port_parallelism ? am_true : am_false;
        BIF_RET(res);
    } else if (ERTS_IS_ATOM_STR("c_compiler_used", BIF_ARG_1)) {
        Eterm *hp = NULL;
        Uint sz = 0;
        (void) c_compiler_used(NULL, &sz);
        if (sz)
            hp = HAlloc(BIF_P, sz);
        BIF_RET(c_compiler_used(&hp, NULL));
    } else if (ERTS_IS_ATOM_STR("stop_memory_trace", BIF_ARG_1)) {
        erts_mtrace_stop();
        BIF_RET(am_true);
    } else if (ERTS_IS_ATOM_STR("context_reductions", BIF_ARG_1)) {
        BIF_RET(make_small(CONTEXT_REDS));
    } else if (ERTS_IS_ATOM_STR("kernel_poll", BIF_ARG_1)) {
#if ERTS_ENABLE_KERNEL_POLL
        BIF_RET(am_true);
#else
        BIF_RET(am_false);
#endif
    } else if (ERTS_IS_ATOM_STR("lock_checking", BIF_ARG_1)) {
#ifdef ERTS_ENABLE_LOCK_CHECK
        BIF_RET(am_true);
#else
        BIF_RET(am_false);
#endif
    } else if (ERTS_IS_ATOM_STR("lock_counting", BIF_ARG_1)) {
#ifdef ERTS_ENABLE_LOCK_COUNT
        BIF_RET(am_true);
#else
        BIF_RET(am_false);
#endif
    } else if (ERTS_IS_ATOM_STR("debug_compiled", BIF_ARG_1)) {
#ifdef DEBUG
        BIF_RET(am_true);
#else
        BIF_RET(am_false);
#endif
    } else if (ERTS_IS_ATOM_STR("check_io", BIF_ARG_1)) {
        BIF_RET(erts_check_io_info(BIF_P));
    } else if (ERTS_IS_ATOM_STR("multi_scheduling_blockers", BIF_ARG_1)) {
        if (erts_no_schedulers == 1)
            BIF_RET(NIL);
        else
            BIF_RET(erts_multi_scheduling_blockers(BIF_P, 0));
    } else if (ERTS_IS_ATOM_STR("normal_multi_scheduling_blockers", BIF_ARG_1)) {
        if (erts_no_schedulers == 1)
            BIF_RET(NIL);
        else
            BIF_RET(erts_multi_scheduling_blockers(BIF_P, 1));
    } else if (ERTS_IS_ATOM_STR("modified_timing_level", BIF_ARG_1)) {
        BIF_RET(ERTS_USE_MODIFIED_TIMING()
                ? make_small(erts_modified_timing_level)
                : am_undefined);
    } else if (ERTS_IS_ATOM_STR("port_tasks", BIF_ARG_1)) {
        BIF_RET(am_true);
    } else if (ERTS_IS_ATOM_STR("io_thread", BIF_ARG_1)) {
        BIF_RET(am_false);
    } else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) {
        BIF_RET(erts_sched_stat_term(BIF_P, 0));
    } else if (ERTS_IS_ATOM_STR("total_scheduling_statistics", BIF_ARG_1)) {
        BIF_RET(erts_sched_stat_term(BIF_P, 1));
    } else if (ERTS_IS_ATOM_STR("taints", BIF_ARG_1)) {
        BIF_RET(erts_nif_taints(BIF_P));
    } else if (ERTS_IS_ATOM_STR("reader_groups_map", BIF_ARG_1)) {
        BIF_RET(erts_get_reader_groups_map(BIF_P));
    } else if (ERTS_IS_ATOM_STR("decentralized_counter_groups_map", BIF_ARG_1)) {
        BIF_RET(erts_get_decentralized_counter_groups_map(BIF_P));
    } else if (ERTS_IS_ATOM_STR("dist_buf_busy_limit", BIF_ARG_1)) {
        Uint hsz = 0;

        (void) erts_bld_uint(NULL, &hsz, erts_dist_buf_busy_limit);
        hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
        res = erts_bld_uint(&hp, NULL, erts_dist_buf_busy_limit);
        BIF_RET(res);
    } else if (ERTS_IS_ATOM_STR("delayed_node_table_gc", BIF_ARG_1)) {
        Uint hsz = 0;
        Uint dntgc = erts_delayed_node_table_gc();
        if (dntgc == ERTS_NODE_TAB_DELAY_GC_INFINITY)
            BIF_RET(am_infinity);
        (void) erts_bld_uint(NULL, &hsz, dntgc);
        hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
        res = erts_bld_uint(&hp, NULL, dntgc);
        BIF_RET(res);
    } else if (ERTS_IS_ATOM_STR("ethread_info", BIF_ARG_1)) {
        BIF_RET(erts_get_ethread_info(BIF_P));
    }
    else if (ERTS_IS_ATOM_STR("ethread_used_tse", BIF_ARG_1)) {
        Uint64 no = (Uint64) ethr_no_used_tse();
        Uint hsz = 0;
        erts_bld_uint64(NULL, &hsz, no);
        hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
        res = erts_bld_uint64(&hp, NULL, no);
        BIF_RET(res);
    }
    else if (ERTS_IS_ATOM_STR("emu_args", BIF_ARG_1)) {
        BIF_RET(erts_get_emu_args(BIF_P));
    }
    else if (ERTS_IS_ATOM_STR("beam_jump_table", BIF_ARG_1)) {
        BIF_RET(erts_beam_jump_table() ? am_true : am_false);
    }
    else if (ERTS_IS_ATOM_STR("dynamic_trace", BIF_ARG_1)) {
#if defined(USE_DTRACE)
        DECL_AM(dtrace);
        BIF_RET(AM_dtrace);
#elif defined(USE_SYSTEMTAP)
        DECL_AM(systemtap);
        BIF_RET(AM_systemtap);
#elif defined(USE_LTTNG)
        DECL_AM(lttng);
        BIF_RET(AM_lttng);
#else
        BIF_RET(am_none);
#endif
    }
    else if (ERTS_IS_ATOM_STR("dynamic_trace_probes", BIF_ARG_1)) {
#if defined(USE_VM_PROBES)
        BIF_RET(am_true);
#else
        BIF_RET(am_false);
#endif
    }
    else if (ERTS_IS_ATOM_STR("thread_progress", BIF_ARG_1)) {
        /* Debug aid: dumps thread-progress state to the log. */
        erts_thr_progress_dbg_print_state();
        BIF_RET(am_true);
    }
    else if (BIF_ARG_1 == am_message_queue_data) {
        switch (erts_default_spo_flags & (SPO_ON_HEAP_MSGQ|SPO_OFF_HEAP_MSGQ)) {
        case SPO_OFF_HEAP_MSGQ:
            BIF_RET(am_off_heap);
        case SPO_ON_HEAP_MSGQ:
            BIF_RET(am_on_heap);
        default:
            ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
            BIF_RET(am_error);
        }
    }
    else if (ERTS_IS_ATOM_STR("compile_info",BIF_ARG_1)) {
        Uint sz;
        Eterm res = NIL, tup, text;
        Eterm *hp = HAlloc(BIF_P, 3*(2 + 3) + /* three 2-tuples and three cons */
                           2*(sys_strlen(erts_build_flags_CONFIG_H) +
                              sys_strlen(erts_build_flags_CFLAGS) +
                              sys_strlen(erts_build_flags_LDFLAGS)));

        sz = sys_strlen(erts_build_flags_CONFIG_H);
        text = buf_to_intlist(&hp, erts_build_flags_CONFIG_H, sz, NIL);
        tup = TUPLE2(hp, am_config_h, text); hp += 3;
        res = CONS(hp, tup, res); hp += 2;

        sz = sys_strlen(erts_build_flags_CFLAGS);
        text = buf_to_intlist(&hp, erts_build_flags_CFLAGS, sz, NIL);
        tup = TUPLE2(hp, am_cflags, text); hp += 3;
        res = CONS(hp, tup, res); hp += 2;

        sz = sys_strlen(erts_build_flags_LDFLAGS);
        text = buf_to_intlist(&hp, erts_build_flags_LDFLAGS, sz, NIL);
        tup = TUPLE2(hp, am_ldflags, text); hp += 3;
        res = CONS(hp, tup, res); hp += 2;

        BIF_RET(res);
    }
    else if (ERTS_IS_ATOM_STR("ets_limit",BIF_ARG_1)) {
        BIF_RET(make_small(erts_db_get_max_tabs()));
    }
    else if (ERTS_IS_ATOM_STR("ets_count",BIF_ARG_1)) {
        BIF_RET(make_small(erts_ets_table_count()));
    }
    else if (ERTS_IS_ATOM_STR("atom_limit",BIF_ARG_1)) {
        BIF_RET(make_small(erts_get_atom_limit()));
    }
    else if (ERTS_IS_ATOM_STR("atom_count",BIF_ARG_1)) {
        BIF_RET(make_small(atom_table_size()));
    }
    else if (ERTS_IS_ATOM_STR("tolerant_timeofday",BIF_ARG_1)) {
        if (erts_has_time_correction()
            && erts_time_offset_state() == ERTS_TIME_OFFSET_FINAL) {
            BIF_RET(am_enabled);
        }
        BIF_RET(am_disabled);
    }
    else if (ERTS_IS_ATOM_STR("eager_check_io",BIF_ARG_1)) {
        BIF_RET(am_true);
    }
    else if (ERTS_IS_ATOM_STR("literal_test",BIF_ARG_1)) {
#ifdef ERTS_HAVE_IS_IN_LITERAL_RANGE
#ifdef ARCH_64
        DECL_AM(range);
        BIF_RET(AM_range);
#else /* ARCH_32 */
        DECL_AM(range_bitmask);
        BIF_RET(AM_range_bitmask);
#endif /* ARCH_32 */
#else /* ! ERTS_HAVE_IS_IN_LITERAL_RANGE */
        DECL_AM(tag);
        BIF_RET(AM_tag);
#endif
    } else if (ERTS_IS_ATOM_STR("system_logger", BIF_ARG_1)) {
        BIF_RET(erts_get_system_logger());
    }

    BIF_ERROR(BIF_P, BADARG);
}
3248
/*
 * Accumulate the heap size (in words) of one monitor into the Uint
 * accumulator pointed to by 'vsz'. Used as a callback for
 * erts_monitor_tree_foreach()/erts_monitor_list_foreach() when
 * computing a port's total memory use (see erts_bld_port_info(),
 * 'memory' item). Returns 1 so one reduction is charged per monitor.
 */
static int monitor_size(ErtsMonitor *mon, void *vsz, Sint reds)
{
    /* Add to, rather than overwrite, the shared accumulator: this
     * callback is invoked once per monitor, and the same accumulator
     * is also fed by link_size(). Plain '=' would keep only the last
     * entry's size. */
    *((Uint *) vsz) += erts_monitor_size(mon);
    return 1;
}
3254
/*
 * Accumulate the heap size (in words) of one link into the Uint
 * accumulator pointed to by 'vsz'. Used as a callback for
 * erts_link_tree_foreach() when computing a port's total memory use
 * (see erts_bld_port_info(), 'memory' item). Returns 1 so one
 * reduction is charged per link.
 *
 * NOTE(review): the parameter is declared ErtsMonitor* although the
 * link-tree foreach delivers links; presumably the common
 * monitor/link prefix makes this safe -- confirm against the
 * ErtsLinkFunc callback type.
 */
static int link_size(ErtsMonitor *lnk, void *vsz, Sint reds)
{
    /* Accumulate instead of overwriting: multiple links (and the
     * monitor sizes added afterwards) share this counter. */
    *((Uint *) vsz) += erts_link_size(lnk);
    return 1;
}
3260
3261 /**********************************************************************/
3262 /* Return information on ports */
3263 /* Info:
3264 ** id Port index
3265 ** connected (Pid)
3266 ** links List of pids
3267 ** name String
3268 ** input Number of bytes input from port program
3269 ** output Number of bytes output to the port program
3270 ** os_pid The child's process ID
3271 */
3272
/*
 * Build one {Item, Value} element of a port_info result for port
 * 'prt' (the port lock must be held -- asserted below).
 *
 * Two-pass builder protocol:
 *   - sizing pass:  szp != NULL, hpp == NULL; only the number of heap
 *     words needed is added to *szp and the dummy am_true is returned.
 *   - build pass:   hpp != NULL; the term is built at *hpp and *hpp is
 *     advanced past it.
 * Both pointers may be given at once; the szp branches win on the
 * return value and jump to 'done'.
 *
 * Returns the {Item, Value} 2-tuple for known items; for an
 * unregistered port queried with 'registered_name' the bare value
 * am_undefined (sizing) or NIL (build) is returned WITHOUT the tuple
 * wrapper; for unknown items am_false (sizing) or THE_NON_VALUE
 * (build) is returned.
 */
Eterm
erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
                   Eterm item)
{
    Eterm res = THE_NON_VALUE;

    ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));

    if (item == am_id) {
        if (hpp)
            res = make_small(internal_port_index(prt->common.id));
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_links) {
        MonitorInfoCollection mic;
        int i;
        Eterm item; /* NOTE: shadows the 'item' parameter in this branch */

        INIT_MONITOR_INFOS(mic);

        /* Collect all link peers into 'mic' (also computes mic.sz,
           the heap words needed to store them). */
        erts_link_tree_foreach(ERTS_P_LINKS(prt), collect_one_link, (void *) &mic);

        if (szp)
            *szp += mic.sz;

        if (hpp) {
            /* Build the list of linked pids. */
            res = NIL;
            for (i = 0; i < mic.mi_i; i++) {
                item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
                res = CONS(*hpp, item, res);
                *hpp += 2;
            }
        }

        DESTROY_MONITOR_INFOS(mic);

        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_monitors) {
        MonitorInfoCollection mic;
        int i;

        INIT_MONITOR_INFOS(mic);
        /* Monitors originated by this port. */
        erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
                                  collect_one_origin_monitor,
                                  (void *) &mic);

        if (szp)
            *szp += mic.sz;

        if (hpp) {
            /* Build [{process, Pid}, ...]; ports can only monitor
               local processes (asserted). */
            res = NIL;
            for (i = 0; i < mic.mi_i; i++) {
                Eterm t;

                ASSERT(mic.mi[i].type == ERTS_MON_TYPE_PORT);
                ASSERT(is_internal_pid(mic.mi[i].entity.term));
                t = TUPLE2(*hpp, am_process, mic.mi[i].entity.term);
                *hpp += 3;
                res = CONS(*hpp, t, res);
                *hpp += 2;
            }
        } // hpp
        DESTROY_MONITOR_INFOS(mic);

        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_monitored_by) {
        MonitorInfoCollection mic;
        int i;
        Eterm item; /* NOTE: shadows the 'item' parameter in this branch */

        INIT_MONITOR_INFOS(mic);
        /* Monitors targeting this port live both in the local-target
           list and in the monitor tree; walk both. */
        erts_monitor_list_foreach(ERTS_P_LT_MONITORS(prt),
                                  collect_one_target_monitor,
                                  (void *) &mic);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
                                  collect_one_target_monitor,
                                  (void *) &mic);
        if (szp)
            *szp += mic.sz;

        if (hpp) {
            res = NIL;
            for (i = 0; i < mic.mi_i; ++i) {
                ASSERT(mic.mi[i].type != ERTS_MON_TYPE_RESOURCE);
                item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
                res = CONS(*hpp, item, res);
                *hpp += 2;
            }
        } // hpp
        DESTROY_MONITOR_INFOS(mic);

        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_name) {
        int count = sys_strlen(prt->name);

        if (hpp)
            res = buf_to_intlist(hpp, prt->name, count, NIL);

        if (szp) {
            *szp += 2*count; /* one cons cell per character */
            res = am_true;
            goto done;
        }
    }
    else if (item == am_connected) {
        if (hpp)
            res = ERTS_PORT_GET_CONNECTED(prt); /* internal pid */
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_input) {
        res = erts_bld_uint(hpp, szp, prt->bytes_in);
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_output) {
        res = erts_bld_uint(hpp, szp, prt->bytes_out);
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_os_pid) {
        /* Negative os_pid means no OS process is attached. */
        res = (prt->os_pid < 0
               ? am_undefined
               : erts_bld_uword(hpp, szp, (UWord) prt->os_pid));
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_registered_name) {
        RegProc *reg = prt->common.u.alive.reg;
        if (reg) {
            res = reg->name;
            if (szp) {
                res = am_true;
                goto done;
            }
        }
        else {
            /* Not registered: return the bare value, skipping the
               {Item, Value} wrapper built at 'done'. */
            if (szp)
                return am_undefined;
            return NIL;
        }
    }
    else if (item == am_memory) {
        /* All memory consumed in bytes (the Port struct should not be
           included though).
         */
        Uint size = 0;

        /* Sum sizes of all links and monitors into 'size'. */
        erts_link_tree_foreach(ERTS_P_LINKS(prt),
                               link_size, (void *) &size);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
                                  monitor_size, (void *) &size);
        erts_monitor_list_foreach(ERTS_P_LT_MONITORS(prt),
                                  monitor_size, (void *) &size);

        size += erts_port_data_size(prt);

        if (prt->linebuf)
            size += sizeof(LineBuf) + prt->linebuf->ovsiz;

        /* ... */


        /* All memory allocated by the driver should be included, but it is
           hard to retrieve... */

        res = erts_bld_uint(hpp, szp, size);
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_queue_size) {
        Uint ioq_size = erts_port_ioq_size(prt);
        res = erts_bld_uint(hpp, szp, ioq_size);
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (ERTS_IS_ATOM_STR("locking", item)) {
        if (hpp) {
            /* Report whether the port uses its own lock or the
               driver-wide lock. */
            if (erts_atomic32_read_nob(&prt->state)
                & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
                DECL_AM(port_level);
                ASSERT(prt->drv_ptr->flags
                       & ERL_DRV_FLAG_USE_PORT_LOCKING);
                res = AM_port_level;
            }
            else {
                DECL_AM(driver_level);
                ASSERT(!(prt->drv_ptr->flags
                         & ERL_DRV_FLAG_USE_PORT_LOCKING));
                res = AM_driver_level;
            }
        }
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_parallelism) {
        if (szp) {
            res = am_true;
            goto done;
        }
        res = ((ERTS_PTS_FLG_PARALLELISM &
                erts_atomic32_read_nob(&prt->sched.flags))
               ? am_true
               : am_false);
    }
    else {
        /* Unknown item. */
        if (szp)
            return am_false;
        return THE_NON_VALUE;
    }

done:
    /* Wrap the value in the {Item, Value} 2-tuple (3 heap words). */
    if (szp)
        *szp += 3;
    if (hpp) {
        res = TUPLE2(*hpp, item, res);
        *hpp += 3;
    }

    return res;
}
3523
/*
 * erlang:fun_info/2 -- return {Item, Value} information about a fun.
 * Local funs (is_fun) and external funs (is_export) are handled by
 * two parallel switches; any other subject or unknown item is badarg.
 *
 * Every case allocates 3 heap words for the result 2-tuple (am_env
 * allocates 3 + 2 words per free variable). In the am_new_uniq and
 * am_refc cases the value is deliberately built BEFORE the HAlloc
 * call -- presumably because new_binary()/erts_make_integer()
 * allocate on the process heap themselves; confirm before reordering.
 */
BIF_RETTYPE
fun_info_2(BIF_ALIST_2)
{
    Process* p = BIF_P;
    Eterm fun = BIF_ARG_1;
    Eterm what = BIF_ARG_2;
    Eterm* hp;
    Eterm val;

    if (is_fun(fun)) {
        /* Local fun: answer from the ErlFunThing and its fun entry. */
        ErlFunThing* funp = (ErlFunThing *) fun_val(fun);

        switch (what) {
        case am_type:
            hp = HAlloc(p, 3);
            val = am_local;
            break;
        case am_pid:
            hp = HAlloc(p, 3);
            val = funp->creator;
            break;
        case am_module:
            hp = HAlloc(p, 3);
            val = funp->fe->module;
            break;
        case am_new_index:
            hp = HAlloc(p, 3);
            val = make_small(funp->fe->index);
            break;
        case am_new_uniq:
            /* Value built before HAlloc -- see function comment. */
            val = new_binary(p, funp->fe->uniq, 16);
            hp = HAlloc(p, 3);
            break;
        case am_index:
            hp = HAlloc(p, 3);
            val = make_small(funp->fe->old_index);
            break;
        case am_uniq:
            hp = HAlloc(p, 3);
            val = make_small(funp->fe->old_uniq);
            break;
        case am_env:
            {
                /* Build the list of free variables (newest first in
                   env[], consed in reverse so the list is in order). */
                Uint num_free = funp->num_free;
                int i;

                hp = HAlloc(p, 3 + 2*num_free);
                val = NIL;
                for (i = num_free-1; i >= 0; i--) {
                    val = CONS(hp, funp->env[i], val);
                    hp += 2;
                }
            }
            break;
        case am_refc:
            /* Value built before HAlloc -- see function comment. */
            val = erts_make_integer(erts_atomic_read_nob(&funp->fe->refc), p);
            hp = HAlloc(p, 3);
            break;
        case am_arity:
            hp = HAlloc(p, 3);
            val = make_small(funp->arity);
            break;
        case am_name:
            {
                const ErtsCodeMFA *mfa = erts_code_to_codemfa((funp->fe)->address);
                hp = HAlloc(p, 3);
                val = mfa->function;
            }
            break;
        default:
            goto error;
        }
    } else if (is_export(fun)) {
        /* External fun (fun M:F/A): answer from the export entry;
           items that only make sense for local funs are 'undefined'. */
        Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
        switch (what) {
        case am_type:
            hp = HAlloc(p, 3);
            val = am_external;
            break;
        case am_pid:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_module:
            hp = HAlloc(p, 3);
            val = exp->info.mfa.module;
            break;
        case am_new_index:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_new_uniq:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_index:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_uniq:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_env:
            hp = HAlloc(p, 3);
            val = NIL;
            break;
        case am_refc:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_arity:
            hp = HAlloc(p, 3);
            val = make_small(exp->info.mfa.arity);
            break;
        case am_name:
            hp = HAlloc(p, 3);
            val = exp->info.mfa.function;
            break;
        default:
            goto error;
        }
    } else {
    error:
        BIF_ERROR(p, BADARG);
    }
    return TUPLE2(hp, what, val);
}
3652
3653 BIF_RETTYPE
fun_info_mfa_1(BIF_ALIST_1)3654 fun_info_mfa_1(BIF_ALIST_1)
3655 {
3656 Process* p = BIF_P;
3657 Eterm fun = BIF_ARG_1;
3658 Eterm* hp;
3659
3660 if (is_fun(fun)) {
3661 const ErtsCodeMFA *mfa;
3662 ErlFunThing* funp;
3663 funp = (ErlFunThing *) fun_val(fun);
3664 mfa = erts_code_to_codemfa((funp->fe)->address);
3665
3666 hp = HAlloc(p, 4);
3667 BIF_RET(TUPLE3(hp,
3668 (funp->fe)->module,
3669 mfa->function,
3670 make_small(funp->arity)));
3671 } else if (is_export(fun)) {
3672 Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
3673 hp = HAlloc(p, 4);
3674 BIF_RET(TUPLE3(hp,exp->info.mfa.module,
3675 exp->info.mfa.function,
3676 make_small(exp->info.mfa.arity)));
3677 }
3678 BIF_ERROR(p, BADARG);
3679 }
3680
erts_internal_is_process_alive_2(BIF_ALIST_2)3681 BIF_RETTYPE erts_internal_is_process_alive_2(BIF_ALIST_2)
3682 {
3683 if (!is_internal_pid(BIF_ARG_1) || !is_internal_ordinary_ref(BIF_ARG_2))
3684 BIF_ERROR(BIF_P, BADARG);
3685 if (!erts_proc_sig_send_is_alive_request(BIF_P, BIF_ARG_1, BIF_ARG_2)) {
3686 if (ERTS_PROC_HAS_INCOMING_SIGNALS(BIF_P))
3687 ERTS_BIF_HANDLE_SIGNALS_RETURN(BIF_P, am_ok);
3688 }
3689 BIF_RET(am_ok);
3690 }
3691
is_process_alive_1(BIF_ALIST_1)3692 BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
3693 {
3694 if (is_internal_pid(BIF_ARG_1)) {
3695 erts_aint32_t state;
3696 Process *rp;
3697
3698 if (BIF_ARG_1 == BIF_P->common.id)
3699 BIF_RET(am_true);
3700
3701 rp = erts_proc_lookup_raw(BIF_ARG_1);
3702 if (!rp)
3703 BIF_RET(am_false);
3704
3705 state = erts_atomic32_read_acqb(&rp->state);
3706 if (state & (ERTS_PSFLG_EXITING
3707 | ERTS_PSFLG_SIG_Q
3708 | ERTS_PSFLG_SIG_IN_Q)) {
3709 /*
3710 * If in exiting state, trap out and send 'is alive'
3711 * request and wait for it to complete termination.
3712 *
3713 * If process has signals enqueued, we need to
3714 * send it an 'is alive' request via its signal
3715 * queue in order to ensure that signal order is
3716 * preserved (we may earlier have sent it an
3717 * exit signal that has not been processed yet).
3718 */
3719 BIF_TRAP1(is_process_alive_trap, BIF_P, BIF_ARG_1);
3720 }
3721
3722 BIF_RET(am_true);
3723 }
3724
3725 if (is_external_pid(BIF_ARG_1)) {
3726 if (external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
3727 BIF_RET(am_false); /* A pid from an old incarnation of this node */
3728 }
3729
3730 BIF_ERROR(BIF_P, BADARG);
3731 }
3732
/*
 * Dump the stack of 'c_p' to stderr. Used both directly (for the
 * calling process) and as an rpc callback dispatched through the
 * target's signal queue by erts_internal_process_display_2().
 * Returns am_true on success, am_badarg if the process is exiting.
 */
static Eterm
process_display(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
{
    /* Charge one reduction when invoked via the signal queue
       (redsp is NULL on the direct call path). */
    if (redsp)
        *redsp = 1;

    if (ERTS_PROC_IS_EXITING(c_p))
        return am_badarg;

    /* Hold all minor locks so the stack stays stable while dumping. */
    erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
    erts_stack_dump(ERTS_PRINT_STDERR, NULL, c_p);
    erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);

    return am_true;
}
3748
3749
erts_internal_process_display_2(BIF_ALIST_2)3750 BIF_RETTYPE erts_internal_process_display_2(BIF_ALIST_2)
3751 {
3752 Eterm res;
3753
3754 if (BIF_ARG_2 != am_backtrace)
3755 BIF_RET(am_badopt);
3756
3757 if (BIF_P->common.id == BIF_ARG_1) {
3758 res = process_display(BIF_P, NULL, NULL, NULL);
3759 BIF_RET(res);
3760 }
3761
3762 if (is_not_internal_pid(BIF_ARG_1))
3763 BIF_RET(am_badarg);
3764
3765 res = erts_proc_sig_send_rpc_request(BIF_P, BIF_ARG_1,
3766 !0,
3767 process_display,
3768 NULL);
3769 if (is_non_value(res))
3770 BIF_RET(am_badarg);
3771
3772 BIF_RET(res);
3773 }
3774
3775 /* this is a general call which return some possibly useful information */
3776
/*
 * erlang:statistics/1 -- one branch per supported item. Several items
 * trap out to Erlang code to gather asynchronous results (scheduler
 * wall time, msacc, gc info, io). The run-queue-length items use
 * bitwise '|' between atom comparisons (each yields 0/1, so '|' is
 * equivalent to '||' and branch-free) and the classic two-pass
 * size-then-build loop: first pass with szp set computes the heap
 * need, second pass with hpp set builds the term.
 */
BIF_RETTYPE statistics_1(BIF_ALIST_1)
{
    Eterm res;
    Eterm* hp;

    if (BIF_ARG_1 == am_scheduler_wall_time) {
        /* Asynchronous gather; trap to Erlang code with the request
           reference (am_undefined if wall time is not enabled). */
        res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 0);
        if (is_non_value(res))
            BIF_RET(am_undefined);
        BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
    } else if (BIF_ARG_1 == am_scheduler_wall_time_all) {
        /* Same as above but including dirty schedulers. */
        res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 1);
        if (is_non_value(res))
            BIF_RET(am_undefined);
        BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
    } else if ((BIF_ARG_1 == am_total_active_tasks)
               | (BIF_ARG_1 == am_total_run_queue_lengths)
               | (BIF_ARG_1 == am_total_active_tasks_all)
               | (BIF_ARG_1 == am_total_run_queue_lengths_all)) {
        /* Single total over all run queues; the two boolean arguments
           select tasks vs. lengths and whether dirty-io queues are
           included. */
        Uint no = erts_run_queues_len(NULL, 0,
                                      ((BIF_ARG_1 == am_total_active_tasks)
                                       | (BIF_ARG_1 == am_total_active_tasks_all)),
                                      ((BIF_ARG_1 == am_total_active_tasks_all)
                                       | (BIF_ARG_1 == am_total_run_queue_lengths_all)));
        if (IS_USMALL(0, no))
            res = make_small(no);
        else {
            Eterm *hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
            res = uint_to_big(no, hp);
        }
        BIF_RET(res);
    } else if ((BIF_ARG_1 == am_active_tasks)
               | (BIF_ARG_1 == am_run_queue_lengths)
               | (BIF_ARG_1 == am_active_tasks_all)
               | (BIF_ARG_1 == am_run_queue_lengths_all)) {
        /* Per-run-queue list. NOTE: these declarations shadow the
           outer 'res'/'hp'. qszs holds the raw counts in the first
           no_qs slots and the built Eterms in the second half. */
        Eterm res, *hp, **hpp;
        Uint sz, *szp;
        int incl_dirty_io = ((BIF_ARG_1 == am_active_tasks_all)
                             | (BIF_ARG_1 == am_run_queue_lengths_all));
        int no_qs = (erts_no_run_queues + ERTS_NUM_DIRTY_CPU_RUNQS +
                     (incl_dirty_io ? ERTS_NUM_DIRTY_IO_RUNQS : 0));
        Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
        (void) erts_run_queues_len(qszs, 0,
                                   ((BIF_ARG_1 == am_active_tasks)
                                    | (BIF_ARG_1 == am_active_tasks_all)),
                                   incl_dirty_io);
        sz = 0;
        szp = &sz;
        hpp = NULL;
        /* Two-pass build: first iteration sizes, second builds. */
        while (1) {
            int i;
            for (i = 0; i < no_qs; i++)
                qszs[no_qs+i] = erts_bld_uint(hpp, szp, qszs[i]);
            res = erts_bld_list(hpp, szp, no_qs, &qszs[no_qs]);
            if (hpp) {
                erts_free(ERTS_ALC_T_TMP, qszs);
                BIF_RET(res);
            }
            hp = HAlloc(BIF_P, sz);
            szp = NULL;
            hpp = &hp;
        }
#ifdef ERTS_ENABLE_MSACC
    } else if (BIF_ARG_1 == am_microstate_accounting) {
        Eterm threads;
        res = erts_msacc_request(BIF_P, ERTS_MSACC_GATHER, &threads);
        if (is_non_value(res))
            BIF_RET(am_undefined);
        BIF_TRAP2(gather_msacc_res_trap, BIF_P, res, threads);
#endif
    } else if (BIF_ARG_1 == am_context_switches) {
        /* {ContextSwitches, 0} -- second element kept for backward
           compatibility. */
        Eterm cs = erts_make_integer(erts_get_total_context_switches(), BIF_P);
        hp = HAlloc(BIF_P, 3);
        res = TUPLE2(hp, cs, SMALL_ZERO);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_garbage_collection) {
        res = erts_gc_info_request(BIF_P);
        if (is_non_value(res))
            BIF_RET(am_undefined);
        BIF_TRAP1(gather_gc_info_res_trap, BIF_P, res);
    } else if (BIF_ARG_1 == am_reductions) {
        /* {TotalReductions, ReductionsSinceLastCall} */
        Uint reds;
        Uint diff;
        Uint hsz = 3;
        Eterm b1, b2;

        erts_get_total_reductions(&reds, &diff);
        (void) erts_bld_uint(NULL, &hsz, reds);
        (void) erts_bld_uint(NULL, &hsz, diff);
        hp = HAlloc(BIF_P, hsz);
        b1 = erts_bld_uint(&hp, NULL, reds);
        b2 = erts_bld_uint(&hp, NULL, diff);
        res = TUPLE2(hp, b1, b2);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_exact_reductions) {
        Uint reds;
        Uint diff;
        Uint hsz = 3;
        Eterm b1, b2;

        erts_get_exact_total_reductions(BIF_P, &reds, &diff);
        (void) erts_bld_uint(NULL, &hsz, reds);
        (void) erts_bld_uint(NULL, &hsz, diff);
        hp = HAlloc(BIF_P, hsz);
        b1 = erts_bld_uint(&hp, NULL, reds);
        b2 = erts_bld_uint(&hp, NULL, diff);
        res = TUPLE2(hp, b1, b2);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_runtime) {
        /* {TotalRunTime, TimeSinceLastCall} */
        ErtsMonotonicTime u1, u2;
        Eterm b1, b2;
        Uint hsz;
        erts_runtime_elapsed_both(&u1, NULL, &u2, NULL);
        hsz = 3; /* 2-tuple */
        (void) erts_bld_monotonic_time(NULL, &hsz, u1);
        (void) erts_bld_monotonic_time(NULL, &hsz, u2);
        hp = HAlloc(BIF_P, hsz);
        b1 = erts_bld_monotonic_time(&hp, NULL, u1);
        b2 = erts_bld_monotonic_time(&hp, NULL, u2);
        res = TUPLE2(hp, b1, b2);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_run_queue) {
        res = erts_run_queues_len(NULL, 1, 0, 0);
        BIF_RET(make_small(res));
    } else if (BIF_ARG_1 == am_wall_clock) {
        /* {TotalWallclockTime, WallclockSinceLastCall} */
        ErtsMonotonicTime w1, w2;
        Eterm b1, b2;
        Uint hsz;
        erts_wall_clock_elapsed_both(&w1, &w2);
        hsz = 3; /* 2-tuple */
        (void) erts_bld_monotonic_time(NULL, &hsz, w1);
        (void) erts_bld_monotonic_time(NULL, &hsz, w2);
        hp = HAlloc(BIF_P, hsz);
        b1 = erts_bld_monotonic_time(&hp, NULL, w1);
        b2 = erts_bld_monotonic_time(&hp, NULL, w2);
        res = TUPLE2(hp, b1, b2);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_io) {
        /* I/O byte counts are gathered asynchronously from all
           schedulers; trap to the gather loop. */
        Eterm ref = erts_request_io_bytes(BIF_P);
        BIF_TRAP2(gather_io_bytes_trap, BIF_P, ref, make_small(erts_no_schedulers));
    }
    else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
        /* Undocumented: tuple of all run queue lengths (including all
           dirty queues). Same two-pass pattern and shadowing as the
           am_active_tasks branch above. */
        Eterm res, *hp, **hpp;
        Uint sz, *szp;
        int no_qs = erts_no_run_queues + ERTS_NUM_DIRTY_RUNQS;
        Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
        (void) erts_run_queues_len(qszs, 0, 0, 1);
        sz = 0;
        szp = &sz;
        hpp = NULL;
        while (1) {
            int i;
            for (i = 0; i < no_qs; i++)
                qszs[no_qs+i] = erts_bld_uint(hpp, szp, qszs[i]);
            res = erts_bld_tuplev(hpp, szp, no_qs, &qszs[no_qs]);
            if (hpp) {
                erts_free(ERTS_ALC_T_TMP, qszs);
                BIF_RET(res);
            }
            hp = HAlloc(BIF_P, sz);
            szp = NULL;
            hpp = &hp;
        }
    }
    BIF_ERROR(BIF_P, BADARG);
}
3943
/*
 * error_logger:warning_map/0 -- return the atom stored in the global
 * erts_error_logger_warnings (how warning messages are mapped;
 * presumably configured at emulator start via the +W flag -- confirm
 * in erl_init).
 */
BIF_RETTYPE error_logger_warning_map_0(BIF_ALIST_0)
{
    BIF_RET(erts_error_logger_warnings);
}
3948
3949 static erts_atomic_t available_internal_state;
3950
/*
 * No-op destructor for magic binaries created by the debug BIFs.
 * NOTE(review): returning non-zero presumably tells the binary
 * deallocator that the magic binary may be freed immediately --
 * confirm against the magic-binary destructor contract in
 * erl_binary.h.
 */
static int empty_magic_ref_destructor(Binary *bin)
{
    return 1;
}
3955
erts_debug_get_internal_state_1(BIF_ALIST_1)3956 BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
3957 {
3958 /*
3959 * NOTE: Only supposed to be used for testing, and debugging.
3960 */
3961
3962 if (!erts_atomic_read_nob(&available_internal_state)) {
3963 BIF_ERROR(BIF_P, EXC_UNDEF);
3964 }
3965
3966 if (is_atom(BIF_ARG_1)) {
3967 if (ERTS_IS_ATOM_STR("reds_left", BIF_ARG_1)) {
3968 /* Used by (emulator) */
3969 BIF_RET(make_small((Uint) ERTS_BIF_REDS_LEFT(BIF_P)));
3970 }
3971 else if (ERTS_IS_ATOM_STR("node_and_dist_references", BIF_ARG_1)) {
3972 /* Used by node_container_SUITE (emulator) */
3973 BIF_TRAP1(get_internal_state_blocked, BIF_P, BIF_ARG_1);
3974 }
3975 else if (ERTS_IS_ATOM_STR("monitoring_nodes", BIF_ARG_1)) {
3976 BIF_RET(erts_processes_monitoring_nodes(BIF_P));
3977 }
3978 else if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1)
3979 || ERTS_IS_ATOM_STR("next_port", BIF_ARG_1)) {
3980 /* Used by node_container_SUITE (emulator) */
3981 Sint res;
3982 if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
3983 res = erts_ptab_test_next_id(&erts_proc, 0, 0);
3984 else
3985 res = erts_ptab_test_next_id(&erts_port, 0, 0);
3986 if (res < 0)
3987 BIF_RET(am_false);
3988 BIF_RET(erts_make_integer(res, BIF_P));
3989 }
3990 else if (ERTS_IS_ATOM_STR("DbTable_words", BIF_ARG_1)) {
3991 /* Used by ets_SUITE (stdlib) */
3992 size_t words = (sizeof(DbTable) + sizeof(Uint) - 1)/sizeof(Uint);
3993 Eterm* hp = HAlloc(BIF_P ,3);
3994 BIF_RET(TUPLE2(hp, make_small((Uint) words),
3995 erts_ets_hash_sizeof_ext_segtab()));
3996 }
3997 else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) {
3998 /* Used by driver_SUITE (emulator) */
3999 Uint sz, *szp;
4000 Eterm res, *hp, **hpp;
4001 int no_errors;
4002 ErtsCheckIoDebugInfo ciodi = {0};
4003 #ifdef HAVE_ERTS_CHECK_IO_DEBUG
4004 erts_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN);
4005 no_errors = erts_check_io_debug(&ciodi);
4006 erts_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN);
4007 #else
4008 no_errors = 0;
4009 #endif
4010 sz = 0;
4011 szp = &sz;
4012 hpp = NULL;
4013 while (1) {
4014 res = erts_bld_tuple(hpp, szp, 4,
4015 erts_bld_uint(hpp, szp,
4016 (Uint) no_errors),
4017 erts_bld_uint(hpp, szp,
4018 (Uint) ciodi.no_used_fds),
4019 erts_bld_uint(hpp, szp,
4020 (Uint) ciodi.no_driver_select_structs),
4021 erts_bld_uint(hpp, szp,
4022 (Uint) ciodi.no_enif_select_structs));
4023 if (hpp)
4024 break;
4025 hp = HAlloc(BIF_P, sz);
4026 szp = NULL;
4027 hpp = &hp;
4028 }
4029 BIF_RET(res);
4030 }
4031 else if (ERTS_IS_ATOM_STR("process_info_args", BIF_ARG_1)) {
4032 /* Used by process_SUITE (emulator) */
4033 int i;
4034 Eterm res = NIL;
4035 Uint *hp = HAlloc(BIF_P, 2*ERTS_PI_ARGS);
4036 for (i = ERTS_PI_ARGS-1; i >= 0; i--) {
4037 res = CONS(hp, pi_args[i].name, res);
4038 hp += 2;
4039 }
4040 BIF_RET(res);
4041 }
4042 else if (ERTS_IS_ATOM_STR("processes", BIF_ARG_1)) {
4043 /* Used by process_SUITE (emulator) */
4044 BIF_RET(erts_debug_ptab_list(BIF_P, &erts_proc));
4045 }
4046 else if (ERTS_IS_ATOM_STR("processes_bif_info", BIF_ARG_1)) {
4047 /* Used by process_SUITE (emulator) */
4048 BIF_RET(erts_debug_ptab_list_bif_info(BIF_P, &erts_proc));
4049 }
4050 else if (ERTS_IS_ATOM_STR("max_atom_out_cache_index", BIF_ARG_1)) {
4051 /* Used by distribution_SUITE (emulator) */
4052 BIF_RET(make_small((Uint) erts_debug_max_atom_out_cache_index()));
4053 }
4054 else if (ERTS_IS_ATOM_STR("nbalance", BIF_ARG_1)) {
4055 Uint n;
4056 erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
4057 n = erts_debug_nbalance();
4058 erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
4059 BIF_RET(erts_make_integer(n, BIF_P));
4060 }
4061 else if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)) {
4062 BIF_RET(am_true);
4063 }
4064 else if (ERTS_IS_ATOM_STR("force_heap_frags", BIF_ARG_1)) {
4065 #ifdef FORCE_HEAP_FRAGS
4066 BIF_RET(am_true);
4067 #else
4068 BIF_RET(am_false);
4069 #endif
4070 }
4071 else if (ERTS_IS_ATOM_STR("memory", BIF_ARG_1)) {
4072 Eterm res;
4073 erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
4074 erts_thr_progress_block();
4075 erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
4076 res = erts_memory(NULL, NULL, BIF_P, THE_NON_VALUE);
4077 erts_thr_progress_unblock();
4078 BIF_RET(res);
4079 }
4080 else if (ERTS_IS_ATOM_STR("mmap", BIF_ARG_1)) {
4081 BIF_RET(erts_mmap_debug_info(BIF_P));
4082 }
4083 else if (ERTS_IS_ATOM_STR("unique_monotonic_integer_state", BIF_ARG_1)) {
4084 BIF_RET(erts_debug_get_unique_monotonic_integer_state(BIF_P));
4085 }
4086 else if (ERTS_IS_ATOM_STR("min_unique_monotonic_integer", BIF_ARG_1)) {
4087 Sint64 value = erts_get_min_unique_monotonic_integer();
4088 if (IS_SSMALL(value))
4089 BIF_RET(make_small(value));
4090 else {
4091 Uint hsz = ERTS_SINT64_HEAP_SIZE(value);
4092 Eterm *hp = HAlloc(BIF_P, hsz);
4093 BIF_RET(erts_sint64_to_big(value, &hp));
4094 }
4095 }
4096 else if (ERTS_IS_ATOM_STR("min_unique_integer", BIF_ARG_1)) {
4097 Sint64 value = erts_get_min_unique_integer();
4098 if (IS_SSMALL(value))
4099 BIF_RET(make_small(value));
4100 else {
4101 Uint hsz = ERTS_SINT64_HEAP_SIZE(value);
4102 Eterm *hp = HAlloc(BIF_P, hsz);
4103 BIF_RET(erts_sint64_to_big(value, &hp));
4104 }
4105 }
4106 else if (ERTS_IS_ATOM_STR("stack_check", BIF_ARG_1)) {
4107 UWord size;
4108 char c;
4109 if (erts_is_above_stack_limit(&c))
4110 size = erts_check_stack_recursion_downwards(&c, &c);
4111 else
4112 size = erts_check_stack_recursion_upwards(&c, &c);
4113 if (IS_SSMALL(size))
4114 BIF_RET(make_small(size));
4115 else {
4116 Uint hsz = BIG_UWORD_HEAP_SIZE(size);
4117 Eterm *hp = HAlloc(BIF_P, hsz);
4118 BIF_RET(uword_to_big(size, hp));
4119 }
4120 } else if (ERTS_IS_ATOM_STR("scheduler_dump", BIF_ARG_1)) {
4121 #if defined(ERTS_HAVE_TRY_CATCH) && defined(ERTS_SYS_SUSPEND_SIGNAL)
4122 BIF_RET(am_true);
4123 #else
4124 BIF_RET(am_false);
4125 #endif
4126 }
4127 else if (ERTS_IS_ATOM_STR("lc_graph", BIF_ARG_1)) {
4128 #ifdef ERTS_ENABLE_LOCK_CHECK
4129 Eterm res = erts_lc_dump_graph();
4130 BIF_RET(res);
4131 #else
4132 BIF_RET(am_notsup);
4133 #endif
4134 }
4135 else if (ERTS_IS_ATOM_STR("flxctr_memory_usage", BIF_ARG_1)) {
4136 Sint mem = erts_flxctr_debug_memory_usage();
4137 if (mem == -1) {
4138 BIF_RET(am_notsup);
4139 } else {
4140 Uint hsz = BIG_UWORD_HEAP_SIZE((UWord)mem);
4141 Eterm *hp = HAlloc(BIF_P, hsz);
4142 BIF_RET(uword_to_big((UWord)mem, hp));
4143 }
4144 }
4145 else if (ERTS_IS_ATOM_STR("persistent_term", BIF_ARG_1)) {
4146 BIF_RET(erts_debug_persistent_term_xtra_info(BIF_P));
4147 }
4148 }
4149 else if (is_tuple(BIF_ARG_1)) {
4150 Eterm* tp = tuple_val(BIF_ARG_1);
4151 switch (arityval(tp[0])) {
4152 case 2: {
4153 if (ERTS_IS_ATOM_STR("node_and_dist_references", tp[1])) {
4154 if (tp[2] == am_blocked
4155 && erts_is_multi_scheduling_blocked() > 0) {
4156 Eterm res = erts_get_node_and_dist_references(BIF_P);
4157 BIF_RET(res);
4158 }
4159 }
4160 else if (ERTS_IS_ATOM_STR("process_status", tp[1])) {
4161 /* Used by timer process_SUITE, timer_bif_SUITE, and
4162 node_container_SUITE (emulator) */
4163 if (is_internal_pid(tp[2])) {
4164 BIF_RET(erts_process_status(NULL, tp[2]));
4165 }
4166 }
4167 else if (ERTS_IS_ATOM_STR("connection_id", tp[1])) {
4168 DistEntry *dep;
4169 Eterm *hp, res;
4170 Uint con_id, hsz = 0;
4171 if (!is_atom(tp[2]))
4172 BIF_ERROR(BIF_P, BADARG);
4173 dep = erts_sysname_to_connected_dist_entry(tp[2]);
4174 if (!dep)
4175 BIF_ERROR(BIF_P, BADARG);
4176 erts_de_rlock(dep);
4177 con_id = (Uint) dep->connection_id;
4178 erts_de_runlock(dep);
4179 (void) erts_bld_uint(NULL, &hsz, con_id);
4180 hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
4181 res = erts_bld_uint(&hp, NULL, con_id);
4182 BIF_RET(res);
4183 }
4184 else if (ERTS_IS_ATOM_STR("link_list", tp[1])) {
4185 /* Used by erl_link_SUITE (emulator) */
4186 if(is_internal_pid(tp[2])) {
4187 erts_aint32_t state;
4188 Eterm res;
4189 Process *p;
4190 int sigs_done;
4191
4192 p = erts_pid2proc(BIF_P,
4193 ERTS_PROC_LOCK_MAIN,
4194 tp[2],
4195 ERTS_PROC_LOCK_MAIN);
4196 if (!p) {
4197 ERTS_ASSERT_IS_NOT_EXITING(BIF_P);
4198 BIF_RET(am_undefined);
4199 }
4200
4201 erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
4202 erts_proc_sig_fetch(p);
4203 erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
4204 do {
4205 int reds = CONTEXT_REDS;
4206 sigs_done = erts_proc_sig_handle_incoming(p,
4207 &state,
4208 &reds,
4209 CONTEXT_REDS,
4210 !0);
4211 } while (!sigs_done && !(state & ERTS_PSFLG_EXITING));
4212
4213 if (!(state & ERTS_PSFLG_EXITING))
4214 res = make_link_list(BIF_P, 1, ERTS_P_LINKS(p), NIL);
4215 else if (BIF_P == p)
4216 ERTS_BIF_EXITED(BIF_P);
4217 else
4218 res = am_undefined;
4219 if (BIF_P != p)
4220 erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
4221 BIF_RET(res);
4222 }
4223 else if(is_internal_port(tp[2])) {
4224 Eterm res;
4225 Port *p = erts_id2port_sflgs(tp[2],
4226 BIF_P,
4227 ERTS_PROC_LOCK_MAIN,
4228 ERTS_PORT_SFLGS_INVALID_LOOKUP);
4229 if(!p)
4230 BIF_RET(am_undefined);
4231 res = make_link_list(BIF_P, 1, ERTS_P_LINKS(p), NIL);
4232 erts_port_release(p);
4233 BIF_RET(res);
4234 }
4235 else if(is_node_name_atom(tp[2])) {
4236 DistEntry *dep = erts_find_dist_entry(tp[2]);
4237 if(dep) {
4238 Eterm res = NIL;
4239 if (dep->mld) {
4240 erts_mtx_lock(&dep->mld->mtx);
4241 res = make_link_list(BIF_P, 0, dep->mld->links, NIL);
4242 erts_mtx_unlock(&dep->mld->mtx);
4243 }
4244 BIF_RET(res);
4245 } else {
4246 BIF_RET(am_undefined);
4247 }
4248 }
4249 }
4250 else if (ERTS_IS_ATOM_STR("monitor_list", tp[1])) {
4251 /* Used by erl_link_SUITE (emulator) */
4252 if(is_internal_pid(tp[2])) {
4253 erts_aint32_t state;
4254 Process *p;
4255 Eterm res;
4256 int sigs_done;
4257
4258 p = erts_pid2proc(BIF_P,
4259 ERTS_PROC_LOCK_MAIN,
4260 tp[2],
4261 ERTS_PROC_LOCK_MAIN);
4262 if (!p) {
4263 ERTS_ASSERT_IS_NOT_EXITING(BIF_P);
4264 BIF_RET(am_undefined);
4265 }
4266
4267 erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
4268 erts_proc_sig_fetch(p);
4269 erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
4270 do {
4271 int reds = CONTEXT_REDS;
4272 sigs_done = erts_proc_sig_handle_incoming(p,
4273 &state,
4274 &reds,
4275 CONTEXT_REDS,
4276 !0);
4277 } while (!sigs_done && !(state & ERTS_PSFLG_EXITING));
4278
4279 if (!(state & ERTS_PSFLG_EXITING)) {
4280 res = make_monitor_list(BIF_P, 1, ERTS_P_MONITORS(p), NIL);
4281 res = make_monitor_list(BIF_P, 0, ERTS_P_LT_MONITORS(p), res);
4282 }
4283 else {
4284 if (BIF_P == p)
4285 ERTS_BIF_EXITED(BIF_P);
4286 else
4287 res = am_undefined;
4288 }
4289 if (BIF_P != p)
4290 erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
4291 BIF_RET(res);
4292 } else if(is_node_name_atom(tp[2])) {
4293 DistEntry *dep = erts_find_dist_entry(tp[2]);
4294 if(dep) {
4295 Eterm ml = NIL;
4296 if (dep->mld) {
4297 erts_mtx_lock(&dep->mld->mtx);
4298 ml = make_monitor_list(BIF_P, 1, dep->mld->orig_name_monitors, NIL);
4299 ml = make_monitor_list(BIF_P, 0, dep->mld->monitors, ml);
4300 erts_mtx_unlock(&dep->mld->mtx);
4301 }
4302 BIF_RET(ml);
4303 } else {
4304 BIF_RET(am_undefined);
4305 }
4306 }
4307 }
4308 else if (ERTS_IS_ATOM_STR("channel_number", tp[1])) {
4309 Eterm res;
4310 DistEntry *dep = erts_find_dist_entry(tp[2]);
4311 if (!dep)
4312 res = am_undefined;
4313 else {
4314 Uint cno = dist_entry_channel_no(dep);
4315 res = make_small(cno);
4316 }
4317 BIF_RET(res);
4318 }
4319 else if (ERTS_IS_ATOM_STR("binary_info", tp[1])) {
4320 Eterm bin = tp[2];
4321 if (is_binary(bin)) {
4322 Eterm real_bin = bin;
4323 Eterm res = am_true;
4324 ErlSubBin* sb = (ErlSubBin *) binary_val(real_bin);
4325
4326 if (sb->thing_word == HEADER_SUB_BIN) {
4327 real_bin = sb->orig;
4328 }
4329 if (*binary_val(real_bin) == HEADER_PROC_BIN) {
4330 ProcBin* pb;
4331 Binary* val;
4332 Eterm SzTerm;
4333 Uint hsz = 3 + 5;
4334 Eterm* hp;
4335 DECL_AM(refc_binary);
4336
4337 pb = (ProcBin *) binary_val(real_bin);
4338 val = pb->val;
4339 (void) erts_bld_uint(NULL, &hsz, pb->size);
4340 (void) erts_bld_uint(NULL, &hsz, val->orig_size);
4341 hp = HAlloc(BIF_P, hsz);
4342
4343 /* Info about the Binary* object */
4344 SzTerm = erts_bld_uint(&hp, NULL, val->orig_size);
4345 res = TUPLE2(hp, am_binary, SzTerm);
4346 hp += 3;
4347
4348 /* Info about the ProcBin* object */
4349 SzTerm = erts_bld_uint(&hp, NULL, pb->size);
4350 res = TUPLE4(hp, AM_refc_binary, SzTerm,
4351 res, make_small(pb->flags));
4352 } else { /* heap binary */
4353 DECL_AM(heap_binary);
4354 res = AM_heap_binary;
4355 }
4356 BIF_RET(res);
4357 }
4358 }
4359 else if (ERTS_IS_ATOM_STR("term_to_binary_tuple_fallbacks", tp[1])) {
4360 Uint64 dflags = (TERM_TO_BINARY_DFLAGS
4361 & ~DFLAG_EXPORT_PTR_TAG
4362 & ~DFLAG_BIT_BINARIES);
4363 Eterm res = erts_term_to_binary(BIF_P, tp[2], 0, dflags);
4364 if (is_value(res))
4365 BIF_RET(res);
4366 BIF_ERROR(BIF_P, SYSTEM_LIMIT);
4367 }
4368 else if (ERTS_IS_ATOM_STR("dist_ctrl", tp[1])) {
4369 Eterm res = am_undefined;
4370 DistEntry *dep = erts_sysname_to_connected_dist_entry(tp[2]);
4371 if (dep) {
4372 erts_de_rlock(dep);
4373 if (is_internal_port(dep->cid) || is_internal_pid(dep->cid))
4374 res = dep->cid;
4375 erts_de_runlock(dep);
4376 }
4377 BIF_RET(res);
4378 }
4379 else if (ERTS_IS_ATOM_STR("atom_out_cache_index", tp[1])) {
4380 /* Used by distribution_SUITE (emulator) */
4381 if (is_atom(tp[2])) {
4382 BIF_RET(make_small(
4383 (Uint)
4384 erts_debug_atom_to_out_cache_index(tp[2])));
4385 }
4386 }
4387 else if (ERTS_IS_ATOM_STR("fake_scheduler_bindings", tp[1])) {
4388 return erts_fake_scheduler_bindings(BIF_P, tp[2]);
4389 }
4390 else if (ERTS_IS_ATOM_STR("reader_groups_map", tp[1])) {
4391 Sint groups;
4392 if (is_not_small(tp[2]))
4393 BIF_ERROR(BIF_P, BADARG);
4394 groups = signed_val(tp[2]);
4395 if (groups < (Sint) 1 || groups > (Sint) INT_MAX)
4396 BIF_ERROR(BIF_P, BADARG);
4397
4398 BIF_RET(erts_debug_reader_groups_map(BIF_P, (int) groups));
4399 }
4400 else if (ERTS_IS_ATOM_STR("internal_hash", tp[1])) {
4401 Uint hash = (Uint) make_internal_hash(tp[2], 0);
4402 Uint hsz = 0;
4403 Eterm* hp;
4404 erts_bld_uint(NULL, &hsz, hash);
4405 hp = HAlloc(BIF_P,hsz);
4406 return erts_bld_uint(&hp, NULL, hash);
4407 }
4408 else if (ERTS_IS_ATOM_STR("atom", tp[1])) {
4409 Uint ix;
4410 if (!term_to_Uint(tp[2], &ix))
4411 BIF_ERROR(BIF_P, BADARG);
4412 while (ix >= atom_table_size()) {
4413 char tmp[20];
4414 erts_snprintf(tmp, sizeof(tmp), "am%x", atom_table_size());
4415 erts_atom_put((byte *) tmp, sys_strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1);
4416 }
4417 return make_atom(ix);
4418 }
4419 else if (ERTS_IS_ATOM_STR("magic_ref", tp[1])) {
4420 Binary *bin;
4421 UWord bin_addr, refc;
4422 Eterm bin_addr_term, refc_term, test_type;
4423 Uint sz;
4424 Eterm *hp;
4425 if (!is_internal_magic_ref(tp[2])) {
4426 if (is_internal_ordinary_ref(tp[2])) {
4427 ErtsORefThing *rtp;
4428 rtp = (ErtsORefThing *) internal_ref_val(tp[2]);
4429 if (erts_is_ref_numbers_magic(rtp->num))
4430 BIF_RET(am_true);
4431 }
4432 BIF_RET(am_false);
4433 }
4434 bin = erts_magic_ref2bin(tp[2]);
4435 refc = erts_refc_read(&bin->intern.refc, 1);
4436 bin_addr = (UWord) bin;
4437 sz = 4;
4438 erts_bld_uword(NULL, &sz, bin_addr);
4439 erts_bld_uword(NULL, &sz, refc);
4440 hp = HAlloc(BIF_P, sz);
4441 bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
4442 refc_term = erts_bld_uword(&hp, NULL, refc);
4443 test_type = (ERTS_MAGIC_BIN_DESTRUCTOR(bin) == empty_magic_ref_destructor
4444 ? am_true : am_false);
4445 BIF_RET(TUPLE3(hp, bin_addr_term, refc_term, test_type));
4446 }
4447
4448 break;
4449 }
4450 case 3: {
4451 if (ERTS_IS_ATOM_STR("check_time_config", tp[1])) {
4452 int res, time_correction;
4453 ErtsTimeWarpMode time_warp_mode;
4454 if (tp[2] == am_true)
4455 time_correction = !0;
4456 else if (tp[2] == am_false)
4457 time_correction = 0;
4458 else
4459 break;
4460 if (ERTS_IS_ATOM_STR("no_time_warp", tp[3]))
4461 time_warp_mode = ERTS_NO_TIME_WARP_MODE;
4462 else if (ERTS_IS_ATOM_STR("single_time_warp", tp[3]))
4463 time_warp_mode = ERTS_SINGLE_TIME_WARP_MODE;
4464 else if (ERTS_IS_ATOM_STR("multi_time_warp", tp[3]))
4465 time_warp_mode = ERTS_MULTI_TIME_WARP_MODE;
4466 else
4467 break;
4468 res = erts_check_time_adj_support(time_correction,
4469 time_warp_mode);
4470 BIF_RET(res ? am_true : am_false);
4471 }
4472 else if (ERTS_IS_ATOM_STR("make_unique_integer", tp[1])) {
4473 Eterm res = erts_debug_make_unique_integer(BIF_P,
4474 tp[2],
4475 tp[3]);
4476 if (is_non_value(res))
4477 break;
4478 BIF_RET(res);
4479 }
4480 else if (ERTS_IS_ATOM_STR("term_to_binary", tp[1])) {
4481 return erts_debug_term_to_binary(BIF_P, tp[2], tp[3]);
4482 }
4483 break;
4484 }
4485 default:
4486 break;
4487 }
4488 }
4489 BIF_ERROR(BIF_P, BADARG);
4490 }
4491
erts_internal_is_system_process_1(BIF_ALIST_1)4492 BIF_RETTYPE erts_internal_is_system_process_1(BIF_ALIST_1)
4493 {
4494 if (is_internal_pid(BIF_ARG_1)) {
4495 Process *rp = erts_proc_lookup(BIF_ARG_1);
4496 if (rp && (rp->static_flags & ERTS_STC_FLG_SYSTEM_PROC))
4497 BIF_RET(am_true);
4498 BIF_RET(am_false);
4499 }
4500
4501 if (is_external_pid(BIF_ARG_1)
4502 && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry) {
4503 BIF_RET(am_false);
4504 }
4505
4506 BIF_ERROR(BIF_P, BADARG);
4507 }
4508
erts_internal_system_check_1(BIF_ALIST_1)4509 BIF_RETTYPE erts_internal_system_check_1(BIF_ALIST_1)
4510 {
4511 Eterm res;
4512 if (ERTS_IS_ATOM_STR("schedulers", BIF_ARG_1)) {
4513 res = erts_system_check_request(BIF_P);
4514 if (is_non_value(res))
4515 BIF_RET(am_undefined);
4516 BIF_TRAP1(gather_system_check_res_trap, BIF_P, res);
4517 }
4518
4519 BIF_ERROR(BIF_P, BADARG);
4520 }
4521
#if defined(VALGRIND) && defined(__GNUC__)
/* Force noinline for valgrind suppression */
static void broken_halt_test(Eterm bif_arg_2) __attribute__((noinline));
#endif

/* Deliberately corrupts scheduler state and then erts_exit():s with a
 * crash dump.  Exists solely so bif_SUITE:erlang_halt/1 can exercise
 * the crash-dump path; never call this outside of that test. */
static void broken_halt_test(Eterm bif_arg_2)
{
    /* Ugly ugly code used by bif_SUITE:erlang_halt/1 */
#if defined(ERTS_HAVE_TRY_CATCH)
    /* Null the run queue so the dumping code trips over it. */
    erts_get_scheduler_data()->run_queue = NULL;
#endif
    erts_exit(ERTS_DUMP_EXIT, "%T", bif_arg_2);
}
4535
4536 static void
test_multizero_timeout_in_timeout3(void * vproc)4537 test_multizero_timeout_in_timeout3(void *vproc)
4538 {
4539 Process *proc = (Process *) vproc;
4540 ErtsMessage *mp = erts_alloc_message(0, NULL);
4541 ERTS_DECL_AM(multizero_timeout_in_timeout_done);
4542 erts_queue_message(proc, 0, mp, AM_multizero_timeout_in_timeout_done, am_system);
4543 erts_proc_dec_refc(proc);
4544 }
4545
/* Second zero-length timer in the chain; re-arms with stage 3. */
static void
test_multizero_timeout_in_timeout2(void *vproc)
{
    erts_start_timer_callback(0, test_multizero_timeout_in_timeout3, vproc);
}
4551
/* First zero-length timer in the chain (fired from the initial timeout
 * set up by erts_debug:set_internal_state(multizero_timeout_in_timeout, _));
 * re-arms with stage 2 to test nested zero timeouts inside timeouts. */
static void
test_multizero_timeout_in_timeout(void *vproc)
{
    erts_start_timer_callback(0, test_multizero_timeout_in_timeout2, vproc);
}
4557
/*
 * erts_debug:set_internal_state(Op, Value).
 *
 * Test/debug back door that mutates emulator internal state.  Guarded
 * by the 'available_internal_state' flag: all operations except
 * enabling/disabling that flag fail with 'undef' until it has been
 * turned on.  Only intended for OTP test suites.
 */
BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
{
    /*
     * NOTE: Only supposed to be used for testing, and debugging.
     */
    if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)
        && (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)) {
        erts_aint_t on = (erts_aint_t) (BIF_ARG_2 == am_true);
        erts_aint_t prev_on = erts_atomic_xchg_nob(&available_internal_state, on);
        if (on) {
            /* Enabling the back door is logged loudly so it cannot go
             * unnoticed on a production node. */
            erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
            erts_dsprintf(dsbufp, "Process %T ", BIF_P->common.id);
            if (erts_is_alive)
                erts_dsprintf(dsbufp, "on node %T ", erts_this_node->sysname);
            erts_dsprintf(dsbufp,
                          "enabled access to the emulator internal state.\n");
            erts_dsprintf(dsbufp,
                          "NOTE: This is an erts internal test feature and "
                          "should *only* be used by OTP test-suites.\n");
            erts_send_warning_to_logger(BIF_P->group_leader, dsbufp);
        }
        BIF_RET(prev_on ? am_true : am_false);
    }

    if (!erts_atomic_read_nob(&available_internal_state)) {
        BIF_ERROR(BIF_P, EXC_UNDEF);
    }

    if (is_atom(BIF_ARG_1)) {

        if (ERTS_IS_ATOM_STR("reds_left", BIF_ARG_1)) {
            /* Set the remaining reduction count of the caller. */
            Sint reds;
            if (term_to_Sint(BIF_ARG_2, &reds) != 0) {
                if (0 <= reds && reds <= CONTEXT_REDS) {
                    /* With save_calls active, fcalls counts down from 0
                     * instead of from CONTEXT_REDS; compensate. */
                    if (!ERTS_PROC_GET_SAVED_CALLS_BUF(BIF_P))
                        BIF_P->fcalls = reds;
                    else
                        BIF_P->fcalls = reds - CONTEXT_REDS;
                    BIF_P->scheduler_data->virtual_reds = 0;
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("block", BIF_ARG_1)
                 || ERTS_IS_ATOM_STR("sleep", BIF_ARG_1)) {
            /* Sleep for Value ms; 'block' additionally blocks thread
             * progress for the duration. */
            int block = ERTS_IS_ATOM_STR("block", BIF_ARG_1);
            Sint ms;
            if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
                if (ms > 0) {
                    /* Main lock must be dropped before blocking/sleeping. */
                    erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                    if (block)
                        erts_thr_progress_block();
                    while (erts_milli_sleep((long) ms) != 0);
                    if (block)
                        erts_thr_progress_unblock();
                    erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("block_scheduler", BIF_ARG_1)) {
            /* Sleep without blocking thread progress. */
            Sint ms;
            if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
                if (ms > 0) {
                    erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                    while (erts_milli_sleep((long) ms) != 0);
                    erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1)
                 || ERTS_IS_ATOM_STR("next_port", BIF_ARG_1)) {
            /* Used by node_container_SUITE (emulator) */
            Uint next;

            if (term_to_Uint(BIF_ARG_2, &next) != 0) {
                Sint res;

                if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
                    res = erts_ptab_test_next_id(&erts_proc, 1, next);
                else
                    res = erts_ptab_test_next_id(&erts_port, 1, next);
                if (res < 0)
                    BIF_RET(am_false);
                BIF_RET(erts_make_integer(res, BIF_P));
            }
        }
        else if (ERTS_IS_ATOM_STR("force_gc", BIF_ARG_1)) {
            /* Used by signal_SUITE (emulator) */
            Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
                                        BIF_ARG_2, ERTS_PROC_LOCK_MAIN);
            if (!rp) {
                BIF_RET(am_false);
            }
            else {
                /* NOTE(review): GC is forced on the *caller* (BIF_P),
                 * not on rp, and rp's main lock taken by erts_pid2proc
                 * is not explicitly released here when rp != BIF_P —
                 * confirm this is intended. */
                ERTS_FORCE_GC(BIF_P);
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("gc_state", BIF_ARG_1)) {
            /* Used by process_SUITE (emulator) */
            int res, enable;

            switch (BIF_ARG_2) {
            case am_true: enable = 1; break;
            case am_false: enable = 0; break;
            default: BIF_ERROR(BIF_P, BADARG); break;
            }

            /* Return the previous state (enabled/disabled). */
            res = (BIF_P->flags & F_DISABLE_GC) ? am_false : am_true;
            erts_set_gc_state(BIF_P, enable);
            BIF_RET(res);
        }
        else if (ERTS_IS_ATOM_STR("inconsistent_heap", BIF_ARG_1)) {
            /* Used by code_SUITE (emulator) */
            if (am_start == BIF_ARG_2) {
                /* Build a deliberately broken tuple on the heap with GC
                 * disabled; the matching non-'start' call repairs it. */
                Eterm broken_term;
                Eterm *hp;

                ERTS_ASSERT(!(BIF_P->flags & F_DISABLE_GC));
                erts_set_gc_state(BIF_P, 0);

                hp = HAlloc(BIF_P, 2);
                hp[0] = make_arityval(1234);
                hp[1] = THE_NON_VALUE;

                broken_term = make_tuple(hp);

                BIF_RET(broken_term);
            } else {
                /* Repair the broken term created above and re-enable GC. */
                Eterm broken_term;
                Eterm *hp;

                broken_term = BIF_ARG_2;

                hp = tuple_val(broken_term);
                ERTS_ASSERT(hp[0] == make_arityval(1234));
                ERTS_ASSERT(hp[1] == THE_NON_VALUE);
                hp[0] = make_arityval(1);
                hp[1] = am_ok;

                ERTS_ASSERT(BIF_P->flags & F_DISABLE_GC);
                erts_set_gc_state(BIF_P, 1);

                BIF_RET(am_ok);
            }
        }
        else if (ERTS_IS_ATOM_STR("colliding_names", BIF_ARG_1)) {
            /* Used by ets_SUITE (stdlib) */
            if (is_tuple(BIF_ARG_2)) {
                Eterm* tpl = tuple_val(BIF_ARG_2);
                Uint cnt;
                if (arityval(tpl[0]) == 2 && is_atom(tpl[1]) &&
                    term_to_Uint(tpl[2], &cnt)) {
                    BIF_RET(erts_ets_colliding_names(BIF_P,tpl[1],cnt));
                }
            }
        }
        else if (ERTS_IS_ATOM_STR("binary_loop_limit", BIF_ARG_1)) {
            /* Used by binary_module_SUITE (stdlib) */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                max_loops = erts_binary_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_binary_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("re_loop_limit", BIF_ARG_1)) {
            /* Used by re_SUITE (stdlib) */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                max_loops = erts_re_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_re_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("unicode_loop_limit", BIF_ARG_1)) {
            /* Used by unicode_SUITE (stdlib) */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                max_loops = erts_unicode_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_unicode_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("test_long_gc_sleep", BIF_ARG_1)) {
            if (term_to_Uint(BIF_ARG_2, &erts_test_long_gc_sleep) > 0)
                BIF_RET(am_true);
        }
        else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) {
            erts_exit(ERTS_ABORT_EXIT, "%T\n", BIF_ARG_2);
        }
        else if (ERTS_IS_ATOM_STR("kill_dist_connection", BIF_ARG_1)) {
            DistEntry *dep = erts_sysname_to_connected_dist_entry(BIF_ARG_2);
            if (!dep)
                BIF_RET(am_false);
            else {
                Uint32 con_id;
                erts_de_rlock(dep);
                con_id = dep->connection_id;
                erts_de_runlock(dep);
                erts_kill_dist_connection(dep, con_id);
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) {
            /* Wait until the named kind of background work has completed. */
            int flag = 0;
            if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2))
                flag = ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS;
            else if (ERTS_IS_ATOM_STR("timer_cancellations", BIF_ARG_2))
                flag = ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS;
            else if (ERTS_IS_ATOM_STR("aux_work", BIF_ARG_2))
                flag = ERTS_DEBUG_WAIT_COMPLETED_AUX_WORK;
            else if (ERTS_IS_ATOM_STR("thread_progress", BIF_ARG_2))
                flag = ERTS_DEBUG_WAIT_COMPLETED_THREAD_PROGRESS;

            if (flag) {
                if (erts_debug_wait_completed(BIF_P, flag))
                    ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
                else
                    BIF_ERROR(BIF_P, SYSTEM_LIMIT);
            }
        }
        else if (ERTS_IS_ATOM_STR("broken_halt", BIF_ARG_1)) {
            /* Crash the emulator on purpose; never returns. */
            erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
            broken_halt_test(BIF_ARG_2);
        }
        else if (ERTS_IS_ATOM_STR("unique_monotonic_integer_state", BIF_ARG_1)) {
            int res = erts_debug_set_unique_monotonic_integer_state(BIF_ARG_2);
            BIF_RET(res ? am_true : am_false);
        }
        else if (ERTS_IS_ATOM_STR("node_tab_delayed_delete", BIF_ARG_1)) {
            /* node_container_SUITE */
            Sint64 msecs;
            if (term_to_Sint64(BIF_ARG_2, &msecs)) {
                /* Negative value restore original value... */
                erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                erts_debug_test_node_tab_delayed_delete(msecs);
                erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
                BIF_RET(am_ok);
            }
        }
        else if (ERTS_IS_ATOM_STR("fill_heap", BIF_ARG_1)) {
            /* Consume all remaining heap words with a dummy bignum
             * header so the next allocation forces a GC. */
            UWord left = HeapWordsLeft(BIF_P);
            if (left > 1) {
                Eterm* hp = HAlloc(BIF_P, left);
                *hp = make_pos_bignum_header(left - 1);
            }
            if (BIF_ARG_2 == am_true) {
                FLAGS(BIF_P) |= F_NEED_FULLSWEEP;
            }
            BIF_RET(am_ok);
        }
        else if (ERTS_IS_ATOM_STR("make", BIF_ARG_1)) {
            if (ERTS_IS_ATOM_STR("magic_ref", BIF_ARG_2)) {
                /* Create a fresh magic ref; return {Ref, BinAddress}. */
                Binary *bin = erts_create_magic_binary(0, empty_magic_ref_destructor);
                UWord bin_addr = (UWord) bin;
                Eterm bin_addr_term, magic_ref, res;
                Eterm *hp;
                Uint sz = ERTS_MAGIC_REF_THING_SIZE + 3;
                erts_bld_uword(NULL, &sz, bin_addr);
                hp = HAlloc(BIF_P, sz);
                bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
                magic_ref = erts_mk_magic_ref(&hp, &BIF_P->off_heap, bin);
                res = TUPLE2(hp, magic_ref, bin_addr_term);
                BIF_RET(res);
            }
        }
        else if (ERTS_IS_ATOM_STR("binary", BIF_ARG_1)) {
            /* Allocate a zeroed off-heap (refc) binary of given size. */
            Sint64 size;
            if (term_to_Sint64(BIF_ARG_2, &size)) {
                Binary* refbin = erts_bin_drv_alloc_fnf(size);
                if (!refbin)
                    BIF_RET(am_false);
                sys_memset(refbin->orig_bytes, 0, size);
                BIF_RET(erts_build_proc_bin(&MSO(BIF_P),
                                            HAlloc(BIF_P, PROC_BIN_SIZE),
                                            refbin));
            }
        }
        else if (ERTS_IS_ATOM_STR("ets_force_trap", BIF_ARG_1)) {
#ifdef ETS_DBG_FORCE_TRAP
            erts_ets_dbg_force_trap = (BIF_ARG_2 == am_true) ? 1 : 0;
            BIF_RET(am_ok);
#else
            BIF_RET(am_notsup);
#endif
        }
        else if (ERTS_IS_ATOM_STR("ets_force_split", BIF_ARG_1)) {
            if (is_tuple(BIF_ARG_2)) {
                Eterm* tpl = tuple_val(BIF_ARG_2);

                if (erts_ets_force_split(tpl[1], tpl[2] == am_true))
                    BIF_RET(am_ok);
            }
        }
        else if (ERTS_IS_ATOM_STR("ets_debug_random_split_join", BIF_ARG_1)) {
            if (is_tuple(BIF_ARG_2)) {
                Eterm* tpl = tuple_val(BIF_ARG_2);
                if (erts_ets_debug_random_split_join(tpl[1], tpl[2] == am_true))
                    BIF_RET(am_ok);
            }
        }
        else if (ERTS_IS_ATOM_STR("mbuf", BIF_ARG_1)) {
            /* Copy the term into a heap fragment chained onto the
             * caller's mbuf list. */
            Uint sz = size_object(BIF_ARG_2);
            ErlHeapFragment* frag = new_message_buffer(sz);
            Eterm *hp = frag->mem;
            Eterm copy = copy_struct(BIF_ARG_2, sz, &hp, &frag->off_heap);
            frag->next = BIF_P->mbuf;
            BIF_P->mbuf = frag;
            BIF_P->mbuf_sz += sz;
            BIF_RET(copy);
        }
        else if (ERTS_IS_ATOM_STR("remove_hopefull_dflags", BIF_ARG_1)) {
            int old_val, new_val;

            switch (BIF_ARG_2) {
            case am_true: new_val = !0; break;
            case am_false: new_val = 0; break;
            default: BIF_ERROR(BIF_P, BADARG); break;
            }

            /* Flip the global flag while all other threads are blocked. */
            erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
            erts_thr_progress_block();

            old_val = erts_dflags_test_remove_hopefull_flags;
            erts_dflags_test_remove_hopefull_flags = new_val;

            erts_thr_progress_unblock();
            erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);

            BIF_RET(old_val ? am_true : am_false);
        }
        else if (ERTS_IS_ATOM_STR("code_write_permission", BIF_ARG_1)) {
            /*
             * Warning: This is a unsafe way of seizing the "lock"
             * as there is no automatic unlock if caller terminates.
             */
            switch(BIF_ARG_2) {
            case am_true:
                if (!erts_try_seize_code_write_permission(BIF_P)) {
                    /* Yield and retry this BIF until permission is free. */
                    ERTS_BIF_YIELD2(BIF_TRAP_EXPORT(BIF_erts_debug_set_internal_state_2),
                                    BIF_P, BIF_ARG_1, BIF_ARG_2);
                }
                BIF_RET(am_true);
            case am_false:
                erts_release_code_write_permission();
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("multizero_timeout_in_timeout", BIF_ARG_1)) {
            /* Start the chained zero-timeout test; the caller is kept
             * alive via a refc until stage 3 sends the done message. */
            Sint64 timeout;
            if (term_to_Sint64(BIF_ARG_2, &timeout)) {
                if (timeout < 0)
                    timeout = 0;
                erts_proc_inc_refc(BIF_P);
                erts_start_timer_callback((ErtsMonotonicTime) timeout,
                                          test_multizero_timeout_in_timeout,
                                          (void *) BIF_P);
                BIF_RET(am_ok);
            }
        }
    }

    BIF_ERROR(BIF_P, BADARG);
}
4931
4932 Eterm
erts_get_ethread_info(Process * c_p)4933 erts_get_ethread_info(Process *c_p)
4934 {
4935 Uint sz, *szp;
4936 Eterm res, *hp, **hpp, *end_hp = NULL;
4937
4938 sz = 0;
4939 szp = &sz;
4940 hpp = NULL;
4941
4942 while (1) {
4943 Eterm tup, list, name;
4944 #if defined(ETHR_NATIVE_ATOMIC32_IMPL) \
4945 || defined(ETHR_NATIVE_ATOMIC64_IMPL) \
4946 || defined(ETHR_NATIVE_DW_ATOMIC_IMPL)
4947 char buf[1024];
4948 int i;
4949 char **str;
4950 #endif
4951
4952 res = NIL;
4953
4954 #ifdef ETHR_X86_MEMBAR_H__
4955
4956 tup = erts_bld_tuple(hpp, szp, 2,
4957 erts_bld_string(hpp, szp, "sse2"),
4958 #ifdef ETHR_X86_RUNTIME_CONF_HAVE_SSE2__
4959 erts_bld_string(hpp, szp,
4960 (ETHR_X86_RUNTIME_CONF_HAVE_SSE2__
4961 ? "yes" : "no"))
4962 #else
4963 erts_bld_string(hpp, szp, "yes")
4964 #endif
4965 );
4966 res = erts_bld_cons(hpp, szp, tup, res);
4967
4968 tup = erts_bld_tuple(hpp, szp, 2,
4969 erts_bld_string(hpp, szp,
4970 "x86"
4971 #ifdef ARCH_64
4972 "_64"
4973 #endif
4974 " OOO"),
4975 erts_bld_string(hpp, szp,
4976 #ifdef ETHR_X86_OUT_OF_ORDER
4977 "yes"
4978 #else
4979 "no"
4980 #endif
4981 ));
4982
4983 res = erts_bld_cons(hpp, szp, tup, res);
4984 #endif
4985
4986 #ifdef ETHR_SPARC_V9_MEMBAR_H__
4987
4988 tup = erts_bld_tuple(hpp, szp, 2,
4989 erts_bld_string(hpp, szp, "Sparc V9"),
4990 erts_bld_string(hpp, szp,
4991 #if defined(ETHR_SPARC_TSO)
4992 "TSO"
4993 #elif defined(ETHR_SPARC_PSO)
4994 "PSO"
4995 #elif defined(ETHR_SPARC_RMO)
4996 "RMO"
4997 #else
4998 "undefined"
4999 #endif
5000 ));
5001
5002 res = erts_bld_cons(hpp, szp, tup, res);
5003
5004 #endif
5005
5006 #ifdef ETHR_PPC_MEMBAR_H__
5007
5008 tup = erts_bld_tuple(hpp, szp, 2,
5009 erts_bld_string(hpp, szp, "lwsync"),
5010 erts_bld_string(hpp, szp,
5011 #if defined(ETHR_PPC_HAVE_LWSYNC)
5012 "yes"
5013 #elif defined(ETHR_PPC_HAVE_NO_LWSYNC)
5014 "no"
5015 #elif defined(ETHR_PPC_RUNTIME_CONF_HAVE_LWSYNC__)
5016 ETHR_PPC_RUNTIME_CONF_HAVE_LWSYNC__ ? "yes" : "no"
5017 #else
5018 "undefined"
5019 #endif
5020 ));
5021
5022 res = erts_bld_cons(hpp, szp, tup, res);
5023
5024 #endif
5025
5026 tup = erts_bld_tuple(hpp, szp, 2,
5027 erts_bld_string(hpp, szp, "Native rw-spinlocks"),
5028 #ifdef ETHR_NATIVE_RWSPINLOCK_IMPL
5029 erts_bld_string(hpp, szp, ETHR_NATIVE_RWSPINLOCK_IMPL)
5030 #else
5031 erts_bld_string(hpp, szp, "no")
5032 #endif
5033 );
5034 res = erts_bld_cons(hpp, szp, tup, res);
5035
5036 tup = erts_bld_tuple(hpp, szp, 2,
5037 erts_bld_string(hpp, szp, "Native spinlocks"),
5038 #ifdef ETHR_NATIVE_SPINLOCK_IMPL
5039 erts_bld_string(hpp, szp, ETHR_NATIVE_SPINLOCK_IMPL)
5040 #else
5041 erts_bld_string(hpp, szp, "no")
5042 #endif
5043 );
5044 res = erts_bld_cons(hpp, szp, tup, res);
5045
5046
5047 list = NIL;
5048 #ifdef ETHR_NATIVE_DW_ATOMIC_IMPL
5049 if (ethr_have_native_dw_atomic()) {
5050 name = erts_bld_string(hpp, szp, ETHR_NATIVE_DW_ATOMIC_IMPL);
5051 str = ethr_native_dw_atomic_ops();
5052 for (i = 0; str[i]; i++) {
5053 erts_snprintf(buf, sizeof(buf), "ethr_native_dw_atomic_%s()", str[i]);
5054 list = erts_bld_cons(hpp, szp,
5055 erts_bld_string(hpp, szp, buf),
5056 list);
5057 }
5058 str = ethr_native_su_dw_atomic_ops();
5059 for (i = 0; str[i]; i++) {
5060 erts_snprintf(buf, sizeof(buf), "ethr_native_su_dw_atomic_%s()", str[i]);
5061 list = erts_bld_cons(hpp, szp,
5062 erts_bld_string(hpp, szp, buf),
5063 list);
5064 }
5065 }
5066 else
5067 #endif
5068 name = erts_bld_string(hpp, szp, "no");
5069
5070 tup = erts_bld_tuple(hpp, szp, 3,
5071 erts_bld_string(hpp, szp, "Double word native atomics"),
5072 name,
5073 list);
5074 res = erts_bld_cons(hpp, szp, tup, res);
5075
5076 list = NIL;
5077 #ifdef ETHR_NATIVE_ATOMIC64_IMPL
5078 name = erts_bld_string(hpp, szp, ETHR_NATIVE_ATOMIC64_IMPL);
5079 str = ethr_native_atomic64_ops();
5080 for (i = 0; str[i]; i++) {
5081 erts_snprintf(buf, sizeof(buf), "ethr_native_atomic64_%s()", str[i]);
5082 list = erts_bld_cons(hpp, szp,
5083 erts_bld_string(hpp, szp, buf),
5084 list);
5085 }
5086 #else
5087 name = erts_bld_string(hpp, szp, "no");
5088 #endif
5089 tup = erts_bld_tuple(hpp, szp, 3,
5090 erts_bld_string(hpp, szp, "64-bit native atomics"),
5091 name,
5092 list);
5093 res = erts_bld_cons(hpp, szp, tup, res);
5094
5095 list = NIL;
5096 #ifdef ETHR_NATIVE_ATOMIC32_IMPL
5097 name = erts_bld_string(hpp, szp, ETHR_NATIVE_ATOMIC32_IMPL);
5098 str = ethr_native_atomic32_ops();
5099 for (i = 0; str[i]; i++) {
5100 erts_snprintf(buf, sizeof(buf), "ethr_native_atomic32_%s()", str[i]);
5101 list = erts_bld_cons(hpp, szp,
5102 erts_bld_string(hpp, szp, buf),
5103 list);
5104 }
5105 #else
5106 name = erts_bld_string(hpp, szp, "no");
5107 #endif
5108 tup = erts_bld_tuple(hpp, szp, 3,
5109 erts_bld_string(hpp, szp, "32-bit native atomics"),
5110 name,
5111 list);
5112 res = erts_bld_cons(hpp, szp, tup, res);
5113
5114 if (hpp) {
5115 HRelease(c_p, end_hp, *hpp)
5116 return res;
5117 }
5118
5119 hp = HAlloc(c_p, sz);
5120 end_hp = hp + sz;
5121 hpp = &hp;
5122 szp = NULL;
5123 }
5124 }
5125
5126 static BIF_RETTYPE
gather_histograms_helper(Process * c_p,Eterm arg_tuple,int gather (Process *,int,int,int,UWord,Eterm))5127 gather_histograms_helper(Process * c_p, Eterm arg_tuple,
5128 int gather(Process *, int, int, int, UWord, Eterm))
5129 {
5130 SWord hist_start, hist_width, sched_id;
5131 int msg_count, alloc_num;
5132 Eterm *args;
5133
5134 /* This is an internal BIF, so the error checking is mostly left to erlang
5135 * code. */
5136
5137 ASSERT(is_tuple_arity(arg_tuple, 5));
5138 args = tuple_val(arg_tuple);
5139
5140 for (alloc_num = ERTS_ALC_A_MIN; alloc_num <= ERTS_ALC_A_MAX; alloc_num++) {
5141 if(erts_is_atom_str(ERTS_ALC_A2AD(alloc_num), args[1], 0)) {
5142 break;
5143 }
5144 }
5145
5146 if (alloc_num > ERTS_ALC_A_MAX) {
5147 BIF_ERROR(c_p, BADARG);
5148 }
5149
5150 sched_id = signed_val(args[2]);
5151 hist_width = signed_val(args[3]);
5152 hist_start = signed_val(args[4]);
5153
5154 if (sched_id < 0 || sched_id > erts_no_schedulers) {
5155 BIF_ERROR(c_p, BADARG);
5156 }
5157
5158 msg_count = gather(c_p, alloc_num, sched_id, hist_width, hist_start, args[5]);
5159
5160 BIF_RET(make_small(msg_count));
5161 }
5162
/* Gathers per-allocator block-size histograms; thin wrapper around
 * gather_histograms_helper with the alloc-histogram callback. */
BIF_RETTYPE erts_internal_gather_alloc_histograms_1(BIF_ALIST_1)
{
    return gather_histograms_helper(BIF_P, BIF_ARG_1,
                                    erts_alcu_gather_alloc_histograms);
}
5168
/* Gathers per-allocator carrier information; thin wrapper around
 * gather_histograms_helper with the carrier-info callback. */
BIF_RETTYPE erts_internal_gather_carrier_info_1(BIF_ALIST_1)
{
    return gather_histograms_helper(BIF_P, BIF_ARG_1,
                                    erts_alcu_gather_carrier_info);
}
5174
5175
5176 /* Builds a list of all functions in the given module:
5177 * [{Name, Arity},...] */
5178 static Eterm
functions_in_module(Process * p,const BeamCodeHeader * code_hdr)5179 functions_in_module(Process* p, const BeamCodeHeader* code_hdr)
5180 {
5181 int i;
5182 Uint num_functions;
5183 Uint need;
5184 Eterm* hp;
5185 Eterm* hp_end;
5186 Eterm result = NIL;
5187
5188 num_functions = code_hdr->num_functions;
5189 need = 5*num_functions;
5190 hp = HAlloc(p, need);
5191 hp_end = hp + need;
5192 for (i = num_functions-1; i >= 0 ; i--) {
5193 const ErtsCodeInfo* ci = code_hdr->functions[i];
5194 Eterm tuple;
5195
5196 /*
5197 * If the function name is [], this entry is a stub for
5198 * a BIF that should be ignored.
5199 */
5200 ASSERT(is_atom(ci->mfa.function) || is_nil(ci->mfa.function));
5201 if (is_atom(ci->mfa.function)) {
5202 tuple = TUPLE2(hp, ci->mfa.function, make_small(ci->mfa.arity));
5203 hp += 3;
5204
5205 result = CONS(hp, tuple, result);
5206 hp += 2;
5207 }
5208 }
5209 HRelease(p, hp_end, hp);
5210 return result;
5211 }
5212
5213 /* Builds a list of all NIFs in the given module:
5214 * [{Name, Arity},...] */
5215 static Eterm
nifs_in_module(Process * p,Eterm module)5216 nifs_in_module(Process* p, Eterm module)
5217 {
5218 Eterm nif_list, *hp;
5219 Module *mod;
5220
5221 mod = erts_get_module(module, erts_active_code_ix());
5222 nif_list = NIL;
5223
5224 if (mod->curr.nif != NULL) {
5225 int func_count, func_ix;
5226 ErlNifFunc *funcs;
5227
5228 func_count = erts_nif_get_funcs(mod->curr.nif, &funcs);
5229 hp = HAlloc(p, func_count * 5);
5230
5231 for (func_ix = func_count - 1; func_ix >= 0; func_ix--) {
5232 Eterm name, arity, pair;
5233 ErlNifFunc *func;
5234
5235 func = &funcs[func_ix];
5236
5237 name = am_atom_put(func->name, sys_strlen(func->name));
5238 arity = make_small(func->arity);
5239
5240 pair = TUPLE2(hp, name, arity);
5241 hp += 3;
5242
5243 nif_list = CONS(hp, pair, nif_list);
5244 hp += 2;
5245 }
5246 }
5247
5248 return nif_list;
5249 }
5250
/* Returns 'true' if mod has any native compiled functions, otherwise 'false'.
 *
 * Kept as a stub that always answers 'false' (native/HiPE code is not
 * supported); retained so module_info(native) keeps working. */
static Eterm
has_native(const BeamCodeHeader *code_hdr)
{
    return am_false;
}
5257
/* Builds a list of all functions including native addresses.
 * [{Name,Arity,NativeAddress},...]
 *
 * Stub: always returns the empty list (native/HiPE code is not
 * supported); retained so module_info(native_addresses) keeps working. */
static Eterm
native_addresses(Process* p, const BeamCodeHeader* code_hdr)
{
    return NIL;
}
5265
/* Builds a list of all exported functions in the given module:
 * [{Name, Arity},...]
 *
 * Walks the global export table and picks the entries belonging to
 * mod.  Heap is allocated lazily in chunks of 10 entries (5 words
 * each: tuple + cons); leftover words in the last chunk are simply
 * abandoned when a new chunk is allocated, and the final chunk's
 * remainder is returned via HRelease. */
static Eterm
exported_from_module(Process* p, ErtsCodeIndex code_ix, Eterm mod)
{
    int i, num_exps;
    Eterm* hp = NULL;
    Eterm* hend = NULL;
    Eterm result = NIL;

    num_exps = export_list_size(code_ix);
    for (i = 0; i < num_exps; i++) {
        Export* ep = export_list(i,code_ix);

        if (ep->info.mfa.module == mod) {
            Eterm tuple;

            if (erts_is_export_trampoline_active(ep, code_ix) &&
                BeamIsOpCode(ep->trampoline.common.op, op_call_error_handler)) {
                /* There is a call to the function, but it does not exist. */
                continue;
            }

            /* Out of room in the current chunk (also true initially,
             * when both are NULL); grab a fresh one. */
            if (hp == hend) {
                int need = 10 * 5;
                hp = HAlloc(p, need);
                hend = hp + need;
            }

            tuple = TUPLE2(hp, ep->info.mfa.function,
                           make_small(ep->info.mfa.arity));
            hp += 3;

            result = CONS(hp, tuple, result);
            hp += 2;
        }
    }

    /* Release the unused tail of the last chunk (no-op if hend == hp). */
    HRelease(p, hend,hp);
    return result;
}
5307
/* Returns a list of all attributes for the module.
 *
 * The attribute chunk is stored in external term format in the loaded
 * code header; decode it onto the process heap via a heap factory.
 * Returns NIL when the module has no attribute chunk, and
 * THE_NON_VALUE if decoding fails (factory left unclosed in that
 * case, matching compilation_info_for_module). */
static Eterm
attributes_for_module(Process* p, const BeamCodeHeader* code_hdr)
{
    const byte* ext;
    Eterm result = NIL;

    ext = code_hdr->attr_ptr;
    if (ext != NULL) {
        ErtsHeapFactory factory;

        erts_factory_proc_prealloc_init(&factory, p,
                                        code_hdr->attr_size_on_heap);

        result = erts_decode_ext(&factory, &ext, 0);

        if (is_value(result)) {
            erts_factory_close(&factory);
        }
    }
    return result;
}
5330
5331 /* Returns a list containing compilation information. */
5332 static Eterm
compilation_info_for_module(Process * p,const BeamCodeHeader * code_hdr)5333 compilation_info_for_module(Process* p, const BeamCodeHeader* code_hdr)
5334 {
5335 const byte* ext;
5336 Eterm result = NIL;
5337
5338 ext = code_hdr->compile_ptr;
5339 if (ext != NULL) {
5340 ErtsHeapFactory factory;
5341
5342 erts_factory_proc_prealloc_init(&factory, p,
5343 code_hdr->compile_size_on_heap);
5344
5345 result = erts_decode_ext(&factory, &ext, 0);
5346
5347 if (is_value(result)) {
5348 erts_factory_close(&factory);
5349 }
5350 }
5351
5352 return result;
5353 }
5354
/* Returns the MD5 checksum for a module as a fresh binary built from
 * the md5 chunk stored in the loaded code header. */
static Eterm
md5_of_module(Process* p, const BeamCodeHeader* code_hdr)
{
    return new_binary(p, code_hdr->md5_ptr, MD5_SIZE);
}
5361
/* Dispatches a Module:module_info(What) query to the corresponding
 * builder function.  Returns THE_NON_VALUE for an unknown 'what'
 * atom; callers turn that into badarg. */
static Eterm
get_module_info(Process* p, ErtsCodeIndex code_ix,
                const BeamCodeHeader* code_hdr,
                Eterm module, Eterm what)
{
    if (what == am_module) {
        return module;
    } else if (what == am_md5) {
        return md5_of_module(p, code_hdr);
    } else if (what == am_exports) {
        return exported_from_module(p, code_ix, module);
    } else if (what == am_functions) {
        return functions_in_module(p, code_hdr);
    } else if (what == am_nifs) {
        return nifs_in_module(p, module);
    } else if (what == am_attributes) {
        return attributes_for_module(p, code_hdr);
    } else if (what == am_compile) {
        return compilation_info_for_module(p, code_hdr);
    } else if (what == am_native_addresses) {
        return native_addresses(p, code_hdr);
    } else if (what == am_native) {
        return has_native(code_hdr);
    }

    return THE_NON_VALUE;
}
5389
/* Implements Module:module_info/0: builds the default property list
 * [{module,..},{exports,..},{attributes,..},{compile,..},{md5,..}].
 *
 * Returns THE_NON_VALUE when the module argument is not an atom or
 * the module has no current code; callers convert that to badarg. */
static Eterm
module_info_0(Process* p, Eterm module)
{
    Module* modp;
    ErtsCodeIndex code_ix = erts_active_code_ix();
    const BeamCodeHeader* code_hdr;
    Eterm *hp;
    Eterm list = NIL;
    Eterm tup;

    if (is_not_atom(module)) {
        return THE_NON_VALUE;
    }

    modp = erts_get_module(module, code_ix);
    if (modp == NULL) {
        return THE_NON_VALUE;
    }

    code_hdr = modp->curr.code_hdr;
    if (code_hdr == NULL) {
        return THE_NON_VALUE;
    }

    /* Prepend {What, Value} for each property; since list entries are
     * consed in front, build in reverse of the desired order. */
#define BUILD_INFO(What) \
    tup = get_module_info(p, code_ix, code_hdr, module, What); \
    hp = HAlloc(p, 5); \
    tup = TUPLE2(hp, What, tup); \
    hp += 3; \
    list = CONS(hp, tup, list)

    BUILD_INFO(am_md5);
    BUILD_INFO(am_compile);
    BUILD_INFO(am_attributes);
    BUILD_INFO(am_exports);
    BUILD_INFO(am_module);
#undef BUILD_INFO
    return list;
}
5429
5430 static Eterm
module_info_1(Process * p,Eterm module,Eterm what)5431 module_info_1(Process* p, Eterm module, Eterm what)
5432 {
5433 Module* modp;
5434 ErtsCodeIndex code_ix = erts_active_code_ix();
5435 const BeamCodeHeader* code_hdr;
5436
5437 if (is_not_atom(module)) {
5438 return THE_NON_VALUE;
5439 }
5440
5441 modp = erts_get_module(module, code_ix);
5442 if (modp == NULL) {
5443 return THE_NON_VALUE;
5444 }
5445
5446 code_hdr = modp->curr.code_hdr;
5447 if (code_hdr == NULL) {
5448 return THE_NON_VALUE;
5449 }
5450
5451 return get_module_info(p, code_ix, code_hdr, module, what);
5452 }
5453
get_module_info_1(BIF_ALIST_1)5454 BIF_RETTYPE get_module_info_1(BIF_ALIST_1)
5455 {
5456 Eterm ret = module_info_0(BIF_P, BIF_ARG_1);
5457
5458 if (is_non_value(ret)) {
5459 BIF_ERROR(BIF_P, BADARG);
5460 }
5461
5462 BIF_RET(ret);
5463 }
5464
get_module_info_2(BIF_ALIST_2)5465 BIF_RETTYPE get_module_info_2(BIF_ALIST_2)
5466 {
5467 Eterm ret = module_info_1(BIF_P, BIF_ARG_1, BIF_ARG_2);
5468
5469 if (is_non_value(ret)) {
5470 BIF_ERROR(BIF_P, BADARG);
5471 }
5472
5473 BIF_RET(ret);
5474 }
5475
5476 #ifdef ERTS_ENABLE_LOCK_COUNT
5477
/* One sampled lock: a retained pointer to the live lock info plus the
 * location count observed at sampling time. */
typedef struct {
    /* info->location_count may increase between size calculation and term
     * building, so we cap it at the value sampled in lcnt_build_result_vector.
     *
     * Shrinking is safe though. */
    int max_location_count;
    erts_lcnt_lock_info_t *info;
} lcnt_sample_t;

/* Growable array of lock samples; built by lcnt_build_sample_vector()
 * and torn down by lcnt_destroy_sample_vector(). */
typedef struct lcnt_sample_vector_ {
    lcnt_sample_t *elements;
    size_t size;
} lcnt_sample_vector_t;
5491
lcnt_build_sample_vector(erts_lcnt_lock_info_list_t * list)5492 static lcnt_sample_vector_t lcnt_build_sample_vector(erts_lcnt_lock_info_list_t *list) {
5493 erts_lcnt_lock_info_t *iterator;
5494 lcnt_sample_vector_t result;
5495 size_t allocated_entries;
5496
5497 allocated_entries = 64;
5498 result.size = 0;
5499
5500 result.elements = erts_alloc(ERTS_ALC_T_LCNT_VECTOR,
5501 allocated_entries * sizeof(lcnt_sample_t));
5502
5503 iterator = NULL;
5504 while(erts_lcnt_iterate_list(list, &iterator)) {
5505 erts_lcnt_retain_lock_info(iterator);
5506
5507 result.elements[result.size].max_location_count = iterator->location_count;
5508 result.elements[result.size].info = iterator;
5509
5510 result.size++;
5511
5512 if(result.size >= allocated_entries) {
5513 allocated_entries *= 2;
5514
5515 result.elements = erts_realloc(ERTS_ALC_T_LCNT_VECTOR, result.elements,
5516 allocated_entries * sizeof(lcnt_sample_t));
5517 }
5518 }
5519
5520 return result;
5521 }
5522
/* Releases every lock info retained by lcnt_build_sample_vector() and
 * frees the vector's element storage. */
static void lcnt_destroy_sample_vector(lcnt_sample_vector_t *vector) {
    size_t idx = 0;

    while (idx < vector->size) {
        erts_lcnt_release_lock_info(vector->elements[idx].info);
        idx++;
    }

    erts_free(ERTS_ALC_T_LCNT_VECTOR, vector->elements);
}
5532
5533 /* The size of an integer is not guaranteed to be constant since we're walking
5534 * over live data, and may cross over into bignum territory between size calc
5535 * and the actual build. This takes care of that through always assuming the
5536 * worst, but needs to be fixed up with HRelease once the final term has been
5537 * built. */
bld_unstable_uint64(Uint ** hpp,Uint * szp,Uint64 ui)5538 static ERTS_INLINE Eterm bld_unstable_uint64(Uint **hpp, Uint *szp, Uint64 ui) {
5539 Eterm res = THE_NON_VALUE;
5540
5541 if(szp) {
5542 *szp += ERTS_UINT64_HEAP_SIZE(~((Uint64) 0));
5543 }
5544
5545 if(hpp) {
5546 if (IS_USMALL(0, ui)) {
5547 res = make_small(ui);
5548 } else {
5549 res = erts_uint64_to_big(ui, hpp);
5550 }
5551 }
5552
5553 return res;
5554 }
5555
/* Prepends one per-location statistics entry to `res`:
 *     {{File, Line},
 *      {Tries, Colls, {Seconds, Nanoseconds, NumBlocks}},
 *      {... wait-time histogram ...}}
 * Works in the usual two-pass fashion: size pass (szp) and build pass
 * (hpp); the sequence of build calls must be identical in both. */
static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_stats_t *stats, Eterm res) {
    const char *file_name;
    unsigned int slot;

    Eterm file_atom, line_term, location_tuple;
    Eterm attempts, collisions;
    Eterm wait_s, wait_ns, wait_count, wait_tuple;
    Eterm stat_tuple, hist_tuple, entry;
    Eterm hist_slots[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];

    file_name = stats->file ? stats->file : "undefined";

    file_atom = erts_atom_put((byte *)file_name, sys_strlen(file_name),
                              ERTS_ATOM_ENC_LATIN1, 1);
    line_term = erts_bld_uint(hpp, szp, stats->line);
    location_tuple = erts_bld_tuple(hpp, szp, 2, file_atom, line_term);

    attempts = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->attempts));
    collisions = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->collisions));

    wait_s = bld_unstable_uint64(hpp, szp, stats->total_time_waited.s);
    wait_ns = bld_unstable_uint64(hpp, szp, stats->total_time_waited.ns);
    wait_count = bld_unstable_uint64(hpp, szp, stats->times_waited);
    wait_tuple = erts_bld_tuple(hpp, szp, 3, wait_s, wait_ns, wait_count);

    stat_tuple = erts_bld_tuple(hpp, szp, 3, attempts, collisions, wait_tuple);

    for (slot = 0; slot < ERTS_LCNT_HISTOGRAM_SLOT_SIZE; slot++) {
        hist_slots[slot] = bld_unstable_uint64(hpp, szp,
                                               stats->wait_time_histogram.ns[slot]);
    }

    hist_tuple = erts_bld_tuplev(hpp, szp, ERTS_LCNT_HISTOGRAM_SLOT_SIZE, hist_slots);

    entry = erts_bld_tuple(hpp, szp, 3, location_tuple, stat_tuple, hist_tuple);

    return erts_bld_cons(hpp, szp, entry, res);
}
5598
lcnt_pretty_print_lock_id(erts_lcnt_lock_info_t * info)5599 static Eterm lcnt_pretty_print_lock_id(erts_lcnt_lock_info_t *info) {
5600 Eterm id = info->id;
5601
5602 if((info->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_FLAGS_TYPE_PROCLOCK) {
5603 /* Use registered names as id's for process locks if available. Thread
5604 * progress is delayed since we may be running on a dirty scheduler. */
5605 ErtsThrPrgrDelayHandle delay_handle;
5606 Process *process;
5607
5608 delay_handle = erts_thr_progress_unmanaged_delay();
5609
5610 process = erts_proc_lookup(info->id);
5611 if (process && process->common.u.alive.reg) {
5612 id = process->common.u.alive.reg->name;
5613 }
5614
5615 erts_thr_progress_unmanaged_continue(delay_handle);
5616 } else if(info->flags & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR) {
5617 if(is_small(id) && !sys_strcmp(info->name, "alcu_allocator")) {
5618 const char *name = (const char*)ERTS_ALC_A2AD(signed_val(id));
5619 id = erts_atom_put((byte*)name, sys_strlen(name), ERTS_ATOM_ENC_LATIN1, 1);
5620 }
5621 }
5622
5623 return id;
5624 }
5625
/* Prepends one {Name, Id, Type, Stats} lock entry to `res`. Like the
 * other lcnt builders this runs twice, once for sizing (szp) and once
 * for building (hpp). */
static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, lcnt_sample_t *sample, Eterm res) {
    erts_lcnt_lock_info_t *info = sample->info;
    const char *type_desc;
    Eterm name, type, id, entry;
    Eterm stats = NIL;
    int i;

    ASSERT(info->name);

    type_desc = erts_lock_flags_get_type_name(info->flags);

    type = erts_atom_put((byte*)type_desc, sys_strlen(type_desc),
                         ERTS_ATOM_ENC_LATIN1, 1);
    name = erts_atom_put((byte*)info->name, sys_strlen(info->name),
                         ERTS_ATOM_ENC_LATIN1, 1);

    /* Only attempt to resolve ids when actually emitting the term. This ought
     * to be safe since all immediates are the same size. */
    id = (hpp != NULL) ? lcnt_pretty_print_lock_id(info) : NIL;

    /* Re-evaluate the bound every lap: location_count belongs to live
     * data and is capped at the value sampled when the vector was built. */
    for (i = 0; i < MIN(info->location_count, sample->max_location_count); i++) {
        stats = lcnt_build_lock_stats_term(hpp, szp, &(info->location_stats[i]), stats);
    }

    entry = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);

    return erts_bld_cons(hpp, szp, entry, res);
}
5659
/* Builds the final lcnt result:
 *     [{duration, {Seconds, Nanoseconds}}, {locks, Locks}]
 * covering both the currently live and the deleted (preserved) locks.
 * Two-pass builder: size pass with szp set, build pass with hpp set. */
static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_time_t *duration,
                                    lcnt_sample_vector_t *current_locks,
                                    lcnt_sample_vector_t *deleted_locks, Eterm res) {
    const char *duration_str = "duration";
    const char *locks_str = "locks";

    Eterm dur_s, dur_ns, dur_pair, dur_atom, dur_entry;
    Eterm locks_atom, locks_entry;
    Eterm lock_list = NIL;
    size_t i;

    /* {duration, {Seconds, Nanoseconds}} */
    dur_s = bld_unstable_uint64(hpp, szp, duration->s);
    dur_ns = bld_unstable_uint64(hpp, szp, duration->ns);
    dur_pair = erts_bld_tuple(hpp, szp, 2, dur_s, dur_ns);

    dur_atom = erts_atom_put((byte *)duration_str, sys_strlen(duration_str),
                             ERTS_ATOM_ENC_LATIN1, 1);
    dur_entry = erts_bld_tuple(hpp, szp, 2, dur_atom, dur_pair);

    /* {locks, [Lock]} */
    locks_atom = erts_atom_put((byte *)locks_str, sys_strlen(locks_str),
                               ERTS_ATOM_ENC_LATIN1, 1);

    for (i = 0; i < current_locks->size; i++) {
        lock_list = lcnt_build_lock_term(hpp, szp,
                                         &current_locks->elements[i], lock_list);
    }

    for (i = 0; i < deleted_locks->size; i++) {
        lock_list = lcnt_build_lock_term(hpp, szp,
                                         &deleted_locks->elements[i], lock_list);
    }

    locks_entry = erts_bld_tuple(hpp, szp, 2, locks_atom, lock_list);

    res = erts_bld_cons(hpp, szp, locks_entry, res);
    res = erts_bld_cons(hpp, szp, dur_entry, res);

    return res;
}
5697
/* Maps lock-category names (as atoms coming from Erlang) to their flag
 * bits; the table is terminated by a NULL name. Used by
 * lcnt_atom_to_lock_category() and lcnt_build_category_list(). */
static struct {
    const char *name;
    erts_lock_flags_t flag;
} lcnt_category_map[] = {
    {"allocator", ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR},
    {"db", ERTS_LOCK_FLAGS_CATEGORY_DB},
    {"debug", ERTS_LOCK_FLAGS_CATEGORY_DEBUG},
    {"distribution", ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION},
    {"generic", ERTS_LOCK_FLAGS_CATEGORY_GENERIC},
    {"io", ERTS_LOCK_FLAGS_CATEGORY_IO},
    {"process", ERTS_LOCK_FLAGS_CATEGORY_PROCESS},
    {"scheduler", ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER},
    {NULL, 0}
};
5712
lcnt_atom_to_lock_category(Eterm atom)5713 static erts_lock_flags_t lcnt_atom_to_lock_category(Eterm atom) {
5714 int i = 0;
5715
5716 for(i = 0; lcnt_category_map[i].name != NULL; i++) {
5717 if(erts_is_atom_str(lcnt_category_map[i].name, atom, 0)) {
5718 return lcnt_category_map[i].flag;
5719 }
5720 }
5721
5722 return 0;
5723 }
5724
/* Builds a list of category atoms for every bit set in `mask`, using
 * the usual two-pass size/build protocol (szp/hpp). */
static Eterm lcnt_build_category_list(Eterm **hpp, Uint *szp, erts_lock_flags_t mask) {
    Eterm list = NIL;
    int idx;

    for (idx = 0; lcnt_category_map[idx].name != NULL; idx++) {
        const char *cat_name = lcnt_category_map[idx].name;
        Eterm cat_atom;

        if (!(mask & lcnt_category_map[idx].flag)) {
            continue;
        }

        cat_atom = erts_atom_put((byte*)cat_name,
                                 sys_strlen(cat_name),
                                 ERTS_ATOM_ENC_UTF8, 0);

        list = erts_bld_cons(hpp, szp, cat_atom, list);
    }

    return list;
}
5743
5744 #endif
5745
/* erts_debug:lcnt_clear/0 — resets all lock counters. Returns 'error'
 * when the emulator was built without lock counting, 'ok' otherwise. */
BIF_RETTYPE erts_debug_lcnt_clear_0(BIF_ALIST_0)
{
#ifndef ERTS_ENABLE_LOCK_COUNT
    BIF_RET(am_error);
#else
    erts_lcnt_clear_counters();

    BIF_RET(am_ok);
#endif
}
5756
erts_debug_lcnt_collect_0(BIF_ALIST_0)5757 BIF_RETTYPE erts_debug_lcnt_collect_0(BIF_ALIST_0)
5758 {
5759 #ifndef ERTS_ENABLE_LOCK_COUNT
5760 BIF_RET(am_error);
5761 #else
5762 lcnt_sample_vector_t current_locks, deleted_locks;
5763 erts_lcnt_data_t data;
5764
5765 Eterm *term_heap_start, *term_heap_end;
5766 Uint term_heap_size = 0;
5767 Eterm result;
5768
5769 data = erts_lcnt_get_data();
5770
5771 current_locks = lcnt_build_sample_vector(data.current_locks);
5772 deleted_locks = lcnt_build_sample_vector(data.deleted_locks);
5773
5774 lcnt_build_result_term(NULL, &term_heap_size, &data.duration,
5775 ¤t_locks, &deleted_locks, NIL);
5776
5777 term_heap_start = HAlloc(BIF_P, term_heap_size);
5778 term_heap_end = term_heap_start;
5779
5780 result = lcnt_build_result_term(&term_heap_end, NULL,
5781 &data.duration, ¤t_locks, &deleted_locks, NIL);
5782
5783 HRelease(BIF_P, term_heap_start + term_heap_size, term_heap_end);
5784
5785 lcnt_destroy_sample_vector(¤t_locks);
5786 lcnt_destroy_sample_vector(&deleted_locks);
5787
5788 BIF_RET(result);
5789 #endif
5790 }
5791
erts_debug_lcnt_control_1(BIF_ALIST_1)5792 BIF_RETTYPE erts_debug_lcnt_control_1(BIF_ALIST_1)
5793 {
5794 #ifdef ERTS_ENABLE_LOCK_COUNT
5795 if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
5796 erts_lock_flags_t mask;
5797 Eterm *term_heap_block;
5798 Uint term_heap_size;
5799
5800 mask = erts_lcnt_get_category_mask();
5801 term_heap_size = 0;
5802
5803 lcnt_build_category_list(NULL, &term_heap_size, mask);
5804
5805 term_heap_block = HAlloc(BIF_P, term_heap_size);
5806
5807 BIF_RET(lcnt_build_category_list(&term_heap_block, NULL, mask));
5808 } else if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
5809 if(erts_lcnt_get_preserve_info()) {
5810 BIF_RET(am_true);
5811 }
5812
5813 BIF_RET(am_false);
5814 }
5815 #endif
5816 BIF_ERROR(BIF_P, BADARG);
5817 }
5818
erts_debug_lcnt_control_2(BIF_ALIST_2)5819 BIF_RETTYPE erts_debug_lcnt_control_2(BIF_ALIST_2)
5820 {
5821 #ifdef ERTS_ENABLE_LOCK_COUNT
5822 if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
5823 erts_lock_flags_t category_mask = 0;
5824 Eterm categories = BIF_ARG_2;
5825
5826 if(!(is_list(categories) || is_nil(categories))) {
5827 BIF_ERROR(BIF_P, BADARG);
5828 }
5829
5830 while(is_list(categories)) {
5831 Eterm *cell = list_val(categories);
5832 erts_lock_flags_t category;
5833
5834 category = lcnt_atom_to_lock_category(CAR(cell));
5835
5836 if(!category) {
5837 Eterm *hp = HAlloc(BIF_P, 4);
5838
5839 BIF_RET(TUPLE3(hp, am_error, am_badarg, CAR(cell)));
5840 }
5841
5842 category_mask |= category;
5843 categories = CDR(cell);
5844 }
5845
5846 erts_lcnt_set_category_mask(category_mask);
5847
5848 BIF_RET(am_ok);
5849 } else if(BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) {
5850 int enabled = (BIF_ARG_2 == am_true);
5851
5852 if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
5853 erts_lcnt_set_preserve_info(enabled);
5854
5855 BIF_RET(am_ok);
5856 }
5857 }
5858 #endif
5859 BIF_ERROR(BIF_P, BADARG);
5860 }
5861
os_info_init(void)5862 static void os_info_init(void)
5863 {
5864 Eterm type = erts_atom_put((byte *) os_type, sys_strlen(os_type), ERTS_ATOM_ENC_LATIN1, 1);
5865 Eterm flav;
5866 int major, minor, build;
5867 char* buf = erts_alloc(ERTS_ALC_T_TMP, 1024); /* More than enough */
5868 Eterm* hp;
5869 Eterm tuple;
5870
5871 os_flavor(buf, 1024);
5872 flav = erts_atom_put((byte *) buf, sys_strlen(buf), ERTS_ATOM_ENC_LATIN1, 1);
5873 erts_free(ERTS_ALC_T_TMP, (void *) buf);
5874 hp = erts_alloc_global_literal(ERTS_LIT_OS_TYPE, 3);
5875 tuple = TUPLE2(hp, type, flav);
5876 erts_register_global_literal(ERTS_LIT_OS_TYPE, tuple);
5877
5878 hp = erts_alloc_global_literal(ERTS_LIT_OS_VERSION, 4);
5879 os_version(&major, &minor, &build);
5880 tuple = TUPLE3(hp,
5881 make_small(major),
5882 make_small(minor),
5883 make_small(build));
5884 erts_register_global_literal(ERTS_LIT_OS_VERSION, tuple);
5885 }
5886
/* One-time initialization for this module: resets the internal-state
 * flag and registers the trap exports used by the info-gathering BIFs
 * (allocation info, scheduler wall time, GC info, I/O bytes, microstate
 * accounting, system checks), then initializes process_info and the
 * os_type/os_version global literals. */
void
erts_bif_info_init(void)
{
    erts_atomic_init_nob(&available_internal_state, 0);

    alloc_info_trap = erts_export_put(am_erlang, am_alloc_info, 1);
    alloc_sizes_trap = erts_export_put(am_erlang, am_alloc_sizes, 1);
    gather_sched_wall_time_res_trap
	= erts_export_put(am_erts_internal, am_gather_sched_wall_time_result, 1);
    gather_gc_info_res_trap
	= erts_export_put(am_erlang, am_gather_gc_info_result, 1);
    gather_io_bytes_trap
	= erts_export_put(am_erts_internal, am_gather_io_bytes, 2);
    gather_msacc_res_trap
	= erts_export_put(am_erts_internal, am_gather_microstate_accounting_result, 2);
    gather_system_check_res_trap
	= erts_export_put(am_erts_internal, am_gather_system_check_result, 1);

    is_process_alive_trap = erts_export_put(am_erts_internal, am_is_process_alive, 1);

    get_internal_state_blocked = erts_export_put(am_erts_internal,
                                                 am_get_internal_state_blocked,
                                                 1);


    process_info_init();
    os_info_init();
}
5915