1 /*
2 * %CopyrightBegin%
3 *
4 * Copyright Ericsson AB 1999-2021. All Rights Reserved.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * %CopyrightEnd%
19 */
20
21 #ifdef HAVE_CONFIG_H
22 # include "config.h"
23 #endif
24
25 #define ERTS_WANT_MEM_MAPPERS
26 #include "sys.h"
27 #include "erl_vm.h"
28 #include "global.h"
29 #include "erl_process.h"
30 #include "error.h"
31 #include "erl_driver.h"
32 #include "erl_nif.h"
33 #include "bif.h"
34 #include "big.h"
35 #include "erl_version.h"
36 #include "erl_compile_flags.h"
37 #include "erl_db_util.h"
38 #include "erl_message.h"
39 #include "erl_binary.h"
40 #include "erl_db.h"
41 #include "erl_mtrace.h"
42 #include "dist.h"
43 #include "erl_gc.h"
44 #include "erl_cpu_topology.h"
45 #include "erl_async.h"
46 #include "erl_thr_progress.h"
47 #include "erl_bif_unique.h"
48 #include "erl_map.h"
49 #include "erl_check_io.h"
50 #define ERTS_PTAB_WANT_DEBUG_FUNCS__
51 #include "erl_ptab.h"
52 #include "erl_time.h"
53 #include "erl_proc_sig_queue.h"
54 #include "erl_alloc_util.h"
55 #ifdef HIPE
56 #include "hipe_arch.h"
57 #endif
58
59 #ifdef ERTS_ENABLE_LOCK_COUNT
60 #include "erl_lock_count.h"
61 #endif
62
63 #ifdef VALGRIND
64 #include <valgrind/valgrind.h>
65 #include <valgrind/memcheck.h>
66 #endif
67
/* Trap exports resolved during info-BIF initialization; they let BIFs
 * that gather their result in several scheduling steps re-enter Erlang
 * code (yield) between steps. */
static Export* alloc_info_trap = NULL;
static Export* alloc_sizes_trap = NULL;
static Export* gather_io_bytes_trap = NULL;

static Export *gather_sched_wall_time_res_trap;
static Export *gather_msacc_res_trap;
static Export *gather_gc_info_res_trap;
static Export *gather_system_check_res_trap;

static Export *is_process_alive_trap;
static Export *get_internal_state_blocked;

/* Declare a local Eterm variable AM_<S> bound to the atom named S. */
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
81
/* A "-rc<N>" suffix in this string (from ERLANG_OTP_VERSION) is detected
 * by erts_print_system_version() below. */
static char otp_version[] = ERLANG_OTP_VERSION;
/* Keep erts_system_version as a global variable for easy access from a core */
/* NOTE: this is an erts_print() format string; the "%s" slot takes the
 * release-candidate/development note, the "%beu" slots the scheduler
 * counts and the "%d" slot the async-thread count. The bracketed tags
 * mirror compile-time configuration. */
static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
				     "%s"
				     " [erts-" ERLANG_VERSION "]"
#ifndef OTP_RELEASE
#ifdef ERLANG_GIT_VERSION
				     " [source-" ERLANG_GIT_VERSION "]"
#else
				     " [source]"
#endif
#endif
#ifdef ARCH_64
				     " [64-bit]"
#endif
				     " [smp:%beu:%beu]"
				     " [ds:%beu:%beu:%beu]"
#if defined(ERTS_DIRTY_SCHEDULERS_TEST)
				     " [dirty-schedulers-TEST]"
#endif
				     " [async-threads:%d]"
#ifdef HIPE
				     " [hipe]"
#endif
#ifdef ET_DEBUG
#if ET_DEBUG
				     " [type-assertions]"
#endif
#endif
#ifdef DEBUG
				     " [debug-compiled]"
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
				     " [lock-checking]"
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
				     " [lock-counting]"
#endif
#ifdef ERTS_OPCODE_COUNTER_SUPPORT
				     " [instruction-counting]"
#endif
#ifdef PURIFY
				     " [purify-compiled]"
#endif
#ifdef VALGRIND
				     " [valgrind-compiled]"
#endif
#ifdef ERTS_FRMPTR
				     " [frame-pointer]"
#endif
#ifdef USE_LTTNG
				     " [lttng]"
#endif
#ifdef USE_DTRACE
				     " [dtrace]"
#endif
#ifdef USE_SYSTEMTAP
				     " [systemtap]"
#endif
#ifdef SHCOPY
				     " [sharing-preserving]"
#endif
				     "\n");
145
/* Element count of a true array (do not use on pointers). */
#define ASIZE(a) (sizeof(a)/sizeof(a[0]))

#if defined(HAVE_SOLARIS_SPARC_PERFMON)
# include <sys/ioccom.h>
/* ioctl codes for the Solaris/SPARC performance-counter driver. */
# define PERFMON_SETPCR			_IOW('P', 1, unsigned long long)
# define PERFMON_GETPCR			_IOR('P', 2, unsigned long long)
#endif

/* Cached, pre-built {OsType,OsFlavor} and {Major,Minor,Build} tuples */
static Eterm os_type_tuple;
static Eterm os_version_tuple;

/* Helpers defined further down; used when serving process_info items. */
static Eterm
current_function(Process* p, ErtsHeapFactory *hfact, Process* rp,
		 int full_info, Uint reserve_size, int flags);
static Eterm current_stacktrace(ErtsHeapFactory *hfact, Process* rp,
				Uint reserve_size);
163
164 Eterm
erts_bld_bin_list(Uint ** hpp,Uint * szp,ErlOffHeap * oh,Eterm tail)165 erts_bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh, Eterm tail)
166 {
167 union erl_off_heap_ptr u;
168 Eterm res = tail;
169 Eterm tuple;
170 struct erts_tmp_aligned_offheap tmp;
171
172 for (u.hdr = oh->first; u.hdr; u.hdr = u.hdr->next) {
173 erts_align_offheap(&u, &tmp);
174 if (u.hdr->thing_word == HEADER_PROC_BIN) {
175 Eterm val = erts_bld_uword(hpp, szp, (UWord) u.pb->val);
176 Eterm orig_size = erts_bld_uint(hpp, szp, u.pb->val->orig_size);
177
178 if (szp)
179 *szp += 4+2;
180 if (hpp) {
181 Uint refc = (Uint) erts_refc_read(&u.pb->val->intern.refc, 1);
182 tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
183 res = CONS(*hpp + 4, tuple, res);
184 *hpp += 4+2;
185 }
186 }
187 }
188 return res;
189 }
190
191 static Eterm
bld_magic_ref_bin_list(Uint ** hpp,Uint * szp,ErlOffHeap * oh)192 bld_magic_ref_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
193 {
194 struct erl_off_heap_header* ohh;
195 Eterm res = NIL;
196 Eterm tuple;
197
198 for (ohh = oh->first; ohh; ohh = ohh->next) {
199 if (is_ref_thing_header((*((Eterm *) ohh)))) {
200 ErtsMRefThing *mrtp = (ErtsMRefThing *) ohh;
201 Eterm val = erts_bld_uword(hpp, szp, (UWord) mrtp->mb);
202 Eterm orig_size = erts_bld_uint(hpp, szp, mrtp->mb->orig_size);
203
204 if (szp)
205 *szp += 4+2;
206 if (hpp) {
207 Uint refc = (Uint) erts_refc_read(&mrtp->mb->intern.refc, 1);
208 tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
209 res = CONS(*hpp + 4, tuple, res);
210 *hpp += 4+2;
211 }
212 }
213 }
214 return res;
215 }
216
217
218 /*
219 make_monitor_list:
220 returns a list of records..
221 -record(erl_monitor, {
222 type, % process | port | time_offset | dist_process | resource
223 % | node | nodes | suspend
224 dir, % origin | target
225 ref, % reference or []
226 pid, % Process or nodename
227 extra % registered name, integer or []
228 }).
229 */
230
/*
 * erts_monitor_(tree|list)_foreach callback: add to the Uint pointed to
 * by vpsz the number of heap words needed to represent 'mon' as one
 * element of the #erl_monitor{} list. Always returns 1 (one unit of
 * work consumed).
 */
static int do_calc_mon_size(ErtsMonitor *mon, void *vpsz, Sint reds)
{
    Uint *szp = vpsz;
    ErtsMonitorData *mdp = erts_monitor_to_data(mon);

    if (!is_immed(mdp->ref))
        *szp += NC_HEAP_SIZE(mdp->ref);

    if (mon->type == ERTS_MON_TYPE_RESOURCE && erts_monitor_is_target(mon)) {
        *szp += erts_resource_ref_size(mon->other.ptr);
    }
    else if (!is_immed(mon->other.item)) {
        *szp += NC_HEAP_SIZE(mon->other.item);
    }

    *szp += 9; /* cons cell (2) + 6-tuple (7) */
    return 1;
}
245
/* State threaded through do_make_one_mon_element() while building the
 * monitor record list for make_monitor_list(). */
typedef struct {
    Process *p;   /* process whose heap/off-heap the terms are built on */
    Eterm *hp;    /* next free heap word (pre-allocated by the caller) */
    Eterm res;    /* result list built so far */
    Eterm tag;    /* record tag, the atom 'erl_monitor' */
} MonListContext;
252
/*
 * erts_monitor_(tree|list)_foreach callback: build one
 * {erl_monitor, Type, Dir, Ref, Pid, Extra} tuple for 'mon' and cons it
 * onto the result list in the MonListContext passed via vpmlc. Heap
 * space must already have been reserved (see do_calc_mon_size()).
 */
static int do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc, Sint reds)
{
    ErtsMonitorData *mdp = erts_monitor_to_data(mon);
    MonListContext *pmlc = vpmlc;
    Eterm tup, t, d, r, p, x;

    /* r: the monitor reference (copied to the heap unless immediate). */
    r = is_immed(mdp->ref) ? mdp->ref : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mdp->ref);
    /* p: the monitored entity; NIF-resource targets need a resource ref. */
    if (mon->type == ERTS_MON_TYPE_RESOURCE && erts_monitor_is_target(mon))
        p = erts_bld_resource_ref(&(pmlc->hp), &MSO(pmlc->p), mon->other.ptr);
    else
        p = (is_immed(mon->other.item)
             ? mon->other.item
             : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->other.item));

    /* x: the 'extra' field — registered name, node-monitor refcount or []. */
    if (mon->flags & ERTS_ML_FLG_NAME)
        x = ((ErtsMonitorDataExtended *) mdp)->u.name;
    else if (erts_monitor_is_target(mon))
        x = NIL;
    else if (mon->type == ERTS_MON_TYPE_NODE || mon->type == ERTS_MON_TYPE_NODES)
        x = make_small(((ErtsMonitorDataExtended *) mdp)->u.refc);
    else
        x = NIL;

    /* t: the monitor type as an atom. */
    switch (mon->type) {
    case ERTS_MON_TYPE_PROC:
        t = am_process;
        break;
    case ERTS_MON_TYPE_PORT:
        t = am_port;
        break;
    case ERTS_MON_TYPE_TIME_OFFSET:
        t = am_time_offset;
        break;
    case ERTS_MON_TYPE_DIST_PROC: {
        ERTS_DECL_AM(dist_process);
        t = AM_dist_process;
        break;
    }
    case ERTS_MON_TYPE_RESOURCE: {
        ERTS_DECL_AM(resource);
        t = AM_resource;
        break;
    }
    case ERTS_MON_TYPE_NODE:
        t = am_node;
        break;
    case ERTS_MON_TYPE_NODES: {
        ERTS_DECL_AM(nodes);
        t = AM_nodes;
        break;
    }
    case ERTS_MON_TYPE_SUSPEND:
        t = am_suspend;
        break;
    default:
        ERTS_INTERNAL_ERROR("Unknown monitor type");
        t = am_error;
        break;
    }
    /* d: direction of the monitor relative to the inspected process. */
    if (erts_monitor_is_target(mon)) {
        ERTS_DECL_AM(target);
        d = AM_target;
    }
    else {
        ERTS_DECL_AM(origin);
        d = AM_origin;
    }
    tup = TUPLE6(pmlc->hp, pmlc->tag, t, d, r, p, x);
    pmlc->hp += 7;
    pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
    pmlc->hp += 2;
    return 1;
}
326
327 static Eterm
make_monitor_list(Process * p,int tree,ErtsMonitor * root,Eterm tail)328 make_monitor_list(Process *p, int tree, ErtsMonitor *root, Eterm tail)
329 {
330 DECL_AM(erl_monitor);
331 Uint sz = 0;
332 MonListContext mlc;
333 void (*foreach)(ErtsMonitor *,
334 ErtsMonitorFunc,
335 void *);
336
337 foreach = tree ? erts_monitor_tree_foreach : erts_monitor_list_foreach;
338
339 (*foreach)(root, do_calc_mon_size, &sz);
340 if (sz == 0)
341 return tail;
342 mlc.p = p;
343 mlc.hp = HAlloc(p,sz);
344 mlc.res = tail;
345 mlc.tag = AM_erl_monitor;
346 (*foreach)(root, do_make_one_mon_element, &mlc);
347 return mlc.res;
348 }
349
350 /*
351 make_link_list:
352 returns a list of records..
353 -record(erl_link, {
354 type, % process | port | dist_process
355 pid, % Process or port
356 id % (address)
357 }).
358 */
359
/*
 * erts_link_(tree|list)_foreach callback: accumulate in *vpsz the heap
 * words needed for one {erl_link, Type, Pid, Id} element for 'lnk'.
 */
static int calc_lnk_size(ErtsLink *lnk, void *vpsz, Sint reds)
{
    Uint *szp = vpsz;
    Uint id_sz = 0;
    ErtsLinkData *ldp = erts_link_to_data(lnk);

    /* Size-only pass of the uword build; the link data address is the
     * 'id' field of the record. */
    (void) erts_bld_uword(NULL, &id_sz, (UWord) ldp);
    *szp += id_sz;

    if (!is_immed(lnk->other.item))
        *szp += size_object(lnk->other.item);

    *szp += 7; /* cons cell (2) + 4-tuple (5) */
    return 1;
}
373
/* State threaded through make_one_lnk_element() while building the link
 * record list for make_link_list(). */
typedef struct {
    Process *p;   /* process whose heap/off-heap the terms are built on */
    Eterm *hp;    /* next free heap word (pre-allocated by the caller) */
    Eterm res;    /* result list built so far */
    Eterm tag;    /* record tag, the atom 'erl_link' */
} LnkListContext;
380
/*
 * erts_link_(tree|list)_foreach callback: build one
 * {erl_link, Type, Pid, Id} record for 'lnk' and cons it onto the result
 * list in the LnkListContext passed via vpllc. Heap space must already
 * have been reserved (see calc_lnk_size()).
 */
static int make_one_lnk_element(ErtsLink *lnk, void * vpllc, Sint reds)
{
    LnkListContext *pllc = vpllc;
    Eterm tup, t, pid, id;
    ErtsLinkData *ldp = erts_link_to_data(lnk);

    /* The link data address serves as the link's identity. */
    id = erts_bld_uword(&pllc->hp, NULL, (UWord) ldp);

    /* pid: the linked entity (copied to the heap unless immediate). */
    if (is_immed(lnk->other.item))
	pid = lnk->other.item;
    else {
	Uint sz = size_object(lnk->other.item);
	pid = copy_struct(lnk->other.item, sz, &(pllc->hp), &MSO(pllc->p));
    }

    switch (lnk->type) {
    case ERTS_LNK_TYPE_PROC:
	t = am_process;
	break;
    case ERTS_LNK_TYPE_PORT:
	t = am_port;
	break;
    case ERTS_LNK_TYPE_DIST_PROC: {
	ERTS_DECL_AM(dist_process);
	t = AM_dist_process;
	break;
    }
    default:
	/* Fixed typo in the diagnostic ("Unkown" -> "Unknown"). */
	ERTS_INTERNAL_ERROR("Unknown link type");
	t = am_undefined;
	break;
    }

    tup = TUPLE4(pllc->hp, pllc->tag, t, pid, id);
    pllc->hp += 5;
    pllc->res = CONS(pllc->hp, tup, pllc->res);
    pllc->hp += 2;
    return 1;
}
420
421 static Eterm
make_link_list(Process * p,int tree,ErtsLink * root,Eterm tail)422 make_link_list(Process *p, int tree, ErtsLink *root, Eterm tail)
423 {
424 DECL_AM(erl_link);
425 Uint sz = 0;
426 LnkListContext llc;
427 void (*foreach)(ErtsLink *,
428 ErtsLinkFunc,
429 void *);
430
431 foreach = tree ? erts_link_tree_foreach : erts_link_list_foreach;
432
433 (*foreach)(root, calc_lnk_size, (void *) &sz);
434 if (sz == 0) {
435 return tail;
436 }
437 llc.p = p;
438 llc.hp = HAlloc(p,sz);
439 llc.res = tail;
440 llc.tag = AM_erl_link;
441 (*foreach)(root, make_one_lnk_element, (void *) &llc);
442 return llc.res;
443 }
444
445 int
erts_print_system_version(fmtfn_t to,void * arg,Process * c_p)446 erts_print_system_version(fmtfn_t to, void *arg, Process *c_p)
447 {
448 int i, rc = -1;
449 char *rc_str = "";
450 char rc_buf[100];
451 char *ov = otp_version;
452 Uint total, online, active;
453 Uint dirty_cpu, dirty_cpu_onln, dirty_io;
454
455 erts_schedulers_state(&total, &online, &active,
456 &dirty_cpu, &dirty_cpu_onln, NULL,
457 &dirty_io, NULL);
458 for (i = 0; i < sizeof(otp_version)-4; i++) {
459 if (ov[i] == '-' && ov[i+1] == 'r' && ov[i+2] == 'c')
460 rc = atoi(&ov[i+3]);
461 }
462 if (rc >= 0) {
463 if (rc == 0)
464 rc_str = " [DEVELOPMENT]";
465 else {
466 erts_snprintf(rc_buf, sizeof(rc_buf), " [RELEASE CANDIDATE %d]", rc);
467 rc_str = rc_buf;
468 }
469 }
470 return erts_print(to, arg, erts_system_version,
471 rc_str
472 , total, online
473 , dirty_cpu, dirty_cpu_onln, dirty_io
474 , erts_async_max_threads
475 );
476 }
477
/* Description of one monitor as reported by process_info(_, monitors)
 * and friends. */
typedef struct {
    /* {Entity,Node} = {monitor.Name,monitor.Pid} for external by name
     * {Entity,Node} = {monitor.Pid,NIL} for external/external by pid
     * {Entity,Node} = {monitor.Name,erlang:node()} for internal by name
     * {Entity,Node} = {monitor.resource,MON_NIF_TARGET}*/
    union {
	Eterm term;
	ErtsResource* resource;
    }entity;
    int named;       /* non-zero when monitoring by registered name */
    Uint16 type;     /* ERTS_MON_TYPE_* */
    Eterm node;      /* node name, or NIL when monitoring by pid */
    /* pid is actual target being monitored, no matter pid/port or name */
    Eterm pid;
} MonitorInfo;

/* Dynamically grown array of MonitorInfo entries plus the total heap
 * size (in words) needed to report them; see EXTEND_MONITOR_INFOS(). */
typedef struct {
    MonitorInfo *mi;   /* entry array (ERTS_ALC_T_TMP allocation) */
    Uint mi_i;         /* number of used entries */
    Uint mi_max;       /* number of allocated entries */
    int sz;            /* heap words needed for the result term */
} MonitorInfoCollection;
500
/* Reset a MonitorInfoCollection to the empty state (no allocation yet). */
#define INIT_MONITOR_INFOS(MIC) do {		\
    (MIC).mi = NULL;				\
    (MIC).mi_i = (MIC).mi_max = 0;		\
    (MIC).sz = 0;				\
} while(0)

/* Growth granularity (in entries) for the collection array. */
#define MI_INC 50

/* Ensure room for one more entry; grows the array by MI_INC entries of
 * ERTS_ALC_T_TMP memory when full. */
#define EXTEND_MONITOR_INFOS(MICP)					\
do {									\
    if ((MICP)->mi_i >= (MICP)->mi_max) {				\
	(MICP)->mi = ((MICP)->mi ? erts_realloc(ERTS_ALC_T_TMP,		\
						(MICP)->mi,		\
						((MICP)->mi_max+MI_INC)	\
						* sizeof(MonitorInfo))	\
		      : erts_alloc(ERTS_ALC_T_TMP,			\
				   MI_INC*sizeof(MonitorInfo)));	\
	(MICP)->mi_max += MI_INC;					\
    }									\
 } while (0)

/* Free the entry array (no-op for a never-extended collection). */
#define DESTROY_MONITOR_INFOS(MIC)			\
do {							\
    if ((MIC).mi != NULL) {				\
	erts_free(ERTS_ALC_T_TMP, (void *) (MIC).mi);	\
    }							\
 } while (0)
526
/*
 * Foreach callback: record one link peer in the MonitorInfoCollection
 * passed via vmicp and account the heap needed to report it (a cons
 * cell plus a possible node-container copy of the peer).
 */
static int collect_one_link(ErtsLink *lnk, void *vmicp, Sint reds)
{
    MonitorInfoCollection *micp = vmicp;

    EXTEND_MONITOR_INFOS(micp);
    micp->mi[micp->mi_i].entity.term = lnk->other.item;
    micp->sz += 2 + NC_HEAP_SIZE(lnk->other.item);
    micp->mi_i++;
    return 1;
}
536
/*
 * Foreach callback: record one origin-direction monitor (a monitor this
 * process set up) in the MonitorInfoCollection passed via vmicp, and
 * account the heap words needed to report it. Monitors of types other
 * than proc/port/dist-proc/time-offset are skipped.
 */
static int collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp, Sint reds)
{
    if (erts_monitor_is_origin(mon)) {
        MonitorInfoCollection *micp = vmicp;

        EXTEND_MONITOR_INFOS(micp);

        micp->mi[micp->mi_i].type = mon->type;

        switch (mon->type) {
        case ERTS_MON_TYPE_PROC:
        case ERTS_MON_TYPE_PORT:
        case ERTS_MON_TYPE_DIST_PROC:
        case ERTS_MON_TYPE_TIME_OFFSET:
            if (!(mon->flags & ERTS_ML_FLG_NAME)) {
                /* Monitor by pid/port: entity is the id, node unused. */
                micp->mi[micp->mi_i].named = 0;
                micp->mi[micp->mi_i].entity.term = mon->other.item;
                micp->mi[micp->mi_i].node = NIL;
                if (is_not_atom(mon->other.item))
                    micp->sz += NC_HEAP_SIZE(mon->other.item);
            }
            else {
                /* Monitor by name: entity is the registered name and
                 * node the (possibly remote) node it lives on. */
                ErtsMonitorDataExtended *mdep;
                micp->mi[micp->mi_i].named = !0;
                mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(mon);
                micp->mi[micp->mi_i].entity.term = mdep->u.name;
                if (mdep->dist)
                    micp->mi[micp->mi_i].node = mdep->dist->nodename;
                else
                    micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
                micp->sz += 3; /* need one 2-tuple */
            }

            /* have always pid at hand, to assist with figuring out if its a port or
             * a process, when we monitored by name and process_info is requested.
             * See: erl_bif_info.c:process_info_aux section for am_monitors */
            micp->mi[micp->mi_i].pid = mon->other.item;

            micp->mi_i++;
            micp->sz += 2 + 3; /* For a cons cell and a 2-tuple */
            break;
        default:
            break;
        }
    }
    return 1;
}
584
/*
 * Foreach callback: record one target-direction monitor (someone
 * monitoring this process) in the MonitorInfoCollection passed via
 * vmicp, and account the heap words needed to report it. NIF-resource
 * monitors are handled specially; other unexpected types are skipped.
 */
static int collect_one_target_monitor(ErtsMonitor *mon, void *vmicp, Sint reds)
{
    MonitorInfoCollection *micp = vmicp;

    if (erts_monitor_is_target(mon)) {

        EXTEND_MONITOR_INFOS(micp);

        micp->mi[micp->mi_i].type = mon->type;
        micp->mi[micp->mi_i].named = !!(mon->flags & ERTS_ML_FLG_NAME);
        switch (mon->type) {

        case ERTS_MON_TYPE_PROC:
        case ERTS_MON_TYPE_PORT:
        case ERTS_MON_TYPE_DIST_PROC:

            /* Monitoring entity is a process/port id. */
            micp->mi[micp->mi_i].entity.term = mon->other.item;
            micp->mi[micp->mi_i].node = NIL;
            micp->sz += NC_HEAP_SIZE(mon->other.item);

            micp->sz += 2; /* cons */;
            micp->mi_i++;
            break;

        case ERTS_MON_TYPE_RESOURCE:

            /* Monitoring entity is a NIF resource. */
            micp->mi[micp->mi_i].entity.resource = mon->other.ptr;
            micp->mi[micp->mi_i].node = NIL;
            micp->sz += erts_resource_ref_size(mon->other.ptr);

            micp->sz += 2; /* cons */;
            micp->mi_i++;
            break;

        default:
            break;
        }
    }
    return 1;
}
625
/* Dynamically grown array of suspend-monitor pointers plus the heap size
 * needed to report them; see ERTS_EXTEND_SUSPEND_MONITOR_INFOS(). */
typedef struct {
    ErtsMonitorSuspend **smi;   /* entry array (ERTS_ALC_T_TMP allocation) */
    Uint smi_i;                 /* number of used entries */
    Uint smi_max;               /* number of allocated entries */
    Uint sz;                    /* heap words needed for the result term */
} ErtsSuspendMonitorInfoCollection;

/* Reset a collection to the empty state (no allocation yet). */
#define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC) do {		\
    (SMIC).smi = NULL;						\
    (SMIC).smi_i = (SMIC).smi_max = 0;				\
    (SMIC).sz = 0;						\
} while(0)

/* Growth granularity (in entries) for the collection array. */
#define ERTS_SMI_INC 50

/* Ensure room for one more entry; grows the array by ERTS_SMI_INC
 * entries of ERTS_ALC_T_TMP memory when full. */
#define ERTS_EXTEND_SUSPEND_MONITOR_INFOS(SMICP)			\
do {									\
    if ((SMICP)->smi_i >= (SMICP)->smi_max) {				\
	(SMICP)->smi = ((SMICP)->smi					\
			? erts_realloc(ERTS_ALC_T_TMP,			\
				       (SMICP)->smi,			\
				       ((SMICP)->smi_max		\
					+ ERTS_SMI_INC)			\
				       * sizeof(ErtsMonitorSuspend *))	\
			: erts_alloc(ERTS_ALC_T_TMP,			\
				     ERTS_SMI_INC			\
				     * sizeof(ErtsMonitorSuspend *)));	\
	(SMICP)->smi_max += ERTS_SMI_INC;				\
    }									\
 } while (0)

/* Free the entry array (no-op for a never-extended collection). */
#define ERTS_DESTROY_SUSPEND_MONITOR_INFOS(SMIC)			\
do {									\
    if ((SMIC).smi != NULL) {						\
	erts_free(ERTS_ALC_T_TMP, (void *) (SMIC).smi);			\
    }									\
 } while (0)
662
/*
 * Foreach callback: record one suspend monitor in the collection passed
 * via vsmicp and account the heap needed to report it (cons + 3-tuple,
 * plus a bignum word if the suspend count does not fit a small).
 */
static int
collect_one_suspend_monitor(ErtsMonitor *mon, void *vsmicp, Sint reds)
{
    if (mon->type == ERTS_MON_TYPE_SUSPEND) {
	Sint count;
	erts_aint_t mstate;
	ErtsMonitorSuspend *msp;
	ErtsSuspendMonitorInfoCollection *smicp;

	msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
	smicp = vsmicp;

	ERTS_EXTEND_SUSPEND_MONITOR_INFOS(smicp);

	smicp->smi[smicp->smi_i] = msp;
	smicp->sz += 2 /* cons */ + 4 /* 3-tuple */;

	mstate = erts_atomic_read_nob(&msp->state);

	/* The low bits of the state word hold the suspend count. */
	count = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK);
	if (!IS_SSMALL(count))
	    smicp->sz += BIG_UINT_HEAP_SIZE;

	smicp->smi_i++;
    }
    return 1;
}
690
691 /*
692 * process_info/[1,2]
693 */
694
695 /*
696 * All valid process_info arguments.
697 */
698
/* Indices into pi_args[]; pi_arg2ix() must map each process_info
 * argument atom to the corresponding index below (consistency is
 * asserted in process_info_init()). */
#define ERTS_PI_IX_REGISTERED_NAME 0
#define ERTS_PI_IX_CURRENT_FUNCTION 1
#define ERTS_PI_IX_INITIAL_CALL 2
#define ERTS_PI_IX_STATUS 3
#define ERTS_PI_IX_MESSAGES 4
#define ERTS_PI_IX_MESSAGE_QUEUE_LEN 5
#define ERTS_PI_IX_LINKS 6
#define ERTS_PI_IX_MONITORS 7
#define ERTS_PI_IX_MONITORED_BY 8
#define ERTS_PI_IX_DICTIONARY 9
#define ERTS_PI_IX_TRAP_EXIT 10
#define ERTS_PI_IX_ERROR_HANDLER 11
#define ERTS_PI_IX_HEAP_SIZE 12
#define ERTS_PI_IX_STACK_SIZE 13
#define ERTS_PI_IX_MEMORY 14
#define ERTS_PI_IX_GARBAGE_COLLECTION 15
#define ERTS_PI_IX_GROUP_LEADER 16
#define ERTS_PI_IX_REDUCTIONS 17
#define ERTS_PI_IX_PRIORITY 18
#define ERTS_PI_IX_TRACE 19
#define ERTS_PI_IX_BINARY 20
#define ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN 21
#define ERTS_PI_IX_CATCHLEVEL 22
#define ERTS_PI_IX_BACKTRACE 23
#define ERTS_PI_IX_LAST_CALLS 24
#define ERTS_PI_IX_TOTAL_HEAP_SIZE 25
#define ERTS_PI_IX_SUSPENDING 26
#define ERTS_PI_IX_MIN_HEAP_SIZE 27
#define ERTS_PI_IX_MIN_BIN_VHEAP_SIZE 28
#define ERTS_PI_IX_MAX_HEAP_SIZE 29
#define ERTS_PI_IX_CURRENT_LOCATION 30
#define ERTS_PI_IX_CURRENT_STACKTRACE 31
#define ERTS_PI_IX_MESSAGE_QUEUE_DATA 32
#define ERTS_PI_IX_GARBAGE_COLLECTION_INFO 33
#define ERTS_PI_IX_MAGIC_REF 34
#define ERTS_PI_IX_FULLSWEEP_AFTER 35

/* Flags describing how a process_info request is to be served. */
/* Single item requested (process_info/2 with an atom), not a list. */
#define ERTS_PI_FLAG_SINGELTON (1 << 0)
/* Wrap the result in a {Key, Value} tuple even for a single item. */
#define ERTS_PI_FLAG_ALWAYS_WRAP (1 << 1)
/* The 'messages' item is wanted; must be served before msg-queue-len. */
#define ERTS_PI_FLAG_WANT_MSGS (1 << 2)
/* The message queue length is needed (requires signal fetch). */
#define ERTS_PI_FLAG_NEED_MSGQ_LEN (1 << 3)
/* Must always be served via a signal to the target process. */
#define ERTS_PI_FLAG_FORCE_SIG_SEND (1 << 4)
/* The request concerns another process than the caller. */
#define ERTS_PI_FLAG_REQUEST_FOR_OTHER (1 << 5)

/* Subtract SZ words from the reserved-size budget RS (debug-asserted
 * not to underflow). */
#define ERTS_PI_UNRESERVE(RS, SZ) \
    (ASSERT((RS) >= (SZ)), (RS) -= (SZ))
745
746
/* Static description of one process_info item. */
typedef struct {
    Eterm name;          /* the argument atom */
    Uint reserve_size;   /* heap words to pre-reserve for the result */
    int flags;           /* ERTS_PI_FLAG_* */
    ErtsProcLocks locks; /* locks needed on the inspected process */
} ErtsProcessInfoArgs;

/* One entry per process_info item, ordered by the ERTS_PI_IX_* indices
 * above (verified in process_info_init()). */
static ErtsProcessInfoArgs pi_args[] = {
    {am_registered_name, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_current_function, 4, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_initial_call, 4, 0, ERTS_PROC_LOCK_MAIN},
    {am_status, 0, 0, 0},
    {am_messages, 0, ERTS_PI_FLAG_WANT_MSGS|ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_message_queue_len, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN, ERTS_PROC_LOCK_MAIN},
    {am_links, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_monitors, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_monitored_by, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_dictionary, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_trap_exit, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_error_handler, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_stack_size, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_memory, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_garbage_collection, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + ERTS_MAX_HEAP_SIZE_MAP_SZ, 0, ERTS_PROC_LOCK_MAIN},
    {am_group_leader, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_reductions, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_priority, 0, 0, 0},
    {am_trace, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_binary, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_sequential_trace_token, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_catchlevel, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_backtrace, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_last_calls, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_total_heap_size, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_suspending, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, 0},
    {am_min_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_min_bin_vheap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_max_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_current_location, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_current_stacktrace, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_message_queue_data, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_garbage_collection_info, ERTS_PROCESS_GC_INFO_MAX_SIZE, 0, ERTS_PROC_LOCK_MAIN},
    {am_magic_ref, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_fullsweep_after, 0, 0, ERTS_PROC_LOCK_MAIN}
};

/* Number of valid process_info items. */
#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(pi_args[0])))

/* Size of the on-stack item-index array in process_info_bif(); kept
 * small in debug builds to exercise the grow path. */
#ifdef DEBUG
#  define ERTS_PI_DEF_ARR_SZ 2
#else
#  define ERTS_PI_DEF_ARR_SZ ERTS_PI_ARGS
#endif
800
801 static ERTS_INLINE Eterm
pi_ix2arg(int ix)802 pi_ix2arg(int ix)
803 {
804 if (ix < 0 || ERTS_PI_ARGS <= ix)
805 return am_undefined;
806 return pi_args[ix].name;
807 }
808
809 static ERTS_INLINE int
pi_ix2flags(int ix)810 pi_ix2flags(int ix)
811 {
812 if (ix < 0 || ERTS_PI_ARGS <= ix)
813 return 0;
814 return pi_args[ix].flags;
815 }
816
817 static ERTS_INLINE Uint
pi_ix2rsz(int ix)818 pi_ix2rsz(int ix)
819 {
820 if (ix < 0 || ERTS_PI_ARGS <= ix)
821 return 0;
822 return pi_args[ix].reserve_size;
823 }
824
825 static ERTS_INLINE ErtsProcLocks
pi_ix2locks(int ix)826 pi_ix2locks(int ix)
827 {
828 if (ix < 0 || ERTS_PI_ARGS <= ix)
829 return 0;
830 return pi_args[ix].locks;
831 }
832
/*
 * Map a process_info argument atom to its ERTS_PI_IX_* index, or -1 when
 * the atom is not a valid item. Inverse of pi_ix2arg(); the mapping is
 * consistency-checked in process_info_init().
 */
static ERTS_INLINE int
pi_arg2ix(Eterm arg)
{
    switch (arg) {
    case am_registered_name:
        return ERTS_PI_IX_REGISTERED_NAME;
    case am_current_function:
        return ERTS_PI_IX_CURRENT_FUNCTION;
    case am_initial_call:
        return ERTS_PI_IX_INITIAL_CALL;
    case am_status:
        return ERTS_PI_IX_STATUS;
    case am_messages:
        return ERTS_PI_IX_MESSAGES;
    case am_message_queue_len:
        return ERTS_PI_IX_MESSAGE_QUEUE_LEN;
    case am_links:
        return ERTS_PI_IX_LINKS;
    case am_monitors:
        return ERTS_PI_IX_MONITORS;
    case am_monitored_by:
        return ERTS_PI_IX_MONITORED_BY;
    case am_dictionary:
        return ERTS_PI_IX_DICTIONARY;
    case am_trap_exit:
        return ERTS_PI_IX_TRAP_EXIT;
    case am_error_handler:
        return ERTS_PI_IX_ERROR_HANDLER;
    case am_heap_size:
        return ERTS_PI_IX_HEAP_SIZE;
    case am_stack_size:
        return ERTS_PI_IX_STACK_SIZE;
    case am_memory:
        return ERTS_PI_IX_MEMORY;
    case am_garbage_collection:
        return ERTS_PI_IX_GARBAGE_COLLECTION;
    case am_group_leader:
        return ERTS_PI_IX_GROUP_LEADER;
    case am_reductions:
        return ERTS_PI_IX_REDUCTIONS;
    case am_priority:
        return ERTS_PI_IX_PRIORITY;
    case am_trace:
        return ERTS_PI_IX_TRACE;
    case am_binary:
        return ERTS_PI_IX_BINARY;
    case am_sequential_trace_token:
        return ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN;
    case am_catchlevel:
        return ERTS_PI_IX_CATCHLEVEL;
    case am_backtrace:
        return ERTS_PI_IX_BACKTRACE;
    case am_last_calls:
        return ERTS_PI_IX_LAST_CALLS;
    case am_total_heap_size:
        return ERTS_PI_IX_TOTAL_HEAP_SIZE;
    case am_suspending:
        return ERTS_PI_IX_SUSPENDING;
    case am_min_heap_size:
        return ERTS_PI_IX_MIN_HEAP_SIZE;
    case am_min_bin_vheap_size:
        return ERTS_PI_IX_MIN_BIN_VHEAP_SIZE;
    case am_max_heap_size:
        return ERTS_PI_IX_MAX_HEAP_SIZE;
    case am_current_location:
        return ERTS_PI_IX_CURRENT_LOCATION;
    case am_current_stacktrace:
        return ERTS_PI_IX_CURRENT_STACKTRACE;
    case am_message_queue_data:
        return ERTS_PI_IX_MESSAGE_QUEUE_DATA;
    case am_garbage_collection_info:
        return ERTS_PI_IX_GARBAGE_COLLECTION_INFO;
    case am_magic_ref:
        return ERTS_PI_IX_MAGIC_REF;
    case am_fullsweep_after:
        return ERTS_PI_IX_FULLSWEEP_AFTER;
    default:
        return -1;
    }
}
913
/* The item set returned by process_info/1, in result order. */
static Eterm pi_1_keys[] = {
    am_registered_name,
    am_current_function,
    am_initial_call,
    am_status,
    am_message_queue_len,
    am_links,
    am_dictionary,
    am_trap_exit,
    am_error_handler,
    am_priority,
    am_group_leader,
    am_total_heap_size,
    am_heap_size,
    am_stack_size,
    am_reductions,
    am_garbage_collection,
    am_suspending
};

#define ERTS_PI_1_NO_OF_KEYS (sizeof(pi_1_keys)/sizeof(Eterm))

/* Constant list of the keys above, built once by process_info_init()
 * into static heap memory, so it is never garbage collected. */
static Eterm pi_1_keys_list;
static Eterm pi_1_keys_list_heap[2*ERTS_PI_1_NO_OF_KEYS];
938
939 static void
process_info_init(void)940 process_info_init(void)
941 {
942 Eterm *hp = &pi_1_keys_list_heap[0];
943 int i;
944
945 pi_1_keys_list = NIL;
946
947 for (i = ERTS_PI_1_NO_OF_KEYS-1; i >= 0; i--) {
948 pi_1_keys_list = CONS(hp, pi_1_keys[i], pi_1_keys_list);
949 hp += 2;
950 }
951
952 #ifdef DEBUG
953 { /* Make sure the process_info argument mappings are consistent */
954 int ix;
955 for (ix = 0; ix < ERTS_PI_ARGS; ix++) {
956 ASSERT(pi_arg2ix(pi_ix2arg(ix)) == ix);
957 }
958 }
959 #endif
960
961 }
962
/* Serves a single process_info item; defined further down in the file. */
static BIF_RETTYPE
process_info_aux(Process *c_p,
                 ErtsHeapFactory *hfact,
                 Process *rp,
                 ErtsProcLocks rp_locks,
                 int item_ix,
                 int flags,
                 Uint *reserve_sizep,
                 Uint *reds);
972
/*
 * Build the process_info result for the items listed (as pi_args[]
 * indices) in item_ix[] about process 'rp', on behalf of caller 'c_p'.
 * Terms are produced via 'hfact'; 'reserve_size' is the pre-computed
 * heap budget and 'reds' accumulates the work done. With
 * ERTS_PI_FLAG_SINGELTON set, exactly one item is served and its bare
 * result returned; otherwise a list of {Key, Value} tuples (in item_ix[]
 * order) is returned.
 */
Eterm
erts_process_info(Process *c_p,
                  ErtsHeapFactory *hfact,
                  Process *rp,
                  ErtsProcLocks rp_locks,
                  int *item_ix,
                  int item_ix_len,
                  int flags,
                  Uint reserve_size,
                  Uint *reds)
{
    Eterm res;
    Eterm part_res[ERTS_PI_ARGS];
    int item_ix_ix, ix;

    if (ERTS_PI_FLAG_SINGELTON & flags) {
        ASSERT(item_ix_len == 1);
	res = process_info_aux(c_p, hfact, rp, rp_locks, item_ix[0],
                               flags, &reserve_size, reds);
        return res;
    }

    for (ix = 0; ix < ERTS_PI_ARGS; ix++)
	part_res[ix] = THE_NON_VALUE;

    /*
     * We always handle 'messages' first if it should be part
     * of the result. This since if both 'messages' and
     * 'message_queue_len' are wanted, 'messages' may
     * change the result of 'message_queue_len' (in case
     * the queue contain bad distribution messages).
     */
    if (flags & ERTS_PI_FLAG_WANT_MSGS) {
	ix = pi_arg2ix(am_messages);
	ASSERT(part_res[ix] == THE_NON_VALUE);
	res = process_info_aux(c_p, hfact, rp, rp_locks, ix,
                               flags, &reserve_size, reds);
	ASSERT(res != am_undefined);
	ASSERT(res != THE_NON_VALUE);
        part_res[ix] = res;
    }

    /* Serve the remaining items (each distinct item only once). */
    for (item_ix_ix = item_ix_len - 1; item_ix_ix >= 0; item_ix_ix--) {
	ix = item_ix[item_ix_ix];
	if (part_res[ix] == THE_NON_VALUE) {
	    res = process_info_aux(c_p, hfact, rp, rp_locks, ix,
                                   flags, &reserve_size, reds);
            ASSERT(res != am_undefined);
	    ASSERT(res != THE_NON_VALUE);
            part_res[ix] = res;
	}
    }

    /* Assemble the result list in requested order (built back-to-front). */
    res = NIL;

    for (item_ix_ix = item_ix_len - 1; item_ix_ix >= 0; item_ix_ix--) {
	ix = item_ix[item_ix_ix];
	ASSERT(part_res[ix] != THE_NON_VALUE);
	/*
	 * If we should ignore the value of registered_name,
	 * its value is nil. For more info, see comment in the
	 * beginning of process_info_aux().
	 */
	if (is_nil(part_res[ix])) {
	    ASSERT(!(flags & ERTS_PI_FLAG_ALWAYS_WRAP));
	    ASSERT(pi_ix2arg(ix) == am_registered_name);
	}
	else {
            Eterm *hp;
            ERTS_PI_UNRESERVE(reserve_size, 2);
            hp = erts_produce_heap(hfact, 2, reserve_size);
	    res = CONS(hp, part_res[ix], res);
	}
    }

    return res;
}
1050
/* Grows the item-index array used by process_info_bif() when a request
 * has more items than the on-stack default array holds (defined below). */
static void
pi_setup_grow(int **arr, int *def_arr, Uint *sz, int ix);
1053
1054 static BIF_RETTYPE
process_info_bif(Process * c_p,Eterm pid,Eterm opt,int always_wrap,int pi2)1055 process_info_bif(Process *c_p, Eterm pid, Eterm opt, int always_wrap, int pi2)
1056 {
1057 ErtsHeapFactory hfact;
1058 int def_arr[ERTS_PI_DEF_ARR_SZ];
1059 int *item_ix = &def_arr[0];
1060 Process *rp = NULL;
1061 erts_aint32_t state;
1062 BIF_RETTYPE ret;
1063 Uint reds = 0;
1064 ErtsProcLocks locks = 0;
1065 int flags;
1066 Uint reserve_size;
1067 int len;
1068 Eterm res;
1069
1070 ERTS_CT_ASSERT(ERTS_PI_DEF_ARR_SZ > 0);
1071
1072 if (c_p->common.id == pid) {
1073 int local_only = c_p->sig_qs.flags & FS_LOCAL_SIGS_ONLY;
1074 int sres, sreds, reds_left;
1075
1076 reds_left = ERTS_BIF_REDS_LEFT(c_p);
1077 sreds = reds_left;
1078
1079 if (!local_only) {
1080 erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
1081 erts_proc_sig_fetch(c_p);
1082 erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
1083 }
1084
1085 sres = erts_proc_sig_handle_incoming(c_p, &state, &sreds, sreds, !0);
1086
1087 BUMP_REDS(c_p, (int) sreds);
1088 reds_left -= sreds;
1089
1090 if (state & ERTS_PSFLG_EXITING) {
1091 c_p->sig_qs.flags &= ~FS_LOCAL_SIGS_ONLY;
1092 goto exited;
1093 }
1094 if (!sres | (reds_left <= 0)) {
1095 /*
1096 * More signals to handle or out of reds; need
1097 * to yield and continue. Prevent fetching of
1098 * more signals by setting local-sigs-only flag.
1099 */
1100 c_p->sig_qs.flags |= FS_LOCAL_SIGS_ONLY;
1101 goto yield;
1102 }
1103
1104 c_p->sig_qs.flags &= ~FS_LOCAL_SIGS_ONLY;
1105 }
1106
1107 if (is_atom(opt)) {
1108 int ix = pi_arg2ix(opt);
1109 item_ix[0] = ix;
1110 len = 1;
1111 locks = pi_ix2locks(ix);
1112 reserve_size = 3 + pi_ix2rsz(ix);
1113 flags = ERTS_PI_FLAG_SINGELTON;
1114 flags |= pi_ix2flags(ix);
1115 if (ix < 0)
1116 goto badarg;
1117 }
1118 else {
1119 Eterm list = opt;
1120 Uint size = ERTS_PI_DEF_ARR_SZ;
1121
1122 len = 0;
1123 reserve_size = 0;
1124 locks = 0;
1125 flags = 0;
1126
1127 while (is_list(list)) {
1128 Eterm *consp = list_val(list);
1129 Eterm arg = CAR(consp);
1130 int ix = pi_arg2ix(arg);
1131 if (ix < 0)
1132 goto badarg;
1133
1134 if (len >= size)
1135 pi_setup_grow(&item_ix, def_arr, &size, len);
1136
1137 item_ix[len++] = ix;
1138
1139 locks |= pi_ix2locks(ix);
1140 flags |= pi_ix2flags(ix);
1141 reserve_size += pi_ix2rsz(ix);
1142 reserve_size += 3; /* 2-tuple */
1143 reserve_size += 2; /* cons */
1144
1145 list = CDR(consp);
1146 }
1147
1148 if (is_not_nil(list))
1149 goto badarg;
1150 }
1151
1152 if (is_not_internal_pid(pid)) {
1153 if (is_external_pid(pid)
1154 && external_pid_dist_entry(pid) == erts_this_dist_entry)
1155 goto undefined;
1156 goto badarg;
1157 }
1158
1159 if (always_wrap)
1160 flags |= ERTS_PI_FLAG_ALWAYS_WRAP;
1161
1162 if (c_p->common.id == pid) {
1163 rp = c_p;
1164 if (locks & ~ERTS_PROC_LOCK_MAIN)
1165 erts_proc_lock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
1166 locks |= ERTS_PROC_LOCK_MAIN;
1167 }
1168 else {
1169 if (flags & ERTS_PI_FLAG_FORCE_SIG_SEND)
1170 goto send_signal;
1171 state = ERTS_PSFLG_RUNNING; /* fail state... */
1172 rp = erts_try_lock_sig_free_proc(pid, locks, &state);
1173 if (!rp)
1174 goto undefined;
1175 if (rp == ERTS_PROC_LOCK_BUSY) {
1176 rp = NULL;
1177 goto send_signal;
1178 }
1179 if (state & ERTS_PSFLG_EXITING) {
1180 if (locks)
1181 erts_proc_unlock(rp, locks);
1182 locks = 0;
1183 /* wait for it to terminate properly... */
1184 goto send_signal;
1185 }
1186 if (flags & ERTS_PI_FLAG_NEED_MSGQ_LEN) {
1187 ASSERT(locks & ERTS_PROC_LOCK_MAIN);
1188 erts_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
1189 erts_proc_sig_fetch(rp);
1190 if (c_p->sig_qs.cont) {
1191 erts_proc_unlock(rp, locks|ERTS_PROC_LOCK_MSGQ);
1192 locks = 0;
1193 goto send_signal;
1194 }
1195 erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
1196 }
1197 }
1198
1199 erts_factory_proc_init(&hfact, c_p);
1200
1201 res = erts_process_info(c_p, &hfact, rp, locks, item_ix, len,
1202 flags, reserve_size, &reds);
1203
1204 erts_factory_close(&hfact);
1205
1206 if (reds > INT_MAX/2)
1207 reds = INT_MAX/2;
1208 BUMP_REDS(c_p, (int) reds);
1209
1210 state = erts_atomic32_read_acqb(&rp->state);
1211 if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE)) {
1212 if (state & ERTS_PSFLG_FREE) {
1213 ASSERT(!locks);
1214 goto undefined;
1215 }
1216 if (locks)
1217 erts_proc_unlock(rp, locks);
1218 locks = 0;
1219 /* wait for it to terminate properly... */
1220 goto send_signal;
1221 }
1222
1223 if (c_p == rp || !ERTS_PROC_HAS_INCOMING_SIGNALS(c_p))
1224 ERTS_BIF_PREP_RET(ret, res);
1225 else
1226 ERTS_BIF_PREP_HANDLE_SIGNALS_RETURN(ret, c_p, res);
1227
1228 done:
1229
1230 if (c_p == rp)
1231 locks &= ~ERTS_PROC_LOCK_MAIN;
1232
1233 if (locks && rp)
1234 erts_proc_unlock(rp, locks);
1235
1236 if (item_ix != def_arr)
1237 erts_free(ERTS_ALC_T_TMP, item_ix);
1238
1239 return ret;
1240
1241 badarg:
1242 ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
1243 goto done;
1244
1245 undefined:
1246 ERTS_BIF_PREP_RET(ret, am_undefined);
1247 goto done;
1248
1249 exited:
1250 ERTS_BIF_PREP_EXITED(ret, c_p);
1251 goto done;
1252
1253 yield:
1254 if (pi2)
1255 ERTS_BIF_PREP_YIELD2(ret, bif_export[BIF_process_info_2], c_p, pid, opt);
1256 else
1257 ERTS_BIF_PREP_YIELD1(ret, bif_export[BIF_process_info_1], c_p, pid);
1258 goto done;
1259
1260 send_signal: {
1261 Eterm ref = erts_make_ref(c_p);
1262 int enqueued, need_msgq_len;
1263 flags |= ERTS_PI_FLAG_REQUEST_FOR_OTHER;
1264 need_msgq_len = (flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
1265 /*
1266 * Set receive mark so we wont have to scan the whole
1267 * message queue for the result. Note caller unconditionally
1268 * has to enter a receive only matching messages containing
1269 * 'ref', or restore save pointer.
1270 */
1271 ERTS_RECV_MARK_SAVE(c_p);
1272 ERTS_RECV_MARK_SET(c_p);
1273 enqueued = erts_proc_sig_send_process_info_request(c_p, pid, item_ix,
1274 len, need_msgq_len,
1275 flags, reserve_size,
1276 ref);
1277 if (!enqueued) {
1278 /* Restore save pointer... */
1279 JOIN_MESSAGE(c_p);
1280 goto undefined;
1281 }
1282 ERTS_BIF_PREP_TRAP1(ret, erts_await_result, c_p, ref);
1283 goto done;
1284 }
1285 }
1286
1287 static void
pi_setup_grow(int ** arr,int * def_arr,Uint * sz,int ix)1288 pi_setup_grow(int **arr, int *def_arr, Uint *sz, int ix)
1289 {
1290 *sz = (ix+1) + ERTS_PI_DEF_ARR_SZ;
1291 if (*arr != def_arr)
1292 *arr = erts_realloc(ERTS_ALC_T_TMP, *arr, (*sz)*sizeof(int));
1293 else {
1294 int *new_arr = erts_alloc(ERTS_ALC_T_TMP, (*sz)*sizeof(int));
1295 sys_memcpy((void *) new_arr, (void *) def_arr,
1296 sizeof(int)*ERTS_PI_DEF_ARR_SZ);
1297 *arr = new_arr;
1298 }
1299 }
1300
1301
process_info_2(BIF_ALIST_2)1302 BIF_RETTYPE process_info_2(BIF_ALIST_2)
1303 {
1304 return process_info_bif(BIF_P, BIF_ARG_1, BIF_ARG_2, !is_atom(BIF_ARG_2), !0);
1305 }
1306
process_info_1(BIF_ALIST_1)1307 BIF_RETTYPE process_info_1(BIF_ALIST_1)
1308 {
1309 return process_info_bif(BIF_P, BIF_ARG_1, pi_1_keys_list, 0, 0);
1310 }
1311
/*
 * Build the value for a single process_info item ('item_ix') about
 * process 'rp' on behalf of calling process 'c_p'.
 *
 * hfact          - heap factory used for all produced terms
 * rp_locks       - locks currently held on 'rp'
 * flags          - ERTS_PI_FLAG_* controlling wrapping/request origin
 * reserve_sizep  - in/out: heap words still reserved for items not yet
 *                  produced; kept up to date so erts_produce_heap() can
 *                  reserve room for the remainder
 * reds           - accumulator for an estimated reduction cost
 *
 * Returns a {Key, Value} 2-tuple (or, for registered_name without
 * ERTS_PI_FLAG_ALWAYS_WRAP, a bare value -- see comment below), or
 * THE_NON_VALUE for an unknown item (caller turns that into badarg).
 */
Eterm
process_info_aux(Process *c_p,
                 ErtsHeapFactory *hfact,
                 Process *rp,
                 ErtsProcLocks rp_locks,
                 int item_ix,
                 int flags,
                 Uint *reserve_sizep,
                 Uint *reds)
{
    Eterm *hp;
    Eterm res = NIL;
    Uint reserved;
    Uint reserve_size = *reserve_sizep;

#ifdef ERTS_ENABLE_LOCK_CHECK
    /* Lock-checker sanity: most items require exactly the main lock on
     * 'rp'; status/priority/suspending tolerate fewer locks. */
    ErtsProcLocks locks = erts_proc_lc_my_proc_locks(rp);

    switch (item_ix) {
    case ERTS_PI_IX_STATUS:
    case ERTS_PI_IX_PRIORITY:
    case ERTS_PI_IX_SUSPENDING:
        ERTS_LC_ASSERT((locks & ~ERTS_PROC_LOCK_MAIN) == 0);
        break;
    default:
        ERTS_LC_ASSERT(locks == ERTS_PROC_LOCK_MAIN);
        break;
    }
#endif

    /* Release this item's share of the reservation; it is produced now. */
    reserved = pi_ix2rsz(item_ix);
    ERTS_PI_UNRESERVE(reserve_size, reserved);

    (*reds)++;

    ASSERT(rp);

    /*
     * Q: Why this ERTS_PI_FLAG_ALWAYS_WRAP flag?
     *
     * A: registered_name is strange. If process has no registered name,
     *    process_info(Pid, registered_name) returns [], and
     *    the result of process_info(Pid) has no {registered_name, Name}
     *    tuple in the resulting list. This is inconsistent with all other
     *    options, but we do not dare to change it.
     *
     *    When process_info/2 is called with a list as second argument,
     *    registered_name behaves as it should, i.e. a
     *    {registered_name, []} will appear in the resulting list.
     *
     *    If ERTS_PI_FLAG_ALWAYS_WRAP is set, process_info_aux() always
     *    wrap the result in a key two tuple.
     */

    switch (item_ix) {

    case ERTS_PI_IX_REGISTERED_NAME:
        if (rp->common.u.alive.reg)
            res = rp->common.u.alive.reg->name;
        else {
            if (flags & ERTS_PI_FLAG_ALWAYS_WRAP)
                res = NIL;
            else
                return NIL;  /* bare [] -- historical behavior, see above */
        }
        break;

    case ERTS_PI_IX_CURRENT_FUNCTION:
        res = current_function(c_p, hfact, rp, 0,
                               reserve_size, flags);
        break;

    case ERTS_PI_IX_CURRENT_LOCATION:
        res = current_function(c_p, hfact, rp, 1,
                               reserve_size, flags);
        break;

    case ERTS_PI_IX_CURRENT_STACKTRACE:
        res = current_stacktrace(hfact, rp, reserve_size);
        break;

    case ERTS_PI_IX_INITIAL_CALL:
        hp = erts_produce_heap(hfact, 4, reserve_size);
        res = TUPLE3(hp,
                     rp->u.initial.module,
                     rp->u.initial.function,
                     make_small(rp->u.initial.arity));
        hp += 4;
        break;

    case ERTS_PI_IX_STATUS: {
        erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
        res = erts_process_state2status(state);
        if (res == am_running && (state & ERTS_PSFLG_RUNNING_SYS)) {
            ASSERT(c_p == rp);
            ASSERT(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER);
            if (!(state & (ERTS_PSFLG_ACTIVE
                           | ERTS_PSFLG_SIG_Q
                           | ERTS_PSFLG_SIG_IN_Q))) {
                int sys_tasks = 0;
                if (state & ERTS_PSFLG_SYS_TASKS)
                    sys_tasks = erts_have_non_prio_elev_sys_tasks(rp,
                                                                  rp_locks);
                if (!sys_tasks) {
                    /*
                     * We are servicing a process-info request from
                     * another process. If that other process could
                     * have inspected our state itself, we would have
                     * been in the 'waiting' state.
                     */
                    res = am_waiting;
                }
            }
        }
        break;
    }

    case ERTS_PI_IX_MESSAGES: {
        ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
        if (rp->sig_qs.len == 0 || (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE))
            res = NIL;
        else {
            int info_on_self = !(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER);
            ErtsMessageInfo *mip;
            Sint i, len;
            Uint heap_need;

            mip = erts_alloc(ERTS_ALC_T_TMP,
                             rp->sig_qs.len*sizeof(ErtsMessageInfo));

            /*
             * Note that message queue may shrink when calling
             * erts_proc_sig_prep_msgq_for_inspection() since it removes
             * corrupt distribution messages.
             */
            heap_need = erts_proc_sig_prep_msgq_for_inspection(c_p, rp,
                                                               rp_locks,
                                                               info_on_self,
                                                               mip);
            len = rp->sig_qs.len;

            heap_need += len*2; /* Cons cells */

            reserve_size += heap_need;

            /* Build list of messages... */
            for (i = len - 1, res = NIL; i >= 0; i--) {
                Eterm msg = ERL_MESSAGE_TERM(mip[i].msgp);
                Uint sz = mip[i].size;

                ERTS_PI_UNRESERVE(reserve_size, sz+2);
                hp = erts_produce_heap(hfact, sz+2, reserve_size);

                if (sz != 0)
                    msg = copy_struct(msg, sz, &hp, hfact->off_heap);

                res = CONS(hp, msg, res);
                hp += 2;
            }

            *reds += (Uint) len / 4;

            erts_free(ERTS_ALC_T_TMP, mip);
        }
        break;
    }

    case ERTS_PI_IX_MESSAGE_QUEUE_LEN: {
        Sint len = rp->sig_qs.len;
        ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
        ASSERT(len >= 0);
        if (len <= MAX_SMALL)
            res = make_small(len);
        else {
            hp = erts_produce_heap(hfact, BIG_UINT_HEAP_SIZE, reserve_size);
            res = uint_to_big((Uint) len, hp);
        }
        break;
    }

    case ERTS_PI_IX_LINKS: {
        MonitorInfoCollection mic;
        int i;
        Eterm item;

        INIT_MONITOR_INFOS(mic);

        erts_link_tree_foreach(ERTS_P_LINKS(rp), collect_one_link, (void *) &mic);

        reserve_size += mic.sz;
        res = NIL;
        for (i = 0; i < mic.mi_i; i++) {
            Eterm item_src = mic.mi[i].entity.term;
            Uint sz = NC_HEAP_SIZE(item_src) + 2;
            ERTS_PI_UNRESERVE(reserve_size, sz);
            hp = erts_produce_heap(hfact, sz, reserve_size);
            item = STORE_NC(&hp, hfact->off_heap, item_src);
            res = CONS(hp, item, res);
        }

        *reds += (Uint) mic.mi_i / 4;

        DESTROY_MONITOR_INFOS(mic);
        break;
    }

    case ERTS_PI_IX_MONITORS: {
        MonitorInfoCollection mic;
        int i;

        INIT_MONITOR_INFOS(mic);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
                                  collect_one_origin_monitor,
                                  (void *) &mic);

        reserve_size += mic.sz;
        res = NIL;
        for (i = 0; i < mic.mi_i; i++) {
            if (mic.mi[i].named) {
                /* Monitor by name.
                 * Build {process|port, {Name, Node}} and cons it.
                 */
                Eterm t1, t2;
                /* If pid is an atom, then it is a remote named monitor, which
                   has to be a process */
                Eterm m_type = is_port(mic.mi[i].pid) ? am_port : am_process;
                ASSERT(is_pid(mic.mi[i].pid)
                       || is_port(mic.mi[i].pid)
                       || is_atom(mic.mi[i].pid));

                ERTS_PI_UNRESERVE(reserve_size, 3+3+2);
                hp = erts_produce_heap(hfact, 3+3+2, reserve_size);

                t1 = TUPLE2(hp, mic.mi[i].entity.term, mic.mi[i].node);
                hp += 3;
                t2 = TUPLE2(hp, m_type, t1);
                hp += 3;
                res = CONS(hp, t2, res);
            }
            else {
                /* Build {process|port|time_offset, Pid|clock_service} and cons it. */
                Eterm t;
                Eterm pid;
                Eterm m_type;
                Eterm pid_src = mic.mi[i].entity.term;
                Uint sz = is_atom(pid_src) ? 0 : NC_HEAP_SIZE(pid_src);
                sz += 3 + 2;

                ERTS_PI_UNRESERVE(reserve_size, sz);
                hp = erts_produce_heap(hfact, sz, reserve_size);

                pid = (is_atom(pid_src)
                       ? pid_src
                       : STORE_NC(&hp, hfact->off_heap, pid_src));

                switch (mic.mi[i].type) {
                case ERTS_MON_TYPE_PORT:
                    m_type = am_port;
                    break;
                case ERTS_MON_TYPE_TIME_OFFSET:
                    m_type = am_time_offset;
                    break;
                default:
                    m_type = am_process;
                    break;
                }

                ASSERT(is_pid(mic.mi[i].pid)
                       || is_port(mic.mi[i].pid));

                t = TUPLE2(hp, m_type, pid);
                hp += 3;
                res = CONS(hp, t, res);
            }
        }

        *reds += (Uint) mic.mi_i / 4;

        DESTROY_MONITOR_INFOS(mic);
        break;
    }

    case ERTS_PI_IX_MONITORED_BY: {
        MonitorInfoCollection mic;
        int i;
        Eterm item;

        INIT_MONITOR_INFOS(mic);
        /* Target monitors live both in the list and the tree. */
        erts_monitor_list_foreach(ERTS_P_LT_MONITORS(rp),
                                  collect_one_target_monitor,
                                  (void *) &mic);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
                                  collect_one_target_monitor,
                                  (void *) &mic);

        reserve_size += mic.sz;

        res = NIL;
        for (i = 0; i < mic.mi_i; ++i) {
            Uint sz = 2;

            if (mic.mi[i].type == ERTS_MON_TYPE_RESOURCE)
                sz += erts_resource_ref_size(mic.mi[i].entity.resource);
            else
                sz += NC_HEAP_SIZE(mic.mi[i].entity.term);

            ERTS_PI_UNRESERVE(reserve_size, sz);
            hp = erts_produce_heap(hfact, sz, reserve_size);

            if (mic.mi[i].type == ERTS_MON_TYPE_RESOURCE)
                item = erts_bld_resource_ref(&hp,
                                             hfact->off_heap,
                                             mic.mi[i].entity.resource);
            else
                item = STORE_NC(&hp,
                                hfact->off_heap,
                                mic.mi[i].entity.term);
            res = CONS(hp, item, res);
        }

        *reds += (Uint) mic.mi_i / 4;

        DESTROY_MONITOR_INFOS(mic);
        break;
    }

    case ERTS_PI_IX_SUSPENDING: {
        ErtsSuspendMonitorInfoCollection smic;
        int i;

        ERTS_INIT_SUSPEND_MONITOR_INFOS(smic);

        erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
                                  collect_one_suspend_monitor,
                                  (void *) &smic);

        reserve_size += smic.sz;

        res = NIL;
        for (i = 0; i < smic.smi_i; i++) {
            ErtsMonitorSuspend *msp;
            erts_aint_t mstate;
            Sint ci;
            Eterm ct, active, pending, item;
            Uint sz = 4 + 2;  /* 3-tuple + cons */

            msp = smic.smi[i];
            mstate = erts_atomic_read_nob(&msp->state);

            ci = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK);
            if (!IS_SSMALL(ci))
                sz += BIG_UINT_HEAP_SIZE;

            ERTS_PI_UNRESERVE(reserve_size, sz);
            hp = erts_produce_heap(hfact, sz, reserve_size);

            if (IS_SSMALL(ci))
                ct = make_small(ci);
            else {
                ct = small_to_big(ci, hp);
                hp += BIG_UINT_HEAP_SIZE;
            }

            /* Counter counts as 'active' once the suspend took effect,
             * otherwise as still 'pending'. */
            if (mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE) {
                active = ct;
                pending = make_small(0);
            }
            else {
                active = make_small(0);
                pending = ct;
            }

            ASSERT(is_internal_pid(msp->md.origin.other.item));

            item = TUPLE3(hp, msp->md.origin.other.item, active, pending);
            hp += 4;
            res = CONS(hp, item, res);
        }

        *reds += (Uint) smic.smi_i / 4;

        ERTS_DESTROY_SUSPEND_MONITOR_INFOS(smic);

        break;
    }

    case ERTS_PI_IX_DICTIONARY:
        if (!rp->dictionary || (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE)) {
            res = NIL;
        } else {
            Uint num = rp->dictionary->numElements;
            res = erts_dictionary_copy(hfact, rp->dictionary, reserve_size);
            *reds += (Uint) num / 4;
        }

        break;

    case ERTS_PI_IX_TRAP_EXIT:
        res = (rp->flags & F_TRAP_EXIT) ? am_true : am_false;
        break;

    case ERTS_PI_IX_ERROR_HANDLER:
        res = erts_proc_get_error_handler(rp);
        break;

    case ERTS_PI_IX_HEAP_SIZE: {
        /* Two-pass build: first compute heap need, then build. */
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, HEAP_SIZE(rp));
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, HEAP_SIZE(rp));
        break;
    }

    case ERTS_PI_IX_FULLSWEEP_AFTER: {
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, MAX_GEN_GCS(rp));
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, MAX_GEN_GCS(rp));
        break;
    }

    case ERTS_PI_IX_MIN_HEAP_SIZE: {
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, MIN_HEAP_SIZE(rp));
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, MIN_HEAP_SIZE(rp));
        break;
    }

    case ERTS_PI_IX_MIN_BIN_VHEAP_SIZE: {
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, MIN_VHEAP_SIZE(rp));
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, MIN_VHEAP_SIZE(rp));
        break;
    }

    case ERTS_PI_IX_MAX_HEAP_SIZE: {
        Uint hsz = 0;
        (void) erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
                                      MAX_HEAP_SIZE_FLAGS_GET(rp),
                                      NULL, &hsz);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
                                     MAX_HEAP_SIZE_FLAGS_GET(rp),
                                     &hp, NULL);
        break;
    }

    case ERTS_PI_IX_TOTAL_HEAP_SIZE: {
        Uint total_heap_size;
        Uint hsz = 0;

        total_heap_size = rp->heap_sz;
        if (rp->old_hend && rp->old_heap)
            total_heap_size += rp->old_hend - rp->old_heap;

        total_heap_size += rp->mbuf_sz;

        /* On-heap message queue data is counted too. */
        if (rp->sig_qs.flags & FS_ON_HEAP_MSGQ) {
            ErtsMessage *mp;
            ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
            for (mp = rp->sig_qs.first; mp; mp = mp->next) {
                ASSERT(ERTS_SIG_IS_MSG(mp));
                if (mp->data.attached)
                    total_heap_size += erts_msg_attached_data_size(mp);
            }
            *reds += (Uint) rp->sig_qs.len / 4;
        }

        (void) erts_bld_uint(NULL, &hsz, total_heap_size);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, total_heap_size);
        break;
    }

    case ERTS_PI_IX_STACK_SIZE: {
        Uint stack_size = STACK_START(rp) - rp->stop;
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, stack_size);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, stack_size);
        break;
    }

    case ERTS_PI_IX_MEMORY: { /* Memory consumed in bytes */
        Uint hsz = 0;
        Uint size = erts_process_memory(rp, 0);
        (void) erts_bld_uint(NULL, &hsz, size);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, size);

        ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
        *reds += (Uint) rp->sig_qs.len / 4;

        break;
    }

    case ERTS_PI_IX_GARBAGE_COLLECTION: {
        DECL_AM(minor_gcs);
        Eterm t;
        Uint map_sz = 0;

        erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), MAX_HEAP_SIZE_FLAGS_GET(rp), NULL, &map_sz);

        /* Five {Key,Val}+cons pairs plus the max_heap_size map. */
        hp = erts_produce_heap(hfact, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + map_sz, reserve_size);

        t = TUPLE2(hp, AM_minor_gcs, make_small(GEN_GCS(rp))); hp += 3;
        res = CONS(hp, t, NIL); hp += 2;
        t = TUPLE2(hp, am_fullsweep_after, make_small(MAX_GEN_GCS(rp))); hp += 3;
        res = CONS(hp, t, res); hp += 2;

        t = TUPLE2(hp, am_min_heap_size, make_small(MIN_HEAP_SIZE(rp))); hp += 3;
        res = CONS(hp, t, res); hp += 2;
        t = TUPLE2(hp, am_min_bin_vheap_size, make_small(MIN_VHEAP_SIZE(rp))); hp += 3;
        res = CONS(hp, t, res); hp += 2;

        t = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), MAX_HEAP_SIZE_FLAGS_GET(rp), &hp, NULL);

        t = TUPLE2(hp, am_max_heap_size, t); hp += 3;
        res = CONS(hp, t, res); hp += 2;
        break;
    }

    case ERTS_PI_IX_GARBAGE_COLLECTION_INFO: {
        Uint sz = 0, actual_sz = 0;

        erts_process_gc_info(rp, &sz, NULL, 0, 0);

        hp = erts_produce_heap(hfact, sz, reserve_size);
        res = erts_process_gc_info(rp, &actual_sz, &hp, 0, 0);

        break;
    }

    case ERTS_PI_IX_GROUP_LEADER: {
        int sz = NC_HEAP_SIZE(rp->group_leader);
        hp = erts_produce_heap(hfact, sz, reserve_size);
        res = STORE_NC(&hp, hfact->off_heap, rp->group_leader);
        break;
    }

    case ERTS_PI_IX_REDUCTIONS: {
        /* Include reductions consumed in the current (unfinished) slice. */
        Uint reds = rp->reds + erts_current_reductions(c_p, rp);
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, reds);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, reds);
        break;
    }

    case ERTS_PI_IX_PRIORITY: {
        erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
        if (ERTS_PSFLG_EXITING & state)
            return am_undefined;
        res = erts_get_process_priority(state);
        break;
    }

    case ERTS_PI_IX_TRACE:
        res = make_small(ERTS_TRACE_FLAGS(rp) & TRACEE_FLAGS);
        break;

    case ERTS_PI_IX_BINARY: {
        ErlHeapFragment *hfrag;
        Uint sz;

        res = NIL;
        sz = 0;

        /* Size pass over the process and all heap fragments... */
        (void)erts_bld_bin_list(NULL, &sz, &MSO(rp), NIL);
        for (hfrag = rp->mbuf; hfrag != NULL; hfrag = hfrag->next) {
            (void)erts_bld_bin_list(NULL, &sz, &hfrag->off_heap, NIL);
        }

        hp = erts_produce_heap(hfact, sz, reserve_size);

        /* ...then the build pass. */
        res = erts_bld_bin_list(&hp, NULL, &MSO(rp), NIL);
        for (hfrag = rp->mbuf; hfrag != NULL; hfrag = hfrag->next) {
            res = erts_bld_bin_list(&hp, NULL, &hfrag->off_heap, res);
        }

        break;
    }

    case ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN: {
        Uint sz = size_object(rp->seq_trace_token);
        hp = erts_produce_heap(hfact, sz, reserve_size);
        res = copy_struct(rp->seq_trace_token, sz, &hp, hfact->off_heap);
        break;
    }

    case ERTS_PI_IX_CATCHLEVEL:
        res = make_small(catchlevel(rp));
        break;

    case ERTS_PI_IX_BACKTRACE: {
        /* Render the stack dump into a temporary buffer, then copy it
         * into a binary on the heap. */
        erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
        erts_stack_dump(ERTS_PRINT_DSBUF, (void *) dsbufp, rp);
        res = erts_heap_factory_new_binary(hfact, (byte *) dsbufp->str,
                                           dsbufp->str_len, reserve_size);
        erts_destroy_tmp_dsbuf(dsbufp);
        break;
    }

    case ERTS_PI_IX_LAST_CALLS: {
        struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(rp);
        if (!scb) {
            res = am_false;
        } else {
            /*
             * One cons cell and a 3-struct, and a 2-tuple.
             * Might be less than that, if there are sends, receives or timeouts,
             * so we must do a HRelease() to avoid creating holes.
             */
            Sint needed = scb->n*(2+4);
            Eterm term, list;
            int i, j;
            Export *exp;

            reserve_size += needed;

            list = NIL;
            for (i = 0; i < scb->n; i++) {
                Uint sz;
                /* Walk the circular buffer backwards from the cursor. */
                j = scb->cur - i - 1;
                if (j < 0)
                    j += scb->len;

                sz = 2;
                exp = scb->ct[j];
                if (exp != &exp_send && exp != &exp_receive && exp != &exp_timeout)
                    sz += 4;

                needed -= sz;
                ERTS_PI_UNRESERVE(reserve_size, sz);
                hp = erts_produce_heap(hfact, sz, reserve_size);

                if (exp == &exp_send)
                    term = am_send;
                else if (exp == &exp_receive)
                    term = am_receive;
                else if (exp == &exp_timeout)
                    term = am_timeout;
                else {
                    term = TUPLE3(hp,
                                  scb->ct[j]->info.mfa.module,
                                  scb->ct[j]->info.mfa.function,
                                  make_small(scb->ct[j]->info.mfa.arity));
                    hp += 4;
                }
                list = CONS(hp, term, list);
            }

            /* Drop any over-reservation from the estimate above. */
            ASSERT(needed >= 0);
            if (needed > 0)
                reserve_size -= needed;

            res = list;
        }
        break;
    }

    case ERTS_PI_IX_MESSAGE_QUEUE_DATA:
        switch (rp->sig_qs.flags & (FS_OFF_HEAP_MSGQ|FS_ON_HEAP_MSGQ)) {
        case FS_OFF_HEAP_MSGQ:
            res = am_off_heap;
            break;
        case FS_ON_HEAP_MSGQ:
            res = am_on_heap;
            break;
        default:
            res = am_error;
            ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
            break;
        }
        break;

    case ERTS_PI_IX_MAGIC_REF: {
        Uint sz = 0;
        (void) bld_magic_ref_bin_list(NULL, &sz, &MSO(rp));
        hp = erts_produce_heap(hfact, sz, 0);
        res = bld_magic_ref_bin_list(&hp, NULL, &MSO(rp));

        *reds += (Uint) 10;
        break;
    }

    default:
        return THE_NON_VALUE; /* will produce badarg */

    }

    /* Wrap the result in {Key, Value} and publish the updated reserve. */
    ERTS_PI_UNRESERVE(reserve_size, 3);
    *reserve_sizep = reserve_size;
    hp = erts_produce_heap(hfact, 3, reserve_size);

    return TUPLE2(hp, pi_ix2arg(item_ix), res);
}
2011 #undef MI_INC
2012
/*
 * Build the current_function / current_location value for 'rp'.
 *
 * full_info != 0 selects location info (current_location); otherwise a
 * plain {M, F, A} tuple is built (current_function). May update the
 * rp->current MFA cache as a side effect. Heap is taken from 'hfact'
 * honoring 'reserve_size'.
 */
static Eterm
current_function(Process *c_p, ErtsHeapFactory *hfact, Process* rp,
                 int full_info, Uint reserve_size, int flags)
{
    Eterm* hp;
    Eterm res;
    FunctionInfo fi;

    if (rp->current == NULL) {
        /* No cached MFA; look it up from the instruction pointer. */
        erts_lookup_function_info(&fi, rp->i, full_info);
        rp->current = fi.mfa;
    } else if (full_info) {
        erts_lookup_function_info(&fi, rp->i, full_info);
        if (fi.mfa == NULL) {
            /* Use the current function without location info */
            erts_set_current_function(&fi, rp->current);
        }
    }

    if (c_p == rp && !(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER)) {
        FunctionInfo fi2;

        /*
         * The current function is erlang:process_info/{1,2},
         * which is not the answer that the application want.
         * We will use the function pointed into by rp->cp
         * instead if it can be looked up.
         */
        erts_lookup_function_info(&fi2, rp->cp, full_info);
        if (fi2.mfa) {
            fi = fi2;
            rp->current = fi2.mfa;
        }
    }

    /*
     * Return the result.
     */
    if (rp->current == NULL) {
        res = am_undefined;
    } else if (full_info) {
        hp = erts_produce_heap(hfact, fi.needed, reserve_size);
        erts_build_mfa_item(&fi, hp, am_true, &res);
    } else {
        hp = erts_produce_heap(hfact, 4, reserve_size);
        res = TUPLE3(hp, rp->current->module,
                     rp->current->function,
                     make_small(rp->current->arity));
    }
    return res;
}
2064
/*
 * Build the current_stacktrace value for 'rp': a list of MFA-with-
 * location items, innermost frame first, limited by
 * erts_backtrace_depth. Uses temporary allocations for the raw trace
 * and the looked-up FunctionInfo entries; heap terms come from 'hfact'.
 */
static Eterm
current_stacktrace(ErtsHeapFactory *hfact, Process* rp,
                   Uint reserve_size)
{
    Uint sz;
    struct StackTrace* s;
    int depth;
    FunctionInfo* stk;
    FunctionInfo* stkp;
    Uint heap_size;
    int i;
    Eterm* hp;
    Eterm mfa;
    Eterm res = NIL;

    depth = erts_backtrace_depth;
    sz = offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth;
    s = (struct StackTrace *) erts_alloc(ERTS_ALC_T_TMP, sz);
    s->depth = 0;
    /* Seed the trace with the current instruction and continuation
     * pointers before scanning the stack proper. */
    if (depth > 0 && rp->i) {
        s->trace[s->depth++] = rp->i;
        depth--;
    }
    if (depth > 0 && rp->cp != 0) {
        s->trace[s->depth++] = rp->cp - 1;
        depth--;
    }
    erts_save_stacktrace(rp, s, depth);

    depth = s->depth;
    stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
                                             depth*sizeof(FunctionInfo));
    /* Sizing pass: only frames that resolve to an MFA are kept. */
    heap_size = 3;
    for (i = 0; i < depth; i++) {
        erts_lookup_function_info(stkp, s->trace[i], 1);
        if (stkp->mfa) {
            heap_size += stkp->needed + 2;
            stkp++;
        }
    }

    reserve_size += heap_size;

    /*
     * We intentionally produce heap in small chunks
     * (for more info see process_info_aux()).
     */
    while (stkp > stk) {
        stkp--;
        sz = stkp->needed + 2;
        ERTS_PI_UNRESERVE(reserve_size, sz);
        hp = erts_produce_heap(hfact, sz, reserve_size);
        hp = erts_build_mfa_item(stkp, hp, am_true, &mfa);
        res = CONS(hp, mfa, res);
    }

    erts_free(ERTS_ALC_T_TMP, stk);
    erts_free(ERTS_ALC_T_TMP, s);
    return res;
}
2125
2126 /*
2127 * This function takes care of calls to erlang:system_info/1 when the argument
2128 * is a tuple.
2129 */
static BIF_RETTYPE
info_1_tuple(Process* BIF_P,    /* Pointer to current process. */
             Eterm* tp,         /* Pointer to first element in tuple */
             int arity)         /* Arity of tuple (untagged). */
{
    Eterm ret;
    Eterm sel;

    /* First tuple element selects the info category; remaining
     * element(s) are category-specific arguments. */
    sel = *tp++;

    if (sel == am_memory_internal) {
        switch (arity) {
        case 3:
            if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1, 1))
                return am_true;
            /* Fall through to badarg when the request is rejected. */
        default:
            goto badarg;
        }
    }
    else if (sel == am_allocator_sizes) {
        switch (arity) {
        case 2:
            ERTS_BIF_PREP_TRAP1(ret, alloc_sizes_trap, BIF_P, *tp);
            return ret;
        case 3:
            if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1, 0))
                return am_true;
            /* Fall through to badarg when the request is rejected. */
        default:
            goto badarg;
        }
    }
    else if (sel == am_wordsize && arity == 2) {
        if (tp[0] == am_internal) {
            return make_small(sizeof(Eterm));
        }
        if (tp[0] == am_external) {
            return make_small(sizeof(UWord));
        }
        goto badarg;
    } else if (sel == am_allocator) {
        switch (arity) {
        case 2:
            ERTS_BIF_PREP_TRAP1(ret, alloc_info_trap, BIF_P, *tp);
            return ret;
        case 3:
            if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 0, 0))
                return am_true;
            /* Fall through to badarg when the request is rejected. */
        default:
            goto badarg;
        }
    } else if (ERTS_IS_ATOM_STR("internal_cpu_topology", sel) && arity == 2) {
        return erts_get_cpu_topology_term(BIF_P, *tp);
    } else if (ERTS_IS_ATOM_STR("cpu_topology", sel) && arity == 2) {
        Eterm res = erts_get_cpu_topology_term(BIF_P, *tp);
        if (res == THE_NON_VALUE)
            goto badarg;
        ERTS_BIF_PREP_TRAP1(ret, erts_format_cpu_topology_trap, BIF_P, res);
        return ret;
#if defined(PURIFY) || defined(VALGRIND)
    } else if (ERTS_IS_ATOM_STR("error_checker", sel)
#if defined(PURIFY)
               || sel == am_purify
#elif defined(VALGRIND)
               || ERTS_IS_ATOM_STR("valgrind", sel)
#endif
        ) {
        /* Debug hooks for memory checkers (Purify/Valgrind builds only). */
        if (*tp == am_memory) {
#if defined(PURIFY)
            BIF_RET(erts_make_integer(purify_new_leaks(), BIF_P));
#elif defined(VALGRIND)
# ifdef VALGRIND_DO_ADDED_LEAK_CHECK
            VALGRIND_DO_ADDED_LEAK_CHECK;
# else
            VALGRIND_DO_LEAK_CHECK;
# endif
            BIF_RET(make_small(0));
#endif
        } else if (*tp == am_fd) {
#if defined(PURIFY)
            BIF_RET(erts_make_integer(purify_new_fds_inuse(), BIF_P));
#elif defined(VALGRIND)
            /* Not present in valgrind... */
            BIF_RET(make_small(0));
#endif
        } else if (*tp == am_running) {
#if defined(PURIFY)
            BIF_RET(purify_is_running() ? am_true : am_false);
#elif defined(VALGRIND)
            BIF_RET(RUNNING_ON_VALGRIND ? am_true : am_false);
#endif
        } else if (is_list(*tp)) {
#if defined(PURIFY)
# define ERTS_ERROR_CHECKER_PRINTF purify_printf
#elif defined(VALGRIND)
# define ERTS_ERROR_CHECKER_PRINTF VALGRIND_PRINTF
#endif
            /* Print an iolist through the checker's log facility;
             * retry with an exact-size buffer if 8KB is too small. */
            ErlDrvSizeT buf_size = 8*1024; /* Try with 8KB first */
            char *buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
            ErlDrvSizeT r = erts_iolist_to_buf(*tp, (char*) buf, buf_size - 1);
            if (ERTS_IOLIST_TO_BUF_FAILED(r)) {
                erts_free(ERTS_ALC_T_TMP, (void *) buf);
                if (erts_iolist_size(*tp, &buf_size)) {
                    goto badarg;
                }
                buf_size++;
                buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
                r = erts_iolist_to_buf(*tp, (char*) buf, buf_size - 1);
                ASSERT(r == buf_size - 1);
            }
            buf[buf_size - 1 - r] = '\0';
            ERTS_ERROR_CHECKER_PRINTF("%s\n", buf);
            erts_free(ERTS_ALC_T_TMP, (void *) buf);
            BIF_RET(am_true);
#undef ERTS_ERROR_CHECKER_PRINTF
        }
#endif
#ifdef QUANTIFY
    } else if (sel == am_quantify) {
        /* Quantify profiler control (Quantify builds only). */
        if (*tp == am_clear) {
            quantify_clear_data();
            BIF_RET(am_true);
        } else if (*tp == am_start) {
            quantify_start_recording_data();
            BIF_RET(am_true);
        } else if (*tp == am_stop) {
            quantify_stop_recording_data();
            BIF_RET(am_true);
        } else if (*tp == am_running) {
            BIF_RET(quantify_is_running() ? am_true : am_false);
        }
#endif
#if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
    } else if (ERTS_IS_ATOM_STR("ultrasparc_set_pcr", sel)) {
        /* Program the UltraSPARC performance control register via
         * /dev/perfmon (Solaris/SPARC builds only). */
        unsigned long long tmp;
        int fd;
        int rc;

        if (arity != 2 || !is_small(*tp)) {
            goto badarg;
        }
        tmp = signed_val(*tp);
        if ((fd = open("/dev/perfmon", O_RDONLY)) == -1) {
            BIF_RET(am_false);
        }
        rc = ioctl(fd, PERFMON_SETPCR, &tmp);
        close(fd);
        if (rc < 0) {
            BIF_RET(am_false);
        }
        BIF_RET(am_true);
#endif
    }

badarg:
    ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);

    return ret;
}
2288
2289 #define INFO_DSBUF_INC_SZ 256
2290
2291 static erts_dsprintf_buf_t *
grow_info_dsbuf(erts_dsprintf_buf_t * dsbufp,size_t need)2292 grow_info_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
2293 {
2294 size_t size;
2295 size_t free_size = dsbufp->size - dsbufp->str_len;
2296
2297 ASSERT(dsbufp);
2298
2299 if (need <= free_size)
2300 return dsbufp;
2301 size = need - free_size + INFO_DSBUF_INC_SZ;
2302 size = ((size + INFO_DSBUF_INC_SZ - 1)/INFO_DSBUF_INC_SZ)*INFO_DSBUF_INC_SZ;
2303 size += dsbufp->size;
2304 ASSERT(dsbufp->str_len + need <= size);
2305 dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_INFO_DSBUF,
2306 (void *) dsbufp->str,
2307 size);
2308 dsbufp->size = size;
2309 return dsbufp;
2310 }
2311
2312 static erts_dsprintf_buf_t *
erts_create_info_dsbuf(Uint size)2313 erts_create_info_dsbuf(Uint size)
2314 {
2315 Uint init_size = size ? size : INFO_DSBUF_INC_SZ;
2316 erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_info_dsbuf);
2317 erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_INFO_DSBUF,
2318 sizeof(erts_dsprintf_buf_t));
2319 sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
2320 dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_INFO_DSBUF, init_size);
2321 dsbufp->str[0] = '\0';
2322 dsbufp->size = init_size;
2323 return dsbufp;
2324 }
2325
2326 static void
erts_destroy_info_dsbuf(erts_dsprintf_buf_t * dsbufp)2327 erts_destroy_info_dsbuf(erts_dsprintf_buf_t *dsbufp)
2328 {
2329 if (dsbufp->str)
2330 erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp->str);
2331 erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp);
2332 }
2333
/*
 * Build a term describing the C compiler this emulator was built with,
 * for system_info(c_compiler_used).
 *
 * Follows the two-pass erts_bld_*() convention used elsewhere in this
 * file: called once with (NULL, &sz) to accumulate the needed heap size,
 * then with (&hp, NULL) to actually build the term.
 *
 * Result shape:
 *   gcc:   {gnuc, Major} or {gnuc, {Major, Minor[, Patchlevel]}}
 *          depending on which __GNUC_* macros the compiler defines
 *   MSVC:  {msc, _MSC_VER}
 *   other: {undefined, undefined}
 *
 * NOTE: the gcc case builds the nested version tuple by opening and
 * closing the erts_bld_tuple() call across #if blocks — the
 * parenthesization only balances because ERTS_GNUC_VSN_NUMS is
 * consistent between the opening and closing #if tests.  Do not
 * reformat this call without re-checking every preprocessor arm.
 */
static Eterm
c_compiler_used(Eterm **hpp, Uint *szp)
{

#if defined(__GNUC__)
#  if defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#    define ERTS_GNUC_VSN_NUMS 3
#  elif defined(__GNUC_MINOR__)
#    define ERTS_GNUC_VSN_NUMS 2
#  else
#    define ERTS_GNUC_VSN_NUMS 1
#  endif
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  erts_bld_atom(hpp, szp, "gnuc"),
#if ERTS_GNUC_VSN_NUMS > 1
			  erts_bld_tuple(hpp,
					 szp,
					 ERTS_GNUC_VSN_NUMS,
#endif
					 erts_bld_uint(hpp, szp,
						       (Uint) __GNUC__)
#ifdef __GNUC_MINOR__
					 ,
					 erts_bld_uint(hpp, szp,
						       (Uint) __GNUC_MINOR__)
#ifdef __GNUC_PATCHLEVEL__
					 ,
					 erts_bld_uint(hpp, szp,
						       (Uint) __GNUC_PATCHLEVEL__)
#endif
#endif
#if ERTS_GNUC_VSN_NUMS > 1
			      )
#endif
	);

#elif defined(_MSC_VER)
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  erts_bld_atom(hpp, szp, "msc"),
			  erts_bld_uint(hpp, szp, (Uint) _MSC_VER));

#else
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  am_undefined,
			  am_undefined);
#endif

}
2388
/*
 * Decide whether a BIF belonging to module 'module_atom' should be
 * reported by system_info(snifs).  A module name containing a '.' is
 * considered a bogus (package-style) entry and is filtered out.
 */
static int is_snif_term(Eterm module_atom) {
    Atom *atp = atom_tab(atom_val(module_atom));
    char *text = (char *) atp->name;
    int pos = 0;

    /* Atom text is not NUL-terminated; scan exactly atp->len bytes. */
    while (pos < atp->len) {
        if (text[pos] == '.')
            return 0;
        pos++;
    }

    return 1;
}
2402
/*
 * Prepend a {Module, Name, Arity} descriptor for bif_table entry 'ix'
 * onto the list 'res'.  Uses the two-pass erts_bld_*() convention
 * (size pass with szp, build pass with hpp).
 */
static Eterm build_snif_term(Eterm **hpp, Uint *szp, int ix, Eterm res) {
    Eterm mfa;

    mfa = erts_bld_tuple(hpp, szp, 3,
                         bif_table[ix].module,
                         bif_table[ix].name,
                         make_small(bif_table[ix].arity));
    return erts_bld_cons(hpp, szp, mfa, res);
}
2409
/*
 * Build the full system_info(snifs) list by folding every valid
 * bif_table entry (see is_snif_term()) onto 'res'.
 */
static Eterm build_snifs_term(Eterm **hpp, Uint *szp, Eterm res) {
    int ix;

    for (ix = 0; ix < BIF_SIZE; ix++) {
        if (!is_snif_term(bif_table[ix].module))
            continue;
        res = build_snif_term(hpp, szp, ix, res);
    }
    return res;
}
2419
system_info_1(BIF_ALIST_1)2420 BIF_RETTYPE system_info_1(BIF_ALIST_1)
2421 {
2422 Eterm res;
2423 Eterm* hp;
2424 Eterm val;
2425 int i;
2426
2427 if (is_tuple(BIF_ARG_1)) {
2428 Eterm* tp = tuple_val(BIF_ARG_1);
2429 Uint arity = *tp++;
2430 return info_1_tuple(BIF_P, tp, arityval(arity));
2431 } else if (BIF_ARG_1 == am_scheduler_id) {
2432 ErtsSchedulerData *esdp = erts_proc_sched_data(BIF_P);
2433 BIF_RET(make_small(esdp->no));
2434 } else if (BIF_ARG_1 == am_compat_rel) {
2435 ASSERT(erts_compat_rel > 0);
2436 BIF_RET(make_small(erts_compat_rel));
2437 } else if (BIF_ARG_1 == am_multi_scheduling) {
2438 {
2439 int msb = erts_is_multi_scheduling_blocked();
2440 BIF_RET(!msb
2441 ? am_enabled
2442 : (msb > 0
2443 ? am_blocked
2444 : am_blocked_normal));
2445 }
2446 } else if (BIF_ARG_1 == am_build_type) {
2447 #if defined(DEBUG)
2448 ERTS_DECL_AM(debug);
2449 BIF_RET(AM_debug);
2450 #elif defined(PURIFY)
2451 ERTS_DECL_AM(purify);
2452 BIF_RET(AM_purify);
2453 #elif defined(QUANTIFY)
2454 ERTS_DECL_AM(quantify);
2455 BIF_RET(AM_quantify);
2456 #elif defined(PURECOV)
2457 ERTS_DECL_AM(purecov);
2458 BIF_RET(AM_purecov);
2459 #elif defined(ERTS_GCOV)
2460 ERTS_DECL_AM(gcov);
2461 BIF_RET(AM_gcov);
2462 #elif defined(VALGRIND)
2463 ERTS_DECL_AM(valgrind);
2464 BIF_RET(AM_valgrind);
2465 #elif defined(GPROF)
2466 ERTS_DECL_AM(gprof);
2467 BIF_RET(AM_gprof);
2468 #elif defined(ERTS_ENABLE_LOCK_COUNT)
2469 ERTS_DECL_AM(lcnt);
2470 BIF_RET(AM_lcnt);
2471 #elif defined(ERTS_FRMPTR)
2472 ERTS_DECL_AM(frmptr);
2473 BIF_RET(AM_frmptr);
2474 #else
2475 BIF_RET(am_opt);
2476 #endif
2477 BIF_RET(res);
2478 } else if (BIF_ARG_1 == am_time_offset) {
2479 switch (erts_time_offset_state()) {
2480 case ERTS_TIME_OFFSET_PRELIMINARY: {
2481 ERTS_DECL_AM(preliminary);
2482 BIF_RET(AM_preliminary);
2483 }
2484 case ERTS_TIME_OFFSET_FINAL: {
2485 ERTS_DECL_AM(final);
2486 BIF_RET(AM_final);
2487 }
2488 case ERTS_TIME_OFFSET_VOLATILE: {
2489 ERTS_DECL_AM(volatile);
2490 BIF_RET(AM_volatile);
2491 }
2492 default:
2493 ERTS_INTERNAL_ERROR("Invalid time offset state");
2494 }
2495 } else if (ERTS_IS_ATOM_STR("os_monotonic_time_source", BIF_ARG_1)) {
2496 BIF_RET(erts_monotonic_time_source(BIF_P));
2497 } else if (ERTS_IS_ATOM_STR("os_system_time_source", BIF_ARG_1)) {
2498 BIF_RET(erts_system_time_source(BIF_P));
2499 } else if (ERTS_IS_ATOM_STR("time_correction", BIF_ARG_1)) {
2500 BIF_RET(erts_has_time_correction() ? am_true : am_false);
2501 } else if (ERTS_IS_ATOM_STR("start_time", BIF_ARG_1)) {
2502 BIF_RET(erts_get_monotonic_start_time(BIF_P));
2503 } else if (ERTS_IS_ATOM_STR("end_time", BIF_ARG_1)) {
2504 BIF_RET(erts_get_monotonic_end_time(BIF_P));
2505 } else if (ERTS_IS_ATOM_STR("time_warp_mode", BIF_ARG_1)) {
2506 switch (erts_time_warp_mode()) {
2507 case ERTS_NO_TIME_WARP_MODE: {
2508 ERTS_DECL_AM(no_time_warp);
2509 BIF_RET(AM_no_time_warp);
2510 }
2511 case ERTS_SINGLE_TIME_WARP_MODE: {
2512 ERTS_DECL_AM(single_time_warp);
2513 BIF_RET(AM_single_time_warp);
2514 }
2515 case ERTS_MULTI_TIME_WARP_MODE: {
2516 ERTS_DECL_AM(multi_time_warp);
2517 BIF_RET(AM_multi_time_warp);
2518 }
2519 default:
2520 ERTS_INTERNAL_ERROR("Invalid time warp mode");
2521 }
2522 } else if (BIF_ARG_1 == am_outstanding_system_requests_limit) {
2523 Uint val = erts_get_outstanding_system_requests_limit();
2524 BIF_RET(make_small(val));
2525 } else if (BIF_ARG_1 == am_allocated_areas) {
2526 res = erts_allocated_areas(NULL, NULL, BIF_P);
2527 BIF_RET(res);
2528 } else if (BIF_ARG_1 == am_hipe_architecture) {
2529 #if defined(HIPE)
2530 BIF_RET(hipe_arch_name);
2531 #else
2532 BIF_RET(am_undefined);
2533 #endif
2534 } else if (BIF_ARG_1 == am_trace_control_word) {
2535 BIF_RET(db_get_trace_control_word(BIF_P));
2536 } else if (ERTS_IS_ATOM_STR("ets_realloc_moves", BIF_ARG_1)) {
2537 BIF_RET((erts_ets_realloc_always_moves) ? am_true : am_false);
2538 } else if (ERTS_IS_ATOM_STR("ets_always_compress", BIF_ARG_1)) {
2539 BIF_RET((erts_ets_always_compress) ? am_true : am_false);
2540 } else if (ERTS_IS_ATOM_STR("snifs", BIF_ARG_1)) {
2541 Uint size = 0;
2542 Uint *szp;
2543
2544 szp = &size;
2545 build_snifs_term(NULL, szp, NIL);
2546 hp = HAlloc(BIF_P, size);
2547 res = build_snifs_term(&hp, NULL, NIL);
2548 BIF_RET(res);
2549 } else if (BIF_ARG_1 == am_sequential_tracer) {
2550 ErtsTracer seq_tracer = erts_get_system_seq_tracer();
2551 val = erts_tracer_to_term(BIF_P, seq_tracer);
2552 hp = HAlloc(BIF_P, 3);
2553 res = TUPLE2(hp, am_sequential_tracer, val);
2554 BIF_RET(res);
2555 } else if (BIF_ARG_1 == am_garbage_collection){
2556 Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs);
2557 Eterm tup;
2558 hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2);
2559
2560 tup = TUPLE2(hp, am_fullsweep_after, make_small(val)); hp += 3;
2561 res = CONS(hp, tup, NIL); hp += 2;
2562
2563 tup = TUPLE2(hp, am_min_heap_size, make_small(H_MIN_SIZE)); hp += 3;
2564 res = CONS(hp, tup, res); hp += 2;
2565
2566 tup = TUPLE2(hp, am_min_bin_vheap_size, make_small(BIN_VH_MIN_SIZE)); hp += 3;
2567 res = CONS(hp, tup, res); hp += 2;
2568
2569 tup = TUPLE2(hp, am_max_heap_size, make_small(H_MAX_SIZE)); hp += 3;
2570 res = CONS(hp, tup, res); hp += 2;
2571
2572 BIF_RET(res);
2573 } else if (BIF_ARG_1 == am_fullsweep_after){
2574 Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs);
2575 hp = HAlloc(BIF_P, 3);
2576 res = TUPLE2(hp, am_fullsweep_after, make_small(val));
2577 BIF_RET(res);
2578 } else if (BIF_ARG_1 == am_min_heap_size) {
2579 hp = HAlloc(BIF_P, 3);
2580 res = TUPLE2(hp, am_min_heap_size,make_small(H_MIN_SIZE));
2581 BIF_RET(res);
2582 } else if (BIF_ARG_1 == am_max_heap_size) {
2583 Uint sz = 0;
2584 erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, NULL, &sz);
2585 hp = HAlloc(BIF_P, sz);
2586 res = erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, &hp, NULL);
2587 BIF_RET(res);
2588 } else if (BIF_ARG_1 == am_min_bin_vheap_size) {
2589 hp = HAlloc(BIF_P, 3);
2590 res = TUPLE2(hp, am_min_bin_vheap_size,make_small(BIN_VH_MIN_SIZE));
2591 BIF_RET(res);
2592 } else if (BIF_ARG_1 == am_process_count) {
2593 BIF_RET(make_small(erts_ptab_count(&erts_proc)));
2594 } else if (BIF_ARG_1 == am_process_limit) {
2595 BIF_RET(make_small(erts_ptab_max(&erts_proc)));
2596 } else if (BIF_ARG_1 == am_port_count) {
2597 BIF_RET(make_small(erts_ptab_count(&erts_port)));
2598 } else if (BIF_ARG_1 == am_port_limit) {
2599 BIF_RET(make_small(erts_ptab_max(&erts_port)));
2600 } else if (BIF_ARG_1 == am_info
2601 || BIF_ARG_1 == am_procs
2602 || BIF_ARG_1 == am_loaded
2603 || BIF_ARG_1 == am_dist) {
2604 erts_dsprintf_buf_t *dsbufp = erts_create_info_dsbuf(0);
2605
2606 /* Need to be the only thread running... */
2607 erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
2608 BIF_P->scheduler_data->current_process = NULL;
2609 erts_thr_progress_block();
2610
2611 if (BIF_ARG_1 == am_info)
2612 info(ERTS_PRINT_DSBUF, (void *) dsbufp);
2613 else if (BIF_ARG_1 == am_procs)
2614 process_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
2615 else if (BIF_ARG_1 == am_loaded)
2616 loaded(ERTS_PRINT_DSBUF, (void *) dsbufp);
2617 else
2618 distribution_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
2619
2620 erts_thr_progress_unblock();
2621 erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
2622 BIF_P->scheduler_data->current_process = BIF_P;
2623
2624 ASSERT(dsbufp && dsbufp->str);
2625 res = new_binary(BIF_P, (byte *) dsbufp->str, dsbufp->str_len);
2626 erts_destroy_info_dsbuf(dsbufp);
2627 BIF_RET(res);
2628 } else if (ERTS_IS_ATOM_STR("dist_ctrl", BIF_ARG_1)) {
2629 DistEntry *dep;
2630 i = 0;
2631 erts_rwmtx_rlock(&erts_dist_table_rwmtx);
2632 for (dep = erts_visible_dist_entries; dep; dep = dep->next)
2633 ++i;
2634 for (dep = erts_hidden_dist_entries; dep; dep = dep->next)
2635 ++i;
2636 hp = HAlloc(BIF_P,i*(3+2));
2637 res = NIL;
2638 for (dep = erts_hidden_dist_entries; dep; dep = dep->next) {
2639 Eterm tpl;
2640 ASSERT(is_immed(dep->cid));
2641 tpl = TUPLE2(hp, dep->sysname, dep->cid);
2642 hp +=3;
2643 res = CONS(hp, tpl, res);
2644 hp += 2;
2645 }
2646 for (dep = erts_visible_dist_entries; dep; dep = dep->next) {
2647 Eterm tpl;
2648 ASSERT(is_immed(dep->cid));
2649 tpl = TUPLE2(hp, dep->sysname, dep->cid);
2650 hp +=3;
2651 res = CONS(hp, tpl, res);
2652 hp += 2;
2653 }
2654 erts_rwmtx_runlock(&erts_dist_table_rwmtx);
2655 BIF_RET(res);
2656 } else if (BIF_ARG_1 == am_system_version) {
2657 erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
2658 erts_print_system_version(ERTS_PRINT_DSBUF, (void *) dsbufp, BIF_P);
2659 hp = HAlloc(BIF_P, dsbufp->str_len*2);
2660 res = buf_to_intlist(&hp, dsbufp->str, dsbufp->str_len, NIL);
2661 erts_destroy_tmp_dsbuf(dsbufp);
2662 BIF_RET(res);
2663 } else if (BIF_ARG_1 == am_system_architecture) {
2664 hp = HAlloc(BIF_P, 2*(sizeof(ERLANG_ARCHITECTURE)-1));
2665 BIF_RET(buf_to_intlist(&hp,
2666 ERLANG_ARCHITECTURE,
2667 sizeof(ERLANG_ARCHITECTURE)-1,
2668 NIL));
2669 }
2670 else if (BIF_ARG_1 == am_os_type) {
2671 BIF_RET(os_type_tuple);
2672 }
2673 else if (BIF_ARG_1 == am_allocator) {
2674 BIF_RET(erts_allocator_options((void *) BIF_P));
2675 }
2676 else if (BIF_ARG_1 == am_thread_pool_size) {
2677 extern int erts_async_max_threads;
2678 int n;
2679
2680 n = erts_async_max_threads;
2681 BIF_RET(make_small(n));
2682 }
2683 else if (BIF_ARG_1 == am_alloc_util_allocators) {
2684 BIF_RET(erts_alloc_util_allocators((void *) BIF_P));
2685 }
2686 else if (BIF_ARG_1 == am_elib_malloc) {
2687 /* To be removed in R15 */
2688 BIF_RET(am_false);
2689 }
2690 else if (BIF_ARG_1 == am_os_version) {
2691 BIF_RET(os_version_tuple);
2692 }
2693 else if (BIF_ARG_1 == am_version) {
2694 int n = sys_strlen(ERLANG_VERSION);
2695 hp = HAlloc(BIF_P, ((sizeof ERLANG_VERSION)-1) * 2);
2696 BIF_RET(buf_to_intlist(&hp, ERLANG_VERSION, n, NIL));
2697 }
2698 else if (BIF_ARG_1 == am_machine) {
2699 int n = sys_strlen(EMULATOR);
2700 hp = HAlloc(BIF_P, n*2);
2701 BIF_RET(buf_to_intlist(&hp, EMULATOR, n, NIL));
2702 }
2703 else if (BIF_ARG_1 == am_garbage_collection) {
2704 BIF_RET(am_generational);
2705 #ifdef ERTS_OPCODE_COUNTER_SUPPORT
2706 } else if (BIF_ARG_1 == am_instruction_counts) {
2707 #ifdef DEBUG
2708 Eterm *endp;
2709 #endif
2710 Eterm *hp, **hpp;
2711 Uint hsz, *hszp;
2712 int i;
2713
2714 hpp = NULL;
2715 hsz = 0;
2716 hszp = &hsz;
2717
2718 bld_instruction_counts:
2719
2720 res = NIL;
2721 for (i = num_instructions-1; i >= 0; i--) {
2722 res = erts_bld_cons(hpp, hszp,
2723 erts_bld_tuple(hpp, hszp, 2,
2724 erts_atom_put((byte *)opc[i].name,
2725 sys_strlen(opc[i].name),
2726 ERTS_ATOM_ENC_LATIN1,
2727 1),
2728 erts_bld_uint(hpp, hszp,
2729 erts_instr_count[i])),
2730 res);
2731 }
2732
2733 if (!hpp) {
2734 hp = HAlloc(BIF_P, hsz);
2735 hpp = &hp;
2736 #ifdef DEBUG
2737 endp = hp + hsz;
2738 #endif
2739 hszp = NULL;
2740 goto bld_instruction_counts;
2741 }
2742
2743 ASSERT(endp == hp);
2744
2745 BIF_RET(res);
2746 #endif /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */
2747 } else if (BIF_ARG_1 == am_wordsize) {
2748 return make_small(sizeof(Eterm));
2749 } else if (BIF_ARG_1 == am_endian) {
2750 #if defined(WORDS_BIGENDIAN)
2751 return am_big;
2752 #else
2753 return am_little;
2754 #endif
2755 } else if (BIF_ARG_1 == am_heap_sizes) {
2756 return erts_heap_sizes(BIF_P);
2757 } else if (BIF_ARG_1 == am_heap_type) {
2758 return am_private;
2759 } else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) {
2760 res = erts_get_cpu_topology_term(BIF_P, am_used);
2761 BIF_TRAP1(erts_format_cpu_topology_trap, BIF_P, res);
2762 } else if (ERTS_IS_ATOM_STR("update_cpu_info", BIF_ARG_1)) {
2763 if (erts_update_cpu_info()) {
2764 ERTS_DECL_AM(changed);
2765 BIF_RET(AM_changed);
2766 }
2767 else {
2768 ERTS_DECL_AM(unchanged);
2769 BIF_RET(AM_unchanged);
2770 }
2771 #if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
2772 } else if (ERTS_IS_ATOM_STR("ultrasparc_read_tick1", BIF_ARG_1)) {
2773 register unsigned high asm("%l0");
2774 register unsigned low asm("%l1");
2775
2776 hp = HAlloc(BIF_P, 5);
2777 asm volatile (".word 0xa3410000;" /* rd %tick, %l1 */
2778 ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
2779 : "=r" (high), "=r" (low));
2780 res = TUPLE4(hp, make_small(high >> 16),
2781 make_small(high & 0xFFFF),
2782 make_small(low >> 16),
2783 make_small(low & 0xFFFF));
2784 BIF_RET(res);
2785 } else if (ERTS_IS_ATOM_STR("ultrasparc_read_tick2", BIF_ARG_1)) {
2786 register unsigned high asm("%l0");
2787 register unsigned low asm("%l1");
2788
2789 asm volatile (".word 0xa3410000;" /* rd %tick, %l1 */
2790 ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
2791 : "=r" (high), "=r" (low));
2792 hp = HAlloc(BIF_P, 5);
2793 res = TUPLE4(hp, make_small(high >> 16),
2794 make_small(high & 0xFFFF),
2795 make_small(low >> 16),
2796 make_small(low & 0xFFFF));
2797 BIF_RET(res);
2798 } else if (ERTS_IS_ATOM_STR("ultrasparc_read_pic1", BIF_ARG_1)) {
2799 register unsigned high asm("%l0");
2800 register unsigned low asm("%l1");
2801
2802 hp = HAlloc(BIF_P, 5);
2803 asm volatile (".word 0xa3444000;" /* rd %asr17, %l1 */
2804 ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
2805 : "=r" (high), "=r" (low));
2806 res = TUPLE4(hp, make_small(high >> 16),
2807 make_small(high & 0xFFFF),
2808 make_small(low >> 16),
2809 make_small(low & 0xFFFF));
2810 BIF_RET(res);
2811 } else if (ERTS_IS_ATOM_STR("ultrasparc_read_pic2", BIF_ARG_1)) {
2812 register unsigned high asm("%l0");
2813 register unsigned low asm("%l1");
2814
2815 asm volatile (".word 0xa3444000;" /* rd %asr17, %l1 */
2816 ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
2817 : "=r" (high), "=r" (low));
2818 hp = HAlloc(BIF_P, 5);
2819 res = TUPLE4(hp, make_small(high >> 16),
2820 make_small(high & 0xFFFF),
2821 make_small(low >> 16),
2822 make_small(low & 0xFFFF));
2823 BIF_RET(res);
2824 #endif
2825 } else if (BIF_ARG_1 == am_threads) {
2826 return am_true;
2827 } else if (BIF_ARG_1 == am_creation) {
2828 return make_small(erts_this_node->creation);
2829 } else if (BIF_ARG_1 == am_break_ignored) {
2830 extern int ignore_break;
2831 if (ignore_break)
2832 return am_true;
2833 else
2834 return am_false;
2835 }
2836 /* Arguments that are unusual follow ... */
2837 else if (ERTS_IS_ATOM_STR("logical_processors", BIF_ARG_1)) {
2838 int no;
2839 erts_get_logical_processors(&no, NULL, NULL);
2840 if (no > 0)
2841 BIF_RET(make_small((Uint) no));
2842 else {
2843 DECL_AM(unknown);
2844 BIF_RET(AM_unknown);
2845 }
2846 }
2847 else if (ERTS_IS_ATOM_STR("logical_processors_online", BIF_ARG_1)) {
2848 int no;
2849 erts_get_logical_processors(NULL, &no, NULL);
2850 if (no > 0)
2851 BIF_RET(make_small((Uint) no));
2852 else {
2853 DECL_AM(unknown);
2854 BIF_RET(AM_unknown);
2855 }
2856 }
2857 else if (ERTS_IS_ATOM_STR("logical_processors_available", BIF_ARG_1)) {
2858 int no;
2859 erts_get_logical_processors(NULL, NULL, &no);
2860 if (no > 0)
2861 BIF_RET(make_small((Uint) no));
2862 else {
2863 DECL_AM(unknown);
2864 BIF_RET(AM_unknown);
2865 }
2866 } else if (ERTS_IS_ATOM_STR("otp_release", BIF_ARG_1)) {
2867 int n = sizeof(ERLANG_OTP_RELEASE)-1;
2868 hp = HAlloc(BIF_P, 2*n);
2869 BIF_RET(buf_to_intlist(&hp, ERLANG_OTP_RELEASE, n, NIL));
2870 } else if (ERTS_IS_ATOM_STR("driver_version", BIF_ARG_1)) {
2871 char buf[42];
2872 int n = erts_snprintf(buf, 42, "%d.%d",
2873 ERL_DRV_EXTENDED_MAJOR_VERSION,
2874 ERL_DRV_EXTENDED_MINOR_VERSION);
2875 hp = HAlloc(BIF_P, 2*n);
2876 BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
2877 } else if (ERTS_IS_ATOM_STR("nif_version", BIF_ARG_1)) {
2878 char buf[42];
2879 int n = erts_snprintf(buf, 42, "%d.%d",
2880 ERL_NIF_MAJOR_VERSION,
2881 ERL_NIF_MINOR_VERSION);
2882 hp = HAlloc(BIF_P, 2*n);
2883 BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
2884 } else if (ERTS_IS_ATOM_STR("smp_support", BIF_ARG_1)) {
2885 BIF_RET(am_true);
2886 } else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
2887 BIF_RET(erts_bound_schedulers_term(BIF_P));
2888 } else if (ERTS_IS_ATOM_STR("scheduler_bindings", BIF_ARG_1)) {
2889 BIF_RET(erts_get_schedulers_binds(BIF_P));
2890 } else if (ERTS_IS_ATOM_STR("constant_pool_support", BIF_ARG_1)) {
2891 BIF_RET(am_true);
2892 } else if (ERTS_IS_ATOM_STR("schedulers", BIF_ARG_1)
2893 || ERTS_IS_ATOM_STR("schedulers_total", BIF_ARG_1)) {
2894 res = make_small(erts_no_schedulers);
2895 BIF_RET(res);
2896 } else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
2897 Eterm *hp;
2898 Uint total, online, active;
2899 erts_schedulers_state(&total, &online, &active,
2900 NULL, NULL, NULL, NULL, NULL);
2901 hp = HAlloc(BIF_P, 4);
2902 res = TUPLE3(hp,
2903 make_small(total),
2904 make_small(online),
2905 make_small(active));
2906 BIF_RET(res);
2907 } else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
2908 Eterm *hp;
2909 Uint total, online, active;
2910 erts_schedulers_state(&total, &online, &active,
2911 NULL, NULL, NULL, NULL, NULL);
2912 hp = HAlloc(BIF_P, 4);
2913 res = TUPLE3(hp,
2914 make_small(total),
2915 make_small(online),
2916 make_small(active));
2917 BIF_RET(res);
2918 } else if (ERTS_IS_ATOM_STR("all_schedulers_state", BIF_ARG_1)) {
2919 Eterm *hp, tpl;
2920 Uint sz, total, online, active,
2921 dirty_cpu_total, dirty_cpu_online, dirty_cpu_active,
2922 dirty_io_total, dirty_io_active;
2923 erts_schedulers_state(&total, &online, &active,
2924 &dirty_cpu_total, &dirty_cpu_online, &dirty_cpu_active,
2925 &dirty_io_total, &dirty_io_active);
2926
2927 sz = 2+5;
2928 if (dirty_cpu_total)
2929 sz += 2+5;
2930 if (dirty_io_total)
2931 sz += 2+5;
2932
2933 hp = HAlloc(BIF_P, sz);
2934
2935 res = NIL;
2936 if (dirty_io_total) {
2937 tpl = TUPLE4(hp,
2938 am_dirty_io,
2939 make_small(dirty_io_total),
2940 make_small(dirty_io_total),
2941 make_small(dirty_io_active));
2942 hp += 5;
2943 res = CONS(hp, tpl, res);
2944 hp += 2;
2945 }
2946 if (dirty_cpu_total) {
2947 tpl = TUPLE4(hp,
2948 am_dirty_cpu,
2949 make_small(dirty_cpu_total),
2950 make_small(dirty_cpu_online),
2951 make_small(dirty_cpu_active));
2952 hp += 5;
2953 res = CONS(hp, tpl, res);
2954 hp += 2;
2955 }
2956 tpl = TUPLE4(hp,
2957 am_normal,
2958 make_small(total),
2959 make_small(online),
2960 make_small(active));
2961 hp += 5;
2962 res = CONS(hp, tpl, res);
2963 BIF_RET(res);
2964 } else if (ERTS_IS_ATOM_STR("schedulers_online", BIF_ARG_1)) {
2965 Uint online;
2966 erts_schedulers_state(NULL, &online, NULL, NULL, NULL, NULL, NULL, NULL);
2967 BIF_RET(make_small(online));
2968 } else if (ERTS_IS_ATOM_STR("schedulers_active", BIF_ARG_1)) {
2969 Uint active;
2970 erts_schedulers_state(NULL, NULL, &active, NULL, NULL, NULL, NULL, NULL);
2971 BIF_RET(make_small(active));
2972 } else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers", BIF_ARG_1)) {
2973 Uint dirty_cpu;
2974 erts_schedulers_state(NULL, NULL, NULL, &dirty_cpu, NULL, NULL, NULL, NULL);
2975 BIF_RET(make_small(dirty_cpu));
2976 } else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers_online", BIF_ARG_1)) {
2977 Uint dirty_cpu_onln;
2978 erts_schedulers_state(NULL, NULL, NULL, NULL, &dirty_cpu_onln, NULL, NULL, NULL);
2979 BIF_RET(make_small(dirty_cpu_onln));
2980 } else if (ERTS_IS_ATOM_STR("dirty_io_schedulers", BIF_ARG_1)) {
2981 Uint dirty_io;
2982 erts_schedulers_state(NULL, NULL, NULL, NULL, NULL, NULL, &dirty_io, NULL);
2983 BIF_RET(make_small(dirty_io));
2984 } else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
2985 res = make_small(erts_no_run_queues);
2986 BIF_RET(res);
2987 } else if (ERTS_IS_ATOM_STR("port_parallelism", BIF_ARG_1)) {
2988 res = erts_port_parallelism ? am_true : am_false;
2989 BIF_RET(res);
2990 } else if (ERTS_IS_ATOM_STR("c_compiler_used", BIF_ARG_1)) {
2991 Eterm *hp = NULL;
2992 Uint sz = 0;
2993 (void) c_compiler_used(NULL, &sz);
2994 if (sz)
2995 hp = HAlloc(BIF_P, sz);
2996 BIF_RET(c_compiler_used(&hp, NULL));
2997 } else if (ERTS_IS_ATOM_STR("stop_memory_trace", BIF_ARG_1)) {
2998 erts_mtrace_stop();
2999 BIF_RET(am_true);
3000 } else if (ERTS_IS_ATOM_STR("context_reductions", BIF_ARG_1)) {
3001 BIF_RET(make_small(CONTEXT_REDS));
3002 } else if (ERTS_IS_ATOM_STR("kernel_poll", BIF_ARG_1)) {
3003 #if ERTS_ENABLE_KERNEL_POLL
3004 BIF_RET(am_true);
3005 #else
3006 BIF_RET(am_false);
3007 #endif
3008 } else if (ERTS_IS_ATOM_STR("lock_checking", BIF_ARG_1)) {
3009 #ifdef ERTS_ENABLE_LOCK_CHECK
3010 BIF_RET(am_true);
3011 #else
3012 BIF_RET(am_false);
3013 #endif
3014 } else if (ERTS_IS_ATOM_STR("lock_counting", BIF_ARG_1)) {
3015 #ifdef ERTS_ENABLE_LOCK_COUNT
3016 BIF_RET(am_true);
3017 #else
3018 BIF_RET(am_false);
3019 #endif
3020 } else if (ERTS_IS_ATOM_STR("debug_compiled", BIF_ARG_1)) {
3021 #ifdef DEBUG
3022 BIF_RET(am_true);
3023 #else
3024 BIF_RET(am_false);
3025 #endif
3026 } else if (ERTS_IS_ATOM_STR("check_io", BIF_ARG_1)) {
3027 BIF_RET(erts_check_io_info(BIF_P));
3028 } else if (ERTS_IS_ATOM_STR("multi_scheduling_blockers", BIF_ARG_1)) {
3029 if (erts_no_schedulers == 1)
3030 BIF_RET(NIL);
3031 else
3032 BIF_RET(erts_multi_scheduling_blockers(BIF_P, 0));
3033 } else if (ERTS_IS_ATOM_STR("normal_multi_scheduling_blockers", BIF_ARG_1)) {
3034 if (erts_no_schedulers == 1)
3035 BIF_RET(NIL);
3036 else
3037 BIF_RET(erts_multi_scheduling_blockers(BIF_P, 1));
3038 } else if (ERTS_IS_ATOM_STR("modified_timing_level", BIF_ARG_1)) {
3039 BIF_RET(ERTS_USE_MODIFIED_TIMING()
3040 ? make_small(erts_modified_timing_level)
3041 : am_undefined);
3042 } else if (ERTS_IS_ATOM_STR("port_tasks", BIF_ARG_1)) {
3043 BIF_RET(am_true);
3044 } else if (ERTS_IS_ATOM_STR("io_thread", BIF_ARG_1)) {
3045 BIF_RET(am_false);
3046 } else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) {
3047 BIF_RET(erts_sched_stat_term(BIF_P, 0));
3048 } else if (ERTS_IS_ATOM_STR("total_scheduling_statistics", BIF_ARG_1)) {
3049 BIF_RET(erts_sched_stat_term(BIF_P, 1));
3050 } else if (ERTS_IS_ATOM_STR("taints", BIF_ARG_1)) {
3051 BIF_RET(erts_nif_taints(BIF_P));
3052 } else if (ERTS_IS_ATOM_STR("reader_groups_map", BIF_ARG_1)) {
3053 BIF_RET(erts_get_reader_groups_map(BIF_P));
3054 } else if (ERTS_IS_ATOM_STR("decentralized_counter_groups_map", BIF_ARG_1)) {
3055 BIF_RET(erts_get_decentralized_counter_groups_map(BIF_P));
3056 } else if (ERTS_IS_ATOM_STR("dist_buf_busy_limit", BIF_ARG_1)) {
3057 Uint hsz = 0;
3058
3059 (void) erts_bld_uint(NULL, &hsz, erts_dist_buf_busy_limit);
3060 hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
3061 res = erts_bld_uint(&hp, NULL, erts_dist_buf_busy_limit);
3062 BIF_RET(res);
3063 } else if (ERTS_IS_ATOM_STR("delayed_node_table_gc", BIF_ARG_1)) {
3064 Uint hsz = 0;
3065 Uint dntgc = erts_delayed_node_table_gc();
3066 if (dntgc == ERTS_NODE_TAB_DELAY_GC_INFINITY)
3067 BIF_RET(am_infinity);
3068 (void) erts_bld_uint(NULL, &hsz, dntgc);
3069 hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
3070 res = erts_bld_uint(&hp, NULL, dntgc);
3071 BIF_RET(res);
3072 } else if (ERTS_IS_ATOM_STR("ethread_info", BIF_ARG_1)) {
3073 BIF_RET(erts_get_ethread_info(BIF_P));
3074 }
3075 else if (ERTS_IS_ATOM_STR("ethread_used_tse", BIF_ARG_1)) {
3076 Uint64 no = (Uint64) ethr_no_used_tse();
3077 Uint hsz = 0;
3078 erts_bld_uint64(NULL, &hsz, no);
3079 hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
3080 res = erts_bld_uint64(&hp, NULL, no);
3081 BIF_RET(res);
3082 }
3083 else if (ERTS_IS_ATOM_STR("emu_args", BIF_ARG_1)) {
3084 BIF_RET(erts_get_emu_args(BIF_P));
3085 }
3086 else if (ERTS_IS_ATOM_STR("beam_jump_table", BIF_ARG_1)) {
3087 BIF_RET(erts_beam_jump_table() ? am_true : am_false);
3088 }
3089 else if (ERTS_IS_ATOM_STR("dynamic_trace", BIF_ARG_1)) {
3090 #if defined(USE_DTRACE)
3091 DECL_AM(dtrace);
3092 BIF_RET(AM_dtrace);
3093 #elif defined(USE_SYSTEMTAP)
3094 DECL_AM(systemtap);
3095 BIF_RET(AM_systemtap);
3096 #elif defined(USE_LTTNG)
3097 DECL_AM(lttng);
3098 BIF_RET(AM_lttng);
3099 #else
3100 BIF_RET(am_none);
3101 #endif
3102 }
3103 else if (ERTS_IS_ATOM_STR("dynamic_trace_probes", BIF_ARG_1)) {
3104 #if defined(USE_VM_PROBES)
3105 BIF_RET(am_true);
3106 #else
3107 BIF_RET(am_false);
3108 #endif
3109 }
3110 else if (ERTS_IS_ATOM_STR("thread_progress", BIF_ARG_1)) {
3111 erts_thr_progress_dbg_print_state();
3112 BIF_RET(am_true);
3113 }
3114 else if (BIF_ARG_1 == am_message_queue_data) {
3115 switch (erts_default_spo_flags & (SPO_ON_HEAP_MSGQ|SPO_OFF_HEAP_MSGQ)) {
3116 case SPO_OFF_HEAP_MSGQ:
3117 BIF_RET(am_off_heap);
3118 case SPO_ON_HEAP_MSGQ:
3119 BIF_RET(am_on_heap);
3120 default:
3121 ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
3122 BIF_RET(am_error);
3123 }
3124 }
3125 else if (ERTS_IS_ATOM_STR("compile_info",BIF_ARG_1)) {
3126 Uint sz;
3127 Eterm res = NIL, tup, text;
3128 Eterm *hp = HAlloc(BIF_P, 3*(2 + 3) + /* three 2-tuples and three cons */
3129 2*(sys_strlen(erts_build_flags_CONFIG_H) +
3130 sys_strlen(erts_build_flags_CFLAGS) +
3131 sys_strlen(erts_build_flags_LDFLAGS)));
3132
3133 sz = sys_strlen(erts_build_flags_CONFIG_H);
3134 text = buf_to_intlist(&hp, erts_build_flags_CONFIG_H, sz, NIL);
3135 tup = TUPLE2(hp, am_config_h, text); hp += 3;
3136 res = CONS(hp, tup, res); hp += 2;
3137
3138 sz = sys_strlen(erts_build_flags_CFLAGS);
3139 text = buf_to_intlist(&hp, erts_build_flags_CFLAGS, sz, NIL);
3140 tup = TUPLE2(hp, am_cflags, text); hp += 3;
3141 res = CONS(hp, tup, res); hp += 2;
3142
3143 sz = sys_strlen(erts_build_flags_LDFLAGS);
3144 text = buf_to_intlist(&hp, erts_build_flags_LDFLAGS, sz, NIL);
3145 tup = TUPLE2(hp, am_ldflags, text); hp += 3;
3146 res = CONS(hp, tup, res); hp += 2;
3147
3148 BIF_RET(res);
3149 }
3150 else if (ERTS_IS_ATOM_STR("ets_limit",BIF_ARG_1)) {
3151 BIF_RET(make_small(erts_db_get_max_tabs()));
3152 }
3153 else if (ERTS_IS_ATOM_STR("ets_count",BIF_ARG_1)) {
3154 BIF_RET(make_small(erts_ets_table_count()));
3155 }
3156 else if (ERTS_IS_ATOM_STR("atom_limit",BIF_ARG_1)) {
3157 BIF_RET(make_small(erts_get_atom_limit()));
3158 }
3159 else if (ERTS_IS_ATOM_STR("atom_count",BIF_ARG_1)) {
3160 BIF_RET(make_small(atom_table_size()));
3161 }
3162 else if (ERTS_IS_ATOM_STR("tolerant_timeofday",BIF_ARG_1)) {
3163 if (erts_has_time_correction()
3164 && erts_time_offset_state() == ERTS_TIME_OFFSET_FINAL) {
3165 BIF_RET(am_enabled);
3166 }
3167 BIF_RET(am_disabled);
3168 }
3169 else if (ERTS_IS_ATOM_STR("eager_check_io",BIF_ARG_1)) {
3170 BIF_RET(am_true);
3171 }
3172 else if (ERTS_IS_ATOM_STR("literal_test",BIF_ARG_1)) {
3173 #ifdef ERTS_HAVE_IS_IN_LITERAL_RANGE
3174 #ifdef ARCH_64
3175 DECL_AM(range);
3176 BIF_RET(AM_range);
3177 #else /* ARCH_32 */
3178 DECL_AM(range_bitmask);
3179 BIF_RET(AM_range_bitmask);
3180 #endif /* ARCH_32 */
3181 #else /* ! ERTS_HAVE_IS_IN_LITERAL_RANGE */
3182 DECL_AM(tag);
3183 BIF_RET(AM_tag);
3184 #endif
3185 } else if (ERTS_IS_ATOM_STR("system_logger", BIF_ARG_1)) {
3186 BIF_RET(erts_get_system_logger());
3187 }
3188
3189 BIF_ERROR(BIF_P, BADARG);
3190 }
3191
/*
 * Foreach-callback: store the heap size of monitor 'mon' into the
 * Uint pointed to by 'vsz'.  The return value is the number of
 * reductions consumed — presumably; the iteration API is defined
 * elsewhere (TODO confirm).
 */
static int monitor_size(ErtsMonitor *mon, void *vsz, Sint reds)
{
    Uint *szp = (Uint *) vsz;

    *szp = erts_monitor_size(mon);
    return 1;
}
3197
/*
 * Foreach-callback: store the heap size of link 'lnk' into the Uint
 * pointed to by 'vsz'.  Mirrors monitor_size() above; the parameter
 * is declared ErtsMonitor* to match the callback signature even
 * though erts_link_size() is applied to it.
 */
static int link_size(ErtsMonitor *lnk, void *vsz, Sint reds)
{
    Uint *szp = (Uint *) vsz;

    *szp = erts_link_size(lnk);
    return 1;
}
3203
3204 /**********************************************************************/
3205 /* Return information on ports */
3206 /* Info:
3207 ** id Port index
3208 ** connected (Pid)
3209 ** links List of pids
3210 ** name String
3211 ** input Number of bytes input from port program
3212 ** output Number of bytes output to the port program
3213 ** os_pid The child's process ID
3214 */
3215
/*
 * Build one {Item, Value} element for erlang:port_info/1,2.
 *
 * Follows the usual two-pass build protocol:
 *   - sizing pass: szp != NULL; *szp is incremented by the number of heap
 *     words the result needs, and am_true is returned (am_false /
 *     am_undefined / NIL for the special cases that return early).
 *   - build pass:  hpp != NULL; the value is built at *hpp (which is
 *     advanced past every word written) and the {Item, Value} 2-tuple is
 *     returned.
 *
 * Returns THE_NON_VALUE (build pass) / am_false (sizing pass) for an
 * unknown item. The caller must hold the port lock.
 */
Eterm
erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
                   Eterm item)
{
    Eterm res = THE_NON_VALUE;

    ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));

    if (item == am_id) {
        if (hpp)
            res = make_small(internal_port_index(prt->common.id));
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_links) {
        MonitorInfoCollection mic;
        int i;
        Eterm item; /* NOTE(review): shadows the 'item' parameter */

        INIT_MONITOR_INFOS(mic);

        erts_link_tree_foreach(ERTS_P_LINKS(prt), collect_one_link, (void *) &mic);

        if (szp)
            *szp += mic.sz;

        if (hpp) {
            res = NIL;
            for (i = 0; i < mic.mi_i; i++) {
                item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
                res = CONS(*hpp, item, res);
                *hpp += 2;
            }
        }

        DESTROY_MONITOR_INFOS(mic);

        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_monitors) {
        MonitorInfoCollection mic;
        int i;

        INIT_MONITOR_INFOS(mic);
        /* Origin monitors only: monitors set up by this port. */
        erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
                                  collect_one_origin_monitor,
                                  (void *) &mic);

        if (szp)
            *szp += mic.sz;

        if (hpp) {
            res = NIL;
            for (i = 0; i < mic.mi_i; i++) {
                Eterm t;

                ASSERT(mic.mi[i].type == ERTS_MON_TYPE_PORT);
                ASSERT(is_internal_pid(mic.mi[i].entity.term));
                /* Each element is a {process, Pid} 2-tuple (3 words)
                   followed by a cons cell (2 words). */
                t = TUPLE2(*hpp, am_process, mic.mi[i].entity.term);
                *hpp += 3;
                res = CONS(*hpp, t, res);
                *hpp += 2;
            }
        } /* hpp */
        DESTROY_MONITOR_INFOS(mic);

        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_monitored_by) {
        MonitorInfoCollection mic;
        int i;
        Eterm item; /* NOTE(review): shadows the 'item' parameter */

        INIT_MONITOR_INFOS(mic);
        /* Target monitors: everything monitoring this port, collected
           from both the local-target list and the monitor tree. */
        erts_monitor_list_foreach(ERTS_P_LT_MONITORS(prt),
                                  collect_one_target_monitor,
                                  (void *) &mic);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
                                  collect_one_target_monitor,
                                  (void *) &mic);
        if (szp)
            *szp += mic.sz;

        if (hpp) {
            res = NIL;
            for (i = 0; i < mic.mi_i; ++i) {
                ASSERT(mic.mi[i].type != ERTS_MON_TYPE_RESOURCE);
                item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
                res = CONS(*hpp, item, res);
                *hpp += 2;
            }
        } /* hpp */
        DESTROY_MONITOR_INFOS(mic);

        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_name) {
        int count = sys_strlen(prt->name);

        if (hpp)
            res = buf_to_intlist(hpp, prt->name, count, NIL);

        if (szp) {
            *szp += 2*count; /* one cons cell per character */
            res = am_true;
            goto done;
        }
    }
    else if (item == am_connected) {
        if (hpp)
            res = ERTS_PORT_GET_CONNECTED(prt); /* internal pid */
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_input) {
        res = erts_bld_uint(hpp, szp, prt->bytes_in);
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_output) {
        res = erts_bld_uint(hpp, szp, prt->bytes_out);
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_os_pid) {
        /* os_pid < 0 means no OS process is associated with the port. */
        res = (prt->os_pid < 0
               ? am_undefined
               : erts_bld_uword(hpp, szp, (UWord) prt->os_pid));
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_registered_name) {
        RegProc *reg = prt->common.u.alive.reg;
        if (reg) {
            res = reg->name;
            if (szp) {
                res = am_true;
                goto done;
            }
        }
        else {
            /* Not registered: returns am_undefined/NIL directly,
               deliberately NOT wrapped in the {Item, Value} tuple. */
            if (szp)
                return am_undefined;
            return NIL;
        }
    }
    else if (item == am_memory) {
        /* All memory consumed in bytes (the Port struct should not be
           included though).
         */
        Uint size = 0;

        erts_link_tree_foreach(ERTS_P_LINKS(prt),
                               link_size, (void *) &size);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
                                  monitor_size, (void *) &size);
        erts_monitor_list_foreach(ERTS_P_LT_MONITORS(prt),
                                  monitor_size, (void *) &size);

        size += erts_port_data_size(prt);

        if (prt->linebuf)
            size += sizeof(LineBuf) + prt->linebuf->ovsiz;

        /* ... */


        /* All memory allocated by the driver should be included, but it is
           hard to retrieve... */

        res = erts_bld_uint(hpp, szp, size);
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_queue_size) {
        Uint ioq_size = erts_port_ioq_size(prt);
        res = erts_bld_uint(hpp, szp, ioq_size);
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (ERTS_IS_ATOM_STR("locking", item)) {
        if (hpp) {
            if (erts_atomic32_read_nob(&prt->state)
                & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
                DECL_AM(port_level);
                ASSERT(prt->drv_ptr->flags
                       & ERL_DRV_FLAG_USE_PORT_LOCKING);
                res = AM_port_level;
            }
            else {
                DECL_AM(driver_level);
                ASSERT(!(prt->drv_ptr->flags
                         & ERL_DRV_FLAG_USE_PORT_LOCKING));
                res = AM_driver_level;
            }
        }
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_parallelism) {
        if (szp) {
            res = am_true;
            goto done;
        }
        res = ((ERTS_PTS_FLG_PARALLELISM &
                erts_atomic32_read_nob(&prt->sched.flags))
               ? am_true
               : am_false);
    }
    else {
        /* Unknown item. */
        if (szp)
            return am_false;
        return THE_NON_VALUE;
    }

done:
    if (szp)
        *szp += 3; /* the wrapping {Item, Value} 2-tuple */
    if (hpp) {
        res = TUPLE2(*hpp, item, res);
        *hpp += 3;
    }

    return res;
}
3466
/*
 * fun_info/2: return {Item, Value} information about a fun.
 *
 * Handles local funs (is_fun) and external funs, i.e. fun M:F/A
 * (is_export). Unknown items, or a non-fun first argument, raise badarg.
 *
 * Note the allocation discipline: in the am_new_uniq (local fun) case the
 * value is created with new_binary() BEFORE HAlloc(), and in the am_refc
 * case erts_make_integer() (which may itself allocate) runs before
 * HAlloc(); the 3-word allocation for the result tuple is always the
 * last allocation so 'hp' stays valid for TUPLE2 at the end.
 */
BIF_RETTYPE
fun_info_2(BIF_ALIST_2)
{
    Process* p = BIF_P;
    Eterm fun = BIF_ARG_1;
    Eterm what = BIF_ARG_2;
    Eterm* hp;
    Eterm val;

    if (is_fun(fun)) {
        ErlFunThing* funp = (ErlFunThing *) fun_val(fun);

        switch (what) {
        case am_type:
            hp = HAlloc(p, 3);
            val = am_local;
            break;
        case am_pid:
            hp = HAlloc(p, 3);
            val = funp->creator;
            break;
        case am_module:
            hp = HAlloc(p, 3);
            val = funp->fe->module;
            break;
        case am_new_index:
            hp = HAlloc(p, 3);
            val = make_small(funp->fe->index);
            break;
        case am_new_uniq:
            /* 16-byte MD5 uniq value; allocate the binary first. */
            val = new_binary(p, funp->fe->uniq, 16);
            hp = HAlloc(p, 3);
            break;
        case am_index:
            hp = HAlloc(p, 3);
            val = make_small(funp->fe->old_index);
            break;
        case am_uniq:
            hp = HAlloc(p, 3);
            val = make_small(funp->fe->old_uniq);
            break;
        case am_env:
            {
                /* Build the list of free variables, last-to-first so the
                   result preserves their order. */
                Uint num_free = funp->num_free;
                int i;

                hp = HAlloc(p, 3 + 2*num_free);
                val = NIL;
                for (i = num_free-1; i >= 0; i--) {
                    val = CONS(hp, funp->env[i], val);
                    hp += 2;
                }
            }
            break;
        case am_refc:
            /* erts_make_integer() may allocate, so it runs before the
               final HAlloc. */
            val = erts_make_integer(erts_atomic_read_nob(&funp->fe->refc), p);
            hp = HAlloc(p, 3);
            break;
        case am_arity:
            hp = HAlloc(p, 3);
            val = make_small(funp->arity);
            break;
        case am_name:
            /* The fun's name atom is stored just before its code. */
            hp = HAlloc(p, 3);
            val = funp->fe->address[-2];
            break;
        default:
            goto error;
        }
    } else if (is_export(fun)) {
        Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
        switch (what) {
        case am_type:
            hp = HAlloc(p, 3);
            val = am_external;
            break;
        case am_pid:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_module:
            hp = HAlloc(p, 3);
            val = exp->info.mfa.module;
            break;
        case am_new_index:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_new_uniq:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_index:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_uniq:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_env:
            /* External funs have no environment. */
            hp = HAlloc(p, 3);
            val = NIL;
            break;
        case am_refc:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_arity:
            hp = HAlloc(p, 3);
            val = make_small(exp->info.mfa.arity);
            break;
        case am_name:
            hp = HAlloc(p, 3);
            val = exp->info.mfa.function;
            break;
        default:
            goto error;
        }
    } else {
    error:
        BIF_ERROR(p, BADARG);
    }
    return TUPLE2(hp, what, val);
}
3592
3593 BIF_RETTYPE
fun_info_mfa_1(BIF_ALIST_1)3594 fun_info_mfa_1(BIF_ALIST_1)
3595 {
3596 Process* p = BIF_P;
3597 Eterm fun = BIF_ARG_1;
3598 Eterm* hp;
3599
3600 if (is_fun(fun)) {
3601 ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
3602 hp = HAlloc(p, 4);
3603 BIF_RET(TUPLE3(hp,funp->fe->module,funp->fe->address[-2],make_small(funp->arity)));
3604 } else if (is_export(fun)) {
3605 Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
3606 hp = HAlloc(p, 4);
3607 BIF_RET(TUPLE3(hp,exp->info.mfa.module,
3608 exp->info.mfa.function,
3609 make_small(exp->info.mfa.arity)));
3610 }
3611 BIF_ERROR(p, BADARG);
3612 }
3613
erts_internal_is_process_alive_2(BIF_ALIST_2)3614 BIF_RETTYPE erts_internal_is_process_alive_2(BIF_ALIST_2)
3615 {
3616 if (!is_internal_pid(BIF_ARG_1) || !is_internal_ordinary_ref(BIF_ARG_2))
3617 BIF_ERROR(BIF_P, BADARG);
3618 if (!erts_proc_sig_send_is_alive_request(BIF_P, BIF_ARG_1, BIF_ARG_2)) {
3619 if (ERTS_PROC_HAS_INCOMING_SIGNALS(BIF_P))
3620 ERTS_BIF_HANDLE_SIGNALS_RETURN(BIF_P, am_ok);
3621 }
3622 BIF_RET(am_ok);
3623 }
3624
is_process_alive_1(BIF_ALIST_1)3625 BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
3626 {
3627 if (is_internal_pid(BIF_ARG_1)) {
3628 erts_aint32_t state;
3629 Process *rp;
3630
3631 if (BIF_ARG_1 == BIF_P->common.id)
3632 BIF_RET(am_true);
3633
3634 rp = erts_proc_lookup_raw(BIF_ARG_1);
3635 if (!rp)
3636 BIF_RET(am_false);
3637
3638 state = erts_atomic32_read_acqb(&rp->state);
3639 if (state & (ERTS_PSFLG_EXITING
3640 | ERTS_PSFLG_SIG_Q
3641 | ERTS_PSFLG_SIG_IN_Q)) {
3642 /*
3643 * If in exiting state, trap out and send 'is alive'
3644 * request and wait for it to complete termination.
3645 *
3646 * If process has signals enqueued, we need to
3647 * send it an 'is alive' request via its signal
3648 * queue in order to ensure that signal order is
3649 * preserved (we may earlier have sent it an
3650 * exit signal that has not been processed yet).
3651 */
3652 BIF_TRAP1(is_process_alive_trap, BIF_P, BIF_ARG_1);
3653 }
3654
3655 BIF_RET(am_true);
3656 }
3657
3658 if (is_external_pid(BIF_ARG_1)) {
3659 if (external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
3660 BIF_RET(am_false); /* A pid from an old incarnation of this node */
3661 }
3662
3663 BIF_ERROR(BIF_P, BADARG);
3664 }
3665
3666 static Eterm
process_display(Process * c_p,void * arg,int * redsp,ErlHeapFragment ** bpp)3667 process_display(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
3668 {
3669 if (redsp)
3670 *redsp = 1;
3671
3672 if (ERTS_PROC_IS_EXITING(c_p))
3673 return am_badarg;
3674
3675 erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
3676 erts_stack_dump(ERTS_PRINT_STDERR, NULL, c_p);
3677 erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
3678
3679 return am_true;
3680 }
3681
3682
erts_internal_process_display_2(BIF_ALIST_2)3683 BIF_RETTYPE erts_internal_process_display_2(BIF_ALIST_2)
3684 {
3685 Eterm res;
3686
3687 if (BIF_ARG_2 != am_backtrace)
3688 BIF_RET(am_badarg);
3689
3690 if (BIF_P->common.id == BIF_ARG_1) {
3691 res = process_display(BIF_P, NULL, NULL, NULL);
3692 BIF_RET(res);
3693 }
3694
3695 if (is_not_internal_pid(BIF_ARG_1))
3696 BIF_RET(am_badarg);
3697
3698 res = erts_proc_sig_send_rpc_request(BIF_P, BIF_ARG_1,
3699 !0,
3700 process_display,
3701 NULL);
3702 if (is_non_value(res))
3703 BIF_RET(am_badarg);
3704
3705 BIF_RET(res);
3706 }
3707
3708 /* this is a general call which return some possibly useful information */
3709
statistics_1(BIF_ALIST_1)3710 BIF_RETTYPE statistics_1(BIF_ALIST_1)
3711 {
3712 Eterm res;
3713 Eterm* hp;
3714
3715 if (BIF_ARG_1 == am_scheduler_wall_time) {
3716 res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 0);
3717 if (is_non_value(res))
3718 BIF_RET(am_undefined);
3719 BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
3720 } else if (BIF_ARG_1 == am_scheduler_wall_time_all) {
3721 res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 1);
3722 if (is_non_value(res))
3723 BIF_RET(am_undefined);
3724 BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
3725 } else if ((BIF_ARG_1 == am_total_active_tasks)
3726 | (BIF_ARG_1 == am_total_run_queue_lengths)
3727 | (BIF_ARG_1 == am_total_active_tasks_all)
3728 | (BIF_ARG_1 == am_total_run_queue_lengths_all)) {
3729 Uint no = erts_run_queues_len(NULL, 0,
3730 ((BIF_ARG_1 == am_total_active_tasks)
3731 | (BIF_ARG_1 == am_total_active_tasks_all)),
3732 ((BIF_ARG_1 == am_total_active_tasks_all)
3733 | (BIF_ARG_1 == am_total_run_queue_lengths_all)));
3734 if (IS_USMALL(0, no))
3735 res = make_small(no);
3736 else {
3737 Eterm *hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
3738 res = uint_to_big(no, hp);
3739 }
3740 BIF_RET(res);
3741 } else if ((BIF_ARG_1 == am_active_tasks)
3742 | (BIF_ARG_1 == am_run_queue_lengths)
3743 | (BIF_ARG_1 == am_active_tasks_all)
3744 | (BIF_ARG_1 == am_run_queue_lengths_all)) {
3745 Eterm res, *hp, **hpp;
3746 Uint sz, *szp;
3747 int incl_dirty_io = ((BIF_ARG_1 == am_active_tasks_all)
3748 | (BIF_ARG_1 == am_run_queue_lengths_all));
3749 int no_qs = (erts_no_run_queues + ERTS_NUM_DIRTY_CPU_RUNQS +
3750 (incl_dirty_io ? ERTS_NUM_DIRTY_IO_RUNQS : 0));
3751 Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
3752 (void) erts_run_queues_len(qszs, 0,
3753 ((BIF_ARG_1 == am_active_tasks)
3754 | (BIF_ARG_1 == am_active_tasks_all)),
3755 incl_dirty_io);
3756 sz = 0;
3757 szp = &sz;
3758 hpp = NULL;
3759 while (1) {
3760 int i;
3761 for (i = 0; i < no_qs; i++)
3762 qszs[no_qs+i] = erts_bld_uint(hpp, szp, qszs[i]);
3763 res = erts_bld_list(hpp, szp, no_qs, &qszs[no_qs]);
3764 if (hpp) {
3765 erts_free(ERTS_ALC_T_TMP, qszs);
3766 BIF_RET(res);
3767 }
3768 hp = HAlloc(BIF_P, sz);
3769 szp = NULL;
3770 hpp = &hp;
3771 }
3772 #ifdef ERTS_ENABLE_MSACC
3773 } else if (BIF_ARG_1 == am_microstate_accounting) {
3774 Eterm threads;
3775 res = erts_msacc_request(BIF_P, ERTS_MSACC_GATHER, &threads);
3776 if (is_non_value(res))
3777 BIF_RET(am_undefined);
3778 BIF_TRAP2(gather_msacc_res_trap, BIF_P, res, threads);
3779 #endif
3780 } else if (BIF_ARG_1 == am_context_switches) {
3781 Eterm cs = erts_make_integer(erts_get_total_context_switches(), BIF_P);
3782 hp = HAlloc(BIF_P, 3);
3783 res = TUPLE2(hp, cs, SMALL_ZERO);
3784 BIF_RET(res);
3785 } else if (BIF_ARG_1 == am_garbage_collection) {
3786 res = erts_gc_info_request(BIF_P);
3787 if (is_non_value(res))
3788 BIF_RET(am_undefined);
3789 BIF_TRAP1(gather_gc_info_res_trap, BIF_P, res);
3790 } else if (BIF_ARG_1 == am_reductions) {
3791 Uint reds;
3792 Uint diff;
3793 Uint hsz = 3;
3794 Eterm b1, b2;
3795
3796 erts_get_total_reductions(&reds, &diff);
3797 (void) erts_bld_uint(NULL, &hsz, reds);
3798 (void) erts_bld_uint(NULL, &hsz, diff);
3799 hp = HAlloc(BIF_P, hsz);
3800 b1 = erts_bld_uint(&hp, NULL, reds);
3801 b2 = erts_bld_uint(&hp, NULL, diff);
3802 res = TUPLE2(hp, b1, b2);
3803 BIF_RET(res);
3804 } else if (BIF_ARG_1 == am_exact_reductions) {
3805 Uint reds;
3806 Uint diff;
3807 Uint hsz = 3;
3808 Eterm b1, b2;
3809
3810 erts_get_exact_total_reductions(BIF_P, &reds, &diff);
3811 (void) erts_bld_uint(NULL, &hsz, reds);
3812 (void) erts_bld_uint(NULL, &hsz, diff);
3813 hp = HAlloc(BIF_P, hsz);
3814 b1 = erts_bld_uint(&hp, NULL, reds);
3815 b2 = erts_bld_uint(&hp, NULL, diff);
3816 res = TUPLE2(hp, b1, b2);
3817 BIF_RET(res);
3818 } else if (BIF_ARG_1 == am_runtime) {
3819 ErtsMonotonicTime u1, u2;
3820 Eterm b1, b2;
3821 Uint hsz;
3822 erts_runtime_elapsed_both(&u1, NULL, &u2, NULL);
3823 hsz = 3; /* 2-tuple */
3824 (void) erts_bld_monotonic_time(NULL, &hsz, u1);
3825 (void) erts_bld_monotonic_time(NULL, &hsz, u2);
3826 hp = HAlloc(BIF_P, hsz);
3827 b1 = erts_bld_monotonic_time(&hp, NULL, u1);
3828 b2 = erts_bld_monotonic_time(&hp, NULL, u2);
3829 res = TUPLE2(hp, b1, b2);
3830 BIF_RET(res);
3831 } else if (BIF_ARG_1 == am_run_queue) {
3832 res = erts_run_queues_len(NULL, 1, 0, 0);
3833 BIF_RET(make_small(res));
3834 } else if (BIF_ARG_1 == am_wall_clock) {
3835 ErtsMonotonicTime w1, w2;
3836 Eterm b1, b2;
3837 Uint hsz;
3838 erts_wall_clock_elapsed_both(&w1, &w2);
3839 hsz = 3; /* 2-tuple */
3840 (void) erts_bld_monotonic_time(NULL, &hsz, w1);
3841 (void) erts_bld_monotonic_time(NULL, &hsz, w2);
3842 hp = HAlloc(BIF_P, hsz);
3843 b1 = erts_bld_monotonic_time(&hp, NULL, w1);
3844 b2 = erts_bld_monotonic_time(&hp, NULL, w2);
3845 res = TUPLE2(hp, b1, b2);
3846 BIF_RET(res);
3847 } else if (BIF_ARG_1 == am_io) {
3848 Eterm ref = erts_request_io_bytes(BIF_P);
3849 BIF_TRAP2(gather_io_bytes_trap, BIF_P, ref, make_small(erts_no_schedulers));
3850 }
3851 else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
3852 Eterm res, *hp, **hpp;
3853 Uint sz, *szp;
3854 int no_qs = erts_no_run_queues + ERTS_NUM_DIRTY_RUNQS;
3855 Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
3856 (void) erts_run_queues_len(qszs, 0, 0, 1);
3857 sz = 0;
3858 szp = &sz;
3859 hpp = NULL;
3860 while (1) {
3861 int i;
3862 for (i = 0; i < no_qs; i++)
3863 qszs[no_qs+i] = erts_bld_uint(hpp, szp, qszs[i]);
3864 res = erts_bld_tuplev(hpp, szp, no_qs, &qszs[no_qs]);
3865 if (hpp) {
3866 erts_free(ERTS_ALC_T_TMP, qszs);
3867 BIF_RET(res);
3868 }
3869 hp = HAlloc(BIF_P, sz);
3870 szp = NULL;
3871 hpp = &hp;
3872 }
3873 }
3874 BIF_ERROR(BIF_P, BADARG);
3875 }
3876
error_logger_warning_map_0(BIF_ALIST_0)3877 BIF_RETTYPE error_logger_warning_map_0(BIF_ALIST_0)
3878 {
3879 BIF_RET(erts_error_logger_warnings);
3880 }
3881
/* Gate flag for the erts_debug internal-state BIFs:
 * erts_debug_get_internal_state_1() below raises 'undef' while this is
 * zero. Presumably enabled via erts_debug:set_internal_state(
 * available_internal_state, true) -- the setter is outside this chunk;
 * confirm. */
static erts_atomic_t available_internal_state;
3883
empty_magic_ref_destructor(Binary * bin)3884 static int empty_magic_ref_destructor(Binary *bin)
3885 {
3886 return 1;
3887 }
3888
erts_debug_get_internal_state_1(BIF_ALIST_1)3889 BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
3890 {
3891 /*
3892 * NOTE: Only supposed to be used for testing, and debugging.
3893 */
3894
3895 if (!erts_atomic_read_nob(&available_internal_state)) {
3896 BIF_ERROR(BIF_P, EXC_UNDEF);
3897 }
3898
3899 if (is_atom(BIF_ARG_1)) {
3900 if (ERTS_IS_ATOM_STR("reds_left", BIF_ARG_1)) {
3901 /* Used by (emulator) */
3902 BIF_RET(make_small((Uint) ERTS_BIF_REDS_LEFT(BIF_P)));
3903 }
3904 else if (ERTS_IS_ATOM_STR("node_and_dist_references", BIF_ARG_1)) {
3905 /* Used by node_container_SUITE (emulator) */
3906 BIF_TRAP1(get_internal_state_blocked, BIF_P, BIF_ARG_1);
3907 }
3908 else if (ERTS_IS_ATOM_STR("monitoring_nodes", BIF_ARG_1)) {
3909 BIF_RET(erts_processes_monitoring_nodes(BIF_P));
3910 }
3911 else if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1)
3912 || ERTS_IS_ATOM_STR("next_port", BIF_ARG_1)) {
3913 /* Used by node_container_SUITE (emulator) */
3914 Sint res;
3915 if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
3916 res = erts_ptab_test_next_id(&erts_proc, 0, 0);
3917 else
3918 res = erts_ptab_test_next_id(&erts_port, 0, 0);
3919 if (res < 0)
3920 BIF_RET(am_false);
3921 BIF_RET(erts_make_integer(res, BIF_P));
3922 }
3923 else if (ERTS_IS_ATOM_STR("DbTable_words", BIF_ARG_1)) {
3924 /* Used by ets_SUITE (stdlib) */
3925 size_t words = (sizeof(DbTable) + sizeof(Uint) - 1)/sizeof(Uint);
3926 Eterm* hp = HAlloc(BIF_P ,3);
3927 BIF_RET(TUPLE2(hp, make_small((Uint) words),
3928 erts_ets_hash_sizeof_ext_segtab()));
3929 }
3930 else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) {
3931 /* Used by driver_SUITE (emulator) */
3932 Uint sz, *szp;
3933 Eterm res, *hp, **hpp;
3934 int no_errors;
3935 ErtsCheckIoDebugInfo ciodi = {0};
3936 #ifdef HAVE_ERTS_CHECK_IO_DEBUG
3937 erts_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN);
3938 no_errors = erts_check_io_debug(&ciodi);
3939 erts_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN);
3940 #else
3941 no_errors = 0;
3942 #endif
3943 sz = 0;
3944 szp = &sz;
3945 hpp = NULL;
3946 while (1) {
3947 res = erts_bld_tuple(hpp, szp, 4,
3948 erts_bld_uint(hpp, szp,
3949 (Uint) no_errors),
3950 erts_bld_uint(hpp, szp,
3951 (Uint) ciodi.no_used_fds),
3952 erts_bld_uint(hpp, szp,
3953 (Uint) ciodi.no_driver_select_structs),
3954 erts_bld_uint(hpp, szp,
3955 (Uint) ciodi.no_enif_select_structs));
3956 if (hpp)
3957 break;
3958 hp = HAlloc(BIF_P, sz);
3959 szp = NULL;
3960 hpp = &hp;
3961 }
3962 BIF_RET(res);
3963 }
3964 else if (ERTS_IS_ATOM_STR("process_info_args", BIF_ARG_1)) {
3965 /* Used by process_SUITE (emulator) */
3966 int i;
3967 Eterm res = NIL;
3968 Uint *hp = HAlloc(BIF_P, 2*ERTS_PI_ARGS);
3969 for (i = ERTS_PI_ARGS-1; i >= 0; i--) {
3970 res = CONS(hp, pi_args[i].name, res);
3971 hp += 2;
3972 }
3973 BIF_RET(res);
3974 }
3975 else if (ERTS_IS_ATOM_STR("processes", BIF_ARG_1)) {
3976 /* Used by process_SUITE (emulator) */
3977 BIF_RET(erts_debug_ptab_list(BIF_P, &erts_proc));
3978 }
3979 else if (ERTS_IS_ATOM_STR("processes_bif_info", BIF_ARG_1)) {
3980 /* Used by process_SUITE (emulator) */
3981 BIF_RET(erts_debug_ptab_list_bif_info(BIF_P, &erts_proc));
3982 }
3983 else if (ERTS_IS_ATOM_STR("max_atom_out_cache_index", BIF_ARG_1)) {
3984 /* Used by distribution_SUITE (emulator) */
3985 BIF_RET(make_small((Uint) erts_debug_max_atom_out_cache_index()));
3986 }
3987 else if (ERTS_IS_ATOM_STR("nbalance", BIF_ARG_1)) {
3988 Uint n;
3989 erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
3990 n = erts_debug_nbalance();
3991 erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
3992 BIF_RET(erts_make_integer(n, BIF_P));
3993 }
3994 else if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)) {
3995 BIF_RET(am_true);
3996 }
3997 else if (ERTS_IS_ATOM_STR("force_heap_frags", BIF_ARG_1)) {
3998 #ifdef FORCE_HEAP_FRAGS
3999 BIF_RET(am_true);
4000 #else
4001 BIF_RET(am_false);
4002 #endif
4003 }
4004 else if (ERTS_IS_ATOM_STR("memory", BIF_ARG_1)) {
4005 Eterm res;
4006 erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
4007 erts_thr_progress_block();
4008 erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
4009 res = erts_memory(NULL, NULL, BIF_P, THE_NON_VALUE);
4010 erts_thr_progress_unblock();
4011 BIF_RET(res);
4012 }
4013 else if (ERTS_IS_ATOM_STR("mmap", BIF_ARG_1)) {
4014 BIF_RET(erts_mmap_debug_info(BIF_P));
4015 }
4016 else if (ERTS_IS_ATOM_STR("unique_monotonic_integer_state", BIF_ARG_1)) {
4017 BIF_RET(erts_debug_get_unique_monotonic_integer_state(BIF_P));
4018 }
4019 else if (ERTS_IS_ATOM_STR("min_unique_monotonic_integer", BIF_ARG_1)) {
4020 Sint64 value = erts_get_min_unique_monotonic_integer();
4021 if (IS_SSMALL(value))
4022 BIF_RET(make_small(value));
4023 else {
4024 Uint hsz = ERTS_SINT64_HEAP_SIZE(value);
4025 Eterm *hp = HAlloc(BIF_P, hsz);
4026 BIF_RET(erts_sint64_to_big(value, &hp));
4027 }
4028 }
4029 else if (ERTS_IS_ATOM_STR("min_unique_integer", BIF_ARG_1)) {
4030 Sint64 value = erts_get_min_unique_integer();
4031 if (IS_SSMALL(value))
4032 BIF_RET(make_small(value));
4033 else {
4034 Uint hsz = ERTS_SINT64_HEAP_SIZE(value);
4035 Eterm *hp = HAlloc(BIF_P, hsz);
4036 BIF_RET(erts_sint64_to_big(value, &hp));
4037 }
4038 }
4039 else if (ERTS_IS_ATOM_STR("stack_check", BIF_ARG_1)) {
4040 UWord size;
4041 char c;
4042 if (erts_is_above_stack_limit(&c))
4043 size = erts_check_stack_recursion_downwards(&c);
4044 else
4045 size = erts_check_stack_recursion_upwards(&c);
4046 if (IS_SSMALL(size))
4047 BIF_RET(make_small(size));
4048 else {
4049 Uint hsz = BIG_UWORD_HEAP_SIZE(size);
4050 Eterm *hp = HAlloc(BIF_P, hsz);
4051 BIF_RET(uword_to_big(size, hp));
4052 }
4053 } else if (ERTS_IS_ATOM_STR("scheduler_dump", BIF_ARG_1)) {
4054 #if defined(ERTS_HAVE_TRY_CATCH) && defined(ERTS_SYS_SUSPEND_SIGNAL)
4055 BIF_RET(am_true);
4056 #else
4057 BIF_RET(am_false);
4058 #endif
4059 }
4060 else if (ERTS_IS_ATOM_STR("lc_graph", BIF_ARG_1)) {
4061 #ifdef ERTS_ENABLE_LOCK_CHECK
4062 Eterm res = erts_lc_dump_graph();
4063 BIF_RET(res);
4064 #else
4065 BIF_RET(am_notsup);
4066 #endif
4067 }
4068
4069 }
4070 else if (is_tuple(BIF_ARG_1)) {
4071 Eterm* tp = tuple_val(BIF_ARG_1);
4072 switch (arityval(tp[0])) {
4073 case 2: {
4074 if (ERTS_IS_ATOM_STR("node_and_dist_references", tp[1])) {
4075 if (tp[2] == am_blocked
4076 && erts_is_multi_scheduling_blocked() > 0) {
4077 Eterm res = erts_get_node_and_dist_references(BIF_P);
4078 BIF_RET(res);
4079 }
4080 }
4081 else if (ERTS_IS_ATOM_STR("process_status", tp[1])) {
4082 /* Used by timer process_SUITE, timer_bif_SUITE, and
4083 node_container_SUITE (emulator) */
4084 if (is_internal_pid(tp[2])) {
4085 BIF_RET(erts_process_status(NULL, tp[2]));
4086 }
4087 }
4088 else if (ERTS_IS_ATOM_STR("connection_id", tp[1])) {
4089 DistEntry *dep;
4090 Eterm *hp, res;
4091 Uint con_id, hsz = 0;
4092 if (!is_atom(tp[2]))
4093 BIF_ERROR(BIF_P, BADARG);
4094 dep = erts_sysname_to_connected_dist_entry(tp[2]);
4095 if (!dep)
4096 BIF_ERROR(BIF_P, BADARG);
4097 erts_de_rlock(dep);
4098 con_id = (Uint) dep->connection_id;
4099 erts_de_runlock(dep);
4100 (void) erts_bld_uint(NULL, &hsz, con_id);
4101 hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
4102 res = erts_bld_uint(&hp, NULL, con_id);
4103 BIF_RET(res);
4104 }
4105 else if (ERTS_IS_ATOM_STR("link_list", tp[1])) {
4106 /* Used by erl_link_SUITE (emulator) */
4107 if(is_internal_pid(tp[2])) {
4108 erts_aint32_t state;
4109 Eterm res;
4110 Process *p;
4111 int sigs_done;
4112
4113 p = erts_pid2proc(BIF_P,
4114 ERTS_PROC_LOCK_MAIN,
4115 tp[2],
4116 ERTS_PROC_LOCK_MAIN);
4117 if (!p) {
4118 ERTS_ASSERT_IS_NOT_EXITING(BIF_P);
4119 BIF_RET(am_undefined);
4120 }
4121
4122 erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
4123 erts_proc_sig_fetch(p);
4124 erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
4125 do {
4126 int reds = CONTEXT_REDS;
4127 sigs_done = erts_proc_sig_handle_incoming(p,
4128 &state,
4129 &reds,
4130 CONTEXT_REDS,
4131 !0);
4132 } while (!sigs_done && !(state & ERTS_PSFLG_EXITING));
4133
4134 if (!(state & ERTS_PSFLG_EXITING))
4135 res = make_link_list(BIF_P, 1, ERTS_P_LINKS(p), NIL);
4136 else if (BIF_P == p)
4137 ERTS_BIF_EXITED(BIF_P);
4138 else
4139 res = am_undefined;
4140 if (BIF_P != p)
4141 erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
4142 BIF_RET(res);
4143 }
4144 else if(is_internal_port(tp[2])) {
4145 Eterm res;
4146 Port *p = erts_id2port_sflgs(tp[2],
4147 BIF_P,
4148 ERTS_PROC_LOCK_MAIN,
4149 ERTS_PORT_SFLGS_INVALID_LOOKUP);
4150 if(!p)
4151 BIF_RET(am_undefined);
4152 res = make_link_list(BIF_P, 1, ERTS_P_LINKS(p), NIL);
4153 erts_port_release(p);
4154 BIF_RET(res);
4155 }
4156 else if(is_node_name_atom(tp[2])) {
4157 DistEntry *dep = erts_find_dist_entry(tp[2]);
4158 if(dep) {
4159 Eterm res = NIL;
4160 if (dep->mld) {
4161 erts_mtx_lock(&dep->mld->mtx);
4162 res = make_link_list(BIF_P, 0, dep->mld->links, NIL);
4163 erts_mtx_unlock(&dep->mld->mtx);
4164 }
4165 BIF_RET(res);
4166 } else {
4167 BIF_RET(am_undefined);
4168 }
4169 }
4170 }
4171 else if (ERTS_IS_ATOM_STR("monitor_list", tp[1])) {
4172 /* Used by erl_link_SUITE (emulator) */
4173 if(is_internal_pid(tp[2])) {
4174 erts_aint32_t state;
4175 Process *p;
4176 Eterm res;
4177 int sigs_done;
4178
4179 p = erts_pid2proc(BIF_P,
4180 ERTS_PROC_LOCK_MAIN,
4181 tp[2],
4182 ERTS_PROC_LOCK_MAIN);
4183 if (!p) {
4184 ERTS_ASSERT_IS_NOT_EXITING(BIF_P);
4185 BIF_RET(am_undefined);
4186 }
4187
4188 erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
4189 erts_proc_sig_fetch(p);
4190 erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
4191 do {
4192 int reds = CONTEXT_REDS;
4193 sigs_done = erts_proc_sig_handle_incoming(p,
4194 &state,
4195 &reds,
4196 CONTEXT_REDS,
4197 !0);
4198 } while (!sigs_done && !(state & ERTS_PSFLG_EXITING));
4199
4200 if (!(state & ERTS_PSFLG_EXITING)) {
4201 res = make_monitor_list(BIF_P, 1, ERTS_P_MONITORS(p), NIL);
4202 res = make_monitor_list(BIF_P, 0, ERTS_P_LT_MONITORS(p), res);
4203 }
4204 else {
4205 if (BIF_P == p)
4206 ERTS_BIF_EXITED(BIF_P);
4207 else
4208 res = am_undefined;
4209 }
4210 if (BIF_P != p)
4211 erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
4212 BIF_RET(res);
4213 } else if(is_node_name_atom(tp[2])) {
4214 DistEntry *dep = erts_find_dist_entry(tp[2]);
4215 if(dep) {
4216 Eterm ml = NIL;
4217 if (dep->mld) {
4218 erts_mtx_lock(&dep->mld->mtx);
4219 ml = make_monitor_list(BIF_P, 1, dep->mld->orig_name_monitors, NIL);
4220 ml = make_monitor_list(BIF_P, 0, dep->mld->monitors, ml);
4221 erts_mtx_unlock(&dep->mld->mtx);
4222 }
4223 BIF_RET(ml);
4224 } else {
4225 BIF_RET(am_undefined);
4226 }
4227 }
4228 }
4229 else if (ERTS_IS_ATOM_STR("channel_number", tp[1])) {
4230 Eterm res;
4231 DistEntry *dep = erts_find_dist_entry(tp[2]);
4232 if (!dep)
4233 res = am_undefined;
4234 else {
4235 Uint cno = dist_entry_channel_no(dep);
4236 res = make_small(cno);
4237 }
4238 BIF_RET(res);
4239 }
4240 else if (ERTS_IS_ATOM_STR("binary_info", tp[1])) {
4241 Eterm bin = tp[2];
4242 if (is_binary(bin)) {
4243 Eterm real_bin = bin;
4244 Eterm res = am_true;
4245 ErlSubBin* sb = (ErlSubBin *) binary_val(real_bin);
4246
4247 if (sb->thing_word == HEADER_SUB_BIN) {
4248 real_bin = sb->orig;
4249 }
4250 if (*binary_val(real_bin) == HEADER_PROC_BIN) {
4251 ProcBin* pb;
4252 Binary* val;
4253 Eterm SzTerm;
4254 Uint hsz = 3 + 5;
4255 Eterm* hp;
4256 DECL_AM(refc_binary);
4257
4258 pb = (ProcBin *) binary_val(real_bin);
4259 val = pb->val;
4260 (void) erts_bld_uint(NULL, &hsz, pb->size);
4261 (void) erts_bld_uint(NULL, &hsz, val->orig_size);
4262 hp = HAlloc(BIF_P, hsz);
4263
4264 /* Info about the Binary* object */
4265 SzTerm = erts_bld_uint(&hp, NULL, val->orig_size);
4266 res = TUPLE2(hp, am_binary, SzTerm);
4267 hp += 3;
4268
4269 /* Info about the ProcBin* object */
4270 SzTerm = erts_bld_uint(&hp, NULL, pb->size);
4271 res = TUPLE4(hp, AM_refc_binary, SzTerm,
4272 res, make_small(pb->flags));
4273 } else { /* heap binary */
4274 DECL_AM(heap_binary);
4275 res = AM_heap_binary;
4276 }
4277 BIF_RET(res);
4278 }
4279 }
4280 else if (ERTS_IS_ATOM_STR("term_to_binary_tuple_fallbacks", tp[1])) {
4281 Uint dflags = (TERM_TO_BINARY_DFLAGS
4282 & ~DFLAG_EXPORT_PTR_TAG
4283 & ~DFLAG_BIT_BINARIES);
4284 Eterm res = erts_term_to_binary(BIF_P, tp[2], 0, dflags);
4285 if (is_value(res))
4286 BIF_RET(res);
4287 BIF_ERROR(BIF_P, SYSTEM_LIMIT);
4288 }
4289 else if (ERTS_IS_ATOM_STR("dist_ctrl", tp[1])) {
4290 Eterm res = am_undefined;
4291 DistEntry *dep = erts_sysname_to_connected_dist_entry(tp[2]);
4292 if (dep) {
4293 erts_de_rlock(dep);
4294 if (is_internal_port(dep->cid) || is_internal_pid(dep->cid))
4295 res = dep->cid;
4296 erts_de_runlock(dep);
4297 }
4298 BIF_RET(res);
4299 }
4300 else if (ERTS_IS_ATOM_STR("atom_out_cache_index", tp[1])) {
4301 /* Used by distribution_SUITE (emulator) */
4302 if (is_atom(tp[2])) {
4303 BIF_RET(make_small(
4304 (Uint)
4305 erts_debug_atom_to_out_cache_index(tp[2])));
4306 }
4307 }
4308 else if (ERTS_IS_ATOM_STR("fake_scheduler_bindings", tp[1])) {
4309 return erts_fake_scheduler_bindings(BIF_P, tp[2]);
4310 }
4311 else if (ERTS_IS_ATOM_STR("reader_groups_map", tp[1])) {
4312 Sint groups;
4313 if (is_not_small(tp[2]))
4314 BIF_ERROR(BIF_P, BADARG);
4315 groups = signed_val(tp[2]);
4316 if (groups < (Sint) 1 || groups > (Sint) INT_MAX)
4317 BIF_ERROR(BIF_P, BADARG);
4318
4319 BIF_RET(erts_debug_reader_groups_map(BIF_P, (int) groups));
4320 }
4321 else if (ERTS_IS_ATOM_STR("internal_hash", tp[1])) {
4322 Uint hash = (Uint) make_internal_hash(tp[2], 0);
4323 Uint hsz = 0;
4324 Eterm* hp;
4325 erts_bld_uint(NULL, &hsz, hash);
4326 hp = HAlloc(BIF_P,hsz);
4327 return erts_bld_uint(&hp, NULL, hash);
4328 }
4329 else if (ERTS_IS_ATOM_STR("atom", tp[1])) {
4330 Uint ix;
4331 if (!term_to_Uint(tp[2], &ix))
4332 BIF_ERROR(BIF_P, BADARG);
4333 while (ix >= atom_table_size()) {
4334 char tmp[20];
4335 erts_snprintf(tmp, sizeof(tmp), "am%x", atom_table_size());
4336 erts_atom_put((byte *) tmp, sys_strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1);
4337 }
4338 return make_atom(ix);
4339 }
4340 else if (ERTS_IS_ATOM_STR("magic_ref", tp[1])) {
4341 Binary *bin;
4342 UWord bin_addr, refc;
4343 Eterm bin_addr_term, refc_term, test_type;
4344 Uint sz;
4345 Eterm *hp;
4346 if (!is_internal_magic_ref(tp[2])) {
4347 if (is_internal_ordinary_ref(tp[2])) {
4348 ErtsORefThing *rtp;
4349 rtp = (ErtsORefThing *) internal_ref_val(tp[2]);
4350 if (erts_is_ref_numbers_magic(rtp->num))
4351 BIF_RET(am_true);
4352 }
4353 BIF_RET(am_false);
4354 }
4355 bin = erts_magic_ref2bin(tp[2]);
4356 refc = erts_refc_read(&bin->intern.refc, 1);
4357 bin_addr = (UWord) bin;
4358 sz = 4;
4359 erts_bld_uword(NULL, &sz, bin_addr);
4360 erts_bld_uword(NULL, &sz, refc);
4361 hp = HAlloc(BIF_P, sz);
4362 bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
4363 refc_term = erts_bld_uword(&hp, NULL, refc);
4364 test_type = (ERTS_MAGIC_BIN_DESTRUCTOR(bin) == empty_magic_ref_destructor
4365 ? am_true : am_false);
4366 BIF_RET(TUPLE3(hp, bin_addr_term, refc_term, test_type));
4367 }
4368
4369 break;
4370 }
4371 case 3: {
4372 if (ERTS_IS_ATOM_STR("check_time_config", tp[1])) {
4373 int res, time_correction;
4374 ErtsTimeWarpMode time_warp_mode;
4375 if (tp[2] == am_true)
4376 time_correction = !0;
4377 else if (tp[2] == am_false)
4378 time_correction = 0;
4379 else
4380 break;
4381 if (ERTS_IS_ATOM_STR("no_time_warp", tp[3]))
4382 time_warp_mode = ERTS_NO_TIME_WARP_MODE;
4383 else if (ERTS_IS_ATOM_STR("single_time_warp", tp[3]))
4384 time_warp_mode = ERTS_SINGLE_TIME_WARP_MODE;
4385 else if (ERTS_IS_ATOM_STR("multi_time_warp", tp[3]))
4386 time_warp_mode = ERTS_MULTI_TIME_WARP_MODE;
4387 else
4388 break;
4389 res = erts_check_time_adj_support(time_correction,
4390 time_warp_mode);
4391 BIF_RET(res ? am_true : am_false);
4392 }
4393 else if (ERTS_IS_ATOM_STR("make_unique_integer", tp[1])) {
4394 Eterm res = erts_debug_make_unique_integer(BIF_P,
4395 tp[2],
4396 tp[3]);
4397 if (is_non_value(res))
4398 break;
4399 BIF_RET(res);
4400 }
4401 break;
4402 }
4403 default:
4404 break;
4405 }
4406 }
4407 BIF_ERROR(BIF_P, BADARG);
4408 }
4409
erts_internal_is_system_process_1(BIF_ALIST_1)4410 BIF_RETTYPE erts_internal_is_system_process_1(BIF_ALIST_1)
4411 {
4412 if (is_internal_pid(BIF_ARG_1)) {
4413 Process *rp = erts_proc_lookup(BIF_ARG_1);
4414 if (rp && (rp->static_flags & ERTS_STC_FLG_SYSTEM_PROC))
4415 BIF_RET(am_true);
4416 BIF_RET(am_false);
4417 }
4418
4419 if (is_external_pid(BIF_ARG_1)
4420 && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry) {
4421 BIF_RET(am_false);
4422 }
4423
4424 BIF_ERROR(BIF_P, BADARG);
4425 }
4426
erts_internal_system_check_1(BIF_ALIST_1)4427 BIF_RETTYPE erts_internal_system_check_1(BIF_ALIST_1)
4428 {
4429 Eterm res;
4430 if (ERTS_IS_ATOM_STR("schedulers", BIF_ARG_1)) {
4431 res = erts_system_check_request(BIF_P);
4432 if (is_non_value(res))
4433 BIF_RET(am_undefined);
4434 BIF_TRAP1(gather_system_check_res_trap, BIF_P, res);
4435 }
4436
4437 BIF_ERROR(BIF_P, BADARG);
4438 }
4439
4440 static erts_atomic_t hipe_test_reschedule_flag;
4441
4442 #if defined(VALGRIND) && defined(__GNUC__)
4443 /* Force noinline for valgrind suppression */
4444 static void broken_halt_test(Eterm bif_arg_2) __attribute__((noinline));
4445 #endif
4446
/* Deliberately crash-dump the emulator from a broken state.
 *
 * Ugly ugly code used by bif_SUITE:erlang_halt/1: the run queue pointer
 * of the current scheduler is cleared first so that the dumping code has
 * to cope with inconsistent scheduler data. Never returns.
 */
static void broken_halt_test(Eterm bif_arg_2)
{
    /* Ugly ugly code used by bif_SUITE:erlang_halt/1 */
#if defined(ERTS_HAVE_TRY_CATCH)
    erts_get_scheduler_data()->run_queue = NULL;
#endif
    erts_exit(ERTS_DUMP_EXIT, "%T", bif_arg_2);
}
4455
4456 static void
test_multizero_timeout_in_timeout3(void * vproc)4457 test_multizero_timeout_in_timeout3(void *vproc)
4458 {
4459 Process *proc = (Process *) vproc;
4460 ErtsMessage *mp = erts_alloc_message(0, NULL);
4461 ERTS_DECL_AM(multizero_timeout_in_timeout_done);
4462 erts_queue_message(proc, 0, mp, AM_multizero_timeout_in_timeout_done, am_system);
4463 erts_proc_dec_refc(proc);
4464 }
4465
/* Second zero-length timer in the chain; schedules the final step. */
static void
test_multizero_timeout_in_timeout2(void *vproc)
{
    erts_start_timer_callback(0, test_multizero_timeout_in_timeout3, vproc);
}
4471
/* First zero-length timer in the chain; exercises repeated zero timeouts
 * fired from within timeout callbacks (see the 'multizero_timeout_in_timeout'
 * branch of erts_debug_set_internal_state_2). */
static void
test_multizero_timeout_in_timeout(void *vproc)
{
    erts_start_timer_callback(0, test_multizero_timeout_in_timeout2, vproc);
}
4477
/* erts_debug:set_internal_state/2
 *
 * Mutates emulator-internal state for OTP test suites. Except for the
 * 'available_internal_state' toggle itself, every branch requires that
 * access has been enabled first. Each branch documents which test suite
 * uses it; unknown keys or malformed values fall through to badarg.
 */
BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
{
    /*
     * NOTE: Only supposed to be used for testing, and debugging.
     */
    if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)
        && (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)) {
        /* Enable/disable access to all other branches; enabling is logged
         * as a warning so it cannot happen unnoticed. Returns the previous
         * setting. */
        erts_aint_t on = (erts_aint_t) (BIF_ARG_2 == am_true);
        erts_aint_t prev_on = erts_atomic_xchg_nob(&available_internal_state, on);
        if (on) {
            erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
            erts_dsprintf(dsbufp, "Process %T ", BIF_P->common.id);
            if (erts_is_alive)
                erts_dsprintf(dsbufp, "on node %T ", erts_this_node->sysname);
            erts_dsprintf(dsbufp,
                          "enabled access to the emulator internal state.\n");
            erts_dsprintf(dsbufp,
                          "NOTE: This is an erts internal test feature and "
                          "should *only* be used by OTP test-suites.\n");
            erts_send_warning_to_logger(BIF_P->group_leader, dsbufp);
        }
        BIF_RET(prev_on ? am_true : am_false);
    }

    if (!erts_atomic_read_nob(&available_internal_state)) {
        BIF_ERROR(BIF_P, EXC_UNDEF);
    }

    if (is_atom(BIF_ARG_1)) {

        if (ERTS_IS_ATOM_STR("reds_left", BIF_ARG_1)) {
            /* Set the reductions left for the caller (clamped to
             * [0, CONTEXT_REDS]); with a save-calls buffer active fcalls
             * is offset by -CONTEXT_REDS. */
            Sint reds;
            if (term_to_Sint(BIF_ARG_2, &reds) != 0) {
                if (0 <= reds && reds <= CONTEXT_REDS) {
                    if (!ERTS_PROC_GET_SAVED_CALLS_BUF(BIF_P))
                        BIF_P->fcalls = reds;
                    else
                        BIF_P->fcalls = reds - CONTEXT_REDS;
                    BIF_P->scheduler_data->virtual_reds = 0;
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("block", BIF_ARG_1)
                 || ERTS_IS_ATOM_STR("sleep", BIF_ARG_1)) {
            /* Sleep for the given number of milliseconds; 'block' also
             * blocks thread progress (stalls all managed threads) for the
             * duration. The main lock is released while sleeping. */
            int block = ERTS_IS_ATOM_STR("block", BIF_ARG_1);
            Sint ms;
            if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
                if (ms > 0) {
                    erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                    if (block)
                        erts_thr_progress_block();
                    while (erts_milli_sleep((long) ms) != 0);
                    if (block)
                        erts_thr_progress_unblock();
                    erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("block_scheduler", BIF_ARG_1)) {
            /* Sleep without blocking thread progress; only this scheduler
             * thread is stalled. */
            Sint ms;
            if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
                if (ms > 0) {
                    erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                    while (erts_milli_sleep((long) ms) != 0);
                    erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1)
                 || ERTS_IS_ATOM_STR("next_port", BIF_ARG_1)) {
            /* Used by node_container_SUITE (emulator) */
            Uint next;

            if (term_to_Uint(BIF_ARG_2, &next) != 0) {
                Sint res;

                if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
                    res = erts_ptab_test_next_id(&erts_proc, 1, next);
                else
                    res = erts_ptab_test_next_id(&erts_port, 1, next);
                /* Negative result means the id could not be set. */
                if (res < 0)
                    BIF_RET(am_false);
                BIF_RET(erts_make_integer(res, BIF_P));
            }
        }
        else if (ERTS_IS_ATOM_STR("force_gc", BIF_ARG_1)) {
            /* Used by signal_SUITE (emulator) */
            /* NOTE(review): the GC is forced on BIF_P, not on the looked-up
             * process rp, and rp's main lock does not appear to be released
             * when rp != BIF_P — confirm this is intentional. */
            Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
                                        BIF_ARG_2, ERTS_PROC_LOCK_MAIN);
            if (!rp) {
                BIF_RET(am_false);
            }
            else {
                ERTS_FORCE_GC(BIF_P);
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("gc_state", BIF_ARG_1)) {
            /* Used by process_SUITE (emulator) */
            /* Enable/disable garbage collection for the caller; returns
             * the previous state. */
            int res, enable;

            switch (BIF_ARG_2) {
            case am_true: enable = 1; break;
            case am_false: enable = 0; break;
            default: BIF_ERROR(BIF_P, BADARG); break;
            }

            res = (BIF_P->flags & F_DISABLE_GC) ? am_false : am_true;
            erts_set_gc_state(BIF_P, enable);
            BIF_RET(res);
        }
        else if (ERTS_IS_ATOM_STR("inconsistent_heap", BIF_ARG_1)) {
            /* Used by code_SUITE (emulator) */
            if (am_start == BIF_ARG_2) {
                /* Plant a deliberately broken tuple on the heap (with GC
                 * disabled so it is not walked) and hand it back... */
                Eterm broken_term;
                Eterm *hp;

                ERTS_ASSERT(!(BIF_P->flags & F_DISABLE_GC));
                erts_set_gc_state(BIF_P, 0);

                hp = HAlloc(BIF_P, 2);
                hp[0] = make_arityval(1234);
                hp[1] = THE_NON_VALUE;

                broken_term = make_tuple(hp);

                BIF_RET(broken_term);
            } else {
                /* ...and later repair it in place and re-enable GC. */
                Eterm broken_term;
                Eterm *hp;

                broken_term = BIF_ARG_2;

                hp = tuple_val(broken_term);
                ERTS_ASSERT(hp[0] == make_arityval(1234));
                ERTS_ASSERT(hp[1] == THE_NON_VALUE);
                hp[0] = make_arityval(1);
                hp[1] = am_ok;

                ERTS_ASSERT(BIF_P->flags & F_DISABLE_GC);
                erts_set_gc_state(BIF_P, 1);

                BIF_RET(am_ok);
            }
        }
        else if (ERTS_IS_ATOM_STR("colliding_names", BIF_ARG_1)) {
            /* Used by ets_SUITE (stdlib) */
            /* {Name, Count} -> list of atoms that hash-collide with Name. */
            if (is_tuple(BIF_ARG_2)) {
                Eterm* tpl = tuple_val(BIF_ARG_2);
                Uint cnt;
                if (arityval(tpl[0]) == 2 && is_atom(tpl[1]) &&
                    term_to_Uint(tpl[2], &cnt)) {
                    BIF_RET(erts_ets_colliding_names(BIF_P,tpl[1],cnt));
                }
            }
        }
        else if (ERTS_IS_ATOM_STR("binary_loop_limit", BIF_ARG_1)) {
            /* Used by binary_module_SUITE (stdlib) */
            /* 'default' restores the limit; returns the previous value. */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                max_loops = erts_binary_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_binary_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("re_loop_limit", BIF_ARG_1)) {
            /* Used by re_SUITE (stdlib) */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                max_loops = erts_re_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_re_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("unicode_loop_limit", BIF_ARG_1)) {
            /* Used by unicode_SUITE (stdlib) */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                max_loops = erts_unicode_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_unicode_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_suspend", BIF_ARG_1)) {
            /* Used by hipe test suites */
            /* First call with a non-false argument suspends the caller and
             * yields back into this BIF; the second (resumed) call clears
             * the flag. */
            erts_aint_t flag = erts_atomic_read_nob(&hipe_test_reschedule_flag);
            if (!flag && BIF_ARG_2 != am_false) {
                erts_atomic_set_nob(&hipe_test_reschedule_flag, 1);
                erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
                ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_set_internal_state_2],
                                BIF_P, BIF_ARG_1, BIF_ARG_2);
            }
            erts_atomic_set_nob(&hipe_test_reschedule_flag, !flag);
            BIF_RET(NIL);
        }
        else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_resume", BIF_ARG_1)) {
            /* Used by hipe test suites */
            /* Resume a process suspended via the branch above. */
            Eterm res = am_false;
            Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
                                        BIF_ARG_2, ERTS_PROC_LOCK_STATUS);
            if (rp) {
                erts_resume(rp, ERTS_PROC_LOCK_STATUS);
                res = am_true;
                erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
            }
            BIF_RET(res);
        }
        else if (ERTS_IS_ATOM_STR("test_long_gc_sleep", BIF_ARG_1)) {
            /* Inject an artificial sleep into garbage collection. */
            if (term_to_Uint(BIF_ARG_2, &erts_test_long_gc_sleep) > 0)
                BIF_RET(am_true);
        }
        else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) {
            /* Abort the emulator, printing the argument. Never returns. */
            erts_exit(ERTS_ABORT_EXIT, "%T\n", BIF_ARG_2);
        }
        else if (ERTS_IS_ATOM_STR("kill_dist_connection", BIF_ARG_1)) {
            /* Forcefully take down the connection to the named node. */
            DistEntry *dep = erts_sysname_to_connected_dist_entry(BIF_ARG_2);
            if (!dep)
                BIF_RET(am_false);
            else {
                Uint32 con_id;
                erts_de_rlock(dep);
                con_id = dep->connection_id;
                erts_de_runlock(dep);
                erts_kill_dist_connection(dep, con_id);
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) {
            /* Wait until outstanding deallocations, timer cancellations or
             * aux work have completed; yields 'ok' when done. */
            int flag = 0;
            if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2))
                flag = ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS;
            else if (ERTS_IS_ATOM_STR("timer_cancellations", BIF_ARG_2))
                flag = ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS;
            else if (ERTS_IS_ATOM_STR("aux_work", BIF_ARG_2))
                flag = ERTS_DEBUG_WAIT_COMPLETED_AUX_WORK;

            if (flag && erts_debug_wait_completed(BIF_P, flag)) {
                ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
            }
        }
        else if (ERTS_IS_ATOM_STR("broken_halt", BIF_ARG_1)) {
            /* Crash-dump from a deliberately broken state; never returns. */
            erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
            broken_halt_test(BIF_ARG_2);
        }
        else if (ERTS_IS_ATOM_STR("unique_monotonic_integer_state", BIF_ARG_1)) {
            int res = erts_debug_set_unique_monotonic_integer_state(BIF_ARG_2);
            BIF_RET(res ? am_true : am_false);
        }
        else if (ERTS_IS_ATOM_STR("node_tab_delayed_delete", BIF_ARG_1)) {
            /* node_container_SUITE */
            Sint64 msecs;
            if (term_to_Sint64(BIF_ARG_2, &msecs)) {
                /* Negative value restore original value... */
                erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                erts_debug_test_node_tab_delayed_delete(msecs);
                erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
                BIF_RET(am_ok);
            }
        }
        else if (ERTS_IS_ATOM_STR("fill_heap", BIF_ARG_1)) {
            /* Consume the remaining heap with a dummy bignum header;
             * 'true' additionally requests a fullsweep on next GC. */
            UWord left = HeapWordsLeft(BIF_P);
            if (left > 1) {
                Eterm* hp = HAlloc(BIF_P, left);
                *hp = make_pos_bignum_header(left - 1);
            }
            if (BIF_ARG_2 == am_true) {
                FLAGS(BIF_P) |= F_NEED_FULLSWEEP;
            }
            BIF_RET(am_ok);
        }
        else if (ERTS_IS_ATOM_STR("make", BIF_ARG_1)) {
            if (ERTS_IS_ATOM_STR("magic_ref", BIF_ARG_2)) {
                /* Create a fresh empty magic binary and return
                 * {MagicRef, BinAddr} for inspection via the 'magic_ref'
                 * branch of erts_debug_get_internal_state/1. */
                Binary *bin = erts_create_magic_binary(0, empty_magic_ref_destructor);
                UWord bin_addr = (UWord) bin;
                Eterm bin_addr_term, magic_ref, res;
                Eterm *hp;
                Uint sz = ERTS_MAGIC_REF_THING_SIZE + 3;
                erts_bld_uword(NULL, &sz, bin_addr);
                hp = HAlloc(BIF_P, sz);
                bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
                magic_ref = erts_mk_magic_ref(&hp, &BIF_P->off_heap, bin);
                res = TUPLE2(hp, magic_ref, bin_addr_term);
                BIF_RET(res);
            }
        }
        else if (ERTS_IS_ATOM_STR("binary", BIF_ARG_1)) {
            /* Allocate a zeroed off-heap binary of the given size.
             * NOTE(review): a negative size is accepted by term_to_Sint64
             * and passed straight to the allocator — presumably the caller
             * only passes non-negative sizes; confirm. */
            Sint64 size;
            if (term_to_Sint64(BIF_ARG_2, &size)) {
                Binary* refbin = erts_bin_drv_alloc_fnf(size);
                if (!refbin)
                    BIF_RET(am_false);
                sys_memset(refbin->orig_bytes, 0, size);
                BIF_RET(erts_build_proc_bin(&MSO(BIF_P),
                                            HAlloc(BIF_P, PROC_BIN_SIZE),
                                            refbin));
            }
        }
        else if (ERTS_IS_ATOM_STR("ets_force_trap", BIF_ARG_1)) {
#ifdef ETS_DBG_FORCE_TRAP
            erts_ets_dbg_force_trap = (BIF_ARG_2 == am_true) ? 1 : 0;
            BIF_RET(am_ok);
#else
            BIF_RET(am_notsup);
#endif
        }
        else if (ERTS_IS_ATOM_STR("ets_force_split", BIF_ARG_1)) {
            /* {Table, Bool}: force splitting of a catree table. */
            if (is_tuple(BIF_ARG_2)) {
                Eterm* tpl = tuple_val(BIF_ARG_2);

                if (erts_ets_force_split(tpl[1], tpl[2] == am_true))
                    BIF_RET(am_ok);
            }
        }
        else if (ERTS_IS_ATOM_STR("ets_debug_random_split_join", BIF_ARG_1)) {
            if (is_tuple(BIF_ARG_2)) {
                Eterm* tpl = tuple_val(BIF_ARG_2);

                if (erts_ets_debug_random_split_join(tpl[1], tpl[2] == am_true))
                    BIF_RET(am_ok);
            }
        }
        else if (ERTS_IS_ATOM_STR("mbuf", BIF_ARG_1)) {
            /* Copy the argument into a fresh heap fragment attached to the
             * caller's mbuf list and return the copy. */
            Uint sz = size_object(BIF_ARG_2);
            ErlHeapFragment* frag = new_message_buffer(sz);
            Eterm *hp = frag->mem;
            Eterm copy = copy_struct(BIF_ARG_2, sz, &hp, &frag->off_heap);
            frag->next = BIF_P->mbuf;
            BIF_P->mbuf = frag;
            BIF_P->mbuf_sz += sz;
            BIF_RET(copy);
        }
        else if (ERTS_IS_ATOM_STR("multizero_timeout_in_timeout", BIF_ARG_1)) {
            /* Start a chain of three timers (the last two with zero
             * timeout); a done-message is sent when the chain completes.
             * The refc taken here is dropped by the final callback. */
            Sint64 timeout;
            if (term_to_Sint64(BIF_ARG_2, &timeout)) {
                if (timeout < 0)
                    timeout = 0;
                erts_proc_inc_refc(BIF_P);
                erts_start_timer_callback((ErtsMonotonicTime) timeout,
                                          test_multizero_timeout_in_timeout,
                                          (void *) BIF_P);
                BIF_RET(am_ok);
            }
        }
    }

    BIF_ERROR(BIF_P, BADARG);
}
4834
4835 static BIF_RETTYPE
gather_histograms_helper(Process * c_p,Eterm arg_tuple,int gather (Process *,int,int,int,UWord,Eterm))4836 gather_histograms_helper(Process * c_p, Eterm arg_tuple,
4837 int gather(Process *, int, int, int, UWord, Eterm))
4838 {
4839 SWord hist_start, hist_width, sched_id;
4840 int msg_count, alloc_num;
4841 Eterm *args;
4842
4843 /* This is an internal BIF, so the error checking is mostly left to erlang
4844 * code. */
4845
4846 ASSERT(is_tuple_arity(arg_tuple, 5));
4847 args = tuple_val(arg_tuple);
4848
4849 for (alloc_num = ERTS_ALC_A_MIN; alloc_num <= ERTS_ALC_A_MAX; alloc_num++) {
4850 if(erts_is_atom_str(ERTS_ALC_A2AD(alloc_num), args[1], 0)) {
4851 break;
4852 }
4853 }
4854
4855 if (alloc_num > ERTS_ALC_A_MAX) {
4856 BIF_ERROR(c_p, BADARG);
4857 }
4858
4859 sched_id = signed_val(args[2]);
4860 hist_width = signed_val(args[3]);
4861 hist_start = signed_val(args[4]);
4862
4863 if (sched_id < 0 || sched_id > erts_no_schedulers) {
4864 BIF_ERROR(c_p, BADARG);
4865 }
4866
4867 msg_count = gather(c_p, alloc_num, sched_id, hist_width, hist_start, args[5]);
4868
4869 BIF_RET(make_small(msg_count));
4870 }
4871
/* erts_internal:gather_alloc_histograms/1
 *
 * Gathers per-allocator block-size histograms; argument unpacking and
 * validation live in gather_histograms_helper(). */
BIF_RETTYPE erts_internal_gather_alloc_histograms_1(BIF_ALIST_1)
{
    return gather_histograms_helper(BIF_P, BIF_ARG_1,
                                    erts_alcu_gather_alloc_histograms);
}
4877
/* erts_internal:gather_carrier_info/1
 *
 * Gathers per-allocator carrier information; argument unpacking and
 * validation live in gather_histograms_helper(). */
BIF_RETTYPE erts_internal_gather_carrier_info_1(BIF_ALIST_1)
{
    return gather_histograms_helper(BIF_P, BIF_ARG_1,
                                    erts_alcu_gather_carrier_info);
}
4883
4884 #ifdef ERTS_ENABLE_LOCK_COUNT
4885
/* One sampled lock-info entry; the entry is retained while sampled. */
typedef struct {
    /* info->location_count may increase between size calculation and term
     * building, so we cap it at the value sampled in lcnt_build_result_vector.
     *
     * Shrinking is safe though. */
    int max_location_count;
    erts_lcnt_lock_info_t *info;
} lcnt_sample_t;

/* Growable array of samples; built by lcnt_build_sample_vector() and
 * released with lcnt_destroy_sample_vector(). */
typedef struct lcnt_sample_vector_ {
    lcnt_sample_t *elements;
    size_t size;
} lcnt_sample_vector_t;
4899
lcnt_build_sample_vector(erts_lcnt_lock_info_list_t * list)4900 static lcnt_sample_vector_t lcnt_build_sample_vector(erts_lcnt_lock_info_list_t *list) {
4901 erts_lcnt_lock_info_t *iterator;
4902 lcnt_sample_vector_t result;
4903 size_t allocated_entries;
4904
4905 allocated_entries = 64;
4906 result.size = 0;
4907
4908 result.elements = erts_alloc(ERTS_ALC_T_LCNT_VECTOR,
4909 allocated_entries * sizeof(lcnt_sample_t));
4910
4911 iterator = NULL;
4912 while(erts_lcnt_iterate_list(list, &iterator)) {
4913 erts_lcnt_retain_lock_info(iterator);
4914
4915 result.elements[result.size].max_location_count = iterator->location_count;
4916 result.elements[result.size].info = iterator;
4917
4918 result.size++;
4919
4920 if(result.size >= allocated_entries) {
4921 allocated_entries *= 2;
4922
4923 result.elements = erts_realloc(ERTS_ALC_T_LCNT_VECTOR, result.elements,
4924 allocated_entries * sizeof(lcnt_sample_t));
4925 }
4926 }
4927
4928 return result;
4929 }
4930
static void lcnt_destroy_sample_vector(lcnt_sample_vector_t *vector) {
    /* Release every retained lock-info entry, then the vector storage. */
    size_t index;

    for(index = 0; index < vector->size; index++) {
        erts_lcnt_release_lock_info(vector->elements[index].info);
    }

    erts_free(ERTS_ALC_T_LCNT_VECTOR, vector->elements);
}
4940
4941 /* The size of an integer is not guaranteed to be constant since we're walking
4942 * over live data, and may cross over into bignum territory between size calc
4943 * and the actual build. This takes care of that through always assuming the
4944 * worst, but needs to be fixed up with HRelease once the final term has been
4945 * built. */
bld_unstable_uint64(Uint ** hpp,Uint * szp,Uint64 ui)4946 static ERTS_INLINE Eterm bld_unstable_uint64(Uint **hpp, Uint *szp, Uint64 ui) {
4947 Eterm res = THE_NON_VALUE;
4948
4949 if(szp) {
4950 *szp += ERTS_UINT64_HEAP_SIZE(~((Uint64) 0));
4951 }
4952
4953 if(hpp) {
4954 if (IS_USMALL(0, ui)) {
4955 res = make_small(ui);
4956 } else {
4957 res = erts_uint64_to_big(ui, hpp);
4958 }
4959 }
4960
4961 return res;
4962 }
4963
/* Prepend one per-location statistics entry to the list 'res'.
 *
 * Follows the usual two-pass convention: with szp set only the required
 * heap size is accumulated; with hpp set the term is actually built. */
static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_stats_t *stats, Eterm res) {
    unsigned int i;
    const char *file;

    Eterm af, uil;
    Eterm uit, uic;
    Eterm uits, uitns, uitn;
    Eterm tt, tstat, tloc, t;
    Eterm thist, vhist[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];

    /* term:
     * [{{file, line},
     *   {tries, colls, {seconds, nanoseconds, n_blocks}},
     *   { .. histogram .. }] */

    file = stats->file ? stats->file : "undefined";

    af = erts_atom_put((byte *)file, sys_strlen(file), ERTS_ATOM_ENC_LATIN1, 1);
    uil = erts_bld_uint( hpp, szp, stats->line);
    tloc = erts_bld_tuple(hpp, szp, 2, af, uil);

    /* Counters are read with plain atomic reads; values may be slightly
     * stale but are built with worst-case size (bld_unstable_uint64). */
    uit = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->attempts));
    uic = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->collisions));

    uits = bld_unstable_uint64(hpp, szp, stats->total_time_waited.s);
    uitns = bld_unstable_uint64(hpp, szp, stats->total_time_waited.ns);
    uitn = bld_unstable_uint64(hpp, szp, stats->times_waited);
    tt = erts_bld_tuple(hpp, szp, 3, uits, uitns, uitn);

    tstat = erts_bld_tuple(hpp, szp, 3, uit, uic, tt);

    for(i = 0; i < ERTS_LCNT_HISTOGRAM_SLOT_SIZE; i++) {
        vhist[i] = bld_unstable_uint64(hpp, szp, stats->wait_time_histogram.ns[i]);
    }

    thist = erts_bld_tuplev(hpp, szp, ERTS_LCNT_HISTOGRAM_SLOT_SIZE, vhist);

    t = erts_bld_tuple(hpp, szp, 3, tloc, tstat, thist);
    res = erts_bld_cons( hpp, szp, t, res);

    return res;
}
5006
lcnt_pretty_print_lock_id(erts_lcnt_lock_info_t * info)5007 static Eterm lcnt_pretty_print_lock_id(erts_lcnt_lock_info_t *info) {
5008 Eterm id = info->id;
5009
5010 if((info->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_FLAGS_TYPE_PROCLOCK) {
5011 /* Use registered names as id's for process locks if available. Thread
5012 * progress is delayed since we may be running on a dirty scheduler. */
5013 ErtsThrPrgrDelayHandle delay_handle;
5014 Process *process;
5015
5016 delay_handle = erts_thr_progress_unmanaged_delay();
5017
5018 process = erts_proc_lookup(info->id);
5019 if (process && process->common.u.alive.reg) {
5020 id = process->common.u.alive.reg->name;
5021 }
5022
5023 erts_thr_progress_unmanaged_continue(delay_handle);
5024 } else if(info->flags & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR) {
5025 if(is_small(id) && !sys_strcmp(info->name, "alcu_allocator")) {
5026 const char *name = (const char*)ERTS_ALC_A2AD(signed_val(id));
5027 id = erts_atom_put((byte*)name, sys_strlen(name), ERTS_ATOM_ENC_LATIN1, 1);
5028 }
5029 }
5030
5031 return id;
5032 }
5033
/* Prepend one lock entry {Name, Id, Type, Stats} to the list 'res'.
 *
 * Two-pass builder: sizing pass with hpp == NULL, build pass otherwise. */
static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, lcnt_sample_t *sample, Eterm res) {
    erts_lcnt_lock_info_t *info = sample->info;

    Eterm name, type, id, stats = NIL, t;
    const char *lock_desc;
    int i;

    /* term: [{name, id, type, stats()}] */

    ASSERT(info->name);

    lock_desc = erts_lock_flags_get_type_name(info->flags);

    type = erts_atom_put((byte*)lock_desc, sys_strlen(lock_desc), ERTS_ATOM_ENC_LATIN1, 1);
    name = erts_atom_put((byte*)info->name, sys_strlen(info->name), ERTS_ATOM_ENC_LATIN1, 1);

    /* Only attempt to resolve ids when actually emitting the term. This ought
     * to be safe since all immediates are the same size. */
    if(hpp != NULL) {
        id = lcnt_pretty_print_lock_id(info);
    } else {
        id = NIL;
    }

    /* location_count may have grown since the sample was taken; never walk
     * past the count sampled into max_location_count. */
    for(i = 0; i < MIN(info->location_count, sample->max_location_count); i++) {
        stats = lcnt_build_lock_stats_term(hpp, szp, &(info->location_stats[i]), stats);
    }

    t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
    res = erts_bld_cons(hpp, szp, t, res);

    return res;
}
5067
/* Build the full lock-count result
 * [{'duration', {Seconds, Nanoseconds}}, {'locks', Locks}], prepended to
 * 'res'. Two-pass builder: sizing pass with hpp == NULL, build pass
 * otherwise.
 *
 * Fix: the current_locks iteration used the mojibake token "¤t_locks"
 * (an HTML-entity mangled "&current_locks"), which is not valid C; restored
 * to &current_locks->elements[i]. */
static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_time_t *duration,
                                    lcnt_sample_vector_t *current_locks,
                                    lcnt_sample_vector_t *deleted_locks, Eterm res) {
    const char *str_duration = "duration";
    const char *str_locks = "locks";

    Eterm dts, dtns, tdt, adur, tdur, aloc, lloc = NIL, tloc;
    size_t i;

    /* term: [{'duration', {seconds, nanoseconds}}, {'locks', locks()}] */

    /* duration tuple */
    dts = bld_unstable_uint64(hpp, szp, duration->s);
    dtns = bld_unstable_uint64(hpp, szp, duration->ns);
    tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns);

    adur = erts_atom_put((byte *)str_duration, sys_strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
    tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt);

    /* lock tuple */
    aloc = erts_atom_put((byte *)str_locks, sys_strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);

    for(i = 0; i < current_locks->size; i++) {
        lloc = lcnt_build_lock_term(hpp, szp, &current_locks->elements[i], lloc);
    }

    for(i = 0; i < deleted_locks->size; i++) {
        lloc = lcnt_build_lock_term(hpp, szp, &deleted_locks->elements[i], lloc);
    }

    tloc = erts_bld_tuple(hpp, szp, 2, aloc, lloc);

    res = erts_bld_cons(hpp, szp, tloc, res);
    res = erts_bld_cons(hpp, szp, tdur, res);

    return res;
}
5105
/* Maps the category names accepted by erts_debug:lcnt_control to their
 * lock-flag bits; terminated by a NULL name. */
static struct {
    const char *name;
    erts_lock_flags_t flag;
} lcnt_category_map[] = {
    {"allocator", ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR},
    {"db", ERTS_LOCK_FLAGS_CATEGORY_DB},
    {"debug", ERTS_LOCK_FLAGS_CATEGORY_DEBUG},
    {"distribution", ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION},
    {"generic", ERTS_LOCK_FLAGS_CATEGORY_GENERIC},
    {"io", ERTS_LOCK_FLAGS_CATEGORY_IO},
    {"process", ERTS_LOCK_FLAGS_CATEGORY_PROCESS},
    {"scheduler", ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER},
    {NULL, 0}
};
5120
lcnt_atom_to_lock_category(Eterm atom)5121 static erts_lock_flags_t lcnt_atom_to_lock_category(Eterm atom) {
5122 int i = 0;
5123
5124 for(i = 0; lcnt_category_map[i].name != NULL; i++) {
5125 if(erts_is_atom_str(lcnt_category_map[i].name, atom, 0)) {
5126 return lcnt_category_map[i].flag;
5127 }
5128 }
5129
5130 return 0;
5131 }
5132
static Eterm lcnt_build_category_list(Eterm **hpp, Uint *szp, erts_lock_flags_t mask) {
    /* Build the list of category atoms enabled in 'mask'. Two-pass
     * builder: sizing pass with hpp == NULL, build pass otherwise. */
    Eterm list = NIL;
    int index;

    for(index = 0; lcnt_category_map[index].name != NULL; index++) {
        if(mask & lcnt_category_map[index].flag) {
            const char *category_name = lcnt_category_map[index].name;
            Eterm category = erts_atom_put((byte*)category_name,
                                           sys_strlen(category_name),
                                           ERTS_ATOM_ENC_UTF8, 0);

            list = erts_bld_cons(hpp, szp, category, list);
        }
    }

    return list;
}
5151
5152 #endif
5153
erts_debug_lcnt_clear_0(BIF_ALIST_0)5154 BIF_RETTYPE erts_debug_lcnt_clear_0(BIF_ALIST_0)
5155 {
5156 #ifndef ERTS_ENABLE_LOCK_COUNT
5157 BIF_RET(am_error);
5158 #else
5159 erts_lcnt_clear_counters();
5160
5161 BIF_RET(am_ok);
5162 #endif
5163 }
5164
erts_debug_lcnt_collect_0(BIF_ALIST_0)5165 BIF_RETTYPE erts_debug_lcnt_collect_0(BIF_ALIST_0)
5166 {
5167 #ifndef ERTS_ENABLE_LOCK_COUNT
5168 BIF_RET(am_error);
5169 #else
5170 lcnt_sample_vector_t current_locks, deleted_locks;
5171 erts_lcnt_data_t data;
5172
5173 Eterm *term_heap_start, *term_heap_end;
5174 Uint term_heap_size = 0;
5175 Eterm result;
5176
5177 data = erts_lcnt_get_data();
5178
5179 current_locks = lcnt_build_sample_vector(data.current_locks);
5180 deleted_locks = lcnt_build_sample_vector(data.deleted_locks);
5181
5182 lcnt_build_result_term(NULL, &term_heap_size, &data.duration,
5183 ¤t_locks, &deleted_locks, NIL);
5184
5185 term_heap_start = HAlloc(BIF_P, term_heap_size);
5186 term_heap_end = term_heap_start;
5187
5188 result = lcnt_build_result_term(&term_heap_end, NULL,
5189 &data.duration, ¤t_locks, &deleted_locks, NIL);
5190
5191 HRelease(BIF_P, term_heap_start + term_heap_size, term_heap_end);
5192
5193 lcnt_destroy_sample_vector(¤t_locks);
5194 lcnt_destroy_sample_vector(&deleted_locks);
5195
5196 BIF_RET(result);
5197 #endif
5198 }
5199
erts_debug_lcnt_control_1(BIF_ALIST_1)5200 BIF_RETTYPE erts_debug_lcnt_control_1(BIF_ALIST_1)
5201 {
5202 #ifdef ERTS_ENABLE_LOCK_COUNT
5203 if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
5204 erts_lock_flags_t mask;
5205 Eterm *term_heap_block;
5206 Uint term_heap_size;
5207
5208 mask = erts_lcnt_get_category_mask();
5209 term_heap_size = 0;
5210
5211 lcnt_build_category_list(NULL, &term_heap_size, mask);
5212
5213 term_heap_block = HAlloc(BIF_P, term_heap_size);
5214
5215 BIF_RET(lcnt_build_category_list(&term_heap_block, NULL, mask));
5216 } else if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
5217 if(erts_lcnt_get_preserve_info()) {
5218 BIF_RET(am_true);
5219 }
5220
5221 BIF_RET(am_false);
5222 }
5223 #endif
5224 BIF_ERROR(BIF_P, BADARG);
5225 }
5226
erts_debug_lcnt_control_2(BIF_ALIST_2)5227 BIF_RETTYPE erts_debug_lcnt_control_2(BIF_ALIST_2)
5228 {
5229 #ifdef ERTS_ENABLE_LOCK_COUNT
5230 if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
5231 erts_lock_flags_t category_mask = 0;
5232 Eterm categories = BIF_ARG_2;
5233
5234 if(!(is_list(categories) || is_nil(categories))) {
5235 BIF_ERROR(BIF_P, BADARG);
5236 }
5237
5238 while(is_list(categories)) {
5239 Eterm *cell = list_val(categories);
5240 erts_lock_flags_t category;
5241
5242 category = lcnt_atom_to_lock_category(CAR(cell));
5243
5244 if(!category) {
5245 Eterm *hp = HAlloc(BIF_P, 4);
5246
5247 BIF_RET(TUPLE3(hp, am_error, am_badarg, CAR(cell)));
5248 }
5249
5250 category_mask |= category;
5251 categories = CDR(cell);
5252 }
5253
5254 erts_lcnt_set_category_mask(category_mask);
5255
5256 BIF_RET(am_ok);
5257 } else if(BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) {
5258 int enabled = (BIF_ARG_2 == am_true);
5259
5260 if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
5261 erts_lcnt_set_preserve_info(enabled);
5262
5263 BIF_RET(am_ok);
5264 }
5265 }
5266 #endif
5267 BIF_ERROR(BIF_P, BADARG);
5268 }
5269
/* Build the literal {OsType, OsFlavor} and {Major, Minor, Build} tuples
 * returned by erlang:system_info(os_type) / system_info(os_version).
 *
 * Both tuples share one literal-area allocation: os_type_tuple occupies
 * the first 3 words, os_version_tuple the following 4. */
static void os_info_init(void)
{
    Eterm type = erts_atom_put((byte *) os_type, sys_strlen(os_type), ERTS_ATOM_ENC_LATIN1, 1);
    Eterm flav;
    int major, minor, build;
    char* buf = erts_alloc(ERTS_ALC_T_TMP, 1024); /* More than enough */
    Eterm* hp;

    os_flavor(buf, 1024);
    flav = erts_atom_put((byte *) buf, sys_strlen(buf), ERTS_ATOM_ENC_LATIN1, 1);
    erts_free(ERTS_ALC_T_TMP, (void *) buf);
    hp = erts_alloc(ERTS_ALC_T_LITERAL, (3+4)*sizeof(Eterm));
    os_type_tuple = TUPLE2(hp, type, flav);
    erts_set_literal_tag(&os_type_tuple, hp, 3);

    hp += 3;
    os_version(&major, &minor, &build);
    os_version_tuple = TUPLE3(hp,
                              make_small(major),
                              make_small(minor),
                              make_small(build));
    erts_set_literal_tag(&os_version_tuple, hp, 4);
}
5293
/* Initialize this module: reset the debug-state flags and resolve the
 * export entries used as trap targets by the info/debug BIFs above.
 * Called once during emulator start-up. */
void
erts_bif_info_init(void)
{
    erts_atomic_init_nob(&available_internal_state, 0);
    erts_atomic_init_nob(&hipe_test_reschedule_flag, 0);

    alloc_info_trap = erts_export_put(am_erlang, am_alloc_info, 1);
    alloc_sizes_trap = erts_export_put(am_erlang, am_alloc_sizes, 1);
    gather_sched_wall_time_res_trap
        = erts_export_put(am_erts_internal, am_gather_sched_wall_time_result, 1);
    gather_gc_info_res_trap
        = erts_export_put(am_erlang, am_gather_gc_info_result, 1);
    gather_io_bytes_trap
        = erts_export_put(am_erts_internal, am_gather_io_bytes, 2);
    gather_msacc_res_trap
        = erts_export_put(am_erts_internal, am_gather_microstate_accounting_result, 2);
    gather_system_check_res_trap
        = erts_export_put(am_erts_internal, am_gather_system_check_result, 1);

    is_process_alive_trap = erts_export_put(am_erts_internal, am_is_process_alive, 1);

    get_internal_state_blocked = erts_export_put(am_erts_internal,
                                                 am_get_internal_state_blocked,
                                                 1);


    process_info_init();
    os_info_init();
}
5323