1 /*
2 * %CopyrightBegin%
3 *
4 * Copyright Ericsson AB 1999-2018. All Rights Reserved.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * %CopyrightEnd%
19 */
20
21 #ifdef HAVE_CONFIG_H
22 # include "config.h"
23 #endif
24
25 #define ERTS_WANT_MEM_MAPPERS
26 #include "sys.h"
27 #include "erl_vm.h"
28 #include "global.h"
29 #include "erl_process.h"
30 #include "error.h"
31 #include "erl_driver.h"
32 #include "erl_nif.h"
33 #include "bif.h"
34 #include "big.h"
35 #include "erl_version.h"
36 #include "erl_compile_flags.h"
37 #include "erl_db_util.h"
38 #include "erl_message.h"
39 #include "erl_binary.h"
40 #include "erl_db.h"
41 #include "erl_mtrace.h"
42 #include "dist.h"
43 #include "erl_gc.h"
44 #include "erl_cpu_topology.h"
45 #include "erl_async.h"
46 #include "erl_thr_progress.h"
47 #include "erl_bif_unique.h"
48 #include "erl_map.h"
49 #include "erl_check_io.h"
50 #define ERTS_PTAB_WANT_DEBUG_FUNCS__
51 #include "erl_ptab.h"
52 #include "erl_time.h"
53 #include "erl_proc_sig_queue.h"
54 #include "erl_alloc_util.h"
55 #ifdef HIPE
56 #include "hipe_arch.h"
57 #endif
58
59 #ifdef ERTS_ENABLE_LOCK_COUNT
60 #include "erl_lock_count.h"
61 #endif
62
63 #ifdef VALGRIND
64 #include <valgrind/valgrind.h>
65 #include <valgrind/memcheck.h>
66 #endif
67
/* Trap exports used to restart info-gathering BIFs that yield across
 * scheduler passes; presumably initialized during BIF init elsewhere in
 * this file (not visible in this chunk) — NULL/unset until then. */
static Export* alloc_info_trap = NULL;
static Export* alloc_sizes_trap = NULL;
static Export* gather_io_bytes_trap = NULL;

static Export *gather_sched_wall_time_res_trap;
static Export *gather_msacc_res_trap;
static Export *gather_gc_info_res_trap;
static Export *gather_system_check_res_trap;

static Export *is_process_alive_trap;


/* Declare a local Eterm variable AM_<S> holding the atom 'S',
 * created (or looked up) at runtime via am_atom_put(). */
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
81
static char otp_version[] = ERLANG_OTP_VERSION;
/* Keep erts_system_version as a global variable for easy access from a core */
/* Format template for the system-version banner. The "%s" slot takes the
 * release-candidate suffix and the %beu/%d slots take scheduler,
 * dirty-scheduler and async-thread counts; the argument list is supplied
 * by erts_print_system_version() below and must match this template. */
static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
				     "%s"
				     " [erts-" ERLANG_VERSION "]"
#ifndef OTP_RELEASE
#ifdef ERLANG_GIT_VERSION
				     " [source-" ERLANG_GIT_VERSION "]"
#else
				     " [source]"
#endif
#endif
#ifdef ARCH_64
				     " [64-bit]"
#endif
				     " [smp:%beu:%beu]"
				     " [ds:%beu:%beu:%beu]"
#if defined(ERTS_DIRTY_SCHEDULERS_TEST)
				     " [dirty-schedulers-TEST]"
#endif
				     " [async-threads:%d]"
#ifdef HIPE
				     " [hipe]"
#endif
#ifdef ET_DEBUG
#if ET_DEBUG
				     " [type-assertions]"
#endif
#endif
#ifdef DEBUG
				     " [debug-compiled]"
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
				     " [lock-checking]"
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
				     " [lock-counting]"
#endif
#ifdef ERTS_OPCODE_COUNTER_SUPPORT
				     " [instruction-counting]"
#endif
#ifdef PURIFY
				     " [purify-compiled]"
#endif
#ifdef VALGRIND
				     " [valgrind-compiled]"
#endif
#ifdef ERTS_FRMPTR
				     " [frame-pointer]"
#endif
#ifdef USE_LTTNG
				     " [lttng]"
#endif
#ifdef USE_DTRACE
				     " [dtrace]"
#endif
#ifdef USE_SYSTEMTAP
				     " [systemtap]"
#endif
#ifdef SHCOPY
				     " [sharing-preserving]"
#endif
				     "\n");
145
/* Number of elements in a (true) array — do not use on pointers. */
#define ASIZE(a) (sizeof(a)/sizeof(a[0]))

#if defined(HAVE_SOLARIS_SPARC_PERFMON)
# include <sys/ioccom.h>
# define PERFMON_SETPCR			_IOW('P', 1, unsigned long long)
# define PERFMON_GETPCR			_IOR('P', 2, unsigned long long)
#endif

/* Cached, pre-built {OsType,OsFlavor} and {Major,Minor,Build} tuples */
static Eterm os_type_tuple;
static Eterm os_version_tuple;

/* Forward declarations; definitions appear later in this file. */
static Eterm
current_function(Process* p, ErtsHeapFactory *hfact, Process* rp,
                 int full_info, Uint reserve_size, int flags);
static Eterm current_stacktrace(ErtsHeapFactory *hfact, Process* rp,
                                Uint reserve_size);
163
164 static Eterm
bld_bin_list(Uint ** hpp,Uint * szp,ErlOffHeap * oh,Eterm tail)165 bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh, Eterm tail)
166 {
167 struct erl_off_heap_header* ohh;
168 Eterm res = tail;
169 Eterm tuple;
170
171 for (ohh = oh->first; ohh; ohh = ohh->next) {
172 if (ohh->thing_word == HEADER_PROC_BIN) {
173 ProcBin* pb = (ProcBin*) ohh;
174 Eterm val = erts_bld_uword(hpp, szp, (UWord) pb->val);
175 Eterm orig_size = erts_bld_uint(hpp, szp, pb->val->orig_size);
176
177 if (szp)
178 *szp += 4+2;
179 if (hpp) {
180 Uint refc = (Uint) erts_refc_read(&pb->val->intern.refc, 1);
181 tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
182 res = CONS(*hpp + 4, tuple, res);
183 *hpp += 4+2;
184 }
185 }
186 }
187 return res;
188 }
189
190 static Eterm
bld_magic_ref_bin_list(Uint ** hpp,Uint * szp,ErlOffHeap * oh)191 bld_magic_ref_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
192 {
193 struct erl_off_heap_header* ohh;
194 Eterm res = NIL;
195 Eterm tuple;
196
197 for (ohh = oh->first; ohh; ohh = ohh->next) {
198 if (is_ref_thing_header((*((Eterm *) ohh)))) {
199 ErtsMRefThing *mrtp = (ErtsMRefThing *) ohh;
200 Eterm val = erts_bld_uword(hpp, szp, (UWord) mrtp->mb);
201 Eterm orig_size = erts_bld_uint(hpp, szp, mrtp->mb->orig_size);
202
203 if (szp)
204 *szp += 4+2;
205 if (hpp) {
206 Uint refc = (Uint) erts_refc_read(&mrtp->mb->intern.refc, 1);
207 tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
208 res = CONS(*hpp + 4, tuple, res);
209 *hpp += 4+2;
210 }
211 }
212 }
213 return res;
214 }
215
216
217 /*
218 make_monitor_list:
219 returns a list of records..
220 -record(erl_monitor, {
221 type, % process | port | time_offset | dist_process | resource
222 % | node | nodes | suspend
223 dir, % origin | target
224 ref, % reference or []
225 pid, % Process or nodename
226 extra % registered name, integer or []
227 }).
228 */
229
/*
 * Foreach-callback for make_monitor_list(): accumulate into *vpsz the
 * heap words needed to report one monitor as a 6-tuple cons'ed onto a
 * list. Must stay in sync with do_make_one_mon_element() below.
 */
static void do_calc_mon_size(ErtsMonitor *mon, void *vpsz)
{
    ErtsMonitorData *mdp = erts_monitor_to_data(mon);
    Uint *szp = vpsz;

    /* Monitor reference (copied unless immediate). */
    if (!is_immed(mdp->ref))
        *szp += NC_HEAP_SIZE(mdp->ref);

    /* Monitored entity: NIF resource ref or pid/port/name term. */
    if (mon->type == ERTS_MON_TYPE_RESOURCE && erts_monitor_is_target(mon))
        *szp += erts_resource_ref_size(mon->other.ptr);
    else if (!is_immed(mon->other.item))
        *szp += NC_HEAP_SIZE(mon->other.item);

    *szp += 9; /* CONS (2) + 6-tuple (7) */
}
243
/* State threaded through do_make_one_mon_element() while building the
 * monitor list: owning process, heap write pointer (pre-sized by
 * do_calc_mon_size()), accumulated result list, and the record tag. */
typedef struct {
    Process *p;
    Eterm *hp;
    Eterm res;
    Eterm tag;
} MonListContext;
250
/*
 * Foreach-callback for make_monitor_list(): build one
 * #erl_monitor{type, dir, ref, pid, extra} 6-tuple for 'mon' and cons
 * it onto pmlc->res. pmlc->hp must have been allocated with the size
 * computed by do_calc_mon_size().
 */
static void do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc)
{
    ErtsMonitorData *mdp = erts_monitor_to_data(mon);
    MonListContext *pmlc = vpmlc;
    Eterm tup, t, d, r, p, x;

    /* r = monitor reference, copied to the heap unless immediate. */
    r = is_immed(mdp->ref) ? mdp->ref : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mdp->ref);
    /* p = monitored entity: a resource ref for target resource
     * monitors, otherwise the term in mon->other.item. */
    if (mon->type == ERTS_MON_TYPE_RESOURCE && erts_monitor_is_target(mon))
        p = erts_bld_resource_ref(&(pmlc->hp), &MSO(pmlc->p), mon->other.ptr);
    else
        p = (is_immed(mon->other.item)
             ? mon->other.item
             : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->other.item));

    /* x = 'extra' field: registered name, node-monitor refcount, or []. */
    if (mon->flags & ERTS_ML_FLG_NAME)
        x = ((ErtsMonitorDataExtended *) mdp)->u.name;
    else if (erts_monitor_is_target(mon))
        x = NIL;
    else if (mon->type == ERTS_MON_TYPE_NODE || mon->type == ERTS_MON_TYPE_NODES)
        x = make_small(((ErtsMonitorDataExtended *) mdp)->u.refc);
    else
        x = NIL;

    /* t = monitor type as an atom. */
    switch (mon->type) {
    case ERTS_MON_TYPE_PROC:
        t = am_process;
        break;
    case ERTS_MON_TYPE_PORT:
        t = am_port;
        break;
    case ERTS_MON_TYPE_TIME_OFFSET:
        t = am_time_offset;
        break;
    case ERTS_MON_TYPE_DIST_PROC: {
        ERTS_DECL_AM(dist_process);
        t = AM_dist_process;
        break;
    }
    case ERTS_MON_TYPE_RESOURCE: {
        ERTS_DECL_AM(resource);
        t = AM_resource;
        break;
    }
    case ERTS_MON_TYPE_NODE:
        t = am_node;
        break;
    case ERTS_MON_TYPE_NODES: {
        ERTS_DECL_AM(nodes);
        t = AM_nodes;
        break;
    }
    case ERTS_MON_TYPE_SUSPEND:
        t = am_suspend;
        break;
    default:
        ERTS_INTERNAL_ERROR("Unknown monitor type");
        t = am_error;
        break;
    }
    /* d = direction: 'target' if this is the monitored side. */
    if (erts_monitor_is_target(mon)) {
        ERTS_DECL_AM(target);
        d = AM_target;
    }
    else {
        ERTS_DECL_AM(origin);
        d = AM_origin;
    }
    tup = TUPLE6(pmlc->hp, pmlc->tag, t, d, r, p, x);
    pmlc->hp += 7;
    pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
    pmlc->hp += 2;
}
323
324 static Eterm
make_monitor_list(Process * p,int tree,ErtsMonitor * root,Eterm tail)325 make_monitor_list(Process *p, int tree, ErtsMonitor *root, Eterm tail)
326 {
327 DECL_AM(erl_monitor);
328 Uint sz = 0;
329 MonListContext mlc;
330 void (*foreach)(ErtsMonitor *,
331 void (*)(ErtsMonitor *, void *),
332 void *);
333
334 foreach = tree ? erts_monitor_tree_foreach : erts_monitor_list_foreach;
335
336 (*foreach)(root, do_calc_mon_size, &sz);
337 if (sz == 0)
338 return tail;
339 mlc.p = p;
340 mlc.hp = HAlloc(p,sz);
341 mlc.res = tail;
342 mlc.tag = AM_erl_monitor;
343 (*foreach)(root, do_make_one_mon_element, &mlc);
344 return mlc.res;
345 }
346
347 /*
348 make_link_list:
349 returns a list of records..
350 -record(erl_link, {
351 type, % process | port | dist_process
352 pid, % Process or port
353 id % (address)
354 }).
355 */
356
/*
 * Foreach-callback for make_link_list(): accumulate into *vpsz the heap
 * words needed to report one link as a 4-tuple cons'ed onto a list.
 * Must stay in sync with make_one_lnk_element() below.
 */
static void calc_lnk_size(ErtsLink *lnk, void *vpsz)
{
    Uint *accp = vpsz;
    Uint id_sz = 0;
    ErtsLinkData *ldp = erts_link_to_data(lnk);

    /* Size of the link-data address rendered as an integer term. */
    (void) erts_bld_uword(NULL, &id_sz, (UWord) ldp);
    *accp += id_sz;

    /* Linked entity, copied unless immediate. */
    if (!is_immed(lnk->other.item))
        *accp += size_object(lnk->other.item);

    *accp += 7; /* CONS (2) + 4-tuple (5) */
}
369
/* State threaded through make_one_lnk_element() while building the link
 * list: owning process, heap write pointer (pre-sized by
 * calc_lnk_size()), accumulated result list, and the record tag. */
typedef struct {
    Process *p;
    Eterm *hp;
    Eterm res;
    Eterm tag;
} LnkListContext;
376
/*
 * Foreach-callback for make_link_list(): build one
 * #erl_link{type, pid, id} 4-tuple for 'lnk' and cons it onto
 * pllc->res. pllc->hp must have been allocated with the size computed
 * by calc_lnk_size().
 */
static void make_one_lnk_element(ErtsLink *lnk, void * vpllc)
{
    LnkListContext *pllc = vpllc;
    Eterm tup, t, pid, id;
    ErtsLinkData *ldp = erts_link_to_data(lnk);

    /* id = address of the shared link data, as an integer term. */
    id = erts_bld_uword(&pllc->hp, NULL, (UWord) ldp);

    /* pid = linked entity, copied to the heap unless immediate. */
    if (is_immed(lnk->other.item))
	pid = lnk->other.item;
    else {
	Uint sz = size_object(lnk->other.item);
	pid = copy_struct(lnk->other.item, sz, &(pllc->hp), &MSO(pllc->p));
    }

    /* t = link type as an atom. */
    switch (lnk->type) {
    case ERTS_LNK_TYPE_PROC:
        t = am_process;
        break;
    case ERTS_LNK_TYPE_PORT:
        t = am_port;
        break;
    case ERTS_LNK_TYPE_DIST_PROC: {
        ERTS_DECL_AM(dist_process);
        t = AM_dist_process;
        break;
    }
    default:
        /* Fixed typo: message previously read "Unkown link type";
         * now consistent with "Unknown monitor type" above. */
        ERTS_INTERNAL_ERROR("Unknown link type");
        t = am_undefined;
        break;
    }

    tup = TUPLE4(pllc->hp, pllc->tag, t, pid, id);
    pllc->hp += 5;
    pllc->res = CONS(pllc->hp, tup, pllc->res);
    pllc->hp += 2;
}
415
416 static Eterm
make_link_list(Process * p,int tree,ErtsLink * root,Eterm tail)417 make_link_list(Process *p, int tree, ErtsLink *root, Eterm tail)
418 {
419 DECL_AM(erl_link);
420 Uint sz = 0;
421 LnkListContext llc;
422 void (*foreach)(ErtsLink *,
423 void (*)(ErtsLink *, void *),
424 void *);
425
426 foreach = tree ? erts_link_tree_foreach : erts_link_list_foreach;
427
428 (*foreach)(root, calc_lnk_size, (void *) &sz);
429 if (sz == 0) {
430 return tail;
431 }
432 llc.p = p;
433 llc.hp = HAlloc(p,sz);
434 llc.res = tail;
435 llc.tag = AM_erl_link;
436 (*foreach)(root, make_one_lnk_element, (void *) &llc);
437 return llc.res;
438 }
439
/*
 * Print the system-version banner (erts_system_version above) to
 * 'to'/'arg', filling in the release-candidate suffix plus scheduler,
 * dirty-scheduler and async-thread counts. The argument order in the
 * final erts_print() call must match the template's placeholders.
 * Returns the result of erts_print().
 */
int
erts_print_system_version(fmtfn_t to, void *arg, Process *c_p)
{
    int i, rc = -1;
    char *rc_str = "";
    char rc_buf[100];
    char *ov = otp_version;
    Uint total, online, active;
    Uint dirty_cpu, dirty_cpu_onln, dirty_io;

    erts_schedulers_state(&total, &online, &active,
			  &dirty_cpu, &dirty_cpu_onln, NULL,
			  &dirty_io, NULL);
    /* Scan the OTP version string for an "-rc<N>" suffix; the last
     * match wins. rc stays -1 when no suffix is present. */
    for (i = 0; i < sizeof(otp_version)-4; i++) {
	if (ov[i] == '-' && ov[i+1] == 'r' && ov[i+2] == 'c')
	    rc = atoi(&ov[i+3]);
    }
    if (rc >= 0) {
	if (rc == 0)
	    /* "-rc0" is treated as a development build. */
	    rc_str = " [DEVELOPMENT]";
	else {
	    erts_snprintf(rc_buf, sizeof(rc_buf), " [RELEASE CANDIDATE %d]", rc);
	    rc_str = rc_buf;
	}
    }
    return erts_print(to, arg, erts_system_version,
		      rc_str
		      , total, online
		      , dirty_cpu, dirty_cpu_onln, dirty_io
		      , erts_async_max_threads
	);
}
472
/* One collected monitor entry, as gathered by the collect_one_*
 * callbacks below for later rendering by process_info. */
typedef struct {
    /* {Entity,Node} = {monitor.Name,monitor.Pid} for external by name
     * {Entity,Node} = {monitor.Pid,NIL} for external/external by pid
     * {Entity,Node} = {monitor.Name,erlang:node()} for internal by name
     * {Entity,Node} = {monitor.resource,MON_NIF_TARGET}*/
    union {
	Eterm term;
	ErtsResource* resource;
    }entity;
    int named;          /* non-zero when the monitor is by registered name */
    Uint16 type;        /* ERTS_MON_TYPE_* */
    Eterm node;
    /* pid is actual target being monitored, no matter pid/port or name */
    Eterm pid;
} MonitorInfo;

/* Growable array of MonitorInfo plus the accumulated heap size ('sz')
 * needed to render the collected entries. */
typedef struct {
    MonitorInfo *mi;    /* ERTS_ALC_T_TMP array, grown by EXTEND_MONITOR_INFOS */
    Uint mi_i;          /* number of entries filled in */
    Uint mi_max;        /* current capacity */
    int sz;             /* heap words needed for the result terms */
} MonitorInfoCollection;
495
/* Initialize an empty MonitorInfoCollection. */
#define INIT_MONITOR_INFOS(MIC) do {		\
    (MIC).mi = NULL;				\
    (MIC).mi_i = (MIC).mi_max = 0;		\
    (MIC).sz = 0;                               \
} while(0)

/* Growth step for the temporary MonitorInfo array. */
#define MI_INC 50

/* Ensure room for at least one more entry, growing by MI_INC. */
#define EXTEND_MONITOR_INFOS(MICP)					\
do {									\
    if ((MICP)->mi_i >= (MICP)->mi_max) {				\
	(MICP)->mi = ((MICP)->mi ? erts_realloc(ERTS_ALC_T_TMP,		\
						(MICP)->mi,		\
						((MICP)->mi_max+MI_INC)	\
						* sizeof(MonitorInfo))	\
		      : erts_alloc(ERTS_ALC_T_TMP,			\
				   MI_INC*sizeof(MonitorInfo)));	\
	(MICP)->mi_max += MI_INC;					\
    }									\
 } while (0)

/* Free the temporary array (no-op when nothing was collected). */
#define DESTROY_MONITOR_INFOS(MIC)			\
do {							\
    if ((MIC).mi != NULL) {				\
	erts_free(ERTS_ALC_T_TMP, (void *) (MIC).mi);	\
    }							\
 } while (0)
521
/*
 * Foreach-callback: record one link's peer into a
 * MonitorInfoCollection and account for a cons cell plus the peer term.
 */
static void collect_one_link(ErtsLink *lnk, void *vmicp)
{
    MonitorInfoCollection *micp = vmicp;
    MonitorInfo *entry;

    EXTEND_MONITOR_INFOS(micp);
    entry = &micp->mi[micp->mi_i++];
    entry->entity.term = lnk->other.item;
    micp->sz += 2 + NC_HEAP_SIZE(lnk->other.item);
}
530
/*
 * Foreach-callback: record one origin-direction (locally initiated)
 * monitor into a MonitorInfoCollection, accumulating the heap size
 * needed to render it. Types other than proc/port/dist-proc/
 * time-offset are skipped.
 */
static void collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp)
{
    if (erts_monitor_is_origin(mon)) {
        MonitorInfoCollection *micp = vmicp;

        EXTEND_MONITOR_INFOS(micp);

        micp->mi[micp->mi_i].type = mon->type;

        switch (mon->type) {
        case ERTS_MON_TYPE_PROC:
        case ERTS_MON_TYPE_PORT:
        case ERTS_MON_TYPE_DIST_PROC:
        case ERTS_MON_TYPE_TIME_OFFSET:
            if (!(mon->flags & ERTS_ML_FLG_NAME)) {
                /* Monitor by pid/port: entity is the monitored id. */
                micp->mi[micp->mi_i].named = 0;
                micp->mi[micp->mi_i].entity.term = mon->other.item;
                micp->mi[micp->mi_i].node = NIL;
                if (is_not_atom(mon->other.item))
                    micp->sz += NC_HEAP_SIZE(mon->other.item);
            }
            else {
                /* Monitor by registered name: entity is the name and
                 * node is the (possibly remote) node name. */
                ErtsMonitorDataExtended *mdep;
                micp->mi[micp->mi_i].named = !0;
                mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(mon);
                micp->mi[micp->mi_i].entity.term = mdep->u.name;
                if (mdep->dist)
                    micp->mi[micp->mi_i].node = mdep->dist->nodename;
                else
                    micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
                micp->sz += 3; /* need one 2-tuple */
            }

            /* have always pid at hand, to assist with figuring out if its a port or
             * a process, when we monitored by name and process_info is requested.
             * See: erl_bif_info.c:process_info_aux section for am_monitors */
            micp->mi[micp->mi_i].pid = mon->other.item;

            micp->mi_i++;
            micp->sz += 2 + 3; /* For a cons cell and a 2-tuple */
            break;
        default:
            break;
        }
    }
}
577
/*
 * Foreach-callback: record one target-direction monitor (someone
 * monitoring this entity) into a MonitorInfoCollection, accumulating
 * the heap size needed to render it. Only proc/port/dist-proc and NIF
 * resource monitors are reported.
 */
static void collect_one_target_monitor(ErtsMonitor *mon, void *vmicp)
{
    MonitorInfoCollection *micp = vmicp;

    if (erts_monitor_is_target(mon)) {

        EXTEND_MONITOR_INFOS(micp);

        micp->mi[micp->mi_i].type = mon->type;
        micp->mi[micp->mi_i].named = !!(mon->flags & ERTS_ML_FLG_NAME);
        switch (mon->type) {

        case ERTS_MON_TYPE_PROC:
        case ERTS_MON_TYPE_PORT:
        case ERTS_MON_TYPE_DIST_PROC:

            /* Monitoring entity is a pid/port term. */
            micp->mi[micp->mi_i].entity.term = mon->other.item;
            micp->mi[micp->mi_i].node = NIL;
            micp->sz += NC_HEAP_SIZE(mon->other.item);

            micp->sz += 2; /* cons */;
            micp->mi_i++;
            break;

        case ERTS_MON_TYPE_RESOURCE:

            /* Monitoring entity is a NIF resource. */
            micp->mi[micp->mi_i].entity.resource = mon->other.ptr;
            micp->mi[micp->mi_i].node = NIL;
            micp->sz += erts_resource_ref_size(mon->other.ptr);

            micp->sz += 2; /* cons */;
            micp->mi_i++;
            break;

        default:
            break;
        }

    }
}
618
/* Growable array of suspend-monitor pointers plus the accumulated heap
 * size needed to render them; same pattern as MonitorInfoCollection. */
typedef struct {
    ErtsMonitorSuspend **smi;   /* ERTS_ALC_T_TMP array */
    Uint smi_i;                 /* number of entries filled in */
    Uint smi_max;               /* current capacity */
    Uint sz;                    /* heap words needed for the result terms */
} ErtsSuspendMonitorInfoCollection;

/* Initialize an empty collection. */
#define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC) do {		\
    (SMIC).smi = NULL;						\
    (SMIC).smi_i = (SMIC).smi_max = 0;				\
    (SMIC).sz = 0;                                              \
} while(0)

/* Growth step for the temporary array. */
#define ERTS_SMI_INC 50

/* Ensure room for at least one more entry, growing by ERTS_SMI_INC. */
#define ERTS_EXTEND_SUSPEND_MONITOR_INFOS(SMICP)			\
do {									\
    if ((SMICP)->smi_i >= (SMICP)->smi_max) {				\
	(SMICP)->smi = ((SMICP)->smi					\
			? erts_realloc(ERTS_ALC_T_TMP,			\
				       (SMICP)->smi,			\
				       ((SMICP)->smi_max		\
					+ ERTS_SMI_INC)			\
				       * sizeof(ErtsMonitorSuspend *))	\
			: erts_alloc(ERTS_ALC_T_TMP,			\
				     ERTS_SMI_INC			\
				     * sizeof(ErtsMonitorSuspend *)));	\
	(SMICP)->smi_max += ERTS_SMI_INC;				\
    }									\
 } while (0)

/* Free the temporary array (no-op when nothing was collected). */
#define ERTS_DESTROY_SUSPEND_MONITOR_INFOS(SMIC)			\
do {									\
    if ((SMIC).smi != NULL) {						\
	erts_free(ERTS_ALC_T_TMP, (void *) (SMIC).smi);			\
    }									\
 } while (0)
655
/*
 * Foreach-callback: record one suspend monitor into an
 * ErtsSuspendMonitorInfoCollection and account for the heap needed to
 * render it as a cons'ed 3-tuple (plus a bignum when the suspend count
 * does not fit in a small).
 */
static void
collect_one_suspend_monitor(ErtsMonitor *mon, void *vsmicp)
{
    if (mon->type == ERTS_MON_TYPE_SUSPEND) {
	Sint count;
	erts_aint_t mstate;
	ErtsMonitorSuspend *msp;
	ErtsSuspendMonitorInfoCollection *smicp;

	msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
	smicp = vsmicp;

	ERTS_EXTEND_SUSPEND_MONITOR_INFOS(smicp);

	smicp->smi[smicp->smi_i] = msp;
	smicp->sz += 2 /* cons */ + 4 /* 3-tuple */;

	mstate = erts_atomic_read_nob(&msp->state);

	/* Low bits of the atomic state hold the suspend count. */
	count = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK);
	if (!IS_SSMALL(count))
	    smicp->sz += BIG_UINT_HEAP_SIZE;

	smicp->smi_i++;
    }
}
682
683 /*
684 * process_info/[1,2]
685 */
686
687 /*
688 * All valid process_info arguments.
689 */
690
/* Indices into the pi_args[] table below. The numbering here, the row
 * order of pi_args[], and the switch in pi_arg2ix() must all agree;
 * consistency is asserted in process_info_init() in DEBUG builds. */
#define ERTS_PI_IX_REGISTERED_NAME                      0
#define ERTS_PI_IX_CURRENT_FUNCTION                     1
#define ERTS_PI_IX_INITIAL_CALL                         2
#define ERTS_PI_IX_STATUS                               3
#define ERTS_PI_IX_MESSAGES                             4
#define ERTS_PI_IX_MESSAGE_QUEUE_LEN                    5
#define ERTS_PI_IX_LINKS                                6
#define ERTS_PI_IX_MONITORS                             7
#define ERTS_PI_IX_MONITORED_BY                         8
#define ERTS_PI_IX_DICTIONARY                           9
#define ERTS_PI_IX_TRAP_EXIT                            10
#define ERTS_PI_IX_ERROR_HANDLER                        11
#define ERTS_PI_IX_HEAP_SIZE                            12
#define ERTS_PI_IX_STACK_SIZE                           13
#define ERTS_PI_IX_MEMORY                               14
#define ERTS_PI_IX_GARBAGE_COLLECTION                   15
#define ERTS_PI_IX_GROUP_LEADER                         16
#define ERTS_PI_IX_REDUCTIONS                           17
#define ERTS_PI_IX_PRIORITY                             18
#define ERTS_PI_IX_TRACE                                19
#define ERTS_PI_IX_BINARY                               20
#define ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN               21
#define ERTS_PI_IX_CATCHLEVEL                           22
#define ERTS_PI_IX_BACKTRACE                            23
#define ERTS_PI_IX_LAST_CALLS                           24
#define ERTS_PI_IX_TOTAL_HEAP_SIZE                      25
#define ERTS_PI_IX_SUSPENDING                           26
#define ERTS_PI_IX_MIN_HEAP_SIZE                        27
#define ERTS_PI_IX_MIN_BIN_VHEAP_SIZE                   28
#define ERTS_PI_IX_MAX_HEAP_SIZE                        29
#define ERTS_PI_IX_CURRENT_LOCATION                     30
#define ERTS_PI_IX_CURRENT_STACKTRACE                   31
#define ERTS_PI_IX_MESSAGE_QUEUE_DATA                   32
#define ERTS_PI_IX_GARBAGE_COLLECTION_INFO              33
#define ERTS_PI_IX_MAGIC_REF                            34
#define ERTS_PI_IX_FULLSWEEP_AFTER                      35

/* Per-request flag bits. NOTE(review): "SINGELTON" is a historical
 * misspelling of "singleton" (a single item was requested rather than a
 * list); the name is used throughout this file, so it is kept as-is. */
#define ERTS_PI_FLAG_SINGELTON                          (1 << 0)
#define ERTS_PI_FLAG_ALWAYS_WRAP                        (1 << 1)
#define ERTS_PI_FLAG_WANT_MSGS                          (1 << 2)
#define ERTS_PI_FLAG_NEED_MSGQ_LEN                      (1 << 3)
#define ERTS_PI_FLAG_FORCE_SIG_SEND                     (1 << 4)
#define ERTS_PI_FLAG_REQUEST_FOR_OTHER                  (1 << 5)

/* Deduct SZ words from the running reserved-size accounting, asserting
 * that the reservation never goes negative. */
#define ERTS_PI_UNRESERVE(RS, SZ) \
    (ASSERT((RS) >= (SZ)), (RS) -= (SZ))
737
738
/* Static description of one process_info item: its key atom, the heap
 * words to reserve up front for the result, ERTS_PI_FLAG_* bits, and
 * the locks needed on the target process to serve it. */
typedef struct {
    Eterm name;
    Uint reserve_size;
    int flags;
    ErtsProcLocks locks;
} ErtsProcessInfoArgs;

/* Row order MUST match the ERTS_PI_IX_* indices above (checked by the
 * DEBUG assertion in process_info_init()). */
static ErtsProcessInfoArgs pi_args[] = {
    {am_registered_name, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_current_function, 4, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_initial_call, 4, 0, ERTS_PROC_LOCK_MAIN},
    {am_status, 0, 0, 0},
    {am_messages, 0, ERTS_PI_FLAG_WANT_MSGS|ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_message_queue_len, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN, ERTS_PROC_LOCK_MAIN},
    {am_links, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_monitors, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_monitored_by, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_dictionary, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_trap_exit, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_error_handler, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_stack_size, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_memory, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_garbage_collection, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + ERTS_MAX_HEAP_SIZE_MAP_SZ, 0, ERTS_PROC_LOCK_MAIN},
    {am_group_leader, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_reductions, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_priority, 0, 0, 0},
    {am_trace, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_binary, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_sequential_trace_token, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_catchlevel, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_backtrace, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_last_calls, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_total_heap_size, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_suspending, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, 0},
    {am_min_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_min_bin_vheap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_max_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_current_location, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_current_stacktrace, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_message_queue_data, 0, 0, ERTS_PROC_LOCK_MAIN},
    {am_garbage_collection_info, ERTS_PROCESS_GC_INFO_MAX_SIZE, 0, ERTS_PROC_LOCK_MAIN},
    {am_magic_ref, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
    {am_fullsweep_after, 0, 0, ERTS_PROC_LOCK_MAIN}
};

#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(pi_args[0])))

/* Initial on-stack item-index array size; deliberately tiny in DEBUG
 * builds to exercise the dynamic-growth path. */
#ifdef DEBUG
#  define ERTS_PI_DEF_ARR_SZ 2
#else
#  define ERTS_PI_DEF_ARR_SZ ERTS_PI_ARGS
#endif
792
793 static ERTS_INLINE Eterm
pi_ix2arg(int ix)794 pi_ix2arg(int ix)
795 {
796 if (ix < 0 || ERTS_PI_ARGS <= ix)
797 return am_undefined;
798 return pi_args[ix].name;
799 }
800
801 static ERTS_INLINE int
pi_ix2flags(int ix)802 pi_ix2flags(int ix)
803 {
804 if (ix < 0 || ERTS_PI_ARGS <= ix)
805 return 0;
806 return pi_args[ix].flags;
807 }
808
809 static ERTS_INLINE Uint
pi_ix2rsz(int ix)810 pi_ix2rsz(int ix)
811 {
812 if (ix < 0 || ERTS_PI_ARGS <= ix)
813 return 0;
814 return pi_args[ix].reserve_size;
815 }
816
817 static ERTS_INLINE ErtsProcLocks
pi_ix2locks(int ix)818 pi_ix2locks(int ix)
819 {
820 if (ix < 0 || ERTS_PI_ARGS <= ix)
821 return 0;
822 return pi_args[ix].locks;
823 }
824
/*
 * Map a process_info item-key atom to its ERTS_PI_IX_* index, or -1
 * when the atom is not a valid item. Inverse of pi_ix2arg();
 * consistency between the two is asserted in process_info_init().
 */
static ERTS_INLINE int
pi_arg2ix(Eterm arg)
{
    switch (arg) {
    case am_registered_name:
        return ERTS_PI_IX_REGISTERED_NAME;
    case am_current_function:
        return ERTS_PI_IX_CURRENT_FUNCTION;
    case am_initial_call:
        return ERTS_PI_IX_INITIAL_CALL;
    case am_status:
        return ERTS_PI_IX_STATUS;
    case am_messages:
        return ERTS_PI_IX_MESSAGES;
    case am_message_queue_len:
        return ERTS_PI_IX_MESSAGE_QUEUE_LEN;
    case am_links:
        return ERTS_PI_IX_LINKS;
    case am_monitors:
        return ERTS_PI_IX_MONITORS;
    case am_monitored_by:
        return ERTS_PI_IX_MONITORED_BY;
    case am_dictionary:
        return ERTS_PI_IX_DICTIONARY;
    case am_trap_exit:
        return ERTS_PI_IX_TRAP_EXIT;
    case am_error_handler:
        return ERTS_PI_IX_ERROR_HANDLER;
    case am_heap_size:
        return ERTS_PI_IX_HEAP_SIZE;
    case am_stack_size:
        return ERTS_PI_IX_STACK_SIZE;
    case am_memory:
        return ERTS_PI_IX_MEMORY;
    case am_garbage_collection:
        return ERTS_PI_IX_GARBAGE_COLLECTION;
    case am_group_leader:
        return ERTS_PI_IX_GROUP_LEADER;
    case am_reductions:
        return ERTS_PI_IX_REDUCTIONS;
    case am_priority:
        return ERTS_PI_IX_PRIORITY;
    case am_trace:
        return ERTS_PI_IX_TRACE;
    case am_binary:
        return ERTS_PI_IX_BINARY;
    case am_sequential_trace_token:
        return ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN;
    case am_catchlevel:
        return ERTS_PI_IX_CATCHLEVEL;
    case am_backtrace:
        return ERTS_PI_IX_BACKTRACE;
    case am_last_calls:
        return ERTS_PI_IX_LAST_CALLS;
    case am_total_heap_size:
        return ERTS_PI_IX_TOTAL_HEAP_SIZE;
    case am_suspending:
        return ERTS_PI_IX_SUSPENDING;
    case am_min_heap_size:
        return ERTS_PI_IX_MIN_HEAP_SIZE;
    case am_min_bin_vheap_size:
        return ERTS_PI_IX_MIN_BIN_VHEAP_SIZE;
    case am_max_heap_size:
        return ERTS_PI_IX_MAX_HEAP_SIZE;
    case am_current_location:
        return ERTS_PI_IX_CURRENT_LOCATION;
    case am_current_stacktrace:
        return ERTS_PI_IX_CURRENT_STACKTRACE;
    case am_message_queue_data:
        return ERTS_PI_IX_MESSAGE_QUEUE_DATA;
    case am_garbage_collection_info:
        return ERTS_PI_IX_GARBAGE_COLLECTION_INFO;
    case am_magic_ref:
        return ERTS_PI_IX_MAGIC_REF;
    case am_fullsweep_after:
        return ERTS_PI_IX_FULLSWEEP_AFTER;
    default:
        return -1;
    }
}
905
/* Item keys returned by process_info/1, in result order. */
static Eterm pi_1_keys[] = {
    am_registered_name,
    am_current_function,
    am_initial_call,
    am_status,
    am_message_queue_len,
    am_links,
    am_dictionary,
    am_trap_exit,
    am_error_handler,
    am_priority,
    am_group_leader,
    am_total_heap_size,
    am_heap_size,
    am_stack_size,
    am_reductions,
    am_garbage_collection,
    am_suspending
};

#define ERTS_PI_1_NO_OF_KEYS (sizeof(pi_1_keys)/sizeof(Eterm))

/* Constant Erlang list of the keys above, built once by
 * process_info_init() on the static heap below. */
static Eterm pi_1_keys_list;
static Eterm pi_1_keys_list_heap[2*ERTS_PI_1_NO_OF_KEYS];
930
931 static void
process_info_init(void)932 process_info_init(void)
933 {
934 Eterm *hp = &pi_1_keys_list_heap[0];
935 int i;
936
937 pi_1_keys_list = NIL;
938
939 for (i = ERTS_PI_1_NO_OF_KEYS-1; i >= 0; i--) {
940 pi_1_keys_list = CONS(hp, pi_1_keys[i], pi_1_keys_list);
941 hp += 2;
942 }
943
944 #ifdef DEBUG
945 { /* Make sure the process_info argument mappings are consistent */
946 int ix;
947 for (ix = 0; ix < ERTS_PI_ARGS; ix++) {
948 ASSERT(pi_arg2ix(pi_ix2arg(ix)) == ix);
949 }
950 }
951 #endif
952
953 }
954
955 static BIF_RETTYPE
956 process_info_aux(Process *c_p,
957 ErtsHeapFactory *hfact,
958 Process *rp,
959 ErtsProcLocks rp_locks,
960 int item_ix,
961 int flags,
962 Uint *reserve_sizep,
963 Uint *reds);
964
/*
 * Build the result of process_info/1,2 for target process 'rp' on
 * behalf of caller 'c_p'. 'item_ix' lists the requested ERTS_PI_IX_*
 * indices; with ERTS_PI_FLAG_SINGELTON set exactly one item is
 * produced unwrapped, otherwise a list of {Key,Value} tuples in the
 * requested order. Duplicate items are computed once and shared.
 * 'reserve_size' is the pre-computed heap reservation, consumed as
 * terms are produced; 'reds' accumulates the reduction cost.
 */
Eterm
erts_process_info(Process *c_p,
                  ErtsHeapFactory *hfact,
                  Process *rp,
                  ErtsProcLocks rp_locks,
                  int *item_ix,
                  int item_ix_len,
                  int flags,
                  Uint reserve_size,
                  Uint *reds)
{
    Eterm res;
    Eterm part_res[ERTS_PI_ARGS];
    int item_ix_ix, ix;

    /* Single-item fast path: no per-item result table needed. */
    if (ERTS_PI_FLAG_SINGELTON & flags) {
        ASSERT(item_ix_len == 1);
	res = process_info_aux(c_p, hfact, rp, rp_locks, item_ix[0],
                               flags, &reserve_size, reds);
        return res;
    }

    for (ix = 0; ix < ERTS_PI_ARGS; ix++)
	part_res[ix] = THE_NON_VALUE;

    /*
     * We always handle 'messages' first if it should be part
     * of the result. This since if both 'messages' and
     * 'message_queue_len' are wanted, 'messages' may
     * change the result of 'message_queue_len' (in case
     * the queue contain bad distribution messages).
     */
    if (flags & ERTS_PI_FLAG_WANT_MSGS) {
	ix = pi_arg2ix(am_messages);
	ASSERT(part_res[ix] == THE_NON_VALUE);
	res = process_info_aux(c_p, hfact, rp, rp_locks, ix,
                               flags, &reserve_size, reds);
	ASSERT(res != am_undefined);
	ASSERT(res != THE_NON_VALUE);
        part_res[ix] = res;
    }

    /* Compute each remaining requested item once (duplicates reuse the
     * cached value in part_res[]). */
    for (item_ix_ix = item_ix_len - 1; item_ix_ix >= 0; item_ix_ix--) {
	ix = item_ix[item_ix_ix];
	if (part_res[ix] == THE_NON_VALUE) {
	    res = process_info_aux(c_p, hfact, rp, rp_locks, ix,
                                   flags, &reserve_size, reds);
	    ASSERT(res != am_undefined);
	    ASSERT(res != THE_NON_VALUE);
            part_res[ix] = res;
	}
    }

    res = NIL;

    /* Cons results back-to-front so the list matches request order. */
    for (item_ix_ix = item_ix_len - 1; item_ix_ix >= 0; item_ix_ix--) {
	ix = item_ix[item_ix_ix];
	ASSERT(part_res[ix] != THE_NON_VALUE);
	/*
	 * If we should ignore the value of registered_name,
	 * its value is nil. For more info, see comment in the
	 * beginning of process_info_aux().
	 */
	if (is_nil(part_res[ix])) {
	    ASSERT(!(flags & ERTS_PI_FLAG_ALWAYS_WRAP));
	    ASSERT(pi_ix2arg(ix) == am_registered_name);
	}
	else {
            Eterm *hp;
            ERTS_PI_UNRESERVE(reserve_size, 2);
            hp = erts_produce_heap(hfact, 2, reserve_size);
	    res = CONS(hp, part_res[ix], res);
        }
    }

    return res;
}
1042
1043 static void
1044 pi_setup_grow(int **arr, int *def_arr, Uint *sz, int ix);
1045
1046 static BIF_RETTYPE
process_info_bif(Process *c_p, Eterm pid, Eterm opt, int always_wrap, int pi2)
{
    /*
     * Common implementation of erlang:process_info/1 and /2.
     *
     * c_p         - calling process (holds its MAIN lock)
     * pid         - process to inspect
     * opt         - single option atom, or a list of option atoms
     * always_wrap - if non-zero, wrap every result in a {Key, Value} tuple
     *               (see the registered_name discussion in process_info_aux())
     * pi2         - non-zero when called as process_info/2 (affects yield)
     *
     * Returns the result term, 'undefined', a badarg error, or traps:
     * either yielding back into the same BIF, or - when the target cannot
     * be inspected locking-free - sending it a process-info request signal
     * and awaiting the reply (see send_signal below).
     */
    ErtsHeapFactory hfact;
    int def_arr[ERTS_PI_DEF_ARR_SZ];
    int *item_ix = &def_arr[0];
    Process *rp = NULL;
    erts_aint32_t state;
    BIF_RETTYPE ret;
    Uint reds = 0;
    ErtsProcLocks locks = 0;
    int flags;
    Uint reserve_size;
    int len;
    Eterm res;

    ERTS_CT_ASSERT(ERTS_PI_DEF_ARR_SZ > 0);

    if (c_p->common.id == pid) {
        /*
         * Self-inspection: handle our own incoming signals first so the
         * result reflects a consistent, up-to-date state.
         */
        int local_only = c_p->flags & F_LOCAL_SIGS_ONLY;
        int sres, sreds, reds_left;

        reds_left = ERTS_BIF_REDS_LEFT(c_p);
        sreds = reds_left;

        if (!local_only) {
            erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
            erts_proc_sig_fetch(c_p);
            erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
        }

        sres = erts_proc_sig_handle_incoming(c_p, &state, &sreds, sreds, !0);

        BUMP_REDS(c_p, (int) sreds);
        reds_left -= sreds;

        if (state & ERTS_PSFLG_EXITING) {
            c_p->flags &= ~F_LOCAL_SIGS_ONLY;
            goto exited;
        }
        /* Both operands are 0/1, so bitwise '|' equals logical or here. */
        if (!sres | (reds_left <= 0)) {
            /*
             * More signals to handle or out of reds; need
             * to yield and continue. Prevent fetching of
             * more signals by setting local-sigs-only flag.
             */
            c_p->flags |= F_LOCAL_SIGS_ONLY;
            goto yield;
        }

        c_p->flags &= ~F_LOCAL_SIGS_ONLY;
    }

    if (is_atom(opt)) {
        int ix = pi_arg2ix(opt);
        /*
         * Validate the index *before* using it with the pi_ix2*()
         * table lookups; a negative index would read out of bounds
         * in release builds.
         */
        if (ix < 0)
            goto badarg;
        item_ix[0] = ix;
        len = 1;
        locks = pi_ix2locks(ix);
        reserve_size = 3 + pi_ix2rsz(ix);
        flags = ERTS_PI_FLAG_SINGELTON;
        flags |= pi_ix2flags(ix);
    }
    else {
        /* A list of options; accumulate indices, locks, flags and an
         * upper bound on the heap space needed for the result. */
        Eterm list = opt;
        Uint size = ERTS_PI_DEF_ARR_SZ;

        len = 0;
        reserve_size = 0;
        locks = 0;
        flags = 0;

        while (is_list(list)) {
            Eterm *consp = list_val(list);
            Eterm arg = CAR(consp);
            int ix = pi_arg2ix(arg);
            if (ix < 0)
                goto badarg;

            if (len >= size)
                pi_setup_grow(&item_ix, def_arr, &size, len);

            item_ix[len++] = ix;

            locks |= pi_ix2locks(ix);
            flags |= pi_ix2flags(ix);
            reserve_size += pi_ix2rsz(ix);
            reserve_size += 3; /* 2-tuple */
            reserve_size += 2; /* cons */

            list = CDR(consp);
        }

        if (is_not_nil(list))
            goto badarg;
    }

    if (is_not_internal_pid(pid)) {
        /* An external pid pointing at this node refers to a process from
         * a previous incarnation of the node; report it as dead. */
        if (is_external_pid(pid)
            && external_pid_dist_entry(pid) == erts_this_dist_entry)
            goto undefined;
        goto badarg;
    }

    if (always_wrap)
        flags |= ERTS_PI_FLAG_ALWAYS_WRAP;

    if (c_p->common.id == pid) {
        rp = c_p;
        if (locks & ~ERTS_PROC_LOCK_MAIN)
            erts_proc_lock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
        locks |= ERTS_PROC_LOCK_MAIN;
    }
    else {
        /* Inspecting another process: only do it directly if the target
         * is signal-free and can be locked without blocking; otherwise
         * ask it to produce the result itself via a signal. */
        if (flags & ERTS_PI_FLAG_FORCE_SIG_SEND)
            goto send_signal;
        state = ERTS_PSFLG_RUNNING; /* fail state... */
        rp = erts_try_lock_sig_free_proc(pid, locks, &state);
        if (!rp)
            goto undefined;
        if (rp == ERTS_PROC_LOCK_BUSY) {
            rp = NULL;
            goto send_signal;
        }
        if (state & ERTS_PSFLG_EXITING) {
            if (locks)
                erts_proc_unlock(rp, locks);
            locks = 0;
            /* wait for it to terminate properly... */
            goto send_signal;
        }
        if (flags & ERTS_PI_FLAG_NEED_MSGQ_LEN) {
            ASSERT(locks & ERTS_PROC_LOCK_MAIN);
            erts_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
            erts_proc_sig_fetch(rp);
            /*
             * Bug fix: check the *target* process' (rp) signal queue,
             * not our own (the code previously tested c_p->sig_qs.cont).
             * If the target still has unhandled signals we cannot get an
             * accurate message-queue length here; fall back to asking it.
             */
            if (rp->sig_qs.cont) {
                erts_proc_unlock(rp, locks|ERTS_PROC_LOCK_MSGQ);
                locks = 0;
                goto send_signal;
            }
            erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
        }
    }

    erts_factory_proc_init(&hfact, c_p);

    res = erts_process_info(c_p, &hfact, rp, locks, item_ix, len,
                            flags, reserve_size, &reds);

    erts_factory_close(&hfact);

    if (reds > INT_MAX/2)
        reds = INT_MAX/2;
    BUMP_REDS(c_p, (int) reds);

    state = erts_atomic32_read_acqb(&rp->state);
    if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE)) {
        if (state & ERTS_PSFLG_FREE) {
            ASSERT(!locks);
            goto undefined;
        }
        if (locks)
            erts_proc_unlock(rp, locks);
        locks = 0;
        /* wait for it to terminate properly... */
        goto send_signal;
    }

    if (c_p == rp || !ERTS_PROC_HAS_INCOMING_SIGNALS(c_p))
        ERTS_BIF_PREP_RET(ret, res);
    else
        ERTS_BIF_PREP_HANDLE_SIGNALS_RETURN(ret, c_p, res);

done:
    /* Common exit: release target locks (never our own MAIN lock) and
     * free the item array if it was heap allocated. */

    if (c_p == rp)
        locks &= ~ERTS_PROC_LOCK_MAIN;

    if (locks && rp)
        erts_proc_unlock(rp, locks);

    if (item_ix != def_arr)
        erts_free(ERTS_ALC_T_TMP, item_ix);

    return ret;

badarg:
    ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
    goto done;

undefined:
    ERTS_BIF_PREP_RET(ret, am_undefined);
    goto done;

exited:
    ERTS_BIF_PREP_EXITED(ret, c_p);
    goto done;

yield:
    if (pi2)
        ERTS_BIF_PREP_YIELD2(ret, bif_export[BIF_process_info_2], c_p, pid, opt);
    else
        ERTS_BIF_PREP_YIELD1(ret, bif_export[BIF_process_info_1], c_p, pid);
    goto done;

send_signal: {
        Eterm ref = erts_make_ref(c_p);
        int enqueued, need_msgq_len;
        flags |= ERTS_PI_FLAG_REQUEST_FOR_OTHER;
        need_msgq_len = (flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
        /*
         * Set receive mark so we wont have to scan the whole
         * message queue for the result. Note caller unconditionally
         * has to enter a receive only matching messages containing
         * 'ref', or restore save pointer.
         */
        ERTS_RECV_MARK_SAVE(c_p);
        ERTS_RECV_MARK_SET(c_p);
        enqueued = erts_proc_sig_send_process_info_request(c_p, pid, item_ix,
                                                           len, need_msgq_len,
                                                           flags, reserve_size,
                                                           ref);
        if (!enqueued) {
            /* Restore save pointer... */
            JOIN_MESSAGE(c_p);
            goto undefined;
        }
        ERTS_BIF_PREP_TRAP1(ret, erts_await_result, c_p, ref);
        goto done;
    }
}
1278
1279 static void
pi_setup_grow(int ** arr,int * def_arr,Uint * sz,int ix)1280 pi_setup_grow(int **arr, int *def_arr, Uint *sz, int ix)
1281 {
1282 *sz = (ix+1) + ERTS_PI_DEF_ARR_SZ;
1283 if (*arr != def_arr)
1284 *arr = erts_realloc(ERTS_ALC_T_TMP, *arr, (*sz)*sizeof(int));
1285 else {
1286 int *new_arr = erts_alloc(ERTS_ALC_T_TMP, (*sz)*sizeof(int));
1287 sys_memcpy((void *) new_arr, (void *) def_arr,
1288 sizeof(int)*ERTS_PI_DEF_ARR_SZ);
1289 *arr = new_arr;
1290 }
1291 }
1292
1293
process_info_2(BIF_ALIST_2)1294 BIF_RETTYPE process_info_2(BIF_ALIST_2)
1295 {
1296 return process_info_bif(BIF_P, BIF_ARG_1, BIF_ARG_2, !is_atom(BIF_ARG_2), !0);
1297 }
1298
process_info_1(BIF_ALIST_1)1299 BIF_RETTYPE process_info_1(BIF_ALIST_1)
1300 {
1301 return process_info_bif(BIF_P, BIF_ARG_1, pi_1_keys_list, 0, 0);
1302 }
1303
/*
 * Build the value term for ONE process_info() item.
 *
 * c_p           - process building the result (heap factory owner)
 * hfact         - heap factory the result is built on
 * rp            - process being inspected (may equal c_p)
 * rp_locks      - locks currently held on rp
 * item_ix       - ERTS_PI_IX_* index of the requested item
 * flags         - ERTS_PI_FLAG_* request flags
 * reserve_sizep - in/out: heap space still reserved for the remaining
 *                 items of the request; each item "unreserves" its own
 *                 estimated size before producing heap (ERTS_PI_UNRESERVE)
 * reds          - in/out: reduction cost accumulator
 *
 * Returns the built term, normally wrapped as {Key, Value} (see the
 * registered_name note below), or THE_NON_VALUE for an unknown item
 * (which the caller turns into badarg).
 */
Eterm
process_info_aux(Process *c_p,
                 ErtsHeapFactory *hfact,
                 Process *rp,
                 ErtsProcLocks rp_locks,
                 int item_ix,
                 int flags,
                 Uint *reserve_sizep,
                 Uint *reds)
{
    Eterm *hp;
    Eterm res = NIL;
    Uint reserved;
    Uint reserve_size = *reserve_sizep;

#ifdef ERTS_ENABLE_LOCK_CHECK
    /* Lock-checker sanity: most items require rp's MAIN lock; the
     * status/priority/suspending items must NOT hold anything besides
     * (possibly) MAIN, since they read atomics only. */
    ErtsProcLocks locks = erts_proc_lc_my_proc_locks(rp);

    switch (item_ix) {
    case ERTS_PI_IX_STATUS:
    case ERTS_PI_IX_PRIORITY:
    case ERTS_PI_IX_SUSPENDING:
        ERTS_LC_ASSERT((locks & ~ERTS_PROC_LOCK_MAIN) == 0);
        break;
    default:
        ERTS_LC_ASSERT(locks == ERTS_PROC_LOCK_MAIN);
        break;
    }
#endif

    reserved = pi_ix2rsz(item_ix);
    ERTS_PI_UNRESERVE(reserve_size, reserved);

    (*reds)++;

    ASSERT(rp);

    /*
     * Q: Why this ERTS_PI_FLAG_ALWAYS_WRAP flag?
     *
     * A: registered_name is strange. If process has no registered name,
     *    process_info(Pid, registered_name) returns [], and
     *    the result of process_info(Pid) has no {registered_name, Name}
     *    tuple in the resulting list. This is inconsistent with all other
     *    options, but we do not dare to change it.
     *
     *    When process_info/2 is called with a list as second argument,
     *    registered_name behaves as it should, i.e. a
     *    {registered_name, []} will appear in the resulting list.
     *
     *    If ERTS_PI_FLAG_ALWAYS_WRAP is set, process_info_aux() always
     *    wrap the result in a key two tuple.
     */

    switch (item_ix) {

    case ERTS_PI_IX_REGISTERED_NAME:
        if (rp->common.u.alive.reg)
            res = rp->common.u.alive.reg->name;
        else {
            if (flags & ERTS_PI_FLAG_ALWAYS_WRAP)
                res = NIL;
            else
                return NIL; /* unwrapped [] - see the Q/A comment above */
        }
        break;

    case ERTS_PI_IX_CURRENT_FUNCTION:
        res = current_function(c_p, hfact, rp, 0,
                               reserve_size, flags);
        break;

    case ERTS_PI_IX_CURRENT_LOCATION:
        res = current_function(c_p, hfact, rp, 1,
                               reserve_size, flags);
        break;

    case ERTS_PI_IX_CURRENT_STACKTRACE:
        res = current_stacktrace(hfact, rp, reserve_size);
        break;

    case ERTS_PI_IX_INITIAL_CALL:
        hp = erts_produce_heap(hfact, 4, reserve_size);
        res = TUPLE3(hp,
                     rp->u.initial.module,
                     rp->u.initial.function,
                     make_small(rp->u.initial.arity));
        hp += 4;
        break;

    case ERTS_PI_IX_STATUS: {
        erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
        res = erts_process_state2status(state);
        if (res == am_running && (state & ERTS_PSFLG_RUNNING_SYS)) {
            ASSERT(c_p == rp);
            ASSERT(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER);
            if (!(state & (ERTS_PSFLG_SYS_TASKS
                           | ERTS_PSFLG_ACTIVE
                           | ERTS_PSFLG_SIG_Q
                           | ERTS_PSFLG_SIG_IN_Q))) {
                /*
                 * We are servicing a process-info request from
                 * another process. If that other process could
                 * have inspected our state itself, we would have
                 * been in the 'waiting' state.
                 */
                res = am_waiting;
            }
        }
        break;
    }

    case ERTS_PI_IX_MESSAGES: {
        ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
        /* Sensitive processes never expose their messages. */
        if (rp->sig_qs.len == 0 || (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE))
            res = NIL;
        else {
            int info_on_self = !(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER);
            ErtsMessageInfo *mip;
            Sint i, len;
            Uint heap_need;

            mip = erts_alloc(ERTS_ALC_T_TMP,
                             rp->sig_qs.len*sizeof(ErtsMessageInfo));

            /*
             * Note that message queue may shrink when calling
             * erts_proc_sig_prep_msgq_for_inspection() since it removes
             * corrupt distribution messages.
             */
            heap_need = erts_proc_sig_prep_msgq_for_inspection(c_p, rp,
                                                               rp_locks,
                                                               info_on_self,
                                                               mip);
            len = rp->sig_qs.len;

            heap_need += len*2; /* Cons cells */

            reserve_size += heap_need;

            /* Build list of messages... */
            for (i = len - 1, res = NIL; i >= 0; i--) {
                Eterm msg = ERL_MESSAGE_TERM(mip[i].msgp);
                Uint sz = mip[i].size;

                ERTS_PI_UNRESERVE(reserve_size, sz+2);
                hp = erts_produce_heap(hfact, sz+2, reserve_size);

                /* sz == 0 means an immediate term; no copying needed. */
                if (sz != 0)
                    msg = copy_struct(msg, sz, &hp, hfact->off_heap);

                res = CONS(hp, msg, res);
                hp += 2;
            }

            *reds += (Uint) len / 4;

            erts_free(ERTS_ALC_T_TMP, mip);
        }
        break;
    }

    case ERTS_PI_IX_MESSAGE_QUEUE_LEN: {
        Sint len = rp->sig_qs.len;
        ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
        ASSERT(len >= 0);
        if (len <= MAX_SMALL)
            res = make_small(len);
        else {
            hp = erts_produce_heap(hfact, BIG_UINT_HEAP_SIZE, reserve_size);
            res = uint_to_big((Uint) len, hp);
        }
        break;
    }

    case ERTS_PI_IX_LINKS: {
        MonitorInfoCollection mic;
        int i;
        Eterm item;

        INIT_MONITOR_INFOS(mic);

        erts_link_tree_foreach(ERTS_P_LINKS(rp), collect_one_link, (void *) &mic);

        reserve_size += mic.sz;
        res = NIL;
        for (i = 0; i < mic.mi_i; i++) {
            Eterm item_src = mic.mi[i].entity.term;
            Uint sz = NC_HEAP_SIZE(item_src) + 2;
            ERTS_PI_UNRESERVE(reserve_size, sz);
            hp = erts_produce_heap(hfact, sz, reserve_size);
            item = STORE_NC(&hp, hfact->off_heap, item_src);
            res = CONS(hp, item, res);
        }

        *reds += (Uint) mic.mi_i / 4;

        DESTROY_MONITOR_INFOS(mic);
        break;
    }

    case ERTS_PI_IX_MONITORS: {
        MonitorInfoCollection mic;
        int i;

        INIT_MONITOR_INFOS(mic);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
                                  collect_one_origin_monitor,
                                  (void *) &mic);

        reserve_size += mic.sz;
        res = NIL;
        for (i = 0; i < mic.mi_i; i++) {
            if (mic.mi[i].named) {
                /* Monitor by name.
                 * Build {process|port, {Name, Node}} and cons it.
                 */
                Eterm t1, t2;
                /* If pid is an atom, then it is a remote named monitor, which
                   has to be a process */
                Eterm m_type = is_port(mic.mi[i].pid) ? am_port : am_process;
                ASSERT(is_pid(mic.mi[i].pid)
                       || is_port(mic.mi[i].pid)
                       || is_atom(mic.mi[i].pid));

                ERTS_PI_UNRESERVE(reserve_size, 3+3+2);
                hp = erts_produce_heap(hfact, 3+3+2, reserve_size);

                t1 = TUPLE2(hp, mic.mi[i].entity.term, mic.mi[i].node);
                hp += 3;
                t2 = TUPLE2(hp, m_type, t1);
                hp += 3;
                res = CONS(hp, t2, res);
            }
            else {
                /* Build {process|port|time_offset, Pid|clock_service} and cons it. */
                Eterm t;
                Eterm pid;
                Eterm m_type;
                Eterm pid_src = mic.mi[i].entity.term;
                Uint sz = is_atom(pid_src) ? 0 : NC_HEAP_SIZE(pid_src);
                sz += 3 + 2;

                ERTS_PI_UNRESERVE(reserve_size, sz);
                hp = erts_produce_heap(hfact, sz, reserve_size);

                pid = (is_atom(pid_src)
                       ? pid_src
                       : STORE_NC(&hp, hfact->off_heap, pid_src));

                switch (mic.mi[i].type) {
                case ERTS_MON_TYPE_PORT:
                    m_type = am_port;
                    break;
                case ERTS_MON_TYPE_TIME_OFFSET:
                    m_type = am_time_offset;
                    break;
                default:
                    m_type = am_process;
                    break;
                }

                ASSERT(is_pid(mic.mi[i].pid)
                       || is_port(mic.mi[i].pid));

                t = TUPLE2(hp, m_type, pid);
                hp += 3;
                res = CONS(hp, t, res);
            }
        }

        *reds += (Uint) mic.mi_i / 4;

        DESTROY_MONITOR_INFOS(mic);
        break;
    }

    case ERTS_PI_IX_MONITORED_BY: {
        MonitorInfoCollection mic;
        int i;
        Eterm item;

        INIT_MONITOR_INFOS(mic);
        /* Target monitors live both in the local-target list and in the
         * monitor tree; collect from both. */
        erts_monitor_list_foreach(ERTS_P_LT_MONITORS(rp),
                                  collect_one_target_monitor,
                                  (void *) &mic);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
                                  collect_one_target_monitor,
                                  (void *) &mic);

        reserve_size += mic.sz;

        res = NIL;
        for (i = 0; i < mic.mi_i; ++i) {
            Uint sz = 2;

            if (mic.mi[i].type == ERTS_MON_TYPE_RESOURCE)
                sz += erts_resource_ref_size(mic.mi[i].entity.resource);
            else
                sz += NC_HEAP_SIZE(mic.mi[i].entity.term);

            ERTS_PI_UNRESERVE(reserve_size, sz);
            hp = erts_produce_heap(hfact, sz, reserve_size);

            if (mic.mi[i].type == ERTS_MON_TYPE_RESOURCE)
                item = erts_bld_resource_ref(&hp,
                                             hfact->off_heap,
                                             mic.mi[i].entity.resource);
            else
                item = STORE_NC(&hp,
                                hfact->off_heap,
                                mic.mi[i].entity.term);
            res = CONS(hp, item, res);
        }

        *reds += (Uint) mic.mi_i / 4;

        DESTROY_MONITOR_INFOS(mic);
        break;
    }

    case ERTS_PI_IX_SUSPENDING: {
        ErtsSuspendMonitorInfoCollection smic;
        int i;

        ERTS_INIT_SUSPEND_MONITOR_INFOS(smic);

        erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
                                  collect_one_suspend_monitor,
                                  (void *) &smic);

        reserve_size += smic.sz;

        res = NIL;
        for (i = 0; i < smic.smi_i; i++) {
            ErtsMonitorSuspend *msp;
            erts_aint_t mstate;
            Sint ci;
            Eterm ct, active, pending, item;
            Uint sz = 4 + 2;

            msp = smic.smi[i];
            mstate = erts_atomic_read_nob(&msp->state);

            /* Suspend count; may exceed the small range. */
            ci = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK);
            if (!IS_SSMALL(ci))
                sz += BIG_UINT_HEAP_SIZE;

            ERTS_PI_UNRESERVE(reserve_size, sz);
            hp = erts_produce_heap(hfact, sz, reserve_size);

            if (IS_SSMALL(ci))
                ct = make_small(ci);
            else {
                ct = small_to_big(ci, hp);
                hp += BIG_UINT_HEAP_SIZE;
            }

            if (mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE) {
                active = ct;
                pending = make_small(0);
            }
            else {
                active = make_small(0);
                pending = ct;
            }

            ASSERT(is_internal_pid(msp->md.origin.other.item));

            item = TUPLE3(hp, msp->md.origin.other.item, active, pending);
            hp += 4;
            res = CONS(hp, item, res);
        }

        *reds += (Uint) smic.smi_i / 4;

        ERTS_DESTROY_SUSPEND_MONITOR_INFOS(smic);

        break;
    }

    case ERTS_PI_IX_DICTIONARY:
        /* Sensitive processes never expose their dictionary. */
        if (!rp->dictionary || (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE)) {
            res = NIL;
        } else {
            Uint num = rp->dictionary->numElements;
            res = erts_dictionary_copy(hfact, rp->dictionary, reserve_size);
            *reds += (Uint) num / 4;
        }

        break;

    case ERTS_PI_IX_TRAP_EXIT:
        res = (rp->flags & F_TRAP_EXIT) ? am_true : am_false;
        break;

    case ERTS_PI_IX_ERROR_HANDLER:
        res = erts_proc_get_error_handler(rp);
        break;

    case ERTS_PI_IX_HEAP_SIZE: {
        /* Two-pass build: first call computes the heap need, second
         * builds the term (common erts_bld_* idiom). */
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, HEAP_SIZE(rp));
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, HEAP_SIZE(rp));
        break;
    }

    case ERTS_PI_IX_FULLSWEEP_AFTER: {
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, MAX_GEN_GCS(rp));
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, MAX_GEN_GCS(rp));
        break;
    }

    case ERTS_PI_IX_MIN_HEAP_SIZE: {
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, MIN_HEAP_SIZE(rp));
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, MIN_HEAP_SIZE(rp));
        break;
    }

    case ERTS_PI_IX_MIN_BIN_VHEAP_SIZE: {
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, MIN_VHEAP_SIZE(rp));
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, MIN_VHEAP_SIZE(rp));
        break;
    }

    case ERTS_PI_IX_MAX_HEAP_SIZE: {
        Uint hsz = 0;
        (void) erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
                                      MAX_HEAP_SIZE_FLAGS_GET(rp),
                                      NULL, &hsz);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
                                     MAX_HEAP_SIZE_FLAGS_GET(rp),
                                     &hp, NULL);
        break;
    }

    case ERTS_PI_IX_TOTAL_HEAP_SIZE: {
        Uint total_heap_size;
        Uint hsz = 0;

        total_heap_size = rp->heap_sz;
        if (rp->old_hend && rp->old_heap)
            total_heap_size += rp->old_hend - rp->old_heap;

        total_heap_size += rp->mbuf_sz;

        if (rp->flags & F_ON_HEAP_MSGQ) {
            /* On-heap message queue: attached (not yet moved on-heap)
             * message data counts towards the total as well. */
            ErtsMessage *mp;
            ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
            for (mp = rp->sig_qs.first; mp; mp = mp->next) {
                ASSERT(ERTS_SIG_IS_MSG(mp));
                if (mp->data.attached)
                    total_heap_size += erts_msg_attached_data_size(mp);
            }
            *reds += (Uint) rp->sig_qs.len / 4;
        }

        (void) erts_bld_uint(NULL, &hsz, total_heap_size);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, total_heap_size);
        break;
    }

    case ERTS_PI_IX_STACK_SIZE: {
        Uint stack_size = STACK_START(rp) - rp->stop;
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, stack_size);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, stack_size);
        break;
    }

    case ERTS_PI_IX_MEMORY: { /* Memory consumed in bytes */
        Uint hsz = 0;
        Uint size = erts_process_memory(rp, 0);
        (void) erts_bld_uint(NULL, &hsz, size);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, size);

        ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
        *reds += (Uint) rp->sig_qs.len / 4;

        break;
    }

    case ERTS_PI_IX_GARBAGE_COLLECTION: {
        DECL_AM(minor_gcs);
        Eterm t;
        Uint map_sz = 0;

        erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), MAX_HEAP_SIZE_FLAGS_GET(rp), NULL, &map_sz);

        /* Five {Key,Value} 3-tuples + five cons cells + the max-heap map. */
        hp = erts_produce_heap(hfact, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + map_sz, reserve_size);

        t = TUPLE2(hp, AM_minor_gcs, make_small(GEN_GCS(rp))); hp += 3;
        res = CONS(hp, t, NIL); hp += 2;
        t = TUPLE2(hp, am_fullsweep_after, make_small(MAX_GEN_GCS(rp))); hp += 3;
        res = CONS(hp, t, res); hp += 2;

        t = TUPLE2(hp, am_min_heap_size, make_small(MIN_HEAP_SIZE(rp))); hp += 3;
        res = CONS(hp, t, res); hp += 2;
        t = TUPLE2(hp, am_min_bin_vheap_size, make_small(MIN_VHEAP_SIZE(rp))); hp += 3;
        res = CONS(hp, t, res); hp += 2;

        t = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), MAX_HEAP_SIZE_FLAGS_GET(rp), &hp, NULL);

        t = TUPLE2(hp, am_max_heap_size, t); hp += 3;
        res = CONS(hp, t, res); hp += 2;
        break;
    }

    case ERTS_PI_IX_GARBAGE_COLLECTION_INFO: {
        Uint sz = 0, actual_sz = 0;

        /* Size pass followed by build pass. */
        erts_process_gc_info(rp, &sz, NULL, 0, 0);

        hp = erts_produce_heap(hfact, sz, reserve_size);
        res = erts_process_gc_info(rp, &actual_sz, &hp, 0, 0);

        break;
    }

    case ERTS_PI_IX_GROUP_LEADER: {
        int sz = NC_HEAP_SIZE(rp->group_leader);
        hp = erts_produce_heap(hfact, sz, reserve_size);
        res = STORE_NC(&hp, hfact->off_heap, rp->group_leader);
        break;
    }

    case ERTS_PI_IX_REDUCTIONS: {
        /* Include reductions consumed in the current (unfinished) slice. */
        Uint reds = rp->reds + erts_current_reductions(c_p, rp);
        Uint hsz = 0;
        (void) erts_bld_uint(NULL, &hsz, reds);
        hp = erts_produce_heap(hfact, hsz, reserve_size);
        res = erts_bld_uint(&hp, NULL, reds);
        break;
    }

    case ERTS_PI_IX_PRIORITY: {
        erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
        if (ERTS_PSFLG_EXITING & state)
            return am_undefined;
        res = erts_get_process_priority(state);
        break;
    }

    case ERTS_PI_IX_TRACE:
        res = make_small(ERTS_TRACE_FLAGS(rp) & TRACEE_FLAGS);
        break;

    case ERTS_PI_IX_BINARY: {
        ErlHeapFragment *hfrag;
        Uint sz;

        res = NIL;
        sz = 0;

        /* Size pass over the off-heap lists (process + heap fragments),
         * then a build pass over the same lists. */
        (void)bld_bin_list(NULL, &sz, &MSO(rp), NIL);
        for (hfrag = rp->mbuf; hfrag != NULL; hfrag = hfrag->next) {
            (void)bld_bin_list(NULL, &sz, &hfrag->off_heap, NIL);
        }

        hp = erts_produce_heap(hfact, sz, reserve_size);

        res = bld_bin_list(&hp, NULL, &MSO(rp), NIL);
        for (hfrag = rp->mbuf; hfrag != NULL; hfrag = hfrag->next) {
            res = bld_bin_list(&hp, NULL, &hfrag->off_heap, res);
        }

        break;
    }

    case ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN: {
        Uint sz = size_object(rp->seq_trace_token);
        hp = erts_produce_heap(hfact, sz, reserve_size);
        res = copy_struct(rp->seq_trace_token, sz, &hp, hfact->off_heap);
        break;
    }

    case ERTS_PI_IX_CATCHLEVEL:
        res = make_small(catchlevel(rp));
        break;

    case ERTS_PI_IX_BACKTRACE: {
        /* Dump the stack as text into a dynamic string buffer and return
         * it as a binary. */
        erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
        erts_stack_dump(ERTS_PRINT_DSBUF, (void *) dsbufp, rp);
        res = erts_heap_factory_new_binary(hfact, (byte *) dsbufp->str,
                                           dsbufp->str_len, reserve_size);
        erts_destroy_tmp_dsbuf(dsbufp);
        break;
    }

    case ERTS_PI_IX_LAST_CALLS: {
        struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(rp);
        if (!scb) {
            res = am_false;
        } else {
            /*
             * One cons cell and a 3-struct, and a 2-tuple.
             * Might be less than that, if there are sends, receives or timeouts,
             * so we must do a HRelease() to avoid creating holes.
             */
            Sint needed = scb->n*(2+4);
            Eterm term, list;
            int i, j;
            Export *exp;

            reserve_size += needed;

            list = NIL;
            for (i = 0; i < scb->n; i++) {
                Uint sz;
                /* Walk the circular buffer backwards from the cursor. */
                j = scb->cur - i - 1;
                if (j < 0)
                    j += scb->len;

                sz = 2;
                exp = scb->ct[j];
                /* send/receive/timeout pseudo-calls are plain atoms;
                 * everything else becomes an {M,F,A} tuple. */
                if (exp != &exp_send && exp != &exp_receive && exp != &exp_timeout)
                    sz += 4;

                needed -= sz;
                ERTS_PI_UNRESERVE(reserve_size, sz);
                hp = erts_produce_heap(hfact, sz, reserve_size);

                if (exp == &exp_send)
                    term = am_send;
                else if (exp == &exp_receive)
                    term = am_receive;
                else if (exp == &exp_timeout)
                    term = am_timeout;
                else {
                    term = TUPLE3(hp,
                                  scb->ct[j]->info.mfa.module,
                                  scb->ct[j]->info.mfa.function,
                                  make_small(scb->ct[j]->info.mfa.arity));
                    hp += 4;
                }
                list = CONS(hp, term, list);
            }

            ASSERT(needed >= 0);
            if (needed > 0)
                reserve_size -= needed; /* give back the over-estimate */

            res = list;
        }
        break;
    }

    case ERTS_PI_IX_MESSAGE_QUEUE_DATA:
        switch (rp->flags & (F_OFF_HEAP_MSGQ|F_ON_HEAP_MSGQ)) {
        case F_OFF_HEAP_MSGQ:
            res = am_off_heap;
            break;
        case F_ON_HEAP_MSGQ:
            res = am_on_heap;
            break;
        default:
            /* Exactly one of the two flags must be set. */
            res = am_error;
            ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
            break;
        }
        break;

    case ERTS_PI_IX_MAGIC_REF: {
        Uint sz = 0;
        (void) bld_magic_ref_bin_list(NULL, &sz, &MSO(rp));
        hp = erts_produce_heap(hfact, sz, 0);
        res = bld_magic_ref_bin_list(&hp, NULL, &MSO(rp));

        *reds += (Uint) 10;
        break;
    }

    default:
        return THE_NON_VALUE; /* will produce badarg */

    }

    /* Wrap the value as {Key, Value} (the normal return shape). */
    ERTS_PI_UNRESERVE(reserve_size, 3);
    *reserve_sizep = reserve_size;
    hp = erts_produce_heap(hfact, 3, reserve_size);

    return TUPLE2(hp, pi_ix2arg(item_ix), res);
}
1998 #undef MI_INC
1999
/*
 * Build the value for the 'current_function' / 'current_location' items.
 *
 * full_info != 0 means include file/line location info (current_location);
 * otherwise only an {M,F,A} tuple is built (current_function).
 *
 * NOTE(review): may cache the looked-up MFA in rp->current as a side
 * effect; presumably safe since the caller holds rp's MAIN lock - confirm.
 */
static Eterm
current_function(Process *c_p, ErtsHeapFactory *hfact, Process* rp,
                 int full_info, Uint reserve_size, int flags)
{
    Eterm* hp;
    Eterm res;
    /* fi is only guaranteed to be initialized when full_info is set or
     * rp->current was NULL; it is also only read in those cases. */
    FunctionInfo fi;

    if (rp->current == NULL) {
        erts_lookup_function_info(&fi, rp->i, full_info);
        rp->current = fi.mfa;
    } else if (full_info) {
        erts_lookup_function_info(&fi, rp->i, full_info);
        if (fi.mfa == NULL) {
            /* Use the current function without location info */
            erts_set_current_function(&fi, rp->current);
        }
    }

    if (c_p == rp && !(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER)) {
        FunctionInfo fi2;

        /*
         * The current function is erlang:process_info/{1,2},
         * which is not the answer that the application want.
         * We will use the function pointed into by rp->cp
         * instead if it can be looked up.
         */
        erts_lookup_function_info(&fi2, rp->cp, full_info);
        if (fi2.mfa) {
            fi = fi2;
            rp->current = fi2.mfa;
        }
    }

    /*
     * Return the result.
     */
    if (rp->current == NULL) {
        res = am_undefined;
    } else if (full_info) {
        hp = erts_produce_heap(hfact, fi.needed, reserve_size);
        erts_build_mfa_item(&fi, hp, am_true, &res);
    } else {
        hp = erts_produce_heap(hfact, 4, reserve_size);
        res = TUPLE3(hp, rp->current->module,
                     rp->current->function,
                     make_small(rp->current->arity));
    }
    return res;
}
2051
/*
 * Build the value for the 'current_stacktrace' item: a list of MFA+location
 * items for up to erts_backtrace_depth frames of rp's stack.
 */
static Eterm
current_stacktrace(ErtsHeapFactory *hfact, Process* rp,
                   Uint reserve_size)
{
    Uint sz;
    struct StackTrace* s;
    int depth;
    FunctionInfo* stk;
    FunctionInfo* stkp;
    Uint heap_size;
    int i;
    Eterm* hp;
    Eterm mfa;
    Eterm res = NIL;

    depth = erts_backtrace_depth;
    /* StackTrace has a flexible trailing array of instruction pointers. */
    sz = offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth;
    s = (struct StackTrace *) erts_alloc(ERTS_ALC_T_TMP, sz);
    s->depth = 0;
    /* Seed the trace with the current instruction pointer and the
     * continuation pointer (minus one so it points into the caller). */
    if (depth > 0 && rp->i) {
        s->trace[s->depth++] = rp->i;
        depth--;
    }
    if (depth > 0 && rp->cp != 0) {
        s->trace[s->depth++] = rp->cp - 1;
        depth--;
    }
    erts_save_stacktrace(rp, s, depth);

    depth = s->depth;
    stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
                                             depth*sizeof(FunctionInfo));
    /* Sizing pass: resolve each raw address; frames with no known MFA
     * are silently dropped (stkp only advances on a hit). */
    heap_size = 3;
    for (i = 0; i < depth; i++) {
        erts_lookup_function_info(stkp, s->trace[i], 1);
        if (stkp->mfa) {
            heap_size += stkp->needed + 2;
            stkp++;
        }
    }

    reserve_size += heap_size;

    /*
     * We intentionally produce heap in small chunks
     * (for more info see process_info_aux()).
     */
    while (stkp > stk) {
        stkp--;
        sz = stkp->needed + 2;
        ERTS_PI_UNRESERVE(reserve_size, sz);
        hp = erts_produce_heap(hfact, sz, reserve_size);
        hp = erts_build_mfa_item(stkp, hp, am_true, &mfa);
        res = CONS(hp, mfa, res);
    }

    erts_free(ERTS_ALC_T_TMP, stk);
    erts_free(ERTS_ALC_T_TMP, s);
    return res;
}
2112
2113 /*
2114 * This function takes care of calls to erlang:system_info/1 when the argument
2115 * is a tuple.
2116 */
static BIF_RETTYPE
info_1_tuple(Process* BIF_P,	/* Pointer to current process. */
	     Eterm* tp,		/* Pointer to first element in tuple */
	     int arity)		/* Arity of tuple (untagged). */
{
    Eterm ret;
    Eterm sel;

    /* First tuple element selects the info category; the remaining
     * element(s) are consumed through the advanced tp pointer. */
    sel = *tp++;

    if (sel == am_memory_internal) {
	switch (arity) {
	case 3:
	    if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1, 1))
		return am_true;
	    /* fallthrough: a failed request is a badarg */
	default:
	    goto badarg;
	}
    }
    else if (sel == am_allocator_sizes) {
	switch (arity) {
	case 2:
	    ERTS_BIF_PREP_TRAP1(ret, alloc_sizes_trap, BIF_P, *tp);
	    return ret;
	case 3:
	    if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1, 0))
		return am_true;
	    /* fallthrough: a failed request is a badarg */
	default:
	    goto badarg;
	}
    }
    else if (sel == am_wordsize && arity == 2) {
	if (tp[0] == am_internal) {
	    return make_small(sizeof(Eterm));
	}
	if (tp[0] == am_external) {
	    return make_small(sizeof(UWord));
	}
	goto badarg;
    } else if (sel == am_allocator) {
	switch (arity) {
	case 2:
	    ERTS_BIF_PREP_TRAP1(ret, alloc_info_trap, BIF_P, *tp);
	    return ret;
	case 3:
	    if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 0, 0))
		return am_true;
	    /* fallthrough: a failed request is a badarg */
	default:
	    goto badarg;
	}
    } else if (ERTS_IS_ATOM_STR("internal_cpu_topology", sel) && arity == 2) {
	return erts_get_cpu_topology_term(BIF_P, *tp);
    } else if (ERTS_IS_ATOM_STR("cpu_topology", sel) && arity == 2) {
	Eterm res = erts_get_cpu_topology_term(BIF_P, *tp);
	if (res == THE_NON_VALUE)
	    goto badarg;
	ERTS_BIF_PREP_TRAP1(ret, erts_format_cpu_topology_trap, BIF_P, res);
	return ret;
#if defined(PURIFY) || defined(VALGRIND)
    } else if (ERTS_IS_ATOM_STR("error_checker", sel)
#if defined(PURIFY)
	       || sel == am_purify
#elif defined(VALGRIND)
	       || ERTS_IS_ATOM_STR("valgrind", sel)
#endif
	) {
	if (*tp == am_memory) {
#if defined(PURIFY)
	    BIF_RET(erts_make_integer(purify_new_leaks(), BIF_P));
#elif defined(VALGRIND)
#  ifdef VALGRIND_DO_ADDED_LEAK_CHECK
	    VALGRIND_DO_ADDED_LEAK_CHECK;
#  else
	    VALGRIND_DO_LEAK_CHECK;
#  endif
	    BIF_RET(make_small(0));
#endif
	} else if (*tp == am_fd) {
#if defined(PURIFY)
	    BIF_RET(erts_make_integer(purify_new_fds_inuse(), BIF_P));
#elif defined(VALGRIND)
	    /* Not present in valgrind... */
	    BIF_RET(make_small(0));
#endif
	} else if (*tp == am_running) {
#if defined(PURIFY)
	    BIF_RET(purify_is_running() ? am_true : am_false);
#elif defined(VALGRIND)
	    BIF_RET(RUNNING_ON_VALGRIND ? am_true : am_false);
#endif
	} else if (is_list(*tp)) {
#if defined(PURIFY)
#  define ERTS_ERROR_CHECKER_PRINTF purify_printf
#elif defined(VALGRIND)
#  define ERTS_ERROR_CHECKER_PRINTF VALGRIND_PRINTF
#endif
	    ErlDrvSizeT buf_size = 8*1024; /* Try with 8KB first */
	    char *buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
	    ErlDrvSizeT r = erts_iolist_to_buf(*tp, (char*) buf, buf_size - 1);
	    if (ERTS_IOLIST_TO_BUF_FAILED(r)) {
		/* 8KB was not enough; compute the exact size and retry. */
		erts_free(ERTS_ALC_T_TMP, (void *) buf);
		if (erts_iolist_size(*tp, &buf_size)) {
		    goto badarg;
		}
		buf_size++;
		buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
		r = erts_iolist_to_buf(*tp, (char*) buf, buf_size - 1);
		ASSERT(r == buf_size - 1);
	    }
	    /* r is the number of bytes NOT written, so buf_size-1-r is the
	     * number of bytes written; terminate right after them. */
	    buf[buf_size - 1 - r] = '\0';
	    ERTS_ERROR_CHECKER_PRINTF("%s\n", buf);
	    erts_free(ERTS_ALC_T_TMP, (void *) buf);
	    BIF_RET(am_true);
#undef ERTS_ERROR_CHECKER_PRINTF
	}
#endif
#ifdef QUANTIFY
    } else if (sel == am_quantify) {
	if (*tp == am_clear) {
	    quantify_clear_data();
	    BIF_RET(am_true);
	} else if (*tp == am_start) {
	    quantify_start_recording_data();
	    BIF_RET(am_true);
	} else if (*tp == am_stop) {
	    quantify_stop_recording_data();
	    BIF_RET(am_true);
	} else if (*tp == am_running) {
	    BIF_RET(quantify_is_running() ? am_true : am_false);
	}
#endif
#if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
    } else if (ERTS_IS_ATOM_STR("ultrasparc_set_pcr", sel)) {
	unsigned long long tmp;
	int fd;
	int rc;

	if (arity != 2 || !is_small(*tp)) {
	    goto badarg;
	}
	tmp = signed_val(*tp);
	if ((fd = open("/dev/perfmon", O_RDONLY)) == -1) {
	    BIF_RET(am_false);
	}
	rc = ioctl(fd, PERFMON_SETPCR, &tmp);
	close(fd);
	if (rc < 0) {
	    BIF_RET(am_false);
	}
	BIF_RET(am_true);
#endif
    }

 badarg:
    ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);

    return ret;
}
2275
2276 #define INFO_DSBUF_INC_SZ 256
2277
2278 static erts_dsprintf_buf_t *
grow_info_dsbuf(erts_dsprintf_buf_t * dsbufp,size_t need)2279 grow_info_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
2280 {
2281 size_t size;
2282 size_t free_size = dsbufp->size - dsbufp->str_len;
2283
2284 ASSERT(dsbufp);
2285
2286 if (need <= free_size)
2287 return dsbufp;
2288 size = need - free_size + INFO_DSBUF_INC_SZ;
2289 size = ((size + INFO_DSBUF_INC_SZ - 1)/INFO_DSBUF_INC_SZ)*INFO_DSBUF_INC_SZ;
2290 size += dsbufp->size;
2291 ASSERT(dsbufp->str_len + need <= size);
2292 dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_INFO_DSBUF,
2293 (void *) dsbufp->str,
2294 size);
2295 dsbufp->size = size;
2296 return dsbufp;
2297 }
2298
2299 static erts_dsprintf_buf_t *
erts_create_info_dsbuf(Uint size)2300 erts_create_info_dsbuf(Uint size)
2301 {
2302 Uint init_size = size ? size : INFO_DSBUF_INC_SZ;
2303 erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_info_dsbuf);
2304 erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_INFO_DSBUF,
2305 sizeof(erts_dsprintf_buf_t));
2306 sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
2307 dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_INFO_DSBUF, init_size);
2308 dsbufp->str[0] = '\0';
2309 dsbufp->size = init_size;
2310 return dsbufp;
2311 }
2312
2313 static void
erts_destroy_info_dsbuf(erts_dsprintf_buf_t * dsbufp)2314 erts_destroy_info_dsbuf(erts_dsprintf_buf_t *dsbufp)
2315 {
2316 if (dsbufp->str)
2317 erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp->str);
2318 erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp);
2319 }
2320
/*
 * Build the term returned by erlang:system_info(c_compiler_used):
 *   {gnuc, Vsn | {Major[, Minor[, Patch]]}}   when built with GCC,
 *   {msc, Vsn}                                when built with MSVC,
 *   {undefined, undefined}                    otherwise.
 *
 * Follows the usual two-pass build convention: called first with hpp ==
 * NULL and szp set to compute the heap size, then with hpp set and
 * szp == NULL to actually build the term.
 */
static Eterm
c_compiler_used(Eterm **hpp, Uint *szp)
{

#if defined(__GNUC__)
    /* ERTS_GNUC_VSN_NUMS = how many GCC version components are available */
#  if defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#    define ERTS_GNUC_VSN_NUMS 3
#  elif defined(__GNUC_MINOR__)
#    define ERTS_GNUC_VSN_NUMS 2
#  else
#    define ERTS_GNUC_VSN_NUMS 1
#  endif
    /*
     * NOTE: the #if blocks below are interleaved with the argument list of
     * a single erts_bld_tuple() call; the inner version tuple is only
     * opened/closed when more than one version component exists.
     */
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  erts_bld_atom(hpp, szp, "gnuc"),
#if ERTS_GNUC_VSN_NUMS > 1
			  erts_bld_tuple(hpp,
					 szp,
					 ERTS_GNUC_VSN_NUMS,
#endif
			  erts_bld_uint(hpp, szp,
					(Uint) __GNUC__)
#ifdef __GNUC_MINOR__
			  ,
			  erts_bld_uint(hpp, szp,
					(Uint) __GNUC_MINOR__)
#ifdef __GNUC_PATCHLEVEL__
			  ,
			  erts_bld_uint(hpp, szp,
					(Uint) __GNUC_PATCHLEVEL__)
#endif
#endif
#if ERTS_GNUC_VSN_NUMS > 1
			  )
#endif
	);

#elif defined(_MSC_VER)
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  erts_bld_atom(hpp, szp, "msc"),
			  erts_bld_uint(hpp, szp, (Uint) _MSC_VER));

#else
    /* Unknown compiler */
    return erts_bld_tuple(hpp,
			  szp,
			  2,
			  am_undefined,
			  am_undefined);
#endif

}
2375
/*
 * Return non-zero iff 'module_atom' names a proper (non-package) module.
 * A '.' anywhere in the atom text means the bif (snif) is bogus, i.e. a
 * package, and 0 is returned.
 */
static int is_snif_term(Eterm module_atom) {
    Atom *ap = atom_tab(atom_val(module_atom));
    char *text = (char *) ap->name;
    int pos;

    for (pos = 0; pos < ap->len; pos++) {
	if (text[pos] == '.') {
	    return 0;
	}
    }

    return 1;
}
2389
/*
 * Prepend the {Module, Name, Arity} triple for bif_table[ix] onto the
 * list 'res'. Uses the standard two-pass hpp/szp build convention.
 */
static Eterm build_snif_term(Eterm **hpp, Uint *szp, int ix, Eterm res) {
    Eterm mfa;

    mfa = erts_bld_tuple(hpp, szp, 3,
			 bif_table[ix].module,
			 bif_table[ix].name,
			 make_small(bif_table[ix].arity));

    return erts_bld_cons(hpp, szp, mfa, res);
}
2396
/*
 * Build the list of {Module, Name, Arity} triples for all snif-eligible
 * entries of bif_table, prepending onto 'res'. Iteration order is kept
 * ascending so the resulting list order matches the original.
 */
static Eterm build_snifs_term(Eterm **hpp, Uint *szp, Eterm res) {
    int ix;

    for (ix = 0; ix < BIF_SIZE; ix++) {
	if (!is_snif_term(bif_table[ix].module))
	    continue;
	res = build_snif_term(hpp, szp, ix, res);
    }

    return res;
}
2406
system_info_1(BIF_ALIST_1)2407 BIF_RETTYPE system_info_1(BIF_ALIST_1)
2408 {
2409 Eterm res;
2410 Eterm* hp;
2411 Eterm val;
2412 int i;
2413
2414 if (is_tuple(BIF_ARG_1)) {
2415 Eterm* tp = tuple_val(BIF_ARG_1);
2416 Uint arity = *tp++;
2417 return info_1_tuple(BIF_P, tp, arityval(arity));
2418 } else if (BIF_ARG_1 == am_scheduler_id) {
2419 ErtsSchedulerData *esdp = erts_proc_sched_data(BIF_P);
2420 BIF_RET(make_small(esdp->no));
2421 } else if (BIF_ARG_1 == am_compat_rel) {
2422 ASSERT(erts_compat_rel > 0);
2423 BIF_RET(make_small(erts_compat_rel));
2424 } else if (BIF_ARG_1 == am_multi_scheduling) {
2425 {
2426 int msb = erts_is_multi_scheduling_blocked();
2427 BIF_RET(!msb
2428 ? am_enabled
2429 : (msb > 0
2430 ? am_blocked
2431 : am_blocked_normal));
2432 }
2433 } else if (BIF_ARG_1 == am_build_type) {
2434 #if defined(DEBUG)
2435 ERTS_DECL_AM(debug);
2436 BIF_RET(AM_debug);
2437 #elif defined(PURIFY)
2438 ERTS_DECL_AM(purify);
2439 BIF_RET(AM_purify);
2440 #elif defined(QUANTIFY)
2441 ERTS_DECL_AM(quantify);
2442 BIF_RET(AM_quantify);
2443 #elif defined(PURECOV)
2444 ERTS_DECL_AM(purecov);
2445 BIF_RET(AM_purecov);
2446 #elif defined(ERTS_GCOV)
2447 ERTS_DECL_AM(gcov);
2448 BIF_RET(AM_gcov);
2449 #elif defined(VALGRIND)
2450 ERTS_DECL_AM(valgrind);
2451 BIF_RET(AM_valgrind);
2452 #elif defined(GPROF)
2453 ERTS_DECL_AM(gprof);
2454 BIF_RET(AM_gprof);
2455 #elif defined(ERTS_ENABLE_LOCK_COUNT)
2456 ERTS_DECL_AM(lcnt);
2457 BIF_RET(AM_lcnt);
2458 #elif defined(ERTS_FRMPTR)
2459 ERTS_DECL_AM(frmptr);
2460 BIF_RET(AM_frmptr);
2461 #else
2462 BIF_RET(am_opt);
2463 #endif
2464 BIF_RET(res);
2465 } else if (BIF_ARG_1 == am_time_offset) {
2466 switch (erts_time_offset_state()) {
2467 case ERTS_TIME_OFFSET_PRELIMINARY: {
2468 ERTS_DECL_AM(preliminary);
2469 BIF_RET(AM_preliminary);
2470 }
2471 case ERTS_TIME_OFFSET_FINAL: {
2472 ERTS_DECL_AM(final);
2473 BIF_RET(AM_final);
2474 }
2475 case ERTS_TIME_OFFSET_VOLATILE: {
2476 ERTS_DECL_AM(volatile);
2477 BIF_RET(AM_volatile);
2478 }
2479 default:
2480 ERTS_INTERNAL_ERROR("Invalid time offset state");
2481 }
2482 } else if (ERTS_IS_ATOM_STR("os_monotonic_time_source", BIF_ARG_1)) {
2483 BIF_RET(erts_monotonic_time_source(BIF_P));
2484 } else if (ERTS_IS_ATOM_STR("os_system_time_source", BIF_ARG_1)) {
2485 BIF_RET(erts_system_time_source(BIF_P));
2486 } else if (ERTS_IS_ATOM_STR("time_correction", BIF_ARG_1)) {
2487 BIF_RET(erts_has_time_correction() ? am_true : am_false);
2488 } else if (ERTS_IS_ATOM_STR("start_time", BIF_ARG_1)) {
2489 BIF_RET(erts_get_monotonic_start_time(BIF_P));
2490 } else if (ERTS_IS_ATOM_STR("end_time", BIF_ARG_1)) {
2491 BIF_RET(erts_get_monotonic_end_time(BIF_P));
2492 } else if (ERTS_IS_ATOM_STR("time_warp_mode", BIF_ARG_1)) {
2493 switch (erts_time_warp_mode()) {
2494 case ERTS_NO_TIME_WARP_MODE: {
2495 ERTS_DECL_AM(no_time_warp);
2496 BIF_RET(AM_no_time_warp);
2497 }
2498 case ERTS_SINGLE_TIME_WARP_MODE: {
2499 ERTS_DECL_AM(single_time_warp);
2500 BIF_RET(AM_single_time_warp);
2501 }
2502 case ERTS_MULTI_TIME_WARP_MODE: {
2503 ERTS_DECL_AM(multi_time_warp);
2504 BIF_RET(AM_multi_time_warp);
2505 }
2506 default:
2507 ERTS_INTERNAL_ERROR("Invalid time warp mode");
2508 }
2509 } else if (BIF_ARG_1 == am_allocated_areas) {
2510 res = erts_allocated_areas(NULL, NULL, BIF_P);
2511 BIF_RET(res);
2512 } else if (BIF_ARG_1 == am_hipe_architecture) {
2513 #if defined(HIPE)
2514 BIF_RET(hipe_arch_name);
2515 #else
2516 BIF_RET(am_undefined);
2517 #endif
2518 } else if (BIF_ARG_1 == am_trace_control_word) {
2519 BIF_RET(db_get_trace_control_word(BIF_P));
2520 } else if (ERTS_IS_ATOM_STR("ets_realloc_moves", BIF_ARG_1)) {
2521 BIF_RET((erts_ets_realloc_always_moves) ? am_true : am_false);
2522 } else if (ERTS_IS_ATOM_STR("ets_always_compress", BIF_ARG_1)) {
2523 BIF_RET((erts_ets_always_compress) ? am_true : am_false);
2524 } else if (ERTS_IS_ATOM_STR("snifs", BIF_ARG_1)) {
2525 Uint size = 0;
2526 Uint *szp;
2527
2528 szp = &size;
2529 build_snifs_term(NULL, szp, NIL);
2530 hp = HAlloc(BIF_P, size);
2531 res = build_snifs_term(&hp, NULL, NIL);
2532 BIF_RET(res);
2533 } else if (BIF_ARG_1 == am_sequential_tracer) {
2534 ErtsTracer seq_tracer = erts_get_system_seq_tracer();
2535 val = erts_tracer_to_term(BIF_P, seq_tracer);
2536 hp = HAlloc(BIF_P, 3);
2537 res = TUPLE2(hp, am_sequential_tracer, val);
2538 BIF_RET(res);
2539 } else if (BIF_ARG_1 == am_garbage_collection){
2540 Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs);
2541 Eterm tup;
2542 hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2);
2543
2544 tup = TUPLE2(hp, am_fullsweep_after, make_small(val)); hp += 3;
2545 res = CONS(hp, tup, NIL); hp += 2;
2546
2547 tup = TUPLE2(hp, am_min_heap_size, make_small(H_MIN_SIZE)); hp += 3;
2548 res = CONS(hp, tup, res); hp += 2;
2549
2550 tup = TUPLE2(hp, am_min_bin_vheap_size, make_small(BIN_VH_MIN_SIZE)); hp += 3;
2551 res = CONS(hp, tup, res); hp += 2;
2552
2553 tup = TUPLE2(hp, am_max_heap_size, make_small(H_MAX_SIZE)); hp += 3;
2554 res = CONS(hp, tup, res); hp += 2;
2555
2556 BIF_RET(res);
2557 } else if (BIF_ARG_1 == am_fullsweep_after){
2558 Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs);
2559 hp = HAlloc(BIF_P, 3);
2560 res = TUPLE2(hp, am_fullsweep_after, make_small(val));
2561 BIF_RET(res);
2562 } else if (BIF_ARG_1 == am_min_heap_size) {
2563 hp = HAlloc(BIF_P, 3);
2564 res = TUPLE2(hp, am_min_heap_size,make_small(H_MIN_SIZE));
2565 BIF_RET(res);
2566 } else if (BIF_ARG_1 == am_max_heap_size) {
2567 Uint sz = 0;
2568 erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, NULL, &sz);
2569 hp = HAlloc(BIF_P, sz);
2570 res = erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, &hp, NULL);
2571 BIF_RET(res);
2572 } else if (BIF_ARG_1 == am_min_bin_vheap_size) {
2573 hp = HAlloc(BIF_P, 3);
2574 res = TUPLE2(hp, am_min_bin_vheap_size,make_small(BIN_VH_MIN_SIZE));
2575 BIF_RET(res);
2576 } else if (BIF_ARG_1 == am_process_count) {
2577 BIF_RET(make_small(erts_ptab_count(&erts_proc)));
2578 } else if (BIF_ARG_1 == am_process_limit) {
2579 BIF_RET(make_small(erts_ptab_max(&erts_proc)));
2580 } else if (BIF_ARG_1 == am_port_count) {
2581 BIF_RET(make_small(erts_ptab_count(&erts_port)));
2582 } else if (BIF_ARG_1 == am_port_limit) {
2583 BIF_RET(make_small(erts_ptab_max(&erts_port)));
2584 } else if (BIF_ARG_1 == am_info
2585 || BIF_ARG_1 == am_procs
2586 || BIF_ARG_1 == am_loaded
2587 || BIF_ARG_1 == am_dist) {
2588 erts_dsprintf_buf_t *dsbufp = erts_create_info_dsbuf(0);
2589
2590 /* Need to be the only thread running... */
2591 erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
2592 erts_thr_progress_block();
2593
2594 if (BIF_ARG_1 == am_info)
2595 info(ERTS_PRINT_DSBUF, (void *) dsbufp);
2596 else if (BIF_ARG_1 == am_procs)
2597 process_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
2598 else if (BIF_ARG_1 == am_loaded)
2599 loaded(ERTS_PRINT_DSBUF, (void *) dsbufp);
2600 else
2601 distribution_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
2602
2603 erts_thr_progress_unblock();
2604 erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
2605
2606 ASSERT(dsbufp && dsbufp->str);
2607 res = new_binary(BIF_P, (byte *) dsbufp->str, dsbufp->str_len);
2608 erts_destroy_info_dsbuf(dsbufp);
2609 BIF_RET(res);
2610 } else if (ERTS_IS_ATOM_STR("dist_ctrl", BIF_ARG_1)) {
2611 DistEntry *dep;
2612 i = 0;
2613 erts_rwmtx_rlock(&erts_dist_table_rwmtx);
2614 for (dep = erts_visible_dist_entries; dep; dep = dep->next)
2615 ++i;
2616 for (dep = erts_hidden_dist_entries; dep; dep = dep->next)
2617 ++i;
2618 hp = HAlloc(BIF_P,i*(3+2));
2619 res = NIL;
2620 for (dep = erts_hidden_dist_entries; dep; dep = dep->next) {
2621 Eterm tpl;
2622 ASSERT(is_immed(dep->cid));
2623 tpl = TUPLE2(hp, dep->sysname, dep->cid);
2624 hp +=3;
2625 res = CONS(hp, tpl, res);
2626 hp += 2;
2627 }
2628 for (dep = erts_visible_dist_entries; dep; dep = dep->next) {
2629 Eterm tpl;
2630 ASSERT(is_immed(dep->cid));
2631 tpl = TUPLE2(hp, dep->sysname, dep->cid);
2632 hp +=3;
2633 res = CONS(hp, tpl, res);
2634 hp += 2;
2635 }
2636 erts_rwmtx_runlock(&erts_dist_table_rwmtx);
2637 BIF_RET(res);
2638 } else if (BIF_ARG_1 == am_system_version) {
2639 erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
2640 erts_print_system_version(ERTS_PRINT_DSBUF, (void *) dsbufp, BIF_P);
2641 hp = HAlloc(BIF_P, dsbufp->str_len*2);
2642 res = buf_to_intlist(&hp, dsbufp->str, dsbufp->str_len, NIL);
2643 erts_destroy_tmp_dsbuf(dsbufp);
2644 BIF_RET(res);
2645 } else if (BIF_ARG_1 == am_system_architecture) {
2646 hp = HAlloc(BIF_P, 2*(sizeof(ERLANG_ARCHITECTURE)-1));
2647 BIF_RET(buf_to_intlist(&hp,
2648 ERLANG_ARCHITECTURE,
2649 sizeof(ERLANG_ARCHITECTURE)-1,
2650 NIL));
2651 }
2652 else if (BIF_ARG_1 == am_os_type) {
2653 BIF_RET(os_type_tuple);
2654 }
2655 else if (BIF_ARG_1 == am_allocator) {
2656 BIF_RET(erts_allocator_options((void *) BIF_P));
2657 }
2658 else if (BIF_ARG_1 == am_thread_pool_size) {
2659 extern int erts_async_max_threads;
2660 int n;
2661
2662 n = erts_async_max_threads;
2663 BIF_RET(make_small(n));
2664 }
2665 else if (BIF_ARG_1 == am_alloc_util_allocators) {
2666 BIF_RET(erts_alloc_util_allocators((void *) BIF_P));
2667 }
2668 else if (BIF_ARG_1 == am_elib_malloc) {
2669 /* To be removed in R15 */
2670 BIF_RET(am_false);
2671 }
2672 else if (BIF_ARG_1 == am_os_version) {
2673 BIF_RET(os_version_tuple);
2674 }
2675 else if (BIF_ARG_1 == am_version) {
2676 int n = sys_strlen(ERLANG_VERSION);
2677 hp = HAlloc(BIF_P, ((sizeof ERLANG_VERSION)-1) * 2);
2678 BIF_RET(buf_to_intlist(&hp, ERLANG_VERSION, n, NIL));
2679 }
2680 else if (BIF_ARG_1 == am_machine) {
2681 int n = sys_strlen(EMULATOR);
2682 hp = HAlloc(BIF_P, n*2);
2683 BIF_RET(buf_to_intlist(&hp, EMULATOR, n, NIL));
2684 }
2685 else if (BIF_ARG_1 == am_garbage_collection) {
2686 BIF_RET(am_generational);
2687 #ifdef ERTS_OPCODE_COUNTER_SUPPORT
2688 } else if (BIF_ARG_1 == am_instruction_counts) {
2689 #ifdef DEBUG
2690 Eterm *endp;
2691 #endif
2692 Eterm *hp, **hpp;
2693 Uint hsz, *hszp;
2694 int i;
2695
2696 hpp = NULL;
2697 hsz = 0;
2698 hszp = &hsz;
2699
2700 bld_instruction_counts:
2701
2702 res = NIL;
2703 for (i = num_instructions-1; i >= 0; i--) {
2704 res = erts_bld_cons(hpp, hszp,
2705 erts_bld_tuple(hpp, hszp, 2,
2706 erts_atom_put((byte *)opc[i].name,
2707 sys_strlen(opc[i].name),
2708 ERTS_ATOM_ENC_LATIN1,
2709 1),
2710 erts_bld_uint(hpp, hszp,
2711 erts_instr_count[i])),
2712 res);
2713 }
2714
2715 if (!hpp) {
2716 hp = HAlloc(BIF_P, hsz);
2717 hpp = &hp;
2718 #ifdef DEBUG
2719 endp = hp + hsz;
2720 #endif
2721 hszp = NULL;
2722 goto bld_instruction_counts;
2723 }
2724
2725 #ifdef DEBUG
2726 ASSERT(endp == hp);
2727 #endif
2728
2729 BIF_RET(res);
2730 #endif /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */
2731 } else if (BIF_ARG_1 == am_wordsize) {
2732 return make_small(sizeof(Eterm));
2733 } else if (BIF_ARG_1 == am_endian) {
2734 #if defined(WORDS_BIGENDIAN)
2735 return am_big;
2736 #else
2737 return am_little;
2738 #endif
2739 } else if (BIF_ARG_1 == am_heap_sizes) {
2740 return erts_heap_sizes(BIF_P);
2741 } else if (BIF_ARG_1 == am_heap_type) {
2742 return am_private;
2743 } else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) {
2744 res = erts_get_cpu_topology_term(BIF_P, am_used);
2745 BIF_TRAP1(erts_format_cpu_topology_trap, BIF_P, res);
2746 } else if (ERTS_IS_ATOM_STR("update_cpu_info", BIF_ARG_1)) {
2747 if (erts_update_cpu_info()) {
2748 ERTS_DECL_AM(changed);
2749 BIF_RET(AM_changed);
2750 }
2751 else {
2752 ERTS_DECL_AM(unchanged);
2753 BIF_RET(AM_unchanged);
2754 }
2755 #if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
2756 } else if (ERTS_IS_ATOM_STR("ultrasparc_read_tick1", BIF_ARG_1)) {
2757 register unsigned high asm("%l0");
2758 register unsigned low asm("%l1");
2759
2760 hp = HAlloc(BIF_P, 5);
2761 asm volatile (".word 0xa3410000;" /* rd %tick, %l1 */
2762 ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
2763 : "=r" (high), "=r" (low));
2764 res = TUPLE4(hp, make_small(high >> 16),
2765 make_small(high & 0xFFFF),
2766 make_small(low >> 16),
2767 make_small(low & 0xFFFF));
2768 BIF_RET(res);
2769 } else if (ERTS_IS_ATOM_STR("ultrasparc_read_tick2", BIF_ARG_1)) {
2770 register unsigned high asm("%l0");
2771 register unsigned low asm("%l1");
2772
2773 asm volatile (".word 0xa3410000;" /* rd %tick, %l1 */
2774 ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
2775 : "=r" (high), "=r" (low));
2776 hp = HAlloc(BIF_P, 5);
2777 res = TUPLE4(hp, make_small(high >> 16),
2778 make_small(high & 0xFFFF),
2779 make_small(low >> 16),
2780 make_small(low & 0xFFFF));
2781 BIF_RET(res);
2782 } else if (ERTS_IS_ATOM_STR("ultrasparc_read_pic1", BIF_ARG_1)) {
2783 register unsigned high asm("%l0");
2784 register unsigned low asm("%l1");
2785
2786 hp = HAlloc(BIF_P, 5);
2787 asm volatile (".word 0xa3444000;" /* rd %asr17, %l1 */
2788 ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
2789 : "=r" (high), "=r" (low));
2790 res = TUPLE4(hp, make_small(high >> 16),
2791 make_small(high & 0xFFFF),
2792 make_small(low >> 16),
2793 make_small(low & 0xFFFF));
2794 BIF_RET(res);
2795 } else if (ERTS_IS_ATOM_STR("ultrasparc_read_pic2", BIF_ARG_1)) {
2796 register unsigned high asm("%l0");
2797 register unsigned low asm("%l1");
2798
2799 asm volatile (".word 0xa3444000;" /* rd %asr17, %l1 */
2800 ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
2801 : "=r" (high), "=r" (low));
2802 hp = HAlloc(BIF_P, 5);
2803 res = TUPLE4(hp, make_small(high >> 16),
2804 make_small(high & 0xFFFF),
2805 make_small(low >> 16),
2806 make_small(low & 0xFFFF));
2807 BIF_RET(res);
2808 #endif
2809 } else if (BIF_ARG_1 == am_threads) {
2810 return am_true;
2811 } else if (BIF_ARG_1 == am_creation) {
2812 return make_small(erts_this_node->creation);
2813 } else if (BIF_ARG_1 == am_break_ignored) {
2814 extern int ignore_break;
2815 if (ignore_break)
2816 return am_true;
2817 else
2818 return am_false;
2819 }
2820 /* Arguments that are unusual follow ... */
2821 else if (ERTS_IS_ATOM_STR("logical_processors", BIF_ARG_1)) {
2822 int no;
2823 erts_get_logical_processors(&no, NULL, NULL);
2824 if (no > 0)
2825 BIF_RET(make_small((Uint) no));
2826 else {
2827 DECL_AM(unknown);
2828 BIF_RET(AM_unknown);
2829 }
2830 }
2831 else if (ERTS_IS_ATOM_STR("logical_processors_online", BIF_ARG_1)) {
2832 int no;
2833 erts_get_logical_processors(NULL, &no, NULL);
2834 if (no > 0)
2835 BIF_RET(make_small((Uint) no));
2836 else {
2837 DECL_AM(unknown);
2838 BIF_RET(AM_unknown);
2839 }
2840 }
2841 else if (ERTS_IS_ATOM_STR("logical_processors_available", BIF_ARG_1)) {
2842 int no;
2843 erts_get_logical_processors(NULL, NULL, &no);
2844 if (no > 0)
2845 BIF_RET(make_small((Uint) no));
2846 else {
2847 DECL_AM(unknown);
2848 BIF_RET(AM_unknown);
2849 }
2850 } else if (ERTS_IS_ATOM_STR("otp_release", BIF_ARG_1)) {
2851 int n = sizeof(ERLANG_OTP_RELEASE)-1;
2852 hp = HAlloc(BIF_P, 2*n);
2853 BIF_RET(buf_to_intlist(&hp, ERLANG_OTP_RELEASE, n, NIL));
2854 } else if (ERTS_IS_ATOM_STR("driver_version", BIF_ARG_1)) {
2855 char buf[42];
2856 int n = erts_snprintf(buf, 42, "%d.%d",
2857 ERL_DRV_EXTENDED_MAJOR_VERSION,
2858 ERL_DRV_EXTENDED_MINOR_VERSION);
2859 hp = HAlloc(BIF_P, 2*n);
2860 BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
2861 } else if (ERTS_IS_ATOM_STR("nif_version", BIF_ARG_1)) {
2862 char buf[42];
2863 int n = erts_snprintf(buf, 42, "%d.%d",
2864 ERL_NIF_MAJOR_VERSION,
2865 ERL_NIF_MINOR_VERSION);
2866 hp = HAlloc(BIF_P, 2*n);
2867 BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
2868 } else if (ERTS_IS_ATOM_STR("smp_support", BIF_ARG_1)) {
2869 BIF_RET(am_true);
2870 } else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
2871 BIF_RET(erts_bound_schedulers_term(BIF_P));
2872 } else if (ERTS_IS_ATOM_STR("scheduler_bindings", BIF_ARG_1)) {
2873 BIF_RET(erts_get_schedulers_binds(BIF_P));
2874 } else if (ERTS_IS_ATOM_STR("constant_pool_support", BIF_ARG_1)) {
2875 BIF_RET(am_true);
2876 } else if (ERTS_IS_ATOM_STR("schedulers", BIF_ARG_1)
2877 || ERTS_IS_ATOM_STR("schedulers_total", BIF_ARG_1)) {
2878 res = make_small(erts_no_schedulers);
2879 BIF_RET(res);
2880 } else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
2881 Eterm *hp;
2882 Uint total, online, active;
2883 erts_schedulers_state(&total, &online, &active,
2884 NULL, NULL, NULL, NULL, NULL);
2885 hp = HAlloc(BIF_P, 4);
2886 res = TUPLE3(hp,
2887 make_small(total),
2888 make_small(online),
2889 make_small(active));
2890 BIF_RET(res);
2891 } else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
2892 Eterm *hp;
2893 Uint total, online, active;
2894 erts_schedulers_state(&total, &online, &active,
2895 NULL, NULL, NULL, NULL, NULL);
2896 hp = HAlloc(BIF_P, 4);
2897 res = TUPLE3(hp,
2898 make_small(total),
2899 make_small(online),
2900 make_small(active));
2901 BIF_RET(res);
2902 } else if (ERTS_IS_ATOM_STR("all_schedulers_state", BIF_ARG_1)) {
2903 Eterm *hp, tpl;
2904 Uint sz, total, online, active,
2905 dirty_cpu_total, dirty_cpu_online, dirty_cpu_active,
2906 dirty_io_total, dirty_io_active;
2907 erts_schedulers_state(&total, &online, &active,
2908 &dirty_cpu_total, &dirty_cpu_online, &dirty_cpu_active,
2909 &dirty_io_total, &dirty_io_active);
2910
2911 sz = 2+5;
2912 if (dirty_cpu_total)
2913 sz += 2+5;
2914 if (dirty_io_total)
2915 sz += 2+5;
2916
2917 hp = HAlloc(BIF_P, sz);
2918
2919 res = NIL;
2920 if (dirty_io_total) {
2921 tpl = TUPLE4(hp,
2922 am_dirty_io,
2923 make_small(dirty_io_total),
2924 make_small(dirty_io_total),
2925 make_small(dirty_io_active));
2926 hp += 5;
2927 res = CONS(hp, tpl, res);
2928 hp += 2;
2929 }
2930 if (dirty_cpu_total) {
2931 tpl = TUPLE4(hp,
2932 am_dirty_cpu,
2933 make_small(dirty_cpu_total),
2934 make_small(dirty_cpu_online),
2935 make_small(dirty_cpu_active));
2936 hp += 5;
2937 res = CONS(hp, tpl, res);
2938 hp += 2;
2939 }
2940 tpl = TUPLE4(hp,
2941 am_normal,
2942 make_small(total),
2943 make_small(online),
2944 make_small(active));
2945 hp += 5;
2946 res = CONS(hp, tpl, res);
2947 BIF_RET(res);
2948 } else if (ERTS_IS_ATOM_STR("schedulers_online", BIF_ARG_1)) {
2949 Uint online;
2950 erts_schedulers_state(NULL, &online, NULL, NULL, NULL, NULL, NULL, NULL);
2951 BIF_RET(make_small(online));
2952 } else if (ERTS_IS_ATOM_STR("schedulers_active", BIF_ARG_1)) {
2953 Uint active;
2954 erts_schedulers_state(NULL, NULL, &active, NULL, NULL, NULL, NULL, NULL);
2955 BIF_RET(make_small(active));
2956 } else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers", BIF_ARG_1)) {
2957 Uint dirty_cpu;
2958 erts_schedulers_state(NULL, NULL, NULL, &dirty_cpu, NULL, NULL, NULL, NULL);
2959 BIF_RET(make_small(dirty_cpu));
2960 } else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers_online", BIF_ARG_1)) {
2961 Uint dirty_cpu_onln;
2962 erts_schedulers_state(NULL, NULL, NULL, NULL, &dirty_cpu_onln, NULL, NULL, NULL);
2963 BIF_RET(make_small(dirty_cpu_onln));
2964 } else if (ERTS_IS_ATOM_STR("dirty_io_schedulers", BIF_ARG_1)) {
2965 Uint dirty_io;
2966 erts_schedulers_state(NULL, NULL, NULL, NULL, NULL, NULL, &dirty_io, NULL);
2967 BIF_RET(make_small(dirty_io));
2968 } else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
2969 res = make_small(erts_no_run_queues);
2970 BIF_RET(res);
2971 } else if (ERTS_IS_ATOM_STR("port_parallelism", BIF_ARG_1)) {
2972 res = erts_port_parallelism ? am_true : am_false;
2973 BIF_RET(res);
2974 } else if (ERTS_IS_ATOM_STR("c_compiler_used", BIF_ARG_1)) {
2975 Eterm *hp = NULL;
2976 Uint sz = 0;
2977 (void) c_compiler_used(NULL, &sz);
2978 if (sz)
2979 hp = HAlloc(BIF_P, sz);
2980 BIF_RET(c_compiler_used(&hp, NULL));
2981 } else if (ERTS_IS_ATOM_STR("stop_memory_trace", BIF_ARG_1)) {
2982 erts_mtrace_stop();
2983 BIF_RET(am_true);
2984 } else if (ERTS_IS_ATOM_STR("context_reductions", BIF_ARG_1)) {
2985 BIF_RET(make_small(CONTEXT_REDS));
2986 } else if (ERTS_IS_ATOM_STR("kernel_poll", BIF_ARG_1)) {
2987 #if ERTS_ENABLE_KERNEL_POLL
2988 BIF_RET(am_true);
2989 #else
2990 BIF_RET(am_false);
2991 #endif
2992 } else if (ERTS_IS_ATOM_STR("lock_checking", BIF_ARG_1)) {
2993 #ifdef ERTS_ENABLE_LOCK_CHECK
2994 BIF_RET(am_true);
2995 #else
2996 BIF_RET(am_false);
2997 #endif
2998 } else if (ERTS_IS_ATOM_STR("lock_counting", BIF_ARG_1)) {
2999 #ifdef ERTS_ENABLE_LOCK_COUNT
3000 BIF_RET(am_true);
3001 #else
3002 BIF_RET(am_false);
3003 #endif
3004 } else if (ERTS_IS_ATOM_STR("debug_compiled", BIF_ARG_1)) {
3005 #ifdef DEBUG
3006 BIF_RET(am_true);
3007 #else
3008 BIF_RET(am_false);
3009 #endif
3010 } else if (ERTS_IS_ATOM_STR("check_io", BIF_ARG_1)) {
3011 BIF_RET(erts_check_io_info(BIF_P));
3012 } else if (ERTS_IS_ATOM_STR("multi_scheduling_blockers", BIF_ARG_1)) {
3013 if (erts_no_schedulers == 1)
3014 BIF_RET(NIL);
3015 else
3016 BIF_RET(erts_multi_scheduling_blockers(BIF_P, 0));
3017 } else if (ERTS_IS_ATOM_STR("normal_multi_scheduling_blockers", BIF_ARG_1)) {
3018 if (erts_no_schedulers == 1)
3019 BIF_RET(NIL);
3020 else
3021 BIF_RET(erts_multi_scheduling_blockers(BIF_P, 1));
3022 } else if (ERTS_IS_ATOM_STR("modified_timing_level", BIF_ARG_1)) {
3023 BIF_RET(ERTS_USE_MODIFIED_TIMING()
3024 ? make_small(erts_modified_timing_level)
3025 : am_undefined);
3026 } else if (ERTS_IS_ATOM_STR("port_tasks", BIF_ARG_1)) {
3027 BIF_RET(am_true);
3028 } else if (ERTS_IS_ATOM_STR("io_thread", BIF_ARG_1)) {
3029 BIF_RET(am_false);
3030 } else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) {
3031 BIF_RET(erts_sched_stat_term(BIF_P, 0));
3032 } else if (ERTS_IS_ATOM_STR("total_scheduling_statistics", BIF_ARG_1)) {
3033 BIF_RET(erts_sched_stat_term(BIF_P, 1));
3034 } else if (ERTS_IS_ATOM_STR("taints", BIF_ARG_1)) {
3035 BIF_RET(erts_nif_taints(BIF_P));
3036 } else if (ERTS_IS_ATOM_STR("reader_groups_map", BIF_ARG_1)) {
3037 BIF_RET(erts_get_reader_groups_map(BIF_P));
3038 } else if (ERTS_IS_ATOM_STR("dist_buf_busy_limit", BIF_ARG_1)) {
3039 Uint hsz = 0;
3040
3041 (void) erts_bld_uint(NULL, &hsz, erts_dist_buf_busy_limit);
3042 hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
3043 res = erts_bld_uint(&hp, NULL, erts_dist_buf_busy_limit);
3044 BIF_RET(res);
3045 } else if (ERTS_IS_ATOM_STR("delayed_node_table_gc", BIF_ARG_1)) {
3046 Uint hsz = 0;
3047 Uint dntgc = erts_delayed_node_table_gc();
3048 if (dntgc == ERTS_NODE_TAB_DELAY_GC_INFINITY)
3049 BIF_RET(am_infinity);
3050 (void) erts_bld_uint(NULL, &hsz, dntgc);
3051 hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
3052 res = erts_bld_uint(&hp, NULL, dntgc);
3053 BIF_RET(res);
3054 } else if (ERTS_IS_ATOM_STR("ethread_info", BIF_ARG_1)) {
3055 BIF_RET(erts_get_ethread_info(BIF_P));
3056 }
3057 else if (ERTS_IS_ATOM_STR("ethread_used_tse", BIF_ARG_1)) {
3058 Uint64 no = (Uint64) ethr_no_used_tse();
3059 Uint hsz = 0;
3060 erts_bld_uint64(NULL, &hsz, no);
3061 hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
3062 res = erts_bld_uint64(&hp, NULL, no);
3063 BIF_RET(res);
3064 }
3065 else if (ERTS_IS_ATOM_STR("emu_args", BIF_ARG_1)) {
3066 BIF_RET(erts_get_emu_args(BIF_P));
3067 }
3068 else if (ERTS_IS_ATOM_STR("beam_jump_table", BIF_ARG_1)) {
3069 BIF_RET(erts_beam_jump_table() ? am_true : am_false);
3070 }
3071 else if (ERTS_IS_ATOM_STR("dynamic_trace", BIF_ARG_1)) {
3072 #if defined(USE_DTRACE)
3073 DECL_AM(dtrace);
3074 BIF_RET(AM_dtrace);
3075 #elif defined(USE_SYSTEMTAP)
3076 DECL_AM(systemtap);
3077 BIF_RET(AM_systemtap);
3078 #elif defined(USE_LTTNG)
3079 DECL_AM(lttng);
3080 BIF_RET(AM_lttng);
3081 #else
3082 BIF_RET(am_none);
3083 #endif
3084 }
3085 else if (ERTS_IS_ATOM_STR("dynamic_trace_probes", BIF_ARG_1)) {
3086 #if defined(USE_VM_PROBES)
3087 BIF_RET(am_true);
3088 #else
3089 BIF_RET(am_false);
3090 #endif
3091 }
3092 else if (ERTS_IS_ATOM_STR("thread_progress", BIF_ARG_1)) {
3093 erts_thr_progress_dbg_print_state();
3094 BIF_RET(am_true);
3095 }
3096 else if (BIF_ARG_1 == am_message_queue_data) {
3097 switch (erts_default_spo_flags & (SPO_ON_HEAP_MSGQ|SPO_OFF_HEAP_MSGQ)) {
3098 case SPO_OFF_HEAP_MSGQ:
3099 BIF_RET(am_off_heap);
3100 case SPO_ON_HEAP_MSGQ:
3101 BIF_RET(am_on_heap);
3102 default:
3103 ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
3104 BIF_RET(am_error);
3105 }
3106 }
3107 else if (ERTS_IS_ATOM_STR("compile_info",BIF_ARG_1)) {
3108 Uint sz;
3109 Eterm res = NIL, tup, text;
3110 Eterm *hp = HAlloc(BIF_P, 3*(2 + 3) + /* three 2-tuples and three cons */
3111 2*(sys_strlen(erts_build_flags_CONFIG_H) +
3112 sys_strlen(erts_build_flags_CFLAGS) +
3113 sys_strlen(erts_build_flags_LDFLAGS)));
3114
3115 sz = sys_strlen(erts_build_flags_CONFIG_H);
3116 text = buf_to_intlist(&hp, erts_build_flags_CONFIG_H, sz, NIL);
3117 tup = TUPLE2(hp, am_config_h, text); hp += 3;
3118 res = CONS(hp, tup, res); hp += 2;
3119
3120 sz = sys_strlen(erts_build_flags_CFLAGS);
3121 text = buf_to_intlist(&hp, erts_build_flags_CFLAGS, sz, NIL);
3122 tup = TUPLE2(hp, am_cflags, text); hp += 3;
3123 res = CONS(hp, tup, res); hp += 2;
3124
3125 sz = sys_strlen(erts_build_flags_LDFLAGS);
3126 text = buf_to_intlist(&hp, erts_build_flags_LDFLAGS, sz, NIL);
3127 tup = TUPLE2(hp, am_ldflags, text); hp += 3;
3128 res = CONS(hp, tup, res); hp += 2;
3129
3130 BIF_RET(res);
3131 }
3132 else if (ERTS_IS_ATOM_STR("ets_limit",BIF_ARG_1)) {
3133 BIF_RET(make_small(erts_db_get_max_tabs()));
3134 }
3135 else if (ERTS_IS_ATOM_STR("ets_count",BIF_ARG_1)) {
3136 BIF_RET(make_small(erts_ets_table_count()));
3137 }
3138 else if (ERTS_IS_ATOM_STR("atom_limit",BIF_ARG_1)) {
3139 BIF_RET(make_small(erts_get_atom_limit()));
3140 }
3141 else if (ERTS_IS_ATOM_STR("atom_count",BIF_ARG_1)) {
3142 BIF_RET(make_small(atom_table_size()));
3143 }
3144 else if (ERTS_IS_ATOM_STR("tolerant_timeofday",BIF_ARG_1)) {
3145 if (erts_has_time_correction()
3146 && erts_time_offset_state() == ERTS_TIME_OFFSET_FINAL) {
3147 BIF_RET(am_enabled);
3148 }
3149 BIF_RET(am_disabled);
3150 }
3151 else if (ERTS_IS_ATOM_STR("eager_check_io",BIF_ARG_1)) {
3152 BIF_RET(am_true);
3153 }
3154 else if (ERTS_IS_ATOM_STR("literal_test",BIF_ARG_1)) {
3155 #ifdef ERTS_HAVE_IS_IN_LITERAL_RANGE
3156 #ifdef ARCH_64
3157 DECL_AM(range);
3158 BIF_RET(AM_range);
3159 #else /* ARCH_32 */
3160 DECL_AM(range_bitmask);
3161 BIF_RET(AM_range_bitmask);
3162 #endif /* ARCH_32 */
3163 #else /* ! ERTS_HAVE_IS_IN_LITERAL_RANGE */
3164 DECL_AM(tag);
3165 BIF_RET(AM_tag);
3166 #endif
3167 } else if (ERTS_IS_ATOM_STR("system_logger", BIF_ARG_1)) {
3168 BIF_RET(erts_get_system_logger());
3169 }
3170
3171 BIF_ERROR(BIF_P, BADARG);
3172 }
3173
/*
 * Foreach callback accumulating monitor sizes into the Uint pointed to
 * by vsz. Must accumulate (+=), not assign: it is invoked once per
 * monitor in a tree traversal, so plain assignment would keep only the
 * size of the last monitor visited. Callers are expected to zero the
 * accumulator before the traversal.
 */
static void monitor_size(ErtsMonitor *mon, void *vsz)
{
    *((Uint *) vsz) += erts_monitor_size(mon);
}
3178
/*
 * Foreach callback accumulating link sizes into the Uint pointed to by
 * vsz. As with monitor_size() above, this must accumulate (+=) since it
 * runs once per link in a traversal; callers zero the accumulator first.
 */
static void link_size(ErtsMonitor *lnk, void *vsz)
{
    *((Uint *) vsz) += erts_link_size(lnk);
}
3183
3184 /**********************************************************************/
3185 /* Return information on ports */
3186 /* Info:
3187 ** id Port index
3188 ** connected (Pid)
3189 ** links List of pids
3190 ** name String
3191 ** input Number of bytes input from port program
3192 ** output Number of bytes output to the port program
3193 ** os_pid The child's process ID
3194 */
3195
/*
 * Build one {Item, Value} element of the erlang:port_info/1,2 result for
 * port 'prt'.  Standard ERTS two-pass term builder:
 *
 *   - sizing pass: szp != NULL; the required heap size is accumulated
 *     into *szp and am_true is returned for every supported item.
 *   - build pass:  hpp != NULL; the term is built at *hpp (which is
 *     advanced past everything written) and the {Item, Value} tuple is
 *     returned.
 *
 * For an unsupported item am_false (sizing pass) or THE_NON_VALUE
 * (build pass) is returned, and registered_name without a registration
 * returns am_undefined/NIL -- in these cases the 2-tuple wrapping at
 * 'done' is skipped.  The caller must hold the port lock.
 */
Eterm
erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
                   Eterm item)
{
    Eterm res = THE_NON_VALUE;

    ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));

    if (item == am_id) {
        if (hpp)
            res = make_small(internal_port_index(prt->common.id));
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_links) {
        MonitorInfoCollection mic;
        int i;
        Eterm item; /* NOTE: shadows the 'item' parameter in this branch */

        INIT_MONITOR_INFOS(mic);

        erts_link_tree_foreach(ERTS_P_LINKS(prt), collect_one_link, (void *) &mic);

        if (szp)
            *szp += mic.sz;

        if (hpp) {
            res = NIL;
            /* Build the list of linked pids collected above. */
            for (i = 0; i < mic.mi_i; i++) {
                item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
                res = CONS(*hpp, item, res);
                *hpp += 2;
            }
        }

        DESTROY_MONITOR_INFOS(mic);

        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_monitors) {
        MonitorInfoCollection mic;
        int i;

        INIT_MONITOR_INFOS(mic);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
                                  collect_one_origin_monitor,
                                  (void *) &mic);

        if (szp)
            *szp += mic.sz;

        if (hpp) {
            res = NIL;
            /* Ports can only monitor processes: build [{process, Pid}]. */
            for (i = 0; i < mic.mi_i; i++) {
                Eterm t;

                ASSERT(mic.mi[i].type == ERTS_MON_TYPE_PORT);
                ASSERT(is_internal_pid(mic.mi[i].entity.term));
                t = TUPLE2(*hpp, am_process, mic.mi[i].entity.term);
                *hpp += 3;
                res = CONS(*hpp, t, res);
                *hpp += 2;
            }
        } // hpp
        DESTROY_MONITOR_INFOS(mic);

        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_monitored_by) {
        MonitorInfoCollection mic;
        int i;
        Eterm item; /* NOTE: shadows the 'item' parameter in this branch */

        INIT_MONITOR_INFOS(mic);
        /* Target monitors live both in the "local target" list and in
         * the monitor tree; collect from both. */
        erts_monitor_list_foreach(ERTS_P_LT_MONITORS(prt),
                                  collect_one_target_monitor,
                                  (void *) &mic);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
                                  collect_one_target_monitor,
                                  (void *) &mic);
        if (szp)
            *szp += mic.sz;

        if (hpp) {
            res = NIL;
            for (i = 0; i < mic.mi_i; ++i) {
                ASSERT(mic.mi[i].type != ERTS_MON_TYPE_RESOURCE);
                item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
                res = CONS(*hpp, item, res);
                *hpp += 2;
            }
        } // hpp
        DESTROY_MONITOR_INFOS(mic);

        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_name) {
        int count = sys_strlen(prt->name);

        if (hpp)
            res = buf_to_intlist(hpp, prt->name, count, NIL);

        if (szp) {
            *szp += 2*count; /* one cons cell per character */
            res = am_true;
            goto done;
        }
    }
    else if (item == am_connected) {
        if (hpp)
            res = ERTS_PORT_GET_CONNECTED(prt); /* internal pid */
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_input) {
        res = erts_bld_uint(hpp, szp, prt->bytes_in);
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_output) {
        res = erts_bld_uint(hpp, szp, prt->bytes_out);
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_os_pid) {
        /* os_pid < 0 means no OS process is attached to the port. */
        res = (prt->os_pid < 0
               ? am_undefined
               : erts_bld_uword(hpp, szp, (UWord) prt->os_pid));
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_registered_name) {
        RegProc *reg = prt->common.u.alive.reg;
        if (reg) {
            res = reg->name;
            if (szp) {
                res = am_true;
                goto done;
            }
        }
        else {
            /* Not registered: return directly, skipping the 2-tuple
             * wrapping at the 'done' label. */
            if (szp)
                return am_undefined;
            return NIL;
        }
    }
    else if (item == am_memory) {
        /* All memory consumed in bytes (the Port struct should not be
           included though).
         */
        Uint size = 0;

        erts_link_tree_foreach(ERTS_P_LINKS(prt),
                               link_size, (void *) &size);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
                                  monitor_size, (void *) &size);
        erts_monitor_list_foreach(ERTS_P_LT_MONITORS(prt),
                                  monitor_size, (void *) &size);

        size += erts_port_data_size(prt);

        if (prt->linebuf)
            size += sizeof(LineBuf) + prt->linebuf->ovsiz;

        /* ... */


        /* All memory allocated by the driver should be included, but it is
           hard to retrieve... */

        res = erts_bld_uint(hpp, szp, size);
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_queue_size) {
        Uint ioq_size = erts_port_ioq_size(prt);
        res = erts_bld_uint(hpp, szp, ioq_size);
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (ERTS_IS_ATOM_STR("locking", item)) {
        if (hpp) {
            /* Report whether the port uses a port-specific lock or the
             * driver-level lock. */
            if (erts_atomic32_read_nob(&prt->state)
                & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
                DECL_AM(port_level);
                ASSERT(prt->drv_ptr->flags
                       & ERL_DRV_FLAG_USE_PORT_LOCKING);
                res = AM_port_level;
            }
            else {
                DECL_AM(driver_level);
                ASSERT(!(prt->drv_ptr->flags
                         & ERL_DRV_FLAG_USE_PORT_LOCKING));
                res = AM_driver_level;
            }
        }
        if (szp) {
            res = am_true;
            goto done;
        }
    }
    else if (item == am_parallelism) {
        if (szp) {
            res = am_true;
            goto done;
        }
        res = ((ERTS_PTS_FLG_PARALLELISM &
                erts_atomic32_read_nob(&prt->sched.flags))
               ? am_true
               : am_false);
    }
    else {
        /* Unsupported item. */
        if (szp)
            return am_false;
        return THE_NON_VALUE;
    }

done:
    if (szp)
        *szp += 3; /* room for the {Item, Value} 2-tuple */
    if (hpp) {
        res = TUPLE2(*hpp, item, res);
        *hpp += 3;
    }

    return res;
}
3446
/*
 * erlang:fun_info/2 -- return {What, Value} for a fun.
 *
 * Local (closure) funs and external funs (fun M:F/A) support different
 * item sets; items that make no sense for external funs yield
 * am_undefined.  Any other first argument, or an unknown item, raises
 * badarg (note the cross-branch 'goto error' into the final else).
 */
BIF_RETTYPE
fun_info_2(BIF_ALIST_2)
{
    Process* p = BIF_P;
    Eterm fun = BIF_ARG_1;
    Eterm what = BIF_ARG_2;
    Eterm* hp;
    Eterm val;

    if (is_fun(fun)) {
        /* Local (closure) fun. */
        ErlFunThing* funp = (ErlFunThing *) fun_val(fun);

        switch (what) {
        case am_type:
            hp = HAlloc(p, 3);
            val = am_local;
            break;
        case am_pid:
            hp = HAlloc(p, 3);
            val = funp->creator;
            break;
        case am_module:
            hp = HAlloc(p, 3);
            val = funp->fe->module;
            break;
        case am_new_index:
            hp = HAlloc(p, 3);
            val = make_small(funp->fe->index);
            break;
        case am_new_uniq:
            /* 16-byte MD5-style uniq value as a binary. */
            val = new_binary(p, funp->fe->uniq, 16);
            hp = HAlloc(p, 3);
            break;
        case am_index:
            hp = HAlloc(p, 3);
            val = make_small(funp->fe->old_index);
            break;
        case am_uniq:
            hp = HAlloc(p, 3);
            val = make_small(funp->fe->old_uniq);
            break;
        case am_env:
            {
                /* Free variables captured by the closure, as a list. */
                Uint num_free = funp->num_free;
                int i;

                hp = HAlloc(p, 3 + 2*num_free);
                val = NIL;
                for (i = num_free-1; i >= 0; i--) {
                    val = CONS(hp, funp->env[i], val);
                    hp += 2;
                }
            }
            break;
        case am_refc:
            val = erts_make_integer(erts_atomic_read_nob(&funp->fe->refc), p);
            hp = HAlloc(p, 3);
            break;
        case am_arity:
            hp = HAlloc(p, 3);
            val = make_small(funp->arity);
            break;
        case am_name:
            /* Name atom stored at address[-2], just before the fun's
             * code entry point. */
            hp = HAlloc(p, 3);
            val = funp->fe->address[-2];
            break;
        default:
            goto error;
        }
    } else if (is_export(fun)) {
        /* External fun (fun M:F/A); most closure-specific items are
         * am_undefined. */
        Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
        switch (what) {
        case am_type:
            hp = HAlloc(p, 3);
            val = am_external;
            break;
        case am_pid:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_module:
            hp = HAlloc(p, 3);
            val = exp->info.mfa.module;
            break;
        case am_new_index:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_new_uniq:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_index:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_uniq:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_env:
            hp = HAlloc(p, 3);
            val = NIL;
            break;
        case am_refc:
            hp = HAlloc(p, 3);
            val = am_undefined;
            break;
        case am_arity:
            hp = HAlloc(p, 3);
            val = make_small(exp->info.mfa.arity);
            break;
        case am_name:
            hp = HAlloc(p, 3);
            val = exp->info.mfa.function;
            break;
        default:
            goto error;
        }
    } else {
    error:
        BIF_ERROR(p, BADARG);
    }
    return TUPLE2(hp, what, val);
}
3572
3573 BIF_RETTYPE
fun_info_mfa_1(BIF_ALIST_1)3574 fun_info_mfa_1(BIF_ALIST_1)
3575 {
3576 Process* p = BIF_P;
3577 Eterm fun = BIF_ARG_1;
3578 Eterm* hp;
3579
3580 if (is_fun(fun)) {
3581 ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
3582 hp = HAlloc(p, 4);
3583 BIF_RET(TUPLE3(hp,funp->fe->module,funp->fe->address[-2],make_small(funp->arity)));
3584 } else if (is_export(fun)) {
3585 Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
3586 hp = HAlloc(p, 4);
3587 BIF_RET(TUPLE3(hp,exp->info.mfa.module,
3588 exp->info.mfa.function,
3589 make_small(exp->info.mfa.arity)));
3590 }
3591 BIF_ERROR(p, BADARG);
3592 }
3593
erts_internal_is_process_alive_2(BIF_ALIST_2)3594 BIF_RETTYPE erts_internal_is_process_alive_2(BIF_ALIST_2)
3595 {
3596 if (!is_internal_pid(BIF_ARG_1) || !is_internal_ordinary_ref(BIF_ARG_2))
3597 BIF_ERROR(BIF_P, BADARG);
3598 if (!erts_proc_sig_send_is_alive_request(BIF_P, BIF_ARG_1, BIF_ARG_2)) {
3599 if (ERTS_PROC_HAS_INCOMING_SIGNALS(BIF_P))
3600 ERTS_BIF_HANDLE_SIGNALS_RETURN(BIF_P, am_ok);
3601 }
3602 BIF_RET(am_ok);
3603 }
3604
/*
 * erlang:is_process_alive/1.
 *
 * Fast path: our own pid is always alive; an unknown local pid is dead.
 * If the target is exiting or has unhandled signals queued we must not
 * answer directly (signal order!) and instead trap to an Erlang-level
 * helper that goes through the signal queue.  External pids from an old
 * incarnation of this node are 'false'; other pids are badarg.
 */
BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
{
    if (is_internal_pid(BIF_ARG_1)) {
        erts_aint32_t state;
        Process *rp;

        if (BIF_ARG_1 == BIF_P->common.id)
            BIF_RET(am_true);

        rp = erts_proc_lookup_raw(BIF_ARG_1);
        if (!rp)
            BIF_RET(am_false);

        state = erts_atomic32_read_acqb(&rp->state);
        if (state & (ERTS_PSFLG_EXITING
                     | ERTS_PSFLG_SIG_Q
                     | ERTS_PSFLG_SIG_IN_Q)) {
            /*
             * If in exiting state, trap out and send 'is alive'
             * request and wait for it to complete termination.
             *
             * If process has signals enqueued, we need to
             * send it an 'is alive' request via its signal
             * queue in order to ensure that signal order is
             * preserved (we may earlier have sent it an
             * exit signal that has not been processed yet).
             */
            BIF_TRAP1(is_process_alive_trap, BIF_P, BIF_ARG_1);
        }

        BIF_RET(am_true);
    }

    if (is_external_pid(BIF_ARG_1)) {
        if (external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
            BIF_RET(am_false); /* A pid from an old incarnation of this node */
    }

    BIF_ERROR(BIF_P, BADARG);
}
3645
3646 static Eterm
process_display(Process * c_p,void * arg,int * redsp,ErlHeapFragment ** bpp)3647 process_display(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
3648 {
3649 if (redsp)
3650 *redsp = 1;
3651
3652 if (ERTS_PROC_IS_EXITING(c_p))
3653 return am_badarg;
3654
3655 erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
3656 erts_stack_dump(ERTS_PRINT_STDERR, NULL, c_p);
3657 erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
3658
3659 return am_true;
3660 }
3661
3662
erts_internal_process_display_2(BIF_ALIST_2)3663 BIF_RETTYPE erts_internal_process_display_2(BIF_ALIST_2)
3664 {
3665 Eterm res;
3666
3667 if (BIF_ARG_2 != am_backtrace)
3668 BIF_RET(am_badarg);
3669
3670 if (BIF_P->common.id == BIF_ARG_1) {
3671 res = process_display(BIF_P, NULL, NULL, NULL);
3672 BIF_RET(res);
3673 }
3674
3675 if (is_not_internal_pid(BIF_ARG_1))
3676 BIF_RET(am_badarg);
3677
3678 res = erts_proc_sig_send_rpc_request(BIF_P, BIF_ARG_1,
3679 !0,
3680 process_display,
3681 NULL);
3682 if (is_non_value(res))
3683 BIF_RET(am_badarg);
3684
3685 BIF_RET(res);
3686 }
3687
3688 /* this is a general call which return some possibly useful information */
3689
/*
 * erlang:statistics/1 -- dispatch on the requested statistics item.
 *
 * Items that need data from all schedulers (scheduler_wall_time,
 * microstate_accounting, garbage_collection, io) issue an asynchronous
 * request and trap to an Erlang helper that gathers the replies.  The
 * run-queue items use bitwise '|' on the (0/1) comparison results
 * deliberately instead of '||'.  Unknown items raise badarg.
 */
BIF_RETTYPE statistics_1(BIF_ALIST_1)
{
    Eterm res;
    Eterm* hp;

    if (BIF_ARG_1 == am_scheduler_wall_time) {
        res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 0);
        if (is_non_value(res))
            BIF_RET(am_undefined);
        BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
    } else if (BIF_ARG_1 == am_scheduler_wall_time_all) {
        /* Same as above but including dirty schedulers. */
        res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 1);
        if (is_non_value(res))
            BIF_RET(am_undefined);
        BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
    } else if ((BIF_ARG_1 == am_total_active_tasks)
               | (BIF_ARG_1 == am_total_run_queue_lengths)
               | (BIF_ARG_1 == am_total_active_tasks_all)
               | (BIF_ARG_1 == am_total_run_queue_lengths_all)) {
        /* Single total over all run queues. */
        Uint no = erts_run_queues_len(NULL, 0,
                                      ((BIF_ARG_1 == am_total_active_tasks)
                                       | (BIF_ARG_1 == am_total_active_tasks_all)),
                                      ((BIF_ARG_1 == am_total_active_tasks_all)
                                       | (BIF_ARG_1 == am_total_run_queue_lengths_all)));
        if (IS_USMALL(0, no))
            res = make_small(no);
        else {
            Eterm *hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
            res = uint_to_big(no, hp);
        }
        BIF_RET(res);
    } else if ((BIF_ARG_1 == am_active_tasks)
               | (BIF_ARG_1 == am_run_queue_lengths)
               | (BIF_ARG_1 == am_active_tasks_all)
               | (BIF_ARG_1 == am_run_queue_lengths_all)) {
        /* Per-run-queue list; built with the usual two-pass scheme
         * (first pass sizes via szp, second builds via hpp). */
        Eterm res, *hp, **hpp; /* NOTE: shadow the outer res/hp */
        Uint sz, *szp;
        int incl_dirty_io = ((BIF_ARG_1 == am_active_tasks_all)
                             | (BIF_ARG_1 == am_run_queue_lengths_all));
        int no_qs = (erts_no_run_queues + ERTS_NUM_DIRTY_CPU_RUNQS +
                     (incl_dirty_io ? ERTS_NUM_DIRTY_IO_RUNQS : 0));
        /* First half of qszs holds raw counts, second half the built terms. */
        Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
        (void) erts_run_queues_len(qszs, 0,
                                   ((BIF_ARG_1 == am_active_tasks)
                                    | (BIF_ARG_1 == am_active_tasks_all)),
                                   incl_dirty_io);
        sz = 0;
        szp = &sz;
        hpp = NULL;
        while (1) {
            int i;
            for (i = 0; i < no_qs; i++)
                qszs[no_qs+i] = erts_bld_uint(hpp, szp, qszs[i]);
            res = erts_bld_list(hpp, szp, no_qs, &qszs[no_qs]);
            if (hpp) {
                /* Second (build) pass done. */
                erts_free(ERTS_ALC_T_TMP, qszs);
                BIF_RET(res);
            }
            hp = HAlloc(BIF_P, sz);
            szp = NULL;
            hpp = &hp;
        }
#ifdef ERTS_ENABLE_MSACC
    } else if (BIF_ARG_1 == am_microstate_accounting) {
        Eterm threads;
        res = erts_msacc_request(BIF_P, ERTS_MSACC_GATHER, &threads);
        if (is_non_value(res))
            BIF_RET(am_undefined);
        BIF_TRAP2(gather_msacc_res_trap, BIF_P, res, threads);
#endif
    } else if (BIF_ARG_1 == am_context_switches) {
        /* {ContextSwitches, 0} */
        Eterm cs = erts_make_integer(erts_get_total_context_switches(), BIF_P);
        hp = HAlloc(BIF_P, 3);
        res = TUPLE2(hp, cs, SMALL_ZERO);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_garbage_collection) {
        res = erts_gc_info_request(BIF_P);
        if (is_non_value(res))
            BIF_RET(am_undefined);
        BIF_TRAP1(gather_gc_info_res_trap, BIF_P, res);
    } else if (BIF_ARG_1 == am_reductions) {
        /* {TotalReductions, ReductionsSinceLastCall} */
        Uint reds;
        Uint diff;
        Uint hsz = 3;
        Eterm b1, b2;

        erts_get_total_reductions(&reds, &diff);
        (void) erts_bld_uint(NULL, &hsz, reds);
        (void) erts_bld_uint(NULL, &hsz, diff);
        hp = HAlloc(BIF_P, hsz);
        b1 = erts_bld_uint(&hp, NULL, reds);
        b2 = erts_bld_uint(&hp, NULL, diff);
        res = TUPLE2(hp, b1, b2);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_exact_reductions) {
        Uint reds;
        Uint diff;
        Uint hsz = 3;
        Eterm b1, b2;

        erts_get_exact_total_reductions(BIF_P, &reds, &diff);
        (void) erts_bld_uint(NULL, &hsz, reds);
        (void) erts_bld_uint(NULL, &hsz, diff);
        hp = HAlloc(BIF_P, hsz);
        b1 = erts_bld_uint(&hp, NULL, reds);
        b2 = erts_bld_uint(&hp, NULL, diff);
        res = TUPLE2(hp, b1, b2);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_runtime) {
        ErtsMonotonicTime u1, u2;
        Eterm b1, b2;
        Uint hsz;
        erts_runtime_elapsed_both(&u1, NULL, &u2, NULL);
        hsz = 3; /* 2-tuple */
        (void) erts_bld_monotonic_time(NULL, &hsz, u1);
        (void) erts_bld_monotonic_time(NULL, &hsz, u2);
        hp = HAlloc(BIF_P, hsz);
        b1 = erts_bld_monotonic_time(&hp, NULL, u1);
        b2 = erts_bld_monotonic_time(&hp, NULL, u2);
        res = TUPLE2(hp, b1, b2);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_run_queue) {
        res = erts_run_queues_len(NULL, 1, 0, 0);
        BIF_RET(make_small(res));
    } else if (BIF_ARG_1 == am_wall_clock) {
        ErtsMonotonicTime w1, w2;
        Eterm b1, b2;
        Uint hsz;
        erts_wall_clock_elapsed_both(&w1, &w2);
        hsz = 3; /* 2-tuple */
        (void) erts_bld_monotonic_time(NULL, &hsz, w1);
        (void) erts_bld_monotonic_time(NULL, &hsz, w2);
        hp = HAlloc(BIF_P, hsz);
        b1 = erts_bld_monotonic_time(&hp, NULL, w1);
        b2 = erts_bld_monotonic_time(&hp, NULL, w2);
        res = TUPLE2(hp, b1, b2);
        BIF_RET(res);
    } else if (BIF_ARG_1 == am_io) {
        /* I/O byte counters are gathered from all schedulers; trap to
         * an Erlang helper that collects one reply per scheduler. */
        Eterm ref = erts_request_io_bytes(BIF_P);
        BIF_TRAP2(gather_io_bytes_trap, BIF_P, ref, make_small(erts_no_schedulers));
    }
    else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
        /* Undocumented: per-queue lengths as a tuple; same two-pass
         * build as the list variants above. */
        Eterm res, *hp, **hpp; /* NOTE: shadow the outer res/hp */
        Uint sz, *szp;
        int no_qs = erts_no_run_queues + ERTS_NUM_DIRTY_RUNQS;
        Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
        (void) erts_run_queues_len(qszs, 0, 0, 1);
        sz = 0;
        szp = &sz;
        hpp = NULL;
        while (1) {
            int i;
            for (i = 0; i < no_qs; i++)
                qszs[no_qs+i] = erts_bld_uint(hpp, szp, qszs[i]);
            res = erts_bld_tuplev(hpp, szp, no_qs, &qszs[no_qs]);
            if (hpp) {
                erts_free(ERTS_ALC_T_TMP, qszs);
                BIF_RET(res);
            }
            hp = HAlloc(BIF_P, sz);
            szp = NULL;
            hpp = &hp;
        }
    }
    BIF_ERROR(BIF_P, BADARG);
}
3856
error_logger_warning_map_0(BIF_ALIST_0)3857 BIF_RETTYPE error_logger_warning_map_0(BIF_ALIST_0)
3858 {
3859 BIF_RET(erts_error_logger_warnings);
3860 }
3861
/* Gate for the erts_debug internal-state BIFs below: when zero,
 * erts_debug_get_internal_state_1 raises undef.  Presumably toggled by
 * erts_debug:set_internal_state(available_internal_state, _) defined
 * elsewhere in this file -- confirm. */
static erts_atomic_t available_internal_state;
3863
/* No-op destructor for test magic binaries; its address doubles as a
 * marker checked in the "magic_ref" case of
 * erts_debug_get_internal_state_1.  Returns 1 (presumably "proceed with
 * deallocation" -- confirm against the magic-binary API). */
static int empty_magic_ref_destructor(Binary *bin)
{
    return 1;
}
3868
/*
 * erts_debug:get_internal_state/1.
 *
 * Test/debug-only dispatcher.  Raises undef unless internal-state access
 * has been enabled (see available_internal_state above).  An atom
 * argument selects a simple query; a tuple {What, Arg} or {What, A, B}
 * selects a parameterized one.  Unknown requests raise badarg.
 */
BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
{
    /*
     * NOTE: Only supposed to be used for testing, and debugging.
     */

    if (!erts_atomic_read_nob(&available_internal_state)) {
        BIF_ERROR(BIF_P, EXC_UNDEF);
    }

    if (is_atom(BIF_ARG_1)) {
        if (ERTS_IS_ATOM_STR("reds_left", BIF_ARG_1)) {
            /* Used by (emulator) */
            BIF_RET(make_small((Uint) ERTS_BIF_REDS_LEFT(BIF_P)));
        }
        else if (ERTS_IS_ATOM_STR("node_and_dist_references", BIF_ARG_1)) {
            /* Used by node_container_SUITE (emulator) */
            Eterm res = erts_get_node_and_dist_references(BIF_P);
            BIF_RET(res);
        }
        else if (ERTS_IS_ATOM_STR("monitoring_nodes", BIF_ARG_1)) {
            BIF_RET(erts_processes_monitoring_nodes(BIF_P));
        }
        else if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1)
                 || ERTS_IS_ATOM_STR("next_port", BIF_ARG_1)) {
            /* Used by node_container_SUITE (emulator) */
            Sint res;
            if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
                res = erts_ptab_test_next_id(&erts_proc, 0, 0);
            else
                res = erts_ptab_test_next_id(&erts_port, 0, 0);
            if (res < 0)
                BIF_RET(am_false);
            BIF_RET(erts_make_integer(res, BIF_P));
        }
        else if (ERTS_IS_ATOM_STR("DbTable_words", BIF_ARG_1)) {
            /* Used by ets_SUITE (stdlib) */
            size_t words = (sizeof(DbTable) + sizeof(Uint) - 1)/sizeof(Uint);
            Eterm* hp = HAlloc(BIF_P ,3);
            BIF_RET(TUPLE2(hp, make_small((Uint) words),
                           erts_ets_hash_sizeof_ext_segtab()));
        }
        else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) {
            /* Used by driver_SUITE (emulator) */
            Uint sz, *szp;
            Eterm res, *hp, **hpp;
            int no_errors;
            ErtsCheckIoDebugInfo ciodi = {0};
#ifdef HAVE_ERTS_CHECK_IO_DEBUG
            /* Release the main lock while scanning the check-io state. */
            erts_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN);
            no_errors = erts_check_io_debug(&ciodi);
            erts_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN);
#else
            no_errors = 0;
#endif
            /* Two-pass build of the 4-tuple result. */
            sz = 0;
            szp = &sz;
            hpp = NULL;
            while (1) {
                res = erts_bld_tuple(hpp, szp, 4,
                                     erts_bld_uint(hpp, szp,
                                                   (Uint) no_errors),
                                     erts_bld_uint(hpp, szp,
                                                   (Uint) ciodi.no_used_fds),
                                     erts_bld_uint(hpp, szp,
                                                   (Uint) ciodi.no_driver_select_structs),
                                     erts_bld_uint(hpp, szp,
                                                   (Uint) ciodi.no_enif_select_structs));
                if (hpp)
                    break;
                hp = HAlloc(BIF_P, sz);
                szp = NULL;
                hpp = &hp;
            }
            BIF_RET(res);
        }
        else if (ERTS_IS_ATOM_STR("process_info_args", BIF_ARG_1)) {
            /* Used by process_SUITE (emulator) */
            int i;
            Eterm res = NIL;
            Uint *hp = HAlloc(BIF_P, 2*ERTS_PI_ARGS);
            for (i = ERTS_PI_ARGS-1; i >= 0; i--) {
                res = CONS(hp, pi_args[i].name, res);
                hp += 2;
            }
            BIF_RET(res);
        }
        else if (ERTS_IS_ATOM_STR("processes", BIF_ARG_1)) {
            /* Used by process_SUITE (emulator) */
            BIF_RET(erts_debug_ptab_list(BIF_P, &erts_proc));
        }
        else if (ERTS_IS_ATOM_STR("processes_bif_info", BIF_ARG_1)) {
            /* Used by process_SUITE (emulator) */
            BIF_RET(erts_debug_ptab_list_bif_info(BIF_P, &erts_proc));
        }
        else if (ERTS_IS_ATOM_STR("max_atom_out_cache_index", BIF_ARG_1)) {
            /* Used by distribution_SUITE (emulator) */
            BIF_RET(make_small((Uint) erts_debug_max_atom_out_cache_index()));
        }
        else if (ERTS_IS_ATOM_STR("nbalance", BIF_ARG_1)) {
            Uint n;
            erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
            n = erts_debug_nbalance();
            erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
            BIF_RET(erts_make_integer(n, BIF_P));
        }
        else if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)) {
            /* We only get here when it is already enabled. */
            BIF_RET(am_true);
        }
        else if (ERTS_IS_ATOM_STR("force_heap_frags", BIF_ARG_1)) {
#ifdef FORCE_HEAP_FRAGS
            BIF_RET(am_true);
#else
            BIF_RET(am_false);
#endif
        }
        else if (ERTS_IS_ATOM_STR("memory", BIF_ARG_1)) {
            /* Thread-progress-blocked variant of erlang:memory/0. */
            Eterm res;
            erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
            erts_thr_progress_block();
            erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
            res = erts_memory(NULL, NULL, BIF_P, THE_NON_VALUE);
            erts_thr_progress_unblock();
            BIF_RET(res);
        }
        else if (ERTS_IS_ATOM_STR("mmap", BIF_ARG_1)) {
            BIF_RET(erts_mmap_debug_info(BIF_P));
        }
        else if (ERTS_IS_ATOM_STR("unique_monotonic_integer_state", BIF_ARG_1)) {
            BIF_RET(erts_debug_get_unique_monotonic_integer_state(BIF_P));
        }
        else if (ERTS_IS_ATOM_STR("min_unique_monotonic_integer", BIF_ARG_1)) {
            Sint64 value = erts_get_min_unique_monotonic_integer();
            if (IS_SSMALL(value))
                BIF_RET(make_small(value));
            else {
                Uint hsz = ERTS_SINT64_HEAP_SIZE(value);
                Eterm *hp = HAlloc(BIF_P, hsz);
                BIF_RET(erts_sint64_to_big(value, &hp));
            }
        }
        else if (ERTS_IS_ATOM_STR("min_unique_integer", BIF_ARG_1)) {
            Sint64 value = erts_get_min_unique_integer();
            if (IS_SSMALL(value))
                BIF_RET(make_small(value));
            else {
                Uint hsz = ERTS_SINT64_HEAP_SIZE(value);
                Eterm *hp = HAlloc(BIF_P, hsz);
                BIF_RET(erts_sint64_to_big(value, &hp));
            }
        }
        else if (ERTS_IS_ATOM_STR("stack_check", BIF_ARG_1)) {
            /* Measure remaining native stack in whichever direction the
             * stack grows on this platform. */
            UWord size;
            char c;
            if (erts_is_above_stack_limit(&c))
                size = erts_check_stack_recursion_downwards(&c);
            else
                size = erts_check_stack_recursion_upwards(&c);
            if (IS_SSMALL(size))
                BIF_RET(make_small(size));
            else {
                Uint hsz = BIG_UWORD_HEAP_SIZE(size);
                Eterm *hp = HAlloc(BIF_P, hsz);
                BIF_RET(uword_to_big(size, hp));
            }
        } else if (ERTS_IS_ATOM_STR("scheduler_dump", BIF_ARG_1)) {
#if defined(ERTS_HAVE_TRY_CATCH) && defined(ERTS_SYS_SUSPEND_SIGNAL)
            BIF_RET(am_true);
#else
            BIF_RET(am_false);
#endif
        }
        else if (ERTS_IS_ATOM_STR("lc_graph", BIF_ARG_1)) {
#ifdef ERTS_ENABLE_LOCK_CHECK
            Eterm res = erts_lc_dump_graph();
            BIF_RET(res);
#else
            BIF_RET(am_notsup);
#endif
        }

    }
    else if (is_tuple(BIF_ARG_1)) {
        Eterm* tp = tuple_val(BIF_ARG_1);
        switch (arityval(tp[0])) {
        case 2: {
            if (ERTS_IS_ATOM_STR("process_status", tp[1])) {
                /* Used by timer process_SUITE, timer_bif_SUITE, and
                   node_container_SUITE (emulator) */
                if (is_internal_pid(tp[2])) {
                    BIF_RET(erts_process_status(NULL, tp[2]));
                }
            }
            else if (ERTS_IS_ATOM_STR("connection_id", tp[1])) {
                DistEntry *dep;
                Eterm *hp, res;
                Uint con_id, hsz = 0;
                if (!is_atom(tp[2]))
                    BIF_ERROR(BIF_P, BADARG);
                dep = erts_sysname_to_connected_dist_entry(tp[2]);
                if (!dep)
                    BIF_ERROR(BIF_P, BADARG);
                erts_de_rlock(dep);
                con_id = (Uint) dep->connection_id;
                erts_de_runlock(dep);
                (void) erts_bld_uint(NULL, &hsz, con_id);
                hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
                res = erts_bld_uint(&hp, NULL, con_id);
                BIF_RET(res);
            }
            else if (ERTS_IS_ATOM_STR("link_list", tp[1])) {
                /* Used by erl_link_SUITE (emulator) */
                if(is_internal_pid(tp[2])) {
                    erts_aint32_t state;
                    Eterm res;
                    Process *p;
                    int sigs_done, local_only;

                    p = erts_pid2proc(BIF_P,
                                      ERTS_PROC_LOCK_MAIN,
                                      tp[2],
                                      ERTS_PROC_LOCK_MAIN);
                    if (!p) {
                        ERTS_ASSERT_IS_NOT_EXITING(BIF_P);
                        BIF_RET(am_undefined);
                    }

                    /* Drain the target's signal queue first so the link
                     * list reflects all delivered signals. */
                    local_only = 0;
                    do {
                        int reds = CONTEXT_REDS;
                        sigs_done = erts_proc_sig_handle_incoming(p,
                                                                  &state,
                                                                  &reds,
                                                                  CONTEXT_REDS,
                                                                  local_only);
                        local_only = !0;
                    } while (!sigs_done && !(state & ERTS_PSFLG_EXITING));

                    if (!(state & ERTS_PSFLG_EXITING))
                        res = make_link_list(BIF_P, 1, ERTS_P_LINKS(p), NIL);
                    else if (BIF_P == p)
                        ERTS_BIF_EXITED(BIF_P);
                    else
                        res = am_undefined;
                    if (BIF_P != p)
                        erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
                    BIF_RET(res);
                }
                else if(is_internal_port(tp[2])) {
                    Eterm res;
                    Port *p = erts_id2port_sflgs(tp[2],
                                                 BIF_P,
                                                 ERTS_PROC_LOCK_MAIN,
                                                 ERTS_PORT_SFLGS_INVALID_LOOKUP);
                    if(!p)
                        BIF_RET(am_undefined);
                    res = make_link_list(BIF_P, 1, ERTS_P_LINKS(p), NIL);
                    erts_port_release(p);
                    BIF_RET(res);
                }
                else if(is_node_name_atom(tp[2])) {
                    DistEntry *dep = erts_find_dist_entry(tp[2]);
                    if(dep) {
                        Eterm res = NIL;
                        if (dep->mld) {
                            erts_mtx_lock(&dep->mld->mtx);
                            res = make_link_list(BIF_P, 0, dep->mld->links, NIL);
                            erts_mtx_unlock(&dep->mld->mtx);
                        }
                        BIF_RET(res);
                    } else {
                        BIF_RET(am_undefined);
                    }
                }
            }
            else if (ERTS_IS_ATOM_STR("monitor_list", tp[1])) {
                /* Used by erl_link_SUITE (emulator) */
                if(is_internal_pid(tp[2])) {
                    erts_aint32_t state;
                    Process *p;
                    Eterm res;
                    int sigs_done, local_only;

                    p = erts_pid2proc(BIF_P,
                                      ERTS_PROC_LOCK_MAIN,
                                      tp[2],
                                      ERTS_PROC_LOCK_MAIN);
                    if (!p) {
                        ERTS_ASSERT_IS_NOT_EXITING(BIF_P);
                        BIF_RET(am_undefined);
                    }

                    /* Same signal-queue drain as in "link_list". */
                    local_only = 0;
                    do {
                        int reds = CONTEXT_REDS;
                        sigs_done = erts_proc_sig_handle_incoming(p,
                                                                  &state,
                                                                  &reds,
                                                                  CONTEXT_REDS,
                                                                  local_only);
                        local_only = !0;
                    } while (!sigs_done && !(state & ERTS_PSFLG_EXITING));

                    if (!(state & ERTS_PSFLG_EXITING)) {
                        res = make_monitor_list(BIF_P, 1, ERTS_P_MONITORS(p), NIL);
                        res = make_monitor_list(BIF_P, 0, ERTS_P_LT_MONITORS(p), res);
                    }
                    else {
                        if (BIF_P == p)
                            ERTS_BIF_EXITED(BIF_P);
                        else
                            res = am_undefined;
                    }
                    if (BIF_P != p)
                        erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
                    BIF_RET(res);
                } else if(is_node_name_atom(tp[2])) {
                    DistEntry *dep = erts_find_dist_entry(tp[2]);
                    if(dep) {
                        Eterm ml = NIL;
                        if (dep->mld) {
                            erts_mtx_lock(&dep->mld->mtx);
                            ml = make_monitor_list(BIF_P, 1, dep->mld->orig_name_monitors, NIL);
                            ml = make_monitor_list(BIF_P, 0, dep->mld->monitors, ml);
                            erts_mtx_unlock(&dep->mld->mtx);
                        }
                        BIF_RET(ml);
                    } else {
                        BIF_RET(am_undefined);
                    }
                }
            }
            else if (ERTS_IS_ATOM_STR("channel_number", tp[1])) {
                Eterm res;
                DistEntry *dep = erts_find_dist_entry(tp[2]);
                if (!dep)
                    res = am_undefined;
                else {
                    Uint cno = dist_entry_channel_no(dep);
                    res = make_small(cno);
                }
                BIF_RET(res);
            }
            else if (ERTS_IS_ATOM_STR("binary_info", tp[1])) {
                Eterm bin = tp[2];
                if (is_binary(bin)) {
                    Eterm real_bin = bin;
                    Eterm res = am_true;
                    ErlSubBin* sb = (ErlSubBin *) binary_val(real_bin);

                    /* Look through a sub-binary to the underlying binary. */
                    if (sb->thing_word == HEADER_SUB_BIN) {
                        real_bin = sb->orig;
                    }
                    if (*binary_val(real_bin) == HEADER_PROC_BIN) {
                        ProcBin* pb;
                        Binary* val;
                        Eterm SzTerm;
                        Uint hsz = 3 + 5;
                        Eterm* hp;
                        DECL_AM(refc_binary);

                        pb = (ProcBin *) binary_val(real_bin);
                        val = pb->val;
                        (void) erts_bld_uint(NULL, &hsz, pb->size);
                        (void) erts_bld_uint(NULL, &hsz, val->orig_size);
                        hp = HAlloc(BIF_P, hsz);

                        /* Info about the Binary* object */
                        SzTerm = erts_bld_uint(&hp, NULL, val->orig_size);
                        res = TUPLE2(hp, am_binary, SzTerm);
                        hp += 3;

                        /* Info about the ProcBin* object */
                        SzTerm = erts_bld_uint(&hp, NULL, pb->size);
                        res = TUPLE4(hp, AM_refc_binary, SzTerm,
                                     res, make_small(pb->flags));
                    } else {	/* heap binary */
                        DECL_AM(heap_binary);
                        res = AM_heap_binary;
                    }
                    BIF_RET(res);
                }
            }
            else if (ERTS_IS_ATOM_STR("term_to_binary_tuple_fallbacks", tp[1])) {
                /* Encode without the newer export/bit-binary tags to
                 * exercise the fallback encodings. */
                Uint dflags = (TERM_TO_BINARY_DFLAGS
                               & ~DFLAG_EXPORT_PTR_TAG
                               & ~DFLAG_BIT_BINARIES);
                BIF_RET(erts_term_to_binary(BIF_P, tp[2], 0, dflags));
            }
            else if (ERTS_IS_ATOM_STR("dist_ctrl", tp[1])) {
                Eterm res = am_undefined;
                DistEntry *dep = erts_sysname_to_connected_dist_entry(tp[2]);
                if (dep) {
                    erts_de_rlock(dep);
                    if (is_internal_port(dep->cid) || is_internal_pid(dep->cid))
                        res = dep->cid;
                    erts_de_runlock(dep);
                }
                BIF_RET(res);
            }
            else if (ERTS_IS_ATOM_STR("atom_out_cache_index", tp[1])) {
                /* Used by distribution_SUITE (emulator) */
                if (is_atom(tp[2])) {
                    BIF_RET(make_small(
                                (Uint)
                                erts_debug_atom_to_out_cache_index(tp[2])));
                }
            }
            else if (ERTS_IS_ATOM_STR("fake_scheduler_bindings", tp[1])) {
                return erts_fake_scheduler_bindings(BIF_P, tp[2]);
            }
            else if (ERTS_IS_ATOM_STR("reader_groups_map", tp[1])) {
                Sint groups;
                if (is_not_small(tp[2]))
                    BIF_ERROR(BIF_P, BADARG);
                groups = signed_val(tp[2]);
                if (groups < (Sint) 1 || groups > (Sint) INT_MAX)
                    BIF_ERROR(BIF_P, BADARG);

                BIF_RET(erts_debug_reader_groups_map(BIF_P, (int) groups));
            }
            else if (ERTS_IS_ATOM_STR("internal_hash", tp[1])) {
                Uint hash = (Uint) make_internal_hash(tp[2], 0);
                Uint hsz = 0;
                Eterm* hp;
                erts_bld_uint(NULL, &hsz, hash);
                hp = HAlloc(BIF_P,hsz);
                return erts_bld_uint(&hp, NULL, hash);
            }
            else if (ERTS_IS_ATOM_STR("atom", tp[1])) {
                /* Return the atom at table index 'ix', creating dummy
                 * atoms until the table is large enough. */
                Uint ix;
                if (!term_to_Uint(tp[2], &ix))
                    BIF_ERROR(BIF_P, BADARG);
                while (ix >= atom_table_size()) {
                    char tmp[20];
                    erts_snprintf(tmp, sizeof(tmp), "am%x", atom_table_size());
                    erts_atom_put((byte *) tmp, sys_strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1);
                }
                return make_atom(ix);
            }
            else if (ERTS_IS_ATOM_STR("magic_ref", tp[1])) {
                /* Inspect a magic reference: {BinAddr, RefCount,
                 * IsTestDestructor}; am_true/am_false for non-magic refs. */
                Binary *bin;
                UWord bin_addr, refc;
                Eterm bin_addr_term, refc_term, test_type;
                Uint sz;
                Eterm *hp;
                if (!is_internal_magic_ref(tp[2])) {
                    if (is_internal_ordinary_ref(tp[2])) {
                        ErtsORefThing *rtp;
                        rtp = (ErtsORefThing *) internal_ref_val(tp[2]);
                        if (erts_is_ref_numbers_magic(rtp->num))
                            BIF_RET(am_true);
                    }
                    BIF_RET(am_false);
                }
                bin = erts_magic_ref2bin(tp[2]);
                refc = erts_refc_read(&bin->intern.refc, 1);
                bin_addr = (UWord) bin;
                sz = 4; /* 3-tuple */
                erts_bld_uword(NULL, &sz, bin_addr);
                erts_bld_uword(NULL, &sz, refc);
                hp = HAlloc(BIF_P, sz);
                bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
                refc_term = erts_bld_uword(&hp, NULL, refc);
                test_type = (ERTS_MAGIC_BIN_DESTRUCTOR(bin) == empty_magic_ref_destructor
                             ? am_true : am_false);
                BIF_RET(TUPLE3(hp, bin_addr_term, refc_term, test_type));
            }

            break;
        }
        case 3: {
            if (ERTS_IS_ATOM_STR("check_time_config", tp[1])) {
                int res, time_correction;
                ErtsTimeWarpMode time_warp_mode;
                if (tp[2] == am_true)
                    time_correction = !0;
                else if (tp[2] == am_false)
                    time_correction = 0;
                else
                    break;
                if (ERTS_IS_ATOM_STR("no_time_warp", tp[3]))
                    time_warp_mode = ERTS_NO_TIME_WARP_MODE;
                else if (ERTS_IS_ATOM_STR("single_time_warp", tp[3]))
                    time_warp_mode = ERTS_SINGLE_TIME_WARP_MODE;
                else if (ERTS_IS_ATOM_STR("multi_time_warp", tp[3]))
                    time_warp_mode = ERTS_MULTI_TIME_WARP_MODE;
                else
                    break;
                res = erts_check_time_adj_support(time_correction,
                                                  time_warp_mode);
                BIF_RET(res ? am_true : am_false);
            }
            else if (ERTS_IS_ATOM_STR("make_unique_integer", tp[1])) {
                Eterm res = erts_debug_make_unique_integer(BIF_P,
                                                           tp[2],
                                                           tp[3]);
                if (is_non_value(res))
                    break;
                BIF_RET(res);
            }
            break;
        }
        default:
            break;
        }
    }
    BIF_ERROR(BIF_P, BADARG);
}
4378
erts_internal_is_system_process_1(BIF_ALIST_1)4379 BIF_RETTYPE erts_internal_is_system_process_1(BIF_ALIST_1)
4380 {
4381 if (is_internal_pid(BIF_ARG_1)) {
4382 Process *rp = erts_proc_lookup(BIF_ARG_1);
4383 if (rp && (rp->static_flags & ERTS_STC_FLG_SYSTEM_PROC))
4384 BIF_RET(am_true);
4385 BIF_RET(am_false);
4386 }
4387
4388 if (is_external_pid(BIF_ARG_1)
4389 && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry) {
4390 BIF_RET(am_false);
4391 }
4392
4393 BIF_ERROR(BIF_P, BADARG);
4394 }
4395
erts_internal_system_check_1(BIF_ALIST_1)4396 BIF_RETTYPE erts_internal_system_check_1(BIF_ALIST_1)
4397 {
4398 Eterm res;
4399 if (ERTS_IS_ATOM_STR("schedulers", BIF_ARG_1)) {
4400 res = erts_system_check_request(BIF_P);
4401 if (is_non_value(res))
4402 BIF_RET(am_undefined);
4403 BIF_TRAP1(gather_system_check_res_trap, BIF_P, res);
4404 }
4405
4406 BIF_ERROR(BIF_P, BADARG);
4407 }
4408
/* Toggled by the "hipe_test_reschedule_suspend" operation in
 * erts_debug_set_internal_state_2() below. */
static erts_atomic_t hipe_test_reschedule_flag;

#if defined(VALGRIND) && defined(__GNUC__)
/* Force noinline for valgrind suppression */
static void broken_halt_test(Eterm bif_arg_2) __attribute__((noinline));
#endif
4415
static void broken_halt_test(Eterm bif_arg_2)
{
    /* Ugly ugly code used by bif_SUITE:erlang_halt/1 */
    /* Deliberately clobbers this scheduler's run-queue pointer before the
     * dump exit, so the crash-dump path is exercised with broken state. */
#if defined(ERTS_HAVE_TRY_CATCH)
    erts_get_scheduler_data()->run_queue = NULL;
#endif
    erts_exit(ERTS_DUMP_EXIT, "%T", bif_arg_2);
}
4424
4425 static void
test_multizero_timeout_in_timeout3(void * vproc)4426 test_multizero_timeout_in_timeout3(void *vproc)
4427 {
4428 Process *proc = (Process *) vproc;
4429 ErtsMessage *mp = erts_alloc_message(0, NULL);
4430 ERTS_DECL_AM(multizero_timeout_in_timeout_done);
4431 erts_queue_message(proc, 0, mp, AM_multizero_timeout_in_timeout_done, am_system);
4432 erts_proc_dec_refc(proc);
4433 }
4434
static void
test_multizero_timeout_in_timeout2(void *vproc)
{
    /* Second stage: arm yet another zero-length timer whose callback is
     * the final stage (timeout3). */
    erts_start_timer_callback(0, test_multizero_timeout_in_timeout3, vproc);
}
4440
static void
test_multizero_timeout_in_timeout(void *vproc)
{
    /* First stage: arm a zero-length timer from within a timer callback,
     * chaining into the second stage (timeout2). */
    erts_start_timer_callback(0, test_multizero_timeout_in_timeout2, vproc);
}
4446
BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
{
    /*
     * NOTE: Only supposed to be used for testing, and debugging.
     *
     * Dispatches on the atom in BIF_ARG_1; BIF_ARG_2 carries the
     * operation-specific argument.  Except for the master switch
     * "available_internal_state" itself, every operation requires that
     * switch to be enabled.  Unrecognized combinations fall through to
     * badarg at the bottom.
     */
    if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)
        && (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)) {
        /* Master switch.  Enabling logs a warning so that accidental use
         * outside OTP test-suites is visible in the logs. */
        erts_aint_t on = (erts_aint_t) (BIF_ARG_2 == am_true);
        erts_aint_t prev_on = erts_atomic_xchg_nob(&available_internal_state, on);
        if (on) {
            erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
            erts_dsprintf(dsbufp, "Process %T ", BIF_P->common.id);
            if (erts_is_alive)
                erts_dsprintf(dsbufp, "on node %T ", erts_this_node->sysname);
            erts_dsprintf(dsbufp,
                          "enabled access to the emulator internal state.\n");
            erts_dsprintf(dsbufp,
                          "NOTE: This is an erts internal test feature and "
                          "should *only* be used by OTP test-suites.\n");
            erts_send_warning_to_logger(BIF_P->group_leader, dsbufp);
        }
        /* Returns the previous setting of the switch. */
        BIF_RET(prev_on ? am_true : am_false);
    }

    if (!erts_atomic_read_nob(&available_internal_state)) {
        /* All other operations are undefined until explicitly enabled. */
        BIF_ERROR(BIF_P, EXC_UNDEF);
    }

    if (is_atom(BIF_ARG_1)) {

        if (ERTS_IS_ATOM_STR("reds_left", BIF_ARG_1)) {
            /* Set the number of reductions left for the calling process. */
            Sint reds;
            if (term_to_Sint(BIF_ARG_2, &reds) != 0) {
                if (0 <= reds && reds <= CONTEXT_REDS) {
                    /* With a saved-calls buffer active, fcalls counts down
                     * from 0 rather than from CONTEXT_REDS. */
                    if (!ERTS_PROC_GET_SAVED_CALLS_BUF(BIF_P))
                        BIF_P->fcalls = reds;
                    else
                        BIF_P->fcalls = reds - CONTEXT_REDS;
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("block", BIF_ARG_1)
                 || ERTS_IS_ATOM_STR("sleep", BIF_ARG_1)) {
            /* Sleep for BIF_ARG_2 milliseconds; "block" additionally stops
             * thread progress (stalling all managed threads) meanwhile. */
            int block = ERTS_IS_ATOM_STR("block", BIF_ARG_1);
            Sint ms;
            if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
                if (ms > 0) {
                    erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                    if (block)
                        erts_thr_progress_block();
                    while (erts_milli_sleep((long) ms) != 0);
                    if (block)
                        erts_thr_progress_unblock();
                    erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("block_scheduler", BIF_ARG_1)) {
            /* Occupy this scheduler thread for BIF_ARG_2 milliseconds
             * without blocking thread progress. */
            Sint ms;
            if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
                if (ms > 0) {
                    erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                    while (erts_milli_sleep((long) ms) != 0);
                    erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
                }
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1)
                 || ERTS_IS_ATOM_STR("next_port", BIF_ARG_1)) {
            /* Used by node_container_SUITE (emulator) */
            Uint next;

            if (term_to_Uint(BIF_ARG_2, &next) != 0) {
                Sint res;

                if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
                    res = erts_ptab_test_next_id(&erts_proc, 1, next);
                else
                    res = erts_ptab_test_next_id(&erts_port, 1, next);
                /* A negative result means the id could not be set. */
                if (res < 0)
                    BIF_RET(am_false);
                BIF_RET(erts_make_integer(res, BIF_P));
            }
        }
        else if (ERTS_IS_ATOM_STR("force_gc", BIF_ARG_1)) {
            /* Used by signal_SUITE (emulator) */
            /* NOTE(review): the force-GC flag is set on the *calling*
             * process (BIF_P), although the process denoted by BIF_ARG_2
             * is looked up with its main lock, and that lock is not
             * explicitly released here when rp != BIF_P -- confirm this
             * is the intended behavior. */
            Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
                                        BIF_ARG_2, ERTS_PROC_LOCK_MAIN);
            if (!rp) {
                BIF_RET(am_false);
            }
            else {
                ERTS_FORCE_GC(BIF_P);
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("gc_state", BIF_ARG_1)) {
            /* Used by process_SUITE (emulator) */
            int res, enable;

            switch (BIF_ARG_2) {
            case am_true: enable = 1; break;
            case am_false: enable = 0; break;
            default: BIF_ERROR(BIF_P, BADARG); break;
            }

            /* Return the previous state; true == GC was enabled. */
            res = (BIF_P->flags & F_DISABLE_GC) ? am_false : am_true;
            erts_set_gc_state(BIF_P, enable);
            BIF_RET(res);
        }
        else if (ERTS_IS_ATOM_STR("inconsistent_heap", BIF_ARG_1)) {
            /* Used by code_SUITE (emulator) */
            if (am_start == BIF_ARG_2) {
                /* Plant a deliberately broken tuple on the heap and
                 * disable GC so it survives until the matching 'end'
                 * call repairs it. */
                Eterm broken_term;
                Eterm *hp;

                ERTS_ASSERT(!(BIF_P->flags & F_DISABLE_GC));
                erts_set_gc_state(BIF_P, 0);

                hp = HAlloc(BIF_P, 2);
                hp[0] = make_arityval(1234);
                hp[1] = THE_NON_VALUE;

                broken_term = make_tuple(hp);

                BIF_RET(broken_term);
            } else {
                /* Repair the previously returned broken term in place and
                 * re-enable garbage collection. */
                Eterm broken_term;
                Eterm *hp;

                broken_term = BIF_ARG_2;

                hp = tuple_val(broken_term);
                ERTS_ASSERT(hp[0] == make_arityval(1234));
                ERTS_ASSERT(hp[1] == THE_NON_VALUE);
                hp[0] = make_arityval(1);
                hp[1] = am_ok;

                ERTS_ASSERT(BIF_P->flags & F_DISABLE_GC);
                erts_set_gc_state(BIF_P, 1);

                BIF_RET(am_ok);
            }
        }
        else if (ERTS_IS_ATOM_STR("colliding_names", BIF_ARG_1)) {
            /* Used by ets_SUITE (stdlib) */
            if (is_tuple(BIF_ARG_2)) {
                Eterm* tpl = tuple_val(BIF_ARG_2);
                Uint cnt;
                if (arityval(tpl[0]) == 2 && is_atom(tpl[1]) &&
                    term_to_Uint(tpl[2], &cnt)) {
                    BIF_RET(erts_ets_colliding_names(BIF_P,tpl[1],cnt));
                }
            }
        }
        else if (ERTS_IS_ATOM_STR("binary_loop_limit", BIF_ARG_1)) {
            /* Used by binary_module_SUITE (stdlib) */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                /* -1 restores the default limit. */
                max_loops = erts_binary_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_binary_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("re_loop_limit", BIF_ARG_1)) {
            /* Used by re_SUITE (stdlib) */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                max_loops = erts_re_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_re_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("unicode_loop_limit", BIF_ARG_1)) {
            /* Used by unicode_SUITE (stdlib) */
            Uint max_loops;
            if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
                max_loops = erts_unicode_set_loop_limit(-1);
                BIF_RET(make_small(max_loops));
            } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
                max_loops = erts_unicode_set_loop_limit(max_loops);
                BIF_RET(make_small(max_loops));
            }
        }
        else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_suspend", BIF_ARG_1)) {
            /* Used by hipe test suites */
            erts_aint_t flag = erts_atomic_read_nob(&hipe_test_reschedule_flag);
            if (!flag && BIF_ARG_2 != am_false) {
                /* First call: suspend and yield back into this BIF; the
                 * retry sees the flag set and falls through below. */
                erts_atomic_set_nob(&hipe_test_reschedule_flag, 1);
                erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
                ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_set_internal_state_2],
                                BIF_P, BIF_ARG_1, BIF_ARG_2);
            }
            erts_atomic_set_nob(&hipe_test_reschedule_flag, !flag);
            BIF_RET(NIL);
        }
        else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_resume", BIF_ARG_1)) {
            /* Used by hipe test suites */
            Eterm res = am_false;
            Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
                                        BIF_ARG_2, ERTS_PROC_LOCK_STATUS);
            if (rp) {
                erts_resume(rp, ERTS_PROC_LOCK_STATUS);
                res = am_true;
                erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
            }
            BIF_RET(res);
        }
        else if (ERTS_IS_ATOM_STR("test_long_gc_sleep", BIF_ARG_1)) {
            /* Make every garbage collection sleep for the given number of
             * milliseconds (0 disables). */
            if (term_to_Uint(BIF_ARG_2, &erts_test_long_gc_sleep) > 0)
                BIF_RET(am_true);
        }
        else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) {
            /* Abort the emulator, printing BIF_ARG_2 as the reason. */
            erts_exit(ERTS_ABORT_EXIT, "%T\n", BIF_ARG_2);
        }
        else if (ERTS_IS_ATOM_STR("kill_dist_connection", BIF_ARG_1)) {
            /* Kill the distribution connection to the node named by
             * BIF_ARG_2; false when no such connection exists. */
            DistEntry *dep = erts_sysname_to_connected_dist_entry(BIF_ARG_2);
            if (!dep)
                BIF_RET(am_false);
            else {
                Uint32 con_id;
                erts_de_rlock(dep);
                con_id = dep->connection_id;
                erts_de_runlock(dep);
                erts_kill_dist_connection(dep, con_id);
                BIF_RET(am_true);
            }
        }
        else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) {
            /* Wait until outstanding deallocations or timer cancellations
             * have completed; yields 'ok' when done. */
            if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2)) {
                int flag = ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS;
                if (erts_debug_wait_completed(BIF_P, flag)) {
                    ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
                }
            }
            if (ERTS_IS_ATOM_STR("timer_cancellations", BIF_ARG_2)) {
                int flag = ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS;
                if (erts_debug_wait_completed(BIF_P, flag)) {
                    ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
                }
            }
        }
        else if (ERTS_IS_ATOM_STR("broken_halt", BIF_ARG_1)) {
            /* Used by bif_SUITE:erlang_halt/1; intentionally crashes the
             * emulator via broken_halt_test() -- does not return. */
            erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
            broken_halt_test(BIF_ARG_2);
        }
        else if (ERTS_IS_ATOM_STR("unique_monotonic_integer_state", BIF_ARG_1)) {
            int res = erts_debug_set_unique_monotonic_integer_state(BIF_ARG_2);
            BIF_RET(res ? am_true : am_false);
        }
        else if (ERTS_IS_ATOM_STR("node_tab_delayed_delete", BIF_ARG_1)) {
            /* node_container_SUITE */
            Sint64 msecs;
            if (term_to_Sint64(BIF_ARG_2, &msecs)) {
                /* Negative value restore original value... */
                erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
                erts_debug_test_node_tab_delayed_delete(msecs);
                erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
                BIF_RET(am_ok);
            }
        }
        else if (ERTS_IS_ATOM_STR("fill_heap", BIF_ARG_1)) {
            /* Fill what is left of the heap with one dummy bignum header;
             * 'true' additionally forces the next GC to be a fullsweep. */
            UWord left = HeapWordsLeft(BIF_P);
            if (left > 1) {
                Eterm* hp = HAlloc(BIF_P, left);
                *hp = make_pos_bignum_header(left - 1);
            }
            if (BIF_ARG_2 == am_true) {
                FLAGS(BIF_P) |= F_NEED_FULLSWEEP;
            }
            BIF_RET(am_ok);
        }
        else if (ERTS_IS_ATOM_STR("make", BIF_ARG_1)) {
            if (ERTS_IS_ATOM_STR("magic_ref", BIF_ARG_2)) {
                /* Returns {MagicRef, BinAddr} for a fresh empty magic
                 * binary; pairs with the magic-ref inspection op above. */
                Binary *bin = erts_create_magic_binary(0, empty_magic_ref_destructor);
                UWord bin_addr = (UWord) bin;
                Eterm bin_addr_term, magic_ref, res;
                Eterm *hp;
                Uint sz = ERTS_MAGIC_REF_THING_SIZE + 3;
                erts_bld_uword(NULL, &sz, bin_addr);
                hp = HAlloc(BIF_P, sz);
                bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
                magic_ref = erts_mk_magic_ref(&hp, &BIF_P->off_heap, bin);
                res = TUPLE2(hp, magic_ref, bin_addr_term);
                BIF_RET(res);
            }
        }
        else if (ERTS_IS_ATOM_STR("binary", BIF_ARG_1)) {
            /* Create a zero-filled off-heap (reference counted) binary of
             * the given size; false when allocation fails. */
            Sint64 size;
            if (term_to_Sint64(BIF_ARG_2, &size)) {
                Binary* refbin = erts_bin_drv_alloc_fnf(size);
                if (!refbin)
                    BIF_RET(am_false);
                sys_memset(refbin->orig_bytes, 0, size);
                BIF_RET(erts_build_proc_bin(&MSO(BIF_P),
                                            HAlloc(BIF_P, PROC_BIN_SIZE),
                                            refbin));
            }
        }
        else if (ERTS_IS_ATOM_STR("ets_force_trap", BIF_ARG_1)) {
            /* Force ETS operations to trap; only available in builds
             * compiled with ETS_DBG_FORCE_TRAP. */
#ifdef ETS_DBG_FORCE_TRAP
            erts_ets_dbg_force_trap = (BIF_ARG_2 == am_true) ? 1 : 0;
            BIF_RET(am_ok);
#else
            BIF_RET(am_notsup);
#endif
        }
        else if (ERTS_IS_ATOM_STR("mbuf", BIF_ARG_1)) {
            /* Copy BIF_ARG_2 into a new heap fragment chained onto the
             * process; exercises mbuf handling in the GC. */
            Uint sz = size_object(BIF_ARG_2);
            ErlHeapFragment* frag = new_message_buffer(sz);
            Eterm *hp = frag->mem;
            Eterm copy = copy_struct(BIF_ARG_2, sz, &hp, &frag->off_heap);
            frag->next = BIF_P->mbuf;
            BIF_P->mbuf = frag;
            BIF_P->mbuf_sz += sz;
            BIF_RET(copy);
        }
        else if (ERTS_IS_ATOM_STR("multizero_timeout_in_timeout", BIF_ARG_1)) {
            /* Start the three-stage zero-timeout timer chain; the final
             * stage sends a 'done' message back to this process. */
            Sint64 timeout;
            if (term_to_Sint64(BIF_ARG_2, &timeout)) {
                if (timeout < 0)
                    timeout = 0;
                /* Keep the process alive until the chain completes; the
                 * last stage decrements the reference count. */
                erts_proc_inc_refc(BIF_P);
                erts_start_timer_callback((ErtsMonotonicTime) timeout,
                                          test_multizero_timeout_in_timeout,
                                          (void *) BIF_P);
                BIF_RET(am_ok);
            }
        }
    }

    BIF_ERROR(BIF_P, BADARG);
}
4787
4788 static BIF_RETTYPE
gather_histograms_helper(Process * c_p,Eterm arg_tuple,int gather (Process *,int,int,int,UWord,Eterm))4789 gather_histograms_helper(Process * c_p, Eterm arg_tuple,
4790 int gather(Process *, int, int, int, UWord, Eterm))
4791 {
4792 SWord hist_start, hist_width, sched_id;
4793 int msg_count, alloc_num;
4794 Eterm *args;
4795
4796 /* This is an internal BIF, so the error checking is mostly left to erlang
4797 * code. */
4798
4799 ASSERT(is_tuple_arity(arg_tuple, 5));
4800 args = tuple_val(arg_tuple);
4801
4802 for (alloc_num = ERTS_ALC_A_MIN; alloc_num <= ERTS_ALC_A_MAX; alloc_num++) {
4803 if(erts_is_atom_str(ERTS_ALC_A2AD(alloc_num), args[1], 0)) {
4804 break;
4805 }
4806 }
4807
4808 if (alloc_num > ERTS_ALC_A_MAX) {
4809 BIF_ERROR(c_p, BADARG);
4810 }
4811
4812 sched_id = signed_val(args[2]);
4813 hist_width = signed_val(args[3]);
4814 hist_start = signed_val(args[4]);
4815
4816 if (sched_id < 0 || sched_id > erts_no_schedulers) {
4817 BIF_ERROR(c_p, BADARG);
4818 }
4819
4820 msg_count = gather(c_p, alloc_num, sched_id, hist_width, hist_start, args[5]);
4821
4822 BIF_RET(make_small(msg_count));
4823 }
4824
BIF_RETTYPE erts_internal_gather_alloc_histograms_1(BIF_ALIST_1)
{
    /* Gather per-allocator block-size histograms; BIF_ARG_1 is the
     * 5-tuple decoded by gather_histograms_helper(). */
    return gather_histograms_helper(BIF_P, BIF_ARG_1,
                                    erts_alcu_gather_alloc_histograms);
}
4830
BIF_RETTYPE erts_internal_gather_carrier_info_1(BIF_ALIST_1)
{
    /* Gather per-allocator carrier information; BIF_ARG_1 is the
     * 5-tuple decoded by gather_histograms_helper(). */
    return gather_histograms_helper(BIF_P, BIF_ARG_1,
                                    erts_alcu_gather_carrier_info);
}
4836
4837 #ifdef ERTS_ENABLE_LOCK_COUNT
4838
/* One retained reference to a lock's counter data, sampled while building
 * the lock-count result term. */
typedef struct {
    /* info->location_count may increase between size calculation and term
     * building, so we cap it at the value sampled in lcnt_build_result_vector.
     *
     * Shrinking is safe though. */
    int max_location_count;
    erts_lcnt_lock_info_t *info;
} lcnt_sample_t;

/* Growable array of lock samples; elements are retained until released by
 * lcnt_destroy_sample_vector(). */
typedef struct lcnt_sample_vector_ {
    lcnt_sample_t *elements;
    size_t size;
} lcnt_sample_vector_t;
4852
lcnt_build_sample_vector(erts_lcnt_lock_info_list_t * list)4853 static lcnt_sample_vector_t lcnt_build_sample_vector(erts_lcnt_lock_info_list_t *list) {
4854 erts_lcnt_lock_info_t *iterator;
4855 lcnt_sample_vector_t result;
4856 size_t allocated_entries;
4857
4858 allocated_entries = 64;
4859 result.size = 0;
4860
4861 result.elements = erts_alloc(ERTS_ALC_T_LCNT_VECTOR,
4862 allocated_entries * sizeof(lcnt_sample_t));
4863
4864 iterator = NULL;
4865 while(erts_lcnt_iterate_list(list, &iterator)) {
4866 erts_lcnt_retain_lock_info(iterator);
4867
4868 result.elements[result.size].max_location_count = iterator->location_count;
4869 result.elements[result.size].info = iterator;
4870
4871 result.size++;
4872
4873 if(result.size >= allocated_entries) {
4874 allocated_entries *= 2;
4875
4876 result.elements = erts_realloc(ERTS_ALC_T_LCNT_VECTOR, result.elements,
4877 allocated_entries * sizeof(lcnt_sample_t));
4878 }
4879 }
4880
4881 return result;
4882 }
4883
static void lcnt_destroy_sample_vector(lcnt_sample_vector_t *vector) {
    /* Release every retained lock_info and free the backing array. */
    lcnt_sample_t *cursor = vector->elements;
    lcnt_sample_t *end = cursor + vector->size;

    while(cursor < end) {
        erts_lcnt_release_lock_info(cursor->info);
        cursor++;
    }

    erts_free(ERTS_ALC_T_LCNT_VECTOR, vector->elements);
}
4893
4894 /* The size of an integer is not guaranteed to be constant since we're walking
4895 * over live data, and may cross over into bignum territory between size calc
4896 * and the actual build. This takes care of that through always assuming the
4897 * worst, but needs to be fixed up with HRelease once the final term has been
4898 * built. */
bld_unstable_uint64(Uint ** hpp,Uint * szp,Uint64 ui)4899 static ERTS_INLINE Eterm bld_unstable_uint64(Uint **hpp, Uint *szp, Uint64 ui) {
4900 Eterm res = THE_NON_VALUE;
4901
4902 if(szp) {
4903 *szp += ERTS_UINT64_HEAP_SIZE(~((Uint64) 0));
4904 }
4905
4906 if(hpp) {
4907 if (IS_USMALL(0, ui)) {
4908 res = make_small(ui);
4909 } else {
4910 res = erts_uint64_to_big(ui, hpp);
4911 }
4912 }
4913
4914 return res;
4915 }
4916
static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_stats_t *stats, Eterm res) {
    /* Prepends one statistics entry onto `res`:
     *   {{File, Line},
     *    {Tries, Colls, {Seconds, Nanoseconds, NumBlocks}},
     *    {.. wait-time histogram ..}} */
    Eterm hist_slots[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];
    Eterm file_atom, line_term, location_tuple;
    Eterm attempts, collisions;
    Eterm secs, nsecs, n_blocks, time_tuple, stat_tuple;
    Eterm hist_tuple, entry;
    const char *file;
    unsigned int i;

    file = stats->file ? stats->file : "undefined";

    file_atom = erts_atom_put((byte *)file, sys_strlen(file), ERTS_ATOM_ENC_LATIN1, 1);
    line_term = erts_bld_uint(hpp, szp, stats->line);
    location_tuple = erts_bld_tuple(hpp, szp, 2, file_atom, line_term);

    attempts = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->attempts));
    collisions = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->collisions));

    secs = bld_unstable_uint64(hpp, szp, stats->total_time_waited.s);
    nsecs = bld_unstable_uint64(hpp, szp, stats->total_time_waited.ns);
    n_blocks = bld_unstable_uint64(hpp, szp, stats->times_waited);
    time_tuple = erts_bld_tuple(hpp, szp, 3, secs, nsecs, n_blocks);

    stat_tuple = erts_bld_tuple(hpp, szp, 3, attempts, collisions, time_tuple);

    for(i = 0; i < ERTS_LCNT_HISTOGRAM_SLOT_SIZE; i++) {
        hist_slots[i] = bld_unstable_uint64(hpp, szp, stats->wait_time_histogram.ns[i]);
    }

    hist_tuple = erts_bld_tuplev(hpp, szp, ERTS_LCNT_HISTOGRAM_SLOT_SIZE, hist_slots);

    entry = erts_bld_tuple(hpp, szp, 3, location_tuple, stat_tuple, hist_tuple);

    return erts_bld_cons(hpp, szp, entry, res);
}
4959
static Eterm lcnt_pretty_print_lock_id(erts_lcnt_lock_info_t *info) {
    /* Produce a friendlier id term for a lock: the registered name for a
     * process lock (when one exists), the allocator-description atom for
     * an alcu allocator lock, and the raw id otherwise. */
    Eterm id = info->id;

    if((info->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_FLAGS_TYPE_PROCLOCK) {
        /* Use registered names as id's for process locks if available. Thread
         * progress is delayed since we may be running on a dirty scheduler. */
        ErtsThrPrgrDelayHandle delay_handle;
        Process *process;

        delay_handle = erts_thr_progress_unmanaged_delay();

        process = erts_proc_lookup(info->id);
        if (process && process->common.u.alive.reg) {
            id = process->common.u.alive.reg->name;
        }

        erts_thr_progress_unmanaged_continue(delay_handle);
    } else if(info->flags & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR) {
        if(is_small(id) && !sys_strcmp(info->name, "alcu_allocator")) {
            /* The small id is the allocator number; print its name. */
            const char *name = (const char*)ERTS_ALC_A2AD(signed_val(id));
            id = erts_atom_put((byte*)name, sys_strlen(name), ERTS_ATOM_ENC_LATIN1, 1);
        }
    }

    return id;
}
4986
static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, lcnt_sample_t *sample, Eterm res) {
    /* Prepends {Name, Id, Type, [StatsEntry]} for the sampled lock onto
     * `res`. */
    erts_lcnt_lock_info_t *info = sample->info;
    const char *type_name;
    Eterm lock_name, lock_type, lock_id, stats_list, lock_tuple;
    int i;

    ASSERT(info->name);

    type_name = erts_lock_flags_get_type_name(info->flags);

    lock_type = erts_atom_put((byte*)type_name, sys_strlen(type_name), ERTS_ATOM_ENC_LATIN1, 1);
    lock_name = erts_atom_put((byte*)info->name, sys_strlen(info->name), ERTS_ATOM_ENC_LATIN1, 1);

    /* Only attempt to resolve ids when actually emitting the term. This ought
     * to be safe since all immediates are the same size. */
    lock_id = (hpp != NULL) ? lcnt_pretty_print_lock_id(info) : NIL;

    /* Never walk past the location count sampled when the vector was
     * built, even if it has grown since. */
    stats_list = NIL;
    for(i = 0; i < MIN(info->location_count, sample->max_location_count); i++) {
        stats_list = lcnt_build_lock_stats_term(hpp, szp, &(info->location_stats[i]), stats_list);
    }

    lock_tuple = erts_bld_tuple(hpp, szp, 4, lock_name, lock_id, lock_type, stats_list);

    return erts_bld_cons(hpp, szp, lock_tuple, res);
}
5020
static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_time_t *duration,
                                    lcnt_sample_vector_t *current_locks,
                                    lcnt_sample_vector_t *deleted_locks, Eterm res) {
    /* Prepends [{'duration', {Seconds, Nanoseconds}}, {'locks', Locks}]
     * onto `res`, where Locks covers both the current and the deleted
     * lock samples.  Follows the usual two-pass convention: szp-pass for
     * sizing, hpp-pass for building.
     *
     * Fix: the address-of operators in the loop below had been mangled
     * into the mojibake "¤t_locks" (HTML-entity residue of
     * "&current_locks"), which does not compile; restored. */
    const char *str_duration = "duration";
    const char *str_locks = "locks";

    Eterm dts, dtns, tdt, adur, tdur, aloc, lloc = NIL, tloc;
    size_t i;

    /* duration tuple */
    dts = bld_unstable_uint64(hpp, szp, duration->s);
    dtns = bld_unstable_uint64(hpp, szp, duration->ns);
    tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns);

    adur = erts_atom_put((byte *)str_duration, sys_strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
    tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt);

    /* lock tuple */
    aloc = erts_atom_put((byte *)str_locks, sys_strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);

    for(i = 0; i < current_locks->size; i++) {
        lloc = lcnt_build_lock_term(hpp, szp, &current_locks->elements[i], lloc);
    }

    for(i = 0; i < deleted_locks->size; i++) {
        lloc = lcnt_build_lock_term(hpp, szp, &deleted_locks->elements[i], lloc);
    }

    tloc = erts_bld_tuple(hpp, szp, 2, aloc, lloc);

    res = erts_bld_cons(hpp, szp, tloc, res);
    res = erts_bld_cons(hpp, szp, tdur, res);

    return res;
}
5058
/* Maps lock-category names (as accepted from Erlang) to their flag bits;
 * the table is terminated by a NULL name. */
static struct {
    const char *name;
    erts_lock_flags_t flag;
} lcnt_category_map[] = {
    {"allocator", ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR},
    {"db", ERTS_LOCK_FLAGS_CATEGORY_DB},
    {"debug", ERTS_LOCK_FLAGS_CATEGORY_DEBUG},
    {"distribution", ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION},
    {"generic", ERTS_LOCK_FLAGS_CATEGORY_GENERIC},
    {"io", ERTS_LOCK_FLAGS_CATEGORY_IO},
    {"process", ERTS_LOCK_FLAGS_CATEGORY_PROCESS},
    {"scheduler", ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER},
    {NULL, 0}
};
5073
lcnt_atom_to_lock_category(Eterm atom)5074 static erts_lock_flags_t lcnt_atom_to_lock_category(Eterm atom) {
5075 int i = 0;
5076
5077 for(i = 0; lcnt_category_map[i].name != NULL; i++) {
5078 if(erts_is_atom_str(lcnt_category_map[i].name, atom, 0)) {
5079 return lcnt_category_map[i].flag;
5080 }
5081 }
5082
5083 return 0;
5084 }
5085
static Eterm lcnt_build_category_list(Eterm **hpp, Uint *szp, erts_lock_flags_t mask) {
    /* Build a list of category-name atoms for every category set in
     * `mask`, using the usual two-pass (szp/hpp) convention. */
    Eterm list = NIL;
    int i;

    for(i = 0; lcnt_category_map[i].name != NULL; i++) {
        const char *name = lcnt_category_map[i].name;
        Eterm category;

        if((mask & lcnt_category_map[i].flag) == 0) {
            continue;
        }

        category = erts_atom_put((byte*)name,
                                 sys_strlen(name),
                                 ERTS_ATOM_ENC_UTF8, 0);

        list = erts_bld_cons(hpp, szp, category, list);
    }

    return list;
}
5104
5105 #endif
5106
erts_debug_lcnt_clear_0(BIF_ALIST_0)5107 BIF_RETTYPE erts_debug_lcnt_clear_0(BIF_ALIST_0)
5108 {
5109 #ifndef ERTS_ENABLE_LOCK_COUNT
5110 BIF_RET(am_error);
5111 #else
5112 erts_lcnt_clear_counters();
5113
5114 BIF_RET(am_ok);
5115 #endif
5116 }
5117
erts_debug_lcnt_collect_0(BIF_ALIST_0)5118 BIF_RETTYPE erts_debug_lcnt_collect_0(BIF_ALIST_0)
5119 {
5120 #ifndef ERTS_ENABLE_LOCK_COUNT
5121 BIF_RET(am_error);
5122 #else
5123 lcnt_sample_vector_t current_locks, deleted_locks;
5124 erts_lcnt_data_t data;
5125
5126 Eterm *term_heap_start, *term_heap_end;
5127 Uint term_heap_size = 0;
5128 Eterm result;
5129
5130 data = erts_lcnt_get_data();
5131
5132 current_locks = lcnt_build_sample_vector(data.current_locks);
5133 deleted_locks = lcnt_build_sample_vector(data.deleted_locks);
5134
5135 lcnt_build_result_term(NULL, &term_heap_size, &data.duration,
5136 ¤t_locks, &deleted_locks, NIL);
5137
5138 term_heap_start = HAlloc(BIF_P, term_heap_size);
5139 term_heap_end = term_heap_start;
5140
5141 result = lcnt_build_result_term(&term_heap_end, NULL,
5142 &data.duration, ¤t_locks, &deleted_locks, NIL);
5143
5144 HRelease(BIF_P, term_heap_start + term_heap_size, term_heap_end);
5145
5146 lcnt_destroy_sample_vector(¤t_locks);
5147 lcnt_destroy_sample_vector(&deleted_locks);
5148
5149 BIF_RET(result);
5150 #endif
5151 }
5152
erts_debug_lcnt_control_1(BIF_ALIST_1)5153 BIF_RETTYPE erts_debug_lcnt_control_1(BIF_ALIST_1)
5154 {
5155 #ifdef ERTS_ENABLE_LOCK_COUNT
5156 if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
5157 erts_lock_flags_t mask;
5158 Eterm *term_heap_block;
5159 Uint term_heap_size;
5160
5161 mask = erts_lcnt_get_category_mask();
5162 term_heap_size = 0;
5163
5164 lcnt_build_category_list(NULL, &term_heap_size, mask);
5165
5166 term_heap_block = HAlloc(BIF_P, term_heap_size);
5167
5168 BIF_RET(lcnt_build_category_list(&term_heap_block, NULL, mask));
5169 } else if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
5170 if(erts_lcnt_get_preserve_info()) {
5171 BIF_RET(am_true);
5172 }
5173
5174 BIF_RET(am_false);
5175 }
5176 #endif
5177 BIF_ERROR(BIF_P, BADARG);
5178 }
5179
erts_debug_lcnt_control_2(BIF_ALIST_2)5180 BIF_RETTYPE erts_debug_lcnt_control_2(BIF_ALIST_2)
5181 {
5182 #ifdef ERTS_ENABLE_LOCK_COUNT
5183 if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
5184 erts_lock_flags_t category_mask = 0;
5185 Eterm categories = BIF_ARG_2;
5186
5187 if(!(is_list(categories) || is_nil(categories))) {
5188 BIF_ERROR(BIF_P, BADARG);
5189 }
5190
5191 while(is_list(categories)) {
5192 Eterm *cell = list_val(categories);
5193 erts_lock_flags_t category;
5194
5195 category = lcnt_atom_to_lock_category(CAR(cell));
5196
5197 if(!category) {
5198 Eterm *hp = HAlloc(BIF_P, 4);
5199
5200 BIF_RET(TUPLE3(hp, am_error, am_badarg, CAR(cell)));
5201 }
5202
5203 category_mask |= category;
5204 categories = CDR(cell);
5205 }
5206
5207 erts_lcnt_set_category_mask(category_mask);
5208
5209 BIF_RET(am_ok);
5210 } else if(BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) {
5211 int enabled = (BIF_ARG_2 == am_true);
5212
5213 if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
5214 erts_lcnt_set_preserve_info(enabled);
5215
5216 BIF_RET(am_ok);
5217 }
5218 }
5219 #endif
5220 BIF_ERROR(BIF_P, BADARG);
5221 }
5222
static void os_info_init(void)
{
    /* Precompute the literal tuples os_type_tuple and os_version_tuple
     * once at startup (presumably served to os:type/0 and os:version/0 --
     * verify against their readers). */
    Eterm type = erts_atom_put((byte *) os_type, sys_strlen(os_type), ERTS_ATOM_ENC_LATIN1, 1);
    Eterm flav;
    int major, minor, build;
    char* buf = erts_alloc(ERTS_ALC_T_TMP, 1024); /* More than enough */
    Eterm* hp;

    os_flavor(buf, 1024);
    flav = erts_atom_put((byte *) buf, sys_strlen(buf), ERTS_ATOM_ENC_LATIN1, 1);
    erts_free(ERTS_ALC_T_TMP, (void *) buf);
    /* One literal-area allocation holds both tuples: 3 words for the
     * 2-tuple followed by 4 words for the 3-tuple. */
    hp = erts_alloc(ERTS_ALC_T_LITERAL, (3+4)*sizeof(Eterm));
    os_type_tuple = TUPLE2(hp, type, flav);
    erts_set_literal_tag(&os_type_tuple, hp, 3);

    hp += 3;
    os_version(&major, &minor, &build);
    os_version_tuple = TUPLE3(hp,
                              make_small(major),
                              make_small(minor),
                              make_small(build));
    erts_set_literal_tag(&os_version_tuple, hp, 4);
}
5246
void
erts_bif_info_init(void)
{
    /* One-time module initialization: state flags, the trap exports used
     * by the result-gathering BIFs above, and cached process/os info. */
    erts_atomic_init_nob(&available_internal_state, 0);
    erts_atomic_init_nob(&hipe_test_reschedule_flag, 0);

    alloc_info_trap = erts_export_put(am_erlang, am_alloc_info, 1);
    alloc_sizes_trap = erts_export_put(am_erlang, am_alloc_sizes, 1);
    gather_sched_wall_time_res_trap
        = erts_export_put(am_erts_internal, am_gather_sched_wall_time_result, 1);
    gather_gc_info_res_trap
        = erts_export_put(am_erlang, am_gather_gc_info_result, 1);
    gather_io_bytes_trap
        = erts_export_put(am_erts_internal, am_gather_io_bytes, 2);
    gather_msacc_res_trap
        = erts_export_put(am_erts_internal, am_gather_microstate_accounting_result, 2);
    gather_system_check_res_trap
        = erts_export_put(am_erts_internal, am_gather_system_check_result, 1);

    is_process_alive_trap = erts_export_put(am_erts_internal, am_is_process_alive, 1);


    process_info_init();
    os_info_init();
}
5272