/*
 * %CopyrightBegin%
 *
 * Copyright Ericsson AB 2009-2020. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * %CopyrightEnd%
 */
/* Erlang Native InterFace
 */

/*
 * The environment contains a pointer to the currently executing process.
 * In the dirty case this pointer does, however, not point to the
 * actual process structure of the executing process, but instead to
 * a "shadow process structure". This is in order to be able to handle
 * heap allocation without the need to acquire the main lock on
 * the process.
 *
 * The dirty process is allowed to allocate on the heap without
 * the main lock, i.e., incrementing htop, but is not allowed to
 * modify mbuf, offheap, etc. without the main lock. The dirty
 * process moves the mbuf list and offheap list of the shadow process
 * structure into the real structure when the dirty NIF call
 * completes.
 */


#ifdef HAVE_CONFIG_H
#  include "config.h"
#endif

#include "erl_nif.h"

#include "sys.h"
#include "global.h"
#include "erl_binary.h"
#include "bif.h"
#include "error.h"
#include "big.h"
#include "erl_map.h"
#include "beam_bp.h"
#include "erl_thr_progress.h"
#include "dtrace-wrapper.h"
#include "erl_process.h"
#include "erl_bif_unique.h"
#include "erl_utils.h"
#include "erl_io_queue.h"
#include "erl_proc_sig_queue.h"
#undef ERTS_WANT_NFUNC_SCHED_INTERNALS__
#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
#include "erl_nfunc_sched.h"
#if defined(USE_DYNAMIC_TRACE) && (defined(USE_DTRACE) || defined(USE_SYSTEMTAP))
#define HAVE_USE_DTRACE 1
#endif

#include <limits.h>
#include <stddef.h> /* offsetof */

/* Information about a loaded nif library.
 * Each successful call to erlang:load_nif will allocate an instance of
 * erl_module_nif. Two calls opening the same library will thus have the same
 * 'handle'.
 */
struct erl_module_nif {
    void* priv_data;
    void* handle;             /* "dlopen" */
    struct enif_entry_t entry;
    erts_refc_t rt_cnt;       /* number of resource types */
    erts_refc_t rt_dtor_cnt;  /* number of resource types with destructors */
    Module* mod;           /* Can be NULL if orphan with dtor-resources left */

    ErlNifFunc _funcs_copy_[1];  /* only used for old libs */
};

typedef ERL_NIF_TERM (*NativeFunPtr)(ErlNifEnv*, int, const ERL_NIF_TERM[]);

#ifdef DEBUG
#  define READONLY_CHECK
#  define ERTS_DBG_NIF_NOT_SCHED_MARKER ((void *) (UWord) 1)
#endif
#ifdef READONLY_CHECK
#  define ADD_READONLY_CHECK(ENV,PTR,SIZE) add_readonly_check(ENV,PTR,SIZE)
static void add_readonly_check(ErlNifEnv*, unsigned char* ptr, unsigned sz);
#else
#  define ADD_READONLY_CHECK(ENV,PTR,SIZE) ((void)0)
#endif

#ifdef ERTS_NIF_ASSERT_IN_ENV
#  define ASSERT_IN_ENV(ENV, TERM, NR, TYPE) dbg_assert_in_env(ENV, TERM, NR, TYPE, __func__)
static void dbg_assert_in_env(ErlNifEnv*, Eterm term, int nr, const char* type, const char* func);
#  include "erl_gc.h"
#else
#  define ASSERT_IN_ENV(ENV, TERM, NR, TYPE)
#endif

#ifdef DEBUG
static int is_offheap(const ErlOffHeap* off_heap);
#endif

#ifdef USE_VM_PROBES
void dtrace_nifenv_str(ErlNifEnv *, char *);
#endif

#define MIN_HEAP_FRAG_SZ 200
static Eterm* alloc_heap_heavy(ErlNifEnv* env, size_t need, Eterm* hp);

static ERTS_INLINE int
is_scheduler(void)
{
    ErtsSchedulerData *esdp = erts_get_scheduler_data();
    if (!esdp)
        return 0;
    if (ERTS_SCHEDULER_IS_DIRTY(esdp))
        return -1;
    return 1;
}

static ERTS_INLINE void
execution_state(ErlNifEnv *env, Process **c_pp, int *schedp)
{
    if (schedp)
        *schedp = is_scheduler();
    if (c_pp) {
        if (!env || env->proc->common.id == ERTS_INVALID_PID)
            *c_pp = NULL;
        else {
            Process *c_p = env->proc;

            if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)) {
                ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
                               & ERTS_PROC_LOCK_MAIN);
            }
            else {
                c_p = env->proc->next;
                ASSERT(is_scheduler() < 0);
                ASSERT(c_p && env->proc->common.id == c_p->common.id);
            }

            *c_pp = c_p;

            ASSERT(!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC));
        }
    }
}

static ERTS_INLINE Eterm* alloc_heap(ErlNifEnv* env, size_t need)
{
    Eterm* hp = env->hp;
    env->hp += need;
    if (env->hp <= env->hp_end) {
        return hp;
    }
    return alloc_heap_heavy(env, need, hp);
}

static Eterm* alloc_heap_heavy(ErlNifEnv* env, size_t need, Eterm* hp)
{
    env->hp = hp;
    if (env->heap_frag == NULL) {
        ASSERT(HEAP_LIMIT(env->proc) == env->hp_end);
        ASSERT(env->hp + need > env->hp_end);
        HEAP_TOP(env->proc) = env->hp;
    }
    else {
        Uint usz = env->hp - env->heap_frag->mem;
        env->proc->mbuf_sz += usz - env->heap_frag->used_size;
        env->heap_frag->used_size = usz;
        ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
    }
    hp = erts_heap_alloc(env->proc, need, MIN_HEAP_FRAG_SZ);
    env->heap_frag = MBUF(env->proc);
    env->hp = hp + need;
    env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size;

    return hp;
}

#if SIZEOF_LONG != ERTS_SIZEOF_ETERM
static ERTS_INLINE void ensure_heap(ErlNifEnv* env, size_t may_need)
{
    if (env->hp + may_need > env->hp_end) {
        alloc_heap_heavy(env, may_need, env->hp);
        env->hp -= may_need;
    }
}
#endif

void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
                  Process* tracee)
{
    env->mod_nif = mod_nif;
    env->proc = p;
    env->hp = HEAP_TOP(p);
    env->hp_end = HEAP_LIMIT(p);
    env->heap_frag = NULL;
    env->fpe_was_unmasked = erts_block_fpe();
    env->tmp_obj_list = NULL;
    env->exception_thrown = 0;
    env->tracee = tracee;

    ASSERT(p->common.id != ERTS_INVALID_PID);

#ifdef ERTS_NIF_ASSERT_IN_ENV
    env->dbg_disable_assert_in_env = 0;
#endif
#if defined(DEBUG) && defined(ERTS_DIRTY_SCHEDULERS)
    {
        ErtsSchedulerData *esdp = erts_get_scheduler_data();
        ASSERT(esdp);

        if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
            erts_aint32_t state = erts_atomic32_read_nob(&p->state);

            ASSERT(p->scheduler_data == esdp);
            ASSERT((state & (ERTS_PSFLG_RUNNING
                             | ERTS_PSFLG_RUNNING_SYS))
                   && !(state & (ERTS_PSFLG_DIRTY_RUNNING
                                 | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
        }
    }
#endif
}

static void full_cache_env(ErlNifEnv *env);
static void cache_env(ErlNifEnv* env);
static void full_flush_env(ErlNifEnv *env);
static void flush_env(ErlNifEnv* env);

/* Temporary object header, auto-deallocated when NIF returns or when
 * independent environment is cleared.
 *
 * The payload can be accessed with &tmp_obj_ptr[1] but keep in mind that its
 * first element must not require greater alignment than `next`. */
struct enif_tmp_obj_t {
    struct enif_tmp_obj_t* next;
    void (*dtor)(struct enif_tmp_obj_t*);
    ErtsAlcType_t allocator;
    /*char data[];*/
};

static ERTS_INLINE void free_tmp_objs(ErlNifEnv* env)
{
    while (env->tmp_obj_list != NULL) {
        struct enif_tmp_obj_t* free_me = env->tmp_obj_list;
        env->tmp_obj_list = free_me->next;
        free_me->dtor(free_me);
    }
}

/* Whether the given environment is bound to a process and will be cleaned up
 * when the NIF returns. It's safe to use temp_alloc for objects in
 * env->tmp_obj_list when this is true. */
static ERTS_INLINE int is_proc_bound(ErlNifEnv *env)
{
    return env->mod_nif != NULL;
}

/* Allocates and attaches an object to the given environment, running its
 * destructor when the environment is cleared. To avoid temporary variables the
 * address of the allocated object is returned instead of the enif_tmp_obj_t.
 *
 * The destructor *must* call `erts_free(tmp_obj->allocator, tmp_obj)` to free
 * the object. If the destructor needs to refer to the allocated object its
 * address will be &tmp_obj[1]. */
static ERTS_INLINE void *alloc_tmp_obj(ErlNifEnv *env, size_t size,
                                       void (*dtor)(struct enif_tmp_obj_t*)) {
    struct enif_tmp_obj_t *tmp_obj;
    ErtsAlcType_t allocator;

    allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;

    tmp_obj = erts_alloc(allocator, sizeof(struct enif_tmp_obj_t) + MAX(1, size));

    tmp_obj->next = env->tmp_obj_list;
    tmp_obj->allocator = allocator;
    tmp_obj->dtor = dtor;

    env->tmp_obj_list = tmp_obj;

    return (void*)&tmp_obj[1];
}

/* Generic destructor for objects allocated through alloc_tmp_obj that don't
 * care about their payload. */
static void tmp_alloc_dtor(struct enif_tmp_obj_t *tmp_obj)
{
    erts_free(tmp_obj->allocator, tmp_obj);
}
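
/* A minimal, hypothetical sketch (not used by the VM) of a custom destructor
 * that honours the alloc_tmp_obj() contract above: it may clean up whatever
 * lives in the payload at &tmp_obj[1], but it must end by freeing the header
 * with the allocator recorded in tmp_obj->allocator.
 *
 *     struct my_tmp_payload { FILE *fp; };   // assumed payload layout
 *
 *     static void my_tmp_dtor(struct enif_tmp_obj_t *tmp_obj)
 *     {
 *         struct my_tmp_payload *p = (struct my_tmp_payload*) &tmp_obj[1];
 *         if (p->fp)
 *             fclose(p->fp);
 *         erts_free(tmp_obj->allocator, tmp_obj);  // mandatory last step
 *     }
 */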

void erts_post_nif(ErlNifEnv* env)
{
    erts_unblock_fpe(env->fpe_was_unmasked);
    full_flush_env(env);
    free_tmp_objs(env);
    env->exiting = ERTS_PROC_IS_EXITING(env->proc);
}


/*
 * Initialize a NifExport struct. Create it if needed and store it in the
 * proc. The direct_fp function is what will be invoked by op_call_nif, and
 * the indirect_fp function, if not NULL, is what the direct_fp function
 * will call. If the allocated NifExport isn't enough to hold all of argv,
 * allocate a larger one. Save 'current' and registers if first time this
 * call is scheduled.
 */

static ERTS_INLINE ERL_NIF_TERM
schedule(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
         Eterm mod, Eterm func_name, int argc, const ERL_NIF_TERM argv[])
{
    NifExport *ep;
    Process *c_p, *dirty_shadow_proc;

    execution_state(env, &c_p, NULL);
    if (c_p == env->proc)
        dirty_shadow_proc = NULL;
    else
        dirty_shadow_proc = env->proc;

    ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));

    ep = erts_nif_export_schedule(c_p, dirty_shadow_proc,
                                  c_p->current,
                                  c_p->cp,
                                  BeamOpCodeAddr(op_call_nif),
                                  direct_fp, indirect_fp,
                                  mod, func_name,
                                  argc, (const Eterm *) argv);
    if (!ep->m) {
        /* First time this call is scheduled... */
        erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
        ep->m = env->mod_nif;
    }
    return (ERL_NIF_TERM) THE_NON_VALUE;
}


static ERL_NIF_TERM dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);

int
erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *reg)
{
    int exiting;
    ERL_NIF_TERM *argv = (ERL_NIF_TERM *) reg;
    NifExport *nep = ERTS_I_BEAM_OP_TO_NIF_EXPORT(I);
    ErtsCodeMFA *codemfa = erts_code_to_codemfa(I);
    NativeFunPtr dirty_nif = (NativeFunPtr) I[1];
    ErlNifEnv env;
    ERL_NIF_TERM result;
#ifdef DEBUG
    erts_aint32_t state = erts_atomic32_read_nob(&c_p->state);

    ASSERT(nep == ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p));

    ASSERT(!c_p->scheduler_data);
    ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
        && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
    ASSERT(esdp);

    nep->func = ERTS_DBG_NIF_NOT_SCHED_MARKER;
#endif

    erts_pre_nif(&env, c_p, nep->m, NULL);

    env.proc = erts_make_dirty_shadow_proc(esdp, c_p);

    env.proc->freason = EXC_NULL;
    env.proc->fvalue = NIL;
    env.proc->ftrace = NIL;
    env.proc->i = c_p->i;

    ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)));

    erts_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
                                              | ERTS_PSFLG_DIRTY_IO_PROC));

    ASSERT(esdp->current_nif == NULL);
    esdp->current_nif = &env;

    erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);

    result = (*dirty_nif)(&env, codemfa->arity, argv); /* Call dirty NIF */

    erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);

    ASSERT(esdp->current_nif == &env);
    esdp->current_nif = NULL;

    ASSERT(env.proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
    ASSERT(env.proc->next == c_p);

    exiting = ERTS_PROC_IS_EXITING(c_p);

    if (!exiting) {
        if (env.exception_thrown) {
        schedule_exception:
            schedule(&env, dirty_nif_exception, NULL,
                     am_erts_internal, am_dirty_nif_exception,
                     1, &env.proc->fvalue);
        }
        else if (is_value(result)) {
            schedule(&env, dirty_nif_finalizer, NULL,
                     am_erts_internal, am_dirty_nif_finalizer,
                     1, &result);
        }
        else if (env.proc->freason != TRAP) { /* user returned garbage... */
            ERTS_DECL_AM(badreturn);
            (void) enif_raise_exception(&env, AM_badreturn);
            goto schedule_exception;
        }
        else {
            /* Rescheduled by dirty NIF call... */
            ASSERT(nep->func != ERTS_DBG_NIF_NOT_SCHED_MARKER);
        }
        c_p->i = env.proc->i;
        c_p->arity = env.proc->arity;
    }

#ifdef DEBUG
    if (nep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER)
        nep->func = NULL;
#endif

    erts_unblock_fpe(env.fpe_was_unmasked);
    full_flush_env(&env);
    free_tmp_objs(&env);

    return exiting;
}


static void full_flush_env(ErlNifEnv* env)
{
    flush_env(env);
    if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
        /* Dirty nif call using shadow process struct */
        erts_flush_dirty_shadow_proc(env->proc);
}

static void full_cache_env(ErlNifEnv* env)
{
    if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
        erts_cache_dirty_shadow_proc(env->proc);
        /*
         * If shadow proc had heap fragments when flushed
         * those have now been moved to the real proc.
         * Ensure heap pointers do not point into a heap
         * fragment on real proc...
         */
        ASSERT(!env->proc->mbuf);
        env->hp_end = HEAP_LIMIT(env->proc);
        env->hp = HEAP_TOP(env->proc);
    }
    cache_env(env);
}

/* Flush out our cached heap pointers to allow an ordinary HAlloc
*/
static void flush_env(ErlNifEnv* env)
{
    if (env->heap_frag == NULL) {
        ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
        ASSERT(env->hp >= HEAP_TOP(env->proc));
        ASSERT(env->hp <= HEAP_LIMIT(env->proc));
        HEAP_TOP(env->proc) = env->hp;
    }
    else {
        Uint usz;
        ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
        ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
        usz = env->hp - env->heap_frag->mem;
        env->proc->mbuf_sz += usz - env->heap_frag->used_size;
        env->heap_frag->used_size = usz;
        ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
    }
}

/* Restore cached heap pointers to allow alloc_heap again.
*/
static void cache_env(ErlNifEnv* env)
{
    env->heap_frag = MBUF(env->proc);
    if (env->heap_frag == NULL) {
        ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
        ASSERT(env->hp <= HEAP_TOP(env->proc));
        ASSERT(env->hp <= HEAP_LIMIT(env->proc));
        env->hp = HEAP_TOP(env->proc);
    }
    else {
        env->hp = env->heap_frag->mem + env->heap_frag->used_size;
        env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size;
    }
}

void* enif_priv_data(ErlNifEnv* env)
{
    return env->mod_nif->priv_data;
}

void* enif_alloc(size_t size)
{
    return erts_alloc_fnf(ERTS_ALC_T_NIF, (Uint) size);
}

void* enif_realloc(void* ptr, size_t size)
{
    return erts_realloc_fnf(ERTS_ALC_T_NIF, ptr, size);
}

void enif_free(void* ptr)
{
    erts_free(ERTS_ALC_T_NIF, ptr);
}

struct enif_msg_environment_t
{
    ErlNifEnv env;
    Process phony_proc;
};

static ERTS_INLINE void
setup_nif_env(struct enif_msg_environment_t* msg_env,
              struct erl_module_nif* mod,
              Process* tracee)
{
    Eterm* phony_heap = (Eterm*) msg_env; /* dummy non-NULL ptr */

    msg_env->env.hp = phony_heap;
    msg_env->env.hp_end = phony_heap;
    msg_env->env.heap_frag = NULL;
    msg_env->env.mod_nif = mod;
    msg_env->env.tmp_obj_list = NULL;
    msg_env->env.proc = &msg_env->phony_proc;
    msg_env->env.exception_thrown = 0;
    sys_memset(&msg_env->phony_proc, 0, sizeof(Process));
    HEAP_START(&msg_env->phony_proc) = phony_heap;
    HEAP_TOP(&msg_env->phony_proc) = phony_heap;
    HEAP_LIMIT(&msg_env->phony_proc) = phony_heap;
    HEAP_END(&msg_env->phony_proc) = phony_heap;
    MBUF(&msg_env->phony_proc) = NULL;
    msg_env->phony_proc.common.id = ERTS_INVALID_PID;
    msg_env->env.tracee = tracee;

#ifdef FORCE_HEAP_FRAGS
    msg_env->phony_proc.space_verified = 0;
    msg_env->phony_proc.space_verified_from = NULL;
#endif
#ifdef ERTS_NIF_ASSERT_IN_ENV
    msg_env->env.dbg_disable_assert_in_env = 0;
#endif
}

ErlNifEnv* enif_alloc_env(void)
{
    struct enif_msg_environment_t* msg_env =
        erts_alloc_fnf(ERTS_ALC_T_NIF, sizeof(struct enif_msg_environment_t));
    setup_nif_env(msg_env, NULL, NULL);
    return &msg_env->env;
}
void enif_free_env(ErlNifEnv* env)
{
    enif_clear_env(env);
    erts_free(ERTS_ALC_T_NIF, env);
}

static ERTS_INLINE void pre_nif_noproc(struct enif_msg_environment_t* msg_env,
                                       struct erl_module_nif* mod,
                                       Process* tracee)
{
    setup_nif_env(msg_env, mod, tracee);
    msg_env->env.fpe_was_unmasked = erts_block_fpe();
}

static ERTS_INLINE void post_nif_noproc(struct enif_msg_environment_t* msg_env)
{
    erts_unblock_fpe(msg_env->env.fpe_was_unmasked);
    enif_clear_env(&msg_env->env);
}

static ERTS_INLINE void clear_offheap(ErlOffHeap* oh)
{
    oh->first = NULL;
    oh->overhead = 0;
}

void enif_clear_env(ErlNifEnv* env)
{
    struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)env;
    Process* p = &menv->phony_proc;
    ASSERT(p == menv->env.proc);
    ASSERT(p->common.id == ERTS_INVALID_PID);
    ASSERT(MBUF(p) == menv->env.heap_frag);

    free_tmp_objs(env);

    if (MBUF(p) != NULL) {
        erts_cleanup_offheap(&MSO(p));
        clear_offheap(&MSO(p));
        free_message_buffer(MBUF(p));
        MBUF(p) = NULL;
        menv->env.heap_frag = NULL;
    }
    ASSERT(HEAP_TOP(p) == HEAP_END(p));
    menv->env.hp = menv->env.hp_end = HEAP_TOP(p);

    ASSERT(!is_offheap(&MSO(p)));
}

#ifdef DEBUG
static int enif_send_delay = 0;
#define ERTS_FORCE_ENIF_SEND_DELAY() (enif_send_delay++ % 32 == 0)
#else
#ifdef ERTS_PROC_LOCK_OWN_IMPL
#define ERTS_FORCE_ENIF_SEND_DELAY() 0
#else
/*
 * We always schedule messages if we do not use our own
 * process lock implementation: we would otherwise try a trylock on
 * a lock that might already be held by the same thread, and what
 * happens then is not guaranteed to be the same for all mutex
 * implementations.
 */
#define ERTS_FORCE_ENIF_SEND_DELAY() 1
#endif
#endif

int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
{
    ErlTraceMessageQueue *msgq, **last_msgq;
    int reds = 0;

    /* Only one thread at a time is allowed to flush trace messages,
       so we require the main lock to be held when doing the flush */
    ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);

    erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);

    msgq = c_p->trace_msg_q;

    if (!msgq)
        goto error;

    do {
        Process* rp;
        ErtsProcLocks rp_locks;
        ErtsMessage *first, **last;
        Uint len;

        first = msgq->first;
        last = msgq->last;
        len = msgq->len;
        msgq->first = NULL;
        msgq->last = &msgq->first;
        msgq->len = 0;
        erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);

        ASSERT(len != 0);

        rp = erts_proc_lookup(msgq->receiver);
        if (rp) {
            rp_locks = 0;
            if (rp->common.id == c_p->common.id)
                rp_locks = c_p_locks;
            erts_queue_proc_messages(c_p, rp, rp_locks, first, last, len);
            if (rp->common.id == c_p->common.id)
                rp_locks &= ~c_p_locks;
            if (rp_locks)
                erts_proc_unlock(rp, rp_locks);
            reds += len;
        } else {
            erts_cleanup_messages(first);
        }
        reds += 1;
        erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
        msgq = msgq->next;
    } while (msgq);

    last_msgq = &c_p->trace_msg_q;

    while (*last_msgq) {
        msgq = *last_msgq;
        if (msgq->len == 0) {
            *last_msgq = msgq->next;
            erts_free(ERTS_ALC_T_TRACE_MSG_QUEUE, msgq);
        } else {
            last_msgq = &msgq->next;
        }
    }

error:
    erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);

    return reds;
}

/** @brief Create a message with the content of process independent \c msg_env.
 *  Invalidates \c msg_env.
 */
ErtsMessage* erts_create_message_from_nif_env(ErlNifEnv* msg_env)
{
    struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
    ErtsMessage* mp;

    flush_env(msg_env);
    mp = erts_alloc_message(0, NULL);
    mp->data.heap_frag = menv->env.heap_frag;
    ASSERT(mp->data.heap_frag == MBUF(&menv->phony_proc));
    if (mp->data.heap_frag != NULL) {
        /* Move all offheap's from phony proc to the first fragment.
           Quick and dirty... */
        ASSERT(!is_offheap(&mp->data.heap_frag->off_heap));
        mp->data.heap_frag->off_heap = MSO(&menv->phony_proc);
        clear_offheap(&MSO(&menv->phony_proc));
        menv->env.heap_frag = NULL;
        MBUF(&menv->phony_proc) = NULL;
    }
    return mp;
}

static ERTS_INLINE ERL_NIF_TERM make_copy(ErlNifEnv* dst_env,
                                          ERL_NIF_TERM src_term,
                                          Uint *cpy_szp)
{
    Uint sz;
    Eterm* hp;
    /*
     * No preserved sharing is allowed as long as literals are also preserved.
     * A process independent environment cannot be reached by purge.
     */
    sz = size_object(src_term);
    if (cpy_szp)
        *cpy_szp += sz;
    hp = alloc_heap(dst_env, sz);
    return copy_struct(src_term, sz, &hp, &MSO(dst_env->proc));
}

int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
              ErlNifEnv* msg_env, ERL_NIF_TERM msg)
{
    struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
    ErtsProcLocks rp_locks = 0;
    ErtsProcLocks lc_locks = 0;
    Process* rp;
    Process* c_p;
    ErtsMessage *mp;
    Eterm from;
    Eterm receiver = to_pid->pid;
    int scheduler;
    Uint copy_sz = 0;

    execution_state(env, &c_p, &scheduler);


    if (scheduler > 0) { /* Normal scheduler */
        rp = erts_proc_lookup(receiver);
        if (!rp)
            return 0;
    }
    else {
        if (c_p) {
            ASSERT(scheduler < 0); /* Dirty scheduler */
            if (ERTS_PROC_IS_EXITING(c_p))
                return 0;

            if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
                erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
            }
        }

        rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
                               receiver, rp_locks,
                               ERTS_P2P_FLG_INC_REFC);
        if (!rp) {
            if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
                erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
            return 0;
        }
    }

    if (c_p == rp)
        rp_locks = ERTS_PROC_LOCK_MAIN;

    if (menv) {
        Eterm token = c_p ? SEQ_TRACE_TOKEN(c_p) : am_undefined;
        if (token != NIL && token != am_undefined) {
            /* This code is copied from erts_send_message */
            Eterm stoken = SEQ_TRACE_TOKEN(c_p);
#ifdef USE_VM_PROBES
            DTRACE_CHARBUF(sender_name, 64);
            DTRACE_CHARBUF(receiver_name, 64);
            Sint tok_label = 0;
            Sint tok_lastcnt = 0;
            Sint tok_serial = 0;
            Eterm utag = NIL;
            *sender_name = *receiver_name = '\0';
            if (DTRACE_ENABLED(message_send)) {
                erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
                              "%T", c_p->common.id);
                erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
                              "%T", rp->common.id);
            }
#endif
            if (have_seqtrace(stoken)) {
                seq_trace_update_send(c_p);
                seq_trace_output(stoken, msg, SEQ_TRACE_SEND,
                                 rp->common.id, c_p);
            }
#ifdef USE_VM_PROBES
            if (!(DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING)) {
                stoken = NIL;
            }
#endif
            token = make_copy(msg_env, stoken, &copy_sz);

#ifdef USE_VM_PROBES
            if (DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING) {
                if (is_immed(DT_UTAG(c_p)))
                    utag = DT_UTAG(c_p);
                else
                    utag = make_copy(msg_env, DT_UTAG(c_p), &copy_sz);
            }
            if (DTRACE_ENABLED(message_send)) {
                if (have_seqtrace(stoken)) {
                    tok_label = SEQ_TRACE_T_DTRACE_LABEL(stoken);
                    tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(stoken));
                    tok_serial = signed_val(SEQ_TRACE_T_SERIAL(stoken));
                }
                DTRACE6(message_send, sender_name, receiver_name,
                        size_object(msg), tok_label, tok_lastcnt, tok_serial);
            }
#endif
        }
        mp = erts_create_message_from_nif_env(msg_env);
        ERL_MESSAGE_TOKEN(mp) = token;
    } else {
        erts_literal_area_t litarea;
        ErlOffHeap *ohp;
        Eterm *hp;
        Uint sz;
        INITIALIZE_LITERAL_PURGE_AREA(litarea);
        sz = size_object_litopt(msg, &litarea);
        copy_sz += sz;
        if (c_p && !env->tracee) {
            full_flush_env(env);
            mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
            full_cache_env(env);
        }
        else {
            erts_aint_t state = erts_atomic32_read_nob(&rp->state);
            if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
                mp = erts_alloc_message(sz, &hp);
                ohp = sz == 0 ? NULL : &mp->hfrag.off_heap;
            }
            else {
                ErlHeapFragment *bp = new_message_buffer(sz);
                mp = erts_alloc_message(0, NULL);
                mp->data.heap_frag = bp;
                hp = bp->mem;
                ohp = &bp->off_heap;
            }
        }
        ERL_MESSAGE_TOKEN(mp) = am_undefined;
        msg = copy_struct_litopt(msg, sz, &hp, ohp, &litarea);
    }

    from = c_p ? c_p->common.id : am_undefined;

    if (!env || !env->tracee) {
        /* This clause is taken when enif_send is called in a nif
           that is not an erl_tracer nif. */

        if (c_p && IS_TRACED_FL(c_p, F_TRACE_SEND)) {
            full_flush_env(env);
            trace_send(c_p, receiver, msg);
            full_cache_env(env);
        }
        if (c_p && scheduler > 0 && copy_sz > ERTS_MSG_COPY_WORDS_PER_REDUCTION) {
            Uint reds = copy_sz / ERTS_MSG_COPY_WORDS_PER_REDUCTION;
            if (reds > CONTEXT_REDS)
                reds = CONTEXT_REDS;
            BUMP_REDS(c_p, (int) reds);
        }
    }
    else {
        /* This clause is taken when the nif is called in the context
           of a traced process. We do not know which locks we have
           so we have to do a try lock and if that fails we enqueue
           the message in a special trace message output queue of the
           tracee */
        ErlTraceMessageQueue *msgq;
        Process *t_p = env->tracee;

        erts_proc_lock(t_p, ERTS_PROC_LOCK_TRACE);

        msgq = t_p->trace_msg_q;

        while (msgq != NULL) {
            if (msgq->receiver == receiver) {
                break;
            }
            msgq = msgq->next;
        }

#ifdef ERTS_ENABLE_LOCK_CHECK
        lc_locks = erts_proc_lc_my_proc_locks(rp);
        rp_locks |= lc_locks;
#endif
        if (ERTS_FORCE_ENIF_SEND_DELAY() || msgq ||
            rp_locks & ERTS_PROC_LOCK_MSGQ ||
            erts_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) {

            ERL_MESSAGE_TERM(mp) = msg;
            ERL_MESSAGE_FROM(mp) = from;
            ERL_MESSAGE_TOKEN(mp) = am_undefined;

            if (!msgq) {
                msgq = erts_alloc(ERTS_ALC_T_TRACE_MSG_QUEUE,
                                  sizeof(ErlTraceMessageQueue));
                msgq->receiver = receiver;
                msgq->first = mp;
                msgq->last = &mp->next;
                msgq->len = 1;

                /* Insert in linked list */
                msgq->next = t_p->trace_msg_q;
                t_p->trace_msg_q = msgq;

                erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);

                erts_schedule_flush_trace_messages(t_p, 0);
            } else {
                msgq->len++;
                *msgq->last = mp;
                msgq->last = &mp->next;
                erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
            }
            goto done;
        } else {
            erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
            rp_locks &= ~ERTS_PROC_LOCK_TRACE;
            rp_locks |= ERTS_PROC_LOCK_MSGQ;
        }
    }

    if (c_p)
        erts_queue_proc_message(c_p, rp, rp_locks, mp, msg);
    else
        erts_queue_message(rp, rp_locks, mp, msg, from);

done:

    if (c_p == rp)
        rp_locks &= ~ERTS_PROC_LOCK_MAIN;
    if (rp_locks & ~lc_locks)
        erts_proc_unlock(rp, rp_locks & ~lc_locks);
    if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
        erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
    if (scheduler <= 0)
        erts_proc_dec_refc(rp);

    return 1;
}
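
/* A minimal, hypothetical usage sketch from the NIF side (not part of the VM):
 * a background thread builds a term in a process independent environment and
 * delivers it with enif_send(). The pid is assumed to have been captured
 * earlier with enif_self() on a scheduler thread.
 *
 *     #include <erl_nif.h>
 *
 *     static void notify(ErlNifPid to)
 *     {
 *         ErlNifEnv *msg_env = enif_alloc_env();
 *         ERL_NIF_TERM msg = enif_make_atom(msg_env, "work_done");
 *         // env argument is NULL since we are not on a scheduler thread
 *         enif_send(NULL, &to, msg_env, msg);
 *         enif_free_env(msg_env);
 *     }
 */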

int
enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port,
                  ErlNifEnv *msg_env, ERL_NIF_TERM msg)
{
    int iflags = (erts_port_synchronous_ops
                  ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
                  : ERTS_PORT_SFLGS_INVALID_LOOKUP);
    int scheduler;
    Process *c_p;
    Port *prt;
    int res;

    if (!env)
        erts_exit(ERTS_ABORT_EXIT, "enif_port_command: env == NULL");

    execution_state(env, &c_p, &scheduler);

    if (!c_p)
        c_p = env->proc;

    if (scheduler > 0)
        prt = erts_port_lookup(to_port->port_id, iflags);
    else {
        if (ERTS_PROC_IS_EXITING(c_p))
            return 0;
        prt = erts_thr_port_lookup(to_port->port_id, iflags);
    }

    if (!prt)
        res = 0;
    else
        res = erts_port_output_async(prt, c_p->common.id, msg);

    if (scheduler <= 0)
        erts_port_dec_refc(prt);

    return res;
}

/*
 *  env must be the caller's environment in a scheduler or NULL in a
 *      non-scheduler thread.
 *  name must be an atom - anything else will just waste time.
 */
static Eterm call_whereis(ErlNifEnv *env, Eterm name)
{
    Process *c_p;
    Eterm res;
    int scheduler;

    execution_state(env, &c_p, &scheduler);
    ASSERT(scheduler || !c_p);

    if (scheduler < 0) {
        /* dirty scheduler */
        if (ERTS_PROC_IS_EXITING(c_p))
            return 0;

        if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
            c_p = NULL; /* as we don't have main lock */
    }


    if (c_p) {
         /* main lock may be released below and c_p->htop updated by others */
        flush_env(env);
    }
    res = erts_whereis_name_to_id(c_p, name);
    if (c_p)
        cache_env(env);

    return res;
}

int enif_whereis_pid(ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPid *pid)
{
    Eterm res;

    if (is_not_atom(name))
        return 0;

    res = call_whereis(env, name);
    /* enif_get_local_ functions check the type */
    return enif_get_local_pid(env, res, pid);
}

int enif_whereis_port(ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPort *port)
{
    Eterm res;

    if (is_not_atom(name))
        return 0;

    res = call_whereis(env, name);
    /* enif_get_local_ functions check the type */
    return enif_get_local_port(env, res, port);
}

ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
{
    return make_copy(dst_env, src_term, NULL);
}

#ifdef DEBUG
static int is_offheap(const ErlOffHeap* oh)
{
    return oh->first != NULL;
}
#endif

ErlNifPid* enif_self(ErlNifEnv* caller_env, ErlNifPid* pid)
{
    if (caller_env->proc->common.id == ERTS_INVALID_PID)
        return NULL;
    pid->pid = caller_env->proc->common.id;
    return pid;
}

int enif_get_local_pid(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPid* pid)
{
    if (is_internal_pid(term)) {
        pid->pid=term;
        return 1;
    }
    return 0;
}

void enif_set_pid_undefined(ErlNifPid* pid)
{
    pid->pid = am_undefined;
}

int enif_is_pid_undefined(const ErlNifPid* pid)
{
    ASSERT(pid->pid == am_undefined || is_internal_pid(pid->pid));
    return pid->pid == am_undefined;
}

int enif_get_local_port(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPort* port)
{
    if (is_internal_port(term)) {
        port->port_id=term;
        return 1;
    }
    return 0;
}

int enif_is_atom(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_atom(term);
}

int enif_is_binary(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_binary(term) && (binary_bitsize(term) % 8 == 0);
}

int enif_is_empty_list(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_nil(term);
}

int enif_is_fun(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_fun(term);
}

int enif_is_pid(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_pid(term);
}

int enif_is_port(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_port(term);
}

int enif_is_ref(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_ref(term);
}

int enif_is_tuple(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_tuple(term);
}

int enif_is_list(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_list(term) || is_nil(term);
}

int enif_is_exception(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return env->exception_thrown && term == THE_NON_VALUE;
}

int enif_is_number(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_number(term);
}

ErlNifTermType enif_term_type(ErlNifEnv* env, ERL_NIF_TERM term) {
    (void)env;

    switch (tag_val_def(term)) {
    case ATOM_DEF:
        return ERL_NIF_TERM_TYPE_ATOM;
    case BINARY_DEF:
        return ERL_NIF_TERM_TYPE_BITSTRING;
    case FLOAT_DEF:
        return ERL_NIF_TERM_TYPE_FLOAT;
    case EXPORT_DEF:
    case FUN_DEF:
        return ERL_NIF_TERM_TYPE_FUN;
    case BIG_DEF:
    case SMALL_DEF:
        return ERL_NIF_TERM_TYPE_INTEGER;
    case LIST_DEF:
    case NIL_DEF:
        return ERL_NIF_TERM_TYPE_LIST;
    case MAP_DEF:
        return ERL_NIF_TERM_TYPE_MAP;
    case EXTERNAL_PID_DEF:
    case PID_DEF:
        return ERL_NIF_TERM_TYPE_PID;
    case EXTERNAL_PORT_DEF:
    case PORT_DEF:
        return ERL_NIF_TERM_TYPE_PORT;
    case EXTERNAL_REF_DEF:
    case REF_DEF:
        return ERL_NIF_TERM_TYPE_REFERENCE;
    case TUPLE_DEF:
        return ERL_NIF_TERM_TYPE_TUPLE;
    default:
        /* tag_val_def() aborts on its own when passed complete garbage, but
         * it's possible that the user has given us garbage that just happens
         * to match something that tag_val_def() accepts but we don't, like
         * binary match contexts. */
        ERTS_INTERNAL_ERROR("Invalid term passed to enif_term_type");
    }
}

static void aligned_binary_dtor(struct enif_tmp_obj_t* obj)
{
    erts_free_aligned_binary_bytes_extra((byte*)obj, obj->allocator);
}

int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
{
    ErtsAlcType_t allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
    union {
        struct enif_tmp_obj_t* tmp;
        byte* raw_ptr;
    }u;

    if (is_binary(bin_term)) {
        ProcBin *pb = (ProcBin*) binary_val(bin_term);
        if (pb->thing_word == HEADER_SUB_BIN) {
            ErlSubBin* sb = (ErlSubBin*) pb;
            pb = (ProcBin*) binary_val(sb->orig);
        }
        if (pb->thing_word == HEADER_PROC_BIN && pb->flags)
            erts_emasculate_writable_binary(pb);
    }
    u.tmp = NULL;
    bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, allocator,
                                                    sizeof(struct enif_tmp_obj_t));
    if (bin->data == NULL) {
        return 0;
    }
    if (u.tmp != NULL) {
        u.tmp->allocator = allocator;
        u.tmp->next = env->tmp_obj_list;
        u.tmp->dtor = &aligned_binary_dtor;
        env->tmp_obj_list = u.tmp;
    }
    bin->size = binary_size(bin_term);
    bin->ref_bin = NULL;
    ADD_READONLY_CHECK(env, bin->data, bin->size);
    return 1;
}

int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
{
    ErlDrvSizeT sz;
    if (is_binary(term)) {
        return enif_inspect_binary(env,term,bin);
    }
    if (is_nil(term)) {
        bin->data = (unsigned char*) &bin->data; /* dummy non-NULL */
        bin->size = 0;
        bin->ref_bin = NULL;
        return 1;
    }
    if (erts_iolist_size(term, &sz)) {
        return 0;
    }

    bin->data = alloc_tmp_obj(env, sz, &tmp_alloc_dtor);
    bin->size = sz;
    bin->ref_bin = NULL;
    erts_iolist_to_buf(term, (char*) bin->data, sz);
    ADD_READONLY_CHECK(env, bin->data, bin->size);
    return 1;
}
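
/* A minimal, hypothetical NIF-side sketch (not part of the VM) showing the
 * intended use of enif_inspect_iolist_as_binary(): the inspected bytes are
 * read-only and only valid until the NIF returns, since any temporary buffer
 * is attached to env->tmp_obj_list and freed by free_tmp_objs().
 *
 *     static ERL_NIF_TERM byte_size_nif(ErlNifEnv *env, int argc,
 *                                       const ERL_NIF_TERM argv[])
 *     {
 *         ErlNifBinary bin;
 *         if (argc != 1 || !enif_inspect_iolist_as_binary(env, argv[0], &bin))
 *             return enif_make_badarg(env);
 *         return enif_make_uint64(env, bin.size);
 *     }
 */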

int enif_alloc_binary(size_t size, ErlNifBinary* bin)
{
    Binary* refbin;

    refbin = erts_bin_drv_alloc_fnf(size); /* BUGBUG: alloc type? */
    if (refbin == NULL) {
        return 0; /* The NIF must take action */
    }

    bin->size = size;
    bin->data = (unsigned char*) refbin->orig_bytes;
    bin->ref_bin = refbin;
    return 1;
}

int enif_realloc_binary(ErlNifBinary* bin, size_t size)
{
    if (bin->ref_bin != NULL) {
        Binary* oldbin;
        Binary* newbin;

        oldbin = (Binary*) bin->ref_bin;
        newbin = (Binary *) erts_bin_realloc_fnf(oldbin, size);
        if (!newbin) {
            return 0;
        }
        bin->ref_bin = newbin;
        bin->data = (unsigned char*) newbin->orig_bytes;
        bin->size = size;
    }
    else {
        unsigned char* old_data = bin->data;
        size_t cpy_sz = (size < bin->size ? size : bin->size);
        enif_alloc_binary(size, bin);
        sys_memcpy(bin->data, old_data, cpy_sz);
    }
    return 1;
}


void enif_release_binary(ErlNifBinary* bin)
{
    if (bin->ref_bin != NULL) {
        Binary* refbin = bin->ref_bin;
        erts_bin_release(refbin);
    }
#ifdef DEBUG
    bin->data = NULL;
    bin->ref_bin = NULL;
#endif
}

unsigned char* enif_make_new_binary(ErlNifEnv* env, size_t size,
                                    ERL_NIF_TERM* termp)
{
    flush_env(env);
    *termp = new_binary(env->proc, NULL, size);
    cache_env(env);
    return binary_bytes(*termp);
}

int enif_term_to_binary(ErlNifEnv *dst_env, ERL_NIF_TERM term,
                        ErlNifBinary *bin)
{
    Uint size;
    byte *bp;
    Binary* refbin;

    switch (erts_encode_ext_size(term, &size)) {
    case ERTS_EXT_SZ_SYSTEM_LIMIT:
        return 0; /* system limit */
    case ERTS_EXT_SZ_YIELD:
        ERTS_INTERNAL_ERROR("Unexpected yield");
    case ERTS_EXT_SZ_OK:
        break;
    }
    if (!enif_alloc_binary(size, bin))
        return 0;

    refbin = bin->ref_bin;

    bp = bin->data;

    erts_encode_ext(term, &bp);

    bin->size = bp - bin->data;
    refbin->orig_size = bin->size;

    ASSERT(bin->data + bin->size == bp);

    return 1;
}

size_t enif_binary_to_term(ErlNifEnv *dst_env,
                           const unsigned char* data,
                           size_t data_sz,
                           ERL_NIF_TERM *term,
                           ErlNifBinaryToTerm opts)
{
    Sint size;
    ErtsHeapFactory factory;
    byte *bp = (byte*) data;
    Uint32 flags = 0;

    switch ((Uint32)opts) {
    case 0: break;
    case ERL_NIF_BIN2TERM_SAFE: flags = ERTS_DIST_EXT_BTT_SAFE; break;
    default: return 0;
    }
    if ((size = erts_decode_ext_size(bp, data_sz)) < 0)
        return 0;

    if (size > 0) {
        flush_env(dst_env);
        erts_factory_proc_prealloc_init(&factory, dst_env->proc, size);
    } else {
        erts_factory_dummy_init(&factory);
    }

    *term = erts_decode_ext(&factory, &bp, flags);

    if (is_non_value(*term)) {
        return 0;
    }
    if (size > 0) {
        erts_factory_close(&factory);
        cache_env(dst_env);
    }

    ASSERT(bp > data);
    return bp - data;
}
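
/* A minimal, hypothetical NIF-side sketch (not part of the VM) that round-trips
 * a term through the external format using the two functions above. Note that
 * enif_binary_to_term() returns the number of bytes read, or 0 on failure.
 *
 *     static ERL_NIF_TERM roundtrip_nif(ErlNifEnv *env, int argc,
 *                                       const ERL_NIF_TERM argv[])
 *     {
 *         ErlNifBinary bin;
 *         ERL_NIF_TERM copy;
 *         if (argc != 1 || !enif_term_to_binary(env, argv[0], &bin))
 *             return enif_make_badarg(env);
 *         if (!enif_binary_to_term(env, bin.data, bin.size, &copy,
 *                                  ERL_NIF_BIN2TERM_SAFE)) {
 *             enif_release_binary(&bin);
 *             return enif_make_badarg(env);
 *         }
 *         enif_release_binary(&bin);
 *         return copy;
 *     }
 */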

int enif_is_identical(Eterm lhs, Eterm rhs)
{
    return EQ(lhs,rhs);
}

int enif_compare(Eterm lhs, Eterm rhs)
{
    Sint result = CMP(lhs,rhs);

    if (result < 0) {
        return -1;
    } else if (result > 0) {
        return 1;
    }

    return result;
}

ErlNifUInt64 enif_hash(ErlNifHash type, Eterm term, ErlNifUInt64 salt)
{
    switch (type) {
        case ERL_NIF_INTERNAL_HASH:
            return make_internal_hash(term, (Uint32) salt);
        case ERL_NIF_PHASH2:
            /* It appears that make_hash2 doesn't always react to seasoning
             * as well as it should. Therefore, let's make it ignore the salt
             * value and declare salted uses of phash2 as unsupported.
             */
            return make_hash2(term) & ((1 << 27) - 1);
        default:
            return 0;
    }
}

int enif_get_tuple(ErlNifEnv* env, Eterm tpl, int* arity, const Eterm** array)
{
    Eterm* ptr;
    if (is_not_tuple(tpl)) {
        return 0;
    }
    ptr = tuple_val(tpl);
    *arity = arityval(*ptr);
    *array = ptr+1;
    return 1;
}

int enif_get_string(ErlNifEnv *env, ERL_NIF_TERM list, char* buf, unsigned len,
                    ErlNifCharEncoding encoding)
{
    Eterm* listptr;
    int n = 0;

    ASSERT(encoding == ERL_NIF_LATIN1);
    if (len < 1) {
        return 0;
    }
    while (is_not_nil(list)) {
        if (is_not_list(list)) {
            buf[n] = '\0';
            return 0;
        }
        listptr = list_val(list);

        if (!is_byte(*listptr)) {
            buf[n] = '\0';
            return 0;
        }
        buf[n++] = unsigned_val(*listptr);
        if (n >= len) {
            buf[n-1] = '\0'; /* truncate */
            return -len;
        }
        list = CDR(listptr);
    }
    buf[n] = '\0';
    return n + 1;
}

Eterm enif_make_binary(ErlNifEnv* env, ErlNifBinary* bin)
{
    Eterm bin_term;

    if (bin->ref_bin != NULL) {
        Binary* binary = bin->ref_bin;

        /* If the binary is smaller than the heap binary limit we'll return a
         * heap binary to reduce the number of small refc binaries in the
         * system. We can't simply release the refc binary right away however;
         * the documentation states that the binary should be considered
         * read-only from this point on, which implies that it should still be
         * readable.
         *
         * We could keep it alive until we return by adding it to the temporary
         * object list, but that requires an off-heap allocation which is
         * potentially quite slow, so we create a dummy ProcBin instead and
         * rely on the next minor GC to get rid of it. */
        if (bin->size <= ERL_ONHEAP_BIN_LIMIT) {
            ErlHeapBin* hb;

            hb = (ErlHeapBin*)alloc_heap(env, heap_bin_size(bin->size));
            hb->thing_word = header_heap_bin(bin->size);
            hb->size = bin->size;

            sys_memcpy(hb->data, bin->data, bin->size);

            erts_build_proc_bin(&MSO(env->proc),
                                alloc_heap(env, PROC_BIN_SIZE),
                                binary);

            bin_term = make_binary(hb);
        } else {
            bin_term = erts_build_proc_bin(&MSO(env->proc),
                                           alloc_heap(env, PROC_BIN_SIZE),
                                           binary);
        }

        /* Our (possibly shared) ownership has been transferred to the term. */
        bin->ref_bin = NULL;
    } else {
        flush_env(env);
        bin_term = new_binary(env->proc, bin->data, bin->size);
        cache_env(env);
    }

    return bin_term;
}
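
/* A minimal, hypothetical NIF-side sketch (not part of the VM) of the usual
 * allocate-fill-hand-over pattern: after enif_make_binary() succeeds, ownership
 * of the refc binary belongs to the returned term, so the NIF must not call
 * enif_release_binary() on it afterwards.
 *
 *     static ERL_NIF_TERM make_zeroes_nif(ErlNifEnv *env, int argc,
 *                                         const ERL_NIF_TERM argv[])
 *     {
 *         unsigned n;
 *         ErlNifBinary bin;
 *         if (argc != 1 || !enif_get_uint(env, argv[0], &n))
 *             return enif_make_badarg(env);
 *         if (!enif_alloc_binary(n, &bin))
 *             return enif_raise_exception(env,
 *                        enif_make_atom(env, "alloc_failed"));
 *         memset(bin.data, 0, bin.size);
 *         return enif_make_binary(env, &bin);
 *     }
 */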

Eterm enif_make_sub_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term,
                           size_t pos, size_t size)
{
    ErlSubBin* sb;
    Eterm orig;
    Uint offset, bit_offset, bit_size;
#ifdef DEBUG
    size_t src_size;

    ASSERT(is_binary(bin_term));
    src_size = binary_size(bin_term);
    ASSERT(pos <= src_size);
    ASSERT(size <= src_size);
    ASSERT(pos + size <= src_size);
#endif
    sb = (ErlSubBin*) alloc_heap(env, ERL_SUB_BIN_SIZE);
    ERTS_GET_REAL_BIN(bin_term, orig, offset, bit_offset, bit_size);
    sb->thing_word = HEADER_SUB_BIN;
    sb->size = size;
    sb->offs = offset + pos;
    sb->orig = orig;
    sb->bitoffs = bit_offset;
    sb->bitsize = 0;
    sb->is_writable = 0;
    return make_binary(sb);
}


Eterm enif_make_badarg(ErlNifEnv* env)
{
    return enif_raise_exception(env, am_badarg);
}

Eterm enif_raise_exception(ErlNifEnv* env, ERL_NIF_TERM reason)
{
    env->exception_thrown = 1;
    env->proc->fvalue = reason;
    BIF_ERROR(env->proc, EXC_ERROR);
}

int enif_has_pending_exception(ErlNifEnv* env, ERL_NIF_TERM* reason)
{
    if (env->exception_thrown && reason != NULL)
        *reason = env->proc->fvalue;
    return env->exception_thrown;
}

int enif_get_atom(ErlNifEnv* env, Eterm atom, char* buf, unsigned len,
                  ErlNifCharEncoding encoding)
{
    Atom* ap;
    ASSERT(encoding == ERL_NIF_LATIN1);
    if (is_not_atom(atom) || len==0) {
        return 0;
    }
    ap = atom_tab(atom_val(atom));

    if (ap->latin1_chars < 0 || ap->latin1_chars >= len) {
        return 0;
    }
    if (ap->latin1_chars == ap->len) {
        sys_memcpy(buf, ap->name, ap->len);
    }
    else {
        int dlen = erts_utf8_to_latin1((byte*)buf, ap->name, ap->len);
        ASSERT(dlen == ap->latin1_chars); (void)dlen;
    }
    buf[ap->latin1_chars] = '\0';
    return ap->latin1_chars + 1;
}

int enif_get_int(ErlNifEnv* env, Eterm term, int* ip)
{
#if SIZEOF_INT ==  ERTS_SIZEOF_ETERM
    return term_to_Sint(term, (Sint*)ip);
#elif (SIZEOF_LONG ==  ERTS_SIZEOF_ETERM) || \
  (SIZEOF_LONG_LONG ==  ERTS_SIZEOF_ETERM)
    Sint i;
    if (!term_to_Sint(term, &i) || i < INT_MIN || i > INT_MAX) {
        return 0;
    }
    *ip = (int) i;
    return 1;
#else
#  error Unknown word size
#endif
}
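
/* A minimal, hypothetical NIF-side sketch (not part of the VM) of the usual
 * argument-decoding idiom built on the getters above: decode each argument,
 * raise badarg on the first failure, and build the result with the
 * corresponding enif_make_* constructor.
 *
 *     static ERL_NIF_TERM add_nif(ErlNifEnv *env, int argc,
 *                                 const ERL_NIF_TERM argv[])
 *     {
 *         int a, b;
 *         if (argc != 2
 *             || !enif_get_int(env, argv[0], &a)
 *             || !enif_get_int(env, argv[1], &b))
 *             return enif_make_badarg(env);
 *         return enif_make_int(env, a + b);
 *     }
 */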
1632 
enif_get_uint(ErlNifEnv * env,Eterm term,unsigned * ip)1633 int enif_get_uint(ErlNifEnv* env, Eterm term, unsigned* ip)
1634 {
1635 #if SIZEOF_INT == ERTS_SIZEOF_ETERM
1636     return term_to_Uint(term, (Uint*)ip);
1637 #elif (SIZEOF_LONG == ERTS_SIZEOF_ETERM) || \
1638   (SIZEOF_LONG_LONG ==  ERTS_SIZEOF_ETERM)
1639     Uint i;
1640     if (!term_to_Uint(term, &i) || i > UINT_MAX) {
1641 	return 0;
1642     }
1643     *ip = (unsigned) i;
1644     return 1;
1645 #endif
1646 }
1647 
enif_get_long(ErlNifEnv * env,Eterm term,long * ip)1648 int enif_get_long(ErlNifEnv* env, Eterm term, long* ip)
1649 {
1650 #if SIZEOF_LONG == ERTS_SIZEOF_ETERM
1651     return term_to_Sint(term, ip);
1652 #elif SIZEOF_LONG == 8
1653     return term_to_Sint64(term, ip);
1654 #elif SIZEOF_LONG == SIZEOF_INT
1655     int tmp,ret;
1656     ret = enif_get_int(env,term,&tmp);
1657     if (ret) {
1658       *ip = (long) tmp;
1659     }
1660     return ret;
1661 #else
1662 #  error Unknown long word size
1663 #endif
1664 }
1665 
1666 int enif_get_ulong(ErlNifEnv* env, Eterm term, unsigned long* ip)
1667 {
1668 #if SIZEOF_LONG == ERTS_SIZEOF_ETERM
1669     return term_to_Uint(term, ip);
1670 #elif SIZEOF_LONG == 8
1671     return term_to_Uint64(term, ip);
1672 #elif SIZEOF_LONG == SIZEOF_INT
1673     int ret;
1674     unsigned int tmp;
1675     ret = enif_get_uint(env,term,&tmp);
1676     if (ret) {
1677       *ip = (unsigned long) tmp;
1678     }
1679     return ret;
1680 #else
1681 #  error Unknown long word size
1682 #endif
1683 }
1684 
1685 #if HAVE_INT64 && SIZEOF_LONG != 8
1686 int enif_get_int64(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifSInt64* ip)
1687 {
1688     return term_to_Sint64(term, ip);
1689 }
1690 
1691 int enif_get_uint64(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifUInt64* ip)
1692 {
1693     return term_to_Uint64(term, ip);
1694 }
1695 #endif /* HAVE_INT64 && SIZEOF_LONG != 8 */
1696 
1697 int enif_get_double(ErlNifEnv* env, ERL_NIF_TERM term, double* dp)
1698 {
1699     FloatDef f;
1700     if (is_not_float(term)) {
1701 	return 0;
1702     }
1703     GET_DOUBLE(term, f);
1704     *dp = f.fd;
1705     return 1;
1706 }
1707 
1708 int enif_get_atom_length(ErlNifEnv* env, Eterm atom, unsigned* len,
1709 			 ErlNifCharEncoding enc)
1710 {
1711     Atom* ap;
1712     ASSERT(enc == ERL_NIF_LATIN1);
1713     if (is_not_atom(atom)) return 0;
1714     ap = atom_tab(atom_val(atom));
1715     if (ap->latin1_chars < 0) {
1716 	return 0;
1717     }
1718     *len = ap->latin1_chars;
1719     return 1;
1720 }
1721 
1722 int enif_get_list_cell(ErlNifEnv* env, Eterm term, Eterm* head, Eterm* tail)
1723 {
1724     Eterm* val;
1725     if (is_not_list(term)) return 0;
1726     val = list_val(term);
1727     *head = CAR(val);
1728     *tail = CDR(val);
1729     return 1;
1730 }
1731 
1732 int enif_get_list_length(ErlNifEnv* env, Eterm term, unsigned* len)
1733 {
1734     Sint i;
1735     Uint u;
1736 
1737     if ((i = erts_list_length(term)) < 0) return 0;
1738     u = (Uint)i;
1739     if ((unsigned)u != u) return 0;
1740     *len = u;
1741     return 1;
1742 }
1743 
1744 ERL_NIF_TERM enif_make_int(ErlNifEnv* env, int i)
1745 {
1746 #if SIZEOF_INT == ERTS_SIZEOF_ETERM
1747     return IS_SSMALL(i) ? make_small(i) : small_to_big(i,alloc_heap(env,2));
1748 #elif (SIZEOF_LONG == ERTS_SIZEOF_ETERM) || \
1749   (SIZEOF_LONG_LONG == ERTS_SIZEOF_ETERM)
1750     return make_small(i);
1751 #endif
1752 }
1753 
1754 ERL_NIF_TERM enif_make_uint(ErlNifEnv* env, unsigned i)
1755 {
1756 #if SIZEOF_INT == ERTS_SIZEOF_ETERM
1757     return IS_USMALL(0,i) ? make_small(i) : uint_to_big(i,alloc_heap(env,2));
1758 #elif (SIZEOF_LONG ==  ERTS_SIZEOF_ETERM) || \
1759   (SIZEOF_LONG_LONG ==  ERTS_SIZEOF_ETERM)
1760     return make_small(i);
1761 #endif
1762 }
1763 
1764 ERL_NIF_TERM enif_make_long(ErlNifEnv* env, long i)
1765 {
1766     if (IS_SSMALL(i)) {
1767 	return make_small(i);
1768     }
1769 #if SIZEOF_LONG == ERTS_SIZEOF_ETERM
1770     return small_to_big(i, alloc_heap(env,2));
1771 #elif SIZEOF_LONG_LONG ==  ERTS_SIZEOF_ETERM
1772     return make_small(i);
1773 #elif SIZEOF_LONG == 8
1774     ensure_heap(env,3);
1775     return erts_sint64_to_big(i, &env->hp);
1776 #endif
1777 }
1778 
1779 ERL_NIF_TERM enif_make_ulong(ErlNifEnv* env, unsigned long i)
1780 {
1781     if (IS_USMALL(0,i)) {
1782 	return make_small(i);
1783     }
1784 #if SIZEOF_LONG == ERTS_SIZEOF_ETERM
1785     return uint_to_big(i,alloc_heap(env,2));
1786 #elif SIZEOF_LONG_LONG ==  ERTS_SIZEOF_ETERM
1787     return make_small(i);
1788 #elif SIZEOF_LONG == 8
1789     ensure_heap(env,3);
1790     return erts_uint64_to_big(i, &env->hp);
1791 #endif
1792 }
1793 
1794 #if HAVE_INT64 && SIZEOF_LONG != 8
1795 ERL_NIF_TERM enif_make_int64(ErlNifEnv* env, ErlNifSInt64 i)
1796 {
1797     Uint* hp;
1798     Uint need = 0;
1799     erts_bld_sint64(NULL, &need, i);
1800     hp = alloc_heap(env, need);
1801     return erts_bld_sint64(&hp, NULL, i);
1802 }
1803 
1804 ERL_NIF_TERM enif_make_uint64(ErlNifEnv* env, ErlNifUInt64 i)
1805 {
1806     Uint* hp;
1807     Uint need = 0;
1808     erts_bld_uint64(NULL, &need, i);
1809     hp = alloc_heap(env, need);
1810     return erts_bld_uint64(&hp, NULL, i);
1811 }
1812 #endif /* HAVE_INT64 && SIZEOF_LONG != 8 */
1813 
1814 ERL_NIF_TERM enif_make_double(ErlNifEnv* env, double d)
1815 {
1816     Eterm* hp;
1817     FloatDef f;
1818 
1819     if (!erts_isfinite(d))
1820         return enif_make_badarg(env);
1821     hp = alloc_heap(env,FLOAT_SIZE_OBJECT);
1822     f.fd = d;
1823     PUT_DOUBLE(f, hp);
1824     return make_float(hp);
1825 }
1826 
1827 ERL_NIF_TERM enif_make_atom(ErlNifEnv* env, const char* name)
1828 {
1829     return enif_make_atom_len(env, name, sys_strlen(name));
1830 }
1831 
1832 ERL_NIF_TERM enif_make_atom_len(ErlNifEnv* env, const char* name, size_t len)
1833 {
1834     if (len > MAX_ATOM_CHARACTERS)
1835         return enif_make_badarg(env);
1836     return erts_atom_put((byte*)name, len, ERTS_ATOM_ENC_LATIN1, 1);
1837 }
1838 
1839 int enif_make_existing_atom(ErlNifEnv* env, const char* name, ERL_NIF_TERM* atom,
1840 			    ErlNifCharEncoding enc)
1841 {
1842     return enif_make_existing_atom_len(env, name, sys_strlen(name), atom, enc);
1843 }
1844 
1845 int enif_make_existing_atom_len(ErlNifEnv* env, const char* name, size_t len,
1846 				ERL_NIF_TERM* atom, ErlNifCharEncoding encoding)
1847 {
1848     ASSERT(encoding == ERL_NIF_LATIN1);
1849     if (len > MAX_ATOM_CHARACTERS)
1850         return 0;
1851     return erts_atom_get(name, len, atom, ERTS_ATOM_ENC_LATIN1);
1852 }
1853 
1854 ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...)
1855 {
1856 #ifdef ERTS_NIF_ASSERT_IN_ENV
1857     int nr = 0;
1858 #endif
1859     Eterm* hp = alloc_heap(env,cnt+1);
1860     Eterm ret = make_tuple(hp);
1861     va_list ap;
1862 
1863     *hp++ = make_arityval(cnt);
1864     va_start(ap,cnt);
1865     while (cnt--) {
1866         Eterm elem = va_arg(ap,Eterm);
1867         ASSERT_IN_ENV(env, elem, ++nr, "tuple");
1868 	*hp++ = elem;
1869     }
1870     va_end(ap);
1871     return ret;
1872 }
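/* Illustrative usage sketch (comment only, not compiled here): building an
 * {ok, Value} result with the fixed-arity wrapper enif_make_tuple2(), which
 * expands to enif_make_tuple(). "value" is an assumed existing term.
 *
 *     ERL_NIF_TERM ok = enif_make_atom(env, "ok");
 *     return enif_make_tuple2(env, ok, value);
 */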
1873 
1874 ERL_NIF_TERM enif_make_tuple_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[], unsigned cnt)
1875 {
1876 #ifdef ERTS_NIF_ASSERT_IN_ENV
1877     int nr = 0;
1878 #endif
1879     Eterm* hp = alloc_heap(env,cnt+1);
1880     Eterm ret = make_tuple(hp);
1881     const Eterm* src = arr;
1882 
1883     *hp++ = make_arityval(cnt);
1884     while (cnt--) {
1885         ASSERT_IN_ENV(env, *src, ++nr, "tuple");
1886 	*hp++ = *src++;
1887     }
1888     return ret;
1889 }
1890 
1891 ERL_NIF_TERM enif_make_list_cell(ErlNifEnv* env, Eterm car, Eterm cdr)
1892 {
1893     Eterm* hp = alloc_heap(env,2);
1894     Eterm ret = make_list(hp);
1895 
1896     ASSERT_IN_ENV(env, car, 0, "head of list cell");
1897     ASSERT_IN_ENV(env, cdr, 0, "tail of list cell");
1898     CAR(hp) = car;
1899     CDR(hp) = cdr;
1900     return ret;
1901 }
1902 
1903 ERL_NIF_TERM enif_make_list(ErlNifEnv* env, unsigned cnt, ...)
1904 {
1905     if (cnt == 0) {
1906 	return NIL;
1907     }
1908     else {
1909 #ifdef ERTS_NIF_ASSERT_IN_ENV
1910         int nr = 0;
1911 #endif
1912 	Eterm* hp = alloc_heap(env,cnt*2);
1913 	Eterm ret = make_list(hp);
1914 	Eterm* last = &ret;
1915 	va_list ap;
1916 
1917 	va_start(ap,cnt);
1918 	while (cnt--) {
1919             Eterm term = va_arg(ap,Eterm);
1920 	    *last = make_list(hp);
1921             ASSERT_IN_ENV(env, term, ++nr, "list");
1922 	    *hp = term;
1923 	    last = ++hp;
1924 	    ++hp;
1925 	}
1926 	va_end(ap);
1927 	*last = NIL;
1928 	return ret;
1929     }
1930 }
1931 
1932 ERL_NIF_TERM enif_make_list_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[], unsigned cnt)
1933 {
1934 #ifdef ERTS_NIF_ASSERT_IN_ENV
1935     int nr = 0;
1936 #endif
1937     Eterm* hp = alloc_heap(env,cnt*2);
1938     Eterm ret = make_list(hp);
1939     Eterm* last = &ret;
1940     const Eterm* src = arr;
1941 
1942     while (cnt--) {
1943         Eterm term = *src++;
1944 	*last = make_list(hp);
1945         ASSERT_IN_ENV(env, term, ++nr, "list");
1946 	*hp = term;
1947 	last = ++hp;
1948 	++hp;
1949     }
1950     *last = NIL;
1951     return ret;
1952 }
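/* Illustrative usage sketch (comment only, not compiled here): turning a C
 * array of terms into an Erlang list. The element count (3) is arbitrary
 * for the example.
 *
 *     ERL_NIF_TERM elems[3];
 *     elems[0] = enif_make_int(env, 1);
 *     elems[1] = enif_make_int(env, 2);
 *     elems[2] = enif_make_int(env, 3);
 *     return enif_make_list_from_array(env, elems, 3);   // [1,2,3]
 */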
1953 
1954 ERL_NIF_TERM enif_make_string(ErlNifEnv* env, const char* string,
1955 			      ErlNifCharEncoding encoding)
1956 {
1957     return enif_make_string_len(env, string, sys_strlen(string), encoding);
1958 }
1959 
1960 ERL_NIF_TERM enif_make_string_len(ErlNifEnv* env, const char* string,
1961 				  size_t len, ErlNifCharEncoding encoding)
1962 {
1963     Eterm* hp = alloc_heap(env,len*2);
1964     ASSERT(encoding == ERL_NIF_LATIN1);
1965     return erts_bld_string_n(&hp,NULL,string,len);
1966 }
1967 
1968 ERL_NIF_TERM enif_make_ref(ErlNifEnv* env)
1969 {
1970     Eterm* hp = alloc_heap(env, ERTS_REF_THING_SIZE);
1971     return erts_make_ref_in_buffer(hp);
1972 }
1973 
1974 void enif_system_info(ErlNifSysInfo *sip, size_t si_size)
1975 {
1976     driver_system_info(sip, si_size);
1977 }
1978 
1979 int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list)
1980 {
1981     Eterm *listptr, ret, *hp;
1982 
1983     ret = NIL;
1984 
1985     while (is_not_nil(term)) {
1986 	if (is_not_list(term)) {
1987 	    return 0;
1988 	}
1989 	hp = alloc_heap(env, 2);
1990 	listptr = list_val(term);
1991 	ret = CONS(hp, CAR(listptr), ret);
1992 	term = CDR(listptr);
1993     }
1994     *list = ret;
1995     return 1;
1996 }
1997 
1998 int enif_is_current_process_alive(ErlNifEnv* env)
1999 {
2000     Process *c_p;
2001     int scheduler;
2002 
2003     execution_state(env, &c_p, &scheduler);
2004 
2005     if (!c_p)
2006 	erts_exit(ERTS_ABORT_EXIT,
2007 		  "enif_is_current_process_alive: "
2008                   "Invalid environment");
2009 
2010     if (!scheduler)
2011 	erts_exit(ERTS_ABORT_EXIT, "enif_is_current_process_alive: "
2012 		  "called from non-scheduler thread");
2013 
2014     return !ERTS_PROC_IS_EXITING(c_p);
2015 }
2016 
2017 int enif_is_process_alive(ErlNifEnv* env, ErlNifPid *proc)
2018 {
2019     int scheduler;
2020 
2021     execution_state(env, NULL, &scheduler);
2022 
2023     if (scheduler > 0)
2024 	return !!erts_proc_lookup(proc->pid);
2025     else {
2026 	Process* rp = erts_pid2proc_opt(NULL, 0, proc->pid, 0,
2027 					ERTS_P2P_FLG_INC_REFC);
2028 	if (rp)
2029 	    erts_proc_dec_refc(rp);
2030 	return !!rp;
2031     }
2032 }
2033 
2034 int enif_is_port_alive(ErlNifEnv *env, ErlNifPort *port)
2035 {
2036     int scheduler;
2037     Uint32 iflags = (erts_port_synchronous_ops
2038 		     ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
2039 		     : ERTS_PORT_SFLGS_INVALID_LOOKUP);
2040 
2041     execution_state(env, NULL, &scheduler);
2042 
2043     if (scheduler > 0)
2044 	return !!erts_port_lookup(port->port_id, iflags);
2045     else {
2046 	Port *prt = erts_thr_port_lookup(port->port_id, iflags);
2047 	if (prt)
2048 	    erts_port_dec_refc(prt);
2049 	return !!prt;
2050     }
2051 }
2052 
2053 ERL_NIF_TERM
2054 enif_now_time(ErlNifEnv *env)
2055 {
2056     Uint mega, sec, micro;
2057     Eterm *hp;
2058     get_now(&mega, &sec, &micro);
2059     hp = alloc_heap(env, 4);
2060     return TUPLE3(hp, make_small(mega), make_small(sec), make_small(micro));
2061 }
2062 
2063 ERL_NIF_TERM
2064 enif_cpu_time(ErlNifEnv *env)
2065 {
2066 #ifdef HAVE_ERTS_NOW_CPU
2067     Uint mega, sec, micro;
2068     Eterm *hp;
2069     erts_get_now_cpu(&mega, &sec, &micro);
2070     hp = alloc_heap(env, 4);
2071     return TUPLE3(hp, make_small(mega), make_small(sec), make_small(micro));
2072 #else
2073     return enif_make_badarg(env);
2074 #endif
2075 }
2076 
2077 ERL_NIF_TERM
2078 enif_make_unique_integer(ErlNifEnv *env, ErlNifUniqueInteger properties)
2079 {
2080     int monotonic = properties & ERL_NIF_UNIQUE_MONOTONIC;
2081     int positive = properties & ERL_NIF_UNIQUE_POSITIVE;
2082     Eterm *hp;
2083     Uint hsz;
2084 
2085     if (monotonic) {
2086         Sint64 raw_unique = erts_raw_get_unique_monotonic_integer();
2087         hsz = erts_raw_unique_monotonic_integer_heap_size(raw_unique, positive);
2088         hp = alloc_heap(env, hsz);
2089         return erts_raw_make_unique_monotonic_integer_value(&hp, raw_unique, positive);
2090     } else {
2091         Uint64 raw_unique[ERTS_UNIQUE_INT_RAW_VALUES];
2092         erts_raw_get_unique_integer(raw_unique);
2093         hsz = erts_raw_unique_integer_heap_size(raw_unique, positive);
2094         hp = alloc_heap(env, hsz);
2095         return erts_raw_make_unique_integer(&hp, raw_unique, positive);
2096     }
2097 }
2098 
2099 ErlNifMutex* enif_mutex_create(char *name) { return erl_drv_mutex_create(name); }
2100 void enif_mutex_destroy(ErlNifMutex *mtx) {  erl_drv_mutex_destroy(mtx); }
2101 int enif_mutex_trylock(ErlNifMutex *mtx) { return erl_drv_mutex_trylock(mtx); }
2102 void enif_mutex_lock(ErlNifMutex *mtx) { erl_drv_mutex_lock(mtx); }
2103 void enif_mutex_unlock(ErlNifMutex *mtx) { erl_drv_mutex_unlock(mtx); }
2104 ErlNifCond* enif_cond_create(char *name) { return erl_drv_cond_create(name); }
2105 void enif_cond_destroy(ErlNifCond *cnd) { erl_drv_cond_destroy(cnd); }
2106 void enif_cond_signal(ErlNifCond *cnd) { erl_drv_cond_signal(cnd); }
2107 void enif_cond_broadcast(ErlNifCond *cnd) { erl_drv_cond_broadcast(cnd); }
2108 void enif_cond_wait(ErlNifCond *cnd, ErlNifMutex *mtx) { erl_drv_cond_wait(cnd,mtx); }
2109 ErlNifRWLock* enif_rwlock_create(char *name) { return erl_drv_rwlock_create(name); }
2110 void enif_rwlock_destroy(ErlNifRWLock *rwlck) { erl_drv_rwlock_destroy(rwlck); }
2111 int enif_rwlock_tryrlock(ErlNifRWLock *rwlck) { return erl_drv_rwlock_tryrlock(rwlck); }
2112 void enif_rwlock_rlock(ErlNifRWLock *rwlck) { erl_drv_rwlock_rlock(rwlck); }
2113 void enif_rwlock_runlock(ErlNifRWLock *rwlck) { erl_drv_rwlock_runlock(rwlck); }
2114 int enif_rwlock_tryrwlock(ErlNifRWLock *rwlck) { return erl_drv_rwlock_tryrwlock(rwlck); }
2115 void enif_rwlock_rwlock(ErlNifRWLock *rwlck) { erl_drv_rwlock_rwlock(rwlck); }
2116 void enif_rwlock_rwunlock(ErlNifRWLock *rwlck) { erl_drv_rwlock_rwunlock(rwlck); }
2117 int enif_tsd_key_create(char *name, ErlNifTSDKey *key) { return erl_drv_tsd_key_create(name,key); }
2118 void enif_tsd_key_destroy(ErlNifTSDKey key) { erl_drv_tsd_key_destroy(key); }
2119 void enif_tsd_set(ErlNifTSDKey key, void *data) { erl_drv_tsd_set(key,data); }
2120 void* enif_tsd_get(ErlNifTSDKey key) { return erl_drv_tsd_get(key); }
2121 ErlNifThreadOpts* enif_thread_opts_create(char *name) { return (ErlNifThreadOpts*) erl_drv_thread_opts_create(name); }
2122 void enif_thread_opts_destroy(ErlNifThreadOpts *opts) { erl_drv_thread_opts_destroy((ErlDrvThreadOpts*)opts); }
2123 int enif_thread_create(char *name, ErlNifTid *tid, void* (*func)(void *),
2124 		       void *args, ErlNifThreadOpts *opts) {
2125     return erl_drv_thread_create(name,tid,func,args,(ErlDrvThreadOpts*)opts);
2126 }
2127 ErlNifTid enif_thread_self(void) { return erl_drv_thread_self(); }
2128 int enif_equal_tids(ErlNifTid tid1, ErlNifTid tid2) { return erl_drv_equal_tids(tid1,tid2); }
2129 void enif_thread_exit(void *resp) { erl_drv_thread_exit(resp); }
2130 int enif_thread_join(ErlNifTid tid, void **respp) { return erl_drv_thread_join(tid,respp); }
2131 
2132 char* enif_mutex_name(ErlNifMutex *mtx) {return erl_drv_mutex_name(mtx); }
2133 char* enif_cond_name(ErlNifCond *cnd) { return erl_drv_cond_name(cnd); }
2134 char* enif_rwlock_name(ErlNifRWLock* rwlck) { return erl_drv_rwlock_name(rwlck); }
2135 char* enif_thread_name(ErlNifTid tid) { return erl_drv_thread_name(tid); }
2136 
2137 int enif_getenv(const char *key, char *value, size_t *value_size) { return erl_drv_getenv(key, value, value_size); }
2138 
2139 ErlNifTime enif_monotonic_time(ErlNifTimeUnit time_unit)
2140 {
2141     return (ErlNifTime) erts_napi_monotonic_time((int) time_unit);
2142 }
2143 
2144 ErlNifTime enif_time_offset(ErlNifTimeUnit time_unit)
2145 {
2146     return (ErlNifTime) erts_napi_time_offset((int) time_unit);
2147 }
2148 
2149 ErlNifTime
2150 enif_convert_time_unit(ErlNifTime val,
2151 		       ErlNifTimeUnit from,
2152 		       ErlNifTimeUnit to)
2153 {
2154     return (ErlNifTime) erts_napi_convert_time_unit((ErtsMonotonicTime) val,
2155 						    (int) from,
2156 						    (int) to);
2157 }
2158 
2159 int enif_fprintf(FILE* filep, const char* format, ...)
2160 {
2161     int ret;
2162     va_list arglist;
2163     va_start(arglist, format);
2164     ret = erts_vfprintf(filep, format, arglist);
2165     va_end(arglist);
2166     return ret;
2167 }
2168 
2169 int enif_vfprintf(FILE* filep, const char *format, va_list ap)
2170 {
2171     return erts_vfprintf(filep, format, ap);
2172 }
2173 
2174 int enif_snprintf(char *buffer, size_t size, const char* format, ...)
2175 {
2176     int ret;
2177     va_list arglist;
2178     va_start(arglist, format);
2179     ret = erts_vsnprintf(buffer, size, format, arglist);
2180     va_end(arglist);
2181     return ret;
2182 }
2183 
2184 int enif_vsnprintf(char* buffer, size_t size, const char *format, va_list ap)
2185 {
2186     return erts_vsnprintf(buffer, size, format, ap);
2187 }
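/* Illustrative usage sketch (comment only, not compiled here): formatting a
 * term into a C buffer with enif_snprintf(). The "%T" directive prints an
 * ERL_NIF_TERM; the buffer size is an assumption for the example.
 *
 *     char buf[256];
 *     enif_snprintf(buf, sizeof(buf), "got term: %T", argv[0]);
 */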
2188 
2189 
2190 /***********************************************************
2191  **       Memory managed (GC'ed) "resource" objects       **
2192  ***********************************************************/
2193 
2194 /* dummy node in circular list */
2195 struct enif_resource_type_t resource_type_list;
2196 
2197 static ErlNifResourceType* find_resource_type(Eterm module, Eterm name)
2198 {
2199     ErlNifResourceType* type;
2200     for (type = resource_type_list.next;
2201 	 type != &resource_type_list;
2202 	 type = type->next) {
2203 
2204 	if (type->module == module && type->name == name) {
2205 	    return type;
2206 	}
2207     }
2208     return NULL;
2209 }
2210 
2211 #define in_area(ptr,start,nbytes) \
2212     ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
2213 
2214 static ERTS_INLINE int rt_have_callbacks(ErlNifResourceType* rt)
2215 {
2216     return rt->dtor != NULL;
2217 }
2218 
2219 static void close_lib(struct erl_module_nif* lib)
2220 {
2221     ASSERT(lib != NULL);
2222     ASSERT(lib->handle != NULL);
2223     ASSERT(erts_refc_read(&lib->rt_dtor_cnt,0) == 0);
2224 
2225     if (lib->entry.unload != NULL) {
2226 	struct enif_msg_environment_t msg_env;
2227         pre_nif_noproc(&msg_env, lib, NULL);
2228 	lib->entry.unload(&msg_env.env, lib->priv_data);
2229         post_nif_noproc(&msg_env);
2230     }
2231     if (!erts_is_static_nif(lib->handle))
2232       erts_sys_ddll_close(lib->handle);
2233     lib->handle = NULL;
2234 }
2235 
2236 static void steal_resource_type(ErlNifResourceType* type)
2237 {
2238     struct erl_module_nif* lib = type->owner;
2239 
2240     if (rt_have_callbacks(type)
2241 	&& erts_refc_dectest(&lib->rt_dtor_cnt, 0) == 0
2242 	&& lib->mod == NULL) {
2243 	/* last type with destructor gone, close orphan lib */
2244 
2245 	close_lib(lib);
2246     }
2247     if (erts_refc_dectest(&lib->rt_cnt, 0) == 0
2248 	&& lib->mod == NULL) {
2249 	erts_free(ERTS_ALC_T_NIF, lib);
2250     }
2251 }
2252 
2253 static void resource_dtor_nop(ErlNifEnv* env, void* obj)
2254 {
2255     /* do nothing */
2256 }
2257 
2258 /* The opened_rt_list is used by enif_open_resource_type()
2259  * in order to roll back "creates" and "take-overs" in case the load fails.
2260  */
2261 struct opened_resource_type
2262 {
2263     struct opened_resource_type* next;
2264 
2265     ErlNifResourceFlags op;
2266     ErlNifResourceType* type;
2267     ErlNifResourceTypeInit new_callbacks;
2268 };
2269 static struct opened_resource_type* opened_rt_list = NULL;
2270 
2271 static
2272 ErlNifResourceType* open_resource_type(ErlNifEnv* env,
2273                                        const char* name_str,
2274                                        const ErlNifResourceTypeInit* init,
2275                                        ErlNifResourceFlags flags,
2276                                        ErlNifResourceFlags* tried,
2277                                        size_t sizeof_init)
2278 {
2279     ErlNifResourceType* type = NULL;
2280     ErlNifResourceFlags op = flags;
2281     Eterm module_am, name_am;
2282 
2283     ASSERT(erts_thr_progress_is_blocking());
2284     module_am = make_atom(env->mod_nif->mod->module);
2285     name_am = enif_make_atom(env, name_str);
2286 
2287     type = find_resource_type(module_am, name_am);
2288     if (type == NULL) {
2289 	if (flags & ERL_NIF_RT_CREATE) {
2290 	    type = erts_alloc(ERTS_ALC_T_NIF,
2291 			      sizeof(struct enif_resource_type_t));
2292 	    type->module = module_am;
2293 	    type->name = name_am;
2294 	    erts_refc_init(&type->refc, 1);
2295 	    op = ERL_NIF_RT_CREATE;
2296 	#ifdef DEBUG
2297 	    type->dtor = (void*)1;
2298 	    type->owner = (void*)2;
2299 	    type->prev = (void*)3;
2300 	    type->next = (void*)4;
2301 	#endif
2302 	}
2303     }
2304     else {
2305 	if (flags & ERL_NIF_RT_TAKEOVER) {
2306 	    op = ERL_NIF_RT_TAKEOVER;
2307 	}
2308 	else {
2309 	    type = NULL;
2310 	}
2311     }
2312     if (type != NULL) {
2313 	struct opened_resource_type* ort = erts_alloc(ERTS_ALC_T_TMP,
2314 						sizeof(struct opened_resource_type));
2315 	ort->op = op;
2316 	ort->type = type;
2317         sys_memzero(&ort->new_callbacks, sizeof(ErlNifResourceTypeInit));
2318         ASSERT(sizeof_init > 0 && sizeof_init <= sizeof(ErlNifResourceTypeInit));
2319         sys_memcpy(&ort->new_callbacks, init, sizeof_init);
2320         if (!ort->new_callbacks.dtor && (ort->new_callbacks.down ||
2321                                          ort->new_callbacks.stop)) {
2322             /* Set dummy dtor for fast rt_have_callbacks()
2323              * This case should be rare anyway */
2324             ort->new_callbacks.dtor = resource_dtor_nop;
2325         }
2326 	ort->next = opened_rt_list;
2327 	opened_rt_list = ort;
2328     }
2329     if (tried != NULL) {
2330 	*tried = op;
2331     }
2332     return type;
2333 }
2334 
2335 ErlNifResourceType*
2336 enif_open_resource_type(ErlNifEnv* env,
2337                         const char* module_str,
2338                         const char* name_str,
2339 			ErlNifResourceDtor* dtor,
2340 			ErlNifResourceFlags flags,
2341 			ErlNifResourceFlags* tried)
2342 {
2343     ErlNifResourceTypeInit init =  {dtor, NULL};
2344     ASSERT(module_str == NULL); /* for now... */
2345     return open_resource_type(env, name_str, &init, flags, tried,
2346                               sizeof(init));
2347 }
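/* Illustrative usage sketch (comment only, not compiled here): how a NIF
 * library would typically open (or take over) a resource type from its
 * load() callback. The type name "my_type", the destructor, and my_rt are
 * assumptions for the example; load() returns 0 on success.
 *
 *     static ErlNifResourceType* my_rt;
 *
 *     static void my_dtor(ErlNifEnv* env, void* obj) { ... }
 *
 *     static int load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
 *     {
 *         my_rt = enif_open_resource_type(env, NULL, "my_type", my_dtor,
 *                                         ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER,
 *                                         NULL);
 *         return my_rt == NULL;
 *     }
 */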
2348 
2349 ErlNifResourceType*
2350 enif_open_resource_type_x(ErlNifEnv* env,
2351                           const char* name_str,
2352                           const ErlNifResourceTypeInit* init,
2353                           ErlNifResourceFlags flags,
2354                           ErlNifResourceFlags* tried)
2355 {
2356     return open_resource_type(env, name_str, init, flags, tried,
2357                               env->mod_nif->entry.sizeof_ErlNifResourceTypeInit);
2358 }
2359 
2360 static void commit_opened_resource_types(struct erl_module_nif* lib)
2361 {
2362     while (opened_rt_list) {
2363 	struct opened_resource_type* ort = opened_rt_list;
2364 
2365 	ErlNifResourceType* type = ort->type;
2366 
2367 	if (ort->op == ERL_NIF_RT_CREATE) {
2368 	    type->prev = &resource_type_list;
2369 	    type->next = resource_type_list.next;
2370 	    type->next->prev = type;
2371 	    type->prev->next = type;
2372 	}
2373 	else { /* ERL_NIF_RT_TAKEOVER */
2374 	    steal_resource_type(type);
2375 	}
2376 
2377 	type->owner = lib;
2378 	type->dtor = ort->new_callbacks.dtor;
2379         type->stop = ort->new_callbacks.stop;
2380         type->down = ort->new_callbacks.down;
2381 
2382 	if (rt_have_callbacks(type)) {
2383 	    erts_refc_inc(&lib->rt_dtor_cnt, 1);
2384 	}
2385 	erts_refc_inc(&lib->rt_cnt, 1);
2386 
2387 	opened_rt_list = ort->next;
2388 	erts_free(ERTS_ALC_T_TMP, ort);
2389     }
2390 }
2391 
2392 static void rollback_opened_resource_types(void)
2393 {
2394     while (opened_rt_list) {
2395 	struct opened_resource_type* ort = opened_rt_list;
2396 
2397 	if (ort->op == ERL_NIF_RT_CREATE) {
2398 	    erts_free(ERTS_ALC_T_NIF, ort->type);
2399 	}
2400 
2401 	opened_rt_list = ort->next;
2402 	erts_free(ERTS_ALC_T_TMP, ort);
2403     }
2404 }
2405 
2406 #ifdef ARCH_64
2407 #  define ERTS_RESOURCE_DYING_FLAG (((Uint) 1) << 63)
2408 #else
2409 #  define ERTS_RESOURCE_DYING_FLAG (((Uint) 1) << 31)
2410 #endif
2411 #define ERTS_RESOURCE_REFC_MASK (~ERTS_RESOURCE_DYING_FLAG)
2412 
2413 static ERTS_INLINE void
2414 rmon_set_dying(ErtsResourceMonitors *rms)
2415 {
2416     rms->refc |= ERTS_RESOURCE_DYING_FLAG;
2417 }
2418 
2419 static ERTS_INLINE int
2420 rmon_is_dying(ErtsResourceMonitors *rms)
2421 {
2422     return !!(rms->refc & ERTS_RESOURCE_DYING_FLAG);
2423 }
2424 
2425 static ERTS_INLINE void
2426 rmon_refc_inc(ErtsResourceMonitors *rms)
2427 {
2428     rms->refc++;
2429 }
2430 
2431 static ERTS_INLINE Uint
2432 rmon_refc_dec_read(ErtsResourceMonitors *rms)
2433 {
2434     Uint res;
2435     ASSERT((rms->refc & ERTS_RESOURCE_REFC_MASK) != 0);
2436     res = --rms->refc;
2437     return res & ERTS_RESOURCE_REFC_MASK;
2438 }
2439 
2440 static ERTS_INLINE void
2441 rmon_refc_dec(ErtsResourceMonitors *rms)
2442 {
2443     ASSERT((rms->refc & ERTS_RESOURCE_REFC_MASK) != 0);
2444     --rms->refc;
2445 }
2446 
2447 static ERTS_INLINE Uint
2448 rmon_refc_read(ErtsResourceMonitors *rms)
2449 {
2450     return rms->refc & ERTS_RESOURCE_REFC_MASK;
2451 }
2452 
2453 static int dtor_demonitor(ErtsMonitor* mon, void* context, Sint reds)
2454 {
2455     ASSERT(erts_monitor_is_origin(mon));
2456     ASSERT(is_internal_pid(mon->other.item));
2457 
2458     erts_proc_sig_send_demonitor(mon);
2459     return 1;
2460 }
2461 
2462 #ifdef DEBUG
2463 int erts_dbg_is_resource_dying(ErtsResource* resource)
2464 {
2465     return resource->monitors && rmon_is_dying(resource->monitors);
2466 }
2467 #endif
2468 
2469 #define NIF_RESOURCE_DTOR &nif_resource_dtor_prologue
2470 
2471 static void run_resource_dtor(void* vbin);
2472 
2473 static int nif_resource_dtor_prologue(Binary* bin)
2474 {
2475     /*
2476      * Schedule the user resource destructor as aux work to get a context
2477      * where we know, for example, which locks we hold.
2478      */
2479     Uint sched_id = erts_get_scheduler_id();
2480     if (!sched_id)
2481         sched_id = 1;
2482     erts_schedule_misc_aux_work(sched_id, run_resource_dtor, bin);
2483     return 0; /* don't free */
2484 }
2485 
2486 static void run_resource_dtor(void* vbin)
2487 {
2488     Binary* bin = (Binary*) vbin;
2489     ErtsResource* resource = (ErtsResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
2490     ErlNifResourceType* type = resource->type;
2491     ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
2492 
2493     if (resource->monitors) {
2494         ErtsResourceMonitors* rm = resource->monitors;
2495         int kill;
2496         ErtsMonitor *root;
2497         Uint refc;
2498 
2499         ASSERT(type->down);
2500         erts_mtx_lock(&rm->lock);
2501         ASSERT(erts_refc_read(&bin->intern.refc, 0) == 0);
2502         kill = !rmon_is_dying(rm);
2503         if (kill) {
2504             rmon_set_dying(rm);
2505             root = rm->root;
2506             rm->root = NULL;
2507         }
2508         refc = rmon_refc_read(rm);
2509         erts_mtx_unlock(&rm->lock);
2510 
2511         if (kill)
2512             erts_monitor_tree_foreach_delete(&root,
2513                                              dtor_demonitor,
2514                                              NULL);
2515 
2516         /*
2517          * If resource->monitors->refc != 0 there are
2518          * outstanding references to the resource from
2519          * monitors that have not been removed yet.
2520          * nif_resource_dtor_prologue() will be called again when this
2521          * reference count reaches zero.
2522          */
2523         if (refc != 0)
2524             return; /* we'll be back... */
2525         erts_mtx_destroy(&rm->lock);
2526     }
2527 
2528     if (type->dtor != NULL) {
2529         struct enif_msg_environment_t msg_env;
2530         pre_nif_noproc(&msg_env, type->owner, NULL);
2531 	type->dtor(&msg_env.env, resource->data);
2532         post_nif_noproc(&msg_env);
2533     }
2534     if (erts_refc_dectest(&type->refc, 0) == 0) {
2535 	ASSERT(type->next == NULL);
2536 	ASSERT(type->owner != NULL);
2537 	ASSERT(type->owner->mod == NULL);
2538 	steal_resource_type(type);
2539 	erts_free(ERTS_ALC_T_NIF, type);
2540     }
2541     erts_magic_binary_free((Binary*)vbin);
2542 }
2543 
2544 void erts_resource_stop(ErtsResource* resource, ErlNifEvent e,
2545                         int is_direct_call)
2546 {
2547     struct enif_msg_environment_t msg_env;
2548     ASSERT(resource->type->stop);
2549     pre_nif_noproc(&msg_env, resource->type->owner, NULL);
2550     resource->type->stop(&msg_env.env, resource->data, e, is_direct_call);
2551     post_nif_noproc(&msg_env);
2552 }
2553 
2554 void erts_nif_demonitored(ErtsResource* resource)
2555 {
2556     ErtsResourceMonitors* rmp = resource->monitors;
2557     ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
2558     int free_me;
2559 
2560     ASSERT(rmp);
2561     ASSERT(resource->type->down);
2562 
2563     erts_mtx_lock(&rmp->lock);
2564     free_me = ((rmon_refc_dec_read(rmp) == 0) & !!rmon_is_dying(rmp));
2565     erts_mtx_unlock(&rmp->lock);
2566 
2567     if (free_me)
2568         erts_bin_free(&bin->binary);
2569 }
2570 
2571 void erts_fire_nif_monitor(ErtsMonitor *tmon)
2572 {
2573     ErtsResource* resource;
2574     ErtsMonitorData *mdp;
2575     ErtsMonitor *omon;
2576     ErtsBinary* bin;
2577     struct enif_msg_environment_t msg_env;
2578     ErlNifPid nif_pid;
2579     ErlNifMonitor nif_monitor;
2580     ErtsResourceMonitors* rmp;
2581     Uint mrefc, brefc;
2582     int active, is_dying;
2583 
2584     ASSERT(tmon->type == ERTS_MON_TYPE_RESOURCE);
2585     ASSERT(erts_monitor_is_target(tmon));
2586 
2587     resource = tmon->other.ptr;
2588     bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
2589     rmp = resource->monitors;
2590 
2591     mdp = erts_monitor_to_data(tmon);
2592     omon = &mdp->origin;
2593 
2594     ASSERT(rmp);
2595     ASSERT(resource->type->down);
2596 
2597     erts_mtx_lock(&rmp->lock);
2598 
2599     mrefc = rmon_refc_dec_read(rmp);
2600     is_dying = rmon_is_dying(rmp);
2601     active = !is_dying && erts_monitor_is_in_table(omon);
2602 
2603     if (active) {
2604         erts_monitor_tree_delete(&rmp->root, omon);
2605         brefc = (Uint) erts_refc_inc_unless(&bin->binary.intern.refc, 0, 0);
2606     }
2607 
2608     erts_mtx_unlock(&rmp->lock);
2609 
2610     if (!active) {
2611         ASSERT(!is_dying || erts_refc_read(&bin->binary.intern.refc, 0) == 0);
2612         if (is_dying && mrefc == 0)
2613             erts_bin_free(&bin->binary);
2614         erts_monitor_release(tmon);
2615     }
2616     else {
2617         if (brefc > 0) {
2618             ASSERT(is_internal_pid(omon->other.item));
2619             erts_ref_to_driver_monitor(mdp->ref, &nif_monitor);
2620             nif_pid.pid = omon->other.item;
2621             pre_nif_noproc(&msg_env, resource->type->owner, NULL);
2622             resource->type->down(&msg_env.env, resource->data, &nif_pid, &nif_monitor);
2623             post_nif_noproc(&msg_env);
2624 
2625             erts_bin_release(&bin->binary);
2626         }
2627 
2628         erts_monitor_release_both(mdp);
2629     }
2630 }
2631 
2632 void* enif_alloc_resource(ErlNifResourceType* type, size_t data_sz)
2633 {
2634     size_t magic_sz = offsetof(ErtsResource,data);
2635     Binary* bin;
2636     ErtsResource* resource;
2637     size_t monitors_offs;
2638 
2639     if (type->down) {
2640         /* Put ErtsResourceMonitors after user data and properly aligned */
2641         monitors_offs = ((data_sz + ERTS_ALLOC_ALIGN_BYTES - 1)
2642                          & ~((size_t)ERTS_ALLOC_ALIGN_BYTES - 1));
2643         magic_sz += monitors_offs + sizeof(ErtsResourceMonitors);
2644     }
2645     else {
2646         ERTS_UNDEF(monitors_offs, 0);
2647         magic_sz += data_sz;
2648     }
2649     bin = erts_create_magic_binary_x(magic_sz, NIF_RESOURCE_DTOR,
2650                                      ERTS_ALC_T_BINARY,
2651                                      1); /* unaligned */
2652     resource = ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
2653 
2654     ASSERT(type->owner && type->next && type->prev); /* not allowed in load/upgrade */
2655     resource->type = type;
2656     erts_refc_inc(&bin->intern.refc, 1);
2657 #ifdef DEBUG
2658     erts_refc_init(&resource->nif_refc, 1);
2659 #endif
2660     erts_refc_inc(&resource->type->refc, 2);
2661     if (type->down) {
2662         resource->monitors = (ErtsResourceMonitors*) (resource->data + monitors_offs);
2663         erts_mtx_init(&resource->monitors->lock, "resource_monitors", NIL,
2664             ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
2665         resource->monitors->root = NULL;
2666         resource->monitors->refc = 0;
2667         resource->monitors->user_data_sz = data_sz;
2668     }
2669     else {
2670         resource->monitors = NULL;
2671     }
2672     return resource->data;
2673 }
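/* Illustrative usage sketch (comment only, not compiled here): the usual
 * create-and-hand-out pattern seen from NIF code. MyState, its fd field and
 * my_rt are assumptions for the example.
 *
 *     MyState* st = enif_alloc_resource(my_rt, sizeof(MyState));
 *     ERL_NIF_TERM term;
 *     st->fd = -1;
 *     term = enif_make_resource(env, st);
 *     enif_release_resource(st);   // term now keeps the resource alive
 *     return term;
 */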
2674 
2675 void enif_release_resource(void* obj)
2676 {
2677     ErtsResource* resource = DATA_TO_RESOURCE(obj);
2678     ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
2679 
2680     ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
2681     ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
2682 #ifdef DEBUG
2683     erts_refc_dec(&resource->nif_refc, 0);
2684 #endif
2685     erts_bin_release(&bin->binary);
2686 }
2687 
2688 void enif_keep_resource(void* obj)
2689 {
2690     ErtsResource* resource = DATA_TO_RESOURCE(obj);
2691     ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
2692 
2693     ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
2694     ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
2695 #ifdef DEBUG
2696     erts_refc_inc(&resource->nif_refc, 1);
2697 #endif
2698     erts_refc_inc(&bin->binary.intern.refc, 2);
2699 }
2700 
2701 Eterm erts_bld_resource_ref(Eterm** hpp, ErlOffHeap* oh, ErtsResource* resource)
2702 {
2703     ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
2704     ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
2705     return erts_mk_magic_ref(hpp, oh, &bin->binary);
2706 }
2707 
2708 ERL_NIF_TERM enif_make_resource(ErlNifEnv* env, void* obj)
2709 {
2710     ErtsResource* resource = DATA_TO_RESOURCE(obj);
2711     ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
2712     Eterm* hp = alloc_heap(env, ERTS_MAGIC_REF_THING_SIZE);
2713     ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
2714     return erts_mk_magic_ref(&hp, &MSO(env->proc), &bin->binary);
2715 }
2716 
2717 ERL_NIF_TERM enif_make_resource_binary(ErlNifEnv* env, void* obj,
2718 				       const void* data, size_t size)
2719 {
2720     ErtsResource* resource = DATA_TO_RESOURCE(obj);
2721     ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
2722     ErlOffHeap *ohp = &MSO(env->proc);
2723     Eterm* hp = alloc_heap(env,PROC_BIN_SIZE);
2724     ProcBin* pb = (ProcBin *) hp;
2725 
2726     pb->thing_word = HEADER_PROC_BIN;
2727     pb->size = size;
2728     pb->next = ohp->first;
2729     ohp->first = (struct erl_off_heap_header*) pb;
2730     pb->val = &bin->binary;
2731     pb->bytes = (byte*) data;
2732     pb->flags = 0;
2733 
2734     OH_OVERHEAD(ohp, size / sizeof(Eterm));
2735     erts_refc_inc(&bin->binary.intern.refc, 1);
2736 
2737     return make_binary(hp);
2738 }
2739 
2740 int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* type,
2741 		      void** objp)
2742 {
2743     Binary* mbin;
2744     ErtsResource* resource;
2745     if (is_internal_magic_ref(term))
2746 	mbin = erts_magic_ref2bin(term);
2747     else {
2748         Eterm *hp;
2749         if (!is_binary(term))
2750             return 0;
2751         hp = binary_val(term);
2752         if (thing_subtag(*hp) != REFC_BINARY_SUBTAG)
2753             return 0;
2754         /*
2755         if (((ProcBin *) hp)->size != 0) {
2756             return 0; / * Or should we allow "resource binaries" as handles? * /
2757         }
2758         */
2759         mbin = ((ProcBin *) hp)->val;
2760         if (!(mbin->intern.flags & BIN_FLAG_MAGIC))
2761             return 0;
2762     }
2763     resource = (ErtsResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(mbin);
2764     if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != NIF_RESOURCE_DTOR
2765 	|| resource->type != type) {
2766 	return 0;
2767     }
2768     *objp = resource->data;
2769     return 1;
2770 }
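/* Illustrative usage sketch (comment only, not compiled here): retrieving
 * the resource pointer back from a term passed as a NIF argument. MyState
 * and my_rt are assumptions for the example.
 *
 *     MyState* st;
 *     if (!enif_get_resource(env, argv[0], my_rt, (void**)&st))
 *         return enif_make_badarg(env);
 *     // st is valid at least for the duration of this NIF call
 */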
2771 
2772 size_t enif_sizeof_resource(void* obj)
2773 {
2774     ErtsResource* resource = DATA_TO_RESOURCE(obj);
2775     if (resource->monitors) {
2776         return resource->monitors->user_data_sz;
2777     }
2778     else {
2779         Binary* bin = &ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource)->binary;
2780         return ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(bin) - offsetof(ErtsResource,data);
2781     }
2782 }
2783 
2784 
2785 void* enif_dlopen(const char* lib,
2786 		  void (*err_handler)(void*,const char*), void* err_arg)
2787 {
2788     ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
2789     void* handle;
2790     void* init_func;
2791     if (erts_sys_ddll_open(lib, &handle, &errdesc) == ERL_DE_NO_ERROR) {
2792 	if (erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) == ERL_DE_NO_ERROR) {
2793 	    erts_sys_ddll_call_nif_init(init_func);
2794 	}
2795     }
2796     else {
2797 	if (err_handler != NULL) {
2798 	    (*err_handler)(err_arg, errdesc.str);
2799 	}
2800 	handle = NULL;
2801     }
2802     erts_sys_ddll_free_error(&errdesc);
2803     return handle;
2804 }
2805 
2806 void* enif_dlsym(void* handle, const char* symbol,
2807 		 void (*err_handler)(void*,const char*), void* err_arg)
2808 {
2809     ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
2810     void* ret;
2811     if (erts_sys_ddll_sym2(handle, symbol, &ret, &errdesc) != ERL_DE_NO_ERROR) {
2812 	if (err_handler != NULL) {
2813 	    (*err_handler)(err_arg, errdesc.str);
2814 	}
2815 	erts_sys_ddll_free_error(&errdesc);
2816 	return NULL;
2817     }
2818     return ret;
2819 }
2820 
2821 int enif_consume_timeslice(ErlNifEnv* env, int percent)
2822 {
2823     Process *proc;
2824     Sint reds;
2825     int sched;
2826 
2827     execution_state(env, &proc, &sched);
2828 
2829     if (sched < 0)
2830         return 0; /* no-op on dirty scheduler */
2831 
2832     ASSERT(is_proc_bound(env) && percent >= 1 && percent <= 100);
2833     if (percent < 1) percent = 1;
2834     else if (percent > 100) percent = 100;
2835 
2836     reds = ((CONTEXT_REDS+99) / 100) * percent;
2837     ASSERT(reds > 0 && reds <= CONTEXT_REDS);
2838     BUMP_REDS(proc, reds);
2839     return ERTS_BIF_REDS_LEFT(proc) == 0;
2840 }
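/* Illustrative usage sketch (comment only, not compiled here) of the
 * cooperative yielding pattern this function supports: do a bounded chunk
 * of work, report roughly how much of the timeslice it used, and reschedule
 * yourself if the slice is exhausted. The chunk size (10%) and the helper
 * do_some_work() are assumptions for the example.
 *
 *     static ERL_NIF_TERM chunked_nif(ErlNifEnv* env, int argc,
 *                                     const ERL_NIF_TERM argv[])
 *     {
 *         do_some_work();                      // hypothetical bounded chunk
 *         if (enif_consume_timeslice(env, 10)) // ~10% of the slice consumed
 *             return enif_schedule_nif(env, "chunked_nif", 0,
 *                                      chunked_nif, argc, argv);
 *         return enif_make_atom(env, "done");
 *     }
 */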
2841 
2842 static ERTS_INLINE void
2843 nif_export_cleanup_nif_mod(NifExport *ep)
2844 {
2845     if (erts_refc_dectest(&ep->m->rt_dtor_cnt, 0) == 0 && ep->m->mod == NULL)
2846 	close_lib(ep->m);
2847     ep->m = NULL;
2848 }
2849 
2850 void
2851 erts_nif_export_cleanup_nif_mod(NifExport *ep)
2852 {
2853     nif_export_cleanup_nif_mod(ep);
2854 }
2855 
2856 static ERTS_INLINE void
2857 nif_export_restore(Process *c_p, NifExport *ep, Eterm res)
2858 {
2859     erts_nif_export_restore(c_p, ep, res);
2860     ASSERT(ep->m);
2861     nif_export_cleanup_nif_mod(ep);
2862 }
2863 
2864 
2865 
2866 /*
2867  * Finalize a dirty NIF call. This function is scheduled to cause the VM to
2868  * switch the process off a dirty scheduler thread and back onto a regular
2869  * scheduler thread, and then return the result from the dirty NIF. It also
2870  * restores the original NIF MFA when necessary based on the value of
2871  * ep->func set by execute_dirty_nif via init_nif_sched_data -- non-NULL
2872  * means restore, NULL means do not restore.
2873  */
2874 static ERL_NIF_TERM
2875 dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
2876 {
2877     Process* proc;
2878     NifExport* ep;
2879 
2880     execution_state(env, &proc, NULL);
2881 
2882     ASSERT(argc == 1);
2883     ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
2884     ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
2885     ASSERT(ep);
2886     nif_export_restore(proc, ep, argv[0]);
2887     return argv[0];
2888 }
2889 
2890 /* Finalize a dirty NIF call that raised an exception.  Otherwise same as
2891  * the dirty_nif_finalizer() function.
2892  */
2893 static ERL_NIF_TERM
2894 dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
2895 {
2896     ERL_NIF_TERM ret;
2897     Process* proc;
2898     NifExport* ep;
2899     Eterm exception;
2900 
2901     execution_state(env, &proc, NULL);
2902 
2903     ASSERT(argc == 1);
2904     ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
2905     ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
2906     ASSERT(ep);
2907     exception = argv[0]; /* argv overwritten by restore below... */
2908     nif_export_cleanup_nif_mod(ep);
2909     ret = enif_raise_exception(env, exception);
2910 
2911     /* Restore orig info for error and clear nif export in handle_error() */
2912     proc->freason |= EXF_RESTORE_NIF;
2913     return ret;
2914 }
2915 
2916 /*
2917  * Dirty NIF scheduling wrapper function. Schedule a dirty NIF to execute.
2918  * The dirty scheduler thread type (CPU or I/O) is indicated in flags
2919  * parameter.
2920  */
2921 static ERTS_INLINE ERL_NIF_TERM
2922 schedule_dirty_nif(ErlNifEnv* env, int flags, NativeFunPtr fp,
2923 		   Eterm func_name, int argc, const ERL_NIF_TERM argv[])
2924 {
2925     Process* proc;
2926 
2927     ASSERT(is_atom(func_name));
2928     ASSERT(fp);
2929 
2930     ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND);
2931 
2932     execution_state(env, &proc, NULL);
2933 
2934     (void) erts_atomic32_read_bset_nob(&proc->state,
2935 					   (ERTS_PSFLG_DIRTY_CPU_PROC
2936 					    | ERTS_PSFLG_DIRTY_IO_PROC),
2937 					   (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND
2938 					    ? ERTS_PSFLG_DIRTY_CPU_PROC
2939 					    : ERTS_PSFLG_DIRTY_IO_PROC));
2940 
2941     return schedule(env, fp, NULL, proc->current->module, func_name, argc, argv);
2942 }
2943 
2944 static ERTS_INLINE ERL_NIF_TERM
2945 static_schedule_dirty_nif(ErlNifEnv* env, erts_aint32_t dirty_psflg,
2946 			     int argc, const ERL_NIF_TERM argv[])
2947 {
2948     Process *proc;
2949     NifExport *ep;
2950     Eterm mod, func;
2951     NativeFunPtr fp;
2952 
2953     execution_state(env, &proc, NULL);
2954 
2955     /*
2956      * Called in order to schedule statically determined
2957      * dirty NIF calls...
2958      *
2959      * Note that 'current' does not point into a NifExport
2960      * structure, only into a structure with similar
2961      * parts (located in code).
2962      */
2963 
2964     ep = ErtsContainerStruct(proc->current, NifExport, exp.info.mfa);
2965     mod = proc->current->module;
2966     func = proc->current->function;
2967     fp = (NativeFunPtr) ep->func;
2968 
2969     ASSERT(is_atom(mod) && is_atom(func));
2970     ASSERT(fp);
2971 
2972     (void) erts_atomic32_read_bset_nob(&proc->state,
2973 					   (ERTS_PSFLG_DIRTY_CPU_PROC
2974 					    | ERTS_PSFLG_DIRTY_IO_PROC),
2975 					   dirty_psflg);
2976 
2977     return schedule(env, fp, NULL, mod, func, argc, argv);
2978 }
2979 
2980 static ERL_NIF_TERM
2981 static_schedule_dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
2982 {
2983     return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_IO_PROC, argc, argv);
2984 }
2985 
2986 static ERL_NIF_TERM
2987 static_schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
2988 {
2989     return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_CPU_PROC, argc, argv);
2990 }
2991 
2992 
2993 /*
2994  * NIF execution wrapper used by enif_schedule_nif() for regular NIFs. It
2995  * calls the actual NIF, restores original NIF MFA if necessary, and
2996  * then returns the NIF result.
2997  */
2998 static ERL_NIF_TERM
2999 execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
3000 {
3001     Process* proc;
3002     NativeFunPtr fp;
3003     NifExport* ep;
3004     ERL_NIF_TERM result;
3005 
3006     execution_state(env, &proc, NULL);
3007 
3008     ep = ErtsContainerStruct(proc->current, NifExport, exp.info.mfa);
3009     fp = ep->func;
3010     ASSERT(ep);
3011     ASSERT(!env->exception_thrown);
3012 
3013     fp = (NativeFunPtr) ep->func;
3014 
3015 #ifdef DEBUG
3016     ep->func = ERTS_DBG_NIF_NOT_SCHED_MARKER;
3017 #endif
3018 
3019     result = (*fp)(env, argc, argv);
3020 
3021     ASSERT(ep == ERTS_PROC_GET_NIF_TRAP_EXPORT(proc));
3022 
3023     if (is_value(result) || proc->freason != TRAP) {
3024 	/* Done (not rescheduled)... */
3025 	ASSERT(ep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER);
3026 	if (!env->exception_thrown)
3027 	    nif_export_restore(proc, ep, result);
3028 	else {
3029 	    nif_export_cleanup_nif_mod(ep);
3030 	    /*
3031 	     * Restore orig info for error and clear nif
3032 	     * export in handle_error()
3033 	     */
3034 	    proc->freason |= EXF_RESTORE_NIF;
3035 	}
3036     }
3037 
3038 #ifdef DEBUG
3039     if (ep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER)
3040 	ep->func = NULL;
3041 #endif
3042 
3043     return result;
3044 }
3045 
3046 ERL_NIF_TERM
3047 enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
3048 		  ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]),
3049 		  int argc, const ERL_NIF_TERM argv[])
3050 {
3051     Process* proc;
3052     ERL_NIF_TERM fun_name_atom, result;
3053     int scheduler;
3054 
3055     if (argc > MAX_ARG)
3056 	return enif_make_badarg(env);
3057     fun_name_atom = enif_make_atom(env, fun_name);
3058     if (enif_is_exception(env, fun_name_atom))
3059 	return fun_name_atom;
3060 
3061     execution_state(env, &proc, &scheduler);
3062     if (scheduler <= 0) {
3063 	if (scheduler == 0)
3064 	    enif_make_badarg(env);
3065 	erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
3066     }
3067 
3068     if (flags == 0)
3069 	result = schedule(env, execute_nif, fp, proc->current->module,
3070 			  fun_name_atom, argc, argv);
3071     else if (!(flags & ~(ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND))) {
3072 	result = schedule_dirty_nif(env, flags, fp, fun_name_atom, argc, argv);
3073     }
3074     else
3075 	result = enif_make_badarg(env);
3076 
3077     if (scheduler < 0)
3078 	erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
3079 
3080     return result;
3081 }
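/* Illustrative usage sketch (comment only, not compiled here): dispatching
 * the heavy part of a call to a dirty CPU scheduler. "heavy_nif" is a
 * hypothetical NIF implementation function.
 *
 *     return enif_schedule_nif(env, "heavy_nif", ERL_NIF_DIRTY_JOB_CPU_BOUND,
 *                              heavy_nif, argc, argv);
 */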
3082 
3083 int
3084 enif_thread_type(void)
3085 {
3086     ErtsSchedulerData *esdp = erts_get_scheduler_data();
3087 
3088     if (!esdp)
3089 	return ERL_NIF_THR_UNDEFINED;
3090 
3091     switch (esdp->type) {
3092     case ERTS_SCHED_NORMAL:
3093 	return ERL_NIF_THR_NORMAL_SCHEDULER;
3094     case ERTS_SCHED_DIRTY_CPU:
3095 	return ERL_NIF_THR_DIRTY_CPU_SCHEDULER;
3096     case ERTS_SCHED_DIRTY_IO:
3097         return ERL_NIF_THR_DIRTY_IO_SCHEDULER;
3098     default:
3099         ERTS_INTERNAL_ERROR("Invalid scheduler type");
3100 	return -1;
3101     }
3102 }
3103 
3104 /* Maps */
3105 
3106 int enif_is_map(ErlNifEnv* env, ERL_NIF_TERM term)
3107 {
3108     return is_map(term);
3109 }
3110 
3111 int enif_get_map_size(ErlNifEnv* env, ERL_NIF_TERM term, size_t *size)
3112 {
3113     if (is_flatmap(term)) {
3114 	flatmap_t *mp;
3115 	mp    = (flatmap_t*)flatmap_val(term);
3116 	*size = flatmap_get_size(mp);
3117 	return 1;
3118     }
3119     else if (is_hashmap(term)) {
3120         *size = hashmap_size(term);
3121         return 1;
3122     }
3123     return 0;
3124 }
3125 
3126 ERL_NIF_TERM enif_make_new_map(ErlNifEnv* env)
3127 {
3128     Eterm* hp = alloc_heap(env,MAP_HEADER_FLATMAP_SZ+1);
3129     Eterm tup;
3130     flatmap_t *mp;
3131 
3132     tup   = make_tuple(hp);
3133     *hp++ = make_arityval(0);
3134     mp    = (flatmap_t*)hp;
3135     mp->thing_word = MAP_HEADER_FLATMAP;
3136     mp->size = 0;
3137     mp->keys = tup;
3138 
3139     return make_flatmap(mp);
3140 }
3141 
3142 int enif_make_map_from_arrays(ErlNifEnv *env,
3143                               ERL_NIF_TERM keys[],
3144                               ERL_NIF_TERM values[],
3145                               size_t cnt,
3146                               ERL_NIF_TERM *map_out)
3147 {
3148     ErtsHeapFactory factory;
3149     int succeeded;
3150 
3151 #ifdef ERTS_NIF_ASSERT_IN_ENV
3152     size_t index = 0;
3153 
3154     while (index < cnt) {
3155         ASSERT_IN_ENV(env, keys[index], index, "key");
3156         ASSERT_IN_ENV(env, values[index], index, "value");
3157         index++;
3158     }
3159 #endif
3160 
3161     flush_env(env);
3162 
3163     erts_factory_proc_prealloc_init(&factory, env->proc,
3164         cnt * 2 + MAP_HEADER_FLATMAP_SZ + 1);
3165 
3166     (*map_out) = erts_map_from_ks_and_vs(&factory, keys, values, cnt);
3167     succeeded = (*map_out) != THE_NON_VALUE;
3168 
3169     if (!succeeded) {
3170         erts_factory_undo(&factory);
3171     }
3172 
3173     erts_factory_close(&factory);
3174 
3175     cache_env(env);
3176 
3177     return succeeded;
3178 }
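/* Illustrative usage sketch (comment only, not compiled here): building
 * #{a => 1, b => 2} from two parallel arrays. The call fails (returns 0)
 * if the same key occurs more than once.
 *
 *     ERL_NIF_TERM ks[2], vs[2], map;
 *     ks[0] = enif_make_atom(env, "a");  vs[0] = enif_make_int(env, 1);
 *     ks[1] = enif_make_atom(env, "b");  vs[1] = enif_make_int(env, 2);
 *     if (!enif_make_map_from_arrays(env, ks, vs, 2, &map))
 *         return enif_make_badarg(env);
 *     return map;
 */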
3179 
3180 int enif_make_map_put(ErlNifEnv* env,
3181 	              Eterm map_in,
3182 		      Eterm key,
3183 		      Eterm value,
3184 		      Eterm *map_out)
3185 {
3186     if (!is_map(map_in)) {
3187 	return 0;
3188     }
3189     ASSERT_IN_ENV(env, map_in, 0, "old map");
3190     ASSERT_IN_ENV(env, key, 0, "key");
3191     ASSERT_IN_ENV(env, value, 0, "value");
3192 
3193     flush_env(env);
3194     *map_out = erts_maps_put(env->proc, key, value, map_in);
3195     cache_env(env);
3196     return 1;
3197 }
3198 
3199 int enif_get_map_value(ErlNifEnv* env,
3200 	               Eterm map,
3201 		       Eterm key,
3202 		       Eterm *value)
3203 {
3204     const Eterm *ret;
3205     if (!is_map(map)) {
3206 	return 0;
3207     }
3208     ret = erts_maps_get(key, map);
3209     if (ret) {
3210         *value = *ret;
3211         return 1;
3212     }
3213     return 0;
3214 }
3215 
3216 int enif_make_map_update(ErlNifEnv* env,
3217 	                 Eterm map_in,
3218 			 Eterm key,
3219 			 Eterm value,
3220 			 Eterm *map_out)
3221 {
3222     int res;
3223     if (!is_map(map_in)) {
3224 	return 0;
3225     }
3226 
3227     ASSERT_IN_ENV(env, map_in, 0, "old map");
3228     ASSERT_IN_ENV(env, key, 0, "key");
3229     ASSERT_IN_ENV(env, value, 0, "value");
3230 
3231     flush_env(env);
3232     res = erts_maps_update(env->proc, key, value, map_in, map_out);
3233     cache_env(env);
3234     return res;
3235 }
3236 
3237 int enif_make_map_remove(ErlNifEnv* env,
3238 	                 Eterm map_in,
3239 			 Eterm key,
3240 			 Eterm *map_out)
3241 {
3242     if (!is_map(map_in)) {
3243 	return 0;
3244     }
3245     flush_env(env);
3246     (void) erts_maps_take(env->proc, key, map_in, map_out, NULL);
3247     cache_env(env);
3248     return 1;
3249 }
3250 
3251 int enif_map_iterator_create(ErlNifEnv *env,
3252 	                     Eterm map,
3253 			     ErlNifMapIterator *iter,
3254 			     ErlNifMapIteratorEntry entry)
3255 {
3256     if (is_flatmap(map)) {
3257 	flatmap_t *mp = (flatmap_t*)flatmap_val(map);
3258 	size_t offset;
3259 
3260 	switch (entry) {
3261 	    case ERL_NIF_MAP_ITERATOR_FIRST: offset = 0; break;
3262 	    case ERL_NIF_MAP_ITERATOR_LAST: offset = flatmap_get_size(mp) - 1; break;
3263 	    default: goto error;
3264 	}
3265 
3266 	/* Empty maps are accepted, but they will leave the iterator
3267 	 * in bad shape.
3268 	 */
3269 
3270 	iter->map     = map;
3271 	iter->u.flat.ks = ((Eterm *)flatmap_get_keys(mp)) + offset;
3272 	iter->u.flat.vs = ((Eterm *)flatmap_get_values(mp)) + offset;
3273 	iter->size    = flatmap_get_size(mp);
3274 	iter->idx     = offset + 1;
3275 
3276 	return 1;
3277     }
3278     else if (is_hashmap(map)) {
3279         iter->map = map;
3280         iter->size = hashmap_size(map);
3281         iter->u.hash.wstack = erts_alloc(ERTS_ALC_T_NIF, sizeof(ErtsDynamicWStack));
3282         WSTACK_INIT(iter->u.hash.wstack, ERTS_ALC_T_NIF);
3283 
3284         switch (entry) {
3285 	    case ERL_NIF_MAP_ITERATOR_FIRST:
3286                 iter->idx = 1;
3287                 hashmap_iterator_init(&iter->u.hash.wstack->ws, map, 0);
3288                 iter->u.hash.kv = hashmap_iterator_next(&iter->u.hash.wstack->ws);
3289                 break;
3290 	    case ERL_NIF_MAP_ITERATOR_LAST:
3291                 iter->idx = hashmap_size(map);
3292                 hashmap_iterator_init(&iter->u.hash.wstack->ws, map, 1);
3293                 iter->u.hash.kv = hashmap_iterator_prev(&iter->u.hash.wstack->ws);
3294                 break;
3295 	    default:
3296                 goto error;
3297 	}
3298         ASSERT(!!iter->u.hash.kv == (iter->idx >= 1 &&
3299                                      iter->idx <= iter->size));
3300         return 1;
3301     }
3302 error:
3303 #ifdef DEBUG
3304     iter->map = THE_NON_VALUE;
3305 #endif
3306     return 0;
3307 }
3308 
3309 void enif_map_iterator_destroy(ErlNifEnv *env, ErlNifMapIterator *iter)
3310 {
3311     if (is_hashmap(iter->map)) {
3312         WSTACK_DESTROY(iter->u.hash.wstack->ws);
3313         erts_free(ERTS_ALC_T_NIF, iter->u.hash.wstack);
3314     }
3315     else
3316         ASSERT(is_flatmap(iter->map));
3317 
3318 #ifdef DEBUG
3319     iter->map = THE_NON_VALUE;
3320 #endif
3321 }
3322 
3323 int enif_map_iterator_is_tail(ErlNifEnv *env, ErlNifMapIterator *iter)
3324 {
3325     ASSERT(iter);
3326     if (is_flatmap(iter->map)) {
3327         ASSERT(iter->idx >= 0);
3328         ASSERT(iter->idx <= flatmap_get_size(flatmap_val(iter->map)) + 1);
3329         return (iter->size == 0 || iter->idx > iter->size);
3330     }
3331     else {
3332         ASSERT(is_hashmap(iter->map));
3333         return iter->idx > iter->size;
3334     }
3335 }
3336 
3337 int enif_map_iterator_is_head(ErlNifEnv *env, ErlNifMapIterator *iter)
3338 {
3339     ASSERT(iter);
3340     if (is_flatmap(iter->map)) {
3341         ASSERT(iter->idx >= 0);
3342         ASSERT(iter->idx <= flatmap_get_size(flatmap_val(iter->map)) + 1);
3343         return (iter->size == 0 || iter->idx == 0);
3344     }
3345     else {
3346         ASSERT(is_hashmap(iter->map));
3347         return iter->idx == 0;
3348     }
3349 }
3350 
3351 
3352 int enif_map_iterator_next(ErlNifEnv *env, ErlNifMapIterator *iter)
3353 {
3354     ASSERT(iter);
3355     if (is_flatmap(iter->map)) {
3356         if (iter->idx <= iter->size) {
3357             iter->idx++;
3358             iter->u.flat.ks++;
3359             iter->u.flat.vs++;
3360         }
3361         return (iter->idx <= iter->size);
3362     }
3363     else {
3364         ASSERT(is_hashmap(iter->map));
3365 
3366         if (iter->idx <= hashmap_size(iter->map)) {
3367             if (iter->idx < 1) {
3368                 hashmap_iterator_init(&iter->u.hash.wstack->ws, iter->map, 0);
3369             }
3370             iter->u.hash.kv = hashmap_iterator_next(&iter->u.hash.wstack->ws);
3371             iter->idx++;
3372             ASSERT(!!iter->u.hash.kv == (iter->idx <= iter->size));
3373         }
3374         return iter->idx <= iter->size;
3375     }
3376 }
3377 
3378 int enif_map_iterator_prev(ErlNifEnv *env, ErlNifMapIterator *iter)
3379 {
3380     ASSERT(iter);
3381     if (is_flatmap(iter->map)) {
3382         if (iter->idx > 0) {
3383             iter->idx--;
3384             iter->u.flat.ks--;
3385             iter->u.flat.vs--;
3386         }
3387         return iter->idx > 0;
3388     }
3389     else {
3390         ASSERT(is_hashmap(iter->map));
3391 
3392         if (iter->idx > 0) {
3393             if (iter->idx > iter->size) {
3394                 hashmap_iterator_init(&iter->u.hash.wstack->ws, iter->map, 1);
3395             }
3396             iter->u.hash.kv = hashmap_iterator_prev(&iter->u.hash.wstack->ws);
3397             iter->idx--;
3398             ASSERT(!!iter->u.hash.kv == (iter->idx > 0));
3399         }
3400         return iter->idx > 0;
3401     }
3402 }
3403 
3404 int enif_map_iterator_get_pair(ErlNifEnv *env,
3405 			       ErlNifMapIterator *iter,
3406 			       Eterm *key,
3407 			       Eterm *value)
3408 {
3409     ASSERT(iter);
3410     if (is_flatmap(iter->map)) {
3411         if (iter->idx > 0 && iter->idx <= iter->size) {
3412             ASSERT(iter->u.flat.ks >= flatmap_get_keys(flatmap_val(iter->map)) &&
3413                    iter->u.flat.ks  < (flatmap_get_keys(flatmap_val(iter->map)) + flatmap_get_size(flatmap_val(iter->map))));
3414             ASSERT(iter->u.flat.vs >= flatmap_get_values(flatmap_val(iter->map)) &&
3415                    iter->u.flat.vs  < (flatmap_get_values(flatmap_val(iter->map)) + flatmap_get_size(flatmap_val(iter->map))));
3416             *key   = *(iter->u.flat.ks);
3417             *value = *(iter->u.flat.vs);
3418             return 1;
3419         }
3420     }
3421     else {
3422         ASSERT(is_hashmap(iter->map));
3423         if (iter->idx > 0 && iter->idx <= iter->size) {
3424             *key   = CAR(iter->u.hash.kv);
3425             *value = CDR(iter->u.hash.kv);
3426             return 1;
3427         }
3428     }
3429     return 0;
3430 }
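/* Illustrative sketch of the map iterator API above (assumes a process bound
 * or process independent environment; do_something is a hypothetical
 * callback). Iteration order over hashmaps is unspecified.
 *
 *   ErlNifMapIterator iter;
 *   ERL_NIF_TERM key, value;
 *   if (enif_map_iterator_create(env, map, &iter, ERL_NIF_MAP_ITERATOR_FIRST)) {
 *       while (enif_map_iterator_get_pair(env, &iter, &key, &value)) {
 *           do_something(key, value);
 *           enif_map_iterator_next(env, &iter);
 *       }
 *       enif_map_iterator_destroy(env, &iter);
 *   }
 */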
3431 
3432 int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid,
3433                          ErlNifMonitor* monitor)
3434 {
3435     ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
3436     Eterm tmp[ERTS_REF_THING_SIZE];
3437     Eterm ref;
3438     ErtsResourceMonitors *rm;
3439     ErtsMonitorData *mdp;
3440 
3441     ASSERT(ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc)->magic_binary.destructor
3442            == NIF_RESOURCE_DTOR);
3443     ASSERT(erts_refc_read(&ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc)->binary.intern.refc, 0) != 0);
3444     ASSERT(!rsrc->monitors == !rsrc->type->down);
3445 
3446     rm = rsrc->monitors;
3447     if (!rm) {
3448         ASSERT(!rsrc->type->down);
3449         return -1;
3450     }
3451     ASSERT(rsrc->type->down);
3452 
3453     if (target_pid->pid == am_undefined)
3454         return 1;
3455 
3456     ref = erts_make_ref_in_buffer(tmp);
3457 
3458     mdp = erts_monitor_create(ERTS_MON_TYPE_RESOURCE, ref,
3459                               (Eterm) rsrc, target_pid->pid, NIL);
3460     erts_mtx_lock(&rm->lock);
3461     ASSERT(!rmon_is_dying(rm));
3462     erts_monitor_tree_insert(&rm->root, &mdp->origin);
3463     rmon_refc_inc(rm);
3464     erts_mtx_unlock(&rm->lock);
3465 
3466     if (!erts_proc_sig_send_monitor(&mdp->target, target_pid->pid)) {
3467         /* Failed to send monitor signal; cleanup... */
3468 #ifdef DEBUG
3469         ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc);
3470 #endif
3471 
3472         erts_mtx_lock(&rm->lock);
3473         ASSERT(!rmon_is_dying(rm));
3474         erts_monitor_tree_delete(&rm->root, &mdp->origin);
3475         rmon_refc_dec(rm);
3476         ASSERT(erts_refc_read(&bin->binary.intern.refc, 1) != 0);
3477         erts_mtx_unlock(&rm->lock);
3478         erts_monitor_release_both(mdp);
3479 
3480         return 1;
3481     }
3482 
3483     if (monitor)
3484         erts_ref_to_driver_monitor(ref,monitor);
3485 
3486     return 0;
3487 }
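/* Illustrative sketch of monitoring from a resource (assumptions: 'rt' is an
 * ErlNifResourceType registered with a down callback, and MyRes with its
 * owner_mon field is hypothetical). As implemented above, the return value is
 * 0 on success, < 0 if the resource type has no down callback, and > 0 if the
 * target process is no longer alive.
 *
 *   MyRes* res = enif_alloc_resource(rt, sizeof(MyRes));
 *   ErlNifPid self;
 *   if (enif_self(env, &self)
 *       && enif_monitor_process(env, res, &self, &res->owner_mon) == 0) {
 *       the down callback will fire when the calling process terminates
 *   } else {
 *       no monitor was created; clean up as appropriate
 *   }
 */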
3488 
3489 ERL_NIF_TERM enif_make_monitor_term(ErlNifEnv* env, const ErlNifMonitor* monitor)
3490 {
3491     Eterm* hp = alloc_heap(env, ERTS_REF_THING_SIZE);
3492     return erts_driver_monitor_to_ref(hp, monitor);
3493 }
3494 
3495 int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monitor)
3496 {
3497     ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
3498 #ifdef DEBUG
3499     ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc);
3500 #endif
3501     ErtsResourceMonitors *rm;
3502     ErtsMonitor *mon;
3503     Eterm ref_heap[ERTS_REF_THING_SIZE];
3504     Eterm ref;
3505 
3506     ASSERT(bin->magic_binary.destructor == NIF_RESOURCE_DTOR);
3507     ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
3508 
3509     ref = erts_driver_monitor_to_ref(ref_heap, monitor);
3510 
3511     rm = rsrc->monitors;
3512     erts_mtx_lock(&rm->lock);
3513     ASSERT(!rmon_is_dying(rm));
3514     mon = erts_monitor_tree_lookup(rm->root, ref);
3515     if (mon)
3516         erts_monitor_tree_delete(&rm->root, mon);
3517     erts_mtx_unlock(&rm->lock);
3518 
3519     if (!mon)
3520         return 1;
3521 
3522     ASSERT(erts_monitor_is_origin(mon));
3523     ASSERT(is_internal_pid(mon->other.item));
3524 
3525     erts_proc_sig_send_demonitor(mon);
3526 
3527     return 0;
3528 }
3529 
3530 int enif_compare_monitors(const ErlNifMonitor *monitor1,
3531                           const ErlNifMonitor *monitor2)
3532 {
3533     return sys_memcmp((void *) monitor1, (void *) monitor2,
3534                       ERTS_REF_THING_SIZE*sizeof(Eterm));
3535 }
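/* Sketch of matching a stored monitor in a down callback (MyRes and my_down
 * are hypothetical). enif_compare_monitors() imposes a total order, so a zero
 * result identifies which monitor fired.
 *
 *   static void my_down(ErlNifEnv* env, void* obj, ErlNifPid* pid,
 *                       ErlNifMonitor* mon)
 *   {
 *       MyRes* res = (MyRes*) obj;
 *       if (enif_compare_monitors(&res->owner_mon, mon) == 0) {
 *           the monitored owner went away; release whatever it guarded
 *       }
 *   }
 */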
3536 
3537 ErlNifIOQueue *enif_ioq_create(ErlNifIOQueueOpts opts)
3538 {
3539     ErlNifIOQueue *q;
3540 
3541     if (opts != ERL_NIF_IOQ_NORMAL)
3542         return NULL;
3543 
3544     q = enif_alloc(sizeof(ErlNifIOQueue));
3545     if (!q) return NULL;
3546     erts_ioq_init(q, ERTS_ALC_T_NIF, 0);
3547 
3548     return q;
3549 }
3550 
3551 void enif_ioq_destroy(ErlNifIOQueue *q)
3552 {
3553     erts_ioq_clear(q);
3554     enif_free(q);
3555 }
3556 
3557 /* If the iovec was preallocated (on the stack or otherwise) it needs to be
3558  * marked as such so that it is freed correctly. */
3559 #define ERL_NIF_IOVEC_FLAGS_PREALLOC (1 << 0)
3560 
3561 void enif_free_iovec(ErlNifIOVec *iov)
3562 {
3563     int i;
3564     /* Decrement the refc of all the binaries */
3565     for (i = 0; i < iov->iovcnt; i++) {
3566         Binary *bptr = ((Binary**)iov->ref_bins)[i];
3567         /* bptr can be null if enq_binary was used */
3568         if (bptr && erts_refc_dectest(&bptr->intern.refc, 0) == 0) {
3569             erts_bin_free(bptr);
3570         }
3571     }
3572 
3573     if (!(iov->flags & ERL_NIF_IOVEC_FLAGS_PREALLOC)) {
3574         enif_free(iov);
3575     }
3576 }
3577 
3578 typedef struct {
3579     UWord sublist_length;
3580     Eterm sublist_start;
3581     Eterm sublist_end;
3582 
3583     UWord referenced_size;
3584     UWord copied_size;
3585 
3586     UWord iovec_len;
3587 } iovec_slice_t;
3588 
3589 static int examine_iovec_term(Eterm list, UWord max_length, iovec_slice_t *result) {
3590     Eterm lookahead;
3591 
3592     result->sublist_start = list;
3593     result->sublist_length = 0;
3594     result->referenced_size = 0;
3595     result->copied_size = 0;
3596     result->iovec_len = 0;
3597 
3598     lookahead = result->sublist_start;
3599 
3600     while (is_list(lookahead)) {
3601         UWord byte_size;
3602         Eterm binary;
3603         Eterm *cell;
3604 
3605         cell = list_val(lookahead);
3606         binary = CAR(cell);
3607 
3608         if (!is_binary(binary)) {
3609             return 0;
3610         }
3611 
3612         byte_size = binary_size(binary);
3613 
3614         if (byte_size > 0) {
3615             int bit_offset, bit_size;
3616             Eterm parent_binary;
3617             UWord byte_offset;
3618 
3619             int requires_copying;
3620 
3621             ERTS_GET_REAL_BIN(binary, parent_binary, byte_offset,
3622                 bit_offset, bit_size);
3623 
3624             (void)byte_offset;
3625 
3626             if (bit_size != 0) {
3627                 return 0;
3628             }
3629 
3630             /* If we're unaligned or an on-heap binary we'll need to copy
3631              * ourselves over to a temporary buffer. */
3632             requires_copying = (bit_offset != 0) ||
3633                 thing_subtag(*binary_val(parent_binary)) == HEAP_BINARY_SUBTAG;
3634 
3635             if (requires_copying) {
3636                 result->copied_size += byte_size;
3637             } else {
3638                 result->referenced_size += byte_size;
3639             }
3640 
3641             result->iovec_len += 1 + byte_size / MAX_SYSIOVEC_IOVLEN;
3642         }
3643 
3644         result->sublist_length += 1;
3645         lookahead = CDR(cell);
3646 
3647         if (result->sublist_length >= max_length) {
3648             break;
3649         }
3650     }
3651 
3652     if (!is_nil(lookahead) && !is_list(lookahead)) {
3653         return 0;
3654     }
3655 
3656     result->sublist_end = lookahead;
3657 
3658     return 1;
3659 }
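/* Note on the iovec_len estimate above: "1 + byte_size / MAX_SYSIOVEC_IOVLEN"
 * is an upper bound on the number of SysIOVec entries a single binary needs,
 * since the fill loop in fill_iovec_with_slice() emits
 * ceil(byte_size / MAX_SYSIOVEC_IOVLEN) entries. With a hypothetical limit of
 * 2 units, a binary of 5 units reserves 1 + 5/2 = 3 entries and also fills 3,
 * while a binary of 4 units reserves 3 but fills only 2. */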
3660 
3661 static void marshal_iovec_binary(Eterm binary, ErlNifBinary *copy_buffer,
3662         UWord *copy_offset, ErlNifBinary *result) {
3663 
3664     Eterm *parent_header;
3665     Eterm parent_binary;
3666 
3667     int bit_offset, bit_size;
3668     Uint byte_offset;
3669 
3670     ASSERT(is_binary(binary));
3671 
3672     ERTS_GET_REAL_BIN(binary, parent_binary, byte_offset, bit_offset, bit_size);
3673 
3674     ASSERT(bit_size == 0);
3675 
3676     parent_header = binary_val(parent_binary);
3677 
3678     result->size = binary_size(binary);
3679 
3680     if (thing_subtag(*parent_header) == REFC_BINARY_SUBTAG) {
3681         ProcBin *pb = (ProcBin*)parent_header;
3682 
3683         if (pb->flags & (PB_IS_WRITABLE | PB_ACTIVE_WRITER)) {
3684             erts_emasculate_writable_binary(pb);
3685         }
3686 
3687         ASSERT(pb->val != NULL);
3688         ASSERT(byte_offset < pb->size);
3689         ASSERT(&pb->bytes[byte_offset] >= (byte*)(pb->val)->orig_bytes);
3690 
3691         result->data = (unsigned char*)&pb->bytes[byte_offset];
3692         result->ref_bin = (void*)pb->val;
3693     } else {
3694         ErlHeapBin *hb = (ErlHeapBin*)parent_header;
3695 
3696         ASSERT(thing_subtag(*parent_header) == HEAP_BINARY_SUBTAG);
3697 
3698         result->data = &((unsigned char*)&hb->data)[byte_offset];
3699         result->ref_bin = NULL;
3700     }
3701 
3702     /* If this isn't an *aligned* refc binary, copy its contents to the buffer
3703      * and reference that instead. */
3704 
3705     if (result->ref_bin == NULL || bit_offset != 0) {
3706         ASSERT(copy_buffer->ref_bin != NULL && copy_buffer->data != NULL);
3707         ASSERT(result->size <= (copy_buffer->size - *copy_offset));
3708 
3709         if (bit_offset == 0) {
3710             sys_memcpy(&copy_buffer->data[*copy_offset],
3711                 result->data, result->size);
3712         } else {
3713             erts_copy_bits(result->data, bit_offset, 1,
3714                 (byte*)&copy_buffer->data[*copy_offset], 0, 1,
3715                 result->size * 8);
3716         }
3717 
3718         result->data = &copy_buffer->data[*copy_offset];
3719         result->ref_bin = copy_buffer->ref_bin;
3720 
3721         *copy_offset += result->size;
3722     }
3723 }
3724 
3725 static int fill_iovec_with_slice(ErlNifEnv *env,
3726                                  iovec_slice_t *slice,
3727                                  ErlNifIOVec *iovec) {
3728     ErlNifBinary copy_buffer = {0};
3729     UWord copy_offset, iovec_idx;
3730     Eterm sublist_iterator;
3731 
3732     /* Set up a common refc binary for all on-heap and unaligned binaries. */
3733     if (slice->copied_size > 0) {
3734         if (!enif_alloc_binary(slice->copied_size, &copy_buffer)) {
3735             return 0;
3736         }
3737 
3738         ASSERT(copy_buffer.ref_bin != NULL);
3739     }
3740 
3741     sublist_iterator = slice->sublist_start;
3742     copy_offset = 0;
3743     iovec_idx = 0;
3744 
3745     while (sublist_iterator != slice->sublist_end) {
3746         ErlNifBinary raw_data;
3747         Eterm *cell;
3748 
3749         cell = list_val(sublist_iterator);
3750         marshal_iovec_binary(CAR(cell), &copy_buffer, &copy_offset, &raw_data);
3751 
3752         while (raw_data.size > 0) {
3753             UWord chunk_len = MIN(raw_data.size, MAX_SYSIOVEC_IOVLEN);
3754 
3755             ASSERT(iovec_idx < iovec->iovcnt);
3756             ASSERT(raw_data.ref_bin != NULL);
3757 
3758             iovec->iov[iovec_idx].iov_base = raw_data.data;
3759             iovec->iov[iovec_idx].iov_len = chunk_len;
3760 
3761             iovec->ref_bins[iovec_idx] = raw_data.ref_bin;
3762 
3763             raw_data.data += chunk_len;
3764             raw_data.size -= chunk_len;
3765 
3766             iovec_idx += 1;
3767         }
3768 
3769         sublist_iterator = CDR(cell);
3770     }
3771 
3772     ASSERT(iovec_idx == iovec->iovcnt);
3773 
3774     if (env == NULL) {
3775         int i;
3776         for (i = 0; i < iovec->iovcnt; i++) {
3777             Binary *refc_binary = (Binary*)(iovec->ref_bins[i]);
3778             erts_refc_inc(&refc_binary->intern.refc, 1);
3779         }
3780 
3781         if (slice->copied_size > 0) {
3782             /* Transfer ownership to the iovec; we've taken references to it in
3783              * the above loop. */
3784             enif_release_binary(&copy_buffer);
3785         }
3786     } else {
3787         if (slice->copied_size > 0) {
3788             /* Attach the binary to our environment and let the next minor GC
3789              * get rid of it. This is slightly faster than using the tmp object
3790              * list since it avoids off-heap allocations. */
3791             erts_build_proc_bin(&MSO(env->proc),
3792                 alloc_heap(env, PROC_BIN_SIZE), copy_buffer.ref_bin);
3793         }
3794     }
3795 
3796     return 1;
3797 }
3798 
3799 static int create_iovec_from_slice(ErlNifEnv *env,
3800                                    iovec_slice_t *slice,
3801                                    ErlNifIOVec **result) {
3802     ErlNifIOVec *iovec = *result;
3803 
3804     if (iovec && slice->iovec_len < ERL_NIF_IOVEC_SIZE) {
3805         iovec->iov = iovec->small_iov;
3806         iovec->ref_bins = iovec->small_ref_bin;
3807         iovec->flags = ERL_NIF_IOVEC_FLAGS_PREALLOC;
3808     } else {
3809         UWord iov_offset, binv_offset, alloc_size;
3810         char *alloc_base;
3811 
3812         iov_offset = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErlNifIOVec));
3813         binv_offset = iov_offset;
3814         binv_offset += ERTS_ALC_DATA_ALIGN_SIZE(slice->iovec_len * sizeof(SysIOVec));
3815         alloc_size = binv_offset;
3816         alloc_size += slice->iovec_len * sizeof(Binary*);
3817 
3818         /* When the user passes an environment, we attach the iovec to it so
3819          * the user won't have to bother managing it (similar to
3820          * enif_inspect_binary). It'll disappear once the environment is
3821          * cleaned up. */
3822         if (env != NULL) {
3823             alloc_base = alloc_tmp_obj(env, alloc_size, &tmp_alloc_dtor);
3824         } else {
3825             alloc_base = erts_alloc(ERTS_ALC_T_NIF, alloc_size);
3826         }
3827 
3828         iovec = (ErlNifIOVec*)alloc_base;
3829         iovec->iov = (SysIOVec*)(alloc_base + iov_offset);
3830         iovec->ref_bins = (void**)(alloc_base + binv_offset);
3831         iovec->flags = 0;
3832     }
3833 
3834     iovec->size = slice->referenced_size + slice->copied_size;
3835     iovec->iovcnt = slice->iovec_len;
3836 
3837     if(!fill_iovec_with_slice(env, slice, iovec)) {
3838         if (env == NULL && !(iovec->flags & ERL_NIF_IOVEC_FLAGS_PREALLOC)) {
3839             erts_free(ERTS_ALC_T_NIF, iovec);
3840         }
3841 
3842         return 0;
3843     }
3844 
3845     *result = iovec;
3846 
3847     return 1;
3848 }
3849 
3850 int enif_inspect_iovec(ErlNifEnv *env, size_t max_elements,
3851                        ERL_NIF_TERM list, ERL_NIF_TERM *tail,
3852                        ErlNifIOVec **iov) {
3853     iovec_slice_t slice;
3854 
3855     if(!examine_iovec_term(list, max_elements, &slice)) {
3856         return 0;
3857     } else if(!create_iovec_from_slice(env, &slice, iov)) {
3858         return 0;
3859     }
3860 
3861     (*tail) = slice.sublist_end;
3862 
3863     return 1;
3864 }
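/* Illustrative NIF-side sketch of enif_inspect_iovec() (write_iolist, fd and
 * the error handling are hypothetical; writev() assumes SysIOVec is layout
 * compatible with struct iovec, which holds on Unix). With a process bound
 * environment the iovec is attached to the environment and reclaimed with it;
 * with a NULL environment the caller owns it and must call enif_free_iovec().
 *
 *   static ERL_NIF_TERM write_iolist(ErlNifEnv* env, int argc,
 *                                    const ERL_NIF_TERM argv[])
 *   {
 *       ErlNifIOVec vec, *iovec = &vec;
 *       ERL_NIF_TERM tail;
 *       if (!enif_inspect_iovec(env, 64, argv[0], &tail, &iovec))
 *           return enif_make_badarg(env);
 *       writev(fd, iovec->iov, iovec->iovcnt);
 *       return tail;        the rest of the list, if more than 64 elements
 *   }
 */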
3865 
3866 /* Add the contents of 'iov' to the queue, skipping the first 'skip' bytes. */
3867 int enif_ioq_enqv(ErlNifIOQueue *q, ErlNifIOVec *iov, size_t skip)
3868 {
3869     if(skip <= iov->size) {
3870         return !erts_ioq_enqv(q, (ErtsIOVec*)iov, skip);
3871     }
3872 
3873     return 0;
3874 }
3875 
3876 int enif_ioq_enq_binary(ErlNifIOQueue *q, ErlNifBinary *bin, size_t skip)
3877 {
3878     ErlNifIOVec vec = {1, bin->size, NULL, NULL, ERL_NIF_IOVEC_FLAGS_PREALLOC };
3879     Binary *ref_bin = (Binary*)bin->ref_bin;
3880     int res;
3881     vec.iov = vec.small_iov;
3882     vec.ref_bins = vec.small_ref_bin;
3883     vec.iov[0].iov_base = bin->data;
3884     vec.iov[0].iov_len = bin->size;
3885     ((Binary**)(vec.ref_bins))[0] = ref_bin;
3886 
3887     res = enif_ioq_enqv(q, &vec, skip);
3888     enif_release_binary(bin);
3889     return res;
3890 }
3891 
3892 size_t enif_ioq_size(ErlNifIOQueue *q)
3893 {
3894     return erts_ioq_size(q);
3895 }
3896 
3897 int enif_ioq_deq(ErlNifIOQueue *q, size_t elems, size_t *size)
3898 {
3899     if (erts_ioq_deq(q, elems) == -1)
3900         return 0;
3901     if (size)
3902         *size = erts_ioq_size(q);
3903     return 1;
3904 }
3905 
3906 int enif_ioq_peek_head(ErlNifEnv *env, ErlNifIOQueue *q, size_t *size, ERL_NIF_TERM *bin_term) {
3907     SysIOVec *iov_entry;
3908     Binary *ref_bin;
3909 
3910     if (q->size == 0) {
3911         return 0;
3912     }
3913 
3914     ASSERT(q->b_head != q->b_tail && q->v_head != q->v_tail);
3915 
3916     ref_bin = &q->b_head[0]->nif;
3917     iov_entry = &q->v_head[0];
3918 
3919     if (size != NULL) {
3920         *size = iov_entry->iov_len;
3921     }
3922 
3923     if (iov_entry->iov_len > ERL_ONHEAP_BIN_LIMIT) {
3924         ProcBin *pb = (ProcBin*)alloc_heap(env, PROC_BIN_SIZE);
3925 
3926         pb->thing_word = HEADER_PROC_BIN;
3927         pb->next = MSO(env->proc).first;
3928         pb->val = ref_bin;
3929         pb->flags = 0;
3930 
3931         ASSERT((byte*)iov_entry->iov_base >= (byte*)ref_bin->orig_bytes);
3932         ASSERT(iov_entry->iov_len <= ref_bin->orig_size);
3933 
3934         pb->bytes = (byte*)iov_entry->iov_base;
3935         pb->size = iov_entry->iov_len;
3936 
3937         MSO(env->proc).first = (struct erl_off_heap_header*) pb;
3938         OH_OVERHEAD(&(MSO(env->proc)), pb->size / sizeof(Eterm));
3939 
3940         erts_refc_inc(&ref_bin->intern.refc, 2);
3941         *bin_term = make_binary(pb);
3942     } else {
3943         ErlHeapBin* hb = (ErlHeapBin*)alloc_heap(env, heap_bin_size(iov_entry->iov_len));
3944 
3945         hb->thing_word = header_heap_bin(iov_entry->iov_len);
3946         hb->size = iov_entry->iov_len;
3947 
3948         sys_memcpy(hb->data, iov_entry->iov_base, iov_entry->iov_len);
3949         *bin_term = make_binary(hb);
3950     }
3951 
3952     return 1;
3953 }
3954 
3955 SysIOVec *enif_ioq_peek(ErlNifIOQueue *q, int *iovlen)
3956 {
3957     return erts_ioq_peekq(q, iovlen);
3958 }
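/* Illustrative sketch of the I/O queue API (assumes 'q' was created with
 * enif_ioq_create(ERL_NIF_IOQ_NORMAL); 'iovec', 'fd' and the partial write
 * handling are hypothetical). A common pattern is to enqueue an inspected
 * iovec and then dequeue exactly as many bytes as a writev() call consumed.
 *
 *   if (!enif_ioq_enqv(q, iovec, 0))
 *       return enif_make_badarg(env);
 *   if (enif_ioq_size(q) > 0) {
 *       int iovcnt;
 *       SysIOVec* iov = enif_ioq_peek(q, &iovcnt);
 *       ssize_t n = writev(fd, iov, iovcnt);
 *       if (n > 0)
 *           enif_ioq_deq(q, n, NULL);    drop the bytes that were written
 *   }
 */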
3959 
3960 /***************************************************************************
3961  **                              load_nif/2                               **
3962  ***************************************************************************/
3963 
3964 
3965 static ErtsCodeInfo** get_func_pp(BeamCodeHeader* mod_code, Eterm f_atom, unsigned arity)
3966 {
3967     int n = (int) mod_code->num_functions;
3968     int j;
3969     for (j = 0; j < n; ++j) {
3970 	ErtsCodeInfo* ci = mod_code->functions[j];
3971 	ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
3972 	if (f_atom == ci->mfa.function
3973 	    && arity == ci->mfa.arity) {
3974 	    return mod_code->functions+j;
3975 	}
3976     }
3977     return NULL;
3978 }
3979 
3980 static Eterm mkatom(const char *str)
3981 {
3982     return am_atom_put(str, sys_strlen(str));
3983 }
3984 
3985 struct tainted_module_t
3986 {
3987     struct tainted_module_t* next;
3988     Eterm module_atom;
3989 };
3990 
3991 erts_atomic_t first_taint; /* struct tainted_module_t* */
3992 
3993 void erts_add_taint(Eterm mod_atom)
3994 {
3995 #ifdef ERTS_ENABLE_LOCK_CHECK
3996     extern erts_rwmtx_t erts_driver_list_lock; /* Mutex for driver list */
3997 #endif
3998     struct tainted_module_t *first, *t;
3999 
4000     ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock)
4001                    || erts_thr_progress_is_blocking());
4002 
4003     first = (struct tainted_module_t*) erts_atomic_read_nob(&first_taint);
4004     for (t=first ; t; t=t->next) {
4005 	if (t->module_atom == mod_atom) {
4006 	    return;
4007 	}
4008     }
4009     t = erts_alloc_fnf(ERTS_ALC_T_TAINT, sizeof(*t));
4010     if (t != NULL) {
4011 	t->module_atom = mod_atom;
4012 	t->next = first;
4013         erts_atomic_set_nob(&first_taint, (erts_aint_t)t);
4014     }
4015 }
4016 
4017 Eterm erts_nif_taints(Process* p)
4018 {
4019     struct tainted_module_t *first, *t;
4020     unsigned cnt = 0;
4021     Eterm list = NIL;
4022     Eterm* hp;
4023 
4024     first = (struct tainted_module_t*) erts_atomic_read_nob(&first_taint);
4025     for (t=first ; t!=NULL; t=t->next) {
4026 	cnt++;
4027     }
4028     hp = HAlloc(p,cnt*2);
4029     for (t=first ; t!=NULL; t=t->next) {
4030 	list = CONS(hp, t->module_atom, list);
4031 	hp += 2;
4032     }
4033     return list;
4034 }
4035 
4036 void erts_print_nif_taints(fmtfn_t to, void* to_arg)
4037 {
4038     struct tainted_module_t *t;
4039     const char* delim = "";
4040 
4041     t = (struct tainted_module_t*) erts_atomic_read_nob(&first_taint);
4042     for ( ; t; t = t->next) {
4043 	const Atom* atom = atom_tab(atom_val(t->module_atom));
4044 	erts_cbprintf(to,to_arg,"%s%.*s", delim, atom->len, atom->name);
4045 	delim = ",";
4046     }
4047     erts_cbprintf(to,to_arg,"\n");
4048 }
4049 
4050 
4051 static Eterm load_nif_error(Process* p, const char* atom, const char* format, ...)
4052 {
4053     erts_dsprintf_buf_t* dsbufp = erts_create_tmp_dsbuf(0);
4054     Eterm ret;
4055     Eterm* hp;
4056     Eterm** hpp = NULL;
4057     Uint sz = 0;
4058     Uint* szp = &sz;
4059     va_list arglist;
4060 
4061     va_start(arglist, format);
4062     erts_vdsprintf(dsbufp, format, arglist);
4063     va_end(arglist);
4064 
4065     for (;;) {
4066 	Eterm txt = erts_bld_string_n(hpp, &sz, dsbufp->str, dsbufp->str_len);
4067 	ret = erts_bld_tuple(hpp, szp, 2, am_error,
4068 			     erts_bld_tuple(hpp, szp, 2, mkatom(atom), txt));
4069 	if (hpp != NULL) {
4070 	    break;
4071 	}
4072 	hp = HAlloc(p,sz);
4073 	hpp = &hp;
4074 	szp = NULL;
4075     }
4076     erts_destroy_tmp_dsbuf(dsbufp);
4077     return ret;
4078 }
4079 
4080 #define AT_LEAST_VERSION(E,MAJ,MIN) \
4081     (((E)->major * 0x100 + (E)->minor) >= ((MAJ) * 0x100 + (MIN)))
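/* Example: a library built against ERL_NIF_VERSION 2.14 packs to
 * 2*0x100 + 14 = 0x20E, so AT_LEAST_VERSION(entry, 2, 7) compares
 * 0x20E >= 0x207 and holds, while AT_LEAST_VERSION(entry, 3, 0) compares
 * 0x20E >= 0x300 and does not. */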
4082 
4083 /*
4084  * Allocate erl_module_nif and make a _modern_ copy of the lib entry.
4085  */
4086 static struct erl_module_nif* create_lib(const ErlNifEntry* src)
4087 {
4088     struct erl_module_nif* lib;
4089     ErlNifEntry* dst;
4090     Uint bytes = offsetof(struct erl_module_nif, _funcs_copy_);
4091 
4092     if (!AT_LEAST_VERSION(src, 2, 7))
4093         bytes += src->num_of_funcs * sizeof(ErlNifFunc);
4094 
4095     lib = erts_alloc(ERTS_ALC_T_NIF, bytes);
4096     dst = &lib->entry;
4097 
4098     sys_memcpy(dst, src, offsetof(ErlNifEntry, vm_variant));
4099 
4100     if (AT_LEAST_VERSION(src, 2, 1)) {
4101         dst->vm_variant = src->vm_variant;
4102     } else {
4103         dst->vm_variant = "beam.vanilla";
4104     }
4105     if (AT_LEAST_VERSION(src, 2, 7)) {
4106         dst->options = src->options;
4107     } else {
4108         /*
4109          * Make a modern copy of the ErlNifFunc array
4110          */
4111         struct ErlNifFunc_V1 {
4112             const char* name;
4113             unsigned arity;
4114             ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
4115         }*src_funcs = (struct ErlNifFunc_V1*) src->funcs;
4116         int i;
4117         for (i = 0; i < src->num_of_funcs; ++i) {
4118             sys_memcpy(&lib->_funcs_copy_[i], &src_funcs[i], sizeof(*src_funcs));
4119             lib->_funcs_copy_[i].flags = 0;
4120         }
4121         dst->funcs = lib->_funcs_copy_;
4122         dst->options = 0;
4123     }
4124     if (AT_LEAST_VERSION(src, 2, 12)) {
4125         dst->sizeof_ErlNifResourceTypeInit = src->sizeof_ErlNifResourceTypeInit;
4126     } else {
4127         dst->sizeof_ErlNifResourceTypeInit = 0;
4128     }
4129     if (AT_LEAST_VERSION(src, 2, 14)) {
4130         dst->min_erts = src->min_erts;
4131     } else {
4132         dst->min_erts = "erts-?";
4133     }
4134     return lib;
4135 }
4136 
4137 
4138 BIF_RETTYPE load_nif_2(BIF_ALIST_2)
4139 {
4140     static const char bad_lib[] = "bad_lib";
4141     static const char upgrade[] = "upgrade";
4142     char* lib_name = NULL;
4143     void* handle = NULL;
4144     void* init_func = NULL;
4145     ErlNifEntry* entry = NULL;
4146     ErlNifEnv env;
4147     int i, err, encoding;
4148     Module* module_p;
4149     Eterm mod_atom;
4150     const Atom* mod_atomp;
4151     Eterm f_atom;
4152     ErtsCodeMFA* caller;
4153     ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
4154     Eterm ret = am_ok;
4155     int veto;
4156     int taint = 1;
4157     struct erl_module_nif* lib = NULL;
4158     struct erl_module_instance* this_mi;
4159     struct erl_module_instance* prev_mi;
4160 
4161     if (BIF_P->flags & F_HIPE_MODE) {
4162 	ret = load_nif_error(BIF_P, "notsup", "Calling load_nif from HiPE compiled "
4163 			     "modules not supported");
4164 	BIF_RET(ret);
4165     }
4166 
4167     encoding = erts_get_native_filename_encoding();
4168     if (encoding == ERL_FILENAME_WIN_WCHAR) {
4169         /* Do not convert the lib name to utf-16le yet, do that in win32 specific code */
4170         /* since lib_name is used in error messages */
4171         encoding = ERL_FILENAME_UTF8;
4172     }
4173     lib_name = erts_convert_filename_to_encoding(BIF_ARG_1, NULL, 0,
4174                                                  ERTS_ALC_T_TMP, 1, 0, encoding,
4175 						 NULL, 0);
4176     if (!lib_name) {
4177 	BIF_ERROR(BIF_P, BADARG);
4178     }
4179 
4180     if (!erts_try_seize_code_write_permission(BIF_P)) {
4181 	erts_free(ERTS_ALC_T_TMP, lib_name);
4182 	ERTS_BIF_YIELD2(bif_export[BIF_load_nif_2],
4183 			BIF_P, BIF_ARG_1, BIF_ARG_2);
4184     }
4185 
4186     /* Block system (is this the right place to do it?) */
4187     erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
4188     erts_thr_progress_block();
4189     erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
4190 
4191     /* Find calling module */
4192     ASSERT(BIF_P->current != NULL);
4193     ASSERT(BIF_P->current->module == am_erlang
4194 	   && BIF_P->current->function == am_load_nif
4195 	   && BIF_P->current->arity == 2);
4196     caller = find_function_from_pc(BIF_P->cp);
4197     ASSERT(caller != NULL);
4198     mod_atom = caller->module;
4199     ASSERT(is_atom(mod_atom));
4200     module_p = erts_get_module(mod_atom, erts_active_code_ix());
4201     ASSERT(module_p != NULL);
4202 
4203     mod_atomp = atom_tab(atom_val(mod_atom));
4204     {
4205         ErtsStaticNifEntry* sne;
4206         sne = erts_static_nif_get_nif_init((char*)mod_atomp->name, mod_atomp->len);
4207         if (sne != NULL) {
4208             init_func = sne->nif_init;
4209             handle = init_func;
4210             taint = sne->taint;
4211         }
4212     }
4213     this_mi = &module_p->curr;
4214     prev_mi = &module_p->old;
4215     if (in_area(caller, module_p->old.code_hdr, module_p->old.code_length)) {
4216 	ret = load_nif_error(BIF_P, "old_code", "Calling load_nif from old "
4217 			     "module '%T' not allowed", mod_atom);
4218 	goto error;
4219     } else if (module_p->on_load) {
4220 	ASSERT(module_p->on_load->code_hdr->on_load_function_ptr);
4221 	if (module_p->curr.code_hdr) {
4222 	    prev_mi = &module_p->curr;
4223 	} else {
4224 	    prev_mi = &module_p->old;
4225 	}
4226 	this_mi = module_p->on_load;
4227     }
4228 
4229     if (this_mi->nif != NULL) {
4230         ret = load_nif_error(BIF_P,"reload","NIF library already loaded"
4231                              " (reload disallowed since OTP 20).");
4232     }
4233     else if (init_func == NULL &&
4234              (err=erts_sys_ddll_open(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) {
4235 	const char slogan[] = "Failed to load NIF library";
4236 	if (strstr(errdesc.str, lib_name) != NULL) {
4237 	    ret = load_nif_error(BIF_P, "load_failed", "%s: '%s'", slogan, errdesc.str);
4238 	}
4239 	else {
4240 	    ret = load_nif_error(BIF_P, "load_failed", "%s %s: '%s'", slogan, lib_name, errdesc.str);
4241 	}
4242     }
4243     else if (init_func == NULL &&
4244 	     erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) != ERL_DE_NO_ERROR) {
4245 	ret  = load_nif_error(BIF_P, bad_lib, "Failed to find library init"
4246 			      " function: '%s'", errdesc.str);
4247 
4248     }
4249     else if ((taint ? erts_add_taint(mod_atom) : 0,
4250 	      (entry = erts_sys_ddll_call_nif_init(init_func)) == NULL)) {
4251 	ret = load_nif_error(BIF_P, bad_lib, "Library init-call unsuccessful");
4252     }
4253     else if (entry->major > ERL_NIF_MAJOR_VERSION
4254              || (entry->major == ERL_NIF_MAJOR_VERSION
4255                  && entry->minor > ERL_NIF_MINOR_VERSION)) {
4256         char* fmt = "That '%T' NIF library needs %s or newer. Either try to"
4257             " recompile the NIF lib or use a newer erts runtime.";
4258         ret = load_nif_error(BIF_P, bad_lib, fmt, mod_atom, entry->min_erts);
4259     }
4260     else if (entry->major < ERL_NIF_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD
4261 	     || (entry->major==2 && entry->minor == 5)) { /* experimental maps */
4262 
4263         char* fmt = "That old NIF library (%d.%d) is not compatible with this "
4264             "erts runtime (%d.%d). Try recompile the NIF lib.";
4265         ret = load_nif_error(BIF_P, bad_lib, fmt, entry->major, entry->minor,
4266                              ERL_NIF_MAJOR_VERSION, ERL_NIF_MINOR_VERSION);
4267     }
4268     else if (AT_LEAST_VERSION(entry, 2, 1)
4269 	     && sys_strcmp(entry->vm_variant, ERL_NIF_VM_VARIANT) != 0) {
4270 	ret = load_nif_error(BIF_P, bad_lib, "Library (%s) not compiled for "
4271 			     "this vm variant (%s).",
4272 			     entry->vm_variant, ERL_NIF_VM_VARIANT);
4273     }
4274     else if (!erts_is_atom_str((char*)entry->name, mod_atom, 1)) {
4275 	ret = load_nif_error(BIF_P, bad_lib, "Library module name '%s' does not"
4276 			     " match calling module '%T'", entry->name, mod_atom);
4277     }
4278     else {
4279         lib = create_lib(entry);
4280         entry = &lib->entry; /* Use a guaranteed modern lib entry from now on */
4281 
4282         lib->handle = handle;
4283         erts_refc_init(&lib->rt_cnt, 0);
4284         erts_refc_init(&lib->rt_dtor_cnt, 0);
4285         ASSERT(opened_rt_list == NULL);
4286         lib->mod = module_p;
4287 
4288         for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
4289 	    ErtsCodeInfo** ci_pp;
4290             ErlNifFunc* f = &entry->funcs[i];
4291 
4292 	    if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1)
4293 		|| (ci_pp = get_func_pp(this_mi->code_hdr, f_atom, f->arity))==NULL) {
4294 		ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u",
4295 				     mod_atom, f->name, f->arity);
4296 	    }
4297 	    else if (f->flags != 0 &&
4298                      f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND &&
4299                      f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND) {
4300                 ret = load_nif_error(BIF_P, bad_lib,
4301                                      "Illegal flags field value %d for NIF %T:%s/%u",
4302                                      f->flags, mod_atom, f->name, f->arity);
4303 	    }
4304 	    else if (erts_codeinfo_to_code(ci_pp[1]) - erts_codeinfo_to_code(ci_pp[0])
4305                      < BEAM_NIF_MIN_FUNC_SZ)
4306 	    {
4307 		ret = load_nif_error(BIF_P,bad_lib,"No explicit call to load_nif"
4308 				     " in module (%T:%s/%u too small)",
4309 				     mod_atom, f->name, f->arity);
4310 	    }
4311 	    /*erts_fprintf(stderr, "Found NIF %T:%s/%u\r\n",
4312 	      mod_atom, f->name, f->arity);*/
4313 	}
4314     }
4315 
4316     if (ret != am_ok) {
4317 	goto error;
4318     }
4319 
4320     /* Call load or upgrade:
4321      */
4322 
4323     env.mod_nif = lib;
4324 
4325     lib->priv_data = NULL;
4326     if (prev_mi->nif != NULL) { /**************** Upgrade ***************/
4327         void* prev_old_data = prev_mi->nif->priv_data;
4328         if (entry->upgrade == NULL) {
4329             ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
4330             goto error;
4331         }
4332         erts_pre_nif(&env, BIF_P, lib, NULL);
4333         veto = entry->upgrade(&env, &lib->priv_data, &prev_mi->nif->priv_data, BIF_ARG_2);
4334         erts_post_nif(&env);
4335         if (veto) {
4336             prev_mi->nif->priv_data = prev_old_data;
4337             ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful (%d).", veto);
4338         }
4339     }
4340     else if (entry->load != NULL) { /********* Initial load ***********/
4341         erts_pre_nif(&env, BIF_P, lib, NULL);
4342         veto = entry->load(&env, &lib->priv_data, BIF_ARG_2);
4343         erts_post_nif(&env);
4344         if (veto) {
4345             ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful (%d).", veto);
4346         }
4347     }
4348     if (ret == am_ok) {
4349         commit_opened_resource_types(lib);
4350 
4351 	/*
4352 	** Everything ok, patch the beam code with op_call_nif
4353 	*/
4354 
4355 	this_mi->nif = lib;
4356 	for (i=0; i < entry->num_of_funcs; i++)
4357 	{
4358             ErlNifFunc* f = &entry->funcs[i];
4359 	    ErtsCodeInfo* ci;
4360             BeamInstr *code_ptr;
4361 
4362 	    erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1);
4363 	    ci = *get_func_pp(this_mi->code_hdr, f_atom, f->arity);
4364             code_ptr = erts_codeinfo_to_code(ci);
4365 
4366 	    if (ci->u.gen_bp == NULL) {
4367 		code_ptr[0] = BeamOpCodeAddr(op_call_nif);
4368 	    }
4369 	    else { /* Function traced, patch the original instruction word */
4370 		GenericBp* g = ci->u.gen_bp;
4371 		ASSERT(BeamIsOpCode(code_ptr[0], op_i_generic_breakpoint));
4372 		g->orig_instr = BeamOpCodeAddr(op_call_nif);
4373 	    }
4374 	    if (f->flags) {
4375 		code_ptr[3] = (BeamInstr) f->fptr;
4376 		code_ptr[1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
4377 		    (BeamInstr) static_schedule_dirty_io_nif :
4378 		    (BeamInstr) static_schedule_dirty_cpu_nif;
4379 	    }
4380 	    else
4381 		code_ptr[1] = (BeamInstr) f->fptr;
4382 	    code_ptr[2] = (BeamInstr) lib;
4383 	}
4384     }
4385     else {
4386     error:
4387 	rollback_opened_resource_types();
4388 	ASSERT(ret != am_ok);
4389         if (lib != NULL) {
4390 	    erts_free(ERTS_ALC_T_NIF, lib);
4391 	}
4392 	if (handle != NULL && !erts_is_static_nif(handle)) {
4393 	    erts_sys_ddll_close(handle);
4394 	}
4395 	erts_sys_ddll_free_error(&errdesc);
4396     }
4397 
4398     erts_thr_progress_unblock();
4399     erts_release_code_write_permission();
4400     erts_free(ERTS_ALC_T_TMP, lib_name);
4401 
4402     BIF_RET(ret);
4403 }
4404 
4405 
4406 void
4407 erts_unload_nif(struct erl_module_nif* lib)
4408 {
4409     ErlNifResourceType* rt;
4410     ErlNifResourceType* next;
4411     ASSERT(erts_thr_progress_is_blocking());
4412     ASSERT(lib != NULL);
4413     ASSERT(lib->mod != NULL);
4414 
4415     erts_tracer_nif_clear();
4416 
4417     for (rt = resource_type_list.next;
4418 	 rt != &resource_type_list;
4419 	 rt = next) {
4420 
4421 	next = rt->next;
4422 	if (rt->owner == lib) {
4423 	    rt->next->prev = rt->prev;
4424 	    rt->prev->next = rt->next;
4425 	    rt->next = NULL;
4426 	    rt->prev = NULL;
4427 	    if (erts_refc_dectest(&rt->refc, 0) == 0) {
4428 		if (rt_have_callbacks(rt)) {
4429 		    erts_refc_dec(&lib->rt_dtor_cnt, 0);
4430 		}
4431 		erts_refc_dec(&lib->rt_cnt, 0);
4432 		erts_free(ERTS_ALC_T_NIF, rt);
4433 	    }
4434 	}
4435     }
4436     if (erts_refc_read(&lib->rt_dtor_cnt, 0) == 0) {
4437 	close_lib(lib);
4438 	if (erts_refc_read(&lib->rt_cnt, 0) == 0) {
4439 	    erts_free(ERTS_ALC_T_NIF, lib);
4440 	    return;
4441 	}
4442     }
4443     else {
4444 	ASSERT(erts_refc_read(&lib->rt_cnt, 1) > 0);
4445     }
4446     lib->mod = NULL;   /* orphan lib */
4447 }
4448 
4449 void erl_nif_init()
4450 {
4451     ERTS_CT_ASSERT((offsetof(ErtsResource,data) % 8)
4452                    == ERTS_MAGIC_BIN_BYTES_TO_ALIGN);
4453 
4454     resource_type_list.next = &resource_type_list;
4455     resource_type_list.prev = &resource_type_list;
4456     resource_type_list.dtor = NULL;
4457     resource_type_list.owner = NULL;
4458     resource_type_list.module = THE_NON_VALUE;
4459     resource_type_list.name = THE_NON_VALUE;
4460 
4461 }
4462 
4463 int erts_nif_get_funcs(struct erl_module_nif* mod,
4464                        ErlNifFunc **funcs)
4465 {
4466     *funcs = mod->entry.funcs;
4467     return mod->entry.num_of_funcs;
4468 }
4469 
4470 Module *erts_nif_get_module(struct erl_module_nif *nif_mod) {
4471     return nif_mod->mod;
4472 }
4473 
4474 Eterm erts_nif_call_function(Process *p, Process *tracee,
4475                              struct erl_module_nif* mod,
4476                              ErlNifFunc *fun, int argc, Eterm *argv)
4477 {
4478     Eterm nif_result;
4479 #ifdef DEBUG
4480     /* Verify that function is part of this module */
4481     int i;
4482     for (i = 0; i < mod->entry.num_of_funcs; i++)
4483         if (fun == &(mod->entry.funcs[i]))
4484             break;
4485     ASSERT(i < mod->entry.num_of_funcs);
4486     if (p)
4487         ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN
4488                            || erts_thr_progress_is_blocking());
4489 #endif
4490     if (p) {
4491         /* This is almost a normal nif call like in beam_emu,
4492            except that any heap consumed by the nif will be
4493            released without checking if anything in it is live.
4494            This is because we cannot do a GC here as we don't know
4495            the number of live registers that have to be preserved.
4496            This means that any heap part of the returned term may
4497            not be used outside this function. */
4498         struct enif_environment_t env;
4499         ErlHeapFragment *orig_hf = MBUF(p);
4500         ErlOffHeap orig_oh = MSO(p);
4501         Eterm *orig_htop = HEAP_TOP(p);
4502         ASSERT(is_internal_pid(p->common.id));
4503         MBUF(p) = NULL;
4504         clear_offheap(&MSO(p));
4505 
4506         erts_pre_nif(&env, p, mod, tracee);
4507 #ifdef ERTS_NIF_ASSERT_IN_ENV
4508         env.dbg_disable_assert_in_env = 1;
4509 #endif
4510         nif_result = (*fun->fptr)(&env, argc, argv);
4511         if (env.exception_thrown)
4512             nif_result = THE_NON_VALUE;
4513         erts_post_nif(&env);
4514 
4515         /* Free any offheap and heap fragments created in nif */
4516         if (MSO(p).first) {
4517             erts_cleanup_offheap(&MSO(p));
4518             clear_offheap(&MSO(p));
4519         }
4520         if (MBUF(p))
4521             free_message_buffer(MBUF(p));
4522 
4523         /* restore original heap fragment list */
4524         MBUF(p) = orig_hf;
4525         MSO(p) = orig_oh;
4526         HEAP_TOP(p) = orig_htop;
4527     } else {
4528         /* Nif call was done without a process context,
4529            so we create a phony one. */
4530         struct enif_msg_environment_t msg_env;
4531         pre_nif_noproc(&msg_env, mod, tracee);
4532 #ifdef ERTS_NIF_ASSERT_IN_ENV
4533         msg_env.env.dbg_disable_assert_in_env = 1;
4534 #endif
4535         nif_result = (*fun->fptr)(&msg_env.env, argc, argv);
4536         if (msg_env.env.exception_thrown)
4537             nif_result = THE_NON_VALUE;
4538         post_nif_noproc(&msg_env);
4539     }
4540 
4541     return nif_result;
4542 }
4543 
4544 #ifdef USE_VM_PROBES
4545 void dtrace_nifenv_str(ErlNifEnv *env, char *process_buf)
4546 {
4547     dtrace_pid_str(env->proc->common.id, process_buf);
4548 }
4549 #endif
4550 
4551 #ifdef READONLY_CHECK
4552 /* Use checksums to assert that NIFs do not write into inspected binaries
4553 */
4554 static void readonly_check_dtor(struct enif_tmp_obj_t*);
4555 static unsigned calc_checksum(unsigned char* ptr, unsigned size);
4556 
4557 struct readonly_check_t
4558 {
4559     unsigned char* ptr;
4560     unsigned size;
4561     unsigned checksum;
4562 };
4563 static void add_readonly_check(ErlNifEnv* env, unsigned char* ptr, unsigned sz)
4564 {
4565     struct readonly_check_t* obj;
4566 
4567     obj = alloc_tmp_obj(env, sizeof(struct readonly_check_t),
4568         &readonly_check_dtor);
4569 
4570     obj->ptr = ptr;
4571     obj->size = sz;
4572     obj->checksum = calc_checksum(ptr, sz);
4573 }
4574 static void readonly_check_dtor(struct enif_tmp_obj_t* tmp_obj)
4575 {
4576     struct readonly_check_t* ro_check = (struct readonly_check_t*)&tmp_obj[1];
4577     unsigned chksum = calc_checksum(ro_check->ptr, ro_check->size);
4578     if (chksum != ro_check->checksum) {
4579 	fprintf(stderr, "\r\nReadonly data written by NIF, checksums differ"
4580 		" %x != %x\r\nABORTING\r\n", chksum, ro_check->checksum);
4581 	abort();
4582     }
4583     erts_free(tmp_obj->allocator, tmp_obj);
4584 }
4585 static unsigned calc_checksum(unsigned char* ptr, unsigned size)
4586 {
4587     unsigned i, sum = 0;
4588     for (i=0; i<size; i++) {
4589 	sum ^= ptr[i] << ((i % 4)*8);
4590     }
4591     return sum;
4592 }
4593 
4594 #endif /* READONLY_CHECK */
4595 
4596 #ifdef ERTS_NIF_ASSERT_IN_ENV
4597 static void dbg_assert_in_env(ErlNifEnv* env, Eterm term,
4598                               int nr, const char* type, const char* func)
4599 {
4600     Uint saved_used_size;
4601     Eterm* real_htop;
4602 
4603     if (is_immed(term)
4604         || (is_non_value(term) && env->exception_thrown)
4605         || erts_is_literal(term, ptr_val(term)))
4606         return;
4607 
4608     if (env->dbg_disable_assert_in_env) {
4609         /*
4610          * Trace nifs may cheat as built terms are discarded after return.
4611          * ToDo: Check if 'term' is part of argv[].
4612          */
4613         return;
4614     }
4615 
4616     if (env->heap_frag) {
4617         ASSERT(env->heap_frag == MBUF(env->proc));
4618         ASSERT(env->hp >= env->heap_frag->mem);
4619         ASSERT(env->hp <= env->heap_frag->mem + env->heap_frag->alloc_size);
4620         saved_used_size = env->heap_frag->used_size;
4621         env->heap_frag->used_size = env->hp - env->heap_frag->mem;
4622         real_htop = NULL;
4623     }
4624     else {
4625         real_htop = env->hp;
4626     }
4627     if (!erts_dbg_within_proc(ptr_val(term), env->proc, real_htop)) {
4628         fprintf(stderr, "\r\nFAILED ASSERTION in %s:\r\n", func);
4629         if (nr) {
4630             fprintf(stderr, "Term #%d of the %s is not from same ErlNifEnv.",
4631                     nr, type);
4632         }
4633         else {
4634             fprintf(stderr, "The %s is not from the same ErlNifEnv.", type);
4635         }
4636         fprintf(stderr, "\r\nABORTING\r\n");
4637         abort();
4638     }
4639     if (env->heap_frag) {
4640         env->heap_frag->used_size = saved_used_size;
4641     }
4642 }
4643 #endif
4644 
4645 #ifdef HAVE_USE_DTRACE
4646 
4647 #define MESSAGE_BUFSIZ 1024
4648 
4649 static void get_string_maybe(ErlNifEnv *env, const ERL_NIF_TERM term,
4650 		      char **ptr, char *buf, int bufsiz)
4651 {
4652     ErlNifBinary str_bin;
4653 
4654     if (!enif_inspect_iolist_as_binary(env, term, &str_bin) ||
4655         str_bin.size > bufsiz) {
4656         *ptr = NULL;
4657     } else {
4658         sys_memcpy(buf, (char *) str_bin.data, str_bin.size);
4659         buf[str_bin.size] = '\0';
4660         *ptr = buf;
4661     }
4662 }
4663 
4664 ERL_NIF_TERM erl_nif_user_trace_s1(ErlNifEnv* env, int argc,
4665                                    const ERL_NIF_TERM argv[])
4666 {
4667     ErlNifBinary message_bin;
4668     DTRACE_CHARBUF(messagebuf, MESSAGE_BUFSIZ + 1);
4669 
4670     if (DTRACE_ENABLED(user_trace_s1)) {
4671 	if (!enif_inspect_iolist_as_binary(env, argv[0], &message_bin) ||
4672 	    message_bin.size > MESSAGE_BUFSIZ) {
4673 	    return am_badarg;
4674 	}
4675 	sys_memcpy(messagebuf, (char *) message_bin.data, message_bin.size);
4676         messagebuf[message_bin.size] = '\0';
4677 	DTRACE1(user_trace_s1, messagebuf);
4678 	return am_true;
4679     } else {
4680 	return am_false;
4681     }
4682 }
4683 
4684 ERL_NIF_TERM erl_nif_user_trace_i4s4(ErlNifEnv* env, int argc,
4685                                      const ERL_NIF_TERM argv[])
4686 {
4687     DTRACE_CHARBUF(procbuf, 32 + 1);
4688     DTRACE_CHARBUF(user_tagbuf, MESSAGE_BUFSIZ + 1);
4689     char *utbuf = NULL;
4690     ErlNifSInt64 i1, i2, i3, i4;
4691     DTRACE_CHARBUF(messagebuf1, MESSAGE_BUFSIZ + 1);
4692     DTRACE_CHARBUF(messagebuf2, MESSAGE_BUFSIZ + 1);
4693     DTRACE_CHARBUF(messagebuf3, MESSAGE_BUFSIZ + 1);
4694     DTRACE_CHARBUF(messagebuf4, MESSAGE_BUFSIZ + 1);
4695     char *mbuf1 = NULL, *mbuf2 = NULL, *mbuf3 = NULL, *mbuf4 = NULL;
4696 
4697     if (DTRACE_ENABLED(user_trace_i4s4)) {
4698 	dtrace_nifenv_str(env, procbuf);
4699         get_string_maybe(env, argv[0], &utbuf, user_tagbuf, MESSAGE_BUFSIZ);
4700         if (! enif_get_int64(env, argv[1], &i1))
4701             i1 = 0;
4702         if (! enif_get_int64(env, argv[2], &i2))
4703             i2 = 0;
4704         if (! enif_get_int64(env, argv[3], &i3))
4705             i3 = 0;
4706         if (! enif_get_int64(env, argv[4], &i4))
4707             i4 = 0;
4708         get_string_maybe(env, argv[5], &mbuf1, messagebuf1, MESSAGE_BUFSIZ);
4709         get_string_maybe(env, argv[6], &mbuf2, messagebuf2, MESSAGE_BUFSIZ);
4710         get_string_maybe(env, argv[7], &mbuf3, messagebuf3, MESSAGE_BUFSIZ);
4711         get_string_maybe(env, argv[8], &mbuf4, messagebuf4, MESSAGE_BUFSIZ);
4712 	DTRACE10(user_trace_i4s4, procbuf, utbuf,
4713 		 i1, i2, i3, i4, mbuf1, mbuf2, mbuf3, mbuf4);
4714 	return am_true;
4715     } else {
4716 	return am_false;
4717     }
4718 }
4719 
4720 #define DTRACE10_LABEL(name, label, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9) \
4721     erlang_##name##label((a0), (a1), (a2), (a3), (a4), (a5), (a6), (a7), (a8), (a9))
4722 #define N_STATEMENT(the_label) \
4723    case the_label: \
4724       if (DTRACE_ENABLED(user_trace_n##the_label)) { \
4725           dtrace_nifenv_str(env, procbuf); \
4726           get_string_maybe(env, argv[1], &utbuf, user_tagbuf, MESSAGE_BUFSIZ); \
4727           if (! enif_get_int64(env, argv[2], &i1)) \
4728               i1 = 0; \
4729           if (! enif_get_int64(env, argv[3], &i2)) \
4730               i2 = 0; \
4731           if (! enif_get_int64(env, argv[4], &i3)) \
4732               i3 = 0; \
4733           if (! enif_get_int64(env, argv[5], &i4)) \
4734               i4 = 0; \
4735           get_string_maybe(env, argv[6], &mbuf1, messagebuf1, MESSAGE_BUFSIZ); \
4736           get_string_maybe(env, argv[7], &mbuf2, messagebuf2, MESSAGE_BUFSIZ); \
4737           get_string_maybe(env, argv[8], &mbuf3, messagebuf3, MESSAGE_BUFSIZ); \
4738           get_string_maybe(env, argv[9], &mbuf4, messagebuf4, MESSAGE_BUFSIZ); \
4739           DTRACE10_LABEL(user_trace_n, the_label, procbuf, utbuf,    \
4740                          i1, i2, i3, i4, mbuf1, mbuf2, mbuf3, mbuf4); \
4741           return am_true; \
4742       } else { \
4743           return am_false; \
4744       } \
4745       break
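/*
 * Each N_STATEMENT(L) expands to one 'case L:' arm that mirrors
 * erl_nif_user_trace_i4s4() above but fires the per-label probe
 * user_trace_nL.  For example, N_STATEMENT(7) expands, via DTRACE10_LABEL,
 * to roughly:
 *
 *     case 7:
 *         if (DTRACE_ENABLED(user_trace_n7)) {
 *             ...decode argv[1..9] exactly as in the i4s4 case...
 *             erlang_user_trace_n7(procbuf, utbuf, i1, i2, i3, i4,
 *                                  mbuf1, mbuf2, mbuf3, mbuf4);
 *             return am_true;
 *         } else {
 *             return am_false;
 *         }
 *         break;
 */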
4746 
4747 ERL_NIF_TERM erl_nif_user_trace_n(ErlNifEnv* env, int argc,
4748 				  const ERL_NIF_TERM argv[])
4749 {
4750     DTRACE_CHARBUF(procbuf, 32 + 1);
4751     DTRACE_CHARBUF(user_tagbuf, MESSAGE_BUFSIZ + 1);
4752     char *utbuf = NULL;
4753     ErlNifSInt64 i1, i2, i3, i4;
4754     DTRACE_CHARBUF(messagebuf1, MESSAGE_BUFSIZ + 1);
4755     DTRACE_CHARBUF(messagebuf2, MESSAGE_BUFSIZ + 1);
4756     DTRACE_CHARBUF(messagebuf3, MESSAGE_BUFSIZ + 1);
4757     DTRACE_CHARBUF(messagebuf4, MESSAGE_BUFSIZ + 1);
4758     char *mbuf1 = NULL, *mbuf2 = NULL, *mbuf3 = NULL, *mbuf4 = NULL;
4759     ErlNifSInt64 label = 0;
4760 
4761     if (! enif_get_int64(env, argv[0], &label) || label < 0 || label > 1023) {
4762 	return am_badarg;
4763     }
4764     switch (label) {
4765         N_STATEMENT(0);
4766         N_STATEMENT(1);
4767         N_STATEMENT(2);
4768         N_STATEMENT(3);
4769         N_STATEMENT(4);
4770         N_STATEMENT(5);
4771         N_STATEMENT(6);
4772         N_STATEMENT(7);
4773         N_STATEMENT(8);
4774         N_STATEMENT(9);
4775         N_STATEMENT(10);
4776         N_STATEMENT(11);
4777         N_STATEMENT(12);
4778         N_STATEMENT(13);
4779         N_STATEMENT(14);
4780         N_STATEMENT(15);
4781         N_STATEMENT(16);
4782         N_STATEMENT(17);
4783         N_STATEMENT(18);
4784         N_STATEMENT(19);
4785         N_STATEMENT(20);
4786         N_STATEMENT(21);
4787         N_STATEMENT(22);
4788         N_STATEMENT(23);
4789         N_STATEMENT(24);
4790         N_STATEMENT(25);
4791         N_STATEMENT(26);
4792         N_STATEMENT(27);
4793         N_STATEMENT(28);
4794         N_STATEMENT(29);
4795         N_STATEMENT(30);
4796         N_STATEMENT(31);
4797         N_STATEMENT(32);
4798         N_STATEMENT(33);
4799         N_STATEMENT(34);
4800         N_STATEMENT(35);
4801         N_STATEMENT(36);
4802         N_STATEMENT(37);
4803         N_STATEMENT(38);
4804         N_STATEMENT(39);
4805         N_STATEMENT(40);
4806         N_STATEMENT(41);
4807         N_STATEMENT(42);
4808         N_STATEMENT(43);
4809         N_STATEMENT(44);
4810         N_STATEMENT(45);
4811         N_STATEMENT(46);
4812         N_STATEMENT(47);
4813         N_STATEMENT(48);
4814         N_STATEMENT(49);
4815         N_STATEMENT(50);
4816         N_STATEMENT(51);
4817         N_STATEMENT(52);
4818         N_STATEMENT(53);
4819         N_STATEMENT(54);
4820         N_STATEMENT(55);
4821         N_STATEMENT(56);
4822         N_STATEMENT(57);
4823         N_STATEMENT(58);
4824         N_STATEMENT(59);
4825         N_STATEMENT(60);
4826         N_STATEMENT(61);
4827         N_STATEMENT(62);
4828         N_STATEMENT(63);
4829         N_STATEMENT(64);
4830         N_STATEMENT(65);
4831         N_STATEMENT(66);
4832         N_STATEMENT(67);
4833         N_STATEMENT(68);
4834         N_STATEMENT(69);
4835         N_STATEMENT(70);
4836         N_STATEMENT(71);
4837         N_STATEMENT(72);
4838         N_STATEMENT(73);
4839         N_STATEMENT(74);
4840         N_STATEMENT(75);
4841         N_STATEMENT(76);
4842         N_STATEMENT(77);
4843         N_STATEMENT(78);
4844         N_STATEMENT(79);
4845         N_STATEMENT(80);
4846         N_STATEMENT(81);
4847         N_STATEMENT(82);
4848         N_STATEMENT(83);
4849         N_STATEMENT(84);
4850         N_STATEMENT(85);
4851         N_STATEMENT(86);
4852         N_STATEMENT(87);
4853         N_STATEMENT(88);
4854         N_STATEMENT(89);
4855         N_STATEMENT(90);
4856         N_STATEMENT(91);
4857         N_STATEMENT(92);
4858         N_STATEMENT(93);
4859         N_STATEMENT(94);
4860         N_STATEMENT(95);
4861         N_STATEMENT(96);
4862         N_STATEMENT(97);
4863         N_STATEMENT(98);
4864         N_STATEMENT(99);
4865         N_STATEMENT(100);
4866         N_STATEMENT(101);
4867         N_STATEMENT(102);
4868         N_STATEMENT(103);
4869         N_STATEMENT(104);
4870         N_STATEMENT(105);
4871         N_STATEMENT(106);
4872         N_STATEMENT(107);
4873         N_STATEMENT(108);
4874         N_STATEMENT(109);
4875         N_STATEMENT(110);
4876         N_STATEMENT(111);
4877         N_STATEMENT(112);
4878         N_STATEMENT(113);
4879         N_STATEMENT(114);
4880         N_STATEMENT(115);
4881         N_STATEMENT(116);
4882         N_STATEMENT(117);
4883         N_STATEMENT(118);
4884         N_STATEMENT(119);
4885         N_STATEMENT(120);
4886         N_STATEMENT(121);
4887         N_STATEMENT(122);
4888         N_STATEMENT(123);
4889         N_STATEMENT(124);
4890         N_STATEMENT(125);
4891         N_STATEMENT(126);
4892         N_STATEMENT(127);
4893         N_STATEMENT(128);
4894         N_STATEMENT(129);
4895         N_STATEMENT(130);
4896         N_STATEMENT(131);
4897         N_STATEMENT(132);
4898         N_STATEMENT(133);
4899         N_STATEMENT(134);
4900         N_STATEMENT(135);
4901         N_STATEMENT(136);
4902         N_STATEMENT(137);
4903         N_STATEMENT(138);
4904         N_STATEMENT(139);
4905         N_STATEMENT(140);
4906         N_STATEMENT(141);
4907         N_STATEMENT(142);
4908         N_STATEMENT(143);
4909         N_STATEMENT(144);
4910         N_STATEMENT(145);
4911         N_STATEMENT(146);
4912         N_STATEMENT(147);
4913         N_STATEMENT(148);
4914         N_STATEMENT(149);
4915         N_STATEMENT(150);
4916         N_STATEMENT(151);
4917         N_STATEMENT(152);
4918         N_STATEMENT(153);
4919         N_STATEMENT(154);
4920         N_STATEMENT(155);
4921         N_STATEMENT(156);
4922         N_STATEMENT(157);
4923         N_STATEMENT(158);
4924         N_STATEMENT(159);
4925         N_STATEMENT(160);
4926         N_STATEMENT(161);
4927         N_STATEMENT(162);
4928         N_STATEMENT(163);
4929         N_STATEMENT(164);
4930         N_STATEMENT(165);
4931         N_STATEMENT(166);
4932         N_STATEMENT(167);
4933         N_STATEMENT(168);
4934         N_STATEMENT(169);
4935         N_STATEMENT(170);
4936         N_STATEMENT(171);
4937         N_STATEMENT(172);
4938         N_STATEMENT(173);
4939         N_STATEMENT(174);
4940         N_STATEMENT(175);
4941         N_STATEMENT(176);
4942         N_STATEMENT(177);
4943         N_STATEMENT(178);
4944         N_STATEMENT(179);
4945         N_STATEMENT(180);
4946         N_STATEMENT(181);
4947         N_STATEMENT(182);
4948         N_STATEMENT(183);
4949         N_STATEMENT(184);
4950         N_STATEMENT(185);
4951         N_STATEMENT(186);
4952         N_STATEMENT(187);
4953         N_STATEMENT(188);
4954         N_STATEMENT(189);
4955         N_STATEMENT(190);
4956         N_STATEMENT(191);
4957         N_STATEMENT(192);
4958         N_STATEMENT(193);
4959         N_STATEMENT(194);
4960         N_STATEMENT(195);
4961         N_STATEMENT(196);
4962         N_STATEMENT(197);
4963         N_STATEMENT(198);
4964         N_STATEMENT(199);
4965         N_STATEMENT(200);
4966         N_STATEMENT(201);
4967         N_STATEMENT(202);
4968         N_STATEMENT(203);
4969         N_STATEMENT(204);
4970         N_STATEMENT(205);
4971         N_STATEMENT(206);
4972         N_STATEMENT(207);
4973         N_STATEMENT(208);
4974         N_STATEMENT(209);
4975         N_STATEMENT(210);
4976         N_STATEMENT(211);
4977         N_STATEMENT(212);
4978         N_STATEMENT(213);
4979         N_STATEMENT(214);
4980         N_STATEMENT(215);
4981         N_STATEMENT(216);
4982         N_STATEMENT(217);
4983         N_STATEMENT(218);
4984         N_STATEMENT(219);
4985         N_STATEMENT(220);
4986         N_STATEMENT(221);
4987         N_STATEMENT(222);
4988         N_STATEMENT(223);
4989         N_STATEMENT(224);
4990         N_STATEMENT(225);
4991         N_STATEMENT(226);
4992         N_STATEMENT(227);
4993         N_STATEMENT(228);
4994         N_STATEMENT(229);
4995         N_STATEMENT(230);
4996         N_STATEMENT(231);
4997         N_STATEMENT(232);
4998         N_STATEMENT(233);
4999         N_STATEMENT(234);
5000         N_STATEMENT(235);
5001         N_STATEMENT(236);
5002         N_STATEMENT(237);
5003         N_STATEMENT(238);
5004         N_STATEMENT(239);
5005         N_STATEMENT(240);
5006         N_STATEMENT(241);
5007         N_STATEMENT(242);
5008         N_STATEMENT(243);
5009         N_STATEMENT(244);
5010         N_STATEMENT(245);
5011         N_STATEMENT(246);
5012         N_STATEMENT(247);
5013         N_STATEMENT(248);
5014         N_STATEMENT(249);
5015         N_STATEMENT(250);
5016         N_STATEMENT(251);
5017         N_STATEMENT(252);
5018         N_STATEMENT(253);
5019         N_STATEMENT(254);
5020         N_STATEMENT(255);
5021         N_STATEMENT(256);
5022         N_STATEMENT(257);
5023         N_STATEMENT(258);
5024         N_STATEMENT(259);
5025         N_STATEMENT(260);
5026         N_STATEMENT(261);
5027         N_STATEMENT(262);
5028         N_STATEMENT(263);
5029         N_STATEMENT(264);
5030         N_STATEMENT(265);
5031         N_STATEMENT(266);
5032         N_STATEMENT(267);
5033         N_STATEMENT(268);
5034         N_STATEMENT(269);
5035         N_STATEMENT(270);
5036         N_STATEMENT(271);
5037         N_STATEMENT(272);
5038         N_STATEMENT(273);
5039         N_STATEMENT(274);
5040         N_STATEMENT(275);
5041         N_STATEMENT(276);
5042         N_STATEMENT(277);
5043         N_STATEMENT(278);
5044         N_STATEMENT(279);
5045         N_STATEMENT(280);
5046         N_STATEMENT(281);
5047         N_STATEMENT(282);
5048         N_STATEMENT(283);
5049         N_STATEMENT(284);
5050         N_STATEMENT(285);
5051         N_STATEMENT(286);
5052         N_STATEMENT(287);
5053         N_STATEMENT(288);
5054         N_STATEMENT(289);
5055         N_STATEMENT(290);
5056         N_STATEMENT(291);
5057         N_STATEMENT(292);
5058         N_STATEMENT(293);
5059         N_STATEMENT(294);
5060         N_STATEMENT(295);
5061         N_STATEMENT(296);
5062         N_STATEMENT(297);
5063         N_STATEMENT(298);
5064         N_STATEMENT(299);
5065         N_STATEMENT(300);
5066         N_STATEMENT(301);
5067         N_STATEMENT(302);
5068         N_STATEMENT(303);
5069         N_STATEMENT(304);
5070         N_STATEMENT(305);
5071         N_STATEMENT(306);
5072         N_STATEMENT(307);
5073         N_STATEMENT(308);
5074         N_STATEMENT(309);
5075         N_STATEMENT(310);
5076         N_STATEMENT(311);
5077         N_STATEMENT(312);
5078         N_STATEMENT(313);
5079         N_STATEMENT(314);
5080         N_STATEMENT(315);
5081         N_STATEMENT(316);
5082         N_STATEMENT(317);
5083         N_STATEMENT(318);
5084         N_STATEMENT(319);
5085         N_STATEMENT(320);
5086         N_STATEMENT(321);
5087         N_STATEMENT(322);
5088         N_STATEMENT(323);
5089         N_STATEMENT(324);
5090         N_STATEMENT(325);
5091         N_STATEMENT(326);
5092         N_STATEMENT(327);
5093         N_STATEMENT(328);
5094         N_STATEMENT(329);
5095         N_STATEMENT(330);
5096         N_STATEMENT(331);
5097         N_STATEMENT(332);
5098         N_STATEMENT(333);
5099         N_STATEMENT(334);
5100         N_STATEMENT(335);
5101         N_STATEMENT(336);
5102         N_STATEMENT(337);
5103         N_STATEMENT(338);
5104         N_STATEMENT(339);
5105         N_STATEMENT(340);
5106         N_STATEMENT(341);
5107         N_STATEMENT(342);
5108         N_STATEMENT(343);
5109         N_STATEMENT(344);
5110         N_STATEMENT(345);
5111         N_STATEMENT(346);
5112         N_STATEMENT(347);
5113         N_STATEMENT(348);
5114         N_STATEMENT(349);
5115         N_STATEMENT(350);
5116         N_STATEMENT(351);
5117         N_STATEMENT(352);
5118         N_STATEMENT(353);
5119         N_STATEMENT(354);
5120         N_STATEMENT(355);
5121         N_STATEMENT(356);
5122         N_STATEMENT(357);
5123         N_STATEMENT(358);
5124         N_STATEMENT(359);
5125         N_STATEMENT(360);
5126         N_STATEMENT(361);
5127         N_STATEMENT(362);
5128         N_STATEMENT(363);
5129         N_STATEMENT(364);
5130         N_STATEMENT(365);
5131         N_STATEMENT(366);
5132         N_STATEMENT(367);
5133         N_STATEMENT(368);
5134         N_STATEMENT(369);
5135         N_STATEMENT(370);
5136         N_STATEMENT(371);
5137         N_STATEMENT(372);
5138         N_STATEMENT(373);
5139         N_STATEMENT(374);
5140         N_STATEMENT(375);
5141         N_STATEMENT(376);
5142         N_STATEMENT(377);
5143         N_STATEMENT(378);
5144         N_STATEMENT(379);
5145         N_STATEMENT(380);
5146         N_STATEMENT(381);
5147         N_STATEMENT(382);
5148         N_STATEMENT(383);
5149         N_STATEMENT(384);
5150         N_STATEMENT(385);
5151         N_STATEMENT(386);
5152         N_STATEMENT(387);
5153         N_STATEMENT(388);
5154         N_STATEMENT(389);
5155         N_STATEMENT(390);
5156         N_STATEMENT(391);
5157         N_STATEMENT(392);
5158         N_STATEMENT(393);
5159         N_STATEMENT(394);
5160         N_STATEMENT(395);
5161         N_STATEMENT(396);
5162         N_STATEMENT(397);
5163         N_STATEMENT(398);
5164         N_STATEMENT(399);
5165         N_STATEMENT(400);
5166         N_STATEMENT(401);
5167         N_STATEMENT(402);
5168         N_STATEMENT(403);
5169         N_STATEMENT(404);
5170         N_STATEMENT(405);
5171         N_STATEMENT(406);
5172         N_STATEMENT(407);
5173         N_STATEMENT(408);
5174         N_STATEMENT(409);
5175         N_STATEMENT(410);
5176         N_STATEMENT(411);
5177         N_STATEMENT(412);
5178         N_STATEMENT(413);
5179         N_STATEMENT(414);
5180         N_STATEMENT(415);
5181         N_STATEMENT(416);
5182         N_STATEMENT(417);
5183         N_STATEMENT(418);
5184         N_STATEMENT(419);
5185         N_STATEMENT(420);
5186         N_STATEMENT(421);
5187         N_STATEMENT(422);
5188         N_STATEMENT(423);
5189         N_STATEMENT(424);
5190         N_STATEMENT(425);
5191         N_STATEMENT(426);
5192         N_STATEMENT(427);
5193         N_STATEMENT(428);
5194         N_STATEMENT(429);
5195         N_STATEMENT(430);
5196         N_STATEMENT(431);
5197         N_STATEMENT(432);
5198         N_STATEMENT(433);
5199         N_STATEMENT(434);
5200         N_STATEMENT(435);
5201         N_STATEMENT(436);
5202         N_STATEMENT(437);
5203         N_STATEMENT(438);
5204         N_STATEMENT(439);
5205         N_STATEMENT(440);
5206         N_STATEMENT(441);
5207         N_STATEMENT(442);
5208         N_STATEMENT(443);
5209         N_STATEMENT(444);
5210         N_STATEMENT(445);
5211         N_STATEMENT(446);
5212         N_STATEMENT(447);
5213         N_STATEMENT(448);
5214         N_STATEMENT(449);
5215         N_STATEMENT(450);
5216         N_STATEMENT(451);
5217         N_STATEMENT(452);
5218         N_STATEMENT(453);
5219         N_STATEMENT(454);
5220         N_STATEMENT(455);
5221         N_STATEMENT(456);
5222         N_STATEMENT(457);
5223         N_STATEMENT(458);
5224         N_STATEMENT(459);
5225         N_STATEMENT(460);
5226         N_STATEMENT(461);
5227         N_STATEMENT(462);
5228         N_STATEMENT(463);
5229         N_STATEMENT(464);
5230         N_STATEMENT(465);
5231         N_STATEMENT(466);
5232         N_STATEMENT(467);
5233         N_STATEMENT(468);
5234         N_STATEMENT(469);
5235         N_STATEMENT(470);
5236         N_STATEMENT(471);
5237         N_STATEMENT(472);
5238         N_STATEMENT(473);
5239         N_STATEMENT(474);
5240         N_STATEMENT(475);
5241         N_STATEMENT(476);
5242         N_STATEMENT(477);
5243         N_STATEMENT(478);
5244         N_STATEMENT(479);
5245         N_STATEMENT(480);
5246         N_STATEMENT(481);
5247         N_STATEMENT(482);
5248         N_STATEMENT(483);
5249         N_STATEMENT(484);
5250         N_STATEMENT(485);
5251         N_STATEMENT(486);
5252         N_STATEMENT(487);
5253         N_STATEMENT(488);
5254         N_STATEMENT(489);
5255         N_STATEMENT(490);
5256         N_STATEMENT(491);
5257         N_STATEMENT(492);
5258         N_STATEMENT(493);
5259         N_STATEMENT(494);
5260         N_STATEMENT(495);
5261         N_STATEMENT(496);
5262         N_STATEMENT(497);
5263         N_STATEMENT(498);
5264         N_STATEMENT(499);
5265         N_STATEMENT(500);
5266         N_STATEMENT(501);
5267         N_STATEMENT(502);
5268         N_STATEMENT(503);
5269         N_STATEMENT(504);
5270         N_STATEMENT(505);
5271         N_STATEMENT(506);
5272         N_STATEMENT(507);
5273         N_STATEMENT(508);
5274         N_STATEMENT(509);
5275         N_STATEMENT(510);
5276         N_STATEMENT(511);
5277         N_STATEMENT(512);
5278         N_STATEMENT(513);
5279         N_STATEMENT(514);
5280         N_STATEMENT(515);
5281         N_STATEMENT(516);
5282         N_STATEMENT(517);
5283         N_STATEMENT(518);
5284         N_STATEMENT(519);
5285         N_STATEMENT(520);
5286         N_STATEMENT(521);
5287         N_STATEMENT(522);
5288         N_STATEMENT(523);
5289         N_STATEMENT(524);
5290         N_STATEMENT(525);
5291         N_STATEMENT(526);
5292         N_STATEMENT(527);
5293         N_STATEMENT(528);
5294         N_STATEMENT(529);
5295         N_STATEMENT(530);
5296         N_STATEMENT(531);
5297         N_STATEMENT(532);
5298         N_STATEMENT(533);
5299         N_STATEMENT(534);
5300         N_STATEMENT(535);
5301         N_STATEMENT(536);
5302         N_STATEMENT(537);
5303         N_STATEMENT(538);
5304         N_STATEMENT(539);
5305         N_STATEMENT(540);
5306         N_STATEMENT(541);
5307         N_STATEMENT(542);
5308         N_STATEMENT(543);
5309         N_STATEMENT(544);
5310         N_STATEMENT(545);
5311         N_STATEMENT(546);
5312         N_STATEMENT(547);
5313         N_STATEMENT(548);
5314         N_STATEMENT(549);
5315         N_STATEMENT(550);
5316         N_STATEMENT(551);
5317         N_STATEMENT(552);
5318         N_STATEMENT(553);
5319         N_STATEMENT(554);
5320         N_STATEMENT(555);
5321         N_STATEMENT(556);
5322         N_STATEMENT(557);
5323         N_STATEMENT(558);
5324         N_STATEMENT(559);
5325         N_STATEMENT(560);
5326         N_STATEMENT(561);
5327         N_STATEMENT(562);
5328         N_STATEMENT(563);
5329         N_STATEMENT(564);
5330         N_STATEMENT(565);
5331         N_STATEMENT(566);
5332         N_STATEMENT(567);
5333         N_STATEMENT(568);
5334         N_STATEMENT(569);
5335         N_STATEMENT(570);
5336         N_STATEMENT(571);
5337         N_STATEMENT(572);
5338         N_STATEMENT(573);
5339         N_STATEMENT(574);
5340         N_STATEMENT(575);
5341         N_STATEMENT(576);
5342         N_STATEMENT(577);
5343         N_STATEMENT(578);
5344         N_STATEMENT(579);
5345         N_STATEMENT(580);
5346         N_STATEMENT(581);
5347         N_STATEMENT(582);
5348         N_STATEMENT(583);
5349         N_STATEMENT(584);
5350         N_STATEMENT(585);
5351         N_STATEMENT(586);
5352         N_STATEMENT(587);
5353         N_STATEMENT(588);
5354         N_STATEMENT(589);
5355         N_STATEMENT(590);
5356         N_STATEMENT(591);
5357         N_STATEMENT(592);
5358         N_STATEMENT(593);
5359         N_STATEMENT(594);
5360         N_STATEMENT(595);
5361         N_STATEMENT(596);
5362         N_STATEMENT(597);
5363         N_STATEMENT(598);
5364         N_STATEMENT(599);
5365         N_STATEMENT(600);
5366         N_STATEMENT(601);
5367         N_STATEMENT(602);
5368         N_STATEMENT(603);
5369         N_STATEMENT(604);
5370         N_STATEMENT(605);
5371         N_STATEMENT(606);
5372         N_STATEMENT(607);
5373         N_STATEMENT(608);
5374         N_STATEMENT(609);
5375         N_STATEMENT(610);
5376         N_STATEMENT(611);
5377         N_STATEMENT(612);
5378         N_STATEMENT(613);
5379         N_STATEMENT(614);
5380         N_STATEMENT(615);
5381         N_STATEMENT(616);
5382         N_STATEMENT(617);
5383         N_STATEMENT(618);
5384         N_STATEMENT(619);
5385         N_STATEMENT(620);
5386         N_STATEMENT(621);
5387         N_STATEMENT(622);
5388         N_STATEMENT(623);
5389         N_STATEMENT(624);
5390         N_STATEMENT(625);
5391         N_STATEMENT(626);
5392         N_STATEMENT(627);
5393         N_STATEMENT(628);
5394         N_STATEMENT(629);
5395         N_STATEMENT(630);
5396         N_STATEMENT(631);
5397         N_STATEMENT(632);
5398         N_STATEMENT(633);
5399         N_STATEMENT(634);
5400         N_STATEMENT(635);
5401         N_STATEMENT(636);
5402         N_STATEMENT(637);
5403         N_STATEMENT(638);
5404         N_STATEMENT(639);
5405         N_STATEMENT(640);
5406         N_STATEMENT(641);
5407         N_STATEMENT(642);
5408         N_STATEMENT(643);
5409         N_STATEMENT(644);
5410         N_STATEMENT(645);
5411         N_STATEMENT(646);
5412         N_STATEMENT(647);
5413         N_STATEMENT(648);
5414         N_STATEMENT(649);
5415         N_STATEMENT(650);
5416         N_STATEMENT(651);
5417         N_STATEMENT(652);
5418         N_STATEMENT(653);
5419         N_STATEMENT(654);
5420         N_STATEMENT(655);
5421         N_STATEMENT(656);
5422         N_STATEMENT(657);
5423         N_STATEMENT(658);
5424         N_STATEMENT(659);
5425         N_STATEMENT(660);
5426         N_STATEMENT(661);
5427         N_STATEMENT(662);
5428         N_STATEMENT(663);
5429         N_STATEMENT(664);
5430         N_STATEMENT(665);
5431         N_STATEMENT(666);
5432         N_STATEMENT(667);
5433         N_STATEMENT(668);
5434         N_STATEMENT(669);
5435         N_STATEMENT(670);
5436         N_STATEMENT(671);
5437         N_STATEMENT(672);
5438         N_STATEMENT(673);
5439         N_STATEMENT(674);
5440         N_STATEMENT(675);
5441         N_STATEMENT(676);
5442         N_STATEMENT(677);
5443         N_STATEMENT(678);
5444         N_STATEMENT(679);
5445         N_STATEMENT(680);
5446         N_STATEMENT(681);
5447         N_STATEMENT(682);
5448         N_STATEMENT(683);
5449         N_STATEMENT(684);
5450         N_STATEMENT(685);
5451         N_STATEMENT(686);
5452         N_STATEMENT(687);
5453         N_STATEMENT(688);
5454         N_STATEMENT(689);
5455         N_STATEMENT(690);
5456         N_STATEMENT(691);
5457         N_STATEMENT(692);
5458         N_STATEMENT(693);
5459         N_STATEMENT(694);
5460         N_STATEMENT(695);
5461         N_STATEMENT(696);
5462         N_STATEMENT(697);
5463         N_STATEMENT(698);
5464         N_STATEMENT(699);
5465         N_STATEMENT(700);
5466         N_STATEMENT(701);
5467         N_STATEMENT(702);
5468         N_STATEMENT(703);
5469         N_STATEMENT(704);
5470         N_STATEMENT(705);
5471         N_STATEMENT(706);
5472         N_STATEMENT(707);
5473         N_STATEMENT(708);
5474         N_STATEMENT(709);
5475         N_STATEMENT(710);
5476         N_STATEMENT(711);
5477         N_STATEMENT(712);
5478         N_STATEMENT(713);
5479         N_STATEMENT(714);
5480         N_STATEMENT(715);
5481         N_STATEMENT(716);
5482         N_STATEMENT(717);
5483         N_STATEMENT(718);
5484         N_STATEMENT(719);
5485         N_STATEMENT(720);
5486         N_STATEMENT(721);
5487         N_STATEMENT(722);
5488         N_STATEMENT(723);
5489         N_STATEMENT(724);
5490         N_STATEMENT(725);
5491         N_STATEMENT(726);
5492         N_STATEMENT(727);
5493         N_STATEMENT(728);
5494         N_STATEMENT(729);
5495         N_STATEMENT(730);
5496         N_STATEMENT(731);
5497         N_STATEMENT(732);
5498         N_STATEMENT(733);
5499         N_STATEMENT(734);
5500         N_STATEMENT(735);
5501         N_STATEMENT(736);
5502         N_STATEMENT(737);
5503         N_STATEMENT(738);
5504         N_STATEMENT(739);
5505         N_STATEMENT(740);
5506         N_STATEMENT(741);
5507         N_STATEMENT(742);
5508         N_STATEMENT(743);
5509         N_STATEMENT(744);
5510         N_STATEMENT(745);
5511         N_STATEMENT(746);
5512         N_STATEMENT(747);
5513         N_STATEMENT(748);
5514         N_STATEMENT(749);
5515         N_STATEMENT(750);
5516         N_STATEMENT(751);
5517         N_STATEMENT(752);
5518         N_STATEMENT(753);
5519         N_STATEMENT(754);
5520         N_STATEMENT(755);
5521         N_STATEMENT(756);
5522         N_STATEMENT(757);
5523         N_STATEMENT(758);
5524         N_STATEMENT(759);
5525         N_STATEMENT(760);
5526         N_STATEMENT(761);
5527         N_STATEMENT(762);
5528         N_STATEMENT(763);
5529         N_STATEMENT(764);
5530         N_STATEMENT(765);
5531         N_STATEMENT(766);
5532         N_STATEMENT(767);
5533         N_STATEMENT(768);
5534         N_STATEMENT(769);
5535         N_STATEMENT(770);
5536         N_STATEMENT(771);
5537         N_STATEMENT(772);
5538         N_STATEMENT(773);
5539         N_STATEMENT(774);
5540         N_STATEMENT(775);
5541         N_STATEMENT(776);
5542         N_STATEMENT(777);
5543         N_STATEMENT(778);
5544         N_STATEMENT(779);
5545         N_STATEMENT(780);
5546         N_STATEMENT(781);
5547         N_STATEMENT(782);
5548         N_STATEMENT(783);
5549         N_STATEMENT(784);
5550         N_STATEMENT(785);
5551         N_STATEMENT(786);
5552         N_STATEMENT(787);
5553         N_STATEMENT(788);
5554         N_STATEMENT(789);
5555         N_STATEMENT(790);
5556         N_STATEMENT(791);
5557         N_STATEMENT(792);
5558         N_STATEMENT(793);
5559         N_STATEMENT(794);
5560         N_STATEMENT(795);
5561         N_STATEMENT(796);
5562         N_STATEMENT(797);
5563         N_STATEMENT(798);
5564         N_STATEMENT(799);
5565         N_STATEMENT(800);
5566         N_STATEMENT(801);
5567         N_STATEMENT(802);
5568         N_STATEMENT(803);
5569         N_STATEMENT(804);
5570         N_STATEMENT(805);
5571         N_STATEMENT(806);
5572         N_STATEMENT(807);
5573         N_STATEMENT(808);
5574         N_STATEMENT(809);
5575         N_STATEMENT(810);
5576         N_STATEMENT(811);
5577         N_STATEMENT(812);
5578         N_STATEMENT(813);
5579         N_STATEMENT(814);
5580         N_STATEMENT(815);
5581         N_STATEMENT(816);
5582         N_STATEMENT(817);
5583         N_STATEMENT(818);
5584         N_STATEMENT(819);
5585         N_STATEMENT(820);
5586         N_STATEMENT(821);
5587         N_STATEMENT(822);
5588         N_STATEMENT(823);
5589         N_STATEMENT(824);
5590         N_STATEMENT(825);
5591         N_STATEMENT(826);
5592         N_STATEMENT(827);
5593         N_STATEMENT(828);
5594         N_STATEMENT(829);
5595         N_STATEMENT(830);
5596         N_STATEMENT(831);
5597         N_STATEMENT(832);
5598         N_STATEMENT(833);
5599         N_STATEMENT(834);
5600         N_STATEMENT(835);
5601         N_STATEMENT(836);
5602         N_STATEMENT(837);
5603         N_STATEMENT(838);
5604         N_STATEMENT(839);
5605         N_STATEMENT(840);
5606         N_STATEMENT(841);
5607         N_STATEMENT(842);
5608         N_STATEMENT(843);
5609         N_STATEMENT(844);
5610         N_STATEMENT(845);
5611         N_STATEMENT(846);
5612         N_STATEMENT(847);
5613         N_STATEMENT(848);
5614         N_STATEMENT(849);
5615         N_STATEMENT(850);
5616         N_STATEMENT(851);
5617         N_STATEMENT(852);
5618         N_STATEMENT(853);
5619         N_STATEMENT(854);
5620         N_STATEMENT(855);
5621         N_STATEMENT(856);
5622         N_STATEMENT(857);
5623         N_STATEMENT(858);
5624         N_STATEMENT(859);
5625         N_STATEMENT(860);
5626         N_STATEMENT(861);
5627         N_STATEMENT(862);
5628         N_STATEMENT(863);
5629         N_STATEMENT(864);
5630         N_STATEMENT(865);
5631         N_STATEMENT(866);
5632         N_STATEMENT(867);
5633         N_STATEMENT(868);
5634         N_STATEMENT(869);
5635         N_STATEMENT(870);
5636         N_STATEMENT(871);
5637         N_STATEMENT(872);
5638         N_STATEMENT(873);
5639         N_STATEMENT(874);
5640         N_STATEMENT(875);
5641         N_STATEMENT(876);
5642         N_STATEMENT(877);
5643         N_STATEMENT(878);
5644         N_STATEMENT(879);
5645         N_STATEMENT(880);
5646         N_STATEMENT(881);
5647         N_STATEMENT(882);
5648         N_STATEMENT(883);
5649         N_STATEMENT(884);
5650         N_STATEMENT(885);
5651         N_STATEMENT(886);
5652         N_STATEMENT(887);
5653         N_STATEMENT(888);
5654         N_STATEMENT(889);
5655         N_STATEMENT(890);
5656         N_STATEMENT(891);
5657         N_STATEMENT(892);
5658         N_STATEMENT(893);
5659         N_STATEMENT(894);
5660         N_STATEMENT(895);
5661         N_STATEMENT(896);
5662         N_STATEMENT(897);
5663         N_STATEMENT(898);
5664         N_STATEMENT(899);
5665         N_STATEMENT(900);
5666         N_STATEMENT(901);
5667         N_STATEMENT(902);
5668         N_STATEMENT(903);
5669         N_STATEMENT(904);
5670         N_STATEMENT(905);
5671         N_STATEMENT(906);
5672         N_STATEMENT(907);
5673         N_STATEMENT(908);
5674         N_STATEMENT(909);
5675         N_STATEMENT(910);
5676         N_STATEMENT(911);
5677         N_STATEMENT(912);
5678         N_STATEMENT(913);
5679         N_STATEMENT(914);
5680         N_STATEMENT(915);
5681         N_STATEMENT(916);
5682         N_STATEMENT(917);
5683         N_STATEMENT(918);
5684         N_STATEMENT(919);
5685         N_STATEMENT(920);
5686         N_STATEMENT(921);
5687         N_STATEMENT(922);
5688         N_STATEMENT(923);
5689         N_STATEMENT(924);
5690         N_STATEMENT(925);
5691         N_STATEMENT(926);
5692         N_STATEMENT(927);
5693         N_STATEMENT(928);
5694         N_STATEMENT(929);
5695         N_STATEMENT(930);
5696         N_STATEMENT(931);
5697         N_STATEMENT(932);
5698         N_STATEMENT(933);
5699         N_STATEMENT(934);
5700         N_STATEMENT(935);
5701         N_STATEMENT(936);
5702         N_STATEMENT(937);
5703         N_STATEMENT(938);
5704         N_STATEMENT(939);
5705         N_STATEMENT(940);
5706         N_STATEMENT(941);
5707         N_STATEMENT(942);
5708         N_STATEMENT(943);
5709         N_STATEMENT(944);
5710         N_STATEMENT(945);
5711         N_STATEMENT(946);
5712         N_STATEMENT(947);
5713         N_STATEMENT(948);
5714         N_STATEMENT(949);
5715         N_STATEMENT(950);
5716     }
5717     return am_error;          /* reached for labels 951..1023, which pass the range check but have no probe defined */
5718 }
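/*
 * The switch above enumerates one case per label because statically defined
 * tracing probes must be named at compile time; there is no way to select a
 * probe by a run-time integer, so every label value 0..950 gets its own
 * user_trace_n<label> probe and its own case arm.  (The exact probe names
 * exposed to dtrace/systemtap scripts depend on the provider definition,
 * which lives outside this file.)
 */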
5719 
5720 #endif /* HAVE_USE_DTRACE */
5721