1 /*
2  * %CopyrightBegin%
3  *
4  * Copyright Ericsson AB 2009-2020. All Rights Reserved.
5  *
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  *     http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  *
18  * %CopyrightEnd%
19  */
20 /* Erlang Native InterFace
21  */
22 
23 /*
24  * Environment contains a pointer to currently executing process.
25  * In the dirty case this pointer do however not point to the
26  * actual process structure of the executing process, but instead
27  * a "shadow process structure". This in order to be able to handle
28  * heap allocation without the need to acquire the main lock on
29  * the process.
30  *
31  * The dirty process is allowed to allocate on the heap without
32  * the main lock, i.e., incrementing htop, but is not allowed to
33  * modify mbuf, offheap, etc without the main lock. The dirty
34  * process moves mbuf list and offheap list of the shadow process
35  * structure into the real structure when the dirty nif call
36  * completes.
37  */
38 
39 
40 #ifdef HAVE_CONFIG_H
41 #  include "config.h"
42 #endif
43 
44 #include "erl_nif.h"
45 
46 #include "sys.h"
47 #include "global.h"
48 #include "erl_binary.h"
49 #include "bif.h"
50 #include "error.h"
51 #include "big.h"
52 #include "erl_map.h"
53 #include "beam_bp.h"
54 #include "erl_thr_progress.h"
55 #include "dtrace-wrapper.h"
56 #include "erl_process.h"
57 #include "erl_bif_unique.h"
58 #include "erl_utils.h"
59 #include "erl_io_queue.h"
60 #include "erl_proc_sig_queue.h"
61 #undef ERTS_WANT_NFUNC_SCHED_INTERNALS__
62 #define ERTS_WANT_NFUNC_SCHED_INTERNALS__
63 #include "erl_nfunc_sched.h"
64 #if defined(USE_DYNAMIC_TRACE) && (defined(USE_DTRACE) || defined(USE_SYSTEMTAP))
65 #define HAVE_USE_DTRACE 1
66 #endif
67 #include "jit/beam_asm.h"
68 
69 #include <limits.h>
70 #include <stddef.h> /* offsetof */
71 
72 /* Information about a loaded nif library.
73  * Each successful call to erlang:load_nif will allocate an instance of
74  * erl_module_nif. Two calls opening the same library will thus have the same
75  * 'handle'.
76  */
struct erl_module_nif {
    erts_refc_t refc;    /* References to this struct
                          * +1 erl_module_instance (loaded Erlang code)
                          * +1 "dlopen" (loaded native code)
                          * +1 scheduled load finisher
                          * +1 for each owned resource type
                          */
    erts_mtx_t load_mtx; /* protects load finish from unload */
    struct ErtsNifFinish_* finish;  /* pending load-finish state; guarded by load_mtx */
    ErtsThrPrgrLaterOp lop;         /* thread-progress later-op used for deferred cleanup */

    void* priv_data;          /* library private state, returned by enif_priv_data() */
    void* handle;             /* "dlopen" */
    struct enif_entry_t entry;
    erts_refc_t dynlib_refc;  /* References to loaded native code
                                 +1 erl_module_instance
                                 +1 for each owned resource type with callbacks
                                 +1 for each ongoing dirty NIF call
                               */
    Module* mod;    /* Can be NULL if purged and dynlib_refc > 0 */

    ErlNifFunc _funcs_copy_[1];  /* only used for old libs */
};
100 
101 typedef ERL_NIF_TERM (*NativeFunPtr)(ErlNifEnv*, int, const ERL_NIF_TERM[]);
102 
103 #ifdef DEBUG
104 #  define READONLY_CHECK
105 #  define ERTS_DBG_NIF_NOT_SCHED_MARKER ((void *) (UWord) 1)
106 #endif
107 #ifdef READONLY_CHECK
108 #  define ADD_READONLY_CHECK(ENV,PTR,SIZE) add_readonly_check(ENV,PTR,SIZE)
109 static void add_readonly_check(ErlNifEnv*, unsigned char* ptr, unsigned sz);
110 #else
111 #  define ADD_READONLY_CHECK(ENV,PTR,SIZE) ((void)0)
112 #endif
113 
114 #ifdef ERTS_NIF_ASSERT_IN_ENV
115 #  define ASSERT_IN_ENV(ENV, TERM, NR, TYPE) dbg_assert_in_env(ENV, TERM, NR, TYPE, __func__)
116 static void dbg_assert_in_env(ErlNifEnv*, Eterm term, int nr, const char* type, const char* func);
117 #  include "erl_gc.h"
118 #else
119 #  define ASSERT_IN_ENV(ENV, TERM, NR, TYPE)
120 #endif
121 
122 #ifdef DEBUG
123 static int is_offheap(const ErlOffHeap* off_heap);
124 #endif
125 
126 #ifdef USE_VM_PROBES
127 void dtrace_nifenv_str(ErlNifEnv *, char *);
128 #endif
129 
130 #define MIN_HEAP_FRAG_SZ 200
131 static Eterm* alloc_heap_heavy(ErlNifEnv* env, size_t need, Eterm* hp);
132 
133 static ERTS_INLINE int
is_scheduler(void)134 is_scheduler(void)
135 {
136     ErtsSchedulerData *esdp = erts_get_scheduler_data();
137     if (!esdp)
138 	return 0;
139     if (ERTS_SCHEDULER_IS_DIRTY(esdp))
140 	return -1;
141     return 1;
142 }
143 
/* Resolve the *real* currently-executing process behind `env` (never the
 * dirty shadow process) and/or the scheduler type of the calling thread.
 * Either out-parameter may be NULL if the caller does not need it.
 *
 * schedp: 1 = normal scheduler, -1 = dirty scheduler, 0 = no scheduler
 * (see is_scheduler()). c_pp is set to NULL for process independent
 * (message) environments. */
static ERTS_INLINE void
execution_state(ErlNifEnv *env, Process **c_pp, int *schedp)
{
    if (schedp)
	*schedp = is_scheduler();
    if (c_pp) {
	/* Phony procs of message environments carry ERTS_INVALID_PID. */
	if (!env || env->proc->common.id == ERTS_INVALID_PID)
	    *c_pp = NULL;
	else {
	    Process *c_p = env->proc;

	    if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)) {
		/* Ordinary call: the caller must hold the main lock. */
		ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
				   & ERTS_PROC_LOCK_MAIN);
	    }
	    else {
		/* Dirty NIF call: env->proc is the shadow process and the
		 * real process is linked through its `next` field. */
		c_p = env->proc->next;
		ASSERT(is_scheduler() < 0);
		ASSERT(c_p && env->proc->common.id == c_p->common.id);
	    }

	    *c_pp = c_p;

	    ASSERT(!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC));
	}
    }
}
171 
/* Allocate `need` words on the environment's cached heap area.
 * Fast path: bump env->hp. When the cached area is exhausted, fall back
 * to alloc_heap_heavy() which flushes state and grabs a heap fragment. */
static ERTS_INLINE Eterm* alloc_heap(ErlNifEnv* env, size_t need)
{
    Eterm* hp = env->hp;
    env->hp += need;
    if (env->hp <= env->hp_end) {
	return hp;
    }
    /* Overran hp_end; pass the pre-bump hp down so the heavy path can
     * restore it before doing a real allocation. */
    return alloc_heap_heavy(env, need, hp);
}
181 
/* Slow path of alloc_heap(): synchronize the cached pointers back into the
 * process / current heap fragment, then allocate via erts_heap_alloc()
 * (which creates a new heap fragment of at least MIN_HEAP_FRAG_SZ words)
 * and re-cache pointers into that fragment. */
static Eterm* alloc_heap_heavy(ErlNifEnv* env, size_t need, Eterm* hp)
{
    env->hp = hp;  /* undo the speculative bump done by alloc_heap() */
    if (env->heap_frag == NULL) {
	/* Still allocating directly on the process heap: flush hp. */
	ASSERT(HEAP_LIMIT(env->proc) == env->hp_end);
        ASSERT(env->hp + need > env->hp_end);
	HEAP_TOP(env->proc) = env->hp;
    }
    else {
	/* Allocating in a heap fragment: update its used size and the
	 * process' total mbuf size accordingly. */
	Uint usz = env->hp - env->heap_frag->mem;
	env->proc->mbuf_sz += usz - env->heap_frag->used_size;
	env->heap_frag->used_size = usz;
	ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
    }
    hp = erts_heap_alloc(env->proc, need, MIN_HEAP_FRAG_SZ);
    env->heap_frag = MBUF(env->proc);
    env->hp = hp + need;
    env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size;

    return hp;
}
203 
#if SIZEOF_LONG != ERTS_SIZEOF_ETERM
/* Make sure at least `may_need` words are available at env->hp without
 * moving env->hp itself: grow via the heavy path if needed and then move
 * hp back, leaving only the extra capacity behind. */
static ERTS_INLINE void ensure_heap(ErlNifEnv* env, size_t may_need)
{
    if (env->hp + may_need > env->hp_end) {
	alloc_heap_heavy(env, may_need, env->hp);
	env->hp -= may_need;
    }
}
#endif
213 
/* Initialize a process bound environment `env` for a NIF call executing in
 * process `p` on behalf of library `mod_nif`. `tracee` is non-NULL only
 * when the NIF runs in the context of an erl_tracer. Heap pointers are
 * cached from the process; erts_post_nif() flushes them back. */
void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
                  Process* tracee)
{
    env->mod_nif = mod_nif;
    env->proc = p;
    env->hp = HEAP_TOP(p);
    env->hp_end = HEAP_LIMIT(p);
    env->heap_frag = NULL;
    env->tmp_obj_list = NULL;
    env->exception_thrown = 0;
    env->tracee = tracee;

    ASSERT(p->common.id != ERTS_INVALID_PID);

#ifdef ERTS_NIF_ASSERT_IN_ENV
    env->dbg_disable_assert_in_env = 0;
#endif
#if defined(DEBUG) && defined(ERTS_DIRTY_SCHEDULERS)
    {
	ErtsSchedulerData *esdp = erts_get_scheduler_data();
	ASSERT(esdp);

	/* On a normal scheduler the process must be running (possibly in
	 * system state) and must not be flagged as dirty-running. */
	if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
	    erts_aint32_t state = erts_atomic32_read_nob(&p->state);

	    ASSERT(p->scheduler_data == esdp);
	    ASSERT((state & (ERTS_PSFLG_RUNNING
			     | ERTS_PSFLG_RUNNING_SYS))
		   && !(state & (ERTS_PSFLG_DIRTY_RUNNING
				 | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
	}
    }
#endif
}
248 
249 static void full_cache_env(ErlNifEnv *env);
250 static void cache_env(ErlNifEnv* env);
251 static void full_flush_env(ErlNifEnv *env);
252 static void flush_env(ErlNifEnv* env);
253 
254 /* Temporary object header, auto-deallocated when NIF returns or when
255  * independent environment is cleared.
256  *
257  * The payload can be accessed with &tmp_obj_ptr[1] but keep in mind that its
258  * first element must not require greater alignment than `next`. */
struct enif_tmp_obj_t {
    struct enif_tmp_obj_t* next;            /* singly-linked env->tmp_obj_list */
    void (*dtor)(struct enif_tmp_obj_t*);   /* must erts_free(allocator, this) */
    ErtsAlcType_t allocator;                /* TMP if proc bound, otherwise NIF */
    /*char data[];*/
};
265 
free_tmp_objs(ErlNifEnv * env)266 static ERTS_INLINE void free_tmp_objs(ErlNifEnv* env)
267 {
268     while (env->tmp_obj_list != NULL) {
269 	struct enif_tmp_obj_t* free_me = env->tmp_obj_list;
270 	env->tmp_obj_list = free_me->next;
271 	free_me->dtor(free_me);
272     }
273 }
274 
275 /* Whether the given environment is bound to a process and will be cleaned up
276  * when the NIF returns. It's safe to use temp_alloc for objects in
277  * env->tmp_obj_list when this is true. */
is_proc_bound(ErlNifEnv * env)278 static ERTS_INLINE int is_proc_bound(ErlNifEnv *env)
279 {
280     return env->mod_nif != NULL;
281 }
282 
283 /* Allocates and attaches an object to the given environment, running its
284  * destructor when the environment is cleared. To avoid temporary variables the
285  * address of the allocated object is returned instead of the enif_tmp_obj_t.
286  *
287  * The destructor *must* call `erts_free(tmp_obj->allocator, tmp_obj)` to free
288  * the object. If the destructor needs to refer to the allocated object its
289  * address will be &tmp_obj[1]. */
alloc_tmp_obj(ErlNifEnv * env,size_t size,void (* dtor)(struct enif_tmp_obj_t *))290 static ERTS_INLINE void *alloc_tmp_obj(ErlNifEnv *env, size_t size,
291                                        void (*dtor)(struct enif_tmp_obj_t*)) {
292     struct enif_tmp_obj_t *tmp_obj;
293     ErtsAlcType_t allocator;
294 
295     allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
296 
297     tmp_obj = erts_alloc(allocator, sizeof(struct enif_tmp_obj_t) + MAX(1, size));
298 
299     tmp_obj->next = env->tmp_obj_list;
300     tmp_obj->allocator = allocator;
301     tmp_obj->dtor = dtor;
302 
303     env->tmp_obj_list = tmp_obj;
304 
305     return (void*)&tmp_obj[1];
306 }
307 
308 /* Generic destructor for objects allocated through alloc_tmp_obj that don't
309  * care about their payload. */
/* Default dtor for alloc_tmp_obj() users with no extra cleanup needs:
 * just release the header + payload with the recorded allocator. */
static void tmp_alloc_dtor(struct enif_tmp_obj_t *tmp_obj)
{
    erts_free(tmp_obj->allocator, tmp_obj);
}
314 
/* Tear down a process bound environment after a NIF call: flush cached
 * heap pointers (and shadow-proc state for dirty calls) back into the
 * process, free temporary objects, and record whether the process is
 * exiting so the emulator loop can act on it. */
void erts_post_nif(ErlNifEnv* env)
{
    full_flush_env(env);
    free_tmp_objs(env);
    env->exiting = ERTS_PROC_IS_EXITING(env->proc);
}
321 
322 
323 /*
324  * Initialize a ErtsNativeFunc struct. Create it if needed and store it in the
325  * proc. The direct_fp function is what will be invoked by op_call_nif, and
326  * the indirect_fp function, if not NULL, is what the direct_fp function
327  * will call. If the allocated ErtsNativeFunc isn't enough to hold all of argv,
328  * allocate a larger one. Save 'current' and registers if first time this
329  * call is scheduled.
330  */
331 
332 static ERTS_INLINE ERL_NIF_TERM
schedule(ErlNifEnv * env,NativeFunPtr direct_fp,NativeFunPtr indirect_fp,Eterm mod,Eterm func_name,int argc,const ERL_NIF_TERM argv[])333 schedule(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
334 	 Eterm mod, Eterm func_name, int argc, const ERL_NIF_TERM argv[])
335 {
336     ErtsNativeFunc *ep;
337     Process *c_p, *dirty_shadow_proc;
338 
339     execution_state(env, &c_p, NULL);
340     ASSERT(c_p);
341 
342     if (c_p == env->proc)
343 	dirty_shadow_proc = NULL;
344     else
345 	dirty_shadow_proc = env->proc;
346 
347     ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
348 
349     ep = erts_nfunc_schedule(c_p, dirty_shadow_proc,
350 				  c_p->current,
351                                   cp_val(c_p->stop[0]),
352                              #ifdef BEAMASM
353 				  op_call_nif_WWW,
354                              #else
355                                   BeamOpCodeAddr(op_call_nif_WWW),
356                              #endif
357 				  direct_fp, indirect_fp,
358 				  mod, func_name,
359 				  argc, (const Eterm *) argv);
360     if (!ep->m) {
361 	/* First time this call is scheduled... */
362 	erts_refc_inc(&env->mod_nif->dynlib_refc, 2);
363 	ep->m = env->mod_nif;
364     }
365     return (ERL_NIF_TERM) THE_NON_VALUE;
366 }
367 
368 
369 static ERL_NIF_TERM dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
370 static ERL_NIF_TERM dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
371 
372 int
erts_call_dirty_nif(ErtsSchedulerData * esdp,Process * c_p,ErtsCodePtr I,Eterm * reg)373 erts_call_dirty_nif(ErtsSchedulerData *esdp,
374                     Process *c_p,
375                     ErtsCodePtr I,
376                     Eterm *reg)
377 {
378     int exiting;
379     ERL_NIF_TERM *argv = (ERL_NIF_TERM *) reg;
380     ErtsNativeFunc *nep = ERTS_I_BEAM_OP_TO_NFUNC(I);
381     const ErtsCodeMFA *codemfa = erts_code_to_codemfa(I);
382     NativeFunPtr dirty_nif = (NativeFunPtr) nep->trampoline.dfunc;
383     ErlNifEnv env;
384     ERL_NIF_TERM result;
385 #ifdef DEBUG
386     erts_aint32_t state = erts_atomic32_read_nob(&c_p->state);
387 
388     ASSERT(nep == ERTS_PROC_GET_NFUNC_TRAP_WRAPPER(c_p));
389 
390     ASSERT(!c_p->scheduler_data);
391     ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
392 	&& !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
393     ASSERT(esdp);
394 
395     nep->func = ERTS_DBG_NIF_NOT_SCHED_MARKER;
396 #endif
397 
398     erts_pre_nif(&env, c_p, nep->m, NULL);
399 
400     env.proc = erts_make_dirty_shadow_proc(esdp, c_p);
401 
402     env.proc->freason = EXC_NULL;
403     env.proc->fvalue = NIL;
404     env.proc->ftrace = NIL;
405     env.proc->i = c_p->i;
406 
407     ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)));
408 
409     erts_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
410 						   | ERTS_PSFLG_DIRTY_IO_PROC));
411 
412     ASSERT(esdp->current_nif == NULL);
413     esdp->current_nif = &env;
414 
415     erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
416 
417     result = (*dirty_nif)(&env, codemfa->arity, argv); /* Call dirty NIF */
418 
419     erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
420 
421     ASSERT(esdp->current_nif == &env);
422     esdp->current_nif = NULL;
423 
424     ASSERT(env.proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
425     ASSERT(env.proc->next == c_p);
426 
427     exiting = ERTS_PROC_IS_EXITING(c_p);
428 
429     if (!exiting) {
430 	if (env.exception_thrown) {
431 	schedule_exception:
432 	    schedule(&env, dirty_nif_exception, NULL,
433 		     am_erts_internal, am_dirty_nif_exception,
434 		     1, &env.proc->fvalue);
435 	}
436 	else if (is_value(result)) {
437 	    schedule(&env, dirty_nif_finalizer, NULL,
438 		     am_erts_internal, am_dirty_nif_finalizer,
439 		     1, &result);
440 	}
441 	else if (env.proc->freason != TRAP) { /* user returned garbage... */
442 	    ERTS_DECL_AM(badreturn);
443 	    (void) enif_raise_exception(&env, AM_badreturn);
444 	    goto schedule_exception;
445 	}
446 	else {
447 	    /* Rescheduled by dirty NIF call... */
448 	    ASSERT(nep->func != ERTS_DBG_NIF_NOT_SCHED_MARKER);
449 	}
450 	c_p->i = env.proc->i;
451 	c_p->arity = env.proc->arity;
452     }
453 
454 #ifdef DEBUG
455     if (nep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER)
456 	nep->func = NULL;
457 #endif
458 
459     full_flush_env(&env);
460     free_tmp_objs(&env);
461 
462     return exiting;
463 }
464 
465 
/* Flush the environment's cached heap pointers, and for dirty NIF calls
 * also flush the shadow process struct back into the real process. */
static void full_flush_env(ErlNifEnv* env)
{
    flush_env(env);

    if (!(env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
        return;

    /* Dirty nif call using shadow process struct */
    erts_flush_dirty_shadow_proc(env->proc);
}
473 
/* Inverse of full_flush_env(): re-cache heap pointers from the process,
 * first re-syncing the shadow process struct for dirty NIF calls. */
static void full_cache_env(ErlNifEnv* env)
{
    if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
	erts_cache_dirty_shadow_proc(env->proc);
        /*
         * If shadow proc had heap fragments when flushed
         * those have now been moved to the real proc.
         * Ensure heap pointers do not point into a heap
         * fragment on real proc...
         */
        ASSERT(!env->proc->mbuf);
	env->hp_end = HEAP_LIMIT(env->proc);
	env->hp = HEAP_TOP(env->proc);
    }
    cache_env(env);
}
490 
491 /* Flush out our cached heap pointers to allow an ordinary HAlloc
492 */
/* Flush out our cached heap pointers to allow an ordinary HAlloc
*/
static void flush_env(ErlNifEnv* env)
{
    if (env->heap_frag == NULL) {
	/* Allocating directly on the process heap: sync HEAP_TOP. */
	ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
	ASSERT(env->hp >= HEAP_TOP(env->proc));
	ASSERT(env->hp <= HEAP_LIMIT(env->proc));
	HEAP_TOP(env->proc) = env->hp;
    }
    else {
	/* Allocating in a heap fragment: sync its used size and the
	 * process' accumulated mbuf size. */
	Uint usz;
	ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
	ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
	usz = env->hp - env->heap_frag->mem;
	env->proc->mbuf_sz += usz - env->heap_frag->used_size;
	env->heap_frag->used_size = usz;
	ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
    }
}
511 
512 /* Restore cached heap pointers to allow alloc_heap again.
513 */
/* Restore cached heap pointers to allow alloc_heap again.
*/
static void cache_env(ErlNifEnv* env)
{
    env->heap_frag = MBUF(env->proc);
    if (env->heap_frag == NULL) {
	/* No heap fragments: continue at the process heap top. */
	ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
	ASSERT(env->hp <= HEAP_TOP(env->proc));
	ASSERT(env->hp <= HEAP_LIMIT(env->proc));
	env->hp = HEAP_TOP(env->proc);
    }
    else {
	/* Continue in the newest heap fragment. */
	env->hp = env->heap_frag->mem + env->heap_frag->used_size;
	env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size;
    }
}
528 
/* Return the private state the library installed for this environment. */
void* enif_priv_data(ErlNifEnv* env)
{
    struct erl_module_nif* lib = env->mod_nif;
    return lib->priv_data;
}
533 
/* Allocate `size` bytes from the NIF allocator; returns NULL on failure
 * ("failure not fatal" variant). */
void* enif_alloc(size_t size)
{
    void* ptr = erts_alloc_fnf(ERTS_ALC_T_NIF, (Uint) size);
    return ptr;
}
538 
/* Resize a NIF allocation; returns NULL on failure (original block is then
 * still valid, per the "failure not fatal" allocator contract). */
void* enif_realloc(void* ptr, size_t size)
{
    void* new_ptr = erts_realloc_fnf(ERTS_ALC_T_NIF, ptr, size);
    return new_ptr;
}
543 
enif_free(void * ptr)544 void enif_free(void* ptr)
545 {
546     erts_free(ERTS_ALC_T_NIF, ptr);
547 }
548 
/* A process independent environment: the ErlNifEnv handed to the user plus
 * a phony process struct that owns the env's heap fragments/off-heap. */
struct enif_msg_environment_t
{
    ErlNifEnv env;       /* must stay first: cast back from ErlNifEnv* */
    Process phony_proc;  /* fake process; common.id == ERTS_INVALID_PID */
};
554 
555 #if S_REDZONE == 0
556 /*
557  * Arrays of size zero are not allowed (although some compilers do
558  * allow it). Be sure to set the array size to 1 if there is no
559  * redzone to ensure that the code can be compiled with any compiler.
560  */
561 static Eterm phony_heap[1];
562 #else
563 static Eterm phony_heap[S_REDZONE];
564 #endif
565 
/* Initialize a process independent (message) environment: zero the phony
 * process, point all heap pointers at the shared empty phony_heap, and
 * mark the phony proc with ERTS_INVALID_PID so execution_state() treats
 * the env as process-less. */
static ERTS_INLINE void
setup_nif_env(struct enif_msg_environment_t* msg_env,
              struct erl_module_nif* mod,
              Process* tracee)
{
    /* hp == hp_end forces the first allocation through the heavy path,
     * which creates a heap fragment owned by the phony proc. */
    msg_env->env.hp = &phony_heap[0];
    msg_env->env.hp_end = &phony_heap[0];
    msg_env->env.heap_frag = NULL;
    msg_env->env.mod_nif = mod;
    msg_env->env.tmp_obj_list = NULL;
    msg_env->env.proc = &msg_env->phony_proc;
    msg_env->env.exception_thrown = 0;
    sys_memset(&msg_env->phony_proc, 0, sizeof(Process));
    HEAP_START(&msg_env->phony_proc) = &phony_heap[0];
    HEAP_TOP(&msg_env->phony_proc) = &phony_heap[0];
    STACK_TOP(&msg_env->phony_proc) = &phony_heap[S_REDZONE];
    STACK_START(&msg_env->phony_proc) = &phony_heap[S_REDZONE];
    MBUF(&msg_env->phony_proc) = NULL;
    msg_env->phony_proc.common.id = ERTS_INVALID_PID;
    msg_env->env.tracee = tracee;

#ifdef FORCE_HEAP_FRAGS
    msg_env->phony_proc.space_verified = 0;
    msg_env->phony_proc.space_verified_from = NULL;
#endif
#ifdef ERTS_NIF_ASSERT_IN_ENV
    msg_env->env.dbg_disable_assert_in_env = 0;
#endif
}
595 
enif_alloc_env(void)596 ErlNifEnv* enif_alloc_env(void)
597 {
598     struct enif_msg_environment_t* msg_env =
599 	erts_alloc_fnf(ERTS_ALC_T_NIF, sizeof(struct enif_msg_environment_t));
600     setup_nif_env(msg_env, NULL, NULL);
601     return &msg_env->env;
602 }
/* Destroy a process independent environment: release its heap fragments,
 * off-heap data and tmp objects, then free the container struct (env is
 * the first member of enif_msg_environment_t, so freeing env frees it). */
void enif_free_env(ErlNifEnv* env)
{
    enif_clear_env(env);
    erts_free(ERTS_ALC_T_NIF, env);
}
608 
/* Set up a process-less environment for invoking a NIF callback (e.g. a
 * resource destructor or tracer callback) outside any Erlang process. */
static ERTS_INLINE void pre_nif_noproc(struct enif_msg_environment_t* msg_env,
                                       struct erl_module_nif* mod,
                                       Process* tracee)
{
    setup_nif_env(msg_env, mod, tracee);
}
615 
/* Tear down an environment set up with pre_nif_noproc(). */
static ERTS_INLINE void post_nif_noproc(struct enif_msg_environment_t* msg_env)
{
    enif_clear_env(&msg_env->env);
}
620 
clear_offheap(ErlOffHeap * oh)621 static ERTS_INLINE void clear_offheap(ErlOffHeap* oh)
622 {
623     oh->first = NULL;
624     oh->overhead = 0;
625 }
626 
/* Empty a process independent environment so it can be reused: free all
 * temporary objects, clean up and drop any heap fragments/off-heap data
 * owned by the phony process, and reset the heap pointers. */
void enif_clear_env(ErlNifEnv* env)
{
    struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)env;
    Process* p = &menv->phony_proc;
    ASSERT(p == menv->env.proc);
    ASSERT(p->common.id == ERTS_INVALID_PID);
    ASSERT(MBUF(p) == menv->env.heap_frag);

    free_tmp_objs(env);

    if (MBUF(p) != NULL) {
	/* Off-heap terms must be cleaned before their fragments go away. */
	erts_cleanup_offheap(&MSO(p));
	clear_offheap(&MSO(p));
	free_message_buffer(MBUF(p));
	MBUF(p) = NULL;
	menv->env.heap_frag = NULL;
    }

    ASSERT(HEAP_TOP(p) == HEAP_END(p) - S_REDZONE);
    /* hp == hp_end again, so the next allocation takes the heavy path. */
    menv->env.hp = menv->env.hp_end = HEAP_TOP(p);

    ASSERT(!is_offheap(&MSO(p)));
}
650 
651 #ifdef DEBUG
652 static int enif_send_delay = 0;
653 #define ERTS_FORCE_ENIF_SEND_DELAY() (enif_send_delay++ % 32 == 0)
654 #else
655 #ifdef ERTS_PROC_LOCK_OWN_IMPL
656 #define ERTS_FORCE_ENIF_SEND_DELAY() 0
657 #else
658 /*
659  * We always schedule messages if we do not use our own
660  * process lock implementation, as if we try to do a trylock on
661  * a lock that might already be locked by the same thread.
662  * And what happens then with different mutex implementations
663  * is not always guaranteed.
664  */
665 #define ERTS_FORCE_ENIF_SEND_DELAY() 1
666 #endif
667 #endif
668 
/* Deliver all queued trace messages of `c_p` to their receivers and prune
 * emptied per-receiver queues. Caller must hold (only) the main lock;
 * c_p_locks describes the locks held on c_p in case a receiver is c_p
 * itself. Returns an estimate of reductions consumed. */
int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
{
    ErlTraceMessageQueue *msgq, **last_msgq;
    int reds = 0;

    /* Only one thread at a time is allowed to flush trace messages,
       so we require the main lock to be held when doing the flush */
    ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);

    erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);

    msgq = c_p->trace_msg_q;

    if (!msgq)
        goto error;

    do {
        Process* rp;
        ErtsProcLocks rp_locks;
        ErtsMessage *first, **last;
        Uint len;

        /* Detach this queue's contents while holding the TRACE lock,
         * then release it for the actual (potentially blocking) send. */
        first = msgq->first;
        last = msgq->last;
        len = msgq->len;
        msgq->first = NULL;
        msgq->last = &msgq->first;
        msgq->len = 0;
        erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);

        ASSERT(len != 0);

        rp = erts_proc_lookup(msgq->receiver);
        if (rp) {
            rp_locks = 0;
            /* Sending to ourselves: reuse the locks we already hold. */
            if (rp->common.id == c_p->common.id)
                rp_locks = c_p_locks;
            erts_queue_proc_messages(c_p, rp, rp_locks, first, last, len);
            if (rp->common.id == c_p->common.id)
                rp_locks &= ~c_p_locks;
            if (rp_locks)
                erts_proc_unlock(rp, rp_locks);
            reds += len;
        } else {
            /* Receiver is gone; drop the detached messages. */
            erts_cleanup_messages(first);
        }
        reds += 1;
        erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
        msgq = msgq->next;
    } while (msgq);

    /* Second pass (TRACE lock held): unlink and free queues that are
     * still empty after the flush. */
    last_msgq = &c_p->trace_msg_q;

    while (*last_msgq) {
        msgq = *last_msgq;
        if (msgq->len == 0) {
            *last_msgq = msgq->next;
            erts_free(ERTS_ALC_T_TRACE_MSG_QUEUE, msgq);
        } else {
            last_msgq = &msgq->next;
        }
    }

error:
    erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);

    return reds;
}
737 
738 /** @brief Create a message with the content of process independent \c msg_env.
739  *  Invalidates \c msg_env.
740  */
/** @brief Create a message with the content of process independent \c msg_env.
 *  Invalidates \c msg_env.
 */
ErtsMessage* erts_create_message_from_nif_env(ErlNifEnv* msg_env)
{
    struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
    ErtsMessage* mp;

    flush_env(msg_env);
    mp = erts_alloc_message(0, NULL);
    /* Steal the environment's heap fragment chain for the message. */
    mp->data.heap_frag = menv->env.heap_frag;
    ASSERT(mp->data.heap_frag == MBUF(&menv->phony_proc));
    if (mp->data.heap_frag != NULL) {
        /* Move all offheap's from phony proc to the first fragment.
           Quick and dirty... */
        ASSERT(!is_offheap(&mp->data.heap_frag->off_heap));
        mp->data.heap_frag->off_heap = MSO(&menv->phony_proc);
        clear_offheap(&MSO(&menv->phony_proc));
        menv->env.heap_frag = NULL;
        MBUF(&menv->phony_proc) = NULL;
    }
    return mp;
}
761 
/* Deep-copy src_term onto dst_env's heap. If cpy_szp is non-NULL the
 * copied size in words is *added* to it (used for reduction accounting
 * by enif_send()). */
static ERTS_INLINE ERL_NIF_TERM make_copy(ErlNifEnv* dst_env,
                                          ERL_NIF_TERM src_term,
                                          Uint *cpy_szp)
{
    Uint sz;
    Eterm* hp;
    /*
     * No preserved sharing allowed as long as literals are also preserved.
     * Process independent environment can not be reached by purge.
     */
    sz = size_object(src_term);
    if (cpy_szp)
        *cpy_szp += sz;
    hp = alloc_heap(dst_env, sz);
    return copy_struct(src_term, sz, &hp, &MSO(dst_env->proc));
}
778 
enif_send(ErlNifEnv * env,const ErlNifPid * to_pid,ErlNifEnv * msg_env,ERL_NIF_TERM msg)779 int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
780 	      ErlNifEnv* msg_env, ERL_NIF_TERM msg)
781 {
782     struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
783     ErtsProcLocks rp_locks = 0;
784     ErtsProcLocks lc_locks = 0;
785     Process* rp;
786     Process* c_p;
787     ErtsMessage *mp;
788     Eterm from;
789     Eterm receiver = to_pid->pid;
790     int scheduler;
791     Uint copy_sz = 0;
792 
793     execution_state(env, &c_p, &scheduler);
794 
795 
796     if (scheduler > 0) { /* Normal scheduler */
797 	rp = erts_proc_lookup(receiver);
798 	if (!rp)
799 	    return 0;
800     }
801     else {
802 	if (c_p) {
803 	    ASSERT(scheduler < 0); /* Dirty scheduler */
804 	    if (ERTS_PROC_IS_EXITING(c_p))
805 		return 0;
806 
807 	    if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
808 		erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
809 	    }
810 	}
811 
812 	rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
813 			       receiver, rp_locks,
814 			       ERTS_P2P_FLG_INC_REFC);
815 	if (!rp) {
816 	    if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
817 		erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
818 	    return 0;
819 	}
820     }
821 
822     if (c_p == rp)
823 	rp_locks = ERTS_PROC_LOCK_MAIN;
824 
825     if (menv) {
826         Eterm token = c_p ? SEQ_TRACE_TOKEN(c_p) : am_undefined;
827         if (token != NIL && token != am_undefined) {
828             /* This code is copied from erts_send_message */
829             Eterm stoken = SEQ_TRACE_TOKEN(c_p);
830 #ifdef USE_VM_PROBES
831             DTRACE_CHARBUF(sender_name, 64);
832             DTRACE_CHARBUF(receiver_name, 64);
833             Sint tok_label = 0;
834             Sint tok_lastcnt = 0;
835             Sint tok_serial = 0;
836             Eterm utag = NIL;
837             *sender_name = *receiver_name = '\0';
838             if (DTRACE_ENABLED(message_send)) {
839                 erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
840                               "%T", c_p->common.id);
841                 erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
842                               "%T", rp->common.id);
843             }
844 #endif
845             if (have_seqtrace(stoken)) {
846                 seq_trace_update_serial(c_p);
847                 seq_trace_output(stoken, msg, SEQ_TRACE_SEND,
848                                  rp->common.id, c_p);
849             }
850 #ifdef USE_VM_PROBES
851             if (!(DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING)) {
852                 stoken = NIL;
853             }
854 #endif
855             token = make_copy(msg_env, stoken, &copy_sz);
856 
857 #ifdef USE_VM_PROBES
858             if (DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING) {
859                 if (is_immed(DT_UTAG(c_p)))
860                     utag = DT_UTAG(c_p);
861                 else
862                     utag = make_copy(msg_env, DT_UTAG(c_p), &copy_sz);
863             }
864             if (DTRACE_ENABLED(message_send)) {
865                 if (have_seqtrace(stoken)) {
866                     tok_label = SEQ_TRACE_T_DTRACE_LABEL(stoken);
867                     tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(stoken));
868                     tok_serial = signed_val(SEQ_TRACE_T_SERIAL(stoken));
869                 }
870                 DTRACE6(message_send, sender_name, receiver_name,
871                         size_object(msg), tok_label, tok_lastcnt, tok_serial);
872             }
873 #endif
874         }
875         mp = erts_create_message_from_nif_env(msg_env);
876         ERL_MESSAGE_TOKEN(mp) = token;
877     } else {
878         erts_literal_area_t litarea;
879 	ErlOffHeap *ohp;
880         Eterm *hp;
881         Uint sz;
882         INITIALIZE_LITERAL_PURGE_AREA(litarea);
883         sz = size_object_litopt(msg, &litarea);
884         copy_sz += sz;
885 	if (c_p && !env->tracee) {
886 	    full_flush_env(env);
887 	    mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
888 	    full_cache_env(env);
889 	}
890 	else {
891 	    erts_aint_t state = erts_atomic32_read_nob(&rp->state);
892 	    if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
893 		mp = erts_alloc_message(sz, &hp);
894 		ohp = sz == 0 ? NULL : &mp->hfrag.off_heap;
895 	    }
896 	    else {
897 		ErlHeapFragment *bp = new_message_buffer(sz);
898 		mp = erts_alloc_message(0, NULL);
899 		mp->data.heap_frag = bp;
900 		hp = bp->mem;
901 		ohp = &bp->off_heap;
902 	    }
903 	}
904         ERL_MESSAGE_TOKEN(mp) = am_undefined;
905         msg = copy_struct_litopt(msg, sz, &hp, ohp, &litarea);
906     }
907 
908     from = c_p ? c_p->common.id : am_undefined;
909 
910     if (!env || !env->tracee) {
911         /* This clause is taken when enif_send is called in a nif
912            that is not a erl_tracer nif. */
913         if (c_p) {
914             ASSERT(env);
915             if (IS_TRACED_FL(c_p, F_TRACE_SEND)) {
916                 full_flush_env(env);
917                 trace_send(c_p, receiver, msg);
918                 full_cache_env(env);
919             }
920             if (scheduler > 0 && copy_sz > ERTS_MSG_COPY_WORDS_PER_REDUCTION) {
921                 Uint reds = copy_sz / ERTS_MSG_COPY_WORDS_PER_REDUCTION;
922                 if (reds > CONTEXT_REDS)
923                     reds = CONTEXT_REDS;
924                 BUMP_REDS(c_p, (int) reds);
925             }
926         }
927     }
928     else {
929         /* This clause is taken when the nif is called in the context
930            of a traced process. We do not know which locks we have
931            so we have to do a try lock and if that fails we enqueue
932            the message in a special trace message output queue of the
933            tracee */
934         ErlTraceMessageQueue *msgq;
935         Process *t_p = env->tracee;
936 
937         erts_proc_lock(t_p, ERTS_PROC_LOCK_TRACE);
938 
939         msgq = t_p->trace_msg_q;
940 
941         while (msgq != NULL) {
942             if (msgq->receiver == receiver) {
943                 break;
944             }
945             msgq = msgq->next;
946         }
947 
948 #ifdef ERTS_ENABLE_LOCK_CHECK
949         lc_locks = erts_proc_lc_my_proc_locks(rp);
950         rp_locks |= lc_locks;
951 #endif
952         if (ERTS_FORCE_ENIF_SEND_DELAY() || msgq ||
953             rp_locks & ERTS_PROC_LOCK_MSGQ ||
954             erts_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
955 
956             ERL_MESSAGE_TERM(mp) = msg;
957             ERL_MESSAGE_FROM(mp) = from;
958             ERL_MESSAGE_TOKEN(mp) = am_undefined;
959 
960             if (!msgq) {
961                 msgq = erts_alloc(ERTS_ALC_T_TRACE_MSG_QUEUE,
962                                   sizeof(ErlTraceMessageQueue));
963                 msgq->receiver = receiver;
964                 msgq->first = mp;
965                 msgq->last = &mp->next;
966                 msgq->len = 1;
967 
968                 /* Insert in linked list */
969                 msgq->next = t_p->trace_msg_q;
970                 t_p->trace_msg_q = msgq;
971 
972                 erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
973 
974 		erts_schedule_flush_trace_messages(t_p, 0);
975             } else {
976                 msgq->len++;
977                 *msgq->last = mp;
978                 msgq->last = &mp->next;
979                 erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
980             }
981             goto done;
982         } else {
983             erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
984             rp_locks &= ~ERTS_PROC_LOCK_TRACE;
985             rp_locks |= ERTS_PROC_LOCK_MSGQ;
986         }
987     }
988 
989     if (c_p)
990         erts_queue_proc_message(c_p, rp, rp_locks, mp, msg);
991     else
992         erts_queue_message(rp, rp_locks, mp, msg, from);
993 
994 done:
995 
996     if (c_p == rp)
997 	rp_locks &= ~ERTS_PROC_LOCK_MAIN;
998     if (rp_locks & ~lc_locks)
999 	erts_proc_unlock(rp, rp_locks & ~lc_locks);
1000     if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
1001 	erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
1002     if (scheduler <= 0)
1003 	erts_proc_dec_refc(rp);
1004 
1005     return 1;
1006 }
1007 
1008 int
enif_port_command(ErlNifEnv * env,const ErlNifPort * to_port,ErlNifEnv * msg_env,ERL_NIF_TERM msg)1009 enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port,
1010                   ErlNifEnv *msg_env, ERL_NIF_TERM msg)
1011 {
1012     int iflags = (erts_port_synchronous_ops
1013 		  ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
1014 		  : ERTS_PORT_SFLGS_INVALID_LOOKUP);
1015     int scheduler;
1016     Process *c_p;
1017     Port *prt;
1018     int res;
1019 
1020     if (!env)
1021 	erts_exit(ERTS_ABORT_EXIT, "enif_port_command: env == NULL");
1022 
1023     execution_state(env, &c_p, &scheduler);
1024 
1025     if (!c_p)
1026 	c_p = env->proc;
1027 
1028     if (scheduler > 0)
1029 	prt = erts_port_lookup(to_port->port_id, iflags);
1030     else {
1031 	if (ERTS_PROC_IS_EXITING(c_p))
1032 	    return 0;
1033 	prt = erts_thr_port_lookup(to_port->port_id, iflags);
1034     }
1035 
1036     if (!prt)
1037 	res = 0;
1038     else
1039         res = erts_port_output_async(prt, c_p->common.id, msg);
1040 
1041     if (scheduler <= 0)
1042 	erts_port_dec_refc(prt);
1043 
1044     return res;
1045 }
1046 
/*
 * Common helper for enif_whereis_pid()/enif_whereis_port(): look up a
 * registered name and return the registered pid/port id (or 0/THE_NON_VALUE
 * style failure from erts_whereis_name_to_id()).
 *
 *  env must be the caller's environment in a scheduler or NULL in a
 *      non-scheduler thread.
 *  name must be an atom - anything else will just waste time.
 */
static Eterm call_whereis(ErlNifEnv *env, Eterm name)
{
    Process *c_p;
    Eterm res;
    int scheduler;

    execution_state(env, &c_p, &scheduler);
    ASSERT(scheduler || !c_p);  /* non-scheduler threads have no process */

    if (scheduler < 0) {
        /* dirty scheduler */
        if (ERTS_PROC_IS_EXITING(c_p))
            return 0;

        if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
            c_p = NULL; /* as we don't have main lock */
    }


    if (c_p) {
         /* main lock may be released below and c_p->htop updated by others */
        flush_env(env);
    }
    res = erts_whereis_name_to_id(c_p, name);
    if (c_p)
        cache_env(env);  /* re-read heap pointers after possible GC/updates */

    return res;
}
1081 
/* Resolve a registered name to a local process id.
 * Returns 1 and fills in *pid when 'name' is an atom currently registered
 * to a process; returns 0 otherwise. */
int enif_whereis_pid(ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPid *pid)
{
    Eterm whereis_result;

    if (!is_atom(name))
        return 0;

    whereis_result = call_whereis(env, name);
    /* enif_get_local_pid() rejects anything that is not an internal pid */
    return enif_get_local_pid(env, whereis_result, pid);
}
1093 
/* Resolve a registered name to a local port id.
 * Returns 1 and fills in *port when 'name' is an atom currently registered
 * to a port; returns 0 otherwise. */
int enif_whereis_port(ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPort *port)
{
    Eterm whereis_result;

    if (!is_atom(name))
        return 0;

    whereis_result = call_whereis(env, name);
    /* enif_get_local_port() rejects anything that is not an internal port */
    return enif_get_local_port(env, whereis_result, port);
}
1105 
/* Deep-copy 'src_term' onto the heap of 'dst_env'. */
ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
{
    return make_copy(dst_env, src_term, NULL);
}
1110 
1111 #ifdef DEBUG
/* Debug-only helper: true if the off-heap list is non-empty. */
static int is_offheap(const ErlOffHeap* oh)
{
    return oh->first != NULL;
}
1116 #endif
1117 
/* Fill in 'pid' with the id of the calling process.
 * Returns NULL when the environment's process has no valid id
 * (ERTS_INVALID_PID), otherwise returns 'pid'. */
ErlNifPid* enif_self(ErlNifEnv* caller_env, ErlNifPid* pid)
{
    if (caller_env->proc->common.id == ERTS_INVALID_PID)
        return NULL;
    pid->pid = caller_env->proc->common.id;
    return pid;
}
1125 
/* Extract a node-local pid from 'term'; returns 1 on success, 0 if the
 * term is not an internal pid. */
int enif_get_local_pid(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPid* pid)
{
    if (!is_internal_pid(term))
        return 0;
    pid->pid = term;
    return 1;
}
1134 
/* Mark 'pid' as undefined (the atom 'undefined' is used as sentinel). */
void enif_set_pid_undefined(ErlNifPid* pid)
{
    pid->pid = am_undefined;
}
1139 
/* True if 'pid' was set with enif_set_pid_undefined(). */
int enif_is_pid_undefined(const ErlNifPid* pid)
{
    /* The field must hold either the sentinel or a real internal pid */
    ASSERT(pid->pid == am_undefined || is_internal_pid(pid->pid));
    return pid->pid == am_undefined;
}
1145 
/* Extract a node-local port id from 'term'; returns 1 on success, 0 if
 * the term is not an internal port. */
int enif_get_local_port(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPort* port)
{
    if (!is_internal_port(term))
        return 0;
    port->port_id = term;
    return 1;
}
1154 
/* Type test: atom. */
int enif_is_atom(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_atom(term);
}
1159 
/* Type test: binary. A bitstring whose size is not a whole number of
 * bytes does not count as a binary. */
int enif_is_binary(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_binary(term) && (binary_bitsize(term) % 8 == 0);
}
1164 
/* Type test: the empty list []. */
int enif_is_empty_list(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_nil(term);
}
1169 
/* Type test: fun. */
int enif_is_fun(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_fun(term);
}
1174 
/* Type test: pid (local or external). */
int enif_is_pid(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_pid(term);
}
1179 
/* Type test: port (local or external). */
int enif_is_port(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_port(term);
}
1184 
/* Type test: reference. */
int enif_is_ref(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_ref(term);
}
1189 
/* Type test: tuple. */
int enif_is_tuple(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_tuple(term);
}
1194 
/* Type test: list. The empty list [] counts as a list here. */
int enif_is_list(ErlNifEnv* env, ERL_NIF_TERM term)
{
    if (is_nil(term))
        return 1;
    return is_list(term);
}
1199 
/* True if 'term' is the sentinel returned by enif_raise_exception()/
 * enif_make_badarg() in this environment. */
int enif_is_exception(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return env->exception_thrown && term == THE_NON_VALUE;
}
1204 
/* Type test: number (integer or float). */
int enif_is_number(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_number(term);
}
1209 
/* Map an arbitrary term to its coarse ErlNifTermType category.
 * Aborts on internal-only term representations that should never reach
 * a NIF (e.g. binary match contexts). */
ErlNifTermType enif_term_type(ErlNifEnv* env, ERL_NIF_TERM term) {
    (void)env;

    switch (tag_val_def(term)) {
    case ATOM_DEF:
        return ERL_NIF_TERM_TYPE_ATOM;
    case BINARY_DEF:
        return ERL_NIF_TERM_TYPE_BITSTRING;
    case FLOAT_DEF:
        return ERL_NIF_TERM_TYPE_FLOAT;
    case EXPORT_DEF:
    case FUN_DEF:
        return ERL_NIF_TERM_TYPE_FUN;
    case BIG_DEF:
    case SMALL_DEF:
        return ERL_NIF_TERM_TYPE_INTEGER;
    case LIST_DEF:
    case NIL_DEF:
        return ERL_NIF_TERM_TYPE_LIST;
    case MAP_DEF:
        return ERL_NIF_TERM_TYPE_MAP;
    case EXTERNAL_PID_DEF:
    case PID_DEF:
        return ERL_NIF_TERM_TYPE_PID;
    case EXTERNAL_PORT_DEF:
    case PORT_DEF:
        return ERL_NIF_TERM_TYPE_PORT;
    case EXTERNAL_REF_DEF:
    case REF_DEF:
        return ERL_NIF_TERM_TYPE_REFERENCE;
    case TUPLE_DEF:
        return ERL_NIF_TERM_TYPE_TUPLE;
    default:
        /* tag_val_def() aborts on its own when passed complete garbage, but
         * it's possible that the user has given us garbage that just happens
         * to match something that tag_val_def() accepts but we don't, like
         * binary match contexts. */
        ERTS_INTERNAL_ERROR("Invalid term passed to enif_term_type");
    }
}
1250 
/* Destructor for the temporary aligned copy created by
 * enif_inspect_binary(); runs when the environment is cleared. */
static void aligned_binary_dtor(struct enif_tmp_obj_t* obj)
{
    erts_free_aligned_binary_bytes_extra((byte*)obj, obj->allocator);
}
1255 
/* Give the caller a byte-aligned, read-only view of a binary term.
 * If the binary is not byte-aligned, a temporary aligned copy is made
 * and attached to env->tmp_obj_list so it is freed with the env.
 * Returns 1 on success, 0 if 'bin_term' is not inspectable. */
int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
{
    ErtsAlcType_t allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
    /* erts_get_aligned_binary_bytes_extra() reports any temporary buffer
     * through a byte** out-parameter; the same pointer doubles as the
     * enif_tmp_obj_t header, hence the union. */
    union {
	struct enif_tmp_obj_t* tmp;
	byte* raw_ptr;
    }u;

    if (is_binary(bin_term)) {
        ProcBin *pb = (ProcBin*) binary_val(bin_term);
        if (pb->thing_word == HEADER_SUB_BIN) {
            ErlSubBin* sb = (ErlSubBin*) pb;
            pb = (ProcBin*) binary_val(sb->orig);
        }
        /* Freeze a writable binary before handing out a raw data pointer */
        if (pb->thing_word == HEADER_PROC_BIN && pb->flags)
            erts_emasculate_writable_binary(pb);
    }
    u.tmp = NULL;
    bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, allocator,
						    sizeof(struct enif_tmp_obj_t));
    if (bin->data == NULL) {
	return 0;
    }
    /* u.tmp != NULL means an aligned copy was allocated; register its dtor */
    if (u.tmp != NULL) {
	u.tmp->allocator = allocator;
	u.tmp->next = env->tmp_obj_list;
	u.tmp->dtor = &aligned_binary_dtor;
	env->tmp_obj_list = u.tmp;
    }
    bin->size = binary_size(bin_term);
    bin->ref_bin = NULL;
    ADD_READONLY_CHECK(env, bin->data, bin->size);
    return 1;
}
1290 
/* Flatten an iolist into a temporary read-only buffer.
 * Plain binaries are delegated to enif_inspect_binary(); [] yields an
 * empty view. Returns 1 on success, 0 if 'term' is not a valid iolist. */
int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
{
    ErlDrvSizeT sz;
    if (is_binary(term)) {
	return enif_inspect_binary(env,term,bin);
    }
    if (is_nil(term)) {
	bin->data = (unsigned char*) &bin->data; /* dummy non-NULL */
	bin->size = 0;
	bin->ref_bin = NULL;
	return 1;
    }
    /* erts_iolist_size() returns non-zero on invalid iolists */
    if (erts_iolist_size(term, &sz)) {
	return 0;
    }

    /* Temporary buffer is freed when the environment is cleared */
    bin->data = alloc_tmp_obj(env, sz, &tmp_alloc_dtor);
    bin->size = sz;
    bin->ref_bin = NULL;
    erts_iolist_to_buf(term, (char*) bin->data, sz);
    ADD_READONLY_CHECK(env, bin->data, bin->size);
    return 1;
}
1314 
/* Allocate a mutable reference-counted binary of 'size' bytes.
 * Returns 1 on success; 0 on allocation failure (the NIF must handle it). */
int enif_alloc_binary(size_t size, ErlNifBinary* bin)
{
    Binary* refbin = erts_bin_drv_alloc_fnf(size); /* BUGBUG: alloc type? */

    if (!refbin)
        return 0; /* The NIF must take action */

    bin->size = size;
    bin->data = (unsigned char*) refbin->orig_bytes;
    bin->ref_bin = refbin;
    return 1;
}
1329 
/* Resize a binary previously obtained with enif_alloc_binary() or one of
 * the inspect functions. For a refc binary the underlying Binary is
 * reallocated in place; for a read-only view a fresh refc binary is
 * allocated and the (possibly truncated) contents copied over.
 * Returns 1 on success, 0 on allocation failure (bin left untouched). */
int enif_realloc_binary(ErlNifBinary* bin, size_t size)
{
    if (bin->ref_bin != NULL) {
	Binary* oldbin;
	Binary* newbin;

	oldbin = (Binary*) bin->ref_bin;
	newbin = (Binary *) erts_bin_realloc_fnf(oldbin, size);
	if (!newbin) {
	    return 0;
	}
	bin->ref_bin = newbin;
	bin->data = (unsigned char*) newbin->orig_bytes;
	bin->size = size;
    }
    else {
	unsigned char* old_data = bin->data;
	size_t cpy_sz = (size < bin->size ? size : bin->size);
        /* The previous code ignored the result of enif_alloc_binary();
         * on allocation failure it would copy over stale data and still
         * report success. Propagate the failure instead. */
	if (!enif_alloc_binary(size, bin))
	    return 0;
	sys_memcpy(bin->data, old_data, cpy_sz);
    }
    return 1;
}
1353 
1354 
/* Release the caller's reference to a binary allocated with
 * enif_alloc_binary(). No-op for read-only inspected binaries
 * (ref_bin == NULL). */
void enif_release_binary(ErlNifBinary* bin)
{
    Binary* refbin = bin->ref_bin;

    if (refbin != NULL)
        erts_bin_release(refbin);
#ifdef DEBUG
    /* Poison the struct so use-after-release is caught in debug builds */
    bin->data = NULL;
    bin->ref_bin = NULL;
#endif
}
1366 
/* Create a new binary term of 'size' bytes on the environment's heap and
 * return a pointer to its (uninitialized) data. '*termp' receives the
 * binary term itself. */
unsigned char* enif_make_new_binary(ErlNifEnv* env, size_t size,
				    ERL_NIF_TERM* termp)
{
    /* new_binary() may allocate on the process heap; keep env in sync */
    flush_env(env);
    *termp = new_binary(env->proc, NULL, size);
    cache_env(env);
    return binary_bytes(*termp);
}
1375 
/* Encode 'term' into the external term format, placing the result in a
 * freshly allocated ErlNifBinary. Returns 1 on success, 0 on a system
 * limit or allocation failure. */
int enif_term_to_binary(ErlNifEnv *dst_env, ERL_NIF_TERM term,
                        ErlNifBinary *bin)
{
    Uint size;
    byte *bp;
    Binary* refbin;

    switch (erts_encode_ext_size(term, &size)) {
    case ERTS_EXT_SZ_SYSTEM_LIMIT:
        return 0; /* system limit */
    case ERTS_EXT_SZ_YIELD:
        ERTS_INTERNAL_ERROR("Unexpected yield");
    case ERTS_EXT_SZ_OK:
        break;
    }
    if (!enif_alloc_binary(size, bin))
        return 0;

    refbin = bin->ref_bin;

    bp = bin->data;

    erts_encode_ext(term, &bp);

    /* The encoder may use less than the estimated size; shrink to fit */
    bin->size = bp - bin->data;
    refbin->orig_size = bin->size;

    ASSERT(bin->data + bin->size == bp);

    return 1;
}
1407 
/* Decode external term format from 'data' into a term on 'dst_env's heap.
 * 'opts' may be 0 or ERL_NIF_BIN2TERM_SAFE (reject unsafe data such as
 * new atoms). Returns the number of bytes consumed, or 0 on failure. */
size_t enif_binary_to_term(ErlNifEnv *dst_env,
                           const unsigned char* data,
                           size_t data_sz,
                           ERL_NIF_TERM *term,
                           ErlNifBinaryToTerm opts)
{
    Sint size;
    ErtsHeapFactory factory;
    const byte *bp = (byte*) data;
    Uint32 flags = 0;

    switch ((Uint32)opts) {
    case 0: break;
    case ERL_NIF_BIN2TERM_SAFE: flags = ERTS_DIST_EXT_BTT_SAFE; break;
    default: return 0;  /* unknown option bits */
    }
    if ((size = erts_decode_ext_size(bp, data_sz)) < 0)
        return 0;

    if (size > 0) {
        /* Heap space is needed: sync env and preallocate on the process */
        flush_env(dst_env);
        erts_factory_proc_prealloc_init(&factory, dst_env->proc, size);
    } else {
        /* Immediate-only term: no heap allocation required */
        erts_factory_dummy_init(&factory);
    }

    *term = erts_decode_ext(&factory, &bp, flags);

    if (is_non_value(*term)) {
        return 0;
    }
    if (size > 0) {
        erts_factory_close(&factory);
        cache_env(dst_env);
    }

    ASSERT(bp > data);
    return bp - data;
}
1447 
/* Exact equality (Erlang =:=), no coercion between number types. */
int enif_is_identical(Eterm lhs, Eterm rhs)
{
    return EQ(lhs,rhs);
}
1452 
/* Compare two terms in Erlang term order.
 * Returns a value <0, 0 or >0 (clamped to -1/0/1 so the Sint result of
 * CMP() cannot be truncated when narrowed to int). */
int enif_compare(Eterm lhs, Eterm rhs)
{
    Sint order = CMP(lhs, rhs);

    return order < 0 ? -1 : (order > 0 ? 1 : 0);
}
1465 
/* Hash 'term' with the requested algorithm.
 * ERL_NIF_INTERNAL_HASH honours the salt; ERL_NIF_PHASH2 ignores it.
 * Unknown hash types yield 0. */
ErlNifUInt64 enif_hash(ErlNifHash type, Eterm term, ErlNifUInt64 salt)
{
    if (type == ERL_NIF_INTERNAL_HASH)
        return make_internal_hash(term, (Uint32) salt);

    if (type == ERL_NIF_PHASH2) {
        /* It appears that make_hash2 doesn't always react to seasoning
         * as well as it should. Therefore, let's make it ignore the salt
         * value and declare salted uses of phash2 as unsupported.
         */
        return make_hash2(term) & ((1 << 27) - 1);
    }

    return 0;
}
1481 
/* Expose a tuple's arity and element array.
 * Returns 1 and sets *arity/*array when 'tpl' is a tuple, else 0. */
int enif_get_tuple(ErlNifEnv* env, Eterm tpl, int* arity, const Eterm** array)
{
    Eterm* elems;

    if (!is_tuple(tpl))
        return 0;
    elems = tuple_val(tpl);
    *arity = arityval(*elems);
    *array = elems + 1;  /* elements follow the arity header word */
    return 1;
}
1493 
/* Copy a latin1 string (a list of bytes) into 'buf', always
 * NUL-terminating it when len >= 1.
 * Returns: number of bytes written including the NUL on success;
 *          -len if the string was truncated to fit;
 *          0 on failure (len < 1, improper list, or non-byte element). */
int enif_get_string(ErlNifEnv *env, ERL_NIF_TERM list, char* buf, unsigned len,
		    ErlNifCharEncoding encoding)
{
    Eterm* listptr;
    int n = 0;

    ASSERT(encoding == ERL_NIF_LATIN1);
    if (len < 1) {
	return 0;
    }
    while (is_not_nil(list)) {
	if (is_not_list(list)) {
	    /* improper list; terminate what we have and fail */
	    buf[n] = '\0';
	    return 0;
	}
	listptr = list_val(list);

	if (!is_byte(*listptr)) {
	    /* element not in 0..255; terminate what we have and fail */
	    buf[n] = '\0';
	    return 0;
	}
	buf[n++] = unsigned_val(*listptr);
	if (n >= len) {
	    buf[n-1] = '\0'; /* truncate */
	    return -len;
	}
	list = CDR(listptr);
    }
    buf[n] = '\0';
    return n + 1;
}
1525 
/* Turn an ErlNifBinary into a binary term, transferring ownership of any
 * underlying refc binary to the term. After this call bin->ref_bin is
 * NULL and the binary must be treated as read-only. */
Eterm enif_make_binary(ErlNifEnv* env, ErlNifBinary* bin)
{
    Eterm bin_term;

    if (bin->ref_bin != NULL) {
        Binary* binary = bin->ref_bin;

        /* If the binary is smaller than the heap binary limit we'll return a
         * heap binary to reduce the number of small refc binaries in the
         * system. We can't simply release the refc binary right away however;
         * the documentation states that the binary should be considered
         * read-only from this point on, which implies that it should still be
         * readable.
         *
         * We could keep it alive until we return by adding it to the temporary
         * object list, but that requires an off-heap allocation which is
         * potentially quite slow, so we create a dummy ProcBin instead and
         * rely on the next minor GC to get rid of it. */
        if (bin->size <= ERL_ONHEAP_BIN_LIMIT) {
            ErlHeapBin* hb;

            hb = (ErlHeapBin*)alloc_heap(env, heap_bin_size(bin->size));
            hb->thing_word = header_heap_bin(bin->size);
            hb->size = bin->size;

            sys_memcpy(hb->data, bin->data, bin->size);

            /* Dummy ProcBin keeping the refc binary alive until next GC */
            erts_build_proc_bin(&MSO(env->proc),
                                alloc_heap(env, PROC_BIN_SIZE),
                                binary);

            bin_term = make_binary(hb);
        } else {
            bin_term = erts_build_proc_bin(&MSO(env->proc),
                                           alloc_heap(env, PROC_BIN_SIZE),
                                           binary);
        }

        /* Our (possibly shared) ownership has been transferred to the term. */
        bin->ref_bin = NULL;
    } else {
        /* Read-only view (e.g. from enif_inspect_binary): copy the bytes */
        flush_env(env);
        bin_term = new_binary(env->proc, bin->data, bin->size);
        cache_env(env);
    }

    return bin_term;
}
1574 
/* Create a sub-binary referencing bytes [pos, pos+size) of 'bin_term'
 * without copying. The caller must ensure pos+size is within bounds
 * (checked only in debug builds). */
Eterm enif_make_sub_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term,
			   size_t pos, size_t size)
{
    ErlSubBin* sb;
    Eterm orig;
    Uint offset, bit_offset, bit_size;
#ifdef DEBUG
    size_t src_size;

    ASSERT(is_binary(bin_term));
    src_size = binary_size(bin_term);
    ASSERT(pos <= src_size);
    ASSERT(size <= src_size);
    ASSERT(pos + size <= src_size);
#endif
    sb = (ErlSubBin*) alloc_heap(env, ERL_SUB_BIN_SIZE);
    /* Resolve through any existing sub-binary to the underlying binary */
    ERTS_GET_REAL_BIN(bin_term, orig, offset, bit_offset, bit_size);
    sb->thing_word = HEADER_SUB_BIN;
    sb->size = size;
    sb->offs = offset + pos;
    sb->orig = orig;
    sb->bitoffs = bit_offset;
    sb->bitsize = 0;      /* NIF API deals in whole bytes only */
    sb->is_writable = 0;
    return make_binary(sb);
}
1601 
1602 
/* Raise a badarg error exception in the calling process. */
Eterm enif_make_badarg(ErlNifEnv* env)
{
    return enif_raise_exception(env, am_badarg);
}
1607 
/* Raise an error exception with the given reason term.
 * Marks the environment so enif_is_exception()/enif_has_pending_exception()
 * report it, and returns THE_NON_VALUE (via BIF_ERROR). */
Eterm enif_raise_exception(ErlNifEnv* env, ERL_NIF_TERM reason)
{
    env->exception_thrown = 1;
    env->proc->fvalue = reason;
    BIF_ERROR(env->proc, EXC_ERROR);
}
1614 
/* Report whether an exception has been raised in this environment.
 * When one is pending and 'reason' is non-NULL, *reason receives the
 * exception reason term. */
int enif_has_pending_exception(ErlNifEnv* env, ERL_NIF_TERM* reason)
{
    int thrown = env->exception_thrown;

    if (thrown && reason != NULL)
	*reason = env->proc->fvalue;
    return thrown;
}
1621 
/* Copy an atom's text as a NUL-terminated latin1 string into 'buf'.
 * Returns the number of bytes written including the NUL, or 0 on failure
 * (not an atom, len == 0, atom not representable in latin1, or too long
 * for the buffer). */
int enif_get_atom(ErlNifEnv* env, Eterm atom, char* buf, unsigned len,
		  ErlNifCharEncoding encoding)
{
    Atom* ap;
    ASSERT(encoding == ERL_NIF_LATIN1);
    if (is_not_atom(atom) || len==0) {
	return 0;
    }
    ap = atom_tab(atom_val(atom));

    /* latin1_chars < 0 means the atom contains non-latin1 code points */
    if (ap->latin1_chars < 0 || ap->latin1_chars >= len) {
	return 0;
    }
    if (ap->latin1_chars == ap->len) {
	/* Pure ASCII: the utf8 bytes are the latin1 bytes */
	sys_memcpy(buf, ap->name, ap->len);
    }
    else {
	/* Atom text is stored as utf8; transcode to latin1 */
	int dlen = erts_utf8_to_latin1((byte*)buf, ap->name, ap->len);
	ASSERT(dlen == ap->latin1_chars); (void)dlen;
    }
    buf[ap->latin1_chars] = '\0';
    return ap->latin1_chars + 1;
}
1645 
/* Read a signed int from 'term'.
 * Returns 1 on success, 0 if the term is not an integer or does not fit
 * in a C 'int'. */
int enif_get_int(ErlNifEnv* env, Eterm term, int* ip)
{
#if SIZEOF_INT ==  ERTS_SIZEOF_ETERM
    return term_to_Sint(term, (Sint*)ip);
#elif (SIZEOF_LONG ==  ERTS_SIZEOF_ETERM) || \
  (SIZEOF_LONG_LONG ==  ERTS_SIZEOF_ETERM)
    /* Eterm is wider than int: range-check before narrowing */
    Sint i;
    if (!term_to_Sint(term, &i) || i < INT_MIN || i > INT_MAX) {
	return 0;
    }
    *ip = (int) i;
    return 1;
#else
#  error Unknown word size
#endif
}
1662 
/* Read an unsigned int from 'term'.
 * Returns 1 on success, 0 if the term is not a non-negative integer or
 * does not fit in a C 'unsigned'. */
int enif_get_uint(ErlNifEnv* env, Eterm term, unsigned* ip)
{
#if SIZEOF_INT == ERTS_SIZEOF_ETERM
    return term_to_Uint(term, (Uint*)ip);
#elif (SIZEOF_LONG == ERTS_SIZEOF_ETERM) || \
  (SIZEOF_LONG_LONG ==  ERTS_SIZEOF_ETERM)
    /* Eterm is wider than unsigned: range-check before narrowing */
    Uint i;
    if (!term_to_Uint(term, &i) || i > UINT_MAX) {
	return 0;
    }
    *ip = (unsigned) i;
    return 1;
#else
    /* Mirror enif_get_int(): fail at compile time on unexpected word
     * sizes. Previously there was no #else branch, so such a build would
     * compile a non-void function with no return statement. */
#  error Unknown word size
#endif
}
1677 
/* Read a signed long from 'term'.
 * Returns 1 on success, 0 if the term is not an integer or does not fit
 * in a C 'long'. */
int enif_get_long(ErlNifEnv* env, Eterm term, long* ip)
{
#if SIZEOF_LONG == ERTS_SIZEOF_ETERM
    return term_to_Sint(term, ip);
#elif SIZEOF_LONG == 8
    return term_to_Sint64(term, ip);
#elif SIZEOF_LONG == SIZEOF_INT
    /* long is no wider than int; delegate to enif_get_int() */
    int tmp,ret;
    ret = enif_get_int(env,term,&tmp);
    if (ret) {
      *ip = (long) tmp;
    }
    return ret;
#else
#  error Unknown long word size
#endif
}
1695 
/* Read an unsigned long from 'term'.
 * Returns 1 on success, 0 if the term is not a non-negative integer or
 * does not fit in a C 'unsigned long'. */
int enif_get_ulong(ErlNifEnv* env, Eterm term, unsigned long* ip)
{
#if SIZEOF_LONG == ERTS_SIZEOF_ETERM
    return term_to_Uint(term, ip);
#elif SIZEOF_LONG == 8
    return term_to_Uint64(term, ip);
#elif SIZEOF_LONG == SIZEOF_INT
    /* long is no wider than int; delegate to enif_get_uint() */
    int ret;
    unsigned int tmp;
    ret = enif_get_uint(env,term,&tmp);
    if (ret) {
      *ip = (unsigned long) tmp;
    }
    return ret;
#else
#  error Unknown long word size
#endif
}
1714 
#if HAVE_INT64 && SIZEOF_LONG != 8
/* 64-bit getters; only compiled when 'long' is not already 64 bits
 * (otherwise erl_nif.h maps them onto enif_get_long/enif_get_ulong). */

/* Read a signed 64-bit integer from 'term'; 1 on success, 0 otherwise. */
int enif_get_int64(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifSInt64* ip)
{
    return term_to_Sint64(term, ip);
}

/* Read an unsigned 64-bit integer from 'term'; 1 on success, 0 otherwise. */
int enif_get_uint64(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifUInt64* ip)
{
    return term_to_Uint64(term, ip);
}
#endif /* HAVE_INT64 && SIZEOF_LONG != 8 */
1726 
/* Read a float term into a C double; 1 on success, 0 if 'term' is not
 * a float (integers are deliberately not coerced). */
int enif_get_double(ErlNifEnv* env, ERL_NIF_TERM term, double* dp)
{
    FloatDef fval;

    if (!is_float(term))
        return 0;
    GET_DOUBLE(term, fval);
    *dp = fval.fd;
    return 1;
}
1737 
/* Get the length (in latin1 characters) of an atom's text.
 * Returns 1 on success, 0 if 'atom' is not an atom or its text cannot be
 * represented in latin1. */
int enif_get_atom_length(ErlNifEnv* env, Eterm atom, unsigned* len,
			 ErlNifCharEncoding enc)
{
    Atom* entry;

    ASSERT(enc == ERL_NIF_LATIN1);
    if (is_not_atom(atom))
        return 0;
    entry = atom_tab(atom_val(atom));
    if (entry->latin1_chars < 0)
        return 0;  /* contains non-latin1 code points */
    *len = entry->latin1_chars;
    return 1;
}
1751 
/* Split a non-empty list into head and tail.
 * Returns 1 on success, 0 if 'term' is not a cons cell ([] fails too). */
int enif_get_list_cell(ErlNifEnv* env, Eterm term, Eterm* head, Eterm* tail)
{
    Eterm* cell;

    if (!is_list(term))
        return 0;
    cell = list_val(term);
    *head = CAR(cell);
    *tail = CDR(cell);
    return 1;
}
1761 
/* Get the length of a proper list.
 * Returns 1 on success; 0 for improper lists or lengths that do not fit
 * in an 'unsigned'. */
int enif_get_list_length(ErlNifEnv* env, Eterm term, unsigned* len)
{
    Sint signed_len = erts_list_length(term);
    Uint wide;

    if (signed_len < 0)
        return 0;                    /* improper list */
    wide = (Uint) signed_len;
    if ((unsigned) wide != wide)
        return 0;                    /* does not fit in 'unsigned' */
    *len = wide;
    return 1;
}
1773 
/* Make an integer term from a C int. Falls back to a bignum only when
 * Eterm is exactly int-sized and the value exceeds the small range. */
ERL_NIF_TERM enif_make_int(ErlNifEnv* env, int i)
{
#if SIZEOF_INT == ERTS_SIZEOF_ETERM
    return IS_SSMALL(i) ? make_small(i) : small_to_big(i,alloc_heap(env,2));
#elif (SIZEOF_LONG == ERTS_SIZEOF_ETERM) || \
  (SIZEOF_LONG_LONG == ERTS_SIZEOF_ETERM)
    /* Eterm is wider than int: every int fits in a small */
    return make_small(i);
#endif
}
1783 
/* Make an integer term from a C unsigned. Falls back to a bignum only
 * when Eterm is exactly int-sized and the value exceeds the small range. */
ERL_NIF_TERM enif_make_uint(ErlNifEnv* env, unsigned i)
{
#if SIZEOF_INT == ERTS_SIZEOF_ETERM
    return IS_USMALL(0,i) ? make_small(i) : uint_to_big(i,alloc_heap(env,2));
#elif (SIZEOF_LONG ==  ERTS_SIZEOF_ETERM) || \
  (SIZEOF_LONG_LONG ==  ERTS_SIZEOF_ETERM)
    /* Eterm is wider than unsigned: every unsigned fits in a small */
    return make_small(i);
#endif
}
1793 
/* Make an integer term from a C long, choosing small or bignum
 * representation depending on the platform's word sizes. */
ERL_NIF_TERM enif_make_long(ErlNifEnv* env, long i)
{
#if SIZEOF_LONG < ERTS_SIZEOF_ETERM
    /* long narrower than Eterm: always fits in a small */
    return make_small(i);
#else
    if (IS_SSMALL(i)) {
	return make_small(i);
    }
# if SIZEOF_LONG == ERTS_SIZEOF_ETERM
    return small_to_big(i, alloc_heap(env,2));
# elif SIZEOF_LONG_LONG == ERTS_SIZEOF_ETERM
    return make_small(i);
# elif SIZEOF_LONG == 8
    /* 64-bit long on a 32-bit Eterm: needs a 64-bit bignum */
    ensure_heap(env,3);
    return erts_sint64_to_big(i, &env->hp);
# endif
#endif
}
1812 
/* Make an integer term from a C unsigned long, choosing small or bignum
 * representation depending on the platform's word sizes. */
ERL_NIF_TERM enif_make_ulong(ErlNifEnv* env, unsigned long i)
{
    if (IS_USMALL(0,i)) {
	return make_small(i);
    }
#if SIZEOF_LONG == ERTS_SIZEOF_ETERM
    return uint_to_big(i,alloc_heap(env,2));
#elif SIZEOF_LONG_LONG ==  ERTS_SIZEOF_ETERM
    return make_small(i);
#elif SIZEOF_LONG == 8
    /* 64-bit unsigned long on a 32-bit Eterm: needs a 64-bit bignum */
    ensure_heap(env,3);
    return erts_uint64_to_big(i, &env->hp);
#endif
}
1827 
#if HAVE_INT64 && SIZEOF_LONG != 8
/* 64-bit constructors; only compiled when 'long' is not already 64 bits
 * (otherwise erl_nif.h maps them onto enif_make_long/enif_make_ulong). */

/* Make an integer term from a signed 64-bit value. */
ERL_NIF_TERM enif_make_int64(ErlNifEnv* env, ErlNifSInt64 i)
{
    Uint* hp;
    Uint need = 0;
    /* First pass computes the heap need, second pass builds the term */
    erts_bld_sint64(NULL, &need, i);
    hp = alloc_heap(env, need);
    return erts_bld_sint64(&hp, NULL, i);
}

/* Make an integer term from an unsigned 64-bit value. */
ERL_NIF_TERM enif_make_uint64(ErlNifEnv* env, ErlNifUInt64 i)
{
    Uint* hp;
    Uint need = 0;
    /* First pass computes the heap need, second pass builds the term */
    erts_bld_uint64(NULL, &need, i);
    hp = alloc_heap(env, need);
    return erts_bld_uint64(&hp, NULL, i);
}
#endif /* HAVE_INT64 && SIZEOF_LONG != 8 */
1847 
enif_make_double(ErlNifEnv * env,double d)1848 ERL_NIF_TERM enif_make_double(ErlNifEnv* env, double d)
1849 {
1850     Eterm* hp;
1851     FloatDef f;
1852 
1853     if (!erts_isfinite(d))
1854         return enif_make_badarg(env);
1855     hp = alloc_heap(env,FLOAT_SIZE_OBJECT);
1856     f.fd = d;
1857     PUT_DOUBLE(f, hp);
1858     return make_float(hp);
1859 }
1860 
/* Make (or look up) an atom from a NUL-terminated latin1 string. */
ERL_NIF_TERM enif_make_atom(ErlNifEnv* env, const char* name)
{
    return enif_make_atom_len(env, name, sys_strlen(name));
}
1865 
/* Make (or look up) an atom from 'len' latin1 bytes. Raises badarg when
 * the name exceeds the maximum atom length. */
ERL_NIF_TERM enif_make_atom_len(ErlNifEnv* env, const char* name, size_t len)
{
    if (len > MAX_ATOM_CHARACTERS)
        return enif_make_badarg(env);
    return erts_atom_put((byte*)name, len, ERTS_ATOM_ENC_LATIN1, 1);
}
1872 
/* Look up an already existing atom by NUL-terminated name. */
int enif_make_existing_atom(ErlNifEnv* env, const char* name, ERL_NIF_TERM* atom,
			    ErlNifCharEncoding enc)
{
    size_t n = sys_strlen(name);
    return enif_make_existing_atom_len(env, name, n, atom, enc);
}
1878 
/* Look up an existing atom from 'len' bytes of Latin1 text.
 * Returns 0 (atom untouched) when the atom does not exist or the
 * name is too long; 1 on success. */
int enif_make_existing_atom_len(ErlNifEnv* env, const char* name, size_t len,
				ERL_NIF_TERM* atom, ErlNifCharEncoding encoding)
{
    ASSERT(encoding == ERL_NIF_LATIN1);
    if (len <= MAX_ATOM_CHARACTERS)
        return erts_atom_get(name, len, atom, ERTS_ATOM_ENC_LATIN1);
    return 0;
}
1887 
/* Build a tuple of 'cnt' elements taken from the varargs. */
ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...)
{
#ifdef ERTS_NIF_ASSERT_IN_ENV
    int nr = 0;
#endif
    Eterm* hp = alloc_heap(env,cnt+1);  /* arity word + cnt elements */
    Eterm ret = make_tuple(hp);
    va_list ap;

    *hp++ = make_arityval(cnt);
    va_start(ap,cnt);
    while (cnt--) {
        Eterm elem = va_arg(ap,Eterm);
        /* Debug check: element must belong to this environment */
        ASSERT_IN_ENV(env, elem, ++nr, "tuple");
	*hp++ = elem;
    }
    va_end(ap);
    return ret;
}
1907 
/* Build a tuple of 'cnt' elements copied from the array 'arr'. */
ERL_NIF_TERM enif_make_tuple_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[], unsigned cnt)
{
#ifdef ERTS_NIF_ASSERT_IN_ENV
    int nr = 0;
#endif
    Eterm* hp = alloc_heap(env,cnt+1);  /* arity word + cnt elements */
    Eterm ret = make_tuple(hp);
    const Eterm* src = arr;

    *hp++ = make_arityval(cnt);
    while (cnt--) {
        /* Debug check: element must belong to this environment */
        ASSERT_IN_ENV(env, *src, ++nr, "tuple");
	*hp++ = *src++;
    }
    return ret;
}
1924 
enif_make_list_cell(ErlNifEnv * env,Eterm car,Eterm cdr)1925 ERL_NIF_TERM enif_make_list_cell(ErlNifEnv* env, Eterm car, Eterm cdr)
1926 {
1927     Eterm* hp = alloc_heap(env,2);
1928     Eterm ret = make_list(hp);
1929 
1930     ASSERT_IN_ENV(env, car, 0, "head of list cell");
1931     ASSERT_IN_ENV(env, cdr, 0, "tail of list cell");
1932     CAR(hp) = car;
1933     CDR(hp) = cdr;
1934     return ret;
1935 }
1936 
/* Build a proper list of 'cnt' varargs elements.
 * An empty list is the NIL immediate and needs no heap. */
ERL_NIF_TERM enif_make_list(ErlNifEnv* env, unsigned cnt, ...)
{
    if (cnt == 0) {
	return NIL;
    }
    else {
#ifdef ERTS_NIF_ASSERT_IN_ENV
        int nr = 0;
#endif
	Eterm* hp = alloc_heap(env,cnt*2);  /* 2 words per cons cell */
	Eterm ret = make_list(hp);
	Eterm* last = &ret;                 /* CDR slot to patch next round */
	va_list ap;

	va_start(ap,cnt);
	while (cnt--) {
            Eterm term = va_arg(ap,Eterm);
	    *last = make_list(hp);          /* link previous cell to this one */
            ASSERT_IN_ENV(env, term, ++nr, "list");
	    *hp = term;                     /* CAR of this cell */
	    last = ++hp;                    /* remember CDR slot */
	    ++hp;                           /* advance to next cell */
	}
	va_end(ap);
	*last = NIL;                        /* terminate the list */
	return ret;
    }
}
1965 
/* Build a proper list of 'cnt' elements copied from the array 'arr'.
 * Same linking technique as enif_make_list(): 'last' points at the
 * CDR slot that the next iteration (or final NIL) fills in. */
ERL_NIF_TERM enif_make_list_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[], unsigned cnt)
{
#ifdef ERTS_NIF_ASSERT_IN_ENV
    int nr = 0;
#endif
    Eterm* hp = alloc_heap(env,cnt*2);  /* 2 words per cons cell */
    Eterm ret = make_list(hp);
    Eterm* last = &ret;
    const Eterm* src = arr;

    while (cnt--) {
        Eterm term = *src++;
	*last = make_list(hp);          /* link previous cell to this one */
        ASSERT_IN_ENV(env, term, ++nr, "list");
	*hp = term;                     /* CAR */
	last = ++hp;                    /* CDR slot, patched next round */
	++hp;
    }
    *last = NIL;                        /* terminate the list */
    return ret;
}
1987 
enif_make_string(ErlNifEnv * env,const char * string,ErlNifCharEncoding encoding)1988 ERL_NIF_TERM enif_make_string(ErlNifEnv* env, const char* string,
1989 			      ErlNifCharEncoding encoding)
1990 {
1991     return enif_make_string_len(env, string, sys_strlen(string), encoding);
1992 }
1993 
/* Build a string term (list of byte values) from 'len' bytes.
 * Only Latin1 is supported; the encoding is debug-asserted only. */
ERL_NIF_TERM enif_make_string_len(ErlNifEnv* env, const char* string,
				  size_t len, ErlNifCharEncoding encoding)
{
    Eterm* hp = alloc_heap(env,len*2);  /* one cons cell per character */
    ASSERT(encoding == ERL_NIF_LATIN1);
    return erts_bld_string_n(&hp,NULL,string,len);
}
2001 
enif_make_ref(ErlNifEnv * env)2002 ERL_NIF_TERM enif_make_ref(ErlNifEnv* env)
2003 {
2004     Eterm* hp = alloc_heap(env, ERTS_REF_THING_SIZE);
2005     return erts_make_ref_in_buffer(hp);
2006 }
2007 
/* Fill *sip with emulator system information; the layout matches the
 * driver API struct, so this delegates directly. */
void enif_system_info(ErlNifSysInfo *sip, size_t si_size)
{
    driver_system_info(sip, si_size);
}
2012 
/* Store the reverse of list 'term' in *list. Returns 1 on success,
 * or 0 (leaving *list untouched) if 'term' is not a proper list. */
int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list)
{
    Eterm *listptr, ret, *hp;

    ret = NIL;

    while (is_not_nil(term)) {
	if (is_not_list(term)) {
	    return 0;  /* improper tail or not a list at all */
	}
	hp = alloc_heap(env, 2);
	listptr = list_val(term);
	ret = CONS(hp, CAR(listptr), ret);  /* prepend head onto accumulator */
	term = CDR(listptr);
    }
    *list = ret;
    return 1;
}
2031 
/* True when the process executing this NIF call is not exiting.
 * Aborts if called with a non-process environment or from a
 * non-scheduler thread. */
int enif_is_current_process_alive(ErlNifEnv* env)
{
    Process* proc;
    int on_scheduler;

    execution_state(env, &proc, &on_scheduler);

    if (proc == NULL)
	erts_exit(ERTS_ABORT_EXIT,
		  "enif_is_current_process_alive: "
                  "Invalid environment");
    if (!on_scheduler)
	erts_exit(ERTS_ABORT_EXIT, "enif_is_current_process_alive: "
		  "called from non-scheduler thread");

    return ERTS_PROC_IS_EXITING(proc) ? 0 : 1;
}
2050 
/* Check whether the process identified by 'proc' is alive. On a
 * normal scheduler a plain lookup suffices; from any other thread a
 * reference must be taken for the lookup and dropped again. */
int enif_is_process_alive(ErlNifEnv* env, ErlNifPid *proc)
{
    int scheduler;

    execution_state(env, NULL, &scheduler);

    if (scheduler <= 0) {
	Process* rp = erts_pid2proc_opt(NULL, 0, proc->pid, 0,
					ERTS_P2P_FLG_INC_REFC);
	if (rp != NULL)
	    erts_proc_dec_refc(rp);
	return rp != NULL;
    }
    return erts_proc_lookup(proc->pid) != NULL;
}
2067 
/* Check whether the port identified by 'port' is alive, mirroring
 * enif_is_process_alive() for ports. */
int enif_is_port_alive(ErlNifEnv *env, ErlNifPort *port)
{
    Uint32 invalid_flags = (erts_port_synchronous_ops
			    ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
			    : ERTS_PORT_SFLGS_INVALID_LOOKUP);
    int scheduler;

    execution_state(env, NULL, &scheduler);

    if (scheduler <= 0) {
	/* Off-scheduler: thread-safe lookup takes a reference that
	 * we give back immediately. */
	Port *prt = erts_thr_port_lookup(port->port_id, invalid_flags);
	if (prt != NULL)
	    erts_port_dec_refc(prt);
	return prt != NULL;
    }
    return erts_port_lookup(port->port_id, invalid_flags) != NULL;
}
2086 
2087 ERL_NIF_TERM
enif_now_time(ErlNifEnv * env)2088 enif_now_time(ErlNifEnv *env)
2089 {
2090     Uint mega, sec, micro;
2091     Eterm *hp;
2092     get_now(&mega, &sec, &micro);
2093     hp = alloc_heap(env, 4);
2094     return TUPLE3(hp, make_small(mega), make_small(sec), make_small(micro));
2095 }
2096 
ERL_NIF_TERM
enif_cpu_time(ErlNifEnv *env)
{
#ifdef HAVE_ERTS_NOW_CPU
    /* CPU time as an erlang:now/0 style {MegaSecs, Secs, MicroSecs} */
    Uint mega, sec, micro;
    Eterm *hp;
    erts_get_now_cpu(&mega, &sec, &micro);
    hp = alloc_heap(env, 4);  /* 3-tuple: arity word + 3 elements */
    return TUPLE3(hp, make_small(mega), make_small(sec), make_small(micro));
#else
    /* No CPU-time source available on this platform */
    return enif_make_badarg(env);
#endif
}
2110 
ERL_NIF_TERM
enif_make_unique_integer(ErlNifEnv *env, ErlNifUniqueInteger properties)
{
    /* 'properties' is a bitset selecting monotonic and/or strictly
     * positive unique integers; the two cases use different raw
     * sources and heap-size calculations. */
    int monotonic = properties & ERL_NIF_UNIQUE_MONOTONIC;
    int positive = properties & ERL_NIF_UNIQUE_POSITIVE;
    Eterm *hp;
    Uint hsz;

    if (monotonic) {
        Sint64 raw_unique = erts_raw_get_unique_monotonic_integer();
        hsz = erts_raw_unique_monotonic_integer_heap_size(raw_unique, positive);
        hp = alloc_heap(env, hsz);
        return erts_raw_make_unique_monotonic_integer_value(&hp, raw_unique, positive);
    } else {
        Uint64 raw_unique[ERTS_UNIQUE_INT_RAW_VALUES];
        erts_raw_get_unique_integer(raw_unique);
        hsz = erts_raw_unique_integer_heap_size(raw_unique, positive);
        hp = alloc_heap(env, hsz);
        return erts_raw_make_unique_integer(&hp, raw_unique, positive);
    }
}
2132 
/*
 * Thin one-to-one wrappers mapping the enif thread/synchronization
 * API onto the corresponding erl_drv (driver) implementations.
 * ErlNifMutex/Cond/RWLock/TSDKey/Tid share representation with their
 * driver counterparts; the thread-opts wrappers cast explicitly.
 */
ErlNifMutex* enif_mutex_create(char *name) { return erl_drv_mutex_create(name); }
void enif_mutex_destroy(ErlNifMutex *mtx) {  erl_drv_mutex_destroy(mtx); }
int enif_mutex_trylock(ErlNifMutex *mtx) { return erl_drv_mutex_trylock(mtx); }
void enif_mutex_lock(ErlNifMutex *mtx) { erl_drv_mutex_lock(mtx); }
void enif_mutex_unlock(ErlNifMutex *mtx) { erl_drv_mutex_unlock(mtx); }
ErlNifCond* enif_cond_create(char *name) { return erl_drv_cond_create(name); }
void enif_cond_destroy(ErlNifCond *cnd) { erl_drv_cond_destroy(cnd); }
void enif_cond_signal(ErlNifCond *cnd) { erl_drv_cond_signal(cnd); }
void enif_cond_broadcast(ErlNifCond *cnd) { erl_drv_cond_broadcast(cnd); }
void enif_cond_wait(ErlNifCond *cnd, ErlNifMutex *mtx) { erl_drv_cond_wait(cnd,mtx); }
ErlNifRWLock* enif_rwlock_create(char *name) { return erl_drv_rwlock_create(name); }
void enif_rwlock_destroy(ErlNifRWLock *rwlck) { erl_drv_rwlock_destroy(rwlck); }
int enif_rwlock_tryrlock(ErlNifRWLock *rwlck) { return erl_drv_rwlock_tryrlock(rwlck); }
void enif_rwlock_rlock(ErlNifRWLock *rwlck) { erl_drv_rwlock_rlock(rwlck); }
void enif_rwlock_runlock(ErlNifRWLock *rwlck) { erl_drv_rwlock_runlock(rwlck); }
int enif_rwlock_tryrwlock(ErlNifRWLock *rwlck) { return erl_drv_rwlock_tryrwlock(rwlck); }
void enif_rwlock_rwlock(ErlNifRWLock *rwlck) { erl_drv_rwlock_rwlock(rwlck); }
void enif_rwlock_rwunlock(ErlNifRWLock *rwlck) { erl_drv_rwlock_rwunlock(rwlck); }
int enif_tsd_key_create(char *name, ErlNifTSDKey *key) { return erl_drv_tsd_key_create(name,key); }
void enif_tsd_key_destroy(ErlNifTSDKey key) { erl_drv_tsd_key_destroy(key); }
void enif_tsd_set(ErlNifTSDKey key, void *data) { erl_drv_tsd_set(key,data); }
void* enif_tsd_get(ErlNifTSDKey key) { return erl_drv_tsd_get(key); }
ErlNifThreadOpts* enif_thread_opts_create(char *name) { return (ErlNifThreadOpts*) erl_drv_thread_opts_create(name); }
void enif_thread_opts_destroy(ErlNifThreadOpts *opts) { erl_drv_thread_opts_destroy((ErlDrvThreadOpts*)opts); }
int enif_thread_create(char *name, ErlNifTid *tid, void* (*func)(void *),
		       void *args, ErlNifThreadOpts *opts) {
    return erl_drv_thread_create(name,tid,func,args,(ErlDrvThreadOpts*)opts);
}
ErlNifTid enif_thread_self(void) { return erl_drv_thread_self(); }
int enif_equal_tids(ErlNifTid tid1, ErlNifTid tid2) { return erl_drv_equal_tids(tid1,tid2); }
void enif_thread_exit(void *resp) { erl_drv_thread_exit(resp); }
int enif_thread_join(ErlNifTid tid, void **respp) { return erl_drv_thread_join(tid,respp); }

/* Name accessors for the synchronization objects above */
char* enif_mutex_name(ErlNifMutex *mtx) {return erl_drv_mutex_name(mtx); }
char* enif_cond_name(ErlNifCond *cnd) { return erl_drv_cond_name(cnd); }
char* enif_rwlock_name(ErlNifRWLock* rwlck) { return erl_drv_rwlock_name(rwlck); }
char* enif_thread_name(ErlNifTid tid) { return erl_drv_thread_name(tid); }

/* Environment variable lookup, delegated to the driver API */
int enif_getenv(const char *key, char *value, size_t *value_size) { return erl_drv_getenv(key, value, value_size); }
2172 
enif_monotonic_time(ErlNifTimeUnit time_unit)2173 ErlNifTime enif_monotonic_time(ErlNifTimeUnit time_unit)
2174 {
2175     return (ErlNifTime) erts_napi_monotonic_time((int) time_unit);
2176 }
2177 
enif_time_offset(ErlNifTimeUnit time_unit)2178 ErlNifTime enif_time_offset(ErlNifTimeUnit time_unit)
2179 {
2180     return (ErlNifTime) erts_napi_time_offset((int) time_unit);
2181 }
2182 
2183 ErlNifTime
enif_convert_time_unit(ErlNifTime val,ErlNifTimeUnit from,ErlNifTimeUnit to)2184 enif_convert_time_unit(ErlNifTime val,
2185 		       ErlNifTimeUnit from,
2186 		       ErlNifTimeUnit to)
2187 {
2188     return (ErlNifTime) erts_napi_convert_time_unit((ErtsMonotonicTime) val,
2189 						    (int) from,
2190 						    (int) to);
2191 }
2192 
/* fprintf-style output through the erts format interpreter. */
int enif_fprintf(FILE* filep, const char* format, ...)
{
    va_list ap;
    int res;

    va_start(ap, format);
    res = erts_vfprintf(filep, format, ap);
    va_end(ap);
    return res;
}
2202 
/* va_list variant of enif_fprintf(). */
int enif_vfprintf(FILE* filep, const char *format, va_list ap)
{
    return erts_vfprintf(filep, format, ap);
}
2207 
/* snprintf-style formatting through the erts format interpreter. */
int enif_snprintf(char *buffer, size_t size, const char* format, ...)
{
    va_list ap;
    int res;

    va_start(ap, format);
    res = erts_vsnprintf(buffer, size, format, ap);
    va_end(ap);
    return res;
}
2217 
/* va_list variant of enif_snprintf(). */
int enif_vsnprintf(char* buffer, size_t size, const char *format, va_list ap)
{
    return erts_vsnprintf(buffer, size, format, ap);
}
2222 
2223 
2224 /***********************************************************
2225  **       Memory managed (GC'ed) "resource" objects       **
2226  ***********************************************************/
2227 
2228 /*
2229  * Sentinel node in circular list of all resource types.
2230  * List protected by code_write_permission.
2231  */
2232 struct enif_resource_type_t resource_type_list;
2233 
find_resource_type(Eterm module,Eterm name)2234 static ErlNifResourceType* find_resource_type(Eterm module, Eterm name)
2235 {
2236     ErlNifResourceType* type;
2237     for (type = resource_type_list.next;
2238 	 type != &resource_type_list;
2239 	 type = type->next) {
2240 
2241 	if (type->module == module && type->name == name) {
2242 	    return type;
2243 	}
2244     }
2245     return NULL;
2246 }
2247 
/* True if 'ptr' lies within the 'nbytes'-byte area starting at
 * 'start' (single unsigned-compare range check). */
#define in_area(ptr,start,nbytes) \
    ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
2250 
rt_have_callbacks(ErlNifResourceTypeInit * rti)2251 static ERTS_INLINE int rt_have_callbacks(ErlNifResourceTypeInit* rti)
2252 {
2253     return rti->dtor != NULL;
2254 }
2255 
/* Drop one reference to the NIF module wrapper; frees it when the
 * last reference is gone (the lib must then already be unloaded,
 * as the asserts verify). */
static void deref_nifmod(struct erl_module_nif* lib)
{
    if (erts_refc_dectest(&lib->refc, 0) == 0) {
        ASSERT(lib->handle == NULL);
        ASSERT(lib->mod == NULL);
        erts_free(ERTS_ALC_T_NIF, lib);
    }
}
2264 
/* Run the library's unload callback (if any) and close its dynamic
 * library. Called when the last dynlib reference has been dropped
 * for an already purged module. */
static void close_dynlib(struct erl_module_nif* lib)
{
    ASSERT(lib != NULL);
    ASSERT(lib->mod == NULL);
    ASSERT(lib->handle != NULL);
    ASSERT(erts_refc_read(&lib->dynlib_refc,0) == 0);

    if (lib->entry.unload != NULL) {
	struct enif_msg_environment_t msg_env;
        /* Call unload in a temporary process-less NIF environment */
        pre_nif_noproc(&msg_env, lib, NULL);
	lib->entry.unload(&msg_env.env, lib->priv_data);
        post_nif_noproc(&msg_env);
    }
    /* Statically linked NIF libraries have nothing to close */
    if (!erts_is_static_nif(lib->handle))
      erts_sys_ddll_close(lib->handle);

    lib->handle = NULL;
    deref_nifmod(lib);
}
2284 
/* Detach a resource type from its owning library (at takeover or
 * final destruction). Closes the purged library if this was the last
 * resource type with callbacks keeping it loaded. */
static void steal_resource_type(ErlNifResourceType* type)
{
    struct erl_module_nif* lib = type->owner;

    if (rt_have_callbacks(&type->fn_real)
        && erts_refc_dectest(&lib->dynlib_refc, 0) == 0) {
        /* last resource type with callbacks gone, close purged lib */
        close_dynlib(lib);
    }
    deref_nifmod(lib);
}
2296 
2297 static erts_rwmtx_t erts_nif_call_tab_lock;
2298 
/* Dtor wrapper installed while a takeover is in progress: holds the
 * call-table lock so the real callback cannot change mid-call. */
static void resource_dtor_during_takeover(ErlNifEnv* env, void* obj)
{
    ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
    ErlNifResourceType* rt = rsrc->type;

    erts_rwmtx_rlock(&erts_nif_call_tab_lock);
    if (rt->fn_real.dtor != NULL)
        rt->fn_real.dtor(env, obj);
    erts_rwmtx_runlock(&erts_nif_call_tab_lock);
}
/* Stop wrapper used during takeover; see resource_dtor_during_takeover. */
static void resource_stop_during_takeover(ErlNifEnv* env, void* obj,
                                           ErlNifEvent e, int is_direct_call)
{
    ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
    ErlNifResourceType* rt = rsrc->type;

    erts_rwmtx_rlock(&erts_nif_call_tab_lock);
    ASSERT(rt->fn_real.stop);
    rt->fn_real.stop(env, obj, e, is_direct_call);
    erts_rwmtx_runlock(&erts_nif_call_tab_lock);
}
/* Down wrapper used during takeover; see resource_dtor_during_takeover. */
static void resource_down_during_takeover(ErlNifEnv* env, void* obj,
                                          ErlNifPid* pid, ErlNifMonitor* mon)
{
    ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
    ErlNifResourceType* rt = rsrc->type;

    erts_rwmtx_rlock(&erts_nif_call_tab_lock);
    ASSERT(rt->fn_real.down);
    rt->fn_real.down(env, obj, pid, mon);
    erts_rwmtx_runlock(&erts_nif_call_tab_lock);
}
/* Dyncall wrapper used during takeover; see resource_dtor_during_takeover. */
static void resource_dyncall_during_takeover(ErlNifEnv* env, void* obj,
                                             void* call_data)
{
    ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
    ErlNifResourceType* rt = rsrc->type;

    erts_rwmtx_rlock(&erts_nif_call_tab_lock);
    ASSERT(rt->fn_real.dyncall);
    rt->fn_real.dyncall(env, obj, call_data);
    erts_rwmtx_runlock(&erts_nif_call_tab_lock);
}
2342 
/* Dummy destructor installed when a type has other callbacks but no
 * user dtor, keeping rt_have_callbacks() a simple NULL test. */
static void resource_dtor_nop(ErlNifEnv* env, void* obj)
{
    /* intentionally empty */
}
2347 
2348 /* The opened_rt_list is used by enif_open_resource_type()
2349  * in order to rollback "creates" and "take-overs" in case the load fails.
2350  */
struct opened_resource_type
{
    struct opened_resource_type* next;

    ErlNifResourceFlags op;                /* CREATE or TAKEOVER actually performed */
    ErlNifResourceType* type;              /* the (new or stolen) type */
    ErlNifResourceTypeInit new_callbacks;  /* callbacks to install on commit */
};
2359 static struct opened_resource_type* opened_rt_list = NULL;
2360 
static
ErlNifResourceType* open_resource_type(ErlNifEnv* env,
                                       const char* name_str,
                                       const ErlNifResourceTypeInit* init,
                                       ErlNifResourceFlags flags,
                                       ErlNifResourceFlags* tried,
                                       int init_members)
{
    /* Common implementation behind enif_open_resource_type*() and
     * enif_init_resource_type(). 'init_members' says how many leading
     * members of *init are valid (older API revisions pass fewer).
     * The result is only queued on opened_rt_list here; the change is
     * committed or rolled back when the module load finishes. */
    ErlNifResourceType* type = NULL;
    ErlNifResourceFlags op = flags;
    Eterm module_am, name_am;

    ERTS_LC_ASSERT(erts_has_code_write_permission());
    module_am = make_atom(env->mod_nif->mod->module);
    name_am = enif_make_atom(env, name_str);

    type = find_resource_type(module_am, name_am);
    if (type == NULL) {
	if (flags & ERL_NIF_RT_CREATE) {
	    type = erts_alloc(ERTS_ALC_T_NIF,
			      sizeof(struct enif_resource_type_t));
	    type->module = module_am;
	    type->name = name_am;
	    erts_refc_init(&type->refc, 1);
	    op = ERL_NIF_RT_CREATE;
	#ifdef DEBUG
            /* Poison fields to catch use before prepare_opened_rt() */
            type->fn.dtor = (void*)1;
	    type->fn_real.dtor = (void*)1;
	    type->owner = (void*)2;
	    type->prev = (void*)3;
	    type->next = (void*)4;
	#endif
	}
    }
    else {
	if (flags & ERL_NIF_RT_TAKEOVER) {
	    op = ERL_NIF_RT_TAKEOVER;
	}
	else {
	    type = NULL;   /* exists but takeover not requested */
	}
    }
    if (type != NULL) {
	struct opened_resource_type* ort = erts_alloc(ERTS_ALC_T_TMP,
						sizeof(struct opened_resource_type));
	ort->op = op;
	ort->type = type;
        sys_memzero(&ort->new_callbacks, sizeof(ErlNifResourceTypeInit));
        /* Intentional fallthrough: copy the first init_members fields */
        switch (init_members) {
        case 4: ort->new_callbacks.dyncall = init->dyncall;
        case 3: ort->new_callbacks.down = init->down;
        case 2: ort->new_callbacks.stop = init->stop;
        case 1: ort->new_callbacks.dtor = init->dtor;
        case 0:
            break;
        default:
            ERTS_ASSERT(!"Invalid number of ErlNifResourceTypeInit members");
        }
        if (!ort->new_callbacks.dtor && (ort->new_callbacks.down ||
                                         ort->new_callbacks.stop ||
                                         ort->new_callbacks.dyncall)) {
            /* Set dummy dtor for fast rt_have_callbacks()
             * This case should be rare anyway */
            ort->new_callbacks.dtor = resource_dtor_nop;
        }
	ort->next = opened_rt_list;
	opened_rt_list = ort;
    }
    if (tried != NULL) {
	*tried = op;   /* report which operation was attempted */
    }
    return type;
}
2434 
2435 ErlNifResourceType*
enif_open_resource_type(ErlNifEnv * env,const char * module_str,const char * name_str,ErlNifResourceDtor * dtor,ErlNifResourceFlags flags,ErlNifResourceFlags * tried)2436 enif_open_resource_type(ErlNifEnv* env,
2437                         const char* module_str,
2438                         const char* name_str,
2439 			ErlNifResourceDtor* dtor,
2440 			ErlNifResourceFlags flags,
2441 			ErlNifResourceFlags* tried)
2442 {
2443     ErlNifResourceTypeInit init = {dtor};
2444     ASSERT(module_str == NULL); /* for now... */
2445     return open_resource_type(env, name_str, &init, flags, tried, 1);
2446 }
2447 
2448 ErlNifResourceType*
enif_open_resource_type_x(ErlNifEnv * env,const char * name_str,const ErlNifResourceTypeInit * init,ErlNifResourceFlags flags,ErlNifResourceFlags * tried)2449 enif_open_resource_type_x(ErlNifEnv* env,
2450                           const char* name_str,
2451                           const ErlNifResourceTypeInit* init,
2452                           ErlNifResourceFlags flags,
2453                           ErlNifResourceFlags* tried)
2454 {
2455     return open_resource_type(env, name_str, init, flags, tried, 3);
2456 }
2457 
2458 ErlNifResourceType*
enif_init_resource_type(ErlNifEnv * env,const char * name_str,const ErlNifResourceTypeInit * init,ErlNifResourceFlags flags,ErlNifResourceFlags * tried)2459 enif_init_resource_type(ErlNifEnv* env,
2460                         const char* name_str,
2461                         const ErlNifResourceTypeInit* init,
2462                         ErlNifResourceFlags flags,
2463                         ErlNifResourceFlags* tried)
2464 {
2465     return open_resource_type(env, name_str, init, flags, tried, init->members);
2466 }
2467 
/* Publish newly created resource types and install the lock-wrapper
 * callbacks on taken-over ones, for the load that is committing. */
static void prepare_opened_rt(struct erl_module_nif* lib)
{
    struct opened_resource_type* ort = opened_rt_list;

    while (ort) {
	ErlNifResourceType* type = ort->type;

	if (ort->op == ERL_NIF_RT_CREATE) {
            type->fn = ort->new_callbacks;
            type->fn_real = ort->new_callbacks;
            /* Insert first in the global resource type list */
	    type->prev = &resource_type_list;
	    type->next = resource_type_list.next;
	    type->next->prev = type;
	    type->prev->next = type;
	}
	else { /* ERL_NIF_RT_TAKEOVER */
	    steal_resource_type(type);

            /*
             * Prepare for atomic change of callbacks with lock-wrappers
             */
            type->fn.dtor = resource_dtor_during_takeover;
            type->fn.stop = resource_stop_during_takeover;
            type->fn.down = resource_down_during_takeover;
            type->fn.dyncall = resource_dyncall_during_takeover;
	}
        type->owner = lib;

        /* Types with callbacks keep the dynamic library loaded */
        if (rt_have_callbacks(&ort->new_callbacks))
	    erts_refc_inc(&lib->dynlib_refc, 2);
	erts_refc_inc(&lib->refc, 2);

        ort = ort->next;
    }
}
2503 
2504 /*
2505  * Do atomic change from old to new callbacks
2506  */
commit_opened_rt(void)2507 static void commit_opened_rt(void)
2508 {
2509     struct opened_resource_type* ort = opened_rt_list;
2510 
2511     ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_nif_call_tab_lock));
2512 
2513     while (ort) {
2514         if (ort->op == ERL_NIF_RT_TAKEOVER) {
2515             ort->type->fn_real = ort->new_callbacks;
2516         }
2517         ort = ort->next;
2518     }
2519 }
2520 
2521 /*
2522  * Cleanup and let callbacks be called directly without locking
2523  */
cleanup_opened_rt(void)2524 static void cleanup_opened_rt(void)
2525 {
2526     struct opened_resource_type* ort = opened_rt_list;
2527 
2528     while (opened_rt_list) {
2529         ort = opened_rt_list;
2530         if (ort->op == ERL_NIF_RT_TAKEOVER) {
2531             ort->type->fn = ort->new_callbacks;
2532         }
2533         opened_rt_list = ort->next;
2534         erts_free(ERTS_ALC_T_TMP, ort);
2535     }
2536 }
2537 
rollback_opened_resource_types(void)2538 static void rollback_opened_resource_types(void)
2539 {
2540     while (opened_rt_list) {
2541 	struct opened_resource_type* ort = opened_rt_list;
2542 
2543 	if (ort->op == ERL_NIF_RT_CREATE) {
2544 	    erts_free(ERTS_ALC_T_NIF, ort->type);
2545 	}
2546 
2547 	opened_rt_list = ort->next;
2548 	erts_free(ERTS_ALC_T_TMP, ort);
2549     }
2550 }
2551 
/* The top bit of ErtsResourceMonitors.refc flags a dying resource;
 * the remaining bits hold the monitor reference count. */
#ifdef ARCH_64
#  define ERTS_RESOURCE_DYING_FLAG (((Uint) 1) << 63)
#else
#  define ERTS_RESOURCE_DYING_FLAG (((Uint) 1) << 31)
#endif
#define ERTS_RESOURCE_REFC_MASK (~ERTS_RESOURCE_DYING_FLAG)
2558 
/* Helpers for the packed ErtsResourceMonitors.refc word: the top bit
 * is the "dying" flag, the rest is the monitor reference count.
 * Callers are expected to hold the monitors lock (not asserted here). */
static ERTS_INLINE void
rmon_set_dying(ErtsResourceMonitors *rms)
{
    rms->refc |= ERTS_RESOURCE_DYING_FLAG;
}

static ERTS_INLINE int
rmon_is_dying(ErtsResourceMonitors *rms)
{
    return !!(rms->refc & ERTS_RESOURCE_DYING_FLAG);
}

static ERTS_INLINE void
rmon_refc_inc(ErtsResourceMonitors *rms)
{
    rms->refc++;
}

/* Decrement and return the remaining count (dying flag masked out) */
static ERTS_INLINE Uint
rmon_refc_dec_read(ErtsResourceMonitors *rms)
{
    Uint res;
    ASSERT((rms->refc & ERTS_RESOURCE_REFC_MASK) != 0);
    res = --rms->refc;
    return res & ERTS_RESOURCE_REFC_MASK;
}

static ERTS_INLINE void
rmon_refc_dec(ErtsResourceMonitors *rms)
{
    ASSERT((rms->refc & ERTS_RESOURCE_REFC_MASK) != 0);
    --rms->refc;
}

static ERTS_INLINE Uint
rmon_refc_read(ErtsResourceMonitors *rms)
{
    return rms->refc & ERTS_RESOURCE_REFC_MASK;
}
2598 
/* erts_monitor_tree_foreach_delete() callback: sends a demonitor
 * signal for each origin monitor held by a dying resource. */
static int dtor_demonitor(ErtsMonitor* mon, void* context, Sint reds)
{
    ASSERT(erts_monitor_is_origin(mon));
    ASSERT(is_internal_pid(mon->other.item));
    erts_proc_sig_send_demonitor(mon);
    return 1;
}
2607 
2608 #ifdef DEBUG
/* Debug helper: does this resource have monitors and is it dying? */
int erts_dbg_is_resource_dying(ErtsResource* resource)
{
    ErtsResourceMonitors* rm = resource->monitors;
    return rm != NULL && rmon_is_dying(rm);
}
2613 #endif
2614 
2615 #define NIF_RESOURCE_DTOR &nif_resource_dtor_prologue
2616 
2617 static void run_resource_dtor(void* vbin);
2618 
static int nif_resource_dtor_prologue(Binary* bin)
{
    /*
     * Schedule user resource destructor as aux work to get a context
     * where we know what locks we have for example.
     */
    Uint sid = erts_get_scheduler_id();

    erts_schedule_misc_aux_work(sid ? sid : 1, run_resource_dtor, bin);
    return 0; /* don't free */
}
2631 
/* Aux-work callback doing the actual resource destruction scheduled
 * by nif_resource_dtor_prologue(). May return early and be re-run
 * later if monitors still hold references to the resource. */
static void run_resource_dtor(void* vbin)
{
    Binary* bin = (Binary*) vbin;
    ErtsResource* resource = (ErtsResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
    ErlNifResourceType* type = resource->type;
    ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);

    if (resource->monitors) {
        ErtsResourceMonitors* rm = resource->monitors;
        int kill;
        ErtsMonitor *root;
        Uint refc;

        ASSERT(type->fn.down);
        erts_mtx_lock(&rm->lock);
        ASSERT(erts_refc_read(&bin->intern.refc, 0) == 0);
        kill = !rmon_is_dying(rm);
        if (kill) {
            /* First visit: mark dying and detach the monitor tree */
            rmon_set_dying(rm);
            root = rm->root;
            rm->root = NULL;
        }
        refc = rmon_refc_read(rm);
        erts_mtx_unlock(&rm->lock);

        if (kill)
            erts_monitor_tree_foreach_delete(&root,
                                             dtor_demonitor,
                                             NULL);

        /*
         * If resource->monitors->refc != 0 there are outstanding
         * references to the resource from monitors that have not been
         * removed yet. nif_resource_dtor_prologue() will be called
         * again when this reference count reaches zero.
         */
        if (refc != 0)
            return; /* we'll be back... */
        erts_mtx_destroy(&rm->lock);
    }

    if (type->fn.dtor != NULL) {
        /* Run the user destructor in a process-less NIF environment */
        struct enif_msg_environment_t msg_env;
        pre_nif_noproc(&msg_env, type->owner, NULL);
	type->fn.dtor(&msg_env.env, resource->data);
        post_nif_noproc(&msg_env);
    }
    if (erts_refc_dectest(&type->refc, 0) == 0) {
        /* Last resource of a type whose module has been unloaded */
	ASSERT(type->next == NULL);
	ASSERT(type->owner != NULL);
	ASSERT(type->owner->mod == NULL);
	steal_resource_type(type);
	erts_free(ERTS_ALC_T_NIF, type);
    }
    erts_magic_binary_free((Binary*)vbin);
}
2689 
/* Invoke the resource type's stop() callback for event 'e', running it
 * inside a temporary "no process" NIF environment. */
void erts_resource_stop(ErtsResource* resource, ErlNifEvent e,
                        int is_direct_call)
{
    struct enif_msg_environment_t menv;
    ErlNifResourceType* rt = resource->type;

    ASSERT(rt->fn.stop);
    pre_nif_noproc(&menv, rt->owner, NULL);
    rt->fn.stop(&menv.env, resource->data, e, is_direct_call);
    post_nif_noproc(&menv);
}
2699 
/* Drop one monitor reference from 'resource'. If that was the last
 * monitor reference and the resource is already marked as dying, the
 * backing magic binary is freed here. */
void erts_nif_demonitored(ErtsResource* resource)
{
    ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
    ErtsResourceMonitors* rmp = resource->monitors;
    int last_ref;

    ASSERT(rmp);
    ASSERT(resource->type->fn.down);

    erts_mtx_lock(&rmp->lock);
    last_ref = ((rmon_refc_dec_read(rmp) == 0) & !!rmon_is_dying(rmp));
    erts_mtx_unlock(&rmp->lock);

    if (last_ref)
        erts_bin_free(&bin->binary);
}
2716 
/*
 * Fire a resource monitor: called with the target half ('tmon') of a
 * monitor created via enif_monitor_process() when it triggers. Runs the
 * user down callback if the monitor is still active and the resource is
 * still referenced; otherwise just releases the monitor (and possibly
 * the resource binary).
 */
void erts_fire_nif_monitor(ErtsMonitor *tmon)
{
    ErtsResource* resource;
    ErtsMonitorData *mdp;
    ErtsMonitor *omon;
    ErtsBinary* bin;
    struct enif_msg_environment_t msg_env;
    ErlNifPid nif_pid;
    ErlNifMonitor nif_monitor;
    ErtsResourceMonitors* rmp;
    Uint mrefc, brefc;
    int active, is_dying;

    ASSERT(tmon->type == ERTS_MON_TYPE_RESOURCE);
    ASSERT(erts_monitor_is_target(tmon));

    resource = tmon->other.ptr;
    bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
    rmp = resource->monitors;

    mdp = erts_monitor_to_data(tmon);
    omon = &mdp->origin;

    ASSERT(rmp);
    ASSERT(resource->type->fn.down);

    erts_mtx_lock(&rmp->lock);

    mrefc = rmon_refc_dec_read(rmp);
    is_dying = rmon_is_dying(rmp);
    /* "active": resource is not being destroyed and the origin monitor
     * is still in the tree (no concurrent demonitor). */
    active = !is_dying && erts_monitor_is_in_table(omon);

    if (active) {
        erts_monitor_tree_delete(&rmp->root, omon);
        /* Try to grab a binary reference; yields 0 if refc already hit 0. */
        brefc = (Uint) erts_refc_inc_unless(&bin->binary.intern.refc, 0, 0);
    }

    erts_mtx_unlock(&rmp->lock);

    if (!active) {
        if (is_dying && mrefc == 0) {
            /* Last monitor reference on a dying resource; free it now. */
            ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) == 0);
            erts_bin_free(&bin->binary);
        }
        erts_monitor_release(tmon);
    }
    else {
        if (brefc > 0) {
            /* Resource still alive: run the user down callback in a
             * temporary "no process" environment. */
            ASSERT(is_internal_pid(omon->other.item));
            erts_ref_to_driver_monitor(mdp->ref, &nif_monitor);
            nif_pid.pid = omon->other.item;
            pre_nif_noproc(&msg_env, resource->type->owner, NULL);
            resource->type->fn.down(&msg_env.env, resource->data, &nif_pid, &nif_monitor);
            post_nif_noproc(&msg_env);

            erts_bin_release(&bin->binary);
        }

        erts_monitor_release_both(mdp);
    }
}
2778 
/*
 * Allocate a new resource object of 'type' with 'data_sz' bytes of user
 * data, backed by an unaligned magic binary. If the type has a down
 * callback, an ErtsResourceMonitors struct is placed after the (aligned)
 * user data. Returns a pointer to the user data area.
 */
void* enif_alloc_resource(ErlNifResourceType* type, size_t data_sz)
{
    size_t magic_sz = offsetof(ErtsResource,data);
    Binary* bin;
    ErtsResource* resource;
    size_t monitors_offs;

    if (type->fn.down) {
        /* Put ErtsResourceMonitors after user data and properly aligned */
        monitors_offs = ((data_sz + ERTS_ALLOC_ALIGN_BYTES - 1)
                         & ~((size_t)ERTS_ALLOC_ALIGN_BYTES - 1));
        magic_sz += monitors_offs + sizeof(ErtsResourceMonitors);
    }
    else {
        ERTS_UNDEF(monitors_offs, 0);
        magic_sz += data_sz;
    }
    bin = erts_create_magic_binary_x(magic_sz, NIF_RESOURCE_DTOR,
                                     ERTS_ALC_T_BINARY,
                                     1); /* unaligned */
    resource = ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);

    ASSERT(type->owner && type->next && type->prev); /* not allowed in load/upgrade */
    resource->type = type;
    /* The caller gets one reference to the binary... */
    erts_refc_inc(&bin->intern.refc, 1);
#ifdef DEBUG
    /* ...tracked separately in debug builds to catch keep/release imbalance. */
    erts_refc_init(&resource->nif_refc, 1);
#endif
    /* The resource keeps its type alive. */
    erts_refc_inc(&resource->type->refc, 2);
    if (type->fn.down) {
        resource->monitors = (ErtsResourceMonitors*) (resource->data + monitors_offs);
        erts_mtx_init(&resource->monitors->lock, "resource_monitors", NIL,
            ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
        resource->monitors->root = NULL;
        resource->monitors->refc = 0;
        resource->monitors->user_data_sz = data_sz;
    }
    else {
        resource->monitors = NULL;
    }
    return resource->data;
}
2821 
enif_release_resource(void * obj)2822 void enif_release_resource(void* obj)
2823 {
2824     ErtsResource* resource = DATA_TO_RESOURCE(obj);
2825     ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
2826 
2827     ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
2828     ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
2829 #ifdef DEBUG
2830     erts_refc_dec(&resource->nif_refc, 0);
2831 #endif
2832     erts_bin_release(&bin->binary);
2833 }
2834 
enif_keep_resource(void * obj)2835 void enif_keep_resource(void* obj)
2836 {
2837     ErtsResource* resource = DATA_TO_RESOURCE(obj);
2838     ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
2839 
2840     ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
2841     ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
2842 #ifdef DEBUG
2843     erts_refc_inc(&resource->nif_refc, 1);
2844 #endif
2845     erts_refc_inc(&bin->binary.intern.refc, 2);
2846 }
2847 
/* Build a magic-ref term referring to 'resource' on the heap at *hpp,
 * registering it in the off-heap list 'oh'. */
Eterm erts_bld_resource_ref(Eterm** hpp, ErlOffHeap* oh, ErtsResource* resource)
{
    ErtsBinary* mbin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);

    ASSERT(erts_refc_read(&mbin->binary.intern.refc, 0) != 0);
    return erts_mk_magic_ref(hpp, oh, &mbin->binary);
}
2854 
enif_make_resource(ErlNifEnv * env,void * obj)2855 ERL_NIF_TERM enif_make_resource(ErlNifEnv* env, void* obj)
2856 {
2857     ErtsResource* resource = DATA_TO_RESOURCE(obj);
2858     ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
2859     Eterm* hp = alloc_heap(env, ERTS_MAGIC_REF_THING_SIZE);
2860     ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
2861     return erts_mk_magic_ref(&hp, &MSO(env->proc), &bin->binary);
2862 }
2863 
/*
 * Create a binary term whose payload is 'data'/'size' located inside the
 * resource 'obj'. The ProcBin references the resource's magic binary,
 * so the resource stays alive as long as the binary term does.
 */
ERL_NIF_TERM enif_make_resource_binary(ErlNifEnv* env, void* obj,
				       const void* data, size_t size)
{
    ErtsResource* resource = DATA_TO_RESOURCE(obj);
    ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
    ErlOffHeap *ohp = &MSO(env->proc);
    Eterm* hp = alloc_heap(env,PROC_BIN_SIZE);
    ProcBin* pb = (ProcBin *) hp;

    pb->thing_word = HEADER_PROC_BIN;
    pb->size = size;
    /* Link the new ProcBin at the head of the off-heap list. */
    pb->next = ohp->first;
    ohp->first = (struct erl_off_heap_header*) pb;
    pb->val = &bin->binary;
    pb->bytes = (byte*) data;
    pb->flags = 0;

    OH_OVERHEAD(ohp, size / sizeof(Eterm));
    /* The binary term holds its own reference to the magic binary. */
    erts_refc_inc(&bin->binary.intern.refc, 1);

    return make_binary(hp);
}
2886 
/* Extract the user-data pointer of a resource of 'type' from 'term'.
 * Accepts both magic refs and (resource) refc binaries as handles.
 * Returns 1 on success with *objp set, 0 otherwise. */
int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* type,
		      void** objp)
{
    Binary* mbin;
    ErtsResource* rsrc;

    if (is_internal_magic_ref(term)) {
        mbin = erts_magic_ref2bin(term);
    }
    else if (is_binary(term)) {
        Eterm* hp = binary_val(term);
        if (thing_subtag(*hp) != REFC_BINARY_SUBTAG)
            return 0;
        /* Note: a ProcBin with size != 0 ("resource binary") is
         * deliberately accepted as a handle here. */
        mbin = ((ProcBin *) hp)->val;
        if (!(mbin->intern.flags & BIN_FLAG_MAGIC))
            return 0;
    }
    else {
        return 0;
    }

    rsrc = (ErtsResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(mbin);
    if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != NIF_RESOURCE_DTOR
        || rsrc->type != type) {
        return 0;
    }
    *objp = rsrc->data;
    return 1;
}
2918 
enif_sizeof_resource(void * obj)2919 size_t enif_sizeof_resource(void* obj)
2920 {
2921     ErtsResource* resource = DATA_TO_RESOURCE(obj);
2922     if (resource->monitors) {
2923         return resource->monitors->user_data_sz;
2924     }
2925     else {
2926         Binary* bin = &ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource)->binary;
2927         return ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(bin) - offsetof(ErtsResource,data);
2928     }
2929 }
2930 
/* Call the dyncall callback of the resource behind 'resource_term',
 * provided it belongs to the resource type identified by module/name
 * atoms. Returns 0 if the callback was invoked, 1 otherwise. */
int enif_dynamic_resource_call(ErlNifEnv* caller_env,
                               ERL_NIF_TERM rt_module_atom,
                               ERL_NIF_TERM rt_name_atom,
                               ERL_NIF_TERM resource_term,
                               void* call_data)
{
    Binary* mbin;
    ErtsResource* rsrc;
    ErlNifResourceType* rt;

    if (!is_internal_magic_ref(resource_term))
        return 1;

    mbin = erts_magic_ref2bin(resource_term);
    if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != NIF_RESOURCE_DTOR)
        return 1;

    rsrc = (ErtsResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(mbin);
    rt = rsrc->type;
    ASSERT(rt->owner);
    if (rt->module != rt_module_atom
        || rt->name != rt_name_atom
        || rt->fn.dyncall == NULL) {
        return 1;
    }

    rt->fn.dyncall(caller_env, &rsrc->data, call_data);
    return 0;
}
2958 
2959 
enif_dlopen(const char * lib,void (* err_handler)(void *,const char *),void * err_arg)2960 void* enif_dlopen(const char* lib,
2961 		  void (*err_handler)(void*,const char*), void* err_arg)
2962 {
2963     ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
2964     void* handle;
2965     void* init_func;
2966     if (erts_sys_ddll_open(lib, &handle, &errdesc) == ERL_DE_NO_ERROR) {
2967 	if (erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) == ERL_DE_NO_ERROR) {
2968 	    erts_sys_ddll_call_nif_init(init_func);
2969 	}
2970     }
2971     else {
2972 	if (err_handler != NULL) {
2973 	    (*err_handler)(err_arg, errdesc.str);
2974 	}
2975 	handle = NULL;
2976     }
2977     erts_sys_ddll_free_error(&errdesc);
2978     return handle;
2979 }
2980 
enif_dlsym(void * handle,const char * symbol,void (* err_handler)(void *,const char *),void * err_arg)2981 void* enif_dlsym(void* handle, const char* symbol,
2982 		 void (*err_handler)(void*,const char*), void* err_arg)
2983 {
2984     ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
2985     void* ret;
2986     if (erts_sys_ddll_sym2(handle, symbol, &ret, &errdesc) != ERL_DE_NO_ERROR) {
2987 	if (err_handler != NULL) {
2988 	    (*err_handler)(err_arg, errdesc.str);
2989 	}
2990 	erts_sys_ddll_free_error(&errdesc);
2991 	return NULL;
2992     }
2993     return ret;
2994 }
2995 
/* Consume 'percent' (1..100) of the current timeslice's reduction
 * budget. Returns nonzero when the timeslice is exhausted and the NIF
 * should yield. No-op (returns 0) on dirty schedulers. */
int enif_consume_timeslice(ErlNifEnv* env, int percent)
{
    Process *proc;
    Sint reds;
    int sched_type;

    execution_state(env, &proc, &sched_type);

    if (sched_type < 0) {
        /* Dirty schedulers have no reduction budget. */
        return 0;
    }

    ASSERT(is_proc_bound(env) && percent >= 1 && percent <= 100);
    /* Clamp out-of-range values in release builds. */
    if (percent < 1)
        percent = 1;
    else if (percent > 100)
        percent = 100;

    /* Charge 'percent' of a full timeslice (slice rounded up). */
    reds = ((CONTEXT_REDS + 99) / 100) * percent;
    ASSERT(reds > 0 && reds <= CONTEXT_REDS);
    BUMP_REDS(proc, reds);
    return ERTS_BIF_REDS_LEFT(proc) == 0;
}
3016 
/* Drop 'ep's reference to its NIF module's dynamic library (closing the
 * library on last reference) and clear the module pointer. */
static ERTS_INLINE void
nfunc_cleanup_nif_mod(ErtsNativeFunc *ep)
{
    if (erts_refc_dectest(&ep->m->dynlib_refc, 0) == 0)
	close_dynlib(ep->m);
    ep->m = NULL;
}
3024 
/* Externally linkable entry point for nfunc_cleanup_nif_mod(). */
void
erts_nfunc_cleanup_nif_mod(ErtsNativeFunc *ep)
{
    nfunc_cleanup_nif_mod(ep);
}
3030 
/* Restore the process' original native-function state after a scheduled
 * NIF completed with result 'res', then release the NIF module. */
static ERTS_INLINE void
nfunc_restore(Process *c_p, ErtsNativeFunc *ep, Eterm res)
{
    erts_nfunc_restore(c_p, ep, res);
    ASSERT(ep->m);
    nfunc_cleanup_nif_mod(ep);
}
3038 
3039 
3040 
/*
 * Finalize a dirty NIF call. This function is scheduled to cause the VM to
 * switch the process off a dirty scheduler thread and back onto a regular
 * scheduler thread, and then return the result from the dirty NIF. It also
 * restores the original NIF MFA when necessary based on the value of
 * ep->func set by execute_dirty_nif via init_nif_sched_data -- non-NULL
 * means restore, NULL means do not restore.
 */
static ERL_NIF_TERM
dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    Process* proc;
    ErtsNativeFunc* ep;

    execution_state(env, &proc, NULL);

    ASSERT(argc == 1);
    ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
    ep = (ErtsNativeFunc*) ERTS_PROC_GET_NFUNC_TRAP_WRAPPER(proc);
    ASSERT(ep);
    /* argv[0] is the dirty NIF's result; restore hands it back to the
     * process. NOTE(review): dirty_nif_exception() below warns that
     * restore may overwrite argv -- here argv[0] is read again after the
     * restore; presumably safe for the success path, verify. */
    nfunc_restore(proc, ep, argv[0]);
    return argv[0];
}
3064 
/* Finalize a dirty NIF call that raised an exception.  Otherwise same as
 * the dirty_nif_finalizer() function.
 */
static ERL_NIF_TERM
dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ERL_NIF_TERM ret;
    Process* proc;
    ErtsNativeFunc* ep;
    Eterm exception;

    execution_state(env, &proc, NULL);

    ASSERT(argc == 1);
    ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
    ep = (ErtsNativeFunc*) ERTS_PROC_GET_NFUNC_TRAP_WRAPPER(proc);
    ASSERT(ep);
    exception = argv[0]; /* argv overwritten by restore below... */
    nfunc_cleanup_nif_mod(ep);
    /* Re-raise the dirty NIF's exception in the clean-scheduler env. */
    ret = enif_raise_exception(env, exception);

    /* Restore orig info for error and clear native func wrapper in
     * handle_error() */
    proc->freason |= EXF_RESTORE_NFUNC;
    return ret;
}
3091 
/*
 * Schedule a statically declared dirty NIF (flagged in the NIF's
 * ErlNifFunc entry). Marks the process with the given dirty flag
 * ('dirty_psflg') and reschedules the actual NIF function.
 */
static ERTS_INLINE ERL_NIF_TERM
static_schedule_dirty_nif(ErlNifEnv* env, erts_aint32_t dirty_psflg,
			     int argc, const ERL_NIF_TERM argv[])
{
    Process *proc;
    ErtsNativeFunc *ep;
    Eterm mod, func;
    NativeFunPtr fp;

    execution_state(env, &proc, NULL);
    ASSERT(proc);

    /*
     * Called in order to schedule statically determined
     * dirty NIF calls...
     *
     * Note that 'current' does not point into a ErtsNativeFunc
     * structure; only a structure with similar parts (located in code).
     */

    ep = ErtsContainerStruct(proc->current, ErtsNativeFunc, trampoline.info.mfa);
    mod = proc->current->module;
    func = proc->current->function;
    fp = (NativeFunPtr) ep->func;

    ASSERT(is_atom(mod) && is_atom(func));
    ASSERT(fp);

    /* Replace any previous dirty-CPU/IO flags with the requested one. */
    (void) erts_atomic32_read_bset_nob(&proc->state,
					   (ERTS_PSFLG_DIRTY_CPU_PROC
					    | ERTS_PSFLG_DIRTY_IO_PROC),
					   dirty_psflg);

    return schedule(env, fp, NULL, mod, func, argc, argv);
}
3127 
/* Trampoline scheduling a statically declared dirty I/O NIF. */
static ERL_NIF_TERM
static_schedule_dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_IO_PROC, argc, argv);
}
3133 
/* Trampoline scheduling a statically declared dirty CPU NIF. */
static ERL_NIF_TERM
static_schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_CPU_PROC, argc, argv);
}
3139 
3140 /*
3141  * NIF execution wrapper used by enif_schedule_nif() for regular NIFs. It
3142  * calls the actual NIF, restores original NIF MFA if necessary, and
3143  * then returns the NIF result.
3144  */
3145 static ERL_NIF_TERM
execute_nif(ErlNifEnv * env,int argc,const ERL_NIF_TERM argv[])3146 execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
3147 {
3148     Process* proc;
3149     NativeFunPtr fp;
3150     ErtsNativeFunc* ep;
3151     ERL_NIF_TERM result;
3152 
3153     execution_state(env, &proc, NULL);
3154     ASSERT(proc);
3155 
3156     ep = ErtsContainerStruct(proc->current, ErtsNativeFunc, trampoline.info.mfa);
3157     fp = ep->func;
3158     ASSERT(ep);
3159     ASSERT(!env->exception_thrown);
3160 
3161     fp = (NativeFunPtr) ep->func;
3162 
3163 #ifdef DEBUG
3164     ep->func = ERTS_DBG_NIF_NOT_SCHED_MARKER;
3165 #endif
3166 
3167     result = (*fp)(env, argc, argv);
3168 
3169     ASSERT(ep == ERTS_PROC_GET_NFUNC_TRAP_WRAPPER(proc));
3170 
3171     if (is_value(result) || proc->freason != TRAP) {
3172 	/* Done (not rescheduled)... */
3173 	ASSERT(ep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER);
3174 	if (!env->exception_thrown)
3175 	    nfunc_restore(proc, ep, result);
3176 	else {
3177 	    nfunc_cleanup_nif_mod(ep);
3178 	    /*
3179 	     * Restore orig info for error and clear nif
3180 	     * export in handle_error()
3181 	     */
3182 	    proc->freason |= EXF_RESTORE_NFUNC;
3183 	}
3184     }
3185 
3186 #ifdef DEBUG
3187     if (ep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER)
3188 	ep->func = NULL;
3189 #endif
3190 
3191     return result;
3192 }
3193 
/*
 * Schedule NIF 'fp' to run later under the name 'fun_name'. flags == 0
 * schedules on a normal scheduler (via the execute_nif wrapper);
 * ERL_NIF_DIRTY_JOB_CPU_BOUND / ERL_NIF_DIRTY_JOB_IO_BOUND schedule on
 * the corresponding dirty scheduler. Any other flags yield badarg.
 */
ERL_NIF_TERM
enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
		  ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]),
		  int argc, const ERL_NIF_TERM argv[])
{
    Process* proc;
    ERL_NIF_TERM fun_name_atom, result;
    int scheduler;

    if (argc > MAX_ARG)
	return enif_make_badarg(env);
    fun_name_atom = enif_make_atom(env, fun_name);
    if (enif_is_exception(env, fun_name_atom))
	return fun_name_atom;

    execution_state(env, &proc, &scheduler);
    ASSERT(proc);

    /* Not on a normal scheduler: take the main lock before scheduling. */
    if (scheduler <= 0) {
	if (scheduler == 0)
            /* NOTE(review): return value discarded -- enif_make_badarg()
             * records the exception in 'env'; presumably intentional,
             * but scheduling still proceeds below. Verify. */
	    enif_make_badarg(env);
	erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
    }

    if (flags == 0)
	result = schedule(env, execute_nif, fp, proc->current->module,
			  fun_name_atom, argc, argv);
    else if (!(flags & ~(ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND))) {
        /* Mark the process for the requested dirty scheduler type. */
        (void) erts_atomic32_read_bset_nob(&proc->state,
                                           (ERTS_PSFLG_DIRTY_CPU_PROC
                                            | ERTS_PSFLG_DIRTY_IO_PROC),
                                           (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND
                                            ? ERTS_PSFLG_DIRTY_CPU_PROC
                                            : ERTS_PSFLG_DIRTY_IO_PROC));
        result = schedule(env, fp, NULL, proc->current->module, fun_name_atom,
                          argc, argv);
    }
    else
	result = enif_make_badarg(env);

    if (scheduler < 0)
	erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);

    return result;
}
3239 
3240 int
enif_thread_type(void)3241 enif_thread_type(void)
3242 {
3243     ErtsSchedulerData *esdp = erts_get_scheduler_data();
3244 
3245     if (!esdp)
3246 	return ERL_NIF_THR_UNDEFINED;
3247 
3248     switch (esdp->type) {
3249     case ERTS_SCHED_NORMAL:
3250 	return ERL_NIF_THR_NORMAL_SCHEDULER;
3251     case ERTS_SCHED_DIRTY_CPU:
3252 	return ERL_NIF_THR_DIRTY_CPU_SCHEDULER;
3253     case ERTS_SCHED_DIRTY_IO:
3254         return ERL_NIF_THR_DIRTY_IO_SCHEDULER;
3255     default:
3256         ERTS_INTERNAL_ERROR("Invalid scheduler type");
3257 	return -1;
3258     }
3259 }
3260 
3261 /* Maps */
3262 
/* Return 1 iff 'term' is a map (flat or hash), else 0. */
int enif_is_map(ErlNifEnv* env, ERL_NIF_TERM term)
{
    return is_map(term) ? 1 : 0;
}
3267 
/* Store the number of key/value pairs of map 'term' in *size.
 * Returns 1 on success, 0 if 'term' is not a map. */
int enif_get_map_size(ErlNifEnv* env, ERL_NIF_TERM term, size_t *size)
{
    if (is_flatmap(term)) {
        *size = flatmap_get_size((flatmap_t*) flatmap_val(term));
        return 1;
    }
    if (is_hashmap(term)) {
        *size = hashmap_size(term);
        return 1;
    }
    return 0;
}
3282 
/*
 * Create an empty flatmap on the environment heap: an arity-0 keys
 * tuple immediately followed by the flatmap header.
 */
ERL_NIF_TERM enif_make_new_map(ErlNifEnv* env)
{
    Eterm* hp = alloc_heap(env,MAP_HEADER_FLATMAP_SZ+1);
    Eterm tup;
    flatmap_t *mp;

    /* Keys tuple (arity 0) first, flatmap struct right after it. */
    tup   = make_tuple(hp);
    *hp++ = make_arityval(0);
    mp    = (flatmap_t*)hp;
    mp->thing_word = MAP_HEADER_FLATMAP;
    mp->size = 0;
    mp->keys = tup;

    return make_flatmap(mp);
}
3298 
/*
 * Build a map from parallel key/value arrays of length 'cnt'.
 * Returns 1 with *map_out set on success, 0 on failure (e.g. duplicate
 * keys causing erts_map_from_ks_and_vs() to yield THE_NON_VALUE).
 */
int enif_make_map_from_arrays(ErlNifEnv *env,
                              ERL_NIF_TERM keys[],
                              ERL_NIF_TERM values[],
                              size_t cnt,
                              ERL_NIF_TERM *map_out)
{
    ErtsHeapFactory factory;
    int succeeded;

#ifdef ERTS_NIF_ASSERT_IN_ENV
    size_t index = 0;

    /* Debug check: all keys/values must belong to this environment. */
    while (index < cnt) {
        ASSERT_IN_ENV(env, keys[index], index, "key");
        ASSERT_IN_ENV(env, values[index], index, "value");
        index++;
    }
#endif

    /* Synchronize the process heap with the env before factory use. */
    flush_env(env);

    erts_factory_proc_prealloc_init(&factory, env->proc,
        cnt * 2 + MAP_HEADER_FLATMAP_SZ + 1);

    (*map_out) = erts_map_from_ks_and_vs(&factory, keys, values, cnt);
    succeeded = (*map_out) != THE_NON_VALUE;

    if (!succeeded) {
        /* Roll back any heap words the factory handed out. */
        erts_factory_undo(&factory);
    }

    erts_factory_close(&factory);

    cache_env(env);

    return succeeded;
}
3336 
/* Insert or update 'key' -> 'value' in 'map_in', storing the resulting
 * map in *map_out. Returns 1 on success, 0 if 'map_in' is not a map. */
int enif_make_map_put(ErlNifEnv* env,
	              Eterm map_in,
		      Eterm key,
		      Eterm value,
		      Eterm *map_out)
{
    if (!is_map(map_in))
        return 0;

    ASSERT_IN_ENV(env, map_in, 0, "old map");
    ASSERT_IN_ENV(env, key, 0, "key");
    ASSERT_IN_ENV(env, value, 0, "value");

    /* erts_maps_put() allocates on the process heap; flush/cache keep
     * the env's heap pointers in sync around it. */
    flush_env(env);
    *map_out = erts_maps_put(env->proc, key, value, map_in);
    cache_env(env);
    return 1;
}
3355 
/* Look up 'key' in 'map'; store the associated value in *value.
 * Returns 1 if found, 0 if not a map or key absent. */
int enif_get_map_value(ErlNifEnv* env,
	               Eterm map,
		       Eterm key,
		       Eterm *value)
{
    const Eterm *found;

    if (!is_map(map))
        return 0;

    found = erts_maps_get(key, map);
    if (found == NULL)
        return 0;

    *value = *found;
    return 1;
}
3372 
/* Update existing 'key' in 'map_in' to 'value', storing the resulting
 * map in *map_out. Returns the result of erts_maps_update(); 0 when
 * 'map_in' is not a map. */
int enif_make_map_update(ErlNifEnv* env,
	                 Eterm map_in,
			 Eterm key,
			 Eterm value,
			 Eterm *map_out)
{
    int updated;

    if (!is_map(map_in))
        return 0;

    ASSERT_IN_ENV(env, map_in, 0, "old map");
    ASSERT_IN_ENV(env, key, 0, "key");
    ASSERT_IN_ENV(env, value, 0, "value");

    /* Heap-allocating operation; keep env heap pointers in sync. */
    flush_env(env);
    updated = erts_maps_update(env->proc, key, value, map_in, map_out);
    cache_env(env);
    return updated;
}
3393 
/* Remove 'key' from 'map_in', storing the resulting map in *map_out.
 * Returns 1 for any map input (removal of an absent key is not an
 * error), 0 if 'map_in' is not a map. */
int enif_make_map_remove(ErlNifEnv* env,
	                 Eterm map_in,
			 Eterm key,
			 Eterm *map_out)
{
    if (!is_map(map_in))
        return 0;

    flush_env(env);
    (void) erts_maps_take(env->proc, key, map_in, map_out, NULL);
    cache_env(env);
    return 1;
}
3407 
/*
 * Initialize 'iter' over 'map', positioned at the first or last entry
 * depending on 'entry'. Hashmap iteration allocates a dynamic walk
 * stack that must be released with enif_map_iterator_destroy().
 * Returns 1 on success, 0 on bad 'map' or 'entry'.
 */
int enif_map_iterator_create(ErlNifEnv *env,
	                     Eterm map,
			     ErlNifMapIterator *iter,
			     ErlNifMapIteratorEntry entry)
{
    if (is_flatmap(map)) {
	flatmap_t *mp = (flatmap_t*)flatmap_val(map);
	size_t offset;

	switch (entry) {
	    case ERL_NIF_MAP_ITERATOR_FIRST: offset = 0; break;
	    case ERL_NIF_MAP_ITERATOR_LAST: offset = flatmap_get_size(mp) - 1; break;
	    default: goto error;
	}

	/* empty maps are ok but will leave the iterator
	 * in bad shape.
	 */

	/* idx is 1-based; ks/vs point at the entry for idx. */
	iter->map     = map;
	iter->u.flat.ks = ((Eterm *)flatmap_get_keys(mp)) + offset;
	iter->u.flat.vs = ((Eterm *)flatmap_get_values(mp)) + offset;
	iter->size    = flatmap_get_size(mp);
	iter->idx     = offset + 1;

	return 1;
    }
    else if (is_hashmap(map)) {
        iter->map = map;
        iter->size = hashmap_size(map);
        iter->u.hash.wstack = erts_alloc(ERTS_ALC_T_NIF, sizeof(ErtsDynamicWStack));
        WSTACK_INIT(iter->u.hash.wstack, ERTS_ALC_T_NIF);

        switch (entry) {
	    case ERL_NIF_MAP_ITERATOR_FIRST:
                iter->idx = 1;
                hashmap_iterator_init(&iter->u.hash.wstack->ws, map, 0);
                iter->u.hash.kv = hashmap_iterator_next(&iter->u.hash.wstack->ws);
                break;
	    case ERL_NIF_MAP_ITERATOR_LAST:
                iter->idx = hashmap_size(map);
                hashmap_iterator_init(&iter->u.hash.wstack->ws, map, 1);
                iter->u.hash.kv = hashmap_iterator_prev(&iter->u.hash.wstack->ws);
                break;
	    default:
                goto error;
	}
        /* Invariant: kv is non-NULL exactly when idx is inside 1..size. */
        ASSERT(!!iter->u.hash.kv == (iter->idx >= 1 &&
                                     iter->idx <= iter->size));
        return 1;
    }
error:
#ifdef DEBUG
    iter->map = THE_NON_VALUE;
#endif
    return 0;
}
3465 
/* Release resources held by a map iterator; only hashmap iterators own
 * a dynamically allocated walk stack. */
void enif_map_iterator_destroy(ErlNifEnv *env, ErlNifMapIterator *iter)
{
    if (is_hashmap(iter->map)) {
        WSTACK_DESTROY(iter->u.hash.wstack->ws);
        erts_free(ERTS_ALC_T_NIF, iter->u.hash.wstack);
    }
    else {
        ASSERT(is_flatmap(iter->map));
    }
#ifdef DEBUG
    iter->map = THE_NON_VALUE;
#endif
}
3479 
/* Return nonzero when the iterator stands past the last entry. */
int enif_map_iterator_is_tail(ErlNifEnv *env, ErlNifMapIterator *iter)
{
    ASSERT(iter);
    if (is_hashmap(iter->map))
        return iter->idx > iter->size;

    ASSERT(is_flatmap(iter->map));
    ASSERT(iter->idx >= 0);
    ASSERT(iter->idx <= flatmap_get_size(flatmap_val(iter->map)) + 1);
    /* An empty flatmap is both head and tail. */
    return (iter->size == 0 || iter->idx > iter->size);
}
3493 
/* Return nonzero when the iterator stands before the first entry. */
int enif_map_iterator_is_head(ErlNifEnv *env, ErlNifMapIterator *iter)
{
    ASSERT(iter);
    if (is_hashmap(iter->map))
        return iter->idx == 0;

    ASSERT(is_flatmap(iter->map));
    ASSERT(iter->idx >= 0);
    ASSERT(iter->idx <= flatmap_get_size(flatmap_val(iter->map)) + 1);
    /* An empty flatmap is both head and tail. */
    return (iter->size == 0 || iter->idx == 0);
}
3507 
3508 
/* Advance the iterator one entry forward. Returns nonzero while the new
 * position is a valid entry (i.e. not past the tail). */
int enif_map_iterator_next(ErlNifEnv *env, ErlNifMapIterator *iter)
{
    ASSERT(iter);
    if (is_flatmap(iter->map)) {
        if (iter->idx <= iter->size) {
            iter->idx++;
            iter->u.flat.ks++;
            iter->u.flat.vs++;
        }
        return (iter->idx <= iter->size);
    }
    else {
        ASSERT(is_hashmap(iter->map));

        if (iter->idx <= hashmap_size(iter->map)) {
            if (iter->idx < 1) {
                /* Stepping forward off the head: (re)start a forward walk. */
                hashmap_iterator_init(&iter->u.hash.wstack->ws, iter->map, 0);
            }
            iter->u.hash.kv = hashmap_iterator_next(&iter->u.hash.wstack->ws);
            iter->idx++;
            /* kv is non-NULL exactly while idx stays within the map. */
            ASSERT(!!iter->u.hash.kv == (iter->idx <= iter->size));
        }
        return iter->idx <= iter->size;
    }
}
3534 
/* Step the iterator one entry backward. Returns nonzero while the new
 * position is a valid entry (i.e. not before the head). */
int enif_map_iterator_prev(ErlNifEnv *env, ErlNifMapIterator *iter)
{
    ASSERT(iter);
    if (is_flatmap(iter->map)) {
        if (iter->idx > 0) {
            iter->idx--;
            iter->u.flat.ks--;
            iter->u.flat.vs--;
        }
        return iter->idx > 0;
    }
    else {
        ASSERT(is_hashmap(iter->map));

        if (iter->idx > 0) {
            if (iter->idx > iter->size) {
                /* Stepping backward off the tail: (re)start a reverse walk. */
                hashmap_iterator_init(&iter->u.hash.wstack->ws, iter->map, 1);
            }
            iter->u.hash.kv = hashmap_iterator_prev(&iter->u.hash.wstack->ws);
            iter->idx--;
            /* kv is non-NULL exactly while idx stays within the map. */
            ASSERT(!!iter->u.hash.kv == (iter->idx > 0));
        }
        return iter->idx > 0;
    }
}
3560 
/* Read the key/value pair at the iterator's current position into
 * *key and *value. Returns 1 on success, 0 if the iterator stands
 * before the head or past the tail. */
int enif_map_iterator_get_pair(ErlNifEnv *env,
			       ErlNifMapIterator *iter,
			       Eterm *key,
			       Eterm *value)
{
    ASSERT(iter);
    if (is_flatmap(iter->map)) {
        if (iter->idx > 0 && iter->idx <= iter->size) {
            /* ks/vs must point inside the flatmap's key/value arrays. */
            ASSERT(iter->u.flat.ks >= flatmap_get_keys(flatmap_val(iter->map)) &&
                   iter->u.flat.ks  < (flatmap_get_keys(flatmap_val(iter->map)) + flatmap_get_size(flatmap_val(iter->map))));
            ASSERT(iter->u.flat.vs >= flatmap_get_values(flatmap_val(iter->map)) &&
                   iter->u.flat.vs  < (flatmap_get_values(flatmap_val(iter->map)) + flatmap_get_size(flatmap_val(iter->map))));
            *key   = *(iter->u.flat.ks);
            *value = *(iter->u.flat.vs);
            return 1;
        }
    }
    else {
        ASSERT(is_hashmap(iter->map));
        if (iter->idx > 0 && iter->idx <= iter->size) {
            /* Hashmap entries are cons cells: CAR = key, CDR = value. */
            *key   = CAR(iter->u.hash.kv);
            *value = CDR(iter->u.hash.kv);
            return 1;
        }
    }
    return 0;
}
3588 
/* Start monitoring the process 'target_pid' on behalf of resource 'obj'.
 *
 * Returns:
 *    0  success; '*monitor' (if non-NULL) receives the monitor handle
 *   -1  the resource type has no 'down' callback, so it cannot monitor
 *    1  the target process is not alive (pid was 'undefined', or the
 *       monitor signal could not be delivered)
 */
int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid,
                         ErlNifMonitor* monitor)
{
    ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
    Eterm tmp[ERTS_REF_THING_SIZE];
    Eterm ref;
    ErtsResourceMonitors *rm;
    ErtsMonitorData *mdp;

    ASSERT(ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc)->magic_binary.destructor
           == NIF_RESOURCE_DTOR);
    ASSERT(erts_refc_read(&ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc)->binary.intern.refc, 0) != 0);
    /* A resource has a monitor container iff its type has a down callback. */
    ASSERT(!rsrc->monitors == !rsrc->type->fn.down);

    rm = rsrc->monitors;
    if (!rm) {
        ASSERT(!rsrc->type->fn.down);
        return -1;
    }
    ASSERT(rsrc->type->fn.down);

    /* 'undefined' pid means the target is already gone. */
    if (target_pid->pid == am_undefined)
        return 1;

    ref = erts_make_ref_in_buffer(tmp);

    mdp = erts_monitor_create(ERTS_MON_TYPE_RESOURCE, ref,
                              (Eterm) rsrc, target_pid->pid,
                              NIL, THE_NON_VALUE);
    /* Publish the origin half in the resource's monitor tree before
     * sending the signal, under the resource monitor lock. */
    erts_mtx_lock(&rm->lock);
    ASSERT(!rmon_is_dying(rm));
    erts_monitor_tree_insert(&rm->root, &mdp->origin);
    rmon_refc_inc(rm);
    erts_mtx_unlock(&rm->lock);

    if (!erts_proc_sig_send_monitor(&mdp->u.target, target_pid->pid)) {
        /* Failed to send monitor signal; cleanup... */
#ifdef DEBUG
        ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc);
#endif

        /* Undo the insertion made above and release both monitor halves. */
        erts_mtx_lock(&rm->lock);
        ASSERT(!rmon_is_dying(rm));
        erts_monitor_tree_delete(&rm->root, &mdp->origin);
        rmon_refc_dec(rm);
        ASSERT(erts_refc_read(&bin->binary.intern.refc, 1) != 0);
        erts_mtx_unlock(&rm->lock);
        erts_monitor_release_both(mdp);

        return 1;
    }

    if (monitor)
        erts_ref_to_driver_monitor(ref,monitor);

    return 0;
}
3646 
enif_make_monitor_term(ErlNifEnv * env,const ErlNifMonitor * monitor)3647 ERL_NIF_TERM enif_make_monitor_term(ErlNifEnv* env, const ErlNifMonitor* monitor)
3648 {
3649     Eterm* hp = alloc_heap(env, ERTS_REF_THING_SIZE);
3650     return erts_driver_monitor_to_ref(hp, monitor);
3651 }
3652 
/* Remove a monitor previously created with enif_monitor_process.
 * Returns 0 on success, or 1 when the monitor could not be found
 * (e.g. already fired or already demonitored). */
int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monitor)
{
    ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
#ifdef DEBUG
    ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc);
#endif
    ErtsResourceMonitors *rm;
    ErtsMonitor *mon;
    Eterm ref_heap[ERTS_REF_THING_SIZE];
    Eterm ref;

    ASSERT(bin->magic_binary.destructor == NIF_RESOURCE_DTOR);
    ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);

    /* Rebuild the reference term from the handle to use as lookup key. */
    ref = erts_driver_monitor_to_ref(ref_heap, monitor);

    /* Unlink the origin monitor from the resource's tree under lock;
     * the demonitor signal is sent after the lock is dropped. */
    rm = rsrc->monitors;
    erts_mtx_lock(&rm->lock);
    ASSERT(!rmon_is_dying(rm));
    mon = erts_monitor_tree_lookup(rm->root, ref);
    if (mon)
        erts_monitor_tree_delete(&rm->root, mon);
    erts_mtx_unlock(&rm->lock);

    if (!mon)
        return 1;

    ASSERT(erts_monitor_is_origin(mon));
    ASSERT(is_internal_pid(mon->other.item));

    erts_proc_sig_send_demonitor(mon);

    return 0;
}
3687 
enif_compare_monitors(const ErlNifMonitor * monitor1,const ErlNifMonitor * monitor2)3688 int enif_compare_monitors(const ErlNifMonitor *monitor1,
3689                           const ErlNifMonitor *monitor2)
3690 {
3691     return sys_memcmp((void *) monitor1, (void *) monitor2,
3692                       ERTS_REF_THING_SIZE*sizeof(Eterm));
3693 }
3694 
enif_ioq_create(ErlNifIOQueueOpts opts)3695 ErlNifIOQueue *enif_ioq_create(ErlNifIOQueueOpts opts)
3696 {
3697     ErlNifIOQueue *q;
3698 
3699     if (opts != ERL_NIF_IOQ_NORMAL)
3700         return NULL;
3701 
3702     q = enif_alloc(sizeof(ErlNifIOQueue));
3703     if (!q) return NULL;
3704     erts_ioq_init(q, ERTS_ALC_T_NIF, 0);
3705 
3706     return q;
3707 }
3708 
/* Release all data queued on 'q', then free the queue itself. */
void enif_ioq_destroy(ErlNifIOQueue *q)
{
    erts_ioq_clear(q);
    enif_free(q);
}
3714 
3715 /* If the iovec was preallocated (Stack or otherwise) it needs to be marked as
3716  * such to perform a proper free. */
3717 #define ERL_NIF_IOVEC_FLAGS_PREALLOC (1 << 0)
3718 
/* Drop all binary references held by 'iov' and free the vector itself
 * unless it was preallocated (stack or embedded storage). */
void enif_free_iovec(ErlNifIOVec *iov)
{
    Binary **bins = (Binary**)iov->ref_bins;
    int idx;

    /* Decrement the refc of every referenced binary, freeing those
     * whose count reaches zero. Slots may be NULL when the binary
     * came in via enq_binary. */
    for (idx = 0; idx < iov->iovcnt; idx++) {
        Binary *bptr = bins[idx];
        if (bptr != NULL && erts_refc_dectest(&bptr->intern.refc, 0) == 0)
            erts_bin_free(bptr);
    }

    /* Preallocated vectors must not be handed to enif_free. */
    if ((iov->flags & ERL_NIF_IOVEC_FLAGS_PREALLOC) == 0)
        enif_free(iov);
}
3735 
/* Result of examining a list of binaries prior to building an iovec
 * (see examine_iovec_term). */
typedef struct {
    UWord sublist_length;   /* number of list cells examined */
    Eterm sublist_start;    /* first cell of the examined sublist */
    Eterm sublist_end;      /* first cell NOT included (or NIL) */

    UWord referenced_size;  /* bytes that can be referenced in place */
    UWord copied_size;      /* bytes that must be copied to a new buffer */

    UWord iovec_len;        /* number of SysIOVec entries required */
} iovec_slice_t;
3746 
/* Walk up to 'max_length' cells of 'list', which must contain only
 * byte-aligned binaries, and record in '*result' everything needed to
 * size an iovec for it: sublist boundaries, how many bytes can be
 * referenced in place versus copied, and the iovec entry count.
 *
 * Returns 0 on a non-binary element, a bitstring with a partial trailing
 * byte, or an improper tail; 1 otherwise. */
static int examine_iovec_term(Eterm list, UWord max_length, iovec_slice_t *result) {
    Eterm lookahead;

    result->sublist_start = list;
    result->sublist_length = 0;
    result->referenced_size = 0;
    result->copied_size = 0;
    result->iovec_len = 0;

    lookahead = result->sublist_start;

    while (is_list(lookahead)) {
        UWord byte_size;
        Eterm binary;
        Eterm *cell;

        cell = list_val(lookahead);
        binary = CAR(cell);

        if (!is_binary(binary)) {
            return 0;
        }

        byte_size = binary_size(binary);

        /* Zero-sized binaries contribute nothing to the iovec. */
        if (byte_size > 0) {
            int bit_offset, bit_size;
            Eterm parent_binary;
            UWord byte_offset;

            int requires_copying;

            ERTS_GET_REAL_BIN(binary, parent_binary, byte_offset,
                bit_offset, bit_size);

            (void)byte_offset;

            /* Reject bitstrings that are not a whole number of bytes. */
            if (bit_size != 0) {
                return 0;
            }

            /* If we're unaligned or an on-heap binary we'll need to copy
             * ourselves over to a temporary buffer. */
            requires_copying = (bit_offset != 0) ||
                thing_subtag(*binary_val(parent_binary)) == HEAP_BINARY_SUBTAG;

            if (requires_copying) {
                result->copied_size += byte_size;
            } else {
                result->referenced_size += byte_size;
            }

            /* Each binary may need several entries since a SysIOVec
             * entry is capped at MAX_SYSIOVEC_IOVLEN bytes. */
            result->iovec_len += 1 + byte_size / MAX_SYSIOVEC_IOVLEN;
        }

        result->sublist_length += 1;
        lookahead = CDR(cell);

        if (result->sublist_length >= max_length) {
            break;
        }
    }

    /* The tail must be NIL (proper list) or a further list (when we
     * stopped at max_length). */
    if (!is_nil(lookahead) && !is_list(lookahead)) {
        return 0;
    }

    result->sublist_end = lookahead;

    return 1;
}
3818 
/* Resolve 'binary' into a data pointer, size, and backing refc binary
 * in '*result'. On-heap binaries and bit-unaligned binaries are copied
 * into 'copy_buffer' at '*copy_offset' (which is advanced), so the
 * result always refers to refc-binary-backed memory. */
static void marshal_iovec_binary(Eterm binary, ErlNifBinary *copy_buffer,
        UWord *copy_offset, ErlNifBinary *result) {

    Eterm *parent_header;
    Eterm parent_binary;

    int bit_offset, bit_size;
    Uint byte_offset;

    ASSERT(is_binary(binary));

    ERTS_GET_REAL_BIN(binary, parent_binary, byte_offset, bit_offset, bit_size);

    /* Callers must have rejected partial-byte bitstrings already. */
    ASSERT(bit_size == 0);

    parent_header = binary_val(parent_binary);

    result->size = binary_size(binary);

    if (thing_subtag(*parent_header) == REFC_BINARY_SUBTAG) {
        ProcBin *pb = (ProcBin*)parent_header;

        /* Detach any writers so the data we point at cannot change. */
        if (pb->flags & (PB_IS_WRITABLE | PB_ACTIVE_WRITER)) {
            erts_emasculate_writable_binary(pb);
        }

        ASSERT(pb->val != NULL);
        ASSERT(byte_offset < pb->size);
        ASSERT(&pb->bytes[byte_offset] >= (byte*)(pb->val)->orig_bytes);

        result->data = (unsigned char*)&pb->bytes[byte_offset];
        result->ref_bin = (void*)pb->val;
    } else {
        ErlHeapBin *hb = (ErlHeapBin*)parent_header;

        ASSERT(thing_subtag(*parent_header) == HEAP_BINARY_SUBTAG);

        /* Heap binaries have no off-heap backing; ref_bin stays NULL
         * which forces the copy below. */
        result->data = &((unsigned char*)&hb->data)[byte_offset];
        result->ref_bin = NULL;
    }

    /* If this isn't an *aligned* refc binary, copy its contents to the buffer
     * and reference that instead. */

    if (result->ref_bin == NULL || bit_offset != 0) {
        ASSERT(copy_buffer->ref_bin != NULL && copy_buffer->data != NULL);
        ASSERT(result->size <= (copy_buffer->size - *copy_offset));

        if (bit_offset == 0) {
            sys_memcpy(&copy_buffer->data[*copy_offset],
                result->data, result->size);
        } else {
            /* Bit-unaligned source: shift the bits while copying. */
            erts_copy_bits(result->data, bit_offset, 1,
                (byte*)&copy_buffer->data[*copy_offset], 0, 1,
                result->size * 8);
        }

        result->data = &copy_buffer->data[*copy_offset];
        result->ref_bin = copy_buffer->ref_bin;

        *copy_offset += result->size;
    }
}
3882 
/* Populate 'iovec' (already sized per 'slice') with entries for every
 * binary in the slice's sublist, allocating one shared copy buffer for
 * all binaries that need copying. With env == NULL the iovec takes its
 * own references on the binaries; with an env the copy buffer is
 * attached to the environment's process heap instead.
 *
 * Returns 0 only if the copy buffer cannot be allocated. */
static int fill_iovec_with_slice(ErlNifEnv *env,
                                 iovec_slice_t *slice,
                                 ErlNifIOVec *iovec) {
    ErlNifBinary copy_buffer = {0};
    UWord copy_offset, iovec_idx;
    Eterm sublist_iterator;

    /* Set up a common refc binary for all on-heap and unaligned binaries. */
    if (slice->copied_size > 0) {
        if (!enif_alloc_binary(slice->copied_size, &copy_buffer)) {
            return 0;
        }

        ASSERT(copy_buffer.ref_bin != NULL);
    }

    sublist_iterator = slice->sublist_start;
    copy_offset = 0;
    iovec_idx = 0;

    while (sublist_iterator != slice->sublist_end) {
        ErlNifBinary raw_data;
        Eterm *cell;

        cell = list_val(sublist_iterator);
        marshal_iovec_binary(CAR(cell), &copy_buffer, &copy_offset, &raw_data);

        /* Split each binary into MAX_SYSIOVEC_IOVLEN-sized chunks. */
        while (raw_data.size > 0) {
            UWord chunk_len = MIN(raw_data.size, MAX_SYSIOVEC_IOVLEN);

            ASSERT(iovec_idx < iovec->iovcnt);
            ASSERT(raw_data.ref_bin != NULL);

            iovec->iov[iovec_idx].iov_base = raw_data.data;
            iovec->iov[iovec_idx].iov_len = chunk_len;

            iovec->ref_bins[iovec_idx] = raw_data.ref_bin;

            raw_data.data += chunk_len;
            raw_data.size -= chunk_len;

            iovec_idx += 1;
        }

        sublist_iterator = CDR(cell);
    }

    ASSERT(iovec_idx == iovec->iovcnt);

    if (env == NULL) {
        /* Standalone iovec: take a reference on every entry's binary. */
        int i;
        for (i = 0; i < iovec->iovcnt; i++) {
            Binary *refc_binary = (Binary*)(iovec->ref_bins[i]);
            erts_refc_inc(&refc_binary->intern.refc, 1);
        }

        if (slice->copied_size > 0) {
            /* Transfer ownership to the iovec; we've taken references to it in
             * the above loop. */
            enif_release_binary(&copy_buffer);
        }
    } else {
        if (slice->copied_size > 0) {
            /* Attach the binary to our environment and let the next minor GC
             * get rid of it. This is slightly faster than using the tmp object
             * list since it avoids off-heap allocations. */
            erts_build_proc_bin(&MSO(env->proc),
                alloc_heap(env, PROC_BIN_SIZE), copy_buffer.ref_bin);
        }
    }

    return 1;
}
3956 
/* Allocate (or reuse) an ErlNifIOVec big enough for 'slice' and fill it.
 * If '*result' points at a caller-provided (e.g. stack) vector and the
 * slice fits in its small embedded arrays, that vector is reused;
 * otherwise a single allocation holds the struct, the SysIOVec array,
 * and the ref_bins array. Returns 0 on allocation failure in the fill
 * step, 1 on success (with '*result' updated). */
static int create_iovec_from_slice(ErlNifEnv *env,
                                   iovec_slice_t *slice,
                                   ErlNifIOVec **result) {
    ErlNifIOVec *iovec = *result;

    if (iovec && slice->iovec_len < ERL_NIF_IOVEC_SIZE) {
        /* Caller-provided vector with room in the embedded arrays. */
        iovec->iov = iovec->small_iov;
        iovec->ref_bins = iovec->small_ref_bin;
        iovec->flags = ERL_NIF_IOVEC_FLAGS_PREALLOC;
    } else {
        /* One allocation: [ErlNifIOVec | SysIOVec[] | Binary*[]],
         * with each section aligned via ERTS_ALC_DATA_ALIGN_SIZE. */
        UWord iov_offset, binv_offset, alloc_size;
        char *alloc_base;

        iov_offset = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErlNifIOVec));
        binv_offset = iov_offset;
        binv_offset += ERTS_ALC_DATA_ALIGN_SIZE(slice->iovec_len * sizeof(SysIOVec));
        alloc_size = binv_offset;
        alloc_size += slice->iovec_len * sizeof(Binary*);

        /* When the user passes an environment, we attach the iovec to it so
         * the user won't have to bother managing it (similar to
         * enif_inspect_binary). It'll disappear once the environment is
         * cleaned up. */
        if (env != NULL) {
            alloc_base = alloc_tmp_obj(env, alloc_size, &tmp_alloc_dtor);
        } else {
            alloc_base = erts_alloc(ERTS_ALC_T_NIF, alloc_size);
        }

        iovec = (ErlNifIOVec*)alloc_base;
        iovec->iov = (SysIOVec*)(alloc_base + iov_offset);
        iovec->ref_bins = (void**)(alloc_base + binv_offset);
        iovec->flags = 0;
    }

    iovec->size = slice->referenced_size + slice->copied_size;
    iovec->iovcnt = slice->iovec_len;

    if(!fill_iovec_with_slice(env, slice, iovec)) {
        /* env-attached allocations are reclaimed with the environment;
         * only a standalone, non-preallocated vector must be freed here. */
        if (env == NULL && !(iovec->flags & ERL_NIF_IOVEC_FLAGS_PREALLOC)) {
            erts_free(ERTS_ALC_T_NIF, iovec);
        }

        return 0;
    }

    *result = iovec;

    return 1;
}
4007 
/* Build an iovec covering up to 'max_elements' binaries of 'list'.
 * On success '*iov' holds the vector and '*tail' the remainder of the
 * list; returns 0 on malformed input or allocation failure. */
int enif_inspect_iovec(ErlNifEnv *env, size_t max_elements,
                       ERL_NIF_TERM list, ERL_NIF_TERM *tail,
                       ErlNifIOVec **iov) {
    iovec_slice_t slice;

    if (!examine_iovec_term(list, max_elements, &slice)) {
        return 0;
    }

    if (!create_iovec_from_slice(env, &slice, iov)) {
        return 0;
    }

    *tail = slice.sublist_end;

    return 1;
}
4023 
/* NIF I/O queue operations */
/* Enqueue the iovec on 'q', skipping the first 'skip' bytes.
 * Returns 1 on success and 0 on failure (skip beyond the data, or
 * the underlying enqueue failing). */
int enif_ioq_enqv(ErlNifIOQueue *q, ErlNifIOVec *iov, size_t skip)
{
    if (skip > iov->size)
        return 0;

    return !erts_ioq_enqv(q, (ErtsIOVec*)iov, skip);
}
4033 
/* Enqueue a single binary on 'q', skipping the first 'skip' bytes.
 * Ownership of 'bin' is taken over: it is released here whether or not
 * the enqueue succeeded. Returns the result of enif_ioq_enqv. */
int enif_ioq_enq_binary(ErlNifIOQueue *q, ErlNifBinary *bin, size_t skip)
{
    /* Build a one-entry, stack-preallocated iovec around the binary. */
    ErlNifIOVec vec = {1, bin->size, NULL, NULL, ERL_NIF_IOVEC_FLAGS_PREALLOC };
    Binary *ref_bin = (Binary*)bin->ref_bin;
    int res;
    vec.iov = vec.small_iov;
    vec.ref_bins = vec.small_ref_bin;
    vec.iov[0].iov_base = bin->data;
    vec.iov[0].iov_len = bin->size;
    ((Binary**)(vec.ref_bins))[0] = ref_bin;

    res = enif_ioq_enqv(q, &vec, skip);
    /* The queue took its own reference (if any); drop the caller's. */
    enif_release_binary(bin);
    return res;
}
4049 
enif_ioq_size(ErlNifIOQueue * q)4050 size_t enif_ioq_size(ErlNifIOQueue *q)
4051 {
4052     return erts_ioq_size(q);
4053 }
4054 
/* Dequeue 'elems' bytes from 'q'. Returns 0 on failure; on success
 * returns 1 and, when 'size' is non-NULL, stores the remaining byte
 * count in '*size'. */
int enif_ioq_deq(ErlNifIOQueue *q, size_t elems, size_t *size)
{
    int failed = (erts_ioq_deq(q, elems) == -1);

    if (failed)
        return 0;

    if (size != NULL)
        *size = erts_ioq_size(q);

    return 1;
}
4063 
/* Build an Erlang binary term for the first chunk of the queue without
 * dequeuing it. Chunks larger than ERL_ONHEAP_BIN_LIMIT become ProcBins
 * referencing the queued refc binary; smaller ones are copied into an
 * on-heap binary. Returns 0 when the queue is empty, otherwise 1 with
 * '*bin_term' (and '*size', when non-NULL) filled in. */
int enif_ioq_peek_head(ErlNifEnv *env, ErlNifIOQueue *q, size_t *size, ERL_NIF_TERM *bin_term) {
    SysIOVec *iov_entry;
    Binary *ref_bin;

    if (q->size == 0) {
        return 0;
    }

    ASSERT(q->b_head != q->b_tail && q->v_head != q->v_tail);

    ref_bin = &q->b_head[0]->nif;
    iov_entry = &q->v_head[0];

    if (size != NULL) {
        *size = iov_entry->iov_len;
    }

    if (iov_entry->iov_len > ERL_ONHEAP_BIN_LIMIT) {
        /* Large chunk: reference the queued binary via a ProcBin linked
         * into the process's off-heap list. */
        ProcBin *pb = (ProcBin*)alloc_heap(env, PROC_BIN_SIZE);

        pb->thing_word = HEADER_PROC_BIN;
        pb->next = MSO(env->proc).first;
        pb->val = ref_bin;
        pb->flags = 0;

        ASSERT((byte*)iov_entry->iov_base >= (byte*)ref_bin->orig_bytes);
        ASSERT(iov_entry->iov_len <= ref_bin->orig_size);

        pb->bytes = (byte*)iov_entry->iov_base;
        pb->size = iov_entry->iov_len;

        MSO(env->proc).first = (struct erl_off_heap_header*) pb;
        OH_OVERHEAD(&(MSO(env->proc)), pb->size / sizeof(Eterm));

        /* The new ProcBin holds its own reference on the binary. */
        erts_refc_inc(&ref_bin->intern.refc, 2);
        *bin_term = make_binary(pb);
    } else {
        /* Small chunk: copy into an on-heap binary. */
        ErlHeapBin* hb = (ErlHeapBin*)alloc_heap(env, heap_bin_size(iov_entry->iov_len));

        hb->thing_word = header_heap_bin(iov_entry->iov_len);
        hb->size = iov_entry->iov_len;

        sys_memcpy(hb->data, iov_entry->iov_base, iov_entry->iov_len);
        *bin_term = make_binary(hb);
    }

    return 1;
}
4112 
enif_ioq_peek(ErlNifIOQueue * q,int * iovlen)4113 SysIOVec *enif_ioq_peek(ErlNifIOQueue *q, int *iovlen)
4114 {
4115     return erts_ioq_peekq(q, iovlen);
4116 }
4117 
4118 /***************************************************************************
4119  **                              load_nif/2                               **
4120  ***************************************************************************/
4121 
get_func_pp(const BeamCodeHeader * mod_code,Eterm f_atom,unsigned arity)4122 static const ErtsCodeInfo * const * get_func_pp(const BeamCodeHeader* mod_code,
4123                                                 Eterm f_atom, unsigned arity)
4124 {
4125     int n = (int) mod_code->num_functions;
4126     int j;
4127 
4128     for (j = 0; j < n; ++j) {
4129         const ErtsCodeInfo* ci = mod_code->functions[j];
4130 
4131 #ifndef BEAMASM
4132         ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
4133 #endif
4134 
4135         if (f_atom == ci->mfa.function
4136             && arity == ci->mfa.arity) {
4137             return &mod_code->functions[j];
4138         }
4139     }
4140 
4141     return NULL;
4142 }
4143 
mkatom(const char * str)4144 static Eterm mkatom(const char *str)
4145 {
4146     return am_atom_put(str, sys_strlen(str));
4147 }
4148 
/* Node in the singly-linked list of "tainted" modules, i.e. modules
 * that have loaded native code (see erts_add_taint below). */
struct tainted_module_t
{
    struct tainted_module_t* next;
    Eterm module_atom;
};

/* Head of the taint list; readers traverse it lock-free, writers are
 * serialized externally (see the assertion in erts_add_taint). */
erts_atomic_t first_taint; /* struct tainted_module_t* */
4156 
/* Record 'mod_atom' in the taint list. Duplicates are ignored; if the
 * node allocation fails the taint is silently dropped (best effort).
 * Writers must hold the driver list lock or code write permission, so
 * insertions never race each other; readers are lock-free, which the
 * release-barrier store supports. */
void erts_add_taint(Eterm mod_atom)
{
#ifdef ERTS_ENABLE_LOCK_CHECK
    extern erts_rwmtx_t erts_driver_list_lock; /* Mutex for driver list */
#endif
    struct tainted_module_t *first, *t;

    ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock)
                   || erts_has_code_write_permission());

    first = (struct tainted_module_t*) erts_atomic_read_nob(&first_taint);
    for (t=first ; t; t=t->next) {
	if (t->module_atom == mod_atom) {
	    return;
	}
    }
    t = erts_alloc_fnf(ERTS_ALC_T_TAINT, sizeof(*t));
    if (t != NULL) {
	t->module_atom = mod_atom;
	t->next = first;
        /* Publish the fully initialized node with a release barrier. */
        erts_atomic_set_relb(&first_taint, (erts_aint_t)t);
    }
}
4180 
erts_nif_taints(Process * p)4181 Eterm erts_nif_taints(Process* p)
4182 {
4183     struct tainted_module_t *first, *t;
4184     unsigned cnt = 0;
4185     Eterm list = NIL;
4186     Eterm* hp;
4187 
4188     first = (struct tainted_module_t*) erts_atomic_read_acqb(&first_taint);
4189     for (t=first ; t!=NULL; t=t->next) {
4190 	cnt++;
4191     }
4192     hp = HAlloc(p,cnt*2);
4193     for (t=first ; t!=NULL; t=t->next) {
4194 	list = CONS(hp, t->module_atom, list);
4195 	hp += 2;
4196     }
4197     return list;
4198 }
4199 
/* Print the tainted module names as a comma-separated, newline-terminated
 * list via the given callback printer. */
void erts_print_nif_taints(fmtfn_t to, void* to_arg)
{
    const char* sep = "";
    struct tainted_module_t *tm =
        (struct tainted_module_t*) erts_atomic_read_nob(&first_taint);

    while (tm != NULL) {
        const Atom* atom = atom_tab(atom_val(tm->module_atom));
        erts_cbprintf(to,to_arg,"%s%.*s", sep, atom->len, atom->name);
        sep = ",";
        tm = tm->next;
    }
    erts_cbprintf(to,to_arg,"\n");
}
4213 
4214 
/* Build the {error, {Atom, FormattedMessage}} term used for load_nif
 * failures, formatting 'format' with the trailing varargs.
 * Uses the standard two-pass erts_bld_* idiom: the first pass
 * (hpp == NULL) only accumulates the required heap size in 'sz', the
 * second pass builds the term on a heap of exactly that size. */
static Eterm load_nif_error(Process* p, const char* atom, const char* format, ...)
{
    erts_dsprintf_buf_t* dsbufp = erts_create_tmp_dsbuf(0);
    Eterm ret;
    Eterm* hp;
    Eterm** hpp = NULL;
    Uint sz = 0;
    Uint* szp = &sz;
    va_list arglist;

    va_start(arglist, format);
    erts_vdsprintf(dsbufp, format, arglist);
    va_end(arglist);

    for (;;) {
	Eterm txt = erts_bld_string_n(hpp, &sz, dsbufp->str, dsbufp->str_len);
	ret = erts_bld_tuple(hpp, szp, 2, am_error,
			     erts_bld_tuple(hpp, szp, 2, mkatom(atom), txt));
	if (hpp != NULL) {
	    /* Second pass done: term fully built. */
	    break;
	}
	/* First pass done: allocate the heap and switch to build mode. */
	hp = HAlloc(p,sz);
	hpp = &hp;
	szp = NULL;
    }
    erts_destroy_tmp_dsbuf(dsbufp);
    return ret;
}
4243 
/* True when NIF entry E declares API version MAJ.MIN or later. */
#define AT_LEAST_VERSION(E,MAJ,MIN) \
    (((E)->major * 0x100 + (E)->minor) >= ((MAJ) * 0x100 + (MIN)))
4246 
4247 /*
4248  * Allocate erl_module_nif and make a _modern_ copy of the lib entry.
4249  */
create_lib(const ErlNifEntry * src)4250 static struct erl_module_nif* create_lib(const ErlNifEntry* src)
4251 {
4252     struct erl_module_nif* lib;
4253     ErlNifEntry* dst;
4254     Uint bytes = offsetof(struct erl_module_nif, _funcs_copy_);
4255 
4256     if (!AT_LEAST_VERSION(src, 2, 7))
4257         bytes += src->num_of_funcs * sizeof(ErlNifFunc);
4258 
4259     lib = erts_alloc(ERTS_ALC_T_NIF, bytes);
4260     erts_mtx_init(&lib->load_mtx, "nif_load", NIL,
4261                   ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
4262     dst = &lib->entry;
4263 
4264     sys_memcpy(dst, src, offsetof(ErlNifEntry, vm_variant));
4265 
4266     if (AT_LEAST_VERSION(src, 2, 1)) {
4267         dst->vm_variant = src->vm_variant;
4268     } else {
4269         dst->vm_variant = "beam.vanilla";
4270     }
4271     if (AT_LEAST_VERSION(src, 2, 7)) {
4272         dst->options = src->options;
4273     } else {
4274         /*
4275          * Make a modern copy of the ErlNifFunc array
4276          */
4277         struct ErlNifFunc_V1 {
4278             const char* name;
4279             unsigned arity;
4280             ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
4281         }*src_funcs = (struct ErlNifFunc_V1*) src->funcs;
4282         int i;
4283         for (i = 0; i < src->num_of_funcs; ++i) {
4284             sys_memcpy(&lib->_funcs_copy_[i], &src_funcs[i], sizeof(*src_funcs));
4285             lib->_funcs_copy_[i].flags = 0;
4286         }
4287         dst->funcs = lib->_funcs_copy_;
4288         dst->options = 0;
4289     }
4290     if (AT_LEAST_VERSION(src, 2, 12)) {
4291         dst->sizeof_ErlNifResourceTypeInit = src->sizeof_ErlNifResourceTypeInit;
4292     } else {
4293         dst->sizeof_ErlNifResourceTypeInit = 0;
4294     }
4295     if (AT_LEAST_VERSION(src, 2, 14)) {
4296         dst->min_erts = src->min_erts;
4297     } else {
4298         dst->min_erts = "erts-?";
4299     }
4300     return lib;
4301 };
4302 
4303 /* load_nif/2 is implemented as an instruction as it needs to know where it
4304  * was called from, and it's a pain to get that information in a BIF.
4305  *
4306  * This is a small stub that rejects apply(erlang, load_nif, [Path, Args]). */
load_nif_2(BIF_ALIST_2)4307 BIF_RETTYPE load_nif_2(BIF_ALIST_2) {
4308     BIF_RET(load_nif_error(BIF_P, "bad_lib",
4309                            "load_nif/2 must be explicitly called from the NIF "
4310                            "module. It cannot be called through apply/3."));
4311 }
4312 
/* Per-NIF beam code stub, hashed into erts_nif_call_tab keyed on the
 * address of the function's ErtsCodeInfo (see nif_call_hash). */
typedef struct {
    HashBucket bucket;
    /* NOTE(review): two views of the same code — presumably an
     * executable mapping and a writable mapping for patching; confirm
     * against the finisher code later in this file. */
    const ErtsCodeInfo *code_info_exec;
    ErtsCodeInfo *code_info_rw;
    ErtsCodeInfo info;
    struct
    {
        /* data */
#ifdef BEAMASM
        BeamInstr prologue[BEAM_ASM_FUNC_PROLOGUE_SIZE / sizeof(UWord)];
        BeamInstr call_nif[10];
#else
        BeamInstr call_nif[4];
#endif
    } code;
} ErtsNifBeamStub;
4329 
/* Variable-length container for a module's NIF beam stubs; allocate
 * with sizeof_ErtsNifFinish(N) for N stubs. */
typedef struct ErtsNifFinish_ {
    int nstubs_hashed;  /* can be less than 'num_of_funcs' if load failed */
    ErtsNifBeamStub beam_stubv[1];  /* trailing array of length >= 1 */
} ErtsNifFinish;

/* Allocation size for an ErtsNifFinish carrying N stubs. */
#define sizeof_ErtsNifFinish(N) \
    (offsetof(ErtsNifFinish, beam_stubv) + (N)*sizeof(ErtsNifBeamStub))
4337 
/* Staged finishers and stub bookkeeping for NIF loading
 * (definitions appear later in this file). */
static void load_nif_1st_finisher(void* vlib);
static void load_nif_2nd_finisher(void* vlib);
static void load_nif_3rd_finisher(void* vlib);
static void release_beam_stubs(struct erl_module_nif* lib);
static void erase_hashed_stubs(ErtsNifFinish*);

/* Hash table mapping ErtsCodeInfo address -> ErtsNifBeamStub
 * (see nif_call_hash/nif_call_cmp below). */
struct hash erts_nif_call_tab;
4345 
nif_call_hash(ErtsNifBeamStub * obj)4346 static HashValue nif_call_hash(ErtsNifBeamStub* obj)
4347 {
4348     return ((HashValue)obj->code_info_exec / sizeof(BeamInstr));
4349 }
4350 
/* Table comparison: zero means equal, keyed on the code address. */
static int nif_call_cmp(ErtsNifBeamStub* tmpl, ErtsNifBeamStub* obj)
{
    return (tmpl->code_info_exec == obj->code_info_exec) ? 0 : 1;
}
4355 
/* The table stores the caller's stub in place instead of copying it. */
static ErtsNifBeamStub* nif_call_alloc(ErtsNifBeamStub* tmpl)
{
    return tmpl;
}
4360 
/* Nothing to free: entries are not allocated by the table
 * (see nif_call_alloc). */
static void nif_call_free(ErtsNifBeamStub* obj)
{
    (void)obj;
}
4364 
/* Create erts_nif_call_tab and its rwlock. The table keeps entries in
 * place (alloc returns the template, free is a no-op), keyed on the
 * stub's code address. */
static void nif_call_table_init(void)
{
    HashFunctions f;

    erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
    rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED;

    erts_rwmtx_init_opt(&erts_nif_call_tab_lock, &rwmtx_opt, "nif_call_tab",
                        NIL, (ERTS_LOCK_FLAGS_PROPERTY_STATIC |
                              ERTS_LOCK_FLAGS_CATEGORY_GENERIC));

    f.hash = (H_FUN) nif_call_hash;
    f.cmp  = (HCMP_FUN) nif_call_cmp;
    f.alloc = (HALLOC_FUN) nif_call_alloc;
    f.free = (HFREE_FUN) nif_call_free;
    f.meta_alloc = (HMALLOC_FUN) erts_alloc;
    f.meta_free = (HMFREE_FUN) erts_free;
    f.meta_print = (HMPRINT_FUN) erts_print;

    hash_init(ERTS_ALC_T_NIF, &erts_nif_call_tab, "nif_call_tab", 100, f);
}
4386 
4387 static void patch_call_nif_early(ErlNifEntry*, struct erl_module_instance*);
4388 
erts_load_nif(Process * c_p,ErtsCodePtr I,Eterm filename,Eterm args)4389 Eterm erts_load_nif(Process *c_p, ErtsCodePtr I, Eterm filename, Eterm args)
4390 {
4391     static const char bad_lib[] = "bad_lib";
4392     static const char upgrade[] = "upgrade";
4393     char* lib_name = NULL;
4394     void* handle = NULL;
4395     void* init_func = NULL;
4396     ErlNifEntry* entry = NULL;
4397     ErlNifEnv env;
4398     int i, err, encoding;
4399     Module* module_p;
4400     Eterm mod_atom;
4401     const Atom* mod_atomp;
4402     Eterm f_atom;
4403     const ErtsCodeMFA* caller;
4404     ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
4405     Eterm ret = am_ok;
4406     int veto;
4407     int taint = 1;
4408     struct erl_module_nif* lib = NULL;
4409     struct erl_module_instance* this_mi;
4410     struct erl_module_instance* prev_mi;
4411 
4412     encoding = erts_get_native_filename_encoding();
4413     if (encoding == ERL_FILENAME_WIN_WCHAR) {
4414         /* Do not convert the lib name to utf-16le yet, do that in win32 specific code */
4415         /* since lib_name is used in error messages */
4416         encoding = ERL_FILENAME_UTF8;
4417     }
4418     lib_name = erts_convert_filename_to_encoding(filename, NULL, 0,
4419                                                  ERTS_ALC_T_TMP, 1, 0, encoding,
4420 						 NULL, 0);
4421     if (!lib_name) {
4422         return THE_NON_VALUE;
4423     }
4424 
4425     /* Find calling module */
4426     caller = erts_find_function_from_pc(I);
4427     ASSERT(caller != NULL);
4428     mod_atom = caller->module;
4429     ASSERT(is_atom(mod_atom));
4430     module_p = erts_get_module(mod_atom, erts_active_code_ix());
4431     ASSERT(module_p != NULL);
4432 
4433     mod_atomp = atom_tab(atom_val(mod_atom));
4434     {
4435         ErtsStaticNifEntry* sne;
4436         sne = erts_static_nif_get_nif_init((char*)mod_atomp->name, mod_atomp->len);
4437         if (sne != NULL) {
4438             init_func = sne->nif_init;
4439             handle = init_func;
4440             taint = sne->taint;
4441         }
4442     }
4443     this_mi = &module_p->curr;
4444     prev_mi = &module_p->old;
4445     if (in_area(caller, module_p->old.code_hdr, module_p->old.code_length)) {
4446 	ret = load_nif_error(c_p, "old_code", "Calling load_nif from old "
4447 			     "module '%T' not allowed", mod_atom);
4448 	goto error;
4449     } else if (module_p->on_load) {
4450 	ASSERT(((module_p->on_load)->code_hdr)->on_load);
4451 	if (module_p->curr.code_hdr) {
4452 	    prev_mi = &module_p->curr;
4453 	} else {
4454 	    prev_mi = &module_p->old;
4455 	}
4456 	this_mi = module_p->on_load;
4457     }
4458 
4459     if (this_mi->nif != NULL) {
4460         ret = load_nif_error(c_p,"reload","NIF library already loaded"
4461                              " (reload disallowed since OTP 20).");
4462     }
4463     else if (init_func == NULL &&
4464              (err=erts_sys_ddll_open(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) {
4465 	const char slogan[] = "Failed to load NIF library";
4466 	if (strstr(errdesc.str, lib_name) != NULL) {
4467 	    ret = load_nif_error(c_p, "load_failed", "%s: '%s'", slogan, errdesc.str);
4468 	}
4469 	else {
4470 	    ret = load_nif_error(c_p, "load_failed", "%s %s: '%s'", slogan, lib_name, errdesc.str);
4471 	}
4472     }
4473     else if (init_func == NULL &&
4474 	     erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) != ERL_DE_NO_ERROR) {
4475 	ret  = load_nif_error(c_p, bad_lib, "Failed to find library init"
4476 			      " function: '%s'", errdesc.str);
4477 
4478     }
4479     else if ((taint ? erts_add_taint(mod_atom) : 0,
4480 	      (entry = erts_sys_ddll_call_nif_init(init_func)) == NULL)) {
4481 	ret = load_nif_error(c_p, bad_lib, "Library init-call unsuccessful");
4482     }
4483     else if (entry->major > ERL_NIF_MAJOR_VERSION
4484              || (entry->major == ERL_NIF_MAJOR_VERSION
4485                  && entry->minor > ERL_NIF_MINOR_VERSION)) {
4486         char* fmt = "That '%T' NIF library needs %s or newer. Either try to"
4487             " recompile the NIF lib or use a newer erts runtime.";
4488         ret = load_nif_error(c_p, bad_lib, fmt, mod_atom, entry->min_erts);
4489     }
4490     else if (entry->major < ERL_NIF_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD
4491 	     || (entry->major==2 && entry->minor == 5)) { /* experimental maps */
4492 
4493         char* fmt = "That old NIF library (%d.%d) is not compatible with this "
4494             "erts runtime (%d.%d). Try recompile the NIF lib.";
4495         ret = load_nif_error(c_p, bad_lib, fmt, entry->major, entry->minor,
4496                              ERL_NIF_MAJOR_VERSION, ERL_NIF_MINOR_VERSION);
4497     }
4498     else if (AT_LEAST_VERSION(entry, 2, 1)
4499 	     && sys_strcmp(entry->vm_variant, ERL_NIF_VM_VARIANT) != 0) {
4500 	ret = load_nif_error(c_p, bad_lib, "Library (%s) not compiled for "
4501 			     "this vm variant (%s).",
4502 			     entry->vm_variant, ERL_NIF_VM_VARIANT);
4503     }
4504     else if (!erts_is_atom_str((char*)entry->name, mod_atom, 1)) {
4505 	ret = load_nif_error(c_p, bad_lib, "Library module name '%s' does not"
4506 			     " match calling module '%T'", entry->name, mod_atom);
4507     }
4508     else {
4509         lib = create_lib(entry);
4510         entry = &lib->entry; /* Use a guaranteed modern lib entry from now on */
4511 
4512         lib->handle = handle;
4513         erts_refc_init(&lib->refc, 2);  /* Erlang code + NIF code */
4514         erts_refc_init(&lib->dynlib_refc, 1);
4515         ASSERT(opened_rt_list == NULL);
4516         lib->mod = module_p;
4517 
4518         lib->finish = erts_alloc(ERTS_ALC_T_NIF,
4519                                  sizeof_ErtsNifFinish(entry->num_of_funcs));
4520         lib->finish->nstubs_hashed = 0;
4521 
4522         erts_rwmtx_rwlock(&erts_nif_call_tab_lock);
4523         for (i=0; i < entry->num_of_funcs; i++) {
4524             const ErtsCodeInfo * const * ci_pp;
4525             const ErtsCodeInfo* ci;
4526             ErlNifFunc* f = &entry->funcs[i];
4527             ErtsNifBeamStub* stub = &lib->finish->beam_stubv[i];
4528 
4529             stub->code_info_exec = NULL; /* end marker in case we fail */
4530 
4531 	    if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1)
4532 		|| (ci_pp = get_func_pp(this_mi->code_hdr, f_atom, f->arity))==NULL) {
4533 		ret = load_nif_error(c_p,bad_lib,"Function not found %T:%s/%u",
4534 				     mod_atom, f->name, f->arity);
4535                 break;
4536 	    }
4537 
4538             ci = *ci_pp;
4539 
4540 	    if (f->flags != 0 &&
4541                 f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND &&
4542                 f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND) {
4543 
4544                 ret = load_nif_error(c_p, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u",
4545                                      f->flags, mod_atom, f->name, f->arity);
4546                 break;
4547 	    }
4548 
4549 #ifdef DEBUG
4550         {
4551             ErtsCodePtr curr_func, next_func;
4552 
4553             curr_func = erts_codeinfo_to_code((ErtsCodeInfo*)ci_pp[0]);
4554             next_func = erts_codeinfo_to_code((ErtsCodeInfo*)ci_pp[1]);
4555 
4556             ASSERT(!ErtsInArea(next_func,
4557                                curr_func,
4558                                BEAM_NATIVE_MIN_FUNC_SZ * sizeof(UWord)));
4559         }
4560 #endif
4561 
4562             ERTS_CT_ASSERT(sizeof(stub->code) <=
4563                     BEAM_NATIVE_MIN_FUNC_SZ * sizeof(Eterm));
4564 
4565             stub->code_info_exec = ci;
4566             stub->code_info_rw = erts_writable_code_ptr(this_mi, ci);
4567             stub->info = *ci;
4568 
4569             if (hash_put(&erts_nif_call_tab, stub) != stub) {
4570                 ret = load_nif_error(c_p, bad_lib, "Duplicate NIF entry for %T:%s/%u",
4571                                      mod_atom, f->name, f->arity);
4572                 break;
4573             }
4574             lib->finish->nstubs_hashed++;
4575 
4576  #ifdef BEAMASM
4577             {
4578                 /* See beam_asm.h for details on how the nif load trampoline
4579                  * works */
4580                 void* normal_fptr, *dirty_fptr;
4581 
4582                 if (f->flags) {
4583                     if (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) {
4584                         normal_fptr = static_schedule_dirty_io_nif;
4585                     } else {
4586                         normal_fptr = static_schedule_dirty_cpu_nif;
4587                     }
4588 
4589                     dirty_fptr = f->fptr;
4590                 } else {
4591                     dirty_fptr = NULL;
4592                     normal_fptr = f->fptr;
4593                 }
4594 
4595                 beamasm_emit_call_nif(
4596                     ci, normal_fptr, lib, dirty_fptr,
4597                     (char*)&stub->info,
4598                     sizeof(stub->info) + sizeof(stub->code));
4599             }
4600 #else
4601             stub->code.call_nif[0] = BeamOpCodeAddr(op_call_nif_WWW);
4602             stub->code.call_nif[2] = (BeamInstr) lib;
4603 
4604             if (f->flags) {
4605                 stub->code.call_nif[3] = (BeamInstr) f->fptr;
4606                 stub->code.call_nif[1] =
4607                     (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
4608                         (BeamInstr) static_schedule_dirty_io_nif :
4609                         (BeamInstr) static_schedule_dirty_cpu_nif;
4610             } else {
4611                 stub->code.call_nif[1] = (BeamInstr) f->fptr;
4612             }
4613 #endif
4614         }
4615         erts_rwmtx_rwunlock(&erts_nif_call_tab_lock);
4616         ASSERT(lib->finish->nstubs_hashed == lib->entry.num_of_funcs);
4617     }
4618 
4619     if (ret != am_ok) {
4620 	goto error;
4621     }
4622 
4623     /* Call load or upgrade:
4624      */
4625     ASSERT(lib);
4626     env.mod_nif = lib;
4627 
4628     lib->priv_data = NULL;
4629     if (prev_mi->nif != NULL) { /**************** Upgrade ***************/
4630         void* prev_old_data = prev_mi->nif->priv_data;
4631         if (entry->upgrade == NULL) {
4632             ret = load_nif_error(c_p, upgrade, "Upgrade not supported by this NIF library.");
4633             goto error;
4634         }
4635         /*
4636          * Go single scheduler during upgrade callback.
4637          * Todo: Fix better solution with suspending callers and waiting for
4638          *       all calls to return (including dirty).
4639          *       Note that erts_thr_progress_block() will not block dirty NIFs.
4640          */
4641         erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
4642         erts_thr_progress_block();
4643         erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
4644         erts_pre_nif(&env, c_p, lib, NULL);
4645         veto = entry->upgrade(&env, &lib->priv_data, &prev_mi->nif->priv_data, args);
4646         erts_post_nif(&env);
4647         erts_thr_progress_unblock();
4648         if (veto) {
4649             prev_mi->nif->priv_data = prev_old_data;
4650             ret = load_nif_error(c_p, upgrade, "Library upgrade-call unsuccessful (%d).", veto);
4651         }
4652     }
4653     else if (entry->load != NULL) { /********* Initial load ***********/
4654         erts_pre_nif(&env, c_p, lib, NULL);
4655         veto = entry->load(&env, &lib->priv_data, args);
4656         erts_post_nif(&env);
4657         if (veto) {
4658             ret = load_nif_error(c_p, "load", "Library load-call unsuccessful (%d).", veto);
4659         }
4660     }
4661 
4662     if (ret == am_ok) {
4663 	/*
4664          * Everything ok, make NIF code callable.
4665 	 */
4666 	this_mi->nif = lib;
4667         prepare_opened_rt(lib);
4668         /*
4669          * The call table lock will make sure all NIFs and callbacks in module
4670          * are made accessible atomically.
4671          */
4672         erts_rwmtx_rwlock(&erts_nif_call_tab_lock);
4673         commit_opened_rt();
4674         patch_call_nif_early(entry, this_mi);
4675         erts_rwmtx_rwunlock(&erts_nif_call_tab_lock);
4676 
4677         cleanup_opened_rt();
4678 
4679         /*
4680          * Now we wait thread progress, to make sure no process is still
4681          * executing the beam code of the NIFs, before we can patch in the
4682          * final fast multi word call_nif_WWW instructions.
4683          */
4684         erts_refc_inc(&lib->refc, 2);
4685         erts_schedule_thr_prgr_later_op(load_nif_1st_finisher, lib,
4686                                         &lib->lop);
4687     }
4688     else {
4689     error:
4690 	rollback_opened_resource_types();
4691 	ASSERT(ret != am_ok);
4692         if (lib != NULL) {
4693             if (lib->finish != NULL) {
4694                 erase_hashed_stubs(lib->finish);
4695                 erts_free(ERTS_ALC_T_NIF, lib->finish);
4696             }
4697 	    erts_free(ERTS_ALC_T_NIF, lib);
4698 	}
4699 	if (handle != NULL && !erts_is_static_nif(handle)) {
4700 	    erts_sys_ddll_close(handle);
4701 	}
4702 	erts_sys_ddll_free_error(&errdesc);
4703     }
4704 
4705     erts_free(ERTS_ALC_T_TMP, lib_name);
4706 
4707     BIF_RET(ret);
4708 }
4709 
4710 
4711 /*
4712  * Write 'call_nif_early' as the first beam instruction for all NIFs
4713  * which will make them callable.
4714  *
4715  * The 'call_nif_early' is a one word beam instruction which uses a lock
4716  * protected hash lookup to get its "arguments". This guarantees an atomically
4717  * safe publication of all NIFs in the module.
4718  */
static void patch_call_nif_early(ErlNifEntry* entry,
                                 struct erl_module_instance* this_mi)
{
    /*
     * Patch every NIF stub of 'entry' in module instance 'this_mi' so that
     * its first instruction becomes the one word 'call_nif_early' (or, for
     * BEAMASM, so that the corresponding trampoline flag is set), which makes
     * the NIFs callable. The caller must hold code write permission and
     * erts_nif_call_tab_lock (asserted below).
     */
    int i;

    ERTS_LC_ASSERT(erts_has_code_write_permission());
    ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_nif_call_tab_lock));

    for (i=0; i < entry->num_of_funcs; i++)
    {
        ErlNifFunc* f = &entry->funcs[i];
        const ErtsCodeInfo * const * ci_pp;
        ErtsCodeInfo* ci;
        Eterm f_atom;

        /* NOTE(review): the result is not checked here; presumably the atom
         * and the function were already validated during load_nif — confirm. */
        erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1);

        ci_pp = get_func_pp(this_mi->code_hdr, f_atom, f->arity);
        ci = erts_writable_code_ptr(this_mi, *ci_pp);

#ifndef BEAMASM
        {
            const BeamInstr call_nif_early = BeamOpCodeAddr(op_call_nif_early);
            BeamInstr volatile *code_ptr;

            /* `ci` is writable. */
            code_ptr = (BeamInstr*)erts_codeinfo_to_code(ci);

            if (ci->u.gen_bp) {
                /*
                 * Function traced, patch the original instruction word
                 * Code write permission protects against racing breakpoint writes.
                 */
                GenericBp* g = ci->u.gen_bp;
                g->orig_instr = BeamSetCodeAddr(g->orig_instr, call_nif_early);
                /* If a breakpoint is live, the saved original instruction is
                 * what will execute; leave the code word itself untouched. */
                if (BeamIsOpCode(code_ptr[0], op_i_generic_breakpoint))
                    continue;
            } else {
                ASSERT(!BeamIsOpCode(code_ptr[0], op_i_generic_breakpoint));
            }

            code_ptr[0] = BeamSetCodeAddr(code_ptr[0], call_nif_early);
        }
#else
        /* See beam_asm.h for details on how the nif load trampoline works */
        erts_asm_bp_set_flag(ci, ERTS_ASM_BP_FLAG_CALL_NIF_EARLY);
#endif
    }
}
4768 
erts_call_nif_early(Process * c_p,const ErtsCodeInfo * ci)4769 ErtsCodePtr erts_call_nif_early(Process* c_p, const ErtsCodeInfo* ci)
4770 {
4771     ErtsNifBeamStub* bs;
4772     ErtsNifBeamStub tmpl;
4773 
4774     tmpl.code_info_exec = ci;
4775 
4776     erts_rwmtx_rlock(&erts_nif_call_tab_lock);
4777     bs = (ErtsNifBeamStub*) hash_get(&erts_nif_call_tab, &tmpl);
4778     erts_rwmtx_runlock(&erts_nif_call_tab_lock);
4779 
4780     ASSERT(bs);
4781     return (ErtsCodePtr)&bs->code;
4782 }
4783 
static void load_nif_1st_finisher(void* vlib)
{
    /*
     * First thread-progress callback after a successful NIF load. Thread
     * progress guarantees no process is still executing the original beam
     * code of the NIFs, so it is now safe to write the argument words of the
     * final call_nif_WWW instruction (BEAMASM: the whole native sequence).
     * The instruction word itself is patched by the 2nd finisher, after yet
     * another round of thread progress (scheduled at the bottom).
     */
    struct erl_module_nif* lib = (struct erl_module_nif*) vlib;
    ErtsNifFinish* fin;
    int i;

    erts_mtx_lock(&lib->load_mtx);
    fin = lib->finish;

    if (fin) {
        for (i=0; i < lib->entry.num_of_funcs; i++) {
            ErtsCodeInfo *ci = fin->beam_stubv[i].code_info_rw;

#ifdef BEAMASM
            /* Copy the prepared native code in after the function prologue;
             * see beam_asm.h for the trampoline layout. */
            char *code_ptr = (char*)erts_codeinfo_to_code(ci);
            sys_memcpy(&code_ptr[BEAM_ASM_FUNC_PROLOGUE_SIZE],
                       fin->beam_stubv[i].code.call_nif,
                       sizeof(fin->beam_stubv[0].code.call_nif));
#else
            BeamInstr *code_ptr = (BeamInstr*)erts_codeinfo_to_code(ci);

            /* called function */
            code_ptr[1] = fin->beam_stubv[i].code.call_nif[1];

            /* erl_module_nif */
            code_ptr[2] = fin->beam_stubv[i].code.call_nif[2];

            if (lib->entry.funcs[i].flags) {
                /* real NIF (word 1 holds the dirty scheduling wrapper) */
                code_ptr[3] = fin->beam_stubv[i].code.call_nif[3];
            }
#endif
        }
    }
    erts_mtx_unlock(&lib->load_mtx);

    if (fin) {
        /*
         * A second thread progress to get a memory barrier between the
         * arguments of call_nif_WWW (written above) and the instruction word
         * itself.
         */
        erts_schedule_thr_prgr_later_op(load_nif_2nd_finisher, lib,
                                        &lib->lop);
    }
    else { /* Unloaded */
        deref_nifmod(lib);
    }
}
4833 
static void load_nif_2nd_finisher(void* vlib)
{
    /*
     * Second thread-progress callback after NIF load. The argument words
     * written by the 1st finisher are now globally visible, so patch the
     * instruction word itself: replace 'call_nif_early' with the fast multi
     * word 'call_nif_WWW' (BEAMASM: clear the CALL_NIF_EARLY flag).
     */
    struct erl_module_nif* lib = (struct erl_module_nif*) vlib;
    ErtsNifFinish* fin;
    int i;

    /*
     * We seize code write permission only to avoid any trace breakpoints
     * to change while we patch the op_call_nif_WWW instruction.
     */
    if (!erts_try_seize_code_write_permission_aux(load_nif_2nd_finisher, vlib)) {
        /* NOTE(review): assumes the _aux variant re-invokes this callback
         * once the permission becomes available — confirm. */
        return;
    }

    erts_mtx_lock(&lib->load_mtx);
    fin = lib->finish;
    if (fin) {
        for (i=0; i < lib->entry.num_of_funcs; i++) {
            ErtsCodeInfo *ci = fin->beam_stubv[i].code_info_rw;

#ifndef BEAMASM
            BeamInstr volatile *code_ptr;

            code_ptr = (BeamInstr*)erts_codeinfo_to_code(ci);

            if (ci->u.gen_bp) {
                /*
                 * Function traced, patch the original instruction word
                 */
                GenericBp* g = ci->u.gen_bp;
                ASSERT(BeamIsOpCode(g->orig_instr, op_call_nif_early));
                g->orig_instr = BeamOpCodeAddr(op_call_nif_WWW);
                /* Live breakpoint: the saved original instruction is what
                 * executes; leave the code word untouched. */
                if (BeamIsOpCode(code_ptr[0], op_i_generic_breakpoint))
                    continue;
            }

            ASSERT(BeamIsOpCode(code_ptr[0], op_call_nif_early));
            code_ptr[0] = BeamOpCodeAddr(op_call_nif_WWW);
#else
            /* See beam_asm.h for details on how the nif load trampoline works */
            erts_asm_bp_unset_flag(ci, ERTS_ASM_BP_FLAG_CALL_NIF_EARLY);
#endif
        }
    }
    erts_mtx_unlock(&lib->load_mtx);

    erts_release_code_write_permission();

    if (fin) {
        UWord bytes = sizeof_ErtsNifFinish(lib->entry.num_of_funcs);
        /*
         * A third and final thread progress, to make sure no one is executing
         * the call_nif_early instructions anymore, before we can deallocate
         * the beam stubs.
         */
        erts_schedule_thr_prgr_later_cleanup_op(load_nif_3rd_finisher, lib,
                                                &lib->lop,
                                                bytes);
    }
    else { /* Unloaded */
        deref_nifmod(lib);
    }
}
4897 
static void load_nif_3rd_finisher(void* vlib)
{
    /*
     * Third and final thread-progress callback of a NIF load: no thread
     * can still be executing the temporary beam stubs, so free them and
     * drop the reference taken when the finishers were scheduled.
     */
    struct erl_module_nif* const nif_mod = (struct erl_module_nif*) vlib;

    release_beam_stubs(nif_mod);
    deref_nifmod(nif_mod);
}
4905 
release_beam_stubs(struct erl_module_nif * lib)4906 static void release_beam_stubs(struct erl_module_nif* lib)
4907 {
4908     ErtsNifFinish* fin;
4909 
4910     erts_mtx_lock(&lib->load_mtx);
4911     fin = lib->finish;
4912     lib->finish =  NULL;
4913     erts_mtx_unlock(&lib->load_mtx);
4914 
4915     if (fin) {
4916         erase_hashed_stubs(fin);
4917         erts_free(ERTS_ALC_T_NIF, fin);
4918     }
4919 }
4920 
static void erase_hashed_stubs(ErtsNifFinish* fin)
{
    /*
     * Remove all stubs of 'fin' that made it into the global NIF call
     * table. 'nstubs_hashed' may be smaller than the number of functions
     * if loading failed part way through.
     */
    int ix;

    erts_rwmtx_rwlock(&erts_nif_call_tab_lock);
    for (ix = 0; ix < fin->nstubs_hashed; ix++) {
        void* removed = hash_erase(&erts_nif_call_tab, &fin->beam_stubv[ix]);
        ASSERT(removed);
        (void) removed;
    }
    erts_rwmtx_rwunlock(&erts_nif_call_tab_lock);
}
4932 
void
erts_unload_nif(struct erl_module_nif* lib)
{
    /*
     * Tear down the NIF library 'lib' when its Erlang module is purged:
     * unlink all resource types owned by the library and drop the
     * references held on behalf of the Erlang code. Requires code write
     * permission (asserted below).
     */
    ErlNifResourceType* rt;
    ErlNifResourceType* next;

    ASSERT(lib != NULL);
    ASSERT(lib->mod != NULL);
    ERTS_LC_ASSERT(erts_has_code_write_permission());

    erts_tracer_nif_clear();

    /* Free temporary beam stubs in case a load was still being finished. */
    release_beam_stubs(lib);

    for (rt = resource_type_list.next;
	 rt != &resource_type_list;
	 rt = next) {

	next = rt->next;
	if (rt->owner == lib) {
	    /* Unlink the resource type from the global list... */
	    rt->next->prev = rt->prev;
	    rt->prev->next = rt->next;
	    rt->next = NULL;
	    rt->prev = NULL;
	    /* ...and free it now unless live resource instances remain
	     * (they hold references and will free it via their refc). */
	    if (erts_refc_dectest(&rt->refc, 0) == 0) {
		if (rt_have_callbacks(&rt->fn_real))
		    erts_refc_dec(&lib->dynlib_refc, 1);
		erts_refc_dec(&lib->refc, 1);
		erts_free(ERTS_ALC_T_NIF, rt);
	    }
	}
    }
    lib->mod = NULL;   /* purged Erlang module */

    if (erts_refc_dectest(&lib->dynlib_refc, 0) == 0)
	close_dynlib(lib);

    deref_nifmod(lib);
}
4972 
erl_nif_init()4973 void erl_nif_init()
4974 {
4975     ERTS_CT_ASSERT((offsetof(ErtsResource,data) % 8)
4976                    == ERTS_MAGIC_BIN_BYTES_TO_ALIGN);
4977 
4978     resource_type_list.next = &resource_type_list;
4979     resource_type_list.prev = &resource_type_list;
4980     resource_type_list.fn.dtor = NULL;
4981     resource_type_list.fn_real.dtor = NULL;
4982     resource_type_list.owner = NULL;
4983     resource_type_list.module = THE_NON_VALUE;
4984     resource_type_list.name = THE_NON_VALUE;
4985 
4986     nif_call_table_init();
4987 }
4988 
int erts_nif_get_funcs(struct erl_module_nif* mod,
                       ErlNifFunc **funcs)
{
    /* Expose the NIF function table of 'mod' through *funcs and return
     * the number of entries in it. */
    ErlNifEntry* entry = &mod->entry;

    *funcs = entry->funcs;
    return entry->num_of_funcs;
}
4995 
erts_nif_get_module(struct erl_module_nif * nif_mod)4996 Module *erts_nif_get_module(struct erl_module_nif *nif_mod) {
4997     return nif_mod->mod;
4998 }
4999 
Eterm erts_nif_call_function(Process *p, Process *tracee,
                             struct erl_module_nif* mod,
                             ErlNifFunc *fun, int argc, Eterm *argv)
{
    /*
     * Call NIF 'fun' (which must belong to 'mod', asserted in debug builds)
     * outside normal beam dispatch, optionally on behalf of 'tracee'.
     * Returns the NIF's result, or THE_NON_VALUE if it raised an exception.
     * Any heap data built by the NIF is released before returning (see the
     * comment below), so the returned term must not be used after the
     * caller is done inspecting it.
     */
    Eterm nif_result;
#ifdef DEBUG
    /* Verify that function is part of this module */
    int i;
    for (i = 0; i < mod->entry.num_of_funcs; i++)
        if (fun == &(mod->entry.funcs[i]))
            break;
    ASSERT(i < mod->entry.num_of_funcs);
    if (p)
        ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN
                           || erts_thr_progress_is_blocking());
#endif
    if (p) {
        /* This is almost a normal nif call like in beam_emu,
           except that any heap consumed by the nif will be
           released without checking if anything in it is live.
           This is because we cannot do a GC here as we don't know
           the number of live registers that have to be preserved.
           This means that any heap part of the returned term may
           not be used outside this function. */
        struct enif_environment_t env;
        /* Save the process' heap fragment list, off-heap list and heap top
         * so that everything the NIF builds can be discarded afterwards. */
        ErlHeapFragment *orig_hf = MBUF(p);
        ErlOffHeap orig_oh = MSO(p);
        Eterm *orig_htop = HEAP_TOP(p);
        ASSERT(is_internal_pid(p->common.id));
        MBUF(p) = NULL;
        clear_offheap(&MSO(p));

        erts_pre_nif(&env, p, mod, tracee);
#ifdef ERTS_NIF_ASSERT_IN_ENV
        env.dbg_disable_assert_in_env = 1;
#endif
        nif_result = (*fun->fptr)(&env, argc, argv);
        if (env.exception_thrown)
            nif_result = THE_NON_VALUE;
        erts_post_nif(&env);

        /* Free any offheap and heap fragments created in nif */
        if (MSO(p).first) {
            erts_cleanup_offheap(&MSO(p));
            clear_offheap(&MSO(p));
        }
        if (MBUF(p))
            free_message_buffer(MBUF(p));

        /* restore original heap fragment list */
        MBUF(p) = orig_hf;
        MSO(p) = orig_oh;
        HEAP_TOP(p) = orig_htop;
    } else {
        /* Nif call was done without a process context,
           so we create a phony one. */
        struct enif_msg_environment_t msg_env;
        pre_nif_noproc(&msg_env, mod, tracee);
#ifdef ERTS_NIF_ASSERT_IN_ENV
        msg_env.env.dbg_disable_assert_in_env = 1;
#endif
        nif_result = (*fun->fptr)(&msg_env.env, argc, argv);
        if (msg_env.env.exception_thrown)
            nif_result = THE_NON_VALUE;
        post_nif_noproc(&msg_env);
    }

    return nif_result;
}
5069 
5070 #ifdef USE_VM_PROBES
void dtrace_nifenv_str(ErlNifEnv *env, char *process_buf)
{
    /* Render the pid of the process bound to 'env' into process_buf,
     * for use as a dtrace probe argument. */
    dtrace_pid_str(env->proc->common.id, process_buf);
}
5075 #endif
5076 
5077 #ifdef READONLY_CHECK
5078 /* Use checksums to assert that NIFs do not write into inspected binaries
5079 */
5080 static void readonly_check_dtor(struct enif_tmp_obj_t*);
5081 static unsigned calc_checksum(unsigned char* ptr, unsigned size);
5082 
struct readonly_check_t
{
    unsigned char* ptr;   /* start of the inspected (read-only) area */
    unsigned size;        /* number of bytes covered */
    unsigned checksum;    /* checksum taken when the check was armed */
};
static void add_readonly_check(ErlNifEnv* env, unsigned char* ptr, unsigned sz)
{
    /*
     * Arm a read-only check over [ptr, ptr+sz): record a checksum of the
     * area as a tmp object of 'env'. Its destructor (readonly_check_dtor)
     * recomputes the checksum to detect writes by the NIF.
     */
    struct readonly_check_t* chk =
        alloc_tmp_obj(env, sizeof(struct readonly_check_t),
                      &readonly_check_dtor);

    chk->checksum = calc_checksum(ptr, sz);
    chk->size = sz;
    chk->ptr = ptr;
}
readonly_check_dtor(struct enif_tmp_obj_t * tmp_obj)5100 static void readonly_check_dtor(struct enif_tmp_obj_t* tmp_obj)
5101 {
5102     struct readonly_check_t* ro_check = (struct readonly_check_t*)&tmp_obj[1];
5103     unsigned chksum = calc_checksum(ro_check->ptr, ro_check->size);
5104     if (chksum != ro_check->checksum) {
5105 	fprintf(stderr, "\r\nReadonly data written by NIF, checksums differ"
5106 		" %x != %x\r\nABORTING\r\n", chksum, ro_check->checksum);
5107 	abort();
5108     }
5109     erts_free(tmp_obj->allocator, tmp_obj);
5110 }
static unsigned calc_checksum(unsigned char* ptr, unsigned size)
{
    /*
     * Fold the bytes of [ptr, ptr+size) into one word by xor:ing each byte
     * shifted to the position it would occupy in a little-endian 32-bit
     * load. Returns 0 for size == 0.
     *
     * The cast to unsigned is required for correctness: the original code
     * shifted the int-promoted byte, so a byte >= 0x80 at offset i%4 == 3
     * was left-shifted into the sign bit of a signed int — undefined
     * behavior per C11 6.5.7. Shifting an unsigned value is well defined
     * and yields the same word on all sane platforms.
     */
    unsigned i, sum = 0;
    for (i=0; i<size; i++) {
	sum ^= (unsigned)ptr[i] << ((i % 4)*8);
    }
    return sum;
}
5119 
5120 #endif /* READONLY_CHECK */
5121 
5122 #ifdef ERTS_NIF_ASSERT_IN_ENV
static void dbg_assert_in_env(ErlNifEnv* env, Eterm term,
                              int nr, const char* type, const char* func)
{
    /*
     * Debug check that 'term' was built in (or is reachable from) 'env'.
     * Aborts with a diagnostic naming 'func', the argument number 'nr'
     * (0 meaning the term itself) and its role 'type' on failure.
     */
    Uint saved_used_size;
    Eterm* real_htop;

    /* Immediates, literals, and the non-value of a thrown exception are
     * always fine — nothing heap-allocated to verify. */
    if (is_immed(term)
        || (is_non_value(term) && env->exception_thrown)
        || erts_is_literal(term, ptr_val(term)))
        return;

    if (env->dbg_disable_assert_in_env) {
        /*
         * Trace nifs may cheat as built terms are discarded after return.
         * ToDo: Check if 'term' is part of argv[].
         */
        return;
    }

    if (env->heap_frag) {
        ASSERT(env->heap_frag == MBUF(env->proc));
        ASSERT(env->hp >= env->heap_frag->mem);
        ASSERT(env->hp <= env->heap_frag->mem + env->heap_frag->alloc_size);
        /* Temporarily sync the fragment's used_size with env->hp so that
         * erts_dbg_within_proc sees the fragment's true extent. */
        saved_used_size = env->heap_frag->used_size;
        env->heap_frag->used_size = env->hp - env->heap_frag->mem;
        real_htop = NULL;
    }
    else {
        real_htop = env->hp;
    }
    if (!erts_dbg_within_proc(ptr_val(term), env->proc, real_htop)) {
        int ok = 0;
        if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
            /* Dirty NIF shadow process: the term may live on the heap of
             * the real process, reached through 'next'. */
            Process* real_proc = env->proc->next;
            ASSERT(real_proc);
            erts_proc_lock(real_proc, ERTS_PROC_LOCK_MAIN);
            ok = (ERTS_PROC_IS_EXITING(real_proc)
                  || erts_dbg_within_proc(ptr_val(term), real_proc, NULL));
            erts_proc_unlock(real_proc, ERTS_PROC_LOCK_MAIN);
        }
        if (!ok) {
            fprintf(stderr, "\r\nFAILED ASSERTION in %s:\r\n", func);
            if (nr) {
                fprintf(stderr, "Term #%d of the %s is not from same ErlNifEnv.",
                        nr, type);
            }
            else {
                fprintf(stderr, "The %s is not from the same ErlNifEnv.", type);
            }
            fprintf(stderr, "\r\nABORTING\r\n");
            abort();
        }
    }
    if (env->heap_frag) {
        /* Restore the used_size we clobbered above. */
        env->heap_frag->used_size = saved_used_size;
    }
}
5180 #endif
5181 
5182 #ifdef HAVE_USE_DTRACE
5183 
5184 #define MESSAGE_BUFSIZ 1024
5185 
static void get_string_maybe(ErlNifEnv *env, const ERL_NIF_TERM term,
		      char **ptr, char *buf, int bufsiz)
{
    /*
     * Copy 'term' (an iolist) into 'buf' as a NUL terminated string and
     * point *ptr at it. *ptr is set to NULL if the term is not an iolist
     * or does not fit ('buf' must have room for bufsiz + 1 bytes).
     */
    ErlNifBinary str_bin;

    if (enif_inspect_iolist_as_binary(env, term, &str_bin) &&
        str_bin.size <= bufsiz) {
        sys_memcpy(buf, (char *) str_bin.data, str_bin.size);
        buf[str_bin.size] = '\0';
        *ptr = buf;
    } else {
        *ptr = NULL;
    }
}
5200 
ERL_NIF_TERM erl_nif_user_trace_s1(ErlNifEnv* env, int argc,
                                   const ERL_NIF_TERM argv[])
{
    /*
     * Fire the dtrace probe user_trace_s1 with argv[0] rendered as a
     * string. Returns am_true if the probe fired, am_false if the probe
     * is disabled, and am_badarg if argv[0] is not an iolist of at most
     * MESSAGE_BUFSIZ bytes.
     */
    ErlNifBinary message_bin;
    DTRACE_CHARBUF(messagebuf, MESSAGE_BUFSIZ + 1);

    if (DTRACE_ENABLED(user_trace_s1)) {
	if (!enif_inspect_iolist_as_binary(env, argv[0], &message_bin) ||
	    message_bin.size > MESSAGE_BUFSIZ) {
	    return am_badarg;
	}
	sys_memcpy(messagebuf, (char *) message_bin.data, message_bin.size);
        messagebuf[message_bin.size] = '\0';
	DTRACE1(user_trace_s1, messagebuf);
	return am_true;
    } else {
	return am_false;
    }
}
5220 
ERL_NIF_TERM erl_nif_user_trace_i4s4(ErlNifEnv* env, int argc,
                                     const ERL_NIF_TERM argv[])
{
    /*
     * Fire the dtrace probe user_trace_i4s4 with the calling pid, a user
     * tag (argv[0]), four integers (argv[1..4]) and four strings
     * (argv[5..8]). Unparsable integers default to 0 and unparsable/too
     * long strings to NULL. Returns am_true if the probe fired, am_false
     * if it is disabled.
     */
    DTRACE_CHARBUF(procbuf, 32 + 1);
    DTRACE_CHARBUF(user_tagbuf, MESSAGE_BUFSIZ + 1);
    char *utbuf = NULL;
    ErlNifSInt64 i1, i2, i3, i4;
    DTRACE_CHARBUF(messagebuf1, MESSAGE_BUFSIZ + 1);
    DTRACE_CHARBUF(messagebuf2, MESSAGE_BUFSIZ + 1);
    DTRACE_CHARBUF(messagebuf3, MESSAGE_BUFSIZ + 1);
    DTRACE_CHARBUF(messagebuf4, MESSAGE_BUFSIZ + 1);
    char *mbuf1 = NULL, *mbuf2 = NULL, *mbuf3 = NULL, *mbuf4 = NULL;

    if (DTRACE_ENABLED(user_trace_i4s4)) {
	dtrace_nifenv_str(env, procbuf);
        get_string_maybe(env, argv[0], &utbuf, user_tagbuf, MESSAGE_BUFSIZ);
        if (! enif_get_int64(env, argv[1], &i1))
            i1 = 0;
        if (! enif_get_int64(env, argv[2], &i2))
            i2 = 0;
        if (! enif_get_int64(env, argv[3], &i3))
            i3 = 0;
        if (! enif_get_int64(env, argv[4], &i4))
            i4 = 0;
        get_string_maybe(env, argv[5], &mbuf1, messagebuf1, MESSAGE_BUFSIZ);
        get_string_maybe(env, argv[6], &mbuf2, messagebuf2, MESSAGE_BUFSIZ);
        get_string_maybe(env, argv[7], &mbuf3, messagebuf3, MESSAGE_BUFSIZ);
        get_string_maybe(env, argv[8], &mbuf4, messagebuf4, MESSAGE_BUFSIZ);
	DTRACE10(user_trace_i4s4, procbuf, utbuf,
		 i1, i2, i3, i4, mbuf1, mbuf2, mbuf3, mbuf4);
	return am_true;
    } else {
	return am_false;
    }
}
5256 
/* Expand to the ten-argument dtrace probe macro erlang_user_trace_n<label>;
 * needed because the probe name is composed from a numeric label. */
#define DTRACE10_LABEL(name, label, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9) \
    erlang_##name##label((a0), (a1), (a2), (a3), (a4), (a5), (a6), (a7), (a8), (a9))
/* One 'case' of the switch in erl_nif_user_trace_n: if the probe
 * user_trace_n<the_label> is enabled, decode argv[1..9] (user tag, four
 * integers, four strings — same layout as user_trace_i4s4 shifted by one)
 * and fire it; returns am_true/am_false like the other user_trace nifs. */
#define N_STATEMENT(the_label) \
   case the_label: \
      if (DTRACE_ENABLED(user_trace_n##the_label)) { \
          dtrace_nifenv_str(env, procbuf); \
          get_string_maybe(env, argv[1], &utbuf, user_tagbuf, MESSAGE_BUFSIZ); \
          if (! enif_get_int64(env, argv[2], &i1)) \
              i1 = 0; \
          if (! enif_get_int64(env, argv[3], &i2)) \
              i2 = 0; \
          if (! enif_get_int64(env, argv[4], &i3)) \
              i3 = 0; \
          if (! enif_get_int64(env, argv[5], &i4)) \
              i4 = 0; \
          get_string_maybe(env, argv[6], &mbuf1, messagebuf1, MESSAGE_BUFSIZ); \
          get_string_maybe(env, argv[7], &mbuf2, messagebuf2, MESSAGE_BUFSIZ); \
          get_string_maybe(env, argv[8], &mbuf3, messagebuf3, MESSAGE_BUFSIZ); \
          get_string_maybe(env, argv[9], &mbuf4, messagebuf4, MESSAGE_BUFSIZ); \
          DTRACE10_LABEL(user_trace_n, the_label, procbuf, utbuf,    \
                         i1, i2, i3, i4, mbuf1, mbuf2, mbuf3, mbuf4); \
          return am_true; \
      } else { \
          return am_false; \
      } \
      break
5283 
/*
 * NIF backing the labelled user-trace call: fires the statically
 * declared DTrace/SystemTap probe user_trace_n<Label> (one generated
 * probe per label) with the calling process' pid, a user tag, four
 * integers and four strings.
 *
 * argv[0]    : probe label; must be an integer in [0, 950], one value
 *              per generated probe/case below
 * argv[1]    : user tag; copied into user_tagbuf when convertible,
 *              otherwise the probe sees NULL
 * argv[2..5] : four integer arguments; any that fail enif_get_int64()
 *              are passed to the probe as 0
 * argv[6..9] : four optional string arguments; mbufN stays NULL when
 *              get_string_maybe() cannot convert the term
 *
 * Returns am_true if the selected probe was enabled and fired,
 * am_false if the probe was disabled, and am_badarg for a label that
 * is not an integer or is out of range.
 *
 * Fix: the previous guard accepted labels up to 1023 although the
 * switch only has cases 0..950, so labels 951..1023 fell through to
 * the am_error return that is documented as unreachable.  The guard
 * now matches the case table, so such labels get am_badarg like any
 * other out-of-range label.
 */
ERL_NIF_TERM erl_nif_user_trace_n(ErlNifEnv* env, int argc,
				  const ERL_NIF_TERM argv[])
{
    /* All of the locals below are referenced by name from inside the
     * N_STATEMENT() macro; do not rename them. */
    DTRACE_CHARBUF(procbuf, 32 + 1);                 /* caller's pid as text */
    DTRACE_CHARBUF(user_tagbuf, MESSAGE_BUFSIZ + 1); /* storage for user tag */
    char *utbuf = NULL;                              /* user tag, or NULL */
    ErlNifSInt64 i1, i2, i3, i4;                     /* integer probe args */
    DTRACE_CHARBUF(messagebuf1, MESSAGE_BUFSIZ + 1); /* storage for string args */
    DTRACE_CHARBUF(messagebuf2, MESSAGE_BUFSIZ + 1);
    DTRACE_CHARBUF(messagebuf3, MESSAGE_BUFSIZ + 1);
    DTRACE_CHARBUF(messagebuf4, MESSAGE_BUFSIZ + 1);
    char *mbuf1 = NULL, *mbuf2 = NULL, *mbuf3 = NULL, *mbuf4 = NULL;
    ErlNifSInt64 label = 0;

    /* Probe names are pasted from the label at compile time, so only
     * labels with a generated case below can be dispatched. */
    if (! enif_get_int64(env, argv[0], &label) || label < 0 || label > 950) {
	return am_badarg;
    }
    switch (label) {
        N_STATEMENT(0);
        N_STATEMENT(1);
        N_STATEMENT(2);
        N_STATEMENT(3);
        N_STATEMENT(4);
        N_STATEMENT(5);
        N_STATEMENT(6);
        N_STATEMENT(7);
        N_STATEMENT(8);
        N_STATEMENT(9);
        N_STATEMENT(10);
        N_STATEMENT(11);
        N_STATEMENT(12);
        N_STATEMENT(13);
        N_STATEMENT(14);
        N_STATEMENT(15);
        N_STATEMENT(16);
        N_STATEMENT(17);
        N_STATEMENT(18);
        N_STATEMENT(19);
        N_STATEMENT(20);
        N_STATEMENT(21);
        N_STATEMENT(22);
        N_STATEMENT(23);
        N_STATEMENT(24);
        N_STATEMENT(25);
        N_STATEMENT(26);
        N_STATEMENT(27);
        N_STATEMENT(28);
        N_STATEMENT(29);
        N_STATEMENT(30);
        N_STATEMENT(31);
        N_STATEMENT(32);
        N_STATEMENT(33);
        N_STATEMENT(34);
        N_STATEMENT(35);
        N_STATEMENT(36);
        N_STATEMENT(37);
        N_STATEMENT(38);
        N_STATEMENT(39);
        N_STATEMENT(40);
        N_STATEMENT(41);
        N_STATEMENT(42);
        N_STATEMENT(43);
        N_STATEMENT(44);
        N_STATEMENT(45);
        N_STATEMENT(46);
        N_STATEMENT(47);
        N_STATEMENT(48);
        N_STATEMENT(49);
        N_STATEMENT(50);
        N_STATEMENT(51);
        N_STATEMENT(52);
        N_STATEMENT(53);
        N_STATEMENT(54);
        N_STATEMENT(55);
        N_STATEMENT(56);
        N_STATEMENT(57);
        N_STATEMENT(58);
        N_STATEMENT(59);
        N_STATEMENT(60);
        N_STATEMENT(61);
        N_STATEMENT(62);
        N_STATEMENT(63);
        N_STATEMENT(64);
        N_STATEMENT(65);
        N_STATEMENT(66);
        N_STATEMENT(67);
        N_STATEMENT(68);
        N_STATEMENT(69);
        N_STATEMENT(70);
        N_STATEMENT(71);
        N_STATEMENT(72);
        N_STATEMENT(73);
        N_STATEMENT(74);
        N_STATEMENT(75);
        N_STATEMENT(76);
        N_STATEMENT(77);
        N_STATEMENT(78);
        N_STATEMENT(79);
        N_STATEMENT(80);
        N_STATEMENT(81);
        N_STATEMENT(82);
        N_STATEMENT(83);
        N_STATEMENT(84);
        N_STATEMENT(85);
        N_STATEMENT(86);
        N_STATEMENT(87);
        N_STATEMENT(88);
        N_STATEMENT(89);
        N_STATEMENT(90);
        N_STATEMENT(91);
        N_STATEMENT(92);
        N_STATEMENT(93);
        N_STATEMENT(94);
        N_STATEMENT(95);
        N_STATEMENT(96);
        N_STATEMENT(97);
        N_STATEMENT(98);
        N_STATEMENT(99);
        N_STATEMENT(100);
        N_STATEMENT(101);
        N_STATEMENT(102);
        N_STATEMENT(103);
        N_STATEMENT(104);
        N_STATEMENT(105);
        N_STATEMENT(106);
        N_STATEMENT(107);
        N_STATEMENT(108);
        N_STATEMENT(109);
        N_STATEMENT(110);
        N_STATEMENT(111);
        N_STATEMENT(112);
        N_STATEMENT(113);
        N_STATEMENT(114);
        N_STATEMENT(115);
        N_STATEMENT(116);
        N_STATEMENT(117);
        N_STATEMENT(118);
        N_STATEMENT(119);
        N_STATEMENT(120);
        N_STATEMENT(121);
        N_STATEMENT(122);
        N_STATEMENT(123);
        N_STATEMENT(124);
        N_STATEMENT(125);
        N_STATEMENT(126);
        N_STATEMENT(127);
        N_STATEMENT(128);
        N_STATEMENT(129);
        N_STATEMENT(130);
        N_STATEMENT(131);
        N_STATEMENT(132);
        N_STATEMENT(133);
        N_STATEMENT(134);
        N_STATEMENT(135);
        N_STATEMENT(136);
        N_STATEMENT(137);
        N_STATEMENT(138);
        N_STATEMENT(139);
        N_STATEMENT(140);
        N_STATEMENT(141);
        N_STATEMENT(142);
        N_STATEMENT(143);
        N_STATEMENT(144);
        N_STATEMENT(145);
        N_STATEMENT(146);
        N_STATEMENT(147);
        N_STATEMENT(148);
        N_STATEMENT(149);
        N_STATEMENT(150);
        N_STATEMENT(151);
        N_STATEMENT(152);
        N_STATEMENT(153);
        N_STATEMENT(154);
        N_STATEMENT(155);
        N_STATEMENT(156);
        N_STATEMENT(157);
        N_STATEMENT(158);
        N_STATEMENT(159);
        N_STATEMENT(160);
        N_STATEMENT(161);
        N_STATEMENT(162);
        N_STATEMENT(163);
        N_STATEMENT(164);
        N_STATEMENT(165);
        N_STATEMENT(166);
        N_STATEMENT(167);
        N_STATEMENT(168);
        N_STATEMENT(169);
        N_STATEMENT(170);
        N_STATEMENT(171);
        N_STATEMENT(172);
        N_STATEMENT(173);
        N_STATEMENT(174);
        N_STATEMENT(175);
        N_STATEMENT(176);
        N_STATEMENT(177);
        N_STATEMENT(178);
        N_STATEMENT(179);
        N_STATEMENT(180);
        N_STATEMENT(181);
        N_STATEMENT(182);
        N_STATEMENT(183);
        N_STATEMENT(184);
        N_STATEMENT(185);
        N_STATEMENT(186);
        N_STATEMENT(187);
        N_STATEMENT(188);
        N_STATEMENT(189);
        N_STATEMENT(190);
        N_STATEMENT(191);
        N_STATEMENT(192);
        N_STATEMENT(193);
        N_STATEMENT(194);
        N_STATEMENT(195);
        N_STATEMENT(196);
        N_STATEMENT(197);
        N_STATEMENT(198);
        N_STATEMENT(199);
        N_STATEMENT(200);
        N_STATEMENT(201);
        N_STATEMENT(202);
        N_STATEMENT(203);
        N_STATEMENT(204);
        N_STATEMENT(205);
        N_STATEMENT(206);
        N_STATEMENT(207);
        N_STATEMENT(208);
        N_STATEMENT(209);
        N_STATEMENT(210);
        N_STATEMENT(211);
        N_STATEMENT(212);
        N_STATEMENT(213);
        N_STATEMENT(214);
        N_STATEMENT(215);
        N_STATEMENT(216);
        N_STATEMENT(217);
        N_STATEMENT(218);
        N_STATEMENT(219);
        N_STATEMENT(220);
        N_STATEMENT(221);
        N_STATEMENT(222);
        N_STATEMENT(223);
        N_STATEMENT(224);
        N_STATEMENT(225);
        N_STATEMENT(226);
        N_STATEMENT(227);
        N_STATEMENT(228);
        N_STATEMENT(229);
        N_STATEMENT(230);
        N_STATEMENT(231);
        N_STATEMENT(232);
        N_STATEMENT(233);
        N_STATEMENT(234);
        N_STATEMENT(235);
        N_STATEMENT(236);
        N_STATEMENT(237);
        N_STATEMENT(238);
        N_STATEMENT(239);
        N_STATEMENT(240);
        N_STATEMENT(241);
        N_STATEMENT(242);
        N_STATEMENT(243);
        N_STATEMENT(244);
        N_STATEMENT(245);
        N_STATEMENT(246);
        N_STATEMENT(247);
        N_STATEMENT(248);
        N_STATEMENT(249);
        N_STATEMENT(250);
        N_STATEMENT(251);
        N_STATEMENT(252);
        N_STATEMENT(253);
        N_STATEMENT(254);
        N_STATEMENT(255);
        N_STATEMENT(256);
        N_STATEMENT(257);
        N_STATEMENT(258);
        N_STATEMENT(259);
        N_STATEMENT(260);
        N_STATEMENT(261);
        N_STATEMENT(262);
        N_STATEMENT(263);
        N_STATEMENT(264);
        N_STATEMENT(265);
        N_STATEMENT(266);
        N_STATEMENT(267);
        N_STATEMENT(268);
        N_STATEMENT(269);
        N_STATEMENT(270);
        N_STATEMENT(271);
        N_STATEMENT(272);
        N_STATEMENT(273);
        N_STATEMENT(274);
        N_STATEMENT(275);
        N_STATEMENT(276);
        N_STATEMENT(277);
        N_STATEMENT(278);
        N_STATEMENT(279);
        N_STATEMENT(280);
        N_STATEMENT(281);
        N_STATEMENT(282);
        N_STATEMENT(283);
        N_STATEMENT(284);
        N_STATEMENT(285);
        N_STATEMENT(286);
        N_STATEMENT(287);
        N_STATEMENT(288);
        N_STATEMENT(289);
        N_STATEMENT(290);
        N_STATEMENT(291);
        N_STATEMENT(292);
        N_STATEMENT(293);
        N_STATEMENT(294);
        N_STATEMENT(295);
        N_STATEMENT(296);
        N_STATEMENT(297);
        N_STATEMENT(298);
        N_STATEMENT(299);
        N_STATEMENT(300);
        N_STATEMENT(301);
        N_STATEMENT(302);
        N_STATEMENT(303);
        N_STATEMENT(304);
        N_STATEMENT(305);
        N_STATEMENT(306);
        N_STATEMENT(307);
        N_STATEMENT(308);
        N_STATEMENT(309);
        N_STATEMENT(310);
        N_STATEMENT(311);
        N_STATEMENT(312);
        N_STATEMENT(313);
        N_STATEMENT(314);
        N_STATEMENT(315);
        N_STATEMENT(316);
        N_STATEMENT(317);
        N_STATEMENT(318);
        N_STATEMENT(319);
        N_STATEMENT(320);
        N_STATEMENT(321);
        N_STATEMENT(322);
        N_STATEMENT(323);
        N_STATEMENT(324);
        N_STATEMENT(325);
        N_STATEMENT(326);
        N_STATEMENT(327);
        N_STATEMENT(328);
        N_STATEMENT(329);
        N_STATEMENT(330);
        N_STATEMENT(331);
        N_STATEMENT(332);
        N_STATEMENT(333);
        N_STATEMENT(334);
        N_STATEMENT(335);
        N_STATEMENT(336);
        N_STATEMENT(337);
        N_STATEMENT(338);
        N_STATEMENT(339);
        N_STATEMENT(340);
        N_STATEMENT(341);
        N_STATEMENT(342);
        N_STATEMENT(343);
        N_STATEMENT(344);
        N_STATEMENT(345);
        N_STATEMENT(346);
        N_STATEMENT(347);
        N_STATEMENT(348);
        N_STATEMENT(349);
        N_STATEMENT(350);
        N_STATEMENT(351);
        N_STATEMENT(352);
        N_STATEMENT(353);
        N_STATEMENT(354);
        N_STATEMENT(355);
        N_STATEMENT(356);
        N_STATEMENT(357);
        N_STATEMENT(358);
        N_STATEMENT(359);
        N_STATEMENT(360);
        N_STATEMENT(361);
        N_STATEMENT(362);
        N_STATEMENT(363);
        N_STATEMENT(364);
        N_STATEMENT(365);
        N_STATEMENT(366);
        N_STATEMENT(367);
        N_STATEMENT(368);
        N_STATEMENT(369);
        N_STATEMENT(370);
        N_STATEMENT(371);
        N_STATEMENT(372);
        N_STATEMENT(373);
        N_STATEMENT(374);
        N_STATEMENT(375);
        N_STATEMENT(376);
        N_STATEMENT(377);
        N_STATEMENT(378);
        N_STATEMENT(379);
        N_STATEMENT(380);
        N_STATEMENT(381);
        N_STATEMENT(382);
        N_STATEMENT(383);
        N_STATEMENT(384);
        N_STATEMENT(385);
        N_STATEMENT(386);
        N_STATEMENT(387);
        N_STATEMENT(388);
        N_STATEMENT(389);
        N_STATEMENT(390);
        N_STATEMENT(391);
        N_STATEMENT(392);
        N_STATEMENT(393);
        N_STATEMENT(394);
        N_STATEMENT(395);
        N_STATEMENT(396);
        N_STATEMENT(397);
        N_STATEMENT(398);
        N_STATEMENT(399);
        N_STATEMENT(400);
        N_STATEMENT(401);
        N_STATEMENT(402);
        N_STATEMENT(403);
        N_STATEMENT(404);
        N_STATEMENT(405);
        N_STATEMENT(406);
        N_STATEMENT(407);
        N_STATEMENT(408);
        N_STATEMENT(409);
        N_STATEMENT(410);
        N_STATEMENT(411);
        N_STATEMENT(412);
        N_STATEMENT(413);
        N_STATEMENT(414);
        N_STATEMENT(415);
        N_STATEMENT(416);
        N_STATEMENT(417);
        N_STATEMENT(418);
        N_STATEMENT(419);
        N_STATEMENT(420);
        N_STATEMENT(421);
        N_STATEMENT(422);
        N_STATEMENT(423);
        N_STATEMENT(424);
        N_STATEMENT(425);
        N_STATEMENT(426);
        N_STATEMENT(427);
        N_STATEMENT(428);
        N_STATEMENT(429);
        N_STATEMENT(430);
        N_STATEMENT(431);
        N_STATEMENT(432);
        N_STATEMENT(433);
        N_STATEMENT(434);
        N_STATEMENT(435);
        N_STATEMENT(436);
        N_STATEMENT(437);
        N_STATEMENT(438);
        N_STATEMENT(439);
        N_STATEMENT(440);
        N_STATEMENT(441);
        N_STATEMENT(442);
        N_STATEMENT(443);
        N_STATEMENT(444);
        N_STATEMENT(445);
        N_STATEMENT(446);
        N_STATEMENT(447);
        N_STATEMENT(448);
        N_STATEMENT(449);
        N_STATEMENT(450);
        N_STATEMENT(451);
        N_STATEMENT(452);
        N_STATEMENT(453);
        N_STATEMENT(454);
        N_STATEMENT(455);
        N_STATEMENT(456);
        N_STATEMENT(457);
        N_STATEMENT(458);
        N_STATEMENT(459);
        N_STATEMENT(460);
        N_STATEMENT(461);
        N_STATEMENT(462);
        N_STATEMENT(463);
        N_STATEMENT(464);
        N_STATEMENT(465);
        N_STATEMENT(466);
        N_STATEMENT(467);
        N_STATEMENT(468);
        N_STATEMENT(469);
        N_STATEMENT(470);
        N_STATEMENT(471);
        N_STATEMENT(472);
        N_STATEMENT(473);
        N_STATEMENT(474);
        N_STATEMENT(475);
        N_STATEMENT(476);
        N_STATEMENT(477);
        N_STATEMENT(478);
        N_STATEMENT(479);
        N_STATEMENT(480);
        N_STATEMENT(481);
        N_STATEMENT(482);
        N_STATEMENT(483);
        N_STATEMENT(484);
        N_STATEMENT(485);
        N_STATEMENT(486);
        N_STATEMENT(487);
        N_STATEMENT(488);
        N_STATEMENT(489);
        N_STATEMENT(490);
        N_STATEMENT(491);
        N_STATEMENT(492);
        N_STATEMENT(493);
        N_STATEMENT(494);
        N_STATEMENT(495);
        N_STATEMENT(496);
        N_STATEMENT(497);
        N_STATEMENT(498);
        N_STATEMENT(499);
        N_STATEMENT(500);
        N_STATEMENT(501);
        N_STATEMENT(502);
        N_STATEMENT(503);
        N_STATEMENT(504);
        N_STATEMENT(505);
        N_STATEMENT(506);
        N_STATEMENT(507);
        N_STATEMENT(508);
        N_STATEMENT(509);
        N_STATEMENT(510);
        N_STATEMENT(511);
        N_STATEMENT(512);
        N_STATEMENT(513);
        N_STATEMENT(514);
        N_STATEMENT(515);
        N_STATEMENT(516);
        N_STATEMENT(517);
        N_STATEMENT(518);
        N_STATEMENT(519);
        N_STATEMENT(520);
        N_STATEMENT(521);
        N_STATEMENT(522);
        N_STATEMENT(523);
        N_STATEMENT(524);
        N_STATEMENT(525);
        N_STATEMENT(526);
        N_STATEMENT(527);
        N_STATEMENT(528);
        N_STATEMENT(529);
        N_STATEMENT(530);
        N_STATEMENT(531);
        N_STATEMENT(532);
        N_STATEMENT(533);
        N_STATEMENT(534);
        N_STATEMENT(535);
        N_STATEMENT(536);
        N_STATEMENT(537);
        N_STATEMENT(538);
        N_STATEMENT(539);
        N_STATEMENT(540);
        N_STATEMENT(541);
        N_STATEMENT(542);
        N_STATEMENT(543);
        N_STATEMENT(544);
        N_STATEMENT(545);
        N_STATEMENT(546);
        N_STATEMENT(547);
        N_STATEMENT(548);
        N_STATEMENT(549);
        N_STATEMENT(550);
        N_STATEMENT(551);
        N_STATEMENT(552);
        N_STATEMENT(553);
        N_STATEMENT(554);
        N_STATEMENT(555);
        N_STATEMENT(556);
        N_STATEMENT(557);
        N_STATEMENT(558);
        N_STATEMENT(559);
        N_STATEMENT(560);
        N_STATEMENT(561);
        N_STATEMENT(562);
        N_STATEMENT(563);
        N_STATEMENT(564);
        N_STATEMENT(565);
        N_STATEMENT(566);
        N_STATEMENT(567);
        N_STATEMENT(568);
        N_STATEMENT(569);
        N_STATEMENT(570);
        N_STATEMENT(571);
        N_STATEMENT(572);
        N_STATEMENT(573);
        N_STATEMENT(574);
        N_STATEMENT(575);
        N_STATEMENT(576);
        N_STATEMENT(577);
        N_STATEMENT(578);
        N_STATEMENT(579);
        N_STATEMENT(580);
        N_STATEMENT(581);
        N_STATEMENT(582);
        N_STATEMENT(583);
        N_STATEMENT(584);
        N_STATEMENT(585);
        N_STATEMENT(586);
        N_STATEMENT(587);
        N_STATEMENT(588);
        N_STATEMENT(589);
        N_STATEMENT(590);
        N_STATEMENT(591);
        N_STATEMENT(592);
        N_STATEMENT(593);
        N_STATEMENT(594);
        N_STATEMENT(595);
        N_STATEMENT(596);
        N_STATEMENT(597);
        N_STATEMENT(598);
        N_STATEMENT(599);
        N_STATEMENT(600);
        N_STATEMENT(601);
        N_STATEMENT(602);
        N_STATEMENT(603);
        N_STATEMENT(604);
        N_STATEMENT(605);
        N_STATEMENT(606);
        N_STATEMENT(607);
        N_STATEMENT(608);
        N_STATEMENT(609);
        N_STATEMENT(610);
        N_STATEMENT(611);
        N_STATEMENT(612);
        N_STATEMENT(613);
        N_STATEMENT(614);
        N_STATEMENT(615);
        N_STATEMENT(616);
        N_STATEMENT(617);
        N_STATEMENT(618);
        N_STATEMENT(619);
        N_STATEMENT(620);
        N_STATEMENT(621);
        N_STATEMENT(622);
        N_STATEMENT(623);
        N_STATEMENT(624);
        N_STATEMENT(625);
        N_STATEMENT(626);
        N_STATEMENT(627);
        N_STATEMENT(628);
        N_STATEMENT(629);
        N_STATEMENT(630);
        N_STATEMENT(631);
        N_STATEMENT(632);
        N_STATEMENT(633);
        N_STATEMENT(634);
        N_STATEMENT(635);
        N_STATEMENT(636);
        N_STATEMENT(637);
        N_STATEMENT(638);
        N_STATEMENT(639);
        N_STATEMENT(640);
        N_STATEMENT(641);
        N_STATEMENT(642);
        N_STATEMENT(643);
        N_STATEMENT(644);
        N_STATEMENT(645);
        N_STATEMENT(646);
        N_STATEMENT(647);
        N_STATEMENT(648);
        N_STATEMENT(649);
        N_STATEMENT(650);
        N_STATEMENT(651);
        N_STATEMENT(652);
        N_STATEMENT(653);
        N_STATEMENT(654);
        N_STATEMENT(655);
        N_STATEMENT(656);
        N_STATEMENT(657);
        N_STATEMENT(658);
        N_STATEMENT(659);
        N_STATEMENT(660);
        N_STATEMENT(661);
        N_STATEMENT(662);
        N_STATEMENT(663);
        N_STATEMENT(664);
        N_STATEMENT(665);
        N_STATEMENT(666);
        N_STATEMENT(667);
        N_STATEMENT(668);
        N_STATEMENT(669);
        N_STATEMENT(670);
        N_STATEMENT(671);
        N_STATEMENT(672);
        N_STATEMENT(673);
        N_STATEMENT(674);
        N_STATEMENT(675);
        N_STATEMENT(676);
        N_STATEMENT(677);
        N_STATEMENT(678);
        N_STATEMENT(679);
        N_STATEMENT(680);
        N_STATEMENT(681);
        N_STATEMENT(682);
        N_STATEMENT(683);
        N_STATEMENT(684);
        N_STATEMENT(685);
        N_STATEMENT(686);
        N_STATEMENT(687);
        N_STATEMENT(688);
        N_STATEMENT(689);
        N_STATEMENT(690);
        N_STATEMENT(691);
        N_STATEMENT(692);
        N_STATEMENT(693);
        N_STATEMENT(694);
        N_STATEMENT(695);
        N_STATEMENT(696);
        N_STATEMENT(697);
        N_STATEMENT(698);
        N_STATEMENT(699);
        N_STATEMENT(700);
        N_STATEMENT(701);
        N_STATEMENT(702);
        N_STATEMENT(703);
        N_STATEMENT(704);
        N_STATEMENT(705);
        N_STATEMENT(706);
        N_STATEMENT(707);
        N_STATEMENT(708);
        N_STATEMENT(709);
        N_STATEMENT(710);
        N_STATEMENT(711);
        N_STATEMENT(712);
        N_STATEMENT(713);
        N_STATEMENT(714);
        N_STATEMENT(715);
        N_STATEMENT(716);
        N_STATEMENT(717);
        N_STATEMENT(718);
        N_STATEMENT(719);
        N_STATEMENT(720);
        N_STATEMENT(721);
        N_STATEMENT(722);
        N_STATEMENT(723);
        N_STATEMENT(724);
        N_STATEMENT(725);
        N_STATEMENT(726);
        N_STATEMENT(727);
        N_STATEMENT(728);
        N_STATEMENT(729);
        N_STATEMENT(730);
        N_STATEMENT(731);
        N_STATEMENT(732);
        N_STATEMENT(733);
        N_STATEMENT(734);
        N_STATEMENT(735);
        N_STATEMENT(736);
        N_STATEMENT(737);
        N_STATEMENT(738);
        N_STATEMENT(739);
        N_STATEMENT(740);
        N_STATEMENT(741);
        N_STATEMENT(742);
        N_STATEMENT(743);
        N_STATEMENT(744);
        N_STATEMENT(745);
        N_STATEMENT(746);
        N_STATEMENT(747);
        N_STATEMENT(748);
        N_STATEMENT(749);
        N_STATEMENT(750);
        N_STATEMENT(751);
        N_STATEMENT(752);
        N_STATEMENT(753);
        N_STATEMENT(754);
        N_STATEMENT(755);
        N_STATEMENT(756);
        N_STATEMENT(757);
        N_STATEMENT(758);
        N_STATEMENT(759);
        N_STATEMENT(760);
        N_STATEMENT(761);
        N_STATEMENT(762);
        N_STATEMENT(763);
        N_STATEMENT(764);
        N_STATEMENT(765);
        N_STATEMENT(766);
        N_STATEMENT(767);
        N_STATEMENT(768);
        N_STATEMENT(769);
        N_STATEMENT(770);
        N_STATEMENT(771);
        N_STATEMENT(772);
        N_STATEMENT(773);
        N_STATEMENT(774);
        N_STATEMENT(775);
        N_STATEMENT(776);
        N_STATEMENT(777);
        N_STATEMENT(778);
        N_STATEMENT(779);
        N_STATEMENT(780);
        N_STATEMENT(781);
        N_STATEMENT(782);
        N_STATEMENT(783);
        N_STATEMENT(784);
        N_STATEMENT(785);
        N_STATEMENT(786);
        N_STATEMENT(787);
        N_STATEMENT(788);
        N_STATEMENT(789);
        N_STATEMENT(790);
        N_STATEMENT(791);
        N_STATEMENT(792);
        N_STATEMENT(793);
        N_STATEMENT(794);
        N_STATEMENT(795);
        N_STATEMENT(796);
        N_STATEMENT(797);
        N_STATEMENT(798);
        N_STATEMENT(799);
        N_STATEMENT(800);
        N_STATEMENT(801);
        N_STATEMENT(802);
        N_STATEMENT(803);
        N_STATEMENT(804);
        N_STATEMENT(805);
        N_STATEMENT(806);
        N_STATEMENT(807);
        N_STATEMENT(808);
        N_STATEMENT(809);
        N_STATEMENT(810);
        N_STATEMENT(811);
        N_STATEMENT(812);
        N_STATEMENT(813);
        N_STATEMENT(814);
        N_STATEMENT(815);
        N_STATEMENT(816);
        N_STATEMENT(817);
        N_STATEMENT(818);
        N_STATEMENT(819);
        N_STATEMENT(820);
        N_STATEMENT(821);
        N_STATEMENT(822);
        N_STATEMENT(823);
        N_STATEMENT(824);
        N_STATEMENT(825);
        N_STATEMENT(826);
        N_STATEMENT(827);
        N_STATEMENT(828);
        N_STATEMENT(829);
        N_STATEMENT(830);
        N_STATEMENT(831);
        N_STATEMENT(832);
        N_STATEMENT(833);
        N_STATEMENT(834);
        N_STATEMENT(835);
        N_STATEMENT(836);
        N_STATEMENT(837);
        N_STATEMENT(838);
        N_STATEMENT(839);
        N_STATEMENT(840);
        N_STATEMENT(841);
        N_STATEMENT(842);
        N_STATEMENT(843);
        N_STATEMENT(844);
        N_STATEMENT(845);
        N_STATEMENT(846);
        N_STATEMENT(847);
        N_STATEMENT(848);
        N_STATEMENT(849);
        N_STATEMENT(850);
        N_STATEMENT(851);
        N_STATEMENT(852);
        N_STATEMENT(853);
        N_STATEMENT(854);
        N_STATEMENT(855);
        N_STATEMENT(856);
        N_STATEMENT(857);
        N_STATEMENT(858);
        N_STATEMENT(859);
        N_STATEMENT(860);
        N_STATEMENT(861);
        N_STATEMENT(862);
        N_STATEMENT(863);
        N_STATEMENT(864);
        N_STATEMENT(865);
        N_STATEMENT(866);
        N_STATEMENT(867);
        N_STATEMENT(868);
        N_STATEMENT(869);
        N_STATEMENT(870);
        N_STATEMENT(871);
        N_STATEMENT(872);
        N_STATEMENT(873);
        N_STATEMENT(874);
        N_STATEMENT(875);
        N_STATEMENT(876);
        N_STATEMENT(877);
        N_STATEMENT(878);
        N_STATEMENT(879);
        N_STATEMENT(880);
        N_STATEMENT(881);
        N_STATEMENT(882);
        N_STATEMENT(883);
        N_STATEMENT(884);
        N_STATEMENT(885);
        N_STATEMENT(886);
        N_STATEMENT(887);
        N_STATEMENT(888);
        N_STATEMENT(889);
        N_STATEMENT(890);
        N_STATEMENT(891);
        N_STATEMENT(892);
        N_STATEMENT(893);
        N_STATEMENT(894);
        N_STATEMENT(895);
        N_STATEMENT(896);
        N_STATEMENT(897);
        N_STATEMENT(898);
        N_STATEMENT(899);
        N_STATEMENT(900);
        N_STATEMENT(901);
        N_STATEMENT(902);
        N_STATEMENT(903);
        N_STATEMENT(904);
        N_STATEMENT(905);
        N_STATEMENT(906);
        N_STATEMENT(907);
        N_STATEMENT(908);
        N_STATEMENT(909);
        N_STATEMENT(910);
        N_STATEMENT(911);
        N_STATEMENT(912);
        N_STATEMENT(913);
        N_STATEMENT(914);
        N_STATEMENT(915);
        N_STATEMENT(916);
        N_STATEMENT(917);
        N_STATEMENT(918);
        N_STATEMENT(919);
        N_STATEMENT(920);
        N_STATEMENT(921);
        N_STATEMENT(922);
        N_STATEMENT(923);
        N_STATEMENT(924);
        N_STATEMENT(925);
        N_STATEMENT(926);
        N_STATEMENT(927);
        N_STATEMENT(928);
        N_STATEMENT(929);
        N_STATEMENT(930);
        N_STATEMENT(931);
        N_STATEMENT(932);
        N_STATEMENT(933);
        N_STATEMENT(934);
        N_STATEMENT(935);
        N_STATEMENT(936);
        N_STATEMENT(937);
        N_STATEMENT(938);
        N_STATEMENT(939);
        N_STATEMENT(940);
        N_STATEMENT(941);
        N_STATEMENT(942);
        N_STATEMENT(943);
        N_STATEMENT(944);
        N_STATEMENT(945);
        N_STATEMENT(946);
        N_STATEMENT(947);
        N_STATEMENT(948);
        N_STATEMENT(949);
        N_STATEMENT(950);
    }
    return am_error;          /* NOTREACHED, shut up the compiler */
}
6256 
6257 #endif /* HAVE_USE_DTRACE */
6258