1 /*
2  * %CopyrightBegin%
3  *
4  * Copyright Ericsson AB 1996-2020. All Rights Reserved.
5  *
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  *     http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  *
18  * %CopyrightEnd%
19  */
20 
21 #ifdef HAVE_CONFIG_H
22 #  include "config.h"
23 #endif
24 
25 #define ERTS_DO_INCL_GLB_INLINE_FUNC_DEF
26 
27 #include "sys.h"
28 #include "erl_vm.h"
29 #include "global.h"
30 #include "erl_process.h"
31 #include "big.h"
32 #include "bif.h"
33 #include "erl_binary.h"
34 #include "erl_bits.h"
35 #include "erl_map.h"
36 #include "packet_parser.h"
37 #include "erl_gc.h"
38 #define ERTS_WANT_DB_INTERNAL__
39 #include "erl_db.h"
40 #include "erl_threads.h"
41 #include "register.h"
42 #include "dist.h"
43 #include "erl_printf.h"
44 #include "erl_threads.h"
45 #include "erl_lock_count.h"
46 #include "erl_time.h"
47 #include "erl_thr_progress.h"
48 #include "erl_thr_queue.h"
49 #include "erl_sched_spec_pre_alloc.h"
50 #include "beam_bp.h"
51 #include "erl_ptab.h"
52 #include "erl_check_io.h"
53 #include "erl_bif_unique.h"
54 #include "erl_io_queue.h"
55 #define ERTS_WANT_TIMER_WHEEL_API
56 #include "erl_time.h"
57 #ifdef HIPE
58 #  include "hipe_mode_switch.h"
59 #endif
60 #define ERTS_WANT_NFUNC_SCHED_INTERNALS__
61 #include "erl_nfunc_sched.h"
62 #include "erl_proc_sig_queue.h"
63 
64 #undef M_TRIM_THRESHOLD
65 #undef M_TOP_PAD
66 #undef M_MMAP_THRESHOLD
67 #undef M_MMAP_MAX
68 
69 #if defined(__GLIBC__) && defined(HAVE_MALLOC_H)
70 #include <malloc.h>
71 #endif
72 
73 #if !defined(HAVE_MALLOPT)
74 #undef  HAVE_MALLOPT
75 #define HAVE_MALLOPT 0
76 #endif
77 
78 Eterm*
79 erts_heap_alloc(Process* p, Uint need, Uint xtra)
80 {
81     ErlHeapFragment* bp;
82     Uint n;
83 #if defined(DEBUG) || defined(CHECK_FOR_HOLES)
84     Uint i;
85 #endif
86 
87 #ifdef FORCE_HEAP_FRAGS
88     if (p->space_verified && p->space_verified_from!=NULL
89 	&& HEAP_TOP(p) >= p->space_verified_from
90 	&& HEAP_TOP(p) + need <= p->space_verified_from + p->space_verified
91 	&& HEAP_LIMIT(p) - HEAP_TOP(p) >= need) {
92 
93 	Uint consumed = need + (HEAP_TOP(p) - p->space_verified_from);
94 	ASSERT(consumed <= p->space_verified);
95 	p->space_verified -= consumed;
96 	p->space_verified_from += consumed;
97 	HEAP_TOP(p) = p->space_verified_from;
98 	return HEAP_TOP(p) - need;
99     }
100     p->space_verified = 0;
101     p->space_verified_from = NULL;
102 #endif /* FORCE_HEAP_FRAGS */
103 
104     n = need + xtra;
105     bp = MBUF(p);
106     if (bp != NULL && need <= (bp->alloc_size - bp->used_size)) {
107 	Eterm* ret = bp->mem + bp->used_size;
108 	bp->used_size += need;
109 	p->mbuf_sz += need;
110 	return ret;
111     }
112 #ifdef DEBUG
113     n++;
114 #endif
115     bp = (ErlHeapFragment*)
116 	ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP_FRAG, ERTS_HEAP_FRAG_SIZE(n));
117 
118 #if defined(DEBUG) || defined(CHECK_FOR_HOLES)
119     for (i = 0; i < n; i++) {
120 	bp->mem[i] = ERTS_HOLE_MARKER;
121     }
122 #endif
123 
124 #ifdef DEBUG
125     n--;
126 #endif
127 
128     bp->next = MBUF(p);
129     MBUF(p) = bp;
130     bp->alloc_size = n;
131     bp->used_size = need;
132     MBUF_SIZE(p) += need;
133     bp->off_heap.first = NULL;
134     bp->off_heap.overhead = 0;
135     return bp->mem;
136 }
137 
138 #ifdef CHECK_FOR_HOLES
139 Eterm*
140 erts_set_hole_marker(Eterm* ptr, Uint sz)
141 {
142     Eterm* p = ptr;
143     Uint i;
144 
145     for (i = 0; i < sz; i++) {
146 	*p++ = ERTS_HOLE_MARKER;
147     }
148     return ptr;
149 }
150 #endif
151 
152 /*
153  * Helper function for the ESTACK macros defined in global.h.
154  */
155 void
156 erl_grow_estack(ErtsEStack* s, Uint need)
157 {
158     Uint old_size = (s->end - s->start);
159     Uint new_size;
160     Uint sp_offs = s->sp - s->start;
161 
162     if (need < old_size)
163 	new_size = 2*old_size;
164     else
165 	new_size = ((need / old_size) + 2) * old_size;
166 
167     if (s->start != s->edefault) {
168 	s->start = erts_realloc(s->alloc_type, s->start,
169 				new_size*sizeof(Eterm));
170     } else {
171 	Eterm* new_ptr = erts_alloc(s->alloc_type, new_size*sizeof(Eterm));
172 	sys_memcpy(new_ptr, s->start, old_size*sizeof(Eterm));
173 	s->start = new_ptr;
174     }
175     s->end = s->start + new_size;
176     s->sp = s->start + sp_offs;
177 }
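/*
 * A minimal usage sketch (illustrative only, kept out of the build): the
 * ESTACK macros from global.h start out on a small default buffer and call
 * erl_grow_estack() transparently when a push would overflow it. The WSTACK
 * macros below work the same way for UWord elements. Only macros that are
 * already used elsewhere in this file appear in the sketch.
 */
#if 0
static void
estack_usage_sketch(Eterm list)
{
    DECLARE_ESTACK(s);

    while (is_list(list)) {
	ESTACK_PUSH(s, CAR(list_val(list))); /* may call erl_grow_estack() */
	list = CDR(list_val(list));
    }
    while (!ESTACK_ISEMPTY(s)) {
	(void) ESTACK_POP(s);                /* consume in LIFO order */
    }
    DESTROY_ESTACK(s);
}
#endif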
178 /*
179  * Helper function for the WSTACK macros defined in global.h.
180  */
181 void
182 erl_grow_wstack(ErtsWStack* s, Uint need)
183 {
184     Uint old_size = (s->wend - s->wstart);
185     Uint new_size;
186     Uint sp_offs = s->wsp - s->wstart;
187 
188     if (need < old_size)
189 	new_size = 2 * old_size;
190     else
191 	new_size = ((need / old_size) + 2) * old_size;
192 
193     if (s->wstart != s->wdefault) {
194 	s->wstart = erts_realloc(s->alloc_type, s->wstart,
195 				 new_size*sizeof(UWord));
196     } else {
197 	UWord* new_ptr = erts_alloc(s->alloc_type, new_size*sizeof(UWord));
198 	sys_memcpy(new_ptr, s->wstart, old_size*sizeof(UWord));
199 	s->wstart = new_ptr;
200     }
201     s->wend = s->wstart + new_size;
202     s->wsp = s->wstart + sp_offs;
203 }
204 
205 /*
206  * Helper function for the PSTACK macros defined in global.h.
207  */
208 void
209 erl_grow_pstack(ErtsPStack* s, void* default_pstack, unsigned need_bytes)
210 {
211     Uint old_size = s->size;
212     Uint new_size;
213 
214     if (need_bytes < old_size)
215 	new_size = 2 * old_size;
216     else
217 	new_size = ((need_bytes / old_size) + 2) * old_size;
218 
219     if (s->pstart != default_pstack) {
220 	s->pstart = erts_realloc(s->alloc_type, s->pstart, new_size);
221     } else {
222 	byte* new_ptr = erts_alloc(s->alloc_type, new_size);
223 	sys_memcpy(new_ptr, s->pstart, old_size);
224 	s->pstart = new_ptr;
225     }
226     s->size = new_size;
227 }
228 
229 /*
230  * Helper function for the EQUEUE macros defined in global.h.
231  */
232 
233 void
234 erl_grow_equeue(ErtsEQueue* q, Eterm* default_equeue)
235 {
236     Uint old_size = (q->end - q->start);
237     Uint new_size = old_size * 2;
238     Uint first_part = (q->end - q->front);
239     Uint second_part = (q->back - q->start);
240     Eterm* new_ptr = erts_alloc(q->alloc_type, new_size*sizeof(Eterm));
241     ASSERT(q->back == q->front);   // of course the queue is full now!
242     if (first_part > 0)
243       sys_memcpy(new_ptr, q->front, first_part*sizeof(Eterm));
244     if (second_part > 0)
245       sys_memcpy(new_ptr+first_part, q->start, second_part*sizeof(Eterm));
246     if (q->start != default_equeue)
247       erts_free(q->alloc_type, q->start);
248     q->start = new_ptr;
249     q->end = q->start + new_size;
250     q->front = q->start;
251     q->back = q->start + old_size;
252 }
253 
254 /* CTYPE macros */
255 
256 #define LATIN1
257 
258 #define IS_DIGIT(c)  ((c) >= '0' && (c) <= '9')
259 #ifdef LATIN1
260 #define IS_LOWER(c)  (((c) >= 'a' && (c) <= 'z') \
261 		      || ((c) >= 128+95 && (c) <= 255 && (c) != 247))
262 #define IS_UPPER(c)  (((c) >= 'A' && (c) <= 'Z') \
263 		      || ((c) >= 128+64 && (c) <= 128+94 && (c) != 247-32))
264 #else
265 #define IS_LOWER(c)  ((c) >= 'a' && (c) <= 'z')
266 #define IS_UPPER(c)  ((c) >= 'A' && (c) <= 'Z')
267 #endif
268 
269 #define IS_ALNUM(c)  (IS_DIGIT(c) || IS_LOWER(c) || IS_UPPER(c))
270 
271 /* We don't include 160 (non-breaking space). */
272 #define IS_SPACE(c)  (c == ' ' || c == '\n' || c == '\t' || c == '\r')
273 
274 #ifdef LATIN1
275 #define IS_CNTRL(c)  ((c) < ' ' || (c) == 127 \
276 		      || ((c) >= 128 && (c) < 128+32))
277 #else
278 /* Treat all non-ASCII as control characters */
279 #define IS_CNTRL(c)  ((c) < ' ' || (c) >= 127)
280 #endif
281 
282 #define IS_PRINT(c)  (!IS_CNTRL(c))
283 
284 /*
285  * Calculate length of a list.
286  * Returns -1 if not a proper list (i.e. not terminated with NIL)
287  */
288 Sint
289 erts_list_length(Eterm list)
290 {
291     Sint i = 0;
292 
293     while(is_list(list)) {
294 	i++;
295 	list = CDR(list_val(list));
296     }
297     if (is_not_nil(list)) {
298 	return -1;
299     }
300     return i;
301 }
302 
303 static const struct {
304     Sint64 mask;
305     int bits;
306 } fib_data[] = {{ERTS_I64_LITERAL(0x2), 1},
307 		{ERTS_I64_LITERAL(0xc), 2},
308 		{ERTS_I64_LITERAL(0xf0), 4},
309 		{ERTS_I64_LITERAL(0xff00), 8},
310 		{ERTS_I64_LITERAL(0xffff0000), 16},
311 		{ERTS_I64_LITERAL(0xffffffff00000000), 32}};
312 
313 static ERTS_INLINE int
314 fit_in_bits(Sint64 value, int start)
315 {
316     int bits = 0;
317     int i;
318 
319     for (i = start; i >= 0; i--) {
320 	if (value & fib_data[i].mask) {
321 	    value >>= fib_data[i].bits;
322 	    bits |= fib_data[i].bits;
323 	}
324     }
325 
326     bits++;
327 
328     return bits;
329 }
330 
331 int erts_fit_in_bits_int64(Sint64 value)
332 {
333     return fit_in_bits(value, 5);
334 }
335 
336 int erts_fit_in_bits_int32(Sint32 value)
337 {
338     return fit_in_bits((Sint64) (Uint32) value, 4);
339 }
340 
341 int erts_fit_in_bits_uint(Uint value)
342 {
343 #if ERTS_SIZEOF_ETERM == 4
344     return fit_in_bits((Sint64) (Uint32) value, 4);
345 #elif ERTS_SIZEOF_ETERM == 8
346     return fit_in_bits(value, 5);
347 #else
348 # error "No way, Jose"
349 #endif
350 }
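/*
 * Worked example for the loop above: fit_in_bits() reports the number of
 * bits needed to represent the value, with 0 counted as fitting in one bit.
 * E.g. erts_fit_in_bits_int64(0) == 1, erts_fit_in_bits_int64(1) == 1,
 * erts_fit_in_bits_int64(5) == 3 and erts_fit_in_bits_int64(255) == 8.
 */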
351 
352 int
353 erts_print(fmtfn_t to, void *arg, char *format, ...)
354 {
355     int res;
356     va_list arg_list;
357     va_start(arg_list, format);
358 
359     {
360 	switch ((UWord)to) {
361 	case (UWord)ERTS_PRINT_STDOUT:
362 	    res = erts_vprintf(format, arg_list);
363 	    break;
364 	case (UWord)ERTS_PRINT_STDERR:
365 	    res = erts_vfprintf(stderr, format, arg_list);
366 	    break;
367 	case (UWord)ERTS_PRINT_FILE:
368 	    res = erts_vfprintf((FILE *) arg, format, arg_list);
369 	    break;
370 	case (UWord)ERTS_PRINT_SBUF:
371 	    res = erts_vsprintf((char *) arg, format, arg_list);
372 	    break;
373 	case (UWord)ERTS_PRINT_SNBUF:
374 	    res = erts_vsnprintf(((erts_print_sn_buf *) arg)->buf,
375 				 ((erts_print_sn_buf *) arg)->size,
376 				 format,
377 				 arg_list);
378 	    break;
379 	case (UWord)ERTS_PRINT_DSBUF:
380 	    res = erts_vdsprintf((erts_dsprintf_buf_t *) arg, format, arg_list);
381 	    break;
382         case (UWord)ERTS_PRINT_FD:
383 	    res = erts_vfdprintf((int)(SWord) arg, format, arg_list);
384 	    break;
385         default:
386             res = erts_vcbprintf(to, arg, format, arg_list);
387             break;
388 	}
389     }
390 
391     va_end(arg_list);
392     return res;
393 }
394 
395 int
396 erts_putc(fmtfn_t to, void *arg, char c)
397 {
398     return erts_print(to, arg, "%c", c);
399 }
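/*
 * Usage sketch (illustrative): 'to' is either one of the ERTS_PRINT_*
 * pseudo handles dispatched on above or a real callback function, and 'arg'
 * is interpreted accordingly. Dumping a term to stderr could for example
 * look like
 *
 *     erts_print(ERTS_PRINT_STDERR, NULL, "%T\n", term);
 *
 * where "%T" is taken to be the erts_printf() format directive for an Eterm
 * (an assumption based on the erl_printf format extensions; the directive is
 * not defined in this file).
 */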
400 
401 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
402  * Some Erlang term building utility functions (to be used when performance  *
403  * isn't critical).                                                          *
404  *                                                                           *
405  * Add more functions like these here (and function prototypes in global.h)  *
406  * when needed.                                                              *
407  *                                                                           *
408 \*                                                                           */
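/*
 * A minimal sketch of how these builders are meant to be used (illustrative
 * only, kept out of the build): the same calls are made twice, first with
 * szp set to count the heap words needed, then with hpp set to actually
 * build the term on an allocated heap area. The helper name and the use of
 * HAlloc() below are assumptions for the example, not part of this file.
 */
#if 0
static Eterm
bld_example(Process *p, Uint count)
{
    Uint sz = 0;
    Eterm *hp;
    Eterm res;

    /* Pass 1: only szp is set; the builders accumulate the heap need. */
    (void) erts_bld_tuple(NULL, &sz, 2,
			  erts_bld_atom(NULL, &sz, (char *) "count"),
			  erts_bld_uint(NULL, &sz, count));

    hp = HAlloc(p, sz);

    /* Pass 2: only hpp is set; the builders write to the heap and advance hp. */
    res = erts_bld_tuple(&hp, NULL, 2,
			 erts_bld_atom(&hp, NULL, (char *) "count"),
			 erts_bld_uint(&hp, NULL, count));
    return res;
}
#endif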
409 
410 Eterm
411 erts_bld_atom(Uint **hpp, Uint *szp, char *str)
412 {
413     if (hpp)
414 	return erts_atom_put((byte *) str, sys_strlen(str), ERTS_ATOM_ENC_LATIN1, 1);
415     else
416 	return THE_NON_VALUE;
417 }
418 
419 Eterm
420 erts_bld_uint(Uint **hpp, Uint *szp, Uint ui)
421 {
422     Eterm res = THE_NON_VALUE;
423     if (IS_USMALL(0, ui)) {
424 	if (hpp)
425 	    res = make_small(ui);
426     }
427     else {
428 	if (szp)
429 	    *szp += BIG_UINT_HEAP_SIZE;
430 	if (hpp) {
431 	    res = uint_to_big(ui, *hpp);
432 	    *hpp += BIG_UINT_HEAP_SIZE;
433 	}
434     }
435     return res;
436 }
437 
438 /*
439  * Erts_bld_uword is more or less similar to erts_bld_uint, but a pointer
440  * can safely be passed.
441  */
442 
443 Eterm
444 erts_bld_uword(Uint **hpp, Uint *szp, UWord uw)
445 {
446     Eterm res = THE_NON_VALUE;
447     if (IS_USMALL(0, uw)) {
448 	if (hpp)
449 	    res = make_small((Uint) uw);
450     }
451     else {
452 	if (szp)
453 	    *szp += BIG_UWORD_HEAP_SIZE(uw);
454 	if (hpp) {
455 	    res = uword_to_big(uw, *hpp);
456 	    *hpp += BIG_UWORD_HEAP_SIZE(uw);
457 	}
458     }
459     return res;
460 }
461 
462 
463 Eterm
464 erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64)
465 {
466     Eterm res = THE_NON_VALUE;
467     if (IS_USMALL(0, ui64)) {
468 	if (hpp)
469 	    res = make_small((Uint) ui64);
470     }
471     else {
472 	if (szp)
473 	    *szp += ERTS_UINT64_HEAP_SIZE(ui64);
474 	if (hpp)
475 	    res = erts_uint64_to_big(ui64, hpp);
476     }
477     return res;
478 }
479 
480 Eterm
481 erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64)
482 {
483     Eterm res = THE_NON_VALUE;
484     if (IS_SSMALL(si64)) {
485 	if (hpp)
486 	    res = make_small((Sint) si64);
487     }
488     else {
489 	if (szp)
490 	    *szp += ERTS_SINT64_HEAP_SIZE(si64);
491 	if (hpp)
492 	    res = erts_sint64_to_big(si64, hpp);
493     }
494     return res;
495 }
496 
497 
498 Eterm
499 erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr)
500 {
501     Eterm res = THE_NON_VALUE;
502     if (szp)
503 	*szp += 2;
504     if (hpp) {
505 	res = CONS(*hpp, car, cdr);
506 	*hpp += 2;
507     }
508     return res;
509 }
510 
511 Eterm
512 erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...)
513 {
514     Eterm res = THE_NON_VALUE;
515 
516     ASSERT(arity < (((Uint)1) << (sizeof(Uint)*8 - _HEADER_ARITY_OFFS)));
517 
518     if (szp)
519 	*szp += arity + 1;
520     if (hpp) {
521 	res = make_tuple(*hpp);
522 	*((*hpp)++) = make_arityval(arity);
523 
524 	if (arity > 0) {
525 	    Uint i;
526 	    va_list argp;
527 
528 	    va_start(argp, arity);
529 	    for (i = 0; i < arity; i++) {
530                 *((*hpp)++) = va_arg(argp, Eterm);
531             }
532 	    va_end(argp);
533 	}
534     }
535     return res;
536 }
537 
538 
539 Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[])
540 {
541     Eterm res = THE_NON_VALUE;
542     /*
543      * Note callers expect that 'terms' is *not* accessed if hpp == NULL.
544      */
545 
546     ASSERT(arity < (((Uint)1) << (sizeof(Uint)*8 - _HEADER_ARITY_OFFS)));
547 
548     if (szp)
549 	*szp += arity + 1;
550     if (hpp) {
551 
552 	res = make_tuple(*hpp);
553 	*((*hpp)++) = make_arityval(arity);
554 
555 	if (arity > 0) {
556 	    Uint i;
557 	    for (i = 0; i < arity; i++)
558 		*((*hpp)++) = terms[i];
559 	}
560     }
561     return res;
562 }
563 
564 Eterm
565 erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len)
566 {
567     Eterm res = THE_NON_VALUE;
568     Sint i = len;
569     if (szp)
570 	*szp += len*2;
571     if (hpp) {
572 	res = NIL;
573 	while (--i >= 0) {
574 	    res = CONS(*hpp, make_small((byte) str[i]), res);
575 	    *hpp += 2;
576 	}
577     }
578     return res;
579 }
580 
581 Eterm
582 erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[])
583 {
584     Eterm list = THE_NON_VALUE;
585     if (szp)
586 	*szp += 2*length;
587     if (hpp) {
588 	Sint i = length;
589 	list = NIL;
590 
591 	while (--i >= 0) {
592 	    list = CONS(*hpp, terms[i], list);
593 	    *hpp += 2;
594 	}
595     }
596     return list;
597 }
598 
599 Eterm
600 erts_bld_2tup_list(Uint **hpp, Uint *szp,
601 		   Sint length, Eterm terms1[], Uint terms2[])
602 {
603     Eterm res = THE_NON_VALUE;
604     if (szp)
605 	*szp += 5*length;
606     if (hpp) {
607 	Sint i = length;
608 	res = NIL;
609 
610 	while (--i >= 0) {
611 	    res = CONS(*hpp+3, TUPLE2(*hpp, terms1[i], terms2[i]), res);
612 	    *hpp += 5;
613 	}
614     }
615     return res;
616 }
617 
618 Eterm
619 erts_bld_atom_uword_2tup_list(Uint **hpp, Uint *szp,
620                               Sint length, Eterm atoms[], UWord uints[])
621 {
622     Sint i;
623     Eterm res = THE_NON_VALUE;
624     if (szp) {
625 	*szp += 5*length;
626 	i = length;
627 	while (--i >= 0) {
628 	    if (!IS_USMALL(0, uints[i]))
629 		*szp += BIG_UINT_HEAP_SIZE;
630 	}
631     }
632     if (hpp) {
633 	i = length;
634 	res = NIL;
635 
636 	while (--i >= 0) {
637 	    Eterm ui;
638 
639 	    if (IS_USMALL(0, uints[i]))
640 		ui = make_small(uints[i]);
641 	    else {
642 		ui = uint_to_big(uints[i], *hpp);
643 		*hpp += BIG_UINT_HEAP_SIZE;
644 	    }
645 
646 	    res = CONS(*hpp+3, TUPLE2(*hpp, atoms[i], ui), res);
647 	    *hpp += 5;
648 	}
649     }
650     return res;
651 }
652 
653 Eterm
654 erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
655 			      Eterm atoms[], Uint uints1[], Uint uints2[])
656 {
657     Sint i;
658     Eterm res = THE_NON_VALUE;
659     if (szp) {
660 	*szp += 6*length;
661 	i = length;
662 	while (--i >= 0) {
663 	    if (!IS_USMALL(0, uints1[i]))
664 		*szp += BIG_UINT_HEAP_SIZE;
665 	    if (!IS_USMALL(0, uints2[i]))
666 		*szp += BIG_UINT_HEAP_SIZE;
667 	}
668     }
669     if (hpp) {
670 	i = length;
671 	res = NIL;
672 
673 	while (--i >= 0) {
674 	    Eterm ui1;
675 	    Eterm ui2;
676 
677 	    if (IS_USMALL(0, uints1[i]))
678 		ui1 = make_small(uints1[i]);
679 	    else {
680 		ui1 = uint_to_big(uints1[i], *hpp);
681 		*hpp += BIG_UINT_HEAP_SIZE;
682 	    }
683 
684 	    if (IS_USMALL(0, uints2[i]))
685 		ui2 = make_small(uints2[i]);
686 	    else {
687 		ui2 = uint_to_big(uints2[i], *hpp);
688 		*hpp += BIG_UINT_HEAP_SIZE;
689 	    }
690 
691 	    res = CONS(*hpp+4, TUPLE3(*hpp, atoms[i], ui1, ui2), res);
692 	    *hpp += 6;
693 	}
694     }
695     return res;
696 }
697 
698 /*                                                                           *\
699  *                                                                           *
700 \* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
701 
702 /* make a hash index from an erlang term */
703 
704 /*
705 ** There are two hash functions.
706 **
707 ** make_hash: A hash function that will give the same values for the same
708 ** terms regardless of the internal representation. Small integers are
709 ** hashed using the same algorithm as bignums and bignums are hashed
710 ** independent of the CPU endianness.
711 ** Make_hash also hashes pids, ports and references like 32 bit numbers
712 ** (but with different constants).
713 ** make_hash() is called from the bif erlang:phash/2
714 **
715 ** The idea behind the hash algorithm is to produce values suitable for
716 ** linear dynamic hashing. We cannot choose the range at all while hashing
717 ** (it's not even supplied to the hashing functions). The good old algorithm
718 ** [H = H*C+X mod M, where H is the hash value, C is a "random" constant (or M),
719 ** M is the range, preferably a prime, and X is each byte value] is therefore
720 ** modified to:
721 ** H = H*C+X mod 2^32, where C is a large prime. This gives acceptable
722 ** "spreading" of the hashes, so that later modulo calculations also will give
723 ** acceptable "spreading" in the range.
724 ** We really need to hash on bytes, otherwise the
725 ** upper bytes of a word will be less significant than the lower ones. That's
726 ** not acceptable at all. For internal use one could maybe optimize by using
727 ** another hash function, that is less strict but faster. That is, however, not
728 ** implemented.
729 **
730 ** Short semi-formal description of make_hash:
731 **
732 ** In make_hash, the number N is treated like this:
733 **  Abs(N) is hashed bytewise with the least significant byte, B(0), first.
734 **  The number of bytes (J) to calculate hash on in N is
735 **  (the number of _32_ bit words needed to store the unsigned
736 **   value of abs(N)) * 4.
737 **  X = FUNNY_NUMBER2
738 **  If N < 0, Y = FUNNY_NUMBER4 else Y = FUNNY_NUMBER3.
739 **  The hash value is Y*h(J) mod 2^32 where h(J) is calculated like
740 **  h(0) = <initial hash>
741 **  h(i) = h(i-1)*X + B(i-1)
742 ** The above should hold regardless of internal representation.
743 ** Pids are hashed like small numbers but with different constants, as are
744 ** ports.
745 ** References are hashed like ports but only on the least significant byte.
746 ** Binaries are hashed on all bytes (not on the 15 first as in
747 ** make_broken_hash()).
748 ** Bytes in lists (possibly text strings) use a simpler multiplication inlined
749 ** in the handling of lists, that is an optimization.
750 ** Everything else is like in the old hash (make_broken_hash()).
751 **
752 ** make_hash2() is faster than make_hash, in particular for bignums
753 ** and binaries, and produces better hash values.
754 */
755 
756 /* some prime numbers just above 2 ^ 28 */
757 
758 #define FUNNY_NUMBER1  268440163
759 #define FUNNY_NUMBER2  268439161
760 #define FUNNY_NUMBER3  268435459
761 #define FUNNY_NUMBER4  268436141
762 #define FUNNY_NUMBER5  268438633
763 #define FUNNY_NUMBER6  268437017
764 #define FUNNY_NUMBER7  268438039
765 #define FUNNY_NUMBER8  268437511
766 #define FUNNY_NUMBER9  268439627
767 #define FUNNY_NUMBER10 268440479
768 #define FUNNY_NUMBER11 268440577
769 #define FUNNY_NUMBER12 268440581
770 #define FUNNY_NUMBER13 268440593
771 #define FUNNY_NUMBER14 268440611
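/*
 * Illustration of the semi-formal description above (kept out of the build):
 * the bytes of a 32-bit magnitude are hashed least significant byte first
 * with X = FUNNY_NUMBER2, and the sign factor Y is applied afterwards. This
 * is only a sketch of the recurrence h(i) = h(i-1)*X + B(i-1); the real code
 * path is the SMALL_DEF case in make_hash() below.
 */
#if 0
static Uint32
small_hash_sketch(Sint32 n, Uint32 h)
{
    Uint32 v = (n < 0) ? -(Uint32) n : (Uint32) n;
    int j;

    for (j = 0; j < 4; j++) {       /* J = 4 bytes for one 32-bit word */
	h = h * FUNNY_NUMBER2 + (v & 0xff);
	v >>= 8;
    }
    return h * (n < 0 ? FUNNY_NUMBER4 : FUNNY_NUMBER3);
}
#endif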
772 
773 static Uint32
774 hash_binary_bytes(Eterm bin, Uint sz, Uint32 hash)
775 {
776     byte* ptr;
777     Uint bitoffs;
778     Uint bitsize;
779 
780     ERTS_GET_BINARY_BYTES(bin, ptr, bitoffs, bitsize);
781     if (bitoffs == 0) {
782 	while (sz--) {
783 	    hash = hash*FUNNY_NUMBER1 + *ptr++;
784 	}
785 	if (bitsize > 0) {
786 	    byte b = *ptr;
787 
788 	    b >>= 8 - bitsize;
789 	    hash = (hash*FUNNY_NUMBER1 + b) * FUNNY_NUMBER12 + bitsize;
790 	}
791     } else {
792 	Uint previous = *ptr++;
793 	Uint b;
794 	Uint lshift = bitoffs;
795 	Uint rshift = 8 - lshift;
796 
797 	while (sz--) {
798 	    b = (previous << lshift) & 0xFF;
799 	    previous = *ptr++;
800 	    b |= previous >> rshift;
801 	    hash = hash*FUNNY_NUMBER1 + b;
802 	}
803 	if (bitsize > 0) {
804 	    b = (previous << lshift) & 0xFF;
805 	    previous = *ptr++;
806 	    b |= previous >> rshift;
807 
808 	    b >>= 8 - bitsize;
809 	    hash = (hash*FUNNY_NUMBER1 + b) * FUNNY_NUMBER12 + bitsize;
810 	}
811     }
812     return hash;
813 }
814 
815 Uint32 make_hash(Eterm term_arg)
816 {
817     DECLARE_WSTACK(stack);
818     Eterm term = term_arg;
819     Eterm hash = 0;
820     unsigned op;
821 
822 #define MAKE_HASH_TUPLE_OP      (FIRST_VACANT_TAG_DEF)
823 #define MAKE_HASH_TERM_ARRAY_OP (FIRST_VACANT_TAG_DEF+1)
824 #define MAKE_HASH_CDR_PRE_OP    (FIRST_VACANT_TAG_DEF+2)
825 #define MAKE_HASH_CDR_POST_OP   (FIRST_VACANT_TAG_DEF+3)
826 
827     /*
828     ** Convenience macro for calculating a bytewise hash on an unsigned 32 bit
829     ** integer.
830     ** If the endianness is known, we could be smarter here,
831     ** but that gives no significant speedup (on a sparc at least)
832     */
833 #define UINT32_HASH_STEP(Expr, Prime1)					\
834 	do {								\
835 	    Uint32 x = (Uint32) (Expr);	                                \
836 	    hash =							\
837 		(((((hash)*(Prime1) + (x & 0xFF)) * (Prime1) +	        \
838 		((x >> 8) & 0xFF)) * (Prime1) +			        \
839 		((x >> 16) & 0xFF)) * (Prime1) +			\
840 		 (x >> 24));						\
841 	} while(0)
842 
843 #define UINT32_HASH_RET(Expr, Prime1, Prime2)           \
844 	UINT32_HASH_STEP(Expr, Prime1);			\
845         hash = hash * (Prime2);				\
846         break
847 
848 
849     /*
850      * Significant additions needed for real 64 bit port with larger fixnums.
851      */
852 
853     /*
854      * Note: for the simple 64-bit port that does not utilize the
855      * larger word size, this function will work without modification.
856      */
857 tail_recur:
858     op = tag_val_def(term);
859 
860     for (;;) {
861     switch (op) {
862     case NIL_DEF:
863 	hash = hash*FUNNY_NUMBER3 + 1;
864 	break;
865     case ATOM_DEF:
866 	hash = hash*FUNNY_NUMBER1 +
867 	    (atom_tab(atom_val(term))->slot.bucket.hvalue);
868 	break;
869     case SMALL_DEF:
870 	{
871 	    Sint y1 = signed_val(term);
872 	    Uint y2 = y1 < 0 ? -(Uint)y1 : y1;
873 
874 	    UINT32_HASH_STEP(y2, FUNNY_NUMBER2);
875 #if defined(ARCH_64)
876 	    if (y2 >> 32)
877 		UINT32_HASH_STEP(y2 >> 32, FUNNY_NUMBER2);
878 #endif
879 	    hash *= (y1 < 0 ? FUNNY_NUMBER4 : FUNNY_NUMBER3);
880 	    break;
881 	}
882     case BINARY_DEF:
883 	{
884 	    Uint sz = binary_size(term);
885 
886 	    hash = hash_binary_bytes(term, sz, hash);
887 	    hash = hash*FUNNY_NUMBER4 + sz;
888 	    break;
889 	}
890     case EXPORT_DEF:
891 	{
892 	    Export* ep = *((Export **) (export_val(term) + 1));
893 
894 	    hash = hash * FUNNY_NUMBER11 + ep->info.mfa.arity;
895 	    hash = hash*FUNNY_NUMBER1 +
896 		(atom_tab(atom_val(ep->info.mfa.module))->slot.bucket.hvalue);
897 	    hash = hash*FUNNY_NUMBER1 +
898 		(atom_tab(atom_val(ep->info.mfa.function))->slot.bucket.hvalue);
899 	    break;
900 	}
901 
902     case FUN_DEF:
903 	{
904 	    ErlFunThing* funp = (ErlFunThing *) fun_val(term);
905 	    Uint num_free = funp->num_free;
906 
907 	    hash = hash * FUNNY_NUMBER10 + num_free;
908 	    hash = hash*FUNNY_NUMBER1 +
909 		(atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue);
910 	    hash = hash*FUNNY_NUMBER2 + funp->fe->old_index;
911 	    hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
912 	    if (num_free > 0) {
913 		if (num_free > 1) {
914 		    WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_TERM_ARRAY_OP);
915 		}
916 		term = funp->env[0];
917 		goto tail_recur;
918 	    }
919 	    break;
920 	}
921     case PID_DEF:
922 	UINT32_HASH_RET(internal_pid_number(term),FUNNY_NUMBER5,FUNNY_NUMBER6);
923     case EXTERNAL_PID_DEF:
924 	UINT32_HASH_RET(external_pid_number(term),FUNNY_NUMBER5,FUNNY_NUMBER6);
925     case PORT_DEF:
926 	UINT32_HASH_RET(internal_port_number(term),FUNNY_NUMBER9,FUNNY_NUMBER10);
927     case EXTERNAL_PORT_DEF:
928 	UINT32_HASH_RET(external_port_number(term),FUNNY_NUMBER9,FUNNY_NUMBER10);
929     case REF_DEF:
930 	UINT32_HASH_RET(internal_ref_numbers(term)[0],FUNNY_NUMBER9,FUNNY_NUMBER10);
931     case EXTERNAL_REF_DEF:
932 	UINT32_HASH_RET(external_ref_numbers(term)[0],FUNNY_NUMBER9,FUNNY_NUMBER10);
933     case FLOAT_DEF:
934 	{
935             FloatDef ff;
936             GET_DOUBLE(term, ff);
937             if (ff.fd == 0.0f) {
938                 /* ensure positive 0.0 */
939                 ff.fd = erts_get_positive_zero_float();
940             }
941             hash = hash*FUNNY_NUMBER6 + (ff.fw[0] ^ ff.fw[1]);
942             break;
943 	}
944     case MAKE_HASH_CDR_PRE_OP:
945 	term = (Eterm) WSTACK_POP(stack);
946 	if (is_not_list(term)) {
947 	    WSTACK_PUSH(stack, (UWord) MAKE_HASH_CDR_POST_OP);
948 	    goto tail_recur;
949 	}
950 	/* fall through */
951     case LIST_DEF:
952 	{
953 	    Eterm* list = list_val(term);
954 	    while(is_byte(*list)) {
955 		/* Optimization for strings.
956 		** Note that this hash is different from a 'small' hash,
957 		** as multiplications on a Sparc are so slow.
958 		*/
959 		hash = hash*FUNNY_NUMBER2 + unsigned_val(*list);
960 
961 		if (is_not_list(CDR(list))) {
962 		    WSTACK_PUSH(stack, MAKE_HASH_CDR_POST_OP);
963 		    term = CDR(list);
964 		    goto tail_recur;
965 		}
966 		list = list_val(CDR(list));
967 	    }
968 	    WSTACK_PUSH2(stack, CDR(list), MAKE_HASH_CDR_PRE_OP);
969 	    term = CAR(list);
970 	    goto tail_recur;
971 	}
972     case MAKE_HASH_CDR_POST_OP:
973 	hash *= FUNNY_NUMBER8;
974 	break;
975 
976     case BIG_DEF:
977 	/* Note that this is the exact same thing as the hashing of smalls.*/
978 	{
979 	    Eterm* ptr  = big_val(term);
980 	    Uint n = BIG_SIZE(ptr);
981 	    Uint k = n-1;
982 	    ErtsDigit d;
983 	    int is_neg = BIG_SIGN(ptr);
984 	    Uint i;
985 	    int j;
986 
987 	    for (i = 0; i < k; i++)  {
988 		d = BIG_DIGIT(ptr, i);
989 		for(j = 0; j < sizeof(ErtsDigit); ++j) {
990 		    hash = (hash*FUNNY_NUMBER2) + (d & 0xff);
991 		    d >>= 8;
992 		}
993 	    }
994 	    d = BIG_DIGIT(ptr, k);
995 	    k = sizeof(ErtsDigit);
996 #if defined(ARCH_64)
997 	    if (!(d >> 32))
998 		k /= 2;
999 #endif
1000 	    for(j = 0; j < (int)k; ++j) {
1001 		hash = (hash*FUNNY_NUMBER2) + (d & 0xff);
1002 		d >>= 8;
1003 	    }
1004 	    hash *= is_neg ? FUNNY_NUMBER4 : FUNNY_NUMBER3;
1005 	    break;
1006 	}
1007     case MAP_DEF:
1008         hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + make_hash2(term);
1009         break;
1010     case TUPLE_DEF:
1011 	{
1012 	    Eterm* ptr = tuple_val(term);
1013 	    Uint arity = arityval(*ptr);
1014 
1015 	    WSTACK_PUSH3(stack, (UWord) arity, (UWord)(ptr+1), (UWord) arity);
1016 	    op = MAKE_HASH_TUPLE_OP;
1017 	}/*fall through*/
1018     case MAKE_HASH_TUPLE_OP:
1019     case MAKE_HASH_TERM_ARRAY_OP:
1020 	{
1021 	    Uint i = (Uint) WSTACK_POP(stack);
1022 	    Eterm* ptr = (Eterm*) WSTACK_POP(stack);
1023 	    if (i != 0) {
1024 		term = *ptr;
1025 		WSTACK_PUSH3(stack, (UWord)(ptr+1), (UWord) i-1, (UWord) op);
1026 		goto tail_recur;
1027 	    }
1028 	    if (op == MAKE_HASH_TUPLE_OP) {
1029 		Uint32 arity = (Uint32) WSTACK_POP(stack);
1030 		hash = hash*FUNNY_NUMBER9 + arity;
1031 	    }
1032 	    break;
1033 	}
1034 
1035     default:
1036 	erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash(0x%X,0x%X)\n", term, op);
1037 	return 0;
1038       }
1039       if (WSTACK_ISEMPTY(stack)) break;
1040       op = WSTACK_POP(stack);
1041     }
1042     DESTROY_WSTACK(stack);
1043     return hash;
1044 
1045 #undef MAKE_HASH_TUPLE_OP
1046 #undef MAKE_HASH_TERM_ARRAY_OP
1047 #undef MAKE_HASH_CDR_PRE_OP
1048 #undef MAKE_HASH_CDR_POST_OP
1049 #undef UINT32_HASH_STEP
1050 #undef UINT32_HASH_RET
1051 }
1052 
1053 
1054 
1055 /* Hash function suggested by Bob Jenkins. */
1056 
1057 #define MIX(a,b,c)                 \
1058 do {                               \
1059   a -= b; a -= c; a ^= (c>>13);    \
1060   b -= c; b -= a; b ^= (a<<8);     \
1061   c -= a; c -= b; c ^= (b>>13);    \
1062   a -= b; a -= c; a ^= (c>>12);    \
1063   b -= c; b -= a; b ^= (a<<16);    \
1064   c -= a; c -= b; c ^= (b>>5);     \
1065   a -= b; a -= c; a ^= (c>>3);     \
1066   b -= c; b -= a; b ^= (a<<10);    \
1067   c -= a; c -= b; c ^= (b>>15);    \
1068 } while(0)
1069 
1070 #define HCONST 0x9e3779b9UL /* the golden ratio; an arbitrary value */
1071 
1072 static Uint32
1073 block_hash(byte *k, Uint length, Uint32 initval)
1074 {
1075    Uint32 a,b,c;
1076    Uint len;
1077 
1078    /* Set up the internal state */
1079    len = length;
1080    a = b = HCONST;
1081    c = initval;           /* the previous hash value */
1082 
1083    while (len >= 12)
1084    {
1085       a += (k[0] +((Uint32)k[1]<<8) +((Uint32)k[2]<<16) +((Uint32)k[3]<<24));
1086       b += (k[4] +((Uint32)k[5]<<8) +((Uint32)k[6]<<16) +((Uint32)k[7]<<24));
1087       c += (k[8] +((Uint32)k[9]<<8) +((Uint32)k[10]<<16)+((Uint32)k[11]<<24));
1088       MIX(a,b,c);
1089       k += 12; len -= 12;
1090    }
1091 
1092    c += length;
1093    switch(len)              /* all the case statements fall through */
1094    {
1095    case 11: c+=((Uint32)k[10]<<24);
1096    case 10: c+=((Uint32)k[9]<<16);
1097    case 9 : c+=((Uint32)k[8]<<8);
1098       /* the first byte of c is reserved for the length */
1099    case 8 : b+=((Uint32)k[7]<<24);
1100    case 7 : b+=((Uint32)k[6]<<16);
1101    case 6 : b+=((Uint32)k[5]<<8);
1102    case 5 : b+=k[4];
1103    case 4 : a+=((Uint32)k[3]<<24);
1104    case 3 : a+=((Uint32)k[2]<<16);
1105    case 2 : a+=((Uint32)k[1]<<8);
1106    case 1 : a+=k[0];
1107      /* case 0: nothing left to add */
1108    }
1109    MIX(a,b,c);
1110    return c;
1111 }
1112 
1113 Uint32
1114 make_hash2(Eterm term)
1115 {
1116     Uint32 hash;
1117     Uint32 hash_xor_pairs;
1118     DeclareTmpHeapNoproc(tmp_big,2);
1119 
1120     ERTS_UNDEF(hash_xor_pairs, 0);
1121 
1122 /* (HCONST * {2, ..., 22}) mod 2^32 */
1123 #define HCONST_2 0x3c6ef372UL
1124 #define HCONST_3 0xdaa66d2bUL
1125 #define HCONST_4 0x78dde6e4UL
1126 #define HCONST_5 0x1715609dUL
1127 #define HCONST_6 0xb54cda56UL
1128 #define HCONST_7 0x5384540fUL
1129 #define HCONST_8 0xf1bbcdc8UL
1130 #define HCONST_9 0x8ff34781UL
1131 #define HCONST_10 0x2e2ac13aUL
1132 #define HCONST_11 0xcc623af3UL
1133 #define HCONST_12 0x6a99b4acUL
1134 #define HCONST_13 0x08d12e65UL
1135 #define HCONST_14 0xa708a81eUL
1136 #define HCONST_15 0x454021d7UL
1137 #define HCONST_16 0xe3779b90UL
1138 #define HCONST_17 0x81af1549UL
1139 #define HCONST_18 0x1fe68f02UL
1140 #define HCONST_19 0xbe1e08bbUL
1141 #define HCONST_20 0x5c558274UL
1142 #define HCONST_21 0xfa8cfc2dUL
1143 #define HCONST_22 0x98c475e6UL
1144 
1145 #define HASH_MAP_TAIL (_make_header(1,_TAG_HEADER_REF))
1146 #define HASH_MAP_PAIR (_make_header(2,_TAG_HEADER_REF))
1147 #define HASH_CDR      (_make_header(3,_TAG_HEADER_REF))
1148 
1149 #define UINT32_HASH_2(Expr1, Expr2, AConst)       \
1150          do {                                     \
1151 	    Uint32 a,b;                           \
1152 	    a = AConst + (Uint32) (Expr1);        \
1153 	    b = AConst + (Uint32) (Expr2);        \
1154 	    MIX(a,b,hash);                        \
1155 	 } while(0)
1156 
1157 #define UINT32_HASH(Expr, AConst) UINT32_HASH_2(Expr, 0, AConst)
1158 
1159 #define SINT32_HASH(Expr, AConst)                 \
1160 	do {					  \
1161             Sint32 y = (Sint32) (Expr);           \
1162 	    if (y < 0) {			  \
1163 		UINT32_HASH(-y, AConst);          \
1164                 /* Negative numbers are unnecessarily mixed twice. */ \
1165 	    }                                     \
1166 	    UINT32_HASH(y, AConst);               \
1167 	} while(0)
1168 
1169 #define IS_SSMALL28(x) (((Uint) (((x) >> (28-1)) + 1)) < 2)
1170 
1171 #ifdef ARCH_64
1172 #  define POINTER_HASH(Ptr, AConst) UINT32_HASH_2((Uint32)(UWord)(Ptr), (((UWord)(Ptr)) >> 32), AConst)
1173 #else
1174 #  define POINTER_HASH(Ptr, AConst) UINT32_HASH(Ptr, AConst)
1175 #endif
1176 
1177     /* Optimization. Simple cases before declaration of estack. */
1178     if (primary_tag(term) == TAG_PRIMARY_IMMED1) {
1179 	switch (term & _TAG_IMMED1_MASK) {
1180 	case _TAG_IMMED1_IMMED2:
1181 	    switch (term & _TAG_IMMED2_MASK) {
1182 	    case _TAG_IMMED2_ATOM:
1183 		/* Fast, but the poor hash value should be mixed. */
1184 		return atom_tab(atom_val(term))->slot.bucket.hvalue;
1185 	    }
1186 	    break;
1187 	case _TAG_IMMED1_SMALL:
1188 	  {
1189 	      Sint x = signed_val(term);
1190 
1191 	      if (SMALL_BITS > 28 && !IS_SSMALL28(x)) {
1192 		  term = small_to_big(x, tmp_big);
1193 		  break;
1194 	      }
1195 	      hash = 0;
1196 	      SINT32_HASH(x, HCONST);
1197 	      return hash;
1198 	  }
1199 	}
1200     };
1201     {
1202     Eterm tmp;
1203     DECLARE_ESTACK(s);
1204 
1205     UseTmpHeapNoproc(2);
1206     hash = 0;
1207     for (;;) {
1208 	switch (primary_tag(term)) {
1209 	case TAG_PRIMARY_LIST:
1210 	{
1211 	    int c = 0;
1212 	    Uint32 sh = 0;
1213 	    Eterm* ptr = list_val(term);
1214 	    while (is_byte(*ptr)) {
1215 		/* Optimization for strings. */
1216 		sh = (sh << 8) + unsigned_val(*ptr);
1217 		if (c == 3) {
1218 		    UINT32_HASH(sh, HCONST_4);
1219 		    c = sh = 0;
1220 		} else {
1221 		    c++;
1222 		}
1223 		term = CDR(ptr);
1224 		if (is_not_list(term))
1225 		    break;
1226 		ptr = list_val(term);
1227 	    }
1228 	    if (c > 0)
1229 		UINT32_HASH(sh, HCONST_4);
1230 	    if (is_list(term)) {
1231 		tmp = CDR(ptr);
1232                 ESTACK_PUSH(s, tmp);
1233 		term = CAR(ptr);
1234 	    }
1235 	}
1236 	break;
1237 	case TAG_PRIMARY_BOXED:
1238 	{
1239 	    Eterm hdr = *boxed_val(term);
1240 	    ASSERT(is_header(hdr));
1241 	    switch (hdr & _TAG_HEADER_MASK) {
1242 	    case ARITYVAL_SUBTAG:
1243 	    {
1244 		int i;
1245 		int arity = header_arity(hdr);
1246 		Eterm* elem = tuple_val(term);
1247 		UINT32_HASH(arity, HCONST_9);
1248 		if (arity == 0) /* Empty tuple */
1249 		    goto hash2_common;
1250 		for (i = arity; ; i--) {
1251 		    term = elem[i];
1252                     if (i == 1)
1253                         break;
1254                     ESTACK_PUSH(s, term);
1255 		}
1256 	    }
1257 	    break;
1258             case MAP_SUBTAG:
1259             {
1260                 Eterm* ptr = boxed_val(term) + 1;
1261                 Uint size;
1262                 int i;
1263                 switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
1264                 case HAMT_SUBTAG_HEAD_FLATMAP:
1265                 {
1266                     flatmap_t *mp = (flatmap_t *)flatmap_val(term);
1267                     Eterm *ks = flatmap_get_keys(mp);
1268                     Eterm *vs = flatmap_get_values(mp);
1269                     size      = flatmap_get_size(mp);
1270                     UINT32_HASH(size, HCONST_16);
1271                     if (size == 0)
1272                         goto hash2_common;
1273 
1274                     /* We want a portable hash function that is *independent* of
1275                      * the order in which keys and values are encountered.
1276                      * We therefore calculate context independent hashes for all
1277                      * key-value pairs and then xor them together.
1278                      */
1279                     ESTACK_PUSH(s, hash_xor_pairs);
1280                     ESTACK_PUSH(s, hash);
1281                     ESTACK_PUSH(s, HASH_MAP_TAIL);
1282                     hash = 0;
1283                     hash_xor_pairs = 0;
1284                     for (i = size - 1; i >= 0; i--) {
1285                         ESTACK_PUSH(s, HASH_MAP_PAIR);
1286                         ESTACK_PUSH(s, vs[i]);
1287                         ESTACK_PUSH(s, ks[i]);
1288                     }
1289                     goto hash2_common;
1290                 }
1291 
1292                 case HAMT_SUBTAG_HEAD_ARRAY:
1293                 case HAMT_SUBTAG_HEAD_BITMAP:
1294                     size = *ptr++;
1295                     UINT32_HASH(size, HCONST_16);
1296                     if (size == 0)
1297                         goto hash2_common;
1298                     ESTACK_PUSH(s, hash_xor_pairs);
1299                     ESTACK_PUSH(s, hash);
1300                     ESTACK_PUSH(s, HASH_MAP_TAIL);
1301                     hash = 0;
1302                     hash_xor_pairs = 0;
1303                 }
1304                 switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
1305                 case HAMT_SUBTAG_HEAD_ARRAY:
1306                     i = 16;
1307                     break;
1308                 case HAMT_SUBTAG_HEAD_BITMAP:
1309                 case HAMT_SUBTAG_NODE_BITMAP:
1310                     i = hashmap_bitcount(MAP_HEADER_VAL(hdr));
1311                     break;
1312                 default:
1313                     erts_exit(ERTS_ERROR_EXIT, "bad header");
1314                 }
1315                 while (i) {
1316                     if (is_list(*ptr)) {
1317                         Eterm* cons = list_val(*ptr);
1318                         ESTACK_PUSH(s, HASH_MAP_PAIR);
1319                         ESTACK_PUSH(s, CDR(cons));
1320                         ESTACK_PUSH(s, CAR(cons));
1321                     }
1322                     else {
1323                         ASSERT(is_boxed(*ptr));
1324                         ESTACK_PUSH(s, *ptr);
1325                     }
1326                     i--; ptr++;
1327                 }
1328                 goto hash2_common;
1329             }
1330             break;
1331 	    case EXPORT_SUBTAG:
1332 	    {
1333 		Export* ep = *((Export **) (export_val(term) + 1));
1334 		UINT32_HASH_2
1335 		    (ep->info.mfa.arity,
1336 		     atom_tab(atom_val(ep->info.mfa.module))->slot.bucket.hvalue,
1337 		     HCONST);
1338 		UINT32_HASH
1339 		    (atom_tab(atom_val(ep->info.mfa.function))->slot.bucket.hvalue,
1340 		     HCONST_14);
1341 		goto hash2_common;
1342 	    }
1343 
1344 	    case FUN_SUBTAG:
1345 	    {
1346 		ErlFunThing* funp = (ErlFunThing *) fun_val(term);
1347 		Uint num_free = funp->num_free;
1348 		UINT32_HASH_2
1349 		    (num_free,
1350 		     atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue,
1351 		     HCONST);
1352 		UINT32_HASH_2
1353 		    (funp->fe->old_index, funp->fe->old_uniq, HCONST);
1354 		if (num_free == 0) {
1355 		    goto hash2_common;
1356 		} else {
1357 		    Eterm* bptr = funp->env + num_free - 1;
1358 		    while (num_free-- > 1) {
1359 			term = *bptr--;
1360 			ESTACK_PUSH(s, term);
1361 		    }
1362 		    term = *bptr;
1363 		}
1364 	    }
1365 	    break;
1366 	    case REFC_BINARY_SUBTAG:
1367 	    case HEAP_BINARY_SUBTAG:
1368 	    case SUB_BINARY_SUBTAG:
1369 	    {
1370 		byte* bptr;
1371 		unsigned sz = binary_size(term);
1372 		Uint32 con = HCONST_13 + hash;
1373 		Uint bitoffs;
1374 		Uint bitsize;
1375 
1376 		ERTS_GET_BINARY_BYTES(term, bptr, bitoffs, bitsize);
1377 		if (sz == 0 && bitsize == 0) {
1378 		    hash = con;
1379 		} else {
1380 		    if (bitoffs == 0) {
1381 			hash = block_hash(bptr, sz, con);
1382 			if (bitsize > 0) {
1383 			    UINT32_HASH_2(bitsize, (bptr[sz] >> (8 - bitsize)),
1384 					  HCONST_15);
1385 			}
1386 		    } else {
1387 			byte* buf = (byte *) erts_alloc(ERTS_ALC_T_TMP,
1388 							sz + (bitsize != 0));
1389 			erts_copy_bits(bptr, bitoffs, 1, buf, 0, 1, sz*8+bitsize);
1390 			hash = block_hash(buf, sz, con);
1391 			if (bitsize > 0) {
1392 			    UINT32_HASH_2(bitsize, (buf[sz] >> (8 - bitsize)),
1393 					  HCONST_15);
1394 			}
1395 			erts_free(ERTS_ALC_T_TMP, (void *) buf);
1396 		    }
1397 		}
1398 		goto hash2_common;
1399 	    }
1400 	    break;
1401 	    case POS_BIG_SUBTAG:
1402 	    case NEG_BIG_SUBTAG:
1403 	    {
1404 		Eterm* ptr = big_val(term);
1405 		Uint i = 0;
1406 		Uint n = BIG_SIZE(ptr);
1407 		Uint32 con = BIG_SIGN(ptr) ? HCONST_10 : HCONST_11;
1408 #if D_EXP == 16
1409 		do {
1410 		    Uint32 x, y;
1411 		    x = i < n ? BIG_DIGIT(ptr, i++) : 0;
1412 		    x += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
1413 		    y = i < n ? BIG_DIGIT(ptr, i++) : 0;
1414 		    y += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
1415 		    UINT32_HASH_2(x, y, con);
1416 		} while (i < n);
1417 #elif D_EXP == 32
1418 		do {
1419 		    Uint32 x, y;
1420 		    x = i < n ? BIG_DIGIT(ptr, i++) : 0;
1421 		    y = i < n ? BIG_DIGIT(ptr, i++) : 0;
1422 		    UINT32_HASH_2(x, y, con);
1423 		} while (i < n);
1424 #elif D_EXP == 64
1425 		do {
1426 		    Uint t;
1427 		    Uint32 x, y;
1428                     ASSERT(i < n);
1429 		    t = BIG_DIGIT(ptr, i++);
1430 		    x = t & 0xffffffff;
1431 		    y = t >> 32;
1432 		    UINT32_HASH_2(x, y, con);
1433 		} while (i < n);
1434 #else
1435 #error "unsupported D_EXP size"
1436 #endif
1437 		goto hash2_common;
1438 	    }
1439 	    break;
1440 	    case REF_SUBTAG:
1441 		/* All parts of the ref should be hashed. */
1442 		UINT32_HASH(internal_ref_numbers(term)[0], HCONST_7);
1443 		goto hash2_common;
1444 		break;
1445 	    case EXTERNAL_REF_SUBTAG:
1446 		/* All parts of the ref should be hashed. */
1447 		UINT32_HASH(external_ref_numbers(term)[0], HCONST_7);
1448 		goto hash2_common;
1449 		break;
1450 	    case EXTERNAL_PID_SUBTAG:
1451 		/* Only 15 bits are hashed. */
1452 		UINT32_HASH(external_pid_number(term), HCONST_5);
1453 		goto hash2_common;
1454 	    case EXTERNAL_PORT_SUBTAG:
1455 		/* Only 15 bits are hashed. */
1456 		UINT32_HASH(external_port_number(term), HCONST_6);
1457 		goto hash2_common;
1458 	    case FLOAT_SUBTAG:
1459 	    {
1460 		FloatDef ff;
1461 		GET_DOUBLE(term, ff);
1462                 if (ff.fd == 0.0f) {
1463                     /* ensure positive 0.0 */
1464                     ff.fd = erts_get_positive_zero_float();
1465                 }
1466 #if defined(WORDS_BIGENDIAN) || defined(DOUBLE_MIDDLE_ENDIAN)
1467 		UINT32_HASH_2(ff.fw[0], ff.fw[1], HCONST_12);
1468 #else
1469 		UINT32_HASH_2(ff.fw[1], ff.fw[0], HCONST_12);
1470 #endif
1471 		goto hash2_common;
1472 	    }
1473 	    break;
1474 
1475 	    default:
1476 		erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term);
1477 	    }
1478 	}
1479 	break;
1480 	case TAG_PRIMARY_IMMED1:
1481 	    switch (term & _TAG_IMMED1_MASK) {
1482 	    case _TAG_IMMED1_PID:
1483 		/* Only 15 bits are hashed. */
1484 		UINT32_HASH(internal_pid_number(term), HCONST_5);
1485 		goto hash2_common;
1486 	    case _TAG_IMMED1_PORT:
1487 		/* Only 15 bits are hashed. */
1488 		UINT32_HASH(internal_port_number(term), HCONST_6);
1489 		goto hash2_common;
1490 	    case _TAG_IMMED1_IMMED2:
1491 		switch (term & _TAG_IMMED2_MASK) {
1492 		case _TAG_IMMED2_ATOM:
1493 		    if (hash == 0)
1494 			/* Fast, but the poor hash value should be mixed. */
1495 			hash = atom_tab(atom_val(term))->slot.bucket.hvalue;
1496 		    else
1497 			UINT32_HASH(atom_tab(atom_val(term))->slot.bucket.hvalue,
1498 				    HCONST_3);
1499 		    goto hash2_common;
1500 		case _TAG_IMMED2_NIL:
1501 		    if (hash == 0)
1502 			hash = 3468870702UL;
1503 		    else
1504 			UINT32_HASH(NIL_DEF, HCONST_2);
1505 		    goto hash2_common;
1506 		default:
1507 		    erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term);
1508 		}
1509 	    case _TAG_IMMED1_SMALL:
1510 	      {
1511 		  Sint x = signed_val(term);
1512 
1513 		  if (SMALL_BITS > 28 && !IS_SSMALL28(x)) {
1514 		      term = small_to_big(x, tmp_big);
1515 		      break;
1516 		  }
1517 		  SINT32_HASH(x, HCONST);
1518 		  goto hash2_common;
1519 	      }
1520 	    }
1521 	    break;
1522 	default:
1523 	    erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term);
1524 	hash2_common:
1525 
1526 	    /* Uint32 hash always has the hash value of the previous term,
1527 	     * compounded or otherwise.
1528 	     */
1529 
1530 	    if (ESTACK_ISEMPTY(s)) {
1531 		DESTROY_ESTACK(s);
1532 		UnUseTmpHeapNoproc(2);
1533 		return hash;
1534 	    }
1535 
1536 	    term = ESTACK_POP(s);
1537 
1538 	    switch (term) {
1539 		case HASH_MAP_TAIL: {
1540 		    hash = (Uint32) ESTACK_POP(s);
1541                     UINT32_HASH(hash_xor_pairs, HCONST_19);
1542 		    hash_xor_pairs = (Uint32) ESTACK_POP(s);
1543 		    goto hash2_common;
1544 		}
1545 		case HASH_MAP_PAIR:
1546 		    hash_xor_pairs ^= hash;
1547                     hash = 0;
1548 		    goto hash2_common;
1549 		default:
1550 		    break;
1551 	    }
1552 	}
1553     }
1554     }
1555 }
1556 
1557 /* Term hash function for internal use.
1558  *
1559  * Limitation #1: Is not "portable" in any way between different VM instances.
1560  *
1561  * Limitation #2: The hash value is only valid as long as the term exists
1562  * somewhere in the VM. Why? Because external pids, ports and refs are hashed
1563  * by mixing the node *pointer* value. If a node disappears and later reappears
1564  * with a new ErlNode struct, externals from that node will hash differently than
1565  * before.
1566  *
1567  * One IMPORTANT property must hold (for hamt).
1568  * EVERY BIT of the term that is significant for equality (see EQ)
1569  * MUST BE USED AS INPUT FOR THE HASH. Two different terms must always have a
1570  * chance of hashing different when salted: hash([Salt|A]) vs hash([Salt|B]).
1571  *
1572  * This is why we cannot use cached hash values for atoms for example.
1573  *
1574  */
1575 
1576 #define CONST_HASH(AConst)                              \
1577 do {  /* Lightweight mixing of constant (type info) */  \
1578     hash ^= AConst;                                     \
1579     hash = (hash << 17) ^ (hash >> (32-17));            \
1580 } while (0)
1581 
1582 Uint32
1583 make_internal_hash(Eterm term, Uint32 salt)
1584 {
1585     Uint32 hash;
1586 
1587     /* Optimization. Simple cases before declaration of estack. */
1588     if (primary_tag(term) == TAG_PRIMARY_IMMED1) {
1589         hash = salt;
1590     #if ERTS_SIZEOF_ETERM == 8
1591         UINT32_HASH_2((Uint32)term, (Uint32)(term >> 32), HCONST);
1592     #elif ERTS_SIZEOF_ETERM == 4
1593         UINT32_HASH(term, HCONST);
1594     #else
1595     #  error "No you don't"
1596     #endif
1597         return hash;
1598     }
1599     {
1600     Eterm tmp;
1601     DECLARE_ESTACK(s);
1602 
1603     hash = salt;
1604     for (;;) {
1605 	switch (primary_tag(term)) {
1606 	case TAG_PRIMARY_LIST:
1607 	{
1608 	    int c = 0;
1609 	    Uint32 sh = 0;
1610 	    Eterm* ptr = list_val(term);
1611 	    while (is_byte(*ptr)) {
1612 		/* Optimization for strings. */
1613 		sh = (sh << 8) + unsigned_val(*ptr);
1614 		if (c == 3) {
1615 		    UINT32_HASH(sh, HCONST_4);
1616 		    c = sh = 0;
1617 		} else {
1618 		    c++;
1619 		}
1620 		term = CDR(ptr);
1621 		if (is_not_list(term))
1622 		    break;
1623 		ptr = list_val(term);
1624 	    }
1625             if (c > 0)
1626                 UINT32_HASH_2(sh, (Uint32)c, HCONST_22);
1627 
1628 	    if (is_list(term)) {
1629 		tmp = CDR(ptr);
1630                 CONST_HASH(HCONST_17);  /* Hash CAR in cons cell */
1631                 ESTACK_PUSH(s, tmp);
1632                 if (is_not_list(tmp)) {
1633                     ESTACK_PUSH(s, HASH_CDR);
1634                 }
1635 		term = CAR(ptr);
1636 	    }
1637 	}
1638 	break;
1639 	case TAG_PRIMARY_BOXED:
1640 	{
1641 	    Eterm hdr = *boxed_val(term);
1642 	    ASSERT(is_header(hdr));
1643 	    switch (hdr & _TAG_HEADER_MASK) {
1644 	    case ARITYVAL_SUBTAG:
1645 	    {
1646 		int i;
1647 		int arity = header_arity(hdr);
1648 		Eterm* elem = tuple_val(term);
1649 		UINT32_HASH(arity, HCONST_9);
1650 		if (arity == 0) /* Empty tuple */
1651 		    goto pop_next;
1652 		for (i = arity; ; i--) {
1653 		    term = elem[i];
1654                     if (i == 1)
1655                         break;
1656                     ESTACK_PUSH(s, term);
1657 		}
1658 	    }
1659 	    break;
1660 
1661             case MAP_SUBTAG:
1662             {
1663                 Eterm* ptr = boxed_val(term) + 1;
1664                 Uint size;
1665                 int i;
1666 
1667                 /*
1668                  * We rely on key-value iteration order being constant
1669                  * for identical maps (in this VM instance).
1670                  */
1671                 switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
1672                 case HAMT_SUBTAG_HEAD_FLATMAP:
1673                 {
1674                     flatmap_t *mp = (flatmap_t *)flatmap_val(term);
1675                     Eterm *ks = flatmap_get_keys(mp);
1676                     Eterm *vs = flatmap_get_values(mp);
1677                     size      = flatmap_get_size(mp);
1678                     UINT32_HASH(size, HCONST_16);
1679                     if (size == 0)
1680                         goto pop_next;
1681 
1682                     for (i = size - 1; i >= 0; i--) {
1683                         ESTACK_PUSH(s, vs[i]);
1684                         ESTACK_PUSH(s, ks[i]);
1685                     }
1686                     goto pop_next;
1687                 }
1688                 case HAMT_SUBTAG_HEAD_ARRAY:
1689                 case HAMT_SUBTAG_HEAD_BITMAP:
1690                     size = *ptr++;
1691                     UINT32_HASH(size, HCONST_16);
1692                     if (size == 0)
1693                         goto pop_next;
1694                 }
1695                 switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
1696                 case HAMT_SUBTAG_HEAD_ARRAY:
1697                     i = 16;
1698                     break;
1699                 case HAMT_SUBTAG_HEAD_BITMAP:
1700                 case HAMT_SUBTAG_NODE_BITMAP:
1701                     i = hashmap_bitcount(MAP_HEADER_VAL(hdr));
1702                     break;
1703                 default:
1704                     erts_exit(ERTS_ERROR_EXIT, "bad header");
1705                 }
1706                 while (i) {
1707                     if (is_list(*ptr)) {
1708                         Eterm* cons = list_val(*ptr);
1709                         ESTACK_PUSH(s, CDR(cons));
1710                         ESTACK_PUSH(s, CAR(cons));
1711                     }
1712                     else {
1713                         ASSERT(is_boxed(*ptr));
1714                         ESTACK_PUSH(s, *ptr);
1715                     }
1716                     i--; ptr++;
1717                 }
1718                 goto pop_next;
1719             }
1720             break;
1721 	    case EXPORT_SUBTAG:
1722 	    {
1723 		Export* ep = *((Export **) (export_val(term) + 1));
1724                 /* Assumes Export entries never move */
1725                 POINTER_HASH(ep, HCONST_14);
1726 		goto pop_next;
1727 	    }
1728 
1729 	    case FUN_SUBTAG:
1730 	    {
1731 		ErlFunThing* funp = (ErlFunThing *) fun_val(term);
1732 		Uint num_free = funp->num_free;
1733                 UINT32_HASH_2(num_free, funp->fe->module, HCONST_20);
1734                 UINT32_HASH_2(funp->fe->old_index, funp->fe->old_uniq, HCONST_21);
1735 		if (num_free == 0) {
1736 		    goto pop_next;
1737 		} else {
1738 		    Eterm* bptr = funp->env + num_free - 1;
1739 		    while (num_free-- > 1) {
1740 			term = *bptr--;
1741 			ESTACK_PUSH(s, term);
1742 		    }
1743 		    term = *bptr;
1744 		}
1745 	    }
1746 	    break;
1747 	    case REFC_BINARY_SUBTAG:
1748 	    case HEAP_BINARY_SUBTAG:
1749 	    case SUB_BINARY_SUBTAG:
1750 	    {
1751 		byte* bptr;
1752 		Uint sz = binary_size(term);
1753 		Uint32 con = HCONST_13 + hash;
1754 		Uint bitoffs;
1755 		Uint bitsize;
1756 
1757 		ERTS_GET_BINARY_BYTES(term, bptr, bitoffs, bitsize);
1758 		if (sz == 0 && bitsize == 0) {
1759 		    hash = con;
1760 		} else {
1761 		    if (bitoffs == 0) {
1762 			hash = block_hash(bptr, sz, con);
1763 			if (bitsize > 0) {
1764 			    UINT32_HASH_2(bitsize, (bptr[sz] >> (8 - bitsize)),
1765 					  HCONST_15);
1766 			}
1767 		    } else {
1768 			byte* buf = (byte *) erts_alloc(ERTS_ALC_T_TMP,
1769 							sz + (bitsize != 0));
1770 			erts_copy_bits(bptr, bitoffs, 1, buf, 0, 1, sz*8+bitsize);
1771 			hash = block_hash(buf, sz, con);
1772 			if (bitsize > 0) {
1773 			    UINT32_HASH_2(bitsize, (buf[sz] >> (8 - bitsize)),
1774 					  HCONST_15);
1775 			}
1776 			erts_free(ERTS_ALC_T_TMP, (void *) buf);
1777 		    }
1778 		}
1779 		goto pop_next;
1780 	    }
1781 	    break;
1782 	    case POS_BIG_SUBTAG:
1783 	    case NEG_BIG_SUBTAG:
1784 	    {
1785 		Eterm* ptr = big_val(term);
1786 		Uint i = 0;
1787 		Uint n = BIG_SIZE(ptr);
1788 		Uint32 con = BIG_SIGN(ptr) ? HCONST_10 : HCONST_11;
1789 #if D_EXP == 16
1790 		do {
1791 		    Uint32 x, y;
1792 		    x = i < n ? BIG_DIGIT(ptr, i++) : 0;
1793 		    x += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
1794 		    y = i < n ? BIG_DIGIT(ptr, i++) : 0;
1795 		    y += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
1796 		    UINT32_HASH_2(x, y, con);
1797 		} while (i < n);
1798 #elif D_EXP == 32
1799 		do {
1800 		    Uint32 x, y;
1801 		    x = i < n ? BIG_DIGIT(ptr, i++) : 0;
1802 		    y = i < n ? BIG_DIGIT(ptr, i++) : 0;
1803 		    UINT32_HASH_2(x, y, con);
1804 		} while (i < n);
1805 #elif D_EXP == 64
1806 		do {
1807 		    Uint t;
1808 		    Uint32 x, y;
1809                     ASSERT(i < n);
1810 		    t = BIG_DIGIT(ptr, i++);
1811 		    x = t & 0xffffffff;
1812 		    y = t >> 32;
1813 		    UINT32_HASH_2(x, y, con);
1814 		} while (i < n);
1815 #else
1816 #error "unsupported D_EXP size"
1817 #endif
1818 		goto pop_next;
1819 	    }
1820 	    break;
1821 	    case REF_SUBTAG:
1822 		UINT32_HASH(internal_ref_numbers(term)[0], HCONST_7);
1823                 ASSERT(internal_ref_no_numbers(term) == 3);
1824                 UINT32_HASH_2(internal_ref_numbers(term)[1],
1825                               internal_ref_numbers(term)[2], HCONST_8);
1826                 goto pop_next;
1827 
1828             case EXTERNAL_REF_SUBTAG:
1829             {
1830                 ExternalThing* thing = external_thing_ptr(term);
1831 
1832                 ASSERT(external_thing_ref_no_numbers(thing) == 3);
1833                 /* See limitation #2 */
1834             #ifdef ARCH_64
1835                 POINTER_HASH(thing->node, HCONST_7);
1836                 UINT32_HASH(external_thing_ref_numbers(thing)[0], HCONST_7);
1837             #else
1838                 UINT32_HASH_2(thing->node,
1839                               external_thing_ref_numbers(thing)[0], HCONST_7);
1840             #endif
1841                 UINT32_HASH_2(external_thing_ref_numbers(thing)[1],
1842                               external_thing_ref_numbers(thing)[2], HCONST_8);
1843                 goto pop_next;
1844             }
1845             case EXTERNAL_PID_SUBTAG: {
1846                 ExternalThing* thing = external_thing_ptr(term);
1847                 /* See limitation #2 */
1848             #ifdef ARCH_64
1849                 POINTER_HASH(thing->node, HCONST_5);
1850                 UINT32_HASH(thing->data.ui[0], HCONST_5);
1851             #else
1852                 UINT32_HASH_2(thing->node, thing->data.ui[0], HCONST_5);
1853             #endif
1854 		goto pop_next;
1855             }
1856 	    case EXTERNAL_PORT_SUBTAG: {
1857                 ExternalThing* thing = external_thing_ptr(term);
1858                 /* See limitation #2 */
1859             #ifdef ARCH_64
1860                 POINTER_HASH(thing->node, HCONST_6);
1861                 UINT32_HASH(thing->data.ui[0], HCONST_6);
1862             #else
1863                 UINT32_HASH_2(thing->node, thing->data.ui[0], HCONST_6);
1864             #endif
1865 		goto pop_next;
1866             }
1867 	    case FLOAT_SUBTAG:
1868 	    {
1869 		FloatDef ff;
1870 		GET_DOUBLE(term, ff);
1871                 if (ff.fd == 0.0f) {
1872                     /* ensure positive 0.0 */
1873                     ff.fd = erts_get_positive_zero_float();
1874                 }
1875 		UINT32_HASH_2(ff.fw[0], ff.fw[1], HCONST_12);
1876 		goto pop_next;
1877 	    }
1878 	    default:
1879 		erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_internal_hash(0x%X, %lu)\n", term, salt);
1880 	    }
1881 	}
1882 	break;
1883         case TAG_PRIMARY_IMMED1:
1884         #if ERTS_SIZEOF_ETERM == 8
1885             UINT32_HASH_2((Uint32)term, (Uint32)(term >> 32), HCONST);
1886         #else
1887             UINT32_HASH(term, HCONST);
1888         #endif
1889             goto pop_next;
1890 
1891 	default:
1892 	    erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_internal_hash(0x%X, %lu)\n", term, salt);
1893 
1894 	pop_next:
1895 	    if (ESTACK_ISEMPTY(s)) {
1896 		DESTROY_ESTACK(s);
1897 
1898 		return hash;
1899 	    }
1900 
1901 	    term = ESTACK_POP(s);
1902 
1903 	    switch (term) {
1904 	        case HASH_CDR:
1905 		    CONST_HASH(HCONST_18);   /* Hash CDR in cons cell */
1906 		    goto pop_next;
1907 		default:
1908 		    break;
1909 	    }
1910 	}
1911     }
1912     }
1913 
1914 #undef CONST_HASH
1915 #undef HASH_MAP_TAIL
1916 #undef HASH_MAP_PAIR
1917 #undef HASH_CDR
1918 
1919 #undef UINT32_HASH_2
1920 #undef UINT32_HASH
1921 #undef SINT32_HASH
1922 }
1923 
1924 #undef HCONST
1925 #undef MIX
1926 
1927 /* error_logger !
1928    {log, Level, format, [args], #{ gl, pid, time, error_logger => #{tag, emulator => true} }}
1929 */
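/* Illustrative instance of such a message (hypothetical values, to help read
 * the allocation and build code below):
 *   {log, error, "~s~n", [["some emulator message"]],
 *    #{gl => GroupLeader, pid => Sender, time => MicroSecs,
 *      error_logger => #{tag => error, emulator => true}}}
 */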
1930 static Eterm
1931 do_allocate_logger_message(Eterm gleader, ErtsMonotonicTime *ts, Eterm *pid,
1932                            Eterm **hp, ErlOffHeap **ohp,
1933 			   ErlHeapFragment **bp, Uint sz)
1934 {
1935     Uint gl_sz;
1936     gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader);
1937     sz = sz + gl_sz + 6 /*outer 5-tuple*/
1938         + MAP2_SZ /* error_logger map */;
1939 
1940     *pid = erts_get_current_pid();
1941 
1942     if (is_nil(gleader) && is_non_value(*pid)) {
1943         sz += MAP2_SZ /* metadata map no gl, no pid */;
1944     } else if (is_nil(gleader) || is_non_value(*pid))
1945         sz += MAP3_SZ /* metadata map no gl or no pid*/;
1946     else
1947         sz += MAP4_SZ /* metadata map w gl w pid*/;
1948 
1949     *ts = ERTS_MONOTONIC_TO_USEC(erts_os_system_time());
1950     erts_bld_sint64(NULL, &sz, *ts);
1951 
1952     *bp = new_message_buffer(sz);
1953     *ohp = &(*bp)->off_heap;
1954     *hp = (*bp)->mem;
1955 
1956     return copy_struct(gleader,gl_sz,hp,*ohp);
1957 }
1958 
1959 static void do_send_logger_message(Eterm gl, Eterm tag, Eterm format, Eterm args,
1960                                    ErtsMonotonicTime ts, Eterm pid,
1961                                    Eterm *hp, ErlHeapFragment *bp)
1962 {
1963     Eterm message, md, el_tag = tag;
1964     Eterm time = erts_bld_sint64(&hp, NULL, ts);
1965 
1966     /* This mapping is needed for the backwards compatible error_logger */
1967     switch (tag) {
1968     case am_info: el_tag = am_info_msg; break;
1969     case am_warning: el_tag = am_warning_msg; break;
1970     default:
1971         ASSERT(tag == am_error);
1972         break;
1973     }
1974 
1975     md = MAP2(hp, am_emulator, am_true, ERTS_MAKE_AM("tag"), el_tag);
1976     hp += MAP2_SZ;
1977 
1978     if (is_nil(gl) && is_non_value(pid)) {
1979         /* no gl and no pid, probably from a port */
1980         md = MAP2(hp,
1981                   am_error_logger, md,
1982                   am_time, time);
1983         hp += MAP2_SZ;
1984         pid = NIL;
1985     } else if (is_nil(gl)) {
1986         /* no gl */
1987         md = MAP3(hp,
1988                   am_error_logger, md,
1989                   am_pid, pid,
1990                   am_time, time);
1991         hp += MAP3_SZ;
1992     } else if (is_non_value(pid)) {
1993         /* no pid */
1994         md = MAP3(hp,
1995                   am_error_logger, md,
1996                   ERTS_MAKE_AM("gl"), gl,
1997                   am_time, time);
1998         hp += MAP3_SZ;
1999         pid = NIL;
2000     } else {
2001         md = MAP4(hp,
2002                   am_error_logger, md,
2003                   ERTS_MAKE_AM("gl"), gl,
2004                   am_pid, pid,
2005                   am_time, time);
2006         hp += MAP4_SZ;
2007     }
2008 
2009     message = TUPLE5(hp, am_log, tag, format, args, md);
2010     erts_queue_error_logger_message(pid, message, bp);
2011 }
2012 
2013 static int do_send_to_logger(Eterm tag, Eterm gl, char *buf, size_t len)
2014 {
2015     Uint sz;
2016     Eterm list, args, format, pid;
2017     ErtsMonotonicTime ts;
2018 
2019     Eterm *hp = NULL;
2020     ErlOffHeap *ohp = NULL;
2021     ErlHeapFragment *bp = NULL;
2022 
2023     sz = len * 2 /* message list */ + 2 /* cons surrounding message list */
2024 	+ 8 /* "~s~n" */;
2025 
2026     /* gleader size is accounted for and allocated next */
2027     gl = do_allocate_logger_message(gl, &ts, &pid, &hp, &ohp, &bp, sz);
2028 
2029     list = buf_to_intlist(&hp, buf, len, NIL);
2030     args = CONS(hp,list,NIL);
2031     hp += 2;
2032     format = buf_to_intlist(&hp, "~s~n", 4, NIL);
2033 
2034     do_send_logger_message(gl, tag, format, args, ts, pid, hp, bp);
2035     return 0;
2036 }
2037 
2038 static int do_send_term_to_logger(Eterm tag, Eterm gl,
2039 				  char *buf, size_t len, Eterm args)
2040 {
2041     Uint sz;
2042     Uint args_sz;
2043     Eterm format, pid;
2044     ErtsMonotonicTime ts;
2045 
2046     Eterm *hp = NULL;
2047     ErlOffHeap *ohp = NULL;
2048     ErlHeapFragment *bp = NULL;
2049 
2050     ASSERT(len > 0);
2051 
2052     args_sz = size_object(args);
2053     sz = len * 2 /* format */ + args_sz;
2054 
2055     /* gleader size is accounted for and allocated next */
2056     gl = do_allocate_logger_message(gl, &ts, &pid, &hp, &ohp, &bp, sz);
2057 
2058     format = buf_to_intlist(&hp, buf, len, NIL);
2059     args = copy_struct(args, args_sz, &hp, ohp);
2060 
2061     do_send_logger_message(gl, tag, format, args, ts, pid, hp, bp);
2062     return 0;
2063 }
2064 
2065 static ERTS_INLINE int
2066 send_info_to_logger(Eterm gleader, char *buf, size_t len)
2067 {
2068     return do_send_to_logger(am_info, gleader, buf, len);
2069 }
2070 
2071 static ERTS_INLINE int
2072 send_warning_to_logger(Eterm gleader, char *buf, size_t len)
2073 {
2074     return do_send_to_logger(erts_error_logger_warnings, gleader, buf, len);
2075 }
2076 
2077 static ERTS_INLINE int
2078 send_error_to_logger(Eterm gleader, char *buf, size_t len)
2079 {
2080     return do_send_to_logger(am_error, gleader, buf, len);
2081 }
2082 
2083 static ERTS_INLINE int
2084 send_error_term_to_logger(Eterm gleader, char *buf, size_t len, Eterm args)
2085 {
2086     return do_send_term_to_logger(am_error, gleader, buf, len, args);
2087 }
2088 
2089 #define LOGGER_DSBUF_INC_SZ 256
2090 
2091 static erts_dsprintf_buf_t *
2092 grow_logger_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
2093 {
2094     size_t size;
2095     size_t free_size = dsbufp->size - dsbufp->str_len;
2096 
2097     ASSERT(dsbufp && dsbufp->str);
2098 
2099     if (need <= free_size)
2100 	return dsbufp;
2101 
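    /* Grow by the shortfall (need - free_size), rounded up to whole
       LOGGER_DSBUF_INC_SZ increments, on top of the current size. */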
2102     size = need - free_size + LOGGER_DSBUF_INC_SZ;
2103     size = (((size + LOGGER_DSBUF_INC_SZ - 1) / LOGGER_DSBUF_INC_SZ)
2104 	    * LOGGER_DSBUF_INC_SZ);
2105     size += dsbufp->size;
2106     ASSERT(dsbufp->str_len + need <= size);
2107     dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_LOGGER_DSBUF,
2108 					(void *) dsbufp->str,
2109 					size);
2110     dsbufp->size = size;
2111     return dsbufp;
2112 }
2113 
2114 erts_dsprintf_buf_t *
2115 erts_create_logger_dsbuf(void)
2116 {
2117     erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_logger_dsbuf);
2118     erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_LOGGER_DSBUF,
2119 					     sizeof(erts_dsprintf_buf_t));
2120     sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
2121     dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_LOGGER_DSBUF,
2122 				      LOGGER_DSBUF_INC_SZ);
2123     dsbufp->str[0] = '\0';
2124     dsbufp->size = LOGGER_DSBUF_INC_SZ;
2125     return dsbufp;
2126 }
2127 
2128 static ERTS_INLINE void
2129 destroy_logger_dsbuf(erts_dsprintf_buf_t *dsbufp)
2130 {
2131     ASSERT(dsbufp && dsbufp->str);
2132     erts_free(ERTS_ALC_T_LOGGER_DSBUF, (void *) dsbufp->str);
2133     erts_free(ERTS_ALC_T_LOGGER_DSBUF, (void *) dsbufp);
2134 }
2135 
2136 int
2137 erts_send_info_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp)
2138 {
2139     int res;
2140     res = send_info_to_logger(gleader, dsbufp->str, dsbufp->str_len);
2141     destroy_logger_dsbuf(dsbufp);
2142     return res;
2143 }
2144 
2145 int
2146 erts_send_warning_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp)
2147 {
2148     int res;
2149     res = send_warning_to_logger(gleader, dsbufp->str, dsbufp->str_len);
2150     destroy_logger_dsbuf(dsbufp);
2151     return res;
2152 }
2153 
2154 int
2155 erts_send_error_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp)
2156 {
2157     int res;
2158     res = send_error_to_logger(gleader, dsbufp->str, dsbufp->str_len);
2159     destroy_logger_dsbuf(dsbufp);
2160     return res;
2161 }
2162 
2163 int
2164 erts_send_error_term_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp, Eterm args)
2165 {
2166     int res;
2167     res = send_error_term_to_logger(gleader, dsbufp->str, dsbufp->str_len, args);
2168     destroy_logger_dsbuf(dsbufp);
2169     return res;
2170 }
2171 
2172 int
2173 erts_send_info_to_logger_str(Eterm gleader, char *str)
2174 {
2175     return send_info_to_logger(gleader, str, sys_strlen(str));
2176 }
2177 
2178 int
2179 erts_send_warning_to_logger_str(Eterm gleader, char *str)
2180 {
2181     return send_warning_to_logger(gleader, str, sys_strlen(str));
2182 }
2183 
2184 int
2185 erts_send_error_to_logger_str(Eterm gleader, char *str)
2186 {
2187     return send_error_to_logger(gleader, str, sys_strlen(str));
2188 }
2189 
2190 int
2191 erts_send_info_to_logger_nogl(erts_dsprintf_buf_t *dsbuf)
2192 {
2193     return erts_send_info_to_logger(NIL, dsbuf);
2194 }
2195 
2196 int
2197 erts_send_warning_to_logger_nogl(erts_dsprintf_buf_t *dsbuf)
2198 {
2199     return erts_send_warning_to_logger(NIL, dsbuf);
2200 }
2201 
2202 int
2203 erts_send_error_to_logger_nogl(erts_dsprintf_buf_t *dsbuf)
2204 {
2205     return erts_send_error_to_logger(NIL, dsbuf);
2206 }
2207 
2208 int
2209 erts_send_info_to_logger_str_nogl(char *str)
2210 {
2211     return erts_send_info_to_logger_str(NIL, str);
2212 }
2213 
2214 int
2215 erts_send_warning_to_logger_str_nogl(char *str)
2216 {
2217     return erts_send_warning_to_logger_str(NIL, str);
2218 }
2219 
2220 int
2221 erts_send_error_to_logger_str_nogl(char *str)
2222 {
2223     return erts_send_error_to_logger_str(NIL, str);
2224 }
2225 
2226 
2227 #define TMP_DSBUF_INC_SZ 256
2228 
2229 static erts_dsprintf_buf_t *
2230 grow_tmp_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
2231 {
2232     size_t size;
2233     size_t free_size = dsbufp->size - dsbufp->str_len;
2234 
2235     ASSERT(dsbufp);
2236 
2237     if (need <= free_size)
2238 	return dsbufp;
2239     size = need - free_size + TMP_DSBUF_INC_SZ;
2240     size = ((size + TMP_DSBUF_INC_SZ - 1)/TMP_DSBUF_INC_SZ)*TMP_DSBUF_INC_SZ;
2241     size += dsbufp->size;
2242     ASSERT(dsbufp->str_len + need <= size);
2243     dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_TMP_DSBUF,
2244 					(void *) dsbufp->str,
2245 					size);
2246     dsbufp->size = size;
2247     return dsbufp;
2248 }
2249 
2250 erts_dsprintf_buf_t *
2251 erts_create_tmp_dsbuf(Uint size)
2252 {
2253     Uint init_size = size ? size : TMP_DSBUF_INC_SZ;
2254     erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_tmp_dsbuf);
2255     erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_TMP_DSBUF,
2256 					     sizeof(erts_dsprintf_buf_t));
2257     sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
2258     dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_TMP_DSBUF, init_size);
2259     dsbufp->str[0] = '\0';
2260     dsbufp->size = init_size;
2261     return dsbufp;
2262 }
2263 
2264 void
2265 erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *dsbufp)
2266 {
2267     if (dsbufp->str)
2268 	erts_free(ERTS_ALC_T_TMP_DSBUF, (void *) dsbufp->str);
2269     erts_free(ERTS_ALC_T_TMP_DSBUF, (void *) dsbufp);
2270 }
2271 
2272 /* eq and cmp are written as separate functions as eq is a little faster */
2273 
2274 /*
2275  * Test for equality of two terms.
2276  * Returns 0 if not equal, or a non-zero value otherwise.
2277  */
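/* Illustrative usage sketch (hypothetical terms): eq() answers the '=:='
 * question structurally, so two copies of {1, <<"ab">>} built on different
 * heaps compare equal, while the integer 1 and the float 1.0 do not. */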
2278 int eq(Eterm a, Eterm b)
2279 {
2280     DECLARE_WSTACK(stack);
2281     Sint sz;
2282     Eterm* aa;
2283     Eterm* bb;
2284 
2285 tailrecur:
2286     if (is_same(a, b)) goto pop_next;
2287 tailrecur_ne:
2288 
2289     switch (primary_tag(a)) {
2290     case TAG_PRIMARY_LIST:
2291 	if (is_list(b)) {
2292 	    Eterm* aval = list_val(a);
2293 	    Eterm* bval = list_val(b);
2294 	    while (1) {
2295 		Eterm atmp = CAR(aval);
2296 		Eterm btmp = CAR(bval);
2297 		if (!is_same(atmp,btmp)) {
2298 		    WSTACK_PUSH2(stack,(UWord) CDR(bval),(UWord) CDR(aval));
2299 		    a = atmp;
2300 		    b = btmp;
2301 		    goto tailrecur_ne;
2302 		}
2303 		atmp = CDR(aval);
2304 		btmp = CDR(bval);
2305 		if (is_same(atmp,btmp)) {
2306 		    goto pop_next;
2307 		}
2308 		if (is_not_list(atmp) || is_not_list(btmp)) {
2309 		    a = atmp;
2310 		    b = btmp;
2311 		    goto tailrecur_ne;
2312 		}
2313 		aval = list_val(atmp);
2314 		bval = list_val(btmp);
2315 	    }
2316 	}
2317 	break; /* not equal */
2318 
2319     case TAG_PRIMARY_BOXED:
2320 	{
2321 	    Eterm hdr = *boxed_val(a);
2322 	    switch (hdr & _TAG_HEADER_MASK) {
2323 	    case ARITYVAL_SUBTAG:
2324 		{
2325 		    aa = tuple_val(a);
2326 		    if (!is_boxed(b) || *boxed_val(b) != *aa)
2327 			goto not_equal;
2328 		    bb = tuple_val(b);
2329 		    if ((sz = arityval(*aa)) == 0) goto pop_next;
2330 		    ++aa;
2331 		    ++bb;
2332 		    goto term_array;
2333 		}
2334 	    case REFC_BINARY_SUBTAG:
2335 	    case HEAP_BINARY_SUBTAG:
2336 	    case SUB_BINARY_SUBTAG:
2337 		{
2338 		    byte* a_ptr;
2339 		    byte* b_ptr;
2340 		    size_t a_size;
2341 		    size_t b_size;
2342 		    Uint a_bitsize;
2343 		    Uint b_bitsize;
2344 		    Uint a_bitoffs;
2345 		    Uint b_bitoffs;
2346 
2347 		    if (!is_binary(b)) {
2348 			goto not_equal;
2349 		    }
2350 		    a_size = binary_size(a);
2351 		    b_size = binary_size(b);
2352 		    if (a_size != b_size) {
2353 			goto not_equal;
2354 		    }
2355 		    ERTS_GET_BINARY_BYTES(a, a_ptr, a_bitoffs, a_bitsize);
2356 		    ERTS_GET_BINARY_BYTES(b, b_ptr, b_bitoffs, b_bitsize);
2357 		    if ((a_bitsize | b_bitsize | a_bitoffs | b_bitoffs) == 0) {
2358 			if (sys_memcmp(a_ptr, b_ptr, a_size) == 0) goto pop_next;
2359 		    } else if (a_bitsize == b_bitsize) {
2360 			if (erts_cmp_bits(a_ptr, a_bitoffs, b_ptr, b_bitoffs,
2361 					  (a_size << 3) + a_bitsize) == 0) goto pop_next;
2362 		    }
2363 		    break; /* not equal */
2364 		}
2365 	    case EXPORT_SUBTAG:
2366 		{
2367 		    if (is_export(b)) {
2368 			Export* a_exp = *((Export **) (export_val(a) + 1));
2369 			Export* b_exp = *((Export **) (export_val(b) + 1));
2370 			if (a_exp == b_exp) goto pop_next;
2371 		    }
2372 		    break; /* not equal */
2373 		}
2374 	    case FUN_SUBTAG:
2375 		{
2376 		    ErlFunThing* f1;
2377 		    ErlFunThing* f2;
2378 
2379 		    if (!is_fun(b))
2380 			goto not_equal;
2381 		    f1 = (ErlFunThing *) fun_val(a);
2382 		    f2 = (ErlFunThing *) fun_val(b);
2383 		    if (f1->fe->module != f2->fe->module ||
2384 			f1->fe->old_index != f2->fe->old_index ||
2385 			f1->fe->old_uniq != f2->fe->old_uniq ||
2386 			f1->num_free != f2->num_free) {
2387 			goto not_equal;
2388 		    }
2389 		    if ((sz = f1->num_free) == 0) goto pop_next;
2390 		    aa = f1->env;
2391 		    bb = f2->env;
2392 		    goto term_array;
2393 		}
2394 
2395 	    case EXTERNAL_PID_SUBTAG:
2396 	    case EXTERNAL_PORT_SUBTAG: {
2397 		ExternalThing *ap;
2398 		ExternalThing *bp;
2399 
2400 		if(!is_external(b))
2401 		    goto not_equal;
2402 
2403 		ap = external_thing_ptr(a);
2404 		bp = external_thing_ptr(b);
2405 
2406 		if(ap->header == bp->header && ap->node == bp->node) {
2407 		    ASSERT(1 == external_data_words(a));
2408 		    ASSERT(1 == external_data_words(b));
2409 
2410 		    if (ap->data.ui[0] == bp->data.ui[0]) goto pop_next;
2411 		}
2412 		break; /* not equal */
2413 	    }
2414 	    case EXTERNAL_REF_SUBTAG: {
2415 		/*
2416 		 * Observe!
2417 		 *  When comparing refs we need to compare ref numbers
2418 		 * (32-bit words) *not* ref data words.
2419 		 */
2420 		Uint32 *anum;
2421 		Uint32 *bnum;
2422 		Uint common_len;
2423 		Uint alen;
2424 		Uint blen;
2425 		Uint i;
2426 		ExternalThing* athing;
2427 		ExternalThing* bthing;
2428 
2429 		if(!is_external_ref(b))
2430 		    goto not_equal;
2431 
2432 		athing = external_thing_ptr(a);
2433 		bthing = external_thing_ptr(b);
2434 
2435 		if(athing->node != bthing->node)
2436 		    goto not_equal;
2437 
2438 		anum = external_thing_ref_numbers(athing);
2439 		bnum = external_thing_ref_numbers(bthing);
2440 		alen = external_thing_ref_no_numbers(athing);
2441 		blen = external_thing_ref_no_numbers(bthing);
2442 
2443 		goto ref_common;
2444 
2445 	    case REF_SUBTAG:
2446 
2447 		if (!is_internal_ref(b))
2448 		    goto not_equal;
2449 
2450 		alen = internal_ref_no_numbers(a);
2451 		anum = internal_ref_numbers(a);
2452 		blen = internal_ref_no_numbers(b);
2453 		bnum = internal_ref_numbers(b);
2454 
2455 	    ref_common:
2456 		    ASSERT(alen > 0 && blen > 0);
2457 
2458 		    if (anum[0] != bnum[0])
2459 			goto not_equal;
2460 
2461 		    if (alen == 3 && blen == 3) {
2462 			/* Most refs are of length 3 */
2463 			if (anum[1] == bnum[1] && anum[2] == bnum[2]) {
2464 			    goto pop_next;
2465 			} else {
2466 			    goto not_equal;
2467 			}
2468 		    }
2469 
2470 		    common_len = alen;
2471 		    if (blen < alen)
2472 			common_len = blen;
2473 
2474 		    for (i = 1; i < common_len; i++)
2475 			if (anum[i] != bnum[i])
2476 			    goto not_equal;
2477 
2478 		    if(alen != blen) {
2479 
2480 			if (alen > blen) {
2481 			    for (i = common_len; i < alen; i++)
2482 				if (anum[i] != 0)
2483 				    goto not_equal;
2484 			}
2485 			else {
2486 			    for (i = common_len; i < blen; i++)
2487 				if (bnum[i] != 0)
2488 				    goto not_equal;
2489 			}
2490 		    }
2491 		    goto pop_next;
2492 	    }
2493 	    case POS_BIG_SUBTAG:
2494 	    case NEG_BIG_SUBTAG:
2495 		{
2496 		    int i;
2497 
2498 		    if (!is_big(b))
2499 			goto not_equal;
2500 		    aa = big_val(a);
2501 		    bb = big_val(b);
2502 		    if (*aa != *bb)
2503 			goto not_equal;
2504 		    i = BIG_ARITY(aa);
2505 		    while(i--) {
2506 			if (*++aa != *++bb)
2507 			    goto not_equal;
2508 		    }
2509 		    goto pop_next;
2510 		}
2511 	    case FLOAT_SUBTAG:
2512 		{
2513 		    FloatDef af;
2514 		    FloatDef bf;
2515 
2516 		    if (is_float(b)) {
2517 			GET_DOUBLE(a, af);
2518 			GET_DOUBLE(b, bf);
2519 			if (af.fd == bf.fd) goto pop_next;
2520 		    }
2521 		    break; /* not equal */
2522 		}
2523 	    case MAP_SUBTAG:
2524                 if (is_flatmap(a)) {
2525 		    aa = flatmap_val(a);
2526 		    if (!is_boxed(b) || *boxed_val(b) != *aa)
2527 			goto not_equal;
2528 		    bb = flatmap_val(b);
2529 		    sz = flatmap_get_size((flatmap_t*)aa);
2530 
2531 		    if (sz != flatmap_get_size((flatmap_t*)bb)) goto not_equal;
2532 		    if (sz == 0) goto pop_next;
2533 
2534 		    aa += 2;
2535 		    bb += 2;
2536 		    sz += 1; /* increment for tuple-keys */
2537 		    goto term_array;
2538 
2539                 } else {
2540 		    if (!is_boxed(b) || *boxed_val(b) != hdr)
2541 			goto not_equal;
2542 
2543 		    aa = hashmap_val(a) + 1;
2544 		    bb = hashmap_val(b) + 1;
2545 		    switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
2546 		    case HAMT_SUBTAG_HEAD_ARRAY:
2547 			aa++; bb++;
2548 			sz = 16;
2549 			break;
2550 		    case HAMT_SUBTAG_HEAD_BITMAP:
2551 			aa++; bb++;
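			/* fall through */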
2552 		    case HAMT_SUBTAG_NODE_BITMAP:
2553 			sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
2554 			ASSERT(sz > 0 && sz < 17);
2555 			break;
2556 		    default:
2557 			erts_exit(ERTS_ERROR_EXIT, "Unknown hashmap subsubtag\n");
2558 		    }
2559 		    goto term_array;
2560 		}
2561 	    default:
2562 		ASSERT(!"Unknown boxed subtag in EQ");
2563 	    }
2564 	    break;
2565 	}
2566     }
2567     goto not_equal;
2568 
2569 
2570 term_array: /* arrays in 'aa' and 'bb', length in 'sz' */
2571     ASSERT(sz != 0);
2572     {
2573 	Eterm* ap = aa;
2574 	Eterm* bp = bb;
2575 	Sint i = sz;
2576 	for (;;) {
2577 	    if (!is_same(*ap,*bp)) break;
2578 	    if (--i == 0) goto pop_next;
2579 	    ++ap;
2580 	    ++bp;
2581 	}
2582 	a = *ap;
2583 	b = *bp;
2584 	if (is_both_immed(a,b)) {
2585 	    goto not_equal;
2586 	}
2587 	if (i > 1) { /* push the rest */
2588 	    WSTACK_PUSH3(stack, i-1, (UWord)(bp+1),
2589 			 ((UWord)(ap+1)) | TAG_PRIMARY_HEADER);
2590 	    /* We (ab)use TAG_PRIMARY_HEADER to recognize a term_array */
2591 	}
2592 	goto tailrecur_ne;
2593     }
2594 
2595 pop_next:
2596     if (!WSTACK_ISEMPTY(stack)) {
2597 	UWord something  = WSTACK_POP(stack);
2598 	if (primary_tag((Eterm) something) == TAG_PRIMARY_HEADER) { /* a term_array */
2599 	    aa = (Eterm*) something;
2600 	    bb = (Eterm*) WSTACK_POP(stack);
2601 	    sz = WSTACK_POP(stack);
2602 	    goto term_array;
2603 	}
2604 	a = something;
2605 	b = WSTACK_POP(stack);
2606 	goto tailrecur;
2607     }
2608 
2609     DESTROY_WSTACK(stack);
2610     return 1;
2611 
2612 not_equal:
2613     DESTROY_WSTACK(stack);
2614     return 0;
2615 }
2616 
2617 
2618 
2619 /*
2620  * Compare objects.
2621  * Returns 0 if equal, a negative value if a < b, or a positive value if a > b.
2622  *
2623  * According to the Erlang Standard, types are ordered as follows:
2624  *   numbers < (characters) < atoms < refs < funs < ports < pids <
2625  *   tuples < maps < [] < conses < binaries.
2626  *
2627  * Note that characters are currently not implemented.
2628  *
2629  */
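/* Illustrative example of that order (Erlang syntax, not exhaustive):
 *   1 < 1.5 < an_atom < make_ref() < fun() -> ok end < Port < Pid <
 *   {1,2} < #{a => 1} < [] < [1|2] < <<1>>
 * where Port and Pid stand for any port and process identifier. */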
2630 
2631 /* cmp(Eterm a, Eterm b)
2632  *  For compatibility with HiPE - arith-based compare.
2633  */
2634 Sint cmp(Eterm a, Eterm b)
2635 {
2636     return erts_cmp(a, b, 0, 0);
2637 }
2638 
2639 Sint erts_cmp_compound(Eterm a, Eterm b, int exact, int eq_only);
2640 
2641 /* erts_cmp(Eterm a, Eterm b, int exact)
2642  * exact = 1 -> term-based compare
2643  * exact = 0 -> arith-based compare
2644  */
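/* For example (Erlang semantics, illustrative): with exact = 0 the compare
 * follows '==', so the small integer 1 and the float 1.0 compare equal (0);
 * with exact = 1 it follows the '=:='-style ordering used for map keys,
 * where 1 and 1.0 are distinct terms. */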
2645 Sint erts_cmp_compound(Eterm a, Eterm b, int exact, int eq_only)
2646 {
2647 #define PSTACK_TYPE struct erts_cmp_hashmap_state
2648     struct erts_cmp_hashmap_state {
2649         Sint wstack_rollback;
2650         int was_exact;
2651         Eterm *ap;
2652         Eterm *bp;
2653         Eterm min_key;
2654         Sint cmp_res;   /* result so far -1,0,+1 */
2655     };
2656     PSTACK_DECLARE(hmap_stack, 1);
2657     WSTACK_DECLARE(stack);
2658     WSTACK_DECLARE(b_stack); /* only used by hashmaps */
2659     Eterm* aa;
2660     Eterm* bb;
2661     int i;
2662     Sint j;
2663     int a_tag;
2664     int b_tag;
2665     ErlNode *anode;
2666     ErlNode *bnode;
2667     Uint adata;
2668     Uint bdata;
2669     Uint alen;
2670     Uint blen;
2671     Uint32 *anum;
2672     Uint32 *bnum;
2673 
2674 /* The WSTACK contains naked Eterms and Operations marked with header-tags */
2675 #define OP_BITS 4
2676 #define OP_MASK 0xF
2677 #define TERM_ARRAY_OP                 0
2678 #define SWITCH_EXACT_OFF_OP           1
2679 #define HASHMAP_PHASE1_ARE_KEYS_EQUAL 2
2680 #define HASHMAP_PHASE1_IS_MIN_KEY     3
2681 #define HASHMAP_PHASE1_CMP_VALUES     4
2682 #define HASHMAP_PHASE2_ARE_KEYS_EQUAL 5
2683 #define HASHMAP_PHASE2_IS_MIN_KEY_A   6
2684 #define HASHMAP_PHASE2_IS_MIN_KEY_B   7
2685 
2686 
2687 #define OP_WORD(OP)  (((OP)  << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER)
2688 #define TERM_ARRAY_OP_WORD(SZ) OP_WORD(((SZ) << OP_BITS) | TERM_ARRAY_OP)
2689 
2690 #define GET_OP(WORD) (ASSERT(is_header(WORD)), ((WORD) >> _TAG_PRIMARY_SIZE) & OP_MASK)
2691 #define GET_OP_ARG(WORD) (ASSERT(is_header(WORD)), ((WORD) >> (OP_BITS + _TAG_PRIMARY_SIZE)))
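/* Example encoding (follows from the macros above): a term_array of length 3
 * is pushed as TERM_ARRAY_OP_WORD(3), i.e. OP_WORD((3 << OP_BITS) | TERM_ARRAY_OP);
 * the length sits above the OP_BITS op field and the word carries
 * TAG_PRIMARY_HEADER so pop_next can tell it apart from a naked Eterm. */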
2692 
2693 
2694 #define RETURN_NEQ(cmp) { j=(cmp); ASSERT(j != 0); goto not_equal; }
2695 #define ON_CMP_GOTO(cmp) if ((j=(cmp)) == 0) goto pop_next; else goto not_equal
2696 
2697 #undef  CMP_NODES
2698 #define CMP_NODES(AN, BN)						\
2699     do {								\
2700 	if((AN) != (BN)) {						\
2701             if((AN)->sysname != (BN)->sysname)				\
2702                 RETURN_NEQ(erts_cmp_atoms((AN)->sysname, (BN)->sysname));	\
2703 	    ASSERT((AN)->creation != (BN)->creation);			\
2704             if ((AN)->creation != 0 && (BN)->creation != 0)             \
2705                 RETURN_NEQ(((AN)->creation < (BN)->creation) ? -1 : 1);	\
2706 	}								\
2707     } while (0)
2708 
2709 
2710 bodyrecur:
2711     j = 0;
2712 tailrecur:
2713     if (is_same(a,b)) {	/* Equal values or pointers. */
2714 	goto pop_next;
2715     }
2716 tailrecur_ne:
2717 
2718     /* deal with majority (?) cases by brute-force */
2719     if (is_atom(a)) {
2720 	if (is_atom(b)) {
2721 	    ON_CMP_GOTO(erts_cmp_atoms(a, b));
2722 	}
2723     } else if (is_both_small(a, b)) {
2724 	ON_CMP_GOTO(signed_val(a) - signed_val(b));
2725     }
2726 
2727     /*
2728      * Take care of cases where the types are the same.
2729      */
2730 
2731     a_tag = 42;			/* Suppress warning */
2732     switch (primary_tag(a)) {
2733     case TAG_PRIMARY_IMMED1:
2734 	switch ((a & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
2735 	case (_TAG_IMMED1_PORT >> _TAG_PRIMARY_SIZE):
2736 	    if (is_internal_port(b)) {
2737 		bnode = erts_this_node;
2738 		bdata = internal_port_data(b);
2739 	    } else if (is_external_port(b)) {
2740 		bnode = external_port_node(b);
2741 		bdata = external_port_data(b);
2742 	    } else {
2743 		a_tag = PORT_DEF;
2744 		goto mixed_types;
2745 	    }
2746 	    anode = erts_this_node;
2747 	    adata = internal_port_data(a);
2748 
2749 	port_common:
2750 	    CMP_NODES(anode, bnode);
2751 	    ON_CMP_GOTO((Sint)(adata - bdata));
2752 
2753 	case (_TAG_IMMED1_PID >> _TAG_PRIMARY_SIZE):
2754 	    if (is_internal_pid(b)) {
2755 		bnode = erts_this_node;
2756 		bdata = internal_pid_data(b);
2757 	    } else if (is_external_pid(b)) {
2758 		bnode = external_pid_node(b);
2759 		bdata = external_pid_data(b);
2760 	    } else {
2761 		a_tag = PID_DEF;
2762 		goto mixed_types;
2763 	    }
2764 	    anode = erts_this_node;
2765 	    adata = internal_pid_data(a);
2766 
2767 	pid_common:
2768 	    if (adata != bdata) {
2769 		RETURN_NEQ(adata < bdata ? -1 : 1);
2770 	    }
2771 	    CMP_NODES(anode, bnode);
2772 	    goto pop_next;
2773 	case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
2774 	    a_tag = SMALL_DEF;
2775 	    goto mixed_types;
2776 	case (_TAG_IMMED1_IMMED2 >> _TAG_PRIMARY_SIZE): {
2777 	    switch ((a & _TAG_IMMED2_MASK) >> _TAG_IMMED1_SIZE) {
2778 	    case (_TAG_IMMED2_ATOM >> _TAG_IMMED1_SIZE):
2779 		a_tag = ATOM_DEF;
2780 		goto mixed_types;
2781 	    case (_TAG_IMMED2_NIL >> _TAG_IMMED1_SIZE):
2782 		a_tag = NIL_DEF;
2783 		goto mixed_types;
2784 	    }
2785 	}
2786 	}
2787     case TAG_PRIMARY_LIST:
2788 	if (is_not_list(b)) {
2789 	    a_tag = LIST_DEF;
2790 	    goto mixed_types;
2791 	}
2792 	aa = list_val(a);
2793 	bb = list_val(b);
2794 	while (1) {
2795 	    Eterm atmp = CAR(aa);
2796 	    Eterm btmp = CAR(bb);
2797 	    if (!is_same(atmp,btmp)) {
2798 		WSTACK_PUSH2(stack,(UWord) CDR(bb),(UWord) CDR(aa));
2799 		a = atmp;
2800 		b = btmp;
2801 		goto tailrecur_ne;
2802 	    }
2803 	    atmp = CDR(aa);
2804 	    btmp = CDR(bb);
2805 	    if (is_same(atmp,btmp)) {
2806 		goto pop_next;
2807 	    }
2808 	    if (is_not_list(atmp) || is_not_list(btmp)) {
2809 		a = atmp;
2810 		b = btmp;
2811 		goto tailrecur_ne;
2812 	    }
2813 	    aa = list_val(atmp);
2814 	    bb = list_val(btmp);
2815 	}
2816     case TAG_PRIMARY_BOXED:
2817 	{
2818 	    Eterm ahdr = *boxed_val(a);
2819 	    switch ((ahdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
2820 	    case (_TAG_HEADER_ARITYVAL >> _TAG_PRIMARY_SIZE):
2821 		if (!is_tuple(b)) {
2822 		    a_tag = TUPLE_DEF;
2823 		    goto mixed_types;
2824 		}
2825 		aa = tuple_val(a);
2826 		bb = tuple_val(b);
2827 		/* compare the arities */
2828 		i = arityval(ahdr);	/* get the arity*/
2829 		if (i != arityval(*bb)) {
2830 		    RETURN_NEQ((int)(i - arityval(*bb)));
2831 		}
2832 		if (i == 0) {
2833 		    goto pop_next;
2834 		}
2835 		++aa;
2836 		++bb;
2837 		goto term_array;
2838             case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE) :
2839 		{
2840                     struct erts_cmp_hashmap_state* sp;
2841                     if (is_flatmap_header(ahdr)) {
2842                         if (!is_flatmap(b)) {
2843                             if (is_hashmap(b)) {
2844                                 aa = (Eterm *)flatmap_val(a);
2845                                 i = flatmap_get_size((flatmap_t*)aa) - hashmap_size(b);
2846                                 ASSERT(i != 0);
2847                                 RETURN_NEQ(i);
2848                             }
2849                             a_tag = MAP_DEF;
2850                             goto mixed_types;
2851                         }
2852                         aa = (Eterm *)flatmap_val(a);
2853                         bb = (Eterm *)flatmap_val(b);
2854 
2855                         i = flatmap_get_size((flatmap_t*)aa);
2856                         if (i != flatmap_get_size((flatmap_t*)bb)) {
2857                             RETURN_NEQ((int)(i - flatmap_get_size((flatmap_t*)bb)));
2858                         }
2859                         if (i == 0) {
2860                             goto pop_next;
2861                         }
2862                         aa += 2;
2863                         bb += 2;
2864                         if (exact) {
2865                             i  += 1; /* increment for tuple-keys */
2866                             goto term_array;
2867                         }
2868                         else {
2869                             /* Value array */
2870                             WSTACK_PUSH3(stack,(UWord)(bb+1),(UWord)(aa+1),TERM_ARRAY_OP_WORD(i));
2871                             /* Switch back from 'exact' key compare */
2872                             WSTACK_PUSH(stack,OP_WORD(SWITCH_EXACT_OFF_OP));
2873                             /* Now do 'exact' compare of key tuples */
2874                             a = *aa;
2875                             b = *bb;
2876                             exact = 1;
2877                             goto bodyrecur;
2878                         }
2879                     }
2880 		    if (!is_hashmap(b)) {
2881                         if (is_flatmap(b)) {
2882                             bb = (Eterm *)flatmap_val(b);
2883                             i = hashmap_size(a) - flatmap_get_size((flatmap_t*)bb);
2884                             ASSERT(i != 0);
2885                             RETURN_NEQ(i);
2886                         }
2887 			a_tag = MAP_DEF;
2888 			goto mixed_types;
2889 		    }
2890 		    i = hashmap_size(a) - hashmap_size(b);
2891 		    if (i) {
2892 			RETURN_NEQ(i);
2893 		    }
2894                     if (hashmap_size(a) == 0) {
2895                         goto pop_next;
2896                     }
2897 
2898                 /* Hashmap compare strategy:
2899                    Phase 1. While keys are identical
2900                      Step synchronously through the leaves of both trees in hash
2901                      order. Maintain the value compare result of the minimal key.
2902 
2903                    Phase 2. If key diff was found in phase 1
2904                      Ignore values from now on.
2905                      Continue iterating the trees, always advancing the one
2906                      lagging behind hash-wise. Identical keys are skipped.
2907                      A minimal key can only be a candidate tie-breaker if we
2908                      have passed that hash value in the other tree (which means
2909                      the key did not exist in the other tree).
2910                 */
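                /* Illustrative example (hypothetical maps): comparing
                   #{a => 1, x => 2} with #{a => 1, y => 2} stays in phase 1
                   while the shared key 'a' matches, then enters phase 2 once
                   the x/y key diff is seen; from there the tie-breaker is the
                   smallest key that does not exist in the other map. */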
2911 
2912                     sp = PSTACK_PUSH(hmap_stack);
2913                     hashmap_iterator_init(&stack, a, 0);
2914                     hashmap_iterator_init(&b_stack, b, 0);
2915                     sp->ap = hashmap_iterator_next(&stack);
2916                     sp->bp = hashmap_iterator_next(&b_stack);
2917                     sp->cmp_res = 0;
2918                     ASSERT(sp->ap && sp->bp);
2919 
2920                     a = CAR(sp->ap);
2921                     b = CAR(sp->bp);
2922                     sp->was_exact = exact;
2923                     exact = 1;
2924                     WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_ARE_KEYS_EQUAL));
2925                     sp->wstack_rollback = WSTACK_COUNT(stack);
2926                     goto bodyrecur;
2927 		}
2928 	    case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
2929 		if (!is_float(b)) {
2930 		    a_tag = FLOAT_DEF;
2931 		    goto mixed_types;
2932 		} else {
2933 		    FloatDef af;
2934 		    FloatDef bf;
2935 
2936 		    GET_DOUBLE(a, af);
2937 		    GET_DOUBLE(b, bf);
2938 		    ON_CMP_GOTO(erts_float_comp(af.fd, bf.fd));
2939 		}
2940 	    case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
2941 	    case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
2942 		if (!is_big(b)) {
2943 		    a_tag = BIG_DEF;
2944 		    goto mixed_types;
2945 		}
2946 		ON_CMP_GOTO(big_comp(a, b));
2947 	    case (_TAG_HEADER_EXPORT >> _TAG_PRIMARY_SIZE):
2948 		if (!is_export(b)) {
2949 		    a_tag = EXPORT_DEF;
2950 		    goto mixed_types;
2951 		} else {
2952 		    Export* a_exp = *((Export **) (export_val(a) + 1));
2953 		    Export* b_exp = *((Export **) (export_val(b) + 1));
2954 
2955 		    if ((j = erts_cmp_atoms(a_exp->info.mfa.module,
2956                                             b_exp->info.mfa.module)) != 0) {
2957 			RETURN_NEQ(j);
2958 		    }
2959 		    if ((j = erts_cmp_atoms(a_exp->info.mfa.function,
2960                                             b_exp->info.mfa.function)) != 0) {
2961 			RETURN_NEQ(j);
2962 		    }
2963 		    ON_CMP_GOTO((Sint) a_exp->info.mfa.arity - (Sint) b_exp->info.mfa.arity);
2964 		}
2965 		break;
2966 	    case (_TAG_HEADER_FUN >> _TAG_PRIMARY_SIZE):
2967 		if (!is_fun(b)) {
2968 		    a_tag = FUN_DEF;
2969 		    goto mixed_types;
2970 		} else {
2971 		    ErlFunThing* f1 = (ErlFunThing *) fun_val(a);
2972 		    ErlFunThing* f2 = (ErlFunThing *) fun_val(b);
2973 		    Sint diff;
2974 
2975                     diff = erts_cmp_atoms((f1->fe)->module, (f2->fe)->module);
2976 		    if (diff != 0) {
2977 			RETURN_NEQ(diff);
2978 		    }
2979 		    diff = f1->fe->old_index - f2->fe->old_index;
2980 		    if (diff != 0) {
2981 			RETURN_NEQ(diff);
2982 		    }
2983 		    diff = f1->fe->old_uniq - f2->fe->old_uniq;
2984 		    if (diff != 0) {
2985 			RETURN_NEQ(diff);
2986 		    }
2987 		    diff = f1->num_free - f2->num_free;
2988 		    if (diff != 0) {
2989 			RETURN_NEQ(diff);
2990 		    }
2991 		    i = f1->num_free;
2992 		    if (i == 0) goto pop_next;
2993 		    aa = f1->env;
2994 		    bb = f2->env;
2995 		    goto term_array;
2996 		}
2997 	    case (_TAG_HEADER_EXTERNAL_PID >> _TAG_PRIMARY_SIZE):
2998 		if (is_internal_pid(b)) {
2999 		    bnode = erts_this_node;
3000 		    bdata = internal_pid_data(b);
3001 		} else if (is_external_pid(b)) {
3002 		    bnode = external_pid_node(b);
3003 		    bdata = external_pid_data(b);
3004 		} else {
3005 		    a_tag = EXTERNAL_PID_DEF;
3006 		    goto mixed_types;
3007 		}
3008 		anode = external_pid_node(a);
3009 		adata = external_pid_data(a);
3010 		goto pid_common;
3011 	    case (_TAG_HEADER_EXTERNAL_PORT >> _TAG_PRIMARY_SIZE):
3012 		if (is_internal_port(b)) {
3013 		    bnode = erts_this_node;
3014 		    bdata = internal_port_data(b);
3015 		} else if (is_external_port(b)) {
3016 		    bnode = external_port_node(b);
3017 		    bdata = external_port_data(b);
3018 		} else {
3019 		    a_tag = EXTERNAL_PORT_DEF;
3020 		    goto mixed_types;
3021 		}
3022 		anode = external_port_node(a);
3023 		adata = external_port_data(a);
3024 		goto port_common;
3025 	    case (_TAG_HEADER_REF >> _TAG_PRIMARY_SIZE):
3026 		/*
3027 		 * Note! When comparing refs we need to compare ref numbers
3028 		 * (32-bit words), *not* ref data words.
3029 		 */
3030 
3031 		if (is_internal_ref(b)) {
3032 		    bnode = erts_this_node;
3033 		    blen = internal_ref_no_numbers(b);
3034 		    bnum = internal_ref_numbers(b);
3035 		} else if(is_external_ref(b)) {
3036 		    ExternalThing* bthing = external_thing_ptr(b);
3037 		    bnode = bthing->node;
3038 		    bnum = external_thing_ref_numbers(bthing);
3039 		    blen = external_thing_ref_no_numbers(bthing);
3040 		} else {
3041 		    a_tag = REF_DEF;
3042 		    goto mixed_types;
3043 		}
3044 		anode = erts_this_node;
3045 		alen = internal_ref_no_numbers(a);
3046 		anum = internal_ref_numbers(a);
3047 
3048 	    ref_common:
3049 		CMP_NODES(anode, bnode);
3050 
3051 		ASSERT(alen > 0 && blen > 0);
3052 		if (alen != blen) {
3053 		    if (alen > blen) {
3054 			do {
3055 			    if (anum[alen - 1] != 0)
3056 				RETURN_NEQ(1);
3057 			    alen--;
3058 			} while (alen > blen);
3059 		    }
3060 		    else {
3061 			do {
3062 			    if (bnum[blen - 1] != 0)
3063 				RETURN_NEQ(-1);
3064 			    blen--;
3065 			} while (alen < blen);
3066 		    }
3067 		}
3068 
3069 		ASSERT(alen == blen);
3070 		for (i = (Sint) alen - 1; i >= 0; i--)
3071 		    if (anum[i] != bnum[i])
3072 			RETURN_NEQ(anum[i] < bnum[i] ? -1 : 1);
3073 		goto pop_next;
3074 	    case (_TAG_HEADER_EXTERNAL_REF >> _TAG_PRIMARY_SIZE):
3075 		if (is_internal_ref(b)) {
3076 		    bnode = erts_this_node;
3077 		    blen = internal_ref_no_numbers(b);
3078 		    bnum = internal_ref_numbers(b);
3079 		} else if (is_external_ref(b)) {
3080 		    ExternalThing* bthing = external_thing_ptr(b);
3081 		    bnode = bthing->node;
3082 		    bnum = external_thing_ref_numbers(bthing);
3083 		    blen = external_thing_ref_no_numbers(bthing);
3084 		} else {
3085 		    a_tag = EXTERNAL_REF_DEF;
3086 		    goto mixed_types;
3087 		}
3088 		{
3089 		    ExternalThing* athing = external_thing_ptr(a);
3090 		    anode = athing->node;
3091 		    anum = external_thing_ref_numbers(athing);
3092 		    alen = external_thing_ref_no_numbers(athing);
3093 		}
3094 		goto ref_common;
3095 	    default:
3096 		/* Must be a binary */
3097 		ASSERT(is_binary(a));
3098 		if (!is_binary(b)) {
3099 		    a_tag = BINARY_DEF;
3100 		    goto mixed_types;
3101 		} else {
3102 		    Uint a_size = binary_size(a);
3103 		    Uint b_size = binary_size(b);
3104 		    Uint a_bitsize;
3105 		    Uint b_bitsize;
3106 		    Uint a_bitoffs;
3107 		    Uint b_bitoffs;
3108 		    Uint min_size;
3109 		    int cmp;
3110 		    byte* a_ptr;
3111 		    byte* b_ptr;
3112 		    if (eq_only && a_size != b_size) {
3113 		        RETURN_NEQ(a_size - b_size);
3114 		    }
3115 		    ERTS_GET_BINARY_BYTES(a, a_ptr, a_bitoffs, a_bitsize);
3116 		    ERTS_GET_BINARY_BYTES(b, b_ptr, b_bitoffs, b_bitsize);
3117 		    if ((a_bitsize | b_bitsize | a_bitoffs | b_bitoffs) == 0) {
3118 			min_size = (a_size < b_size) ? a_size : b_size;
3119 			if ((cmp = sys_memcmp(a_ptr, b_ptr, min_size)) != 0) {
3120 			    RETURN_NEQ(cmp);
3121 			}
3122 		    }
3123 		    else {
3124 			a_size = (a_size << 3) + a_bitsize;
3125 			b_size = (b_size << 3) + b_bitsize;
3126 			min_size = (a_size < b_size) ? a_size : b_size;
3127 			if ((cmp = erts_cmp_bits(a_ptr,a_bitoffs,
3128 						 b_ptr,b_bitoffs,min_size)) != 0) {
3129 			    RETURN_NEQ(cmp);
3130 			}
3131 		    }
3132 		    ON_CMP_GOTO((Sint)(a_size - b_size));
3133 		}
3134 	    }
3135 	}
3136     }
3137 
3138     /*
3139      * Take care of the case that the tags are different.
3140      */
3141 
3142  mixed_types:
3143 
3144     {
3145 	FloatDef f1, f2;
3146 	Eterm big;
3147 	Eterm aw = a;
3148 	Eterm bw = b;
3149 #define MAX_LOSSLESS_FLOAT ((double)((1LL << 53) - 2))
3150 #define MIN_LOSSLESS_FLOAT ((double)(((1LL << 53) - 2)*-1))
3151 #define BIG_ARITY_FLOAT_MAX (1024 / D_EXP) /* arity of max float as a bignum */
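	/* Descriptive note: an IEEE 754 double has a 53-bit significand, so
	   integers with magnitude below 2^53 convert to double without loss;
	   the largest finite double is just below 2^1024, hence the
	   BIG_ARITY_FLOAT_MAX bound above. */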
3152 	Eterm big_buf[BIG_NEED_SIZE(BIG_ARITY_FLOAT_MAX)];
3153 
3154 	b_tag = tag_val_def(bw);
3155 
3156 	switch(_NUMBER_CODE(a_tag, b_tag)) {
3157 	case SMALL_BIG:
3158 	    j = big_sign(bw) ? 1 : -1;
3159 	    break;
3160 	case BIG_SMALL:
3161 	    j = big_sign(aw) ? -1 : 1;
3162 	    break;
3163 	case SMALL_FLOAT:
3164 	    if (exact) goto exact_fall_through;
3165 	    GET_DOUBLE(bw, f2);
3166 	    if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) {
3167 		/* Float is within the no loss limit */
3168 		f1.fd = signed_val(aw);
3169 		j = erts_float_comp(f1.fd, f2.fd);
3170 	    }
3171 #if ERTS_SIZEOF_ETERM == 8
3172 	    else if (f2.fd > (double) (MAX_SMALL + 1)) {
3173 		/* Float is a positive bignum, i.e. bigger */
3174 		j = -1;
3175 	    } else if (f2.fd < (double) (MIN_SMALL - 1)) {
3176 		/* Float is a negative bignum, i.e. smaller */
3177 		j = 1;
3178 	    } else {
3179 		/* Float is a Sint but less precise */
3180 		j = signed_val(aw) - (Sint) f2.fd;
3181 	    }
3182 #else
3183 	    else {
3184 		/* If float is positive it is bigger than small */
3185 		j = (f2.fd > 0.0) ? -1 : 1;
3186 	    }
3187 #endif /* ERTS_SIZEOF_ETERM == 8 */
3188 	    break;
3189         case FLOAT_BIG:
3190 	    if (exact) goto exact_fall_through;
3191 	{
3192 	    Wterm tmp = aw;
3193 	    aw = bw;
3194 	    bw = tmp;
3195 	}/* fall through */
3196 	case BIG_FLOAT:
3197 	    if (exact) goto exact_fall_through;
3198 	    GET_DOUBLE(bw, f2);
3199 	    if ((f2.fd < (double) (MAX_SMALL + 1))
3200 		    && (f2.fd > (double) (MIN_SMALL - 1))) {
3201 		/* Float is a Sint */
3202 		j = big_sign(aw) ? -1 : 1;
3203 	    } else if (big_arity(aw) > BIG_ARITY_FLOAT_MAX
3204 		       || pow(2.0,(big_arity(aw)-1)*D_EXP) > fabs(f2.fd)) {
3205 		/* If bignum size shows that it is bigger than the abs float */
3206 		j = big_sign(aw) ? -1 : 1;
3207 	    } else if (big_arity(aw) < BIG_ARITY_FLOAT_MAX
3208 		       && (pow(2.0,(big_arity(aw))*D_EXP)-1.0) < fabs(f2.fd)) {
3209 		/* If bignum size shows that it is smaller than the abs float */
3210 		j = f2.fd < 0 ? 1 : -1;
3211 	    } else if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) {
3212 		/* Float is within the no loss limit */
3213 		if (big_to_double(aw, &f1.fd) < 0) {
3214 		    j = big_sign(aw) ? -1 : 1;
3215 		} else {
3216 		    j = erts_float_comp(f1.fd, f2.fd);
3217 		}
3218 	    } else {
3219 		big = double_to_big(f2.fd, big_buf, sizeof(big_buf)/sizeof(Eterm));
3220 		j = big_comp(aw, big);
3221 	    }
3222 	    if (_NUMBER_CODE(a_tag, b_tag) == FLOAT_BIG) {
3223 		j = -j;
3224 	    }
3225 	    break;
3226 	case FLOAT_SMALL:
3227 	    if (exact) goto exact_fall_through;
3228 	    GET_DOUBLE(aw, f1);
3229 	    if (f1.fd < MAX_LOSSLESS_FLOAT && f1.fd > MIN_LOSSLESS_FLOAT) {
3230 		/* Float is within the no loss limit */
3231 		f2.fd = signed_val(bw);
3232 		j = erts_float_comp(f1.fd, f2.fd);
3233 	    }
3234 #if ERTS_SIZEOF_ETERM == 8
3235 	    else if (f1.fd > (double) (MAX_SMALL + 1)) {
3236 		/* Float is a positive bignum, i.e. bigger */
3237 		j = 1;
3238 	    } else if (f1.fd < (double) (MIN_SMALL - 1)) {
3239 		/* Float is a negative bignum, i.e. smaller */
3240 		j = -1;
3241 	    } else {
3242 		/* Float is a Sint but less precise */
3243 		j = (Sint) f1.fd - signed_val(bw);
3244 	    }
3245 #else
3246 	    else {
3247 		/* If float is positive it is bigger than small */
3248 		j = (f1.fd > 0.0) ? 1 : -1;
3249 	    }
3250 #endif /* ERTS_SIZEOF_ETERM == 8 */
3251 	    break;
3252 exact_fall_through:
3253 	default:
3254 	    j = b_tag - a_tag;
3255 	}
3256     }
3257     if (j == 0) {
3258 	goto pop_next;
3259     } else {
3260 	goto not_equal;
3261     }
3262 
3263 term_array: /* arrays in 'aa' and 'bb', length in 'i' */
3264     ASSERT(i>0);
3265     while (--i) {
3266 	a = *aa++;
3267 	b = *bb++;
3268 	if (!is_same(a, b)) {
3269 	    if (is_atom(a) && is_atom(b)) {
3270 		if ((j = erts_cmp_atoms(a, b)) != 0) {
3271 		    goto not_equal;
3272 		}
3273 	    } else if (is_both_small(a, b)) {
3274 		if ((j = signed_val(a)-signed_val(b)) != 0) {
3275 		    goto not_equal;
3276 		}
3277 	    } else {
3278 		WSTACK_PUSH3(stack, (UWord)bb, (UWord)aa, TERM_ARRAY_OP_WORD(i));
3279 		goto tailrecur_ne;
3280 	    }
3281 	}
3282     }
3283     a = *aa;
3284     b = *bb;
3285     goto tailrecur;
3286 
3287 pop_next:
3288     if (!WSTACK_ISEMPTY(stack)) {
3289 	UWord something = WSTACK_POP(stack);
3290         struct erts_cmp_hashmap_state* sp;
3291 	if (primary_tag((Eterm) something) == TAG_PRIMARY_HEADER) { /* an operation */
3292 	    switch (GET_OP(something)) {
3293 	    case TERM_ARRAY_OP:
3294 		i = GET_OP_ARG(something);
3295 		aa = (Eterm*)WSTACK_POP(stack);
3296 		bb = (Eterm*) WSTACK_POP(stack);
3297 		goto term_array;
3298 
3299 	    case SWITCH_EXACT_OFF_OP:
3300 		/* Done with exact compare of map keys, switch back */
3301 		ASSERT(exact);
3302 		exact = 0;
3303 		goto pop_next;
3304 
3305             case HASHMAP_PHASE1_ARE_KEYS_EQUAL: {
3306                 sp = PSTACK_TOP(hmap_stack);
3307                 if (j) {
3308                     /* Key diff found, enter phase 2 */
3309                     if (hashmap_key_hash_cmp(sp->ap, sp->bp) < 0) {
3310                         sp->min_key = CAR(sp->ap);
3311                         sp->cmp_res = -1;
3312                         sp->ap = hashmap_iterator_next(&stack);
3313                     }
3314                     else {
3315                         sp->min_key = CAR(sp->bp);
3316                         sp->cmp_res = 1;
3317                         sp->bp = hashmap_iterator_next(&b_stack);
3318                     }
3319                     exact = 1; /* only exact key compares in phase 2 */
3320                     goto case_HASHMAP_PHASE2_LOOP;
3321                 }
3322 
3323                 /* No key diff found so far, compare values if min key */
3324 
3325                 if (sp->cmp_res) {
3326                     a = CAR(sp->ap);
3327                     b = sp->min_key;
3328                     exact = 1;
3329                     WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_IS_MIN_KEY));
3330                     sp->wstack_rollback = WSTACK_COUNT(stack);
3331                     goto bodyrecur;
3332                 }
3333                 /* no min key-value found yet */
3334                 a = CDR(sp->ap);
3335                 b = CDR(sp->bp);
3336                 exact = sp->was_exact;
3337                 WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_CMP_VALUES));
3338                 sp->wstack_rollback = WSTACK_COUNT(stack);
3339                 goto bodyrecur;
3340             }
3341             case HASHMAP_PHASE1_IS_MIN_KEY:
3342                 sp = PSTACK_TOP(hmap_stack);
3343                 if (j < 0) {
3344                     a = CDR(sp->ap);
3345                     b = CDR(sp->bp);
3346                     exact = sp->was_exact;
3347                     WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_CMP_VALUES));
3348                     sp->wstack_rollback = WSTACK_COUNT(stack);
3349                     goto bodyrecur;
3350                 }
3351                 goto case_HASHMAP_PHASE1_LOOP;
3352 
3353             case HASHMAP_PHASE1_CMP_VALUES:
3354                 sp = PSTACK_TOP(hmap_stack);
3355                 if (j) {
3356                     sp->cmp_res = j;
3357                     sp->min_key = CAR(sp->ap);
3358                 }
3359             case_HASHMAP_PHASE1_LOOP:
3360                 sp->ap = hashmap_iterator_next(&stack);
3361                 sp->bp = hashmap_iterator_next(&b_stack);
3362                 if (!sp->ap) {
3363                     /* end of maps with identical keys */
3364                     ASSERT(!sp->bp);
3365                     j = sp->cmp_res;
3366                     exact = sp->was_exact;
3367                     (void) PSTACK_POP(hmap_stack);
3368                     ON_CMP_GOTO(j);
3369                 }
3370                 a = CAR(sp->ap);
3371                 b = CAR(sp->bp);
3372                 exact = 1;
3373                 WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_ARE_KEYS_EQUAL));
3374                 sp->wstack_rollback = WSTACK_COUNT(stack);
3375                 goto bodyrecur;
3376 
3377             case_HASHMAP_PHASE2_LOOP:
3378                 if (sp->ap && sp->bp) {
3379                     a = CAR(sp->ap);
3380                     b = CAR(sp->bp);
3381                     ASSERT(exact);
3382                     WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_ARE_KEYS_EQUAL));
3383                     sp->wstack_rollback = WSTACK_COUNT(stack);
3384                     goto bodyrecur;
3385                 }
3386                 goto case_HASHMAP_PHASE2_NEXT_STEP;
3387 
3388             case HASHMAP_PHASE2_ARE_KEYS_EQUAL:
3389                 sp = PSTACK_TOP(hmap_stack);
3390                 if (j == 0) {
3391                     /* keys are equal, skip them */
3392                     sp->ap = hashmap_iterator_next(&stack);
3393                     sp->bp = hashmap_iterator_next(&b_stack);
3394                     goto case_HASHMAP_PHASE2_LOOP;
3395                 }
3396                 /* fall through */
3397             case_HASHMAP_PHASE2_NEXT_STEP:
3398                 if (sp->ap || sp->bp) {
3399                     if (hashmap_key_hash_cmp(sp->ap, sp->bp) < 0) {
3400                         ASSERT(sp->ap);
3401                         a = CAR(sp->ap);
3402                         b = sp->min_key;
3403                         ASSERT(exact);
3404                         WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_IS_MIN_KEY_A));
3405                     }
3406                     else { /* hash_cmp > 0 */
3407                         ASSERT(sp->bp);
3408                         a = CAR(sp->bp);
3409                         b = sp->min_key;
3410                         ASSERT(exact);
3411                         WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_IS_MIN_KEY_B));
3412                     }
3413                     sp->wstack_rollback = WSTACK_COUNT(stack);
3414                     goto bodyrecur;
3415                 }
3416                 /* End of both maps */
3417                 j = sp->cmp_res;
3418                 exact = sp->was_exact;
3419                 (void) PSTACK_POP(hmap_stack);
3420                 ON_CMP_GOTO(j);
3421 
3422             case HASHMAP_PHASE2_IS_MIN_KEY_A:
3423                 sp = PSTACK_TOP(hmap_stack);
3424                 if (j < 0) {
3425                     sp->min_key = CAR(sp->ap);
3426                     sp->cmp_res = -1;
3427                 }
3428                 sp->ap = hashmap_iterator_next(&stack);
3429                 goto case_HASHMAP_PHASE2_LOOP;
3430 
3431             case HASHMAP_PHASE2_IS_MIN_KEY_B:
3432                 sp = PSTACK_TOP(hmap_stack);
3433                 if (j < 0) {
3434                     sp->min_key = CAR(sp->bp);
3435                     sp->cmp_res = 1;
3436                 }
3437                 sp->bp = hashmap_iterator_next(&b_stack);
3438                 goto case_HASHMAP_PHASE2_LOOP;
3439 
3440             default:
3441                 ASSERT(!"Invalid cmp op");
3442             } /* switch */
3443 	}
3444 	a = (Eterm) something;
3445 	b = (Eterm) WSTACK_POP(stack);
3446 	goto tailrecur;
3447     }
3448 
3449     ASSERT(PSTACK_IS_EMPTY(hmap_stack));
3450     PSTACK_DESTROY(hmap_stack);
3451     WSTACK_DESTROY(stack);
3452     WSTACK_DESTROY(b_stack);
3453     return 0;
3454 
3455 not_equal:
3456     if (!PSTACK_IS_EMPTY(hmap_stack) && !eq_only) {
3457         WSTACK_ROLLBACK(stack, PSTACK_TOP(hmap_stack)->wstack_rollback);
3458         goto pop_next;
3459     }
3460     PSTACK_DESTROY(hmap_stack);
3461     WSTACK_DESTROY(stack);
3462     WSTACK_DESTROY(b_stack);
3463     return j;
3464 
3465 #undef CMP_NODES
3466 }
3467 
3468 
3469 Eterm
3470 store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns)
3471 {
3472     struct erl_off_heap_header *ohhp;
3473     Uint i;
3474     Uint size;
3475     Eterm *from_hp;
3476     Eterm *to_hp = *hpp;
3477 
3478     ASSERT(is_external(ns) || is_internal_ref(ns));
3479 
3480     from_hp = boxed_val(ns);
3481     size = thing_arityval(*from_hp) + 1;
3482     *hpp += size;
3483 
3484     for(i = 0; i < size; i++)
3485 	to_hp[i] = from_hp[i];
3486 
3487     if (is_external_header(*from_hp)) {
3488 	ExternalThing *etp = (ExternalThing *) from_hp;
3489 	ASSERT(is_external(ns));
3490         erts_ref_node_entry(etp->node, 2, make_boxed(to_hp));
3491     }
3492     else if (is_ordinary_ref_thing(from_hp))
3493 	return make_internal_ref(to_hp);
3494     else {
3495 	ErtsMRefThing *mreft = (ErtsMRefThing *) from_hp;
3496         ErtsMagicBinary *mb = mreft->mb;
3497 	ASSERT(is_magic_ref_thing(from_hp));
3498 	erts_refc_inc(&mb->intern.refc, 2);
3499         OH_OVERHEAD(oh, mb->orig_size / sizeof(Eterm));
3500     }
3501 
3502     ohhp = (struct erl_off_heap_header*) to_hp;
3503     ohhp->next = oh->first;
3504     oh->first = ohhp;
3505 
3506     return make_boxed(to_hp);
3507 }
3508 
3509 Eterm
3510 store_external_or_ref_in_proc_(Process *proc, Eterm ns)
3511 {
3512     Uint sz;
3513     Uint *hp;
3514 
3515     ASSERT(is_external(ns) || is_internal_ref(ns));
3516 
3517     sz = NC_HEAP_SIZE(ns);
3518     ASSERT(sz > 0);
3519     hp = HAlloc(proc, sz);
3520     return store_external_or_ref_(&hp, &MSO(proc), ns);
3521 }
3522 
3523 void bin_write(fmtfn_t to, void *to_arg, byte* buf, size_t sz)
3524 {
3525     size_t i;
3526 
3527     for (i=0;i<sz;i++) {
3528 	if (IS_DIGIT(buf[i]))
3529 	    erts_print(to, to_arg, "%d,", buf[i]);
3530 	else if (IS_PRINT(buf[i])) {
3531 	    erts_print(to, to_arg, "%c,", buf[i]);
3532 	}
3533 	else
3534 	    erts_print(to, to_arg, "%d,", buf[i]);
3535     }
3536     erts_putc(to, to_arg, '\n');
3537 }
3538 
3539 /* Fill buf with the contents of the byte list 'list'.
3540  * Return the number of characters in the list,
3541  * or -1 for a type error,
3542  * or -2 for not enough buffer space (buffer contains a truncated result).
3543  */
3544 Sint
3545 intlist_to_buf(Eterm list, char *buf, Sint len)
3546 {
3547     Eterm* listptr;
3548     Sint sz = 0;
3549 
3550     if (is_nil(list))
3551 	return 0;
3552     if (is_not_list(list))
3553 	return -1;
3554     listptr = list_val(list);
3555 
3556     while (sz < len) {
3557 	if (!is_byte(*listptr))
3558 	    return -1;
3559 	buf[sz++] = unsigned_val(*listptr);
3560 	if (is_nil(*(listptr + 1)))
3561 	    return(sz);
3562 	if (is_not_list(*(listptr + 1)))
3563 	    return -1;
3564 	listptr = list_val(*(listptr + 1));
3565     }
3566     return -2;			/* not enough space */
3567 }
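
/*
 * Usage sketch (illustrative, not part of the original source): a caller is
 * assumed to own a buffer and to act on the return convention above, e.g.
 *
 *     char buf[256];
 *     Sint n = intlist_to_buf(list, buf, sizeof(buf));
 *
 * where n == -1 signals a type error (not a flat list of bytes), n == -2
 * signals that the list did not fit (buf then holds a truncated copy), and
 * any other value is the number of bytes now stored in buf.
 */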
3568 
3569 /** @brief Fill buf with the UTF8 contents of the unicode list
3570  * @param len Max number of characters to write.
3571  * @param written NULL or bytes written.
3572  * @return 0 ok,
3573  *        -1 type error,
3574  *        -2 list too long, only \c len characters written
3575  */
3576 int
3577 erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len, Sint* written)
3578 {
3579     Eterm* listptr;
3580     Sint sz = 0;
3581     Sint val;
3582     int res;
3583 
3584     while (1) {
3585         if (is_nil(list)) {
3586             res = 0;
3587             break;
3588         }
3589         if (is_not_list(list)) {
3590             res = -1;
3591             break;
3592         }
3593         listptr = list_val(list);
3594 
3595         if (len-- <= 0) {
3596             res = -2;
3597             break;
3598         }
3599 
3600 	if (is_not_small(CAR(listptr))) {
3601 	    res = -1;
3602             break;
3603 	}
3604 	val = signed_val(CAR(listptr));
3605 	if (0 <= val && val < 0x80) {
3606 	    buf[sz] = val;
3607 	    sz++;
3608 	} else if (val < 0x800) {
3609 	    buf[sz+0] = 0xC0 | (val >> 6);
3610 	    buf[sz+1] = 0x80 | (val & 0x3F);
3611 	    sz += 2;
3612 	} else if (val < 0x10000UL) {
3613 	    if (0xD800 <= val && val <= 0xDFFF) {
3614 		res = -1;
3615                 break;
3616 	    }
3617 	    buf[sz+0] = 0xE0 | (val >> 12);
3618 	    buf[sz+1] = 0x80 | ((val >> 6) & 0x3F);
3619 	    buf[sz+2] = 0x80 | (val & 0x3F);
3620 	    sz += 3;
3621 	} else if (val < 0x110000) {
3622 	    buf[sz+0] = 0xF0 | (val >> 18);
3623 	    buf[sz+1] = 0x80 | ((val >> 12) & 0x3F);
3624 	    buf[sz+2] = 0x80 | ((val >> 6) & 0x3F);
3625 	    buf[sz+3] = 0x80 | (val & 0x3F);
3626 	    sz += 4;
3627 	} else {
3628             res = -1;
3629             break;
3630 	}
3631 	list = CDR(listptr);
3632     }
3633 
3634     if (written)
3635         *written = sz;
3636     return res;
3637 }
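
/*
 * Worked example (added for clarity, not in the original source): the
 * branches above map code point ranges to UTF-8 sequence lengths, e.g.
 * U+0041 -> 0x41 (1 byte), U+00E9 -> 0xC3 0xA9 (2 bytes),
 * U+20AC -> 0xE2 0x82 0xAC (3 bytes) and U+1F600 -> 0xF0 0x9F 0x98 0x80
 * (4 bytes), while surrogates (U+D800..U+DFFF) and values at or above
 * 0x110000 are rejected with -1.
 */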
3638 
3639 Sint
3640 erts_unicode_list_to_buf_len(Eterm list)
3641 {
3642     Eterm* listptr;
3643     Sint sz = 0;
3644 
3645     if (is_nil(list)) {
3646 	return 0;
3647     }
3648     if (is_not_list(list)) {
3649 	return -1;
3650     }
3651     listptr = list_val(list);
3652 
3653     while (1) {
3654 	Sint val;
3655 
3656 	if (is_not_small(CAR(listptr))) {
3657 	    return -1;
3658 	}
3659 	val = signed_val(CAR(listptr));
3660 	if (0 <= val && val < 0x80) {
3661 	    sz++;
3662 	} else if (val < 0x800) {
3663 	    sz += 2;
3664 	} else if (val < 0x10000UL) {
3665 	    if (0xD800 <= val && val <= 0xDFFF) {
3666 		return -1;
3667 	    }
3668 	    sz += 3;
3669 	} else if (val < 0x110000) {
3670 	    sz += 4;
3671 	} else {
3672 	    return -1;
3673 	}
3674 	list = CDR(listptr);
3675 	if (is_nil(list)) {
3676 	    return sz;
3677 	}
3678 	if (is_not_list(list)) {
3679 	    return -1;
3680 	}
3681 	listptr = list_val(list);
3682     }
3683 }
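
/*
 * Usage sketch (illustrative, not in the original source): note that the
 * 'len' argument of erts_unicode_list_to_buf() counts list elements
 * (characters), while erts_unicode_list_to_buf_len() returns the UTF-8 size
 * in bytes. A caller that knows the character count (here the assumed
 * variable 'chars') might therefore do:
 *
 *     Sint bytes = erts_unicode_list_to_buf_len(list);
 *     if (bytes >= 0) {
 *         byte *buf = erts_alloc(ERTS_ALC_T_TMP, bytes);
 *         Sint written;
 *         int res = erts_unicode_list_to_buf(list, buf, chars, &written);
 *         ... on res == 0 use buf[0..written-1], then erts_free(ERTS_ALC_T_TMP, buf) ...
 *     }
 */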
3684 
3685 /* Writes an integer in the given base to the end of the buffer, returning
3686  * the number of characters written (including any minus sign).
3687  * (*buf) is a pointer to the buffer, and is set to the start of the
3688  * resulting string on return. No terminating NUL is written. */
3689 int Sint_to_buf(Sint n, int base, char **buf, size_t buf_size)
3690 {
3691     char *p = &(*buf)[buf_size - 1];
3692     int sign = 0, size = 0;
3693 
3694     ASSERT(base >= 2 && base <= 36);
3695 
3696     if (n == 0) {
3697         *p-- = '0';
3698         size++;
3699     } else if (n < 0) {
3700         sign = 1;
3701         n = -n;
3702     }
3703 
3704     while (n != 0) {
3705         int digit = n % base;
3706 
3707         if (digit < 10) {
3708             *p-- = '0' + digit;
3709         } else {
3710             *p-- = 'A' + (digit - 10);
3711         }
3712 
3713         size++;
3714 
3715         n /= base;
3716     }
3717 
3718     if (sign) {
3719         *p-- = '-';
3720         size++;
3721     }
3722 
3723     *buf = p + 1;
3724 
3725     return size;
3726 }
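
/*
 * Usage sketch (illustrative, not in the original source): the characters
 * are written backwards from the end of the caller's buffer and *buf is
 * moved to the start of the result.
 *
 *     char digits[32];
 *     char *p = digits;
 *     int n = Sint_to_buf(-255, 16, &p, sizeof(digits));
 *
 * leaves n == 3 with p pointing at the characters "-FF" inside 'digits'.
 */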
3727 
3728 /* Build a list of integers in some safe memory area.
3729 ** Memory must be preallocated prior to the call, 2*len words in size.
3730 ** hpp is a pointer to the "heap" pointer; on return
3731 ** this pointer is updated to point past the list.
3732 */
3733 
3734 Eterm
3735 buf_to_intlist(Eterm** hpp, const char *buf, size_t len, Eterm tail)
3736 {
3737     Eterm* hp = *hpp;
3738     size_t i = len;
3739 
3740     while(i != 0) {
3741 	--i;
3742 	tail = CONS(hp, make_small((Uint)(byte)buf[i]), tail);
3743 	hp += 2;
3744     }
3745 
3746     *hpp = hp;
3747     return tail;
3748 }
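
/*
 * Usage sketch (illustrative, not in the original source): the caller must
 * reserve 2*len words of heap before the call, e.g. on a process heap:
 *
 *     Eterm *hp = HAlloc(p, 2*len);
 *     Eterm list = buf_to_intlist(&hp, buf, len, NIL);
 *
 * after which hp points just past the last cons cell built.
 */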
3749 
3750 /*
3751 ** Write an iolist into a buffer.
3752 **
3753 ** An iolist is defined as:
3754 **
3755 ** iohead ::= Binary
3756 **        |   Byte (i.e. an integer in the range [0..255])
3757 **        |   iolist
3758 **        ;
3759 **
3760 ** iotail ::= []
3761 **        |   Binary  (added by tony)
3762 **        |   iolist
3763 **        ;
3764 **
3765 ** iolist ::= []
3766 **        |   Binary
3767 **        |   [ iohead | iotail]
3768 **        ;
3769 **
3770 ** Return the number of remaining bytes in the buffer on success,
3771 **        ERTS_IOLIST_TO_BUF_OVERFLOW on overflow,
3772 **        ERTS_IOLIST_TO_BUF_TYPE_ERROR on type error (including the case where the result would not be a whole number of bytes)
3773 **
3774 ** Note!
3775 ** Do not detect input data errors in this function that are not detected by erts_iolist_size!
3776 **
3777 ** A caller should be able to rely on a successful return from erts_iolist_to_buf
3778 ** if erts_iolist_size has previously been called successfully and erts_iolist_to_buf
3779 ** is called with a buffer at least as large as the size reported by erts_iolist_size.
3780 **
3781 */
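
/*
 * Example (added for clarity, not in the original source): by the grammar
 * above, [104, <<"ello">>, [32, [<<"wo">>, "rld"]] | <<"!">>] is a valid
 * iolist whose flattened content is "hello world!".
 */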
3782 
3783 typedef enum {
3784     ERTS_IL2B_BCOPY_OK,
3785     ERTS_IL2B_BCOPY_YIELD,
3786     ERTS_IL2B_BCOPY_OVERFLOW,
3787     ERTS_IL2B_BCOPY_TYPE_ERROR
3788 } ErtsIL2BBCopyRes;
3789 
3790 static ErtsIL2BBCopyRes
3791 iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp);
3792 
3793 static ERTS_INLINE ErlDrvSizeT
3794 iolist_to_buf(const int yield_support,
3795 	      ErtsIOList2BufState *state,
3796 	      Eterm obj,
3797 	      char* buf,
3798 	      ErlDrvSizeT alloced_len)
3799 {
3800 #undef IOLIST_TO_BUF_BCOPY
3801 #define IOLIST_TO_BUF_BCOPY(CONSP)					\
3802 do {									\
3803     size_t size = binary_size(obj);					\
3804     if (size > 0) {							\
3805 	Uint bitsize;							\
3806 	byte* bptr;							\
3807 	Uint bitoffs;							\
3808 	Uint num_bits;							\
3809 	if (yield_support) {						\
3810 	    size_t max_size = ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;	\
3811 	    if (yield_count > 0)					\
3812 		max_size *= yield_count+1;				\
3813 	    if (size > max_size) {					\
3814 		state->objp = CONSP;					\
3815 		goto L_bcopy_yield;					\
3816 	    }								\
3817 	    if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) {	\
3818 		int cost = (int) size;					\
3819 		cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;	\
3820 		yield_count -= cost;					\
3821 	    }								\
3822 	}								\
3823 	if (len < size)							\
3824 	    goto L_overflow;						\
3825 	ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);		\
3826 	if (bitsize != 0)						\
3827 	    goto L_type_error;						\
3828 	num_bits = 8*size;						\
3829 	copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);		\
3830 	buf += size;							\
3831 	len -= size;							\
3832     }									\
3833 } while (0)
3834 
3835     ErlDrvSizeT res, len;
3836     Eterm* objp = NULL;
3837     int init_yield_count;
3838     int yield_count;
3839     DECLARE_ESTACK(s);
3840 
3841     len = (ErlDrvSizeT) alloced_len;
3842 
3843     if (!yield_support) {
3844 	yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
3845 	goto L_again;
3846     }
3847     else {
3848 
3849 	if (state->iolist.reds_left <= 0)
3850 	    return ERTS_IOLIST_TO_BUF_YIELD;
3851 
3852 	ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
3853 	init_yield_count = (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED
3854 			   * state->iolist.reds_left);
3855 	yield_count = init_yield_count;
3856 
3857 	if (!state->iolist.estack.start)
3858 	    goto L_again;
3859 	else {
3860 	    int chk_stack;
3861 	    /* Restart; restore state... */
3862 	    ESTACK_RESTORE(s, &state->iolist.estack);
3863 
3864 	    if (!state->bcopy.bptr)
3865 		chk_stack = 0;
3866 	    else {
3867 		chk_stack = 1;
3868 		switch (iolist_to_buf_bcopy(state, THE_NON_VALUE, &yield_count)) {
3869 		case ERTS_IL2B_BCOPY_OK:
3870 		    break;
3871 		case ERTS_IL2B_BCOPY_YIELD:
3872 		    BUMP_ALL_REDS(state->iolist.c_p);
3873 		    state->iolist.reds_left = 0;
3874 		    ESTACK_SAVE(s, &state->iolist.estack);
3875 		    return ERTS_IOLIST_TO_BUF_YIELD;
3876 		case ERTS_IL2B_BCOPY_OVERFLOW:
3877 		    goto L_overflow;
3878 		case ERTS_IL2B_BCOPY_TYPE_ERROR:
3879 		    goto L_type_error;
3880 		}
3881 	    }
3882 
3883 	    obj = state->iolist.obj;
3884 	    buf = state->buf;
3885 	    len = state->len;
3886 	    objp = state->objp;
3887 	    state->objp = NULL;
3888 	    if (objp)
3889 		goto L_tail;
3890 	    if (!chk_stack)
3891 		goto L_again;
3892 	    /* check stack */
3893 	}
3894     }
3895 
3896     while (!ESTACK_ISEMPTY(s)) {
3897 	obj = ESTACK_POP(s);
3898     L_again:
3899 	if (is_list(obj)) {
3900 	    while (1) { /* Tail loop */
3901 		while (1) { /* Head loop */
3902 		    if (yield_support && --yield_count <= 0)
3903 			goto L_yield;
3904 		    objp = list_val(obj);
3905 		    obj = CAR(objp);
3906 		    if (is_byte(obj)) {
3907 			if (len == 0) {
3908 			    goto L_overflow;
3909 			}
3910 			*buf++ = unsigned_val(obj);
3911 			len--;
3912 		    } else if (is_binary(obj)) {
3913 			IOLIST_TO_BUF_BCOPY(objp);
3914 		    } else if (is_list(obj)) {
3915 			ESTACK_PUSH(s, CDR(objp));
3916 			continue; /* Head loop */
3917 		    } else if (is_not_nil(obj)) {
3918 			goto L_type_error;
3919 		    }
3920 		    break;
3921 		}
3922 
3923 	    L_tail:
3924 
3925 		obj = CDR(objp);
3926 
3927 		if (is_list(obj)) {
3928 		    continue; /* Tail loop */
3929 		} else if (is_binary(obj)) {
3930 		    IOLIST_TO_BUF_BCOPY(NULL);
3931 		} else if (is_not_nil(obj)) {
3932 		    goto L_type_error;
3933 		}
3934 		break;
3935 	    }
3936 	} else if (is_binary(obj)) {
3937 	    IOLIST_TO_BUF_BCOPY(NULL);
3938 	} else if (is_not_nil(obj)) {
3939 	    goto L_type_error;
3940 	} else if (yield_support && --yield_count <= 0)
3941 	    goto L_yield;
3942     }
3943 
3944     res = len;
3945 
3946  L_return:
3947 
3948     DESTROY_ESTACK(s);
3949 
3950     if (yield_support) {
3951 	int reds;
3952 	CLEAR_SAVED_ESTACK(&state->iolist.estack);
3953 	reds = ((init_yield_count - yield_count - 1)
3954 		/ ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED) + 1;
3955 	BUMP_REDS(state->iolist.c_p, reds);
3956 	state->iolist.reds_left -= reds;
3957 	if (state->iolist.reds_left < 0)
3958 	    state->iolist.reds_left = 0;
3959     }
3960 
3961 
3962     return res;
3963 
3964  L_type_error:
3965     res = ERTS_IOLIST_TO_BUF_TYPE_ERROR;
3966     goto L_return;
3967 
3968  L_overflow:
3969     res = ERTS_IOLIST_TO_BUF_OVERFLOW;
3970     goto L_return;
3971 
3972  L_bcopy_yield:
3973 
3974     state->buf = buf;
3975     state->len = len;
3976 
3977     switch (iolist_to_buf_bcopy(state, obj, &yield_count)) {
3978     case ERTS_IL2B_BCOPY_OK:
3979 	ERTS_INTERNAL_ERROR("Missing yield");
3980     case ERTS_IL2B_BCOPY_YIELD:
3981 	BUMP_ALL_REDS(state->iolist.c_p);
3982 	state->iolist.reds_left = 0;
3983 	ESTACK_SAVE(s, &state->iolist.estack);
3984 	return ERTS_IOLIST_TO_BUF_YIELD;
3985     case ERTS_IL2B_BCOPY_OVERFLOW:
3986 	goto L_overflow;
3987     case ERTS_IL2B_BCOPY_TYPE_ERROR:
3988 	goto L_type_error;
3989     }
3990 
3991  L_yield:
3992 
3993     BUMP_ALL_REDS(state->iolist.c_p);
3994     state->iolist.reds_left = 0;
3995     state->iolist.obj = obj;
3996     state->buf = buf;
3997     state->len = len;
3998     ESTACK_SAVE(s, &state->iolist.estack);
3999     return ERTS_IOLIST_TO_BUF_YIELD;
4000 
4001 #undef IOLIST_TO_BUF_BCOPY
4002 }
4003 
4004 static ErtsIL2BBCopyRes
4005 iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp)
4006 {
4007     ErtsIL2BBCopyRes res;
4008     char *buf = state->buf;
4009     ErlDrvSizeT len = state->len;
4010     byte* bptr;
4011     size_t size;
4012     size_t max_size;
4013     Uint bitoffs;
4014     Uint num_bits;
4015     int yield_count = *yield_countp;
4016 
4017     if (state->bcopy.bptr) {
4018 	bptr = state->bcopy.bptr;
4019 	size = state->bcopy.size;
4020 	bitoffs = state->bcopy.bitoffs;
4021 	state->bcopy.bptr = NULL;
4022     }
4023     else {
4024 	Uint bitsize;
4025 
4026 	ASSERT(is_binary(obj));
4027 
4028 	size = binary_size(obj);
4029 	if (size <= 0)
4030 	    return ERTS_IL2B_BCOPY_OK;
4031 
4032 	if (len < size)
4033 	    return ERTS_IL2B_BCOPY_OVERFLOW;
4034 
4035 	ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
4036 	if (bitsize != 0)
4037 	    return ERTS_IL2B_BCOPY_TYPE_ERROR;
4038     }
4039 
4040     ASSERT(size > 0);
4041     max_size = (size_t) ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
4042     if (yield_count > 0)
4043 	max_size *= (size_t) (yield_count+1);
4044 
4045     if (size <= max_size) {
4046 	if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) {
4047 	    int cost = (int) size;
4048 	    cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
4049 	    yield_count -= cost;
4050 	}
4051 	res = ERTS_IL2B_BCOPY_OK;
4052     }
4053     else {
4054 	ASSERT(0 < max_size && max_size < size);
4055 	yield_count = 0;
4056 	state->bcopy.bptr = bptr + max_size;
4057 	state->bcopy.bitoffs = bitoffs;
4058 	state->bcopy.size = size - max_size;
4059 	size = max_size;
4060 	res = ERTS_IL2B_BCOPY_YIELD;
4061     }
4062 
4063     num_bits = 8*size;
4064     copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
4065     state->buf += size;
4066     state->len -= size;
4067     *yield_countp = yield_count;
4068 
4069     return res;
4070 }
4071 
4072 ErlDrvSizeT erts_iolist_to_buf_yielding(ErtsIOList2BufState *state)
4073 {
4074     return iolist_to_buf(1, state, state->iolist.obj, state->buf, state->len);
4075 }
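
/*
 * Yielding protocol sketch (illustrative, not in the original source): a
 * trapping caller is assumed to keep its ErtsIOList2BufState alive across
 * invocations and to retry while the yield value is returned:
 *
 *     ErlDrvSizeT r = erts_iolist_to_buf_yielding(&state);
 *     if (r == ERTS_IOLIST_TO_BUF_YIELD)
 *         ... reductions exhausted; keep 'state' and call again after trapping ...
 *     else if (r == ERTS_IOLIST_TO_BUF_OVERFLOW || r == ERTS_IOLIST_TO_BUF_TYPE_ERROR)
 *         ... buffer too small, or not an iolist ...
 *     else
 *         ... done; r is the number of unused bytes left in the buffer ...
 */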
4076 
4077 ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len)
4078 {
4079     return iolist_to_buf(0, NULL, obj, buf, alloced_len);
4080 }
4081 
4082 /*
4083  * Return 0 if successful, and non-zero if unsuccessful.
4084  *
4085  * It is vital that if erts_iolist_to_buf would return an error for
4086  * any type of term data, this function should do so as well.
4087  * Any input term error detected in erts_iolist_to_buf should also
4088  * be detected in this function!
4089  */
4090 
4091 static ERTS_INLINE int
4092 iolist_size(const int yield_support, ErtsIOListState *state, Eterm obj, ErlDrvSizeT* sizep)
4093 {
4094     int res, init_yield_count, yield_count;
4095     Eterm* objp;
4096     Uint size = (Uint) *sizep;
4097     DECLARE_ESTACK(s);
4098 
4099     if (!yield_support)
4100 	yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
4101     else {
4102 	if (state->reds_left <= 0)
4103 	    return ERTS_IOLIST_YIELD;
4104 	ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
4105 	init_yield_count = ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED;
4106 	init_yield_count *= state->reds_left;
4107 	yield_count = init_yield_count;
4108 	if (state->estack.start) {
4109 	    /* Restart; restore state... */
4110 	    ESTACK_RESTORE(s, &state->estack);
4111 	    size = (Uint) state->size;
4112 	    obj = state->obj;
4113 	}
4114     }
4115 
4116     goto L_again;
4117 
4118 #define SAFE_ADD(Var, Val)			\
4119     do {					\
4120         Uint valvar = (Val);			\
4121 	Var += valvar;				\
4122 	if (Var < valvar) {			\
4123 	    goto L_overflow_error;		\
4124 	}					\
4125     } while (0)
4126 
4127     while (!ESTACK_ISEMPTY(s)) {
4128 	obj = ESTACK_POP(s);
4129     L_again:
4130 	if (is_list(obj)) {
4131 	    while (1) { /* Tail loop */
4132 		while (1) { /* Head loop */
4133 		    if (yield_support && --yield_count <= 0)
4134 			goto L_yield;
4135 		    objp = list_val(obj);
4136 		    /* Head */
4137 		    obj = CAR(objp);
4138 		    if (is_byte(obj)) {
4139 			size++;
4140 			if (size == 0) {
4141 			    goto L_overflow_error;
4142 			}
4143 		    } else if (is_binary(obj) && binary_bitsize(obj) == 0) {
4144 			SAFE_ADD(size, binary_size(obj));
4145 		    } else if (is_list(obj)) {
4146 			ESTACK_PUSH(s, CDR(objp));
4147 			continue; /* Head loop */
4148 		    } else if (is_not_nil(obj)) {
4149 			goto L_type_error;
4150 		    }
4151 		    break;
4152 		}
4153 		/* Tail */
4154 		obj = CDR(objp);
4155 		if (is_list(obj))
4156 		    continue; /* Tail loop */
4157 		else if (is_binary(obj) && binary_bitsize(obj) == 0) {
4158 		    SAFE_ADD(size, binary_size(obj));
4159 		} else if (is_not_nil(obj)) {
4160 		    goto L_type_error;
4161 		}
4162 		break;
4163 	    }
4164 	} else {
4165 	    if (yield_support && --yield_count <= 0)
4166 		goto L_yield;
4167 	    if (is_binary(obj) && binary_bitsize(obj) == 0) { /* Tail was binary */
4168 		SAFE_ADD(size, binary_size(obj));
4169 	    } else if (is_not_nil(obj)) {
4170 		goto L_type_error;
4171 	    }
4172 	}
4173     }
4174 #undef SAFE_ADD
4175 
4176     *sizep = (ErlDrvSizeT) size;
4177 
4178     res = ERTS_IOLIST_OK;
4179 
4180  L_return:
4181 
4182     DESTROY_ESTACK(s);
4183 
4184     if (yield_support) {
4185 	int yc, reds;
4186 	CLEAR_SAVED_ESTACK(&state->estack);
4187 	yc = init_yield_count - yield_count;
4188 	reds = ((yc - 1) / ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED) + 1;
4189 	BUMP_REDS(state->c_p, reds);
4190 	state->reds_left -= reds;
4191 	state->size = (ErlDrvSizeT) size;
4192 	state->have_size = 1;
4193     }
4194 
4195     return res;
4196 
4197  L_overflow_error:
4198     res = ERTS_IOLIST_OVERFLOW;
4199     size = 0;
4200     goto L_return;
4201 
4202  L_type_error:
4203     res = ERTS_IOLIST_TYPE;
4204     size = 0;
4205     goto L_return;
4206 
4207  L_yield:
4208     BUMP_ALL_REDS(state->c_p);
4209     state->reds_left = 0;
4210     state->size = size;
4211     state->obj = obj;
4212     ESTACK_SAVE(s, &state->estack);
4213     return ERTS_IOLIST_YIELD;
4214 }
4215 
4216 int erts_iolist_size_yielding(ErtsIOListState *state)
4217 {
4218     ErlDrvSizeT size = state->size;
4219     return iolist_size(1, state, state->obj, &size);
4220 }
4221 
4222 int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
4223 {
4224     *sizep = 0;
4225     return iolist_size(0, NULL, obj, sizep);
4226 }
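
/*
 * Usage sketch (illustrative, not in the original source): the contract
 * documented above erts_iolist_to_buf() makes the pair usable as
 * size-then-fill, e.g. with a temporary buffer:
 *
 *     ErlDrvSizeT size;
 *     if (erts_iolist_size(iolist, &size) == ERTS_IOLIST_OK) {
 *         char *buf = erts_alloc(ERTS_ALC_T_TMP, size);
 *         ErlDrvSizeT left = erts_iolist_to_buf(iolist, buf, size);
 *         ASSERT(left == 0);
 *         ... use buf, then erts_free(ERTS_ALC_T_TMP, buf) ...
 *     }
 */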
4227 
4228 /* Return 0 if 'list' is not a non-empty flat list of bytes;
4229    otherwise return the nonzero length of the list. */
4230 Sint
4231 is_string(Eterm list)
4232 {
4233     Sint len = 0;
4234 
4235     while(is_list(list)) {
4236 	Eterm* consp = list_val(list);
4237 	Eterm hd = CAR(consp);
4238 
4239 	if (!is_byte(hd))
4240 	    return 0;
4241 	len++;
4242 	list = CDR(consp);
4243     }
4244     if (is_nil(list))
4245 	return len;
4246     return 0;
4247 }
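
/*
 * Usage sketch (illustrative, not in the original source):
 *
 *     Sint n = is_string(list);
 *
 * yields n > 0 (the length) when 'list' is a non-empty flat list of bytes,
 * and 0 for the empty list, improper lists, or any non-byte element.
 */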
4248 
4249 static int trim_threshold;
4250 static int top_pad;
4251 static int mmap_threshold;
4252 static int mmap_max;
4253 
4254 Uint tot_bin_allocated;
4255 
4256 void erts_init_utils(void)
4257 {
4258 
4259 }
4260 
4261 void erts_init_utils_mem(void)
4262 {
4263     trim_threshold = -1;
4264     top_pad = -1;
4265     mmap_threshold = -1;
4266     mmap_max = -1;
4267 }
4268 
4269 int
4270 sys_alloc_opt(int opt, int value)
4271 {
4272 #if HAVE_MALLOPT
4273   int m_opt;
4274   int *curr_val;
4275 
4276   switch(opt) {
4277   case SYS_ALLOC_OPT_TRIM_THRESHOLD:
4278 #ifdef M_TRIM_THRESHOLD
4279     m_opt = M_TRIM_THRESHOLD;
4280     curr_val = &trim_threshold;
4281     break;
4282 #else
4283     return 0;
4284 #endif
4285   case SYS_ALLOC_OPT_TOP_PAD:
4286 #ifdef M_TOP_PAD
4287     m_opt = M_TOP_PAD;
4288     curr_val = &top_pad;
4289     break;
4290 #else
4291     return 0;
4292 #endif
4293   case SYS_ALLOC_OPT_MMAP_THRESHOLD:
4294 #ifdef M_MMAP_THRESHOLD
4295     m_opt = M_MMAP_THRESHOLD;
4296     curr_val = &mmap_threshold;
4297     break;
4298 #else
4299     return 0;
4300 #endif
4301   case SYS_ALLOC_OPT_MMAP_MAX:
4302 #ifdef M_MMAP_MAX
4303     m_opt = M_MMAP_MAX;
4304     curr_val = &mmap_max;
4305     break;
4306 #else
4307     return 0;
4308 #endif
4309   default:
4310     return 0;
4311   }
4312 
4313   if(mallopt(m_opt, value)) {
4314     *curr_val = value;
4315     return 1;
4316   }
4317 
4318 #endif /* #if HAVE_MALLOPT */
4319 
4320   return 0;
4321 }
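
/*
 * Usage sketch (illustrative, not in the original source): when mallopt() is
 * available, the wrapper above can be used like
 *
 *     if (sys_alloc_opt(SYS_ALLOC_OPT_TRIM_THRESHOLD, 128*1024))
 *         ... accepted; sys_alloc_stat() will now report 128*1024 ...
 *
 * and a zero return means mallopt() is missing, rejected the value, or the
 * option is not supported on this platform.
 */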
4322 
4323 void
4324 sys_alloc_stat(SysAllocStat *sasp)
4325 {
4326    sasp->trim_threshold = trim_threshold;
4327    sasp->top_pad        = top_pad;
4328    sasp->mmap_threshold = mmap_threshold;
4329    sasp->mmap_max       = mmap_max;
4330 
4331 }
4332 
4333 char *
4334 erts_read_env(char *key)
4335 {
4336     size_t value_len = 256;
4337     char *value = erts_alloc(ERTS_ALC_T_TMP, value_len);
4338     int res;
4339     while (1) {
4340         res = erts_sys_explicit_8bit_getenv(key, value, &value_len);
4341 
4342         if (res >= 0) {
4343             break;
4344         }
4345 
4346         value = erts_realloc(ERTS_ALC_T_TMP, value, value_len);
4347     }
4348 
4349     if (res != 1) {
4350         erts_free(ERTS_ALC_T_TMP, value);
4351         return NULL;
4352     }
4353 
4354     return value;
4355 }
4356 
4357 void
4358 erts_free_read_env(void *value)
4359 {
4360     if (value)
4361 	erts_free(ERTS_ALC_T_TMP, value);
4362 }
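
/*
 * Usage sketch (illustrative, not in the original source; the environment
 * variable name is made up):
 *
 *     char *value = erts_read_env("ERL_EXAMPLE_VAR");
 *     if (value) {
 *         ... use value ...
 *         erts_free_read_env(value);
 *     }
 */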
4363 
4364 
4365 typedef struct {
4366     size_t sz;
4367     char *ptr;
4368 } ErtsEmuArg;
4369 
4370 typedef struct {
4371     int argc;
4372     ErtsEmuArg *arg;
4373     size_t no_bytes;
4374 } ErtsEmuArgs;
4375 
4376 ErtsEmuArgs saved_emu_args = {0};
4377 
4378 void
4379 erts_save_emu_args(int argc, char **argv)
4380 {
4381 #ifdef DEBUG
4382     char *end_ptr;
4383 #endif
4384     char *ptr;
4385     int i;
4386     size_t arg_sz[100];
4387     size_t size;
4388 
4389     ASSERT(!saved_emu_args.argc);
4390 
4391     size = sizeof(ErtsEmuArg)*argc;
4392     for (i = 0; i < argc; i++) {
4393 	size_t sz = sys_strlen(argv[i]);
4394 	if (i < sizeof(arg_sz)/sizeof(arg_sz[0]))
4395 	    arg_sz[i] = sz;
4396 	size += sz+1;
4397     }
4398     ptr = (char *) malloc(size);
4399     if (!ptr) {
4400         ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
4401     }
4402 #ifdef DEBUG
4403     end_ptr = ptr + size;
4404 #endif
4405     saved_emu_args.arg = (ErtsEmuArg *) ptr;
4406     ptr += sizeof(ErtsEmuArg)*argc;
4407     saved_emu_args.argc = argc;
4408     saved_emu_args.no_bytes = 0;
4409     for (i = 0; i < argc; i++) {
4410 	size_t sz;
4411 	if (i < sizeof(arg_sz)/sizeof(arg_sz[0]))
4412 	    sz = arg_sz[i];
4413 	else
4414 	    sz = sys_strlen(argv[i]);
4415 	saved_emu_args.arg[i].ptr = ptr;
4416 	saved_emu_args.arg[i].sz = sz;
4417 	saved_emu_args.no_bytes += sz;
4418 	ptr += sz+1;
4419 	sys_strcpy(saved_emu_args.arg[i].ptr, argv[i]);
4420     }
4421     ASSERT(ptr == end_ptr);
4422 }
4423 
4424 Eterm
4425 erts_get_emu_args(Process *c_p)
4426 {
4427 #ifdef DEBUG
4428     Eterm *end_hp;
4429 #endif
4430     int i;
4431     Uint hsz;
4432     Eterm *hp, res;
4433 
4434     hsz = saved_emu_args.no_bytes*2;
4435     hsz += saved_emu_args.argc*2;
4436 
4437     hp = HAlloc(c_p, hsz);
4438 #ifdef DEBUG
4439     end_hp = hp + hsz;
4440 #endif
4441     res = NIL;
4442 
4443     for (i = saved_emu_args.argc-1; i >= 0; i--) {
4444 	Eterm arg = buf_to_intlist(&hp,
4445 				   saved_emu_args.arg[i].ptr,
4446 				   saved_emu_args.arg[i].sz,
4447 				   NIL);
4448 	res = CONS(hp, arg, res);
4449 	hp += 2;
4450     }
4451 
4452     ASSERT(hp == end_hp);
4453 
4454     return res;
4455 }
4456 
4457 
4458 Eterm
4459 erts_get_ethread_info(Process *c_p)
4460 {
4461     Uint sz, *szp;
4462     Eterm res, *hp, **hpp, *end_hp = NULL;
4463 
4464     sz = 0;
4465     szp = &sz;
4466     hpp = NULL;
4467 
4468     while (1) {
4469 	Eterm tup, list, name;
4470 #if defined(ETHR_NATIVE_ATOMIC32_IMPL)	  \
4471     || defined(ETHR_NATIVE_ATOMIC64_IMPL)	\
4472     || defined(ETHR_NATIVE_DW_ATOMIC_IMPL)
4473 	char buf[1024];
4474 	int i;
4475 	char **str;
4476 #endif
4477 
4478 	res = NIL;
4479 
4480 #ifdef ETHR_X86_MEMBAR_H__
4481 
4482 	tup = erts_bld_tuple(hpp, szp, 2,
4483 			     erts_bld_string(hpp, szp, "sse2"),
4484 #ifdef ETHR_X86_RUNTIME_CONF_HAVE_SSE2__
4485 			     erts_bld_string(hpp, szp,
4486 					     (ETHR_X86_RUNTIME_CONF_HAVE_SSE2__
4487 					      ? "yes" : "no"))
4488 #else
4489 			     erts_bld_string(hpp, szp, "yes")
4490 #endif
4491 	    );
4492 	res = erts_bld_cons(hpp, szp, tup, res);
4493 
4494 	tup = erts_bld_tuple(hpp, szp, 2,
4495 			     erts_bld_string(hpp, szp,
4496 					     "x86"
4497 #ifdef ARCH_64
4498 					     "_64"
4499 #endif
4500 					     " OOO"),
4501 			     erts_bld_string(hpp, szp,
4502 #ifdef ETHR_X86_OUT_OF_ORDER
4503 					     "yes"
4504 #else
4505 					     "no"
4506 #endif
4507 				 ));
4508 
4509 	res = erts_bld_cons(hpp, szp, tup, res);
4510 #endif
4511 
4512 #ifdef ETHR_SPARC_V9_MEMBAR_H__
4513 
4514 	tup = erts_bld_tuple(hpp, szp, 2,
4515 			     erts_bld_string(hpp, szp, "Sparc V9"),
4516 			     erts_bld_string(hpp, szp,
4517 #if defined(ETHR_SPARC_TSO)
4518 					     "TSO"
4519 #elif defined(ETHR_SPARC_PSO)
4520 					     "PSO"
4521 #elif defined(ETHR_SPARC_RMO)
4522 					     "RMO"
4523 #else
4524 					     "undefined"
4525 #endif
4526 				 ));
4527 
4528 	res = erts_bld_cons(hpp, szp, tup, res);
4529 
4530 #endif
4531 
4532 #ifdef ETHR_PPC_MEMBAR_H__
4533 
4534 	tup = erts_bld_tuple(hpp, szp, 2,
4535 			     erts_bld_string(hpp, szp, "lwsync"),
4536 			     erts_bld_string(hpp, szp,
4537 #if defined(ETHR_PPC_HAVE_LWSYNC)
4538 					     "yes"
4539 #elif defined(ETHR_PPC_HAVE_NO_LWSYNC)
4540 					     "no"
4541 #elif defined(ETHR_PPC_RUNTIME_CONF_HAVE_LWSYNC__)
4542 					     ETHR_PPC_RUNTIME_CONF_HAVE_LWSYNC__ ? "yes" : "no"
4543 #else
4544 					     "undefined"
4545 #endif
4546 				 ));
4547 
4548 	res = erts_bld_cons(hpp, szp, tup, res);
4549 
4550 #endif
4551 
4552 	tup = erts_bld_tuple(hpp, szp, 2,
4553 			     erts_bld_string(hpp, szp, "Native rw-spinlocks"),
4554 #ifdef ETHR_NATIVE_RWSPINLOCK_IMPL
4555 			     erts_bld_string(hpp, szp, ETHR_NATIVE_RWSPINLOCK_IMPL)
4556 #else
4557 			     erts_bld_string(hpp, szp, "no")
4558 #endif
4559 	    );
4560 	res = erts_bld_cons(hpp, szp, tup, res);
4561 
4562 	tup = erts_bld_tuple(hpp, szp, 2,
4563 			     erts_bld_string(hpp, szp, "Native spinlocks"),
4564 #ifdef ETHR_NATIVE_SPINLOCK_IMPL
4565 			     erts_bld_string(hpp, szp, ETHR_NATIVE_SPINLOCK_IMPL)
4566 #else
4567 			     erts_bld_string(hpp, szp, "no")
4568 #endif
4569 	    );
4570 	res = erts_bld_cons(hpp, szp, tup, res);
4571 
4572 
4573 	list = NIL;
4574 #ifdef ETHR_NATIVE_DW_ATOMIC_IMPL
4575 	if (ethr_have_native_dw_atomic()) {
4576 	    name = erts_bld_string(hpp, szp, ETHR_NATIVE_DW_ATOMIC_IMPL);
4577 	    str = ethr_native_dw_atomic_ops();
4578 	    for (i = 0; str[i]; i++) {
4579 		erts_snprintf(buf, sizeof(buf), "ethr_native_dw_atomic_%s()", str[i]);
4580 		list = erts_bld_cons(hpp, szp,
4581 				     erts_bld_string(hpp, szp, buf),
4582 				     list);
4583 	    }
4584 	    str = ethr_native_su_dw_atomic_ops();
4585 	    for (i = 0; str[i]; i++) {
4586 		erts_snprintf(buf, sizeof(buf), "ethr_native_su_dw_atomic_%s()", str[i]);
4587 		list = erts_bld_cons(hpp, szp,
4588 				     erts_bld_string(hpp, szp, buf),
4589 				     list);
4590 	    }
4591 	}
4592 	else
4593 #endif
4594 	    name = erts_bld_string(hpp, szp, "no");
4595 
4596 	tup = erts_bld_tuple(hpp, szp, 3,
4597 			     erts_bld_string(hpp, szp, "Double word native atomics"),
4598 			     name,
4599 			     list);
4600 	res = erts_bld_cons(hpp, szp, tup, res);
4601 
4602 	list = NIL;
4603 #ifdef ETHR_NATIVE_ATOMIC64_IMPL
4604 	name = erts_bld_string(hpp, szp, ETHR_NATIVE_ATOMIC64_IMPL);
4605 	str = ethr_native_atomic64_ops();
4606 	for (i = 0; str[i]; i++) {
4607 	    erts_snprintf(buf, sizeof(buf), "ethr_native_atomic64_%s()", str[i]);
4608 	    list = erts_bld_cons(hpp, szp,
4609 				 erts_bld_string(hpp, szp, buf),
4610 				 list);
4611 	}
4612 #else
4613 	name = erts_bld_string(hpp, szp, "no");
4614 #endif
4615 	tup = erts_bld_tuple(hpp, szp, 3,
4616 			     erts_bld_string(hpp, szp, "64-bit native atomics"),
4617 			     name,
4618 			     list);
4619 	res = erts_bld_cons(hpp, szp, tup, res);
4620 
4621 	list = NIL;
4622 #ifdef ETHR_NATIVE_ATOMIC32_IMPL
4623 	name = erts_bld_string(hpp, szp, ETHR_NATIVE_ATOMIC32_IMPL);
4624 	str = ethr_native_atomic32_ops();
4625 	for (i = 0; str[i]; i++) {
4626 	    erts_snprintf(buf, sizeof(buf), "ethr_native_atomic32_%s()", str[i]);
4627 	    list = erts_bld_cons(hpp, szp,
4628 				erts_bld_string(hpp, szp, buf),
4629 				list);
4630 	}
4631 #else
4632 	name = erts_bld_string(hpp, szp, "no");
4633 #endif
4634 	tup = erts_bld_tuple(hpp, szp, 3,
4635 			     erts_bld_string(hpp, szp, "32-bit native atomics"),
4636 			     name,
4637 			     list);
4638 	res = erts_bld_cons(hpp, szp, tup, res);
4639 
4640 	if (hpp) {
4641 	    HRelease(c_p, end_hp, *hpp);
4642 	    return res;
4643 	}
4644 
4645 	hp = HAlloc(c_p, sz);
4646 	end_hp = hp + sz;
4647 	hpp = &hp;
4648 	szp = NULL;
4649     }
4650 }
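
/*
 * Note on the loop above (added for clarity): it uses the common erts_bld_*
 * two-pass idiom. On the first pass hpp is NULL and szp points at 'sz', so
 * the erts_bld_*() calls only accumulate the heap size needed; after
 * HAlloc() the loop runs again with szp NULL and hpp pointing into the
 * allocated heap, actually building the term. A minimal sketch of the same
 * idiom (c_p being the calling process):
 *
 *     Uint sz = 0;
 *     Eterm *hp, res;
 *     (void) erts_bld_tuple(NULL, &sz, 2, am_ok, am_true);
 *     hp = HAlloc(c_p, sz);
 *     res = erts_bld_tuple(&hp, NULL, 2, am_ok, am_true);
 */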
4651 
4652 /*
4653  * To be used to silence unused result warnings, but do not abuse it.
4654  */
4655 void erts_silence_warn_unused_result(long unused)
4656 {
4657 
4658 }
4659 
4660 /*
4661  * Interval counts
4662  */
4663 void
4664 erts_interval_init(erts_interval_t *icp)
4665 {
4666     erts_atomic64_init_nob(&icp->counter.atomic, 0);
4667 }
4668 
4669 static ERTS_INLINE Uint64
4670 step_interval_nob(erts_interval_t *icp)
4671 {
4672     return (Uint64) erts_atomic64_inc_read_nob(&icp->counter.atomic);
4673 }
4674 
4675 static ERTS_INLINE Uint64
4676 step_interval_relb(erts_interval_t *icp)
4677 {
4678     return (Uint64) erts_atomic64_inc_read_relb(&icp->counter.atomic);
4679 }
4680 
4681 
4682 static ERTS_INLINE Uint64
4683 ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
4684 {
4685     Uint64 curr_ic;
4686     curr_ic = (Uint64) erts_atomic64_read_nob(&icp->counter.atomic);
4687     if (curr_ic > ic)
4688 	return curr_ic;
4689     return (Uint64) erts_atomic64_inc_read_nob(&icp->counter.atomic);
4690 }
4691 
4692 
4693 static ERTS_INLINE Uint64
4694 ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
4695 {
4696     Uint64 curr_ic;
4697     curr_ic = (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic);
4698     if (curr_ic > ic)
4699 	return curr_ic;
4700     return (Uint64) erts_atomic64_inc_read_acqb(&icp->counter.atomic);
4701 }
4702 
4703 Uint64
4704 erts_step_interval_nob(erts_interval_t *icp)
4705 {
4706     return step_interval_nob(icp);
4707 }
4708 
4709 Uint64
4710 erts_step_interval_relb(erts_interval_t *icp)
4711 {
4712     return step_interval_relb(icp);
4713 }
4714 
4715 Uint64
4716 erts_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
4717 {
4718     return ensure_later_interval_nob(icp, ic);
4719 }
4720 
4721 Uint64
4722 erts_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
4723 {
4724     return ensure_later_interval_acqb(icp, ic);
4725 }
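
/*
 * Usage sketch (illustrative, not in the original source): an interval
 * counter hands out increasing values, and a saved value can later be used
 * to make sure a strictly later interval has been reached:
 *
 *     erts_interval_t interv;
 *     Uint64 ic;
 *     erts_interval_init(&interv);
 *     ic = erts_step_interval_nob(&interv);               (ic is now 1)
 *     ic = erts_ensure_later_interval_nob(&interv, ic);   (ic is now 2)
 */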
4726 
4727 /*
4728  * A millisecond timestamp without time correction where there's no hrtime
4729  * - for tracing on "long" things...
4730  */
4731 Uint64 erts_timestamp_millis(void)
4732 {
4733 #ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
4734     return ERTS_MONOTONIC_TO_MSEC(erts_os_monotonic_time());
4735 #else
4736     Uint64 res;
4737     SysTimeval tv;
4738     sys_gettimeofday(&tv);
4739     res = (Uint64) tv.tv_sec*1000000;
4740     res += (Uint64) tv.tv_usec;
4741     return (res / 1000);
4742 #endif
4743 }
4744 
4745 void *
4746 erts_calc_stacklimit(char *prev_c, UWord stacksize)
4747 {
4748     /*
4749      * We *don't* want this function inlined, i.e., it is
4750      * risky to call this function from another function
4751      * in utils.c
4752      */
4753 
4754     UWord pagesize = erts_sys_get_page_size();
4755     char c;
4756     char *start;
4757     if (&c > prev_c) {
4758         start = (char *) ((((UWord) prev_c) / pagesize) * pagesize);
4759         return (void *) (start + stacksize);
4760     }
4761     else {
4762         start = (char *) (((((UWord) prev_c) - 1) / pagesize + 1) * pagesize);
4763         return (void *) (start - stacksize);
4764     }
4765 }
4766 
4767 /*
4768  * erts_check_below_limit() and
4769  * erts_check_above_limit() are put
4770  * in utils.c in order to prevent
4771  * inlining.
4772  */
4773 
4774 int
4775 erts_check_below_limit(char *ptr, char *limit)
4776 {
4777     return ptr < limit;
4778 }
4779 
4780 int
4781 erts_check_above_limit(char *ptr, char *limit)
4782 {
4783     return ptr > limit;
4784 }
4785 
4786 void *
4787 erts_ptr_id(void *ptr)
4788 {
4789     return ptr;
4790 }
4791 
4792 int erts_check_if_stack_grows_downwards(char *ptr)
4793 {
4794     char c;
4795     if (erts_check_below_limit(&c, ptr))
4796         return 1;
4797     else
4798         return 0;
4799 }
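
/*
 * Usage sketch (illustrative, not in the original source): the helpers above
 * are combined to set up and check a stack guard; the 8 MB stack size is an
 * assumption for the example.
 *
 *     char c;
 *     int grows_down = erts_check_if_stack_grows_downwards(&c);
 *     void *limit = erts_calc_stacklimit(&c, 8*1024*1024);
 *
 * and later, deep in a recursion with a fresh local variable 'c2':
 *
 *     if (grows_down
 *         ? erts_check_below_limit(&c2, (char *) limit)
 *         : erts_check_above_limit(&c2, (char *) limit))
 *         ... about to run out of stack ...
 */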
4800