1 /*
2 * %CopyrightBegin%
3 *
4 * Copyright Ericsson AB 1996-2020. All Rights Reserved.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * %CopyrightEnd%
19 */
20
21 #ifdef HAVE_CONFIG_H
22 # include "config.h"
23 #endif
24
25 #define ERTS_DO_INCL_GLB_INLINE_FUNC_DEF
26
27 #include "sys.h"
28 #include "erl_vm.h"
29 #include "global.h"
30 #include "erl_process.h"
31 #include "big.h"
32 #include "bif.h"
33 #include "erl_binary.h"
34 #include "erl_bits.h"
35 #include "erl_map.h"
36 #include "packet_parser.h"
37 #include "erl_gc.h"
38 #define ERTS_WANT_DB_INTERNAL__
39 #include "erl_db.h"
40 #include "erl_threads.h"
41 #include "register.h"
42 #include "dist.h"
43 #include "erl_printf.h"
44 #include "erl_threads.h"
45 #include "erl_lock_count.h"
46 #include "erl_time.h"
47 #include "erl_thr_progress.h"
48 #include "erl_thr_queue.h"
49 #include "erl_sched_spec_pre_alloc.h"
50 #include "beam_bp.h"
51 #include "erl_ptab.h"
52 #include "erl_check_io.h"
53 #include "erl_bif_unique.h"
54 #include "erl_io_queue.h"
55 #define ERTS_WANT_TIMER_WHEEL_API
56 #include "erl_time.h"
57 #ifdef HIPE
58 # include "hipe_mode_switch.h"
59 #endif
60 #define ERTS_WANT_NFUNC_SCHED_INTERNALS__
61 #include "erl_nfunc_sched.h"
62 #include "erl_proc_sig_queue.h"
63
64 #undef M_TRIM_THRESHOLD
65 #undef M_TOP_PAD
66 #undef M_MMAP_THRESHOLD
67 #undef M_MMAP_MAX
68
69 #if (defined(__GLIBC__) || defined(_AIX)) && defined(HAVE_MALLOC_H)
70 #include <malloc.h>
71 #endif
72
73 #if !defined(HAVE_MALLOPT)
74 #undef HAVE_MALLOPT
75 #define HAVE_MALLOPT 0
76 #endif
77
/*
 * Allocate 'need' words of heap space for process 'p', always in a heap
 * fragment (never by growing the main heap here).  'xtra' is extra slack
 * requested when a brand new fragment must be created.  Returns a pointer
 * to the first of the 'need' words.
 */
Eterm*
erts_heap_alloc(Process* p, Uint need, Uint xtra)
{
    ErlHeapFragment* bp;
    Uint n;
#if defined(DEBUG) || defined(CHECK_FOR_HOLES)
    Uint i;
#endif

#ifdef FORCE_HEAP_FRAGS
    /* Debug mode: if the caller has pre-verified a span of the main heap
       (space_verified/space_verified_from), serve the request from that
       span instead of a fragment, consuming it as we go. */
    if (p->space_verified && p->space_verified_from!=NULL
	&& HEAP_TOP(p) >= p->space_verified_from
	&& HEAP_TOP(p) + need <= p->space_verified_from + p->space_verified
	&& HEAP_LIMIT(p) - HEAP_TOP(p) >= need) {

	Uint consumed = need + (HEAP_TOP(p) - p->space_verified_from);
	ASSERT(consumed <= p->space_verified);
	p->space_verified -= consumed;
	p->space_verified_from += consumed;
	HEAP_TOP(p) = p->space_verified_from;
	return HEAP_TOP(p) - need;
    }
    p->space_verified = 0;
    p->space_verified_from = NULL;
#endif /* FORCE_HEAP_FRAGS */

    n = need + xtra;
    bp = MBUF(p);
    /* Reuse free space at the end of the newest existing fragment, if any. */
    if (bp != NULL && need <= (bp->alloc_size - bp->used_size)) {
	Eterm* ret = bp->mem + bp->used_size;
	bp->used_size += need;
	p->mbuf_sz += need;
	return ret;
    }
#ifdef DEBUG
    n++;    /* one extra word so the hole-marker fill below covers a guard word */
#endif
    bp = (ErlHeapFragment*)
	ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP_FRAG, ERTS_HEAP_FRAG_SIZE(n));

#if defined(DEBUG) || defined(CHECK_FOR_HOLES)
    /* Poison the whole fragment so unwritten words are detectable. */
    for (i = 0; i < n; i++) {
	bp->mem[i] = ERTS_HOLE_MARKER;
    }
#endif

#ifdef DEBUG
    n--;    /* undo the guard-word bump; alloc_size reflects usable words */
#endif

    /* Link the new fragment first in the process' fragment list. */
    bp->next = MBUF(p);
    MBUF(p) = bp;
    bp->alloc_size = n;
    bp->used_size = need;
    MBUF_SIZE(p) += need;
    bp->off_heap.first = NULL;
    bp->off_heap.overhead = 0;
    return bp->mem;
}
137
#ifdef CHECK_FOR_HOLES
/*
 * Fill 'sz' words starting at 'ptr' with the hole marker.
 * Returns 'ptr' unchanged so it can be used in expressions.
 */
Eterm*
erts_set_hole_marker(Eterm* ptr, Uint sz)
{
    Eterm* end = ptr + sz;
    Eterm* cursor;

    for (cursor = ptr; cursor < end; cursor++) {
	*cursor = ERTS_HOLE_MARKER;
    }
    return ptr;
}
#endif
151
152 /*
153 * Helper function for the ESTACK macros defined in global.h.
154 */
155 void
erl_grow_estack(ErtsEStack * s,Uint need)156 erl_grow_estack(ErtsEStack* s, Uint need)
157 {
158 Uint old_size = (s->end - s->start);
159 Uint new_size;
160 Uint sp_offs = s->sp - s->start;
161
162 if (need < old_size)
163 new_size = 2*old_size;
164 else
165 new_size = ((need / old_size) + 2) * old_size;
166
167 if (s->start != s->edefault) {
168 s->start = erts_realloc(s->alloc_type, s->start,
169 new_size*sizeof(Eterm));
170 } else {
171 Eterm* new_ptr = erts_alloc(s->alloc_type, new_size*sizeof(Eterm));
172 sys_memcpy(new_ptr, s->start, old_size*sizeof(Eterm));
173 s->start = new_ptr;
174 }
175 s->end = s->start + new_size;
176 s->sp = s->start + sp_offs;
177 }
178 /*
179 * Helper function for the WSTACK macros defined in global.h.
180 */
181 void
erl_grow_wstack(ErtsWStack * s,Uint need)182 erl_grow_wstack(ErtsWStack* s, Uint need)
183 {
184 Uint old_size = (s->wend - s->wstart);
185 Uint new_size;
186 Uint sp_offs = s->wsp - s->wstart;
187
188 if (need < old_size)
189 new_size = 2 * old_size;
190 else
191 new_size = ((need / old_size) + 2) * old_size;
192
193 if (s->wstart != s->wdefault) {
194 s->wstart = erts_realloc(s->alloc_type, s->wstart,
195 new_size*sizeof(UWord));
196 } else {
197 UWord* new_ptr = erts_alloc(s->alloc_type, new_size*sizeof(UWord));
198 sys_memcpy(new_ptr, s->wstart, old_size*sizeof(UWord));
199 s->wstart = new_ptr;
200 }
201 s->wend = s->wstart + new_size;
202 s->wsp = s->wstart + sp_offs;
203 }
204
205 /*
206 * Helper function for the PSTACK macros defined in global.h.
207 */
208 void
erl_grow_pstack(ErtsPStack * s,void * default_pstack,unsigned need_bytes)209 erl_grow_pstack(ErtsPStack* s, void* default_pstack, unsigned need_bytes)
210 {
211 Uint old_size = s->size;
212 Uint new_size;
213
214 if (need_bytes < old_size)
215 new_size = 2 * old_size;
216 else
217 new_size = ((need_bytes / old_size) + 2) * old_size;
218
219 if (s->pstart != default_pstack) {
220 s->pstart = erts_realloc(s->alloc_type, s->pstart, new_size);
221 } else {
222 byte* new_ptr = erts_alloc(s->alloc_type, new_size);
223 sys_memcpy(new_ptr, s->pstart, old_size);
224 s->pstart = new_ptr;
225 }
226 s->size = new_size;
227 }
228
229 /*
230 * Helper function for the EQUEUE macros defined in global.h.
231 */
232
233 void
erl_grow_equeue(ErtsEQueue * q,Eterm * default_equeue)234 erl_grow_equeue(ErtsEQueue* q, Eterm* default_equeue)
235 {
236 Uint old_size = (q->end - q->start);
237 Uint new_size = old_size * 2;
238 Uint first_part = (q->end - q->front);
239 Uint second_part = (q->back - q->start);
240 Eterm* new_ptr = erts_alloc(q->alloc_type, new_size*sizeof(Eterm));
241 ASSERT(q->back == q->front); // of course the queue is full now!
242 if (first_part > 0)
243 sys_memcpy(new_ptr, q->front, first_part*sizeof(Eterm));
244 if (second_part > 0)
245 sys_memcpy(new_ptr+first_part, q->start, second_part*sizeof(Eterm));
246 if (q->start != default_equeue)
247 erts_free(q->alloc_type, q->start);
248 q->start = new_ptr;
249 q->end = q->start + new_size;
250 q->front = q->start;
251 q->back = q->start + old_size;
252 }
253
254 /* CTYPE macros */
255
256 #define LATIN1
257
258 #define IS_DIGIT(c) ((c) >= '0' && (c) <= '9')
259 #ifdef LATIN1
260 #define IS_LOWER(c) (((c) >= 'a' && (c) <= 'z') \
261 || ((c) >= 128+95 && (c) <= 255 && (c) != 247))
262 #define IS_UPPER(c) (((c) >= 'A' && (c) <= 'Z') \
263 || ((c) >= 128+64 && (c) <= 128+94 && (c) != 247-32))
264 #else
265 #define IS_LOWER(c) ((c) >= 'a' && (c) <= 'z')
266 #define IS_UPPER(c) ((c) >= 'A' && (c) <= 'Z')
267 #endif
268
269 #define IS_ALNUM(c) (IS_DIGIT(c) || IS_LOWER(c) || IS_UPPER(c))
270
271 /* We don't include 160 (non-breaking space). */
272 #define IS_SPACE(c) (c == ' ' || c == '\n' || c == '\t' || c == '\r')
273
274 #ifdef LATIN1
275 #define IS_CNTRL(c) ((c) < ' ' || (c) == 127 \
276 || ((c) >= 128 && (c) < 128+32))
277 #else
278 /* Treat all non-ASCII as control characters */
279 #define IS_CNTRL(c) ((c) < ' ' || (c) >= 127)
280 #endif
281
282 #define IS_PRINT(c) (!IS_CNTRL(c))
283
284 /*
285 * Calculate length of a list.
286 * Returns -1 if not a proper list (i.e. not terminated with NIL)
287 */
288 Sint
erts_list_length(Eterm list)289 erts_list_length(Eterm list)
290 {
291 Sint i = 0;
292
293 while(is_list(list)) {
294 i++;
295 list = CDR(list_val(list));
296 }
297 if (is_not_nil(list)) {
298 return -1;
299 }
300 return i;
301 }
302
/* Mask/shift table used by fit_in_bits() to binary-search for the
 * position of the highest set bit in a 64-bit value. */
static const struct {
    Sint64 mask;
    int bits;
} fib_data[] = {{ERTS_I64_LITERAL(0x2), 1},
		{ERTS_I64_LITERAL(0xc), 2},
		{ERTS_I64_LITERAL(0xf0), 4},
		{ERTS_I64_LITERAL(0xff00), 8},
		{ERTS_I64_LITERAL(0xffff0000), 16},
		{ERTS_I64_LITERAL(0xffffffff00000000), 32}};
312
313 static ERTS_INLINE int
fit_in_bits(Sint64 value,int start)314 fit_in_bits(Sint64 value, int start)
315 {
316 int bits = 0;
317 int i;
318
319 for (i = start; i >= 0; i--) {
320 if (value & fib_data[i].mask) {
321 value >>= fib_data[i].bits;
322 bits |= fib_data[i].bits;
323 }
324 }
325
326 bits++;
327
328 return bits;
329 }
330
/* Number of bits needed to represent 'value' as a 64-bit quantity. */
int erts_fit_in_bits_int64(Sint64 value)
{
    return fit_in_bits(value, 5);
}
335
/* Number of bits needed to represent 'value' as a 32-bit quantity. */
int erts_fit_in_bits_int32(Sint32 value)
{
    /* Zero-extend via Uint32 so negative values are not sign-extended. */
    return fit_in_bits((Sint64) (Uint32) value, 4);
}
340
/* Number of bits needed to represent 'value'; width follows Eterm size. */
int erts_fit_in_bits_uint(Uint value)
{
#if ERTS_SIZEOF_ETERM == 4
    return fit_in_bits((Sint64) (Uint32) value, 4);
#elif ERTS_SIZEOF_ETERM == 8
    return fit_in_bits(value, 5);
#else
# error "No way, Jose"
#endif
}
351
352 int
erts_print(fmtfn_t to,void * arg,char * format,...)353 erts_print(fmtfn_t to, void *arg, char *format, ...)
354 {
355 int res;
356 va_list arg_list;
357 va_start(arg_list, format);
358
359 {
360 switch ((UWord)to) {
361 case (UWord)ERTS_PRINT_STDOUT:
362 res = erts_vprintf(format, arg_list);
363 break;
364 case (UWord)ERTS_PRINT_STDERR:
365 res = erts_vfprintf(stderr, format, arg_list);
366 break;
367 case (UWord)ERTS_PRINT_FILE:
368 res = erts_vfprintf((FILE *) arg, format, arg_list);
369 break;
370 case (UWord)ERTS_PRINT_SBUF:
371 res = erts_vsprintf((char *) arg, format, arg_list);
372 break;
373 case (UWord)ERTS_PRINT_SNBUF:
374 res = erts_vsnprintf(((erts_print_sn_buf *) arg)->buf,
375 ((erts_print_sn_buf *) arg)->size,
376 format,
377 arg_list);
378 break;
379 case (UWord)ERTS_PRINT_DSBUF:
380 res = erts_vdsprintf((erts_dsprintf_buf_t *) arg, format, arg_list);
381 break;
382 case (UWord)ERTS_PRINT_FD:
383 res = erts_vfdprintf((int)(SWord) arg, format, arg_list);
384 break;
385 default:
386 res = erts_vcbprintf(to, arg, format, arg_list);
387 break;
388 }
389 }
390
391 va_end(arg_list);
392 return res;
393 }
394
/* Print the single character 'c' to the sink described by 'to'/'arg'. */
int
erts_putc(fmtfn_t to, void *arg, char c)
{
    return erts_print(to, arg, "%c", c);
}
400
401 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
402 * Some Erlang term building utility functions (to be used when performance *
403 * isn't critical). *
404 * *
405 * Add more functions like these here (and function prototypes in global.h) *
406 * when needed. *
407 * *
408 \* */
409
410 Eterm
erts_bld_atom(Uint ** hpp,Uint * szp,char * str)411 erts_bld_atom(Uint **hpp, Uint *szp, char *str)
412 {
413 if (hpp)
414 return erts_atom_put((byte *) str, sys_strlen(str), ERTS_ATOM_ENC_LATIN1, 1);
415 else
416 return THE_NON_VALUE;
417 }
418
419 Eterm
erts_bld_uint(Uint ** hpp,Uint * szp,Uint ui)420 erts_bld_uint(Uint **hpp, Uint *szp, Uint ui)
421 {
422 Eterm res = THE_NON_VALUE;
423 if (IS_USMALL(0, ui)) {
424 if (hpp)
425 res = make_small(ui);
426 }
427 else {
428 if (szp)
429 *szp += BIG_UINT_HEAP_SIZE;
430 if (hpp) {
431 res = uint_to_big(ui, *hpp);
432 *hpp += BIG_UINT_HEAP_SIZE;
433 }
434 }
435 return res;
436 }
437
438 /*
439 * Erts_bld_uword is more or less similar to erts_bld_uint, but a pointer
440 * can safely be passed.
441 */
442
443 Eterm
erts_bld_uword(Uint ** hpp,Uint * szp,UWord uw)444 erts_bld_uword(Uint **hpp, Uint *szp, UWord uw)
445 {
446 Eterm res = THE_NON_VALUE;
447 if (IS_USMALL(0, uw)) {
448 if (hpp)
449 res = make_small((Uint) uw);
450 }
451 else {
452 if (szp)
453 *szp += BIG_UWORD_HEAP_SIZE(uw);
454 if (hpp) {
455 res = uword_to_big(uw, *hpp);
456 *hpp += BIG_UWORD_HEAP_SIZE(uw);
457 }
458 }
459 return res;
460 }
461
462
463 Eterm
erts_bld_uint64(Uint ** hpp,Uint * szp,Uint64 ui64)464 erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64)
465 {
466 Eterm res = THE_NON_VALUE;
467 if (IS_USMALL(0, ui64)) {
468 if (hpp)
469 res = make_small((Uint) ui64);
470 }
471 else {
472 if (szp)
473 *szp += ERTS_UINT64_HEAP_SIZE(ui64);
474 if (hpp)
475 res = erts_uint64_to_big(ui64, hpp);
476 }
477 return res;
478 }
479
480 Eterm
erts_bld_sint64(Uint ** hpp,Uint * szp,Sint64 si64)481 erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64)
482 {
483 Eterm res = THE_NON_VALUE;
484 if (IS_SSMALL(si64)) {
485 if (hpp)
486 res = make_small((Sint) si64);
487 }
488 else {
489 if (szp)
490 *szp += ERTS_SINT64_HEAP_SIZE(si64);
491 if (hpp)
492 res = erts_sint64_to_big(si64, hpp);
493 }
494 return res;
495 }
496
497
498 Eterm
erts_bld_cons(Uint ** hpp,Uint * szp,Eterm car,Eterm cdr)499 erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr)
500 {
501 Eterm res = THE_NON_VALUE;
502 if (szp)
503 *szp += 2;
504 if (hpp) {
505 res = CONS(*hpp, car, cdr);
506 *hpp += 2;
507 }
508 return res;
509 }
510
511 Eterm
erts_bld_tuple(Uint ** hpp,Uint * szp,Uint arity,...)512 erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...)
513 {
514 Eterm res = THE_NON_VALUE;
515
516 ASSERT(arity < (((Uint)1) << (sizeof(Uint)*8 - _HEADER_ARITY_OFFS)));
517
518 if (szp)
519 *szp += arity + 1;
520 if (hpp) {
521 res = make_tuple(*hpp);
522 *((*hpp)++) = make_arityval(arity);
523
524 if (arity > 0) {
525 Uint i;
526 va_list argp;
527
528 va_start(argp, arity);
529 for (i = 0; i < arity; i++) {
530 *((*hpp)++) = va_arg(argp, Eterm);
531 }
532 va_end(argp);
533 }
534 }
535 return res;
536 }
537
538
Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[])
{
    Eterm tup = THE_NON_VALUE;
    /*
     * Note callers expect that 'terms' is *not* accessed if hpp == NULL.
     */

    ASSERT(arity < (((Uint)1) << (sizeof(Uint)*8 - _HEADER_ARITY_OFFS)));

    if (szp)
	*szp += arity + 1;	/* arity header word + one word per element */
    if (hpp) {
	Eterm* hp = *hpp;
	Uint ix;

	tup = make_tuple(hp);
	*hp++ = make_arityval(arity);
	for (ix = 0; ix < arity; ix++)
	    *hp++ = terms[ix];
	*hpp = hp;
    }
    return tup;
}
563
564 Eterm
erts_bld_string_n(Uint ** hpp,Uint * szp,const char * str,Sint len)565 erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len)
566 {
567 Eterm res = THE_NON_VALUE;
568 Sint i = len;
569 if (szp)
570 *szp += len*2;
571 if (hpp) {
572 res = NIL;
573 while (--i >= 0) {
574 res = CONS(*hpp, make_small((byte) str[i]), res);
575 *hpp += 2;
576 }
577 }
578 return res;
579 }
580
581 Eterm
erts_bld_list(Uint ** hpp,Uint * szp,Sint length,Eterm terms[])582 erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[])
583 {
584 Eterm list = THE_NON_VALUE;
585 if (szp)
586 *szp += 2*length;
587 if (hpp) {
588 Sint i = length;
589 list = NIL;
590
591 while (--i >= 0) {
592 list = CONS(*hpp, terms[i], list);
593 *hpp += 2;
594 }
595 }
596 return list;
597 }
598
599 Eterm
erts_bld_2tup_list(Uint ** hpp,Uint * szp,Sint length,Eterm terms1[],Uint terms2[])600 erts_bld_2tup_list(Uint **hpp, Uint *szp,
601 Sint length, Eterm terms1[], Uint terms2[])
602 {
603 Eterm res = THE_NON_VALUE;
604 if (szp)
605 *szp += 5*length;
606 if (hpp) {
607 Sint i = length;
608 res = NIL;
609
610 while (--i >= 0) {
611 res = CONS(*hpp+3, TUPLE2(*hpp, terms1[i], terms2[i]), res);
612 *hpp += 5;
613 }
614 }
615 return res;
616 }
617
618 Eterm
erts_bld_atom_uword_2tup_list(Uint ** hpp,Uint * szp,Sint length,Eterm atoms[],UWord uints[])619 erts_bld_atom_uword_2tup_list(Uint **hpp, Uint *szp,
620 Sint length, Eterm atoms[], UWord uints[])
621 {
622 Sint i;
623 Eterm res = THE_NON_VALUE;
624 if (szp) {
625 *szp += 5*length;
626 i = length;
627 while (--i >= 0) {
628 if (!IS_USMALL(0, uints[i]))
629 *szp += BIG_UINT_HEAP_SIZE;
630 }
631 }
632 if (hpp) {
633 i = length;
634 res = NIL;
635
636 while (--i >= 0) {
637 Eterm ui;
638
639 if (IS_USMALL(0, uints[i]))
640 ui = make_small(uints[i]);
641 else {
642 ui = uint_to_big(uints[i], *hpp);
643 *hpp += BIG_UINT_HEAP_SIZE;
644 }
645
646 res = CONS(*hpp+3, TUPLE2(*hpp, atoms[i], ui), res);
647 *hpp += 5;
648 }
649 }
650 return res;
651 }
652
653 Eterm
erts_bld_atom_2uint_3tup_list(Uint ** hpp,Uint * szp,Sint length,Eterm atoms[],Uint uints1[],Uint uints2[])654 erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
655 Eterm atoms[], Uint uints1[], Uint uints2[])
656 {
657 Sint i;
658 Eterm res = THE_NON_VALUE;
659 if (szp) {
660 *szp += 6*length;
661 i = length;
662 while (--i >= 0) {
663 if (!IS_USMALL(0, uints1[i]))
664 *szp += BIG_UINT_HEAP_SIZE;
665 if (!IS_USMALL(0, uints2[i]))
666 *szp += BIG_UINT_HEAP_SIZE;
667 }
668 }
669 if (hpp) {
670 i = length;
671 res = NIL;
672
673 while (--i >= 0) {
674 Eterm ui1;
675 Eterm ui2;
676
677 if (IS_USMALL(0, uints1[i]))
678 ui1 = make_small(uints1[i]);
679 else {
680 ui1 = uint_to_big(uints1[i], *hpp);
681 *hpp += BIG_UINT_HEAP_SIZE;
682 }
683
684 if (IS_USMALL(0, uints2[i]))
685 ui2 = make_small(uints2[i]);
686 else {
687 ui2 = uint_to_big(uints2[i], *hpp);
688 *hpp += BIG_UINT_HEAP_SIZE;
689 }
690
691 res = CONS(*hpp+4, TUPLE3(*hpp, atoms[i], ui1, ui2), res);
692 *hpp += 6;
693 }
694 }
695 return res;
696 }
697
698 /* *\
699 * *
700 \* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
701
702 /* make a hash index from an erlang term */
703
704 /*
705 ** There are two hash functions.
706 **
707 ** make_hash: A hash function that will give the same values for the same
708 ** terms regardless of the internal representation. Small integers are
709 ** hashed using the same algorithm as bignums and bignums are hashed
** independent of the CPU endianness.
711 ** Make_hash also hashes pids, ports and references like 32 bit numbers
712 ** (but with different constants).
713 ** make_hash() is called from the bif erlang:phash/2
714 **
715 ** The idea behind the hash algorithm is to produce values suitable for
716 ** linear dynamic hashing. We cannot choose the range at all while hashing
717 ** (it's not even supplied to the hashing functions). The good old algorithm
718 ** [H = H*C+X mod M, where H is the hash value, C is a "random" constant(or M),
719 ** M is the range, preferably a prime, and X is each byte value] is therefore
720 ** modified to:
721 ** H = H*C+X mod 2^32, where C is a large prime. This gives acceptable
722 ** "spreading" of the hashes, so that later modulo calculations also will give
723 ** acceptable "spreading" in the range.
724 ** We really need to hash on bytes, otherwise the
725 ** upper bytes of a word will be less significant than the lower ones. That's
726 ** not acceptable at all. For internal use one could maybe optimize by using
727 ** another hash function, that is less strict but faster. That is, however, not
728 ** implemented.
729 **
730 ** Short semi-formal description of make_hash:
731 **
732 ** In make_hash, the number N is treated like this:
733 ** Abs(N) is hashed bytewise with the least significant byte, B(0), first.
734 ** The number of bytes (J) to calculate hash on in N is
735 ** (the number of _32_ bit words needed to store the unsigned
736 ** value of abs(N)) * 4.
737 ** X = FUNNY_NUMBER2
738 ** If N < 0, Y = FUNNY_NUMBER4 else Y = FUNNY_NUMBER3.
739 ** The hash value is Y*h(J) mod 2^32 where h(J) is calculated like
740 ** h(0) = <initial hash>
741 ** h(i) = h(i-1)*X + B(i-1)
742 ** The above should hold regardless of internal representation.
** Pids are hashed like small numbers but with different constants, as are
744 ** ports.
745 ** References are hashed like ports but only on the least significant byte.
746 ** Binaries are hashed on all bytes (not on the 15 first as in
747 ** make_broken_hash()).
748 ** Bytes in lists (possibly text strings) use a simpler multiplication inlined
749 ** in the handling of lists, that is an optimization.
750 ** Everything else is like in the old hash (make_broken_hash()).
751 **
752 ** make_hash2() is faster than make_hash, in particular for bignums
753 ** and binaries, and produces better hash values.
754 */
755
756 /* some prime numbers just above 2 ^ 28 */
757
758 #define FUNNY_NUMBER1 268440163
759 #define FUNNY_NUMBER2 268439161
760 #define FUNNY_NUMBER3 268435459
761 #define FUNNY_NUMBER4 268436141
762 #define FUNNY_NUMBER5 268438633
763 #define FUNNY_NUMBER6 268437017
764 #define FUNNY_NUMBER7 268438039
765 #define FUNNY_NUMBER8 268437511
766 #define FUNNY_NUMBER9 268439627
767 #define FUNNY_NUMBER10 268440479
768 #define FUNNY_NUMBER11 268440577
769 #define FUNNY_NUMBER12 268440581
770 #define FUNNY_NUMBER13 268440593
771 #define FUNNY_NUMBER14 268440611
772
/*
 * Fold the bytes (and any trailing partial byte) of binary 'bin' into
 * the running make_hash value 'hash'.  'sz' is the whole-byte size of
 * the binary.  Handles byte-unaligned binaries by reassembling each
 * byte from two neighbouring source bytes.
 */
static Uint32
hash_binary_bytes(Eterm bin, Uint sz, Uint32 hash)
{
    byte* ptr;
    Uint bitoffs;
    Uint bitsize;

    ERTS_GET_BINARY_BYTES(bin, ptr, bitoffs, bitsize);
    if (bitoffs == 0) {
	/* Byte-aligned: hash bytes directly. */
	while (sz--) {
	    hash = hash*FUNNY_NUMBER1 + *ptr++;
	}
	if (bitsize > 0) {
	    /* Trailing bits: fold them in together with the bit count so
	       binaries differing only in bitsize hash differently. */
	    byte b = *ptr;

	    b >>= 8 - bitsize;
	    hash = (hash*FUNNY_NUMBER1 + b) * FUNNY_NUMBER12 + bitsize;
	}
    } else {
	/* Unaligned: each logical byte straddles two physical bytes. */
	Uint previous = *ptr++;
	Uint b;
	Uint lshift = bitoffs;
	Uint rshift = 8 - lshift;

	while (sz--) {
	    b = (previous << lshift) & 0xFF;
	    previous = *ptr++;
	    b |= previous >> rshift;
	    hash = hash*FUNNY_NUMBER1 + b;
	}
	if (bitsize > 0) {
	    b = (previous << lshift) & 0xFF;
	    previous = *ptr++;
	    b |= previous >> rshift;

	    b >>= 8 - bitsize;
	    hash = (hash*FUNNY_NUMBER1 + b) * FUNNY_NUMBER12 + bitsize;
	}
    }
    return hash;
}
814
/*
 * Compute the erlang:phash/2-style hash of 'term_arg'.  Traversal of
 * nested terms is iterative, driven by an explicit WSTACK work stack of
 * tag values and pseudo-ops, to avoid unbounded C recursion.  See the
 * long algorithm description in the comment block above.
 */
Uint32 make_hash(Eterm term_arg)
{
    DECLARE_WSTACK(stack);
    Eterm term = term_arg;
    Eterm hash = 0;
    unsigned op;

    /* Pseudo-ops pushed onto the work stack alongside real tag values. */
#define MAKE_HASH_TUPLE_OP (FIRST_VACANT_TAG_DEF)
#define MAKE_HASH_TERM_ARRAY_OP (FIRST_VACANT_TAG_DEF+1)
#define MAKE_HASH_CDR_PRE_OP (FIRST_VACANT_TAG_DEF+2)
#define MAKE_HASH_CDR_POST_OP (FIRST_VACANT_TAG_DEF+3)

    /*
    ** Convenience macro for calculating a bytewise hash on an unsigned 32 bit
    ** integer.
    ** If the endianness is known, we could be smarter here,
    ** but that gives no significant speedup (on a sparc at least)
    */
#define UINT32_HASH_STEP(Expr, Prime1)					\
	do {								\
	    Uint32 x = (Uint32) (Expr);	                                \
	    hash =							\
		(((((hash)*(Prime1) + (x & 0xFF)) * (Prime1) + 	        \
		((x >> 8) & 0xFF)) * (Prime1) + 	                \
		((x >> 16) & 0xFF)) * (Prime1) + 	                \
		 (x >> 24));						\
	} while(0)

#define UINT32_HASH_RET(Expr, Prime1, Prime2)           \
	UINT32_HASH_STEP(Expr, Prime1);			\
        hash = hash * (Prime2);				\
        break


    /*
     * Significant additions needed for real 64 bit port with larger fixnums.
     */

    /*
     * Note, for the simple 64bit port, not utilizing the
     * larger word size this function will work without modification.
     */
tail_recur:
    op = tag_val_def(term);

    for (;;) {
    switch (op) {
    case NIL_DEF:
	hash = hash*FUNNY_NUMBER3 + 1;
	break;
    case ATOM_DEF:
	hash = hash*FUNNY_NUMBER1 +
	    (atom_tab(atom_val(term))->slot.bucket.hvalue);
	break;
    case SMALL_DEF:
	{
	    /* Hash the magnitude bytewise; sign selects the final factor
	       so that smalls hash exactly like the equivalent bignum. */
	    Sint y1 = signed_val(term);
	    Uint y2 = y1 < 0 ? -(Uint)y1 : y1;

	    UINT32_HASH_STEP(y2, FUNNY_NUMBER2);
#if defined(ARCH_64)
	    if (y2 >> 32)
		UINT32_HASH_STEP(y2 >> 32, FUNNY_NUMBER2);
#endif
	    hash *= (y1 < 0 ? FUNNY_NUMBER4 : FUNNY_NUMBER3);
	    break;
	}
    case BINARY_DEF:
	{
	    Uint sz = binary_size(term);

	    hash = hash_binary_bytes(term, sz, hash);
	    hash = hash*FUNNY_NUMBER4 + sz;
	    break;
	}
    case EXPORT_DEF:
	{
	    Export* ep = *((Export **) (export_val(term) + 1));

	    hash = hash * FUNNY_NUMBER11 + ep->info.mfa.arity;
	    hash = hash*FUNNY_NUMBER1 +
		(atom_tab(atom_val(ep->info.mfa.module))->slot.bucket.hvalue);
	    hash = hash*FUNNY_NUMBER1 +
		(atom_tab(atom_val(ep->info.mfa.function))->slot.bucket.hvalue);
	    break;
	}

    case FUN_DEF:
	{
	    ErlFunThing* funp = (ErlFunThing *) fun_val(term);
	    Uint num_free = funp->num_free;

	    hash = hash * FUNNY_NUMBER10 + num_free;
	    hash = hash*FUNNY_NUMBER1 +
		(atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue);
	    hash = hash*FUNNY_NUMBER2 + funp->fe->index;
	    hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
	    if (num_free > 0) {
		/* Hash env[0] now (tail call); defer the rest of the
		   environment via a TERM_ARRAY pseudo-op. */
		if (num_free > 1) {
		    WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_TERM_ARRAY_OP);
		}
		term = funp->env[0];
		goto tail_recur;
	    }
	    break;
	}
    case PID_DEF:
	UINT32_HASH_RET(internal_pid_number(term),FUNNY_NUMBER5,FUNNY_NUMBER6);
    case EXTERNAL_PID_DEF:
	UINT32_HASH_RET(external_pid_number(term),FUNNY_NUMBER5,FUNNY_NUMBER6);
    case PORT_DEF:
	UINT32_HASH_RET(internal_port_number(term),FUNNY_NUMBER9,FUNNY_NUMBER10);
    case EXTERNAL_PORT_DEF:
	UINT32_HASH_RET(external_port_number(term),FUNNY_NUMBER9,FUNNY_NUMBER10);
    case REF_DEF:
	/* Only the first ref word is hashed (documented legacy behavior). */
	UINT32_HASH_RET(internal_ref_numbers(term)[0],FUNNY_NUMBER9,FUNNY_NUMBER10);
    case EXTERNAL_REF_DEF:
	UINT32_HASH_RET(external_ref_numbers(term)[0],FUNNY_NUMBER9,FUNNY_NUMBER10);
    case FLOAT_DEF:
	{
	    FloatDef ff;
	    GET_DOUBLE(term, ff);
	    if (ff.fd == 0.0f) {
		/* ensure positive 0.0 */
		ff.fd = erts_get_positive_zero_float();
	    }
	    hash = hash*FUNNY_NUMBER6 + (ff.fw[0] ^ ff.fw[1]);
	    break;
	}
    case MAKE_HASH_CDR_PRE_OP:
	/* Resume with a deferred cdr; if it is not a list we must still
	   apply the end-of-list factor afterwards. */
	term = (Eterm) WSTACK_POP(stack);
	if (is_not_list(term)) {
	    WSTACK_PUSH(stack, (UWord) MAKE_HASH_CDR_POST_OP);
	    goto tail_recur;
	}
	/* fall through */
    case LIST_DEF:
	{
	    Eterm* list = list_val(term);
	    while(is_byte(*list)) {
		/* Optimization for strings.
		** Note that this hash is different from a 'small' hash,
		** as multiplications on a Sparc is so slow.
		*/
		hash = hash*FUNNY_NUMBER2 + unsigned_val(*list);

		if (is_not_list(CDR(list))) {
		    WSTACK_PUSH(stack, MAKE_HASH_CDR_POST_OP);
		    term = CDR(list);
		    goto tail_recur;
		}
		list = list_val(CDR(list));
	    }
	    /* Non-byte head: hash the car now, defer the cdr. */
	    WSTACK_PUSH2(stack, CDR(list), MAKE_HASH_CDR_PRE_OP);
	    term = CAR(list);
	    goto tail_recur;
	}
    case MAKE_HASH_CDR_POST_OP:
	hash *= FUNNY_NUMBER8;
	break;

    case BIG_DEF:
	/* Note that this is the exact same thing as the hashing of smalls.*/
	{
	    Eterm* ptr  = big_val(term);
	    Uint n = BIG_SIZE(ptr);
	    Uint k = n-1;
	    ErtsDigit d;
	    int is_neg = BIG_SIGN(ptr);
	    Uint i;
	    int j;

	    for (i = 0; i < k; i++)  {
		d = BIG_DIGIT(ptr, i);
		for(j = 0; j < sizeof(ErtsDigit); ++j) {
		    hash = (hash*FUNNY_NUMBER2) + (d & 0xff);
		    d >>= 8;
		}
	    }
	    d = BIG_DIGIT(ptr, k);
	    k = sizeof(ErtsDigit);
#if defined(ARCH_64)
	    /* Skip the high half of the top digit if it is zero, so a
	       value hashes the same on 32- and 64-bit digit sizes. */
	    if (!(d >> 32))
		k /= 2;
#endif
	    for(j = 0; j < (int)k; ++j) {
		hash = (hash*FUNNY_NUMBER2) + (d & 0xff);
		d >>= 8;
	    }
	    hash *= is_neg ? FUNNY_NUMBER4 : FUNNY_NUMBER3;
	    break;
	}
    case MAP_DEF:
	/* Maps delegate to make_hash2(); mixed with unique constants. */
	hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + make_hash2(term);
	break;
    case TUPLE_DEF:
	{
	    Eterm* ptr = tuple_val(term);
	    Uint arity = arityval(*ptr);

	    WSTACK_PUSH3(stack, (UWord) arity, (UWord)(ptr+1), (UWord) arity);
	    op = MAKE_HASH_TUPLE_OP;
	}/*fall through*/
    case MAKE_HASH_TUPLE_OP:
    case MAKE_HASH_TERM_ARRAY_OP:
	{
	    /* Walk an element array one term at a time; a TUPLE_OP also
	       folds in the arity (pushed below the array) when done. */
	    Uint i = (Uint) WSTACK_POP(stack);
	    Eterm* ptr = (Eterm*) WSTACK_POP(stack);
	    if (i != 0) {
		term = *ptr;
		WSTACK_PUSH3(stack, (UWord)(ptr+1), (UWord) i-1, (UWord) op);
		goto tail_recur;
	    }
	    if (op == MAKE_HASH_TUPLE_OP) {
		Uint32 arity = (Uint32) WSTACK_POP(stack);
		hash = hash*FUNNY_NUMBER9 + arity;
	    }
	    break;
	}

    default:
	erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash(0x%X,0x%X)\n", term, op);
	return 0;
      }
      if (WSTACK_ISEMPTY(stack)) break;
      op = WSTACK_POP(stack);
    }
    DESTROY_WSTACK(stack);
    return hash;

#undef MAKE_HASH_TUPLE_OP
#undef MAKE_HASH_TERM_ARRAY_OP
#undef MAKE_HASH_CDR_PRE_OP
#undef MAKE_HASH_CDR_POST_OP
#undef UINT32_HASH_STEP
#undef UINT32_HASH_RET
}
1052
1053
1054
1055 /* Hash function suggested by Bob Jenkins. */
1056
1057 #define MIX(a,b,c) \
1058 do { \
1059 a -= b; a -= c; a ^= (c>>13); \
1060 b -= c; b -= a; b ^= (a<<8); \
1061 c -= a; c -= b; c ^= (b>>13); \
1062 a -= b; a -= c; a ^= (c>>12); \
1063 b -= c; b -= a; b ^= (a<<16); \
1064 c -= a; c -= b; c ^= (b>>5); \
1065 a -= b; a -= c; a ^= (c>>3); \
1066 b -= c; b -= a; b ^= (a<<10); \
1067 c -= a; c -= b; c ^= (b>>15); \
1068 } while(0)
1069
1070 #define HCONST 0x9e3779b9UL /* the golden ratio; an arbitrary value */
1071
1072 typedef struct {
1073 Uint32 a,b,c;
1074 } ErtsBlockHashHelperCtx;
1075
1076 #define BLOCK_HASH_BYTES_PER_ITER 12
1077
1078 /* The three functions below are separated into different functions even
1079 though they are always used together to make trapping and handling
1080 of unaligned binaries easier. Examples of how they are used can be
1081 found in block_hash and make_hash2_helper.*/
/* Initialize a Jenkins block-hash context; 'initval' seeds 'c' (typically
 * the previous hash value when chaining). */
static ERTS_INLINE
void block_hash_setup(Uint32 initval,
                      ErtsBlockHashHelperCtx* ctx /* out parameter */)
{
    ctx->a = ctx->b = HCONST;
    ctx->c = initval; /* the previous hash value */
}
1089
1090 static ERTS_INLINE
block_hash_buffer(byte * buf,Uint buf_length,ErtsBlockHashHelperCtx * ctx)1091 void block_hash_buffer(byte *buf,
1092 Uint buf_length,
1093 ErtsBlockHashHelperCtx* ctx /* out parameter */)
1094 {
1095 Uint len = buf_length;
1096 byte *k = buf;
1097 ASSERT(buf_length % BLOCK_HASH_BYTES_PER_ITER == 0);
1098 while (len >= BLOCK_HASH_BYTES_PER_ITER) {
1099 ctx->a += (k[0] +((Uint32)k[1]<<8) +((Uint32)k[2]<<16) +((Uint32)k[3]<<24));
1100 ctx->b += (k[4] +((Uint32)k[5]<<8) +((Uint32)k[6]<<16) +((Uint32)k[7]<<24));
1101 ctx->c += (k[8] +((Uint32)k[9]<<8) +((Uint32)k[10]<<16)+((Uint32)k[11]<<24));
1102 MIX(ctx->a,ctx->b,ctx->c);
1103 k += BLOCK_HASH_BYTES_PER_ITER; len -= BLOCK_HASH_BYTES_PER_ITER;
1104 }
1105 }
1106
/* Finalize a Jenkins block hash: fold in the 0-11 trailing bytes in 'buf'
 * plus the total input length 'full_length', run a last MIX, and return
 * the resulting hash word. */
static ERTS_INLINE
Uint32 block_hash_final_bytes(byte *buf,
                              Uint buf_length,
                              Uint full_length,
                              ErtsBlockHashHelperCtx* ctx)
{
    Uint len = buf_length;
    byte *k = buf;
    ctx->c += full_length;
    switch(len)
    { /* all the case statements fall through */
    case 11: ctx->c+=((Uint32)k[10]<<24);
    case 10: ctx->c+=((Uint32)k[9]<<16);
    case 9 : ctx->c+=((Uint32)k[8]<<8);
    /* the first byte of c is reserved for the length */
    case 8 : ctx->b+=((Uint32)k[7]<<24);
    case 7 : ctx->b+=((Uint32)k[6]<<16);
    case 6 : ctx->b+=((Uint32)k[5]<<8);
    case 5 : ctx->b+=k[4];
    case 4 : ctx->a+=((Uint32)k[3]<<24);
    case 3 : ctx->a+=((Uint32)k[2]<<16);
    case 2 : ctx->a+=((Uint32)k[1]<<8);
    case 1 : ctx->a+=k[0];
    /* case 0: nothing left to add */
    }
    MIX(ctx->a,ctx->b,ctx->c);
    return ctx->c;
}
1135
1136 static
1137 Uint32
block_hash(byte * block,Uint block_length,Uint32 initval)1138 block_hash(byte *block, Uint block_length, Uint32 initval)
1139 {
1140 ErtsBlockHashHelperCtx ctx;
1141 Uint no_bytes_not_in_loop =
1142 (block_length % BLOCK_HASH_BYTES_PER_ITER);
1143 Uint no_bytes_to_process_in_loop =
1144 block_length - no_bytes_not_in_loop;
1145 byte *final_bytes = block + no_bytes_to_process_in_loop;
1146 block_hash_setup(initval, &ctx);
1147 block_hash_buffer(block,
1148 no_bytes_to_process_in_loop,
1149 &ctx);
1150 return block_hash_final_bytes(final_bytes,
1151 no_bytes_not_in_loop,
1152 block_length,
1153 &ctx);
1154 }
1155
/* Identifies the exact spot inside make_hash2_helper() where execution
 * trapped (yielded), so that a resumed call can jump straight back to
 * the corresponding label. */
typedef enum {
    tag_primary_list,
    arityval_subtag,
    hamt_subtag_head_flatmap,
    map_subtag,
    fun_subtag,
    neg_big_subtag,
    sub_binary_subtag_1,
    sub_binary_subtag_2,
    hash2_common_1,
    hash2_common_2,
    hash2_common_3,
} ErtsMakeHash2TrapLocation;

/* The structs below each mirror the local loop state that is live at
 * the matching TRAP_LOCATION() in make_hash2_helper(); the state is
 * copied into ErtsMakeHash2Context when trapping and copied back out
 * when resuming. */

/* List traversal (string-optimization loop). */
typedef struct {
    int c;      /* bytes accumulated in sh so far (0..3) */
    Uint32 sh;  /* up to four packed byte-sized list elements */
    Eterm* ptr; /* current cons cell */
} ErtsMakeHash2Context_TAG_PRIMARY_LIST;

/* Tuple traversal. */
typedef struct {
    int i;       /* current element index (counts down to 1) */
    int arity;   /* tuple arity */
    Eterm* elem; /* tuple_val() of the term */
} ErtsMakeHash2Context_ARITYVAL_SUBTAG;

/* Flatmap key/value push loop. */
typedef struct {
    Eterm *ks; /* key array */
    Eterm *vs; /* value array */
    int i;     /* current pair index (counts down to 0) */
    Uint size; /* number of pairs */
} ErtsMakeHash2Context_HAMT_SUBTAG_HEAD_FLATMAP;

/* HAMT node traversal. */
typedef struct {
    Eterm* ptr; /* current slot in the node */
    int i;      /* remaining slots */
} ErtsMakeHash2Context_MAP_SUBTAG;

/* Fun environment traversal. */
typedef struct {
    Uint num_free; /* remaining free variables */
    Eterm* bptr;   /* current position in funp->env (walks backwards) */
} ErtsMakeHash2Context_FUN_SUBTAG;

/* Bignum digit loop. */
typedef struct {
    Uint* ptr;  /* NOTE(review): holds big_val(term); stored as Eterm* below */
    Uint i;     /* current digit index */
    Uint n;     /* number of digits */
    Uint32 con; /* HCONST_10 for negative, HCONST_11 for positive */
} ErtsMakeHash2Context_NEG_BIG_SUBTAG;

/* Binary hashing, both the byte-aligned (sub_binary_subtag_1) and the
 * bit-shifted (sub_binary_subtag_2) trap loops. */
typedef struct {
    byte* bptr;              /* start of the binary's bytes */
    Uint sz;                 /* byte size (truncated to 32 bits) */
    Uint bitsize;            /* trailing bits (0..7) */
    Uint bitoffs;            /* bit offset within the first byte */
    Uint no_bytes_processed; /* progress through the binary */
    ErtsBlockHashHelperCtx block_hash_ctx; /* partial block hash state */
    /* The following fields are only used when bitoffs != 0 */
    byte* buf; /* temp buffer for bit-aligned copies; freed by the dtor on abort */
    int done;  /* set when the last chunk has been copied */

} ErtsMakeHash2Context_SUB_BINARY_SUBTAG;

typedef struct {
    int dummy__; /* Empty structs are not supported on all platforms */
} ErtsMakeHash2Context_EMPTY;

/* Complete saved state of a trapped make_hash2_helper() call, stored in
 * a magic binary so it is cleaned up if the process dies mid-hash. */
typedef struct {
    ErtsMakeHash2TrapLocation trap_location;
    /* specific to the trap location: */
    union {
        ErtsMakeHash2Context_TAG_PRIMARY_LIST tag_primary_list;
        ErtsMakeHash2Context_ARITYVAL_SUBTAG arityval_subtag;
        ErtsMakeHash2Context_HAMT_SUBTAG_HEAD_FLATMAP hamt_subtag_head_flatmap;
        ErtsMakeHash2Context_MAP_SUBTAG map_subtag;
        ErtsMakeHash2Context_FUN_SUBTAG fun_subtag;
        ErtsMakeHash2Context_NEG_BIG_SUBTAG neg_big_subtag;
        ErtsMakeHash2Context_SUB_BINARY_SUBTAG sub_binary_subtag_1;
        ErtsMakeHash2Context_SUB_BINARY_SUBTAG sub_binary_subtag_2;
        ErtsMakeHash2Context_EMPTY hash2_common_1;
        ErtsMakeHash2Context_EMPTY hash2_common_2;
        ErtsMakeHash2Context_EMPTY hash2_common_3;
    } trap_location_state;
    /* same for all trap locations: */
    Eterm term;            /* term to continue hashing from */
    Uint32 hash;           /* hash accumulated so far */
    Uint32 hash_xor_pairs; /* xor-accumulator for map key/value pairs */
    ErtsEStack stack;      /* saved work stack */
} ErtsMakeHash2Context;
1245
/* Destructor for the magic binary that carries a trapped make_hash2
 * state.  Releases the saved work stack and, if the call trapped while
 * hashing a bit-shifted sub binary, the temporary copy buffer too. */
static int make_hash2_ctx_bin_dtor(Binary *context_bin) {
    ErtsMakeHash2Context* ctx = ERTS_MAGIC_BIN_DATA(context_bin);
    DESTROY_SAVED_ESTACK(&ctx->stack);
    if (ctx->trap_location == sub_binary_subtag_2) {
        byte* tmp_buf = ctx->trap_location_state.sub_binary_subtag_2.buf;
        if (tmp_buf != NULL) {
            erts_free(ERTS_ALC_T_PHASH2_TRAP, tmp_buf);
        }
    }
    return 1;
}
1255
/* hash2_save_trap_state is called seldom so we want to avoid inlining */
/*
 * Saves the complete state of a trapping make_hash2_helper() call into
 * a magic binary: on the first trap a fresh binary and a magic ref are
 * created (so the state is reclaimed by make_hash2_ctx_bin_dtor if the
 * process dies); on later traps the existing binary is reused.  GC is
 * disabled for the process until the call resumes -- presumably because
 * the saved estack/term may reference heap data; the resume path
 * asserts F_DISABLE_GC and re-enables GC.  All remaining reductions are
 * consumed.  Returns the magic reference the caller should reschedule
 * with.
 */
static ERTS_NOINLINE
Eterm hash2_save_trap_state(Eterm state_mref,      /* THE_NON_VALUE on first trap */
                            Uint32 hash_xor_pairs, /* map-pair xor accumulator */
                            Uint32 hash,           /* hash accumulated so far */
                            Process* p,
                            Eterm term,            /* term to resume from */
                            Eterm* ESTK_DEF_STACK(s), /* estack default buffer (macro) */
                            ErtsEStack s,             /* work stack to save */
                            ErtsMakeHash2TrapLocation trap_location,
                            void* trap_location_state_ptr,
                            size_t trap_location_state_size) {
    Binary* state_bin;
    ErtsMakeHash2Context* context;
    if (state_mref == THE_NON_VALUE) {
        /* First trap: allocate the state holder and its magic ref. */
        Eterm* hp;
        state_bin = erts_create_magic_binary(sizeof(ErtsMakeHash2Context),
                                             make_hash2_ctx_bin_dtor);
        hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
        state_mref = erts_mk_magic_ref(&hp, &MSO(p), state_bin);
    } else {
        state_bin = erts_magic_ref2bin(state_mref);
    }
    context = ERTS_MAGIC_BIN_DATA(state_bin);
    context->term = term;
    context->hash = hash;
    context->hash_xor_pairs = hash_xor_pairs;
    ESTACK_SAVE(s, &context->stack);
    context->trap_location = trap_location;
    sys_memcpy(&context->trap_location_state,
               trap_location_state_ptr,
               trap_location_state_size);
    erts_set_gc_state(p, 0);
    BUMP_ALL_REDS(p);
    return state_mref;
}
/* NOTE(review): nothing in view defines this macro; leftover undef? */
#undef NOINLINE_HASH2_SAVE_TRAP_STATE
1293
/* Computes the phash2 hash value of a term.
 *
 * When can_trap is nonzero the function consumes reductions from p and
 * may yield: the complete state (work stack, partial hash, and the
 * loop-local context of the current trap location) is saved via
 * hash2_save_trap_state(), a magic reference is written back through
 * *state_mref_write_back, and 0 is returned.  The caller reschedules
 * with that reference as the term; this function detects it, restores
 * the state and jumps straight back to where it left off.  When
 * can_trap is zero, p may be NULL and state_mref_write_back is never
 * written. */
static ERTS_INLINE Uint32
make_hash2_helper(Eterm term_param, const int can_trap, Eterm* state_mref_write_back, Process* p)
{
    static const Uint ITERATIONS_PER_RED = 64;
    Uint32 hash;
    Uint32 hash_xor_pairs;
    Eterm term = term_param;
    ERTS_UNDEF(hash_xor_pairs, 0);

/* (HCONST * {2, ..., 22}) mod 2^32 */
#define HCONST_2 0x3c6ef372UL
#define HCONST_3 0xdaa66d2bUL
#define HCONST_4 0x78dde6e4UL
#define HCONST_5 0x1715609dUL
#define HCONST_6 0xb54cda56UL
#define HCONST_7 0x5384540fUL
#define HCONST_8 0xf1bbcdc8UL
#define HCONST_9 0x8ff34781UL
#define HCONST_10 0x2e2ac13aUL
#define HCONST_11 0xcc623af3UL
#define HCONST_12 0x6a99b4acUL
#define HCONST_13 0x08d12e65UL
#define HCONST_14 0xa708a81eUL
#define HCONST_15 0x454021d7UL
#define HCONST_16 0xe3779b90UL
#define HCONST_17 0x81af1549UL
#define HCONST_18 0x1fe68f02UL
#define HCONST_19 0xbe1e08bbUL
#define HCONST_20 0x5c558274UL
#define HCONST_21 0xfa8cfc2dUL
#define HCONST_22 0x98c475e6UL

/* Markers pushed on the estack to delimit map hashing; they reuse ref
 * headers, which can never appear as ordinary estack'ed terms here. */
#define HASH_MAP_TAIL (_make_header(1,_TAG_HEADER_REF))
#define HASH_MAP_PAIR (_make_header(2,_TAG_HEADER_REF))
#define HASH_CDR (_make_header(3,_TAG_HEADER_REF))

#define UINT32_HASH_2(Expr1, Expr2, AConst)  \
    do {                                     \
        Uint32 a,b;                          \
        a = AConst + (Uint32) (Expr1);       \
        b = AConst + (Uint32) (Expr2);       \
        MIX(a,b,hash);                       \
    } while(0)

#define UINT32_HASH(Expr, AConst) UINT32_HASH_2(Expr, 0, AConst)

#define SINT32_HASH(Expr, AConst)                                     \
    do {                                                              \
        Sint32 y = (Sint32) (Expr);                                   \
        if (y < 0) {                                                  \
            UINT32_HASH(-y, AConst);                                  \
            /* Negative numbers are unnecessarily mixed twice. */     \
        }                                                             \
        UINT32_HASH(y, AConst);                                       \
    } while(0)

#define IS_SSMALL28(x) (((Uint) (((x) >> (28-1)) + 1)) < 2)

/* Hash a small that does not fit in 28 bits as if it were a bignum
 * (same constants as POS/NEG_BIG_SUBTAG) for hash compatibility. */
#define NOT_SSMALL28_HASH(SMALL)                      \
    do {                                              \
        Uint64 t;                                     \
        Uint32 x, y;                                  \
        Uint32 con;                                   \
        if (SMALL < 0) {                              \
            con = HCONST_10;                          \
            t = (Uint64)(SMALL * (-1));               \
        } else {                                      \
            con = HCONST_11;                          \
            t = SMALL;                                \
        }                                             \
        x = t & 0xffffffff;                           \
        y = t >> 32;                                  \
        UINT32_HASH_2(x, y, con);                     \
    } while(0)

#ifdef ARCH_64
#  define POINTER_HASH(Ptr, AConst) UINT32_HASH_2((Uint32)(UWord)(Ptr), (((UWord)(Ptr)) >> 32), AConst)
#else
#  define POINTER_HASH(Ptr, AConst) UINT32_HASH(Ptr, AConst)
#endif

/* Trap point without reduction accounting.  Expands both to the
 * save-state-and-return code AND to the L_<location> label that the
 * resume switch jumps to; `ctx` is the loop-local state struct in
 * scope at the expansion site. */
#define TRAP_LOCATION_NO_RED(location_name)                           \
    do {                                                              \
        if(can_trap && iterations_until_trap <= 0) {                  \
            *state_mref_write_back =                                  \
                hash2_save_trap_state(state_mref,                     \
                                      hash_xor_pairs,                 \
                                      hash,                           \
                                      p,                              \
                                      term,                           \
                                      ESTK_DEF_STACK(s),              \
                                      s,                              \
                                      location_name,                  \
                                      &ctx,                           \
                                      sizeof(ctx));                   \
            return 0;                                                 \
            L_##location_name:                                        \
            ctx = context->trap_location_state. location_name;        \
        }                                                             \
    } while(0)

/* Trap point that also costs one iteration of the reduction budget. */
#define TRAP_LOCATION(location_name)                  \
    do {                                              \
        if (can_trap) {                               \
            iterations_until_trap--;                  \
            TRAP_LOCATION_NO_RED(location_name);      \
        }                                             \
    } while(0)

/* Trap point for sites with no loop-local state to preserve. */
#define TRAP_LOCATION_NO_CTX(location_name)           \
    do {                                              \
        ErtsMakeHash2Context_EMPTY ctx;               \
        TRAP_LOCATION(location_name);                 \
    } while(0)

    /* Optimization. Simple cases before declaration of estack. */
    if (primary_tag(term) == TAG_PRIMARY_IMMED1) {
        switch (term & _TAG_IMMED1_MASK) {
        case _TAG_IMMED1_IMMED2:
            switch (term & _TAG_IMMED2_MASK) {
            case _TAG_IMMED2_ATOM:
                /* Fast, but the poor hash value should be mixed. */
                return atom_tab(atom_val(term))->slot.bucket.hvalue;
            }
            break;
        case _TAG_IMMED1_SMALL:
          {
              Sint small = signed_val(term);
              if (SMALL_BITS > 28 && !IS_SSMALL28(small)) {
                  hash = 0;
                  NOT_SSMALL28_HASH(small);
                  return hash;
              }
              hash = 0;
              SINT32_HASH(small, HCONST);
              return hash;
          }
        }
    };
    {
        Eterm tmp;
        long max_iterations = 0;
        long iterations_until_trap = 0;
        Eterm state_mref = THE_NON_VALUE;
        ErtsMakeHash2Context* context = NULL;
        DECLARE_ESTACK(s);
        ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
        if(can_trap){
#ifdef DEBUG
            /* Small pseudo-random (LCG-style) budget so the trapping
             * paths are exercised frequently in debug builds. */
            (void)ITERATIONS_PER_RED;
            iterations_until_trap = max_iterations =
                (1103515245 * (ERTS_BIF_REDS_LEFT(p)) + 12345) % 227;
#else
            iterations_until_trap = max_iterations =
                ITERATIONS_PER_RED * ERTS_BIF_REDS_LEFT(p);
#endif
        }
        if (can_trap && is_internal_magic_ref(term)) {
            Binary* state_bin;
            state_mref = term;
            state_bin = erts_magic_ref2bin(state_mref);
            if (ERTS_MAGIC_BIN_DESTRUCTOR(state_bin) == make_hash2_ctx_bin_dtor) {
                /* Restore state after a trap */
                context = ERTS_MAGIC_BIN_DATA(state_bin);
                term = context->term;
                hash = context->hash;
                hash_xor_pairs = context->hash_xor_pairs;
                ESTACK_RESTORE(s, &context->stack);
                ASSERT(p->flags & F_DISABLE_GC);
                erts_set_gc_state(p, 1);
                /* Jump into the label the matching TRAP_LOCATION*()
                 * macro expanded at the point where we yielded. */
                switch (context->trap_location) {
                case hash2_common_3: goto L_hash2_common_3;
                case tag_primary_list: goto L_tag_primary_list;
                case arityval_subtag: goto L_arityval_subtag;
                case hamt_subtag_head_flatmap: goto L_hamt_subtag_head_flatmap;
                case map_subtag: goto L_map_subtag;
                case fun_subtag: goto L_fun_subtag;
                case neg_big_subtag: goto L_neg_big_subtag;
                case sub_binary_subtag_1: goto L_sub_binary_subtag_1;
                case sub_binary_subtag_2: goto L_sub_binary_subtag_2;
                case hash2_common_1: goto L_hash2_common_1;
                case hash2_common_2: goto L_hash2_common_2;
                }
            }
        }
        hash = 0;
        for (;;) {
            switch (primary_tag(term)) {
            case TAG_PRIMARY_LIST:
            {
                ErtsMakeHash2Context_TAG_PRIMARY_LIST ctx = {
                    .c = 0,
                    .sh = 0,
                    .ptr = list_val(term)};
                while (is_byte(*ctx.ptr)) {
                    /* Optimization for strings. */
                    ctx.sh = (ctx.sh << 8) + unsigned_val(*ctx.ptr);
                    if (ctx.c == 3) {
                        UINT32_HASH(ctx.sh, HCONST_4);
                        ctx.c = ctx.sh = 0;
                    } else {
                        ctx.c++;
                    }
                    term = CDR(ctx.ptr);
                    if (is_not_list(term))
                        break;
                    ctx.ptr = list_val(term);
                    TRAP_LOCATION(tag_primary_list);
                }
                if (ctx.c > 0)
                    UINT32_HASH(ctx.sh, HCONST_4);
                if (is_list(term)) {
                    tmp = CDR(ctx.ptr);
                    ESTACK_PUSH(s, tmp);
                    term = CAR(ctx.ptr);
                }
            }
            break;
            case TAG_PRIMARY_BOXED:
            {
                Eterm hdr = *boxed_val(term);
                ASSERT(is_header(hdr));
                switch (hdr & _TAG_HEADER_MASK) {
                case ARITYVAL_SUBTAG:
                {
                    ErtsMakeHash2Context_ARITYVAL_SUBTAG ctx = {
                        .i = 0,
                        .arity = header_arity(hdr),
                        .elem = tuple_val(term)};
                    UINT32_HASH(ctx.arity, HCONST_9);
                    if (ctx.arity == 0) /* Empty tuple */
                        goto hash2_common;
                    for (ctx.i = ctx.arity; ; ctx.i--) {
                        term = ctx.elem[ctx.i];
                        if (ctx.i == 1)
                            break;
                        ESTACK_PUSH(s, term);
                        TRAP_LOCATION(arityval_subtag);
                    }
                }
                break;
                case MAP_SUBTAG:
                {
                    Uint size;
                    ErtsMakeHash2Context_MAP_SUBTAG ctx = {
                        .ptr = boxed_val(term) + 1,
                        .i = 0};
                    switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
                    case HAMT_SUBTAG_HEAD_FLATMAP:
                    {
                        flatmap_t *mp = (flatmap_t *)flatmap_val(term);
                        ErtsMakeHash2Context_HAMT_SUBTAG_HEAD_FLATMAP ctx = {
                            .ks = flatmap_get_keys(mp),
                            .vs = flatmap_get_values(mp),
                            .i = 0,
                            .size = flatmap_get_size(mp)};
                        UINT32_HASH(ctx.size, HCONST_16);
                        if (ctx.size == 0)
                            goto hash2_common;

                        /* We want a portable hash function that is *independent* of
                         * the order in which keys and values are encountered.
                         * We therefore calculate context independent hashes for all
                         * key-value pairs and then xor them together.
                         */
                        ESTACK_PUSH(s, hash_xor_pairs);
                        ESTACK_PUSH(s, hash);
                        ESTACK_PUSH(s, HASH_MAP_TAIL);
                        hash = 0;
                        hash_xor_pairs = 0;
                        for (ctx.i = ctx.size - 1; ctx.i >= 0; ctx.i--) {
                            ESTACK_PUSH(s, HASH_MAP_PAIR);
                            ESTACK_PUSH(s, ctx.vs[ctx.i]);
                            ESTACK_PUSH(s, ctx.ks[ctx.i]);
                            TRAP_LOCATION(hamt_subtag_head_flatmap);
                        }
                        goto hash2_common;
                    }

                    case HAMT_SUBTAG_HEAD_ARRAY:
                    case HAMT_SUBTAG_HEAD_BITMAP:
                        size = *ctx.ptr++;
                        UINT32_HASH(size, HCONST_16);
                        if (size == 0)
                            goto hash2_common;
                        ESTACK_PUSH(s, hash_xor_pairs);
                        ESTACK_PUSH(s, hash);
                        ESTACK_PUSH(s, HASH_MAP_TAIL);
                        hash = 0;
                        hash_xor_pairs = 0;
                    }
                    switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
                    case HAMT_SUBTAG_HEAD_ARRAY:
                        ctx.i = 16;
                        break;
                    case HAMT_SUBTAG_HEAD_BITMAP:
                    case HAMT_SUBTAG_NODE_BITMAP:
                        ctx.i = hashmap_bitcount(MAP_HEADER_VAL(hdr));
                        break;
                    default:
                        erts_exit(ERTS_ERROR_EXIT, "bad header");
                    }
                    while (ctx.i) {
                        if (is_list(*ctx.ptr)) {
                            /* Leaf: a [Key|Value] cons cell. */
                            Eterm* cons = list_val(*ctx.ptr);
                            ESTACK_PUSH(s, HASH_MAP_PAIR);
                            ESTACK_PUSH(s, CDR(cons));
                            ESTACK_PUSH(s, CAR(cons));
                        }
                        else {
                            /* Child node: descend later. */
                            ASSERT(is_boxed(*ctx.ptr));
                            ESTACK_PUSH(s, *ctx.ptr);
                        }
                        ctx.i--; ctx.ptr++;
                        TRAP_LOCATION(map_subtag);
                    }
                    goto hash2_common;
                }
                break;
                case EXPORT_SUBTAG:
                {
                    Export* ep = *((Export **) (export_val(term) + 1));
                    UINT32_HASH_2
                        (ep->info.mfa.arity,
                         atom_tab(atom_val(ep->info.mfa.module))->slot.bucket.hvalue,
                         HCONST);
                    UINT32_HASH
                        (atom_tab(atom_val(ep->info.mfa.function))->slot.bucket.hvalue,
                         HCONST_14);
                    goto hash2_common;
                }

                case FUN_SUBTAG:
                {
                    ErlFunThing* funp = (ErlFunThing *) fun_val(term);
                    ErtsMakeHash2Context_FUN_SUBTAG ctx = {
                        .num_free = funp->num_free,
                        .bptr = NULL};
                    UINT32_HASH_2
                        (ctx.num_free,
                         atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue,
                         HCONST);
                    UINT32_HASH_2
                        (funp->fe->index, funp->fe->old_uniq, HCONST);
                    if (ctx.num_free == 0) {
                        goto hash2_common;
                    } else {
                        /* Walk the environment backwards, saving all but
                         * the first slot for later. */
                        ctx.bptr = funp->env + ctx.num_free - 1;
                        while (ctx.num_free-- > 1) {
                            term = *ctx.bptr--;
                            ESTACK_PUSH(s, term);
                            TRAP_LOCATION(fun_subtag);
                        }
                        term = *ctx.bptr;
                    }
                }
                break;
                case REFC_BINARY_SUBTAG:
                case HEAP_BINARY_SUBTAG:
                case SUB_BINARY_SUBTAG:
                {
#define BYTE_BITS 8
                    ErtsMakeHash2Context_SUB_BINARY_SUBTAG ctx = {
                        .bptr = 0,
                        /* !!!!!!!!!!!!!!!!!!!! OBS !!!!!!!!!!!!!!!!!!!!
                         *
                         * The size is truncated to 32 bits on the line
                         * below so that the code is compatible with old
                         * versions of the code. This means that hash
                         * values for binaries with a size greater than
                         * 4GB do not take all bytes in consideration.
                         *
                         * !!!!!!!!!!!!!!!!!!!! OBS !!!!!!!!!!!!!!!!!!!!
                         */
                        .sz = (0xFFFFFFFF & binary_size(term)),
                        .bitsize = 0,
                        .bitoffs = 0,
                        .no_bytes_processed = 0
                    };
                    Uint32 con = HCONST_13 + hash;
                    Uint iters_for_bin = MAX(1, ctx.sz / BLOCK_HASH_BYTES_PER_ITER);
                    ERTS_GET_BINARY_BYTES(term, ctx.bptr, ctx.bitoffs, ctx.bitsize);
                    if (ctx.sz == 0 && ctx.bitsize == 0) {
                        hash = con;
                    } else if (ctx.bitoffs == 0 &&
                               (!can_trap ||
                                (iterations_until_trap - iters_for_bin) > 0)) {
                        /* No need to trap while hashing binary */
                        if (can_trap) iterations_until_trap -= iters_for_bin;
                        hash = block_hash(ctx.bptr, ctx.sz, con);
                        if (ctx.bitsize > 0) {
                            UINT32_HASH_2(ctx.bitsize,
                                          (ctx.bptr[ctx.sz] >> (BYTE_BITS - ctx.bitsize)),
                                          HCONST_15);
                        }
                    } else if (ctx.bitoffs == 0) {
                        /* Need to trap while hashing binary */
                        ErtsBlockHashHelperCtx* block_hash_ctx = &ctx.block_hash_ctx;
                        block_hash_setup(con, block_hash_ctx);
                        do {
                            Uint max_bytes_to_process =
                                iterations_until_trap <= 0 ? BLOCK_HASH_BYTES_PER_ITER :
                                iterations_until_trap * BLOCK_HASH_BYTES_PER_ITER;
                            Uint bytes_left = ctx.sz - ctx.no_bytes_processed;
                            Uint even_bytes_left =
                                bytes_left - (bytes_left % BLOCK_HASH_BYTES_PER_ITER);
                            Uint bytes_to_process =
                                MIN(max_bytes_to_process, even_bytes_left);
                            block_hash_buffer(&ctx.bptr[ctx.no_bytes_processed],
                                              bytes_to_process,
                                              block_hash_ctx);
                            ctx.no_bytes_processed += bytes_to_process;
                            iterations_until_trap -=
                                MAX(1, bytes_to_process / BLOCK_HASH_BYTES_PER_ITER);
                            TRAP_LOCATION_NO_RED(sub_binary_subtag_1);
                            block_hash_ctx = &ctx.block_hash_ctx; /* Restore after trap */
                        } while ((ctx.sz - ctx.no_bytes_processed) >=
                                 BLOCK_HASH_BYTES_PER_ITER);
                        hash = block_hash_final_bytes(ctx.bptr +
                                                      ctx.no_bytes_processed,
                                                      ctx.sz - ctx.no_bytes_processed,
                                                      ctx.sz,
                                                      block_hash_ctx);
                        if (ctx.bitsize > 0) {
                            UINT32_HASH_2(ctx.bitsize,
                                          (ctx.bptr[ctx.sz] >> (BYTE_BITS - ctx.bitsize)),
                                          HCONST_15);
                        }
                    } else if (/* ctx.bitoffs != 0 && */
                               (!can_trap ||
                                (iterations_until_trap - iters_for_bin) > 0)) {
                        /* No need to trap while hashing binary */
                        Uint nr_of_bytes = ctx.sz + (ctx.bitsize != 0);
                        byte *buf = erts_alloc(ERTS_ALC_T_TMP, nr_of_bytes);
                        Uint nr_of_bits_to_copy = ctx.sz*BYTE_BITS+ctx.bitsize;
                        if (can_trap) iterations_until_trap -= iters_for_bin;
                        erts_copy_bits(ctx.bptr,
                                       ctx.bitoffs, 1, buf, 0, 1, nr_of_bits_to_copy);
                        hash = block_hash(buf, ctx.sz, con);
                        if (ctx.bitsize > 0) {
                            UINT32_HASH_2(ctx.bitsize,
                                          (buf[ctx.sz] >> (BYTE_BITS - ctx.bitsize)),
                                          HCONST_15);
                        }
                        erts_free(ERTS_ALC_T_TMP, buf);
                    } else /* ctx.bitoffs != 0 && */ {
#ifdef DEBUG
#define BINARY_BUF_SIZE (BLOCK_HASH_BYTES_PER_ITER * 3)
#else
#define BINARY_BUF_SIZE (BLOCK_HASH_BYTES_PER_ITER * 256)
#endif
#define BINARY_BUF_SIZE_BITS (BINARY_BUF_SIZE*BYTE_BITS)
                        /* Need to trap while hashing binary */
                        ErtsBlockHashHelperCtx* block_hash_ctx = &ctx.block_hash_ctx;
                        Uint nr_of_bytes = ctx.sz + (ctx.bitsize != 0);
                        ERTS_CT_ASSERT(BINARY_BUF_SIZE % BLOCK_HASH_BYTES_PER_ITER == 0);
                        ctx.buf = erts_alloc(ERTS_ALC_T_PHASH2_TRAP,
                                             MIN(nr_of_bytes, BINARY_BUF_SIZE));
                        block_hash_setup(con, block_hash_ctx);
                        do {
                            Uint bytes_left =
                                ctx.sz - ctx.no_bytes_processed;
                            Uint even_bytes_left =
                                bytes_left - (bytes_left % BLOCK_HASH_BYTES_PER_ITER);
                            Uint bytes_to_process =
                                MIN(BINARY_BUF_SIZE, even_bytes_left);
                            Uint nr_of_bits_left =
                                (ctx.sz*BYTE_BITS+ctx.bitsize) -
                                ctx.no_bytes_processed*BYTE_BITS;
                            Uint nr_of_bits_to_copy =
                                MIN(nr_of_bits_left, BINARY_BUF_SIZE_BITS);
                            ctx.done = nr_of_bits_left == nr_of_bits_to_copy;
                            erts_copy_bits(ctx.bptr + ctx.no_bytes_processed,
                                           ctx.bitoffs, 1, ctx.buf, 0, 1,
                                           nr_of_bits_to_copy);
                            block_hash_buffer(ctx.buf,
                                              bytes_to_process,
                                              block_hash_ctx);
                            ctx.no_bytes_processed += bytes_to_process;
                            iterations_until_trap -=
                                MAX(1, bytes_to_process / BLOCK_HASH_BYTES_PER_ITER);
                            TRAP_LOCATION_NO_RED(sub_binary_subtag_2);
                            block_hash_ctx = &ctx.block_hash_ctx; /* Restore after trap */
                        } while (!ctx.done);
                        nr_of_bytes = ctx.sz + (ctx.bitsize != 0);
                        hash = block_hash_final_bytes(ctx.buf +
                                                      (ctx.no_bytes_processed -
                                                       ((nr_of_bytes-1) / BINARY_BUF_SIZE) * BINARY_BUF_SIZE),
                                                      ctx.sz - ctx.no_bytes_processed,
                                                      ctx.sz,
                                                      block_hash_ctx);
                        if (ctx.bitsize > 0) {
                            Uint last_byte_index =
                                nr_of_bytes - (((nr_of_bytes-1) / BINARY_BUF_SIZE) * BINARY_BUF_SIZE) -1;
                            UINT32_HASH_2(ctx.bitsize,
                                          (ctx.buf[last_byte_index] >> (BYTE_BITS - ctx.bitsize)),
                                          HCONST_15);
                        }
                        erts_free(ERTS_ALC_T_PHASH2_TRAP, ctx.buf);
                        /* This branch always traps at least once before
                         * getting here, so `context` is non-NULL; clear
                         * the saved buf so the dtor will not double-free. */
                        context->trap_location_state.sub_binary_subtag_2.buf = NULL;
                    }
                    goto hash2_common;
#undef BYTE_BITS
#undef BINARY_BUF_SIZE
#undef BINARY_BUF_SIZE_BITS
                }
                break;
                case POS_BIG_SUBTAG:
                case NEG_BIG_SUBTAG:
                {
                    Eterm* big_val_ptr = big_val(term);
                    ErtsMakeHash2Context_NEG_BIG_SUBTAG ctx = {
                        .ptr = big_val_ptr,
                        .i = 0,
                        .n = BIG_SIZE(big_val_ptr),
                        .con = BIG_SIGN(big_val_ptr) ? HCONST_10 : HCONST_11};
#if D_EXP == 16
                    do {
                        Uint32 x, y;
                        x = ctx.i < ctx.n ? BIG_DIGIT(ctx.ptr, ctx.i++) : 0;
                        x += (Uint32)(ctx.i < ctx.n ? BIG_DIGIT(ctx.ptr, ctx.i++) : 0) << 16;
                        y = ctx.i < ctx.n ? BIG_DIGIT(ctx.ptr, ctx.i++) : 0;
                        y += (Uint32)(ctx.i < ctx.n ? BIG_DIGIT(ctx.ptr, ctx.i++) : 0) << 16;
                        UINT32_HASH_2(x, y, ctx.con);
                        TRAP_LOCATION(neg_big_subtag);
                    } while (ctx.i < ctx.n);
#elif D_EXP == 32
                    do {
                        Uint32 x, y;
                        x = ctx.i < ctx.n ? BIG_DIGIT(ctx.ptr, ctx.i++) : 0;
                        y = ctx.i < ctx.n ? BIG_DIGIT(ctx.ptr, ctx.i++) : 0;
                        UINT32_HASH_2(x, y, ctx.con);
                        TRAP_LOCATION(neg_big_subtag);
                    } while (ctx.i < ctx.n);
#elif D_EXP == 64
                    do {
                        Uint t;
                        Uint32 x, y;
                        ASSERT(ctx.i < ctx.n);
                        t = BIG_DIGIT(ctx.ptr, ctx.i++);
                        x = t & 0xffffffff;
                        y = t >> 32;
                        UINT32_HASH_2(x, y, ctx.con);
                        TRAP_LOCATION(neg_big_subtag);
                    } while (ctx.i < ctx.n);
#else
#error "unsupported D_EXP size"
#endif
                    goto hash2_common;
                }
                break;
                case REF_SUBTAG:
                    /* All parts of the ref should be hashed. */
                    /* NOTE(review): only the first ref word is mixed
                     * here -- presumably kept for hash compatibility;
                     * confirm before changing. */
                    UINT32_HASH(internal_ref_numbers(term)[0], HCONST_7);
                    goto hash2_common;
                    break;
                case EXTERNAL_REF_SUBTAG:
                    /* All parts of the ref should be hashed. */
                    UINT32_HASH(external_ref_numbers(term)[0], HCONST_7);
                    goto hash2_common;
                    break;
                case EXTERNAL_PID_SUBTAG:
                    /* Only 15 bits are hashed. */
                    UINT32_HASH(external_pid_number(term), HCONST_5);
                    goto hash2_common;
                case EXTERNAL_PORT_SUBTAG:
                    /* Only 15 bits are hashed. */
                    UINT32_HASH(external_port_number(term), HCONST_6);
                    goto hash2_common;
                case FLOAT_SUBTAG:
                {
                    FloatDef ff;
                    GET_DOUBLE(term, ff);
                    if (ff.fd == 0.0f) {
                        /* ensure positive 0.0 */
                        ff.fd = erts_get_positive_zero_float();
                    }
#if defined(WORDS_BIGENDIAN) || defined(DOUBLE_MIDDLE_ENDIAN)
                    UINT32_HASH_2(ff.fw[0], ff.fw[1], HCONST_12);
#else
                    UINT32_HASH_2(ff.fw[1], ff.fw[0], HCONST_12);
#endif
                    goto hash2_common;
                }
                break;

                default:
                    erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term);
                }
            }
            break;
            case TAG_PRIMARY_IMMED1:
                switch (term & _TAG_IMMED1_MASK) {
                case _TAG_IMMED1_PID:
                    /* Only 15 bits are hashed. */
                    UINT32_HASH(internal_pid_number(term), HCONST_5);
                    goto hash2_common;
                case _TAG_IMMED1_PORT:
                    /* Only 15 bits are hashed. */
                    UINT32_HASH(internal_port_number(term), HCONST_6);
                    goto hash2_common;
                case _TAG_IMMED1_IMMED2:
                    switch (term & _TAG_IMMED2_MASK) {
                    case _TAG_IMMED2_ATOM:
                        if (hash == 0)
                            /* Fast, but the poor hash value should be mixed. */
                            hash = atom_tab(atom_val(term))->slot.bucket.hvalue;
                        else
                            UINT32_HASH(atom_tab(atom_val(term))->slot.bucket.hvalue,
                                        HCONST_3);
                        goto hash2_common;
                    case _TAG_IMMED2_NIL:
                        if (hash == 0)
                            hash = 3468870702UL;
                        else
                            UINT32_HASH(NIL_DEF, HCONST_2);
                        goto hash2_common;
                    default:
                        erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term);
                    }
                case _TAG_IMMED1_SMALL:
                  {
                      Sint small = signed_val(term);
                      if (SMALL_BITS > 28 && !IS_SSMALL28(small)) {
                          NOT_SSMALL28_HASH(small);
                      } else {
                          SINT32_HASH(small, HCONST);
                      }

                      goto hash2_common;
                  }
                }
                break;
            default:
                erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term);

            hash2_common:

                /* Uint32 hash always has the hash value of the previous term,
                 * compounded or otherwise.
                 */

                if (ESTACK_ISEMPTY(s)) {
                    DESTROY_ESTACK(s);
                    if (can_trap) {
                        BUMP_REDS(p, (max_iterations - iterations_until_trap) / ITERATIONS_PER_RED);
                        ASSERT(!(p->flags & F_DISABLE_GC));
                    }
                    return hash;
                }

                term = ESTACK_POP(s);

                switch (term) {
                case HASH_MAP_TAIL: {
                    /* End of a map: mix the xor-accumulated pair hashes
                     * into the hash saved before the map was entered. */
                    hash = (Uint32) ESTACK_POP(s);
                    UINT32_HASH(hash_xor_pairs, HCONST_19);
                    hash_xor_pairs = (Uint32) ESTACK_POP(s);
                    TRAP_LOCATION_NO_CTX(hash2_common_1);
                    goto hash2_common;
                }
                case HASH_MAP_PAIR:
                    /* One key-value pair fully hashed; fold it in
                     * order-independently and restart for the next. */
                    hash_xor_pairs ^= hash;
                    hash = 0;
                    TRAP_LOCATION_NO_CTX(hash2_common_2);
                    goto hash2_common;
                default:
                    break;
                }

            }
            TRAP_LOCATION_NO_CTX(hash2_common_3);
        }
    }
#undef TRAP_LOCATION_NO_RED
#undef TRAP_LOCATION
#undef TRAP_LOCATION_NO_CTX
}
1973
1974 Uint32
make_hash2(Eterm term)1975 make_hash2(Eterm term)
1976 {
1977 return make_hash2_helper(term, 0, NULL, NULL);
1978 }
1979
1980 Uint32
trapping_make_hash2(Eterm term,Eterm * state_mref_write_back,Process * p)1981 trapping_make_hash2(Eterm term, Eterm* state_mref_write_back, Process* p)
1982 {
1983 return make_hash2_helper(term, 1, state_mref_write_back, p);
1984 }
1985
1986 /* Term hash function for internal use.
1987 *
1988 * Limitation #1: Is not "portable" in any way between different VM instances.
1989 *
1990 * Limitation #2: The hash value is only valid as long as the term exists
1991 * somewhere in the VM. Why? Because external pids, ports and refs are hashed
1992 * by mixing the node *pointer* value. If a node disappears and later reappears
 * with a new ErlNode struct, externals from that node will hash differently
 * than before.
1995 *
1996 * One IMPORTANT property must hold (for hamt).
1997 * EVERY BIT of the term that is significant for equality (see EQ)
1998 * MUST BE USED AS INPUT FOR THE HASH. Two different terms must always have a
1999 * chance of hashing different when salted: hash([Salt|A]) vs hash([Salt|B]).
2000 *
2001 * This is why we cannot use cached hash values for atoms for example.
2002 *
2003 */
2004
/* Mixes a compile-time constant (type information) into the running
 * `hash` variable in scope at the expansion site: an xor followed by a
 * 17-bit rotate -- much cheaper than a full MIX round. */
#define CONST_HASH(AConst)                              \
do {  /* Lightweight mixing of constant (type info) */  \
    hash ^= AConst;                                     \
    hash = (hash << 17) ^ (hash >> (32-17));            \
} while (0)
2010
2011 Uint32
make_internal_hash(Eterm term,Uint32 salt)2012 make_internal_hash(Eterm term, Uint32 salt)
2013 {
2014 Uint32 hash;
2015
2016 /* Optimization. Simple cases before declaration of estack. */
2017 if (primary_tag(term) == TAG_PRIMARY_IMMED1) {
2018 hash = salt;
2019 #if ERTS_SIZEOF_ETERM == 8
2020 UINT32_HASH_2((Uint32)term, (Uint32)(term >> 32), HCONST);
2021 #elif ERTS_SIZEOF_ETERM == 4
2022 UINT32_HASH(term, HCONST);
2023 #else
2024 # error "No you don't"
2025 #endif
2026 return hash;
2027 }
2028 {
2029 Eterm tmp;
2030 DECLARE_ESTACK(s);
2031
2032 hash = salt;
2033 for (;;) {
2034 switch (primary_tag(term)) {
2035 case TAG_PRIMARY_LIST:
2036 {
2037 int c = 0;
2038 Uint32 sh = 0;
2039 Eterm* ptr = list_val(term);
2040 while (is_byte(*ptr)) {
2041 /* Optimization for strings. */
2042 sh = (sh << 8) + unsigned_val(*ptr);
2043 if (c == 3) {
2044 UINT32_HASH(sh, HCONST_4);
2045 c = sh = 0;
2046 } else {
2047 c++;
2048 }
2049 term = CDR(ptr);
2050 if (is_not_list(term))
2051 break;
2052 ptr = list_val(term);
2053 }
2054 if (c > 0)
2055 UINT32_HASH_2(sh, (Uint32)c, HCONST_22);
2056
2057 if (is_list(term)) {
2058 tmp = CDR(ptr);
2059 CONST_HASH(HCONST_17); /* Hash CAR in cons cell */
2060 ESTACK_PUSH(s, tmp);
2061 if (is_not_list(tmp)) {
2062 ESTACK_PUSH(s, HASH_CDR);
2063 }
2064 term = CAR(ptr);
2065 }
2066 }
2067 break;
2068 case TAG_PRIMARY_BOXED:
2069 {
2070 Eterm hdr = *boxed_val(term);
2071 ASSERT(is_header(hdr));
2072 switch (hdr & _TAG_HEADER_MASK) {
2073 case ARITYVAL_SUBTAG:
2074 {
2075 int i;
2076 int arity = header_arity(hdr);
2077 Eterm* elem = tuple_val(term);
2078 UINT32_HASH(arity, HCONST_9);
2079 if (arity == 0) /* Empty tuple */
2080 goto pop_next;
2081 for (i = arity; ; i--) {
2082 term = elem[i];
2083 if (i == 1)
2084 break;
2085 ESTACK_PUSH(s, term);
2086 }
2087 }
2088 break;
2089
2090 case MAP_SUBTAG:
2091 {
2092 Eterm* ptr = boxed_val(term) + 1;
2093 Uint size;
2094 int i;
2095
2096 /*
2097 * We rely on key-value iteration order being constant
2098 * for identical maps (in this VM instance).
2099 */
2100 switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
2101 case HAMT_SUBTAG_HEAD_FLATMAP:
2102 {
2103 flatmap_t *mp = (flatmap_t *)flatmap_val(term);
2104 Eterm *ks = flatmap_get_keys(mp);
2105 Eterm *vs = flatmap_get_values(mp);
2106 size = flatmap_get_size(mp);
2107 UINT32_HASH(size, HCONST_16);
2108 if (size == 0)
2109 goto pop_next;
2110
2111 for (i = size - 1; i >= 0; i--) {
2112 ESTACK_PUSH(s, vs[i]);
2113 ESTACK_PUSH(s, ks[i]);
2114 }
2115 goto pop_next;
2116 }
2117 case HAMT_SUBTAG_HEAD_ARRAY:
2118 case HAMT_SUBTAG_HEAD_BITMAP:
2119 size = *ptr++;
2120 UINT32_HASH(size, HCONST_16);
2121 if (size == 0)
2122 goto pop_next;
2123 }
2124 switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
2125 case HAMT_SUBTAG_HEAD_ARRAY:
2126 i = 16;
2127 break;
2128 case HAMT_SUBTAG_HEAD_BITMAP:
2129 case HAMT_SUBTAG_NODE_BITMAP:
2130 i = hashmap_bitcount(MAP_HEADER_VAL(hdr));
2131 break;
2132 default:
2133 erts_exit(ERTS_ERROR_EXIT, "bad header");
2134 }
2135 while (i) {
2136 if (is_list(*ptr)) {
2137 Eterm* cons = list_val(*ptr);
2138 ESTACK_PUSH(s, CDR(cons));
2139 ESTACK_PUSH(s, CAR(cons));
2140 }
2141 else {
2142 ASSERT(is_boxed(*ptr));
2143 ESTACK_PUSH(s, *ptr);
2144 }
2145 i--; ptr++;
2146 }
2147 goto pop_next;
2148 }
2149 break;
2150 case EXPORT_SUBTAG:
2151 {
2152 Export* ep = *((Export **) (export_val(term) + 1));
2153 /* Assumes Export entries never move */
2154 POINTER_HASH(ep, HCONST_14);
2155 goto pop_next;
2156 }
2157
2158 case FUN_SUBTAG:
2159 {
2160 ErlFunThing* funp = (ErlFunThing *) fun_val(term);
2161 Uint num_free = funp->num_free;
2162 UINT32_HASH_2(num_free, funp->fe->module, HCONST_20);
2163 UINT32_HASH_2(funp->fe->index, funp->fe->old_uniq, HCONST_21);
2164 if (num_free == 0) {
2165 goto pop_next;
2166 } else {
2167 Eterm* bptr = funp->env + num_free - 1;
2168 while (num_free-- > 1) {
2169 term = *bptr--;
2170 ESTACK_PUSH(s, term);
2171 }
2172 term = *bptr;
2173 }
2174 }
2175 break;
2176 case REFC_BINARY_SUBTAG:
2177 case HEAP_BINARY_SUBTAG:
2178 case SUB_BINARY_SUBTAG:
2179 {
2180 byte* bptr;
2181 Uint sz = binary_size(term);
2182 Uint32 con = HCONST_13 + hash;
2183 Uint bitoffs;
2184 Uint bitsize;
2185
2186 ERTS_GET_BINARY_BYTES(term, bptr, bitoffs, bitsize);
2187 if (sz == 0 && bitsize == 0) {
2188 hash = con;
2189 } else {
2190 if (bitoffs == 0) {
2191 hash = block_hash(bptr, sz, con);
2192 if (bitsize > 0) {
2193 UINT32_HASH_2(bitsize, (bptr[sz] >> (8 - bitsize)),
2194 HCONST_15);
2195 }
2196 } else {
2197 byte* buf = (byte *) erts_alloc(ERTS_ALC_T_TMP,
2198 sz + (bitsize != 0));
2199 erts_copy_bits(bptr, bitoffs, 1, buf, 0, 1, sz*8+bitsize);
2200 hash = block_hash(buf, sz, con);
2201 if (bitsize > 0) {
2202 UINT32_HASH_2(bitsize, (buf[sz] >> (8 - bitsize)),
2203 HCONST_15);
2204 }
2205 erts_free(ERTS_ALC_T_TMP, (void *) buf);
2206 }
2207 }
2208 goto pop_next;
2209 }
2210 break;
2211 case POS_BIG_SUBTAG:
2212 case NEG_BIG_SUBTAG:
2213 {
2214 Eterm* ptr = big_val(term);
2215 Uint i = 0;
2216 Uint n = BIG_SIZE(ptr);
2217 Uint32 con = BIG_SIGN(ptr) ? HCONST_10 : HCONST_11;
2218 #if D_EXP == 16
2219 do {
2220 Uint32 x, y;
2221 x = i < n ? BIG_DIGIT(ptr, i++) : 0;
2222 x += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
2223 y = i < n ? BIG_DIGIT(ptr, i++) : 0;
2224 y += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
2225 UINT32_HASH_2(x, y, con);
2226 } while (i < n);
2227 #elif D_EXP == 32
2228 do {
2229 Uint32 x, y;
2230 x = i < n ? BIG_DIGIT(ptr, i++) : 0;
2231 y = i < n ? BIG_DIGIT(ptr, i++) : 0;
2232 UINT32_HASH_2(x, y, con);
2233 } while (i < n);
2234 #elif D_EXP == 64
2235 do {
2236 Uint t;
2237 Uint32 x, y;
2238 ASSERT(i < n);
2239 t = BIG_DIGIT(ptr, i++);
2240 x = t & 0xffffffff;
2241 y = t >> 32;
2242 UINT32_HASH_2(x, y, con);
2243 } while (i < n);
2244 #else
2245 #error "unsupported D_EXP size"
2246 #endif
2247 goto pop_next;
2248 }
2249 break;
2250 case REF_SUBTAG:
2251 UINT32_HASH(internal_ref_numbers(term)[0], HCONST_7);
2252 ASSERT(internal_ref_no_numbers(term) == 3);
2253 UINT32_HASH_2(internal_ref_numbers(term)[1],
2254 internal_ref_numbers(term)[2], HCONST_8);
2255 goto pop_next;
2256
2257 case EXTERNAL_REF_SUBTAG:
2258 {
2259 ExternalThing* thing = external_thing_ptr(term);
2260
2261 ASSERT(external_thing_ref_no_numbers(thing) == 3);
2262 /* See limitation #2 */
2263 #ifdef ARCH_64
2264 POINTER_HASH(thing->node, HCONST_7);
2265 UINT32_HASH(external_thing_ref_numbers(thing)[0], HCONST_7);
2266 #else
2267 UINT32_HASH_2(thing->node,
2268 external_thing_ref_numbers(thing)[0], HCONST_7);
2269 #endif
2270 UINT32_HASH_2(external_thing_ref_numbers(thing)[1],
2271 external_thing_ref_numbers(thing)[2], HCONST_8);
2272 goto pop_next;
2273 }
2274 case EXTERNAL_PID_SUBTAG: {
2275 ExternalThing* thing = external_thing_ptr(term);
2276 /* See limitation #2 */
2277 #ifdef ARCH_64
2278 POINTER_HASH(thing->node, HCONST_5);
2279 UINT32_HASH(thing->data.ui[0], HCONST_5);
2280 #else
2281 UINT32_HASH_2(thing->node, thing->data.ui[0], HCONST_5);
2282 #endif
2283 goto pop_next;
2284 }
2285 case EXTERNAL_PORT_SUBTAG: {
2286 ExternalThing* thing = external_thing_ptr(term);
2287 /* See limitation #2 */
2288 #ifdef ARCH_64
2289 POINTER_HASH(thing->node, HCONST_6);
2290 UINT32_HASH(thing->data.ui[0], HCONST_6);
2291 #else
2292 UINT32_HASH_2(thing->node, thing->data.ui[0], HCONST_6);
2293 #endif
2294 goto pop_next;
2295 }
2296 case FLOAT_SUBTAG:
2297 {
2298 FloatDef ff;
2299 GET_DOUBLE(term, ff);
2300 if (ff.fd == 0.0f) {
2301 /* ensure positive 0.0 */
2302 ff.fd = erts_get_positive_zero_float();
2303 }
2304 UINT32_HASH_2(ff.fw[0], ff.fw[1], HCONST_12);
2305 goto pop_next;
2306 }
2307 default:
2308 erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_internal_hash(0x%X, %lu)\n", term, salt);
2309 }
2310 }
2311 break;
2312 case TAG_PRIMARY_IMMED1:
2313 #if ERTS_SIZEOF_ETERM == 8
2314 UINT32_HASH_2((Uint32)term, (Uint32)(term >> 32), HCONST);
2315 #else
2316 UINT32_HASH(term, HCONST);
2317 #endif
2318 goto pop_next;
2319
2320 default:
2321 erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_internal_hash(0x%X, %lu)\n", term, salt);
2322
2323 pop_next:
2324 if (ESTACK_ISEMPTY(s)) {
2325 DESTROY_ESTACK(s);
2326
2327 return hash;
2328 }
2329
2330 term = ESTACK_POP(s);
2331
2332 switch (term) {
2333 case HASH_CDR:
2334 CONST_HASH(HCONST_18); /* Hash CDR i cons cell */
2335 goto pop_next;
2336 default:
2337 break;
2338 }
2339 }
2340 }
2341 }
2342
2343 #undef CONST_HASH
2344 #undef HASH_MAP_TAIL
2345 #undef HASH_MAP_PAIR
2346 #undef HASH_CDR
2347
2348 #undef UINT32_HASH_2
2349 #undef UINT32_HASH
2350 #undef SINT32_HASH
2351 }
2352
2353 #undef HCONST
2354 #undef MIX
2355
2356 /* error_logger !
2357 {log, Level, format, [args], #{ gl, pid, time, error_logger => #{tag, emulator => true} }}
2358 */
/* Allocate a heap fragment large enough for a logger message: the caller's
 * payload ('sz' words), a copy of the group leader term, the outer 5-tuple,
 * the error_logger sub-map, the metadata map (whose arity depends on
 * whether a group leader and/or current pid exists), and the timestamp
 * integer.
 *
 * Out-parameters:
 *   *ts  - system time in microseconds
 *   *pid - current process id, or THE_NON_VALUE if there is none
 *   *hp / *ohp / *bp - heap pointer, off-heap list and the fragment itself
 *
 * Returns the copy of 'gleader' made into the fragment (or 'gleader'
 * itself when it is a constant and needs no copying).
 */
static Eterm
do_allocate_logger_message(Eterm gleader, ErtsMonotonicTime *ts, Eterm *pid,
                           Eterm **hp, ErlOffHeap **ohp,
                           ErlHeapFragment **bp, Uint sz)
{
    Uint gl_sz;
    gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader);
    sz = sz + gl_sz + 6 /*outer 5-tuple*/
        + MAP2_SZ /* error_logger map */;

    *pid = erts_get_current_pid();

    /* Metadata map arity depends on which of gl/pid are present; must
     * mirror the branches in do_send_logger_message(). */
    if (is_nil(gleader) && is_non_value(*pid)) {
        sz += MAP2_SZ /* metadata map no gl, no pid */;
    } else if (is_nil(gleader) || is_non_value(*pid))
        sz += MAP3_SZ /* metadata map no gl or no pid*/;
    else
        sz += MAP4_SZ /* metadata map w gl w pid*/;

    *ts = ERTS_MONOTONIC_TO_USEC(erts_os_system_time());
    /* NULL heap pointer: only accumulate the words the bignum may need */
    erts_bld_sint64(NULL, &sz, *ts);

    *bp = new_message_buffer(sz);
    *ohp = &(*bp)->off_heap;
    *hp = (*bp)->mem;

    return copy_struct(gleader,gl_sz,hp,*ohp);
}
2387
/* Build the {log, Level, Format, Args, Metadata} message inside the heap
 * fragment prepared by do_allocate_logger_message() and queue it on the
 * error logger.  'gl' may be NIL (no group leader) and 'pid' may be
 * THE_NON_VALUE (no current process, e.g. a port); the metadata map shape
 * varies accordingly and must match the size reserved by the allocator.
 */
static void do_send_logger_message(Eterm gl, Eterm tag, Eterm format, Eterm args,
                                   ErtsMonotonicTime ts, Eterm pid,
                                   Eterm *hp, ErlHeapFragment *bp)
{
    Eterm message, md, el_tag = tag;
    Eterm time = erts_bld_sint64(&hp, NULL, ts);

    /* This mapping is needed for the backwards compatible error_logger */
    switch (tag) {
    case am_info: el_tag = am_info_msg; break;
    case am_warning: el_tag = am_warning_msg; break;
    default:
        /* Only the three logger levels are valid here.  (The previous
         * ASSERT(am_error) was a tautology: the atom constant is always
         * non-zero, so it asserted nothing.) */
        ASSERT(tag == am_error);
        break;
    }

    md = MAP2(hp, am_emulator, am_true, ERTS_MAKE_AM("tag"), el_tag);
    hp += MAP2_SZ;

    if (is_nil(gl) && is_non_value(pid)) {
        /* no gl and no pid, probably from a port */
        md = MAP2(hp,
                  am_error_logger, md,
                  am_time, time);
        hp += MAP2_SZ;
        pid = NIL;
    } else if (is_nil(gl)) {
        /* no gl */
        md = MAP3(hp,
                  am_error_logger, md,
                  am_pid, pid,
                  am_time, time);
        hp += MAP3_SZ;
    } else if (is_non_value(pid)) {
        /* no pid */
        md = MAP3(hp,
                  am_error_logger, md,
                  ERTS_MAKE_AM("gl"), gl,
                  am_time, time);
        hp += MAP3_SZ;
        pid = NIL;
    } else {
        md = MAP4(hp,
                  am_error_logger, md,
                  ERTS_MAKE_AM("gl"), gl,
                  am_pid, pid,
                  am_time, time);
        hp += MAP4_SZ;
    }

    message = TUPLE5(hp, am_log, tag, format, args, md);
    erts_queue_error_logger_message(pid, message, bp);
}
2441
/* Log the raw character buffer 'buf' at level 'tag' by wrapping it in a
 * "~s~n" format call and handing it to the common send path. */
static int do_send_to_logger(Eterm tag, Eterm gl, char *buf, size_t len)
{
    Eterm *hp = NULL;
    ErlOffHeap *ohp = NULL;
    ErlHeapFragment *bp = NULL;
    ErtsMonotonicTime ts;
    Eterm pid;
    Eterm msg, args, format;
    Uint need;

    /* Heap words: the message char list (2 per char), one cons cell
     * wrapping it into the argument list, and "~s~n" (4 chars * 2). */
    need = len * 2 + 2 + 8;

    /* gleader size is accounted and allocated next */
    gl = do_allocate_logger_message(gl, &ts, &pid, &hp, &ohp, &bp, need);

    msg = buf_to_intlist(&hp, buf, len, NIL);
    args = CONS(hp, msg, NIL);
    hp += 2;
    format = buf_to_intlist(&hp, "~s~n", 4, NIL);

    do_send_logger_message(gl, tag, format, args, ts, pid, hp, bp);
    return 0;
}
2466
/* Log a pre-built argument term 'args' with the format string in 'buf'
 * (length 'len'); both are copied into a fresh heap fragment. */
static int do_send_term_to_logger(Eterm tag, Eterm gl,
                                  char *buf, size_t len, Eterm args)
{
    Eterm *hp = NULL;
    ErlOffHeap *ohp = NULL;
    ErlHeapFragment *bp = NULL;
    ErtsMonotonicTime ts;
    Eterm format, pid;
    Uint need;
    Uint args_size;

    ASSERT(len > 0);

    /* Heap words: format char list (2 per char) plus a deep copy of args. */
    args_size = size_object(args);
    need = len * 2 + args_size;

    /* gleader size is accounted and allocated next */
    gl = do_allocate_logger_message(gl, &ts, &pid, &hp, &ohp, &bp, need);

    format = buf_to_intlist(&hp, buf, len, NIL);
    args = copy_struct(args, args_size, &hp, ohp);

    do_send_logger_message(gl, tag, format, args, ts, pid, hp, bp);
    return 0;
}
2493
2494 static ERTS_INLINE int
send_info_to_logger(Eterm gleader,char * buf,size_t len)2495 send_info_to_logger(Eterm gleader, char *buf, size_t len)
2496 {
2497 return do_send_to_logger(am_info, gleader, buf, len);
2498 }
2499
2500 static ERTS_INLINE int
send_warning_to_logger(Eterm gleader,char * buf,size_t len)2501 send_warning_to_logger(Eterm gleader, char *buf, size_t len)
2502 {
2503 return do_send_to_logger(erts_error_logger_warnings, gleader, buf, len);
2504 }
2505
2506 static ERTS_INLINE int
send_error_to_logger(Eterm gleader,char * buf,size_t len)2507 send_error_to_logger(Eterm gleader, char *buf, size_t len)
2508 {
2509 return do_send_to_logger(am_error, gleader, buf, len);
2510 }
2511
2512 static ERTS_INLINE int
send_error_term_to_logger(Eterm gleader,char * buf,size_t len,Eterm args)2513 send_error_term_to_logger(Eterm gleader, char *buf, size_t len, Eterm args)
2514 {
2515 return do_send_term_to_logger(am_error, gleader, buf, len, args);
2516 }
2517
2518 #define LOGGER_DSBUF_INC_SZ 256
2519
2520 static erts_dsprintf_buf_t *
grow_logger_dsbuf(erts_dsprintf_buf_t * dsbufp,size_t need)2521 grow_logger_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
2522 {
2523 size_t size;
2524 size_t free_size = dsbufp->size - dsbufp->str_len;
2525
2526 ASSERT(dsbufp && dsbufp->str);
2527
2528 if (need <= free_size)
2529 return dsbufp;
2530
2531 size = need - free_size + LOGGER_DSBUF_INC_SZ;
2532 size = (((size + LOGGER_DSBUF_INC_SZ - 1) / LOGGER_DSBUF_INC_SZ)
2533 * LOGGER_DSBUF_INC_SZ);
2534 size += dsbufp->size;
2535 ASSERT(dsbufp->str_len + need <= size);
2536 dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_LOGGER_DSBUF,
2537 (void *) dsbufp->str,
2538 size);
2539 dsbufp->size = size;
2540 return dsbufp;
2541 }
2542
2543 erts_dsprintf_buf_t *
erts_create_logger_dsbuf(void)2544 erts_create_logger_dsbuf(void)
2545 {
2546 erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_logger_dsbuf);
2547 erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_LOGGER_DSBUF,
2548 sizeof(erts_dsprintf_buf_t));
2549 sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
2550 dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_LOGGER_DSBUF,
2551 LOGGER_DSBUF_INC_SZ);
2552 dsbufp->str[0] = '\0';
2553 dsbufp->size = LOGGER_DSBUF_INC_SZ;
2554 return dsbufp;
2555 }
2556
2557 static ERTS_INLINE void
destroy_logger_dsbuf(erts_dsprintf_buf_t * dsbufp)2558 destroy_logger_dsbuf(erts_dsprintf_buf_t *dsbufp)
2559 {
2560 ASSERT(dsbufp && dsbufp->str);
2561 erts_free(ERTS_ALC_T_LOGGER_DSBUF, (void *) dsbufp->str);
2562 erts_free(ERTS_ALC_T_LOGGER_DSBUF, (void *) dsbufp);
2563 }
2564
2565 int
erts_send_info_to_logger(Eterm gleader,erts_dsprintf_buf_t * dsbufp)2566 erts_send_info_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp)
2567 {
2568 int res;
2569 res = send_info_to_logger(gleader, dsbufp->str, dsbufp->str_len);
2570 destroy_logger_dsbuf(dsbufp);
2571 return res;
2572 }
2573
2574 int
erts_send_warning_to_logger(Eterm gleader,erts_dsprintf_buf_t * dsbufp)2575 erts_send_warning_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp)
2576 {
2577 int res;
2578 res = send_warning_to_logger(gleader, dsbufp->str, dsbufp->str_len);
2579 destroy_logger_dsbuf(dsbufp);
2580 return res;
2581 }
2582
2583 int
erts_send_error_to_logger(Eterm gleader,erts_dsprintf_buf_t * dsbufp)2584 erts_send_error_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp)
2585 {
2586 int res;
2587 res = send_error_to_logger(gleader, dsbufp->str, dsbufp->str_len);
2588 destroy_logger_dsbuf(dsbufp);
2589 return res;
2590 }
2591
2592 int
erts_send_error_term_to_logger(Eterm gleader,erts_dsprintf_buf_t * dsbufp,Eterm args)2593 erts_send_error_term_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp, Eterm args)
2594 {
2595 int res;
2596 res = send_error_term_to_logger(gleader, dsbufp->str, dsbufp->str_len, args);
2597 destroy_logger_dsbuf(dsbufp);
2598 return res;
2599 }
2600
2601 int
erts_send_info_to_logger_str(Eterm gleader,char * str)2602 erts_send_info_to_logger_str(Eterm gleader, char *str)
2603 {
2604 return send_info_to_logger(gleader, str, sys_strlen(str));
2605 }
2606
2607 int
erts_send_warning_to_logger_str(Eterm gleader,char * str)2608 erts_send_warning_to_logger_str(Eterm gleader, char *str)
2609 {
2610 return send_warning_to_logger(gleader, str, sys_strlen(str));
2611 }
2612
2613 int
erts_send_error_to_logger_str(Eterm gleader,char * str)2614 erts_send_error_to_logger_str(Eterm gleader, char *str)
2615 {
2616 return send_error_to_logger(gleader, str, sys_strlen(str));
2617 }
2618
2619 int
erts_send_info_to_logger_nogl(erts_dsprintf_buf_t * dsbuf)2620 erts_send_info_to_logger_nogl(erts_dsprintf_buf_t *dsbuf)
2621 {
2622 return erts_send_info_to_logger(NIL, dsbuf);
2623 }
2624
2625 int
erts_send_warning_to_logger_nogl(erts_dsprintf_buf_t * dsbuf)2626 erts_send_warning_to_logger_nogl(erts_dsprintf_buf_t *dsbuf)
2627 {
2628 return erts_send_warning_to_logger(NIL, dsbuf);
2629 }
2630
2631 int
erts_send_error_to_logger_nogl(erts_dsprintf_buf_t * dsbuf)2632 erts_send_error_to_logger_nogl(erts_dsprintf_buf_t *dsbuf)
2633 {
2634 return erts_send_error_to_logger(NIL, dsbuf);
2635 }
2636
2637 int
erts_send_info_to_logger_str_nogl(char * str)2638 erts_send_info_to_logger_str_nogl(char *str)
2639 {
2640 return erts_send_info_to_logger_str(NIL, str);
2641 }
2642
2643 int
erts_send_warning_to_logger_str_nogl(char * str)2644 erts_send_warning_to_logger_str_nogl(char *str)
2645 {
2646 return erts_send_warning_to_logger_str(NIL, str);
2647 }
2648
2649 int
erts_send_error_to_logger_str_nogl(char * str)2650 erts_send_error_to_logger_str_nogl(char *str)
2651 {
2652 return erts_send_error_to_logger_str(NIL, str);
2653 }
2654
2655
2656 #define TMP_DSBUF_INC_SZ 256
2657
2658 static erts_dsprintf_buf_t *
grow_tmp_dsbuf(erts_dsprintf_buf_t * dsbufp,size_t need)2659 grow_tmp_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
2660 {
2661 size_t size;
2662 size_t free_size = dsbufp->size - dsbufp->str_len;
2663
2664 ASSERT(dsbufp);
2665
2666 if (need <= free_size)
2667 return dsbufp;
2668 size = need - free_size + TMP_DSBUF_INC_SZ;
2669 size = ((size + TMP_DSBUF_INC_SZ - 1)/TMP_DSBUF_INC_SZ)*TMP_DSBUF_INC_SZ;
2670 size += dsbufp->size;
2671 ASSERT(dsbufp->str_len + need <= size);
2672 dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_TMP_DSBUF,
2673 (void *) dsbufp->str,
2674 size);
2675 dsbufp->size = size;
2676 return dsbufp;
2677 }
2678
2679 erts_dsprintf_buf_t *
erts_create_tmp_dsbuf(Uint size)2680 erts_create_tmp_dsbuf(Uint size)
2681 {
2682 Uint init_size = size ? size : TMP_DSBUF_INC_SZ;
2683 erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_tmp_dsbuf);
2684 erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_TMP_DSBUF,
2685 sizeof(erts_dsprintf_buf_t));
2686 sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
2687 dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_TMP_DSBUF, init_size);
2688 dsbufp->str[0] = '\0';
2689 dsbufp->size = init_size;
2690 return dsbufp;
2691 }
2692
2693 void
erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t * dsbufp)2694 erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *dsbufp)
2695 {
2696 if (dsbufp->str)
2697 erts_free(ERTS_ALC_T_TMP_DSBUF, (void *) dsbufp->str);
2698 erts_free(ERTS_ALC_T_TMP_DSBUF, (void *) dsbufp);
2699 }
2700
/* eq and cmp are written as separate functions as eq is a little faster */
2702
2703 /*
2704 * Test for equality of two terms.
2705 * Returns 0 if not equal, or a non-zero value otherwise.
2706 */
int eq(Eterm a, Eterm b)
{
    /* Explicit work stack instead of recursion, so arbitrarily deep terms
     * cannot overflow the C stack.  It holds pending (a,b) pairs as well
     * as term-array triples tagged with TAG_PRIMARY_HEADER (see the
     * term_array label below). */
    DECLARE_WSTACK(stack);
    Sint sz;
    Eterm* aa;
    Eterm* bb;

tailrecur:
    if (is_same(a, b)) goto pop_next;  /* same immediate or same pointer */
tailrecur_ne:  /* a and b are known to differ as raw words here */

    switch (primary_tag(a)) {
    case TAG_PRIMARY_LIST:
        /* Walk both lists in lock-step; only descend (via the stack)
         * when the heads differ as raw words. */
        if (is_list(b)) {
            Eterm* aval = list_val(a);
            Eterm* bval = list_val(b);
            while (1) {
                Eterm atmp = CAR(aval);
                Eterm btmp = CAR(bval);
                if (!is_same(atmp,btmp)) {
                    /* Compare the heads now, the tails later. */
                    WSTACK_PUSH2(stack,(UWord) CDR(bval),(UWord) CDR(aval));
                    a = atmp;
                    b = btmp;
                    goto tailrecur_ne;
                }
                atmp = CDR(aval);
                btmp = CDR(bval);
                if (is_same(atmp,btmp)) {
                    goto pop_next;
                }
                if (is_not_list(atmp) || is_not_list(btmp)) {
                    a = atmp;
                    b = btmp;
                    goto tailrecur_ne;
                }
                aval = list_val(atmp);
                bval = list_val(btmp);
            }
        }
        break; /* not equal */

    case TAG_PRIMARY_BOXED:
        {
            Eterm hdr = *boxed_val(a);
            switch (hdr & _TAG_HEADER_MASK) {
            case ARITYVAL_SUBTAG:
                {
                    /* Tuple: arity words must be identical, then compare
                     * the elements as a term array. */
                    aa = tuple_val(a);
                    if (!is_boxed(b) || *boxed_val(b) != *aa)
                        goto not_equal;
                    bb = tuple_val(b);
                    if ((sz = arityval(*aa)) == 0) goto pop_next;
                    ++aa;
                    ++bb;
                    goto term_array;
                }
            case REFC_BINARY_SUBTAG:
            case HEAP_BINARY_SUBTAG:
            case SUB_BINARY_SUBTAG:
                {
                    byte* a_ptr;
                    byte* b_ptr;
                    size_t a_size;
                    size_t b_size;
                    Uint a_bitsize;
                    Uint b_bitsize;
                    Uint a_bitoffs;
                    Uint b_bitoffs;

                    if (!is_binary(b)) {
                        goto not_equal;
                    }
                    a_size = binary_size(a);
                    b_size = binary_size(b);
                    if (a_size != b_size) {
                        goto not_equal;
                    }
                    ERTS_GET_BINARY_BYTES(a, a_ptr, a_bitoffs, a_bitsize);
                    ERTS_GET_BINARY_BYTES(b, b_ptr, b_bitoffs, b_bitsize);
                    if ((a_bitsize | b_bitsize | a_bitoffs | b_bitoffs) == 0) {
                        /* Both byte-aligned with no trailing bits:
                         * plain memory compare. */
                        if (sys_memcmp(a_ptr, b_ptr, a_size) == 0) goto pop_next;
                    } else if (a_bitsize == b_bitsize) {
                        /* Unaligned and/or bitstrings: bit-level compare. */
                        if (erts_cmp_bits(a_ptr, a_bitoffs, b_ptr, b_bitoffs,
                                          (a_size << 3) + a_bitsize) == 0) goto pop_next;
                    }
                    break; /* not equal */
                }
            case EXPORT_SUBTAG:
                {
                    /* Export entries are shared; pointer equality suffices. */
                    if (is_export(b)) {
                        Export* a_exp = *((Export **) (export_val(a) + 1));
                        Export* b_exp = *((Export **) (export_val(b) + 1));
                        if (a_exp == b_exp) goto pop_next;
                    }
                    break; /* not equal */
                }
            case FUN_SUBTAG:
                {
                    ErlFunThing* f1;
                    ErlFunThing* f2;

                    if (!is_fun(b))
                        goto not_equal;
                    f1 = (ErlFunThing *) fun_val(a);
                    f2 = (ErlFunThing *) fun_val(b);
                    /* Identity: module, index, old_uniq and the number of
                     * captured free variables must all match. */
                    if (f1->fe->module != f2->fe->module ||
                        f1->fe->index != f2->fe->index ||
                        f1->fe->old_uniq != f2->fe->old_uniq ||
                        f1->num_free != f2->num_free) {
                        goto not_equal;
                    }
                    /* Then compare the captured environments. */
                    if ((sz = f1->num_free) == 0) goto pop_next;
                    aa = f1->env;
                    bb = f2->env;
                    goto term_array;
                }

            case EXTERNAL_PID_SUBTAG:
            case EXTERNAL_PORT_SUBTAG: {
                ExternalThing *ap;
                ExternalThing *bp;

                if(!is_external(b))
                    goto not_equal;

                ap = external_thing_ptr(a);
                bp = external_thing_ptr(b);

                /* Same subtag (header word), same node, same data word. */
                if(ap->header == bp->header && ap->node == bp->node) {
                    ASSERT(1 == external_data_words(a));
                    ASSERT(1 == external_data_words(b));

                    if (ap->data.ui[0] == bp->data.ui[0]) goto pop_next;
                }
                break; /* not equal */
            }
            case EXTERNAL_REF_SUBTAG: {
                /*
                 * Observe!
                 * When comparing refs we need to compare ref numbers
                 * (32-bit words) *not* ref data words.
                 */
                Uint32 *anum;
                Uint32 *bnum;
                Uint common_len;
                Uint alen;
                Uint blen;
                Uint i;
                ExternalThing* athing;
                ExternalThing* bthing;

                if(!is_external_ref(b))
                    goto not_equal;

                athing = external_thing_ptr(a);
                bthing = external_thing_ptr(b);

                if(athing->node != bthing->node)
                    goto not_equal;

                anum = external_thing_ref_numbers(athing);
                bnum = external_thing_ref_numbers(bthing);
                alen = external_thing_ref_no_numbers(athing);
                blen = external_thing_ref_no_numbers(bthing);

                goto ref_common;

            case REF_SUBTAG:

                if (!is_internal_ref(b))
                    goto not_equal;

                alen = internal_ref_no_numbers(a);
                anum = internal_ref_numbers(a);
                blen = internal_ref_no_numbers(b);
                bnum = internal_ref_numbers(b);

            ref_common:
                /* Shared tail for internal and external refs: compare the
                 * 32-bit number arrays. */
                ASSERT(alen > 0 && blen > 0);

                if (anum[0] != bnum[0])
                    goto not_equal;

                if (alen == 3 && blen == 3) {
                    /* Most refs are of length 3 */
                    if (anum[1] == bnum[1] && anum[2] == bnum[2]) {
                        goto pop_next;
                    } else {
                        goto not_equal;
                    }
                }

                common_len = alen;
                if (blen < alen)
                    common_len = blen;

                for (i = 1; i < common_len; i++)
                    if (anum[i] != bnum[i])
                        goto not_equal;

                /* Different lengths are still equal iff every extra word
                 * of the longer ref is zero. */
                if(alen != blen) {

                    if (alen > blen) {
                        for (i = common_len; i < alen; i++)
                            if (anum[i] != 0)
                                goto not_equal;
                    }
                    else {
                        for (i = common_len; i < blen; i++)
                            if (bnum[i] != 0)
                                goto not_equal;
                    }
                }
                goto pop_next;
            }
            case POS_BIG_SUBTAG:
            case NEG_BIG_SUBTAG:
                {
                    int i;

                    if (!is_big(b))
                        goto not_equal;
                    aa = big_val(a);
                    bb = big_val(b);
                    /* Header word (sign + arity) must match exactly. */
                    if (*aa != *bb)
                        goto not_equal;
                    i = BIG_ARITY(aa);
                    while(i--) {
                        if (*++aa != *++bb)
                            goto not_equal;
                    }
                    goto pop_next;
                }
            case FLOAT_SUBTAG:
                {
                    FloatDef af;
                    FloatDef bf;

                    if (is_float(b)) {
                        GET_DOUBLE(a, af);
                        GET_DOUBLE(b, bf);
                        if (af.fd == bf.fd) goto pop_next;
                    }
                    break; /* not equal */
                }
            case MAP_SUBTAG:
                if (is_flatmap(a)) {
                    /* Flatmap: same header, same size, then compare the
                     * keys tuple plus all values as one term array. */
                    aa = flatmap_val(a);
                    if (!is_boxed(b) || *boxed_val(b) != *aa)
                        goto not_equal;
                    bb = flatmap_val(b);
                    sz = flatmap_get_size((flatmap_t*)aa);

                    if (sz != flatmap_get_size((flatmap_t*)bb)) goto not_equal;
                    if (sz == 0) goto pop_next;

                    aa += 2;
                    bb += 2;
                    sz += 1; /* increment for tuple-keys */
                    goto term_array;

                } else {
                    /* Hashmap node: identical headers imply identical
                     * structure, so children can be compared pairwise. */
                    if (!is_boxed(b) || *boxed_val(b) != hdr)
                        goto not_equal;

                    aa = hashmap_val(a) + 1;
                    bb = hashmap_val(b) + 1;
                    switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
                    case HAMT_SUBTAG_HEAD_ARRAY:
                        if (aa[0] != bb[0])
                            goto not_equal;
                        aa++; bb++;
                        sz = 16;
                        break;
                    case HAMT_SUBTAG_HEAD_BITMAP:
                        if (aa[0] != bb[0])
                            goto not_equal;
                        aa++; bb++;
                        /* fall through */
                    case HAMT_SUBTAG_NODE_BITMAP:
                        sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
                        ASSERT(sz > 0 && sz < 17);
                        break;
                    default:
                        erts_exit(ERTS_ERROR_EXIT, "Unknown hashmap subsubtag\n");
                    }
                    goto term_array;
                }
            default:
                ASSERT(!"Unknown boxed subtab in EQ");
            }
            break;
        }
    }
    goto not_equal;


term_array: /* arrays in 'aa' and 'bb', length in 'sz' */
    /* Compare parallel element arrays; the first pair that differs as raw
     * words is compared structurally, the rest is pushed for later. */
    ASSERT(sz != 0);
    {
        Eterm* ap = aa;
        Eterm* bp = bb;
        Sint i = sz;
        for (;;) {
            if (!is_same(*ap,*bp)) break;
            if (--i == 0) goto pop_next;
            ++ap;
            ++bp;
        }
        a = *ap;
        b = *bp;
        if (is_both_immed(a,b)) {
            /* Two differing immediates can never be equal. */
            goto not_equal;
        }
        if (i > 1) { /* push the rest */
            WSTACK_PUSH3(stack, i-1, (UWord)(bp+1),
                         ((UWord)(ap+1)) | TAG_PRIMARY_HEADER);
            /* We (ab)use TAG_PRIMARY_HEADER to recognize a term_array */
        }
        goto tailrecur_ne;
    }

pop_next:
    /* Current pair proved equal; resume with pending work, if any. */
    if (!WSTACK_ISEMPTY(stack)) {
        UWord something = WSTACK_POP(stack);
        if (primary_tag((Eterm) something) == TAG_PRIMARY_HEADER) { /* a term_array */
            aa = (Eterm*) something;
            bb = (Eterm*) WSTACK_POP(stack);
            sz = WSTACK_POP(stack);
            goto term_array;
        }
        a = something;
        b = WSTACK_POP(stack);
        goto tailrecur;
    }

    DESTROY_WSTACK(stack);
    return 1;

not_equal:
    DESTROY_WSTACK(stack);
    return 0;
}
3049
3050
3051
3052 /*
3053 * Compare objects.
3054 * Returns 0 if equal, a negative value if a < b, or a positive number a > b.
3055 *
 * According to the Erlang Standard, types are ordered as follows:
3057 * numbers < (characters) < atoms < refs < funs < ports < pids <
3058 * tuples < maps < [] < conses < binaries.
3059 *
3060 * Note that characters are currently not implemented.
3061 *
3062 */
3063
3064 /* cmp(Eterm a, Eterm b)
3065 * For compatibility with HiPE - arith-based compare.
3066 */
Sint cmp(Eterm a, Eterm b)
{
    /* Arith-based compare: exact = 0, eq_only = 0 (see erts_cmp). */
    return erts_cmp(a, b, 0, 0);
}
3071
3072 Sint erts_cmp_compound(Eterm a, Eterm b, int exact, int eq_only);
3073
3074 /* erts_cmp(Eterm a, Eterm b, int exact)
3075 * exact = 1 -> term-based compare
3076 * exact = 0 -> arith-based compare
3077 */
erts_cmp_compound(Eterm a,Eterm b,int exact,int eq_only)3078 Sint erts_cmp_compound(Eterm a, Eterm b, int exact, int eq_only)
3079 {
3080 #define PSTACK_TYPE struct erts_cmp_hashmap_state
3081 struct erts_cmp_hashmap_state {
3082 Sint wstack_rollback;
3083 int was_exact;
3084 Eterm *ap;
3085 Eterm *bp;
3086 Eterm min_key;
3087 Sint cmp_res; /* result so far -1,0,+1 */
3088 };
3089 PSTACK_DECLARE(hmap_stack, 1);
3090 WSTACK_DECLARE(stack);
3091 WSTACK_DECLARE(b_stack); /* only used by hashmaps */
3092 Eterm* aa;
3093 Eterm* bb;
3094 int i;
3095 Sint j;
3096 int a_tag;
3097 int b_tag;
3098 ErlNode *anode;
3099 ErlNode *bnode;
3100 Uint adata;
3101 Uint bdata;
3102 Uint alen;
3103 Uint blen;
3104 Uint32 *anum;
3105 Uint32 *bnum;
3106
3107 /* The WSTACK contains naked Eterms and Operations marked with header-tags */
3108 #define OP_BITS 4
3109 #define OP_MASK 0xF
3110 #define TERM_ARRAY_OP 0
3111 #define SWITCH_EXACT_OFF_OP 1
3112 #define HASHMAP_PHASE1_ARE_KEYS_EQUAL 2
3113 #define HASHMAP_PHASE1_IS_MIN_KEY 3
3114 #define HASHMAP_PHASE1_CMP_VALUES 4
3115 #define HASHMAP_PHASE2_ARE_KEYS_EQUAL 5
3116 #define HASHMAP_PHASE2_IS_MIN_KEY_A 6
3117 #define HASHMAP_PHASE2_IS_MIN_KEY_B 7
3118
3119
3120 #define OP_WORD(OP) (((OP) << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER)
3121 #define TERM_ARRAY_OP_WORD(SZ) OP_WORD(((SZ) << OP_BITS) | TERM_ARRAY_OP)
3122
3123 #define GET_OP(WORD) (ASSERT(is_header(WORD)), ((WORD) >> _TAG_PRIMARY_SIZE) & OP_MASK)
3124 #define GET_OP_ARG(WORD) (ASSERT(is_header(WORD)), ((WORD) >> (OP_BITS + _TAG_PRIMARY_SIZE)))
3125
3126
3127 #define RETURN_NEQ(cmp) { j=(cmp); ASSERT(j != 0); goto not_equal; }
3128 #define ON_CMP_GOTO(cmp) if ((j=(cmp)) == 0) goto pop_next; else goto not_equal
3129
3130 #undef CMP_NODES
3131 #define CMP_NODES(AN, BN) \
3132 do { \
3133 if((AN) != (BN)) { \
3134 if((AN)->sysname != (BN)->sysname) \
3135 RETURN_NEQ(erts_cmp_atoms((AN)->sysname, (BN)->sysname)); \
3136 ASSERT((AN)->creation != (BN)->creation); \
3137 RETURN_NEQ(((AN)->creation < (BN)->creation) ? -1 : 1); \
3138 } \
3139 } while (0)
3140
3141
3142 bodyrecur:
3143 j = 0;
3144 tailrecur:
3145 if (is_same(a,b)) { /* Equal values or pointers. */
3146 goto pop_next;
3147 }
3148 tailrecur_ne:
3149
3150 /* deal with majority (?) cases by brute-force */
3151 if (is_atom(a)) {
3152 if (is_atom(b)) {
3153 ON_CMP_GOTO(erts_cmp_atoms(a, b));
3154 }
3155 } else if (is_both_small(a, b)) {
3156 ON_CMP_GOTO(signed_val(a) - signed_val(b));
3157 }
3158
3159 /*
3160 * Take care of cases where the types are the same.
3161 */
3162
3163 a_tag = 42; /* Suppress warning */
3164 switch (primary_tag(a)) {
3165 case TAG_PRIMARY_IMMED1:
3166 switch ((a & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
3167 case (_TAG_IMMED1_PORT >> _TAG_PRIMARY_SIZE):
3168 if (is_internal_port(b)) {
3169 bnode = erts_this_node;
3170 bdata = internal_port_data(b);
3171 } else if (is_external_port(b)) {
3172 bnode = external_port_node(b);
3173 bdata = external_port_data(b);
3174 } else {
3175 a_tag = PORT_DEF;
3176 goto mixed_types;
3177 }
3178 anode = erts_this_node;
3179 adata = internal_port_data(a);
3180
3181 port_common:
3182 CMP_NODES(anode, bnode);
3183 ON_CMP_GOTO((Sint)(adata - bdata));
3184
3185 case (_TAG_IMMED1_PID >> _TAG_PRIMARY_SIZE):
3186 if (is_internal_pid(b)) {
3187 bnode = erts_this_node;
3188 bdata = internal_pid_data(b);
3189 } else if (is_external_pid(b)) {
3190 bnode = external_pid_node(b);
3191 bdata = external_pid_data(b);
3192 } else {
3193 a_tag = PID_DEF;
3194 goto mixed_types;
3195 }
3196 anode = erts_this_node;
3197 adata = internal_pid_data(a);
3198
3199 pid_common:
3200 if (adata != bdata) {
3201 RETURN_NEQ(adata < bdata ? -1 : 1);
3202 }
3203 CMP_NODES(anode, bnode);
3204 goto pop_next;
3205 case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
3206 a_tag = SMALL_DEF;
3207 goto mixed_types;
3208 case (_TAG_IMMED1_IMMED2 >> _TAG_PRIMARY_SIZE): {
3209 switch ((a & _TAG_IMMED2_MASK) >> _TAG_IMMED1_SIZE) {
3210 case (_TAG_IMMED2_ATOM >> _TAG_IMMED1_SIZE):
3211 a_tag = ATOM_DEF;
3212 goto mixed_types;
3213 case (_TAG_IMMED2_NIL >> _TAG_IMMED1_SIZE):
3214 a_tag = NIL_DEF;
3215 goto mixed_types;
3216 }
3217 }
3218 }
3219 case TAG_PRIMARY_LIST:
3220 if (is_not_list(b)) {
3221 a_tag = LIST_DEF;
3222 goto mixed_types;
3223 }
3224 aa = list_val(a);
3225 bb = list_val(b);
3226 while (1) {
3227 Eterm atmp = CAR(aa);
3228 Eterm btmp = CAR(bb);
3229 if (!is_same(atmp,btmp)) {
3230 WSTACK_PUSH2(stack,(UWord) CDR(bb),(UWord) CDR(aa));
3231 a = atmp;
3232 b = btmp;
3233 goto tailrecur_ne;
3234 }
3235 atmp = CDR(aa);
3236 btmp = CDR(bb);
3237 if (is_same(atmp,btmp)) {
3238 goto pop_next;
3239 }
3240 if (is_not_list(atmp) || is_not_list(btmp)) {
3241 a = atmp;
3242 b = btmp;
3243 goto tailrecur_ne;
3244 }
3245 aa = list_val(atmp);
3246 bb = list_val(btmp);
3247 }
3248 case TAG_PRIMARY_BOXED:
3249 {
3250 Eterm ahdr = *boxed_val(a);
3251 switch ((ahdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
3252 case (_TAG_HEADER_ARITYVAL >> _TAG_PRIMARY_SIZE):
3253 if (!is_tuple(b)) {
3254 a_tag = TUPLE_DEF;
3255 goto mixed_types;
3256 }
3257 aa = tuple_val(a);
3258 bb = tuple_val(b);
3259 /* compare the arities */
3260 i = arityval(ahdr); /* get the arity*/
3261 if (i != arityval(*bb)) {
3262 RETURN_NEQ((int)(i - arityval(*bb)));
3263 }
3264 if (i == 0) {
3265 goto pop_next;
3266 }
3267 ++aa;
3268 ++bb;
3269 goto term_array;
3270 case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE) :
3271 {
3272 struct erts_cmp_hashmap_state* sp;
3273 if (is_flatmap_header(ahdr)) {
3274 if (!is_flatmap(b)) {
3275 if (is_hashmap(b)) {
3276 aa = (Eterm *)flatmap_val(a);
3277 i = flatmap_get_size((flatmap_t*)aa) - hashmap_size(b);
3278 ASSERT(i != 0);
3279 RETURN_NEQ(i);
3280 }
3281 a_tag = MAP_DEF;
3282 goto mixed_types;
3283 }
3284 aa = (Eterm *)flatmap_val(a);
3285 bb = (Eterm *)flatmap_val(b);
3286
3287 i = flatmap_get_size((flatmap_t*)aa);
3288 if (i != flatmap_get_size((flatmap_t*)bb)) {
3289 RETURN_NEQ((int)(i - flatmap_get_size((flatmap_t*)bb)));
3290 }
3291 if (i == 0) {
3292 goto pop_next;
3293 }
3294 aa += 2;
3295 bb += 2;
3296 if (exact) {
3297 i += 1; /* increment for tuple-keys */
3298 goto term_array;
3299 }
3300 else {
3301 /* Value array */
3302 WSTACK_PUSH3(stack,(UWord)(bb+1),(UWord)(aa+1),TERM_ARRAY_OP_WORD(i));
3303 /* Switch back from 'exact' key compare */
3304 WSTACK_PUSH(stack,OP_WORD(SWITCH_EXACT_OFF_OP));
3305 /* Now do 'exact' compare of key tuples */
3306 a = *aa;
3307 b = *bb;
3308 exact = 1;
3309 goto bodyrecur;
3310 }
3311 }
3312 if (!is_hashmap(b)) {
3313 if (is_flatmap(b)) {
3314 bb = (Eterm *)flatmap_val(b);
3315 i = hashmap_size(a) - flatmap_get_size((flatmap_t*)bb);
3316 ASSERT(i != 0);
3317 RETURN_NEQ(i);
3318 }
3319 a_tag = MAP_DEF;
3320 goto mixed_types;
3321 }
3322 i = hashmap_size(a) - hashmap_size(b);
3323 if (i) {
3324 RETURN_NEQ(i);
3325 }
3326 if (hashmap_size(a) == 0) {
3327 goto pop_next;
3328 }
3329
3330 /* Hashmap compare strategy:
3331 Phase 1. While keys are identical
3332 Do synchronous stepping through leafs of both trees in hash
3333 order. Maintain value compare result of minimal key.
3334
3335 Phase 2. If key diff was found in phase 1
3336 Ignore values from now on.
3337 Continue iterate trees by always advancing the one
3338 lagging behind hash-wise. Identical keys are skipped.
3339 A minimal key can only be candidate as tie-breaker if we
3340 have passed that hash value in the other tree (which means
3341 the key did not exist in the other tree).
3342 */
3343
3344 sp = PSTACK_PUSH(hmap_stack);
3345 hashmap_iterator_init(&stack, a, 0);
3346 hashmap_iterator_init(&b_stack, b, 0);
3347 sp->ap = hashmap_iterator_next(&stack);
3348 sp->bp = hashmap_iterator_next(&b_stack);
3349 sp->cmp_res = 0;
3350 ASSERT(sp->ap && sp->bp);
3351
3352 a = CAR(sp->ap);
3353 b = CAR(sp->bp);
3354 sp->was_exact = exact;
3355 exact = 1;
3356 WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_ARE_KEYS_EQUAL));
3357 sp->wstack_rollback = WSTACK_COUNT(stack);
3358 goto bodyrecur;
3359 }
3360 case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
3361 if (!is_float(b)) {
3362 a_tag = FLOAT_DEF;
3363 goto mixed_types;
3364 } else {
3365 FloatDef af;
3366 FloatDef bf;
3367
3368 GET_DOUBLE(a, af);
3369 GET_DOUBLE(b, bf);
3370 ON_CMP_GOTO(erts_float_comp(af.fd, bf.fd));
3371 }
3372 case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
3373 case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
3374 if (!is_big(b)) {
3375 a_tag = BIG_DEF;
3376 goto mixed_types;
3377 }
3378 ON_CMP_GOTO(big_comp(a, b));
3379 case (_TAG_HEADER_EXPORT >> _TAG_PRIMARY_SIZE):
3380 if (!is_export(b)) {
3381 a_tag = EXPORT_DEF;
3382 goto mixed_types;
3383 } else {
3384 Export* a_exp = *((Export **) (export_val(a) + 1));
3385 Export* b_exp = *((Export **) (export_val(b) + 1));
3386
3387 if ((j = erts_cmp_atoms(a_exp->info.mfa.module,
3388 b_exp->info.mfa.module)) != 0) {
3389 RETURN_NEQ(j);
3390 }
3391 if ((j = erts_cmp_atoms(a_exp->info.mfa.function,
3392 b_exp->info.mfa.function)) != 0) {
3393 RETURN_NEQ(j);
3394 }
3395 ON_CMP_GOTO((Sint) a_exp->info.mfa.arity - (Sint) b_exp->info.mfa.arity);
3396 }
3397 break;
3398 case (_TAG_HEADER_FUN >> _TAG_PRIMARY_SIZE):
3399 if (!is_fun(b)) {
3400 a_tag = FUN_DEF;
3401 goto mixed_types;
3402 } else {
3403 ErlFunThing* f1 = (ErlFunThing *) fun_val(a);
3404 ErlFunThing* f2 = (ErlFunThing *) fun_val(b);
3405 Sint diff;
3406
3407 diff = erts_cmp_atoms((f1->fe)->module, (f2->fe)->module);
3408 if (diff != 0) {
3409 RETURN_NEQ(diff);
3410 }
3411 diff = f1->fe->index - f2->fe->index;
3412 if (diff != 0) {
3413 RETURN_NEQ(diff);
3414 }
3415 diff = f1->fe->old_uniq - f2->fe->old_uniq;
3416 if (diff != 0) {
3417 RETURN_NEQ(diff);
3418 }
3419 diff = f1->num_free - f2->num_free;
3420 if (diff != 0) {
3421 RETURN_NEQ(diff);
3422 }
3423 i = f1->num_free;
3424 if (i == 0) goto pop_next;
3425 aa = f1->env;
3426 bb = f2->env;
3427 goto term_array;
3428 }
3429 case (_TAG_HEADER_EXTERNAL_PID >> _TAG_PRIMARY_SIZE):
3430 if (is_internal_pid(b)) {
3431 bnode = erts_this_node;
3432 bdata = internal_pid_data(b);
3433 } else if (is_external_pid(b)) {
3434 bnode = external_pid_node(b);
3435 bdata = external_pid_data(b);
3436 } else {
3437 a_tag = EXTERNAL_PID_DEF;
3438 goto mixed_types;
3439 }
3440 anode = external_pid_node(a);
3441 adata = external_pid_data(a);
3442 goto pid_common;
3443 case (_TAG_HEADER_EXTERNAL_PORT >> _TAG_PRIMARY_SIZE):
3444 if (is_internal_port(b)) {
3445 bnode = erts_this_node;
3446 bdata = internal_port_data(b);
3447 } else if (is_external_port(b)) {
3448 bnode = external_port_node(b);
3449 bdata = external_port_data(b);
3450 } else {
3451 a_tag = EXTERNAL_PORT_DEF;
3452 goto mixed_types;
3453 }
3454 anode = external_port_node(a);
3455 adata = external_port_data(a);
3456 goto port_common;
3457 case (_TAG_HEADER_REF >> _TAG_PRIMARY_SIZE):
3458 /*
3459 * Note! When comparing refs we need to compare ref numbers
3460 * (32-bit words), *not* ref data words.
3461 */
3462
3463 if (is_internal_ref(b)) {
3464 bnode = erts_this_node;
3465 blen = internal_ref_no_numbers(b);
3466 bnum = internal_ref_numbers(b);
3467 } else if(is_external_ref(b)) {
3468 ExternalThing* bthing = external_thing_ptr(b);
3469 bnode = bthing->node;
3470 bnum = external_thing_ref_numbers(bthing);
3471 blen = external_thing_ref_no_numbers(bthing);
3472 } else {
3473 a_tag = REF_DEF;
3474 goto mixed_types;
3475 }
3476 anode = erts_this_node;
3477 alen = internal_ref_no_numbers(a);
3478 anum = internal_ref_numbers(a);
3479
3480 ref_common:
3481 CMP_NODES(anode, bnode);
3482
3483 ASSERT(alen > 0 && blen > 0);
3484 if (alen != blen) {
3485 if (alen > blen) {
3486 do {
3487 if (anum[alen - 1] != 0)
3488 RETURN_NEQ(1);
3489 alen--;
3490 } while (alen > blen);
3491 }
3492 else {
3493 do {
3494 if (bnum[blen - 1] != 0)
3495 RETURN_NEQ(-1);
3496 blen--;
3497 } while (alen < blen);
3498 }
3499 }
3500
3501 ASSERT(alen == blen);
3502 for (i = (Sint) alen - 1; i >= 0; i--)
3503 if (anum[i] != bnum[i])
3504 RETURN_NEQ(anum[i] < bnum[i] ? -1 : 1);
3505 goto pop_next;
3506 case (_TAG_HEADER_EXTERNAL_REF >> _TAG_PRIMARY_SIZE):
3507 if (is_internal_ref(b)) {
3508 bnode = erts_this_node;
3509 blen = internal_ref_no_numbers(b);
3510 bnum = internal_ref_numbers(b);
3511 } else if (is_external_ref(b)) {
3512 ExternalThing* bthing = external_thing_ptr(b);
3513 bnode = bthing->node;
3514 bnum = external_thing_ref_numbers(bthing);
3515 blen = external_thing_ref_no_numbers(bthing);
3516 } else {
3517 a_tag = EXTERNAL_REF_DEF;
3518 goto mixed_types;
3519 }
3520 {
3521 ExternalThing* athing = external_thing_ptr(a);
3522 anode = athing->node;
3523 anum = external_thing_ref_numbers(athing);
3524 alen = external_thing_ref_no_numbers(athing);
3525 }
3526 goto ref_common;
3527 default:
3528 /* Must be a binary */
3529 ASSERT(is_binary(a));
3530 if (!is_binary(b)) {
3531 a_tag = BINARY_DEF;
3532 goto mixed_types;
3533 } else {
3534 Uint a_size = binary_size(a);
3535 Uint b_size = binary_size(b);
3536 Uint a_bitsize;
3537 Uint b_bitsize;
3538 Uint a_bitoffs;
3539 Uint b_bitoffs;
3540 Uint min_size;
3541 int cmp;
3542 byte* a_ptr;
3543 byte* b_ptr;
3544 if (eq_only && a_size != b_size) {
3545 RETURN_NEQ(a_size - b_size);
3546 }
3547 ERTS_GET_BINARY_BYTES(a, a_ptr, a_bitoffs, a_bitsize);
3548 ERTS_GET_BINARY_BYTES(b, b_ptr, b_bitoffs, b_bitsize);
3549 if ((a_bitsize | b_bitsize | a_bitoffs | b_bitoffs) == 0) {
3550 min_size = (a_size < b_size) ? a_size : b_size;
3551 if ((cmp = sys_memcmp(a_ptr, b_ptr, min_size)) != 0) {
3552 RETURN_NEQ(cmp);
3553 }
3554 }
3555 else {
3556 a_size = (a_size << 3) + a_bitsize;
3557 b_size = (b_size << 3) + b_bitsize;
3558 min_size = (a_size < b_size) ? a_size : b_size;
3559 if ((cmp = erts_cmp_bits(a_ptr,a_bitoffs,
3560 b_ptr,b_bitoffs,min_size)) != 0) {
3561 RETURN_NEQ(cmp);
3562 }
3563 }
3564 ON_CMP_GOTO((Sint)(a_size - b_size));
3565 }
3566 }
3567 }
3568 }
3569
3570 /*
3571 * Take care of the case that the tags are different.
3572 */
3573
3574 mixed_types:
3575
3576 {
3577 FloatDef f1, f2;
3578 Eterm big;
3579 Eterm aw = a;
3580 Eterm bw = b;
3581 #define MAX_LOSSLESS_FLOAT ((double)((1LL << 53) - 2))
3582 #define MIN_LOSSLESS_FLOAT ((double)(((1LL << 53) - 2)*-1))
3583 #define BIG_ARITY_FLOAT_MAX (1024 / D_EXP) /* arity of max float as a bignum */
3584 Eterm big_buf[BIG_NEED_SIZE(BIG_ARITY_FLOAT_MAX)];
3585
3586 b_tag = tag_val_def(bw);
3587
3588 switch(_NUMBER_CODE(a_tag, b_tag)) {
3589 case SMALL_BIG:
3590 j = big_sign(bw) ? 1 : -1;
3591 break;
3592 case BIG_SMALL:
3593 j = big_sign(aw) ? -1 : 1;
3594 break;
3595 case SMALL_FLOAT:
3596 if (exact) goto exact_fall_through;
3597 GET_DOUBLE(bw, f2);
3598 if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) {
3599 /* Float is within the no loss limit */
3600 f1.fd = signed_val(aw);
3601 j = erts_float_comp(f1.fd, f2.fd);
3602 }
3603 #if ERTS_SIZEOF_ETERM == 8
3604 else if (f2.fd > (double) (MAX_SMALL + 1)) {
3605 /* Float is a positive bignum, i.e. bigger */
3606 j = -1;
3607 } else if (f2.fd < (double) (MIN_SMALL - 1)) {
3608 /* Float is a negative bignum, i.e. smaller */
3609 j = 1;
3610 } else {
3611 /* Float is a Sint but less precise */
3612 j = signed_val(aw) - (Sint) f2.fd;
3613 }
3614 #else
3615 else {
3616 /* If float is positive it is bigger than small */
3617 j = (f2.fd > 0.0) ? -1 : 1;
3618 }
3619 #endif /* ERTS_SIZEOF_ETERM == 8 */
3620 break;
3621 case FLOAT_BIG:
3622 if (exact) goto exact_fall_through;
3623 {
3624 Wterm tmp = aw;
3625 aw = bw;
3626 bw = tmp;
3627 }/* fall through */
3628 case BIG_FLOAT:
3629 if (exact) goto exact_fall_through;
3630 GET_DOUBLE(bw, f2);
3631 if ((f2.fd < (double) (MAX_SMALL + 1))
3632 && (f2.fd > (double) (MIN_SMALL - 1))) {
3633 /* Float is a Sint */
3634 j = big_sign(aw) ? -1 : 1;
3635 } else if (big_arity(aw) > BIG_ARITY_FLOAT_MAX
3636 || pow(2.0,(big_arity(aw)-1)*D_EXP) > fabs(f2.fd)) {
3637 /* If bignum size shows that it is bigger than the abs float */
3638 j = big_sign(aw) ? -1 : 1;
3639 } else if (big_arity(aw) < BIG_ARITY_FLOAT_MAX
3640 && (pow(2.0,(big_arity(aw))*D_EXP)-1.0) < fabs(f2.fd)) {
3641 /* If bignum size shows that it is smaller than the abs float */
3642 j = f2.fd < 0 ? 1 : -1;
3643 } else if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) {
3644 /* Float is within the no loss limit */
3645 if (big_to_double(aw, &f1.fd) < 0) {
3646 j = big_sign(aw) ? -1 : 1;
3647 } else {
3648 j = erts_float_comp(f1.fd, f2.fd);
3649 }
3650 } else {
3651 big = double_to_big(f2.fd, big_buf, sizeof(big_buf)/sizeof(Eterm));
3652 j = big_comp(aw, big);
3653 }
3654 if (_NUMBER_CODE(a_tag, b_tag) == FLOAT_BIG) {
3655 j = -j;
3656 }
3657 break;
3658 case FLOAT_SMALL:
3659 if (exact) goto exact_fall_through;
3660 GET_DOUBLE(aw, f1);
3661 if (f1.fd < MAX_LOSSLESS_FLOAT && f1.fd > MIN_LOSSLESS_FLOAT) {
3662 /* Float is within the no loss limit */
3663 f2.fd = signed_val(bw);
3664 j = erts_float_comp(f1.fd, f2.fd);
3665 }
3666 #if ERTS_SIZEOF_ETERM == 8
3667 else if (f1.fd > (double) (MAX_SMALL + 1)) {
3668 /* Float is a positive bignum, i.e. bigger */
3669 j = 1;
3670 } else if (f1.fd < (double) (MIN_SMALL - 1)) {
3671 /* Float is a negative bignum, i.e. smaller */
3672 j = -1;
3673 } else {
3674 /* Float is a Sint but less precise it */
3675 j = (Sint) f1.fd - signed_val(bw);
3676 }
3677 #else
3678 else {
3679 /* If float is positive it is bigger than small */
3680 j = (f1.fd > 0.0) ? 1 : -1;
3681 }
3682 #endif /* ERTS_SIZEOF_ETERM == 8 */
3683 break;
3684 exact_fall_through:
3685 default:
3686 j = b_tag - a_tag;
3687 }
3688 }
3689 if (j == 0) {
3690 goto pop_next;
3691 } else {
3692 goto not_equal;
3693 }
3694
3695 term_array: /* arrays in 'aa' and 'bb', length in 'i' */
3696 ASSERT(i>0);
3697 while (--i) {
3698 a = *aa++;
3699 b = *bb++;
3700 if (!is_same(a, b)) {
3701 if (is_atom(a) && is_atom(b)) {
3702 if ((j = erts_cmp_atoms(a, b)) != 0) {
3703 goto not_equal;
3704 }
3705 } else if (is_both_small(a, b)) {
3706 if ((j = signed_val(a)-signed_val(b)) != 0) {
3707 goto not_equal;
3708 }
3709 } else {
3710 WSTACK_PUSH3(stack, (UWord)bb, (UWord)aa, TERM_ARRAY_OP_WORD(i));
3711 goto tailrecur_ne;
3712 }
3713 }
3714 }
3715 a = *aa;
3716 b = *bb;
3717 goto tailrecur;
3718
3719 pop_next:
3720 if (!WSTACK_ISEMPTY(stack)) {
3721 UWord something = WSTACK_POP(stack);
3722 struct erts_cmp_hashmap_state* sp;
3723 if (primary_tag((Eterm) something) == TAG_PRIMARY_HEADER) { /* an operation */
3724 switch (GET_OP(something)) {
3725 case TERM_ARRAY_OP:
3726 i = GET_OP_ARG(something);
3727 aa = (Eterm*)WSTACK_POP(stack);
3728 bb = (Eterm*) WSTACK_POP(stack);
3729 goto term_array;
3730
3731 case SWITCH_EXACT_OFF_OP:
3732 /* Done with exact compare of map keys, switch back */
3733 ASSERT(exact);
3734 exact = 0;
3735 goto pop_next;
3736
3737 case HASHMAP_PHASE1_ARE_KEYS_EQUAL: {
3738 sp = PSTACK_TOP(hmap_stack);
3739 if (j) {
3740 /* Key diff found, enter phase 2 */
3741 if (hashmap_key_hash_cmp(sp->ap, sp->bp) < 0) {
3742 sp->min_key = CAR(sp->ap);
3743 sp->cmp_res = -1;
3744 sp->ap = hashmap_iterator_next(&stack);
3745 }
3746 else {
3747 sp->min_key = CAR(sp->bp);
3748 sp->cmp_res = 1;
3749 sp->bp = hashmap_iterator_next(&b_stack);
3750 }
3751 exact = 1; /* only exact key compares in phase 2 */
3752 goto case_HASHMAP_PHASE2_LOOP;
3753 }
3754
3755 /* No key diff found so far, compare values if min key */
3756
3757 if (sp->cmp_res) {
3758 a = CAR(sp->ap);
3759 b = sp->min_key;
3760 exact = 1;
3761 WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_IS_MIN_KEY));
3762 sp->wstack_rollback = WSTACK_COUNT(stack);
3763 goto bodyrecur;
3764 }
3765 /* no min key-value found yet */
3766 a = CDR(sp->ap);
3767 b = CDR(sp->bp);
3768 exact = sp->was_exact;
3769 WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_CMP_VALUES));
3770 sp->wstack_rollback = WSTACK_COUNT(stack);
3771 goto bodyrecur;
3772 }
3773 case HASHMAP_PHASE1_IS_MIN_KEY:
3774 sp = PSTACK_TOP(hmap_stack);
3775 if (j < 0) {
3776 a = CDR(sp->ap);
3777 b = CDR(sp->bp);
3778 exact = sp->was_exact;
3779 WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_CMP_VALUES));
3780 sp->wstack_rollback = WSTACK_COUNT(stack);
3781 goto bodyrecur;
3782 }
3783 goto case_HASHMAP_PHASE1_LOOP;
3784
3785 case HASHMAP_PHASE1_CMP_VALUES:
3786 sp = PSTACK_TOP(hmap_stack);
3787 if (j) {
3788 sp->cmp_res = j;
3789 sp->min_key = CAR(sp->ap);
3790 }
3791 case_HASHMAP_PHASE1_LOOP:
3792 sp->ap = hashmap_iterator_next(&stack);
3793 sp->bp = hashmap_iterator_next(&b_stack);
3794 if (!sp->ap) {
3795 /* end of maps with identical keys */
3796 ASSERT(!sp->bp);
3797 j = sp->cmp_res;
3798 exact = sp->was_exact;
3799 (void) PSTACK_POP(hmap_stack);
3800 ON_CMP_GOTO(j);
3801 }
3802 a = CAR(sp->ap);
3803 b = CAR(sp->bp);
3804 exact = 1;
3805 WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_ARE_KEYS_EQUAL));
3806 sp->wstack_rollback = WSTACK_COUNT(stack);
3807 goto bodyrecur;
3808
3809 case_HASHMAP_PHASE2_LOOP:
3810 if (sp->ap && sp->bp) {
3811 a = CAR(sp->ap);
3812 b = CAR(sp->bp);
3813 ASSERT(exact);
3814 WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_ARE_KEYS_EQUAL));
3815 sp->wstack_rollback = WSTACK_COUNT(stack);
3816 goto bodyrecur;
3817 }
3818 goto case_HASHMAP_PHASE2_NEXT_STEP;
3819
3820 case HASHMAP_PHASE2_ARE_KEYS_EQUAL:
3821 sp = PSTACK_TOP(hmap_stack);
3822 if (j == 0) {
3823 /* keys are equal, skip them */
3824 sp->ap = hashmap_iterator_next(&stack);
3825 sp->bp = hashmap_iterator_next(&b_stack);
3826 goto case_HASHMAP_PHASE2_LOOP;
3827 }
3828 /* fall through */
3829 case_HASHMAP_PHASE2_NEXT_STEP:
3830 if (sp->ap || sp->bp) {
3831 if (hashmap_key_hash_cmp(sp->ap, sp->bp) < 0) {
3832 ASSERT(sp->ap);
3833 a = CAR(sp->ap);
3834 b = sp->min_key;
3835 ASSERT(exact);
3836 WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_IS_MIN_KEY_A));
3837 }
3838 else { /* hash_cmp > 0 */
3839 ASSERT(sp->bp);
3840 a = CAR(sp->bp);
3841 b = sp->min_key;
3842 ASSERT(exact);
3843 WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_IS_MIN_KEY_B));
3844 }
3845 sp->wstack_rollback = WSTACK_COUNT(stack);
3846 goto bodyrecur;
3847 }
3848 /* End of both maps */
3849 j = sp->cmp_res;
3850 exact = sp->was_exact;
3851 (void) PSTACK_POP(hmap_stack);
3852 ON_CMP_GOTO(j);
3853
3854 case HASHMAP_PHASE2_IS_MIN_KEY_A:
3855 sp = PSTACK_TOP(hmap_stack);
3856 if (j < 0) {
3857 sp->min_key = CAR(sp->ap);
3858 sp->cmp_res = -1;
3859 }
3860 sp->ap = hashmap_iterator_next(&stack);
3861 goto case_HASHMAP_PHASE2_LOOP;
3862
3863 case HASHMAP_PHASE2_IS_MIN_KEY_B:
3864 sp = PSTACK_TOP(hmap_stack);
3865 if (j < 0) {
3866 sp->min_key = CAR(sp->bp);
3867 sp->cmp_res = 1;
3868 }
3869 sp->bp = hashmap_iterator_next(&b_stack);
3870 goto case_HASHMAP_PHASE2_LOOP;
3871
3872 default:
3873 ASSERT(!"Invalid cmp op");
3874 } /* switch */
3875 }
3876 a = (Eterm) something;
3877 b = (Eterm) WSTACK_POP(stack);
3878 goto tailrecur;
3879 }
3880
3881 ASSERT(PSTACK_IS_EMPTY(hmap_stack));
3882 PSTACK_DESTROY(hmap_stack);
3883 WSTACK_DESTROY(stack);
3884 WSTACK_DESTROY(b_stack);
3885 return 0;
3886
3887 not_equal:
3888 if (!PSTACK_IS_EMPTY(hmap_stack) && !eq_only) {
3889 WSTACK_ROLLBACK(stack, PSTACK_TOP(hmap_stack)->wstack_rollback);
3890 goto pop_next;
3891 }
3892 PSTACK_DESTROY(hmap_stack);
3893 WSTACK_DESTROY(stack);
3894 WSTACK_DESTROY(b_stack);
3895 return j;
3896
3897 #undef CMP_NODES
3898 }
3899
3900
3901 Eterm
store_external_or_ref_(Uint ** hpp,ErlOffHeap * oh,Eterm ns)3902 store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns)
3903 {
3904 struct erl_off_heap_header *ohhp;
3905 Uint i;
3906 Uint size;
3907 Eterm *from_hp;
3908 Eterm *to_hp = *hpp;
3909
3910 ASSERT(is_external(ns) || is_internal_ref(ns));
3911
3912 from_hp = boxed_val(ns);
3913 size = thing_arityval(*from_hp) + 1;
3914 *hpp += size;
3915
3916 for(i = 0; i < size; i++)
3917 to_hp[i] = from_hp[i];
3918
3919 if (is_external_header(*from_hp)) {
3920 ExternalThing *etp = (ExternalThing *) from_hp;
3921 ASSERT(is_external(ns));
3922 erts_ref_node_entry(etp->node, 2, make_boxed(to_hp));
3923 }
3924 else if (is_ordinary_ref_thing(from_hp))
3925 return make_internal_ref(to_hp);
3926 else {
3927 ErtsMRefThing *mreft = (ErtsMRefThing *) from_hp;
3928 ErtsMagicBinary *mb = mreft->mb;
3929 ASSERT(is_magic_ref_thing(from_hp));
3930 erts_refc_inc(&mb->intern.refc, 2);
3931 OH_OVERHEAD(oh, mb->orig_size / sizeof(Eterm));
3932 }
3933
3934 ohhp = (struct erl_off_heap_header*) to_hp;
3935 ohhp->next = oh->first;
3936 oh->first = ohhp;
3937
3938 return make_boxed(to_hp);
3939 }
3940
3941 Eterm
store_external_or_ref_in_proc_(Process * proc,Eterm ns)3942 store_external_or_ref_in_proc_(Process *proc, Eterm ns)
3943 {
3944 Uint sz;
3945 Uint *hp;
3946
3947 ASSERT(is_external(ns) || is_internal_ref(ns));
3948
3949 sz = NC_HEAP_SIZE(ns);
3950 ASSERT(sz > 0);
3951 hp = HAlloc(proc, sz);
3952 return store_external_or_ref_(&hp, &MSO(proc), ns);
3953 }
3954
void bin_write(fmtfn_t to, void *to_arg, byte* buf, size_t sz)
{
    /*
     * Dump 'sz' bytes of 'buf' as a comma-separated line for debugging.
     * Printable non-digit characters are written literally; everything
     * else -- including digit characters, which would otherwise be
     * indistinguishable from numeric output -- is written as its
     * numeric byte value.
     *
     * (The original code had separate digit and non-printable branches
     * with identical "%d," bodies; they are merged here, behavior
     * unchanged.)
     */
    size_t i;

    for (i = 0; i < sz; i++) {
	if (IS_PRINT(buf[i]) && !IS_DIGIT(buf[i]))
	    erts_print(to, to_arg, "%c,", buf[i]);
	else
	    erts_print(to, to_arg, "%d,", buf[i]);
    }
    erts_putc(to, to_arg, '\n');
}
3970
3971 /* Fill buf with the contents of bytelist list
3972 * return number of chars in list
3973 * or -1 for type error
3974 * or -2 for not enough buffer space (buffer contains truncated result)
3975 */
3976 Sint
intlist_to_buf(Eterm list,char * buf,Sint len)3977 intlist_to_buf(Eterm list, char *buf, Sint len)
3978 {
3979 Eterm* listptr;
3980 Sint sz = 0;
3981
3982 if (is_nil(list))
3983 return 0;
3984 if (is_not_list(list))
3985 return -1;
3986 listptr = list_val(list);
3987
3988 while (sz < len) {
3989 if (!is_byte(*listptr))
3990 return -1;
3991 buf[sz++] = unsigned_val(*listptr);
3992 if (is_nil(*(listptr + 1)))
3993 return(sz);
3994 if (is_not_list(*(listptr + 1)))
3995 return -1;
3996 listptr = list_val(*(listptr + 1));
3997 }
3998 return -2; /* not enough space */
3999 }
4000
4001 /** @brief Fill buf with the UTF8 contents of the unicode list
4002 * @param len Max number of characters to write.
4003 * @param written NULL or bytes written.
4004 * @return 0 ok,
4005 * -1 type error,
4006 * -2 list too long, only \c len characters written
4007 */
4008 int
erts_unicode_list_to_buf(Eterm list,byte * buf,Sint len,Sint * written)4009 erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len, Sint* written)
4010 {
4011 Eterm* listptr;
4012 Sint sz = 0;
4013 Sint val;
4014 int res;
4015
4016 while (1) {
4017 if (is_nil(list)) {
4018 res = 0;
4019 break;
4020 }
4021 if (is_not_list(list)) {
4022 res = -1;
4023 break;
4024 }
4025 listptr = list_val(list);
4026
4027 if (len-- <= 0) {
4028 res = -2;
4029 break;
4030 }
4031
4032 if (is_not_small(CAR(listptr))) {
4033 res = -1;
4034 break;
4035 }
4036 val = signed_val(CAR(listptr));
4037 if (0 <= val && val < 0x80) {
4038 buf[sz] = val;
4039 sz++;
4040 } else if (val < 0x800) {
4041 buf[sz+0] = 0xC0 | (val >> 6);
4042 buf[sz+1] = 0x80 | (val & 0x3F);
4043 sz += 2;
4044 } else if (val < 0x10000UL) {
4045 if (0xD800 <= val && val <= 0xDFFF) {
4046 res = -1;
4047 break;
4048 }
4049 buf[sz+0] = 0xE0 | (val >> 12);
4050 buf[sz+1] = 0x80 | ((val >> 6) & 0x3F);
4051 buf[sz+2] = 0x80 | (val & 0x3F);
4052 sz += 3;
4053 } else if (val < 0x110000) {
4054 buf[sz+0] = 0xF0 | (val >> 18);
4055 buf[sz+1] = 0x80 | ((val >> 12) & 0x3F);
4056 buf[sz+2] = 0x80 | ((val >> 6) & 0x3F);
4057 buf[sz+3] = 0x80 | (val & 0x3F);
4058 sz += 4;
4059 } else {
4060 res = -1;
4061 break;
4062 }
4063 list = CDR(listptr);
4064 }
4065
4066 if (written)
4067 *written = sz;
4068 return res;
4069 }
4070
4071 Sint
erts_unicode_list_to_buf_len(Eterm list)4072 erts_unicode_list_to_buf_len(Eterm list)
4073 {
4074 Eterm* listptr;
4075 Sint sz = 0;
4076
4077 if (is_nil(list)) {
4078 return 0;
4079 }
4080 if (is_not_list(list)) {
4081 return -1;
4082 }
4083 listptr = list_val(list);
4084
4085 while (1) {
4086 Sint val;
4087
4088 if (is_not_small(CAR(listptr))) {
4089 return -1;
4090 }
4091 val = signed_val(CAR(listptr));
4092 if (0 <= val && val < 0x80) {
4093 sz++;
4094 } else if (val < 0x800) {
4095 sz += 2;
4096 } else if (val < 0x10000UL) {
4097 if (0xD800 <= val && val <= 0xDFFF) {
4098 return -1;
4099 }
4100 sz += 3;
4101 } else if (val < 0x110000) {
4102 sz += 4;
4103 } else {
4104 return -1;
4105 }
4106 list = CDR(listptr);
4107 if (is_nil(list)) {
4108 return sz;
4109 }
4110 if (is_not_list(list)) {
4111 return -1;
4112 }
4113 listptr = list_val(list);
4114 }
4115 }
4116
4117 /* Prints an integer in the given base, returning the number of digits printed.
4118 *
4119 * (*buf) is a pointer to the buffer, and is set to the start of the string
4120 * when returning. */
int Sint_to_buf(Sint n, int base, char **buf, size_t buf_size)
{
    /*
     * Render 'n' in the given base (2..36) into the tail of the caller
     * supplied buffer; (*buf) is updated to point at the first digit.
     * Returns the number of characters written (including any '-').
     *
     * Fix: the old code negated 'n' directly, which is undefined
     * behavior (signed overflow) for the most negative Sint.  The
     * magnitude is now computed in unsigned arithmetic instead.
     */
    char *p = &(*buf)[buf_size - 1];
    int sign = 0, size = 0;
    Uint mag;	/* magnitude of n; Uint so that -MIN_SINT is representable */

    ASSERT(base >= 2 && base <= 36);

    if (n < 0) {
	sign = 1;
	mag = -(Uint) n;	/* unsigned negate: no overflow */
    } else {
	mag = (Uint) n;
    }

    if (mag == 0) {
	*p-- = '0';
	size++;
    }

    while (mag != 0) {
	int digit = (int) (mag % base);

	if (digit < 10) {
	    *p-- = '0' + digit;
	} else {
	    *p-- = 'A' + (digit - 10);
	}

	size++;

	mag /= base;
    }

    if (sign) {
	*p-- = '-';
	size++;
    }

    *buf = p + 1;

    return size;
}
4159
4160 /* Build a list of integers in some safe memory area
4161 ** Memory must be pre allocated prio call 2*len in size
4162 ** hp is a pointer to the "heap" pointer on return
4163 ** this pointer is updated to point after the list
4164 */
4165
4166 Eterm
buf_to_intlist(Eterm ** hpp,const char * buf,size_t len,Eterm tail)4167 buf_to_intlist(Eterm** hpp, const char *buf, size_t len, Eterm tail)
4168 {
4169 Eterm* hp = *hpp;
4170 size_t i = len;
4171
4172 while(i != 0) {
4173 --i;
4174 tail = CONS(hp, make_small((Uint)(byte)buf[i]), tail);
4175 hp += 2;
4176 }
4177
4178 *hpp = hp;
4179 return tail;
4180 }
4181
4182 /*
4183 ** Write io list in to a buffer.
4184 **
4185 ** An iolist is defined as:
4186 **
4187 ** iohead ::= Binary
4188 ** | Byte (i.e integer in range [0..255]
4189 ** | iolist
4190 ** ;
4191 **
4192 ** iotail ::= []
4193 ** | Binary (added by tony)
4194 ** | iolist
4195 ** ;
4196 **
4197 ** iolist ::= []
4198 ** | Binary
4199 ** | [ iohead | iotail]
4200 ** ;
4201 **
4202 ** Return remaining bytes in buffer on success
4203 ** ERTS_IOLIST_TO_BUF_OVERFLOW on overflow
4204 ** ERTS_IOLIST_TO_BUF_TYPE_ERROR on type error (including that result would not be a whole number of bytes)
4205 **
4206 ** Note!
4207 ** Do not detect indata errors in this fiunction that are not detected by erts_iolist_size!
4208 **
4209 ** A caller should be able to rely on a successful return from erts_iolist_to_buf
4210 ** if erts_iolist_size is previously successfully called and erts_iolist_to_buf
4211 ** is called with a buffer at least as large as the value given by erts_iolist_size.
4212 **
4213 */
4214
/* Result codes for iolist_to_buf_bcopy(). */
typedef enum {
    ERTS_IL2B_BCOPY_OK,		 /* whole (remaining) binary copied */
    ERTS_IL2B_BCOPY_YIELD,	 /* partial copy; remainder saved in state->bcopy */
    ERTS_IL2B_BCOPY_OVERFLOW,	 /* destination buffer too small */
    ERTS_IL2B_BCOPY_TYPE_ERROR	 /* binary not a whole number of bytes */
} ErtsIL2BBCopyRes;

static ErtsIL2BBCopyRes
iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp);
4224
/*
 * Write the iolist 'obj' into 'buf' (capacity 'alloced_len' bytes).
 *
 * When yield_support is non-zero, work is metered against the process'
 * reductions via 'state'; on exhaustion the traversal state (ESTACK,
 * current object, buffer position, and any half-copied binary) is saved
 * in 'state' and ERTS_IOLIST_TO_BUF_YIELD is returned so the caller can
 * trap and resume later.  Since yield_support is a compile-time constant
 * at each call site, the optimizer strips the unused branch.
 *
 * Returns remaining bytes in buffer on success,
 * ERTS_IOLIST_TO_BUF_OVERFLOW or ERTS_IOLIST_TO_BUF_TYPE_ERROR on error.
 */
static ERTS_INLINE ErlDrvSizeT
iolist_to_buf(const int yield_support,
	      ErtsIOList2BufState *state,
	      Eterm obj,
	      char* buf,
	      ErlDrvSizeT alloced_len)
{
/* Copy the binary 'obj' into 'buf', yielding via L_bcopy_yield when it
 * is larger than the remaining reduction budget allows.  CONSP is the
 * cons cell whose tail must be revisited on resumption (NULL when 'obj'
 * was itself a tail). */
#undef IOLIST_TO_BUF_BCOPY
#define IOLIST_TO_BUF_BCOPY(CONSP)					\
    do {								\
	size_t size = binary_size(obj);					\
	if (size > 0) {							\
	    Uint bitsize;						\
	    byte* bptr;							\
	    Uint bitoffs;						\
	    Uint num_bits;						\
	    if (yield_support) {					\
		size_t max_size = ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
		if (yield_count > 0)					\
		    max_size *= yield_count+1;				\
		if (size > max_size) {					\
		    state->objp = CONSP;				\
		    goto L_bcopy_yield;					\
		}							\
		if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) {	\
		    int cost = (int) size;				\
		    cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;	\
		    yield_count -= cost;				\
		}							\
	    }								\
	    if (len < size)						\
		goto L_overflow;					\
	    ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);		\
	    if (bitsize != 0)						\
		goto L_type_error;					\
	    num_bits = 8*size;						\
	    copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);	\
	    buf += size;						\
	    len -= size;						\
	}								\
    } while (0)

    ErlDrvSizeT res, len;
    Eterm* objp = NULL;
    int init_yield_count;
    int yield_count;
    DECLARE_ESTACK(s);

    len = (ErlDrvSizeT) alloced_len;

    if (!yield_support) {
	yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
	goto L_again;
    }
    else {

	if (state->iolist.reds_left <= 0)
	    return ERTS_IOLIST_TO_BUF_YIELD;

	ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
	/* Reduction budget expressed in yield counts. */
	init_yield_count = (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED
			    * state->iolist.reds_left);
	yield_count = init_yield_count;

	if (!state->iolist.estack.start)
	    goto L_again;
	else {
	    int chk_stack;
	    /* Restart; restore state... */
	    ESTACK_RESTORE(s, &state->iolist.estack);

	    if (!state->bcopy.bptr)
		chk_stack = 0;
	    else {
		/* Finish the binary copy we yielded in the middle of. */
		chk_stack = 1;
		switch (iolist_to_buf_bcopy(state, THE_NON_VALUE, &yield_count)) {
		case ERTS_IL2B_BCOPY_OK:
		    break;
		case ERTS_IL2B_BCOPY_YIELD:
		    BUMP_ALL_REDS(state->iolist.c_p);
		    state->iolist.reds_left = 0;
		    ESTACK_SAVE(s, &state->iolist.estack);
		    return ERTS_IOLIST_TO_BUF_YIELD;
		case ERTS_IL2B_BCOPY_OVERFLOW:
		    goto L_overflow;
		case ERTS_IL2B_BCOPY_TYPE_ERROR:
		    goto L_type_error;
		}
	    }

	    obj = state->iolist.obj;
	    buf = state->buf;
	    len = state->len;
	    objp = state->objp;
	    state->objp = NULL;
	    if (objp)
		goto L_tail;	/* resume at the saved cons cell's tail */
	    if (!chk_stack)
		goto L_again;
	    /* check stack */
	}
    }

    while (!ESTACK_ISEMPTY(s)) {
	obj = ESTACK_POP(s);
    L_again:
	if (is_list(obj)) {
	    while (1) { /* Tail loop */
		while (1) { /* Head loop */
		    if (yield_support && --yield_count <= 0)
			goto L_yield;
		    objp = list_val(obj);
		    obj = CAR(objp);
		    if (is_byte(obj)) {
			if (len == 0) {
			    goto L_overflow;
			}
			*buf++ = unsigned_val(obj);
			len--;
		    } else if (is_binary(obj)) {
			IOLIST_TO_BUF_BCOPY(objp);
		    } else if (is_list(obj)) {
			/* Nested iolist: remember the tail, recurse
			 * into the head. */
			ESTACK_PUSH(s, CDR(objp));
			continue; /* Head loop */
		    } else if (is_not_nil(obj)) {
			goto L_type_error;
		    }
		    break;
		}

	    L_tail:

		obj = CDR(objp);

		if (is_list(obj)) {
		    continue; /* Tail loop */
		} else if (is_binary(obj)) {
		    IOLIST_TO_BUF_BCOPY(NULL);
		} else if (is_not_nil(obj)) {
		    goto L_type_error;
		}
		break;
	    }
	} else if (is_binary(obj)) {
	    IOLIST_TO_BUF_BCOPY(NULL);
	} else if (is_not_nil(obj)) {
	    goto L_type_error;
	} else if (yield_support && --yield_count <= 0)
	    goto L_yield;
    }

    res = len;	/* success: remaining free bytes in buffer */

 L_return:

    DESTROY_ESTACK(s);

    if (yield_support) {
	int reds;
	CLEAR_SAVED_ESTACK(&state->iolist.estack);
	/* Convert consumed yield counts back into reductions. */
	reds = ((init_yield_count - yield_count - 1)
		/ ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED) + 1;
	BUMP_REDS(state->iolist.c_p, reds);
	state->iolist.reds_left -= reds;
	if (state->iolist.reds_left < 0)
	    state->iolist.reds_left = 0;
    }


    return res;

 L_type_error:
    res = ERTS_IOLIST_TO_BUF_TYPE_ERROR;
    goto L_return;

 L_overflow:
    res = ERTS_IOLIST_TO_BUF_OVERFLOW;
    goto L_return;

 L_bcopy_yield:
    /* A binary was too large for the remaining budget; copy what we
     * can, save the rest in state->bcopy, and yield. */

    state->buf = buf;
    state->len = len;

    switch (iolist_to_buf_bcopy(state, obj, &yield_count)) {
    case ERTS_IL2B_BCOPY_OK:
	ERTS_INTERNAL_ERROR("Missing yield");
    case ERTS_IL2B_BCOPY_YIELD:
	BUMP_ALL_REDS(state->iolist.c_p);
	state->iolist.reds_left = 0;
	ESTACK_SAVE(s, &state->iolist.estack);
	return ERTS_IOLIST_TO_BUF_YIELD;
    case ERTS_IL2B_BCOPY_OVERFLOW:
	goto L_overflow;
    case ERTS_IL2B_BCOPY_TYPE_ERROR:
	goto L_type_error;
    }

 L_yield:
    /* Out of reductions mid-traversal; save everything and yield. */

    BUMP_ALL_REDS(state->iolist.c_p);
    state->iolist.reds_left = 0;
    state->iolist.obj = obj;
    state->buf = buf;
    state->len = len;
    ESTACK_SAVE(s, &state->iolist.estack);
    return ERTS_IOLIST_TO_BUF_YIELD;

#undef IOLIST_TO_BUF_BCOPY
}
4435
/* Copy the binary 'obj' -- or a previously saved remainder in
 * state->bcopy (in which case 'obj' is ignored) -- into state->buf,
 * charging the work against *yield_countp.  When the binary exceeds the
 * remaining budget, only part of it is copied, the rest is stashed in
 * state->bcopy, and ERTS_IL2B_BCOPY_YIELD is returned. */
static ErtsIL2BBCopyRes
iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp)
{
    ErtsIL2BBCopyRes res;
    char *buf = state->buf;
    ErlDrvSizeT len = state->len;
    byte* bptr;
    size_t size;
    size_t max_size;
    Uint bitoffs;
    Uint num_bits;
    int yield_count = *yield_countp;

    if (state->bcopy.bptr) {
	/* Resuming: pick up the saved remainder; overflow and bit-size
	 * were already checked when the copy started. */
	bptr = state->bcopy.bptr;
	size = state->bcopy.size;
	bitoffs = state->bcopy.bitoffs;
	state->bcopy.bptr = NULL;
    }
    else {
	Uint bitsize;

	ASSERT(is_binary(obj));

	size = binary_size(obj);
	if (size <= 0)
	    return ERTS_IL2B_BCOPY_OK;	/* empty binary; nothing to do */

	if (len < size)
	    return ERTS_IL2B_BCOPY_OVERFLOW;

	ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
	if (bitsize != 0)
	    return ERTS_IL2B_BCOPY_TYPE_ERROR; /* not a whole number of bytes */
    }

    ASSERT(size > 0);
    /* Largest chunk the remaining budget permits in one go. */
    max_size = (size_t) ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
    if (yield_count > 0)
	max_size *= (size_t) (yield_count+1);

    if (size <= max_size) {
	/* Whole binary fits in the budget; charge its cost. */
	if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) {
	    int cost = (int) size;
	    cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;
	    yield_count -= cost;
	}
	res = ERTS_IL2B_BCOPY_OK;
    }
    else {
	/* Copy max_size bytes now; save the remainder for the next
	 * continuation. */
	ASSERT(0 < max_size && max_size < size);
	yield_count = 0;
	state->bcopy.bptr = bptr + max_size;
	state->bcopy.bitoffs = bitoffs;
	state->bcopy.size = size - max_size;
	size = max_size;
	res = ERTS_IL2B_BCOPY_YIELD;
    }

    num_bits = 8*size;
    copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
    state->buf += size;
    state->len -= size;
    *yield_countp = yield_count;

    return res;
}
4503
/* Start or resume a yielding iolist-to-buffer copy using the traversal
 * state saved in 'state' (see iolist_to_buf() with yield_support=1). */
ErlDrvSizeT erts_iolist_to_buf_yielding(ErtsIOList2BufState *state)
{
    return iolist_to_buf(1, state, state->iolist.obj, state->buf, state->len);
}
4508
/* Non-yielding iolist-to-buffer copy: runs to completion in one call
 * (see iolist_to_buf() with yield_support=0). */
ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len)
{
    return iolist_to_buf(0, NULL, obj, buf, alloced_len);
}
4513
/*
 * Return 0 if successful, and non-zero if unsuccessful.
 *
 * It is vital that if erts_iolist_to_buf would return an error for
 * any type of term data, this function should do so as well.
 * Any input term error detected in erts_iolist_to_buf should also
 * be detected in this function!
 */

/*
 * Compute the total byte size of iolist 'obj' into *sizep, using an
 * explicit stack (ESTACK) instead of recursion. Returns ERTS_IOLIST_OK,
 * ERTS_IOLIST_OVERFLOW (size does not fit in a Uint), ERTS_IOLIST_TYPE
 * (invalid iolist element), or -- when 'yield_support' is set --
 * ERTS_IOLIST_YIELD after the reduction budget in 'state' is spent;
 * the traversal state is then saved in 'state' for a later resume.
 */
static ERTS_INLINE int
iolist_size(const int yield_support, ErtsIOListState *state, Eterm obj, ErlDrvSizeT* sizep)
{
    int res, init_yield_count, yield_count;
    Eterm* objp;
    Uint size = (Uint) *sizep;
    DECLARE_ESTACK(s);

    if (!yield_support)
        yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
    else {
        if (state->reds_left <= 0)
            return ERTS_IOLIST_YIELD;
        /* Use a saveable allocator so the stack can survive a yield. */
        ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
        init_yield_count = ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED;
        init_yield_count *= state->reds_left;
        yield_count = init_yield_count;
        if (state->estack.start) {
            /* Restart; restore state... */
            ESTACK_RESTORE(s, &state->estack);
            size = (Uint) state->size;
            obj = state->obj;
        }
    }

    goto L_again;

    /* Add Val to Var; unsigned wrap-around means the iolist is too big. */
#define SAFE_ADD(Var, Val)			\
    do {					\
        Uint valvar = (Val);			\
        Var += valvar;				\
        if (Var < valvar) {			\
            goto L_overflow_error;		\
        }					\
    } while (0)

    while (!ESTACK_ISEMPTY(s)) {
        obj = ESTACK_POP(s);
    L_again:
        if (is_list(obj)) {
            while (1) { /* Tail loop */
                while (1) { /* Head loop */
                    if (yield_support && --yield_count <= 0)
                        goto L_yield;
                    objp = list_val(obj);
                    /* Head */
                    obj = CAR(objp);
                    if (is_byte(obj)) {
                        size++;
                        /* size wrapped to zero => overflow */
                        if (size == 0) {
                            goto L_overflow_error;
                        }
                    } else if (is_binary(obj) && binary_bitsize(obj) == 0) {
                        SAFE_ADD(size, binary_size(obj));
                    } else if (is_list(obj)) {
                        /* Nested list in head position: remember our
                         * tail on the stack and descend into the head. */
                        ESTACK_PUSH(s, CDR(objp));
                        continue; /* Head loop */
                    } else if (is_not_nil(obj)) {
                        goto L_type_error;
                    }
                    break;
                }
                /* Tail */
                obj = CDR(objp);
                if (is_list(obj))
                    continue; /* Tail loop */
                else if (is_binary(obj) && binary_bitsize(obj) == 0) {
                    SAFE_ADD(size, binary_size(obj));
                } else if (is_not_nil(obj)) {
                    goto L_type_error;
                }
                break;
            }
        } else {
            if (yield_support && --yield_count <= 0)
                goto L_yield;
            if (is_binary(obj) && binary_bitsize(obj) == 0) { /* Tail was binary */
                SAFE_ADD(size, binary_size(obj));
            } else if (is_not_nil(obj)) {
                goto L_type_error;
            }
        }
    }
#undef SAFE_ADD

    *sizep = (ErlDrvSizeT) size;

    res = ERTS_IOLIST_OK;

 L_return:

    DESTROY_ESTACK(s);

    if (yield_support) {
        int yc, reds;
        CLEAR_SAVED_ESTACK(&state->estack);
        /* Charge reductions proportional to the work actually done. */
        yc = init_yield_count - yield_count;
        reds = ((yc - 1) / ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED) + 1;
        BUMP_REDS(state->c_p, reds);
        state->reds_left -= reds;
        state->size = (ErlDrvSizeT) size;
        state->have_size = 1;
    }

    return res;

 L_overflow_error:
    res = ERTS_IOLIST_OVERFLOW;
    size = 0;
    goto L_return;

 L_type_error:
    res = ERTS_IOLIST_TYPE;
    size = 0;
    goto L_return;

 L_yield:
    /* Budget exhausted: consume all reductions and save the traversal
     * state so erts_iolist_size_yielding() can resume later. */
    BUMP_ALL_REDS(state->c_p);
    state->reds_left = 0;
    state->size = size;
    state->obj = obj;
    ESTACK_SAVE(s, &state->estack);
    return ERTS_IOLIST_YIELD;
}
4647
/* Start or resume a yieldable iolist size computation from 'state'. */
int erts_iolist_size_yielding(ErtsIOListState *state)
{
    ErlDrvSizeT sz = state->size;

    return iolist_size(1, state, state->obj, &sz);
}
4653
/* Compute the byte size of iolist 'obj' in one non-yielding pass.
 * Stores the size in *sizep and returns an ERTS_IOLIST_* result code. */
int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
{
    *sizep = 0;
    return iolist_size(0, (ErtsIOListState *) NULL, obj, sizep);
}
4659
4660 /* return 0 if item is not a non-empty flat list of bytes
4661 otherwise return the nonzero length of the list */
4662 Sint
is_string(Eterm list)4663 is_string(Eterm list)
4664 {
4665 Sint len = 0;
4666
4667 while(is_list(list)) {
4668 Eterm* consp = list_val(list);
4669 Eterm hd = CAR(consp);
4670
4671 if (!is_byte(hd))
4672 return 0;
4673 len++;
4674 list = CDR(consp);
4675 }
4676 if (is_nil(list))
4677 return len;
4678 return 0;
4679 }
4680
/* Most recent values successfully applied through sys_alloc_opt()
 * (reported back by sys_alloc_stat()); -1 (set by
 * erts_init_utils_mem()) means "never configured". */
static int trim_threshold;
static int top_pad;
static int mmap_threshold;
static int mmap_max;

/* NOTE(review): not referenced in this chunk; presumably a binary
 * allocation statistic maintained elsewhere -- confirm before relying
 * on it. */
Uint tot_bin_allocated;
4687
/* One-time initialization hook for utils; nothing to do at present. */
void erts_init_utils(void)
{
    /* Intentionally empty. */
}
4692
erts_init_utils_mem(void)4693 void erts_init_utils_mem(void)
4694 {
4695 trim_threshold = -1;
4696 top_pad = -1;
4697 mmap_threshold = -1;
4698 mmap_max = -1;
4699 }
4700
/*
 * Apply a malloc implementation tuning option via mallopt() (when
 * available) and cache the value for sys_alloc_stat().
 * Returns 1 if mallopt() accepted the value; 0 when it failed, when
 * the option is unknown, or when mallopt()/the specific M_* option is
 * unavailable at compile time.
 */
int
sys_alloc_opt(int opt, int value)
{
#if HAVE_MALLOPT
    int m_opt;
    int *curr_val;

    switch(opt) {
    case SYS_ALLOC_OPT_TRIM_THRESHOLD:
#ifdef M_TRIM_THRESHOLD
        m_opt = M_TRIM_THRESHOLD;
        curr_val = &trim_threshold;
        break;
#else
        return 0;
#endif
    case SYS_ALLOC_OPT_TOP_PAD:
#ifdef M_TOP_PAD
        m_opt = M_TOP_PAD;
        curr_val = &top_pad;
        break;
#else
        return 0;
#endif
    case SYS_ALLOC_OPT_MMAP_THRESHOLD:
#ifdef M_MMAP_THRESHOLD
        m_opt = M_MMAP_THRESHOLD;
        curr_val = &mmap_threshold;
        break;
#else
        return 0;
#endif
    case SYS_ALLOC_OPT_MMAP_MAX:
#ifdef M_MMAP_MAX
        m_opt = M_MMAP_MAX;
        curr_val = &mmap_max;
        break;
#else
        return 0;
#endif
    default:
        return 0;
    }

    /* mallopt() returns nonzero on success; remember the value so
       sys_alloc_stat() can report it. */
    if(mallopt(m_opt, value)) {
        *curr_val = value;
        return 1;
    }

#endif /* #if HAVE_MALLOPT */

    return 0;
}
4754
4755 void
sys_alloc_stat(SysAllocStat * sasp)4756 sys_alloc_stat(SysAllocStat *sasp)
4757 {
4758 sasp->trim_threshold = trim_threshold;
4759 sasp->top_pad = top_pad;
4760 sasp->mmap_threshold = mmap_threshold;
4761 sasp->mmap_max = mmap_max;
4762
4763 }
4764
4765 char *
erts_read_env(char * key)4766 erts_read_env(char *key)
4767 {
4768 size_t value_len = 256;
4769 char *value = erts_alloc(ERTS_ALC_T_TMP, value_len);
4770 int res;
4771 while (1) {
4772 res = erts_sys_explicit_8bit_getenv(key, value, &value_len);
4773
4774 if (res >= 0) {
4775 break;
4776 }
4777
4778 value = erts_realloc(ERTS_ALC_T_TMP, value, value_len);
4779 }
4780
4781 if (res != 1) {
4782 erts_free(ERTS_ALC_T_TMP, value);
4783 return NULL;
4784 }
4785
4786 return value;
4787 }
4788
4789 void
erts_free_read_env(void * value)4790 erts_free_read_env(void *value)
4791 {
4792 if (value)
4793 erts_free(ERTS_ALC_T_TMP, value);
4794 }
4795
4796
/* One saved emulator argument: its length and a pointer into the
 * single malloc'ed buffer holding all argument strings. */
typedef struct {
    size_t sz;      /* string length, excluding NUL terminator */
    char *ptr;      /* NUL-terminated copy of the argument */
} ErtsEmuArg;

/* All emulator command line arguments, captured once at startup by
 * erts_save_emu_args() and rendered by erts_get_emu_args(). */
typedef struct {
    int argc;           /* number of saved arguments */
    ErtsEmuArg *arg;    /* array of argc entries */
    size_t no_bytes;    /* total string bytes over all arguments */
} ErtsEmuArgs;

ErtsEmuArgs saved_emu_args = {0};
4809
/*
 * Save a private copy of the emulator command line arguments into
 * 'saved_emu_args': one malloc'ed buffer holding the ErtsEmuArg array
 * followed by all argument strings. Must be called at most once
 * (asserted in debug builds).
 */
void
erts_save_emu_args(int argc, char **argv)
{
#ifdef DEBUG
    char *end_ptr;
#endif
    char *ptr;
    int i;
    size_t arg_sz[100];  /* cache of lengths for the first 100 args */
    size_t size;

    ASSERT(!saved_emu_args.argc);

    /* First pass: total allocation size = ErtsEmuArg array plus each
       string with its NUL terminator. */
    size = sizeof(ErtsEmuArg)*argc;
    for (i = 0; i < argc; i++) {
        size_t sz = sys_strlen(argv[i]);
        if (i < sizeof(arg_sz)/sizeof(arg_sz[0]))
            arg_sz[i] = sz;
        size += sz+1;
    }
    ptr = (char *) malloc(size);
    if (!ptr) {
        ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
    }
#ifdef DEBUG
    end_ptr = ptr + size;
#endif
    /* The ErtsEmuArg array sits at the start of the buffer; the
       strings are packed right after it. */
    saved_emu_args.arg = (ErtsEmuArg *) ptr;
    ptr += sizeof(ErtsEmuArg)*argc;
    saved_emu_args.argc = argc;
    saved_emu_args.no_bytes = 0;
    /* Second pass: fill in the entries and copy the strings. */
    for (i = 0; i < argc; i++) {
        size_t sz;
        /* Lengths beyond the cached first 100 are re-measured. */
        if (i < sizeof(arg_sz)/sizeof(arg_sz[0]))
            sz = arg_sz[i];
        else
            sz = sys_strlen(argv[i]);
        saved_emu_args.arg[i].ptr = ptr;
        saved_emu_args.arg[i].sz = sz;
        saved_emu_args.no_bytes += sz;
        ptr += sz+1;
        sys_strcpy(saved_emu_args.arg[i].ptr, argv[i]);
    }
    ASSERT(ptr == end_ptr);
}
4855
/*
 * Build, on the heap of 'c_p', an Erlang list with one element per
 * saved emulator argument, each element being a list of integers
 * produced by buf_to_intlist() from the saved bytes.
 */
Eterm
erts_get_emu_args(Process *c_p)
{
#ifdef DEBUG
    Eterm *end_hp;
#endif
    int i;
    Uint hsz;
    Eterm *hp, res;

    /* Heap need: 2 words per saved byte (inner cons cells) plus
       2 words per argument (outer cons cells). */
    hsz = saved_emu_args.no_bytes*2;
    hsz += saved_emu_args.argc*2;

    hp = HAlloc(c_p, hsz);
#ifdef DEBUG
    end_hp = hp + hsz;
#endif
    res = NIL;

    /* Build in reverse so the result comes out in argument order. */
    for (i = saved_emu_args.argc-1; i >= 0; i--) {
        Eterm arg = buf_to_intlist(&hp,
                                   saved_emu_args.arg[i].ptr,
                                   saved_emu_args.arg[i].sz,
                                   NIL);
        res = CONS(hp, arg, res);
        hp += 2;
    }

    ASSERT(hp == end_hp);

    return res;
}
4888
/*
 * To be used to silence unused result warnings, but do not abuse it.
 */
void erts_silence_warn_unused_result(long unused)
{
    (void) unused; /* deliberately discard the value */
}
4896
4897 /*
4898 * Interval counts
4899 */
4900 void
erts_interval_init(erts_interval_t * icp)4901 erts_interval_init(erts_interval_t *icp)
4902 {
4903 erts_atomic64_init_nob(&icp->counter.atomic, 0);
4904 }
4905
4906 static ERTS_INLINE Uint64
step_interval_nob(erts_interval_t * icp)4907 step_interval_nob(erts_interval_t *icp)
4908 {
4909 return (Uint64) erts_atomic64_inc_read_nob(&icp->counter.atomic);
4910 }
4911
4912 static ERTS_INLINE Uint64
step_interval_relb(erts_interval_t * icp)4913 step_interval_relb(erts_interval_t *icp)
4914 {
4915 return (Uint64) erts_atomic64_inc_read_relb(&icp->counter.atomic);
4916 }
4917
4918
4919 static ERTS_INLINE Uint64
ensure_later_interval_nob(erts_interval_t * icp,Uint64 ic)4920 ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
4921 {
4922 Uint64 curr_ic;
4923 curr_ic = (Uint64) erts_atomic64_read_nob(&icp->counter.atomic);
4924 if (curr_ic > ic)
4925 return curr_ic;
4926 return (Uint64) erts_atomic64_inc_read_nob(&icp->counter.atomic);
4927 }
4928
4929
4930 static ERTS_INLINE Uint64
ensure_later_interval_acqb(erts_interval_t * icp,Uint64 ic)4931 ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
4932 {
4933 Uint64 curr_ic;
4934 curr_ic = (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic);
4935 if (curr_ic > ic)
4936 return curr_ic;
4937 return (Uint64) erts_atomic64_inc_read_acqb(&icp->counter.atomic);
4938 }
4939
4940 Uint64
erts_step_interval_nob(erts_interval_t * icp)4941 erts_step_interval_nob(erts_interval_t *icp)
4942 {
4943 return step_interval_nob(icp);
4944 }
4945
4946 Uint64
erts_step_interval_relb(erts_interval_t * icp)4947 erts_step_interval_relb(erts_interval_t *icp)
4948 {
4949 return step_interval_relb(icp);
4950 }
4951
4952 Uint64
erts_ensure_later_interval_nob(erts_interval_t * icp,Uint64 ic)4953 erts_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
4954 {
4955 return ensure_later_interval_nob(icp, ic);
4956 }
4957
4958 Uint64
erts_ensure_later_interval_acqb(erts_interval_t * icp,Uint64 ic)4959 erts_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
4960 {
4961 return ensure_later_interval_acqb(icp, ic);
4962 }
4963
/*
 * A millisecond timestamp without time correction where there's no hrtime
 * - for tracing on "long" things...
 */
Uint64 erts_timestamp_millis(void)
{
#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
    /* Preferred: monotonic OS time converted to milliseconds. */
    return ERTS_MONOTONIC_TO_MSEC(erts_os_monotonic_time());
#else
    /* Fallback: wall-clock time in microseconds, divided to ms. */
    Uint64 res;
    SysTimeval tv;
    sys_gettimeofday(&tv);
    res = (Uint64) tv.tv_sec*1000000;
    res += (Uint64) tv.tv_usec;
    return (res / 1000);
#endif
}
4981
/*
 * Compute a stack limit 'stacksize' bytes away from the page-aligned
 * base of the stack, where 'prev_c' is the address of a local in a
 * caller's frame. The direction of the stack is detected by comparing
 * 'prev_c' with a local of our own.
 */
void *
erts_calc_stacklimit(char *prev_c, UWord stacksize)
{
    /*
     * We *don't* want this function inlined, i.e., it is
     * risky to call this function from another function
     * in utils.c
     */

    UWord pagesize = erts_sys_get_page_size();
    char c;
    char *start;
    if (&c > prev_c) {
        /* Stack grows upwards: round the base down to a page boundary
           and place the limit 'stacksize' bytes above it. */
        start = (char *) ((((UWord) prev_c) / pagesize) * pagesize);
        return (void *) (start + stacksize);
    }
    else {
        /* Stack grows downwards: round the base up to a page boundary
           and place the limit 'stacksize' bytes below it. */
        start = (char *) (((((UWord) prev_c) - 1) / pagesize + 1) * pagesize);
        return (void *) (start - stacksize);
    }
}
5003
5004 /*
5005 * erts_check_below_limit() and
5006 * erts_check_above_limit() are put
5007 * in utils.c in order to prevent
5008 * inlining.
5009 */
5010
/* Nonzero iff 'ptr' lies below 'limit'; kept out of line on purpose
 * (see the comment above). */
int
erts_check_below_limit(char *ptr, char *limit)
{
    return (ptr < limit) ? 1 : 0;
}
5016
/* Nonzero iff 'ptr' lies above 'limit'; kept out of line on purpose
 * (see the comment above). */
int
erts_check_above_limit(char *ptr, char *limit)
{
    return (ptr > limit) ? 1 : 0;
}
5022
/* Identity function on pointers; presumably out of line to keep the
 * compiler from seeing through it -- confirm with call sites. */
void *
erts_ptr_id(void *ptr)
{
    void *result = ptr;
    return result;
}
5028
/* Return 1 if the address of a fresh local is below 'ptr' (stack
 * grows towards lower addresses), else 0. 'ptr' is expected to come
 * from a frame further up the call chain -- confirm at call sites. */
int erts_check_if_stack_grows_downwards(char *ptr)
{
    char probe;

    return erts_check_below_limit(&probe, ptr) ? 1 : 0;
}
5037