1 /*
2 * %CopyrightBegin%
3 *
4 * Copyright Ericsson AB 1996-2020. All Rights Reserved.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * %CopyrightEnd%
19 */
20
21 #ifdef HAVE_CONFIG_H
22 # include "config.h"
23 #endif
24
25 #define ERL_WANT_GC_INTERNALS__
26
27 #include "sys.h"
28 #include "erl_vm.h"
29 #include "global.h"
30 #include "erl_process.h"
31 #include "erl_gc.h"
32 #include "big.h"
33 #include "erl_map.h"
34 #include "erl_binary.h"
35 #include "erl_bits.h"
36 #include "dtrace-wrapper.h"
37
38 static void move_one_frag(Eterm** hpp, ErlHeapFragment*, ErlOffHeap*, int);
39
40 /*
41 * Copy object "obj" to process p.
42 */
/*
 * Copy the term "obj" onto the heap of process "to", reserving
 * "extra" additional words of heap space.  Returns the copy (or the
 * original term itself when it is an immediate).
 */
Eterm copy_object_x(Eterm obj, Process* to, Uint extra)
{
    Uint size;
    Eterm *hp;
    Eterm copy;

    /* Immediates occupy no heap space; hand them back unchanged. */
    if (is_immed(obj)) {
        return obj;
    }

    size = size_object(obj);
    hp = HAllocX(to, size, extra);

#ifdef USE_VM_PROBES
    if (DTRACE_ENABLED(copy_object)) {
        DTRACE_CHARBUF(proc_name, 64);

        erts_snprintf(proc_name, sizeof(DTRACE_CHARBUF_NAME(proc_name)),
                      "%T", to->common.id);
        DTRACE2(copy_object, proc_name, size);
    }
#endif

    copy = copy_struct(obj, size, &hp, &to->off_heap);

#ifdef DEBUG
    /* Debug builds verify the copy is structurally equal to the source. */
    if (eq(obj, copy) == 0) {
        erts_exit(ERTS_ABORT_EXIT, "copy not equal to source\n");
    }
#endif
    return copy;
}
69
70 /*
71 * Return the "flat" size of the object.
72 */
73
/*
 * True iff PTR points into the literal area currently being purged.
 * Relies on lit_purge_ptr/lit_purge_sz being in scope in the calling
 * function (NULL/0 when no purge is in progress).
 */
#define in_literal_purge_area(PTR)                  \
    (lit_purge_ptr && (                             \
        (lit_purge_ptr <= (PTR) &&                  \
        (PTR) < (lit_purge_ptr + lit_purge_sz))))
78
/*
 * Return the "flat" size of obj in heap words, i.e. the number of words
 * copy_struct() would need.  Sharing is not preserved: shared subterms
 * are counted each time they are reached.
 *
 * litopt, when non-NULL, describes a literal area being purged; literals
 * outside that area are referenced rather than copied and therefore
 * contribute nothing to the size.
 *
 * Iterative walk using an explicit ESTACK of pending subterms.
 */
Uint size_object_x(Eterm obj, erts_literal_area_t *litopt)
{
    Uint sum = 0;
    Eterm* ptr;
    int arity;
    Eterm *lit_purge_ptr = litopt ? litopt->lit_purge_ptr : NULL;
    Uint lit_purge_sz = litopt ? litopt->lit_purge_sz : 0;
#ifdef DEBUG
    Eterm mypid = erts_get_current_pid();
#endif
    DECLARE_ESTACK(s);
    VERBOSE(DEBUG_SHCOPY, ("[pid=%T] size_object %p\n", mypid, obj));

    for (;;) {
        switch (primary_tag(obj)) {
        case TAG_PRIMARY_LIST:
            ptr = list_val(obj);
            /* Literals outside the purge area are never copied. */
            if (litopt && erts_is_literal(obj,ptr) && !in_literal_purge_area(ptr)) {
                goto pop_next;
            }
            sum += 2;               /* the cons cell itself */
            obj = *ptr++;           /* CAR: defer if compound... */
            if (!IS_CONST(obj)) {
                ESTACK_PUSH(s, obj);
            }
            obj = *ptr;             /* ...and continue with the CDR */
            break;
        case TAG_PRIMARY_BOXED:
            {
                Eterm hdr;
                ptr = boxed_val(obj);
                if (litopt && erts_is_literal(obj,ptr) && !in_literal_purge_area(ptr)) {
                    goto pop_next;
                }
                hdr = *ptr;
                ASSERT(is_header(hdr));
                switch (hdr & _TAG_HEADER_MASK) {
                case ARITYVAL_SUBTAG:
                    ptr = tuple_val(obj);
                    arity = header_arity(hdr);
                    sum += arity + 1;
                    if (arity == 0) { /* Empty tuple -- unusual. */
                        goto pop_next;
                    }
                    /* Push all elements but the last; loop on the last. */
                    while (arity-- > 1) {
                        obj = *++ptr;
                        if (!IS_CONST(obj)) {
                            ESTACK_PUSH(s, obj);
                        }
                    }
                    obj = *++ptr;
                    break;
                case FUN_SUBTAG:
                    {
                        Eterm* bptr = fun_val(obj);
                        ErlFunThing* funp = (ErlFunThing *) bptr;
                        unsigned eterms = 1 /* creator */ + funp->num_free;
                        unsigned sz = thing_arityval(hdr);
                        sum += 1 /* header */ + sz + eterms;
                        bptr += 1 /* header */ + sz;
                        /* Embedded terms: push all but the last. */
                        while (eterms-- > 1) {
                            obj = *bptr++;
                            if (!IS_CONST(obj)) {
                                ESTACK_PUSH(s, obj);
                            }
                        }
                        obj = *bptr;
                        break;
                    }
                case MAP_SUBTAG:
                    switch (MAP_HEADER_TYPE(hdr)) {
                        case MAP_HEADER_TAG_FLATMAP_HEAD :
                            {
                                Uint n;
                                flatmap_t *mp;
                                mp  = (flatmap_t*)flatmap_val(obj);
                                ptr = (Eterm *)mp;
                                /* n = value slots + the keys-tuple term;
                                 * the extra 2 is the hdr + size words. */
                                n   = flatmap_get_size(mp) + 1;
                                sum += n + 2;
                                ptr += 2; /* hdr + size words */
                                while (n--) {
                                    obj = *ptr++;
                                    if (!IS_CONST(obj)) {
                                        ESTACK_PUSH(s, obj);
                                    }
                                }
                                goto pop_next;
                            }
                        case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
                        case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
                        case MAP_HEADER_TAG_HAMT_NODE_BITMAP :
                            {
                                Eterm *head;
                                Uint sz;
                                head  = hashmap_val(obj);
                                sz    = hashmap_bitcount(MAP_HEADER_VAL(hdr));
                                sum  += 1 + sz + header_arity(hdr);
                                head += 1 + header_arity(hdr);

                                if (sz == 0) {
                                    goto pop_next;
                                }
                                /* Push slots sz-1..1; loop on slot 0. */
                                while(sz-- > 1) {
                                    obj = head[sz];
                                    if (!IS_CONST(obj)) {
                                        ESTACK_PUSH(s, obj);
                                    }
                                }
                                obj = head[0];
                            }
                            break;
                        default:
                            erts_exit(ERTS_ABORT_EXIT, "size_object: bad hashmap type %d\n", MAP_HEADER_TYPE(hdr));
                    }
                    break;
                case SUB_BINARY_SUBTAG:
                    {
                        Eterm real_bin;
                        ERTS_DECLARE_DUMMY(Uint offset); /* Not used. */
                        Uint bitsize;
                        Uint bitoffs;
                        Uint extra_bytes;
                        Eterm hdr;
                        ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
                        /* A trailing bit fragment costs a wrapping sub
                         * binary plus one or two extra data bytes. */
                        if ((bitsize + bitoffs) > 8) {
                            sum += ERL_SUB_BIN_SIZE;
                            extra_bytes = 2;
                        } else if ((bitsize + bitoffs) > 0) {
                            sum += ERL_SUB_BIN_SIZE;
                            extra_bytes = 1;
                        } else {
                            extra_bytes = 0;
                        }
                        hdr = *binary_val(real_bin);
                        if (thing_subtag(hdr) == REFC_BINARY_SUBTAG) {
                            sum += PROC_BIN_SIZE;
                        } else {
                            sum += heap_bin_size(binary_size(obj)+extra_bytes);
                        }
                        goto pop_next;
                    }
                    break;
                case BIN_MATCHSTATE_SUBTAG:
                    erts_exit(ERTS_ABORT_EXIT,
                              "size_object: matchstate term not allowed");
                default:
                    /* Leaf things (bignums, refs, floats, ...). */
                    sum += thing_arityval(hdr) + 1;
                    goto pop_next;
                }
            }
            break;
        case TAG_PRIMARY_IMMED1:
        pop_next:
            if (ESTACK_ISEMPTY(s)) {
                DESTROY_ESTACK(s);
                VERBOSE(DEBUG_SHCOPY, ("[pid=%T] size was: %u\n", mypid, sum));
                return sum;
            }
            obj = ESTACK_POP(s);
            break;
        default:
            erts_exit(ERTS_ABORT_EXIT, "size_object: bad tag for %#x\n", obj);
        }
    }
}
244
245 /*
246 * Machinery for sharing preserving information
247 * Using a WSTACK but not very transparently; consider refactoring
248 */
249
/*
 * A BITSTORE is a WSTACK used as a packed FIFO stream of 2-bit values.
 * size_shared() writes the original primary tag of each list CAR it
 * mangles; the cleanup pass later reads them back in the same order.
 * _bitoffs/_buffer hold the partially filled word, _offset is the
 * read position (in words) for fetching.
 */
#define DECLARE_BITSTORE(s) \
    DECLARE_WSTACK(s); \
    int WSTK_CONCAT(s,_bitoffs) = 0; \
    int WSTK_CONCAT(s,_offset) = 0; \
    UWord WSTK_CONCAT(s,_buffer) = 0

#define DESTROY_BITSTORE(s) DESTROY_WSTACK(s)
/* Append the 2-bit value i.  NOTE(review): i is not parenthesized in
 * the expansion; callers pass simple expressions only. */
#define BITSTORE_PUT(s,i) \
do { \
    WSTK_CONCAT(s,_buffer) |= i << WSTK_CONCAT(s,_bitoffs); \
    WSTK_CONCAT(s,_bitoffs) += 2; \
    if (WSTK_CONCAT(s,_bitoffs) >= 8*sizeof(UWord)) { \
        WSTACK_PUSH(s, WSTK_CONCAT(s,_buffer)); \
        WSTK_CONCAT(s,_bitoffs) = 0; \
        WSTK_CONCAT(s,_buffer) = 0; \
    } \
} while(0)
/* Flush the partially filled word; call before any BITSTORE_FETCH. */
#define BITSTORE_CLOSE(s) \
do { \
    if (WSTK_CONCAT(s,_bitoffs) > 0) { \
        WSTACK_PUSH(s, WSTK_CONCAT(s,_buffer)); \
        WSTK_CONCAT(s,_bitoffs) = 0; \
    } \
} while(0)

/* Read the next 2-bit value (FIFO order) into dst. */
#define BITSTORE_FETCH(s,dst) \
do { \
    UWord result; \
    if (WSTK_CONCAT(s,_bitoffs) <= 0) { \
        ASSERT(WSTK_CONCAT(s,_offset) < (s.wsp - s.wstart)); \
        WSTK_CONCAT(s,_buffer) = s.wstart[WSTK_CONCAT(s,_offset)]; \
        WSTK_CONCAT(s,_offset)++; \
        WSTK_CONCAT(s,_bitoffs) = 8*sizeof(UWord); \
    } \
    WSTK_CONCAT(s,_bitoffs) -= 2; \
    result = WSTK_CONCAT(s,_buffer) & 3; \
    WSTK_CONCAT(s,_buffer) >>= 2; \
    (dst) = result; \
} while(0)

/* Non-zero would make size_shared() count literals (off-heap) too. */
#define COUNT_OFF_HEAP (0)
291
292 /*
293 * Return the real size of an object and find sharing information
294 * This currently returns the same as erts_debug:size/1.
295 * It is argued whether the size of subterms in constant pools
296 * should be counted or not.
297 */
298
/*
 * Two-pass destructive walk.  Pass one counts each distinct heap cell
 * exactly once, marking visited cells by mangling their tags in place
 * (lists get CAR/CDR retagged; boxed things get BOXED_VISITED).  Pass
 * two ("cleanup") re-walks the term and restores all tags.  The
 * BITSTORE saves the original CAR tag of list cells whose CDR was a
 * boxed pointer, since that case clobbers both tags.
 * Not safe to run concurrently with anything else touching the term.
 */
Uint size_shared(Eterm obj)
{
    Eterm saved_obj = obj;
    Uint sum = 0;
    Eterm* ptr;

    DECLARE_EQUEUE(s);
    DECLARE_BITSTORE(b);

    for (;;) {
        switch (primary_tag(obj)) {
        case TAG_PRIMARY_LIST: {
            Eterm head, tail;
            ptr = list_val(obj);
            /* we're not counting anything that's outside our heap */
            if (!COUNT_OFF_HEAP && erts_is_literal(obj,ptr)) {
                goto pop_next;
            }
            head = CAR(ptr);
            tail = CDR(ptr);
            /* if it's visited, don't count it */
            if (primary_tag(tail) == TAG_PRIMARY_HEADER ||
                primary_tag(head) == TAG_PRIMARY_HEADER) {
                goto pop_next;
            }
            /* else make it visited now */
            switch (primary_tag(tail)) {
            case TAG_PRIMARY_LIST:
                /* CDR is a cons: marking the CDR is enough. */
                ptr[1] = (tail - TAG_PRIMARY_LIST) | TAG_PRIMARY_HEADER;
                break;
            case TAG_PRIMARY_IMMED1:
                /* CDR is immediate: stash CAR's tag in the CDR word. */
                CAR(ptr) = (head - primary_tag(head)) | TAG_PRIMARY_HEADER;
                CDR(ptr) = (tail - TAG_PRIMARY_IMMED1) | primary_tag(head);
                break;
            case TAG_PRIMARY_BOXED:
                /* Both tags clobbered: save CAR's tag in the bitstore. */
                BITSTORE_PUT(b, primary_tag(head));
                CAR(ptr) = (head - primary_tag(head)) | TAG_PRIMARY_HEADER;
                CDR(ptr) = (tail - TAG_PRIMARY_BOXED) | TAG_PRIMARY_HEADER;
                break;
            }
            /* and count it */
            sum += 2;
            if (!IS_CONST(head)) {
                EQUEUE_PUT(s, head);
            }
            obj = tail;
            break;
        }
        case TAG_PRIMARY_BOXED: {
            Eterm hdr;
            ptr = boxed_val(obj);
            /* we're not counting anything that's outside our heap */
            if (!COUNT_OFF_HEAP && erts_is_literal(obj,ptr)) {
                goto pop_next;
            }
            hdr = *ptr;
            /* if it's visited, don't count it */
            if (primary_tag(hdr) != TAG_PRIMARY_HEADER) {
                goto pop_next;
            }
            /* else make it visited now */
            *ptr = (hdr - primary_tag(hdr)) + BOXED_VISITED;
            /* and count it */
            ASSERT(is_header(hdr));
            switch (hdr & _TAG_HEADER_MASK) {
            case ARITYVAL_SUBTAG: {
                int arity = header_arity(hdr);
                sum += arity + 1;
                if (arity == 0) { /* Empty tuple -- unusual. */
                    goto pop_next;
                }
                while (arity-- > 0) {
                    obj = *++ptr;
                    if (!IS_CONST(obj)) {
                        EQUEUE_PUT(s, obj);
                    }
                }
                goto pop_next;
            }
            case FUN_SUBTAG: {
                ErlFunThing* funp = (ErlFunThing *) ptr;
                unsigned eterms = 1 /* creator */ + funp->num_free;
                unsigned sz = thing_arityval(hdr);
                sum += 1 /* header */ + sz + eterms;
                ptr += 1 /* header */ + sz;
                while (eterms-- > 0) {
                    obj = *ptr++;
                    if (!IS_CONST(obj)) {
                        EQUEUE_PUT(s, obj);
                    }
                }
                goto pop_next;
            }
            case SUB_BINARY_SUBTAG: {
                ErlSubBin* sb = (ErlSubBin *) ptr;
                Uint extra_bytes;
                Eterm hdr;
                ASSERT((sb->thing_word & ~BOXED_VISITED_MASK) == HEADER_SUB_BIN);
                /* Trailing bit fragment costs a sub binary + 1-2 bytes. */
                if (sb->bitsize + sb->bitoffs > 8) {
                    sum += ERL_SUB_BIN_SIZE;
                    extra_bytes = 2;
                } else if (sb->bitsize + sb->bitoffs > 0) {
                    sum += ERL_SUB_BIN_SIZE;
                    extra_bytes = 1;
                } else {
                    extra_bytes = 0;
                }
                /* The underlying binary may already carry a visited
                 * mark; strip it before inspecting the subtag. */
                ptr = binary_val(sb->orig);
                hdr = (*ptr) & ~BOXED_VISITED_MASK;
                if (thing_subtag(hdr) == REFC_BINARY_SUBTAG) {
                    sum += PROC_BIN_SIZE;
                } else {
                    ASSERT(thing_subtag(hdr) == HEAP_BINARY_SUBTAG);
                    sum += heap_bin_size(binary_size(obj) + extra_bytes);
                }
                goto pop_next;
            }
            case MAP_SUBTAG:
                switch (MAP_HEADER_TYPE(hdr)) {
                    case MAP_HEADER_TAG_FLATMAP_HEAD : {
                        flatmap_t *mp = (flatmap_t*)flatmap_val(obj);
                        Uint n = flatmap_get_size(mp) + 1;
                        ptr = (Eterm *)mp;
                        sum += n + 2;
                        ptr += 2; /* hdr + size words */
                        while (n--) {
                            obj = *ptr++;
                            if (!IS_CONST(obj)) {
                                EQUEUE_PUT(s, obj);
                            }
                        }
                        goto pop_next;
                    }
                    case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
                    case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
                    case MAP_HEADER_TAG_HAMT_NODE_BITMAP : {
                        Uint n = hashmap_bitcount(MAP_HEADER_VAL(hdr));
                        sum += 1 + n + header_arity(hdr);
                        ptr += 1 + header_arity(hdr);
                        while (n--) {
                            obj = *ptr++;
                            if (!IS_CONST(obj)) {
                                EQUEUE_PUT(s, obj);
                            }
                        }
                        goto pop_next;
                    }
                    default:
                        erts_exit(ERTS_ABORT_EXIT, "size_shared: bad hashmap type %d\n", MAP_HEADER_TYPE(hdr));
                }
            case BIN_MATCHSTATE_SUBTAG:
                erts_exit(ERTS_ABORT_EXIT,
                          "size_shared: matchstate term not allowed");
            default:
                sum += thing_arityval(hdr) + 1;
                goto pop_next;
            }
            break;
        }
        case TAG_PRIMARY_IMMED1:
        pop_next:
            if (EQUEUE_ISEMPTY(s)) {
                goto cleanup;
            }
            obj = EQUEUE_GET(s);
            break;
        default:
            erts_exit(ERTS_ABORT_EXIT, "size_shared: bad tag for %#x\n", obj);
        }
    }

cleanup:
    /* Pass two: restore every tag mangled above, walking the same
     * term again from the root. */
    obj = saved_obj;
    BITSTORE_CLOSE(b);
    for (;;) {
        switch (primary_tag(obj)) {
        case TAG_PRIMARY_LIST: {
            Eterm head, tail;
            ptr = list_val(obj);
            if (!COUNT_OFF_HEAP && erts_is_literal(obj,ptr)) {
                goto cleanup_next;
            }
            head = CAR(ptr);
            tail = CDR(ptr);
            /* if not already clean, clean it up */
            if (primary_tag(tail) == TAG_PRIMARY_HEADER) {
                if (primary_tag(head) == TAG_PRIMARY_HEADER) {
                    /* CDR was boxed: CAR's tag comes from the bitstore. */
                    Eterm saved;
                    BITSTORE_FETCH(b, saved);
                    CAR(ptr) = head = (head - TAG_PRIMARY_HEADER) | saved;
                    CDR(ptr) = tail = (tail - TAG_PRIMARY_HEADER) | TAG_PRIMARY_BOXED;
                } else {
                    CDR(ptr) = tail = (tail - TAG_PRIMARY_HEADER) | TAG_PRIMARY_LIST;
                }
            } else if (primary_tag(head) == TAG_PRIMARY_HEADER) {
                /* CDR was immediate: CAR's tag was stashed in the CDR. */
                CAR(ptr) = head = (head - TAG_PRIMARY_HEADER) | primary_tag(tail);
                CDR(ptr) = tail = (tail - primary_tag(tail)) | TAG_PRIMARY_IMMED1;
            } else {
                goto cleanup_next;
            }
            /* and its children too */
            if (!IS_CONST(head)) {
                EQUEUE_PUT_UNCHECKED(s, head);
            }
            obj = tail;
            break;
        }
        case TAG_PRIMARY_BOXED: {
            Eterm hdr;
            ptr = boxed_val(obj);
            if (!COUNT_OFF_HEAP && erts_is_literal(obj,ptr)) {
                goto cleanup_next;
            }
            hdr = *ptr;
            /* if not already clean, clean it up */
            if (primary_tag(hdr) == TAG_PRIMARY_HEADER) {
                goto cleanup_next;
            }
            else {
                ASSERT(primary_tag(hdr) == BOXED_VISITED);
                *ptr = hdr = (hdr - BOXED_VISITED) + TAG_PRIMARY_HEADER;
            }
            /* and its children too */
            switch (hdr & _TAG_HEADER_MASK) {
            case ARITYVAL_SUBTAG: {
                int arity = header_arity(hdr);
                if (arity == 0) { /* Empty tuple -- unusual. */
                    goto cleanup_next;
                }
                while (arity-- > 0) {
                    obj = *++ptr;
                    if (!IS_CONST(obj)) {
                        EQUEUE_PUT_UNCHECKED(s, obj);
                    }
                }
                goto cleanup_next;
            }
            case FUN_SUBTAG: {
                ErlFunThing* funp = (ErlFunThing *) ptr;
                unsigned eterms = 1 /* creator */ + funp->num_free;
                unsigned sz = thing_arityval(hdr);
                ptr += 1 /* header */ + sz;
                while (eterms-- > 0) {
                    obj = *ptr++;
                    if (!IS_CONST(obj)) {
                        EQUEUE_PUT_UNCHECKED(s, obj);
                    }
                }
                goto cleanup_next;
            }
            case MAP_SUBTAG:
                switch (MAP_HEADER_TYPE(hdr)) {
                    case MAP_HEADER_TAG_FLATMAP_HEAD : {
                        flatmap_t *mp = (flatmap_t *) ptr;
                        Uint n = flatmap_get_size(mp) + 1;
                        ptr += 2; /* hdr + size words */
                        while (n--) {
                            obj = *ptr++;
                            if (!IS_CONST(obj)) {
                                EQUEUE_PUT_UNCHECKED(s, obj);
                            }
                        }
                        goto cleanup_next;
                    }
                    case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
                    case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
                    case MAP_HEADER_TAG_HAMT_NODE_BITMAP : {
                        Uint n = hashmap_bitcount(MAP_HEADER_VAL(hdr));
                        /* NOTE(review): sum is bumped again here in the
                         * cleanup pass even though hashmap nodes were
                         * already counted in pass one -- looks like
                         * double counting for HAMT nodes; confirm
                         * against the counting pass above. */
                        sum += 1 + n + header_arity(hdr);
                        ptr += 1 + header_arity(hdr);
                        while (n--) {
                            obj = *ptr++;
                            if (!IS_CONST(obj)) {
                                EQUEUE_PUT_UNCHECKED(s, obj);
                            }
                        }
                        goto cleanup_next;
                    }
                    default:
                        erts_exit(ERTS_ABORT_EXIT, "size_shared: bad hashmap type %d\n", MAP_HEADER_TYPE(hdr));
                }
            default:
                goto cleanup_next;
            }
            break;
        }
        case TAG_PRIMARY_IMMED1:
        cleanup_next:
            if (EQUEUE_ISEMPTY(s)) {
                goto all_clean;
            }
            obj = EQUEUE_GET(s);
            break;
        default:
            erts_exit(ERTS_ABORT_EXIT, "size_shared: bad tag for %#x\n", obj);
        }
    }

 all_clean:
    /* Return the result */
    DESTROY_EQUEUE(s);
    DESTROY_BITSTORE(b);
    return sum;
}
603
604
605 /*
606 * Copy a structure to a heap.
607 */
/*
 * Flat-copy "obj" into the heap area [*hpp, *hpp + sz) (sz must come
 * from size_object on the same term/litopt).  Compound terms are
 * written top-down from htop while self-contained "things" (binaries,
 * bignums, off-heap headers) are written bottom-up from hbot; when the
 * area is fully consumed the two pointers meet.
 *
 * If bsz is NULL the whole area must be used and *hpp is advanced past
 * it; otherwise *hpp is set to htop and *bsz to the unused words left
 * between htop and hbot.
 *
 * litopt, when non-NULL, makes literals outside the purge area be
 * referenced verbatim instead of copied.
 */
Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap,
                    Uint *bsz, erts_literal_area_t *litopt
#ifdef ERTS_COPY_REGISTER_LOCATION
                    , char *file, int line
#endif
    )
{
    char* hstart;
    Uint hsize;
    Eterm* htop;
    Eterm* hbot;
    Eterm* hp;
    Eterm* ERTS_RESTRICT objp;
    Eterm* tp;
    Eterm  res;
    Eterm  elem;
    Eterm* tailp;
    Eterm* argp;
    Eterm* const_tuple;
    Eterm hdr;
    Eterm *hend;
    int i;
    Eterm *lit_purge_ptr = litopt ? litopt->lit_purge_ptr : NULL;
    Uint lit_purge_sz = litopt ? litopt->lit_purge_sz : 0;
#ifdef DEBUG
    Eterm org_obj = obj;
    Uint org_sz = sz;
    Eterm mypid = erts_get_current_pid();
#endif

    if (IS_CONST(obj))
        return obj;

    VERBOSE(DEBUG_SHCOPY, ("[pid=%T] copy_struct %p\n", mypid, obj));

    DTRACE1(copy_struct, (int32_t)sz);

    hp = htop = *hpp;
    hbot = hend = htop + sz;
    hstart = (char *)htop;
    hsize = (char*) hbot - hstart;
    const_tuple = 0;

    /* Copy the object onto the heap */
    switch (primary_tag(obj)) {
    case TAG_PRIMARY_LIST:
        argp = &res;
        objp = list_val(obj);
        goto L_copy_list;
    case TAG_PRIMARY_BOXED: argp = &res; goto L_copy_boxed;
    default:
        erts_exit(ERTS_ABORT_EXIT,
                 "%s, line %d: Internal error in copy_struct: 0x%08x\n",
                 __FILE__, __LINE__,obj);
    }

 L_copy:
    /* Scan the already-copied region (hp..htop); any pointer that still
     * refers outside the target area names a subterm yet to be copied. */
    while (hp != htop) {
        obj = *hp;
        switch (primary_tag(obj)) {
        case TAG_PRIMARY_IMMED1:
            hp++;
            break;
        case TAG_PRIMARY_LIST:
            objp = list_val(obj);
            if (ErtsInArea(objp,hstart,hsize)) {
                hp++;       /* already copied */
                break;
            }
            argp = hp++;
            /* Fall through */

        L_copy_list:
            tailp = argp;
            if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
                *tailp = obj;
                goto L_copy;
            }
            /* Copy a whole list spine in one go. */
            for (;;) {
                tp = tailp;
                elem = CAR(objp);
                if (IS_CONST(elem)) {
                    /* Cell with immediate CAR: goes to the bottom,
                     * never needs rescanning. */
                    hbot -= 2;
                    CAR(hbot) = elem;
                    tailp = &CDR(hbot);
                }
                else {
                    CAR(htop) = elem;
                    tailp = &CDR(htop);
                    htop += 2;
                }
                *tp = make_list(tailp - 1);
                obj = CDR(objp);

                if (!is_list(obj)) {
                    break;
                }
                objp = list_val(obj);

                if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
                    *tailp = obj;
                    goto L_copy;
                }
            }
            switch (primary_tag(obj)) {
            case TAG_PRIMARY_IMMED1: *tailp = obj; goto L_copy;
            case TAG_PRIMARY_BOXED: argp = tailp; goto L_copy_boxed;
            default:
                erts_exit(ERTS_ABORT_EXIT,
                         "%s, line %d: Internal error in copy_struct: 0x%08x\n",
                         __FILE__, __LINE__,obj);
            }

        case TAG_PRIMARY_BOXED:
            if (ErtsInArea(boxed_val(obj),hstart,hsize)) {
                hp++;       /* already copied */
                break;
            }
            argp = hp++;

        L_copy_boxed:
            objp = boxed_val(obj);
            if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
                *argp = obj;
                break;
            }
            hdr = *objp;
            switch (hdr & _TAG_HEADER_MASK) {
            case ARITYVAL_SUBTAG:
                {
                    int const_flag = 1; /* assume constant tuple */
                    i = arityval(hdr);
                    *argp = make_tuple(htop);
                    tp = htop;  /* tp is pointer to new arity value */
                    *htop++ = *objp++; /* copy arity value */
                    while (i--) {
                        elem = *objp++;
                        if (!IS_CONST(elem)) {
                            const_flag = 0;
                        }
                        *htop++ = elem;
                    }
                    if (const_flag) {
                        /* All-immediate tuple: the scan loop may skip
                         * it wholesale (see TAG_PRIMARY_HEADER below). */
                        const_tuple = tp; /* this is the latest const_tuple */
                    }
                }
                break;
            case REFC_BINARY_SUBTAG:
                {
                    ProcBin* pb;

                    pb = (ProcBin *) objp;
                    /* Writable binaries must be frozen before sharing
                     * the underlying Binary with another ProcBin. */
                    if (pb->flags) {
                        erts_emasculate_writable_binary(pb);
                    }
                    i = thing_arityval(*objp) + 1;
                    hbot -= i;
                    tp = hbot;
                    while (i--) {
                        *tp++ = *objp++;
                    }
                    *argp = make_binary(hbot);
                    pb = (ProcBin*) hbot;
                    erts_refc_inc(&pb->val->intern.refc, 2);
                    pb->next = off_heap->first;
                    pb->flags = 0;
                    off_heap->first = (struct erl_off_heap_header*) pb;
                    OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
                }
                break;
            case SUB_BINARY_SUBTAG:
                {
                    ErlSubBin* sb = (ErlSubBin *) objp;
                    Eterm real_bin = sb->orig;
                    Uint bit_offset = sb->bitoffs;
                    Uint bit_size = sb -> bitsize;
                    Uint offset = sb->offs;
                    size_t size = sb->size;
                    Uint extra_bytes;
                    Uint real_size;
                    /* A trailing bit fragment adds 1-2 bytes to the
                     * copied data, wrapped in a new sub binary below. */
                    if ((bit_size + bit_offset) > 8) {
                        extra_bytes = 2;
                    } else if ((bit_size + bit_offset) > 0) {
                        extra_bytes = 1;
                    } else {
                        extra_bytes = 0;
                    }
                    real_size = size+extra_bytes;
                    objp = binary_val(real_bin);
                    if (thing_subtag(*objp) == HEAP_BINARY_SUBTAG) {
                        ErlHeapBin* from = (ErlHeapBin *) objp;
                        ErlHeapBin* to;
                        i = heap_bin_size(real_size);
                        hbot -= i;
                        to = (ErlHeapBin *) hbot;
                        to->thing_word = header_heap_bin(real_size);
                        to->size = real_size;
                        sys_memcpy(to->data, ((byte *)from->data)+offset, real_size);
                    } else {
                        ProcBin* from = (ProcBin *) objp;
                        ProcBin* to;

                        ASSERT(thing_subtag(*objp) == REFC_BINARY_SUBTAG);
                        if (from->flags) {
                            erts_emasculate_writable_binary(from);
                        }
                        hbot -= PROC_BIN_SIZE;
                        to = (ProcBin *) hbot;
                        to->thing_word = HEADER_PROC_BIN;
                        to->size = real_size;
                        to->val = from->val;
                        erts_refc_inc(&to->val->intern.refc, 2);
                        to->bytes = from->bytes + offset;
                        to->next = off_heap->first;
                        to->flags = 0;
                        off_heap->first = (struct erl_off_heap_header*) to;
                        OH_OVERHEAD(off_heap, to->size / sizeof(Eterm));
                    }
                    *argp = make_binary(hbot);
                    if (extra_bytes != 0) {
                        /* Re-wrap to restore the exact bit length. */
                        ErlSubBin* res;
                        hbot -= ERL_SUB_BIN_SIZE;
                        res = (ErlSubBin *) hbot;
                        res->thing_word = HEADER_SUB_BIN;
                        res->size = size;
                        res->bitsize = bit_size;
                        res->bitoffs = bit_offset;
                        res->offs = 0;
                        res->is_writable = 0;
                        res->orig = *argp;
                        *argp = make_binary(hbot);
                    }
                    break;
                }
                break;
            case FUN_SUBTAG:
                {
                    ErlFunThing* funp = (ErlFunThing *) objp;

                    i =  thing_arityval(hdr) + 2 + funp->num_free;
                    tp = htop;
                    while (i--)  {
                        *htop++ = *objp++;
                    }
                    funp = (ErlFunThing *) tp;
                    funp->next = off_heap->first;
                    off_heap->first = (struct erl_off_heap_header*) funp;
                    erts_refc_inc(&funp->fe->refc, 2);
                    *argp = make_fun(tp);
                }
                break;
            case EXTERNAL_PID_SUBTAG:
            case EXTERNAL_PORT_SUBTAG:
            case EXTERNAL_REF_SUBTAG:
                {
                    ExternalThing *etp = (ExternalThing *) objp;
#if defined(ERTS_COPY_REGISTER_LOCATION) && defined(ERL_NODE_BOOKKEEP)
                    erts_ref_node_entry__(etp->node, 2, make_boxed(htop), file, line);
#else
                    erts_ref_node_entry(etp->node, 2, make_boxed(htop));
#endif
                }
            L_off_heap_node_container_common:
                {
                    struct erl_off_heap_header *ohhp;
                    ohhp = (struct erl_off_heap_header *) htop;
                    i  = thing_arityval(hdr) + 1;
                    *argp = make_boxed(htop);
                    tp = htop;

                    while (i--) {
                        *htop++ = *objp++;
                    }

                    ohhp->next = off_heap->first;
                    off_heap->first = ohhp;

                }
                break;
            case MAP_SUBTAG:
                tp = htop;
                switch (MAP_HEADER_TYPE(hdr)) {
                    case MAP_HEADER_TAG_FLATMAP_HEAD :
                        i = flatmap_get_size(objp) + 3;
                        *argp = make_flatmap(htop);
                        while (i--) {
                            *htop++ = *objp++;
                        }
                        break;
                    case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
                    case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
                        *htop++ = *objp++;  /* extra size word */
                        /* fall through */
                    case MAP_HEADER_TAG_HAMT_NODE_BITMAP :
                        i = 1 + hashmap_bitcount(MAP_HEADER_VAL(hdr));
                        while (i--)  { *htop++ = *objp++; }
                        *argp = make_hashmap(tp);
                        break;
                    default:
                        erts_exit(ERTS_ABORT_EXIT, "copy_struct: bad hashmap type %d\n", MAP_HEADER_TYPE(hdr));
                }
                break;
            case BIN_MATCHSTATE_SUBTAG:
                erts_exit(ERTS_ABORT_EXIT,
                         "copy_struct: matchstate term not allowed");
            case REF_SUBTAG:
                if (is_magic_ref_thing(objp)) {
                    ErtsMRefThing *mreft = (ErtsMRefThing *) objp;
                    erts_refc_inc(&mreft->mb->intern.refc, 2);
                    goto L_off_heap_node_container_common;
                }
                /* Fall through... */
            default:
                /* Leaf things (bignums, floats, local refs): copy to
                 * the bottom, no rescanning needed. */
                i = thing_arityval(hdr)+1;
                hbot -= i;
                tp = hbot;
                *argp = make_boxed(hbot);
                while (i--) {
                    *tp++ = *objp++;
                }
            }
            break;
        case TAG_PRIMARY_HEADER:
            /* Skip over things and over tuples known to hold only
             * immediates (the latest const_tuple). */
            if (header_is_thing(obj) || hp == const_tuple) {
                hp += header_arity(obj) + 1;
            } else {
                hp++;
            }
            break;
        }
    }

    if (bsz) {
        *hpp = htop;
        *bsz = hend - hbot;
    } else {
#ifdef DEBUG
        if (!eq(org_obj, res)) {
            erts_exit(ERTS_ABORT_EXIT,
                    "Internal error in copy_struct() when copying %T:"
                    " not equal to copy %T\n",
                    org_obj, res);
        }
        if (htop != hbot)
            erts_exit(ERTS_ABORT_EXIT,
                    "Internal error in copy_struct() when copying %T:"
                    " htop=%p != hbot=%p (sz=%beu)\n",
                    org_obj, htop, hbot, org_sz);
#else
        if (htop > hbot) {
            erts_exit(ERTS_ABORT_EXIT,
                    "Internal error in copy_struct(): htop, hbot overrun\n");
        }
#endif
        *hpp = (Eterm *) (hstart+hsize);
    }
    VERBOSE(DEBUG_SHCOPY, ("[pid=%T] result is at %p\n", mypid, res));
    return res;
}
965
966
967 /*
968 * Machinery for the table used by the sharing preserving copier
969 * Using an ESTACK but not very transparently; consider refactoring
970 */
971
/*
 * The sharing table used by the sharing-preserving copier: an ESTACK
 * abused as an array of 4-word records indexed by word offset e:
 *   [e+0] X    - saved CAR (lists) or saved header (boxed)
 *   [e+1] Y    - saved CDR (lists) or THE_NON_VALUE (boxed)
 *   [e+2] FWD  - forwarding pointer to the copy (filled in later)
 *   [e+3] REV  - back pointer to the original cell
 */
#define DECLARE_SHTABLE(s)		\
    DECLARE_ESTACK(s);			\
    Uint ESTK_CONCAT(s,_offset) = 0
#define DESTROY_SHTABLE(s) DESTROY_ESTACK(s)
#define SHTABLE_INCR 4
/* Offset of the next free record (== current table size in words). */
#define SHTABLE_NEXT(s)	ESTK_CONCAT(s,_offset)
/* Append a record (x, y, NULL, b); grows the stack if needed. */
#define SHTABLE_PUSH(s,x,y,b)					\
do {								\
    if (s.sp > s.end - SHTABLE_INCR) {				\
	erl_grow_estack(&(s), SHTABLE_INCR);	       		\
    }								\
    *s.sp++ = (x);						\
    *s.sp++ = (y);						\
    *s.sp++ = (Eterm) NULL;					\
    *s.sp++ = (Eterm) (b);					\
    ESTK_CONCAT(s,_offset) += SHTABLE_INCR;			\
} while(0)
#define SHTABLE_X(s,e) (s.start[e])
#define SHTABLE_Y(s,e) (s.start[(e)+1])
#define SHTABLE_FWD(s,e) ((Eterm *) (s.start[(e)+2]))
#define SHTABLE_FWD_UPD(s,e,p) (s.start[(e)+2] = (Eterm) (p))
#define SHTABLE_REV(s,e) ((Eterm *) (s.start[(e)+3]))

/* Shared-list marker states stored in the low bits of the CAR. */
#define LIST_SHARED_UNPROCESSED ((Eterm) 0)
#define LIST_SHARED_PROCESSED	((Eterm) 1)

/* Placeholder written into not-yet-filled destination heap slots. */
#define HEAP_ELEM_TO_BE_FILLED	_unchecked_make_list(NULL)
999
1000
1001 /*
1002 * Specialized macros for using/reusing the persistent state
1003 */
1004
/*
 * Declare a fresh EQUEUE whose initial storage is the small default
 * buffer embedded in the persistent info record.
 */
#define DECLARE_EQUEUE_INIT_INFO(q, info)		\
    UWord* EQUE_DEF_QUEUE(q) = info->queue_default;	\
    ErtsEQueue q = {					\
        EQUE_DEF_QUEUE(q), /* start */			\
        EQUE_DEF_QUEUE(q), /* front */			\
        EQUE_DEF_QUEUE(q), /* back */			\
        1,                 /* possibly_empty */		\
        EQUE_DEF_QUEUE(q) + DEF_EQUEUE_SIZE, /* end */	\
        ERTS_ALC_T_ESTACK  /* alloc_type */		\
    }

/*
 * Re-open (read-only) an EQUEUE that was previously saved into the
 * persistent info record by a suspended copy operation.
 */
#define DECLARE_EQUEUE_FROM_INFO(q, info)		\
    /* no EQUE_DEF_QUEUE(q), read-only */		\
    ErtsEQueue q = {					\
        info->queue_start,      /* start */		\
        info->queue_start,      /* front */		\
        info->queue_start,      /* back */		\
        1,                      /* possibly_empty */	\
        info->queue_end,        /* end */		\
        info->queue_alloc_type  /* alloc_type */	\
    }

/*
 * As DECLARE_BITSTORE, but backed by the persistent info record's
 * default buffer (fresh, write-only use).
 */
#define DECLARE_BITSTORE_INIT_INFO(s, info)		\
    UWord* WSTK_DEF_STACK(s) = info->bitstore_default;	\
    ErtsWStack s = {					\
        WSTK_DEF_STACK(s),  /* wstart */		\
        WSTK_DEF_STACK(s),  /* wsp */			\
        WSTK_DEF_STACK(s) + DEF_WSTACK_SIZE, /* wend */	\
        WSTK_DEF_STACK(s),  /* wdflt */ 		\
        ERTS_ALC_T_ESTACK   /* alloc_type */		\
    };							\
    int WSTK_CONCAT(s,_bitoffs) = 0;			\
    /* no WSTK_CONCAT(s,_offset), write-only */		\
    UWord WSTK_CONCAT(s,_buffer) = 0

/* Expand to D in debug builds, E otherwise. */
#ifdef DEBUG
# define DEBUG_COND(D,E) D
#else
# define DEBUG_COND(D,E) E
#endif

/*
 * Re-open (read-only) a BITSTORE saved in the persistent info record;
 * wsp is only needed for the ASSERT in BITSTORE_FETCH.
 */
#define DECLARE_BITSTORE_FROM_INFO(s, info)		\
    /* no WSTK_DEF_STACK(s), read-only */		\
    ErtsWStack s = {					\
        info->bitstore_start,  /* wstart */		\
        DEBUG_COND(info->bitstore_stop, NULL), /* wsp,  read-only */ \
        NULL,                  /* wend, read-only */	\
        NULL,                  /* wdef, read-only */	\
        info->bitstore_alloc_type /* alloc_type */	\
    };							\
    int WSTK_CONCAT(s,_bitoffs) = 0;			\
    int WSTK_CONCAT(s,_offset) = 0;			\
    UWord WSTK_CONCAT(s,_buffer) = 0

/*
 * As DECLARE_SHTABLE, but backed by the persistent info record's
 * default buffer (fresh use).
 */
#define DECLARE_SHTABLE_INIT_INFO(s, info)		\
    Eterm* ESTK_DEF_STACK(s) = info->shtable_default;	\
    ErtsEStack s = {					\
        ESTK_DEF_STACK(s),  /* start */			\
        ESTK_DEF_STACK(s),  /* sp */			\
        ESTK_DEF_STACK(s) + DEF_ESTACK_SIZE, /* end */	\
        ESTK_DEF_STACK(s),  /* default */		\
        ERTS_ALC_T_ESTACK   /* alloc_type */		\
    };							\
    Uint ESTK_CONCAT(s,_offset) = 0

/*
 * Re-open (read-only) a sharing table saved in the persistent info
 * record.
 */
#define DECLARE_SHTABLE_FROM_INFO(s, info)		\
    /* no ESTK_DEF_STACK(s), read-only */		\
    ErtsEStack s = {					\
        info->shtable_start,     /* start */		\
        NULL,                    /* sp,  read-only */	\
        NULL,                    /* end, read-only */	\
        NULL,                    /* def, read-only */	\
        info->shtable_alloc_type /* alloc_type */	\
    };							\
    /* no ESTK_CONCAT(s,_offset), read-only */
1080
1081 /*
1082 * Copy object "obj" preserving sharing.
1083 * First half: count size and calculate sharing.
1084 */
copy_shared_calculate(Eterm obj,erts_shcopy_t * info)1085 Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
1086 {
1087 Uint sum;
1088 Uint e;
1089 unsigned sz;
1090 Eterm* ptr;
1091 Eterm *lit_purge_ptr = info->lit_purge_ptr;
1092 Uint lit_purge_sz = info->lit_purge_sz;
1093 int copy_literals = info->copy_literals;
1094 #ifdef DEBUG
1095 Eterm mypid = erts_get_current_pid();
1096 #endif
1097
1098 DECLARE_EQUEUE_INIT_INFO(s, info);
1099 DECLARE_BITSTORE_INIT_INFO(b, info);
1100 DECLARE_SHTABLE_INIT_INFO(t, info);
1101
1102 /* step #0:
1103 -------------------------------------------------------
1104 get rid of the easy cases first:
1105 - copying constants
1106 - if not a proper process, do flat copy
1107 */
1108
1109 if (IS_CONST(obj))
1110 return 0;
1111
1112 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] copy_shared_calculate %p\n", mypid, obj));
1113 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] message is %T\n", mypid, obj));
1114
1115 /* step #1:
1116 -------------------------------------------------------
1117 traverse the term and calculate the size;
1118 when traversing, transform as you do in size_shared
1119 but when you find shared objects:
1120
1121 a. add entry in the table, indexed by i
1122 b. mark them:
1123 b1. boxed terms, set header to (i | 11)
1124 store (old header, NONV, NULL, backptr) in the entry
1125 b2. cons cells, set CDR to NONV, set CAR to i
1126 store (old CAR, old CDR, NULL, backptr) in the entry
1127 */
1128
1129 sum = 0;
1130
1131 for (;;) {
1132 switch (primary_tag(obj)) {
1133 case TAG_PRIMARY_LIST: {
1134 Eterm head, tail;
1135 ptr = list_val(obj);
1136 /* off heap list pointers are copied verbatim */
1137 if (erts_is_literal(obj,ptr)) {
1138 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
1139 if (copy_literals || in_literal_purge_area(ptr))
1140 info->literal_size += size_object(obj);
1141 goto pop_next;
1142 }
1143 head = CAR(ptr);
1144 tail = CDR(ptr);
1145 /* if it's visited, don't count it;
1146 if not already shared, make it shared and store it in the table */
1147 if (primary_tag(tail) == TAG_PRIMARY_HEADER ||
1148 primary_tag(head) == TAG_PRIMARY_HEADER) {
1149 if (tail != THE_NON_VALUE) {
1150 e = SHTABLE_NEXT(t);
1151 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] tabling L %p\n", mypid, ptr));
1152 SHTABLE_PUSH(t, head, tail, ptr);
1153 CAR(ptr) = (e << _TAG_PRIMARY_SIZE) | LIST_SHARED_UNPROCESSED;
1154 CDR(ptr) = THE_NON_VALUE;
1155 }
1156 goto pop_next;
1157 }
1158 /* else make it visited now */
1159 switch (primary_tag(tail)) {
1160 case TAG_PRIMARY_LIST:
1161 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] mangling L/L %p\n", mypid, ptr));
1162 CDR(ptr) = (tail - TAG_PRIMARY_LIST) | TAG_PRIMARY_HEADER;
1163 break;
1164 case TAG_PRIMARY_IMMED1:
1165 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] mangling L/I %p\n", mypid, ptr));
1166 CAR(ptr) = (head - primary_tag(head)) | TAG_PRIMARY_HEADER;
1167 CDR(ptr) = (tail - TAG_PRIMARY_IMMED1) | primary_tag(head);
1168 break;
1169 case TAG_PRIMARY_BOXED:
1170 BITSTORE_PUT(b, primary_tag(head));
1171 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] mangling L/B %p\n", mypid, ptr));
1172 CAR(ptr) = (head - primary_tag(head)) | TAG_PRIMARY_HEADER;
1173 CDR(ptr) = (tail - TAG_PRIMARY_BOXED) | TAG_PRIMARY_HEADER;
1174 break;
1175 }
1176 /* and count it */
1177 sum += 2;
1178 if (!IS_CONST(head)) {
1179 EQUEUE_PUT(s, head);
1180 }
1181 obj = tail;
1182 break;
1183 }
1184 case TAG_PRIMARY_BOXED: {
1185 Eterm hdr;
1186 ptr = boxed_val(obj);
1187 /* off heap pointers to boxes are copied verbatim */
1188 if (erts_is_literal(obj,ptr)) {
1189 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
1190 if (copy_literals || in_literal_purge_area(ptr))
1191 info->literal_size += size_object(obj);
1192 goto pop_next;
1193 }
1194 hdr = *ptr;
1195 /* if it's visited, don't count it;
1196 if not already shared, make it shared and store it in the table */
1197 if (primary_tag(hdr) != TAG_PRIMARY_HEADER) {
1198 if (primary_tag(hdr) == BOXED_VISITED) {
1199 e = SHTABLE_NEXT(t);
1200 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] tabling B %p\n", mypid, ptr));
1201 SHTABLE_PUSH(t, hdr, THE_NON_VALUE, ptr);
1202 *ptr = (e << _TAG_PRIMARY_SIZE) | BOXED_SHARED_UNPROCESSED;
1203 }
1204 goto pop_next;
1205 }
1206 /* else make it visited now */
1207 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] mangling B %p\n", mypid, ptr));
1208 *ptr = (hdr - primary_tag(hdr)) + BOXED_VISITED;
1209 /* and count it */
1210 ASSERT(is_header(hdr));
1211 switch (hdr & _TAG_HEADER_MASK) {
1212 case ARITYVAL_SUBTAG: {
1213 int arity = header_arity(hdr);
1214 sum += arity + 1;
1215 if (arity == 0) { /* Empty tuple -- unusual. */
1216 goto pop_next;
1217 }
1218 while (arity-- > 0) {
1219 obj = *++ptr;
1220 if (!IS_CONST(obj)) {
1221 EQUEUE_PUT(s, obj);
1222 }
1223 }
1224 goto pop_next;
1225 }
1226 case FUN_SUBTAG: {
1227 ErlFunThing* funp = (ErlFunThing *) ptr;
1228 unsigned eterms = 1 /* creator */ + funp->num_free;
1229 sz = thing_arityval(hdr);
1230 sum += 1 /* header */ + sz + eterms;
1231 ptr += 1 /* header */ + sz;
1232 while (eterms-- > 0) {
1233 obj = *ptr++;
1234 if (!IS_CONST(obj)) {
1235 EQUEUE_PUT(s, obj);
1236 }
1237 }
1238 goto pop_next;
1239 }
1240 case SUB_BINARY_SUBTAG: {
1241 ErlSubBin* sb = (ErlSubBin *) ptr;
1242 Eterm real_bin = sb->orig;
1243 Uint bit_offset = sb->bitoffs;
1244 Uint bit_size = sb->bitsize;
1245 size_t size = sb->size;
1246 Uint extra_bytes;
1247 Eterm hdr;
1248 if (bit_size + bit_offset > 8) {
1249 sum += ERL_SUB_BIN_SIZE;
1250 extra_bytes = 2;
1251 } else if (bit_size + bit_offset > 0) {
1252 sum += ERL_SUB_BIN_SIZE;
1253 extra_bytes = 1;
1254 } else {
1255 extra_bytes = 0;
1256 }
1257 ASSERT(is_boxed(real_bin));
1258 hdr = *_unchecked_binary_val(real_bin);
1259 switch (primary_tag(hdr)) {
1260 case TAG_PRIMARY_HEADER:
1261 /* real_bin is untouched, only referred by sub-bins so far */
1262 break;
1263 case BOXED_VISITED:
1264 /* real_bin referred directly once so far */
1265 hdr = (hdr - BOXED_VISITED) + TAG_PRIMARY_HEADER;
1266 break;
1267 case BOXED_SHARED_PROCESSED:
1268 case BOXED_SHARED_UNPROCESSED:
1269 /* real_bin referred directly more than once */
1270 e = hdr >> _TAG_PRIMARY_SIZE;
1271 hdr = SHTABLE_X(t, e);
1272 hdr = (hdr & ~BOXED_VISITED_MASK) + TAG_PRIMARY_HEADER;
1273 break;
1274 }
1275
1276 if (thing_subtag(hdr) == HEAP_BINARY_SUBTAG) {
1277 sum += heap_bin_size(size+extra_bytes);
1278 } else {
1279 ASSERT(thing_subtag(hdr) == REFC_BINARY_SUBTAG);
1280 sum += PROC_BIN_SIZE;
1281 }
1282 goto pop_next;
1283 }
1284 case MAP_SUBTAG:
1285 switch (MAP_HEADER_TYPE(hdr)) {
1286 case MAP_HEADER_TAG_FLATMAP_HEAD : {
1287 flatmap_t *mp = (flatmap_t *) ptr;
1288 Uint n = flatmap_get_size(mp) + 1;
1289 sum += n + 2;
1290 ptr += 2; /* hdr + size words */
1291 while (n--) {
1292 obj = *ptr++;
1293 if (!IS_CONST(obj)) {
1294 EQUEUE_PUT(s, obj);
1295 }
1296 }
1297 goto pop_next;
1298 }
1299 case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
1300 case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
1301 case MAP_HEADER_TAG_HAMT_NODE_BITMAP : {
1302 Uint n = hashmap_bitcount(MAP_HEADER_VAL(hdr));
1303 sum += 1 + n + header_arity(hdr);
1304 ptr += 1 + header_arity(hdr);
1305
1306 if (n == 0) {
1307 goto pop_next;
1308 }
1309 while(n--) {
1310 obj = *ptr++;
1311 if (!IS_CONST(obj)) {
1312 EQUEUE_PUT(s, obj);
1313 }
1314 }
1315 goto pop_next;
1316 }
1317 default:
1318 erts_exit(ERTS_ABORT_EXIT, "copy_shared_calculate: bad hashmap type %d\n", MAP_HEADER_TYPE(hdr));
1319 }
1320 case BIN_MATCHSTATE_SUBTAG:
1321 erts_exit(ERTS_ABORT_EXIT,
1322 "size_shared: matchstate term not allowed");
1323 default:
1324 sum += thing_arityval(hdr) + 1;
1325 goto pop_next;
1326 }
1327 break;
1328 }
1329 case TAG_PRIMARY_IMMED1:
1330 pop_next:
1331 if (EQUEUE_ISEMPTY(s)) {
1332 /* add sentinel to the table */
1333 SHTABLE_PUSH(t, THE_NON_VALUE, THE_NON_VALUE, NULL);
1334 /* store persistent info */
1335 BITSTORE_CLOSE(b);
1336 info->queue_start = s.start;
1337 info->queue_end = s.end;
1338 info->queue_alloc_type = s.alloc_type;
1339 info->bitstore_start = b.wstart;
1340 #ifdef DEBUG
1341 info->bitstore_stop = b.wsp;
1342 #endif
1343 info->bitstore_alloc_type = b.alloc_type;
1344 info->shtable_start = t.start;
1345 info->shtable_alloc_type = t.alloc_type;
1346 /* single point of return: the size of the object */
1347 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] size was: %u\n", mypid, sum));
1348 return sum + info->literal_size;
1349 }
1350 obj = EQUEUE_GET(s);
1351 break;
1352 default:
1353 erts_exit(ERTS_ABORT_EXIT, "[pid=%T] size_shared: bad tag for %#x\n", obj);
1354 }
1355 }
1356 }
1357
1358 /*
1359 * Copy object "obj" preserving sharing.
1360 * Second half: copy and restore the object.
1361 */
copy_shared_perform_x(Eterm obj,Uint size,erts_shcopy_t * info,Eterm ** hpp,ErlOffHeap * off_heap,char * file,int line)1362 Uint copy_shared_perform_x(Eterm obj, Uint size, erts_shcopy_t *info,
1363 Eterm** hpp, ErlOffHeap* off_heap
1364 #ifdef ERTS_COPY_REGISTER_LOCATION
1365 , char *file, int line
1366 #endif
1367 )
1368 {
1369 Uint e;
1370 unsigned sz;
1371 Eterm* ptr;
1372 Eterm* hp;
1373 Eterm* hscan;
1374 Eterm result;
1375 Eterm* resp;
1376 Eterm *hbot, *hend;
1377 unsigned remaining;
1378 Eterm *lit_purge_ptr = info->lit_purge_ptr;
1379 Uint lit_purge_sz = info->lit_purge_sz;
1380 int copy_literals = info->copy_literals;
1381 #ifdef DEBUG
1382 Eterm mypid = erts_get_current_pid();
1383 Eterm saved_obj = obj;
1384 #endif
1385
1386 DECLARE_EQUEUE_FROM_INFO(s, info);
1387 DECLARE_BITSTORE_FROM_INFO(b, info);
1388 DECLARE_SHTABLE_FROM_INFO(t, info);
1389
1390 /* step #0:
1391 -------------------------------------------------------
1392 get rid of the easy cases first:
1393 - copying constants
1394 - if not a proper process, do flat copy
1395 */
1396
1397 if (IS_CONST(obj))
1398 return obj;
1399
1400 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] copy_shared_perform %p\n", mypid, obj));
1401
1402 /* step #2: was performed before this function was called
1403 -------------------------------------------------------
1404 allocate new space
1405 */
1406
1407 hscan = hp = *hpp;
1408 hbot = hend = hp + size;
1409
1410 /* step #3:
1411 -------------------------------------------------------
1412 traverse the term a second time and when traversing:
1413 a. if the object is marked as shared
1414 a1. if the entry contains a forwarding ptr, use that
1415 a2. otherwise, copy it to the new space and store the
1416 forwarding ptr to the entry
1417 b. otherwise, reverse-transform as you do in size_shared
1418 and copy to the new space
1419 */
1420
1421 resp = &result;
1422 remaining = 0;
1423 for (;;) {
1424 switch (primary_tag(obj)) {
1425 case TAG_PRIMARY_LIST: {
1426 Eterm head, tail;
1427 ptr = list_val(obj);
1428 /* off heap list pointers are copied verbatim */
1429 if (erts_is_literal(obj,ptr)) {
1430 if (!(copy_literals || in_literal_purge_area(ptr))) {
1431 *resp = obj;
1432 } else {
1433 Uint bsz = 0;
1434 *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL
1435 #ifdef ERTS_COPY_REGISTER_LOCATION
1436 , file, line
1437 #endif
1438 ); /* copy literal */
1439 hbot -= bsz;
1440 }
1441 goto cleanup_next;
1442 }
1443 head = CAR(ptr);
1444 tail = CDR(ptr);
1445 /* if it is shared */
1446 if (tail == THE_NON_VALUE) {
1447 e = head >> _TAG_PRIMARY_SIZE;
1448 /* if it has been processed, just use the forwarding pointer */
1449 if (primary_tag(head) == LIST_SHARED_PROCESSED) {
1450 *resp = make_list(SHTABLE_FWD(t, e));
1451 goto cleanup_next;
1452 }
1453 /* else, let's process it now,
1454 copy it and keep the forwarding pointer */
1455 else {
1456 CAR(ptr) = (head - primary_tag(head)) + LIST_SHARED_PROCESSED;
1457 head = SHTABLE_X(t, e);
1458 tail = SHTABLE_Y(t, e);
1459 ptr = &(SHTABLE_X(t, e));
1460 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] tabled L %p is %p\n", mypid, ptr, SHTABLE_REV(t, e)));
1461 SHTABLE_FWD_UPD(t, e, hp);
1462 }
1463 }
1464 /* if not already clean, clean it up and copy it */
1465 if (primary_tag(tail) == TAG_PRIMARY_HEADER) {
1466 if (primary_tag(head) == TAG_PRIMARY_HEADER) {
1467 Eterm saved;
1468 BITSTORE_FETCH(b, saved);
1469 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] unmangling L/B %p\n", mypid, ptr));
1470 CAR(ptr) = head = (head - TAG_PRIMARY_HEADER) + saved;
1471 CDR(ptr) = tail = (tail - TAG_PRIMARY_HEADER) + TAG_PRIMARY_BOXED;
1472 } else {
1473 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] unmangling L/L %p\n", mypid, ptr));
1474 CDR(ptr) = tail = (tail - TAG_PRIMARY_HEADER) + TAG_PRIMARY_LIST;
1475 }
1476 } else if (primary_tag(head) == TAG_PRIMARY_HEADER) {
1477 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] unmangling L/I %p\n", mypid, ptr));
1478 CAR(ptr) = head = (head - TAG_PRIMARY_HEADER) | primary_tag(tail);
1479 CDR(ptr) = tail = (tail - primary_tag(tail)) | TAG_PRIMARY_IMMED1;
1480 } else {
1481 ASSERT(0 && "cannot come here");
1482 goto cleanup_next;
1483 }
1484 /* and its children too */
1485 if (IS_CONST(head)) {
1486 CAR(hp) = head;
1487 } else {
1488 EQUEUE_PUT_UNCHECKED(s, head);
1489 CAR(hp) = HEAP_ELEM_TO_BE_FILLED;
1490 }
1491 *resp = make_list(hp);
1492 resp = &(CDR(hp));
1493 hp += 2;
1494 obj = tail;
1495 break;
1496 }
1497 case TAG_PRIMARY_BOXED: {
1498 Eterm hdr;
1499 ptr = boxed_val(obj);
1500 /* off heap pointers to boxes are copied verbatim */
1501 if (erts_is_literal(obj,ptr)) {
1502 if (!(copy_literals || in_literal_purge_area(ptr))) {
1503 *resp = obj;
1504 } else {
1505 Uint bsz = 0;
1506 *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL
1507 #ifdef ERTS_COPY_REGISTER_LOCATION
1508 , file, line
1509 #endif
1510 ); /* copy literal */
1511 hbot -= bsz;
1512 }
1513 goto cleanup_next;
1514 }
1515 hdr = *ptr;
1516 /* clean it up, unless it's already clean or shared and processed */
1517 switch (primary_tag(hdr)) {
1518 case TAG_PRIMARY_HEADER:
1519 ASSERT(0 && "cannot come here");
1520 /* if it is shared and has been processed,
1521 just use the forwarding pointer */
1522 case BOXED_SHARED_PROCESSED:
1523 e = hdr >> _TAG_PRIMARY_SIZE;
1524 *resp = make_boxed(SHTABLE_FWD(t, e));
1525 goto cleanup_next;
1526 /* if it is shared but has not been processed yet, let's process
1527 it now: copy it and keep the forwarding pointer */
1528 case BOXED_SHARED_UNPROCESSED:
1529 e = hdr >> _TAG_PRIMARY_SIZE;
1530 *ptr = (hdr - primary_tag(hdr)) + BOXED_SHARED_PROCESSED;
1531 hdr = SHTABLE_X(t, e);
1532 ASSERT(primary_tag(hdr) == BOXED_VISITED);
1533 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] tabled B %p is %p\n", mypid, ptr, SHTABLE_REV(t, e)));
1534 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] unmangling B %p\n", mypid, ptr));
1535 SHTABLE_X(t, e) = hdr = (hdr - BOXED_VISITED) + TAG_PRIMARY_HEADER;
1536 SHTABLE_FWD_UPD(t, e, hp);
1537 break;
1538 case BOXED_VISITED:
1539 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] unmangling B %p\n", mypid, ptr));
1540 *ptr = hdr = (hdr - BOXED_VISITED) + TAG_PRIMARY_HEADER;
1541 break;
1542 }
1543 /* and its children too */
1544 switch (hdr & _TAG_HEADER_MASK) {
1545 case ARITYVAL_SUBTAG: {
1546 int arity = header_arity(hdr);
1547 *resp = make_boxed(hp);
1548 *hp++ = hdr;
1549 while (arity-- > 0) {
1550 obj = *++ptr;
1551 if (IS_CONST(obj)) {
1552 *hp++ = obj;
1553 } else {
1554 EQUEUE_PUT_UNCHECKED(s, obj);
1555 *hp++ = HEAP_ELEM_TO_BE_FILLED;
1556 }
1557 }
1558 goto cleanup_next;
1559 }
1560 case FUN_SUBTAG: {
1561 ErlFunThing* funp = (ErlFunThing *) ptr;
1562 unsigned eterms = 1 /* creator */ + funp->num_free;
1563 sz = thing_arityval(hdr);
1564 funp = (ErlFunThing *) hp;
1565 *resp = make_fun(hp);
1566 *hp++ = hdr;
1567 ptr++;
1568 while (sz-- > 0) {
1569 *hp++ = *ptr++;
1570 }
1571 while (eterms-- > 0) {
1572 obj = *ptr++;
1573 if (IS_CONST(obj)) {
1574 *hp++ = obj;
1575 } else {
1576 EQUEUE_PUT_UNCHECKED(s, obj);
1577 *hp++ = HEAP_ELEM_TO_BE_FILLED;
1578 }
1579 }
1580 funp->next = off_heap->first;
1581 off_heap->first = (struct erl_off_heap_header*) funp;
1582 erts_refc_inc(&funp->fe->refc, 2);
1583 goto cleanup_next;
1584 }
1585 case MAP_SUBTAG:
1586 *resp = make_flatmap(hp);
1587 *hp++ = hdr;
1588 switch (MAP_HEADER_TYPE(hdr)) {
1589 case MAP_HEADER_TAG_FLATMAP_HEAD : {
1590 flatmap_t *mp = (flatmap_t *) ptr;
1591 Uint n = flatmap_get_size(mp) + 1;
1592 *hp++ = *++ptr; /* keys */
1593 while (n--) {
1594 obj = *++ptr;
1595 if (IS_CONST(obj)) {
1596 *hp++ = obj;
1597 } else {
1598 EQUEUE_PUT_UNCHECKED(s, obj);
1599 *hp++ = HEAP_ELEM_TO_BE_FILLED;
1600 }
1601 }
1602 goto cleanup_next;
1603 }
1604 case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
1605 case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
1606 *hp++ = *++ptr; /* total map size */
1607 case MAP_HEADER_TAG_HAMT_NODE_BITMAP : {
1608 Uint n = hashmap_bitcount(MAP_HEADER_VAL(hdr));
1609 while (n--) {
1610 obj = *++ptr;
1611 if (IS_CONST(obj)) {
1612 *hp++ = obj;
1613 } else {
1614 EQUEUE_PUT_UNCHECKED(s, obj);
1615 *hp++ = HEAP_ELEM_TO_BE_FILLED;
1616 }
1617 }
1618 goto cleanup_next;
1619 }
1620 default:
1621 erts_exit(ERTS_ABORT_EXIT, "copy_shared_perform: bad hashmap type %d\n", MAP_HEADER_TYPE(hdr));
1622 }
1623 case REFC_BINARY_SUBTAG: {
1624 ProcBin* pb = (ProcBin *) ptr;
1625 sz = thing_arityval(hdr);
1626 if (pb->flags) {
1627 erts_emasculate_writable_binary(pb);
1628 }
1629 pb = (ProcBin *) hp;
1630 *resp = make_binary(hp);
1631 *hp++ = hdr;
1632 ptr++;
1633 while (sz-- > 0) {
1634 *hp++ = *ptr++;
1635 }
1636 erts_refc_inc(&pb->val->intern.refc, 2);
1637 pb->next = off_heap->first;
1638 pb->flags = 0;
1639 off_heap->first = (struct erl_off_heap_header*) pb;
1640 OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
1641 goto cleanup_next;
1642 }
1643 case SUB_BINARY_SUBTAG: {
1644 ErlSubBin* sb = (ErlSubBin *) ptr;
1645 Eterm real_bin = sb->orig;
1646 Uint bit_offset = sb->bitoffs;
1647 Uint bit_size = sb->bitsize;
1648 Uint offset = sb->offs;
1649 size_t size = sb->size;
1650 Uint extra_bytes;
1651 Uint real_size;
1652 if ((bit_size + bit_offset) > 8) {
1653 extra_bytes = 2;
1654 } else if ((bit_size + bit_offset) > 0) {
1655 extra_bytes = 1;
1656 } else {
1657 extra_bytes = 0;
1658 }
1659 real_size = size+extra_bytes;
1660 *resp = make_binary(hp);
1661 if (extra_bytes != 0) {
1662 ErlSubBin* res = (ErlSubBin *) hp;
1663 hp += ERL_SUB_BIN_SIZE;
1664 res->thing_word = HEADER_SUB_BIN;
1665 res->size = size;
1666 res->bitsize = bit_size;
1667 res->bitoffs = bit_offset;
1668 res->offs = 0;
1669 res->is_writable = 0;
1670 res->orig = make_binary(hp);
1671 }
1672 ASSERT(is_boxed(real_bin));
1673 ptr = _unchecked_binary_val(real_bin);
1674 hdr = *ptr;
1675 switch (primary_tag(hdr)) {
1676 case TAG_PRIMARY_HEADER:
1677 /* real_bin is untouched, ie only referred by sub-bins */
1678 break;
1679 case BOXED_VISITED:
1680 /* real_bin referred directly once */
1681 hdr = (hdr - BOXED_VISITED) + TAG_PRIMARY_HEADER;
1682 break;
1683 case BOXED_SHARED_PROCESSED:
1684 case BOXED_SHARED_UNPROCESSED:
1685 /* real_bin referred directly more than once */
1686 e = hdr >> _TAG_PRIMARY_SIZE;
1687 hdr = SHTABLE_X(t, e);
1688 hdr = (hdr & ~BOXED_VISITED_MASK) + TAG_PRIMARY_HEADER;
1689 break;
1690 }
1691 if (thing_subtag(hdr) == HEAP_BINARY_SUBTAG) {
1692 ErlHeapBin* from = (ErlHeapBin *) ptr;
1693 ErlHeapBin* to = (ErlHeapBin *) hp;
1694 hp += heap_bin_size(real_size);
1695 to->thing_word = header_heap_bin(real_size);
1696 to->size = real_size;
1697 sys_memcpy(to->data, ((byte *)from->data)+offset, real_size);
1698 } else {
1699 ProcBin* from = (ProcBin *) ptr;
1700 ProcBin* to = (ProcBin *) hp;
1701 ASSERT(thing_subtag(hdr) == REFC_BINARY_SUBTAG);
1702 if (from->flags) {
1703 erts_emasculate_writable_binary(from);
1704 }
1705 hp += PROC_BIN_SIZE;
1706 to->thing_word = HEADER_PROC_BIN;
1707 to->size = real_size;
1708 to->val = from->val;
1709 erts_refc_inc(&to->val->intern.refc, 2);
1710 to->bytes = from->bytes + offset;
1711 to->next = off_heap->first;
1712 to->flags = 0;
1713 off_heap->first = (struct erl_off_heap_header*) to;
1714 OH_OVERHEAD(off_heap, to->size / sizeof(Eterm));
1715 }
1716 goto cleanup_next;
1717 }
1718 case EXTERNAL_PID_SUBTAG:
1719 case EXTERNAL_PORT_SUBTAG:
1720 case EXTERNAL_REF_SUBTAG:
1721 {
1722 ExternalThing *etp = (ExternalThing *) ptr;
1723
1724 #if defined(ERTS_COPY_REGISTER_LOCATION) && defined(ERL_NODE_BOOKKEEP)
1725 erts_ref_node_entry__(etp->node, 2, make_boxed(hp), file, line);
1726 #else
1727 erts_ref_node_entry(etp->node, 2, make_boxed(hp));
1728 #endif
1729 }
1730 off_heap_node_container_common:
1731 {
1732 struct erl_off_heap_header *ohhp;
1733 ohhp = (struct erl_off_heap_header *) hp;
1734 sz = thing_arityval(hdr);
1735 *resp = make_boxed(hp);
1736 *hp++ = hdr;
1737 ptr++;
1738 while (sz-- > 0) {
1739 *hp++ = *ptr++;
1740 }
1741 ohhp->next = off_heap->first;
1742 off_heap->first = ohhp;
1743 goto cleanup_next;
1744 }
1745 case REF_SUBTAG:
1746 if (is_magic_ref_thing_with_hdr(ptr,hdr)) {
1747 ErtsMRefThing *mreft = (ErtsMRefThing *) ptr;
1748 erts_refc_inc(&mreft->mb->intern.refc, 2);
1749 goto off_heap_node_container_common;
1750 }
1751 /* Fall through... */
1752 default:
1753 sz = thing_arityval(hdr);
1754 *resp = make_boxed(hp);
1755 *hp++ = hdr;
1756 ptr++;
1757 while (sz-- > 0) {
1758 *hp++ = *ptr++;
1759 }
1760 goto cleanup_next;
1761 }
1762 break;
1763 }
1764 case TAG_PRIMARY_IMMED1:
1765 *resp = obj;
1766 cleanup_next:
1767 if (EQUEUE_ISEMPTY(s)) {
1768 goto all_clean;
1769 }
1770 obj = EQUEUE_GET(s);
1771 for (;;) {
1772 ASSERT(hscan < hp);
1773 if (remaining == 0) {
1774 if (*hscan == HEAP_ELEM_TO_BE_FILLED) {
1775 resp = hscan;
1776 hscan += 2;
1777 break; /* scanning loop */
1778 } else if (primary_tag(*hscan) == TAG_PRIMARY_HEADER) {
1779 switch (*hscan & _TAG_HEADER_MASK) {
1780 case ARITYVAL_SUBTAG:
1781 remaining = header_arity(*hscan);
1782 hscan++;
1783 break;
1784 case FUN_SUBTAG: {
1785 ErlFunThing* funp = (ErlFunThing *) hscan;
1786 hscan += 1 + thing_arityval(*hscan);
1787 remaining = 1 + funp->num_free;
1788 break;
1789 }
1790 case MAP_SUBTAG:
1791 switch (MAP_HEADER_TYPE(*hscan)) {
1792 case MAP_HEADER_TAG_FLATMAP_HEAD : {
1793 flatmap_t *mp = (flatmap_t *) hscan;
1794 remaining = flatmap_get_size(mp) + 1;
1795 hscan += 2;
1796 break;
1797 }
1798 case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
1799 case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
1800 case MAP_HEADER_TAG_HAMT_NODE_BITMAP :
1801 remaining = hashmap_bitcount(MAP_HEADER_VAL(*hscan));
1802 hscan += MAP_HEADER_ARITY(*hscan) + 1;
1803 break;
1804 default:
1805 erts_exit(ERTS_ABORT_EXIT,
1806 "copy_shared_perform: bad hashmap type %d\n",
1807 MAP_HEADER_TYPE(*hscan));
1808 }
1809 break;
1810 case SUB_BINARY_SUBTAG:
1811 ASSERT(((ErlSubBin *) hscan)->bitoffs +
1812 ((ErlSubBin *) hscan)->bitsize > 0);
1813 hscan += ERL_SUB_BIN_SIZE;
1814 break;
1815 default:
1816 hscan += 1 + thing_arityval(*hscan);
1817 break;
1818 }
1819 } else {
1820 hscan++;
1821 }
1822 } else if (*hscan == HEAP_ELEM_TO_BE_FILLED) {
1823 resp = hscan++;
1824 remaining--;
1825 break; /* scanning loop */
1826 } else {
1827 hscan++;
1828 remaining--;
1829 }
1830 }
1831 ASSERT(resp < hp);
1832 break;
1833 default:
1834 erts_exit(ERTS_ABORT_EXIT, "size_shared: bad tag for %#x\n", obj);
1835 }
1836 }
1837
1838 /* step #4:
1839 -------------------------------------------------------
1840 traverse the table and reverse-transform all stored entries
1841 */
1842
1843 all_clean:
1844 for (e = 0; ; e += SHTABLE_INCR) {
1845 ptr = SHTABLE_REV(t, e);
1846 if (ptr == NULL)
1847 break;
1848 VERBOSE(DEBUG_SHCOPY, ("[copy] restoring shared: %x\n", ptr));
1849 /* entry was a list */
1850 if (SHTABLE_Y(t, e) != THE_NON_VALUE) {
1851 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] untabling L %p\n", mypid, ptr));
1852 CAR(ptr) = SHTABLE_X(t, e);
1853 CDR(ptr) = SHTABLE_Y(t, e);
1854 }
1855 /* entry was boxed */
1856 else {
1857 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] untabling B %p\n", mypid, ptr));
1858 *ptr = SHTABLE_X(t, e);
1859 ASSERT(primary_tag(*ptr) == TAG_PRIMARY_HEADER);
1860 }
1861 }
1862
1863 #ifdef DEBUG
1864 if (eq(saved_obj, result) == 0) {
1865 erts_fprintf(stderr, "original = %T\n", saved_obj);
1866 erts_fprintf(stderr, "copy = %T\n", result);
1867 erts_exit(ERTS_ABORT_EXIT, "copy (shared) not equal to source\n");
1868 }
1869 #endif
1870
1871 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] original was %T\n", mypid, saved_obj));
1872 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] copy is %T\n", mypid, result));
1873 VERBOSE(DEBUG_SHCOPY, ("[pid=%T] result is at %p\n", mypid, result));
1874
1875 ASSERT(hbot == hp);
1876 ASSERT(size == ((hp - *hpp) + (hend - hbot)));
1877 *hpp = hend;
1878 return result;
1879 }
1880
1881
1882 /*
1883 * Copy a term that is guaranteed to be contained in a single
1884 * heap block. The heap block is copied word by word, and any
1885 * pointers are offsetted to point correctly in the new location.
1886 *
1887 * Typically used to copy a term from an ets table.
1888 *
1889 * NOTE: Assumes that term is a tuple (ptr is an untagged tuple ptr).
1890 */
1891 Eterm
copy_shallow_x(Eterm * ERTS_RESTRICT ptr,Uint sz,Eterm ** hpp,ErlOffHeap * off_heap,char * file,int line)1892 copy_shallow_x(Eterm* ERTS_RESTRICT ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap
1893 #ifdef ERTS_COPY_REGISTER_LOCATION
1894 , char *file, int line
1895 #endif
1896 )
1897 {
1898 Eterm* tp = ptr;
1899 Eterm* hp = *hpp;
1900 const Eterm res = make_tuple(hp);
1901 const Sint offs = (hp - tp) * sizeof(Eterm);
1902
1903 while (sz--) {
1904 Eterm val = *tp++;
1905
1906 switch (primary_tag(val)) {
1907 case TAG_PRIMARY_IMMED1:
1908 *hp++ = val;
1909 break;
1910 case TAG_PRIMARY_LIST:
1911 case TAG_PRIMARY_BOXED:
1912 *hp++ = byte_offset_ptr(val, offs);
1913 break;
1914 case TAG_PRIMARY_HEADER:
1915 *hp++ = val;
1916 switch (val & _HEADER_SUBTAG_MASK) {
1917 case ARITYVAL_SUBTAG:
1918 break;
1919 case REFC_BINARY_SUBTAG:
1920 {
1921 ProcBin* pb = (ProcBin *) (tp-1);
1922 erts_refc_inc(&pb->val->intern.refc, 2);
1923 OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
1924 }
1925 goto off_heap_common;
1926
1927 case FUN_SUBTAG:
1928 {
1929 ErlFunThing* funp = (ErlFunThing *) (tp-1);
1930 erts_refc_inc(&funp->fe->refc, 2);
1931 }
1932 goto off_heap_common;
1933 case EXTERNAL_PID_SUBTAG:
1934 case EXTERNAL_PORT_SUBTAG:
1935 case EXTERNAL_REF_SUBTAG:
1936 {
1937 ExternalThing* etp = (ExternalThing *) (tp-1);
1938 #if defined(ERTS_COPY_REGISTER_LOCATION) && defined(ERL_NODE_BOOKKEEP)
1939 erts_ref_node_entry__(etp->node, 2, make_boxed(hp-1), file, line);
1940 #else
1941 erts_ref_node_entry(etp->node, 2, make_boxed(hp-1));
1942 #endif
1943 }
1944 off_heap_common:
1945 {
1946 struct erl_off_heap_header* ohh = (struct erl_off_heap_header*)(hp-1);
1947 int tari = thing_arityval(val);
1948
1949 sz -= tari;
1950 while (tari--) {
1951 *hp++ = *tp++;
1952 }
1953 ohh->next = off_heap->first;
1954 off_heap->first = ohh;
1955 }
1956 break;
1957 case REF_SUBTAG: {
1958 ErtsRefThing *rtp = (ErtsRefThing *) (tp - 1);
1959 if (is_magic_ref_thing(rtp)) {
1960 ErtsMRefThing *mreft = (ErtsMRefThing *) rtp;
1961 erts_refc_inc(&mreft->mb->intern.refc, 2);
1962 goto off_heap_common;
1963 }
1964 /* Fall through... */
1965 }
1966 default:
1967 {
1968 int tari = header_arity(val);
1969
1970 sz -= tari;
1971 while (tari--) {
1972 *hp++ = *tp++;
1973 }
1974 }
1975 break;
1976 }
1977 break;
1978 }
1979 }
1980 *hpp = hp;
1981 return res;
1982 }
1983
1984 /* Move all terms in heap fragments into heap. The terms must be guaranteed to
1985 * be contained within the fragments. The source terms are destructed with
1986 * move markers.
1987 * Typically used to copy a multi-fragmented message (from NIF).
1988 */
erts_move_multi_frags(Eterm ** hpp,ErlOffHeap * off_heap,ErlHeapFragment * first,Eterm * refs,unsigned nrefs,int literals)1989 void erts_move_multi_frags(Eterm** hpp, ErlOffHeap* off_heap, ErlHeapFragment* first,
1990 Eterm* refs, unsigned nrefs, int literals)
1991 {
1992 ErlHeapFragment* bp;
1993 Eterm* hp_start = *hpp;
1994 Eterm* hp_end;
1995 Eterm* hp;
1996 unsigned i;
1997 Eterm literal_tag;
1998
1999 #ifdef TAG_LITERAL_PTR
2000 literal_tag = (Eterm) literals ? TAG_LITERAL_PTR : 0;
2001 #else
2002 literal_tag = (Eterm) 0;
2003 #endif
2004
2005 for (bp=first; bp!=NULL; bp=bp->next) {
2006 move_one_frag(hpp, bp, off_heap, literals);
2007 }
2008 hp_end = *hpp;
2009 for (hp=hp_start; hp<hp_end; ++hp) {
2010 Eterm* ptr;
2011 Eterm val;
2012 Eterm gval = *hp;
2013 switch (primary_tag(gval)) {
2014 case TAG_PRIMARY_BOXED:
2015 ptr = boxed_val(gval);
2016 val = *ptr;
2017 if (IS_MOVED_BOXED(val)) {
2018 ASSERT(is_boxed(val));
2019 #ifdef TAG_LITERAL_PTR
2020 val |= literal_tag;
2021 #endif
2022 *hp = val;
2023 }
2024 break;
2025 case TAG_PRIMARY_LIST:
2026 ptr = list_val(gval);
2027 val = *ptr;
2028 if (IS_MOVED_CONS(val)) {
2029 val = ptr[1];
2030 #ifdef TAG_LITERAL_PTR
2031 val |= literal_tag;
2032 #endif
2033 *hp = val;
2034 }
2035 break;
2036 case TAG_PRIMARY_HEADER:
2037 if (header_is_thing(gval)) {
2038 hp += thing_arityval(gval);
2039 }
2040 break;
2041 }
2042 }
2043 for (i=0; i<nrefs; ++i) {
2044 refs[i] = follow_moved(refs[i], literal_tag);
2045 }
2046 }
2047
/*
 * Move the contents of one heap fragment onto the heap at *hpp,
 * leaving move markers behind in the fragment so that pointers into
 * it can be redirected afterwards (see erts_move_multi_frags).
 * Off-heap objects are re-linked into off_heap's list; the fragment's
 * own off-heap list is emptied so its destruction won't touch them.
 */
static void
move_one_frag(Eterm** hpp, ErlHeapFragment* frag, ErlOffHeap* off_heap, int literals)
{
    Eterm* ptr = frag->mem;                 /* read cursor in the fragment */
    Eterm* end = ptr + frag->used_size;
    Eterm dummy_ref;                        /* move_* need a ref slot; unused here */
    Eterm* hp = *hpp;

    while (ptr != end) {
        Eterm val;
        ASSERT(ptr < end);
        val = *ptr;
        ASSERT(val != ERTS_HOLE_MARKER);
        if (is_header(val)) {
            /* hdr aliases the destination: the object's new address */
            struct erl_off_heap_header* hdr = (struct erl_off_heap_header*)hp;
            ASSERT(ptr + header_arity(val) < end);
            ptr = move_boxed(ptr, val, &hp, &dummy_ref);
            switch (val & _HEADER_SUBTAG_MASK) {
            case REF_SUBTAG:
                if (!is_magic_ref_thing(hdr))
                    break;
                /* Fall through: magic refs are off-heap objects */
            case REFC_BINARY_SUBTAG:
            case FUN_SUBTAG:
            case EXTERNAL_PID_SUBTAG:
            case EXTERNAL_PORT_SUBTAG:
            case EXTERNAL_REF_SUBTAG:
                hdr->next = off_heap->first;
                off_heap->first = hdr;
                break;
            }
        }
        else { /* must be a cons cell */
            ASSERT(ptr+1 < end);
            move_cons(ptr, val, &hp, &dummy_ref);
            ptr += 2;
        }
    }
    *hpp = hp;
    /* account for the moved off-heap data, then disown the fragment's list */
    OH_OVERHEAD(off_heap, frag->off_heap.overhead);
    frag->off_heap.first = NULL;
}
2089