1 /*
2 * %CopyrightBegin%
3 *
4 * Copyright Ericsson AB 2018-2020. All Rights Reserved.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * %CopyrightEnd%
19 */
20
21 /*
22 * Purpose: Implement persistent term storage.
23 */
24
25 #ifdef HAVE_CONFIG_H
26 # include "config.h"
27 #endif
28
29 #include "sys.h"
30 #include "erl_vm.h"
31 #include "global.h"
32 #include "erl_process.h"
33 #include "error.h"
34 #include "erl_driver.h"
35 #include "bif.h"
36 #include "erl_map.h"
37 #include "erl_binary.h"
38
39 /*
40 * Parameters for the hash table.
41 */
/*
 * Parameters for the hash table.
 *
 * The table is grown when it becomes at least 50% full (LOAD_FACTOR
 * percent) and shrunk when it falls to 25% full or less, but never
 * below INITIAL_SIZE.  INITIAL_SIZE must be a power of two so that
 * the table's `mask` can be used for wrapping probe indices.
 *
 * The macro parameter is parenthesized so the macros are safe to
 * invoke with any pointer-valued expression.
 */
#define INITIAL_SIZE 8
#define LOAD_FACTOR ((Uint)50)
#define MUST_GROW(t) (((Uint)100) * (t)->num_entries >= LOAD_FACTOR * (t)->allocated)
#define MUST_SHRINK(t) (((Uint)200) * (t)->num_entries <= LOAD_FACTOR * (t)->allocated && \
                        (t)->allocated > INITIAL_SIZE)
47
/*
 * The persistent term table: an open-addressing hash table with
 * linear probing (see lookup()).  A published table is treated as
 * immutable by readers; updates build a new copy and atomically
 * swap the global pointer (see table_updater()).
 */
typedef struct hash_table {
    Uint allocated;          /* Number of slots in term[]; a power of two */
    Uint num_entries;        /* Number of occupied slots */
    Uint mask;               /* allocated - 1; wraps probe indices */
    Uint first_to_delete;    /* First slot whose literal area should be freed
                              * when this table is deleted */
    Uint num_to_delete;      /* Number of consecutive slots (from
                              * first_to_delete) whose literal areas should
                              * be freed: 0, 1, or `allocated` when the whole
                              * storage is erased */
    erts_atomic_t refc;      /* Reference count for this table */
    struct hash_table* delete_next;  /* Link in the delete queue */
    ErtsThrPrgrLaterOp thr_prog_op;  /* Thread-progress op used when this
                                      * table is scheduled for deletion */
    Eterm term[1];           /* Variable-sized slot array: each slot is NIL
                              * (empty) or a {Key,Value} tuple stored in its
                              * own literal area */
} HashTable;
59
/*
 * State preserved across traps while get/0 or info/0 traverses the
 * table.  Kept in a magic binary; cleanup_trap_data() is its
 * destructor.
 */
typedef struct trap_data {
    HashTable* table;   /* Table being traversed; set to NULL once the
                         * reference count has been (or need not be) given back */
    Uint idx;           /* Next slot index to visit */
    Uint remaining;     /* Number of entries still to visit */
    Uint memory;        /* Used by info/0 to count used memory */
} TrapData;
66
/* Resume point inside copy_table() after a trap. */
typedef enum {
    ERTS_PERSISTENT_TERM_CPY_PLACE_START,
    ERTS_PERSISTENT_TERM_CPY_PLACE_1,
    ERTS_PERSISTENT_TERM_CPY_PLACE_2,
    ERTS_PERSISTENT_TERM_CPY_PLACE_3
} ErtsPersistentTermCpyTableLocation;

/* How copy_table() should copy the source table. */
typedef enum {
    ERTS_PERSISTENT_TERM_CPY_NO_REHASH = 0, /* Exact copy, same size */
    ERTS_PERSISTENT_TERM_CPY_REHASH = 1,    /* Re-insert every key into a
                                             * fresh (possibly resized) table */
    ERTS_PERSISTENT_TERM_CPY_TEMP = 2       /* Exact copy into a temporary
                                             * allocation type */
} ErtsPersistentTermCpyTableType;

/* Resumable state for copy_table(), the trappable table copy. */
typedef struct {
    HashTable* old_table; /* in param */
    Uint new_size; /* in param */
    ErtsPersistentTermCpyTableType copy_type; /* in param */
    Uint max_iterations; /* in param */
    ErtsPersistentTermCpyTableLocation location; /* in/out param */
    Uint iterations_done; /* in/out param: progress within current phase */
    Uint total_iterations_done; /* out param: work done by the last call */
    HashTable* new_table; /* out param; reset to NULL when ownership is
                           * handed back to the caller (the context
                           * destructors rely on this to know whether
                           * they must free it) */
} ErtsPersistentTermCpyTableCtx;
90
/* Trap resume points within persistent_term_put_2(). */
typedef enum {
    PUT2_TRAP_LOCATION_NEW_KEY,
    PUT2_TRAP_LOCATION_REPLACE_VALUE
} ErtsPersistentTermPut2TrapLocation;

/* State preserved across traps in persistent_term_put_2(). */
typedef struct {
    ErtsPersistentTermPut2TrapLocation trap_location; /* Where to resume */
    Eterm key;              /* BIF_ARG_1 */
    Eterm term;             /* BIF_ARG_2 */
    Uint entry_index;       /* Slot chosen for the key */
    HashTable* hash_table;  /* The table copy being built */
    Eterm heap[3];          /* Backing storage for the {Key,Value} tuple */
    Eterm tuple;            /* Tagged pointer to heap[] */
    ErtsPersistentTermCpyTableCtx cpy_ctx; /* Trappable table-copy state */
} ErtsPersistentTermPut2Context;
106
/* Trap resume points within persistent_term_erase_1(). */
typedef enum {
    ERASE1_TRAP_LOCATION_TMP_COPY,
    ERASE1_TRAP_LOCATION_FINAL_COPY
} ErtsPersistentTermErase1TrapLocation;

/* State preserved across traps in persistent_term_erase_1(). */
typedef struct {
    ErtsPersistentTermErase1TrapLocation trap_location; /* Where to resume */
    Eterm key;              /* BIF_ARG_1 */
    HashTable* old_table;   /* Currently published table */
    HashTable* new_table;   /* Final, rehashed table */
    Uint entry_index;       /* Slot of the key being erased */
    Eterm old_term;         /* Slot contents before erasing (NIL or tuple) */
    HashTable* tmp_table;   /* Temporary same-size copy; NULL when freed
                             * (the context destructor relies on this) */
    ErtsPersistentTermCpyTableCtx cpy_ctx; /* Trappable table-copy state */
} ErtsPersistentTermErase1Context;
122
123 /*
124 * Declarations of local functions.
125 */
126
127 static HashTable* create_initial_table(void);
128 static Uint lookup(HashTable* hash_table, Eterm key);
129 static HashTable* copy_table(ErtsPersistentTermCpyTableCtx* ctx);
130 static int try_seize_update_permission(Process* c_p);
131 static void release_update_permission(int release_updater);
132 static void table_updater(void* table);
133 static void table_deleter(void* hash_table);
134 static void dec_table_refc(Process* c_p, HashTable* old_table);
135 static void delete_table(Process* c_p, HashTable* table);
136 static void mark_for_deletion(HashTable* hash_table, Uint entry_index);
137 static ErtsLiteralArea* term_to_area(Eterm tuple);
138 static void suspend_updater(Process* c_p);
139 static Eterm do_get_all(Process* c_p, TrapData* trap_data, Eterm res);
140 static Eterm do_info(Process* c_p, TrapData* trap_data);
141 static void append_to_delete_queue(HashTable* table);
142 static HashTable* next_to_delete(void);
143 static Eterm alloc_trap_data(Process* c_p);
144 static int cleanup_trap_data(Binary *bp);
145
/*
 * Traps: export entries used to re-enter the get_all/info traversal
 * after yielding.
 */

static Export persistent_term_get_all_export;
static BIF_RETTYPE persistent_term_get_all_trap(BIF_ALIST_2);
static Export persistent_term_info_export;
static BIF_RETTYPE persistent_term_info_trap(BIF_ALIST_1);

/*
 * Pointer to the current hash table.  Readers load it with a plain
 * atomic read; a new table is published by table_updater() after
 * thread progress.
 */

static erts_atomic_t the_hash_table;

/*
 * Queue of processes waiting to update the hash table.
 */

struct update_queue_item {
    Process *p;                       /* Process waiting for update permission */
    struct update_queue_item* next;
};

static erts_mtx_t update_table_permission_mtx;
static struct update_queue_item* update_queue = NULL;
static Process* updater_process = NULL;

/* Protected by update_table_permission_mtx */
static ErtsThrPrgrLaterOp thr_prog_op;

/*
 * Queue of hash tables to be deleted.
 */

static erts_mtx_t delete_queue_mtx;
static HashTable* delete_queue_head = NULL;
static HashTable** delete_queue_tail = &delete_queue_head;  /* Tail pointer for O(1) append */

/*
 * The following variables are only used during crash dumping. They
 * are initialized by erts_init_persistent_dumping().
 */

ErtsLiteralArea** erts_persistent_areas;
Uint erts_num_persistent_areas;
192
/*
 * One-time initialization of the persistent term storage, called
 * during emulator start-up.
 */
void erts_init_bif_persistent_term(void)
{
    HashTable* hash_table;

    /*
     * Initialize the mutex protecting updates.
     */

    erts_mtx_init(&update_table_permission_mtx,
                  "update_persistent_term_permission",
                  NIL,
                  ERTS_LOCK_FLAGS_PROPERTY_STATIC |
                  ERTS_LOCK_FLAGS_CATEGORY_GENERIC);

    /*
     * Initialize delete queue.
     */

    erts_mtx_init(&delete_queue_mtx,
                  "persistent_term_delete_permission",
                  NIL,
                  ERTS_LOCK_FLAGS_PROPERTY_STATIC |
                  ERTS_LOCK_FLAGS_CATEGORY_GENERIC);

    /*
     * Allocate a small initial hash table.
     */

    hash_table = create_initial_table();
    erts_atomic_init_nob(&the_hash_table, (erts_aint_t)hash_table);

    /*
     * Initialize export entries for the traps used by get/0 and info/0.
     */

    erts_init_trap_export(&persistent_term_get_all_export,
                          am_persistent_term, am_get_all_trap, 2,
                          &persistent_term_get_all_trap);
    erts_init_trap_export(&persistent_term_info_export,
                          am_persistent_term, am_info_trap, 1,
                          &persistent_term_info_trap);
}
235
/*
 * Macro used for trapping in persistent_term_put_2 and
 * persistent_term_erase_1.
 *
 * Drives copy_table() with a budget of max_iterations steps.  If the
 * copy is not finished, it records LOC_NAME as the resume point,
 * disables garbage collection for the process (the live state is in
 * the magic-bin context, not on the heap) and traps via TRAP_CODE.
 * The L_<LOC_NAME> label placed before the copy_table() call is the
 * point jumped back to when the BIF is re-entered after the trap.
 *
 * Relies on `ctx`, `max_iterations` and `iterations_until_trap`
 * being in scope in the surrounding function.
 */
#define TRAPPING_COPY_TABLE(TABLE_DEST, OLD_TABLE, NEW_SIZE, COPY_TYPE, LOC_NAME, TRAP_CODE) \
    do {                                                                \
        ctx->cpy_ctx = (ErtsPersistentTermCpyTableCtx){                 \
            .old_table = OLD_TABLE,                                     \
            .new_size = NEW_SIZE,                                       \
            .copy_type = COPY_TYPE,                                     \
            .location = ERTS_PERSISTENT_TERM_CPY_PLACE_START            \
        };                                                              \
    L_ ## LOC_NAME:                                                     \
        ctx->cpy_ctx.max_iterations = MAX(1, max_iterations);           \
        TABLE_DEST = copy_table(&ctx->cpy_ctx);                         \
        iterations_until_trap -= ctx->cpy_ctx.total_iterations_done;    \
        if (TABLE_DEST == NULL) {                                       \
            ctx->trap_location = LOC_NAME;                              \
            erts_set_gc_state(BIF_P, 0);                                \
            BUMP_ALL_REDS(BIF_P);                                       \
            TRAP_CODE;                                                  \
        }                                                               \
    } while (0)
259
/*
 * Destructor for the magic binary holding put/2 trap state.  If a
 * half-built table is still owned by the context (the process died
 * while trapping), free it and give back the update permission.
 */
static int persistent_term_put_2_ctx_bin_dtor(Binary *context_bin)
{
    ErtsPersistentTermPut2Context* ctx = ERTS_MAGIC_BIN_DATA(context_bin);

    if (ctx->cpy_ctx.new_table == NULL) {
        /* Nothing left to clean up. */
        return 1;
    }
    erts_free(ERTS_ALC_T_PERSISTENT_TERM, ctx->cpy_ctx.new_table);
    release_update_permission(0);
    return 1;
}
/*
 * A linear congruential generator that is used in the debug emulator
 * to trap after a random number of iterations in
 * persistent_term_put_2 and persistent_term_erase_1.
 *
 * https://en.wikipedia.org/wiki/Linear_congruential_generator
 *
 * The whole expansion is parenthesized so the macro can be embedded
 * safely in a larger expression.
 */
#define GET_SMALL_RANDOM_INT(SEED) \
    ((1103515245 * (SEED) + 12345) % 227)
278
/*
 * persistent_term:put/2.
 *
 * Inserts or updates Key (BIF_ARG_1) with Value (BIF_ARG_2).  The
 * whole hash table is copied (copy-on-write); the new table is
 * published by table_updater() after thread progress, during which
 * this process stays suspended.  The table copy is trappable: all
 * state lives in a magic binary whose destructor frees a half-built
 * table if the process dies mid-operation.
 */
BIF_RETTYPE persistent_term_put_2(BIF_ALIST_2)
{
    static const Uint ITERATIONS_PER_RED = 32;
    ErtsPersistentTermPut2Context* ctx;
    Eterm state_mref = THE_NON_VALUE;
    long iterations_until_trap;
    long max_iterations;
/* Trap back into this BIF with the magic-ref state as first argument. */
#define PUT_TRAP_CODE \
    BIF_TRAP2(bif_export[BIF_persistent_term_put_2], BIF_P, state_mref, BIF_ARG_2)
#define TRAPPING_COPY_TABLE_PUT(TABLE_DEST, OLD_TABLE, NEW_SIZE, COPY_TYPE, LOC_NAME) \
    TRAPPING_COPY_TABLE(TABLE_DEST, OLD_TABLE, NEW_SIZE, COPY_TYPE, LOC_NAME, PUT_TRAP_CODE)

#ifdef DEBUG
    /* Debug emulator: trap after a pseudo-random number of iterations
     * to exercise the trapping code paths. */
    (void)ITERATIONS_PER_RED;
    iterations_until_trap = max_iterations =
        GET_SMALL_RANDOM_INT(ERTS_BIF_REDS_LEFT(BIF_P) + (Uint)&ctx);
#else
    iterations_until_trap = max_iterations =
        ITERATIONS_PER_RED * ERTS_BIF_REDS_LEFT(BIF_P);
#endif
    if (is_internal_magic_ref(BIF_ARG_1) &&
        (ERTS_MAGIC_BIN_DESTRUCTOR(erts_magic_ref2bin(BIF_ARG_1)) ==
         persistent_term_put_2_ctx_bin_dtor)) {
        /* Restore state after a trap */
        Binary* state_bin;
        state_mref = BIF_ARG_1;
        state_bin = erts_magic_ref2bin(state_mref);
        ctx = ERTS_MAGIC_BIN_DATA(state_bin);
        ASSERT(BIF_P->flags & F_DISABLE_GC);
        erts_set_gc_state(BIF_P, 1);
        switch (ctx->trap_location) {
        case PUT2_TRAP_LOCATION_NEW_KEY:
            goto L_PUT2_TRAP_LOCATION_NEW_KEY;
        case PUT2_TRAP_LOCATION_REPLACE_VALUE:
            goto L_PUT2_TRAP_LOCATION_REPLACE_VALUE;
        }
    } else {
        /* Save state in magic bin in case trapping is necessary */
        Eterm* hp;
        Binary* state_bin = erts_create_magic_binary(sizeof(ErtsPersistentTermPut2Context),
                                                     persistent_term_put_2_ctx_bin_dtor);
        hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
        state_mref = erts_mk_magic_ref(&hp, &MSO(BIF_P), state_bin);
        ctx = ERTS_MAGIC_BIN_DATA(state_bin);
        /*
         * IMPORTANT: The following field is used to detect if
         * persistent_term_put_2_ctx_bin_dtor needs to free memory
         */
        ctx->cpy_ctx.new_table = NULL;
    }


    if (!try_seize_update_permission(BIF_P)) {
        /* Another process is updating; yield and restart from scratch. */
        ERTS_BIF_YIELD2(bif_export[BIF_persistent_term_put_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }
    ctx->hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);

    ctx->key = BIF_ARG_1;
    ctx->term = BIF_ARG_2;

    ctx->entry_index = lookup(ctx->hash_table, ctx->key);

    /* Build the {Key,Value} tuple on the context's private heap. */
    ctx->heap[0] = make_arityval(2);
    ctx->heap[1] = ctx->key;
    ctx->heap[2] = ctx->term;
    ctx->tuple = make_tuple(ctx->heap);

    if (is_nil(ctx->hash_table->term[ctx->entry_index])) {
        /* New key: copy the table, growing it if the load factor demands. */
        Uint new_size = ctx->hash_table->allocated;
        if (MUST_GROW(ctx->hash_table)) {
            new_size *= 2;
        }
        TRAPPING_COPY_TABLE_PUT(ctx->hash_table,
                                ctx->hash_table,
                                new_size,
                                ERTS_PERSISTENT_TERM_CPY_NO_REHASH,
                                PUT2_TRAP_LOCATION_NEW_KEY);
        /* Look up again: the slot may differ if the table was grown. */
        ctx->entry_index = lookup(ctx->hash_table, ctx->key);
        ctx->hash_table->num_entries++;
    } else {
        Eterm tuple = ctx->hash_table->term[ctx->entry_index];
        Eterm old_term;

        ASSERT(is_tuple_arity(tuple, 2));
        old_term = boxed_val(tuple)[2];
        if (EQ(ctx->term, old_term)) {
            /* Same value. No need to update anything. */
            release_update_permission(0);
            BIF_RET(am_ok);
        } else {
            /* Mark the old term for deletion. */
            mark_for_deletion(ctx->hash_table, ctx->entry_index);
            TRAPPING_COPY_TABLE_PUT(ctx->hash_table,
                                    ctx->hash_table,
                                    ctx->hash_table->allocated,
                                    ERTS_PERSISTENT_TERM_CPY_NO_REHASH,
                                    PUT2_TRAP_LOCATION_REPLACE_VALUE);
        }
    }

    {
        Uint term_size;
        Uint lit_area_size;
        ErlOffHeap code_off_heap;
        ErtsLiteralArea* literal_area;
        erts_shcopy_t info;
        Eterm* ptr;
        /*
         * Preserve internal sharing in the term by using the
         * sharing-preserving functions. However, literals must
         * be copied in case the module holding them are unloaded.
         */
        INITIALIZE_SHCOPY(info);
        info.copy_literals = 1;
        term_size = copy_shared_calculate(ctx->tuple, &info);
        ERTS_INIT_OFF_HEAP(&code_off_heap);
        lit_area_size = ERTS_LITERAL_AREA_ALLOC_SIZE(term_size);
        literal_area = erts_alloc(ERTS_ALC_T_LITERAL, lit_area_size);
        ptr = &literal_area->start[0];
        literal_area->end = ptr + term_size;
        ctx->tuple = copy_shared_perform(ctx->tuple, term_size, &info, &ptr, &code_off_heap);
        /* term_to_area() depends on the tuple being first in its area. */
        ASSERT(tuple_val(ctx->tuple) == literal_area->start);
        literal_area->off_heap = code_off_heap.first;
        DESTROY_SHCOPY(info);
        erts_set_literal_tag(&ctx->tuple, literal_area->start, term_size);
        ctx->hash_table->term[ctx->entry_index] = ctx->tuple;

        /* Publish the new table after thread progress; this process
         * stays suspended until table_updater() has run. */
        erts_schedule_thr_prgr_later_op(table_updater, ctx->hash_table, &thr_prog_op);
        suspend_updater(BIF_P);
    }
    BUMP_REDS(BIF_P, (max_iterations - iterations_until_trap) / ITERATIONS_PER_RED);
    ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
}
413
persistent_term_get_0(BIF_ALIST_0)414 BIF_RETTYPE persistent_term_get_0(BIF_ALIST_0)
415 {
416 HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
417 TrapData* trap_data;
418 Eterm res = NIL;
419 Eterm magic_ref;
420 Binary* mbp;
421
422 magic_ref = alloc_trap_data(BIF_P);
423 mbp = erts_magic_ref2bin(magic_ref);
424 trap_data = ERTS_MAGIC_BIN_DATA(mbp);
425 trap_data->table = hash_table;
426 trap_data->idx = 0;
427 trap_data->remaining = hash_table->num_entries;
428 res = do_get_all(BIF_P, trap_data, res);
429 if (trap_data->remaining == 0) {
430 BUMP_REDS(BIF_P, hash_table->num_entries);
431 trap_data->table = NULL; /* Prevent refc decrement */
432 BIF_RET(res);
433 } else {
434 /*
435 * Increment the ref counter to prevent an update operation (by put/2
436 * or erase/1) to delete this hash table.
437 */
438 erts_atomic_inc_nob(&hash_table->refc);
439 BUMP_ALL_REDS(BIF_P);
440 BIF_TRAP2(&persistent_term_get_all_export, BIF_P, magic_ref, res);
441 }
442 }
443
persistent_term_get_1(BIF_ALIST_1)444 BIF_RETTYPE persistent_term_get_1(BIF_ALIST_1)
445 {
446 Eterm key = BIF_ARG_1;
447 HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
448 Uint entry_index;
449 Eterm term;
450
451 entry_index = lookup(hash_table, key);
452 term = hash_table->term[entry_index];
453 if (is_boxed(term)) {
454 ASSERT(is_tuple_arity(term, 2));
455 BIF_RET(tuple_val(term)[2]);
456 }
457 BIF_ERROR(BIF_P, BADARG);
458 }
459
persistent_term_get_2(BIF_ALIST_2)460 BIF_RETTYPE persistent_term_get_2(BIF_ALIST_2)
461 {
462 Eterm key = BIF_ARG_1;
463 Eterm result = BIF_ARG_2;
464 HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
465 Uint entry_index;
466 Eterm term;
467
468 entry_index = lookup(hash_table, key);
469 term = hash_table->term[entry_index];
470 if (is_boxed(term)) {
471 ASSERT(is_tuple_arity(term, 2));
472 result = tuple_val(term)[2];
473 }
474 BIF_RET(result);
475 }
476
/*
 * Destructor for the magic binary holding erase/1 trap state.  If
 * the context still owns table copies (the process died while
 * trapping), free them and give back the update permission.
 */
static int persistent_term_erase_1_ctx_bin_dtor(Binary *context_bin)
{
    ErtsPersistentTermErase1Context* ctx = ERTS_MAGIC_BIN_DATA(context_bin);

    if (ctx->cpy_ctx.new_table == NULL) {
        /* Nothing left to clean up. */
        return 1;
    }
    /* The allocator type depends on which copy was in progress. */
    erts_free(ctx->cpy_ctx.copy_type == ERTS_PERSISTENT_TERM_CPY_TEMP
                  ? ERTS_ALC_T_PERSISTENT_TERM_TMP
                  : ERTS_ALC_T_PERSISTENT_TERM,
              ctx->cpy_ctx.new_table);
    if (ctx->tmp_table != NULL) {
        erts_free(ERTS_ALC_T_PERSISTENT_TERM_TMP, ctx->tmp_table);
    }
    release_update_permission(0);
    return 1;
}
493
/*
 * persistent_term:erase/1.
 *
 * Removes Key (BIF_ARG_1).  Because the table uses open addressing
 * without delete markers, erasing requires a rehash: an exact
 * temporary copy is made, the entry is cleared in that copy, and the
 * temporary table is then rehashed into the final (possibly shrunk)
 * table.  Both copies are trappable; state lives in a magic binary
 * whose destructor cleans up if the process dies mid-operation.
 * Returns true if the key existed, false otherwise.
 */
BIF_RETTYPE persistent_term_erase_1(BIF_ALIST_1)
{
    static const Uint ITERATIONS_PER_RED = 32;
    ErtsPersistentTermErase1Context* ctx;
    Eterm state_mref = THE_NON_VALUE;
    long iterations_until_trap;
    long max_iterations;
#ifdef DEBUG
    /* Debug emulator: trap after a pseudo-random number of iterations
     * to exercise the trapping code paths. */
    (void)ITERATIONS_PER_RED;
    iterations_until_trap = max_iterations =
        GET_SMALL_RANDOM_INT(ERTS_BIF_REDS_LEFT(BIF_P) + (Uint)&ctx);
#else
    iterations_until_trap = max_iterations =
        ITERATIONS_PER_RED * ERTS_BIF_REDS_LEFT(BIF_P);
#endif
#define ERASE_TRAP_CODE \
    BIF_TRAP1(bif_export[BIF_persistent_term_erase_1], BIF_P, state_mref);
#define TRAPPING_COPY_TABLE_ERASE(TABLE_DEST, OLD_TABLE, NEW_SIZE, REHASH, LOC_NAME) \
    TRAPPING_COPY_TABLE(TABLE_DEST, OLD_TABLE, NEW_SIZE, REHASH, LOC_NAME, ERASE_TRAP_CODE)
    if (is_internal_magic_ref(BIF_ARG_1) &&
        (ERTS_MAGIC_BIN_DESTRUCTOR(erts_magic_ref2bin(BIF_ARG_1)) ==
         persistent_term_erase_1_ctx_bin_dtor)) {
        /* Restore the state after a trap */
        Binary* state_bin;
        state_mref = BIF_ARG_1;
        state_bin = erts_magic_ref2bin(state_mref);
        ctx = ERTS_MAGIC_BIN_DATA(state_bin);
        ASSERT(BIF_P->flags & F_DISABLE_GC);
        erts_set_gc_state(BIF_P, 1);
        switch (ctx->trap_location) {
        case ERASE1_TRAP_LOCATION_TMP_COPY:
            goto L_ERASE1_TRAP_LOCATION_TMP_COPY;
        case ERASE1_TRAP_LOCATION_FINAL_COPY:
            goto L_ERASE1_TRAP_LOCATION_FINAL_COPY;
        }
    } else {
        /* Save state in magic bin in case trapping is necessary */
        Eterm* hp;
        Binary* state_bin = erts_create_magic_binary(sizeof(ErtsPersistentTermErase1Context),
                                                     persistent_term_erase_1_ctx_bin_dtor);
        hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
        state_mref = erts_mk_magic_ref(&hp, &MSO(BIF_P), state_bin);
        ctx = ERTS_MAGIC_BIN_DATA(state_bin);
        /*
         * IMPORTANT: The following two fields are used to detect if
         * persistent_term_erase_1_ctx_bin_dtor needs to free memory
         */
        ctx->cpy_ctx.new_table = NULL;
        ctx->tmp_table = NULL;
    }
    if (!try_seize_update_permission(BIF_P)) {
        /* Another process is updating; yield and restart from scratch. */
        ERTS_BIF_YIELD1(bif_export[BIF_persistent_term_erase_1],
                        BIF_P, BIF_ARG_1);
    }

    ctx->key = BIF_ARG_1;
    ctx->old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
    ctx->entry_index = lookup(ctx->old_table, ctx->key);
    ctx->old_term = ctx->old_table->term[ctx->entry_index];
    if (is_boxed(ctx->old_term)) {
        Uint new_size;
        /*
         * Since we don't use any delete markers, we must rehash
         * the table when deleting terms to ensure that all terms
         * can still be reached if there are hash collisions.
         * We can't rehash in place and it would not be safe to modify
         * the old table yet, so we will first need a new
         * temporary table copy of the same size as the old one.
         */

        ASSERT(is_tuple_arity(ctx->old_term, 2));
        TRAPPING_COPY_TABLE_ERASE(ctx->tmp_table,
                                  ctx->old_table,
                                  ctx->old_table->allocated,
                                  ERTS_PERSISTENT_TERM_CPY_TEMP,
                                  ERASE1_TRAP_LOCATION_TMP_COPY);

        /*
         * Delete the term from the temporary table. Then copy the
         * temporary table to a new table, rehashing the entries
         * while copying.
         */

        ctx->tmp_table->term[ctx->entry_index] = NIL;
        ctx->tmp_table->num_entries--;
        new_size = ctx->tmp_table->allocated;
        if (MUST_SHRINK(ctx->tmp_table)) {
            new_size /= 2;
        }
        TRAPPING_COPY_TABLE_ERASE(ctx->new_table,
                                  ctx->tmp_table,
                                  new_size,
                                  ERTS_PERSISTENT_TERM_CPY_REHASH,
                                  ERASE1_TRAP_LOCATION_FINAL_COPY);
        erts_free(ERTS_ALC_T_PERSISTENT_TERM_TMP, ctx->tmp_table);
        /*
         * IMPORTANT: Memory management depends on that ctx->tmp_table
         * is set to NULL on the line below
         */
        ctx->tmp_table = NULL;

        /* Free the erased term's literal area when the old table dies. */
        mark_for_deletion(ctx->old_table, ctx->entry_index);
        erts_schedule_thr_prgr_later_op(table_updater, ctx->new_table, &thr_prog_op);
        suspend_updater(BIF_P);
        BUMP_REDS(BIF_P, (max_iterations - iterations_until_trap) / ITERATIONS_PER_RED);
        ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
    }

    /*
     * Key is not present. Nothing to do.
     */

    ASSERT(is_nil(ctx->old_term));
    release_update_permission(0);
    BIF_RET(am_false);
}
610
/*
 * erts_internal:erase_persistent_terms/0.
 *
 * Erases all persistent terms at once: the whole old table is
 * marked so its literal areas are freed when it is deleted, and a
 * fresh empty table is scheduled to replace it.
 */
BIF_RETTYPE erts_internal_erase_persistent_terms_0(BIF_ALIST_0)
{
    HashTable* old_table;
    HashTable* new_table;

    if (!try_seize_update_permission(BIF_P)) {
        ERTS_BIF_YIELD0(bif_export[BIF_erts_internal_erase_persistent_terms_0],
                        BIF_P);
    }
    old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
    /* Mark every slot's literal area for deletion with the table. */
    old_table->first_to_delete = 0;
    old_table->num_to_delete = old_table->allocated;
    new_table = create_initial_table();
    erts_schedule_thr_prgr_later_op(table_updater, new_table, &thr_prog_op);
    suspend_updater(BIF_P);
    ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
}
628
persistent_term_info_0(BIF_ALIST_0)629 BIF_RETTYPE persistent_term_info_0(BIF_ALIST_0)
630 {
631 HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
632 TrapData* trap_data;
633 Eterm res = NIL;
634 Eterm magic_ref;
635 Binary* mbp;
636
637 magic_ref = alloc_trap_data(BIF_P);
638 mbp = erts_magic_ref2bin(magic_ref);
639 trap_data = ERTS_MAGIC_BIN_DATA(mbp);
640 trap_data->table = hash_table;
641 trap_data->idx = 0;
642 trap_data->remaining = hash_table->num_entries;
643 trap_data->memory = 0;
644 res = do_info(BIF_P, trap_data);
645 if (trap_data->remaining == 0) {
646 BUMP_REDS(BIF_P, hash_table->num_entries);
647 trap_data->table = NULL; /* Prevent refc decrement */
648 BIF_RET(res);
649 } else {
650 /*
651 * Increment the ref counter to prevent an update operation (by put/2
652 * or erase/1) to delete this hash table.
653 */
654 erts_atomic_inc_nob(&hash_table->refc);
655 BUMP_ALL_REDS(BIF_P);
656 BIF_TRAP2(&persistent_term_info_export, BIF_P, magic_ref, res);
657 }
658 }
659
660 Uint
erts_persistent_term_count(void)661 erts_persistent_term_count(void)
662 {
663 HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
664 return hash_table->num_entries;
665 }
666
/*
 * Prepare for crash dumping: reuse the current hash table's term
 * array, in place, as a dense array of ErtsLiteralArea pointers
 * (exported through erts_persistent_areas and
 * erts_num_persistent_areas).
 *
 * This destroys the hash table's contents, which is acceptable only
 * because the emulator is about to produce a crash dump.
 */
void
erts_init_persistent_dumping(void)
{
    HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
    ErtsLiteralArea** area_p;
    Uint i;

    /*
     * Overwrite the array of Eterms in the current hash table
     * with pointers to literal areas.
     */

    erts_persistent_areas = (ErtsLiteralArea **) hash_table->term;
    erts_num_persistent_areas = hash_table->num_entries;
    area_p = erts_persistent_areas;
    for (i = 0; i < hash_table->allocated; i++) {
        Eterm term = hash_table->term[i];

        /* Only occupied slots contribute a literal area. */
        if (is_boxed(term)) {
            *area_p++ = term_to_area(term);
        }
    }
}
690
691 /*
692 * Local functions.
693 */
694
695 static HashTable*
create_initial_table(void)696 create_initial_table(void)
697 {
698 HashTable* hash_table;
699 int i;
700
701 hash_table = (HashTable *) erts_alloc(ERTS_ALC_T_PERSISTENT_TERM,
702 sizeof(HashTable)+sizeof(Eterm) *
703 (INITIAL_SIZE-1));
704 hash_table->allocated = INITIAL_SIZE;
705 hash_table->num_entries = 0;
706 hash_table->mask = INITIAL_SIZE-1;
707 hash_table->first_to_delete = 0;
708 hash_table->num_to_delete = 0;
709 erts_atomic_init_nob(&hash_table->refc, (erts_aint_t)1);
710 for (i = 0; i < INITIAL_SIZE; i++) {
711 hash_table->term[i] = NIL;
712 }
713 return hash_table;
714 }
715
716 static BIF_RETTYPE
persistent_term_get_all_trap(BIF_ALIST_2)717 persistent_term_get_all_trap(BIF_ALIST_2)
718 {
719 TrapData* trap_data;
720 Eterm res = BIF_ARG_2;
721 Uint bump_reds;
722 Binary* mbp;
723
724 ASSERT(is_list(BIF_ARG_2));
725 mbp = erts_magic_ref2bin(BIF_ARG_1);
726 trap_data = ERTS_MAGIC_BIN_DATA(mbp);
727 bump_reds = trap_data->remaining;
728 res = do_get_all(BIF_P, trap_data, res);
729 ASSERT(is_list(res));
730 if (trap_data->remaining > 0) {
731 BUMP_ALL_REDS(BIF_P);
732 BIF_TRAP2(&persistent_term_get_all_export, BIF_P, BIF_ARG_1, res);
733 } else {
734 /*
735 * Decrement ref count (and possibly delete the hash table
736 * and associated literal area).
737 */
738 dec_table_refc(BIF_P, trap_data->table);
739 trap_data->table = NULL; /* Prevent refc decrement */
740 BUMP_REDS(BIF_P, bump_reds);
741 BIF_RET(res);
742 }
743 }
744
/*
 * Collect up to one timeslice worth of {Key,Value} pairs from the
 * table, prepending them to the list `res`.
 *
 * Two passes: first measure the size of every key (the values are
 * stored literals and need no copying), then allocate the process
 * heap once and build the cons cells and tuples.  Updates trap_data
 * so a subsequent call resumes where this one stopped.
 */
static Eterm
do_get_all(Process* c_p, TrapData* trap_data, Eterm res)
{
    HashTable* hash_table;
    Uint remaining;
    Uint idx;
    Uint max_iter;
    Uint i;
    Eterm* hp;
    Uint heap_size;
    struct copy_term {
        Uint key_size;       /* Heap words needed to copy the key */
        Eterm* tuple_ptr;    /* The stored {Key,Value} tuple */
    } *copy_data;

    hash_table = trap_data->table;
    idx = trap_data->idx;
#if defined(DEBUG) || defined(VALGRIND)
    max_iter = 50;    /* Small budget to exercise the trapping path */
#else
    max_iter = ERTS_BIF_REDS_LEFT(c_p);
#endif
    remaining = trap_data->remaining < max_iter ?
        trap_data->remaining : max_iter;
    trap_data->remaining -= remaining;

    copy_data = (struct copy_term *) erts_alloc(ERTS_ALC_T_TMP,
                                                remaining *
                                                sizeof(struct copy_term));
    i = 0;
    /* 2 words per cons cell + 3 words per 2-tuple, plus key copies. */
    heap_size = (2 + 3) * remaining;
    while (remaining != 0) {
        Eterm term = hash_table->term[idx];
        if (is_tuple(term)) {
            Uint key_size;
            Eterm* tup_val;

            ASSERT(is_tuple_arity(term, 2));
            tup_val = tuple_val(term);
            key_size = size_object(tup_val[1]);
            copy_data[i].key_size = key_size;
            copy_data[i].tuple_ptr = tup_val;
            heap_size += key_size;
            i++;
            remaining--;
        }
        idx++;
    }
    trap_data->idx = idx;

    hp = HAlloc(c_p, heap_size);
    remaining = i;
    for (i = 0; i < remaining; i++) {
        Eterm* tuple_ptr;
        Uint key_size;
        Eterm key;
        Eterm tup;

        tuple_ptr = copy_data[i].tuple_ptr;
        key_size = copy_data[i].key_size;
        /* Copy the key; the value (tuple_ptr[2]) is a literal and can
         * be referenced directly from the result. */
        key = copy_struct(tuple_ptr[1], key_size, &hp, &c_p->off_heap);
        tup = TUPLE2(hp, key, tuple_ptr[2]);
        hp += 3;
        res = CONS(hp, tup, res);
        hp += 2;
    }
    erts_free(ERTS_ALC_T_TMP, copy_data);
    return res;
}
814
815 static BIF_RETTYPE
persistent_term_info_trap(BIF_ALIST_1)816 persistent_term_info_trap(BIF_ALIST_1)
817 {
818 TrapData* trap_data = (TrapData *) BIF_ARG_1;
819 Eterm res;
820 Uint bump_reds;
821 Binary* mbp;
822
823 mbp = erts_magic_ref2bin(BIF_ARG_1);
824 trap_data = ERTS_MAGIC_BIN_DATA(mbp);
825 bump_reds = trap_data->remaining;
826 res = do_info(BIF_P, trap_data);
827 if (trap_data->remaining > 0) {
828 ASSERT(res == am_ok);
829 BUMP_ALL_REDS(BIF_P);
830 BIF_TRAP1(&persistent_term_info_export, BIF_P, BIF_ARG_1);
831 } else {
832 /*
833 * Decrement ref count (and possibly delete the hash table
834 * and associated literal area).
835 */
836 dec_table_refc(BIF_P, trap_data->table);
837 trap_data->table = NULL; /* Prevent refc decrement */
838 BUMP_REDS(BIF_P, bump_reds);
839 ASSERT(is_map(res));
840 BIF_RET(res);
841 }
842 }
843
/* NOTE(review): DECL_AM appears unused in this file (do_info uses the
 * predefined am_count/am_memory atoms directly) — confirm before removing. */
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
845
/*
 * Accumulate the memory used by the stored literal areas into
 * trap_data->memory, visiting at most one timeslice worth of
 * entries per call.  Returns am_ok while more work remains; the
 * final call builds and returns the #{count => ..., memory => ...}
 * result map.
 */
static Eterm
do_info(Process* c_p, TrapData* trap_data)
{
    HashTable* hash_table;
    Uint remaining;
    Uint idx;
    Uint max_iter;

    hash_table = trap_data->table;
    idx = trap_data->idx;
#if defined(DEBUG) || defined(VALGRIND)
    max_iter = 50;    /* Small budget to exercise the trapping path */
#else
    max_iter = ERTS_BIF_REDS_LEFT(c_p);
#endif
    remaining = trap_data->remaining < max_iter ? trap_data->remaining : max_iter;
    trap_data->remaining -= remaining;
    while (remaining != 0) {
        if (is_boxed(hash_table->term[idx])) {
            ErtsLiteralArea* area;
            area = term_to_area(hash_table->term[idx]);
            /* Size of the area header plus its term words (the header
             * already accounts for one word of start[]). */
            trap_data->memory += sizeof(ErtsLiteralArea) +
                sizeof(Eterm) * (area->end - area->start - 1);
            remaining--;
        }
        idx++;
    }
    trap_data->idx = idx;
    if (trap_data->remaining > 0) {
        return am_ok;            /* Dummy return value */
    } else {
        Eterm* hp;
        Eterm count_term;
        Eterm memory_term;
        Eterm res;
        Uint memory;
        Uint hsz = MAP_SZ(2);

        /* Add the table itself to the total. */
        memory = sizeof(HashTable) + (trap_data->table->allocated-1) *
            sizeof(Eterm) + trap_data->memory;
        /* First pass computes the needed heap size ... */
        (void) erts_bld_uint(NULL, &hsz, hash_table->num_entries);
        (void) erts_bld_uint(NULL, &hsz, memory);
        hp = HAlloc(c_p, hsz);
        /* ... second pass builds the terms. */
        count_term = erts_bld_uint(&hp, NULL, hash_table->num_entries);
        memory_term = erts_bld_uint(&hp, NULL, memory);
        res = MAP2(hp, am_count, count_term, am_memory, memory_term);
        return res;
    }
}
895
896 #undef DECL_AM
897
898 static Eterm
alloc_trap_data(Process * c_p)899 alloc_trap_data(Process* c_p)
900 {
901 Binary* mbp = erts_create_magic_binary(sizeof(TrapData),
902 cleanup_trap_data);
903 Eterm* hp;
904
905 hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
906 return erts_mk_magic_ref(&hp, &MSO(c_p), mbp);
907 }
908
909 static int
cleanup_trap_data(Binary * bp)910 cleanup_trap_data(Binary *bp)
911 {
912 TrapData* trap_data = ERTS_MAGIC_BIN_DATA(bp);
913
914 if (trap_data->table) {
915 /*
916 * The process has been killed and is now exiting.
917 * Decrement the reference counter for the table.
918 */
919 dec_table_refc(NULL, trap_data->table);
920 }
921 return 1;
922 }
923
/*
 * Find the slot for `key` using linear probing.  Returns the index
 * of either the slot holding key's {Key,Value} tuple or the first
 * empty (NIL) slot in the probe sequence — callers distinguish the
 * two cases by inspecting the slot.  Termination relies on the
 * table never being full (the load factor is capped by MUST_GROW).
 */
static Uint
lookup(HashTable* hash_table, Eterm key)
{
    Uint mask = hash_table->mask;
    Eterm* table = hash_table->term;
    Uint32 idx = make_internal_hash(key, 0);
    Eterm term;

    do {
        /* Increment first: the probe sequence starts at (hash+1) & mask. */
        idx++;
        term = table[idx & mask];
    } while (is_boxed(term) && !EQ(key, (tuple_val(term))[1]));
    return idx & mask;
}
938
/*
 * Trappable table copy.  Copies ctx->old_table into a freshly
 * allocated table with ctx->new_size slots, performing at most
 * ctx->max_iterations slot operations per call.  Returns the new
 * table when finished; returns NULL when the budget is exhausted,
 * in which case ctx records the resume point and the caller must
 * call again with the same ctx.  ctx->total_iterations_done reports
 * the work performed by this call.
 */
static HashTable*
copy_table(ErtsPersistentTermCpyTableCtx* ctx)
{
    Uint old_size = ctx->old_table->allocated;
    Uint i;
    ErtsAlcType_t alloc_type;
    ctx->total_iterations_done = 0;
    /* Resume at the phase where the previous call ran out of budget. */
    switch(ctx->location) {
    case ERTS_PERSISTENT_TERM_CPY_PLACE_1: goto L_copy_table_place_1;
    case ERTS_PERSISTENT_TERM_CPY_PLACE_2: goto L_copy_table_place_2;
    case ERTS_PERSISTENT_TERM_CPY_PLACE_3: goto L_copy_table_place_3;
    case ERTS_PERSISTENT_TERM_CPY_PLACE_START:
        ctx->iterations_done = 0;
    }
    if (ctx->copy_type == ERTS_PERSISTENT_TERM_CPY_TEMP) {
        alloc_type = ERTS_ALC_T_PERSISTENT_TERM_TMP;
    } else {
        alloc_type = ERTS_ALC_T_PERSISTENT_TERM;
    }
    ctx->new_table = (HashTable *) erts_alloc(alloc_type,
                                              sizeof(HashTable) +
                                              sizeof(Eterm) * (ctx->new_size-1));
    if (ctx->old_table->allocated == ctx->new_size &&
        (ctx->copy_type == ERTS_PERSISTENT_TERM_CPY_NO_REHASH ||
         ctx->copy_type == ERTS_PERSISTENT_TERM_CPY_TEMP)) {
        /*
         * Same size and no key deleted. Make an exact copy of the table.
         */
        *ctx->new_table = *ctx->old_table;
    L_copy_table_place_1:
        for (i = ctx->iterations_done;
             i < MIN(ctx->iterations_done + ctx->max_iterations,
                     ctx->new_size);
             i++) {
            ctx->new_table->term[i] = ctx->old_table->term[i];
        }
        ctx->total_iterations_done = (i - ctx->iterations_done);
        if (i < ctx->new_size) {
            /* Budget exhausted: remember progress and yield. */
            ctx->iterations_done = i;
            ctx->location = ERTS_PERSISTENT_TERM_CPY_PLACE_1;
            return NULL;
        }
        ctx->iterations_done = 0;
    } else {
        /*
         * The size of the table has changed or an element has been
         * deleted. Must rehash, by inserting all old terms into the
         * new (empty) table.
         */
        ctx->new_table->allocated = ctx->new_size;
        ctx->new_table->num_entries = ctx->old_table->num_entries;
        ctx->new_table->mask = ctx->new_size - 1;
        /* Rehash phase 1: clear every slot in the new table. */
    L_copy_table_place_2:
        for (i = ctx->iterations_done;
             i < MIN(ctx->iterations_done + ctx->max_iterations,
                     ctx->new_size);
             i++) {
            ctx->new_table->term[i] = NIL;
        }
        ctx->total_iterations_done = (i - ctx->iterations_done);
        ctx->max_iterations -= ctx->total_iterations_done;
        if (i < ctx->new_size) {
            ctx->iterations_done = i;
            ctx->location = ERTS_PERSISTENT_TERM_CPY_PLACE_2;
            return NULL;
        }
        ctx->iterations_done = 0;
        /* Rehash phase 2: re-insert every stored tuple. */
    L_copy_table_place_3:
        for (i = ctx->iterations_done;
             i < MIN(ctx->iterations_done + ctx->max_iterations,
                     old_size);
             i++) {
            if (is_tuple(ctx->old_table->term[i])) {
                Eterm key = tuple_val(ctx->old_table->term[i])[1];
                Uint entry_index = lookup(ctx->new_table, key);
                ASSERT(is_nil(ctx->new_table->term[entry_index]));
                ctx->new_table->term[entry_index] = ctx->old_table->term[i];
            }
        }
        ctx->total_iterations_done += (i - ctx->iterations_done);
        if (i < old_size) {
            ctx->iterations_done = i;
            ctx->location = ERTS_PERSISTENT_TERM_CPY_PLACE_3;
            return NULL;
        }
        ctx->iterations_done = 0;
    }
    ctx->new_table->first_to_delete = 0;
    ctx->new_table->num_to_delete = 0;
    erts_atomic_init_nob(&ctx->new_table->refc, (erts_aint_t)1);
    {
        HashTable* new_table = ctx->new_table;
        /*
         * IMPORTANT: Memory management depends on that ctx->new_table is
         * set to NULL on the line below
         */
        ctx->new_table = NULL;
        return new_table;
    }
}
1039
1040 static void
mark_for_deletion(HashTable * hash_table,Uint entry_index)1041 mark_for_deletion(HashTable* hash_table, Uint entry_index)
1042 {
1043 hash_table->first_to_delete = entry_index;
1044 hash_table->num_to_delete = 1;
1045 }
1046
1047 static ErtsLiteralArea*
term_to_area(Eterm tuple)1048 term_to_area(Eterm tuple)
1049 {
1050 ASSERT(is_tuple_arity(tuple, 2));
1051 return (ErtsLiteralArea *) (((char *) tuple_val(tuple)) -
1052 offsetof(ErtsLiteralArea, start));
1053 }
1054
1055 static void
table_updater(void * data)1056 table_updater(void* data)
1057 {
1058 HashTable* old_table;
1059 HashTable* new_table;
1060
1061 old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
1062 new_table = (HashTable *) data;
1063 ASSERT(new_table->num_to_delete == 0);
1064 erts_atomic_set_nob(&the_hash_table, (erts_aint_t)new_table);
1065 append_to_delete_queue(old_table);
1066 erts_schedule_thr_prgr_later_op(table_deleter,
1067 old_table,
1068 &old_table->thr_prog_op);
1069 release_update_permission(1);
1070 }
1071
1072 static void
table_deleter(void * data)1073 table_deleter(void* data)
1074 {
1075 HashTable* old_table = (HashTable *) data;
1076
1077 dec_table_refc(NULL, old_table);
1078 }
1079
1080 static void
dec_table_refc(Process * c_p,HashTable * old_table)1081 dec_table_refc(Process* c_p, HashTable* old_table)
1082 {
1083 erts_aint_t refc = erts_atomic_dec_read_nob(&old_table->refc);
1084
1085 if (refc == 0) {
1086 HashTable* to_delete;
1087
1088 while ((to_delete = next_to_delete()) != NULL) {
1089 delete_table(c_p, to_delete);
1090 }
1091 }
1092 }
1093
1094 static void
delete_table(Process * c_p,HashTable * table)1095 delete_table(Process* c_p, HashTable* table)
1096 {
1097 Uint idx = table->first_to_delete;
1098 Uint n = table->num_to_delete;
1099
1100 /*
1101 * There are no longer any references to this hash table.
1102 *
1103 * Any literals pointed for deletion can be queued for
1104 * deletion and the table itself can be deallocated.
1105 */
1106
1107 #ifdef DEBUG
1108 if (n == 1) {
1109 ASSERT(is_tuple_arity(table->term[idx], 2));
1110 }
1111 #endif
1112
1113 while (n > 0) {
1114 Eterm term = table->term[idx];
1115
1116 if (is_tuple_arity(term, 2)) {
1117 if (is_immed(tuple_val(term)[2])) {
1118 erts_release_literal_area(term_to_area(term));
1119 } else {
1120 erts_queue_release_literals(c_p, term_to_area(term));
1121 }
1122 }
1123 idx++, n--;
1124 }
1125 erts_free(ERTS_ALC_T_PERSISTENT_TERM, table);
1126 }
1127
1128 /*
1129 * Caller *must* yield if this function returns 0.
1130 */
1131
1132 static int
try_seize_update_permission(Process * c_p)1133 try_seize_update_permission(Process* c_p)
1134 {
1135 int success;
1136
1137 ASSERT(!erts_thr_progress_is_blocking()); /* to avoid deadlock */
1138 ASSERT(c_p != NULL);
1139
1140 erts_mtx_lock(&update_table_permission_mtx);
1141 ASSERT(updater_process != c_p);
1142 success = (updater_process == NULL);
1143 if (success) {
1144 updater_process = c_p;
1145 } else {
1146 struct update_queue_item* qitem;
1147 qitem = erts_alloc(ERTS_ALC_T_PERSISTENT_LOCK_Q, sizeof(*qitem));
1148 qitem->p = c_p;
1149 erts_proc_inc_refc(c_p);
1150 qitem->next = update_queue;
1151 update_queue = qitem;
1152 erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
1153 }
1154 erts_mtx_unlock(&update_table_permission_mtx);
1155 return success;
1156 }
1157
1158 static void
release_update_permission(int release_updater)1159 release_update_permission(int release_updater)
1160 {
1161 erts_mtx_lock(&update_table_permission_mtx);
1162 ASSERT(updater_process != NULL);
1163
1164 if (release_updater) {
1165 erts_proc_lock(updater_process, ERTS_PROC_LOCK_STATUS);
1166 if (!ERTS_PROC_IS_EXITING(updater_process)) {
1167 erts_resume(updater_process, ERTS_PROC_LOCK_STATUS);
1168 }
1169 erts_proc_unlock(updater_process, ERTS_PROC_LOCK_STATUS);
1170 erts_proc_dec_refc(updater_process);
1171 }
1172 updater_process = NULL;
1173
1174 while (update_queue != NULL) { /* Unleash the entire herd */
1175 struct update_queue_item* qitem = update_queue;
1176 erts_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
1177 if (!ERTS_PROC_IS_EXITING(qitem->p)) {
1178 erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
1179 }
1180 erts_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
1181 update_queue = qitem->next;
1182 erts_proc_dec_refc(qitem->p);
1183 erts_free(ERTS_ALC_T_PERSISTENT_LOCK_Q, qitem);
1184 }
1185 erts_mtx_unlock(&update_table_permission_mtx);
1186 }
1187
static void
suspend_updater(Process* c_p)
{
    /*
     * Suspend the current updater process (it will be resumed by
     * release_update_permission() once the scheduled table update
     * has completed).
     */
#ifdef DEBUG
    ASSERT(c_p != NULL);
    erts_mtx_lock(&update_table_permission_mtx);
    ASSERT(updater_process == c_p);
    erts_mtx_unlock(&update_table_permission_mtx);
#endif
    /* Keep the process alive while suspended; the matching
     * erts_proc_dec_refc() is done in release_update_permission(). */
    erts_proc_inc_refc(c_p);
    erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
}
1200
static void
append_to_delete_queue(HashTable* table)
{
    /* Link a retired table onto the tail of the delete queue;
     * tables are later reclaimed in order by next_to_delete(). */
    erts_mtx_lock(&delete_queue_mtx);
    table->delete_next = NULL;
    *delete_queue_tail = table;
    delete_queue_tail = &table->delete_next;
    erts_mtx_unlock(&delete_queue_mtx);
}
1210
1211 static HashTable*
next_to_delete(void)1212 next_to_delete(void)
1213 {
1214 HashTable* table;
1215
1216 erts_mtx_lock(&delete_queue_mtx);
1217 table = delete_queue_head;
1218 if (table) {
1219 if (erts_atomic_read_nob(&table->refc)) {
1220 /*
1221 * This hash table is still referenced. Hash tables
1222 * must be deleted in order, so we return a NULL
1223 * pointer.
1224 */
1225 table = NULL;
1226 } else {
1227 /*
1228 * Remove the first hash table from the queue.
1229 */
1230 delete_queue_head = table->delete_next;
1231 if (delete_queue_head == NULL) {
1232 delete_queue_tail = &delete_queue_head;
1233 }
1234 }
1235 }
1236 erts_mtx_unlock(&delete_queue_mtx);
1237 return table;
1238 }
1239
1240 /*
1241 * test/debug functionality follow...
1242 */
1243
1244 static Uint accessed_literal_areas_size;
1245 static Uint accessed_no_literal_areas;
1246 static ErtsLiteralArea **accessed_literal_areas;
1247
1248 int
erts_debug_have_accessed_literal_area(ErtsLiteralArea * lap)1249 erts_debug_have_accessed_literal_area(ErtsLiteralArea *lap)
1250 {
1251 Uint i;
1252 for (i = 0; i < accessed_no_literal_areas; i++) {
1253 if (accessed_literal_areas[i] == lap)
1254 return !0;
1255 }
1256 return 0;
1257 }
1258
1259 void
erts_debug_save_accessed_literal_area(ErtsLiteralArea * lap)1260 erts_debug_save_accessed_literal_area(ErtsLiteralArea *lap)
1261 {
1262 if (accessed_no_literal_areas == accessed_literal_areas_size) {
1263 accessed_literal_areas_size += 10;
1264 accessed_literal_areas = erts_realloc(ERTS_ALC_T_TMP,
1265 accessed_literal_areas,
1266 (sizeof(ErtsLiteralArea *)
1267 *accessed_literal_areas_size));
1268 }
1269 accessed_literal_areas[accessed_no_literal_areas++] = lap;
1270 }
1271
static void debug_foreach_off_heap(HashTable *tbl, void (*func)(ErlOffHeap *, void *), void *arg)
{
    /*
     * Call `func` once for the off-heap list of each distinct literal
     * area stored in `tbl`, using the accessed_literal_areas record
     * to skip areas already visited.
     *
     * Fix: the loop index was `int`, but tbl->allocated is a Uint;
     * that caused a signed/unsigned comparison and would truncate on
     * tables larger than INT_MAX slots. Use Uint for the index.
     */
    Uint i;

    for (i = 0; i < tbl->allocated; i++) {
        Eterm term = tbl->term[i];
        if (is_tuple_arity(term, 2)) {
            ErtsLiteralArea *lap = term_to_area(term);
            ErlOffHeap oh;
            if (!erts_debug_have_accessed_literal_area(lap)) {
                ERTS_INIT_OFF_HEAP(&oh);
                oh.first = lap->off_heap;
                (*func)(&oh, arg);
                erts_debug_save_accessed_literal_area(lap);
            }
        }
    }
}
1290
/* Bundles a callback and its opaque argument so they can be passed
 * through the single void* parameter of erts_debug_later_op_foreach(). */
struct debug_la_oh {
    void (*func)(ErlOffHeap *, void *);  /* applied to each off-heap list */
    void *arg;                           /* passed through to func */
};
1295
static void debug_handle_table(void *vfap,
                               ErtsThrPrgrVal val,
                               void *vtbl)
{
    /* Adapter for erts_debug_later_op_foreach(): unpack the
     * callback/argument pair and walk the table's off-heap data. */
    struct debug_la_oh *fap = (struct debug_la_oh *) vfap;
    HashTable *tbl = (HashTable *) vtbl;

    (void) val; /* thread-progress value is not needed here */
    debug_foreach_off_heap(tbl, fap->func, fap->arg);
}
1304
void
erts_debug_foreach_persistent_term_off_heap(void (*func)(ErlOffHeap *, void *), void *arg)
{
    /*
     * Debug helper: invoke `func` for the off-heap lists of every
     * literal area reachable from persistent terms -- the current
     * table, tables queued for deletion, tables held by scheduled
     * updater/deleter later-ops, and areas pending literal release.
     */
    HashTable *tbl;
    struct debug_la_oh fa;
    /* (Re)initialize the visited-areas record used to deduplicate. */
    accessed_no_literal_areas = 0;
    accessed_literal_areas_size = 10;
    accessed_literal_areas = erts_alloc(ERTS_ALC_T_TMP,
                                        (sizeof(ErtsLiteralArea *)
                                         * accessed_literal_areas_size));

    /* 1. The currently installed table. */
    tbl = (HashTable *) erts_atomic_read_nob(&the_hash_table);
    debug_foreach_off_heap(tbl, func, arg);
    /* 2. Tables waiting in the delete queue. */
    erts_mtx_lock(&delete_queue_mtx);
    for (tbl = delete_queue_head; tbl; tbl = tbl->delete_next)
        debug_foreach_off_heap(tbl, func, arg);
    erts_mtx_unlock(&delete_queue_mtx);
    /* 3. Tables referenced by pending thread-progress later-ops. */
    fa.func = func;
    fa.arg = arg;
    erts_debug_later_op_foreach(table_updater,
                                debug_handle_table,
                                (void *) &fa);
    erts_debug_later_op_foreach(table_deleter,
                                debug_handle_table,
                                (void *) &fa);
    /* 4. Literal areas already queued for release. */
    erts_debug_foreach_release_literal_area_off_heap(func, arg);

    /* Tear down the visited-areas record. */
    erts_free(ERTS_ALC_T_TMP, accessed_literal_areas);
    accessed_no_literal_areas = 0;
    accessed_literal_areas_size = 0;
    accessed_literal_areas = NULL;
}
1337
1338