/*
 * %CopyrightBegin%
 *
 * Copyright Ericsson AB 2008-2017. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * %CopyrightEnd%
 */

#ifdef HAVE_CONFIG_H
#  include "config.h"
#endif

#ifdef ERTS_ENABLE_LOCK_COUNT

#include "sys.h"

#include "global.h"

#include "erl_lock_count.h"
#include "erl_thr_progress.h"

#include "erl_node_tables.h"
#include "erl_alloc_util.h"
#include "erl_check_io.h"
#include "erl_poll.h"
#include "erl_db.h"

#define LCNT_MAX_CARRIER_ENTRIES 255

/* - Locals that are shared with the header implementation - */

#ifdef DEBUG
int lcnt_initialization_completed__;
#endif

erts_lock_flags_t lcnt_category_mask__;
ethr_tsd_key lcnt_thr_data_key__;

const int lcnt_log2_tab64__[64] = {
    63,  0, 58,  1, 59, 47, 53,  2,
    60, 39, 48, 27, 54, 33, 42,  3,
    61, 51, 37, 40, 49, 18, 28, 20,
    55, 30, 34, 11, 43, 14, 22,  4,
    62, 57, 46, 52, 38, 26, 32, 41,
    50, 36, 17, 19, 29, 10, 13, 21,
    56, 45, 25, 31, 35, 16,  9, 12,
    44, 24, 15,  8, 23,  7,  6,  5};

/* - Local variables - */

typedef struct lcnt_static_lock_ref_ {
    erts_lcnt_ref_t *reference;

    erts_lock_flags_t flags;
    const char *name;
    Eterm id;

    struct lcnt_static_lock_ref_ *next;
} lcnt_static_lock_ref_t;

static ethr_atomic_t lcnt_static_lock_registry;

static erts_lcnt_lock_info_list_t lcnt_current_lock_list;
static erts_lcnt_lock_info_list_t lcnt_deleted_lock_list;

static erts_lcnt_time_t lcnt_timer_start;

static int lcnt_preserve_info;

/* - Local functions - */

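/* Resets all per-location statistics for the given lock, zeroing histograms
 * and counters and leaving a single blank location slot in use. */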
static void lcnt_clear_stats(erts_lcnt_lock_info_t *info) {
    size_t i;

    for(i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
        erts_lcnt_lock_stats_t *stats = &info->location_stats[i];

        sys_memzero(&stats->wait_time_histogram, sizeof(stats->wait_time_histogram));

        stats->total_time_waited.s = 0;
        stats->total_time_waited.ns = 0;

        stats->times_waited = 0;

        stats->file = NULL;
        stats->line = 0;

        ethr_atomic_set(&stats->attempts, 0);
        ethr_atomic_set(&stats->collisions, 0);
    }

    info->location_count = 1;
}

static lcnt_thread_data_t__ *lcnt_thread_data_alloc(void) {
    lcnt_thread_data_t__ *eltd =
        (lcnt_thread_data_t__*)malloc(sizeof(lcnt_thread_data_t__));

    if(!eltd) {
        ERTS_INTERNAL_ERROR("Failed to allocate lcnt thread data.");
    }

    eltd->timer_set = 0;
    eltd->lock_in_conflict = 0;

    return eltd;
}

/* - List operations -
 *
 * Info entries are kept in a doubly linked list where each entry is locked
 * with its neighbors rather than a global lock. Deletion is rather quick, but
 * insertion is still serial since the head becomes a de facto global lock.
 *
 * We rely on ad-hoc spinlocks to avoid "recursing" into this module. */

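/* The spin helpers pause cheaply in the common case, but yield the thread
 * outright every LCNT_SPINLOCK_YIELD_ITERATIONS failed attempts so a
 * descheduled lock holder isn't starved. */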
#define LCNT_SPINLOCK_YIELD_ITERATIONS 50

#define LCNT_SPINLOCK_HELPER_INIT \
    Uint failed_spin_count = 0;

#define LCNT_SPINLOCK_HELPER_YIELD \
    do { \
        failed_spin_count++; \
        if(!(failed_spin_count % LCNT_SPINLOCK_YIELD_ITERATIONS)) { \
            erts_thr_yield(); \
        } else { \
            ERTS_SPIN_BODY; \
        } \
    } while(0)

static void lcnt_unlock_list_entry(erts_lcnt_lock_info_t *info) {
    ethr_atomic32_set_relb(&info->lock, 0);
}

static int lcnt_try_lock_list_entry(erts_lcnt_lock_info_t *info) {
    return ethr_atomic32_cmpxchg_acqb(&info->lock, 1, 0) == 0;
}

static void lcnt_lock_list_entry(erts_lcnt_lock_info_t *info) {
    LCNT_SPINLOCK_HELPER_INIT;

    while(!lcnt_try_lock_list_entry(info)) {
        LCNT_SPINLOCK_HELPER_YIELD;
    }
}

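/* Locks an entry together with both of its neighbors. The locks are tried in
 * entry -> next -> prev order, and on any failure everything acquired so far
 * is released before retrying, so threads working on adjacent entries cannot
 * deadlock against each other. */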
static void lcnt_lock_list_entry_with_neighbors(erts_lcnt_lock_info_t *info) {
    LCNT_SPINLOCK_HELPER_INIT;

    for(;;) {
        if(!lcnt_try_lock_list_entry(info))
            goto retry_after_entry_failed;
        if(!lcnt_try_lock_list_entry(info->next))
            goto retry_after_next_failed;
        if(!lcnt_try_lock_list_entry(info->prev))
            goto retry_after_prev_failed;

        return;

    retry_after_prev_failed:
        lcnt_unlock_list_entry(info->next);
    retry_after_next_failed:
        lcnt_unlock_list_entry(info);
    retry_after_entry_failed:
        LCNT_SPINLOCK_HELPER_YIELD;
    }
}

static void lcnt_unlock_list_entry_with_neighbors(erts_lcnt_lock_info_t *info) {
    lcnt_unlock_list_entry(info->prev);
    lcnt_unlock_list_entry(info->next);
    lcnt_unlock_list_entry(info);
}

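/* Inserts a single entry directly after the list head, holding only the head
 * and its current successor while splicing. */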
static void lcnt_insert_list_entry(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t *info) {
    erts_lcnt_lock_info_t *next, *prev;

    prev = &list->head;

    lcnt_lock_list_entry(prev);

    next = prev->next;

    lcnt_lock_list_entry(next);

    info->next = next;
    info->prev = prev;

    prev->next = info;
    next->prev = info;

    lcnt_unlock_list_entry(next);
    lcnt_unlock_list_entry(prev);
}

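/* Chains the carrier's entries to one another and then splices the whole run
 * in after the list head in one go, again holding only the head and its
 * successor. */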
static void lcnt_insert_list_carrier(erts_lcnt_lock_info_list_t *list,
                                     erts_lcnt_lock_info_carrier_t *carrier) {
    erts_lcnt_lock_info_t *next, *prev;
    size_t i;

    for(i = 0; i < carrier->entry_count; i++) {
        erts_lcnt_lock_info_t *info = &carrier->entries[i];

        info->prev = &carrier->entries[i - 1];
        info->next = &carrier->entries[i + 1];
    }

    prev = &list->head;

    lcnt_lock_list_entry(prev);

    next = prev->next;

    lcnt_lock_list_entry(next);

    next->prev = &carrier->entries[carrier->entry_count - 1];
    carrier->entries[carrier->entry_count - 1].next = next;

    prev->next = &carrier->entries[0];
    carrier->entries[0].prev = prev;

    lcnt_unlock_list_entry(next);
    lcnt_unlock_list_entry(prev);
}

static void lcnt_init_list(erts_lcnt_lock_info_list_t *list) {
    /* Ensure that ref_count operations explode when touching the sentinels in
     * DEBUG mode. */
    ethr_atomic_init(&(list->head.ref_count), -1);
    ethr_atomic_init(&(list->tail.ref_count), -1);

    ethr_atomic32_init(&(list->head.lock), 0);
    (list->head).next = &list->tail;
    (list->head).prev = &list->tail;

    ethr_atomic32_init(&(list->tail.lock), 0);
    (list->tail).next = &list->head;
    (list->tail).prev = &list->head;
}

/* - Carrier operations - */

int lcnt_thr_progress_unmanaged_delay__(void) {
    return erts_thr_progress_unmanaged_delay();
}

void lcnt_thr_progress_unmanaged_continue__(int handle) {
    return erts_thr_progress_unmanaged_continue(handle);
}

void lcnt_deallocate_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
    ASSERT(ethr_atomic_read(&carrier->ref_count) == 0);
    erts_free(ERTS_ALC_T_LCNT_CARRIER, (void*)carrier);
}

static void lcnt_thr_prg_cleanup_carrier(void *data) {
    erts_lcnt_lock_info_carrier_t *carrier = data;
    size_t entry_count, i;

    /* carrier->entry_count will be replaced with garbage if it's deallocated
     * on the final iteration, so we'll tuck it away to get a clean exit. */
    entry_count = carrier->entry_count;

    for(i = 0; i < entry_count; i++) {
        ASSERT(ethr_atomic_read(&carrier->ref_count) >= (entry_count - i));

        erts_lcnt_release_lock_info(&carrier->entries[i]);
    }
}

static void lcnt_schedule_carrier_cleanup(void *data) {
    ErtsSchedulerData *esdp = erts_get_scheduler_data();

    /* We can't issue cleanup jobs on anything other than normal schedulers, so
     * we move to the first scheduler if required. */

    if(!esdp || esdp->type != ERTS_SCHED_NORMAL) {
        erts_schedule_misc_aux_work(1, &lcnt_schedule_carrier_cleanup, data);
    } else {
        erts_lcnt_lock_info_carrier_t *carrier = data;
        size_t carrier_size;

        carrier_size = sizeof(erts_lcnt_lock_info_carrier_t) +
                       sizeof(erts_lcnt_lock_info_t) * carrier->entry_count;

        erts_schedule_thr_prgr_later_cleanup_op(&lcnt_thr_prg_cleanup_carrier,
            data, (ErtsThrPrgrLaterOp*)&carrier->release_entries, carrier_size);
    }
}

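/* An entry is deallocated by dropping its reference to the carrier; the
 * carrier itself is freed once every entry has done so. */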
static void lcnt_info_deallocate(erts_lcnt_lock_info_t *info) {
    lcnt_release_carrier__(info->carrier);
}

static void lcnt_info_dispose(erts_lcnt_lock_info_t *info) {
    ASSERT(ethr_atomic_read(&info->ref_count) == 0);

    if(lcnt_preserve_info) {
        ethr_atomic_set(&info->ref_count, 1);

        /* Move straight to deallocation the next time around. */
        info->dispose = &lcnt_info_deallocate;

        lcnt_insert_list_entry(&lcnt_deleted_lock_list, info);
    } else {
        lcnt_info_deallocate(info);
    }
}

static void lcnt_lock_info_init_helper(erts_lcnt_lock_info_t *info) {
    ethr_atomic_init(&info->ref_count, 1);
    ethr_atomic32_init(&info->lock, 0);

    ethr_atomic_init(&info->r_state, 0);
    ethr_atomic_init(&info->w_state, 0);

    info->dispose = &lcnt_info_dispose;

    lcnt_clear_stats(info);
}

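/* Allocates a carrier with entry_count info slots. The carrier's reference
 * count starts out equal to the number of entries, so it lives until every
 * entry has been released. */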
erts_lcnt_lock_info_carrier_t *erts_lcnt_create_lock_info_carrier(int entry_count) {
    erts_lcnt_lock_info_carrier_t *result;
    size_t carrier_size, i;

    ASSERT(entry_count > 0 && entry_count <= LCNT_MAX_CARRIER_ENTRIES);
    ASSERT(lcnt_initialization_completed__);

    carrier_size = sizeof(erts_lcnt_lock_info_carrier_t) +
                   sizeof(erts_lcnt_lock_info_t) * entry_count;

    result = (erts_lcnt_lock_info_carrier_t*)erts_alloc(ERTS_ALC_T_LCNT_CARRIER, carrier_size);
    result->entry_count = entry_count;

    ethr_atomic_init(&result->ref_count, entry_count);

    for(i = 0; i < entry_count; i++) {
        erts_lcnt_lock_info_t *info = &result->entries[i];

        lcnt_lock_info_init_helper(info);

        info->carrier = result;
    }

    return result;
}

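/* Installs a carrier on a lock reference with a compare-and-swap against
 * NULL. If another thread installed a carrier first, ours is simply thrown
 * away; otherwise its entries become visible in the current lock list. */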
void erts_lcnt_install(erts_lcnt_ref_t *ref, erts_lcnt_lock_info_carrier_t *carrier) {
    ethr_sint_t swapped_carrier;

#ifdef DEBUG
    int i;

    /* Verify that all locks share the same categories/static property; all
     * other flags are fair game. */
    for(i = 1; i < carrier->entry_count; i++) {
        const erts_lock_flags_t SIGNIFICANT_DIFF_MASK =
            ERTS_LOCK_FLAGS_MASK_CATEGORY | ERTS_LOCK_FLAGS_PROPERTY_STATIC;

        erts_lcnt_lock_info_t *previous, *current;

        previous = &carrier->entries[i - 1];
        current = &carrier->entries[i];

        ASSERT(!((previous->flags ^ current->flags) & SIGNIFICANT_DIFF_MASK));
    }
#endif

    swapped_carrier = ethr_atomic_cmpxchg_mb(ref, (ethr_sint_t)carrier, (ethr_sint_t)NULL);

    if(swapped_carrier != (ethr_sint_t)NULL) {
#ifdef DEBUG
        ASSERT(ethr_atomic_read(&carrier->ref_count) == carrier->entry_count);
        ethr_atomic_set(&carrier->ref_count, 0);
#endif

        lcnt_deallocate_carrier__(carrier);
    } else {
        lcnt_insert_list_carrier(&lcnt_current_lock_list, carrier);
    }
}

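/* Detaches the carrier from its reference with a compare-and-swap and, if we
 * won the race, schedules its cleanup through the thread progress mechanism
 * since other threads may still be reading the entries. */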
void erts_lcnt_uninstall(erts_lcnt_ref_t *ref) {
    ethr_sint_t previous_carrier, swapped_carrier;

    previous_carrier = ethr_atomic_read(ref);
    swapped_carrier = ethr_atomic_cmpxchg_mb(ref, (ethr_sint_t)NULL, previous_carrier);

    if(previous_carrier && previous_carrier == swapped_carrier) {
        lcnt_schedule_carrier_cleanup((void*)previous_carrier);
    }
}

/* - Static lock registry -
 *
 * Since static locks can be trusted to never disappear, we can track them
 * pretty cheaply and won't need to bother writing an "erts_lcnt_update_xx"
 * variant. */

static void lcnt_init_static_lock_registry(void) {
    ethr_atomic_init(&lcnt_static_lock_registry, (ethr_sint_t)NULL);
}

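/* Walks the registry and brings every static lock in line with the current
 * category mask, uninstalling counters for disabled categories and installing
 * fresh ones for newly enabled categories. */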
static void lcnt_update_static_locks(void) {
    lcnt_static_lock_ref_t *iterator =
        (lcnt_static_lock_ref_t*)ethr_atomic_read(&lcnt_static_lock_registry);

    while(iterator != NULL) {
        if(!erts_lcnt_check_enabled(iterator->flags)) {
            erts_lcnt_uninstall(iterator->reference);
        } else if(!erts_lcnt_check_ref_installed(iterator->reference)) {
            erts_lcnt_lock_info_carrier_t *carrier = erts_lcnt_create_lock_info_carrier(1);

            erts_lcnt_init_lock_info_idx(carrier, 0, iterator->name, iterator->id, iterator->flags);

            erts_lcnt_install(iterator->reference, carrier);
        }

        iterator = iterator->next;
    }
}

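/* Pushes a lock onto the registry, a lock-free singly linked stack, retrying
 * the compare-and-swap until our node becomes the new head. Plain malloc is
 * used since static locks may be registered before erts_alloc is up (see
 * erts_lcnt_late_init). */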
void lcnt_register_static_lock__(erts_lcnt_ref_t *reference, const char *name, Eterm id,
                                 erts_lock_flags_t flags) {
    lcnt_static_lock_ref_t *lock = malloc(sizeof(lcnt_static_lock_ref_t));
    int retry_insertion;

    ASSERT(flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC);

    lock->reference = reference;
    lock->flags = flags;
    lock->name = name;
    lock->id = id;

    do {
        ethr_sint_t swapped_head;

        lock->next = (lcnt_static_lock_ref_t*)ethr_atomic_read(&lcnt_static_lock_registry);

        swapped_head = ethr_atomic_cmpxchg_acqb(
            &lcnt_static_lock_registry,
            (ethr_sint_t)lock,
            (ethr_sint_t)lock->next);

        retry_insertion = (swapped_head != (ethr_sint_t)lock->next);
    } while(retry_insertion);
}

/* - Initialization - */

void erts_lcnt_pre_thr_init() {
    /* Ensure that the dependency hack mentioned in the header doesn't
     * explode at runtime. */
    ERTS_CT_ASSERT(sizeof(LcntThrPrgrLaterOp) >= sizeof(ErtsThrPrgrLaterOp));
    ERTS_CT_ASSERT(ERTS_THR_PRGR_DHANDLE_MANAGED ==
        (ErtsThrPrgrDelayHandle)LCNT_THR_PRGR_DHANDLE_MANAGED);

    lcnt_init_list(&lcnt_current_lock_list);
    lcnt_init_list(&lcnt_deleted_lock_list);

    lcnt_init_static_lock_registry();
}

void erts_lcnt_post_thr_init() {
    /* ASSUMPTION: this is safe since it runs prior to the creation of other
     * threads (directly after ethread init). */

    ethr_tsd_key_create(&lcnt_thr_data_key__, "lcnt_data");

    erts_lcnt_thread_setup();
}

void erts_lcnt_late_init() {
    /* Set start timer and zero all statistics */
    erts_lcnt_clear_counters();
    erts_thr_install_exit_handler(erts_lcnt_thread_exit_handler);

#ifdef DEBUG
    /* It's safe to use erts_alloc and thread progress past this point. */
    lcnt_initialization_completed__ = 1;
#endif
}

void erts_lcnt_post_startup(void) {
    /* Default to capturing everything to match the behavior of the old lock
     * counter build. */
    erts_lcnt_set_category_mask(ERTS_LOCK_FLAGS_MASK_CATEGORY);
}

void erts_lcnt_thread_setup() {
    lcnt_thread_data_t__ *eltd = lcnt_thread_data_alloc();

    ASSERT(eltd);

    ethr_tsd_set(lcnt_thr_data_key__, eltd);
}

void erts_lcnt_thread_exit_handler() {
    lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();

    if(eltd) {
        free(eltd);
    }
}

/* - BIF interface - */

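/* Increments the reference count of a lock info entry. The caller must
 * already hold a reference of its own, hence the >= 2 assertion in DEBUG
 * builds. */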
void erts_lcnt_retain_lock_info(erts_lcnt_lock_info_t *info) {
#ifdef DEBUG
    ASSERT(ethr_atomic_inc_read_acqb(&info->ref_count) >= 2);
#else
    ethr_atomic_inc_acqb(&info->ref_count);
#endif
}

void erts_lcnt_release_lock_info(erts_lcnt_lock_info_t *info) {
    ethr_sint_t count;

    /* We need to acquire the lock before decrementing ref_count to avoid
     * racing with list iteration; there's a short window between reading the
     * reference to info and increasing its ref_count. */
    lcnt_lock_list_entry_with_neighbors(info);

    count = ethr_atomic_dec_read(&info->ref_count);

    ASSERT(count >= 0);

    if(count > 0) {
        lcnt_unlock_list_entry_with_neighbors(info);
    } else {
        (info->next)->prev = info->prev;
        (info->prev)->next = info->next;

        lcnt_unlock_list_entry_with_neighbors(info);

        info->dispose(info);
    }
}

erts_lock_flags_t erts_lcnt_get_category_mask() {
    return lcnt_category_mask__;
}

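/* Updates the category mask and nudges every subsystem whose category
 * membership changed, so that counters for existing locks are installed or
 * uninstalled to match. */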
void erts_lcnt_set_category_mask(erts_lock_flags_t mask) {
    erts_lock_flags_t changed_categories;

    ASSERT(!(mask & ~ERTS_LOCK_FLAGS_MASK_CATEGORY));
    ASSERT(lcnt_initialization_completed__);

    changed_categories = (lcnt_category_mask__ ^ mask);
    lcnt_category_mask__ = mask;

    if(changed_categories) {
        lcnt_update_static_locks();
    }

    if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION) {
        erts_lcnt_update_distribution_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
    }

    if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR) {
        erts_lcnt_update_allocator_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
    }

    if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_PROCESS) {
        erts_lcnt_update_process_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
    }

    if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_IO) {
        erts_lcnt_update_cio_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
        erts_lcnt_update_driver_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
        erts_lcnt_update_port_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
    }

    if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_DB) {
        erts_lcnt_update_db_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_DB);
    }
}

void erts_lcnt_set_preserve_info(int enable) {
    lcnt_preserve_info = enable;
}

int erts_lcnt_get_preserve_info() {
    return lcnt_preserve_info;
}

void erts_lcnt_clear_counters(void) {
    erts_lcnt_lock_info_t *iterator;

    lcnt_time__(&lcnt_timer_start);

    iterator = NULL;
    while(erts_lcnt_iterate_list(&lcnt_current_lock_list, &iterator)) {
        lcnt_clear_stats(iterator);
    }

    iterator = NULL;
    while(erts_lcnt_iterate_list(&lcnt_deleted_lock_list, &iterator)) {
        erts_lcnt_release_lock_info(iterator);
    }
}

erts_lcnt_data_t erts_lcnt_get_data(void) {
    erts_lcnt_time_t timer_stop;
    erts_lcnt_data_t result;

    lcnt_time__(&timer_stop);

    result.timer_start = lcnt_timer_start;

    result.current_locks = &lcnt_current_lock_list;
    result.deleted_locks = &lcnt_deleted_lock_list;

    lcnt_time_diff__(&result.duration, &timer_stop, &result.timer_start);

    return result;
}

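/* Advances *iterator to the next entry in the list, retaining it and
 * releasing the previous one, and returns zero once the tail is reached.
 * Start with *iterator set to NULL, as in erts_lcnt_clear_counters above:
 *
 *     iterator = NULL;
 *     while(erts_lcnt_iterate_list(list, &iterator)) {
 *         ... use iterator ...
 *     }
 */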
int erts_lcnt_iterate_list(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t **iterator) {
    erts_lcnt_lock_info_t *current, *next;

    current = *iterator ? *iterator : &list->head;

    ASSERT(current != &list->tail);

    lcnt_lock_list_entry(current);

    next = current->next;

    if(next != &list->tail) {
        erts_lcnt_retain_lock_info(next);
    }

    lcnt_unlock_list_entry(current);

    if(current != &list->head) {
        erts_lcnt_release_lock_info(current);
    }

    *iterator = next;

    return next != &list->tail;
}

#endif /* #ifdef ERTS_ENABLE_LOCK_COUNT */