1 /*
2 * %CopyrightBegin%
3 *
4 * Copyright Ericsson AB 2007-2018. All Rights Reserved.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * %CopyrightEnd%
19 */
20
21
22 /*
23 * Description: Implementation of Erlang process locks.
24 *
25 * Author: Rickard Green
26 */
27
28 /*
29 * A short explanation of the process lock implementation:
30 * Each process has a lock bitfield and a number of lock wait
31 * queues.
 * The bit field consists of a number of lock flags (L1, L2, ...)
 * and a number of wait flags (W1, W2, ...). Each lock flag has a
 * corresponding wait flag. The bit field isn't guaranteed to be
 * larger than 32 bits, which sets a maximum of 16 different locks
 * per process. Currently, only 4 locks per process are used. The
37 * bit field is operated on by use of atomic operations (custom
38 * made bitwise atomic operations). When a lock is locked the
39 * corresponding lock bit is set. When a thread is waiting on a
40 * lock the wait flag for the lock is set.
 * The process table is protected by pix (process index) locks,
 * which are spinlocks that protect a number of process indices in
 * the process table. The pix locks also protect the lock queues
 * and modifications of wait flags.
45 * When acquiring a process lock we first try to set the lock
46 * flag. If we are able to set the lock flag and the wait flag
47 * isn't set we are done. If the lock flag was already set we
48 * have to acquire the pix lock, set the wait flag, and put
49 * ourselves in the wait queue.
50 * Process locks will always be acquired in fifo order.
51 * When releasing a process lock we first unset all lock flags
52 * whose corresponding wait flag is clear (which will succeed).
53 * If wait flags were set for the locks being released, we acquire
54 * the pix lock, and transfer the lock to the first thread
55 * in the wait queue.
56 * Note that wait flags may be read without the pix lock, but
57 * it is important that wait flags only are modified when the pix
58 * lock is held.
 * This implementation assumes that erts_atomic_or_retold()
 * provides the necessary memory barriers for a lock operation,
 * and that erts_atomic_and_retold() provides the necessary
 * memory barriers for an unlock operation.
63 */
64
65 #ifdef HAVE_CONFIG_H
66 # include "config.h"
67 #endif
68
69 #include "erl_process.h"
70 #include "erl_thr_progress.h"
71
72
73 #if ERTS_PROC_LOCK_OWN_IMPL
74
75 #define ERTS_PROC_LOCK_SPIN_COUNT_MAX 2000
76 #define ERTS_PROC_LOCK_SPIN_COUNT_SCHED_INC 32
77 #define ERTS_PROC_LOCK_SPIN_COUNT_BASE 1000
78 #define ERTS_PROC_LOCK_AUX_SPIN_COUNT 50
79
80 #define ERTS_PROC_LOCK_SPIN_UNTIL_YIELD 25
81
82 #ifdef ERTS_PROC_LOCK_DEBUG
83 #define ERTS_PROC_LOCK_HARD_DEBUG
84 #endif
85
86 #ifdef ERTS_PROC_LOCK_HARD_DEBUG
87 static void check_queue(erts_proc_lock_t *lck);
88 #endif
89
90 #if SIZEOF_INT < 4
91 #error "The size of the 'uflgs' field of the erts_tse_t type is too small"
92 #endif
93
94 static int proc_lock_spin_count;
95 static int aux_thr_proc_lock_spin_count;
96
97 static void cleanup_tse(void);
98
99 #endif /* ERTS_PROC_LOCK_OWN_IMPL */
100
101 #ifdef ERTS_ENABLE_LOCK_CHECK
102 static struct {
103 Sint16 proc_lock_main;
104 Sint16 proc_lock_msgq;
105 Sint16 proc_lock_btm;
106 Sint16 proc_lock_status;
107 Sint16 proc_lock_trace;
108 } lc_id;
109 #endif
110
111 erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS];
112
void
erts_init_proc_lock(int cpus)
{
    int i;
    /* Create the pix (process index) locks that protect process table
     * entries, lock wait queues, and wait flag modifications. */
    for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
        erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock", make_small(i),
            ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
    }
#if ERTS_PROC_LOCK_OWN_IMPL
    /* Return this thread's cached event object to the pool when the
     * thread exits (see cleanup_tse()). */
    erts_thr_install_exit_handler(cleanup_tse);
    /* Scale lock spin counts to the number of cpus; spinning is
     * pointless on a single cpu, so spin counts are zero there. */
    if (cpus > 1) {
        proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE;
        proc_lock_spin_count += (ERTS_PROC_LOCK_SPIN_COUNT_SCHED_INC
                                 * ((int) erts_no_schedulers));
        aux_thr_proc_lock_spin_count = ERTS_PROC_LOCK_AUX_SPIN_COUNT;
    }
    else if (cpus == 1) {
        proc_lock_spin_count = 0;
        aux_thr_proc_lock_spin_count = 0;
    }
    else { /* No of cpus unknown. Assume multi proc, but be conservative. */
        proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE/2;
        aux_thr_proc_lock_spin_count = ERTS_PROC_LOCK_AUX_SPIN_COUNT/2;
    }
    /* Cap the scheduler-scaled spin count. */
    if (proc_lock_spin_count > ERTS_PROC_LOCK_SPIN_COUNT_MAX)
        proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_MAX;
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
    /* Cache the lock checker's order ids for the five process locks. */
    lc_id.proc_lock_main = erts_lc_get_lock_order_id("proc_main");
    lc_id.proc_lock_msgq = erts_lc_get_lock_order_id("proc_msgq");
    lc_id.proc_lock_btm = erts_lc_get_lock_order_id("proc_btm");
    lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status");
    lc_id.proc_lock_trace = erts_lc_get_lock_order_id("proc_trace");
#endif
}
148
149 #if ERTS_PROC_LOCK_OWN_IMPL
150
151 #ifdef ERTS_ENABLE_LOCK_CHECK
152 #define CHECK_UNUSED_TSE(W) ASSERT((W)->uflgs == 0)
153 #else
154 #define CHECK_UNUSED_TSE(W)
155 #endif
156
static ERTS_INLINE erts_tse_t *
tse_fetch(erts_pix_lock_t *pix_lock)
{
    /* Fetch this thread's event object and clear its lock flags so it
     * can be used as a fresh lock waiter.
     * NOTE: pix_lock is unused in this body; the parameter is kept for
     * the call-site interface. */
    erts_tse_t *tse = erts_tse_fetch();
    tse->uflgs = 0;
    return tse;
}
164
static ERTS_INLINE void
tse_return(erts_tse_t *tse)
{
    /* Hand the event object back to the pool; it must have no lock
     * flags left set (asserted when lock checking is enabled). */
    CHECK_UNUSED_TSE(tse);
    erts_tse_return(tse);
}
171
172 static void
cleanup_tse(void)173 cleanup_tse(void)
174 {
175 erts_tse_t *tse = erts_tse_fetch();
176 if (tse)
177 erts_tse_return(tse);
178 }
179
180
181 /*
182 * Waiters are queued in a circular double linked list;
183 * where lck->queue[lock_ix] is the first waiter in queue, and
184 * lck->queue[lock_ix]->prev is the last waiter in queue.
185 */
186
187 static ERTS_INLINE void
enqueue_waiter(erts_proc_lock_t * lck,int ix,erts_tse_t * wtr)188 enqueue_waiter(erts_proc_lock_t *lck, int ix, erts_tse_t *wtr)
189 {
190 if (!lck->queue[ix]) {
191 lck->queue[ix] = wtr;
192 wtr->next = wtr;
193 wtr->prev = wtr;
194 }
195 else {
196 ASSERT(lck->queue[ix]->next && lck->queue[ix]->prev);
197 wtr->next = lck->queue[ix];
198 wtr->prev = lck->queue[ix]->prev;
199 wtr->prev->next = wtr;
200 lck->queue[ix]->prev = wtr;
201 }
202 }
203
204 static erts_tse_t *
dequeue_waiter(erts_proc_lock_t * lck,int ix)205 dequeue_waiter(erts_proc_lock_t *lck, int ix)
206 {
207 erts_tse_t *wtr = lck->queue[ix];
208 ASSERT(lck->queue[ix]);
209 if (wtr->next == wtr) {
210 ASSERT(lck->queue[ix]->prev == wtr);
211 lck->queue[ix] = NULL;
212 }
213 else {
214 ASSERT(wtr->next != wtr);
215 ASSERT(wtr->prev != wtr);
216 wtr->next->prev = wtr->prev;
217 wtr->prev->next = wtr->next;
218 lck->queue[ix] = wtr->next;
219 }
220 return wtr;
221 }
222
/*
 * Tries to acquire as many locks as possible in lock order,
 * and sets the wait flag on the first lock not possible to
 * acquire.
 *
 * Note: We need the pix lock during this operation. Wait
 * flags are only allowed to be manipulated under the pix
 * lock.
 */
static ERTS_INLINE void
try_aquire(erts_proc_lock_t *lck, erts_tse_t *wtr)
{
    ErtsProcLocks got_locks = (ErtsProcLocks) 0;
    ErtsProcLocks locks = wtr->uflgs;   /* locks the waiter wants */
    int lock_no;

    /* The waiter must want at least one lock. */
    ASSERT(got_locks != locks);

    for (lock_no = 0; lock_no <= ERTS_PROC_LOCK_MAX_BIT; lock_no++) {
        ErtsProcLocks lock = ((ErtsProcLocks) 1) << lock_no;
        if (locks & lock) {
            ErtsProcLocks wflg, old_lflgs;
            if (lck->queue[lock_no]) {
                /* Others already waiting; queue behind them in fifo
                 * order and stop — locks above this one must not be
                 * taken until this one has been handed to us. */
            enqueue:
                ASSERT(ERTS_PROC_LOCK_FLGS_READ_(lck)
                       & (lock << ERTS_PROC_LOCK_WAITER_SHIFT));
                enqueue_waiter(lck, lock_no, wtr);
                break;
            }
            /* Atomically set both the wait flag and the lock flag; the
             * returned old value tells whether the lock was free. */
            wflg = lock << ERTS_PROC_LOCK_WAITER_SHIFT;
            old_lflgs = ERTS_PROC_LOCK_FLGS_BOR_ACQB_(lck, wflg | lock);
            if (old_lflgs & lock) {
                /* Didn't get the lock */
                goto enqueue;
            }
            else {
                /* Got the lock */
                got_locks |= lock;
                ASSERT(!(old_lflgs & wflg));
                /* No one else can be waiting for the lock; remove wait flag */
                (void) ERTS_PROC_LOCK_FLGS_BAND_(lck, ~wflg);
                if (got_locks == locks)
                    break;
            }
        }
    }

    /* Leave the locks we could not take set in wtr->uflgs. */
    wtr->uflgs &= ~got_locks;
}
273
/*
 * Transfer 'trnsfr_lcks' held by this executing thread to other
 * threads waiting for the locks. When a lock has been transferred
 * we also have to try to acquire as many locks as possible for the
 * other thread.
 */
static int
transfer_locks(Process *p,
               ErtsProcLocks trnsfr_lcks,
               erts_pix_lock_t *pix_lock,
               int unlock)
{
    int transferred = 0;
    erts_tse_t *wake = NULL;    /* threads to wake, linked via 'next' */
    erts_tse_t *wtr;
    ErtsProcLocks unset_waiter = 0;
    ErtsProcLocks tlocks = trnsfr_lcks;
    int lock_no;

    /* Wait queues and wait flags may only be touched under the pix lock. */
    ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    for (lock_no = 0; tlocks && lock_no <= ERTS_PROC_LOCK_MAX_BIT; lock_no++) {
        ErtsProcLocks lock = ((ErtsProcLocks) 1) << lock_no;
        if (tlocks & lock) {
            /* Transfer lock */
#ifdef ERTS_ENABLE_LOCK_CHECK
            tlocks &= ~lock;
#endif
            /* A lock is only transferred when someone waits for it, so
             * its wait flag must be set. */
            ASSERT(ERTS_PROC_LOCK_FLGS_READ_(&p->lock)
                   & (lock << ERTS_PROC_LOCK_WAITER_SHIFT));
            transferred++;
            wtr = dequeue_waiter(&p->lock, lock_no);
            ASSERT(wtr != NULL);
            if (!p->lock.queue[lock_no])
                unset_waiter |= lock;   /* queue drained; clear flag below */
            /* Hand this lock to the waiter... */
            ASSERT(wtr->uflgs & lock);
            wtr->uflgs &= ~lock;
            /* ...and try to grab any further locks it still needs. */
            if (wtr->uflgs)
                try_aquire(&p->lock, wtr);
            if (!wtr->uflgs) {
                /*
                 * The other thread got all locks it needs;
                 * need to wake it up.
                 */
                wtr->next = wake;
                wake = wtr;
            }
        }

    }

    if (unset_waiter) {
        unset_waiter <<= ERTS_PROC_LOCK_WAITER_SHIFT;
        (void) ERTS_PROC_LOCK_FLGS_BAND_(&p->lock, ~unset_waiter);
    }

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    ASSERT(tlocks == 0); /* We should have transferred all of them */

    if (!wake) {
        if (unlock)
            erts_pix_unlock(pix_lock);
    }
    else {
        /* Wake threads outside the pix lock to keep the critical
         * section short. */
        erts_pix_unlock(pix_lock);

        do {
            erts_tse_t *tmp = wake;
            wake = wake->next;
            erts_atomic32_set_nob(&tmp->uaflgs, 0);
            erts_tse_set(tmp);
        } while (wake);

        if (!unlock)
            erts_pix_lock(pix_lock);
    }
    return transferred;
}
359
360 /*
361 * Determine which locks in 'need_locks' are not currently locked in
362 * 'in_use', but do not return any locks "above" some lock we need,
363 * so we do not attempt to grab locks out of order.
364 *
365 * For example, if we want to lock 10111, and 00100 was already locked, this
366 * would return 00011, indicating we should not try for 10000 yet because
367 * that would be a lock-ordering violation.
368 */
369 static ERTS_INLINE ErtsProcLocks
in_order_locks(ErtsProcLocks in_use,ErtsProcLocks need_locks)370 in_order_locks(ErtsProcLocks in_use, ErtsProcLocks need_locks)
371 {
372 /* All locks we want that are already locked by someone else. */
373 ErtsProcLocks busy = in_use & need_locks;
374
375 /* Just the lowest numbered lock we want that's in use; 0 if none. */
376 ErtsProcLocks lowest_busy = busy & -busy;
377
378 /* All locks below the lowest one we want that's in use already. */
379 return need_locks & (lowest_busy - 1);
380 }
381
382 /*
383 * Try to grab locks one at a time in lock order and wait on the lowest
384 * lock we fail to grab, if any.
385 *
386 * If successful, this returns 0 and all locks in 'need_locks' are held.
387 *
388 * On entry, the pix lock is held iff !ERTS_PROC_LOCK_ATOMIC_IMPL.
389 * On exit it is not held.
390 */
static void
wait_for_locks(Process *p,
               erts_pix_lock_t *pixlck,
               ErtsProcLocks locks,
               ErtsProcLocks need_locks,
               ErtsProcLocks olflgs)
{
    /* Derive the pix lock from the pid if the caller didn't pass one. */
    erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->common.id);
    erts_tse_t *wtr;

    /* Acquire a waiter object on which this thread can wait. */
    wtr = tse_fetch(pix_lock);

    /* Record which locks this waiter needs. */
    wtr->uflgs = need_locks;

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#if ERTS_PROC_LOCK_ATOMIC_IMPL
    erts_pix_lock(pix_lock);
#endif

    ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    /* Try to acquire locks one at a time in lock order and set wait flag */
    try_aquire(&p->lock, wtr);

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    if (wtr->uflgs == 0)
        erts_pix_unlock(pix_lock);
    else {
        /* We didn't get them all; need to wait... */

        ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

        /* uaflgs == 1 marks us as asleep; transfer_locks() clears it
         * before signalling us. */
        erts_atomic32_set_nob(&wtr->uaflgs, 1);
        erts_pix_unlock(pix_lock);

        while (1) {
            int res;
            erts_tse_reset(wtr);

            if (erts_atomic32_read_nob(&wtr->uaflgs) == 0)
                break;

            /*
             * Wait for needed locks. When we are woken all needed locks
             * have been acquired by other threads and transferred to us.
             * However, we need to be prepared for spurious wakeups.
             */
            do {
                res = erts_tse_wait(wtr); /* might return EINTR */
            } while (res != 0);
        }

        ASSERT(wtr->uflgs == 0);
    }

    /* All requested locks must now be held by us. */
    ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));

    tse_return(wtr);
}
462
463 /*
464 * erts_proc_lock_failed() is called when erts_proc_lock()
465 * wasn't able to lock all locks. We may need to transfer locks
466 * to waiters and wait for our turn on locks.
467 *
468 * Iff !ERTS_PROC_LOCK_ATOMIC_IMPL, the pix lock is locked on entry.
469 *
470 * This always returns with the pix lock unlocked.
471 */
void
erts_proc_lock_failed(Process *p,
                      erts_pix_lock_t *pixlck,
                      ErtsProcLocks locks,
                      ErtsProcLocks old_lflgs)
{
    int until_yield = ERTS_PROC_LOCK_SPIN_UNTIL_YIELD;
    int thr_spin_count;
    int spin_count;
    ErtsProcLocks need_locks = locks;     /* locks still to acquire */
    ErtsProcLocks olflgs = old_lflgs;     /* last observed lock flags */

    /* Scheduler (main) threads spin with the scheduler-scaled count;
     * aux threads use the smaller aux count. */
    if (erts_thr_get_main_status())
        thr_spin_count = proc_lock_spin_count;
    else
        thr_spin_count = aux_thr_proc_lock_spin_count;

    spin_count = thr_spin_count;

    while (need_locks != 0) {
        ErtsProcLocks can_grab;

        can_grab = in_order_locks(olflgs, need_locks);

        if (can_grab == 0) {
            /* Someone already has the lowest-numbered lock we want. */

            if (spin_count-- <= 0) {
                /* Too many retries, give up and sleep for the lock. */
                wait_for_locks(p, pixlck, locks, need_locks, olflgs);
                return;
            }

            ERTS_SPIN_BODY;

            /* Yield the cpu periodically while spinning. */
            if (--until_yield == 0) {
                until_yield = ERTS_PROC_LOCK_SPIN_UNTIL_YIELD;
                erts_thr_yield();
            }

            olflgs = ERTS_PROC_LOCK_FLGS_READ_(&p->lock);
        }
        else {
            /* Try to grab all of the grabbable locks at once with cmpxchg. */
            ErtsProcLocks grabbed = olflgs | can_grab;
            ErtsProcLocks nflgs =
                ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(&p->lock, grabbed, olflgs);

            if (nflgs == olflgs) {
                /* Success! We grabbed the 'can_grab' locks. */
                olflgs = grabbed;
                need_locks &= ~can_grab;

                /* Since we made progress, reset the spin count. */
                spin_count = thr_spin_count;
            }
            else {
                /* Compare-and-exchange failed, try again. */
                olflgs = nflgs;
            }
        }
    }

    /* Now we have all of the locks we wanted. */

#if !ERTS_PROC_LOCK_ATOMIC_IMPL
    erts_pix_unlock(pixlck);
#endif
}
541
542 /*
543 * erts_proc_unlock_failed() is called when erts_proc_unlock()
544 * wasn't able to unlock all locks. We may need to transfer locks
545 * to waiters.
546 */
547 void
erts_proc_unlock_failed(Process * p,erts_pix_lock_t * pixlck,ErtsProcLocks wait_locks)548 erts_proc_unlock_failed(Process *p,
549 erts_pix_lock_t *pixlck,
550 ErtsProcLocks wait_locks)
551 {
552 erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->common.id);
553
554 #if ERTS_PROC_LOCK_ATOMIC_IMPL
555 erts_pix_lock(pix_lock);
556 #endif
557
558 transfer_locks(p, wait_locks, pix_lock, 1); /* unlocks pix_lock */
559 }
560
561 #endif /* ERTS_PROC_LOCK_OWN_IMPL */
562
void
erts_proc_lock_prepare_proc_lock_waiter(void)
{
#if ERTS_PROC_LOCK_OWN_IMPL
    /* Warm up this thread's event object so a later contended lock
     * operation finds it already allocated. */
    erts_tse_t *tse = tse_fetch(NULL);
    tse_return(tse);
#endif
}
570
571 /*
572 * proc_safelock() locks process locks on two processes. In order
573 * to avoid a deadlock, proc_safelock() unlocks those locks that
574 * needs to be unlocked, and then acquires locks in lock order
575 * (including the previously unlocked ones).
576 */
577
static void
proc_safelock(int is_managed,
              Process *a_proc,
              ErtsProcLocks a_have_locks,
              ErtsProcLocks a_need_locks,
              Process *b_proc,
              ErtsProcLocks b_have_locks,
              ErtsProcLocks b_need_locks)
{
    Process *p1, *p2;
#ifdef ERTS_ENABLE_LOCK_CHECK
    Eterm pid1, pid2;
#endif
    ErtsProcLocks need_locks1, have_locks1, need_locks2, have_locks2;
    ErtsProcLocks unlock_mask;
    int lock_no, refc1 = 0, refc2 = 0;

    /* b_proc is mandatory; a_proc may be NULL (single-process case). */
    ASSERT(b_proc);


    /* Determine inter process lock order...
     * Locks with the same lock order should be locked on p1 before p2.
     * p1 is the process with the smaller pid; when both arguments refer
     * to the same process the lock sets are merged and p2 is NULL.
     */
    if (a_proc) {
        if (a_proc->common.id < b_proc->common.id) {
            p1 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
            pid1 = a_proc->common.id;
#endif
            need_locks1 = a_need_locks;
            have_locks1 = a_have_locks;
            p2 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
            pid2 = b_proc->common.id;
#endif
            need_locks2 = b_need_locks;
            have_locks2 = b_have_locks;
        }
        else if (a_proc->common.id > b_proc->common.id) {
            p1 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
            pid1 = b_proc->common.id;
#endif
            need_locks1 = b_need_locks;
            have_locks1 = b_have_locks;
            p2 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
            pid2 = a_proc->common.id;
#endif
            need_locks2 = a_need_locks;
            have_locks2 = a_have_locks;
        }
        else {
            ASSERT(a_proc == b_proc);
            ASSERT(a_proc->common.id == b_proc->common.id);
            p1 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
            pid1 = a_proc->common.id;
#endif
            need_locks1 = a_need_locks | b_need_locks;
            have_locks1 = a_have_locks | b_have_locks;
            p2 = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
            pid2 = 0;
#endif
            need_locks2 = 0;
            have_locks2 = 0;
        }
    }
    else {
        p1 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
        pid1 = b_proc->common.id;
#endif
        need_locks1 = b_need_locks;
        have_locks1 = b_have_locks;
        p2 = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
        pid2 = 0;
#endif
        need_locks2 = 0;
        have_locks2 = 0;
#ifdef ERTS_ENABLE_LOCK_CHECK
        a_need_locks = 0;
        a_have_locks = 0;
#endif
    }

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (p1)
        erts_proc_lc_chk_proc_locks(p1, have_locks1);
    if (p2)
        erts_proc_lc_chk_proc_locks(p2, have_locks2);

    /* Every held lock must also be needed; this function never gives
     * locks up for good, it only releases and re-acquires them. */
    if ((need_locks1 & have_locks1) != have_locks1)
        erts_lc_fail("Thread tries to release process lock(s) "
                     "on %T via erts_proc_safelock().", pid1);
    if ((need_locks2 & have_locks2) != have_locks2)
        erts_lc_fail("Thread tries to release process lock(s) "
                     "on %T via erts_proc_safelock().",
                     pid2);
#endif


    /* From here on, need_locks* are the locks still missing. */
    need_locks1 &= ~have_locks1;
    need_locks2 &= ~have_locks2;

    /* Figure out the range of locks that needs to be unlocked... */
    unlock_mask = ERTS_PROC_LOCKS_ALL;
    for (lock_no = 0;
         lock_no <= ERTS_PROC_LOCK_MAX_BIT;
         lock_no++) {
        ErtsProcLocks lock = (1 << lock_no);
        if (lock & need_locks1)
            break;
        unlock_mask &= ~lock;
        if (lock & need_locks2)
            break;
    }

    /* ... and unlock locks in that range... */
    if (have_locks1 || have_locks2) {
        ErtsProcLocks unlock_locks;
        unlock_locks = unlock_mask & have_locks1;
        if (unlock_locks) {
            have_locks1 &= ~unlock_locks;
            need_locks1 |= unlock_locks;
            if (!is_managed && !have_locks1) {
                /* Unmanaged thread about to hold no lock on p1; bump the
                 * reference count — presumably so the process cannot be
                 * deallocated while unlocked (NOTE(review): confirm). */
                refc1 = 1;
                erts_proc_inc_refc(p1);
            }
            erts_proc_unlock(p1, unlock_locks);
        }
        unlock_locks = unlock_mask & have_locks2;
        if (unlock_locks) {
            have_locks2 &= ~unlock_locks;
            need_locks2 |= unlock_locks;
            if (!is_managed && !have_locks2) {
                refc2 = 1;
                erts_proc_inc_refc(p2);
            }
            erts_proc_unlock(p2, unlock_locks);
        }
    }

    /*
     * lock_no equals the number of the first lock to lock on
     * either p1 *or* p2.
     */


#ifdef ERTS_ENABLE_LOCK_CHECK
    if (p1)
        erts_proc_lc_chk_proc_locks(p1, have_locks1);
    if (p2)
        erts_proc_lc_chk_proc_locks(p2, have_locks2);
#endif

    /* Lock locks in lock order...
     * Consecutive locks needed only on one of the processes are taken
     * in one erts_proc_lock() call; p1 is locked before p2 for locks
     * with the same number. */
    while (lock_no <= ERTS_PROC_LOCK_MAX_BIT) {
        ErtsProcLocks locks;
        ErtsProcLocks lock = (1 << lock_no);
        ErtsProcLocks lock_mask = 0;
        if (need_locks1 & lock) {
            do {
                lock = (1 << lock_no++);
                lock_mask |= lock;
            } while (lock_no <= ERTS_PROC_LOCK_MAX_BIT
                     && !(need_locks2 & lock));
            if (need_locks2 & lock)
                lock_no--;
            locks = need_locks1 & lock_mask;
            erts_proc_lock(p1, locks);
            have_locks1 |= locks;
            need_locks1 &= ~locks;
        }
        else if (need_locks2 & lock) {
            while (lock_no <= ERTS_PROC_LOCK_MAX_BIT
                   && !(need_locks1 & lock)) {
                lock_mask |= lock;
                lock = (1 << ++lock_no);
            }
            locks = need_locks2 & lock_mask;
            erts_proc_lock(p2, locks);
            have_locks2 |= locks;
            need_locks2 &= ~locks;
        }
        else
            lock_no++;
    }

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (p1)
        erts_proc_lc_chk_proc_locks(p1, have_locks1);
    if (p2)
        erts_proc_lc_chk_proc_locks(p2, have_locks2);

    if (p1 && p2) {
        if (p1 == a_proc) {
            ASSERT(a_need_locks == have_locks1);
            ASSERT(b_need_locks == have_locks2);
        }
        else {
            ASSERT(a_need_locks == have_locks2);
            ASSERT(b_need_locks == have_locks1);
        }
    }
    else {
        ASSERT(p1);
        if (a_proc) {
            ASSERT(have_locks1 == (a_need_locks | b_need_locks));
        }
        else {
            ASSERT(have_locks1 == b_need_locks);
        }
    }
#endif

    /* Drop the temporary references taken above. */
    if (!is_managed) {
        if (refc1)
            erts_proc_dec_refc(p1);
        if (refc2)
            erts_proc_dec_refc(p2);
    }
}
803
804 void
erts_proc_safelock(Process * a_proc,ErtsProcLocks a_have_locks,ErtsProcLocks a_need_locks,Process * b_proc,ErtsProcLocks b_have_locks,ErtsProcLocks b_need_locks)805 erts_proc_safelock(Process *a_proc,
806 ErtsProcLocks a_have_locks,
807 ErtsProcLocks a_need_locks,
808 Process *b_proc,
809 ErtsProcLocks b_have_locks,
810 ErtsProcLocks b_need_locks)
811 {
812 proc_safelock(erts_get_scheduler_id() != 0,
813 a_proc,
814 a_have_locks,
815 a_need_locks,
816 b_proc,
817 b_have_locks,
818 b_need_locks);
819 }
820
/*
 * Look up the process identified by 'pid' and acquire the
 * 'pid_need_locks' process locks on it (locks already held on the
 * calling process c_p are reused). Returns NULL when the pid is not an
 * internal pid, the process cannot be found, or it is exiting (unless
 * ERTS_P2P_FLG_ALLOW_OTHER_X is set). With ERTS_P2P_FLG_TRY_LOCK the
 * value ERTS_PROC_LOCK_BUSY is returned instead of blocking. With
 * ERTS_P2P_FLG_INC_REFC the returned process has its reference count
 * incremented on behalf of the caller.
 */
Process *
erts_pid2proc_opt(Process *c_p,
                  ErtsProcLocks c_p_have_locks,
                  Eterm pid,
                  ErtsProcLocks pid_need_locks,
                  int flags)
{
    Process *dec_refc_proc = NULL;  /* deferred erts_proc_dec_refc() target */
    ErtsThrPrgrDelayHandle dhndl;
    ErtsProcLocks need_locks;
    Uint pix;
    Process *proc;
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
    ErtsProcLocks lcnt_locks;
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) {
        ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
        if (might_unlock)
            erts_proc_lc_might_unlock(c_p, might_unlock);
    }
#endif

    if (is_not_internal_pid(pid))
        return NULL;
    pix = internal_pid_index(pid);

    ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
    need_locks = pid_need_locks;

    /* Fast path: looking up ourselves — reuse already held locks. */
    if (c_p && c_p->common.id == pid) {
        ASSERT(c_p->common.id != ERTS_INVALID_PID);
        ASSERT(c_p == erts_pix2proc(pix));

        if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            && ERTS_PROC_IS_EXITING(c_p))
            return NULL;
        need_locks &= ~c_p_have_locks;
        if (!need_locks) {
            if (flags & ERTS_P2P_FLG_INC_REFC)
                erts_proc_inc_refc(c_p);
            return c_p;
        }
    }

    /* Delay thread progress so the process struct stays valid while we
     * inspect and lock it. */
    dhndl = erts_thr_progress_unmanaged_delay();

    proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, pix);

    if (proc) {
        if (proc->common.id != pid)
            proc = NULL;    /* slot reused by another process */
        else if (!need_locks) {
            if (flags & ERTS_P2P_FLG_INC_REFC)
                erts_proc_inc_refc(proc);
        }
        else {
            int busy;

#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_COUNT
            lcnt_locks = need_locks;
            if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
                erts_lcnt_proc_lock(&proc->lock, need_locks);
            }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
            /* Make sure erts_pid2proc_safelock() is enough to handle
               a potential lock order violation situation... */
            busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
            if (!busy)
#endif
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
            {
                /* Try a quick trylock to grab all the locks we need. */
                busy = (int) erts_proc_raw_trylock__(proc, need_locks);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
                erts_proc_lc_trylock(proc, need_locks, !busy, __FILE__,__LINE__);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
                if (!busy)
                    erts_proc_lock_op_debug(proc, need_locks, 1);
#endif
            }

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
            if (flags & ERTS_P2P_FLG_TRY_LOCK)
                erts_lcnt_proc_trylock(&proc->lock, need_locks,
                                       busy ? EBUSY : 0);
#endif

            if (!busy) {
                if (flags & ERTS_P2P_FLG_INC_REFC)
                    erts_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                /* all is great */
                if (!(flags & ERTS_P2P_FLG_TRY_LOCK))
                    erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks,
                                               __FILE__, __LINE__);
#endif

            }
            else {
                if (flags & ERTS_P2P_FLG_TRY_LOCK)
                    proc = ERTS_PROC_LOCK_BUSY;
                else {
                    /* Contended: fall back to the deadlock-safe
                     * proc_safelock() path. */
                    int managed;
                    if (flags & ERTS_P2P_FLG_INC_REFC)
                        erts_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                    erts_lcnt_proc_lock_unacquire(&proc->lock, lcnt_locks);
#endif

                    managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED;
                    if (!managed) {
                        /* Keep a reference instead of the thread
                         * progress delay while we may block. */
                        erts_proc_inc_refc(proc);
                        erts_thr_progress_unmanaged_continue(dhndl);
                        dec_refc_proc = proc;

                        /*
                         * We don't want to call
                         * erts_thr_progress_unmanaged_continue()
                         * again.
                         */
                        dhndl = ERTS_THR_PRGR_DHANDLE_MANAGED;
                    }

                    proc_safelock(managed,
                                  c_p,
                                  c_p_have_locks,
                                  c_p_have_locks,
                                  proc,
                                  0,
                                  need_locks);
                }
            }
        }
    }

    if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
        erts_thr_progress_unmanaged_continue(dhndl);

    /* Re-validate after locking: give up if the process started exiting
     * (or, with ALLOW_OTHER_X, if its table slot changed). */
    if (need_locks
        && proc
        && proc != ERTS_PROC_LOCK_BUSY
        && (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            ? ERTS_PROC_IS_EXITING(proc)
            : (proc
               != (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) {

        erts_proc_unlock(proc, need_locks);

        if (flags & ERTS_P2P_FLG_INC_REFC)
            dec_refc_proc = proc;
        proc = NULL;

    }

    if (dec_refc_proc)
        erts_proc_dec_refc(dec_refc_proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
    ASSERT(!proc
           || proc == ERTS_PROC_LOCK_BUSY
           || (pid_need_locks ==
               (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
                & pid_need_locks)));
#endif

    return proc;
}
997
998 static ERTS_INLINE
proc_lookup_inc_refc(Eterm pid,int allow_exit)999 Process *proc_lookup_inc_refc(Eterm pid, int allow_exit)
1000 {
1001 Process *proc;
1002 ErtsThrPrgrDelayHandle dhndl;
1003
1004 dhndl = erts_thr_progress_unmanaged_delay();
1005
1006 proc = erts_proc_lookup_raw(pid);
1007 if (proc) {
1008 if (!allow_exit && ERTS_PROC_IS_EXITING(proc))
1009 proc = NULL;
1010 else
1011 erts_proc_inc_refc(proc);
1012 }
1013
1014 erts_thr_progress_unmanaged_continue(dhndl);
1015
1016 return proc;
1017 }
1018
/* Look up 'pid' and take a reference; exiting processes yield NULL. */
Process *erts_proc_lookup_inc_refc(Eterm pid)
{
    return proc_lookup_inc_refc(pid, 0);
}
1023
/* Like erts_proc_lookup_inc_refc(), but also returns exiting processes. */
Process *erts_proc_lookup_raw_inc_refc(Eterm pid)
{
    return proc_lookup_inc_refc(pid, 1);
}
1028
void
erts_proc_lock_init(Process *p)
{
#if ERTS_PROC_LOCK_OWN_IMPL || defined(ERTS_PROC_LOCK_DEBUG)
    int i;
#endif
#if ERTS_PROC_LOCK_OWN_IMPL
    /* We always start with all locks locked */
#if ERTS_PROC_LOCK_ATOMIC_IMPL
    erts_atomic32_init_nob(&p->lock.flags,
                           (erts_aint32_t) ERTS_PROC_LOCKS_ALL);
#else
    p->lock.flags = ERTS_PROC_LOCKS_ALL;
#endif
    /* No waiters yet on any of the locks. */
    for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
        p->lock.queue[i] = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1,__FILE__,__LINE__);
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
    /* Mutex-based implementation: create each of the five process locks
     * and immediately lock it, so the new process starts with all locks
     * held (mirroring the own-implementation above). */
    erts_mtx_init(&p->lock.main, "proc_main", p->common.id,
        ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
    ethr_mutex_lock(&p->lock.main.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_trylock(1, &p->lock.main.lc);
#endif
    erts_mtx_init(&p->lock.msgq, "proc_msgq", p->common.id,
        ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
    ethr_mutex_lock(&p->lock.msgq.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_trylock(1, &p->lock.msgq.lc);
#endif
    erts_mtx_init(&p->lock.btm, "proc_btm", p->common.id,
        ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
    ethr_mutex_lock(&p->lock.btm.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_trylock(1, &p->lock.btm.lc);
#endif
    erts_mtx_init(&p->lock.status, "proc_status", p->common.id,
        ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
    ethr_mutex_lock(&p->lock.status.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_trylock(1, &p->lock.status.lc);
#endif
    erts_mtx_init(&p->lock.trace, "proc_trace", p->common.id,
        ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
    ethr_mutex_lock(&p->lock.trace.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_trylock(1, &p->lock.trace.lc);
#endif
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
    /* Debug bookkeeping: mark every lock as locked (count 1). */
    for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
        erts_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
    erts_lcnt_proc_lock_init(p);
    erts_lcnt_proc_lock(&(p->lock), ERTS_PROC_LOCKS_ALL);
    erts_lcnt_proc_lock_post_x(&(p->lock), ERTS_PROC_LOCKS_ALL, __FILE__, __LINE__);
#endif
}
1091
1092 void
erts_proc_lock_fin(Process * p)1093 erts_proc_lock_fin(Process *p)
1094 {
1095 #if ERTS_PROC_LOCK_RAW_MUTEX_IMPL
1096 erts_mtx_destroy(&p->lock.main);
1097 erts_mtx_destroy(&p->lock.msgq);
1098 erts_mtx_destroy(&p->lock.btm);
1099 erts_mtx_destroy(&p->lock.status);
1100 erts_mtx_destroy(&p->lock.trace);
1101 #endif
1102 #if defined(ERTS_ENABLE_LOCK_COUNT)
1103 erts_lcnt_proc_lock_destroy(p);
1104 #endif
1105 }
1106
1107 /* --- Process lock counting ----------------------------------------------- */
1108
1109 #if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
1110
/* Initialize the lock-count state of a newly created process and, when
 * lock counting is currently enabled for the process category, install
 * counters for its locks right away. */
void erts_lcnt_proc_lock_init(Process *p) {
    erts_lcnt_init_ref(&p->lock.lcnt_carrier);

    if(erts_lcnt_check_enabled(ERTS_LOCK_FLAGS_CATEGORY_PROCESS)) {
        erts_lcnt_enable_proc_lock_count(p, 1);
    }
}
1118
/* Remove the lock-count carrier (if any) from the process.  Called both
 * at process teardown and when counting is disabled at runtime. */
void erts_lcnt_proc_lock_destroy(Process *p) {
    erts_lcnt_uninstall(&p->lock.lcnt_carrier);
}
1122
/* Enable (enable != 0) or disable lock counting for the five locks of a
 * single process.  Enabling is a no-op when a counter carrier is
 * already installed or when the process has no valid id. */
void erts_lcnt_enable_proc_lock_count(Process *proc, int enable) {
    if(proc->common.id == ERTS_INVALID_PID) {
        /* Locks without an id are more trouble than they're worth; there's no
         * way to look them up and we can't track them with _STATIC since it's
         * too early to tell whether we're a system process (proc->static_flags
         * hasn't been set yet). */
    } else if(!enable) {
        erts_lcnt_proc_lock_destroy(proc);
    } else if(!erts_lcnt_check_ref_installed(&proc->lock.lcnt_carrier)) {
        /* Counting requested and not already active: create a carrier
         * with one lock-info entry per process lock and install it. */
        erts_lcnt_lock_info_carrier_t *carrier;

        carrier = erts_lcnt_create_lock_info_carrier(ERTS_LCNT_PROCLOCK_COUNT);

        erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN,
            "proc_main", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
        erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ,
            "proc_msgq", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
        erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM,
            "proc_btm", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
        erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS,
            "proc_status",proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
        erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE,
            "proc_trace", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);

        erts_lcnt_install(&proc->lock.lcnt_carrier, carrier);
    }
}
1150
erts_lcnt_update_process_locks(int enable)1151 void erts_lcnt_update_process_locks(int enable) {
1152 int i, max;
1153
1154 max = erts_ptab_max(&erts_proc);
1155
1156 for(i = 0; i < max; i++) {
1157 int delay_handle;
1158 Process *proc;
1159
1160 delay_handle = erts_thr_progress_unmanaged_delay();
1161 proc = erts_pix2proc(i);
1162
1163 if(proc != NULL) {
1164 erts_lcnt_enable_proc_lock_count(proc, enable);
1165 }
1166
1167 if(delay_handle != ERTS_THR_PRGR_DHANDLE_MANAGED) {
1168 erts_thr_progress_unmanaged_continue(delay_handle);
1169 }
1170 }
1171 }
1172
1173 #endif /* ERTS_ENABLE_LOCK_COUNT */
1174
1175
1176 /* --- Process lock checking ----------------------------------------------- */
1177
1178 #ifdef ERTS_ENABLE_LOCK_CHECK
1179
1180 #if ERTS_PROC_LOCK_OWN_IMPL
1181
1182 void
erts_proc_lc_lock(Process * p,ErtsProcLocks locks,const char * file,unsigned int line)1183 erts_proc_lc_lock(Process *p, ErtsProcLocks locks, const char *file, unsigned int line)
1184 {
1185 erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
1186 p->common.id,
1187 ERTS_LOCK_TYPE_PROCLOCK);
1188 if (locks & ERTS_PROC_LOCK_MAIN) {
1189 lck.id = lc_id.proc_lock_main;
1190 erts_lc_lock_x(&lck,file,line);
1191 }
1192 if (locks & ERTS_PROC_LOCK_MSGQ) {
1193 lck.id = lc_id.proc_lock_msgq;
1194 erts_lc_lock_x(&lck,file,line);
1195 }
1196 if (locks & ERTS_PROC_LOCK_BTM) {
1197 lck.id = lc_id.proc_lock_btm;
1198 erts_lc_lock_x(&lck,file,line);
1199 }
1200 if (locks & ERTS_PROC_LOCK_STATUS) {
1201 lck.id = lc_id.proc_lock_status;
1202 erts_lc_lock_x(&lck,file,line);
1203 }
1204 if (locks & ERTS_PROC_LOCK_TRACE) {
1205 lck.id = lc_id.proc_lock_trace;
1206 erts_lc_lock_x(&lck,file,line);
1207 }
1208 }
1209
/* Register a trylock attempt on the given process locks with the lock
 * checker.  'locked' is forwarded to erts_lc_trylock_x() — presumably
 * non-zero when the trylock succeeded; confirm against erts_lc.  Locks
 * are reported in locking order: main, msgq, btm, status, trace. */
void
erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked,
                     const char *file, unsigned int line)
{
    /* One template record is reused; only its id changes per lock. */
    erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
                                           p->common.id,
                                           ERTS_LOCK_TYPE_PROCLOCK);
    if (locks & ERTS_PROC_LOCK_MAIN) {
        lck.id = lc_id.proc_lock_main;
        erts_lc_trylock_x(locked, &lck, file, line);
    }
    if (locks & ERTS_PROC_LOCK_MSGQ) {
        lck.id = lc_id.proc_lock_msgq;
        erts_lc_trylock_x(locked, &lck, file, line);
    }
    if (locks & ERTS_PROC_LOCK_BTM) {
        lck.id = lc_id.proc_lock_btm;
        erts_lc_trylock_x(locked, &lck, file, line);
    }
    if (locks & ERTS_PROC_LOCK_STATUS) {
        lck.id = lc_id.proc_lock_status;
        erts_lc_trylock_x(locked, &lck, file, line);
    }
    if (locks & ERTS_PROC_LOCK_TRACE) {
        lck.id = lc_id.proc_lock_trace;
        erts_lc_trylock_x(locked, &lck, file, line);
    }
}
1238
1239 void
erts_proc_lc_unlock(Process * p,ErtsProcLocks locks)1240 erts_proc_lc_unlock(Process *p, ErtsProcLocks locks)
1241 {
1242 erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
1243 p->common.id,
1244 ERTS_LOCK_TYPE_PROCLOCK);
1245 if (locks & ERTS_PROC_LOCK_TRACE) {
1246 lck.id = lc_id.proc_lock_trace;
1247 erts_lc_unlock(&lck);
1248 }
1249 if (locks & ERTS_PROC_LOCK_STATUS) {
1250 lck.id = lc_id.proc_lock_status;
1251 erts_lc_unlock(&lck);
1252 }
1253 if (locks & ERTS_PROC_LOCK_BTM) {
1254 lck.id = lc_id.proc_lock_btm;
1255 erts_lc_unlock(&lck);
1256 }
1257 if (locks & ERTS_PROC_LOCK_MSGQ) {
1258 lck.id = lc_id.proc_lock_msgq;
1259 erts_lc_unlock(&lck);
1260 }
1261 if (locks & ERTS_PROC_LOCK_MAIN) {
1262 lck.id = lc_id.proc_lock_main;
1263 erts_lc_unlock(&lck);
1264 }
1265 }
1266
1267 #endif /* ERTS_PROC_LOCK_OWN_IMPL */
1268
/* Tell the lock checker that the given process locks might be released
 * (erts_lc_might_unlock()).  Under the own implementation the locks are
 * reported in reverse locking order; under the raw-mutex implementation
 * each lock's own checker record is used instead of a template. */
void
erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
{
#if ERTS_PROC_LOCK_OWN_IMPL
    erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
                                           p->common.id,
                                           ERTS_LOCK_TYPE_PROCLOCK);
    if (locks & ERTS_PROC_LOCK_TRACE) {
        lck.id = lc_id.proc_lock_trace;
        erts_lc_might_unlock(&lck);
    }
    if (locks & ERTS_PROC_LOCK_STATUS) {
        lck.id = lc_id.proc_lock_status;
        erts_lc_might_unlock(&lck);
    }
    if (locks & ERTS_PROC_LOCK_BTM) {
        lck.id = lc_id.proc_lock_btm;
        erts_lc_might_unlock(&lck);
    }
    if (locks & ERTS_PROC_LOCK_MSGQ) {
        lck.id = lc_id.proc_lock_msgq;
        erts_lc_might_unlock(&lck);
    }
    if (locks & ERTS_PROC_LOCK_MAIN) {
        lck.id = lc_id.proc_lock_main;
        erts_lc_might_unlock(&lck);
    }
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
    if (locks & ERTS_PROC_LOCK_MAIN)
        erts_lc_might_unlock(&p->lock.main.lc);
    if (locks & ERTS_PROC_LOCK_MSGQ)
        erts_lc_might_unlock(&p->lock.msgq.lc);
    if (locks & ERTS_PROC_LOCK_BTM)
        erts_lc_might_unlock(&p->lock.btm.lc);
    if (locks & ERTS_PROC_LOCK_STATUS)
        erts_lc_might_unlock(&p->lock.status.lc);
    if (locks & ERTS_PROC_LOCK_TRACE)
        erts_lc_might_unlock(&p->lock.trace.lc);
#endif
}
1309
/* Forward each process lock in 'locks' to erts_lc_require_lock().
 * Locks are reported in locking order (main first). */
void
erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, const char *file,
                          unsigned int line)
{
#if ERTS_PROC_LOCK_OWN_IMPL
    erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
                                           p->common.id,
                                           ERTS_LOCK_TYPE_PROCLOCK);
    if (locks & ERTS_PROC_LOCK_MAIN) {
        lck.id = lc_id.proc_lock_main;
        erts_lc_require_lock(&lck, file, line);
    }
    if (locks & ERTS_PROC_LOCK_MSGQ) {
        lck.id = lc_id.proc_lock_msgq;
        erts_lc_require_lock(&lck, file, line);
    }
    if (locks & ERTS_PROC_LOCK_BTM) {
        lck.id = lc_id.proc_lock_btm;
        erts_lc_require_lock(&lck, file, line);
    }
    if (locks & ERTS_PROC_LOCK_STATUS) {
        lck.id = lc_id.proc_lock_status;
        erts_lc_require_lock(&lck, file, line);
    }
    if (locks & ERTS_PROC_LOCK_TRACE) {
        lck.id = lc_id.proc_lock_trace;
        erts_lc_require_lock(&lck, file, line);
    }
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
    if (locks & ERTS_PROC_LOCK_MAIN)
        erts_lc_require_lock(&p->lock.main.lc, file, line);
    if (locks & ERTS_PROC_LOCK_MSGQ)
        erts_lc_require_lock(&p->lock.msgq.lc, file, line);
    if (locks & ERTS_PROC_LOCK_BTM)
        erts_lc_require_lock(&p->lock.btm.lc, file, line);
    if (locks & ERTS_PROC_LOCK_STATUS)
        erts_lc_require_lock(&p->lock.status.lc, file, line);
    if (locks & ERTS_PROC_LOCK_TRACE)
        erts_lc_require_lock(&p->lock.trace.lc, file, line);
#endif
}
1351
/* Forward each process lock in 'locks' to erts_lc_unrequire_lock().
 * The reverse of erts_proc_lc_require_lock(); locks are reported in
 * reverse locking order (trace first). */
void
erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
{
#if ERTS_PROC_LOCK_OWN_IMPL
    erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
                                           p->common.id,
                                           ERTS_LOCK_TYPE_PROCLOCK);
    if (locks & ERTS_PROC_LOCK_TRACE) {
        lck.id = lc_id.proc_lock_trace;
        erts_lc_unrequire_lock(&lck);
    }
    if (locks & ERTS_PROC_LOCK_STATUS) {
        lck.id = lc_id.proc_lock_status;
        erts_lc_unrequire_lock(&lck);
    }
    if (locks & ERTS_PROC_LOCK_BTM) {
        lck.id = lc_id.proc_lock_btm;
        erts_lc_unrequire_lock(&lck);
    }
    if (locks & ERTS_PROC_LOCK_MSGQ) {
        lck.id = lc_id.proc_lock_msgq;
        erts_lc_unrequire_lock(&lck);
    }
    if (locks & ERTS_PROC_LOCK_MAIN) {
        lck.id = lc_id.proc_lock_main;
        erts_lc_unrequire_lock(&lck);
    }
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
    if (locks & ERTS_PROC_LOCK_MAIN)
        erts_lc_unrequire_lock(&p->lock.main.lc);
    if (locks & ERTS_PROC_LOCK_MSGQ)
        erts_lc_unrequire_lock(&p->lock.msgq.lc);
    if (locks & ERTS_PROC_LOCK_BTM)
        erts_lc_unrequire_lock(&p->lock.btm.lc);
    if (locks & ERTS_PROC_LOCK_STATUS)
        erts_lc_unrequire_lock(&p->lock.status.lc);
    if (locks & ERTS_PROC_LOCK_TRACE)
        erts_lc_unrequire_lock(&p->lock.trace.lc);
#endif
}
1392
1393 #if ERTS_PROC_LOCK_OWN_IMPL
1394
1395 int
erts_proc_lc_trylock_force_busy(Process * p,ErtsProcLocks locks)1396 erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
1397 {
1398 if (locks & ERTS_PROC_LOCKS_ALL) {
1399 erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
1400 p->common.id,
1401 ERTS_LOCK_TYPE_PROCLOCK);
1402
1403 if (locks & ERTS_PROC_LOCK_MAIN)
1404 lck.id = lc_id.proc_lock_main;
1405 else if (locks & ERTS_PROC_LOCK_MSGQ)
1406 lck.id = lc_id.proc_lock_msgq;
1407 else if (locks & ERTS_PROC_LOCK_BTM)
1408 lck.id = lc_id.proc_lock_btm;
1409 else if (locks & ERTS_PROC_LOCK_STATUS)
1410 lck.id = lc_id.proc_lock_status;
1411 else if (locks & ERTS_PROC_LOCK_TRACE)
1412 lck.id = lc_id.proc_lock_trace;
1413 else
1414 erts_lc_fail("Unknown proc lock found");
1415
1416 return erts_lc_trylock_force_busy(&lck);
1417 }
1418 return 0;
1419 }
1420
1421 #endif /* ERTS_PROC_LOCK_OWN_IMPL */
1422
/* Shorthand for erts_proc_lc_chk_only_proc() with just the main lock. */
void erts_proc_lc_chk_only_proc_main(Process *p)
{
    erts_proc_lc_chk_only_proc(p, ERTS_PROC_LOCK_MAIN);
}
1427
1428 #if ERTS_PROC_LOCK_OWN_IMPL
1429 #define ERTS_PROC_LC_EMPTY_LOCK_INIT \
1430 ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LOCK_TYPE_PROCLOCK)
1431 #endif /* ERTS_PROC_LOCK_OWN_IMPL */
1432
/* Check, via erts_lc_check_exact(), that the locks held by the calling
 * thread match exactly the process locks given by 'locks' on 'p'.  The
 * have_locks array is filled in locking order. */
void erts_proc_lc_chk_only_proc(Process *p, ErtsProcLocks locks)
{
    int have_locks_len = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
    erts_lc_lock_t have_locks[6] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                    ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                    ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                    ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                    ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                    ERTS_PROC_LC_EMPTY_LOCK_INIT};
    if (locks & ERTS_PROC_LOCK_MAIN) {
        have_locks[have_locks_len].id = lc_id.proc_lock_main;
        have_locks[have_locks_len++].extra = p->common.id;
    }
    if (locks & ERTS_PROC_LOCK_MSGQ) {
        have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
        have_locks[have_locks_len++].extra = p->common.id;
    }
    if (locks & ERTS_PROC_LOCK_BTM) {
        have_locks[have_locks_len].id = lc_id.proc_lock_btm;
        have_locks[have_locks_len++].extra = p->common.id;
    }
    if (locks & ERTS_PROC_LOCK_STATUS) {
        have_locks[have_locks_len].id = lc_id.proc_lock_status;
        have_locks[have_locks_len++].extra = p->common.id;
    }
    if (locks & ERTS_PROC_LOCK_TRACE) {
        have_locks[have_locks_len].id = lc_id.proc_lock_trace;
        have_locks[have_locks_len++].extra = p->common.id;
    }
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
    /* With raw mutexes each lock carries its own checker record. */
    erts_lc_lock_t have_locks[6];
    if (locks & ERTS_PROC_LOCK_MAIN)
        have_locks[have_locks_len++] = p->lock.main.lc;
    if (locks & ERTS_PROC_LOCK_MSGQ)
        have_locks[have_locks_len++] = p->lock.msgq.lc;
    if (locks & ERTS_PROC_LOCK_BTM)
        have_locks[have_locks_len++] = p->lock.btm.lc;
    if (locks & ERTS_PROC_LOCK_STATUS)
        have_locks[have_locks_len++] = p->lock.status.lc;
    if (locks & ERTS_PROC_LOCK_TRACE)
        have_locks[have_locks_len++] = p->lock.trace.lc;
#endif
    erts_lc_check_exact(have_locks, have_locks_len);
}
1478
/* Check, via erts_lc_check(), that the calling thread holds (at least)
 * the process locks given by 'locks' on 'p'.  Unlike the chk_only
 * variant, other held locks are not reported as a violation (the
 * have-not list passed to erts_lc_check() is empty). */
void
erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks)
{
    int have_locks_len = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
    erts_lc_lock_t have_locks[6] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                    ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                    ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                    ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                    ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                    ERTS_PROC_LC_EMPTY_LOCK_INIT};
    if (locks & ERTS_PROC_LOCK_MAIN) {
        have_locks[have_locks_len].id = lc_id.proc_lock_main;
        have_locks[have_locks_len++].extra = p->common.id;
    }
    if (locks & ERTS_PROC_LOCK_MSGQ) {
        have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
        have_locks[have_locks_len++].extra = p->common.id;
    }
    if (locks & ERTS_PROC_LOCK_BTM) {
        have_locks[have_locks_len].id = lc_id.proc_lock_btm;
        have_locks[have_locks_len++].extra = p->common.id;
    }
    if (locks & ERTS_PROC_LOCK_STATUS) {
        have_locks[have_locks_len].id = lc_id.proc_lock_status;
        have_locks[have_locks_len++].extra = p->common.id;
    }
    if (locks & ERTS_PROC_LOCK_TRACE) {
        have_locks[have_locks_len].id = lc_id.proc_lock_trace;
        have_locks[have_locks_len++].extra = p->common.id;
    }
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
    /* With raw mutexes each lock carries its own checker record. */
    erts_lc_lock_t have_locks[6];
    if (locks & ERTS_PROC_LOCK_MAIN)
        have_locks[have_locks_len++] = p->lock.main.lc;
    if (locks & ERTS_PROC_LOCK_MSGQ)
        have_locks[have_locks_len++] = p->lock.msgq.lc;
    if (locks & ERTS_PROC_LOCK_BTM)
        have_locks[have_locks_len++] = p->lock.btm.lc;
    if (locks & ERTS_PROC_LOCK_STATUS)
        have_locks[have_locks_len++] = p->lock.status.lc;
    if (locks & ERTS_PROC_LOCK_TRACE)
        have_locks[have_locks_len++] = p->lock.trace.lc;
#endif
    erts_lc_check(have_locks, have_locks_len, NULL, 0);
}
1525
/* Check, via erts_lc_check(), that the calling thread holds exactly the
 * process locks in 'locks' on 'p' and none of the other process locks
 * of 'p': every lock flag is sorted into either the have list or the
 * have-not list. */
void
erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
{
    int have_locks_len = 0;
    int have_not_locks_len = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
    erts_lc_lock_t have_locks[6] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                    ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                    ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                    ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                    ERTS_PROC_LC_EMPTY_LOCK_INIT};
    erts_lc_lock_t have_not_locks[6] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                        ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                        ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                        ERTS_PROC_LC_EMPTY_LOCK_INIT,
                                        ERTS_PROC_LC_EMPTY_LOCK_INIT};

    if (locks & ERTS_PROC_LOCK_MAIN) {
        have_locks[have_locks_len].id = lc_id.proc_lock_main;
        have_locks[have_locks_len++].extra = p->common.id;
    }
    else {
        have_not_locks[have_not_locks_len].id = lc_id.proc_lock_main;
        have_not_locks[have_not_locks_len++].extra = p->common.id;
    }
    if (locks & ERTS_PROC_LOCK_MSGQ) {
        have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
        have_locks[have_locks_len++].extra = p->common.id;
    }
    else {
        have_not_locks[have_not_locks_len].id = lc_id.proc_lock_msgq;
        have_not_locks[have_not_locks_len++].extra = p->common.id;
    }
    if (locks & ERTS_PROC_LOCK_BTM) {
        have_locks[have_locks_len].id = lc_id.proc_lock_btm;
        have_locks[have_locks_len++].extra = p->common.id;
    }
    else {
        have_not_locks[have_not_locks_len].id = lc_id.proc_lock_btm;
        have_not_locks[have_not_locks_len++].extra = p->common.id;
    }
    if (locks & ERTS_PROC_LOCK_STATUS) {
        have_locks[have_locks_len].id = lc_id.proc_lock_status;
        have_locks[have_locks_len++].extra = p->common.id;
    }
    else {
        have_not_locks[have_not_locks_len].id = lc_id.proc_lock_status;
        have_not_locks[have_not_locks_len++].extra = p->common.id;
    }
    if (locks & ERTS_PROC_LOCK_TRACE) {
        have_locks[have_locks_len].id = lc_id.proc_lock_trace;
        have_locks[have_locks_len++].extra = p->common.id;
    }
    else {
        have_not_locks[have_not_locks_len].id = lc_id.proc_lock_trace;
        have_not_locks[have_not_locks_len++].extra = p->common.id;
    }
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
    /* With raw mutexes each lock carries its own checker record. */
    erts_lc_lock_t have_locks[6];
    erts_lc_lock_t have_not_locks[6];

    if (locks & ERTS_PROC_LOCK_MAIN)
        have_locks[have_locks_len++] = p->lock.main.lc;
    else
        have_not_locks[have_not_locks_len++] = p->lock.main.lc;
    if (locks & ERTS_PROC_LOCK_MSGQ)
        have_locks[have_locks_len++] = p->lock.msgq.lc;
    else
        have_not_locks[have_not_locks_len++] = p->lock.msgq.lc;
    if (locks & ERTS_PROC_LOCK_BTM)
        have_locks[have_locks_len++] = p->lock.btm.lc;
    else
        have_not_locks[have_not_locks_len++] = p->lock.btm.lc;
    if (locks & ERTS_PROC_LOCK_STATUS)
        have_locks[have_locks_len++] = p->lock.status.lc;
    else
        have_not_locks[have_not_locks_len++] = p->lock.status.lc;
    if (locks & ERTS_PROC_LOCK_TRACE)
        have_locks[have_locks_len++] = p->lock.trace.lc;
    else
        have_not_locks[have_not_locks_len++] = p->lock.trace.lc;
#endif

    erts_lc_check(have_locks, have_locks_len,
                  have_not_locks, have_not_locks_len);
}
1612
/* Return the set of process locks on 'p' that the calling thread
 * currently holds, according to the lock checker
 * (erts_lc_have_locks()). */
ErtsProcLocks
erts_proc_lc_my_proc_locks(Process *p)
{
    int resv[5];   /* one held/not-held flag per lock, filled by erts_lc */
    ErtsProcLocks res = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
    erts_lc_lock_t locks[5] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
                                                 p->common.id,
                                                 ERTS_LOCK_TYPE_PROCLOCK),
                               ERTS_LC_LOCK_INIT(lc_id.proc_lock_msgq,
                                                 p->common.id,
                                                 ERTS_LOCK_TYPE_PROCLOCK),
                               ERTS_LC_LOCK_INIT(lc_id.proc_lock_btm,
                                                 p->common.id,
                                                 ERTS_LOCK_TYPE_PROCLOCK),
                               ERTS_LC_LOCK_INIT(lc_id.proc_lock_status,
                                                 p->common.id,
                                                 ERTS_LOCK_TYPE_PROCLOCK),
                               ERTS_LC_LOCK_INIT(lc_id.proc_lock_trace,
                                                 p->common.id,
                                                 ERTS_LOCK_TYPE_PROCLOCK)};
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
    erts_lc_lock_t locks[5] = {p->lock.main.lc,
                               p->lock.msgq.lc,
                               p->lock.btm.lc,
                               p->lock.status.lc,
                               p->lock.trace.lc};
#endif

    erts_lc_have_locks(resv, locks, 5);
    /* Map the answers back onto the corresponding lock flags. */
    if (resv[0])
        res |= ERTS_PROC_LOCK_MAIN;
    if (resv[1])
        res |= ERTS_PROC_LOCK_MSGQ;
    if (resv[2])
        res |= ERTS_PROC_LOCK_BTM;
    if (resv[3])
        res |= ERTS_PROC_LOCK_STATUS;
    if (resv[4])
        res |= ERTS_PROC_LOCK_TRACE;

    return res;
}
1656
1657 void
erts_proc_lc_chk_no_proc_locks(const char * file,int line)1658 erts_proc_lc_chk_no_proc_locks(const char *file, int line)
1659 {
1660 int resv[5];
1661 int ids[5] = {lc_id.proc_lock_main,
1662 lc_id.proc_lock_msgq,
1663 lc_id.proc_lock_btm,
1664 lc_id.proc_lock_status,
1665 lc_id.proc_lock_trace};
1666 erts_lc_have_lock_ids(resv, ids, 5);
1667 if (!ERTS_IS_CRASH_DUMPING && (resv[0] || resv[1] || resv[2] || resv[3] || resv[4])) {
1668 erts_lc_fail("%s:%d: Thread has process locks locked when expected "
1669 "not to have any process locks locked",
1670 file, line);
1671 }
1672 }
1673
1674 #endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */
1675
1676 #if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_HARD_DEBUG)
/*
 * Hard-debug sanity check of a process lock's waiter queues: for every
 * lock bit, a set waiter flag must correspond to a non-empty circular
 * queue and a clear flag to an empty one, and the queue's next/prev
 * links must be consistent (walking forward and then backward the same
 * number of steps returns to the start).
 */
void
check_queue(erts_proc_lock_t *lck)
{
    int lock_no;
    ErtsProcLocks lflgs = ERTS_PROC_LOCK_FLGS_READ_(lck);

    for (lock_no = 0; lock_no <= ERTS_PROC_LOCK_MAX_BIT; lock_no++) {
        ErtsProcLocks bit;
        /* Waiter flag for this lock number. */
        bit = (((ErtsProcLocks) 1) << lock_no) << ERTS_PROC_LOCK_WAITER_SHIFT;
        if (lflgs & bit) {
            int n;
            erts_tse_t *wtr;
            ERTS_ASSERT(lck->queue[lock_no]);
            wtr = lck->queue[lock_no];
            n = 0;
            /* Count the elements by following the 'next' links... */
            do {
                wtr = wtr->next;
                n++;
            } while (wtr != lck->queue[lock_no]);
            /* ...then verify the 'prev' links walk back the same count. */
            do {
                wtr = wtr->prev;
                n--;
            } while (wtr != lck->queue[lock_no]);
            ERTS_ASSERT(n == 0);
        }
        else {
            ERTS_ASSERT(!lck->queue[lock_no]);
        }
    }
}
1707 #endif
1708
1709