1 /*===========================================================================
2 *
3 * PUBLIC DOMAIN NOTICE
4 * National Center for Biotechnology Information
5 *
6 * This software/database is a "United States Government Work" under the
7 * terms of the United States Copyright Act. It was written as part of
8 * the author's official duties as a United States Government employee and
9 * thus cannot be copyrighted. This software/database is freely available
10 * to the public for use. The National Library of Medicine and the U.S.
11 * Government have not placed any restriction on its use or reproduction.
12 *
13 * Although all reasonable efforts have been taken to ensure the accuracy
14 * and reliability of the software and data, the NLM and the U.S.
15 * Government do not and cannot warrant the performance or results that
16 * may be obtained by using this software or data. The NLM and the U.S.
17 * Government disclaim all warranties, express or implied, including
18 * warranties of performance, merchantability or fitness for any particular
19 * purpose.
20 *
21 * Please cite the author in any work or product based on this material.
22 *
23 * ===========================================================================
24 *
25 */
26
27 #include <kproc/extern.h>
28 #include "syslock-priv.h"
29 #include "syscond-priv.h"
30 #include <kproc/timeout.h>
31 #include <kproc/lock.h>
32 #include <kproc/cond.h>
33 #include <os-native.h>
34 #include <kproc/lock.h>
35 #include <klib/rc.h>
36 #include <sysalloc.h>
37
38 #include <stdlib.h>
39 #include <errno.h>
40
41 /*--------------------------------------------------------------------------
42 * pthread_mutex
43 */
44 static
pthread_mutex_whack(pthread_mutex_t * mutex)45 rc_t pthread_mutex_whack ( pthread_mutex_t *mutex )
46 {
47 int status = pthread_mutex_destroy ( mutex );
48 switch ( status )
49 {
50 case 0:
51 break;
52 case EBUSY:
53 return RC ( rcPS, rcLock, rcDestroying, rcLock, rcBusy );
54 case EINVAL:
55 return RC ( rcPS, rcLock, rcDestroying, rcLock, rcInvalid );
56 default:
57 return RC ( rcPS, rcLock, rcDestroying, rcNoObj, rcUnknown );
58 }
59 return 0;
60 }
61
62 static
pthread_mutex_acquire(pthread_mutex_t * mutex)63 rc_t pthread_mutex_acquire ( pthread_mutex_t *mutex )
64 {
65 /* pthread_t t = pthread_self();
66 fprintf(stdout, "pthread_mutex_lock(%p), thread=%x\n", mutex, t);*/
67 int status = pthread_mutex_lock ( mutex );
68 /*fprintf(stdout, "pthread_mutex_lock, thread=%x, status = %d\n", t, status);*/
69 switch ( status )
70 {
71 case 0:
72 break;
73 case EDEADLK:
74 return RC ( rcPS, rcLock, rcLocking, rcThread, rcDeadlock );
75 case EINVAL:
76 return RC ( rcPS, rcLock, rcLocking, rcLock, rcInvalid );
77 default:
78 return RC ( rcPS, rcLock, rcLocking, rcNoObj, rcUnknown );
79 }
80 return 0;
81 }
82
/* Attempt to lock a pthread mutex without blocking.
 * Returns the raw pthreads status: 0 on success, EBUSY when the
 * mutex is already held, or another errno-style code on error. */
static
int pthread_mutex_tryacquire ( pthread_mutex_t *mutex )
{
    return pthread_mutex_trylock ( mutex );
}
92
93 static
pthread_mutex_release(pthread_mutex_t * mutex)94 rc_t pthread_mutex_release ( pthread_mutex_t *mutex )
95 {
96 /* pthread_t t = pthread_self();
97 fprintf(stdout, "pthread_mutex_unlock(%p), thread=%x\n", mutex, t);*/
98 int status = pthread_mutex_unlock ( mutex );
99 /*fprintf(stdout, "pthread_mutex_unlock, thread=%x, status = %d\n", t, status);*/
100 switch ( status )
101 {
102 case 0:
103 break;
104 case EPERM:
105 return RC ( rcPS, rcLock, rcUnlocking, rcThread, rcIncorrect );
106 case EINVAL:
107 return RC ( rcPS, rcLock, rcUnlocking, rcLock, rcInvalid );
108 default:
109 return RC ( rcPS, rcLock, rcUnlocking, rcNoObj, rcUnknown );
110 }
111 return 0;
112 }
113
114 /*--------------------------------------------------------------------------
115 * pthread_condition
116 */
117 static
pthread_condition_init(pthread_cond_t * cond)118 rc_t pthread_condition_init ( pthread_cond_t *cond )
119 {
120 int status = pthread_cond_init ( cond, NULL );
121 switch ( status )
122 {
123 case 0:
124 break;
125 case EAGAIN:
126 return RC ( rcPS, rcCondition, rcConstructing, rcCondition, rcExhausted );
127 case ENOMEM:
128 return RC ( rcPS, rcCondition, rcConstructing, rcMemory, rcExhausted );
129 case EBUSY:
130 return RC ( rcPS, rcCondition, rcConstructing, rcCondition, rcBusy );
131 case EINVAL:
132 return RC ( rcPS, rcCondition, rcConstructing, rcCondition, rcInvalid );
133 default:
134 return RC ( rcPS, rcCondition, rcConstructing, rcNoObj, rcUnknown );
135 }
136
137 return 0;
138 }
139
140 static
pthread_condition_whack(pthread_cond_t * cond)141 rc_t pthread_condition_whack ( pthread_cond_t *cond )
142 {
143 int status = pthread_cond_destroy ( cond );
144 switch ( status )
145 {
146 case 0:
147 break;
148 case EBUSY:
149 return RC ( rcPS, rcLock, rcDestroying, rcCondition, rcBusy );
150 case EINVAL:
151 return RC ( rcPS, rcLock, rcDestroying, rcCondition, rcInvalid );
152 default:
153 return RC ( rcPS, rcLock, rcDestroying, rcNoObj, rcUnknown );
154 }
155 return 0;
156 }
157
158 static
pthread_condition_wait(pthread_cond_t * cond,pthread_mutex_t * mutex)159 rc_t pthread_condition_wait ( pthread_cond_t *cond, pthread_mutex_t *mutex )
160 {
161 /* pthread_t t = pthread_self();
162 fprintf(stdout, "pthread_cond_wait(%p, %p), thread=%x\n", cond, mutex, t);*/
163 int status = pthread_cond_wait( cond, mutex );
164 /*fprintf(stdout, "pthread_cond_wait, thread=%x, status = %d\n", t, status);*/
165
166 switch ( status )
167 {
168 case 0:
169 break;
170 default:
171 return RC ( rcPS, rcLock, rcSignaling, rcCondition, rcUnknown );
172 }
173 return 0;
174 }
175
176 static
pthread_condition_timedwait(pthread_cond_t * cond,pthread_mutex_t * mutex,struct timespec * ts)177 rc_t pthread_condition_timedwait ( pthread_cond_t *cond, pthread_mutex_t *mutex, struct timespec *ts )
178 {
179 int status = pthread_cond_timedwait ( cond, mutex, ts );
180 switch ( status )
181 {
182 case 0:
183 break;
184 case ETIMEDOUT:
185 return RC ( rcPS, rcCondition, rcWaiting, rcTimeout, rcExhausted );
186 case EINTR:
187 return RC ( rcPS, rcCondition, rcWaiting, rcThread, rcInterrupted );
188 default:
189 return RC ( rcPS, rcCondition, rcWaiting, rcNoObj, rcUnknown );
190 }
191
192 return 0;
193 }
194
195 static
pthread_condition_signal(pthread_cond_t * cond)196 rc_t pthread_condition_signal( pthread_cond_t *cond )
197 {
198 int status = pthread_cond_signal ( cond );
199 switch ( status )
200 {
201 case 0:
202 break;
203 default:
204 return RC ( rcPS, rcCondition, rcSignaling, rcNoObj, rcUnknown );
205 }
206
207 return 0;
208 }
209
210 static
pthread_condition_broadcast(pthread_cond_t * cond)211 rc_t pthread_condition_broadcast ( pthread_cond_t *cond )
212 {
213 int status = pthread_cond_broadcast ( cond );
214 switch ( status )
215 {
216 case 0:
217 break;
218 default:
219 return RC ( rcPS, rcCondition, rcSignaling, rcNoObj, rcUnknown );
220 }
221
222 return 0;
223 }
224
225 /*--------------------------------------------------------------------------
226 * KLock
227 * a POSIX-style mutual exclusion lock
228 */
229
230 /* Destroy
231 */
232 static
KLockDestroy(KLock * self)233 rc_t KLockDestroy ( KLock *self )
234 {
235 return pthread_mutex_whack ( & self -> mutex );
236 }
237
238 /* Whack
239 */
240 static
KLockWhack(KLock * self)241 rc_t KLockWhack ( KLock *self )
242 {
243 rc_t rc = KLockDestroy ( self );
244 if ( rc == 0 )
245 free ( self );
246 return rc;
247 }
248
249 /* Init
250 */
251 static
KLockInit(KLock * self)252 rc_t KLockInit ( KLock *self )
253 {
254 int status = pthread_mutex_init ( & self -> mutex, NULL );
255 switch ( status )
256 {
257 case 0:
258 atomic32_set ( & self -> refcount, 1 );
259 return 0;
260 case EAGAIN:
261 return RC ( rcPS, rcLock, rcConstructing, rcResources, rcInsufficient );
262 case ENOMEM:
263 return RC ( rcPS, rcLock, rcConstructing, rcMemory, rcInsufficient );
264 }
265
266 return RC ( rcPS, rcLock, rcConstructing, rcNoObj, rcUnknown );
267 }
268
269
270 /* Make
271 * make a simple mutex
272 */
KLockMake(KLock ** lockp)273 LIB_EXPORT rc_t CC KLockMake ( KLock **lockp )
274 {
275 rc_t rc;
276 if ( lockp == NULL )
277 rc = RC ( rcPS, rcLock, rcConstructing, rcParam, rcNull );
278 else
279 {
280 KLock *lock = malloc ( sizeof * lock );
281 if ( lock == NULL )
282 rc = RC ( rcPS, rcLock, rcConstructing, rcMemory, rcExhausted );
283 else
284 {
285 rc = KLockInit ( lock );
286 if ( rc == 0 )
287 {
288 * lockp = lock;
289 return 0;
290 }
291
292 free ( lock );
293 }
294
295 * lockp = NULL;
296 }
297 return rc;
298 }
299
300
301 /* AddRef
302 * Release
303 */
KLockAddRef(const KLock * cself)304 LIB_EXPORT rc_t CC KLockAddRef ( const KLock *cself )
305 {
306 if ( cself != NULL )
307 atomic32_inc ( & ( ( KLock* ) cself ) -> refcount );
308 return 0;
309 }
310
KLockRelease(const KLock * cself)311 LIB_EXPORT rc_t CC KLockRelease ( const KLock *cself )
312 {
313 KLock *self = ( KLock* ) cself;
314 if ( cself != NULL )
315 {
316 if ( atomic32_dec_and_test ( & self -> refcount ) )
317 {
318 atomic32_set ( & self -> refcount, 1 );
319 return KLockWhack ( self );
320 }
321 }
322 return 0;
323 }
324
325
326 /* Acquire
327 * acquires lock
328 */
KLockAcquire(KLock * self)329 LIB_EXPORT rc_t CC KLockAcquire ( KLock *self )
330 {
331 rc_t rc;
332
333 if ( self == NULL )
334 rc = RC ( rcPS, rcLock, rcLocking, rcSelf, rcNull );
335 else
336 {
337 rc = pthread_mutex_acquire ( & self -> mutex );
338 }
339
340 return rc;
341 }
342
343 /* Unlock
344 * releases lock
345 */
KLockUnlock(KLock * self)346 LIB_EXPORT rc_t CC KLockUnlock ( KLock *self )
347 {
348 rc_t rc;
349
350 if ( self == NULL )
351 return RC ( rcPS, rcLock, rcUnlocking, rcSelf, rcNull );
352
353 /* release the guy */
354 rc = pthread_mutex_release ( & self -> mutex );
355
356 return rc;
357 }
358
359 /*--------------------------------------------------------------------------
360 * KTimedLock
361 * a POSIX-style mutual exclusion lock with support for timed acquire
362 */
363
364 /* Destroy
365 */
366 static
KTimedLockDestroy(KTimedLock * self)367 rc_t KTimedLockDestroy ( KTimedLock *self )
368 {
369 rc_t rc = pthread_mutex_whack ( & self -> mutex );
370 if ( rc == 0 )
371 {
372 pthread_mutex_whack ( & self -> cond_lock );
373 pthread_condition_whack ( & self -> cond );
374 }
375 return rc;
376 }
377
378 /* Whack
379 */
380 static
KTimedLockWhack(KTimedLock * self)381 rc_t KTimedLockWhack ( KTimedLock *self )
382 {
383 rc_t rc = KTimedLockDestroy ( self );
384 if ( rc == 0 )
385 free ( self );
386 return rc;
387 }
388
389 /* Init
390 */
391 static
KTimedLockInit(KTimedLock * self)392 rc_t KTimedLockInit ( KTimedLock *self )
393 {
394 int status = pthread_mutex_init ( & self -> mutex, NULL );
395 if ( status == 0 )
396 {
397 status = pthread_mutex_init ( & self -> cond_lock, NULL );
398 if ( status == 0 )
399 {
400 status = pthread_cond_init ( & self -> cond, NULL );
401 if ( status == 0 )
402 {
403 self -> waiters = 0;
404 atomic32_set ( & self -> refcount, 1 );
405 return 0;
406 }
407
408 pthread_mutex_destroy ( & self -> cond_lock );
409 }
410
411 pthread_mutex_destroy ( & self -> mutex );
412 }
413
414 switch ( status )
415 {
416 case EAGAIN:
417 return RC ( rcPS, rcLock, rcConstructing, rcResources, rcInsufficient );
418 case ENOMEM:
419 return RC ( rcPS, rcLock, rcConstructing, rcMemory, rcInsufficient );
420 }
421
422 return RC ( rcPS, rcLock, rcConstructing, rcNoObj, rcUnknown );
423 }
424
425
426 /* Make
427 * make a simple mutex
428 */
KTimedLockMake(KTimedLock ** lockp)429 LIB_EXPORT rc_t CC KTimedLockMake ( KTimedLock **lockp )
430 {
431 rc_t rc;
432 if ( lockp == NULL )
433 rc = RC ( rcPS, rcLock, rcConstructing, rcParam, rcNull );
434 else
435 {
436 KTimedLock *lock = malloc ( sizeof * lock );
437 if ( lock == NULL )
438 rc = RC ( rcPS, rcLock, rcConstructing, rcMemory, rcExhausted );
439 else
440 {
441 rc = KTimedLockInit ( lock );
442 if ( rc == 0 )
443 {
444 * lockp = lock;
445 return 0;
446 }
447
448 free ( lock );
449 }
450
451 * lockp = NULL;
452 }
453 return rc;
454 }
455
456
457 /* AddRef
458 * Release
459 */
KTimedLockAddRef(const KTimedLock * cself)460 LIB_EXPORT rc_t CC KTimedLockAddRef ( const KTimedLock *cself )
461 {
462 if ( cself != NULL )
463 atomic32_inc ( & ( ( KTimedLock* ) cself ) -> refcount );
464 return 0;
465 }
466
KTimedLockRelease(const KTimedLock * cself)467 LIB_EXPORT rc_t CC KTimedLockRelease ( const KTimedLock *cself )
468 {
469 KTimedLock *self = ( KTimedLock* ) cself;
470 if ( cself != NULL )
471 {
472 if ( atomic32_dec_and_test ( & self -> refcount ) )
473 {
474 atomic32_set ( & self -> refcount, 1 );
475 return KTimedLockWhack ( self );
476 }
477 }
478 return 0;
479 }
480
481
482 /* Acquire
483 * acquires lock
484 */
/* Acquire
 *  acquires the lock, giving up when "tm" expires
 *
 *  "tm" [ IN, NULL OKAY ] - when NULL, falls back to a plain
 *  blocking acquire of the primary mutex
 */
LIB_EXPORT rc_t CC KTimedLockAcquire ( KTimedLock *self, timeout_t *tm )
{
    rc_t rc;

    if ( self == NULL )
        return RC ( rcPS, rcLock, rcLocking, rcSelf, rcNull );

    /* no timeout: plain blocking acquire */
    if ( tm == NULL )
        return pthread_mutex_acquire ( & self -> mutex );

    /* this is ugly, but don't want to prepare inside lock */
    if ( ! tm -> prepared )
        TimeoutPrepare ( tm );

    /* "cond_lock" guards "waiters" and pairs with "cond"; it is held
       across the whole wait loop, and KTimedLockUnlock signals under
       the same lock, so a wakeup cannot be lost between the trylock
       below and the timedwait */
    rc = pthread_mutex_acquire ( & self -> cond_lock );
    if ( rc == 0 )
    {
        /* opportunistic grab of the primary mutex */
        int status = pthread_mutex_tryacquire ( & self -> mutex );
        if ( status == EBUSY )
        {
            while ( 1 )
            {
                /* advertise ourselves so unlock knows to signal */
                ++ self -> waiters;
                status = pthread_cond_timedwait ( & self -> cond, & self -> cond_lock, & tm -> ts );
                -- self -> waiters;

                /* interrupted: retry with the same absolute deadline */
                if ( status == EINTR )
                    continue;
                /* timeout or hard error: give up */
                if ( status != 0 )
                    break;
                /* signaled: retry the mutex - another thread may have won */
                status = pthread_mutex_tryacquire ( & self -> mutex );
                if ( status != EBUSY )
                    break;
            }
        }

        pthread_mutex_release ( & self -> cond_lock );

        /* map the final pthreads status onto an rc_t */
        switch ( status )
        {
        case 0:
            break;
        case ETIMEDOUT:
            rc = RC ( rcPS, rcLock, rcLocking, rcTimeout, rcExhausted );
            break;
        case EBUSY:
            rc = RC ( rcPS, rcLock, rcLocking, rcLock, rcBusy );
            break;
        case EINVAL:
            rc = RC ( rcPS, rcLock, rcLocking, rcLock, rcInvalid );
            break;
        default:
            rc = RC ( rcPS, rcLock, rcLocking, rcNoObj, rcUnknown );
        }
    }

    return rc;
}
543
544 /* Unlock
545 * releases lock
546 */
KTimedLockUnlock(KTimedLock * self)547 LIB_EXPORT rc_t CC KTimedLockUnlock ( KTimedLock *self )
548 {
549 rc_t rc;
550
551 if ( self == NULL )
552 return RC ( rcPS, rcLock, rcUnlocking, rcSelf, rcNull );
553
554 rc = pthread_mutex_acquire ( & self -> cond_lock );
555 if ( rc == 0 )
556 {
557 /* release the guy */
558 rc = pthread_mutex_release ( & self -> mutex );
559
560 if ( self -> waiters != 0 )
561 pthread_condition_signal ( & self -> cond );
562
563 pthread_mutex_release ( & self -> cond_lock );
564 }
565
566 return rc;
567 }
568
569
570 /*--------------------------------------------------------------------------
571 * KRWLock
572 * a POSIX-style read/write lock
573 */
struct KRWLock
{
    KLock lock;              /* guards all state below */
    pthread_cond_t rcond;    /* waiting readers block here */
    pthread_cond_t wcond;    /* waiting writers block here */
    uint32_t rwait;          /* number of readers currently waiting */
    uint32_t wwait;          /* number of writers currently waiting */
    int32_t count;           /* 0 = free, > 0 = active readers, -1 = held by a writer */
    atomic32_t refcount;     /* object reference count */

    /* used in KRWLockTimedAcquire */
    pthread_mutex_t timed_lock;   /* guards timed_waiters, pairs with timed_cond */
    pthread_cond_t timed_cond;    /* timed acquirers block here */
    uint32_t timed_waiters;       /* number of timed acquirers waiting */
};
589
590
591 /* Whack
592 */
593 static
KRWLockWhack(KRWLock * self)594 rc_t KRWLockWhack ( KRWLock *self )
595 {
596 rc_t rc;
597 if ( self -> count || self -> rwait || self -> wwait )
598 return RC ( rcPS, rcRWLock, rcDestroying, rcRWLock, rcBusy );
599
600 rc = KLockDestroy ( & self -> lock );
601 if ( rc == 0 )
602 {
603 pthread_cond_destroy ( & self -> rcond );
604 pthread_cond_destroy ( & self -> wcond );
605
606 pthread_cond_destroy ( & self -> timed_cond );
607 pthread_mutex_whack ( & self -> timed_lock );
608
609 free ( self );
610 }
611
612 return rc;
613 }
614
615
616 /* Make
617 * make a simple read/write lock
618 */
KRWLockMake(KRWLock ** lockp)619 LIB_EXPORT rc_t CC KRWLockMake ( KRWLock **lockp )
620 {
621 rc_t rc;
622
623 if ( lockp == NULL )
624 rc = RC ( rcPS, rcRWLock, rcConstructing, rcParam, rcNull );
625 else
626 {
627 KRWLock *lock = malloc ( sizeof * lock );
628 if ( lock == NULL )
629 rc = RC ( rcPS, rcRWLock, rcConstructing, rcMemory, rcExhausted );
630 else
631 {
632 rc = KLockInit ( & lock -> lock );
633 if ( rc == 0 )
634 {
635 rc = pthread_condition_init ( & lock -> rcond );
636 if ( rc == 0 )
637 {
638 rc = pthread_condition_init ( & lock -> wcond );
639 if ( rc == 0 )
640 {
641 rc = pthread_condition_init ( & lock -> timed_cond );
642 if ( rc == 0 )
643 {
644 int status = pthread_mutex_init ( & lock -> timed_lock, NULL );
645 if ( status == 0 )
646 {
647
648 lock -> rwait = lock -> wwait = 0;
649 lock -> count = 0;
650 atomic32_set ( & lock -> refcount, 1 );
651 lock -> timed_waiters = 0;
652 * lockp = lock;
653 return 0;
654 }
655 pthread_cond_destroy ( & lock -> timed_cond );
656 }
657 pthread_cond_destroy ( & lock -> wcond );
658 }
659
660 pthread_cond_destroy ( & lock -> rcond );
661 }
662
663 KLockDestroy ( & lock -> lock );
664 }
665
666 free ( lock );
667 }
668
669 * lockp = NULL;
670 }
671
672 return rc;
673 }
674
675 /* AddRef
676 * Release
677 */
KRWLockAddRef(const KRWLock * cself)678 LIB_EXPORT rc_t CC KRWLockAddRef ( const KRWLock *cself )
679 {
680 if ( cself != NULL )
681 atomic32_inc ( & ( ( KRWLock* ) cself ) -> refcount );
682 return 0;
683 }
684
KRWLockRelease(const KRWLock * cself)685 LIB_EXPORT rc_t CC KRWLockRelease ( const KRWLock *cself )
686 {
687 KRWLock *self = ( KRWLock* ) cself;
688 if ( cself != NULL )
689 {
690 if ( atomic32_dec_and_test ( & self -> refcount ) )
691 {
692 atomic32_set ( & self -> refcount, 1 );
693 return KRWLockWhack ( self );
694 }
695 }
696 return 0;
697 }
698
699
700 /* AcquireShared
701 * acquires read ( shared ) lock
702 */
KRWLockAcquireShared(KRWLock * self)703 LIB_EXPORT rc_t CC KRWLockAcquireShared ( KRWLock *self )
704 {
705 rc_t rc;
706
707 if ( self == NULL )
708 return RC ( rcPS, rcRWLock, rcLocking, rcSelf, rcNull );
709
710 rc = KLockAcquire ( & self -> lock );
711 if ( rc == 0 )
712 {
713 ++ self -> rwait;
714 while ( self -> count < 0 || self -> wwait != 0 )
715 {
716 rc = pthread_condition_wait ( & self -> rcond, & self -> lock . mutex );
717 if ( rc != 0 )
718 break;
719 }
720 -- self -> rwait;
721
722 if ( rc == 0 )
723 {
724 ++ self -> count;
725 }
726
727 KLockUnlock ( & self -> lock );
728 }
729
730 return rc;
731 }
732
/* TimedAcquire
 *  acquire the internal KLock mutex with an optional timeout;
 *  mirrors KTimedLockAcquire but uses the KRWLock's dedicated
 *  timed_lock / timed_cond / timed_waiters members.
 *
 *  "tm" [ IN, NULL OKAY ] - when NULL, falls back to a plain
 *  blocking acquire.
 *
 *  NOTE(review): nothing in this file ever signals "timed_cond"
 *  ( KRWLockUnlock signals only rcond / wcond ), so waiters here
 *  appear to wake only when the timeout expires - confirm intended.
 */
static
rc_t KRWLockTimedAcquire( KRWLock *self, timeout_t *tm )
{
    rc_t rc;

    if ( self == NULL )
        return RC ( rcPS, rcLock, rcLocking, rcSelf, rcNull );

    /* no timeout: plain blocking acquire */
    if ( tm == NULL )
        return pthread_mutex_acquire ( & self -> lock . mutex );

    /* this is ugly, but don't want to prepare inside lock */
    if ( ! tm -> prepared )
        TimeoutPrepare ( tm );

    /* timed_lock guards timed_waiters and pairs with timed_cond */
    rc = pthread_mutex_acquire ( & self -> timed_lock );
    if ( rc == 0 )
    {
        /* opportunistic grab of the internal mutex */
        int status = pthread_mutex_tryacquire ( & self -> lock . mutex );
        if ( status == EBUSY )
        {
            while ( 1 )
            {
                /* advertise ourselves, wait for signal or deadline */
                ++ self -> timed_waiters;
                status = pthread_cond_timedwait ( & self -> timed_cond, & self -> timed_lock, & tm -> ts );
                -- self -> timed_waiters;

                /* interrupted: retry with the same absolute deadline */
                if ( status == EINTR )
                    continue;
                /* timeout or hard error: give up */
                if ( status != 0 )
                    break;
                /* woken: retry the mutex - another thread may have won */
                status = pthread_mutex_tryacquire ( & self -> lock . mutex );
                if ( status != EBUSY )
                    break;
            }
        }

        pthread_mutex_release ( & self -> timed_lock );

        /* map the final pthreads status onto an rc_t */
        switch ( status )
        {
        case 0:
            break;
        case ETIMEDOUT:
            rc = RC ( rcPS, rcLock, rcLocking, rcTimeout, rcExhausted );
            break;
        case EBUSY:
            rc = RC ( rcPS, rcLock, rcLocking, rcLock, rcBusy );
            break;
        case EINVAL:
            rc = RC ( rcPS, rcLock, rcLocking, rcLock, rcInvalid );
            break;
        default:
            rc = RC ( rcPS, rcLock, rcLocking, rcNoObj, rcUnknown );
        }
    }

    return rc;
}
792
KRWLockTimedAcquireShared(KRWLock * self,timeout_t * tm)793 LIB_EXPORT rc_t CC KRWLockTimedAcquireShared ( KRWLock *self, timeout_t *tm )
794 {
795 rc_t rc;
796
797 if ( self == NULL )
798 return RC ( rcPS, rcRWLock, rcLocking, rcSelf, rcNull );
799
800 rc = KRWLockTimedAcquire ( self, tm );
801 if ( rc == 0 )
802 {
803 ++ self -> rwait;
804 while ( self -> count < 0 || self -> wwait != 0 )
805 {
806 rc = pthread_condition_timedwait ( & self -> rcond, & self -> lock . mutex, & tm -> ts );
807 if ( rc != 0 )
808 {
809 if ( GetRCState ( rc ) == rcExhausted && GetRCObject ( rc ) == rcTimeout )
810 rc = ResetRCContext ( rc, rcPS, rcRWLock, rcLocking );
811 break;
812 }
813 }
814 -- self -> rwait;
815
816 if ( rc == 0 )
817 {
818 ++ self -> count;
819 }
820
821 KLockUnlock ( & self -> lock );
822 }
823
824 return rc;
825 }
826
827 /* AcquireExcl
828 * acquires write ( exclusive ) lock
829 */
KRWLockAcquireExcl(KRWLock * self)830 LIB_EXPORT rc_t CC KRWLockAcquireExcl ( KRWLock *self )
831 {
832 rc_t rc;
833
834 if ( self == NULL )
835 return RC ( rcPS, rcRWLock, rcLocking, rcSelf, rcNull );
836
837 rc = KLockAcquire ( & self -> lock );
838 if ( rc == 0 )
839 {
840 ++ self -> wwait;
841 while ( self -> count != 0 )
842 {
843 rc = pthread_condition_wait ( & self -> wcond, & self -> lock . mutex );
844 if ( rc != 0 )
845 break;
846 }
847 -- self -> wwait;
848
849 if ( rc == 0 )
850 {
851 self -> count = -1;
852 }
853
854 KLockUnlock ( & self -> lock );
855 }
856
857 return rc;
858 }
859
KRWLockTimedAcquireExcl(KRWLock * self,timeout_t * tm)860 LIB_EXPORT rc_t CC KRWLockTimedAcquireExcl ( KRWLock *self, timeout_t *tm )
861 {
862 rc_t rc;
863
864 if ( self == NULL )
865 return RC ( rcPS, rcRWLock, rcLocking, rcSelf, rcNull );
866
867 rc = KRWLockTimedAcquire ( self, tm );
868 if ( rc == 0 )
869 {
870 ++ self -> wwait;
871 while ( self -> count != 0 )
872 {
873 rc = pthread_condition_timedwait ( & self -> wcond, & self -> lock . mutex, & tm -> ts );
874 if ( rc != 0 )
875 {
876 if ( GetRCState ( rc ) == rcExhausted && GetRCObject ( rc ) == rcTimeout )
877 rc = ResetRCContext ( rc, rcPS, rcRWLock, rcLocking );
878 break;
879 }
880 }
881 -- self -> wwait;
882
883 if ( rc == 0 )
884 {
885 self -> count = -1;
886 }
887
888 KLockUnlock ( & self -> lock );
889 }
890
891 return rc;
892 }
893
894 /* Unlock
895 * releases lock
896 */
KRWLockUnlock(KRWLock * self)897 LIB_EXPORT rc_t CC KRWLockUnlock ( KRWLock *self )
898 {
899 rc_t rc;
900
901 if ( self == NULL )
902 return RC ( rcPS, rcRWLock, rcUnlocking, rcSelf, rcNull );
903
904 rc = KLockAcquire ( & self -> lock );
905 if ( rc == 0 )
906 {
907 /* release the count */
908 if ( self -> count < 0 )
909 self -> count = 0;
910 else if ( self -> count > 0 )
911 -- self -> count;
912
913 /* if there are writers waiting... */
914 if ( self -> wwait != 0 )
915 {
916 /* don't bother unless the lock is free */
917 if ( self -> count == 0 )
918 pthread_condition_signal ( & self -> wcond );
919 }
920
921 /* if there are readers waiting */
922 else if ( self -> rwait != 0 )
923 {
924 /* any number of readers can come through now */
925 pthread_condition_broadcast ( & self -> rcond );
926 }
927
928 KLockUnlock ( & self -> lock );
929 }
930
931 return rc;
932 }
933