1 /* Locking in multithreaded situations.
2    Copyright (C) 2005-2019 Free Software Foundation, Inc.
3 
4    This program is free software; you can redistribute it and/or modify
5    it under the terms of the GNU General Public License as published by
6    the Free Software Foundation; either version 3, or (at your option)
7    any later version.
8 
9    This program is distributed in the hope that it will be useful,
10    but WITHOUT ANY WARRANTY; without even the implied warranty of
11    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12    GNU General Public License for more details.
13 
14    You should have received a copy of the GNU General Public License
15    along with this program; if not, see <https://www.gnu.org/licenses/>.  */
16 
17 /* Written by Bruno Haible <bruno@clisp.org>, 2005.
18    Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h,
19    gthr-win32.h.  */
20 
21 #include <config.h>
22 
23 #include "glthread/lock.h"
24 
25 /* ========================================================================= */
26 
27 #if USE_POSIX_THREADS
28 
29 /* -------------------------- gl_lock_t datatype -------------------------- */
30 
31 /* ------------------------- gl_rwlock_t datatype ------------------------- */
32 
33 # if HAVE_PTHREAD_RWLOCK && (HAVE_PTHREAD_RWLOCK_RDLOCK_PREFER_WRITER || (defined PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP && (__GNU_LIBRARY__ > 1)))
34 
35 #  ifdef PTHREAD_RWLOCK_INITIALIZER
36 
37 #   if !HAVE_PTHREAD_RWLOCK_RDLOCK_PREFER_WRITER
38      /* glibc with bug https://sourceware.org/bugzilla/show_bug.cgi?id=13701 */
39 
/* Initialize *LOCK as a read-write lock that prefers waiting writers over
   waiting readers, working around glibc bug
   https://sourceware.org/bugzilla/show_bug.cgi?id=13701.
   Returns 0 upon success, or an error code.  */
int
glthread_rwlock_init_for_glibc (pthread_rwlock_t *lock)
{
  pthread_rwlockattr_t attr;
  int result;

  result = pthread_rwlockattr_init (&attr);
  if (result != 0)
    return result;
  /* Only PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP actually causes the
     writer to be preferred; PTHREAD_RWLOCK_PREFER_WRITER_NP does not.  See
     http://man7.org/linux/man-pages/man3/pthread_rwlockattr_setkind_np.3.html */
  result = pthread_rwlockattr_setkind_np (&attr,
                                          PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  if (result == 0)
    result = pthread_rwlock_init (lock, &attr);
  /* pthread_rwlockattr_destroy always returns 0, so it cannot change the
     outcome.  */
  pthread_rwlockattr_destroy (&attr);
  return result;
}
62 
63 #   endif
64 #  else
65 
66 int
glthread_rwlock_init_multithreaded(gl_rwlock_t * lock)67 glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
68 {
69   int err;
70 
71   err = pthread_rwlock_init (&lock->rwlock, NULL);
72   if (err != 0)
73     return err;
74   lock->initialized = 1;
75   return 0;
76 }
77 
/* Acquire *LOCK for reading, lazily initializing it on first use.
   Returns 0 upon success, or an error code.  */
int
glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      /* Serialize the lazy initialization through the guard mutex.  */
      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      /* Re-check under the mutex: another thread may have completed the
         initialization while we were waiting (double-checked locking).  */
      if (!lock->initialized)
        {
          err = glthread_rwlock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_rwlock_rdlock (&lock->rwlock);
}
103 
/* Acquire *LOCK for writing, lazily initializing it on first use.
   Returns 0 upon success, or an error code.  */
int
glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      /* Serialize the lazy initialization through the guard mutex.  */
      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      /* Re-check under the mutex: another thread may have completed the
         initialization while we were waiting (double-checked locking).  */
      if (!lock->initialized)
        {
          err = glthread_rwlock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_rwlock_wrlock (&lock->rwlock);
}
129 
130 int
glthread_rwlock_unlock_multithreaded(gl_rwlock_t * lock)131 glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
132 {
133   if (!lock->initialized)
134     return EINVAL;
135   return pthread_rwlock_unlock (&lock->rwlock);
136 }
137 
138 int
glthread_rwlock_destroy_multithreaded(gl_rwlock_t * lock)139 glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
140 {
141   int err;
142 
143   if (!lock->initialized)
144     return EINVAL;
145   err = pthread_rwlock_destroy (&lock->rwlock);
146   if (err != 0)
147     return err;
148   lock->initialized = 0;
149   return 0;
150 }
151 
152 #  endif
153 
154 # else
155 
156 int
glthread_rwlock_init_multithreaded(gl_rwlock_t * lock)157 glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
158 {
159   int err;
160 
161   err = pthread_mutex_init (&lock->lock, NULL);
162   if (err != 0)
163     return err;
164   err = pthread_cond_init (&lock->waiting_readers, NULL);
165   if (err != 0)
166     return err;
167   err = pthread_cond_init (&lock->waiting_writers, NULL);
168   if (err != 0)
169     return err;
170   lock->waiting_writers_count = 0;
171   lock->runcount = 0;
172   return 0;
173 }
174 
/* Acquire *LOCK for reading.  Blocks while a writer holds the lock or is
   waiting for it.  Returns 0 upon success, or an error code.  */
int
glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow, and whether no writer is waiting.  The latter
     condition is because POSIX recommends that "write locks shall take
     precedence over read locks", to avoid "writer starvation".  */
  while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      err = pthread_cond_wait (&lock->waiting_readers, &lock->lock);
      if (err != 0)
        {
          pthread_mutex_unlock (&lock->lock);
          return err;
        }
    }
  lock->runcount++; /* One more reader is now running.  */
  return pthread_mutex_unlock (&lock->lock);
}
201 
/* Acquire *LOCK for writing.  Blocks until no reader and no writer holds
   the lock.  Returns 0 upon success, or an error code.  */
int
glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  /* Test whether no readers or writers are currently running.  */
  while (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      lock->waiting_writers_count++;
      err = pthread_cond_wait (&lock->waiting_writers, &lock->lock);
      if (err != 0)
        {
          /* Dequeue ourselves before propagating the error.  */
          lock->waiting_writers_count--;
          pthread_mutex_unlock (&lock->lock);
          return err;
        }
      lock->waiting_writers_count--;
    }
  lock->runcount--; /* runcount becomes -1 */
  return pthread_mutex_unlock (&lock->lock);
}
228 
/* Release *LOCK, held either by one writer (runcount == -1) or by one or
   more readers (runcount > 0).  When the lock becomes free, wakes either
   one waiting writer or all waiting readers.  Returns 0 upon success, or
   an error code (EINVAL if the lock was not held).  */
int
glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        {
          pthread_mutex_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  */
      if (!(lock->runcount > 0))
        {
          pthread_mutex_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers_count > 0)
        {
          /* Wake up one of the waiting writers.  */
          err = pthread_cond_signal (&lock->waiting_writers);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->lock);
              return err;
            }
        }
      else
        {
          /* Wake up all waiting readers.  */
          err = pthread_cond_broadcast (&lock->waiting_readers);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->lock);
              return err;
            }
        }
    }
  return pthread_mutex_unlock (&lock->lock);
}
284 
285 int
glthread_rwlock_destroy_multithreaded(gl_rwlock_t * lock)286 glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
287 {
288   int err;
289 
290   err = pthread_mutex_destroy (&lock->lock);
291   if (err != 0)
292     return err;
293   err = pthread_cond_destroy (&lock->waiting_readers);
294   if (err != 0)
295     return err;
296   err = pthread_cond_destroy (&lock->waiting_writers);
297   if (err != 0)
298     return err;
299   return 0;
300 }
301 
302 # endif
303 
304 /* --------------------- gl_recursive_lock_t datatype --------------------- */
305 
306 # if HAVE_PTHREAD_MUTEX_RECURSIVE
307 
308 #  if defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
309 
310 int
glthread_recursive_lock_init_multithreaded(gl_recursive_lock_t * lock)311 glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
312 {
313   pthread_mutexattr_t attributes;
314   int err;
315 
316   err = pthread_mutexattr_init (&attributes);
317   if (err != 0)
318     return err;
319   err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
320   if (err != 0)
321     {
322       pthread_mutexattr_destroy (&attributes);
323       return err;
324     }
325   err = pthread_mutex_init (lock, &attributes);
326   if (err != 0)
327     {
328       pthread_mutexattr_destroy (&attributes);
329       return err;
330     }
331   err = pthread_mutexattr_destroy (&attributes);
332   if (err != 0)
333     return err;
334   return 0;
335 }
336 
337 #  else
338 
339 int
glthread_recursive_lock_init_multithreaded(gl_recursive_lock_t * lock)340 glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
341 {
342   pthread_mutexattr_t attributes;
343   int err;
344 
345   err = pthread_mutexattr_init (&attributes);
346   if (err != 0)
347     return err;
348   err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
349   if (err != 0)
350     {
351       pthread_mutexattr_destroy (&attributes);
352       return err;
353     }
354   err = pthread_mutex_init (&lock->recmutex, &attributes);
355   if (err != 0)
356     {
357       pthread_mutexattr_destroy (&attributes);
358       return err;
359     }
360   err = pthread_mutexattr_destroy (&attributes);
361   if (err != 0)
362     return err;
363   lock->initialized = 1;
364   return 0;
365 }
366 
/* Acquire *LOCK (recursively), lazily initializing it on first use.
   Returns 0 upon success, or an error code.  */
int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      /* Serialize the lazy initialization through the guard mutex.  */
      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      /* Re-check under the mutex: another thread may have completed the
         initialization while we were waiting (double-checked locking).  */
      if (!lock->initialized)
        {
          err = glthread_recursive_lock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_mutex_lock (&lock->recmutex);
}
392 
393 int
glthread_recursive_lock_unlock_multithreaded(gl_recursive_lock_t * lock)394 glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
395 {
396   if (!lock->initialized)
397     return EINVAL;
398   return pthread_mutex_unlock (&lock->recmutex);
399 }
400 
401 int
glthread_recursive_lock_destroy_multithreaded(gl_recursive_lock_t * lock)402 glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
403 {
404   int err;
405 
406   if (!lock->initialized)
407     return EINVAL;
408   err = pthread_mutex_destroy (&lock->recmutex);
409   if (err != 0)
410     return err;
411   lock->initialized = 0;
412   return 0;
413 }
414 
415 #  endif
416 
417 # else
418 
419 int
glthread_recursive_lock_init_multithreaded(gl_recursive_lock_t * lock)420 glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
421 {
422   int err;
423 
424   err = pthread_mutex_init (&lock->mutex, NULL);
425   if (err != 0)
426     return err;
427   lock->owner = (pthread_t) 0;
428   lock->depth = 0;
429   return 0;
430 }
431 
/* Acquire *LOCK, incrementing the nesting depth if this thread already
   owns it.  Returns 0 upon success, EAGAIN if the depth counter would
   wrap around, or another error code.  */
int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_t self = pthread_self ();
  /* NOTE(review): lock->owner is read here without holding the mutex;
     presumably this is benign because only the owning thread can see its
     own id there — confirm against the data-race assumptions of the
     target platforms.  */
  if (lock->owner != self)
    {
      int err;

      err = pthread_mutex_lock (&lock->mutex);
      if (err != 0)
        return err;
      lock->owner = self;
    }
  if (++(lock->depth) == 0) /* wraparound? */
    {
      lock->depth--;
      return EAGAIN;
    }
  return 0;
}
452 
453 int
glthread_recursive_lock_unlock_multithreaded(gl_recursive_lock_t * lock)454 glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
455 {
456   if (lock->owner != pthread_self ())
457     return EPERM;
458   if (lock->depth == 0)
459     return EINVAL;
460   if (--(lock->depth) == 0)
461     {
462       lock->owner = (pthread_t) 0;
463       return pthread_mutex_unlock (&lock->mutex);
464     }
465   else
466     return 0;
467 }
468 
469 int
glthread_recursive_lock_destroy_multithreaded(gl_recursive_lock_t * lock)470 glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
471 {
472   if (lock->owner != (pthread_t) 0)
473     return EBUSY;
474   return pthread_mutex_destroy (&lock->mutex);
475 }
476 
477 # endif
478 
479 /* -------------------------- gl_once_t datatype -------------------------- */
480 
/* A pristine once-control value, used as the reference for detecting
   first use.  */
static const pthread_once_t fresh_once = PTHREAD_ONCE_INIT;

/* In a single-threaded program: return 1 if this is the first use of
   *ONCE_CONTROL (the caller should then run the init function), 0
   otherwise.  Marks the control as used by flipping its first byte.  */
int
glthread_once_singlethreaded (pthread_once_t *once_control)
{
  /* We don't know whether pthread_once_t is an integer type, a floating-point
     type, a pointer type, or a structure type.  */
  char *firstbyte = (char *)once_control;
  if (*firstbyte == *(const char *)&fresh_once)
    {
      /* First time use of once_control.  Invert the first byte.  */
      *firstbyte = ~ *(const char *)&fresh_once;
      return 1;
    }
  else
    return 0;
}
498 
499 #endif
500 
501 /* ========================================================================= */
502 
503 #if USE_PTH_THREADS
504 
505 /* Use the GNU Pth threads library.  */
506 
507 /* -------------------------- gl_lock_t datatype -------------------------- */
508 
509 /* ------------------------- gl_rwlock_t datatype ------------------------- */
510 
511 # if !HAVE_PTH_RWLOCK_ACQUIRE_PREFER_WRITER
512 
513 int
glthread_rwlock_init_multithreaded(gl_rwlock_t * lock)514 glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
515 {
516   if (!pth_mutex_init (&lock->lock))
517     return errno;
518   if (!pth_cond_init (&lock->waiting_readers))
519     return errno;
520   if (!pth_cond_init (&lock->waiting_writers))
521     return errno;
522   lock->waiting_writers_count = 0;
523   lock->runcount = 0;
524   lock->initialized = 1;
525   return 0;
526 }
527 
/* Acquire *LOCK for reading.  Returns 0 upon success, or an errno value.  */
int
glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
{
  /* Lazy initialization without a guard; presumably safe because GNU Pth
     threads are cooperatively scheduled — confirm.  */
  if (!lock->initialized)
    glthread_rwlock_init_multithreaded (lock);
  if (!pth_mutex_acquire (&lock->lock, 0, NULL))
    return errno;
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow, and whether no writer is waiting.  The latter
     condition is because POSIX recommends that "write locks shall take
     precedence over read locks", to avoid "writer starvation".  */
  while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      if (!pth_cond_await (&lock->waiting_readers, &lock->lock, NULL))
        {
          /* Save errno before pth_mutex_release can overwrite it.  */
          int err = errno;
          pth_mutex_release (&lock->lock);
          return err;
        }
    }
  lock->runcount++;
  return (!pth_mutex_release (&lock->lock) ? errno : 0);
}
553 
/* Acquire *LOCK for writing.  Returns 0 upon success, or an errno value.  */
int
glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
{
  /* Lazy initialization without a guard; presumably safe because GNU Pth
     threads are cooperatively scheduled — confirm.  */
  if (!lock->initialized)
    glthread_rwlock_init_multithreaded (lock);
  if (!pth_mutex_acquire (&lock->lock, 0, NULL))
    return errno;
  /* Test whether no readers or writers are currently running.  */
  while (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      lock->waiting_writers_count++;
      if (!pth_cond_await (&lock->waiting_writers, &lock->lock, NULL))
        {
          /* Save errno, dequeue ourselves, and propagate the error.  */
          int err = errno;
          lock->waiting_writers_count--;
          pth_mutex_release (&lock->lock);
          return err;
        }
      lock->waiting_writers_count--;
    }
  lock->runcount--; /* runcount becomes -1 */
  return (!pth_mutex_release (&lock->lock) ? errno : 0);
}
579 
580 int
glthread_rwlock_unlock_multithreaded(gl_rwlock_t * lock)581 glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
582 {
583   int err;
584 
585   if (!lock->initialized)
586     return EINVAL;
587   if (!pth_mutex_acquire (&lock->lock, 0, NULL))
588     return errno;
589   if (lock->runcount < 0)
590     {
591       /* Drop a writer lock.  */
592       if (!(lock->runcount == -1))
593         {
594           pth_mutex_release (&lock->lock);
595           return EINVAL;
596         }
597       lock->runcount = 0;
598     }
599   else
600     {
601       /* Drop a reader lock.  */
602       if (!(lock->runcount > 0))
603         {
604           pth_mutex_release (&lock->lock);
605           return EINVAL;
606         }
607       lock->runcount--;
608     }
609   if (lock->runcount == 0)
610     {
611       /* POSIX recommends that "write locks shall take precedence over read
612          locks", to avoid "writer starvation".  */
613       if (lock->waiting_writers_count > 0)
614         {
615           /* Wake up one of the waiting writers.  */
616           if (!pth_cond_notify (&lock->waiting_writers, FALSE))
617             {
618               int err = errno;
619               pth_mutex_release (&lock->lock);
620               return err;
621             }
622         }
623       else
624         {
625           /* Wake up all waiting readers.  */
626           if (!pth_cond_notify (&lock->waiting_readers, TRUE))
627             {
628               int err = errno;
629               pth_mutex_release (&lock->lock);
630               return err;
631             }
632         }
633     }
634   return (!pth_mutex_release (&lock->lock) ? errno : 0);
635 }
636 
637 int
glthread_rwlock_destroy_multithreaded(gl_rwlock_t * lock)638 glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
639 {
640   lock->initialized = 0;
641   return 0;
642 }
643 
644 # endif
645 
646 /* --------------------- gl_recursive_lock_t datatype --------------------- */
647 
648 /* -------------------------- gl_once_t datatype -------------------------- */
649 
/* Trampoline for pth_once: ARG points to a function pointer of type
   'void (*) (void)'; fetch it and invoke it.  */
static void
glthread_once_call (void *arg)
{
  void (**fpp) (void) = (void (**) (void)) arg;
  (*fpp) ();
}
657 
658 int
glthread_once_multithreaded(pth_once_t * once_control,void (* initfunction)(void))659 glthread_once_multithreaded (pth_once_t *once_control, void (*initfunction) (void))
660 {
661   void (*temp) (void) = initfunction;
662   return (!pth_once (once_control, glthread_once_call, &temp) ? errno : 0);
663 }
664 
665 int
glthread_once_singlethreaded(pth_once_t * once_control)666 glthread_once_singlethreaded (pth_once_t *once_control)
667 {
668   /* We know that pth_once_t is an integer type.  */
669   if (*once_control == PTH_ONCE_INIT)
670     {
671       /* First time use of once_control.  Invert the marker.  */
672       *once_control = ~ PTH_ONCE_INIT;
673       return 1;
674     }
675   else
676     return 0;
677 }
678 
679 #endif
680 
681 /* ========================================================================= */
682 
683 #if USE_SOLARIS_THREADS
684 
685 /* Use the old Solaris threads library.  */
686 
687 /* -------------------------- gl_lock_t datatype -------------------------- */
688 
689 /* ------------------------- gl_rwlock_t datatype ------------------------- */
690 
691 /* --------------------- gl_recursive_lock_t datatype --------------------- */
692 
693 int
glthread_recursive_lock_init_multithreaded(gl_recursive_lock_t * lock)694 glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
695 {
696   int err;
697 
698   err = mutex_init (&lock->mutex, USYNC_THREAD, NULL);
699   if (err != 0)
700     return err;
701   lock->owner = (thread_t) 0;
702   lock->depth = 0;
703   return 0;
704 }
705 
/* Acquire *LOCK, incrementing the nesting depth if this thread already
   owns it.  Returns 0 upon success, EAGAIN if the depth counter would
   wrap around, or another error code.  */
int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  thread_t self = thr_self ();
  /* NOTE(review): lock->owner is read here without holding the mutex;
     presumably benign because only the owning thread can see its own id
     there — confirm.  */
  if (lock->owner != self)
    {
      int err;

      err = mutex_lock (&lock->mutex);
      if (err != 0)
        return err;
      lock->owner = self;
    }
  if (++(lock->depth) == 0) /* wraparound? */
    {
      lock->depth--;
      return EAGAIN;
    }
  return 0;
}
726 
727 int
glthread_recursive_lock_unlock_multithreaded(gl_recursive_lock_t * lock)728 glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
729 {
730   if (lock->owner != thr_self ())
731     return EPERM;
732   if (lock->depth == 0)
733     return EINVAL;
734   if (--(lock->depth) == 0)
735     {
736       lock->owner = (thread_t) 0;
737       return mutex_unlock (&lock->mutex);
738     }
739   else
740     return 0;
741 }
742 
743 int
glthread_recursive_lock_destroy_multithreaded(gl_recursive_lock_t * lock)744 glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
745 {
746   if (lock->owner != (thread_t) 0)
747     return EBUSY;
748   return mutex_destroy (&lock->mutex);
749 }
750 
751 /* -------------------------- gl_once_t datatype -------------------------- */
752 
/* Run INITFUNCTION exactly once, guarded by *ONCE_CONTROL.
   Returns 0 upon success, or an error code.  */
int
glthread_once_multithreaded (gl_once_t *once_control, void (*initfunction) (void))
{
  if (!once_control->inited)
    {
      int err;

      /* Use the mutex to guarantee that if another thread is already calling
         the initfunction, this thread waits until it's finished.  */
      err = mutex_lock (&once_control->mutex);
      if (err != 0)
        return err;
      /* Re-check under the mutex (double-checked locking).  */
      if (!once_control->inited)
        {
          /* NOTE(review): 'inited' is set before INITFUNCTION runs, not
             after — confirm that no caller relies on the flag implying the
             init has completed.  */
          once_control->inited = 1;
          initfunction ();
        }
      return mutex_unlock (&once_control->mutex);
    }
  else
    return 0;
}
775 
776 int
glthread_once_singlethreaded(gl_once_t * once_control)777 glthread_once_singlethreaded (gl_once_t *once_control)
778 {
779   /* We know that gl_once_t contains an integer type.  */
780   if (!once_control->inited)
781     {
782       /* First time use of once_control.  Invert the marker.  */
783       once_control->inited = ~ 0;
784       return 1;
785     }
786   else
787     return 0;
788 }
789 
790 #endif
791 
792 /* ========================================================================= */
793 
794 #if USE_WINDOWS_THREADS
795 
796 /* -------------------------- gl_lock_t datatype -------------------------- */
797 
798 void
glthread_lock_init_func(gl_lock_t * lock)799 glthread_lock_init_func (gl_lock_t *lock)
800 {
801   InitializeCriticalSection (&lock->lock);
802   lock->guard.done = 1;
803 }
804 
/* Acquire *LOCK, lazily initializing it on first use.  Always returns 0.  */
int
glthread_lock_lock_func (gl_lock_t *lock)
{
  if (!lock->guard.done)
    {
      /* Assumes guard.started is statically initialized to -1, so exactly
         one thread sees the increment yield 0 — confirm against the
         initializer in the header.  */
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_lock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  return 0;
}
822 
823 int
glthread_lock_unlock_func(gl_lock_t * lock)824 glthread_lock_unlock_func (gl_lock_t *lock)
825 {
826   if (!lock->guard.done)
827     return EINVAL;
828   LeaveCriticalSection (&lock->lock);
829   return 0;
830 }
831 
832 int
glthread_lock_destroy_func(gl_lock_t * lock)833 glthread_lock_destroy_func (gl_lock_t *lock)
834 {
835   if (!lock->guard.done)
836     return EINVAL;
837   DeleteCriticalSection (&lock->lock);
838   lock->guard.done = 0;
839   return 0;
840 }
841 
842 /* ------------------------- gl_rwlock_t datatype ------------------------- */
843 
844 /* In this file, the waitqueues are implemented as circular arrays.  */
845 #define gl_waitqueue_t gl_carray_waitqueue_t
846 
847 static void
gl_waitqueue_init(gl_waitqueue_t * wq)848 gl_waitqueue_init (gl_waitqueue_t *wq)
849 {
850   wq->array = NULL;
851   wq->count = 0;
852   wq->alloc = 0;
853   wq->offset = 0;
854 }
855 
856 /* Enqueues the current thread, represented by an event, in a wait queue.
857    Returns INVALID_HANDLE_VALUE if an allocation failure occurs.  */
858 static HANDLE
gl_waitqueue_add(gl_waitqueue_t * wq)859 gl_waitqueue_add (gl_waitqueue_t *wq)
860 {
861   HANDLE event;
862   unsigned int index;
863 
864   if (wq->count == wq->alloc)
865     {
866       unsigned int new_alloc = 2 * wq->alloc + 1;
867       HANDLE *new_array =
868         (HANDLE *) realloc (wq->array, new_alloc * sizeof (HANDLE));
869       if (new_array == NULL)
870         /* No more memory.  */
871         return INVALID_HANDLE_VALUE;
872       /* Now is a good opportunity to rotate the array so that its contents
873          starts at offset 0.  */
874       if (wq->offset > 0)
875         {
876           unsigned int old_count = wq->count;
877           unsigned int old_alloc = wq->alloc;
878           unsigned int old_offset = wq->offset;
879           unsigned int i;
880           if (old_offset + old_count > old_alloc)
881             {
882               unsigned int limit = old_offset + old_count - old_alloc;
883               for (i = 0; i < limit; i++)
884                 new_array[old_alloc + i] = new_array[i];
885             }
886           for (i = 0; i < old_count; i++)
887             new_array[i] = new_array[old_offset + i];
888           wq->offset = 0;
889         }
890       wq->array = new_array;
891       wq->alloc = new_alloc;
892     }
893   /* Whether the created event is a manual-reset one or an auto-reset one,
894      does not matter, since we will wait on it only once.  */
895   event = CreateEvent (NULL, TRUE, FALSE, NULL);
896   if (event == INVALID_HANDLE_VALUE)
897     /* No way to allocate an event.  */
898     return INVALID_HANDLE_VALUE;
899   index = wq->offset + wq->count;
900   if (index >= wq->alloc)
901     index -= wq->alloc;
902   wq->array[index] = event;
903   wq->count++;
904   return event;
905 }
906 
907 /* Notifies the first thread from a wait queue and dequeues it.  */
908 static void
gl_waitqueue_notify_first(gl_waitqueue_t * wq)909 gl_waitqueue_notify_first (gl_waitqueue_t *wq)
910 {
911   SetEvent (wq->array[wq->offset + 0]);
912   wq->offset++;
913   wq->count--;
914   if (wq->count == 0 || wq->offset == wq->alloc)
915     wq->offset = 0;
916 }
917 
918 /* Notifies all threads from a wait queue and dequeues them all.  */
919 static void
gl_waitqueue_notify_all(gl_waitqueue_t * wq)920 gl_waitqueue_notify_all (gl_waitqueue_t *wq)
921 {
922   unsigned int i;
923 
924   for (i = 0; i < wq->count; i++)
925     {
926       unsigned int index = wq->offset + i;
927       if (index >= wq->alloc)
928         index -= wq->alloc;
929       SetEvent (wq->array[index]);
930     }
931   wq->count = 0;
932   wq->offset = 0;
933 }
934 
935 void
glthread_rwlock_init_func(gl_rwlock_t * lock)936 glthread_rwlock_init_func (gl_rwlock_t *lock)
937 {
938   InitializeCriticalSection (&lock->lock);
939   gl_waitqueue_init (&lock->waiting_readers);
940   gl_waitqueue_init (&lock->waiting_writers);
941   lock->runcount = 0;
942   lock->guard.done = 1;
943 }
944 
/* Acquire *LOCK for reading, lazily initializing it on first use.
   Always returns 0; aborts on inconsistent internal state.  */
int
glthread_rwlock_rdlock_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_rwlock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow, and whether no writer is waiting.  The latter
     condition is because POSIX recommends that "write locks shall take
     precedence over read locks", to avoid "writer starvation".  */
  if (!(lock->runcount + 1 > 0 && lock->waiting_writers.count == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      HANDLE event = gl_waitqueue_add (&lock->waiting_readers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_readers, incremented lock->runcount.  */
          if (!(lock->runcount > 0))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure.  Weird.  Fall back to busy-polling: release
             the critical section, sleep briefly, and re-check until a reader
             slot is available.  NOTE(review): this fallback does not honor
             the writer-preference condition above — confirm intended.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount + 1 > 0));
        }
    }
  lock->runcount++;
  LeaveCriticalSection (&lock->lock);
  return 0;
}
1000 
/* Acquire *LOCK for writing, lazily initializing it on first use.
   Always returns 0; aborts on inconsistent internal state.  */
int
glthread_rwlock_wrlock_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_rwlock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether no readers or writers are currently running.  */
  if (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      HANDLE event = gl_waitqueue_add (&lock->waiting_writers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_writers, set lock->runcount = -1.  */
          if (!(lock->runcount == -1))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure.  Weird.  Fall back to busy-polling: release
             the critical section, sleep briefly, and re-check until the lock
             becomes free.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount == 0));
        }
    }
  lock->runcount--; /* runcount becomes -1 */
  LeaveCriticalSection (&lock->lock);
  return 0;
}
1053 
/* Release LOCK, which must be held by the caller either for reading or
   for writing (distinguished via the sign of lock->runcount).
   Returns 0 on success, EINVAL if the lock was never initialized, EPERM
   if the lock is not held.  */
int
glthread_rwlock_unlock_func (gl_rwlock_t *lock)
{
  /* A never-initialized lock cannot be held, hence cannot be unlocked.  */
  if (!lock->guard.done)
    return EINVAL;
  EnterCriticalSection (&lock->lock);
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        abort ();
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  */
      if (!(lock->runcount > 0))
        {
          /* Unlock without a preceding rdlock.  */
          LeaveCriticalSection (&lock->lock);
          return EPERM;
        }
      lock->runcount--;
    }
  /* If the lock just became free, hand it over to waiting threads.  The
     bookkeeping for the woken threads is done HERE, on their behalf,
     before they resume (they assert the resulting runcount on wakeup).  */
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers.count > 0)
        {
          /* Wake up one of the waiting writers.  */
          lock->runcount--;
          gl_waitqueue_notify_first (&lock->waiting_writers);
        }
      else
        {
          /* Wake up all waiting readers.  */
          lock->runcount += lock->waiting_readers.count;
          gl_waitqueue_notify_all (&lock->waiting_readers);
        }
    }
  LeaveCriticalSection (&lock->lock);
  return 0;
}
1097 
1098 int
glthread_rwlock_destroy_func(gl_rwlock_t * lock)1099 glthread_rwlock_destroy_func (gl_rwlock_t *lock)
1100 {
1101   if (!lock->guard.done)
1102     return EINVAL;
1103   if (lock->runcount != 0)
1104     return EBUSY;
1105   DeleteCriticalSection (&lock->lock);
1106   if (lock->waiting_readers.array != NULL)
1107     free (lock->waiting_readers.array);
1108   if (lock->waiting_writers.array != NULL)
1109     free (lock->waiting_writers.array);
1110   lock->guard.done = 0;
1111   return 0;
1112 }
1113 
1114 /* --------------------- gl_recursive_lock_t datatype --------------------- */
1115 
/* Initialize LOCK as a recursive lock.
   The store to guard.done must remain the LAST statement: it is the
   flag that concurrently locking threads spin on while waiting for
   initialization to complete.  */
void
glthread_recursive_lock_init_func (gl_recursive_lock_t *lock)
{
  lock->owner = 0;   /* no owning thread yet */
  lock->depth = 0;   /* recursion depth 0 = unlocked */
  InitializeCriticalSection (&lock->lock);
  lock->guard.done = 1;   /* publish: lock is now initialized */
}
1124 
/* Acquire LOCK recursively.  A thread that already owns the lock merely
   increments the recursion depth.
   Returns 0 on success, EAGAIN if the depth counter would wrap around.  */
int
glthread_recursive_lock_lock_func (gl_recursive_lock_t *lock)
{
  /* Lazy one-time initialization, same (started, done) guard protocol as
     the other lock kinds in this file.  NOTE(review): the first
     incrementer of guard.started sees 0, so its initial value is
     presumably -1 — confirm against glthread/lock.h.  */
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_recursive_lock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  {
    DWORD self = GetCurrentThreadId ();
    /* Only block on the critical section if some other thread (or nobody)
       owns the lock; re-entry by the owner just bumps the depth.  */
    if (lock->owner != self)
      {
        EnterCriticalSection (&lock->lock);
        lock->owner = self;
      }
    if (++(lock->depth) == 0) /* wraparound? */
      {
        /* Undo the increment and report the overflow instead of silently
           losing the recursion count.  */
        lock->depth--;
        return EAGAIN;
      }
  }
  return 0;
}
1154 
1155 int
glthread_recursive_lock_unlock_func(gl_recursive_lock_t * lock)1156 glthread_recursive_lock_unlock_func (gl_recursive_lock_t *lock)
1157 {
1158   if (lock->owner != GetCurrentThreadId ())
1159     return EPERM;
1160   if (lock->depth == 0)
1161     return EINVAL;
1162   if (--(lock->depth) == 0)
1163     {
1164       lock->owner = 0;
1165       LeaveCriticalSection (&lock->lock);
1166     }
1167   return 0;
1168 }
1169 
/* Destroy LOCK.
   Returns 0 on success, EBUSY if some thread still owns the lock.
   The caller must ensure no other thread is using the lock.  */
int
glthread_recursive_lock_destroy_func (gl_recursive_lock_t *lock)
{
  if (lock->owner != 0)
    return EBUSY;
  DeleteCriticalSection (&lock->lock);
  /* Mark as uninitialized, so a later lock attempt would re-trigger the
     lazy-init guard.  */
  lock->guard.done = 0;
  return 0;
}
1179 
1180 /* -------------------------- gl_once_t datatype -------------------------- */
1181 
/* Execute INITFUNCTION exactly once across all threads passing the same
   ONCE_CONTROL, blocking late arrivals until it has completed.
   State protocol visible in this function:
     once_control->inited  > 0  initfunction has completed;
     once_control->inited == 0  set by the initializing thread after it
                                holds the critical section (late threads
                                may then block on that section);
     once_control->inited  < 0  presumably the pristine initial state —
                                confirm against gl_once_define in
                                glthread/lock.h.  */
void
glthread_once_func (gl_once_t *once_control, void (*initfunction) (void))
{
  if (once_control->inited <= 0)
    {
      /* Race to be the initializer: the first thread to increment
         'started' sees 0 and wins.  */
      if (InterlockedIncrement (&once_control->started) == 0)
        {
          /* This thread is the first one to come to this once_control.  */
          InitializeCriticalSection (&once_control->lock);
          EnterCriticalSection (&once_control->lock);
          /* Setting inited to 0 (after the section is held) tells waiters
             that they may now block on the critical section.  */
          once_control->inited = 0;
          initfunction ();
          once_control->inited = 1;
          LeaveCriticalSection (&once_control->lock);
        }
      else
        {
          /* Undo last operation.  */
          InterlockedDecrement (&once_control->started);
          /* Some other thread has already started the initialization.
             Yield the CPU while waiting for the other thread to finish
             initializing and taking the lock.  */
          while (once_control->inited < 0)
            Sleep (0);
          /* Re-check: initfunction may already have completed (inited > 0),
             in which case no blocking is needed.  */
          if (once_control->inited <= 0)
            {
              /* Take the lock.  This blocks until the other thread has
                 finished calling the initfunction.  */
              EnterCriticalSection (&once_control->lock);
              LeaveCriticalSection (&once_control->lock);
              if (!(once_control->inited > 0))
                abort ();
            }
        }
    }
}
1218 
1219 #endif
1220 
1221 /* ========================================================================= */
1222