1 /* Locking in multithreaded situations.
2 Copyright (C) 2005-2012 Free Software Foundation, Inc.
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 3, or (at your option)
7 any later version.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software Foundation,
16 Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
17
18 /* Written by Bruno Haible <bruno@clisp.org>, 2005.
19 Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h,
20 gthr-win32.h. */
21
22 #include <config.h>
23
24 #include "glthread/lock.h"
25
26 /* ========================================================================= */
27
28 #if USE_POSIX_THREADS
29
30 /* -------------------------- gl_lock_t datatype -------------------------- */
31
32 /* ------------------------- gl_rwlock_t datatype ------------------------- */
33
34 # if HAVE_PTHREAD_RWLOCK
35
36 # if !defined PTHREAD_RWLOCK_INITIALIZER
37
38 int
glthread_rwlock_init_multithreaded(gl_rwlock_t * lock)39 glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
40 {
41 int err;
42
43 err = pthread_rwlock_init (&lock->rwlock, NULL);
44 if (err != 0)
45 return err;
46 lock->initialized = 1;
47 return 0;
48 }
49
50 int
glthread_rwlock_rdlock_multithreaded(gl_rwlock_t * lock)51 glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
52 {
53 if (!lock->initialized)
54 {
55 int err;
56
57 err = pthread_mutex_lock (&lock->guard);
58 if (err != 0)
59 return err;
60 if (!lock->initialized)
61 {
62 err = glthread_rwlock_init_multithreaded (lock);
63 if (err != 0)
64 {
65 pthread_mutex_unlock (&lock->guard);
66 return err;
67 }
68 }
69 err = pthread_mutex_unlock (&lock->guard);
70 if (err != 0)
71 return err;
72 }
73 return pthread_rwlock_rdlock (&lock->rwlock);
74 }
75
76 int
glthread_rwlock_wrlock_multithreaded(gl_rwlock_t * lock)77 glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
78 {
79 if (!lock->initialized)
80 {
81 int err;
82
83 err = pthread_mutex_lock (&lock->guard);
84 if (err != 0)
85 return err;
86 if (!lock->initialized)
87 {
88 err = glthread_rwlock_init_multithreaded (lock);
89 if (err != 0)
90 {
91 pthread_mutex_unlock (&lock->guard);
92 return err;
93 }
94 }
95 err = pthread_mutex_unlock (&lock->guard);
96 if (err != 0)
97 return err;
98 }
99 return pthread_rwlock_wrlock (&lock->rwlock);
100 }
101
102 int
glthread_rwlock_unlock_multithreaded(gl_rwlock_t * lock)103 glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
104 {
105 if (!lock->initialized)
106 return EINVAL;
107 return pthread_rwlock_unlock (&lock->rwlock);
108 }
109
110 int
glthread_rwlock_destroy_multithreaded(gl_rwlock_t * lock)111 glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
112 {
113 int err;
114
115 if (!lock->initialized)
116 return EINVAL;
117 err = pthread_rwlock_destroy (&lock->rwlock);
118 if (err != 0)
119 return err;
120 lock->initialized = 0;
121 return 0;
122 }
123
124 # endif
125
126 # else
127
128 int
glthread_rwlock_init_multithreaded(gl_rwlock_t * lock)129 glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
130 {
131 int err;
132
133 err = pthread_mutex_init (&lock->lock, NULL);
134 if (err != 0)
135 return err;
136 err = pthread_cond_init (&lock->waiting_readers, NULL);
137 if (err != 0)
138 return err;
139 err = pthread_cond_init (&lock->waiting_writers, NULL);
140 if (err != 0)
141 return err;
142 lock->waiting_writers_count = 0;
143 lock->runcount = 0;
144 return 0;
145 }
146
147 int
glthread_rwlock_rdlock_multithreaded(gl_rwlock_t * lock)148 glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
149 {
150 int err;
151
152 err = pthread_mutex_lock (&lock->lock);
153 if (err != 0)
154 return err;
155 /* Test whether only readers are currently running, and whether the runcount
156 field will not overflow. */
157 /* POSIX says: "It is implementation-defined whether the calling thread
158 acquires the lock when a writer does not hold the lock and there are
159 writers blocked on the lock." Let's say, no: give the writers a higher
160 priority. */
161 while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
162 {
163 /* This thread has to wait for a while. Enqueue it among the
164 waiting_readers. */
165 err = pthread_cond_wait (&lock->waiting_readers, &lock->lock);
166 if (err != 0)
167 {
168 pthread_mutex_unlock (&lock->lock);
169 return err;
170 }
171 }
172 lock->runcount++;
173 return pthread_mutex_unlock (&lock->lock);
174 }
175
176 int
glthread_rwlock_wrlock_multithreaded(gl_rwlock_t * lock)177 glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
178 {
179 int err;
180
181 err = pthread_mutex_lock (&lock->lock);
182 if (err != 0)
183 return err;
184 /* Test whether no readers or writers are currently running. */
185 while (!(lock->runcount == 0))
186 {
187 /* This thread has to wait for a while. Enqueue it among the
188 waiting_writers. */
189 lock->waiting_writers_count++;
190 err = pthread_cond_wait (&lock->waiting_writers, &lock->lock);
191 if (err != 0)
192 {
193 lock->waiting_writers_count--;
194 pthread_mutex_unlock (&lock->lock);
195 return err;
196 }
197 lock->waiting_writers_count--;
198 }
199 lock->runcount--; /* runcount becomes -1 */
200 return pthread_mutex_unlock (&lock->lock);
201 }
202
int
glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
{
  /* Releases either a reader or a writer lock, inferred from the sign of
     runcount (-1 = one writer, > 0 = number of active readers), then wakes
     the appropriate waiters once the lock becomes free.  */
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  Only -1 is a legal writer state; anything
         else means the caller does not actually hold the lock.  */
      if (!(lock->runcount == -1))
        {
          pthread_mutex_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  Unlocking with runcount == 0 means no lock
         was held at all.  */
      if (!(lock->runcount > 0))
        {
          pthread_mutex_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers_count > 0)
        {
          /* Wake up one of the waiting writers.  */
          err = pthread_cond_signal (&lock->waiting_writers);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->lock);
              return err;
            }
        }
      else
        {
          /* Wake up all waiting readers.  */
          err = pthread_cond_broadcast (&lock->waiting_readers);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->lock);
              return err;
            }
        }
    }
  return pthread_mutex_unlock (&lock->lock);
}
258
259 int
glthread_rwlock_destroy_multithreaded(gl_rwlock_t * lock)260 glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
261 {
262 int err;
263
264 err = pthread_mutex_destroy (&lock->lock);
265 if (err != 0)
266 return err;
267 err = pthread_cond_destroy (&lock->waiting_readers);
268 if (err != 0)
269 return err;
270 err = pthread_cond_destroy (&lock->waiting_writers);
271 if (err != 0)
272 return err;
273 return 0;
274 }
275
276 # endif
277
278 /* --------------------- gl_recursive_lock_t datatype --------------------- */
279
280 # if HAVE_PTHREAD_MUTEX_RECURSIVE
281
282 # if defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
283
284 int
glthread_recursive_lock_init_multithreaded(gl_recursive_lock_t * lock)285 glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
286 {
287 pthread_mutexattr_t attributes;
288 int err;
289
290 err = pthread_mutexattr_init (&attributes);
291 if (err != 0)
292 return err;
293 err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
294 if (err != 0)
295 {
296 pthread_mutexattr_destroy (&attributes);
297 return err;
298 }
299 err = pthread_mutex_init (lock, &attributes);
300 if (err != 0)
301 {
302 pthread_mutexattr_destroy (&attributes);
303 return err;
304 }
305 err = pthread_mutexattr_destroy (&attributes);
306 if (err != 0)
307 return err;
308 return 0;
309 }
310
311 # else
312
313 int
glthread_recursive_lock_init_multithreaded(gl_recursive_lock_t * lock)314 glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
315 {
316 pthread_mutexattr_t attributes;
317 int err;
318
319 err = pthread_mutexattr_init (&attributes);
320 if (err != 0)
321 return err;
322 err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
323 if (err != 0)
324 {
325 pthread_mutexattr_destroy (&attributes);
326 return err;
327 }
328 err = pthread_mutex_init (&lock->recmutex, &attributes);
329 if (err != 0)
330 {
331 pthread_mutexattr_destroy (&attributes);
332 return err;
333 }
334 err = pthread_mutexattr_destroy (&attributes);
335 if (err != 0)
336 return err;
337 lock->initialized = 1;
338 return 0;
339 }
340
341 int
glthread_recursive_lock_lock_multithreaded(gl_recursive_lock_t * lock)342 glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
343 {
344 if (!lock->initialized)
345 {
346 int err;
347
348 err = pthread_mutex_lock (&lock->guard);
349 if (err != 0)
350 return err;
351 if (!lock->initialized)
352 {
353 err = glthread_recursive_lock_init_multithreaded (lock);
354 if (err != 0)
355 {
356 pthread_mutex_unlock (&lock->guard);
357 return err;
358 }
359 }
360 err = pthread_mutex_unlock (&lock->guard);
361 if (err != 0)
362 return err;
363 }
364 return pthread_mutex_lock (&lock->recmutex);
365 }
366
367 int
glthread_recursive_lock_unlock_multithreaded(gl_recursive_lock_t * lock)368 glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
369 {
370 if (!lock->initialized)
371 return EINVAL;
372 return pthread_mutex_unlock (&lock->recmutex);
373 }
374
375 int
glthread_recursive_lock_destroy_multithreaded(gl_recursive_lock_t * lock)376 glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
377 {
378 int err;
379
380 if (!lock->initialized)
381 return EINVAL;
382 err = pthread_mutex_destroy (&lock->recmutex);
383 if (err != 0)
384 return err;
385 lock->initialized = 0;
386 return 0;
387 }
388
389 # endif
390
391 # else
392
393 int
glthread_recursive_lock_init_multithreaded(gl_recursive_lock_t * lock)394 glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
395 {
396 int err;
397
398 err = pthread_mutex_init (&lock->mutex, NULL);
399 if (err != 0)
400 return err;
401 lock->owner = (pthread_t) 0;
402 lock->depth = 0;
403 return 0;
404 }
405
406 int
glthread_recursive_lock_lock_multithreaded(gl_recursive_lock_t * lock)407 glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
408 {
409 pthread_t self = pthread_self ();
410 if (lock->owner != self)
411 {
412 int err;
413
414 err = pthread_mutex_lock (&lock->mutex);
415 if (err != 0)
416 return err;
417 lock->owner = self;
418 }
419 if (++(lock->depth) == 0) /* wraparound? */
420 {
421 lock->depth--;
422 return EAGAIN;
423 }
424 return 0;
425 }
426
427 int
glthread_recursive_lock_unlock_multithreaded(gl_recursive_lock_t * lock)428 glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
429 {
430 if (lock->owner != pthread_self ())
431 return EPERM;
432 if (lock->depth == 0)
433 return EINVAL;
434 if (--(lock->depth) == 0)
435 {
436 lock->owner = (pthread_t) 0;
437 return pthread_mutex_unlock (&lock->mutex);
438 }
439 else
440 return 0;
441 }
442
443 int
glthread_recursive_lock_destroy_multithreaded(gl_recursive_lock_t * lock)444 glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
445 {
446 if (lock->owner != (pthread_t) 0)
447 return EBUSY;
448 return pthread_mutex_destroy (&lock->mutex);
449 }
450
451 # endif
452
453 /* -------------------------- gl_once_t datatype -------------------------- */
454
static const pthread_once_t fresh_once = PTHREAD_ONCE_INIT;

int
glthread_once_singlethreaded (pthread_once_t *once_control)
{
  /* pthread_once_t may be an integer, floating-point, pointer, or structure
     type, so we cannot compare values directly.  Instead, compare the first
     byte of the representation with that of a pristine PTHREAD_ONCE_INIT
     object.  */
  char *p = (char *) once_control;
  if (*p != *(const char *) &fresh_once)
    /* once_control has been used before.  */
    return 0;
  /* First use: invert the first byte so subsequent calls return 0.  */
  *p = ~ *(const char *) &fresh_once;
  return 1;
}
472
473 #endif
474
475 /* ========================================================================= */
476
477 #if USE_PTH_THREADS
478
479 /* Use the GNU Pth threads library. */
480
481 /* -------------------------- gl_lock_t datatype -------------------------- */
482
483 /* ------------------------- gl_rwlock_t datatype ------------------------- */
484
485 /* --------------------- gl_recursive_lock_t datatype --------------------- */
486
487 /* -------------------------- gl_once_t datatype -------------------------- */
488
static void
glthread_once_call (void *arg)
{
  /* ARG points to a function pointer; fetch it and invoke it.  */
  void (**fnptr) (void) = (void (**) (void)) arg;
  (*fnptr) ();
}
496
497 int
glthread_once_multithreaded(pth_once_t * once_control,void (* initfunction)(void))498 glthread_once_multithreaded (pth_once_t *once_control, void (*initfunction) (void))
499 {
500 void (*temp) (void) = initfunction;
501 return (!pth_once (once_control, glthread_once_call, &temp) ? errno : 0);
502 }
503
504 int
glthread_once_singlethreaded(pth_once_t * once_control)505 glthread_once_singlethreaded (pth_once_t *once_control)
506 {
507 /* We know that pth_once_t is an integer type. */
508 if (*once_control == PTH_ONCE_INIT)
509 {
510 /* First time use of once_control. Invert the marker. */
511 *once_control = ~ PTH_ONCE_INIT;
512 return 1;
513 }
514 else
515 return 0;
516 }
517
518 #endif
519
520 /* ========================================================================= */
521
522 #if USE_SOLARIS_THREADS
523
524 /* Use the old Solaris threads library. */
525
526 /* -------------------------- gl_lock_t datatype -------------------------- */
527
528 /* ------------------------- gl_rwlock_t datatype ------------------------- */
529
530 /* --------------------- gl_recursive_lock_t datatype --------------------- */
531
532 int
glthread_recursive_lock_init_multithreaded(gl_recursive_lock_t * lock)533 glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
534 {
535 int err;
536
537 err = mutex_init (&lock->mutex, USYNC_THREAD, NULL);
538 if (err != 0)
539 return err;
540 lock->owner = (thread_t) 0;
541 lock->depth = 0;
542 return 0;
543 }
544
545 int
glthread_recursive_lock_lock_multithreaded(gl_recursive_lock_t * lock)546 glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
547 {
548 thread_t self = thr_self ();
549 if (lock->owner != self)
550 {
551 int err;
552
553 err = mutex_lock (&lock->mutex);
554 if (err != 0)
555 return err;
556 lock->owner = self;
557 }
558 if (++(lock->depth) == 0) /* wraparound? */
559 {
560 lock->depth--;
561 return EAGAIN;
562 }
563 return 0;
564 }
565
566 int
glthread_recursive_lock_unlock_multithreaded(gl_recursive_lock_t * lock)567 glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
568 {
569 if (lock->owner != thr_self ())
570 return EPERM;
571 if (lock->depth == 0)
572 return EINVAL;
573 if (--(lock->depth) == 0)
574 {
575 lock->owner = (thread_t) 0;
576 return mutex_unlock (&lock->mutex);
577 }
578 else
579 return 0;
580 }
581
582 int
glthread_recursive_lock_destroy_multithreaded(gl_recursive_lock_t * lock)583 glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
584 {
585 if (lock->owner != (thread_t) 0)
586 return EBUSY;
587 return mutex_destroy (&lock->mutex);
588 }
589
590 /* -------------------------- gl_once_t datatype -------------------------- */
591
592 int
glthread_once_multithreaded(gl_once_t * once_control,void (* initfunction)(void))593 glthread_once_multithreaded (gl_once_t *once_control, void (*initfunction) (void))
594 {
595 if (!once_control->inited)
596 {
597 int err;
598
599 /* Use the mutex to guarantee that if another thread is already calling
600 the initfunction, this thread waits until it's finished. */
601 err = mutex_lock (&once_control->mutex);
602 if (err != 0)
603 return err;
604 if (!once_control->inited)
605 {
606 once_control->inited = 1;
607 initfunction ();
608 }
609 return mutex_unlock (&once_control->mutex);
610 }
611 else
612 return 0;
613 }
614
615 int
glthread_once_singlethreaded(gl_once_t * once_control)616 glthread_once_singlethreaded (gl_once_t *once_control)
617 {
618 /* We know that gl_once_t contains an integer type. */
619 if (!once_control->inited)
620 {
621 /* First time use of once_control. Invert the marker. */
622 once_control->inited = ~ 0;
623 return 1;
624 }
625 else
626 return 0;
627 }
628
629 #endif
630
631 /* ========================================================================= */
632
633 #if USE_WINDOWS_THREADS
634
635 /* -------------------------- gl_lock_t datatype -------------------------- */
636
637 void
glthread_lock_init_func(gl_lock_t * lock)638 glthread_lock_init_func (gl_lock_t *lock)
639 {
640 InitializeCriticalSection (&lock->lock);
641 lock->guard.done = 1;
642 }
643
644 int
glthread_lock_lock_func(gl_lock_t * lock)645 glthread_lock_lock_func (gl_lock_t *lock)
646 {
647 if (!lock->guard.done)
648 {
649 if (InterlockedIncrement (&lock->guard.started) == 0)
650 /* This thread is the first one to need this lock. Initialize it. */
651 glthread_lock_init (lock);
652 else
653 /* Yield the CPU while waiting for another thread to finish
654 initializing this lock. */
655 while (!lock->guard.done)
656 Sleep (0);
657 }
658 EnterCriticalSection (&lock->lock);
659 return 0;
660 }
661
662 int
glthread_lock_unlock_func(gl_lock_t * lock)663 glthread_lock_unlock_func (gl_lock_t *lock)
664 {
665 if (!lock->guard.done)
666 return EINVAL;
667 LeaveCriticalSection (&lock->lock);
668 return 0;
669 }
670
671 int
glthread_lock_destroy_func(gl_lock_t * lock)672 glthread_lock_destroy_func (gl_lock_t *lock)
673 {
674 if (!lock->guard.done)
675 return EINVAL;
676 DeleteCriticalSection (&lock->lock);
677 lock->guard.done = 0;
678 return 0;
679 }
680
681 /* ------------------------- gl_rwlock_t datatype ------------------------- */
682
683 /* In this file, the waitqueues are implemented as circular arrays. */
684 #define gl_waitqueue_t gl_carray_waitqueue_t
685
686 static inline void
gl_waitqueue_init(gl_waitqueue_t * wq)687 gl_waitqueue_init (gl_waitqueue_t *wq)
688 {
689 wq->array = NULL;
690 wq->count = 0;
691 wq->alloc = 0;
692 wq->offset = 0;
693 }
694
695 /* Enqueues the current thread, represented by an event, in a wait queue.
696 Returns INVALID_HANDLE_VALUE if an allocation failure occurs. */
697 static HANDLE
gl_waitqueue_add(gl_waitqueue_t * wq)698 gl_waitqueue_add (gl_waitqueue_t *wq)
699 {
700 HANDLE event;
701 unsigned int index;
702
703 if (wq->count == wq->alloc)
704 {
705 unsigned int new_alloc = 2 * wq->alloc + 1;
706 HANDLE *new_array =
707 (HANDLE *) realloc (wq->array, new_alloc * sizeof (HANDLE));
708 if (new_array == NULL)
709 /* No more memory. */
710 return INVALID_HANDLE_VALUE;
711 /* Now is a good opportunity to rotate the array so that its contents
712 starts at offset 0. */
713 if (wq->offset > 0)
714 {
715 unsigned int old_count = wq->count;
716 unsigned int old_alloc = wq->alloc;
717 unsigned int old_offset = wq->offset;
718 unsigned int i;
719 if (old_offset + old_count > old_alloc)
720 {
721 unsigned int limit = old_offset + old_count - old_alloc;
722 for (i = 0; i < limit; i++)
723 new_array[old_alloc + i] = new_array[i];
724 }
725 for (i = 0; i < old_count; i++)
726 new_array[i] = new_array[old_offset + i];
727 wq->offset = 0;
728 }
729 wq->array = new_array;
730 wq->alloc = new_alloc;
731 }
732 /* Whether the created event is a manual-reset one or an auto-reset one,
733 does not matter, since we will wait on it only once. */
734 event = CreateEvent (NULL, TRUE, FALSE, NULL);
735 if (event == INVALID_HANDLE_VALUE)
736 /* No way to allocate an event. */
737 return INVALID_HANDLE_VALUE;
738 index = wq->offset + wq->count;
739 if (index >= wq->alloc)
740 index -= wq->alloc;
741 wq->array[index] = event;
742 wq->count++;
743 return event;
744 }
745
746 /* Notifies the first thread from a wait queue and dequeues it. */
747 static inline void
gl_waitqueue_notify_first(gl_waitqueue_t * wq)748 gl_waitqueue_notify_first (gl_waitqueue_t *wq)
749 {
750 SetEvent (wq->array[wq->offset + 0]);
751 wq->offset++;
752 wq->count--;
753 if (wq->count == 0 || wq->offset == wq->alloc)
754 wq->offset = 0;
755 }
756
757 /* Notifies all threads from a wait queue and dequeues them all. */
758 static inline void
gl_waitqueue_notify_all(gl_waitqueue_t * wq)759 gl_waitqueue_notify_all (gl_waitqueue_t *wq)
760 {
761 unsigned int i;
762
763 for (i = 0; i < wq->count; i++)
764 {
765 unsigned int index = wq->offset + i;
766 if (index >= wq->alloc)
767 index -= wq->alloc;
768 SetEvent (wq->array[index]);
769 }
770 wq->count = 0;
771 wq->offset = 0;
772 }
773
774 void
glthread_rwlock_init_func(gl_rwlock_t * lock)775 glthread_rwlock_init_func (gl_rwlock_t *lock)
776 {
777 InitializeCriticalSection (&lock->lock);
778 gl_waitqueue_init (&lock->waiting_readers);
779 gl_waitqueue_init (&lock->waiting_writers);
780 lock->runcount = 0;
781 lock->guard.done = 1;
782 }
783
int
glthread_rwlock_rdlock_func (gl_rwlock_t *lock)
{
  /* Lazy one-time initialization; the race is decided via guard.started.  */
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_rwlock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow.  */
  if (!(lock->runcount + 1 > 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      HANDLE event = gl_waitqueue_add (&lock->waiting_readers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_readers, incremented lock->runcount.  */
          if (!(lock->runcount > 0))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure.  Weird.  Fall back to busy-polling until
             the lock becomes available for readers.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount + 1 > 0));
        }
    }
  lock->runcount++;
  LeaveCriticalSection (&lock->lock);
  return 0;
}
837
int
glthread_rwlock_wrlock_func (gl_rwlock_t *lock)
{
  /* Lazy one-time initialization; the race is decided via guard.started.  */
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_rwlock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether no readers or writers are currently running.  */
  if (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      HANDLE event = gl_waitqueue_add (&lock->waiting_writers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_writers, set lock->runcount = -1.  */
          if (!(lock->runcount == -1))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure.  Weird.  Fall back to busy-polling until
             the lock becomes free.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount == 0));
        }
    }
  lock->runcount--;  /* runcount becomes -1 */
  LeaveCriticalSection (&lock->lock);
  return 0;
}
890
int
glthread_rwlock_unlock_func (gl_rwlock_t *lock)
{
  /* Releases either a reader or a writer lock, inferred from the sign of
     runcount (-1 = one writer, > 0 = number of active readers), then wakes
     the appropriate waiters once the lock becomes free.  */
  if (!lock->guard.done)
    return EINVAL;
  EnterCriticalSection (&lock->lock);
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  Only -1 is a legal writer state.  */
      if (!(lock->runcount == -1))
        abort ();
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  Unlocking with runcount == 0 means the caller
         does not hold the lock.  */
      if (!(lock->runcount > 0))
        {
          LeaveCriticalSection (&lock->lock);
          return EPERM;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers.count > 0)
        {
          /* Wake up one of the waiting writers.  Do the bookkeeping on its
             behalf: runcount is already -1 when it wakes up.  */
          lock->runcount--;
          gl_waitqueue_notify_first (&lock->waiting_writers);
        }
      else
        {
          /* Wake up all waiting readers.  Account for all of them up front,
             so each woken reader finds runcount > 0.  */
          lock->runcount += lock->waiting_readers.count;
          gl_waitqueue_notify_all (&lock->waiting_readers);
        }
    }
  LeaveCriticalSection (&lock->lock);
  return 0;
}
934
935 int
glthread_rwlock_destroy_func(gl_rwlock_t * lock)936 glthread_rwlock_destroy_func (gl_rwlock_t *lock)
937 {
938 if (!lock->guard.done)
939 return EINVAL;
940 if (lock->runcount != 0)
941 return EBUSY;
942 DeleteCriticalSection (&lock->lock);
943 if (lock->waiting_readers.array != NULL)
944 free (lock->waiting_readers.array);
945 if (lock->waiting_writers.array != NULL)
946 free (lock->waiting_writers.array);
947 lock->guard.done = 0;
948 return 0;
949 }
950
951 /* --------------------- gl_recursive_lock_t datatype --------------------- */
952
953 void
glthread_recursive_lock_init_func(gl_recursive_lock_t * lock)954 glthread_recursive_lock_init_func (gl_recursive_lock_t *lock)
955 {
956 lock->owner = 0;
957 lock->depth = 0;
958 InitializeCriticalSection (&lock->lock);
959 lock->guard.done = 1;
960 }
961
962 int
glthread_recursive_lock_lock_func(gl_recursive_lock_t * lock)963 glthread_recursive_lock_lock_func (gl_recursive_lock_t *lock)
964 {
965 if (!lock->guard.done)
966 {
967 if (InterlockedIncrement (&lock->guard.started) == 0)
968 /* This thread is the first one to need this lock. Initialize it. */
969 glthread_recursive_lock_init (lock);
970 else
971 /* Yield the CPU while waiting for another thread to finish
972 initializing this lock. */
973 while (!lock->guard.done)
974 Sleep (0);
975 }
976 {
977 DWORD self = GetCurrentThreadId ();
978 if (lock->owner != self)
979 {
980 EnterCriticalSection (&lock->lock);
981 lock->owner = self;
982 }
983 if (++(lock->depth) == 0) /* wraparound? */
984 {
985 lock->depth--;
986 return EAGAIN;
987 }
988 }
989 return 0;
990 }
991
992 int
glthread_recursive_lock_unlock_func(gl_recursive_lock_t * lock)993 glthread_recursive_lock_unlock_func (gl_recursive_lock_t *lock)
994 {
995 if (lock->owner != GetCurrentThreadId ())
996 return EPERM;
997 if (lock->depth == 0)
998 return EINVAL;
999 if (--(lock->depth) == 0)
1000 {
1001 lock->owner = 0;
1002 LeaveCriticalSection (&lock->lock);
1003 }
1004 return 0;
1005 }
1006
1007 int
glthread_recursive_lock_destroy_func(gl_recursive_lock_t * lock)1008 glthread_recursive_lock_destroy_func (gl_recursive_lock_t *lock)
1009 {
1010 if (lock->owner != 0)
1011 return EBUSY;
1012 DeleteCriticalSection (&lock->lock);
1013 lock->guard.done = 0;
1014 return 0;
1015 }
1016
1017 /* -------------------------- gl_once_t datatype -------------------------- */
1018
void
glthread_once_func (gl_once_t *once_control, void (*initfunction) (void))
{
  /* One-time initialization.  'inited' becomes > 0 once initfunction has
     completed; 'started' arbitrates which thread runs it.
     NOTE(review): this relies on once_control starting with inited < 0 and
     started == -1 (so the first InterlockedIncrement yields 0) — presumably
     set by the gl_once initializer in lock.h; confirm there.  */
  if (once_control->inited <= 0)
    {
      if (InterlockedIncrement (&once_control->started) == 0)
        {
          /* This thread is the first one to come to this once_control.  */
          InitializeCriticalSection (&once_control->lock);
          EnterCriticalSection (&once_control->lock);
          /* inited == 0 tells late arrivals that the lock now exists and
             may be waited on.  */
          once_control->inited = 0;
          initfunction ();
          once_control->inited = 1;
          LeaveCriticalSection (&once_control->lock);
        }
      else
        {
          /* Undo last operation.  */
          InterlockedDecrement (&once_control->started);
          /* Some other thread has already started the initialization.
             Yield the CPU while waiting for the other thread to finish
             initializing and taking the lock.  */
          while (once_control->inited < 0)
            Sleep (0);
          if (once_control->inited <= 0)
            {
              /* Take the lock.  This blocks until the other thread has
                 finished calling the initfunction.  */
              EnterCriticalSection (&once_control->lock);
              LeaveCriticalSection (&once_control->lock);
              if (!(once_control->inited > 0))
                abort ();
            }
        }
    }
}
1055
1056 #endif
1057
1058 /* ========================================================================= */
1059