/* Locking in multithreaded situations.
   Copyright (C) 2005-2008 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU Library General Public License as published
   by the Free Software Foundation; either version 2, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
   USA.  */

/* Written by Bruno Haible <bruno@clisp.org>, 2005.
   Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h,
   gthr-win32.h.  */

#include <config.h>

#include "lock.h"

/* ========================================================================= */

#if USE_POSIX_THREADS

/* -------------------------- gl_lock_t datatype -------------------------- */

/* ------------------------- gl_rwlock_t datatype ------------------------- */

# if HAVE_PTHREAD_RWLOCK

#  if !defined PTHREAD_RWLOCK_INITIALIZER

int
glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_rwlock_init (&lock->rwlock, NULL);
  if (err != 0)
    return err;
  lock->initialized = 1;
  return 0;
}

int
glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_rwlock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_rwlock_rdlock (&lock->rwlock);
}

int
glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_rwlock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_rwlock_wrlock (&lock->rwlock);
}

int
glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    return EINVAL;
  return pthread_rwlock_unlock (&lock->rwlock);
}

int
glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
{
  int err;

  if (!lock->initialized)
    return EINVAL;
  err = pthread_rwlock_destroy (&lock->rwlock);
  if (err != 0)
    return err;
  lock->initialized = 0;
  return 0;
}

#  endif

# else

int
glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_init (&lock->lock, NULL);
  if (err != 0)
    return err;
  err = pthread_cond_init (&lock->waiting_readers, NULL);
  if (err != 0)
    return err;
  err = pthread_cond_init (&lock->waiting_writers, NULL);
  if (err != 0)
    return err;
  lock->waiting_writers_count = 0;
  lock->runcount = 0;
  return 0;
}

int
glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow.  */
  /* POSIX says: "It is implementation-defined whether the calling thread
     acquires the lock when a writer does not hold the lock and there are
     writers blocked on the lock."  Let's say, no: give the writers a higher
     priority.  */
  while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      err = pthread_cond_wait (&lock->waiting_readers, &lock->lock);
      if (err != 0)
        {
          pthread_mutex_unlock (&lock->lock);
          return err;
        }
    }
  lock->runcount++;
  return pthread_mutex_unlock (&lock->lock);
}

int
glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  /* Test whether no readers or writers are currently running.  */
  while (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      lock->waiting_writers_count++;
      err = pthread_cond_wait (&lock->waiting_writers, &lock->lock);
      if (err != 0)
        {
          lock->waiting_writers_count--;
          pthread_mutex_unlock (&lock->lock);
          return err;
        }
      lock->waiting_writers_count--;
    }
  lock->runcount--; /* runcount becomes -1 */
  return pthread_mutex_unlock (&lock->lock);
}

int
glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        {
          pthread_mutex_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  */
      if (!(lock->runcount > 0))
        {
          pthread_mutex_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers_count > 0)
        {
          /* Wake up one of the waiting writers.  */
          err = pthread_cond_signal (&lock->waiting_writers);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->lock);
              return err;
            }
        }
      else
        {
          /* Wake up all waiting readers.  */
          err = pthread_cond_broadcast (&lock->waiting_readers);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->lock);
              return err;
            }
        }
    }
  return pthread_mutex_unlock (&lock->lock);
}

int
glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_destroy (&lock->lock);
  if (err != 0)
    return err;
  err = pthread_cond_destroy (&lock->waiting_readers);
  if (err != 0)
    return err;
  err = pthread_cond_destroy (&lock->waiting_writers);
  if (err != 0)
    return err;
  return 0;
}

# endif
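
/* Illustrative usage sketch (not part of this module): how the gl_rwlock_*
   wrappers declared in "lock.h" around the functions above are typically
   used from application code.  The shared table and the two functions below
   are hypothetical.  */
#if 0
gl_rwlock_define_initialized (static, table_lock)
static int table[100];

static int
table_get (int i)
{
  int value;
  gl_rwlock_rdlock (table_lock);   /* many readers may run concurrently */
  value = table[i];
  gl_rwlock_unlock (table_lock);
  return value;
}

static void
table_set (int i, int value)
{
  gl_rwlock_wrlock (table_lock);   /* a writer gets exclusive access */
  table[i] = value;
  gl_rwlock_unlock (table_lock);
}
#endif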

/* --------------------- gl_recursive_lock_t datatype --------------------- */

# if HAVE_PTHREAD_MUTEX_RECURSIVE

#  if defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_mutexattr_t attributes;
  int err;

  err = pthread_mutexattr_init (&attributes);
  if (err != 0)
    return err;
  err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutex_init (lock, &attributes);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutexattr_destroy (&attributes);
  if (err != 0)
    return err;
  return 0;
}

#  else

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_mutexattr_t attributes;
  int err;

  err = pthread_mutexattr_init (&attributes);
  if (err != 0)
    return err;
  err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutex_init (&lock->recmutex, &attributes);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutexattr_destroy (&attributes);
  if (err != 0)
    return err;
  lock->initialized = 1;
  return 0;
}

int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_recursive_lock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_mutex_lock (&lock->recmutex);
}

int
glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
{
  if (!lock->initialized)
    return EINVAL;
  return pthread_mutex_unlock (&lock->recmutex);
}

int
glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
{
  int err;

  if (!lock->initialized)
    return EINVAL;
  err = pthread_mutex_destroy (&lock->recmutex);
  if (err != 0)
    return err;
  lock->initialized = 0;
  return 0;
}

#  endif

# else

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  int err;

  err = pthread_mutex_init (&lock->mutex, NULL);
  if (err != 0)
    return err;
  lock->owner = (pthread_t) 0;
  lock->depth = 0;
  return 0;
}

int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_t self = pthread_self ();
  if (lock->owner != self)
    {
      int err;

      err = pthread_mutex_lock (&lock->mutex);
      if (err != 0)
        return err;
      lock->owner = self;
    }
  if (++(lock->depth) == 0) /* wraparound? */
    {
      lock->depth--;
      return EAGAIN;
    }
  return 0;
}

int
glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != pthread_self ())
    return EPERM;
  if (lock->depth == 0)
    return EINVAL;
  if (--(lock->depth) == 0)
    {
      lock->owner = (pthread_t) 0;
      return pthread_mutex_unlock (&lock->mutex);
    }
  else
    return 0;
}

int
glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != (pthread_t) 0)
    return EBUSY;
  return pthread_mutex_destroy (&lock->mutex);
}

# endif
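
/* Illustrative usage sketch (not part of this module): a recursive lock lets
   the owning thread re-acquire a lock it already holds, which matters when
   one locked entry point of a module calls another.  The macro names are
   the public wrappers from "lock.h"; the two functions are hypothetical.  */
#if 0
gl_recursive_lock_define_initialized (static, module_lock)

static void
do_work (void)
{
  gl_recursive_lock_lock (module_lock);
  /* ... manipulate shared state ... */
  gl_recursive_lock_unlock (module_lock);
}

static void
do_more_work (void)
{
  gl_recursive_lock_lock (module_lock);
  do_work ();                  /* re-entering the held lock does not deadlock */
  gl_recursive_lock_unlock (module_lock);
}
#endif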

/* -------------------------- gl_once_t datatype -------------------------- */

static const pthread_once_t fresh_once = PTHREAD_ONCE_INIT;

int
glthread_once_singlethreaded (pthread_once_t *once_control)
{
  /* We don't know whether pthread_once_t is an integer type, a floating-point
     type, a pointer type, or a structure type.  */
  char *firstbyte = (char *)once_control;
  if (*firstbyte == *(const char *)&fresh_once)
    {
      /* First time use of once_control.  Invert the first byte.  */
      *firstbyte = ~ *(const char *)&fresh_once;
      return 1;
    }
  else
    return 0;
}
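
/* Illustrative usage sketch (not part of this module): the gl_once facility
   runs an initialization function exactly once, regardless of how many
   threads reach it.  The macro names are the public wrappers from "lock.h";
   the init function is hypothetical.  */
#if 0
gl_once_define (static, table_once)

static void
init_table (void)
{
  /* ... one-time initialization of shared data ... */
}

static void
use_table (void)
{
  gl_once (table_once, init_table);   /* init_table runs at most once */
  /* ... the shared data can safely be used here ... */
}
#endif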

#endif

/* ========================================================================= */

#if USE_PTH_THREADS

/* Use the GNU Pth threads library.  */

/* -------------------------- gl_lock_t datatype -------------------------- */

/* ------------------------- gl_rwlock_t datatype ------------------------- */

/* --------------------- gl_recursive_lock_t datatype --------------------- */

/* -------------------------- gl_once_t datatype -------------------------- */

static void
glthread_once_call (void *arg)
{
  void (**gl_once_temp_addr) (void) = (void (**) (void)) arg;
  void (*initfunction) (void) = *gl_once_temp_addr;
  initfunction ();
}

int
glthread_once_multithreaded (pth_once_t *once_control, void (*initfunction) (void))
{
  void (*temp) (void) = initfunction;
  return (!pth_once (once_control, glthread_once_call, &temp) ? errno : 0);
}

int
glthread_once_singlethreaded (pth_once_t *once_control)
{
  /* We know that pth_once_t is an integer type.  */
  if (*once_control == PTH_ONCE_INIT)
    {
      /* First time use of once_control.  Invert the marker.  */
      *once_control = ~ PTH_ONCE_INIT;
      return 1;
    }
  else
    return 0;
}

#endif

/* ========================================================================= */

#if USE_SOLARIS_THREADS

/* Use the old Solaris threads library.  */

/* -------------------------- gl_lock_t datatype -------------------------- */

/* ------------------------- gl_rwlock_t datatype ------------------------- */

/* --------------------- gl_recursive_lock_t datatype --------------------- */

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  int err;

  err = mutex_init (&lock->mutex, USYNC_THREAD, NULL);
  if (err != 0)
    return err;
  lock->owner = (thread_t) 0;
  lock->depth = 0;
  return 0;
}

int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  thread_t self = thr_self ();
  if (lock->owner != self)
    {
      int err;

      err = mutex_lock (&lock->mutex);
      if (err != 0)
        return err;
      lock->owner = self;
    }
  if (++(lock->depth) == 0) /* wraparound? */
    {
      lock->depth--;
      return EAGAIN;
    }
  return 0;
}

int
glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != thr_self ())
    return EPERM;
  if (lock->depth == 0)
    return EINVAL;
  if (--(lock->depth) == 0)
    {
      lock->owner = (thread_t) 0;
      return mutex_unlock (&lock->mutex);
    }
  else
    return 0;
}

int
glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != (thread_t) 0)
    return EBUSY;
  return mutex_destroy (&lock->mutex);
}

/* -------------------------- gl_once_t datatype -------------------------- */

int
glthread_once_multithreaded (gl_once_t *once_control, void (*initfunction) (void))
{
  if (!once_control->inited)
    {
      int err;

      /* Use the mutex to guarantee that if another thread is already calling
         the initfunction, this thread waits until it's finished.  */
      err = mutex_lock (&once_control->mutex);
      if (err != 0)
        return err;
      if (!once_control->inited)
        {
          once_control->inited = 1;
          initfunction ();
        }
      return mutex_unlock (&once_control->mutex);
    }
  else
    return 0;
}

int
glthread_once_singlethreaded (gl_once_t *once_control)
{
  /* We know that gl_once_t contains an integer type.  */
  if (!once_control->inited)
    {
      /* First time use of once_control.  Invert the marker.  */
      once_control->inited = ~ 0;
      return 1;
    }
  else
    return 0;
}

#endif

/* ========================================================================= */

#if USE_WIN32_THREADS

/* -------------------------- gl_lock_t datatype -------------------------- */

void
glthread_lock_init_func (gl_lock_t *lock)
{
  InitializeCriticalSection (&lock->lock);
  lock->guard.done = 1;
}

int
glthread_lock_lock_func (gl_lock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_lock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  return 0;
}

int
glthread_lock_unlock_func (gl_lock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_lock_destroy_func (gl_lock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  DeleteCriticalSection (&lock->lock);
  lock->guard.done = 0;
  return 0;
}
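
/* The guard used above is a small self-initialization protocol: 'started'
   must begin at -1, so that exactly one thread sees InterlockedIncrement
   return 0 and performs the real initialization, while all others spin on
   'done'.  A minimal sketch of the same idea, with hypothetical names,
   assuming only the Win32 Interlocked and Sleep primitives:  */
#if 0
typedef struct
{
  volatile int done;       /* becomes 1 once initialization has completed */
  volatile LONG started;   /* starts at -1 */
} init_guard_t;

static void
guarded_init (init_guard_t *guard, void (*do_init) (void))
{
  if (!guard->done)
    {
      if (InterlockedIncrement (&guard->started) == 0)
        {
          /* This thread won the race: perform the initialization.  */
          do_init ();
          guard->done = 1;
        }
      else
        /* Another thread is initializing; yield until it finishes.  */
        while (!guard->done)
          Sleep (0);
    }
}
#endif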

/* ------------------------- gl_rwlock_t datatype ------------------------- */

/* In this file, the waitqueues are implemented as circular arrays.  */
#define gl_waitqueue_t gl_carray_waitqueue_t

static inline void
gl_waitqueue_init (gl_waitqueue_t *wq)
{
  wq->array = NULL;
  wq->count = 0;
  wq->alloc = 0;
  wq->offset = 0;
}

/* Enqueues the current thread, represented by an event, in a wait queue.
   Returns INVALID_HANDLE_VALUE if an allocation failure occurs.  */
static HANDLE
gl_waitqueue_add (gl_waitqueue_t *wq)
{
  HANDLE event;
  unsigned int index;

  if (wq->count == wq->alloc)
    {
      unsigned int new_alloc = 2 * wq->alloc + 1;
      HANDLE *new_array =
        (HANDLE *) realloc (wq->array, new_alloc * sizeof (HANDLE));
      if (new_array == NULL)
        /* No more memory.  */
        return INVALID_HANDLE_VALUE;
      /* Now is a good opportunity to rotate the array so that its contents
         start at offset 0.  */
      if (wq->offset > 0)
        {
          unsigned int old_count = wq->count;
          unsigned int old_alloc = wq->alloc;
          unsigned int old_offset = wq->offset;
          unsigned int i;
          if (old_offset + old_count > old_alloc)
            {
              unsigned int limit = old_offset + old_count - old_alloc;
              for (i = 0; i < limit; i++)
                new_array[old_alloc + i] = new_array[i];
            }
          for (i = 0; i < old_count; i++)
            new_array[i] = new_array[old_offset + i];
          wq->offset = 0;
        }
      wq->array = new_array;
      wq->alloc = new_alloc;
    }
  /* Whether the created event is a manual-reset one or an auto-reset one
     does not matter, since we will wait on it only once.  */
  event = CreateEvent (NULL, TRUE, FALSE, NULL);
  if (event == NULL)
    /* CreateEvent reports failure by returning NULL.  No way to allocate
       an event.  */
    return INVALID_HANDLE_VALUE;
  index = wq->offset + wq->count;
  if (index >= wq->alloc)
    index -= wq->alloc;
  wq->array[index] = event;
  wq->count++;
  return event;
}

/* Notifies the first thread from a wait queue and dequeues it.  */
static inline void
gl_waitqueue_notify_first (gl_waitqueue_t *wq)
{
  SetEvent (wq->array[wq->offset + 0]);
  wq->offset++;
  wq->count--;
  if (wq->count == 0 || wq->offset == wq->alloc)
    wq->offset = 0;
}

/* Notifies all threads from a wait queue and dequeues them all.  */
static inline void
gl_waitqueue_notify_all (gl_waitqueue_t *wq)
{
  unsigned int i;

  for (i = 0; i < wq->count; i++)
    {
      unsigned int index = wq->offset + i;
      if (index >= wq->alloc)
        index -= wq->alloc;
      SetEvent (wq->array[index]);
    }
  wq->count = 0;
  wq->offset = 0;
}
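
/* Worked example of the circular indexing above (illustrative values only):
   with alloc = 4, offset = 3, count = 2, the live entries occupy physical
   slots 3 and 0 (3 + 1 wraps around to 0).  gl_waitqueue_add stores the next
   event at index = offset + count = 5, reduced to 5 - alloc = 1;
   gl_waitqueue_notify_first signals slot 3, advances offset to 4, and resets
   it to 0 because it reached alloc.  */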

void
glthread_rwlock_init_func (gl_rwlock_t *lock)
{
  InitializeCriticalSection (&lock->lock);
  gl_waitqueue_init (&lock->waiting_readers);
  gl_waitqueue_init (&lock->waiting_writers);
  lock->runcount = 0;
  lock->guard.done = 1;
}

int
glthread_rwlock_rdlock_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_rwlock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow.  */
  if (!(lock->runcount + 1 > 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      HANDLE event = gl_waitqueue_add (&lock->waiting_readers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_readers, incremented lock->runcount.  */
          if (!(lock->runcount > 0))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure.  Weird.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount + 1 > 0));
        }
    }
  lock->runcount++;
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_rwlock_wrlock_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_rwlock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether no readers or writers are currently running.  */
  if (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      HANDLE event = gl_waitqueue_add (&lock->waiting_writers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_writers, set lock->runcount = -1.  */
          if (!(lock->runcount == -1))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure.  Weird.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount == 0));
        }
    }
  lock->runcount--; /* runcount becomes -1 */
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_rwlock_unlock_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  EnterCriticalSection (&lock->lock);
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        abort ();
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  */
      if (!(lock->runcount > 0))
        {
          LeaveCriticalSection (&lock->lock);
          return EPERM;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers.count > 0)
        {
          /* Wake up one of the waiting writers.  */
          lock->runcount--;
          gl_waitqueue_notify_first (&lock->waiting_writers);
        }
      else
        {
          /* Wake up all waiting readers.  */
          lock->runcount += lock->waiting_readers.count;
          gl_waitqueue_notify_all (&lock->waiting_readers);
        }
    }
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_rwlock_destroy_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  if (lock->runcount != 0)
    return EBUSY;
  DeleteCriticalSection (&lock->lock);
  if (lock->waiting_readers.array != NULL)
    free (lock->waiting_readers.array);
  if (lock->waiting_writers.array != NULL)
    free (lock->waiting_writers.array);
  lock->guard.done = 0;
  return 0;
}

/* --------------------- gl_recursive_lock_t datatype --------------------- */

void
glthread_recursive_lock_init_func (gl_recursive_lock_t *lock)
{
  lock->owner = 0;
  lock->depth = 0;
  InitializeCriticalSection (&lock->lock);
  lock->guard.done = 1;
}

int
glthread_recursive_lock_lock_func (gl_recursive_lock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_recursive_lock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  {
    DWORD self = GetCurrentThreadId ();
    if (lock->owner != self)
      {
        EnterCriticalSection (&lock->lock);
        lock->owner = self;
      }
    if (++(lock->depth) == 0) /* wraparound? */
      {
        lock->depth--;
        return EAGAIN;
      }
  }
  return 0;
}

int
glthread_recursive_lock_unlock_func (gl_recursive_lock_t *lock)
{
  if (lock->owner != GetCurrentThreadId ())
    return EPERM;
  if (lock->depth == 0)
    return EINVAL;
  if (--(lock->depth) == 0)
    {
      lock->owner = 0;
      LeaveCriticalSection (&lock->lock);
    }
  return 0;
}

int
glthread_recursive_lock_destroy_func (gl_recursive_lock_t *lock)
{
  if (lock->owner != 0)
    return EBUSY;
  DeleteCriticalSection (&lock->lock);
  lock->guard.done = 0;
  return 0;
}

/* -------------------------- gl_once_t datatype -------------------------- */

void
glthread_once_func (gl_once_t *once_control, void (*initfunction) (void))
{
  if (once_control->inited <= 0)
    {
      if (InterlockedIncrement (&once_control->started) == 0)
        {
          /* This thread is the first one to come to this once_control.  */
          InitializeCriticalSection (&once_control->lock);
          EnterCriticalSection (&once_control->lock);
          once_control->inited = 0;
          initfunction ();
          once_control->inited = 1;
          LeaveCriticalSection (&once_control->lock);
        }
      else
        {
          /* Undo last operation.  */
          InterlockedDecrement (&once_control->started);
          /* Some other thread has already started the initialization.
             Yield the CPU while waiting for the other thread to finish
             initializing and taking the lock.  */
          while (once_control->inited < 0)
            Sleep (0);
          if (once_control->inited <= 0)
            {
              /* Take the lock.  This blocks until the other thread has
                 finished calling the initfunction.  */
              EnterCriticalSection (&once_control->lock);
              LeaveCriticalSection (&once_control->lock);
              if (!(once_control->inited > 0))
                abort ();
            }
        }
    }
}
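
/* State sketch of the once_control fields used above (illustrative only):
   the protocol works because 'started' and 'inited' both begin at -1.
   The winning thread is the one that sees InterlockedIncrement return 0;
   it publishes inited = 0 while initfunction runs, then inited = 1.
   Latecomers decrement 'started' back, spin until inited >= 0, and, if the
   initfunction has not finished yet, block on the critical section until
   the winner leaves it.  */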

#endif

/* ========================================================================= */
