1 /* $Id: threads-sjlj.c,v 1.18 2010/06/05 19:10:28 fredette Exp $ */
2
3 /* libtme/threads-sjlj.c - implementation of setjmp/longjmp threads: */
4
5 /*
6 * Copyright (c) 2003 Matt Fredette
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Matt Fredette.
20 * 4. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
27 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
34 */
35
36 #include <tme/common.h>
37 _TME_RCSID("$Id: threads-sjlj.c,v 1.18 2010/06/05 19:10:28 fredette Exp $");
38
39 /* includes: */
40 #include <tme/threads.h>
41 #include <stdlib.h>
42 #include <unistd.h>
43 #include <sys/types.h>
44 #include <sys/time.h>
45 #include <setjmp.h>
46
47 /* if we don't have GTK, fake a few definitions to keep things
48 compiling: */
49 #ifdef HAVE_GTK
50 #ifndef G_ENABLE_DEBUG
51 #define G_ENABLE_DEBUG (0)
52 #endif /* !G_ENABLE_DEBUG */
53 #include <gtk/gtk.h>
54 #else /* !HAVE_GTK */
55 typedef int gint;
56 typedef int GdkInputCondition;
57 typedef void *gpointer;
58 #define GDK_INPUT_READ TME_BIT(0)
59 #define GDK_INPUT_WRITE TME_BIT(1)
60 #define GDK_INPUT_EXCEPTION TME_BIT(2)
61 #endif /* !HAVE_GTK */
62
/* thread states: */
#define TME_SJLJ_THREAD_STATE_BLOCKED (1)
#define TME_SJLJ_THREAD_STATE_RUNNABLE (2)
#define TME_SJLJ_THREAD_STATE_DISPATCHING (3)

/* types: */

/* a thread: */
struct tme_sjlj_thread {

  /* the all-threads list.  prev holds the address of the previous
     element's next pointer (or of the list head), for O(1) unlinking: */
  struct tme_sjlj_thread *next;
  struct tme_sjlj_thread **prev;

  /* the current state of the thread, and any state-related list that
     it is on.  a BLOCKED thread is on no state list and its
     state_prev is NULL (see _tme_sjlj_change_state()): */
  int tme_sjlj_thread_state;
  struct tme_sjlj_thread *state_next;
  struct tme_sjlj_thread **state_prev;

  /* the thread function and its private argument: */
  void *tme_sjlj_thread_func_private;
  tme_thread_t tme_sjlj_thread_func;

  /* any condition that this thread is waiting on: */
  tme_cond_t *tme_sjlj_thread_cond;

  /* the file descriptors that this thread is waiting on.  max_fd is
     -1 when the thread is waiting on no file descriptors: */
  int tme_sjlj_thread_max_fd;
  fd_set tme_sjlj_thread_fdset_read;
  fd_set tme_sjlj_thread_fdset_write;
  fd_set tme_sjlj_thread_fdset_except;

  /* if nonzero, the amount of time that this thread is sleeping,
     followed by the (absolute) time the sleep will timeout.  all
     threads with timeouts are kept on a sorted list: */
  struct timeval tme_sjlj_thread_sleep;
  struct timeval tme_sjlj_thread_timeout;
  struct tme_sjlj_thread *timeout_next;
  struct tme_sjlj_thread **timeout_prev;

  /* the last dispatch number for this thread, used to tell whether
     this thread has already run in the current dispatch: */
  tme_uint32_t tme_sjlj_thread_dispatch_number;
};
107
/* globals: */

/* the all-threads list: */
static struct tme_sjlj_thread *tme_sjlj_threads_all;

/* the timeout-threads list (sorted, earliest timeout first): */
static struct tme_sjlj_thread *tme_sjlj_threads_timeout;

/* the runnable-threads list: */
static struct tme_sjlj_thread *tme_sjlj_threads_runnable;

/* the dispatching-threads list: */
static struct tme_sjlj_thread *tme_sjlj_threads_dispatching;

/* the active thread; NULL when no dispatch is in progress: */
static struct tme_sjlj_thread *tme_sjlj_thread_active;

/* this dummy thread structure is filled before a yield to represent
   what, if anything, the active thread is blocking on when it yields: */
static struct tme_sjlj_thread tme_sjlj_thread_blocked;

/* this is set if the active thread is exiting: */
static int tme_sjlj_thread_exiting;

/* this is a jmp_buf back to the dispatcher; a yielding thread
   longjmps through it (see tme_sjlj_dispatch()): */
static jmp_buf tme_sjlj_dispatcher_jmp;

/* the main loop fd sets, used when we run our own select() main loop
   instead of GTK's: */
static int tme_sjlj_main_max_fd;
static fd_set tme_sjlj_main_fdset_read;
static fd_set tme_sjlj_main_fdset_write;
static fd_set tme_sjlj_main_fdset_except;

/* for each file descriptor, any threads blocked on it.  at most one
   thread may block per condition per fd (see the asserts in
   tme_sjlj_yield()): */
static struct {
  GdkInputCondition tme_sjlj_fd_thread_conditions;
  struct tme_sjlj_thread *tme_sjlj_fd_thread_read;
  struct tme_sjlj_thread *tme_sjlj_fd_thread_write;
  struct tme_sjlj_thread *tme_sjlj_fd_thread_except;
} tme_sjlj_fd_thread[FD_SETSIZE];

/* the dispatch number: */
static tme_uint32_t _tme_sjlj_thread_dispatch_number;

/* a reasonably current time, cached so that tme_sjlj_gettimeofday()
   does not call gettimeofday() on every query: */
static struct timeval _tme_sjlj_now;

/* if nonzero, the last dispatched thread ran for only a short time
   (so the cached time above is still reasonably current): */
int tme_sjlj_thread_short;

#ifdef HAVE_GTK

/* nonzero iff we're using the gtk main loop: */
static int tme_sjlj_using_gtk;

/* for each file descriptor, the GTK tag for the fd event source: */
static gint tme_sjlj_fd_tag[FD_SETSIZE];

/* this set iff the idle callback is set: */
static int tme_sjlj_idle_set;

/* any timeout source ID: */
static guint _tme_sjlj_gtk_timeout_id;

/* any timeout time (the absolute timeout the pending GTK timeout
   source corresponds to): */
static struct timeval _tme_sjlj_gtk_timeout;

#endif /* HAVE_GTK */
176
177 /* this initializes the threads system: */
178 void
tme_sjlj_threads_init(void)179 tme_sjlj_threads_init(void)
180 {
181 int fd;
182
183 #ifdef HAVE_GTK
184 /* assume that we won't be using the GTK main loop: */
185 tme_sjlj_using_gtk = FALSE;
186 tme_sjlj_idle_set = FALSE;
187 #endif /* HAVE_GTK */
188
189 /* there are no threads: */
190 tme_sjlj_threads_all = NULL;
191 tme_sjlj_threads_timeout = NULL;
192 tme_sjlj_threads_runnable = NULL;
193 tme_sjlj_threads_dispatching = NULL;
194 tme_sjlj_thread_active = NULL;
195 tme_sjlj_thread_exiting = FALSE;
196
197 /* no threads are waiting on any fds: */
198 tme_sjlj_main_max_fd = -1;
199 FD_ZERO(&tme_sjlj_main_fdset_read);
200 FD_ZERO(&tme_sjlj_main_fdset_write);
201 FD_ZERO(&tme_sjlj_main_fdset_except);
202 for (fd = 0; fd < FD_SETSIZE; fd++) {
203 tme_sjlj_fd_thread[fd].tme_sjlj_fd_thread_conditions = 0;
204 tme_sjlj_fd_thread[fd].tme_sjlj_fd_thread_read = NULL;
205 tme_sjlj_fd_thread[fd].tme_sjlj_fd_thread_write = NULL;
206 tme_sjlj_fd_thread[fd].tme_sjlj_fd_thread_except = NULL;
207 }
208
209 /* initialize the thread-blocked structure: */
210 tme_sjlj_thread_blocked.tme_sjlj_thread_cond = NULL;
211 tme_sjlj_thread_blocked.tme_sjlj_thread_max_fd = -1;
212 tme_sjlj_thread_blocked.tme_sjlj_thread_sleep.tv_sec = 0;
213 tme_sjlj_thread_blocked.tme_sjlj_thread_sleep.tv_usec = 0;
214 }
215
216 #ifdef HAVE_GTK
/* this initializes the threads system to use the GTK event loop: */
void
tme_sjlj_threads_gtk_init(void)
{
  char **argv;
  char *argv_buffer[3];
  int argc;

  /* if we've already initialized GTK, do nothing: */
  if (tme_sjlj_using_gtk) {
    return;
  }

  /* conjure up an argv for gtk_init(), since the real command line is
     not available here.  this is pretty bad: */
  argv = argv_buffer;
  argc = 0;
  argv[argc++] = "tmesh";
#if 1
  /* NOTE(review): this unconditionally passes --gtk-debug=signals to
     gtk_init(); looks like debugging leftover - confirm whether this
     #if should be 0 for production builds: */
  argv[argc++] = "--gtk-debug=signals";
#endif
  argv[argc] = NULL;
  gtk_init(&argc, &argv);

  /* we are now using GTK: */
  tme_sjlj_using_gtk = TRUE;
}
243 #endif /* HAVE_GTK */
244
245 /* this returns a reasonably current time: */
246 void
tme_sjlj_gettimeofday(struct timeval * now)247 tme_sjlj_gettimeofday(struct timeval *now)
248 {
249
250 /* if we need to, call gettimeofday(): */
251 if (__tme_predict_false(!tme_sjlj_thread_short)) {
252 gettimeofday(&_tme_sjlj_now, NULL);
253 tme_sjlj_thread_short = TRUE;
254 }
255
256 /* return the reasonably current time: */
257 *now = _tme_sjlj_now;
258 }
259
260 /* this changes a thread's state: */
261 static void
_tme_sjlj_change_state(struct tme_sjlj_thread * thread,int state)262 _tme_sjlj_change_state(struct tme_sjlj_thread *thread, int state)
263 {
264 struct tme_sjlj_thread **_thread_prev;
265 struct tme_sjlj_thread *thread_next;
266
267 /* the active thread is the only thread that can become blocked.
268 the active thread cannot become runnable or dispatching: */
269 assert (state == TME_SJLJ_THREAD_STATE_BLOCKED
270 ? (thread->state_next == tme_sjlj_thread_active)
271 : (thread != tme_sjlj_thread_active));
272
273 /* if the thread's current state is not BLOCKED: */
274 _thread_prev = thread->state_prev;
275 if (_thread_prev != NULL) {
276
277 /* remove it from that list: */
278 thread_next = thread->state_next;
279 *_thread_prev = thread_next;
280 if (thread_next != NULL) {
281 thread_next->state_prev = _thread_prev;
282 }
283
284 /* this thread is now on no list: */
285 thread->state_prev = NULL;
286 thread->state_next = NULL;
287 }
288
289 /* if the thread's desired state is not BLOCKED: */
290 if (state != TME_SJLJ_THREAD_STATE_BLOCKED) {
291
292 /* this thread must be runnable, or this thread must be
293 dispatching before threads are being dispatched: */
294 assert (state == TME_SJLJ_THREAD_STATE_RUNNABLE
295 || (state == TME_SJLJ_THREAD_STATE_DISPATCHING
296 && tme_sjlj_thread_active == NULL));
297
298 /* if threads are being dispatched, and this thread wasn't already
299 in this dispatch: */
300 if (tme_sjlj_thread_active != NULL
301 && thread->tme_sjlj_thread_dispatch_number != _tme_sjlj_thread_dispatch_number) {
302
303 /* add this thread to the dispatching list after the current
304 thread: */
305 _thread_prev = &tme_sjlj_thread_active->state_next;
306 }
307
308 /* otherwise, if this thread is dispatching: */
309 else if (state == TME_SJLJ_THREAD_STATE_DISPATCHING) {
310
311 /* add this thread to the dispatching list at the head: */
312 _thread_prev = &tme_sjlj_threads_dispatching;
313 }
314
315 /* otherwise, this thread is runnable: */
316 else {
317
318 /* add this thread to the runnable list at the head: */
319 _thread_prev = &tme_sjlj_threads_runnable;
320 }
321
322 /* add this thread to the list: */
323 thread_next = *_thread_prev;
324 *_thread_prev = thread;
325 thread->state_prev = _thread_prev;
326 thread->state_next = thread_next;
327 if (thread_next != NULL) {
328 thread_next->state_prev = &thread->state_next;
329 }
330
331 /* all nonblocked threads appear to be runnable: */
332 state = TME_SJLJ_THREAD_STATE_RUNNABLE;
333 }
334
335 /* set the new state of the thread: */
336 thread->tme_sjlj_thread_state = state;
337 }
338
339 /* this moves the runnable list to the dispatching list: */
340 static void
_tme_sjlj_threads_dispatching_runnable(void)341 _tme_sjlj_threads_dispatching_runnable(void)
342 {
343 struct tme_sjlj_thread *threads_dispatching;
344
345 /* the dispatching list must be empty: */
346 assert (tme_sjlj_threads_dispatching == NULL);
347
348 /* move the runnable list to the dispatching list: */
349 threads_dispatching = tme_sjlj_threads_runnable;
350 tme_sjlj_threads_runnable = NULL;
351 tme_sjlj_threads_dispatching = threads_dispatching;
352 if (threads_dispatching != NULL) {
353 threads_dispatching->state_prev = &tme_sjlj_threads_dispatching;
354 }
355 }
356
/* this moves all threads that have timed out to the dispatching list: */
static void
_tme_sjlj_threads_dispatching_timeout(void)
{
  struct timeval now;
  struct tme_sjlj_thread *thread_timeout;

  /* get the current time: */
  tme_gettimeofday(&now);

  /* loop over the timeout list, which is kept sorted with the
     earliest absolute timeout first: */
  for (thread_timeout = tme_sjlj_threads_timeout;
       thread_timeout != NULL;
       thread_timeout = thread_timeout->timeout_next) {

    /* if this timeout has not expired, no later entry has either,
       since the list is sorted: */
    if (thread_timeout->tme_sjlj_thread_timeout.tv_sec > now.tv_sec
        || (thread_timeout->tme_sjlj_thread_timeout.tv_sec == now.tv_sec
            && thread_timeout->tme_sjlj_thread_timeout.tv_usec > now.tv_usec)) {
      break;
    }

    /* move this thread to the dispatching list.  this unlinks it only
       from its state list; it stays on the timeout list (so the loop
       links remain valid) until tme_sjlj_dispatch() removes it: */
    _tme_sjlj_change_state(thread_timeout, TME_SJLJ_THREAD_STATE_DISPATCHING);
  }
}
383
/* this moves all threads with the given file descriptor conditions to
   the dispatching list: */
static void
_tme_sjlj_threads_dispatching_fd(int fd,
                                 GdkInputCondition fd_conditions)
{
  struct tme_sjlj_thread *thread;

  /* restrict the given conditions to those some thread is actually
     blocked on, then loop over the remaining condition bits, clearing
     the least-significant set bit on each iteration: */
  for (fd_conditions &= tme_sjlj_fd_thread[fd].tme_sjlj_fd_thread_conditions;
       fd_conditions != 0;
       fd_conditions &= (fd_conditions - 1)) {

    /* pick the thread for the least-significant remaining condition
       (READ is bit zero, then WRITE, then EXCEPTION, so the tests
       below match the bit-clearing order above) and move it to the
       dispatching list: */
    thread = ((fd_conditions & GDK_INPUT_READ)
              ? tme_sjlj_fd_thread[fd].tme_sjlj_fd_thread_read
              : (fd_conditions & GDK_INPUT_WRITE)
              ? tme_sjlj_fd_thread[fd].tme_sjlj_fd_thread_write
              : tme_sjlj_fd_thread[fd].tme_sjlj_fd_thread_except);
    assert (thread != NULL);
    _tme_sjlj_change_state(thread, TME_SJLJ_THREAD_STATE_DISPATCHING);
  }
}
407
/* this makes the (relative) timeout time from the earliest absolute
   timeout on the timeout list.  if that timeout has already expired,
   its thread is made runnable and a zero (poll) timeout is returned: */
static void
_tme_sjlj_timeout_time(struct timeval *timeout)
{
  struct timeval now;
  struct tme_sjlj_thread *thread_timeout;
  tme_int32_t usecs;
  unsigned long secs;
  unsigned long secs_other;

  /* get the current time: */
  tme_gettimeofday(&now);

  /* the timeout list must not be empty; its head holds the earliest
     absolute timeout: */
  thread_timeout = tme_sjlj_threads_timeout;
  assert (thread_timeout != NULL);

  /* subtract the now microseconds from the timeout microseconds: */
  assert (thread_timeout->tme_sjlj_thread_timeout.tv_usec < 1000000);
  usecs = thread_timeout->tme_sjlj_thread_timeout.tv_usec;
  assert (now.tv_usec < 1000000);
  usecs -= now.tv_usec;

  /* make any seconds carry (borrow one second into secs_other if the
     microsecond difference went negative): */
  secs_other = (usecs < 0);
  if (usecs < 0) {
    usecs += 1000000;
  }

  /* if the earliest timeout has already timed out (now plus the
     borrow exceeds the timeout seconds, or the difference is exactly
     zero): */
  secs_other += now.tv_sec;
  secs = thread_timeout->tme_sjlj_thread_timeout.tv_sec;
  if (__tme_predict_false(secs_other > secs
                          || ((secs -= secs_other) == 0
                              && usecs == 0))) {

    /* this thread is runnable: */
    _tme_sjlj_change_state(thread_timeout, TME_SJLJ_THREAD_STATE_RUNNABLE);

    /* make this a poll: */
    secs = 0;
    usecs = 0;
  }

  /* return the relative timeout time: */
  timeout->tv_sec = secs;
  timeout->tv_usec = usecs;
}
456
/* this dispatches all dispatching threads: */
static void
tme_sjlj_dispatch(volatile int passes)
{
  /* NB: these locals live across setjmp()/longjmp(); the ones read
     after a longjmp back are declared volatile: */
  struct tme_sjlj_thread * volatile thread;
  struct tme_sjlj_thread **_thread_timeout_prev;
  struct tme_sjlj_thread *thread_timeout_next;
  struct tme_sjlj_thread *thread_other;
  int rc_one;

  /* dispatch the given number of passes over the dispatching threads: */
  for (; passes-- > 0; ) {
    for (tme_sjlj_thread_active = tme_sjlj_threads_dispatching;
         (thread = tme_sjlj_thread_active) != NULL; ) {

      /* a thread is on the timeout list exactly when it has a nonzero
         sleep time.  if this thread is on the timeout list: */
      _thread_timeout_prev = thread->timeout_prev;
      assert ((_thread_timeout_prev != NULL)
              == (thread->tme_sjlj_thread_sleep.tv_sec != 0
                  || thread->tme_sjlj_thread_sleep.tv_usec != 0));
      if (_thread_timeout_prev != NULL) {

        /* remove this thread from the timeout list: */
        thread_timeout_next = thread->timeout_next;
        *_thread_timeout_prev = thread_timeout_next;
        if (thread_timeout_next != NULL) {
          thread_timeout_next->timeout_prev = _thread_timeout_prev;
        }

        /* this thread is no longer on the timeout list: */
        thread->timeout_prev = NULL;
        thread->timeout_next = NULL;
      }

      /* set the dispatch number on this thread, marking it as having
         run in this dispatch (see _tme_sjlj_change_state()): */
      thread->tme_sjlj_thread_dispatch_number = _tme_sjlj_thread_dispatch_number;

      /* when this active thread yields, we'll return here with a
         nonzero setjmp value, where we will continue the inner
         dispatching loop (presumably the yield path has advanced
         tme_sjlj_thread_active - confirm in tme_sjlj_yield()): */
      rc_one = setjmp(tme_sjlj_dispatcher_jmp);
      if (rc_one) {
        continue;
      }

      /* run this thread. if it happens to return, just call
         tme_sjlj_exit(): */
      (*thread->tme_sjlj_thread_func)(thread->tme_sjlj_thread_func_private);
      tme_sjlj_exit();
    }
  }

  /* if there are still dispatching threads, move them en masse to the
     head of the runnable list, ahead of any existing runnable
     threads: */
  thread = tme_sjlj_threads_dispatching;
  if (thread != NULL) {
    thread_other = tme_sjlj_threads_runnable;
    thread->state_prev = &tme_sjlj_threads_runnable;
    tme_sjlj_threads_runnable = thread;
    tme_sjlj_threads_dispatching = NULL;
    /* walk to the tail of the moved list and splice the old runnable
       list after it: */
    for (;; thread = thread->state_next) {
      if (thread->state_next == NULL) {
        thread->state_next = thread_other;
        if (thread_other != NULL) {
          thread_other->state_prev = &thread->state_next;
        }
        break;
      }
    }
  }

  /* the next dispatch will use the next number: */
  _tme_sjlj_thread_dispatch_number++;
}
530
531 #ifdef HAVE_GTK
532
/* this handles a GTK callback for a timeout: */
static gint
_tme_sjlj_gtk_callback_timeout(gpointer callback_pointer)
{

  /* we were in GTK for an unknown amount of time: */
  tme_thread_long();

  /* this GTK timeout will soon be removed (we return FALSE below), so
     forget its source ID: */
  _tme_sjlj_gtk_timeout_id = 0;

  /* move all threads that have timed out to the dispatching list: */
  _tme_sjlj_threads_dispatching_timeout();

  /* dispatch: */
  tme_sjlj_dispatch(1);

  /* yield to GTK, reestablishing any timeout or idle sources: */
  tme_threads_gtk_yield();

  /* remove this timeout: */
  return (FALSE);

  /* unused (unreachable; quiets unused-parameter warnings): */
  callback_pointer = 0;
}
559
/* this handles a GTK callback for a file descriptor: */
static void
_tme_sjlj_gtk_callback_fd(gpointer callback_pointer,
                          gint fd,
                          GdkInputCondition fd_conditions)
{

  /* we were in GTK for an unknown amount of time: */
  tme_thread_long();

  /* move all threads that match the conditions on this file
     descriptor to the dispatching list: */
  _tme_sjlj_threads_dispatching_fd(fd, fd_conditions);

  /* dispatch: */
  tme_sjlj_dispatch(1);

  /* yield to GTK: */
  tme_threads_gtk_yield();

  /* unused (dead store that quiets unused-parameter warnings): */
  callback_pointer = 0;
}
583
/* this handles a GTK callback for an idle: */
static gint
_tme_sjlj_gtk_callback_idle(gpointer callback_pointer)
{

  /* we were in GTK for an unknown amount of time: */
  tme_thread_long();

  /* move all runnable threads to the dispatching list: */
  _tme_sjlj_threads_dispatching_runnable();

  /* move all threads that have timed out to the dispatching list: */
  _tme_sjlj_threads_dispatching_timeout();

  /* dispatch: */
  tme_sjlj_dispatch(1);

  /* yield to GTK: */
  tme_threads_gtk_yield();

  /* if there are no runnable threads: */
  if (tme_sjlj_threads_runnable == NULL) {

    /* remove this idle by returning FALSE, and note that it is gone
       so tme_sjlj_threads_gtk_yield() can add it again: */
    tme_sjlj_idle_set = FALSE;
    return (FALSE);
  }

  /* preserve this idle: */
  return (TRUE);

  /* unused (unreachable; quiets unused-parameter warnings): */
  callback_pointer = 0;
}
618
/* this yields to GTK: it arranges for GTK event sources (a timeout
   and/or an idle callback) that will eventually dispatch our threads
   again, or quits the GTK main loop when no threads remain: */
void
tme_sjlj_threads_gtk_yield(void)
{
  struct tme_sjlj_thread *thread_timeout;
  struct timeval timeout;
  unsigned long secs;
  tme_uint32_t msecs;

  /* if there are no runnable threads: */
  if (tme_sjlj_threads_runnable == NULL) {

    /* if there are no threads at all: */
    if (__tme_predict_false(tme_sjlj_threads_all == NULL)) {

      /* quit the GTK main loop: */
      gtk_main_quit();
      return;
    }

    /* if there is a GTK timeout, but the timeout list is empty or the
       GTK timeout isn't for the earliest timeout: */
    thread_timeout = tme_sjlj_threads_timeout;
    if (_tme_sjlj_gtk_timeout_id != 0
        && (thread_timeout == NULL
            || _tme_sjlj_gtk_timeout.tv_sec != thread_timeout->tme_sjlj_thread_timeout.tv_sec
            || _tme_sjlj_gtk_timeout.tv_usec != thread_timeout->tme_sjlj_thread_timeout.tv_usec)) {

      /* remove the stale GTK timeout: */
      g_source_remove(_tme_sjlj_gtk_timeout_id);
      _tme_sjlj_gtk_timeout_id = 0;
    }

    /* if the timeout list is not empty, but there is no GTK timeout: */
    if (tme_sjlj_threads_timeout != NULL
        && _tme_sjlj_gtk_timeout_id == 0) {

      /* get the (relative) timeout: */
      _tme_sjlj_timeout_time(&timeout);

      /* if there are still no runnable threads: */
      /* NB: if the earliest timeout has already timed out,
         _tme_sjlj_timeout_time() has already made the thread
         runnable: */
      if (tme_sjlj_threads_runnable == NULL) {

        /* convert the timeout into milliseconds (rounding the
           microseconds up, carrying into the seconds), and clip it at
           ten seconds: */
        secs = timeout.tv_sec;
        msecs = (timeout.tv_usec + 999) / 1000;
        if (msecs == 1000) {
          secs++;
          msecs = 0;
        }
        msecs
          = (secs >= 10
             ? (10 * 1000)
             : ((secs * 1000) + msecs));

        /* GTK timeouts can expire up to one millisecond early, so we
           always add one: */
        msecs++;

        /* add the timeout: */
        /* XXX we have to call g_timeout_add_full here, because
           there are no gtk_timeout_add_ functions that allow you to
           specify the priority, and gtk_timeout_add() uses
           G_PRIORITY_DEFAULT, which means our (usually very
           frequent) timeouts always win over gtk's event handling,
           meaning the gtk windows never update: */
        _tme_sjlj_gtk_timeout_id
          = g_timeout_add_full(G_PRIORITY_DEFAULT_IDLE,
                               msecs,
                               _tme_sjlj_gtk_callback_timeout,
                               NULL,
                               NULL);
        assert (_tme_sjlj_gtk_timeout_id != 0);

        /* remember which absolute timeout this GTK timeout is for: */
        _tme_sjlj_gtk_timeout = tme_sjlj_threads_timeout->tme_sjlj_thread_timeout;
      }
    }
  }

  /* if there are runnable threads: */
  if (tme_sjlj_threads_runnable != NULL) {

    /* if the idle callback isn't set */
    if (!tme_sjlj_idle_set) {

      /* set the idle callback, at the same low priority as the
         timeout so GTK's own event handling still gets to run: */
      gtk_idle_add_priority(G_PRIORITY_DEFAULT_IDLE,
                            _tme_sjlj_gtk_callback_idle,
                            NULL);
      tme_sjlj_idle_set = TRUE;
    }
  }
}
715
716 #endif /* HAVE_GTK */
717
/* this starts the threads dispatching.  it either hands control to
   the GTK main loop, or runs our own select()-based main loop until
   all threads have exited: */
void
tme_sjlj_threads_run(void)
{
  int fd;
  fd_set fdset_read_out;
  fd_set fdset_write_out;
  fd_set fdset_except_out;
  GdkInputCondition fd_conditions;
  struct timeval timeout_buffer;
  struct timeval *timeout;
  int rc;

#ifdef HAVE_GTK
  /* if we're using the GTK main loop, yield to GTK and
     call gtk_main(): */
  if (tme_sjlj_using_gtk) {
    tme_threads_gtk_yield();
    gtk_main();
    return;
  }
#endif /* HAVE_GTK */

  /* otherwise, we have to use our own main loop: */

  /* loop while we have threads: */
  for (; tme_sjlj_threads_all != NULL; ) {

    /* if we have file descriptors to select on: */
    if (tme_sjlj_main_max_fd >= 0) {

      /* make the fd sets.  select() modifies its arguments, so we
         work on copies of the master sets: */
      fdset_read_out = tme_sjlj_main_fdset_read;
      fdset_write_out = tme_sjlj_main_fdset_write;
      fdset_except_out = tme_sjlj_main_fdset_except;
    }
    /* NOTE(review): when tme_sjlj_main_max_fd < 0 the fd sets are
       left uninitialized, but select() is then called with nfds zero
       and should examine no descriptors - confirm this is portable to
       all supported platforms: */

    /* make the select timeout: */

    /* if the timeout list is empty: */
    if (tme_sjlj_threads_timeout == NULL) {

      /* assume that we will block in select indefinitely. there must
         either be runnable threads (in which case we will not block
         at all in select), or we must be selecting on file
         descriptors: */
      timeout = NULL;
      assert (tme_sjlj_threads_runnable != NULL
              || tme_sjlj_main_max_fd >= 0);
    }

    /* otherwise, the timeout list is not empty: */
    else {

      /* make the timeout relative to now: */
      _tme_sjlj_timeout_time(&timeout_buffer);
      timeout = &timeout_buffer;
    }

    /* if there are runnable threads, make this a poll: */
    if (tme_sjlj_threads_runnable != NULL) {
      timeout_buffer.tv_sec = 0;
      timeout_buffer.tv_usec = 0;
      timeout = &timeout_buffer;
    }

    /* do the select: */
    rc = select(tme_sjlj_main_max_fd + 1,
                &fdset_read_out,
                &fdset_write_out,
                &fdset_except_out,
                timeout);

    /* we were in select() for an unknown amount of time: */
    tme_thread_long();

    /* move all runnable threads to the dispatching list: */
    _tme_sjlj_threads_dispatching_runnable();

    /* move all threads that have timed out to the dispatching list: */
    _tme_sjlj_threads_dispatching_timeout();

    /* if some fds are ready, dispatch them: */
    if (rc > 0) {
      for (fd = tme_sjlj_main_max_fd; fd >= 0; fd--) {
        fd_conditions = 0;
        if (FD_ISSET(fd, &fdset_read_out)) {
          fd_conditions |= GDK_INPUT_READ;
        }
        if (FD_ISSET(fd, &fdset_write_out)) {
          fd_conditions |= GDK_INPUT_WRITE;
        }
        if (FD_ISSET(fd, &fdset_except_out)) {
          fd_conditions |= GDK_INPUT_EXCEPTION;
        }
        if (fd_conditions != 0) {

          /* move all threads that match the conditions on this file
             descriptor to the dispatching list: */
          _tme_sjlj_threads_dispatching_fd(fd, fd_conditions);

          /* stop if there are no more file descriptors left in the
             sets: */
          if (--rc == 0) {
            break;
          }
        }
      }
    }

    /* dispatch: */
    tme_sjlj_dispatch(1);
  }

  /* all threads have exited: */
}
834
835 /* this creates a new thread: */
836 void
tme_sjlj_thread_create(tme_thread_t func,void * func_private)837 tme_sjlj_thread_create(tme_thread_t func, void *func_private)
838 {
839 struct tme_sjlj_thread *thread;
840
841 /* allocate a new thread and put it on the all-threads list: */
842 thread = tme_new(struct tme_sjlj_thread, 1);
843 thread->prev = &tme_sjlj_threads_all;
844 thread->next = *thread->prev;
845 *thread->prev = thread;
846 if (thread->next != NULL) {
847 thread->next->prev = &thread->next;
848 }
849
850 /* initialize the thread: */
851 thread->tme_sjlj_thread_func_private = func_private;
852 thread->tme_sjlj_thread_func = func;
853 thread->tme_sjlj_thread_cond = NULL;
854 thread->tme_sjlj_thread_max_fd = -1;
855 thread->tme_sjlj_thread_sleep.tv_sec = 0;
856 thread->tme_sjlj_thread_sleep.tv_usec = 0;
857 thread->timeout_prev = NULL;
858
859 /* make this thread runnable: */
860 thread->tme_sjlj_thread_state = TME_SJLJ_THREAD_STATE_BLOCKED;
861 thread->state_prev = NULL;
862 thread->state_next = NULL;
863 thread->tme_sjlj_thread_dispatch_number = _tme_sjlj_thread_dispatch_number - 1;
864 _tme_sjlj_change_state(thread,
865 TME_SJLJ_THREAD_STATE_RUNNABLE);
866 }
867
868 /* this makes a thread wait on a condition: */
869 void
tme_sjlj_cond_wait_yield(tme_cond_t * cond,tme_mutex_t * mutex)870 tme_sjlj_cond_wait_yield(tme_cond_t *cond, tme_mutex_t *mutex)
871 {
872
873 /* unlock the mutex: */
874 tme_mutex_unlock(mutex);
875
876 /* remember that this thread is waiting on this condition: */
877 tme_sjlj_thread_blocked.tme_sjlj_thread_cond = cond;
878
879 /* yield: */
880 tme_thread_yield();
881 }
882
883 /* this makes a thread sleep on a condition: */
884 void
tme_sjlj_cond_sleep_yield(tme_cond_t * cond,tme_mutex_t * mutex,const struct timeval * sleep)885 tme_sjlj_cond_sleep_yield(tme_cond_t *cond, tme_mutex_t *mutex, const struct timeval *sleep)
886 {
887
888 /* unlock the mutex: */
889 tme_mutex_unlock(mutex);
890
891 /* remember that this thread is waiting on this condition: */
892 tme_sjlj_thread_blocked.tme_sjlj_thread_cond = cond;
893
894 /* sleep and yield: */
895 tme_sjlj_sleep_yield(sleep->tv_sec, sleep->tv_usec);
896 }
897
898 /* this notifies one or more threads waiting on a condition: */
899 void
tme_sjlj_cond_notify(tme_cond_t * cond,int broadcast)900 tme_sjlj_cond_notify(tme_cond_t *cond, int broadcast)
901 {
902 struct tme_sjlj_thread *thread;
903
904 for (thread = tme_sjlj_threads_all;
905 thread != NULL;
906 thread = thread->next) {
907 if (thread->tme_sjlj_thread_state == TME_SJLJ_THREAD_STATE_BLOCKED
908 && thread->tme_sjlj_thread_cond == cond) {
909
910 /* this thread is runnable: */
911 _tme_sjlj_change_state(thread,
912 TME_SJLJ_THREAD_STATE_RUNNABLE);
913
914 /* if we're not broadcasting this notification, stop now: */
915 if (!broadcast) {
916 break;
917 }
918 }
919 }
920 }
921
922 /* this yields the current thread: */
923 void
tme_sjlj_yield(void)924 tme_sjlj_yield(void)
925 {
926 struct tme_sjlj_thread *thread;
927 int blocked;
928 int max_fd_old;
929 int max_fd_new;
930 int max_fd, fd;
931 GdkInputCondition fd_condition_old;
932 GdkInputCondition fd_condition_new;
933 struct tme_sjlj_thread **_thread_prev;
934 struct tme_sjlj_thread *thread_other;
935
936 /* get the active thread: */
937 thread = tme_sjlj_thread_active;
938
939 /* the thread ran for an unknown amount of time: */
940 tme_thread_long();
941
942 /* assume that this thread is not blocked: */
943 blocked = FALSE;
944
945 /* see if this thread is blocked on a condition: */
946 if (tme_sjlj_thread_blocked.tme_sjlj_thread_cond != NULL) {
947 blocked = TRUE;
948 }
949 thread->tme_sjlj_thread_cond = tme_sjlj_thread_blocked.tme_sjlj_thread_cond;
950 tme_sjlj_thread_blocked.tme_sjlj_thread_cond = NULL;
951
952 /* see if this thread is blocked on any file descriptors: */
953 max_fd_old = thread->tme_sjlj_thread_max_fd;
954 max_fd_new = tme_sjlj_thread_blocked.tme_sjlj_thread_max_fd;
955 max_fd = TME_MAX(max_fd_old, max_fd_new);
956 for (fd = 0; fd <= max_fd; fd++) {
957
958 /* the old and new conditions on this fd start out empty: */
959 fd_condition_old = 0;
960 fd_condition_new = 0;
961
962 /* check the old fd sets: */
963 if (fd <= max_fd_old) {
964 #define CHECK_FD_SET(fd_set, condition) \
965 do { \
966 if (FD_ISSET(fd, &thread->fd_set)) { \
967 fd_condition_old |= condition; \
968 } \
969 } while (/* CONSTCOND */ 0)
970 CHECK_FD_SET(tme_sjlj_thread_fdset_read, GDK_INPUT_READ);
971 CHECK_FD_SET(tme_sjlj_thread_fdset_write, GDK_INPUT_WRITE);
972 CHECK_FD_SET(tme_sjlj_thread_fdset_except, GDK_INPUT_EXCEPTION);
973 #undef CHECK_FD_SET
974 }
975
976 /* check the new fd sets: */
977 if (fd <= max_fd_new) {
978 #define CHECK_FD_SET(fd_set, condition) \
979 do { \
980 if (FD_ISSET(fd, &tme_sjlj_thread_blocked.fd_set)) { \
981 fd_condition_new |= condition; \
982 FD_SET(fd, &thread->fd_set); \
983 } \
984 else { \
985 FD_CLR(fd, &thread->fd_set); \
986 } \
987 } while (/* CONSTCOND */ 0)
988 CHECK_FD_SET(tme_sjlj_thread_fdset_read, GDK_INPUT_READ);
989 CHECK_FD_SET(tme_sjlj_thread_fdset_write, GDK_INPUT_WRITE);
990 CHECK_FD_SET(tme_sjlj_thread_fdset_except, GDK_INPUT_EXCEPTION);
991 #undef CHECK_FD_SET
992 }
993
994 /* if this thread is blocked on this file descriptor: */
995 if (fd_condition_new != 0) {
996 blocked = TRUE;
997 }
998
999 /* if the conditions have changed: */
1000 if (fd_condition_new != fd_condition_old) {
1001
1002 /* if there is any blocking on this file descriptor, remove it: */
1003 if (tme_sjlj_fd_thread[fd].tme_sjlj_fd_thread_conditions != 0) {
1004
1005 #ifdef HAVE_GTK
1006 if (tme_sjlj_using_gtk) {
1007
1008 /* it should be safe to remove this fd, even if we're
1009 currently in a callback for it. if we happen to get a
1010 callback for it later anyways, _tme_sjlj_gtk_callback_fd()
1011 will ignore it: */
1012 gdk_input_remove(tme_sjlj_fd_tag[fd]);
1013 }
1014 else
1015 #endif /* HAVE_GTK */
1016 {
1017
1018 /* remove this fd from our main loop's fd sets: */
1019 assert(fd <= tme_sjlj_main_max_fd);
1020 FD_CLR(fd, &tme_sjlj_main_fdset_read);
1021 FD_CLR(fd, &tme_sjlj_main_fdset_write);
1022 FD_CLR(fd, &tme_sjlj_main_fdset_except);
1023 if (fd == tme_sjlj_main_max_fd) {
1024 for (; --tme_sjlj_main_max_fd > 0; ) {
1025 if (tme_sjlj_fd_thread[tme_sjlj_main_max_fd].tme_sjlj_fd_thread_conditions != 0) {
1026 break;
1027 }
1028 }
1029 }
1030 }
1031 }
1032
1033 /* update the blocking by this thread on this file descriptor: */
1034 assert ((tme_sjlj_fd_thread[fd].tme_sjlj_fd_thread_conditions
1035 & fd_condition_old)
1036 == fd_condition_old);
1037 tme_sjlj_fd_thread[fd].tme_sjlj_fd_thread_conditions
1038 = ((tme_sjlj_fd_thread[fd].tme_sjlj_fd_thread_conditions
1039 & ~fd_condition_old)
1040 | fd_condition_new);
1041 #define UPDATE_FD_THREAD(fd_thread, condition) \
1042 do { \
1043 if (fd_condition_old & condition) { \
1044 assert(tme_sjlj_fd_thread[fd].fd_thread == thread); \
1045 tme_sjlj_fd_thread[fd].fd_thread = NULL; \
1046 } \
1047 if (fd_condition_new & condition) { \
1048 assert(tme_sjlj_fd_thread[fd].fd_thread == NULL); \
1049 tme_sjlj_fd_thread[fd].fd_thread = thread; \
1050 } \
1051 } while (/* CONSTCOND */ 0)
1052 UPDATE_FD_THREAD(tme_sjlj_fd_thread_read, GDK_INPUT_READ);
1053 UPDATE_FD_THREAD(tme_sjlj_fd_thread_write, GDK_INPUT_WRITE);
1054 UPDATE_FD_THREAD(tme_sjlj_fd_thread_except, GDK_INPUT_EXCEPTION);
1055 #undef UPDATE_FD_THREAD
1056
1057 /* get the conditions for all threads for this fd: */
1058 fd_condition_new = tme_sjlj_fd_thread[fd].tme_sjlj_fd_thread_conditions;
1059
1060 /* if there is any blocking on this file descriptor, add it: */
1061 if (fd_condition_new != 0) {
1062
1063 #ifdef HAVE_GTK
1064 if (tme_sjlj_using_gtk) {
1065 tme_sjlj_fd_tag[fd] =
1066 gdk_input_add(fd,
1067 fd_condition_new,
1068 _tme_sjlj_gtk_callback_fd,
1069 NULL);
1070 }
1071 else
1072 #endif /* HAVE_GTK */
1073 {
1074
1075 /* add this fd to main loop's relevant fd sets: */
1076 if (fd_condition_new & GDK_INPUT_READ) {
1077 FD_SET(fd, &tme_sjlj_main_fdset_read);
1078 }
1079 if (fd_condition_new & GDK_INPUT_WRITE) {
1080 FD_SET(fd, &tme_sjlj_main_fdset_write);
1081 }
1082 if (fd_condition_new & GDK_INPUT_EXCEPTION) {
1083 FD_SET(fd, &tme_sjlj_main_fdset_except);
1084 }
1085 if (fd > tme_sjlj_main_max_fd) {
1086 tme_sjlj_main_max_fd = fd;
1087 }
1088 }
1089 }
1090 }
1091 }
1092 thread->tme_sjlj_thread_max_fd = max_fd_new;
1093 tme_sjlj_thread_blocked.tme_sjlj_thread_max_fd = -1;
1094
1095 /* see if this thread is blocked for some amount of time: */
1096 if (tme_sjlj_thread_blocked.tme_sjlj_thread_sleep.tv_sec != 0
1097 || tme_sjlj_thread_blocked.tme_sjlj_thread_sleep.tv_usec != 0) {
1098
1099 assert(tme_sjlj_thread_blocked.tme_sjlj_thread_sleep.tv_usec < 1000000);
1100 blocked = TRUE;
1101
1102 /* set the timeout for this thread: */
1103 tme_gettimeofday(&thread->tme_sjlj_thread_timeout);
1104 thread->tme_sjlj_thread_timeout.tv_sec
1105 += tme_sjlj_thread_blocked.tme_sjlj_thread_sleep.tv_sec;
1106 thread->tme_sjlj_thread_timeout.tv_usec
1107 += tme_sjlj_thread_blocked.tme_sjlj_thread_sleep.tv_usec;
1108 if (thread->tme_sjlj_thread_timeout.tv_usec >= 1000000) {
1109 thread->tme_sjlj_thread_timeout.tv_sec++;
1110 thread->tme_sjlj_thread_timeout.tv_usec -= 1000000;
1111 }
1112
1113 /* insert this thread into the timeout list: */
1114 assert (thread->timeout_prev == NULL);
1115 for (_thread_prev = &tme_sjlj_threads_timeout;
1116 (thread_other = *_thread_prev) != NULL;
1117 _thread_prev = &thread_other->timeout_next) {
1118 if ((thread_other->tme_sjlj_thread_timeout.tv_sec
1119 > thread->tme_sjlj_thread_timeout.tv_sec)
1120 || ((thread_other->tme_sjlj_thread_timeout.tv_sec
1121 == thread->tme_sjlj_thread_timeout.tv_sec)
1122 && (thread_other->tme_sjlj_thread_timeout.tv_usec
1123 >= thread->tme_sjlj_thread_timeout.tv_usec))) {
1124 break;
1125 }
1126 }
1127 *_thread_prev = thread;
1128 thread->timeout_prev = _thread_prev;
1129 thread->timeout_next = thread_other;
1130 if (thread_other != NULL) {
1131 thread_other->timeout_prev = &thread->timeout_next;
1132 }
1133 }
1134 thread->tme_sjlj_thread_sleep = tme_sjlj_thread_blocked.tme_sjlj_thread_sleep;
1135 tme_sjlj_thread_blocked.tme_sjlj_thread_sleep.tv_sec = 0;
1136 tme_sjlj_thread_blocked.tme_sjlj_thread_sleep.tv_usec = 0;
1137
1138 /* if this thread is actually exiting, it must appear to be
1139 runnable, and it only isn't because it's exiting: */
1140 if (tme_sjlj_thread_exiting) {
1141 assert(!blocked);
1142 blocked = TRUE;
1143 }
1144
1145 /* make any following thread on the runnable list the next active
1146 thread: */
1147 tme_sjlj_thread_active = thread->state_next;
1148
1149 /* if this thread is blocked, move it to the blocked list: */
1150 if (blocked) {
1151 _tme_sjlj_change_state(thread,
1152 TME_SJLJ_THREAD_STATE_BLOCKED);
1153
1154 /* if this thread is exiting: */
1155 if (tme_sjlj_thread_exiting) {
1156
1157 /* remove this thread from the all-threads list: */
1158 *thread->prev = thread->next;
1159 if (thread->next != NULL) {
1160 thread->next->prev = thread->prev;
1161 }
1162
1163 /* free this thread: */
1164 tme_free(thread);
1165
1166 /* nothing is exiting any more: */
1167 tme_sjlj_thread_exiting = FALSE;
1168 }
1169 }
1170
1171 /* jump back to the dispatcher: */
1172 longjmp(tme_sjlj_dispatcher_jmp, TRUE);
1173 }
1174
/* this sleeps: */
void
tme_sjlj_sleep(unsigned long sec, unsigned long usec)
{
  struct timeval then, now, timeout;
  int rc;

  /* the thread ran for an unknown amount of time: */
  tme_thread_long();

  /* get the absolute wakeup time for the thread, first normalizing
     the interval so the microseconds part is less than one second: */
  tme_gettimeofday(&then);
  for (; usec >= 1000000; sec++, usec -= 1000000);
  if ((then.tv_usec += usec) >= 1000000) {
    sec++;
    then.tv_usec -= 1000000;
  }
  then.tv_sec += sec;

  /* select for the sleep period: */
  for (;;) {

    /* calculate the remaining timeout; if the wakeup time has
       already passed, we're done: */
    tme_gettimeofday(&now);
    if ((now.tv_sec > then.tv_sec)
	|| (now.tv_sec == then.tv_sec
	    && now.tv_usec >= then.tv_usec)) {
      break;
    }
    timeout = then;
    if (timeout.tv_usec < now.tv_usec) {
      timeout.tv_sec--;
      timeout.tv_usec += 1000000;
    }
    timeout.tv_sec -= now.tv_sec;
    timeout.tv_usec -= now.tv_usec;

    /* do the select with no descriptors, purely as a timed wait.
       POSIX requires nfds to be nonnegative; the previous code passed
       -1, which makes select() fail immediately with EINVAL on many
       systems, turning this sleep into a busy-wait on gettimeofday.
       select returns 0 iff the timeout expires, so in that case we
       can skip another gettimeofday and loop: */
    rc = select(0, NULL, NULL, NULL, &timeout);
    tme_thread_long();
    if (rc == 0) {
      break;
    }

    /* loop to see if the timeout really expired: */
  }
}
1223
1224 /* this sleeps and yields: */
1225 void
tme_sjlj_sleep_yield(unsigned long sec,unsigned long usec)1226 tme_sjlj_sleep_yield(unsigned long sec, unsigned long usec)
1227 {
1228
1229 /* set the sleep interval: */
1230 for (; usec >= 1000000; ) {
1231 sec++;
1232 usec -= 1000000;
1233 }
1234 tme_sjlj_thread_blocked.tme_sjlj_thread_sleep.tv_sec = sec;
1235 tme_sjlj_thread_blocked.tme_sjlj_thread_sleep.tv_usec = usec;
1236
1237 /* yield: */
1238 tme_thread_yield();
1239 }
1240
1241 /* this selects and yields: */
1242 int
tme_sjlj_select_yield(int nfds,fd_set * fdset_read_in,fd_set * fdset_write_in,fd_set * fdset_except_in,struct timeval * timeout_in)1243 tme_sjlj_select_yield(int nfds,
1244 fd_set *fdset_read_in,
1245 fd_set *fdset_write_in,
1246 fd_set *fdset_except_in,
1247 struct timeval *timeout_in)
1248 {
1249 struct timeval timeout_out;
1250 int rc;
1251
1252 /* we can't deal if there are more than FD_SETSIZE fds: */
1253 assert(nfds <= FD_SETSIZE);
1254
1255 /* in case we end up yielding, we need to save the original
1256 descriptor sets: */
1257 if (fdset_read_in != NULL) {
1258 tme_sjlj_thread_blocked.tme_sjlj_thread_fdset_read = *fdset_read_in;
1259 }
1260 if (fdset_write_in != NULL) {
1261 tme_sjlj_thread_blocked.tme_sjlj_thread_fdset_write = *fdset_write_in;
1262 }
1263 if (fdset_except_in != NULL) {
1264 tme_sjlj_thread_blocked.tme_sjlj_thread_fdset_except = *fdset_except_in;
1265 }
1266
1267 /* do a polling select: */
1268 timeout_out.tv_sec = timeout_out.tv_usec = 0;
1269 rc = select(nfds, fdset_read_in, fdset_write_in, fdset_except_in, &timeout_out);
1270 tme_thread_long();
1271 if (rc != 0
1272 || (timeout_in != NULL
1273 && timeout_in->tv_sec == 0
1274 && timeout_in->tv_usec == 0)) {
1275 return (rc);
1276 }
1277
1278 /* we are yielding. zero any unused descriptor sets and set the
1279 timeout time: */
1280 tme_sjlj_thread_blocked.tme_sjlj_thread_max_fd = nfds - 1;
1281 if (fdset_read_in == NULL) {
1282 FD_ZERO(&tme_sjlj_thread_blocked.tme_sjlj_thread_fdset_read);
1283 }
1284 if (fdset_write_in == NULL) {
1285 FD_ZERO(&tme_sjlj_thread_blocked.tme_sjlj_thread_fdset_write);
1286 }
1287 if (fdset_except_in == NULL) {
1288 FD_ZERO(&tme_sjlj_thread_blocked.tme_sjlj_thread_fdset_except);
1289 }
1290 if (timeout_in != NULL) {
1291 tme_sjlj_thread_blocked.tme_sjlj_thread_sleep = *timeout_in;
1292 for (; tme_sjlj_thread_blocked.tme_sjlj_thread_sleep.tv_usec >= 1000000; ) {
1293 tme_sjlj_thread_blocked.tme_sjlj_thread_sleep.tv_sec++;
1294 tme_sjlj_thread_blocked.tme_sjlj_thread_sleep.tv_usec -= 1000000;
1295 }
1296 }
1297
1298 /* yield: */
1299 tme_thread_yield();
1300 /* NOTREACHED */
1301 return (0);
1302 }
1303
/* this reads, yielding if the fd is not ready: */
ssize_t
tme_sjlj_read_yield(int fd, void *data, size_t count)
{
  fd_set readable;
  int rc;

  /* wait, yielding if necessary, until the fd is readable: */
  FD_ZERO(&readable);
  FD_SET(fd, &readable);
  rc = tme_sjlj_select_yield(fd + 1,
			     &readable,
			     NULL,
			     NULL,
			     NULL);

  /* unless exactly our one fd was reported ready, pass the select
     result through to the caller: */
  if (rc != 1) {
    return (rc);
  }

  /* the fd is ready; do the read: */
  return (read(fd, data, count));
}
1326
/* this writes, yielding if the fd is not ready: */
ssize_t
tme_sjlj_write_yield(int fd, void *data, size_t count)
{
  fd_set writable;
  int rc;

  /* wait, yielding if necessary, until the fd is writable: */
  FD_ZERO(&writable);
  FD_SET(fd, &writable);
  rc = tme_sjlj_select_yield(fd + 1,
			     NULL,
			     &writable,
			     NULL,
			     NULL);

  /* unless exactly our one fd was reported ready, pass the select
     result through to the caller: */
  if (rc != 1) {
    return (rc);
  }

  /* the fd is ready; do the write: */
  return (write(fd, data, count));
}
1349
1350 /* this exits a thread: */
1351 void
tme_sjlj_exit(void)1352 tme_sjlj_exit(void)
1353 {
1354
1355 /* mark that this thread is exiting: */
1356 tme_sjlj_thread_exiting = TRUE;
1357
1358 /* yield: */
1359 tme_thread_yield();
1360 }
1361
1362 #ifndef TME_NO_DEBUG_LOCKS
1363
1364 /* lock operations: */
1365 int
tme_sjlj_rwlock_init(struct tme_sjlj_rwlock * lock)1366 tme_sjlj_rwlock_init(struct tme_sjlj_rwlock *lock)
1367 {
1368 /* initialize the lock: */
1369 lock->_tme_sjlj_rwlock_locked = FALSE;
1370 lock->_tme_sjlj_rwlock_file = NULL;
1371 lock->_tme_sjlj_rwlock_line = 0;
1372 return (TME_OK);
1373 }
1374 int
tme_sjlj_rwlock_lock(struct tme_sjlj_rwlock * lock,_tme_const char * file,unsigned long line,int try)1375 tme_sjlj_rwlock_lock(struct tme_sjlj_rwlock *lock, _tme_const char *file, unsigned long line, int try)
1376 {
1377
1378 /* if this lock is already locked: */
1379 if (lock->_tme_sjlj_rwlock_locked) {
1380 if (try) {
1381 return (TME_EDEADLK);
1382 }
1383 abort();
1384 }
1385
1386 /* lock the lock: */
1387 lock->_tme_sjlj_rwlock_locked = TRUE;
1388 lock->_tme_sjlj_rwlock_file = file;
1389 lock->_tme_sjlj_rwlock_line = line;
1390 return (TME_OK);
1391 }
1392 int
tme_sjlj_rwlock_unlock(struct tme_sjlj_rwlock * lock,_tme_const char * file,unsigned long line)1393 tme_sjlj_rwlock_unlock(struct tme_sjlj_rwlock *lock, _tme_const char *file, unsigned long line)
1394 {
1395
1396 /* if this lock isn't locked: */
1397 if (!lock->_tme_sjlj_rwlock_locked) {
1398 abort();
1399 }
1400
1401 /* unlock the lock: */
1402 lock->_tme_sjlj_rwlock_locked = FALSE;
1403 lock->_tme_sjlj_rwlock_file = file;
1404 lock->_tme_sjlj_rwlock_line = line;
1405 return (TME_OK);
1406 }
1407
1408 #endif /* !TME_NO_DEBUG_LOCKS */
1409