1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2008 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18
19
20 #ifdef HAVE_CONFIG_H
21 # include <config.h>
22 #endif
23
24 #include "libguile/_scm.h"
25
26 #if HAVE_UNISTD_H
27 #include <unistd.h>
28 #endif
29 #include <stdio.h>
30 #include <assert.h>
31
32 #ifdef HAVE_STRING_H
33 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
34 #endif
35
36 #if HAVE_SYS_TIME_H
37 #include <sys/time.h>
38 #endif
39
40 #include "libguile/validate.h"
41 #include "libguile/root.h"
42 #include "libguile/eval.h"
43 #include "libguile/async.h"
44 #include "libguile/ports.h"
45 #include "libguile/threads.h"
46 #include "libguile/dynwind.h"
47 #include "libguile/iselect.h"
48 #include "libguile/fluids.h"
49 #include "libguile/continuations.h"
50 #include "libguile/gc.h"
51 #include "libguile/init.h"
52
53 #ifdef __MINGW32__
54 #ifndef ETIMEDOUT
55 # define ETIMEDOUT WSAETIMEDOUT
56 #endif
57 # include <fcntl.h>
58 # include <process.h>
59 # define pipe(fd) _pipe (fd, 256, O_BINARY)
60 #endif /* __MINGW32__ */
61
62 /*** Queues */
63
64 /* Make an empty queue data structure.
65 */
66 static SCM
make_queue()67 make_queue ()
68 {
69 return scm_cons (SCM_EOL, SCM_EOL);
70 }
71
72 /* Put T at the back of Q and return a handle that can be used with
73 remqueue to remove T from Q again.
74 */
75 static SCM
enqueue(SCM q,SCM t)76 enqueue (SCM q, SCM t)
77 {
78 SCM c = scm_cons (t, SCM_EOL);
79 if (scm_is_null (SCM_CDR (q)))
80 SCM_SETCDR (q, c);
81 else
82 SCM_SETCDR (SCM_CAR (q), c);
83 SCM_SETCAR (q, c);
84 return c;
85 }
86
87 /* Remove the element that the handle C refers to from the queue Q. C
88 must have been returned from a call to enqueue. The return value
89 is zero when the element referred to by C has already been removed.
90 Otherwise, 1 is returned.
91 */
92 static int
remqueue(SCM q,SCM c)93 remqueue (SCM q, SCM c)
94 {
95 SCM p, prev = q;
96 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
97 {
98 if (scm_is_eq (p, c))
99 {
100 if (scm_is_eq (c, SCM_CAR (q)))
101 SCM_SETCAR (q, SCM_CDR (c));
102 SCM_SETCDR (prev, SCM_CDR (c));
103 return 1;
104 }
105 prev = p;
106 }
107 return 0;
108 }
109
110 /* Remove the front-most element from the queue Q and return it.
111 Return SCM_BOOL_F when Q is empty.
112 */
113 static SCM
dequeue(SCM q)114 dequeue (SCM q)
115 {
116 SCM c = SCM_CDR (q);
117 if (scm_is_null (c))
118 return SCM_BOOL_F;
119 else
120 {
121 SCM_SETCDR (q, SCM_CDR (c));
122 if (scm_is_null (SCM_CDR (q)))
123 SCM_SETCAR (q, SCM_EOL);
124 return SCM_CAR (c);
125 }
126 }
127
128 /*** Thread smob routines */
129
130 static SCM
thread_mark(SCM obj)131 thread_mark (SCM obj)
132 {
133 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
134 scm_gc_mark (t->result);
135 scm_gc_mark (t->join_queue);
136 scm_gc_mark (t->dynwinds);
137 scm_gc_mark (t->active_asyncs);
138 scm_gc_mark (t->continuation_root);
139 return t->dynamic_state;
140 }
141
142 static int
thread_print(SCM exp,SCM port,scm_print_state * pstate SCM_UNUSED)143 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
144 {
145 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
146 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
147 the struct case, hence we go via a union, and extract according to the
148 size of pthread_t. */
149 union {
150 scm_i_pthread_t p;
151 unsigned short us;
152 unsigned int ui;
153 unsigned long ul;
154 scm_t_uintmax um;
155 } u;
156 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
157 scm_i_pthread_t p = t->pthread;
158 scm_t_uintmax id;
159 u.p = p;
160 if (sizeof (p) == sizeof (unsigned short))
161 id = u.us;
162 else if (sizeof (p) == sizeof (unsigned int))
163 id = u.ui;
164 else if (sizeof (p) == sizeof (unsigned long))
165 id = u.ul;
166 else
167 id = u.um;
168
169 scm_puts ("#<thread ", port);
170 scm_uintprint (id, 10, port);
171 scm_puts (" (", port);
172 scm_uintprint ((scm_t_bits)t, 16, port);
173 scm_puts (")>", port);
174 return 1;
175 }
176
177 static size_t
thread_free(SCM obj)178 thread_free (SCM obj)
179 {
180 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
181 assert (t->exited);
182 scm_gc_free (t, sizeof (*t), "thread");
183 return 0;
184 }
185
186 /*** Blocking on queues. */
187
/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
*/
191
/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The QUEUE data structure is assumed to be protected by MUTEX and
   the caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* When scm_i_setup_sleep reports non-zero, an interruption is
     already pending; do not sleep at all in that case.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Keep asyncs from running while we are parked on QUEUE.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.  (unblock_from_queue dequeues its target, so
	 remqueue returns 0 on a normal wakeup.)
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
245
246 /* Wake up the first thread on QUEUE, if any. The caller must hold
247 the mutex that protects QUEUE. The awoken thread is returned, or
248 #f when the queue was empty.
249 */
250 static SCM
unblock_from_queue(SCM queue)251 unblock_from_queue (SCM queue)
252 {
253 SCM thread = dequeue (queue);
254 if (scm_is_true (thread))
255 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
256 return thread;
257 }
258
259 /* Getting into and out of guile mode.
260 */
261
262 /* Ken Raeburn observes that the implementation of suspend and resume
263 (and the things that build on top of them) are very likely not
   correct (see below). We will need to fix this eventually, and that's
265 why scm_leave_guile/scm_enter_guile are not exported in the API.
266
267 Ken writes:
268
269 Consider this sequence:
270
271 Function foo, called in Guile mode, calls suspend (maybe indirectly
272 through scm_leave_guile), which does this:
273
274 // record top of stack for the GC
275 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
276 var 't'
277 // save registers.
278 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
279 SCM_I_SETJMP (t->regs); // here's most of the magic
280
281 ... and returns.
282
283 Function foo has a SCM value X, a handle on a non-immediate object, in
284 a caller-saved register R, and it's the only reference to the object
285 currently.
286
287 The compiler wants to use R in suspend, so it pushes the current
288 value, X, into a stack slot which will be reloaded on exit from
289 suspend; then it loads stuff into R and goes about its business. The
290 setjmp call saves (some of) the current registers, including R, which
291 no longer contains X. (This isn't a problem for a normal
292 setjmp/longjmp situation, where longjmp would be called before
293 setjmp's caller returns; the old value for X would be loaded back from
294 the stack after the longjmp, before the function returned.)
295
296 So, suspend returns, loading X back into R (and invalidating the jump
297 buffer) in the process. The caller foo then goes off and calls a
298 bunch of other functions out of Guile mode, occasionally storing X on
299 the stack again, but, say, much deeper on the stack than suspend's
300 stack frame went, and the stack slot where suspend had written X has
301 long since been overwritten with other values.
302
303 Okay, nothing actively broken so far. Now, let garbage collection
304 run, triggered by another thread.
305
306 The thread calling foo is out of Guile mode at the time, so the
307 garbage collector just scans a range of stack addresses. Too bad that
308 X isn't stored there. So the pointed-to storage goes onto the free
309 list, and I think you can see where things go from there.
310
311 Is there anything I'm missing that'll prevent this scenario from
312 happening? I mean, aside from, "well, suspend and scm_leave_guile
313 don't have many local variables, so they probably won't need to save
314 any registers on most systems, so we hope everything will wind up in
315 the jump buffer and we'll just get away with it"?
316
317 (And, going the other direction, if scm_leave_guile and suspend push
318 the stack pointer over onto a new page, and foo doesn't make further
319 function calls and thus the stack pointer no longer includes that
320 page, are we guaranteed that the kernel cannot release the now-unused
321 stack page that contains the top-of-stack pointer we just saved? I
322 don't know if any OS actually does that. If it does, we could get
323 faults in garbage collection.)
324
325 I don't think scm_without_guile has to have this problem, as it gets
326 more control over the stack handling -- but it should call setjmp
327 itself. I'd probably try something like:
328
329 // record top of stack for the GC
330 t->top = SCM_STACK_PTR (&t);
331 // save registers.
332 SCM_FLUSH_REGISTER_WINDOWS;
333 SCM_I_SETJMP (t->regs);
334 res = func(data);
335 scm_enter_guile (t);
336
337 ... though even that's making some assumptions about the stack
338 ordering of local variables versus caller-saved registers.
339
340 For something like scm_leave_guile to work, I don't think it can just
341 rely on invalidated jump buffers. A valid jump buffer, and a handle
342 on the stack state at the point when the jump buffer was initialized,
343 together, would work fine, but I think then we're talking about macros
344 invoking setjmp in the caller's stack frame, and requiring that the
345 caller of scm_leave_guile also call scm_enter_guile before returning,
346 kind of like pthread_cleanup_push/pop calls that have to be paired up
347 in a function. (In fact, the pthread ones have to be paired up
348 syntactically, as if they might expand to a compound statement
349 incorporating the user's code, and invoking a compiler's
350 exception-handling primitives. Which might be something to think
351 about for cases where Guile is used with C++ exceptions or
352 pthread_cancel.)
353 */
354
/* Thread-specific-storage key under which each thread's scm_i_thread
   structure is stored; its destructor (on_thread_exit) performs
   thread tear-down.  */
scm_i_pthread_key_t scm_i_thread_key;
356
357 static void
resume(scm_i_thread * t)358 resume (scm_i_thread *t)
359 {
360 t->top = NULL;
361 if (t->clear_freelists_p)
362 {
363 *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
364 *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
365 t->clear_freelists_p = 0;
366 }
367 }
368
/* Opaque handle returned by scm_leave_guile and consumed by
   scm_enter_guile; really a scm_i_thread pointer (or NULL).  */
typedef void* scm_t_guile_ticket;
370
371 static void
scm_enter_guile(scm_t_guile_ticket ticket)372 scm_enter_guile (scm_t_guile_ticket ticket)
373 {
374 scm_i_thread *t = (scm_i_thread *)ticket;
375 if (t)
376 {
377 scm_i_pthread_mutex_lock (&t->heap_mutex);
378 resume (t);
379 }
380 }
381
/* Record the current thread's stack top and register contents so the
   conservative GC can scan them while the thread is out of guile
   mode; returns the current thread.  NOTE(review): per the long
   comment above, the setjmp here may fail to capture caller-saved
   registers of our caller, so this scheme is known to be not fully
   correct.  */
static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  SCM_I_SETJMP (t->regs);
  return t;
}
394
395 static scm_t_guile_ticket
scm_leave_guile()396 scm_leave_guile ()
397 {
398 scm_i_thread *t = suspend ();
399 scm_i_pthread_mutex_unlock (&t->heap_mutex);
400 return (scm_t_guile_ticket) t;
401 }
402
/* Protects the all_threads list and thread_count.  */
static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
/* Singly-linked list (via next_thread) of all guilified threads.  */
static scm_i_thread *all_threads = NULL;
/* Number of entries on all_threads.  */
static int thread_count;

/* Dynamic state used as parent when the caller supplies none.  */
static SCM scm_i_default_dynamic_state;
408
/* Perform first stage of thread initialisation, in non-guile mode:
   allocate and fill in this thread's scm_i_thread structure, link it
   onto the global thread list, and enter guile mode.  BASE is the
   stack base to report to the conservative GC.  */
static void
guilify_self_1 (SCM_STACKITEM *base)
{
  /* Plain malloc: we are not in guile mode yet.  The GC is told
     about this memory in guilify_self_2.  */
  scm_i_thread *t = malloc (sizeof (scm_i_thread));

  t->pthread = scm_i_pthread_self ();
  t->handle = SCM_BOOL_F;
  t->result = SCM_BOOL_F;
  t->join_queue = SCM_EOL;
  t->dynamic_state = SCM_BOOL_F;
  t->dynwinds = SCM_EOL;
  t->active_asyncs = SCM_EOL;
  /* Asyncs stay blocked until guilify_self_2 finishes setting up.  */
  t->block_asyncs = 1;
  t->pending_asyncs = 1;
  t->critical_section_level = 0;
  t->last_debug_frame = NULL;
  t->base = base;
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum. */
  t->register_backing_store_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (t->register_backing_store_base > bsp)
      t->register_backing_store_base = bsp;
  }
#endif
  t->continuation_root = SCM_EOL;
  t->continuation_base = base;
  scm_i_pthread_cond_init (&t->sleep_cond, NULL);
  t->sleep_mutex = NULL;
  t->sleep_object = SCM_BOOL_F;
  t->sleep_fd = -1;

  if (pipe (t->sleep_pipe) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
  t->clear_freelists_p = 0;
  t->gc_running_p = 0;
  t->exited = 0;

  t->freelist = SCM_EOL;
  t->freelist2 = SCM_EOL;
  SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
  SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);

  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* As soon as this thread adds itself to the global thread list, the
     GC may think that it has a stack that needs marking.  Therefore
     initialize t->top to be the same as t->base, just in case GC runs
     before the thread can lock its heap_mutex for the first time. */
  t->top = t->base;
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  t->next_thread = all_threads;
  all_threads = t;
  thread_count++;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  /* Enter Guile mode. */
  scm_enter_guile (t);
}
483
/* Perform second stage of thread initialisation, in guile mode:
   create the thread's SCM handle and dynamic state, then unblock
   asyncs (blocked since guilify_self_1).  PARENT is the dynamic
   state to inherit from, or #f for the initial dynamic state.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
  /* Account the malloc'd scm_i_thread from guilify_self_1 to the GC.  */
  scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  t->block_asyncs = 0;
}
504
/* Perform thread tear-down, in guile mode: mark the thread V (an
   scm_i_thread pointer) as exited, close its sleep pipe, and wake
   every thread blocked in join-thread on it.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *)v;

  /* thread_admin_mutex also guards t->exited and the join queue
     (see scm_join_thread).  */
  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake all joiners; they observe t->exited and stop waiting.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return NULL;
}
523
/* Destructor for scm_i_thread_key: runs when a guilified thread
   terminates.  V is the thread's scm_i_thread pointer.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *)v, **tp;

  /* Re-install the key value so code running below can still find
     the current thread (presumably pthreads cleared it before
     invoking this destructor -- TODO confirm).  */
  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.  */
  scm_with_guile (do_thread_exit, v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;
	break;
      }
  thread_count--;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}
551
/* Ensures init_thread_key runs exactly once, process-wide.  */
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
553
/* Create the thread-specific key; on_thread_exit is registered as
   its destructor and thus runs at each guilified thread's exit.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
559
/* Perform any initializations necessary to bring the current thread
   into guile mode, initializing Guile itself, if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
   which case the default dynamic state is used.

   Return zero when the thread was in guile mode already; otherwise
   return 1.
*/

static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization (which also guilifies this thread).
	  */
	  scm_i_init_guile (base);
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
	 resume it.

	 A user call to scm_with_guile() will lead us to here.  This could
	 happen from anywhere on the stack, and in particular lower on the
	 stack than when it was when this thread was first guilified.  Thus,
	 `base' must be updated.  */
#if SCM_STACK_GROWS_UP
      if (base < t->base)
	t->base = base;
#else
      if (base > t->base)
	t->base = base;
#endif

      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
       */
      return 0;
    }
}
631
632 #if SCM_USE_PTHREAD_THREADS
633
634 #if HAVE_PTHREAD_ATTR_GETSTACK && (HAVE_PTHREAD_GETATTR_NP || HAVE_PTHREAD_ATTR_GET_NP)
635 /* This method for GNU/Linux and perhaps some other systems.
636 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
637 available on them. */
638 #define HAVE_GET_THREAD_STACK_BASE
639
640 static SCM_STACKITEM *
get_thread_stack_base()641 get_thread_stack_base ()
642 {
643 pthread_attr_t attr;
644 void *start, *end;
645 size_t size;
646
647 #if HAVE_PTHREAD_ATTR_GET_NP
648 pthread_attr_init (&attr);
649 pthread_attr_get_np (pthread_self (), &attr);
650 pthread_attr_getstack (&attr, &start, &size);
651 pthread_attr_destroy (&attr);
652 #elif HAVE_PTHREAD_GETATTR_NP
653 pthread_getattr_np (pthread_self (), &attr);
654 pthread_attr_getstack (&attr, &start, &size);
655 #endif
656 end = (char *)start + size;
657
658 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
659 for the main thread, but we can use scm_get_stack_base in that
660 case.
661 */
662
663 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
664 if ((void *)&attr < start || (void *)&attr >= end)
665 return scm_get_stack_base ();
666 else
667 #endif
668 {
669 #if SCM_STACK_GROWS_UP
670 return start;
671 #else
672 return end;
673 #endif
674 }
675 }
676
677 #elif HAVE_PTHREAD_GET_STACKADDR_NP
678 /* This method for MacOS X.
679 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
680 but as of 2006 there's nothing obvious at apple.com. */
681 #define HAVE_GET_THREAD_STACK_BASE
682 static SCM_STACKITEM *
get_thread_stack_base()683 get_thread_stack_base ()
684 {
685 return pthread_get_stackaddr_np (pthread_self ());
686 }
687
688 #elif defined (__MINGW32__)
689 /* This method for mingw. In mingw the basic scm_get_stack_base can be used
690 in any thread. We don't like hard-coding the name of a system, but there
691 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
692 work. */
693 #define HAVE_GET_THREAD_STACK_BASE
694 static SCM_STACKITEM *
get_thread_stack_base()695 get_thread_stack_base ()
696 {
697 return scm_get_stack_base ();
698 }
699
700 #endif /* pthread methods of get_thread_stack_base */
701
702 #else /* !SCM_USE_PTHREAD_THREADS */
703
704 #define HAVE_GET_THREAD_STACK_BASE
705
706 static SCM_STACKITEM *
get_thread_stack_base()707 get_thread_stack_base ()
708 {
709 return scm_get_stack_base ();
710 }
711
712 #endif /* !SCM_USE_PTHREAD_THREADS */
713
714 #ifdef HAVE_GET_THREAD_STACK_BASE
715
716 void
scm_init_guile()717 scm_init_guile ()
718 {
719 scm_i_init_thread_for_guile (get_thread_stack_base (),
720 scm_i_default_dynamic_state);
721 }
722
723 #endif
724
/* Call FUNC with DATA in guile mode, using the default dynamic state
   as parent; returns FUNC's result.  See
   scm_i_with_guile_and_parent for details.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
731
/* Call FUNC with DATA in guile mode, entering guile mode first if
   necessary with PARENT as the parent dynamic state.  FUNC runs
   inside a continuation barrier.  Guile mode is left again on return
   only when it was actually entered here, so nested calls are
   cheap.  */
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data,
			     SCM parent)
{
  void *res;
  int really_entered;
  /* The address of this local marks the stack base for the GC.  */
  SCM_STACKITEM base_item;
  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  res = scm_c_with_continuation_barrier (func, data);
  if (really_entered)
    scm_leave_guile ();
  return res;
}
745
746 void *
scm_without_guile(void * (* func)(void *),void * data)747 scm_without_guile (void *(*func)(void *), void *data)
748 {
749 void *res;
750 scm_t_guile_ticket t;
751 t = scm_leave_guile ();
752 res = func (data);
753 scm_enter_guile (t);
754 return res;
755 }
756
757 /*** Thread creation */
758
/* Handshake data passed from scm_call_with_new_thread to the new
   thread.  It lives on the creating thread's stack; the child must
   be done reading it before the creator returns (see the cond
   handshake in really_launch).  */
typedef struct {
  SCM parent;			/* parent dynamic state for the new thread */
  SCM thunk;			/* thunk to call in the new thread */
  SCM handler;			/* catch handler, or SCM_UNDEFINED */
  SCM thread;			/* set by the child: its thread object */
  scm_i_pthread_mutex_t mutex;	/* protects THREAD */
  scm_i_pthread_cond_t cond;	/* signalled once THREAD is set */
} launch_data;
767
/* Body of a thread created by call-with-new-thread; runs in guile
   mode.  Publishes the new thread object back to the creator, then
   calls the user's thunk (under a catch when a handler was given)
   and stores its value as the thread's result.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Copy what we need out of DATA before signalling the creator,
     since DATA lives on the creator's stack.  */
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* Handshake: publish our thread object and wake the creator.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
789
790 static void *
launch_thread(void * d)791 launch_thread (void *d)
792 {
793 launch_data *data = (launch_data *)d;
794 scm_i_pthread_detach (scm_i_pthread_self ());
795 scm_i_with_guile_and_parent (really_launch, d, data->parent);
796 return NULL;
797 }
798
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread. The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler. This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  /* DATA lives on our stack; the child signals DATA.cond once it has
     read everything it needs, and we do not return before that.  */
  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      /* Report the pthread_create failure code as a system error.  */
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait for the child to publish its thread object.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
842
/* Like launch_data, but for threads spawned from C with
   scm_spawn_thread: the body and handler are C functions.  Lives on
   the spawning thread's stack (see the cond handshake).  */
typedef struct {
  SCM parent;			/* parent dynamic state for the new thread */
  scm_t_catch_body body;	/* C function to run in the new thread */
  void *body_data;		/* argument for BODY */
  scm_t_catch_handler handler;	/* C catch handler, or NULL */
  void *handler_data;		/* argument for HANDLER */
  SCM thread;			/* set by the child: its thread object */
  scm_i_pthread_mutex_t mutex;	/* protects THREAD */
  scm_i_pthread_cond_t cond;	/* signalled once THREAD is set */
} spawn_data;
853
/* Body of a thread created with scm_spawn_thread; runs in guile
   mode.  Publishes the new thread object back to the spawner, then
   runs BODY (under scm_internal_catch when a handler was given) and
   stores its value as the thread's result.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Copy what we need out of DATA before signalling the spawner,
     since DATA lives on the spawner's stack.  */
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Handshake: publish our thread object and wake the spawner.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
878
879 static void *
spawn_thread(void * d)880 spawn_thread (void *d)
881 {
882 spawn_data *data = (spawn_data *)d;
883 scm_i_pthread_detach (scm_i_pthread_self ());
884 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
885 return NULL;
886 }
887
/* Spawn a new thread running BODY with BODY_DATA in guile mode, with
   HANDLER/HANDLER_DATA as catch handler (HANDLER may be NULL).  The
   C-level counterpart of call-with-new-thread; returns the new
   thread object once the child has started, or signals a system
   error when thread creation fails.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  /* DATA lives on our stack; the child signals DATA.cond once it has
     read everything it needs, and we do not return before that.  */
  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      /* Report the pthread_create failure code as a system error.  */
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait for the child to publish its thread object.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
918
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Returns #t when the underlying yield operation reports success.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
927
SCM_DEFINE (scm_join_thread, "join-thread", 1, 0, 0,
	    (SCM thread),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread
{
  scm_i_thread *t;
  SCM res;

  SCM_VALIDATE_THREAD (1, thread);
  /* Joining ourselves would wait forever on our own join queue.  */
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("can not join the current thread", SCM_EOL);

  /* thread_admin_mutex guards t->exited and the join queue (see
     do_thread_exit).  */
  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);

  t = SCM_I_THREAD_DATA (thread);
  while (!t->exited)
    {
      /* Sleep on the target's join queue; do_thread_exit wakes us.  */
      block_self (t->join_queue, thread, &thread_admin_mutex, NULL);
      if (t->exited)
	break;
      /* We were interrupted: run pending asyncs without holding
	 thread_admin_mutex, then retry.  */
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      SCM_TICK;
      scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
    }
  res = t->result;

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return res;
}
#undef FUNC_NAME
959
/*** Fat mutexes */

/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
*/

typedef struct {
  scm_i_pthread_mutex_t lock;	/* protects the fields below */
  SCM owner;			/* owning thread object, or #f when free */
  int level;			/* how much the owner owns us.
				   < 0 for non-recursive mutexes */
  SCM waiting;			/* queue of threads waiting for this mutex */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
978
979 static SCM
fat_mutex_mark(SCM mx)980 fat_mutex_mark (SCM mx)
981 {
982 fat_mutex *m = SCM_MUTEX_DATA (mx);
983 scm_gc_mark (m->owner);
984 return m->waiting;
985 }
986
987 static size_t
fat_mutex_free(SCM mx)988 fat_mutex_free (SCM mx)
989 {
990 fat_mutex *m = SCM_MUTEX_DATA (mx);
991 scm_i_pthread_mutex_destroy (&m->lock);
992 scm_gc_free (m, sizeof (fat_mutex), "mutex");
993 return 0;
994 }
995
996 static int
fat_mutex_print(SCM mx,SCM port,scm_print_state * pstate SCM_UNUSED)997 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
998 {
999 fat_mutex *m = SCM_MUTEX_DATA (mx);
1000 scm_puts ("#<mutex ", port);
1001 scm_uintprint ((scm_t_bits)m, 16, port);
1002 scm_puts (">", port);
1003 return 1;
1004 }
1005
/* Allocate a new fat mutex smob.  RECURSIVE selects a recursive
   (level starts at 0) or non-recursive (level fixed at -1) mutex.  */
static SCM
make_fat_mutex (int recursive)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = recursive? 0 : -1;
  /* Give WAITING a valid SCM value before the smob exists: make_queue
     below allocates, which may trigger GC, and fat_mutex_mark would
     then see this field.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1021
SCM_DEFINE (scm_make_mutex, "make-mutex", 0, 0, 0,
	    (void),
	    "Create a new mutex. ")
#define FUNC_NAME s_scm_make_mutex
{
  /* 0 => non-recursive (level is -1).  */
  return make_fat_mutex (0);
}
#undef FUNC_NAME
1030
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* 1 => recursive (level starts at 0 and counts re-locks).  */
  return make_fat_mutex (1);
}
#undef FUNC_NAME
1039
/* Lock MUTEX on behalf of the current thread.  Returns NULL on
   success, or a static error message when MUTEX is non-recursive and
   the caller already owns it.  May block; while blocked the thread
   stays responsive to asyncs via block_self/SCM_TICK.  */
static char *
fat_mutex_lock (SCM mutex)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  SCM thread = scm_current_thread ();
  char *msg = NULL;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (scm_is_false (m->owner))
    m->owner = thread;
  else if (scm_is_eq (m->owner, thread))
    {
      if (m->level >= 0)
	/* Recursive mutex: just count the additional lock.  */
	m->level++;
      else
	msg = "mutex already locked by current thread";
    }
  else
    {
      while (1)
	{
	  /* Ownership is transferred to us by the unlocking thread:
	     fat_mutex_unlock installs the next waiter as owner.  */
	  if (scm_is_eq (m->owner, thread))
	    break;
	  block_self (m->waiting, mutex, &m->lock, NULL);
	  /* Drop the lock while running asyncs, then retest.  */
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return msg;
}
1072
1073 SCM_DEFINE (scm_lock_mutex, "lock-mutex", 1, 0, 0,
1074 (SCM mx),
1075 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1076 "blocks until the mutex becomes available. The function returns when "
1077 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1078 "a thread already owns will succeed right away and will not block the "
1079 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1080 #define FUNC_NAME s_scm_lock_mutex
1081 {
1082 char *msg;
1083
1084 SCM_VALIDATE_MUTEX (1, mx);
1085 msg = fat_mutex_lock (mx);
1086 if (msg)
1087 scm_misc_error (NULL, msg, SCM_EOL);
1088 return SCM_BOOL_T;
1089 }
1090 #undef FUNC_NAME
1091
/* Keep MUTEX locked for the duration of the current dynwind context:
   it is locked on (re)entry and unlocked on unwind.  NOTE(review):
   the unwind handler appears to be registered before the lock is
   actually taken by the rewind handler — presumably so no window
   exists in which the mutex is held without an unwind handler;
   confirm against scm_dynwind_rewind_handler_with_scm semantics.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1100
1101 static char *
fat_mutex_trylock(fat_mutex * m,int * resp)1102 fat_mutex_trylock (fat_mutex *m, int *resp)
1103 {
1104 char *msg = NULL;
1105 SCM thread = scm_current_thread ();
1106
1107 *resp = 1;
1108 scm_i_pthread_mutex_lock (&m->lock);
1109 if (scm_is_false (m->owner))
1110 m->owner = thread;
1111 else if (scm_is_eq (m->owner, thread))
1112 {
1113 if (m->level >= 0)
1114 m->level++;
1115 else
1116 msg = "mutex already locked by current thread";
1117 }
1118 else
1119 *resp = 0;
1120 scm_i_pthread_mutex_unlock (&m->lock);
1121 return msg;
1122 }
1123
1124 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1125 (SCM mutex),
1126 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1127 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1128 #define FUNC_NAME s_scm_try_mutex
1129 {
1130 char *msg;
1131 int res;
1132
1133 SCM_VALIDATE_MUTEX (1, mutex);
1134
1135 msg = fat_mutex_trylock (SCM_MUTEX_DATA (mutex), &res);
1136 if (msg)
1137 scm_misc_error (NULL, msg, SCM_EOL);
1138 return scm_from_bool (res);
1139 }
1140 #undef FUNC_NAME
1141
/* Release one level of M for the current thread.  Returns NULL on
   success, or a static error message when M is unlocked or owned by
   some other thread.  When the last level is released, ownership is
   handed directly to the next waiter popped off the queue (or #f
   when nobody waits) — see the hand-off loop in fat_mutex_lock.  */
static char *
fat_mutex_unlock (fat_mutex *m)
{
  char *msg = NULL;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (!scm_is_eq (m->owner, scm_current_thread ()))
    {
      if (scm_is_false (m->owner))
	msg = "mutex not locked";
      else
	msg = "mutex not locked by current thread";
    }
  else if (m->level > 0)
    /* Recursive mutex still held: just drop one level.  */
    m->level--;
  else
    /* Transfer ownership to the next waiting thread, if any.  */
    m->owner = unblock_from_queue (m->waiting);
  scm_i_pthread_mutex_unlock (&m->lock);

  return msg;
}
1163
1164 SCM_DEFINE (scm_unlock_mutex, "unlock-mutex", 1, 0, 0,
1165 (SCM mx),
1166 "Unlocks @var{mutex} if the calling thread owns the lock on "
1167 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1168 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1169 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1170 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1171 "with a call to @code{unlock-mutex}. Only the last call to "
1172 "@code{unlock-mutex} will actually unlock the mutex. ")
1173 #define FUNC_NAME s_scm_unlock_mutex
1174 {
1175 char *msg;
1176 SCM_VALIDATE_MUTEX (1, mx);
1177
1178 msg = fat_mutex_unlock (SCM_MUTEX_DATA (mx));
1179 if (msg)
1180 scm_misc_error (NULL, msg, SCM_EOL);
1181 return SCM_BOOL_T;
1182 }
1183 #undef FUNC_NAME
1184
#if 0

/* Compiled-out debugging accessors exposing a mutex's owner thread
   and recursion level.  Kept for reference; not part of the public
   API in this build.  */

SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
	    (SCM mx),
	    "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM_VALIDATE_MUTEX (1, mx);
  return (SCM_MUTEX_DATA(mx))->owner;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
	    (SCM mx),
	    "Return the lock level of a recursive mutex, or -1\n"
	    "for a standard mutex.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME

#endif
1209
1210 /*** Fat condition variables */
1211
/* A fat condition variable: a primitive lock protecting an explicit
   queue of waiting threads, woken via unblock_from_queue.  */
typedef struct {
  scm_i_pthread_mutex_t lock;  /* protects the wait queue below. */
  SCM waiting;                 /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1219
1220 static SCM
fat_cond_mark(SCM cv)1221 fat_cond_mark (SCM cv)
1222 {
1223 fat_cond *c = SCM_CONDVAR_DATA (cv);
1224 return c->waiting;
1225 }
1226
1227 static size_t
fat_cond_free(SCM mx)1228 fat_cond_free (SCM mx)
1229 {
1230 fat_cond *c = SCM_CONDVAR_DATA (mx);
1231 scm_i_pthread_mutex_destroy (&c->lock);
1232 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
1233 return 0;
1234 }
1235
1236 static int
fat_cond_print(SCM cv,SCM port,scm_print_state * pstate SCM_UNUSED)1237 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1238 {
1239 fat_cond *c = SCM_CONDVAR_DATA (cv);
1240 scm_puts ("#<condition-variable ", port);
1241 scm_uintprint ((scm_t_bits)c, 16, port);
1242 scm_puts (">", port);
1243 return 1;
1244 }
1245
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  scm_i_pthread_mutex_init (&c->lock, 0);
  /* Give `waiting' a safe value before SCM_NEWSMOB so the mark
     function never sees garbage; allocate the real queue afterwards
     since that allocation may trigger GC.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1262
/* Wait on COND, releasing MUTEX while waiting and re-acquiring it
   before returning.  WAITTIME may be NULL (wait forever) or an
   absolute timeout.  Returns 1 when signalled, 0 on timeout; other
   errors from block_self raise a system error.  Loops on EINTR.  */
static int
fat_cond_timedwait (SCM cond, SCM mutex,
		    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  fat_cond *c = SCM_CONDVAR_DATA (cond);
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  const char *msg;
  int err = 0;

  while (1)
    {
      /* Take the condvar's lock before releasing the mutex so no
	 signal can slip between the unlock and the enqueue.  */
      scm_i_scm_pthread_mutex_lock (&c->lock);
      msg = fat_mutex_unlock (m);
      /* Hold off asyncs until the mutex has been re-acquired.  */
      t->block_asyncs++;
      if (msg == NULL)
	{
	  err = block_self (c->waiting, cond, &c->lock, waittime);
	  scm_i_pthread_mutex_unlock (&c->lock);
	  fat_mutex_lock (mutex);
	}
      else
	scm_i_pthread_mutex_unlock (&c->lock);
      t->block_asyncs--;
      scm_async_click ();

      /* The mutex wasn't ours to unlock: propagate the error.  */
      if (msg)
	scm_misc_error (NULL, msg, SCM_EOL);

      scm_remember_upto_here_2 (cond, mutex);

      if (err == 0)
	return 1;               /* signalled. */
      if (err == ETIMEDOUT)
	return 0;               /* timed out. */
      if (err != EINTR)
	{
	  errno = err;
	  scm_syserror (NULL);
	}
      /* err == EINTR: loop and wait again.  */
    }
}
1305
1306 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1307 (SCM cv, SCM mx, SCM t),
1308 "Wait until @var{cond-var} has been signalled. While waiting, "
1309 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1310 "is locked again when this function returns. When @var{time} is given, "
1311 "it specifies a point in time where the waiting should be aborted. It "
1312 "can be either a integer as returned by @code{current-time} or a pair "
1313 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1314 "mutex is locked and @code{#f} is returned. When the condition "
1315 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1316 "is returned. ")
1317 #define FUNC_NAME s_scm_timed_wait_condition_variable
1318 {
1319 scm_t_timespec waittime, *waitptr = NULL;
1320
1321 SCM_VALIDATE_CONDVAR (1, cv);
1322 SCM_VALIDATE_MUTEX (2, mx);
1323
1324 if (!SCM_UNBNDP (t))
1325 {
1326 if (scm_is_pair (t))
1327 {
1328 waittime.tv_sec = scm_to_ulong (SCM_CAR (t));
1329 waittime.tv_nsec = scm_to_ulong (SCM_CAR (t)) * 1000;
1330 }
1331 else
1332 {
1333 waittime.tv_sec = scm_to_ulong (t);
1334 waittime.tv_nsec = 0;
1335 }
1336 waitptr = &waittime;
1337 }
1338
1339 return scm_from_bool (fat_cond_timedwait (cv, mx, waitptr));
1340 }
1341 #undef FUNC_NAME
1342
/* Wake up at most one thread waiting on C.  */
static void
fat_cond_signal (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  unblock_from_queue (c->waiting);
  scm_i_pthread_mutex_unlock (&c->lock);
}
1350
SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1361
/* Wake up every thread waiting on C: keep popping the queue until
   unblock_from_queue reports it empty (returns #f).  */
static void
fat_cond_broadcast (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
  scm_i_pthread_mutex_unlock (&c->lock);
}
1370
SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1381
1382 /*** Marking stacks */
1383
/* XXX - what to do with this?  Do we need to handle this for blocked
   threads as well?
*/
/* On IA-64 the register backing store is a second, upward-growing
   stack that must be scanned by the conservative GC in addition to
   the ordinary C stack.  The macro also marks the saved machine
   context itself.  On all other targets it expands to nothing.  */
#ifdef __ia64__
# define SCM_MARK_BACKING_STORE() do {                                \
    ucontext_t ctx;                                                   \
    SCM_STACKITEM * top, * bot;                                       \
    getcontext (&ctx);                                                \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext,           \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM)));                                    \
    bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base;  \
    top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx);                   \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif
1401
/* Conservatively mark the C stack and saved registers of every
   thread.  Called during GC, after all threads have been put to
   sleep (scm_i_thread_put_to_sleep), so each thread's stack `top'
   snapshot is valid.  */
void
scm_threads_mark_stacks (void)
{
  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    {
      /* Check that thread has indeed been suspended.
       */
      assert (t->top);

      scm_gc_mark (t->handle);

      /* Scan from the lower to the higher stack address, whichever
	 direction the stack grows on this platform.  */
#if SCM_STACK_GROWS_UP
      scm_mark_locations (t->base, t->top - t->base);
#else
      scm_mark_locations (t->top, t->base - t->top);
#endif
      /* Registers saved at suspend time may hold the only reference
	 to an object, so scan them too.  */
      scm_mark_locations ((void *) &t->regs,
			  ((size_t) sizeof(t->regs)
			   / sizeof (SCM_STACKITEM)));
    }

  SCM_MARK_BACKING_STORE ();
}
1426
1427 /*** Select */
1428
/* A select(2) replacement that cooperates with the thread system:
   the thread's sleep pipe is added to the read set so another thread
   can interrupt the wait (the wakeup then surfaces as EINTR), and
   the actual select runs outside guile mode so GC is not blocked.
   Interface and return convention are those of select(2).  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  scm_t_guile_ticket ticket;

  /* We need a read set to put the wakeup fd into, even when the
     caller supplied none.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register the sleep so others can wake us via the pipe; retry
     while asyncs are pending.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  ticket = scm_leave_guile ();
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;
  res = select (nfds, readfds, writefds, exceptfds, timeout);
  t->sleep_fd = -1;
  eno = errno;                  /* save errno before re-entering guile. */
  scm_enter_guile (ticket);

  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      size_t count;

      /* Drain the wakeup byte; the result is deliberately unused
	 (the variable only silences warn_unused_result).  */
      count = read (wakeup_fd, &dummy, 1);

      /* Hide the wakeup fd from the caller; if it was the only ready
	 fd, report the interruption as EINTR instead.  */
      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1480
1481 /* Convenience API for blocking while in guile mode. */
1482
1483 #if SCM_USE_PTHREAD_THREADS
1484
1485 int
scm_pthread_mutex_lock(scm_i_pthread_mutex_t * mutex)1486 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1487 {
1488 scm_t_guile_ticket t = scm_leave_guile ();
1489 int res = scm_i_pthread_mutex_lock (mutex);
1490 scm_enter_guile (t);
1491 return res;
1492 }
1493
1494 static void
do_unlock(void * data)1495 do_unlock (void *data)
1496 {
1497 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1498 }
1499
/* Lock MUTEX now (dropping to non-guile mode while blocked) and
   arrange for it to be unlocked when the current dynwind context is
   left.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1506
1507 int
scm_pthread_cond_wait(scm_i_pthread_cond_t * cond,scm_i_pthread_mutex_t * mutex)1508 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1509 {
1510 scm_t_guile_ticket t = scm_leave_guile ();
1511 int res = scm_i_pthread_cond_wait (cond, mutex);
1512 scm_enter_guile (t);
1513 return res;
1514 }
1515
1516 int
scm_pthread_cond_timedwait(scm_i_pthread_cond_t * cond,scm_i_pthread_mutex_t * mutex,const scm_t_timespec * wt)1517 scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1518 scm_i_pthread_mutex_t *mutex,
1519 const scm_t_timespec *wt)
1520 {
1521 scm_t_guile_ticket t = scm_leave_guile ();
1522 int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1523 scm_enter_guile (t);
1524 return res;
1525 }
1526
1527 #endif
1528
1529 unsigned long
scm_std_usleep(unsigned long usecs)1530 scm_std_usleep (unsigned long usecs)
1531 {
1532 struct timeval tv;
1533 tv.tv_usec = usecs % 1000000;
1534 tv.tv_sec = usecs / 1000000;
1535 scm_std_select (0, NULL, NULL, NULL, &tv);
1536 return tv.tv_sec * 1000000 + tv.tv_usec;
1537 }
1538
1539 unsigned int
scm_std_sleep(unsigned int secs)1540 scm_std_sleep (unsigned int secs)
1541 {
1542 struct timeval tv;
1543 tv.tv_usec = 0;
1544 tv.tv_sec = secs;
1545 scm_std_select (0, NULL, NULL, NULL, &tv);
1546 return tv.tv_sec;
1547 }
1548
1549 /*** Misc */
1550
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* The smob handle for the calling thread.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1559
1560 static SCM
scm_c_make_list(size_t n,SCM fill)1561 scm_c_make_list (size_t n, SCM fill)
1562 {
1563 SCM res = SCM_EOL;
1564 while (n-- > 0)
1565 res = scm_cons (fill, res);
1566 return res;
1567 }
1568
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  /* Preallocate the spine of the result, then fill the cars in under
     the lock.  thread_count may have changed by the time we hold the
     lock, hence the n-bounded loop and the final list truncation.  */
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      SCM_SETCAR (*l, t->handle);
      l = SCM_CDRLOC (*l);
      n--;
    }
  /* Cut off any unused preallocated tail.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1594
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  /* Thin Scheme-level wrapper around the C predicate below.  */
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
1603
1604 int
scm_c_thread_exited_p(SCM thread)1605 scm_c_thread_exited_p (SCM thread)
1606 #define FUNC_NAME s_scm_thread_exited_p
1607 {
1608 scm_i_thread *t;
1609 SCM_VALIDATE_THREAD (1, thread);
1610 t = SCM_I_THREAD_DATA (thread);
1611 return t->exited;
1612 }
1613 #undef FUNC_NAME
1614
/* Broadcast by scm_i_thread_wake_up; waited on in
   scm_i_thread_sleep_for_gc.  */
static scm_i_pthread_cond_t wake_up_cond;
/* Set non-zero while scm_i_thread_put_to_sleep collects all heap
   mutexes, telling running threads to go to sleep for GC.  */
int scm_i_thread_go_to_sleep;
/* Becomes 1 at the end of scm_init_threads; gates the put-to-sleep
   and wake-up machinery.  */
static int threads_initialized_p = 0;
1618
/* Stop the world for GC: signal all threads to go to sleep, then
   acquire every thread's heap mutex, which is only possible once the
   owning thread has suspended.  Leaves thread_admin_mutex locked; it
   is released by scm_i_thread_wake_up.  No-op before the thread
   system is initialized.  */
void
scm_i_thread_put_to_sleep ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_leave_guile ();
      scm_i_pthread_mutex_lock (&thread_admin_mutex);

      /* Signal all threads to go to sleep
       */
      scm_i_thread_go_to_sleep = 1;
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_lock (&t->heap_mutex);
      /* All heap mutexes held: the flag can be cleared again.  */
      scm_i_thread_go_to_sleep = 0;
    }
}
1637
/* Flag every thread except the current one to clear its freelists.
   Must be called with thread_admin_mutex already held.  */
void
scm_i_thread_invalidate_freelists ()
{
  /* thread_admin_mutex is already locked. */

  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    if (t != SCM_I_CURRENT_THREAD)
      t->clear_freelists_p = 1;
}
1648
/* Restart the world after GC: wake all sleepers, release every heap
   mutex taken by scm_i_thread_put_to_sleep, release the admin mutex
   it left locked, and re-enter guile mode.  No-op before the thread
   system is initialized.  */
void
scm_i_thread_wake_up ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_i_pthread_cond_broadcast (&wake_up_cond);
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
    }
}
1663
/* Called by a running thread that noticed scm_i_thread_go_to_sleep:
   suspend (releasing the heap mutex to the GC thread via the cond
   wait) until scm_i_thread_wake_up broadcasts, then resume.  */
void
scm_i_thread_sleep_for_gc ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
  resume (t);
}
1671
/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

/* Fallback recursive mutex for scm_dynwind_critical_section when the
   caller passes #f; created in scm_init_threads.  */
static SCM dynwind_critical_section_mutex;
1677
/* Enter a critical section for the extent of the current dynwind
   context: lock MUTEX (or the shared recursive fallback when MUTEX
   is #f) and block asyncs; both are undone on unwind.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
1686
1687 /*** Initialization */
1688
/* Thread-specific keys for the per-thread GC freelists; created in
   scm_threads_prehistory.  */
scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
#ifdef __MINGW32__
/* On MinGW the keys are exported indirectly via pointers.  */
scm_i_pthread_key_t *scm_i_freelist_ptr = &scm_i_freelist;
scm_i_pthread_key_t *scm_i_freelist2_ptr = &scm_i_freelist2;
#endif
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Shared recursive-mutex attribute, initialized in
   scm_threads_prehistory.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
1699
/* Earliest thread-system setup, run before the heap exists: create
   the global mutexes, the GC wake-up condition, and the freelist
   keys, then register the main thread (BASE is its stack base).  */
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  /* The critical-section mutex must be recursive.  */
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);
  scm_i_pthread_key_create (&scm_i_freelist, NULL);
  scm_i_pthread_key_create (&scm_i_freelist2, NULL);

  guilify_self_1 (base);
}
1718
/* Smob type tags for threads, mutexes and condition variables;
   assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
1722
/* Second-stage initialization, run once the heap is available:
   register the three smob types, finish setting up the main thread,
   and create the fallback critical-section mutex.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  /* From here on the put-to-sleep/wake-up machinery is active.  */
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}
1749
/* Install the default dynamic state for new threads, derived from
   the current dynamic state and protected from GC.  */
void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = scm_permanent_object (state);
}
1756
/* Register the Scheme procedures defined via SCM_DEFINE in this
   file (the snarfed definitions live in threads.x).  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
1762
1763 /*
1764 Local Variables:
1765 c-file-style: "gnu"
1766 End:
1767 */
1768