1 /* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
2 * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013,
3 * 2014 Free Software Foundation, Inc.
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public License
7 * as published by the Free Software Foundation; either version 3 of
8 * the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 */
20
21
22
23 #ifdef HAVE_CONFIG_H
24 # include <config.h>
25 #endif
26
27 #include "libguile/bdw-gc.h"
28 #include <gc/gc_mark.h>
29 #include "libguile/_scm.h"
30 #include "libguile/deprecation.h"
31
32 #include <stdlib.h>
33 #include <unistd.h>
34 #include <stdio.h>
35
36 #ifdef HAVE_STRING_H
37 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
38 #endif
39
40 #if HAVE_SYS_TIME_H
41 #include <sys/time.h>
42 #endif
43
44 #if HAVE_PTHREAD_NP_H
45 # include <pthread_np.h>
46 #endif
47
48 #include <sys/select.h>
49
50 #include <assert.h>
51 #include <fcntl.h>
52 #include <nproc.h>
53
54 #include "libguile/validate.h"
55 #include "libguile/eval.h"
56 #include "libguile/async.h"
57 #include "libguile/ports.h"
58 #include "libguile/threads.h"
59 #include "libguile/dynwind.h"
60 #include "libguile/iselect.h"
61 #include "libguile/fluids.h"
62 #include "libguile/continuations.h"
63 #include "libguile/gc.h"
64 #include "libguile/gc-inline.h"
65 #include "libguile/init.h"
66 #include "libguile/scmsigs.h"
67 #include "libguile/strings.h"
68 #include "libguile/vm.h"
69
70 #include <full-read.h>
71
72
73
74
75 /* The GC "kind" for threads that allow them to mark their VM
76 stacks. */
77 static int thread_gc_kind;
78
/* Custom mark procedure (installed under THREAD_GC_KIND) for
   scm_i_thread objects.  Marks the thread record word-by-word, then
   traces the two things libgc cannot see on its own: the pointerless
   freelists (threaded through their first word) and the VM stack.
   Returns the possibly-advanced mark stack pointer, as required by
   gc_mark.h.  */
static struct GC_ms_entry *
thread_mark (GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
             struct GC_ms_entry *mark_stack_limit, GC_word env)
{
  int word;
  const struct scm_i_thread *t = (struct scm_i_thread *) addr;

  if (SCM_UNPACK (t->handle) == 0)
    /* T must be on the free-list; ignore.  (See warning in
       gc_mark.h.)  */
    return mark_stack_ptr;

  /* Mark T.  We could be more precise, but it doesn't matter.  */
  for (word = 0; word * sizeof (*addr) < sizeof (*t); word++)
    mark_stack_ptr = GC_MARK_AND_PUSH ((void *) addr[word],
                                       mark_stack_ptr, mark_stack_limit,
                                       NULL);

  /* The pointerless freelists are threaded through their first word,
     but GC doesn't know to trace them (as they are pointerless), so we
     need to do that here.  See the comments at the top of libgc's
     gc_inline.h.  */
  if (t->pointerless_freelists)
    {
      size_t n;
      for (n = 0; n < SCM_INLINE_GC_FREELIST_COUNT; n++)
        {
          void *chain = t->pointerless_freelists[n];
          if (chain)
            {
              /* The first link is already marked by the freelist vector,
                 so we just have to mark the tail.  */
              while ((chain = *(void **)chain))
                mark_stack_ptr = GC_mark_and_push (chain, mark_stack_ptr,
                                                   mark_stack_limit, NULL);
            }
        }
    }

  /* Trace the VM stack precisely, if this thread has one.  */
  if (t->vp)
    mark_stack_ptr = scm_i_vm_mark_stack (t->vp, mark_stack_ptr,
                                          mark_stack_limit);

  return mark_stack_ptr;
}
124
125
126
127 static void
to_timespec(SCM t,scm_t_timespec * waittime)128 to_timespec (SCM t, scm_t_timespec *waittime)
129 {
130 if (scm_is_pair (t))
131 {
132 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
133 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
134 }
135 else
136 {
137 double time = scm_to_double (t);
138 double sec = scm_c_truncate (time);
139
140 waittime->tv_sec = (long) sec;
141 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
142 }
143 }
144
145
146
147 /*** Queues */
148
149 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
150 the risk of false references leading to unbounded retained space as
151 described in "Bounding Space Usage of Conservative Garbage Collectors",
152 H.J. Boehm, 2001. */
153
154 /* Make an empty queue data structure.
155 */
156 static SCM
make_queue()157 make_queue ()
158 {
159 return scm_cons (SCM_EOL, SCM_EOL);
160 }
161
162 static scm_i_pthread_mutex_t queue_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;
163
164 /* Put T at the back of Q and return a handle that can be used with
165 remqueue to remove T from Q again.
166 */
167 static SCM
enqueue(SCM q,SCM t)168 enqueue (SCM q, SCM t)
169 {
170 SCM c = scm_cons (t, SCM_EOL);
171 scm_i_pthread_mutex_lock (&queue_lock);
172 if (scm_is_null (SCM_CDR (q)))
173 SCM_SETCDR (q, c);
174 else
175 SCM_SETCDR (SCM_CAR (q), c);
176 SCM_SETCAR (q, c);
177 scm_i_pthread_mutex_unlock (&queue_lock);
178 return c;
179 }
180
/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
*/
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  scm_i_pthread_mutex_lock (&queue_lock);
  /* Walk the element list looking for the cell C itself (eq?, not
     element equality).  */
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
	{
	  /* If C was the tail, the tail pointer must be moved back to
	     PREV (or cleared when C was the only element).  */
	  if (scm_is_eq (c, SCM_CAR (q)))
	    SCM_SETCAR (q, scm_is_eq (prev, q) ? SCM_EOL : prev);
	  SCM_SETCDR (prev, SCM_CDR (c));

	  /* GC-robust: drop C's link so it cannot retain the rest of
	     the queue.  */
	  SCM_SETCDR (c, SCM_EOL);

	  scm_i_pthread_mutex_unlock (&queue_lock);
	  return 1;
	}
      prev = p;
    }
  scm_i_pthread_mutex_unlock (&queue_lock);
  return 0;
}
210
/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c;
  scm_i_pthread_mutex_lock (&queue_lock);
  c = SCM_CDR (q);  /* first cell, or '() when empty */
  if (scm_is_null (c))
    {
      scm_i_pthread_mutex_unlock (&queue_lock);
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      /* Queue became empty: also clear the cached tail pointer.  */
      if (scm_is_null (SCM_CDR (q)))
	SCM_SETCAR (q, SCM_EOL);
      scm_i_pthread_mutex_unlock (&queue_lock);

      /* GC-robust: detach C from the remaining elements.  */
      SCM_SETCDR (c, SCM_EOL);

      return SCM_CAR (c);
    }
}
238
239 /*** Thread smob routines */
240
241
/* Smob print procedure for thread objects: writes
   "#<thread ID (ADDR)>" where ID is the numeric pthread id and ADDR
   the address of the scm_i_thread record.  */
static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  /* Pick the union member whose size matches pthread_t exactly, so we
     read only bytes that were actually written.  */
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}
276
277
278 /*** Blocking on queues. */
279
/* See also scm_system_async_mark_for_thread for how such a block is
   interrupted.
*/
283
/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* Announce the wait so an async can interrupt it; bail out with
     EINTR if an interruption is already pending.  */
  if (scm_i_prepare_to_wait_on_cond (t, mutex, &t->sleep_cond))
    return EINTR;

  /* Keep asyncs from running while we are queued.  */
  t->block_asyncs++;
  q_handle = enqueue (queue, t->handle);
  if (waittime == NULL)
    err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
  else
    err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

  /* When we are still on QUEUE, we have been interrupted.  We
     report this only when no other error (such as a timeout) has
     happened above.
  */
  if (remqueue (queue, q_handle) && err == 0)
    err = EINTR;
  t->block_asyncs--;
  scm_i_wait_finished (t);

  return err;
}
331
332 /* Wake up the first thread on QUEUE, if any. The awoken thread is
333 returned, or #f if the queue was empty.
334 */
335 static SCM
unblock_from_queue(SCM queue)336 unblock_from_queue (SCM queue)
337 {
338 SCM thread = dequeue (queue);
339 if (scm_is_true (thread))
340 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
341 return thread;
342 }
343
344
345 /* Getting into and out of guile mode.
346 */
347
348 /* Key used to attach a cleanup handler to a given thread. Also, if
349 thread-local storage is unavailable, this key is used to retrieve the
350 current thread with `pthread_getspecific ()'. */
351 scm_i_pthread_key_t scm_i_thread_key;
352
353
354 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
355
356 /* When thread-local storage (TLS) is available, a pointer to the
357 current-thread object is kept in TLS. Note that storing the thread-object
358 itself in TLS (rather than a pointer to some malloc'd memory) is not
359 possible since thread objects may live longer than the actual thread they
360 represent. */
361 SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
362
363 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
364
365
366 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
367 static scm_i_thread *all_threads = NULL;
368 static int thread_count;
369
370 static SCM default_dynamic_state;
371
/* Perform first stage of thread initialisation, in non-guile mode.
   BASE holds the stack bounds for this thread; NEEDS_UNREGISTER
   records whether on_thread_exit must later call
   GC_unregister_my_thread.  The record is built on the C stack first
   and then copied into GC-visible storage and linked into
   ALL_THREADS.  */
static void
guilify_self_1 (struct GC_stack_base *base, int needs_unregister)
{
  scm_i_thread t;

  /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
     before allocating anything in this thread, because allocation could
     cause GC to run, and GC could cause finalizers, which could invoke
     Scheme functions, which need the current thread to be set.  */

  t.pthread = scm_i_pthread_self ();
  t.handle = SCM_BOOL_F;
  t.result = SCM_BOOL_F;
  t.freelists = NULL;
  t.pointerless_freelists = NULL;
  t.dynamic_state = NULL;
  t.dynstack.base = NULL;
  t.dynstack.top = NULL;
  t.dynstack.limit = NULL;
  t.pending_asyncs = SCM_EOL;
  /* Asyncs stay blocked until guilify_self_2 finishes.  */
  t.block_asyncs = 1;
  t.base = base->mem_base;
#ifdef __ia64__
  t.register_backing_store_base = base->reg_base;
  t.pending_rbs_continuation = 0;
#endif
  t.continuation_root = SCM_EOL;
  t.continuation_base = t.base;
  scm_i_pthread_cond_init (&t.sleep_cond, NULL);
  t.wake = NULL;
  t.vp = NULL;

  if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  t.exited = 0;
  t.guile_mode = 0;
  t.needs_unregister = needs_unregister;

  /* The switcheroo: move T off the C stack into heap storage of the
     custom thread GC kind, and publish it.  */
  {
    scm_i_thread *t_ptr = &t;

    /* Keep GC off while the heap copy is initialized and made
       reachable.  */
    GC_disable ();
    t_ptr = GC_generic_malloc (sizeof (*t_ptr), thread_gc_kind);
    memcpy (t_ptr, &t, sizeof t);

    scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);

#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
    /* Cache the current thread in TLS for faster lookup.  */
    scm_i_current_thread = t_ptr;
#endif

    scm_i_pthread_mutex_lock (&thread_admin_mutex);
    t_ptr->next_thread = all_threads;
    all_threads = t_ptr;
    thread_count++;
    scm_i_pthread_mutex_unlock (&thread_admin_mutex);

    GC_enable ();
  }
}
440
/* Perform second stage of thread initialisation, in guile mode:
   create the thread's handle smob and its GC-managed parts
   (freelists, dynamic state, dynstack), then unblock asyncs.  */
static void
guilify_self_2 (SCM dynamic_state)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  {
    size_t size = SCM_INLINE_GC_FREELIST_COUNT * sizeof (void *);
    t->freelists = scm_gc_malloc (size, "freelists");
    t->pointerless_freelists = scm_gc_malloc (size, "atomic freelists");
  }

  t->dynamic_state = scm_gc_typed_calloc (scm_t_dynamic_state);
  t->dynamic_state->thread_local_values = scm_c_make_hash_table (0);
  scm_set_current_dynamic_state (dynamic_state);

  /* Start with a small dynstack; it grows on demand.  */
  t->dynstack.base = scm_gc_malloc (16 * sizeof (scm_t_bits), "dynstack");
  t->dynstack.limit = t->dynstack.base + 16;
  t->dynstack.top = t->dynstack.base + SCM_DYNSTACK_HEADER_LEN;

  /* Asyncs were blocked in guilify_self_1; allow them now.  */
  t->block_asyncs = 0;

  /* See note in finalizers.c:queue_finalizer_async().  */
  GC_invoke_finalizers ();
}
474
475
476
477
/* Destructor for SCM_I_THREAD_KEY, run when a guilified thread
   terminates: tear down the thread's resources, unlink it from
   ALL_THREADS, and unregister it from libgc if needed.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  Note that although
     libgc isn't guaranteed to see thread-locals, for this thread-local
     that isn't an issue as we have the all_threads list.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  t->exited = 1;

  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Mark the pipe as gone so nobody tries to write to it.  */
  t->sleep_pipe[0] = t->sleep_pipe[1] = -1;

  /* Unlink T from the global thread list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;

	/* GC-robust */
	t->next_thread = NULL;

	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  /* Although this thread has exited, the thread object might still be
     alive.  Release unused memory.  */
  t->freelists = NULL;
  t->pointerless_freelists = NULL;
  t->dynamic_state = NULL;
  t->dynstack.base = NULL;
  t->dynstack.top = NULL;
  t->dynstack.limit = NULL;
  {
    struct scm_vm *vp = t->vp;
    t->vp = NULL;
    if (vp)
      scm_i_vm_free_stack (vp);
  }

#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
  scm_i_current_thread = NULL;
#endif

#if SCM_USE_PTHREAD_THREADS
  /* Only threads we registered ourselves must be unregistered; the
     first thread was registered by GC_init.  */
  if (t->needs_unregister)
    GC_unregister_my_thread ();
#endif
}
538
539 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
540
/* One-time initializer: create the pthread key whose destructor
   (on_thread_exit) runs whenever a guilified thread terminates.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
546
/* Perform any initializations necessary to make the current thread
   known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
   if necessary.

   BASE is the stack base to use with GC.

   DYNAMIC_STATE is the set of fluid values to start with.

   Returns zero when the thread was known to guile already; otherwise
   return 1.

   Note that it could be the case that the thread was known
   to Guile, but not in guile mode (because we are within a
   scm_without_guile call).  Check SCM_I_CURRENT_THREAD->guile_mode to
   be sure.  New threads are put into guile mode implicitly.  */
static int
scm_i_init_thread_for_guile (struct GC_stack_base *base,
			     SCM dynamic_state)
{
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
      */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
      */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);

#if SCM_USE_PTHREAD_THREADS
          /* Allow other threads to come in later.  */
          GC_allow_register_threads ();
#endif

	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
          int needs_unregister = 0;

	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);

          /* Register this thread with libgc.  */
#if SCM_USE_PTHREAD_THREADS
          if (GC_register_my_thread (base) == GC_SUCCESS)
            needs_unregister = 1;
#endif

	  guilify_self_1 (base, needs_unregister);
	  guilify_self_2 (dynamic_state);
	}
      return 1;
    }
}
616
617 void
scm_init_guile()618 scm_init_guile ()
619 {
620 struct GC_stack_base stack_base;
621
622 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
623 scm_i_init_thread_for_guile (&stack_base, default_dynamic_state);
624 else
625 {
626 fprintf (stderr, "Failed to get stack base for current thread.\n");
627 exit (EXIT_FAILURE);
628 }
629 }
630
/* Argument bundle for with_guile / with_guile_trampoline: the user
   function and its data, plus the dynamic state to install if a new
   thread has to be guilified.  */
struct with_guile_args
{
  GC_fn_type func;
  void *data;
  SCM dynamic_state;
};
637
638 static void *
with_guile_trampoline(void * data)639 with_guile_trampoline (void *data)
640 {
641 struct with_guile_args *args = data;
642
643 return scm_c_with_continuation_barrier (args->func, args->data);
644 }
645
/* Run ARGS->func in guile mode, guilifying the current thread first
   if necessary.  Called via GC_call_with_stack_base so BASE reflects
   the current stack position.  A freshly guilified thread leaves
   guile mode afterwards; a thread already in guile mode stays in it.  */
static void *
with_guile (struct GC_stack_base *base, void *data)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  struct with_guile_args *args = data;

  new_thread = scm_i_init_thread_for_guile (base, args->dynamic_state);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      res = scm_c_with_continuation_barrier (args->func, args->data);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (args->func, args->data);
    }
  else
    {
      /* We are not in Guile mode, either because we are not within a
         scm_with_guile, or because we are within a scm_without_guile.

         This call to scm_with_guile() could happen from anywhere on the
         stack, and in particular lower on the stack than when it was
         when this thread was first guilified.  Thus, `base' must be
         updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (base->mem_base) < t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#else
      if (SCM_STACK_PTR (base->mem_base) > t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#endif

      /* Re-enter guile mode; GC_call_with_gc_active tells libgc this
         stack region is active again.  */
      t->guile_mode = 1;
      res = GC_call_with_gc_active (with_guile_trampoline, args);
      t->guile_mode = 0;
    }
  return res;
}
694
695 static void *
scm_i_with_guile(void * (* func)(void *),void * data,SCM dynamic_state)696 scm_i_with_guile (void *(*func)(void *), void *data, SCM dynamic_state)
697 {
698 struct with_guile_args args;
699
700 args.func = func;
701 args.data = data;
702 args.dynamic_state = dynamic_state;
703
704 return GC_call_with_stack_base (with_guile, &args);
705 }
706
/* Public API: run FUNC (DATA) in guile mode with the default dynamic
   state, entering guile mode (and guilifying the thread) if needed.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile (func, data, default_dynamic_state);
}
712
713 void *
scm_without_guile(void * (* func)(void *),void * data)714 scm_without_guile (void *(*func)(void *), void *data)
715 {
716 void *result;
717 scm_i_thread *t = SCM_I_CURRENT_THREAD;
718
719 if (t->guile_mode)
720 {
721 SCM_I_CURRENT_THREAD->guile_mode = 0;
722 result = GC_do_blocking (func, data);
723 SCM_I_CURRENT_THREAD->guile_mode = 1;
724 }
725 else
726 /* Otherwise we're not in guile mode, so nothing to do. */
727 result = func (data);
728
729 return result;
730 }
731
732
733 /*** Thread creation */
734
735 /* Because (ice-9 boot-9) loads up (ice-9 threads), we know that this
736 variable will get loaded before a call to scm_call_with_new_thread
737 and therefore no lock or pthread_once_t is needed. */
738 static SCM call_with_new_thread_var;
739
740 SCM
scm_call_with_new_thread(SCM thunk,SCM handler)741 scm_call_with_new_thread (SCM thunk, SCM handler)
742 {
743 SCM call_with_new_thread = scm_variable_ref (call_with_new_thread_var);
744 if (SCM_UNBNDP (handler))
745 return scm_call_1 (call_with_new_thread, thunk);
746 return scm_call_2 (call_with_new_thread, thunk, handler);
747 }
748
typedef struct launch_data launch_data;

/* Per-spawn bookkeeping handed from %call-with-new-thread to the new
   thread: the thunk to run and the dynamic state to run it under.
   PREV/NEXT link it into the protected_launch_data list below.  */
struct launch_data {
  launch_data *prev;
  launch_data *next;
  SCM dynamic_state;
  SCM thunk;
};

/* GC-protect the launch data for new threads.  */
static launch_data *protected_launch_data;
static scm_i_pthread_mutex_t protected_launch_data_lock =
  SCM_I_PTHREAD_MUTEX_INITIALIZER;
762
/* Push DATA onto the front of the protected_launch_data list, keeping
   its thunk and dynamic state reachable until the new thread calls
   unprotect_launch_data.  */
static void
protect_launch_data (launch_data *data)
{
  scm_i_pthread_mutex_lock (&protected_launch_data_lock);
  data->next = protected_launch_data;
  if (protected_launch_data)
    protected_launch_data->prev = data;
  protected_launch_data = data;
  scm_i_pthread_mutex_unlock (&protected_launch_data_lock);
}
773
/* Unlink DATA from the doubly-linked protected_launch_data list, once
   the new thread has taken ownership of its contents.  */
static void
unprotect_launch_data (launch_data *data)
{
  scm_i_pthread_mutex_lock (&protected_launch_data_lock);
  if (data->next)
    data->next->prev = data->prev;
  if (data->prev)
    data->prev->next = data->next;
  else
    /* DATA was the list head.  */
    protected_launch_data = data->next;
  scm_i_pthread_mutex_unlock (&protected_launch_data_lock);
}
786
787 static void *
really_launch(void * d)788 really_launch (void *d)
789 {
790 scm_i_thread *t = SCM_I_CURRENT_THREAD;
791 unprotect_launch_data (d);
792 /* The thread starts with asyncs blocked. */
793 t->block_asyncs++;
794 SCM_I_CURRENT_THREAD->result = scm_call_0 (((launch_data *)d)->thunk);
795 return 0;
796 }
797
798 static void *
launch_thread(void * d)799 launch_thread (void *d)
800 {
801 launch_data *data = (launch_data *)d;
802 scm_i_pthread_detach (scm_i_pthread_self ());
803 scm_i_with_guile (really_launch, d, data->dynamic_state);
804 return NULL;
805 }
806
SCM_INTERNAL SCM scm_sys_call_with_new_thread (SCM);
SCM_DEFINE (scm_sys_call_with_new_thread, "%call-with-new-thread", 1, 0, 0,
            (SCM thunk), "")
#define FUNC_NAME s_scm_sys_call_with_new_thread
{
  launch_data *data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);

  /* Do a little incremental GC work before spawning.  */
  GC_collect_a_little ();
  data = scm_gc_typed_calloc (launch_data);
  data->dynamic_state = scm_current_dynamic_state ();
  data->thunk = thunk;
  /* Keep DATA alive until really_launch runs in the new thread.  */
  protect_launch_data (data);
  err = scm_i_pthread_create (&id, NULL, launch_thread, data);
  if (err)
    {
      /* pthread_create returns the error code rather than setting
         errno; propagate it so scm_syserror reports it.  */
      errno = err;
      scm_syserror (NULL);
    }

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
833
834 SCM
scm_spawn_thread(scm_t_catch_body body,void * body_data,scm_t_catch_handler handler,void * handler_data)835 scm_spawn_thread (scm_t_catch_body body, void *body_data,
836 scm_t_catch_handler handler, void *handler_data)
837 {
838 SCM body_closure, handler_closure;
839
840 body_closure = scm_i_make_catch_body_closure (body, body_data);
841 handler_closure = handler == NULL ? SCM_UNDEFINED :
842 scm_i_make_catch_handler_closure (handler, handler_data);
843
844 return scm_call_with_new_thread (body_closure, handler_closure);
845 }
846
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Delegate to scm_i_sched_yield; #t indicates the yield succeeded.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
855
static SCM cancel_thread_var;

/* C API: ask THREAD to terminate, by calling the Scheme-level
   `cancel-thread' procedure held in CANCEL_THREAD_VAR.  */
SCM
scm_cancel_thread (SCM thread)
{
  scm_call_1 (scm_variable_ref (cancel_thread_var), thread);
  return SCM_UNSPECIFIED;
}
864
static SCM join_thread_var;

/* C API: wait for THREAD to terminate, by calling the Scheme-level
   `join-thread' procedure held in JOIN_THREAD_VAR.  */
SCM
scm_join_thread (SCM thread)
{
  return scm_call_1 (scm_variable_ref (join_thread_var), thread);
}
872
873 SCM
scm_join_thread_timed(SCM thread,SCM timeout,SCM timeoutval)874 scm_join_thread_timed (SCM thread, SCM timeout, SCM timeoutval)
875 {
876 SCM join_thread = scm_variable_ref (join_thread_var);
877
878 if (SCM_UNBNDP (timeout))
879 return scm_call_1 (join_thread, thread);
880 else if (SCM_UNBNDP (timeoutval))
881 return scm_call_2 (join_thread, thread, timeout);
882 else
883 return scm_call_3 (join_thread, thread, timeout, timeoutval);
884 }
885
886 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
887 (SCM obj),
888 "Return @code{#t} if @var{obj} is a thread.")
889 #define FUNC_NAME s_scm_thread_p
890 {
891 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
892 }
893 #undef FUNC_NAME
894
895
896
897
/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
*/

enum scm_mutex_kind {
  /* A standard mutex can only be locked once.  If you try to lock it
     again from the thread that locked it to begin with (the "owner"
     thread), it throws an error.  It can only be unlocked from the
     thread that locked it in the first place.  */
  SCM_MUTEX_STANDARD,
  /* A recursive mutex can be locked multiple times by its owner.  It
     then has to be unlocked the corresponding number of times, and like
     standard mutexes can only be unlocked by the owner thread.  */
  SCM_MUTEX_RECURSIVE,
  /* An unowned mutex is like a standard mutex, except that it can be
     unlocked by any thread.  A corollary of this behavior is that a
     thread's attempt to lock a mutex that it already owns will block
     instead of signalling an error, as it could be that some other
     thread unlocks the mutex, allowing the owner thread to proceed.
     This kind of mutex is a bit strange and is here for use by
     SRFI-18.  */
  SCM_MUTEX_UNOWNED
};
923
/* A Guile mutex.  LOCK is a plain pthread mutex that protects the
   other fields; blocking at the Scheme level goes through the WAITING
   queue (see lock_mutex) rather than by holding LOCK.  */
struct scm_mutex {
  scm_i_pthread_mutex_t lock;
  /* The thread that owns this mutex, or #f if the mutex is unlocked. */
  SCM owner;
  /* Queue of threads waiting for this mutex. */
  SCM waiting;
  /* For SCM_MUTEX_RECURSIVE (and only SCM_MUTEX_RECURSIVE), the
     recursive lock count.  The first lock does not count. */
  int level;
};

/* Smob accessors; the kind lives in the smob flags (see the
   constructor in scm_make_mutex_with_kind).  */
#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x) ((struct scm_mutex *) SCM_SMOB_DATA (x))
#define SCM_MUTEX_KIND(x) ((enum scm_mutex_kind) (SCM_SMOB_FLAGS (x) & 0x3))
938
939 static int
scm_mutex_print(SCM mx,SCM port,scm_print_state * pstate SCM_UNUSED)940 scm_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
941 {
942 struct scm_mutex *m = SCM_MUTEX_DATA (mx);
943 scm_puts ("#<mutex ", port);
944 scm_uintprint ((scm_t_bits)m, 16, port);
945 scm_puts (">", port);
946 return 1;
947 }
948
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");

SCM_DEFINE (scm_make_mutex_with_kind, "make-mutex", 0, 1, 0,
	    (SCM kind),
	    "Create a new mutex.  If @var{kind} is not given, the mutex\n"
	    "will be a standard non-recursive mutex.  Otherwise pass\n"
	    "@code{recursive} to make a recursive mutex, or\n"
	    "@code{allow-external-unlock} to make a non-recursive mutex\n"
	    "that can be unlocked from any thread.")
#define FUNC_NAME s_scm_make_mutex_with_kind
{
  enum scm_mutex_kind mkind = SCM_MUTEX_STANDARD;
  struct scm_mutex *m;
  scm_i_pthread_mutex_t lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;

  if (!SCM_UNBNDP (kind))
    {
      if (scm_is_eq (kind, allow_external_unlock_sym))
	mkind = SCM_MUTEX_UNOWNED;
      else if (scm_is_eq (kind, recursive_sym))
	mkind = SCM_MUTEX_RECURSIVE;
      else
	SCM_MISC_ERROR ("unsupported mutex kind: ~a", scm_list_1 (kind));
    }

  m = scm_gc_malloc (sizeof (struct scm_mutex), "mutex");
  /* Because PTHREAD_MUTEX_INITIALIZER is static, it's plain old data,
     and so we can just copy it.  */
  memcpy (&m->lock, &lock, sizeof (m->lock));
  m->owner = SCM_BOOL_F;
  m->level = 0;
  m->waiting = make_queue ();

  /* Stash MKIND in the smob flags, where SCM_MUTEX_KIND finds it.  */
  return scm_new_smob (scm_tc16_mutex | (mkind << 16), (scm_t_bits) m);
}
#undef FUNC_NAME
986
/* C API convenience: create a standard (non-recursive) mutex.  */
SCM
scm_make_mutex (void)
{
  return scm_make_mutex_with_kind (SCM_UNDEFINED);
}
992
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* Just make-mutex with the `recursive' kind.  */
  return scm_make_mutex_with_kind (recursive_sym);
}
#undef FUNC_NAME
1001
/* C API: lock MX, blocking indefinitely (no timeout).  */
SCM
scm_lock_mutex (SCM mx)
{
  return scm_timed_lock_mutex (mx, SCM_UNDEFINED);
}
1007
/* Lock M on behalf of CURRENT_THREAD, with KIND selecting
   standard/recursive/unowned semantics.  Returns #t on success and #f
   on timeout; relocking a standard mutex signals an error.  Waiting
   happens on M's queue via block_self, so asyncs can interrupt the
   wait.  Static inline so each call site in scm_timed_lock_mutex is
   specialized on a constant KIND.  */
static inline SCM
lock_mutex (enum scm_mutex_kind kind, struct scm_mutex *m,
            scm_i_thread *current_thread, scm_t_timespec *waittime)
#define FUNC_NAME "lock-mutex"
{
  scm_i_scm_pthread_mutex_lock (&m->lock);

  if (scm_is_eq (m->owner, SCM_BOOL_F))
    {
      /* Unlocked: take ownership immediately.  */
      m->owner = current_thread->handle;
      scm_i_pthread_mutex_unlock (&m->lock);
      return SCM_BOOL_T;
    }
  else if (kind == SCM_MUTEX_RECURSIVE &&
           scm_is_eq (m->owner, current_thread->handle))
    {
      /* Recursive relock: just bump the count.  */
      m->level++;
      scm_i_pthread_mutex_unlock (&m->lock);
      return SCM_BOOL_T;
    }
  else if (kind == SCM_MUTEX_STANDARD &&
           scm_is_eq (m->owner, current_thread->handle))
    {
      scm_i_pthread_mutex_unlock (&m->lock);
      SCM_MISC_ERROR ("mutex already locked by thread", SCM_EOL);
    }
  else
    /* Owned by someone else (or by us, for an unowned-kind mutex):
       wait on the queue until the mutex frees up.  */
    while (1)
      {
        int err = block_self (m->waiting, &m->lock, waittime);

        if (err == 0)
          {
            /* Woken by unblock_from_queue; another thread may have
               raced us to the mutex, so re-check the owner.  */
            if (scm_is_eq (m->owner, SCM_BOOL_F))
              {
                m->owner = current_thread->handle;
                scm_i_pthread_mutex_unlock (&m->lock);
                return SCM_BOOL_T;
              }
            else
              continue;
          }
        else if (err == ETIMEDOUT)
          {
            scm_i_pthread_mutex_unlock (&m->lock);
            return SCM_BOOL_F;
          }
        else if (err == EINTR)
          {
            /* Interrupted by an async: run it, then retry.  */
            scm_i_pthread_mutex_unlock (&m->lock);
            scm_async_tick ();
            scm_i_scm_pthread_mutex_lock (&m->lock);
            continue;
          }
        else
          {
            /* Shouldn't happen.  */
            scm_i_pthread_mutex_unlock (&m->lock);
            errno = err;
            SCM_SYSERROR;
          }
      }
}
#undef FUNC_NAME
1072
SCM_DEFINE (scm_timed_lock_mutex, "lock-mutex", 1, 1, 0,
            (SCM mutex, SCM timeout),
            "Lock mutex @var{mutex}. If the mutex is already locked, "
            "the calling thread blocks until the mutex becomes available.")
#define FUNC_NAME s_scm_timed_lock_mutex
{
  scm_t_timespec cwaittime, *waittime = NULL;
  struct scm_mutex *m;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM ret;

  SCM_VALIDATE_MUTEX (1, mutex);
  m = SCM_MUTEX_DATA (mutex);

  /* An absent or #f timeout means "block forever" (waittime stays
     NULL); otherwise convert the Scheme time to a timespec.  */
  if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
    {
      to_timespec (timeout, &cwaittime);
      waittime = &cwaittime;
    }

  /* Specialized lock_mutex implementations according to the mutex
     kind. */
  switch (SCM_MUTEX_KIND (mutex))
    {
    case SCM_MUTEX_STANDARD:
      ret = lock_mutex (SCM_MUTEX_STANDARD, m, t, waittime);
      break;
    case SCM_MUTEX_RECURSIVE:
      ret = lock_mutex (SCM_MUTEX_RECURSIVE, m, t, waittime);
      break;
    case SCM_MUTEX_UNOWNED:
      ret = lock_mutex (SCM_MUTEX_UNOWNED, m, t, waittime);
      break;
    default:
      abort ();
    }

  /* Keep MUTEX alive across the raw-struct operations above.  */
  scm_remember_upto_here_1 (mutex);

  return ret;
}
#undef FUNC_NAME
1115
1116 static void
lock_mutex_return_void(SCM mx)1117 lock_mutex_return_void (SCM mx)
1118 {
1119 (void) scm_lock_mutex (mx);
1120 }
1121
1122 static void
unlock_mutex_return_void(SCM mx)1123 unlock_mutex_return_void (SCM mx)
1124 {
1125 (void) scm_unlock_mutex (mx);
1126 }
1127
/* Within the current dynwind context, arrange for MUTEX to be locked
   whenever the context is entered and unlocked whenever it is left.
   The unwind handler is registered first so that an interrupt arriving
   between the two registrations cannot leave the mutex locked with no
   unlock pending.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
                                       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
                                       SCM_F_WIND_EXPLICITLY);
}
1136
1137 SCM
scm_try_mutex(SCM mutex)1138 scm_try_mutex (SCM mutex)
1139 {
1140 return scm_timed_lock_mutex (mutex, SCM_INUM0);
1141 }
1142
/* This function is static inline so that the compiler can specialize it
   against the mutex kind. */
/* Release mutex M held by CURRENT_THREAD.  For recursive mutexes a
   nested lock merely decrements the level; the final release clears
   the owner and wakes one queued waiter.  Unowned-kind mutexes may be
   unlocked by any thread.  */
static inline void
unlock_mutex (enum scm_mutex_kind kind, struct scm_mutex *m,
              scm_i_thread *current_thread)
#define FUNC_NAME "unlock-mutex"
{
  scm_i_scm_pthread_mutex_lock (&m->lock);

  if (!scm_is_eq (m->owner, current_thread->handle))
    {
      /* Not ours: either nobody holds it (always an error)...  */
      if (scm_is_eq (m->owner, SCM_BOOL_F))
        {
          scm_i_pthread_mutex_unlock (&m->lock);
          SCM_MISC_ERROR ("mutex not locked", SCM_EOL);
        }

      /* ...or another thread holds it, which only unowned-kind
         mutexes permit.  */
      if (kind != SCM_MUTEX_UNOWNED)
        {
          scm_i_pthread_mutex_unlock (&m->lock);
          SCM_MISC_ERROR ("mutex not locked by current thread", SCM_EOL);
        }
    }

  if (kind == SCM_MUTEX_RECURSIVE && m->level > 0)
    /* Inner recursive unlock: just pop one nesting level.  */
    m->level--;
  else
    {
      m->owner = SCM_BOOL_F;
      /* Wake up one waiter. */
      unblock_from_queue (m->waiting);
    }

  scm_i_pthread_mutex_unlock (&m->lock);
}
#undef FUNC_NAME
1179
SCM_DEFINE (scm_unlock_mutex, "unlock-mutex", 1, 0, 0, (SCM mutex),
            "Unlocks @var{mutex}. The calling thread must already hold\n"
            "the lock on @var{mutex}, unless the mutex was created with\n"
            "the @code{allow-external-unlock} option; otherwise an error\n"
            "will be signalled.")
#define FUNC_NAME s_scm_unlock_mutex
{
  struct scm_mutex *m;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  SCM_VALIDATE_MUTEX (1, mutex);

  m = SCM_MUTEX_DATA (mutex);

  /* Specialized unlock_mutex implementations according to the mutex
     kind. */
  switch (SCM_MUTEX_KIND (mutex))
    {
    case SCM_MUTEX_STANDARD:
      unlock_mutex (SCM_MUTEX_STANDARD, m, t);
      break;
    case SCM_MUTEX_RECURSIVE:
      unlock_mutex (SCM_MUTEX_RECURSIVE, m, t);
      break;
    case SCM_MUTEX_UNOWNED:
      unlock_mutex (SCM_MUTEX_UNOWNED, m, t);
      break;
    default:
      abort ();
    }

  /* Keep MUTEX alive across the raw-struct operations above.  */
  scm_remember_upto_here_1 (mutex);

  return SCM_BOOL_T;
}
#undef FUNC_NAME
1216
1217 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1218 (SCM obj),
1219 "Return @code{#t} if @var{obj} is a mutex.")
1220 #define FUNC_NAME s_scm_mutex_p
1221 {
1222 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1223 }
1224 #undef FUNC_NAME
1225
SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
            (SCM mx),
            "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM owner;
  struct scm_mutex *m = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  m = SCM_MUTEX_DATA (mx);
  /* Take the internal lock so we read a consistent owner field.  */
  scm_i_pthread_mutex_lock (&m->lock);
  owner = m->owner;
  scm_i_pthread_mutex_unlock (&m->lock);

  return owner;
}
#undef FUNC_NAME
1243
1244 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1245 (SCM mx),
1246 "Return the lock level of mutex @var{mx}.")
1247 #define FUNC_NAME s_scm_mutex_level
1248 {
1249 SCM_VALIDATE_MUTEX (1, mx);
1250 if (SCM_MUTEX_KIND (mx) == SCM_MUTEX_RECURSIVE)
1251 return scm_from_int (SCM_MUTEX_DATA (mx)->level + 1);
1252 else if (scm_is_eq (SCM_MUTEX_DATA (mx)->owner, SCM_BOOL_F))
1253 return SCM_INUM0;
1254 else
1255 return SCM_INUM1;
1256 }
1257 #undef FUNC_NAME
1258
1259 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1260 (SCM mx),
1261 "Returns @code{#t} if the mutex @var{mx} is locked.")
1262 #define FUNC_NAME s_scm_mutex_locked_p
1263 {
1264 SCM_VALIDATE_MUTEX (1, mx);
1265 if (scm_is_eq (SCM_MUTEX_DATA (mx)->owner, SCM_BOOL_F))
1266 return SCM_BOOL_F;
1267 else
1268 return SCM_BOOL_T;
1269 }
1270 #undef FUNC_NAME
1271
1272
1273
1274
/* A condition variable: a queue of threads waiting to be signalled,
   plus an internal pthread mutex.  */
struct scm_cond {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
};

/* Smob predicate and data accessor for condition variables.  */
#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((struct scm_cond *) SCM_SMOB_DATA (x))
1282
1283 static int
scm_cond_print(SCM cv,SCM port,scm_print_state * pstate SCM_UNUSED)1284 scm_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1285 {
1286 struct scm_cond *c = SCM_CONDVAR_DATA (cv);
1287 scm_puts ("#<condition-variable ", port);
1288 scm_uintprint ((scm_t_bits)c, 16, port);
1289 scm_puts (">", port);
1290 return 1;
1291 }
1292
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
            (void),
            "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  struct scm_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (struct scm_cond), "condition variable");
  /* Give WAITING a valid SCM value before the smob exists, then
     allocate the real queue afterwards — make_queue allocates and so
     may trigger GC, which must not see an uninitialized field.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1308
1309 static inline SCM
timed_wait(enum scm_mutex_kind kind,struct scm_mutex * m,struct scm_cond * c,scm_i_thread * current_thread,scm_t_timespec * waittime)1310 timed_wait (enum scm_mutex_kind kind, struct scm_mutex *m, struct scm_cond *c,
1311 scm_i_thread *current_thread, scm_t_timespec *waittime)
1312 #define FUNC_NAME "wait-condition-variable"
1313 {
1314 scm_i_scm_pthread_mutex_lock (&m->lock);
1315
1316 if (!scm_is_eq (m->owner, current_thread->handle))
1317 {
1318 if (scm_is_eq (m->owner, SCM_BOOL_F))
1319 {
1320 scm_i_pthread_mutex_unlock (&m->lock);
1321 SCM_MISC_ERROR ("mutex not locked", SCM_EOL);
1322 }
1323
1324 if (kind != SCM_MUTEX_UNOWNED)
1325 {
1326 scm_i_pthread_mutex_unlock (&m->lock);
1327 SCM_MISC_ERROR ("mutex not locked by current thread", SCM_EOL);
1328 }
1329 }
1330
1331 while (1)
1332 {
1333 int err = 0;
1334
1335 /* Unlock the mutex. */
1336 if (kind == SCM_MUTEX_RECURSIVE && m->level > 0)
1337 m->level--;
1338 else
1339 {
1340 m->owner = SCM_BOOL_F;
1341 /* Wake up one waiter. */
1342 unblock_from_queue (m->waiting);
1343 }
1344
1345 /* Wait for someone to signal the cond, a timeout, or an
1346 interrupt. */
1347 err = block_self (c->waiting, &m->lock, waittime);
1348
1349 /* We woke up for some reason. Reacquire the mutex before doing
1350 anything else.
1351
1352 FIXME: We disable interrupts while reacquiring the mutex. If
1353 we allow interrupts here, there's the risk of a nonlocal exit
1354 before we reaquire the mutex, which would be visible to user
1355 code.
1356
1357 For example the unwind handler in
1358
1359 (with-mutex m (wait-condition-variable c m))
1360
1361 that tries to unlock M could see M in an already-unlocked
1362 state, if an interrupt while waiting on C caused the wait to
1363 abort and the woke thread lost the race to reacquire M. That's
1364 not great. Maybe it's necessary but for now we just disable
1365 interrupts while reaquiring a mutex after a wait. */
1366 current_thread->block_asyncs++;
1367 if (kind == SCM_MUTEX_RECURSIVE &&
1368 scm_is_eq (m->owner, current_thread->handle))
1369 {
1370 m->level++;
1371 scm_i_pthread_mutex_unlock (&m->lock);
1372 }
1373 else
1374 while (1)
1375 {
1376 if (scm_is_eq (m->owner, SCM_BOOL_F))
1377 {
1378 m->owner = current_thread->handle;
1379 scm_i_pthread_mutex_unlock (&m->lock);
1380 break;
1381 }
1382 block_self (m->waiting, &m->lock, waittime);
1383 }
1384 current_thread->block_asyncs--;
1385
1386 /* Now that we have the mutex again, handle the return value. */
1387 if (err == 0)
1388 return SCM_BOOL_T;
1389 else if (err == ETIMEDOUT)
1390 return SCM_BOOL_F;
1391 else if (err == EINTR)
1392 /* Let caller run scm_async_tick() and loop. */
1393 return SCM_BOOL_T;
1394 else
1395 {
1396 /* Shouldn't happen. */
1397 errno = err;
1398 SCM_SYSERROR;
1399 }
1400 }
1401 }
1402 #undef FUNC_NAME
1403
1404 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1405 (SCM cond, SCM mutex, SCM timeout),
1406 "Wait until condition variable @var{cv} has been signalled. While waiting, "
1407 "mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
1408 "is locked again when this function returns. When @var{t} is given, "
1409 "it specifies a point in time where the waiting should be aborted. It "
1410 "can be either a integer as returned by @code{current-time} or a pair "
1411 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1412 "mutex is locked and @code{#f} is returned. When the condition "
1413 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1414 "is returned. ")
1415 #define FUNC_NAME s_scm_timed_wait_condition_variable
1416 {
1417 scm_t_timespec waittime_val, *waittime = NULL;
1418 struct scm_cond *c;
1419 struct scm_mutex *m;
1420 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1421 SCM ret;
1422
1423 SCM_VALIDATE_CONDVAR (1, cond);
1424 SCM_VALIDATE_MUTEX (2, mutex);
1425
1426 c = SCM_CONDVAR_DATA (cond);
1427 m = SCM_MUTEX_DATA (mutex);
1428
1429 if (!SCM_UNBNDP (timeout))
1430 {
1431 to_timespec (timeout, &waittime_val);
1432 waittime = &waittime_val;
1433 }
1434
1435 /* Specialized timed_wait implementations according to the mutex
1436 kind. */
1437 switch (SCM_MUTEX_KIND (mutex))
1438 {
1439 case SCM_MUTEX_STANDARD:
1440 ret = timed_wait (SCM_MUTEX_STANDARD, m, c, t, waittime);
1441 break;
1442 case SCM_MUTEX_RECURSIVE:
1443 ret = timed_wait (SCM_MUTEX_RECURSIVE, m, c, t, waittime);
1444 break;
1445 case SCM_MUTEX_UNOWNED:
1446 ret = timed_wait (SCM_MUTEX_UNOWNED, m, c, t, waittime);
1447 break;
1448 default:
1449 abort ();
1450 }
1451
1452 scm_remember_upto_here_2 (mutex, cond);
1453
1454 return ret;
1455 }
1456 #undef FUNC_NAME
1457
1458 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1459 (SCM cv),
1460 "Wake up one thread that is waiting for @var{cv}")
1461 #define FUNC_NAME s_scm_signal_condition_variable
1462 {
1463 struct scm_cond *c;
1464 SCM_VALIDATE_CONDVAR (1, cv);
1465 c = SCM_CONDVAR_DATA (cv);
1466 unblock_from_queue (c->waiting);
1467 return SCM_BOOL_T;
1468 }
1469 #undef FUNC_NAME
1470
1471 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1472 (SCM cv),
1473 "Wake up all threads that are waiting for @var{cv}. ")
1474 #define FUNC_NAME s_scm_broadcast_condition_variable
1475 {
1476 struct scm_cond *c;
1477 SCM_VALIDATE_CONDVAR (1, cv);
1478 c = SCM_CONDVAR_DATA (cv);
1479 while (scm_is_true (unblock_from_queue (c->waiting)))
1480 ;
1481 return SCM_BOOL_T;
1482 }
1483 #undef FUNC_NAME
1484
1485 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1486 (SCM obj),
1487 "Return @code{#t} if @var{obj} is a condition variable.")
1488 #define FUNC_NAME s_scm_condition_variable_p
1489 {
1490 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1491 }
1492 #undef FUNC_NAME
1493
1494
1495
1496 /*** Select */
1497
/* Argument/result bundle for running select(2) via scm_without_guile,
   whose callback receives a single void* argument.  */
struct select_args
{
  int nfds;                   /* highest fd + 1, as for select(2) */
  fd_set *read_fds;
  fd_set *write_fds;
  fd_set *except_fds;
  struct timeval *timeout;

  int result;                 /* return value of select(2) */
  int errno_value;            /* errno captured right after the call */
};
1509
1510 static void *
do_std_select(void * args)1511 do_std_select (void *args)
1512 {
1513 struct select_args *select_args;
1514
1515 select_args = (struct select_args *) args;
1516
1517 select_args->result =
1518 select (select_args->nfds,
1519 select_args->read_fds, select_args->write_fds,
1520 select_args->except_fds, select_args->timeout);
1521 select_args->errno_value = errno;
1522
1523 return NULL;
1524 }
1525
/* Interruptible select(2) for guile mode: in addition to the caller's
   fd sets, watch this thread's sleep pipe so another thread can wake
   us; the actual select runs outside guile mode so GC is not blocked.
   Returns like select(2); a wakeup via the pipe appears as EINTR.  */
int
scm_std_select (int nfds,
                fd_set *readfds,
                fd_set *writefds,
                fd_set *exceptfds,
                struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  /* We always need a read set to hold the wakeup fd.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register the sleep pipe as our wakeup channel; if an interrupt is
     already pending, return EINTR without selecting at all.  */
  if (scm_i_prepare_to_wait_on_fd (t, t->sleep_pipe[1]))
    {
      eno = EINTR;
      res = -1;
    }
  else
    {
      wakeup_fd = t->sleep_pipe[0];
      FD_SET (wakeup_fd, readfds);
      /* Widen nfds if the wakeup fd is beyond the caller's range.  */
      if (wakeup_fd >= nfds)
        nfds = wakeup_fd+1;

      args.nfds = nfds;
      args.read_fds = readfds;
      args.write_fds = writefds;
      args.except_fds = exceptfds;
      args.timeout = timeout;

      /* Explicitly cooperate with the GC. */
      scm_without_guile (do_std_select, &args);

      res = args.result;
      eno = args.errno_value;

      scm_i_wait_finished (t);

      /* If the wakeup fd fired, drain one byte and hide it from the
         caller; if it was the only ready fd, report EINTR instead.  */
      if (res > 0 && FD_ISSET (wakeup_fd, readfds))
        {
          char dummy;
          full_read (wakeup_fd, &dummy, 1);

          FD_CLR (wakeup_fd, readfds);
          res -= 1;
          if (res == 0)
            {
              eno = EINTR;
              res = -1;
            }
        }
    }
  errno = eno;
  return res;
}
1587
1588 /* Convenience API for blocking while in guile mode. */
1589
1590 #if SCM_USE_PTHREAD_THREADS
1591
1592 /* It seems reasonable to not run procedures related to mutex and condition
1593 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1594 without it, and (ii) the only potential gain would be GC latency. See
1595 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1596 for a discussion of the pros and cons. */
1597
1598 int
scm_pthread_mutex_lock(scm_i_pthread_mutex_t * mutex)1599 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1600 {
1601 int res = scm_i_pthread_mutex_lock (mutex);
1602 return res;
1603 }
1604
1605 static void
do_unlock(void * data)1606 do_unlock (void *data)
1607 {
1608 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1609 }
1610
/* Lock MUTEX now and arrange for it to be unlocked when the current
   dynwind context is left (including on nonlocal exit).  The lock is
   taken before the handler is registered, so the handler only ever
   sees a locked mutex.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1617
1618 int
scm_pthread_cond_wait(scm_i_pthread_cond_t * cond,scm_i_pthread_mutex_t * mutex)1619 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1620 {
1621 return scm_i_pthread_cond_wait (cond, mutex);
1622 }
1623
1624 int
scm_pthread_cond_timedwait(scm_i_pthread_cond_t * cond,scm_i_pthread_mutex_t * mutex,const scm_t_timespec * wt)1625 scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1626 scm_i_pthread_mutex_t *mutex,
1627 const scm_t_timespec *wt)
1628 {
1629 return scm_i_pthread_cond_timedwait (cond, mutex, wt);
1630 }
1631
1632 #endif
1633
/* Dynwind unwind handler: release the pthread mutex in DATA, then
   re-enable asyncs for the current thread (undoing the increment made
   in scm_i_dynwind_pthread_mutex_lock_block_asyncs).  The unlock comes
   first so any async that runs cannot observe the mutex held.  */
static void
do_unlock_with_asyncs (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
  SCM_I_CURRENT_THREAD->block_asyncs--;
}
1640
/* Like scm_dynwind_pthread_mutex_lock, but also block asyncs for the
   duration: asyncs are disabled before taking the lock so no async can
   run while the mutex is held; both are undone by the unwind
   handler.  */
void
scm_i_dynwind_pthread_mutex_lock_block_asyncs (scm_i_pthread_mutex_t *mutex)
{
  SCM_I_CURRENT_THREAD->block_asyncs++;
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock_with_asyncs, mutex,
                              SCM_F_WIND_EXPLICITLY);
}
1649
/* Sleep for USECS microseconds via scm_std_select, so the sleep is
   interruptible by other threads.  Returns the time left computed from
   TV; NOTE(review): select only updates the timeout on some systems
   (e.g. Linux), so on others this presumably returns the full
   requested time — confirm if callers rely on the remainder.  */
unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}
1659
/* Sleep for SECS seconds via scm_std_select (interruptible).  Returns
   TV's seconds field after the call; see the timeout-update caveat on
   scm_std_usleep.  */
unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}
1669
1670 /*** Misc */
1671
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
            (void),
            "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* The Scheme-level handle of the calling OS thread.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1680
1681 static SCM
scm_c_make_list(size_t n,SCM fill)1682 scm_c_make_list (size_t n, SCM fill)
1683 {
1684 SCM res = SCM_EOL;
1685 while (n-- > 0)
1686 res = scm_cons (fill, res);
1687 return res;
1688 }
1689
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
            (void),
            "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  /* Pre-allocate the list before taking the lock, then fill it in
     place below.  */
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      /* The internal signal-delivery thread is not exposed.  */
      if (t != scm_i_signal_delivery_thread)
        {
          SCM_SETCAR (*l, t->handle);
          l = SCM_CDRLOC (*l);
        }
      n--;
    }
  /* Truncate any unused pre-allocated cells (e.g. the slot that would
     have held the signal-delivery thread).  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1718
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
            (SCM thread),
            "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  /* Scheme-level wrapper over the C predicate below.  */
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
1727
/* C-level predicate: nonzero iff THREAD (a thread smob) has exited.
   Signals a wrong-type error if THREAD is not a thread.  */
int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
1738
SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
            (void),
            "Return the total number of processors of the machine, which\n"
            "is guaranteed to be at least 1.  A ``processor'' here is a\n"
            "thread execution unit, which can be either:\n\n"
            "@itemize\n"
            "@item an execution core in a (possibly multi-core) chip, in a\n"
            "  (possibly multi- chip) module, in a single computer, or\n"
            "@item a thread execution unit inside a core in the case of\n"
            "  @dfn{hyper-threaded} CPUs.\n"
            "@end itemize\n\n"
            "Which of the two definitions is used, is unspecified.\n")
#define FUNC_NAME s_scm_total_processor_count
{
  /* Delegates to gnulib's num_processors; NPROC_ALL counts every
     processor in the machine.  */
  return scm_from_ulong (num_processors (NPROC_ALL));
}
#undef FUNC_NAME
1756
SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
            (void),
            "Like @code{total-processor-count}, but return the number of\n"
            "processors available to the current process.  See\n"
            "@code{setaffinity} and @code{getaffinity} for more\n"
            "information.\n")
#define FUNC_NAME s_scm_current_processor_count
{
  /* NPROC_CURRENT respects the process's CPU affinity mask.  */
  return scm_from_ulong (num_processors (NPROC_CURRENT));
}
#undef FUNC_NAME
1768
1769
1770
1771
/* Condition used to wake sleeping threads (initialized in
   scm_threads_prehistory).  */
static scm_i_pthread_cond_t wake_up_cond;
/* Set to 1 once scm_init_threads has run.  */
static int threads_initialized_p = 0;


/*** Initialization */

/* Miscellaneous global lock, available to other libguile modules.  */
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Shared attribute for creating recursive pthread mutexes.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
1783
1784 void
scm_threads_prehistory(void * base)1785 scm_threads_prehistory (void *base)
1786 {
1787 #if SCM_USE_PTHREAD_THREADS
1788 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
1789 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
1790 PTHREAD_MUTEX_RECURSIVE);
1791 #endif
1792
1793 scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
1794 scm_i_pthread_cond_init (&wake_up_cond, NULL);
1795
1796 thread_gc_kind =
1797 GC_new_kind (GC_new_free_list (),
1798 GC_MAKE_PROC (GC_new_proc (thread_mark), 0),
1799 0, 1);
1800
1801 guilify_self_1 ((struct GC_stack_base *) base, 0);
1802 }
1803
/* Smob type tags for threads, mutexes and condition variables,
   assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
1807
/* Extension hook run when the (ice-9 threads) module is loaded:
   snarfs in the SCM_DEFINEs of this file and captures variables from
   the module that the C side needs to call back into.  */
static void
scm_init_ice_9_threads (void *unused)
{
#include "libguile/threads.x"

  cancel_thread_var =
    scm_module_variable (scm_current_module (),
                         scm_from_latin1_symbol ("cancel-thread"));
  join_thread_var =
    scm_module_variable (scm_current_module (),
                         scm_from_latin1_symbol ("join-thread"));
  call_with_new_thread_var =
    scm_module_variable (scm_current_module (),
                         scm_from_latin1_symbol ("call-with-new-thread"));
}
1823
/* Main thread-subsystem initialization: create the three smob types,
   finish guilifying the initial thread, and register the (ice-9
   threads) extension for deferred loading.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (struct scm_mutex));
  scm_set_smob_print (scm_tc16_mutex, scm_mutex_print);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (struct scm_cond));
  scm_set_smob_print (scm_tc16_condvar, scm_cond_print);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (scm_i_make_initial_dynamic_state ());
  threads_initialized_p = 1;

  scm_c_register_extension ("libguile-" SCM_EFFECTIVE_VERSION,
                            "scm_init_ice_9_threads",
                            scm_init_ice_9_threads, NULL);
}
1845
/* Late initialization: record the current dynamic state as the default
   one inherited by newly created threads.  */
void
scm_init_threads_default_dynamic_state ()
{
  default_dynamic_state = scm_current_dynamic_state ();
}
1851
1852
/* IA64-specific things. */

/* The IA64 keeps a second, register backing-store stack; the GC needs
   its base address, and continuation code needs the current backing
   store pointer (ar.bsp) out of a signal/ucontext.  Each supported OS
   exposes these differently, hence the per-platform variants below.  */
#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
/* HP-UX: look up the RSE stack region via pstat; aborts if absent.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
/* HP-UX: extract ar.bsp from an opaque ucontext via the libc helper.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
/* GNU/Linux: glibc exports the backing-store base directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
/* GNU/Linux: ar.bsp lives in the sigcontext embedded in ucontext_t.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
# if defined __FreeBSD__ || defined __DragonFly__
#  include <ucontext.h>
/* FreeBSD/DragonFly: the backing store base is a fixed address.  */
void *
scm_ia64_register_backing_store_base (void)
{
  return (void *)0x8000000000000000;
}
/* FreeBSD/DragonFly: reconstruct ar.bsp from bspstore plus the number
   of dirty registers not yet flushed to the backing store.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *)(ctx->uc_mcontext.mc_special.bspstore
                  + ctx->uc_mcontext.mc_special.ndirty);
}
# endif /* __FreeBSD__ */
#endif /* __ia64__ */
1908
1909
1910 /*
1911 Local Variables:
1912 c-file-style: "gnu"
1913 End:
1914 */
1915