1 /**************************************************************************/
2 /* */
3 /* OCaml */
4 /* */
5 /* Xavier Leroy and Damien Doligez, INRIA Rocquencourt */
6 /* */
7 /* Copyright 1995 Institut National de Recherche en Informatique et */
8 /* en Automatique. */
9 /* */
10 /* All rights reserved. This file is distributed under the terms of */
11 /* the GNU Lesser General Public License version 2.1, with the */
12 /* special exception on linking described in the file LICENSE. */
13 /* */
14 /**************************************************************************/
15
16 #define CAML_INTERNALS
17
18 #include "caml/alloc.h"
19 #include "caml/backtrace.h"
20 #include "caml/callback.h"
21 #include "caml/custom.h"
22 #include "caml/fail.h"
23 #include "caml/io.h"
24 #include "caml/memory.h"
25 #include "caml/misc.h"
26 #include "caml/mlvalues.h"
27 #include "caml/printexc.h"
28 #include "caml/roots.h"
29 #include "caml/signals.h"
30 #ifdef NATIVE_CODE
31 #include "caml/stack.h"
32 #else
33 #include "caml/stacks.h"
34 #endif
35 #include "caml/sys.h"
36 #include "threads.h"
37
38 #if defined(NATIVE_CODE) && defined(WITH_SPACETIME)
39 #include "caml/spacetime.h"
40 #endif
41
42 /* Initial size of bytecode stack when a thread is created (4 Ko) */
43 #define Thread_stack_size (Stack_size / 4)
44
45 /* Max computation time before rescheduling, in milliseconds */
46 #define Thread_timeout 50
47
48 /* OS-specific code */
49 #ifdef _WIN32
50 #include "st_win32.h"
51 #else
52 #include "st_posix.h"
53 #endif
54
55 /* The ML value describing a thread (heap-allocated) */
56
struct caml_thread_descr {
  value ident;                  /* Unique integer ID */
  value start_closure;          /* The closure to start this thread */
  value terminated;             /* Triggered event for thread termination */
};
62
63 #define Ident(v) (((struct caml_thread_descr *)(v))->ident)
64 #define Start_closure(v) (((struct caml_thread_descr *)(v))->start_closure)
65 #define Terminated(v) (((struct caml_thread_descr *)(v))->terminated)
66
67 /* The infos on threads (allocated via malloc()) */
68
struct caml_thread_struct {
  value descr;                  /* The heap-allocated descriptor (root) */
  struct caml_thread_struct * next;  /* Double linking of running threads */
  struct caml_thread_struct * prev;
#ifdef NATIVE_CODE
  char * top_of_stack;          /* Top of stack for this thread (approx.) */
  char * bottom_of_stack;       /* Saved value of caml_bottom_of_stack */
  uintnat last_retaddr;         /* Saved value of caml_last_return_address */
  value * gc_regs;              /* Saved value of caml_gc_regs */
  char * exception_pointer;     /* Saved value of caml_exception_pointer */
  struct caml__roots_block * local_roots;  /* Saved value of local_roots */
  struct longjmp_buffer * exit_buf;  /* For thread exit */
#if defined(NATIVE_CODE) && defined(WITH_SPACETIME)
  /* Per-thread Spacetime profiling state, saved/restored on context switch */
  value internal_spacetime_trie_root;
  value internal_spacetime_finaliser_trie_root;
  value* spacetime_trie_node_ptr;
  value* spacetime_finaliser_trie_root;
#endif
#else
  value * stack_low;            /* The execution stack for this thread */
  value * stack_high;
  value * stack_threshold;
  value * sp;                   /* Saved value of caml_extern_sp for this thread */
  value * trapsp;               /* Saved value of caml_trapsp for this thread */
  struct caml__roots_block * local_roots;  /* Saved value of caml_local_roots */
  struct longjmp_buffer * external_raise;  /* Saved caml_external_raise */
#endif
  int backtrace_pos;            /* Saved caml_backtrace_pos */
  backtrace_slot * backtrace_buffer;  /* Saved caml_backtrace_buffer */
  value backtrace_last_exn;     /* Saved caml_backtrace_last_exn (root) */
};
100
101 typedef struct caml_thread_struct * caml_thread_t;
102
103 /* The "head" of the circular list of thread descriptors */
104 static caml_thread_t all_threads = NULL;
105
106 /* The descriptor for the currently executing thread */
107 static caml_thread_t curr_thread = NULL;
108
109 /* The master lock protecting the OCaml runtime system */
110 static st_masterlock caml_master_lock;
111
112 /* Whether the "tick" thread is already running */
113 static int caml_tick_thread_running = 0;
114
115 /* The thread identifier of the "tick" thread */
116 static st_thread_id caml_tick_thread_id;
117
118 /* The key used for storing the thread descriptor in the specific data
119 of the corresponding system thread. */
120 static st_tlskey thread_descriptor_key;
121
122 /* The key used for unlocking I/O channels on exceptions */
123 static st_tlskey last_channel_locked_key;
124
125 /* Identifier for next thread creation */
126 static intnat thread_next_ident = 0;
127
128 /* Forward declarations */
129 static value caml_threadstatus_new (void);
130 static void caml_threadstatus_terminate (value);
131 static st_retcode caml_threadstatus_wait (value);
132
133 /* Imports from the native-code runtime system */
134 #ifdef NATIVE_CODE
135 extern struct longjmp_buffer caml_termination_jmpbuf;
136 extern void (*caml_termination_hook)(void);
137 #endif
138
139 /* Hook for scanning the stacks of the other threads */
140
141 static void (*prev_scan_roots_hook) (scanning_action);
142
/* GC hook: apply [action] to the GC roots owned by every thread:
   the heap-allocated descriptor, the saved backtrace exception, and
   (for threads other than the current one) the saved stack. */
static void caml_thread_scan_roots(scanning_action action)
{
  caml_thread_t th;

  th = curr_thread;
  do {
    (*action)(th->descr, &th->descr);
    (*action)(th->backtrace_last_exn, &th->backtrace_last_exn);
    /* Don't rescan the stack of the current thread, it was done already */
    if (th != curr_thread) {
#ifdef NATIVE_CODE
      /* bottom_of_stack == NULL means the thread has not started running
         OCaml code yet, so it has no OCaml frames to scan */
      if (th->bottom_of_stack != NULL)
        caml_do_local_roots(action, th->bottom_of_stack, th->last_retaddr,
                            th->gc_regs, th->local_roots);
#else
      caml_do_local_roots(action, th->sp, th->stack_high, th->local_roots);
#endif
    }
    th = th->next;
  } while (th != curr_thread);
  /* Hook */
  if (prev_scan_roots_hook != NULL) (*prev_scan_roots_hook)(action);
}
166
167 /* Saving and restoring runtime state in curr_thread */
168
/* Copy the per-thread parts of the global runtime state into the
   info block of the current thread (done before releasing the
   master lock, so another thread can install its own state). */
static inline void caml_thread_save_runtime_state(void)
{
#ifdef NATIVE_CODE
  curr_thread->top_of_stack = caml_top_of_stack;
  curr_thread->bottom_of_stack = caml_bottom_of_stack;
  curr_thread->last_retaddr = caml_last_return_address;
  curr_thread->gc_regs = caml_gc_regs;
  curr_thread->exception_pointer = caml_exception_pointer;
  curr_thread->local_roots = caml_local_roots;
#ifdef WITH_SPACETIME
  curr_thread->spacetime_trie_node_ptr
    = caml_spacetime_trie_node_ptr;
  curr_thread->spacetime_finaliser_trie_root
    = caml_spacetime_finaliser_trie_root;
#endif
#else
  curr_thread->stack_low = caml_stack_low;
  curr_thread->stack_high = caml_stack_high;
  curr_thread->stack_threshold = caml_stack_threshold;
  curr_thread->sp = caml_extern_sp;
  curr_thread->trapsp = caml_trapsp;
  curr_thread->local_roots = caml_local_roots;
  curr_thread->external_raise = caml_external_raise;
#endif
  curr_thread->backtrace_pos = caml_backtrace_pos;
  curr_thread->backtrace_buffer = caml_backtrace_buffer;
  curr_thread->backtrace_last_exn = caml_backtrace_last_exn;
}
197
/* Inverse of caml_thread_save_runtime_state: install the state saved
   in curr_thread into the global runtime variables (done after
   acquiring the master lock). */
static inline void caml_thread_restore_runtime_state(void)
{
#ifdef NATIVE_CODE
  caml_top_of_stack = curr_thread->top_of_stack;
  caml_bottom_of_stack= curr_thread->bottom_of_stack;
  caml_last_return_address = curr_thread->last_retaddr;
  caml_gc_regs = curr_thread->gc_regs;
  caml_exception_pointer = curr_thread->exception_pointer;
  caml_local_roots = curr_thread->local_roots;
#ifdef WITH_SPACETIME
  caml_spacetime_trie_node_ptr
    = curr_thread->spacetime_trie_node_ptr;
  caml_spacetime_finaliser_trie_root
    = curr_thread->spacetime_finaliser_trie_root;
#endif
#else
  caml_stack_low = curr_thread->stack_low;
  caml_stack_high = curr_thread->stack_high;
  caml_stack_threshold = curr_thread->stack_threshold;
  caml_extern_sp = curr_thread->sp;
  caml_trapsp = curr_thread->trapsp;
  caml_local_roots = curr_thread->local_roots;
  caml_external_raise = curr_thread->external_raise;
#endif
  caml_backtrace_pos = curr_thread->backtrace_pos;
  caml_backtrace_buffer = curr_thread->backtrace_buffer;
  caml_backtrace_last_exn = curr_thread->backtrace_last_exn;
}
226
227 /* Hooks for caml_enter_blocking_section and caml_leave_blocking_section */
228
229
/* Hook installed as caml_enter_blocking_section_hook: park this
   thread's runtime state, then let other threads run OCaml code.
   The save MUST happen before the lock is released. */
static void caml_thread_enter_blocking_section(void)
{
  /* Save the current runtime state in the thread descriptor
     of the current thread */
  caml_thread_save_runtime_state();
  /* Tell other threads that the runtime is free */
  st_masterlock_release(&caml_master_lock);
}
238
/* Hook installed as caml_leave_blocking_section_hook: reacquire the
   runtime and reinstall this thread's saved state.  curr_thread must
   be refreshed from TLS because another thread may have run since. */
static void caml_thread_leave_blocking_section(void)
{
  /* Wait until the runtime is free */
  st_masterlock_acquire(&caml_master_lock);
  /* Update curr_thread to point to the thread descriptor corresponding
     to the thread currently executing */
  curr_thread = st_tls_get(thread_descriptor_key);
  /* Restore the runtime state from the curr_thread descriptor */
  caml_thread_restore_runtime_state();
}
249
/* Hook installed as caml_try_leave_blocking_section_hook.
   Deliberately always fails: see PR#3659 below. */
static int caml_thread_try_leave_blocking_section(void)
{
  /* Disable immediate processing of signals (PR#3659).
     try_leave_blocking_section always fails, forcing the signal to be
     recorded and processed at the next leave_blocking_section or
     polling. */
  return 0;
}
258
259 /* Hooks for I/O locking */
260
caml_io_mutex_free(struct channel * chan)261 static void caml_io_mutex_free(struct channel *chan)
262 {
263 st_mutex mutex = chan->mutex;
264 if (mutex != NULL) {
265 st_mutex_destroy(mutex);
266 chan->mutex = NULL;
267 }
268 }
269
/* Hook: lock an I/O channel, creating its mutex on first use.
   Records the channel in TLS so caml_io_mutex_unlock_exn can
   release it if an exception escapes while it is held. */
static void caml_io_mutex_lock(struct channel *chan)
{
  st_mutex mutex = chan->mutex;

  if (mutex == NULL) {
    /* Lazily create the mutex the first time the channel is locked */
    st_check_error(st_mutex_create(&mutex), "channel locking"); /*PR#7038*/
    chan->mutex = mutex;
  }
  /* PR#4351: first try to acquire mutex without releasing the master lock */
  if (st_mutex_trylock(mutex) == PREVIOUSLY_UNLOCKED) {
    st_tls_set(last_channel_locked_key, (void *) chan);
    return;
  }
  /* If unsuccessful, block on mutex */
  caml_enter_blocking_section();
  st_mutex_lock(mutex);
  /* Problem: if a signal occurs at this point,
     and the signal handler raises an exception, we will not
     unlock the mutex. The alternative (doing the setspecific
     before locking the mutex is also incorrect, since we could
     then unlock a mutex that is unlocked or locked by someone else. */
  st_tls_set(last_channel_locked_key, (void *) chan);
  caml_leave_blocking_section();
}
294
/* Hook: unlock an I/O channel and clear the "held channel" TLS slot. */
static void caml_io_mutex_unlock(struct channel *chan)
{
  st_mutex_unlock(chan->mutex);
  st_tls_set(last_channel_locked_key, NULL);
}
300
caml_io_mutex_unlock_exn(void)301 static void caml_io_mutex_unlock_exn(void)
302 {
303 struct channel * chan = st_tls_get(last_channel_locked_key);
304 if (chan != NULL) caml_io_mutex_unlock(chan);
305 }
306
307 /* Hook for estimating stack usage */
308
309 static uintnat (*prev_stack_usage_hook)(void);
310
caml_thread_stack_usage(void)311 static uintnat caml_thread_stack_usage(void)
312 {
313 uintnat sz;
314 caml_thread_t th;
315
316 /* Don't add stack for current thread, this is done elsewhere */
317 for (sz = 0, th = curr_thread->next;
318 th != curr_thread;
319 th = th->next) {
320 #ifdef NATIVE_CODE
321 if(th->top_of_stack != NULL && th->bottom_of_stack != NULL &&
322 th->top_of_stack > th->bottom_of_stack)
323 sz += (value *) th->top_of_stack - (value *) th->bottom_of_stack;
324 #else
325 sz += th->stack_high - th->sp;
326 #endif
327 }
328 if (prev_stack_usage_hook != NULL)
329 sz += prev_stack_usage_hook();
330 return sz;
331 }
332
333 /* Create and setup a new thread info block.
334 This block has no associated thread descriptor and
335 is not inserted in the list of threads. */
336
/* Allocate and initialize a fresh thread info block (malloc'd).
   Returns NULL on out-of-memory.  The caller must fill [descr],
   link the block into the thread list, and register it in TLS. */
static caml_thread_t caml_thread_new_info(void)
{
  caml_thread_t th;
  th = (caml_thread_t) malloc(sizeof(struct caml_thread_struct));
  if (th == NULL) return NULL;
  th->descr = Val_unit;         /* filled later */
#ifdef NATIVE_CODE
  /* NULL bottom_of_stack marks "no OCaml frames yet" for the GC scan */
  th->bottom_of_stack = NULL;
  th->top_of_stack = NULL;
  th->last_retaddr = 1;
  th->exception_pointer = NULL;
  th->local_roots = NULL;
  th->exit_buf = NULL;
#ifdef WITH_SPACETIME
  /* CR-someday mshinwell: The commented-out changes here are for multicore,
     where we think we should have one trie per domain. */
  th->internal_spacetime_trie_root = Val_unit;
  th->spacetime_trie_node_ptr =
    &caml_spacetime_trie_root; /* &th->internal_spacetime_trie_root; */
  th->internal_spacetime_finaliser_trie_root = Val_unit;
  th->spacetime_finaliser_trie_root
    = caml_spacetime_finaliser_trie_root;
    /* &th->internal_spacetime_finaliser_trie_root; */
  caml_spacetime_register_thread(
    th->spacetime_trie_node_ptr,
    th->spacetime_finaliser_trie_root);
#endif
#else
  /* Allocate the stacks */
  th->stack_low = (value *) caml_stat_alloc(Thread_stack_size);
  th->stack_high = th->stack_low + Thread_stack_size / sizeof(value);
  th->stack_threshold = th->stack_low + Stack_threshold / sizeof(value);
  th->sp = th->stack_high;      /* stack grows down: empty stack */
  th->trapsp = th->stack_high;
  th->local_roots = NULL;
  th->external_raise = NULL;
#endif
  th->backtrace_pos = 0;
  th->backtrace_buffer = NULL;
  th->backtrace_last_exn = Val_unit;
  return th;
}
379
380 /* Allocate a thread descriptor block. */
381
/* Allocate the heap-allocated descriptor (a caml_thread_descr record)
   for a thread whose start closure is [clos].  [clos] and the
   termination event must be rooted across the allocations. */
static value caml_thread_new_descriptor(value clos)
{
  value mu = Val_unit;
  value descr;
  Begin_roots2 (clos, mu)
    /* Create and initialize the termination semaphore */
    mu = caml_threadstatus_new();
    /* Create a descriptor for the new thread */
    descr = caml_alloc_small(3, 0);
    Ident(descr) = Val_long(thread_next_ident);
    Start_closure(descr) = clos;
    Terminated(descr) = mu;
    thread_next_ident++;
  End_roots();
  return descr;
}
398
399 /* Remove a thread info block from the list of threads.
400 Free it and its stack resources. */
401
/* Unlink [th] from the doubly-linked thread list and free its
   resources.  Also keeps [all_threads] valid when [th] is the list
   head or the last remaining thread. */
static void caml_thread_remove_info(caml_thread_t th)
{
  if (th->next == th)
    all_threads = NULL; /* last OCaml thread exiting */
  else if (all_threads == th)
    all_threads = th->next;     /* PR#5295 */
  th->next->prev = th->prev;
  th->prev->next = th->next;
#ifndef NATIVE_CODE
  caml_stat_free(th->stack_low);
#endif
  if (th->backtrace_buffer != NULL) free(th->backtrace_buffer);
#ifndef WITH_SPACETIME
  caml_stat_free(th);
  /* CR-soon mshinwell: consider what to do about the Spacetime trace.  Could
     perhaps have a hook to save a snapshot on thread termination.
     For the moment we can't even free [th], since it contains the trie
     roots. */
#endif
}
422
423 /* Reinitialize the thread machinery after a fork() (PR#4577) */
424
/* atfork child handler: after fork() only the forking thread exists,
   so rebuild the thread machinery around curr_thread (PR#4577). */
static void caml_thread_reinitialize(void)
{
  caml_thread_t thr, next;
  struct channel * chan;

  /* Remove all other threads (now nonexistent)
     from the doubly-linked list of threads */
  thr = curr_thread->next;
  while (thr != curr_thread) {
    next = thr->next;
    caml_stat_free(thr);
    thr = next;
  }
  curr_thread->next = curr_thread;
  curr_thread->prev = curr_thread;
  all_threads = curr_thread;
  /* Reinitialize the master lock machinery,
     just in case the fork happened while other threads were doing
     caml_leave_blocking_section */
  st_masterlock_init(&caml_master_lock);
  /* Tick thread is not currently running in child process, will be
     re-created at next Thread.create */
  caml_tick_thread_running = 0;
  /* Destroy all IO mutexes; will be reinitialized on demand */
  for (chan = caml_all_opened_channels;
       chan != NULL;
       chan = chan->next) {
    if (chan->mutex != NULL) {
      st_mutex_destroy(chan->mutex);
      chan->mutex = NULL;
    }
  }
}
458
459 /* Initialize the thread machinery */
460
/* Initialize the thread machinery: set up the master lock, TLS keys,
   the info block of the main thread, and install all runtime hooks.
   Idempotent (PR#1325). */
CAMLprim value caml_thread_initialize(value unit)   /* ML */
{
  /* Protect against repeated initialization (PR#1325) */
  if (curr_thread != NULL) return Val_unit;
  /* OS-specific initialization */
  st_initialize();
  /* Initialize and acquire the master lock */
  st_masterlock_init(&caml_master_lock);
  /* Initialize the keys */
  st_tls_newkey(&thread_descriptor_key);
  st_tls_newkey(&last_channel_locked_key);
  /* Set up a thread info block for the current thread */
  curr_thread =
    (caml_thread_t) caml_stat_alloc(sizeof(struct caml_thread_struct));
  curr_thread->descr = caml_thread_new_descriptor(Val_unit);
  curr_thread->next = curr_thread;
  curr_thread->prev = curr_thread;
  all_threads = curr_thread;
  curr_thread->backtrace_last_exn = Val_unit;
#ifdef NATIVE_CODE
  curr_thread->exit_buf = &caml_termination_jmpbuf;
#endif
  /* The stack-related fields will be filled in at the next
     caml_enter_blocking_section */
  /* Associate the thread descriptor with the thread */
  st_tls_set(thread_descriptor_key, (void *) curr_thread);
  /* Set up the hooks; the previous hooks are remembered and chained */
  prev_scan_roots_hook = caml_scan_roots_hook;
  caml_scan_roots_hook = caml_thread_scan_roots;
  caml_enter_blocking_section_hook = caml_thread_enter_blocking_section;
  caml_leave_blocking_section_hook = caml_thread_leave_blocking_section;
  caml_try_leave_blocking_section_hook = caml_thread_try_leave_blocking_section;
#ifdef NATIVE_CODE
  caml_termination_hook = st_thread_exit;
#endif
  caml_channel_mutex_free = caml_io_mutex_free;
  caml_channel_mutex_lock = caml_io_mutex_lock;
  caml_channel_mutex_unlock = caml_io_mutex_unlock;
  caml_channel_mutex_unlock_exn = caml_io_mutex_unlock_exn;
  prev_stack_usage_hook = caml_stack_usage_hook;
  caml_stack_usage_hook = caml_thread_stack_usage;
  /* Set up fork() to reinitialize the thread machinery in the child
     (PR#4577) */
  st_atfork(caml_thread_reinitialize);
  return Val_unit;
}
507
508 /* Cleanup the thread machinery on program exit or DLL unload. */
509
/* Stop the tick thread (if running) by setting its stop flag and
   joining it; called on program exit or DLL unload. */
CAMLprim value caml_thread_cleanup(value unit)   /* ML */
{
  if (caml_tick_thread_running){
    caml_tick_thread_stop = 1;          /* request tick thread to exit */
    st_thread_join(caml_tick_thread_id);
    caml_tick_thread_stop = 0;
    caml_tick_thread_running = 0;
  }
  return Val_unit;
}
520
521 /* Thread cleanup at termination */
522
/* Tear down the current thread: signal its termination event, unlink
   its info block, and release the runtime.  Must be called with the
   master lock held; does not return control to OCaml. */
static void caml_thread_stop(void)
{
  /* PR#5188, PR#7220: some of the global runtime state may have
     changed as the thread was running, so we save it in the
     curr_thread data to make sure that the cleanup logic
     below uses accurate information. */
  caml_thread_save_runtime_state();
  /* Signal that the thread has terminated */
  caml_threadstatus_terminate(Terminated(curr_thread->descr));
  /* Remove th from the doubly-linked list of threads and free its info block */
  caml_thread_remove_info(curr_thread);
  /* OS-specific cleanups */
  st_thread_cleanup();
  /* Release the runtime system */
  st_masterlock_release(&caml_master_lock);
}
539
540 /* Create a thread */
541
/* OS-level entry point of threads created by Thread.create.
   [arg] is the thread's info block.  In native code, a sigsetjmp
   target is installed so caml_thread_exit can longjmp back here. */
static ST_THREAD_FUNCTION caml_thread_start(void * arg)
{
  caml_thread_t th = (caml_thread_t) arg;
  value clos;
#ifdef NATIVE_CODE
  struct longjmp_buffer termination_buf;
  char tos;
#endif

  /* Associate the thread descriptor with the thread */
  st_tls_set(thread_descriptor_key, (void *) th);
  /* Acquire the global mutex */
  caml_leave_blocking_section();
#ifdef NATIVE_CODE
  /* Record top of stack (approximative) */
  th->top_of_stack = &tos;
  /* Setup termination handler (for caml_thread_exit) */
  if (sigsetjmp(termination_buf.buf, 0) == 0) {
    th->exit_buf = &termination_buf;
#endif
    /* Callback the closure */
    clos = Start_closure(th->descr);
    /* Drop the closure reference so it can be collected while running */
    caml_modify(&(Start_closure(th->descr)), Val_unit);
    caml_callback_exn(clos, Val_unit);
    caml_thread_stop();
#ifdef NATIVE_CODE
  }
#endif
  /* The thread now stops running */
  return 0;
}
573
/* Thread.create: allocate the info block and descriptor, insert the
   thread after the current one in the list, start the OS thread, and
   lazily start the tick thread (PR#4666). */
CAMLprim value caml_thread_new(value clos)          /* ML */
{
  caml_thread_t th;
  st_retcode err;

  /* Create a thread info block */
  th = caml_thread_new_info();
  if (th == NULL) caml_raise_out_of_memory();
  /* Equip it with a thread descriptor */
  th->descr = caml_thread_new_descriptor(clos);
  /* Add thread info block to the list of threads */
  th->next = curr_thread->next;
  th->prev = curr_thread;
  curr_thread->next->prev = th;
  curr_thread->next = th;
  /* Create the new thread */
  err = st_thread_create(NULL, caml_thread_start, (void *) th);
  if (err != 0) {
    /* Creation failed, remove thread info block from list of threads */
    caml_thread_remove_info(th);
    st_check_error(err, "Thread.create");
  }
  /* Create the tick thread if not already done.
     Because of PR#4666, we start the tick thread late, only when we create
     the first additional thread in the current process*/
  if (! caml_tick_thread_running) {
    err = st_thread_create(&caml_tick_thread_id, caml_thread_tick, NULL);
    st_check_error(err, "Thread.create");
    caml_tick_thread_running = 1;
  }
  return th->descr;
}
606
607 /* Register a thread already created from C */
608
/* Register with the OCaml runtime a thread that was created directly
   from C.  Returns 1 on success, 0 if already registered or on
   out-of-memory.  On return the thread is outside the runtime
   (blocking section), like any C thread. */
CAMLexport int caml_c_thread_register(void)
{
  caml_thread_t th;
  st_retcode err;

  /* Already registered? */
  if (st_tls_get(thread_descriptor_key) != NULL) return 0;
  /* Create a thread info block */
  th = caml_thread_new_info();
  if (th == NULL) return 0;
#ifdef NATIVE_CODE
  /* Use a stack-allocated local as an approximation of the stack top */
  th->top_of_stack = (char *) &err;
#endif
  /* Take master lock to protect access to the chaining of threads */
  st_masterlock_acquire(&caml_master_lock);
  /* Add thread info block to the list of threads */
  if (all_threads == NULL) {
    th->next = th;
    th->prev = th;
    all_threads = th;
  } else {
    th->next = all_threads->next;
    th->prev = all_threads;
    all_threads->next->prev = th;
    all_threads->next = th;
  }
  /* Associate the thread descriptor with the thread */
  st_tls_set(thread_descriptor_key, (void *) th);
  /* Release the master lock */
  st_masterlock_release(&caml_master_lock);
  /* Now we can re-enter the run-time system and heap-allocate the descriptor */
  caml_leave_blocking_section();
  th->descr = caml_thread_new_descriptor(Val_unit);  /* no closure */
  /* Create the tick thread if not already done.  */
  if (! caml_tick_thread_running) {
    err = st_thread_create(&caml_tick_thread_id, caml_thread_tick, NULL);
    if (err == 0) caml_tick_thread_running = 1;
  }
  /* Exit the run-time system */
  caml_enter_blocking_section();
  return 1;
}
651
652 /* Unregister a thread that was created from C and registered with
653 the function above */
654
/* Undo caml_c_thread_register: detach the calling C thread from the
   runtime.  Returns 1 on success, 0 if the thread was not registered. */
CAMLexport int caml_c_thread_unregister(void)
{
  caml_thread_t th = st_tls_get(thread_descriptor_key);
  /* Not registered? */
  if (th == NULL) return 0;
  /* Wait until the runtime is available */
  st_masterlock_acquire(&caml_master_lock);
  /* Forget the thread descriptor */
  st_tls_set(thread_descriptor_key, NULL);
  /* Remove thread info block from list of threads, and free it */
  caml_thread_remove_info(th);
  /* Release the runtime */
  st_masterlock_release(&caml_master_lock);
  return 1;
}
670
671 /* Return the current thread */
672
/* Thread.self: return the descriptor of the calling thread. */
CAMLprim value caml_thread_self(value unit)         /* ML */
{
  if (curr_thread == NULL) caml_invalid_argument("Thread.self: not initialized");
  return curr_thread->descr;
}
678
679 /* Return the identifier of a thread */
680
/* Thread.id: return the unique integer identifier of thread [th]. */
CAMLprim value caml_thread_id(value th)          /* ML */
{
  return Ident(th);
}
685
686 /* Print uncaught exception and backtrace */
687
/* Report on stderr an exception that escaped a thread's top-level
   closure, including the backtrace when backtraces are active. */
CAMLprim value caml_thread_uncaught_exception(value exn)  /* ML */
{
  char * msg = caml_format_exception(exn);
  fprintf(stderr, "Thread %d killed on uncaught exception %s\n",
          Int_val(Ident(curr_thread->descr)), msg);
  free(msg);                    /* caml_format_exception result is malloc'd */
  if (caml_backtrace_active) caml_print_exception_backtrace();
  fflush(stderr);
  return Val_unit;
}
698
699 /* Terminate current thread */
700
/* Thread.exit: terminate the calling thread.  Does not return. */
CAMLprim value caml_thread_exit(value unit)   /* ML */
{
  struct longjmp_buffer * exit_buf = NULL;

  if (curr_thread == NULL) caml_invalid_argument("Thread.exit: not initialized");

  /* In native code, we cannot call pthread_exit here because on some
     systems this raises a C++ exception, and ocamlopt-generated stack
     frames cannot be unwound.  Instead, we longjmp to the thread
     creation point (in caml_thread_start) or to the point in
     caml_main where caml_termination_hook will be called.
     Note that threads created in C then registered do not have
     a creation point (exit_buf == NULL).
 */
#ifdef NATIVE_CODE
  exit_buf = curr_thread->exit_buf;
#endif
  caml_thread_stop();
  if (exit_buf != NULL) {
    /* Native-code and (main thread or thread created by OCaml) */
    siglongjmp(exit_buf->buf, 1);
  } else {
    /* Bytecode, or thread created from C */
    st_thread_exit();
  }
  return Val_unit;  /* not reached */
}
728
729 /* Allow re-scheduling */
730
/* Thread.yield: give other OCaml threads a chance to run.  Does
   nothing if no other thread is waiting for the master lock. */
CAMLprim value caml_thread_yield(value unit)        /* ML */
{
  if (st_masterlock_waiters(&caml_master_lock) == 0) return Val_unit;
  caml_enter_blocking_section();
  st_thread_yield();
  caml_leave_blocking_section();
  return Val_unit;
}
739
740 /* Suspend the current thread until another thread terminates */
741
/* Thread.join: block until thread [th] has terminated, by waiting on
   its termination event. */
CAMLprim value caml_thread_join(value th)          /* ML */
{
  st_retcode rc = caml_threadstatus_wait(Terminated(th));
  st_check_error(rc, "Thread.join");
  return Val_unit;
}
748
749 /* Mutex operations */
750
751 #define Mutex_val(v) (* ((st_mutex *) Data_custom_val(v)))
752
/* GC finalizer for Mutex.t custom blocks: destroy the OS mutex. */
static void caml_mutex_finalize(value wrapper)
{
  st_mutex_destroy(Mutex_val(wrapper));
}
757
/* Compare two Mutex.t values by the address of the underlying
   OS mutex (an arbitrary but stable total order). */
static int caml_mutex_compare(value wrapper1, value wrapper2)
{
  st_mutex m1 = Mutex_val(wrapper1);
  st_mutex m2 = Mutex_val(wrapper2);
  if (m1 == m2) return 0;
  if (m1 < m2) return -1;
  return 1;
}
764
caml_mutex_hash(value wrapper)765 static intnat caml_mutex_hash(value wrapper)
766 {
767 return (intnat) (Mutex_val(wrapper));
768 }
769
770 static struct custom_operations caml_mutex_ops = {
771 "_mutex",
772 caml_mutex_finalize,
773 caml_mutex_compare,
774 caml_mutex_hash,
775 custom_serialize_default,
776 custom_deserialize_default
777 };
778
/* Mutex.create: allocate an OS mutex and wrap it in a custom block.
   The OS mutex is created first so the finalizer never sees an
   uninitialized pointer. */
CAMLprim value caml_mutex_new(value unit)        /* ML */
{
  st_mutex mut = NULL;          /* suppress warning */
  value wrapper;
  st_check_error(st_mutex_create(&mut), "Mutex.create");
  wrapper = caml_alloc_custom(&caml_mutex_ops, sizeof(st_mutex *),
                              0, 1);
  Mutex_val(wrapper) = mut;
  return wrapper;
}
789
/* Mutex.lock: acquire the mutex, releasing the OCaml runtime while
   blocked so other OCaml threads may run. */
CAMLprim value caml_mutex_lock(value wrapper)     /* ML */
{
  st_mutex mut = Mutex_val(wrapper);
  st_retcode retcode;

  /* PR#4351: first try to acquire mutex without releasing the master lock */
  if (st_mutex_trylock(mut) == PREVIOUSLY_UNLOCKED) return Val_unit;
  /* If unsuccessful, block on mutex */
  Begin_root(wrapper)           /* prevent the deallocation of mutex */
    caml_enter_blocking_section();
    retcode = st_mutex_lock(mut);
    caml_leave_blocking_section();
  End_roots();
  st_check_error(retcode, "Mutex.lock");
  return Val_unit;
}
806
/* Mutex.unlock: release the mutex.  Unlocking never blocks, so the
   master lock is kept (PR#4351). */
CAMLprim value caml_mutex_unlock(value wrapper)           /* ML */
{
  st_mutex mut = Mutex_val(wrapper);
  st_retcode retcode;
  /* PR#4351: no need to release and reacquire master lock */
  retcode = st_mutex_unlock(mut);
  st_check_error(retcode, "Mutex.unlock");
  return Val_unit;
}
816
caml_mutex_try_lock(value wrapper)817 CAMLprim value caml_mutex_try_lock(value wrapper) /* ML */
818 {
819 st_mutex mut = Mutex_val(wrapper);
820 st_retcode retcode;
821 retcode = st_mutex_trylock(mut);
822 if (retcode == ALREADY_LOCKED) return Val_false;
823 st_check_error(retcode, "Mutex.try_lock");
824 return Val_true;
825 }
826
827 /* Conditions operations */
828
829 #define Condition_val(v) (* (st_condvar *) Data_custom_val(v))
830
/* GC finalizer for Condition.t custom blocks: destroy the OS condvar. */
static void caml_condition_finalize(value wrapper)
{
  st_condvar_destroy(Condition_val(wrapper));
}
835
/* Compare two Condition.t values by the address of the underlying
   OS condition variable. */
static int caml_condition_compare(value wrapper1, value wrapper2)
{
  st_condvar c1 = Condition_val(wrapper1);
  st_condvar c2 = Condition_val(wrapper2);
  if (c1 == c2) return 0;
  if (c1 < c2) return -1;
  return 1;
}
842
caml_condition_hash(value wrapper)843 static intnat caml_condition_hash(value wrapper)
844 {
845 return (intnat) (Condition_val(wrapper));
846 }
847
/* Custom-block operations for Condition.t values. */
static struct custom_operations caml_condition_ops = {
  "_condition",
  caml_condition_finalize,
  caml_condition_compare,
  caml_condition_hash,
  custom_serialize_default,
  custom_deserialize_default,
  custom_compare_ext_default
};
857
/* Condition.create: allocate an OS condition variable and wrap it in
   a custom block. */
CAMLprim value caml_condition_new(value unit)        /* ML */
{
  st_condvar cond = NULL;       /* suppress warning */
  value wrapper;
  st_check_error(st_condvar_create(&cond), "Condition.create");
  wrapper = caml_alloc_custom(&caml_condition_ops, sizeof(st_condvar *),
                              0, 1);
  Condition_val(wrapper) = cond;
  return wrapper;
}
868
/* Condition.wait: atomically release [wmut] and wait on [wcond],
   leaving the OCaml runtime while blocked.  Both wrappers are rooted
   so the GC cannot finalize them during the wait. */
CAMLprim value caml_condition_wait(value wcond, value wmut)           /* ML */
{
  st_condvar cond = Condition_val(wcond);
  st_mutex mut = Mutex_val(wmut);
  st_retcode retcode;

  Begin_roots2(wcond, wmut)     /* prevent deallocation of cond and mutex */
    caml_enter_blocking_section();
    retcode = st_condvar_wait(cond, mut);
    caml_leave_blocking_section();
  End_roots();
  st_check_error(retcode, "Condition.wait");
  return Val_unit;
}
883
/* Condition.signal: wake one waiter (never blocks, keeps the runtime). */
CAMLprim value caml_condition_signal(value wrapper)           /* ML */
{
  st_check_error(st_condvar_signal(Condition_val(wrapper)),
                 "Condition.signal");
  return Val_unit;
}
890
/* Condition.broadcast: wake all waiters (never blocks). */
CAMLprim value caml_condition_broadcast(value wrapper)            /* ML */
{
  st_check_error(st_condvar_broadcast(Condition_val(wrapper)),
                 "Condition.broadcast");
  return Val_unit;
}
897
898 /* Thread status blocks */
899
900 #define Threadstatus_val(v) (* ((st_event *) Data_custom_val(v)))
901
/* GC finalizer for thread-status custom blocks: destroy the event. */
static void caml_threadstatus_finalize(value wrapper)
{
  st_event_destroy(Threadstatus_val(wrapper));
}
906
/* Compare two thread-status values by the address of the underlying
   OS event. */
static int caml_threadstatus_compare(value wrapper1, value wrapper2)
{
  st_event e1 = Threadstatus_val(wrapper1);
  st_event e2 = Threadstatus_val(wrapper2);
  if (e1 == e2) return 0;
  if (e1 < e2) return -1;
  return 1;
}
913
/* Custom-block operations for thread-status (termination event) values. */
static struct custom_operations caml_threadstatus_ops = {
  "_threadstatus",
  caml_threadstatus_finalize,
  caml_threadstatus_compare,
  custom_hash_default,
  custom_serialize_default,
  custom_deserialize_default,
  custom_compare_ext_default
};
923
/* Allocate a fresh (untriggered) termination event wrapped in a
   custom block; raised errors are reported as "Thread.create". */
static value caml_threadstatus_new (void)
{
  st_event ts = NULL;           /* suppress warning */
  value wrapper;
  st_check_error(st_event_create(&ts), "Thread.create");
  wrapper = caml_alloc_custom(&caml_threadstatus_ops, sizeof(st_event *),
                              0, 1);
  Threadstatus_val(wrapper) = ts;
  return wrapper;
}
934
/* Mark a thread as terminated by triggering its event, waking any
   thread blocked in Thread.join. */
static void caml_threadstatus_terminate (value wrapper)
{
  st_event_trigger(Threadstatus_val(wrapper));
}
939
/* Wait for a thread's termination event, leaving the OCaml runtime
   while blocked.  The wrapper is rooted so the GC cannot finalize
   the event during the wait. */
static st_retcode caml_threadstatus_wait (value wrapper)
{
  st_event ts = Threadstatus_val(wrapper);
  st_retcode retcode;

  Begin_roots1(wrapper)         /* prevent deallocation of ts */
    caml_enter_blocking_section();
    retcode = st_event_wait(ts);
    caml_leave_blocking_section();
  End_roots();
  return retcode;
}
952