/*	$OpenBSD: rthread.c,v 1.100 2022/12/27 17:10:07 jmc Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * The heart of rthreads.  Basic functions like creating and joining
 * threads.
 */

#include <sys/types.h>
#ifndef NO_PIC
#include <elf.h>
#pragma weak _DYNAMIC
#endif

#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <dlfcn.h>
#include <tib.h>

#include <pthread.h>

#include "cancel.h"		/* in libc/include */
#include "rthread.h"
#include "rthread_cb.h"

/*
 * Call nonstandard functions via names in the reserved namespace:
 *	dlctl() -> _dlctl()
 *	getthrid -> _thread_sys_getthrid
 */
typeof(dlctl) dlctl asm("_dlctl") __attribute__((weak));
REDIRECT_SYSCALL(getthrid);

/* weak stub to be overridden by ld.so */
int	dlctl(void *handle, int cmd, void *data) { return 0; }

/*
 * libc's signal wrappers hide SIGTHR; we need to call the real syscall
 * stubs _thread_sys_* directly.
 */
REDIRECT_SYSCALL(sigaction);
REDIRECT_SYSCALL(sigprocmask);
REDIRECT_SYSCALL(thrkill);

static int concurrency_level;	/* not used */

int _threads_ready;
int _post_threaded;
size_t _thread_pagesize;
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
_atomic_lock_t _thread_lock = _SPINLOCK_UNLOCKED;
static struct pthread_queue _thread_gc_list
    = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
static _atomic_lock_t _thread_gc_lock = _SPINLOCK_UNLOCKED;
static struct pthread _initial_thread;

struct pthread_attr _rthread_attr_default = {
	.stack_addr = NULL,
	.stack_size = RTHREAD_STACK_SIZE_DEF,
	/* .guard_size set in _rthread_init */
	.detach_state = PTHREAD_CREATE_JOINABLE,
	.contention_scope = PTHREAD_SCOPE_SYSTEM,
	.sched_policy = SCHED_OTHER,
	.sched_param = { .sched_priority = 0 },
	.sched_inherit = PTHREAD_INHERIT_SCHED,
};

/*
 * internal support functions
 */

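/*
 * Entry point for every new thread: run the start routine and pass its
 * return value to pthread_exit() if it returns normally.
 */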
static void
_rthread_start(void *v)
{
	pthread_t thread = v;
	void *retval;

	retval = thread->fn(thread->arg);
	pthread_exit(retval);
}

static void
sigthr_handler(__unused int sig)
{
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;

	/*
	 * Do nothing unless
	 * 1) pthread_cancel() has been called on this thread,
	 * 2) cancelation is enabled for it, and
	 * 3) we're not already in cancelation processing
	 */
	if (!tib->tib_canceled || tib->tib_cantcancel)
		return;

	/*
	 * If delaying cancels inside complex ops (pthread_cond_wait,
	 * pthread_join, etc), just mark that this has happened to
	 * prevent a race with going to sleep
	 */
	if (tib->tib_cancel_point & CANCEL_POINT_DELAYED) {
		self->delayed_cancel = 1;
		return;
	}

	/*
	 * otherwise, if in a cancel point or async cancels are
	 * enabled, then exit
	 */
	if (tib->tib_cancel_point ||
	    (tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL))
		pthread_exit(PTHREAD_CANCELED);
}


/*
 * A few basic callbacks for libc.  The first couple are only used
 * on archs where there isn't a fast TCB_GET()
 */
#ifndef TCB_HAVE_MD_GET
static int *
multi_threaded_errnoptr(void)
{
	return (&TIB_GET()->tib_errno);
}

static void *
multi_threaded_tcb(void)
{
	return (TCB_GET());
}
#endif /* TCB_HAVE_MD_GET */

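/*
 * Queue an exited thread on the GC list; its stack and TIB are freed
 * later by _rthread_reaper() once the kernel has cleared its tid.
 */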
static void
_rthread_free(pthread_t thread)
{
	_spinlock(&_thread_gc_lock);
	TAILQ_INSERT_TAIL(&_thread_gc_list, thread, waiting);
	_spinunlock(&_thread_gc_lock);
}

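/*
 * libc callback run when a thread exits: unlink it from the global
 * thread list, then either queue it for reaping (if detached) or mark
 * it done and wake any joiner waiting on its semaphore.
 */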
static void
_thread_release(pthread_t thread)
{
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DONE;
		_spinunlock(&thread->flags_lock);
		_sem_post(&thread->donesem);
	}
}

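/*
 * Zero the per-thread storage associated with a given key in every
 * thread (installed as libc's tc_thread_key_zero callback below).
 */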
static void
_thread_key_zero(int key)
{
	pthread_t thread;
	struct rthread_storage *rs;

	LIST_FOREACH(thread, &_thread_list, threads) {
		for (rs = thread->local_storage; rs; rs = rs->next) {
			if (rs->keyid == key)
				rs->data = NULL;
		}
	}
}

void
_rthread_init(void)
{
	pthread_t thread = pthread_self();
	struct sigaction sa;

	if (_threads_ready)
		return;

	LIST_INSERT_HEAD(&_thread_list, thread, threads);

	_thread_pagesize = (size_t)sysconf(_SC_PAGESIZE);
	_rthread_attr_default.guard_size = _thread_pagesize;
	thread->attr = _rthread_attr_default;

	/* get libc to start using our callbacks */
	{
		struct thread_callbacks cb = { 0 };

#ifndef TCB_HAVE_MD_GET
		cb.tc_errnoptr = multi_threaded_errnoptr;
		cb.tc_tcb = multi_threaded_tcb;
#endif
		cb.tc_fork = _thread_fork;
		cb.tc_vfork = _thread_vfork;
		cb.tc_thread_release = _thread_release;
		cb.tc_thread_key_zero = _thread_key_zero;
		_thread_set_callbacks(&cb, sizeof(cb));
	}

#ifndef NO_PIC
	if (_DYNAMIC) {
		dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
	}
#endif

	/*
	 * Set the handler on the signal used for cancelation and
	 * suspension, and make sure it's unblocked
	 */
	memset(&sa, 0, sizeof(sa));
	sigemptyset(&sa.sa_mask);
	sa.sa_handler = sigthr_handler;
	sigaction(SIGTHR, &sa, NULL);
	sigaddset(&sa.sa_mask, SIGTHR);
	sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);

	_threads_ready = 1;

	_malloc_init(1);

	_rthread_debug(1, "rthread init\n");
}

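/*
 * Free the stack and TIB of any queued thread whose kernel tid has
 * been cleared (i.e. the kernel is done with it).  The GC lock is
 * dropped while freeing, so the scan restarts after each free.
 */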
static void
_rthread_reaper(void)
{
	pthread_t thread;

restart:
	_spinlock(&_thread_gc_lock);
	TAILQ_FOREACH(thread, &_thread_gc_list, waiting) {
		if (thread->tib->tib_tid != 0)
			continue;
		TAILQ_REMOVE(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
		if (thread != &_initial_thread) {
			_rthread_debug(3, "rthread reaping %p stack %p\n",
			    (void *)thread, (void *)thread->stack);
			_rthread_free_stack(thread->stack);
			_dl_free_tib(thread->tib, sizeof(*thread));
		} else {
			/* initial thread isn't part of TIB allocation */
			_rthread_debug(3, "rthread reaping %p (initial)\n",
			    (void *)thread);
			_dl_free_tib(thread->tib, 0);
		}
		goto restart;
	}
	_spinunlock(&_thread_gc_lock);
}

/*
 * real pthread functions
 */

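/*
 * pthread_join() is a (delayed) cancelation point: the wait on the
 * target's done semaphore may be interrupted by a cancel.  On success
 * the joiner collects the return value and hands the target to the
 * reaper.
 */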
int
pthread_join(pthread_t thread, void **retval)
{
	int e;
	struct tib *tib = TIB_GET();
	pthread_t self;
	PREP_CANCEL_POINT(tib);

	if (_post_threaded) {
#define GREATSCOTT "great scott! serious repercussions on future events!\n"
		write(2, GREATSCOTT, sizeof(GREATSCOTT) - 1);
		abort();
	}
	if (!_threads_ready)
		_rthread_init();
	self = tib->tib_thread;

	e = 0;
	ENTER_DELAYED_CANCEL_POINT(tib, self);
	if (thread == NULL)
		e = EINVAL;
	else if (thread == self)
		e = EDEADLK;
	else if (thread->flags & THREAD_DETACHED)
		e = EINVAL;
	else if ((e = _sem_wait(&thread->donesem, 0, NULL,
	    &self->delayed_cancel)) == 0) {
		if (retval)
			*retval = thread->retval;

		/*
		 * We should be the last one holding a ref to this thread,
		 * but someone stupid or evil might have detached it;
		 * in that case the thread will clean up after itself
		 */
		if ((thread->flags & THREAD_DETACHED) == 0)
			_rthread_free(thread);
	}

	LEAVE_CANCEL_POINT_INNER(tib, e);
	_rthread_reaper();
	return (e);
}

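/*
 * Detaching a running thread just sets the flag; _thread_release()
 * will queue it for reaping when it exits.  Detaching a thread that
 * has already finished queues it for reaping immediately.
 */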
int
pthread_detach(pthread_t thread)
{
	int rc = 0;

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		rc = EINVAL;
		_spinunlock(&thread->flags_lock);
	} else if (thread->flags & THREAD_DONE) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DETACHED;
		_spinunlock(&thread->flags_lock);
	}
	_rthread_reaper();
	return (rc);
}

int
pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg)
{
	extern int __isthreaded;
	struct tib *tib;
	pthread_t thread;
	struct __tfork param;
	int rc;

	if (!_threads_ready)
		_rthread_init();

	_rthread_reaper();

	tib = _dl_allocate_tib(sizeof(*thread));
	if (tib == NULL)
		return (ENOMEM);
	thread = tib->tib_thread;
	memset(thread, 0, sizeof(*thread));
	thread->tib = tib;
	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	thread->fn = start_routine;
	thread->arg = arg;
	tib->tib_tid = -1;

	thread->attr = attr != NULL ? *(*attr) : _rthread_attr_default;
	if (thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		pthread_t self = pthread_self();

		thread->attr.sched_policy = self->attr.sched_policy;
		thread->attr.sched_param = self->attr.sched_param;
	}
	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
		thread->flags |= THREAD_DETACHED;

	thread->stack = _rthread_alloc_stack(thread);
	if (!thread->stack) {
		rc = errno;
		goto fail1;
	}

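	/*
	 * Describe the new thread to the kernel: its TCB, where to
	 * store (and later clear) its tid, and the stack pointer it
	 * starts on.
	 */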
	param.tf_tcb = TIB_TO_TCB(tib);
	param.tf_tid = &tib->tib_tid;
	param.tf_stack = thread->stack->sp;

	_spinlock(&_thread_lock);
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_spinunlock(&_thread_lock);

	/* we're going to be multi-threaded real soon now */
	__isthreaded = 1;
	rc = __tfork_thread(&param, sizeof(param), _rthread_start, thread);
	if (rc != -1) {
		/* success */
		*threadp = thread;
		return (0);
	}

	rc = errno;

	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);
	_rthread_free_stack(thread->stack);
fail1:
	_dl_free_tib(tib, sizeof(*thread));

	return (rc);
}

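/*
 * SIGTHR is reserved for the cancelation/suspension machinery, so
 * refuse to deliver it on behalf of the application.
 */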
int
pthread_kill(pthread_t thread, int sig)
{
	struct tib *tib = thread->tib;

	if (sig == SIGTHR)
		return (EINVAL);
	if (thrkill(tib->tib_tid, sig, TIB_TO_TCB(tib)))
		return (errno);
	return (0);
}

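/*
 * Mark the target as canceled (unless it has already exited or is
 * dying) and, if its cancelation isn't disabled, poke it with SIGTHR
 * so it notices.
 */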
int
pthread_cancel(pthread_t thread)
{
	struct tib *tib = thread->tib;
	pid_t tid = tib->tib_tid;

	if (tib->tib_canceled == 0 && tid != 0 &&
	    (tib->tib_cantcancel & CANCEL_DYING) == 0) {
		tib->tib_canceled = 1;

		if ((tib->tib_cantcancel & CANCEL_DISABLED) == 0) {
			thrkill(tid, SIGTHR, TIB_TO_TCB(tib));
			return (0);
		}
	}
	return (0);
}

void
pthread_testcancel(void)
{
	struct tib *tib = TIB_GET();

	if (tib->tib_canceled && (tib->tib_cantcancel & CANCEL_DISABLED) == 0)
		pthread_exit(PTHREAD_CANCELED);
}

int
pthread_setcancelstate(int state, int *oldstatep)
{
	struct tib *tib = TIB_GET();
	int oldstate;

	oldstate = tib->tib_cantcancel & CANCEL_DISABLED ?
	    PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE;
	if (state == PTHREAD_CANCEL_ENABLE) {
		tib->tib_cantcancel &= ~CANCEL_DISABLED;
	} else if (state == PTHREAD_CANCEL_DISABLE) {
		tib->tib_cantcancel |= CANCEL_DISABLED;
	} else {
		return (EINVAL);
	}
	if (oldstatep)
		*oldstatep = oldstate;

	return (0);
}
DEF_STD(pthread_setcancelstate);

int
pthread_setcanceltype(int type, int *oldtypep)
{
	struct tib *tib = TIB_GET();
	int oldtype;

	oldtype = tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL ?
	    PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED;
	if (type == PTHREAD_CANCEL_DEFERRED) {
		tib->tib_thread_flags &= ~TIB_THREAD_ASYNC_CANCEL;
	} else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		tib->tib_thread_flags |= TIB_THREAD_ASYNC_CANCEL;
	} else {
		return (EINVAL);
	}
	if (oldtypep)
		*oldtypep = oldtype;

	return (0);
}

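/*
 * Cleanup handlers are kept on a per-thread singly linked list:
 * push prepends a handler, pop removes the most recent one and
 * optionally runs it.
 */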
void
pthread_cleanup_push(void (*fn)(void *), void *arg)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = calloc(1, sizeof(*clfn));
	if (!clfn)
		return;
	clfn->fn = fn;
	clfn->arg = arg;
	clfn->next = self->cleanup_fns;
	self->cleanup_fns = clfn;
}

void
pthread_cleanup_pop(int execute)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = self->cleanup_fns;
	if (clfn) {
		self->cleanup_fns = clfn->next;
		if (execute)
			clfn->fn(clfn->arg);
		free(clfn);
	}
}

int
pthread_getconcurrency(void)
{
	return (concurrency_level);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	concurrency_level = new_level;
	return (0);
}

/*
 * compat debug stuff
 */
void
_thread_dump_info(void)
{
	pthread_t thread;

	_spinlock(&_thread_lock);
	LIST_FOREACH(thread, &_thread_list, threads)
		printf("thread %d flags 0x%x name %s\n", thread->tib->tib_tid,
		    thread->tib->tib_thread_flags, thread->name);
	_spinunlock(&_thread_lock);
}

#ifndef NO_PIC
/*
 * _rthread_dl_lock() provides the locking for dlopen(), dlclose(), and
 * the function called via atexit() to invoke all destructors.  The latter
 * two call shared-object destructors, which may need to call dlclose(),
 * so this lock needs to permit recursive locking.
 * The specific code here was extracted from _rthread_mutex_lock() and
 * pthread_mutex_unlock() and simplified to use the static variables.
 */
void
_rthread_dl_lock(int what)
{
	static _atomic_lock_t lock = _SPINLOCK_UNLOCKED;
	static pthread_t owner = NULL;
	static struct pthread_queue lockers = TAILQ_HEAD_INITIALIZER(lockers);
	static int count = 0;

	if (what == 0) {
		pthread_t self = pthread_self();

		/* lock, possibly recursive */
		_spinlock(&lock);
		if (owner == NULL) {
			owner = self;
		} else if (owner != self) {
			TAILQ_INSERT_TAIL(&lockers, self, waiting);
			while (owner != self) {
				__thrsleep(self, 0, NULL, &lock, NULL);
				_spinlock(&lock);
			}
		}
		count++;
		_spinunlock(&lock);
	} else if (what == 1) {
		/* unlock, possibly recursive */
		if (--count == 0) {
			pthread_t next;

			_spinlock(&lock);
			owner = next = TAILQ_FIRST(&lockers);
			if (next != NULL)
				TAILQ_REMOVE(&lockers, next, waiting);
			_spinunlock(&lock);
			if (next != NULL)
				__thrwakeup(next, 1);
		}
	} else {
		/* reinit: used in child after fork to clear the queue */
		lock = _SPINLOCK_UNLOCKED;
		if (--count == 0)
			owner = NULL;
		TAILQ_INIT(&lockers);
	}
}
#endif