/*	$OpenBSD: rthread.c,v 1.94 2016/09/04 10:13:35 akfaew Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * The heart of rthreads.  Basic functions like creating and joining
 * threads.
 */

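/*
 * Typical use of the interface implemented below (illustrative sketch;
 * worker() is a hypothetical start routine, not part of this file):
 *
 *	void *worker(void *arg) { return arg; }
 *
 *	pthread_t t;
 *	void *res;
 *	if (pthread_create(&t, NULL, worker, (void *)42) == 0 &&
 *	    pthread_join(t, &res) == 0) {
 *		// res is now (void *)42
 *	}
 */
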
#include <sys/types.h>
#ifndef NO_PIC
#include <sys/exec_elf.h>
#pragma weak _DYNAMIC
#endif

#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <dlfcn.h>
#include <tib.h>

#include <pthread.h>

#include "cancel.h"		/* in libc/include */
#include "thread_private.h"
#include "rthread.h"
#include "rthread_cb.h"

/*
 * Call nonstandard functions via names in the reserved namespace:
 *	dlctl() -> _dlctl()
 *	getthrid -> _thread_sys_getthrid
 */
typeof(dlctl) dlctl asm("_dlctl") __attribute__((weak));
REDIRECT_SYSCALL(getthrid);

/* weak stub to be overridden by ld.so */
int	dlctl(void *handle, int cmd, void *data) { return 0; }

/*
 * libc's signal wrappers hide SIGTHR; we need to call the real syscall
 * stubs _thread_sys_* directly.
 */
REDIRECT_SYSCALL(sigaction);
REDIRECT_SYSCALL(sigprocmask);
REDIRECT_SYSCALL(thrkill);

static int concurrency_level;	/* not used */

int _threads_ready;
size_t _thread_pagesize;
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
_atomic_lock_t _thread_lock = _SPINLOCK_UNLOCKED;
static struct pthread_queue _thread_gc_list
    = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
static _atomic_lock_t _thread_gc_lock = _SPINLOCK_UNLOCKED;
static struct pthread _initial_thread;

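/*
 * Defaults used when pthread_create() is passed a NULL attr;
 * guard_size is filled in by _rthread_init() once the page size is known.
 */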
struct pthread_attr _rthread_attr_default = {
	.stack_addr			= NULL,
	.stack_size			= RTHREAD_STACK_SIZE_DEF,
/*	.guard_size		set in _rthread_init */
	.detach_state			= PTHREAD_CREATE_JOINABLE,
	.contention_scope		= PTHREAD_SCOPE_SYSTEM,
	.sched_policy			= SCHED_OTHER,
	.sched_param = { .sched_priority = 0 },
	.sched_inherit			= PTHREAD_INHERIT_SCHED,
};

/*
 * internal support functions
 */
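/*
 * _atomic_lock() is the MD test-and-set primitive; _spinlock() retries
 * it, calling sched_yield() between attempts, while _spinlocktry()
 * gives up after a single try.
 */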
void
_spinlock(volatile _atomic_lock_t *lock)
{
	while (_atomic_lock(lock))
		sched_yield();
}

int
_spinlocktry(volatile _atomic_lock_t *lock)
{
	return 0 == _atomic_lock(lock);
}

void
_spinunlock(volatile _atomic_lock_t *lock)
{
	*lock = _ATOMIC_LOCK_UNLOCKED;
}

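/*
 * Trampoline passed to __tfork_thread(): run the start routine, then
 * let pthread_exit() handle cleanup handlers, TLS destructors and the
 * joinable/detached bookkeeping.
 */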
static void
_rthread_start(void *v)
{
	pthread_t thread = v;
	void *retval;

	retval = thread->fn(thread->arg);
	pthread_exit(retval);
}

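/*
 * Handler for SIGTHR: pthread_cancel() delivers this signal to its
 * target so a running or blocked thread notices the pending cancel.
 */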
static void
sigthr_handler(__unused int sig)
{
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;

	/*
	 * Do nothing unless
	 * 1) pthread_cancel() has been called on this thread,
	 * 2) cancelation is enabled for it, and
	 * 3) we're not already in cancelation processing
	 */
	if (!tib->tib_canceled || tib->tib_cantcancel)
		return;

	/*
	 * If delaying cancels inside complex ops (pthread_cond_wait,
	 * pthread_join, etc.), just mark that this has happened to
	 * prevent a race with going to sleep
	 */
	if (tib->tib_cancel_point & CANCEL_POINT_DELAYED) {
		self->delayed_cancel = 1;
		return;
	}

	/*
	 * otherwise, if in a cancel point or async cancels are
	 * enabled, then exit
	 */
	if (tib->tib_cancel_point ||
	    (tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL))
		pthread_exit(PTHREAD_CANCELED);
}


/*
 * A few basic callbacks for libc.  The first couple are only used
 * on archs where there isn't a fast TCB_GET()
 */
#ifndef TCB_HAVE_MD_GET
static int *
multi_threaded_errnoptr(void)
{
	return (&TIB_GET()->tib_errno);
}

static void *
multi_threaded_tcb(void)
{
	return (TCB_GET());
}
#endif /* TCB_HAVE_MD_GET */

void
_thread_canceled(void)
{
	pthread_exit(PTHREAD_CANCELED);
}

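/*
 * One-time setup, run lazily the first time an entry point below finds
 * _threads_ready unset: wire the initial thread to its TIB, hand libc
 * the threaded callbacks, point ld.so at _rthread_dl_lock() for
 * dlopen()/dlclose() locking, and install the SIGTHR handler used for
 * cancelation and suspension.
 */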
void
_rthread_init(void)
{
	pthread_t thread = &_initial_thread;
	struct tib *tib;
	struct sigaction sa;

	tib = TIB_GET();
	tib->tib_thread = thread;
	thread->tib = tib;

	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	tib->tib_thread_flags = TIB_THREAD_INITIAL_STACK;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	strlcpy(thread->name, "Main process", sizeof(thread->name));
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_rthread_debug_init();

	_thread_pagesize = (size_t)sysconf(_SC_PAGESIZE);
	_rthread_attr_default.guard_size = _thread_pagesize;
	thread->attr = _rthread_attr_default;

	/* get libc to start using our callbacks */
	{
		struct thread_callbacks cb = { 0 };

#ifndef TCB_HAVE_MD_GET
		cb.tc_errnoptr		= multi_threaded_errnoptr;
		cb.tc_tcb		= multi_threaded_tcb;
#endif
		cb.tc_canceled		= _thread_canceled;
		cb.tc_flockfile		= _thread_flockfile;
		cb.tc_ftrylockfile	= _thread_ftrylockfile;
		cb.tc_funlockfile	= _thread_funlockfile;
		cb.tc_malloc_lock	= _thread_malloc_lock;
		cb.tc_malloc_unlock	= _thread_malloc_unlock;
		cb.tc_atexit_lock	= _thread_atexit_lock;
		cb.tc_atexit_unlock	= _thread_atexit_unlock;
		cb.tc_atfork_lock	= _thread_atfork_lock;
		cb.tc_atfork_unlock	= _thread_atfork_unlock;
		cb.tc_arc4_lock		= _thread_arc4_lock;
		cb.tc_arc4_unlock	= _thread_arc4_unlock;
		cb.tc_mutex_lock	= _thread_mutex_lock;
		cb.tc_mutex_unlock	= _thread_mutex_unlock;
		cb.tc_mutex_destroy	= _thread_mutex_destroy;
		cb.tc_tag_lock		= _thread_tag_lock;
		cb.tc_tag_unlock	= _thread_tag_unlock;
		cb.tc_tag_storage	= _thread_tag_storage;
		cb.tc_fork		= _thread_fork;
		cb.tc_vfork		= _thread_vfork;
		_thread_set_callbacks(&cb, sizeof(cb));
	}

#ifndef NO_PIC
	if (_DYNAMIC) {
		dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
	}
#endif

	/*
	 * Set the handler on the signal used for cancelation and
	 * suspension, and make sure it's unblocked
	 */
	memset(&sa, 0, sizeof(sa));
	sigemptyset(&sa.sa_mask);
	sa.sa_handler = sigthr_handler;
	sigaction(SIGTHR, &sa, NULL);
	sigaddset(&sa.sa_mask, SIGTHR);
	sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);

	_threads_ready = 1;

	_malloc_init(1);

	_rthread_debug(1, "rthread init\n");
}

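/*
 * A thread cannot unmap its own stack, so dead threads are queued here
 * and their stack and TIB are freed later by _rthread_reaper().
 */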
static void
_rthread_free(pthread_t thread)
{
	_spinlock(&_thread_gc_lock);
	TAILQ_INSERT_TAIL(&_thread_gc_list, thread, waiting);
	_spinunlock(&_thread_gc_lock);
}

/*
 * real pthread functions
 */
pthread_t
pthread_self(void)
{
	if (!_threads_ready)
		_rthread_init();

	return (TIB_GET()->tib_thread);
}
DEF_STD(pthread_self);

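/*
 * Free the stacks and TIBs of threads queued by _rthread_free().  The
 * kernel clears tib_tid (the address handed to __threxit()) once a
 * thread has fully exited, so a non-zero tid means the thread is still
 * running on its stack and must be skipped for now.
 */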
static void
_rthread_reaper(void)
{
	pthread_t thread;

restart:
	_spinlock(&_thread_gc_lock);
	TAILQ_FOREACH(thread, &_thread_gc_list, waiting) {
		if (thread->tib->tib_tid != 0)
			continue;
		TAILQ_REMOVE(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
		if (thread != &_initial_thread) {
			_rthread_debug(3, "rthread reaping %p stack %p\n",
			    (void *)thread, (void *)thread->stack);
			_rthread_free_stack(thread->stack);
			_dl_free_tib(thread->tib, sizeof(*thread));
		} else {
			/* initial thread isn't part of TIB allocation */
			_rthread_debug(3, "rthread reaping %p (initial)\n",
			    (void *)thread);
			_dl_free_tib(thread->tib, 0);
		}
		goto restart;
	}
	_spinunlock(&_thread_gc_lock);
}

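/*
 * Run the cleanup handlers and TLS destructors, then either hand the
 * dead thread straight to the reaper (detached) or post donesem so a
 * joiner can pick up the return value.
 */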
void
pthread_exit(void *retval)
{
	struct rthread_cleanup_fn *clfn;
	struct tib *tib = TIB_GET();
	pthread_t thread;

	if (!_threads_ready)
		_rthread_init();
	thread = tib->tib_thread;

	if (tib->tib_cantcancel & CANCEL_DYING) {
		/*
		 * Called pthread_exit() from destructor or cancelation
		 * handler: blow up.  XXX write something to stderr?
		 */
		abort();
		//_exit(42);
	}

	tib->tib_cantcancel |= CANCEL_DYING;

	thread->retval = retval;

	for (clfn = thread->cleanup_fns; clfn; ) {
		struct rthread_cleanup_fn *oclfn = clfn;
		clfn = clfn->next;
		oclfn->fn(oclfn->arg);
		free(oclfn);
	}
	_rthread_tls_destructors(thread);
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DONE;
		_spinunlock(&thread->flags_lock);
		_sem_post(&thread->donesem);
	}

	__threxit(&tib->tib_tid);
	for(;;);
}
DEF_STD(pthread_exit);

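/*
 * Joining is a cancelation point.  The delayed-cancel machinery lets a
 * SIGTHR that arrives while we are blocked in _sem_wait() be recorded
 * in self->delayed_cancel instead of being lost before the sleep.
 */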
int
pthread_join(pthread_t thread, void **retval)
{
	int e;
	struct tib *tib = TIB_GET();
	pthread_t self;
	PREP_CANCEL_POINT(tib);

	if (!_threads_ready)
		_rthread_init();
	self = tib->tib_thread;

	e = 0;
	ENTER_DELAYED_CANCEL_POINT(tib, self);
	if (thread == NULL)
		e = EINVAL;
	else if (thread == self)
		e = EDEADLK;
	else if (thread->flags & THREAD_DETACHED)
		e = EINVAL;
	else if ((e = _sem_wait(&thread->donesem, 0, NULL,
	    &self->delayed_cancel)) == 0) {
		if (retval)
			*retval = thread->retval;

		/*
		 * We should be the last one holding a ref to this thread,
		 * but someone stupid or evil might have detached it;
		 * in that case the thread will clean up after itself
		 */
		if ((thread->flags & THREAD_DETACHED) == 0)
			_rthread_free(thread);
	}

	LEAVE_CANCEL_POINT_INNER(tib, e);
	_rthread_reaper();
	return (e);
}

int
pthread_detach(pthread_t thread)
{
	int rc = 0;

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		rc = EINVAL;
		_spinunlock(&thread->flags_lock);
	} else if (thread->flags & THREAD_DONE) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DETACHED;
		_spinunlock(&thread->flags_lock);
	}
	_rthread_reaper();
	return (rc);
}

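/*
 * The TIB and struct pthread for a new thread are allocated together
 * with _dl_allocate_tib(); the kernel thread is then created with
 * __tfork_thread(), which starts the child in _rthread_start() on the
 * freshly allocated stack.
 */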
int
pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg)
{
	extern int __isthreaded;
	struct tib *tib;
	pthread_t thread;
	struct __tfork param;
	int rc;

	if (!_threads_ready)
		_rthread_init();

	_rthread_reaper();

	tib = _dl_allocate_tib(sizeof(*thread));
	if (tib == NULL)
		return (ENOMEM);
	thread = tib->tib_thread;
	memset(thread, 0, sizeof(*thread));
	thread->tib = tib;
	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	thread->fn = start_routine;
	thread->arg = arg;
	tib->tib_tid = -1;

	thread->attr = attr != NULL ? *(*attr) : _rthread_attr_default;
	if (thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		pthread_t self = pthread_self();

		thread->attr.sched_policy = self->attr.sched_policy;
		thread->attr.sched_param = self->attr.sched_param;
	}
	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
		thread->flags |= THREAD_DETACHED;

	thread->stack = _rthread_alloc_stack(thread);
	if (!thread->stack) {
		rc = errno;
		goto fail1;
	}

	param.tf_tcb = TIB_TO_TCB(tib);
	param.tf_tid = &tib->tib_tid;
	param.tf_stack = thread->stack->sp;

	_spinlock(&_thread_lock);
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_spinunlock(&_thread_lock);

	/* we're going to be multi-threaded real soon now */
	__isthreaded = 1;
	rc = __tfork_thread(&param, sizeof(param), _rthread_start, thread);
	if (rc != -1) {
		/* success */
		*threadp = thread;
		return (0);
	}

	rc = errno;

	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);
	_rthread_free_stack(thread->stack);
fail1:
	_dl_free_tib(tib, sizeof(*thread));

	return (rc);
}

int
pthread_kill(pthread_t thread, int sig)
{
	struct tib *tib = thread->tib;

	if (sig == SIGTHR)
		return (EINVAL);
	if (thrkill(tib->tib_tid, sig, TIB_TO_TCB(tib)))
		return (errno);
	return (0);
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}

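/*
 * Cancelation is asynchronous from the caller's point of view: mark the
 * target as canceled and, unless it has cancelation disabled, poke it
 * with SIGTHR so sigthr_handler() can act at the next opportunity.
 */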
int
pthread_cancel(pthread_t thread)
{
	struct tib *tib = thread->tib;
	pid_t tid = tib->tib_tid;

	if (tib->tib_canceled == 0 && tid != 0 &&
	    (tib->tib_cantcancel & CANCEL_DYING) == 0) {
		tib->tib_canceled = 1;

		if ((tib->tib_cantcancel & CANCEL_DISABLED) == 0) {
			thrkill(tid, SIGTHR, TIB_TO_TCB(tib));
			return (0);
		}
	}
	return (0);
}

void
pthread_testcancel(void)
{
	struct tib *tib = TIB_GET();

	if (tib->tib_canceled && (tib->tib_cantcancel & CANCEL_DISABLED) == 0)
		pthread_exit(PTHREAD_CANCELED);
}

int
pthread_setcancelstate(int state, int *oldstatep)
{
	struct tib *tib = TIB_GET();
	int oldstate;

	oldstate = tib->tib_cantcancel & CANCEL_DISABLED ?
	    PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE;
	if (state == PTHREAD_CANCEL_ENABLE) {
		tib->tib_cantcancel &= ~CANCEL_DISABLED;
	} else if (state == PTHREAD_CANCEL_DISABLE) {
		tib->tib_cantcancel |= CANCEL_DISABLED;
	} else {
		return (EINVAL);
	}
	if (oldstatep)
		*oldstatep = oldstate;

	return (0);
}
DEF_STD(pthread_setcancelstate);

int
pthread_setcanceltype(int type, int *oldtypep)
{
	struct tib *tib = TIB_GET();
	int oldtype;

	oldtype = tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL ?
	    PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED;
	if (type == PTHREAD_CANCEL_DEFERRED) {
		tib->tib_thread_flags &= ~TIB_THREAD_ASYNC_CANCEL;
	} else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		tib->tib_thread_flags |= TIB_THREAD_ASYNC_CANCEL;
	} else {
		return (EINVAL);
	}
	if (oldtypep)
		*oldtypep = oldtype;

	return (0);
}

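/*
 * pthread_cleanup_push()/pop() are implemented as functions here,
 * keeping a per-thread singly linked list of handlers; pthread_exit()
 * runs them most-recently-pushed first.
 */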
void
pthread_cleanup_push(void (*fn)(void *), void *arg)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = calloc(1, sizeof(*clfn));
	if (!clfn)
		return;
	clfn->fn = fn;
	clfn->arg = arg;
	clfn->next = self->cleanup_fns;
	self->cleanup_fns = clfn;
}

void
pthread_cleanup_pop(int execute)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = self->cleanup_fns;
	if (clfn) {
		self->cleanup_fns = clfn->next;
		if (execute)
			clfn->fn(clfn->arg);
		free(clfn);
	}
}

int
pthread_getconcurrency(void)
{
	return (concurrency_level);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	concurrency_level = new_level;
	return (0);
}

/*
 * compat debug stuff
 */
void
_thread_dump_info(void)
{
	pthread_t thread;

	_spinlock(&_thread_lock);
	LIST_FOREACH(thread, &_thread_list, threads)
		printf("thread %d flags 0x%x name %s\n", thread->tib->tib_tid,
		    thread->tib->tib_thread_flags, thread->name);
	_spinunlock(&_thread_lock);
}

#ifndef NO_PIC
/*
 * _rthread_dl_lock() provides the locking for dlopen(), dlclose(), and
 * the function called via atexit() to invoke all destructors.  The latter
 * two call shared-object destructors, which may need to call dlclose(),
 * so this lock needs to permit recursive locking.
 * The specific code here was extracted from _rthread_mutex_lock() and
 * pthread_mutex_unlock() and simplified to use the static variables.
 */
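/* what == 0: lock, what == 1: unlock, anything else: reinit after fork */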
void
_rthread_dl_lock(int what)
{
	static _atomic_lock_t lock = _SPINLOCK_UNLOCKED;
	static pthread_t owner = NULL;
	static struct pthread_queue lockers = TAILQ_HEAD_INITIALIZER(lockers);
	static int count = 0;

	if (what == 0) {
		pthread_t self = pthread_self();

		/* lock, possibly recursive */
		_spinlock(&lock);
		if (owner == NULL) {
			owner = self;
		} else if (owner != self) {
			TAILQ_INSERT_TAIL(&lockers, self, waiting);
			while (owner != self) {
				__thrsleep(self, 0, NULL, &lock, NULL);
				_spinlock(&lock);
			}
		}
		count++;
		_spinunlock(&lock);
	} else if (what == 1) {
		/* unlock, possibly recursive */
		if (--count == 0) {
			pthread_t next;

			_spinlock(&lock);
			owner = next = TAILQ_FIRST(&lockers);
			if (next != NULL)
				TAILQ_REMOVE(&lockers, next, waiting);
			_spinunlock(&lock);
			if (next != NULL)
				__thrwakeup(next, 1);
		}
	} else {
		/* reinit: used in child after fork to clear the queue */
		lock = _SPINLOCK_UNLOCKED;
		if (--count == 0)
			owner = NULL;
		TAILQ_INIT(&lockers);
	}
}
#endif