xref: /openbsd/lib/librthread/rthread.c (revision 3d8817e4)
1 /*	$OpenBSD: rthread.c,v 1.42 2009/11/27 19:45:54 guenther Exp $ */
2 /*
3  * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
4  * All Rights Reserved.
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 /*
19  * The heart of rthreads.  Basic functions like creating and joining
20  * threads.
21  */
22 
23 #include <sys/param.h>
24 #include <sys/event.h>
25 #include <sys/mman.h>
26 #include <sys/wait.h>
27 
28 #include <machine/spinlock.h>
29 
30 #include <fcntl.h>
31 #include <stdlib.h>
32 #include <unistd.h>
33 #include <signal.h>
34 #include <stdio.h>
35 #include <string.h>
36 #include <err.h>
37 #include <errno.h>
38 #include <dlfcn.h>
39 
40 #include <pthread.h>
41 
42 #include "thread_private.h"	/* in libc/include */
43 #include "rthread.h"
44 
static int concurrency_level;	/* not used */

/* nonzero once _rthread_init() has completed */
int _threads_ready;
/* all live threads; protected by _thread_lock */
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
_spinlock_lock_t _thread_lock = _SPINLOCK_UNLOCKED;
/* exited threads awaiting reaping; protected by _thread_gc_lock */
static struct pthread_queue _thread_gc_list
    = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
static _spinlock_lock_t _thread_gc_lock = _SPINLOCK_UNLOCKED;
/* describes the original (main) process; never freed */
struct pthread _initial_thread;

/* low-level thread creation primitive, defined outside this file */
int rfork_thread(int, void *, void (*)(void *), void *);
56 
57 /*
58  * internal support functions
59  */
60 void
61 _spinlock(_spinlock_lock_t *lock)
62 {
63 
64 	while (_atomic_lock(lock))
65 		pthread_yield();
66 }
67 
68 void
69 _spinunlock(_spinlock_lock_t *lock)
70 {
71 
72 	*lock = _SPINLOCK_UNLOCKED;
73 }
74 
75 static pthread_t
76 _rthread_findself(void)
77 {
78 	pthread_t me;
79 	pid_t tid = getthrid();
80 
81 	LIST_FOREACH(me, &_thread_list, threads)
82 		if (me->tid == tid)
83 			break;
84 
85 	return (me);
86 }
87 
88 
/*
 * Entry point for every new thread: run the user's start routine and
 * turn its return value into a pthread_exit().
 */
static void
_rthread_start(void *v)
{
	pthread_t thread = v;
	void *retval;

	/* ensure parent returns from rfork, sets up tid */
	/*
	 * The creator holds _thread_lock across rfork_thread() until it has
	 * stored our tid (see pthread_create()); taking and dropping the
	 * lock here stalls us until that setup is complete.
	 */
	_spinlock(&_thread_lock);
	_spinunlock(&_thread_lock);
	retval = thread->fn(thread->arg);
	pthread_exit(retval);
}
101 
/*
 * One-time library initialization: register the main process as the
 * initial thread, flip libc into threaded mode and, when dynamically
 * linked, hand our lock callbacks to ld.so.  Returns 0 (the only
 * result this version produces; callers still check it).
 */
static int
_rthread_init(void)
{
	pthread_t thread = &_initial_thread;
	extern int __isthreaded;	/* libc's "threads are active" flag */

	thread->tid = getthrid();
	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	strlcpy(thread->name, "Main process", sizeof(thread->name));
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_rthread_debug_init();
	_rthread_debug(1, "rthread init\n");
	_threads_ready = 1;
	__isthreaded = 1;

#if defined(__ELF__) && defined(PIC)
	/*
	 * To avoid recursion problems in ld.so, we need to trigger the
	 * functions once to fully bind them before registering them
	 * for use.
	 */
	_rthread_dl_lock(0);
	_rthread_dl_lock(1);
	_rthread_bind_lock(0);
	_rthread_bind_lock(1);
	dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
	dlctl(NULL, DL_SETBINDLCK, _rthread_bind_lock);
#endif

	return (0);
}
135 
/*
 * Queue a dead thread structure for the reaper.  The struct is poisoned
 * to catch stale accesses, but stack and tid survive: the reaper needs
 * the stack pointer to free it, and it polls tid to know when the
 * kernel is finished with the thread (see _rthread_reaper()).
 */
static void
_rthread_free(pthread_t thread)
{
	/* catch wrongdoers for the moment */
	/* initial_thread.tid must remain valid */
	if (thread != &_initial_thread) {
		struct stack *stack = thread->stack;
		pid_t tid = thread->tid;

		/* catch wrongdoers for the moment */
		memset(thread, 0xd0, sizeof(*thread));
		thread->stack = stack;
		thread->tid = tid;
		_spinlock(&_thread_gc_lock);
		TAILQ_INSERT_TAIL(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
	}
}
154 
/* atomically set flag bit(s) in thread->flags under its flags lock */
static void
_rthread_setflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags |= flag;
	_spinunlock(&thread->flags_lock);
}
162 
/* atomically clear flag bit(s) in thread->flags under its flags lock */
static void
_rthread_clearflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags &= ~flag;
	_spinunlock(&thread->flags_lock);
}
170 
171 /*
172  * real pthread functions
173  */
174 pthread_t
175 pthread_self(void)
176 {
177 	pthread_t thread;
178 
179 	if (!_threads_ready)
180 		if (_rthread_init())
181 			return (NULL);
182 
183 	_spinlock(&_thread_lock);
184 	thread = _rthread_findself();
185 	_spinunlock(&_thread_lock);
186 
187 	return (thread);
188 }
189 
/*
 * Free stacks and structs of threads queued by _rthread_free().  An
 * entry is only reclaimed once its tid reads 0 — presumably cleared
 * via the pointer handed to threxit() in pthread_exit(); confirm
 * against threxit(2).  The gc lock is dropped before freeing (the
 * free may be slow) and the scan restarted from the top afterwards,
 * since the list may have changed meanwhile.
 */
static void
_rthread_reaper(void)
{
	pthread_t thread;

restart:_spinlock(&_thread_gc_lock);
	TAILQ_FOREACH(thread, &_thread_gc_list, waiting) {
		if (thread->tid != 0)
			continue;
		TAILQ_REMOVE(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
		_rthread_debug(3, "rthread reaping %p stack %p\n",
		    (void *)thread, (void *)thread->stack);
		_rthread_free_stack(thread->stack);
		free(thread);
		goto restart;
	}
	_spinunlock(&_thread_gc_lock);
}
209 
/*
 * Terminate the calling thread: run cleanup handlers (LIFO), run TLS
 * destructors, unlink from the global thread list, then either queue
 * ourselves for the reaper (detached) or signal a joiner via donesem.
 * Finally threxit() — which never returns — with &thread->tid, so the
 * tid slot can be cleared when the kernel is done with us (the reaper
 * polls it; see _rthread_reaper()).
 */
void
pthread_exit(void *retval)
{
	struct rthread_cleanup_fn *clfn;
	pid_t tid;
	struct stack *stack;
	pthread_t thread = pthread_self();

	thread->retval = retval;

	/* pop and run cleanup handlers, most recently pushed first */
	for (clfn = thread->cleanup_fns; clfn; ) {
		struct rthread_cleanup_fn *oclfn = clfn;
		clfn = clfn->next;
		oclfn->fn(oclfn->arg);
		free(oclfn);
	}
	_rthread_tls_destructors(thread);
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);

	stack = thread->stack;
	tid = thread->tid;
	if (thread->flags & THREAD_DETACHED)
		/* no joiner will ever come; hand ourselves to the reaper.
		 * _rthread_free() preserves tid, so &thread->tid below
		 * stays valid. */
		_rthread_free(thread);
	else {
		/* wake a (current or future) pthread_join() */
		_rthread_setflag(thread, THREAD_DONE);
		_sem_post(&thread->donesem);
	}

	threxit(&thread->tid);
	for(;;);	/* not reached */
}
243 
/*
 * Wait for THREAD to terminate and optionally collect its return
 * value.  Errors: EINVAL for a NULL or detached thread, EDEADLK for
 * joining oneself.  On success the target's resources are released
 * (queued to the reaper).
 */
int
pthread_join(pthread_t thread, void **retval)
{
	int e;

	if (thread == NULL)
		e = EINVAL;
	else if (thread->tid == getthrid())
		e = EDEADLK;
	else if (thread->flags & THREAD_DETACHED)
		e = EINVAL;
	else {
		/* block until pthread_exit() posts donesem */
		_sem_wait(&thread->donesem, 0);
		if (retval)
			*retval = thread->retval;
		e = 0;
		/* We should be the last having a ref to this thread, but
		 * someone stupid or evil might haved detached it;
		 * in that case the thread will cleanup itself */
		if ((thread->flags & THREAD_DETACHED) == 0)
			_rthread_free(thread);
	}

	/* opportunistically collect any already-dead threads */
	_rthread_reaper();
	return (e);
}
270 
/*
 * Mark THREAD detached so its resources are reclaimed without a join.
 * Returns EINVAL if already detached.  If the thread has already
 * finished (THREAD_DONE), free it right away instead of just setting
 * the flag.  The flags lock is released on each branch before any
 * call that might block.
 */
int
pthread_detach(pthread_t thread)
{
	int rc = 0;

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		rc = EINVAL;
		_spinunlock(&thread->flags_lock);
	} else if (thread->flags & THREAD_DONE) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DETACHED;
		_spinunlock(&thread->flags_lock);
	}
	/* opportunistically collect any already-dead threads */
	_rthread_reaper();
	return (rc);
}
290 
291 int
292 pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
293     void *(*start_routine)(void *), void *arg)
294 {
295 	pthread_t thread;
296 	pid_t tid;
297 	int rc = 0;
298 
299 	if (!_threads_ready)
300 		if ((rc = _rthread_init()))
301 		    return (rc);
302 
303 	_rthread_reaper();
304 
305 	thread = calloc(1, sizeof(*thread));
306 	if (!thread)
307 		return (errno);
308 	thread->donesem.lock = _SPINLOCK_UNLOCKED;
309 	thread->flags_lock = _SPINLOCK_UNLOCKED;
310 	thread->fn = start_routine;
311 	thread->arg = arg;
312 	if (attr)
313 		thread->attr = *(*attr);
314 	else {
315 		thread->attr.stack_size = RTHREAD_STACK_SIZE_DEF;
316 		thread->attr.guard_size = sysconf(_SC_PAGESIZE);
317 		thread->attr.stack_size -= thread->attr.guard_size;
318 	}
319 	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
320 		thread->flags |= THREAD_DETACHED;
321 
322 	_spinlock(&_thread_lock);
323 
324 	thread->stack = _rthread_alloc_stack(thread);
325 	if (!thread->stack) {
326 		rc = errno;
327 		goto fail1;
328 	}
329 	LIST_INSERT_HEAD(&_thread_list, thread, threads);
330 
331 	tid = rfork_thread(RFPROC | RFTHREAD | RFMEM | RFNOWAIT,
332 	    thread->stack->sp, _rthread_start, thread);
333 	if (tid == -1) {
334 		rc = errno;
335 		goto fail2;
336 	}
337 	/* new thread will appear _rthread_start */
338 	thread->tid = tid;
339 	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;
340 	*threadp = thread;
341 
342 	/*
343 	 * Since _rthread_start() aquires the thread lock and due to the way
344 	 * signal delivery is implemented, this is not a race.
345 	 */
346 	if (thread->attr.create_suspended)
347 		kill(thread->tid, SIGSTOP);
348 
349 	_spinunlock(&_thread_lock);
350 
351 	return (0);
352 
353 fail2:
354 	_rthread_free_stack(thread->stack);
355 	LIST_REMOVE(thread, threads);
356 fail1:
357 	_spinunlock(&_thread_lock);
358 	_rthread_free(thread);
359 
360 	return (rc);
361 }
362 
363 int
364 pthread_kill(pthread_t thread, int sig)
365 {
366 	return (kill(thread->tid, sig));
367 }
368 
/*
 * Two thread handles name the same thread exactly when they compare
 * equal; return nonzero in that case, 0 otherwise.
 */
int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2 ? 1 : 0);
}
374 
/*
 * Request cancellation of THREAD.  Only sets the flag; the target acts
 * on it at its next cancellation point (see pthread_testcancel()).
 * Always returns 0.
 */
int
pthread_cancel(pthread_t thread)
{

	_rthread_setflag(thread, THREAD_CANCELLED);
	return (0);
}
382 
/*
 * Cancellation point: exit with PTHREAD_CANCELED if this thread has
 * been cancelled AND currently has cancellation enabled.
 */
void
pthread_testcancel(void)
{
	if ((pthread_self()->flags & (THREAD_CANCELLED|THREAD_CANCEL_ENABLE)) ==
	    (THREAD_CANCELLED|THREAD_CANCEL_ENABLE))
		pthread_exit(PTHREAD_CANCELED);

}
391 
392 int
393 pthread_setcancelstate(int state, int *oldstatep)
394 {
395 	pthread_t self = pthread_self();
396 	int oldstate;
397 
398 	oldstate = self->flags & THREAD_CANCEL_ENABLE ?
399 	    PTHREAD_CANCEL_ENABLE : PTHREAD_CANCEL_DISABLE;
400 	if (state == PTHREAD_CANCEL_ENABLE) {
401 		_rthread_setflag(self, THREAD_CANCEL_ENABLE);
402 		pthread_testcancel();
403 	} else if (state == PTHREAD_CANCEL_DISABLE) {
404 		_rthread_clearflag(self, THREAD_CANCEL_ENABLE);
405 	} else {
406 		return (EINVAL);
407 	}
408 	if (oldstatep)
409 		*oldstatep = oldstate;
410 
411 	return (0);
412 }
413 
414 int
415 pthread_setcanceltype(int type, int *oldtypep)
416 {
417 	pthread_t self = pthread_self();
418 	int oldtype;
419 
420 	oldtype = self->flags & THREAD_CANCEL_DEFERRED ?
421 	    PTHREAD_CANCEL_DEFERRED : PTHREAD_CANCEL_ASYNCHRONOUS;
422 	if (type == PTHREAD_CANCEL_DEFERRED) {
423 		_rthread_setflag(self, THREAD_CANCEL_DEFERRED);
424 		pthread_testcancel();
425 	} else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
426 		_rthread_clearflag(self, THREAD_CANCEL_DEFERRED);
427 	} else {
428 		return (EINVAL);
429 	}
430 	if (oldtypep)
431 		*oldtypep = oldtype;
432 
433 	return (0);
434 }
435 
436 void
437 pthread_cleanup_push(void (*fn)(void *), void *arg)
438 {
439 	struct rthread_cleanup_fn *clfn;
440 	pthread_t self = pthread_self();
441 
442 	clfn = calloc(1, sizeof(*clfn));
443 	if (!clfn)
444 		return;
445 	clfn->fn = fn;
446 	clfn->arg = arg;
447 	clfn->next = self->cleanup_fns;
448 	self->cleanup_fns = clfn;
449 }
450 
451 void
452 pthread_cleanup_pop(int execute)
453 {
454 	struct rthread_cleanup_fn *clfn;
455 	pthread_t self = pthread_self();
456 
457 	clfn = self->cleanup_fns;
458 	if (clfn) {
459 		self->cleanup_fns = clfn->next;
460 		if (execute)
461 			clfn->fn(clfn->arg);
462 		free(clfn);
463 	}
464 }
465 
/* report the level stored by pthread_setconcurrency(); purely advisory */
int
pthread_getconcurrency(void)
{
	return (concurrency_level);
}
471 
/*
 * Record the desired concurrency level; this implementation only
 * stores it (see concurrency_level above).  EINVAL if negative.
 */
int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	concurrency_level = new_level;
	return (0);
}
480 
481 /*
482  * compat debug stuff
483  */
/*
 * Debug helper: print tid, flags and name of every thread in
 * _thread_list to stdout, holding _thread_lock across the walk.
 */
void
_thread_dump_info(void)
{
	pthread_t thread;

	_spinlock(&_thread_lock);
	LIST_FOREACH(thread, &_thread_list, threads)
		printf("thread %d flags %d name %s\n",
		    thread->tid, thread->flags, thread->name);
	_spinunlock(&_thread_lock);
}
495 
#if defined(__ELF__) && defined(PIC)
/*
 * Lock callbacks registered with ld.so via dlctl() in _rthread_init():
 * called with 0 to acquire and nonzero to release the respective lock.
 */
void
_rthread_dl_lock(int what)
{
	static _spinlock_lock_t lock = _SPINLOCK_UNLOCKED;

	if (what)
		_spinunlock(&lock);
	else
		_spinlock(&lock);
}

void
_rthread_bind_lock(int what)
{
	static _spinlock_lock_t lock = _SPINLOCK_UNLOCKED;

	if (what)
		_spinunlock(&lock);
	else
		_spinlock(&lock);
}
#endif
519