/*	$OpenBSD: rthread.c,v 1.77 2014/03/16 18:38:30 guenther Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * The heart of rthreads.  Basic functions like creating and joining
 * threads.
 */

#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <sys/msg.h>
#if defined(__ELF__)
#include <sys/exec_elf.h>
#pragma weak _DYNAMIC
#endif

#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <poll.h>

#include <pthread.h>

#include "thread_private.h"	/* in libc/include */
#include "rthread.h"
#include "tcb.h"

static int concurrency_level;	/* not used */

struct _spinlock _SPINLOCK_UNLOCKED_ASSIGN = _SPINLOCK_UNLOCKED;

int _threads_ready;
size_t _thread_pagesize;
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
struct _spinlock _thread_lock = _SPINLOCK_UNLOCKED;
static struct pthread_queue _thread_gc_list
    = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
static struct _spinlock _thread_gc_lock = _SPINLOCK_UNLOCKED;
struct pthread _initial_thread;
struct thread_control_block _initial_thread_tcb;

struct pthread_attr _rthread_attr_default = {
	.stack_addr			= NULL,
	.stack_size			= RTHREAD_STACK_SIZE_DEF,
/*	.guard_size		set in _rthread_init */
	.detach_state			= PTHREAD_CREATE_JOINABLE,
	.contention_scope		= PTHREAD_SCOPE_SYSTEM,
	.sched_policy			= SCHED_OTHER,
	.sched_param = { .sched_priority = 0 },
	.sched_inherit			= PTHREAD_INHERIT_SCHED,
};

/*
 * internal support functions
 */
void
_spinlock(volatile struct _spinlock *lock)
{
	while (_atomic_lock(&lock->ticket))
		sched_yield();
}

int
_spinlocktry(volatile struct _spinlock *lock)
{
	return 0 == _atomic_lock(&lock->ticket);
}

void
_spinunlock(volatile struct _spinlock *lock)
{
	lock->ticket = _ATOMIC_LOCK_UNLOCKED;
}
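
/*
 * Illustrative sketch, kept out of the build: how the spinlock
 * primitives above are meant to be used.  The pattern mirrors
 * _rthread_setflag() below; example_lock and example_count are
 * hypothetical names, not part of rthread.
 */
#if 0
static struct _spinlock example_lock = _SPINLOCK_UNLOCKED;
static int example_count;

static void
example_increment(void)
{
	_spinlock(&example_lock);	/* spins, yielding the CPU, until owned */
	example_count++;		/* the protected update */
	_spinunlock(&example_lock);	/* plain store releases the lock */
}
#endif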

/*
 * This sets up the thread base for the initial thread so that it
 * references the errno location provided by libc.  For other threads
 * this is handled by __tfork_thread()
 */
void _rthread_initlib(void) __attribute__((constructor));
void
_rthread_initlib(void)
{
	static int tcb_set;
	struct thread_control_block *tcb;

	if (__predict_false(tcb_set == 0) && __get_tcb() == NULL) {
		tcb_set = 1;

		/* use libc's errno for the main thread */
		tcb = &_initial_thread_tcb;
		TCB_INIT(tcb, &_initial_thread, ___errno());
		TCB_SET(tcb);
	}
}

/*
 * This is invoked by ___start() in crt0.  Eventually, when ld.so handles
 * TCB setup for dynamic executables, this will only be called to handle
 * the TCB setup for static executables and may migrate to libc.  The
 * envp argument is passed so that it can (someday) be used to locate the
 * Auxinfo array and thus the ELF phdr and the PT_TLS info.
 */
void __init_tcb(char **_envp);
void
__init_tcb(__unused char **envp)
{
	_rthread_initlib();
}

int *
__errno(void)
{
	return (TCB_ERRNOPTR());
}

static void
_rthread_start(void *v)
{
	pthread_t thread = v;
	void *retval;

	retval = thread->fn(thread->arg);
	pthread_exit(retval);
}

/* ARGSUSED0 */
static void
sigthr_handler(__unused int sig)
{
	pthread_t self = pthread_self();

	/*
	 * Do nothing unless
	 * 1) pthread_cancel() has been called on this thread,
	 * 2) cancelation is enabled for it, and
	 * 3) we're not already in cancelation processing
	 */
	if ((self->flags & (THREAD_CANCELED|THREAD_CANCEL_ENABLE|THREAD_DYING))
	    != (THREAD_CANCELED|THREAD_CANCEL_ENABLE))
		return;

	/*
	 * If delaying cancels inside complex ops (pthread_cond_wait,
	 * pthread_join, etc), just mark that this has happened to
	 * prevent a race with going to sleep
	 */
	if (self->flags & THREAD_CANCEL_DELAY) {
		self->delayed_cancel = 1;
		return;
	}

	/*
	 * otherwise, if in a cancel point or async cancels are
	 * enabled, then exit
	 */
	if (self->cancel_point || (self->flags & THREAD_CANCEL_DEFERRED) == 0)
		pthread_exit(PTHREAD_CANCELED);
}
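
/*
 * Illustrative sketch, kept out of the build: what the SIGTHR handler
 * above means for an application thread using deferred cancelation.
 * The cancel only takes effect at a cancelation point, here the
 * explicit pthread_testcancel(); cancelable_worker is a hypothetical
 * name.
 */
#if 0
static void *
cancelable_worker(void *arg)
{
	for (;;) {
		/* ... do one bounded unit of work ... */
		pthread_testcancel();	/* exits with PTHREAD_CANCELED once
					 * pthread_cancel() has marked this
					 * thread */
	}
	/* NOTREACHED */
}
#endif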

int
_rthread_init(void)
{
	pthread_t thread = &_initial_thread;
	struct sigaction sa;

	thread->tid = getthrid();
	thread->donesem.lock = _SPINLOCK_UNLOCKED_ASSIGN;
	thread->flags |= THREAD_CANCEL_ENABLE | THREAD_CANCEL_DEFERRED |
	    THREAD_ORIGINAL;
	thread->flags_lock = _SPINLOCK_UNLOCKED_ASSIGN;
	strlcpy(thread->name, "Main process", sizeof(thread->name));
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_rthread_debug_init();

	_thread_pagesize = (size_t)sysconf(_SC_PAGESIZE);
	_rthread_attr_default.guard_size = _thread_pagesize;

	_rthread_initlib();

	_threads_ready = 1;

	_rthread_debug(1, "rthread init\n");

#if defined(__ELF__) && !defined(__vax__)
	if (_DYNAMIC) {
		/*
		 * To avoid recursion problems in ld.so, we need to trigger the
		 * functions once to fully bind them before registering them
		 * for use.
		 */
		_rthread_dl_lock(0);
		_rthread_dl_lock(1);
		_rthread_bind_lock(0);
		_rthread_bind_lock(1);
		sched_yield();
		dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
		dlctl(NULL, DL_SETBINDLCK, _rthread_bind_lock);
	}
#endif

	/*
	 * Set the handler on the signal used for cancelation and
	 * suspension, and make sure it's unblocked
	 */
	memset(&sa, 0, sizeof(sa));
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;
	sa.sa_handler = sigthr_handler;
	_thread_sys_sigaction(SIGTHR, &sa, NULL);
	sigaddset(&sa.sa_mask, SIGTHR);
	sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);

	return (0);
}

static void
_rthread_free(pthread_t thread)
{
	/* _initial_thread is static, so don't free it */
	if (thread != &_initial_thread) {
		/*
		 * thread->tid is written to by __threxit in the thread
		 * itself, so it's not safe to touch it here
		 */
		_spinlock(&_thread_gc_lock);
		TAILQ_INSERT_TAIL(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
	}
}

void
_rthread_setflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags |= flag;
	_spinunlock(&thread->flags_lock);
}

void
_rthread_clearflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags &= ~flag;
	_spinunlock(&thread->flags_lock);
}

/*
 * real pthread functions
 */
pthread_t
pthread_self(void)
{
	if (!_threads_ready)
		if (_rthread_init())
			return (NULL);

	return (TCB_THREAD());
}

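/*
 * Reap threads queued by _rthread_free().  An entry only becomes
 * reapable once its tid has been cleared through the pointer that
 * pthread_exit() hands to __threxit(), i.e. once the exiting thread
 * is really gone and its stack and TCB can be released safely.
 */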
static void
_rthread_reaper(void)
{
	pthread_t thread;

restart:
	_spinlock(&_thread_gc_lock);
	TAILQ_FOREACH(thread, &_thread_gc_list, waiting) {
		if (thread->tid != 0)
			continue;
		TAILQ_REMOVE(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
		_rthread_debug(3, "rthread reaping %p stack %p\n",
		    (void *)thread, (void *)thread->stack);
		_rthread_free_stack(thread->stack);
		_rtld_free_tls(thread->arg,
		    sizeof(struct thread_control_block), sizeof(void *));
		free(thread);
		goto restart;
	}
	_spinunlock(&_thread_gc_lock);
}

void
pthread_exit(void *retval)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t thread = pthread_self();

	if (thread->flags & THREAD_DYING) {
		/*
		 * Called pthread_exit() from destructor or cancelation
		 * handler: blow up.  XXX write something to stderr?
		 */
		_exit(42);
	}

	_rthread_setflag(thread, THREAD_DYING);

	thread->retval = retval;

	for (clfn = thread->cleanup_fns; clfn; ) {
		struct rthread_cleanup_fn *oclfn = clfn;
		clfn = clfn->next;
		oclfn->fn(oclfn->arg);
		free(oclfn);
	}
	_rthread_tls_destructors(thread);
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);

#ifdef TCB_GET
	thread->arg = TCB_GET();
#else
	thread->arg = __get_tcb();
#endif
	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DONE;
		_spinunlock(&thread->flags_lock);
		_sem_post(&thread->donesem);
	}

	__threxit(&thread->tid);
	for(;;);
}

int
pthread_join(pthread_t thread, void **retval)
{
	int e;
	pthread_t self = pthread_self();

	e = 0;
	_enter_delayed_cancel(self);
	if (thread == NULL)
		e = EINVAL;
	else if (thread == self)
		e = EDEADLK;
	else if (thread->flags & THREAD_DETACHED)
		e = EINVAL;
	else if ((e = _sem_wait(&thread->donesem, 0, NULL,
	    &self->delayed_cancel)) == 0) {
		if (retval)
			*retval = thread->retval;

		/*
		 * We should be the last to hold a ref to this thread,
		 * but someone stupid or evil might have detached it;
		 * in that case the thread will clean up after itself
		 */
		if ((thread->flags & THREAD_DETACHED) == 0)
			_rthread_free(thread);
	}

	_leave_delayed_cancel(self, e);
	_rthread_reaper();
	return (e);
}
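
/*
 * Illustrative sketch, kept out of the build: the usual create/join
 * pairing served by the code above.  pthread_join() blocks on the
 * target's donesem until pthread_exit() posts it, then hands back the
 * exit value.  worker_main and join_example are hypothetical names.
 */
#if 0
static void *
worker_main(void *arg)
{
	/* ... do the work ... */
	return (arg);			/* handed back to the joiner */
}

static void
join_example(void)
{
	pthread_t t;
	void *result;

	if (pthread_create(&t, NULL, worker_main, "done") == 0 &&
	    pthread_join(t, &result) == 0)
		printf("worker returned %s\n", (char *)result);
}
#endif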

int
pthread_detach(pthread_t thread)
{
	int rc = 0;

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		rc = EINVAL;
		_spinunlock(&thread->flags_lock);
	} else if (thread->flags & THREAD_DONE) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DETACHED;
		_spinunlock(&thread->flags_lock);
	}
	_rthread_reaper();
	return (rc);
}

int
pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg)
{
	extern int __isthreaded;
	struct thread_control_block *tcb;
	pthread_t thread;
	struct __tfork param;
	int rc = 0;

	if (!_threads_ready)
		if ((rc = _rthread_init()))
		    return (rc);

	_rthread_reaper();

	thread = calloc(1, sizeof(*thread));
	if (!thread)
		return (errno);
	thread->donesem.lock = _SPINLOCK_UNLOCKED_ASSIGN;
	thread->flags_lock = _SPINLOCK_UNLOCKED_ASSIGN;
	thread->fn = start_routine;
	thread->arg = arg;
	thread->tid = -1;

	thread->attr = attr != NULL ? *(*attr) : _rthread_attr_default;
	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
		thread->flags |= THREAD_DETACHED;
	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;

	thread->stack = _rthread_alloc_stack(thread);
	if (!thread->stack) {
		rc = errno;
		goto fail1;
	}

	tcb = _rtld_allocate_tls(NULL, sizeof(*tcb), sizeof(void *));
	if (tcb == NULL) {
		rc = errno;
		goto fail2;
	}
	TCB_INIT(tcb, thread, &thread->myerrno);

	param.tf_tcb = tcb;
	param.tf_tid = &thread->tid;
	param.tf_stack = thread->stack->sp;

	_spinlock(&_thread_lock);
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_spinunlock(&_thread_lock);

	/* we're going to be multi-threaded real soon now */
	__isthreaded = 1;
	rc = __tfork_thread(&param, sizeof(param), _rthread_start, thread);
	if (rc != -1) {
		/* success */
		*threadp = thread;
		return (0);
	}

	rc = errno;

	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);
	_rtld_free_tls(tcb, sizeof(*tcb), sizeof(void *));
fail2:
	_rthread_free_stack(thread->stack);
fail1:
	_rthread_free(thread);

	return (rc);
}
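
/*
 * Illustrative sketch, kept out of the build: creating a thread that
 * starts out detached, which takes the THREAD_DETACHED path above so
 * the reaper can free it without a pthread_join().  spawn_detached is
 * a hypothetical name.
 */
#if 0
static int
spawn_detached(void *(*fn)(void *), void *arg)
{
	pthread_attr_t attr;
	pthread_t t;
	int rc;

	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	rc = pthread_create(&t, &attr, fn, arg);
	pthread_attr_destroy(&attr);
	return (rc);
}
#endif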

int
pthread_kill(pthread_t thread, int sig)
{
	pid_t tid;
	int ret;

	/* killing myself?  do it without locking */
	if (thread == TCB_THREAD())
		return (kill(thread->tid, sig) == 0 ? 0 : errno);

	/* block the other thread from exiting */
	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DYING)
		ret = (thread->flags & THREAD_DETACHED) ? ESRCH : 0;
	else {
		tid = thread->tid;
		if (tid == 0) {
			/* should be impossible without DYING being set */
			ret = ESRCH;
		} else
			ret = kill(tid, sig) == 0 ? 0 : errno;
	}
	_spinunlock(&thread->flags_lock);
	return (ret);
}
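
/*
 * Illustrative sketch, kept out of the build: directing a signal at a
 * single thread.  It assumes the process has installed a SIGUSR1
 * handler; poke_worker is a hypothetical name.
 */
#if 0
static int
poke_worker(pthread_t worker)
{
	return (pthread_kill(worker, SIGUSR1));	/* handler runs in "worker" */
}
#endif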

int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}

int
pthread_cancel(pthread_t thread)
{
	pid_t tid;

	_spinlock(&thread->flags_lock);
	tid = thread->tid;
	if ((thread->flags & (THREAD_DYING | THREAD_CANCELED)) == 0 &&
	    tid != 0) {
		thread->flags |= THREAD_CANCELED;

		if (thread->flags & THREAD_CANCEL_ENABLE) {

			/* canceling myself?  release the lock first */
			if (thread == TCB_THREAD()) {
				_spinunlock(&thread->flags_lock);
				kill(tid, SIGTHR);
				return (0);
			}

			kill(tid, SIGTHR);
		}
	}
	_spinunlock(&thread->flags_lock);
	return (0);
}

void
pthread_testcancel(void)
{
	if ((pthread_self()->flags & (THREAD_CANCELED|THREAD_CANCEL_ENABLE)) ==
	    (THREAD_CANCELED|THREAD_CANCEL_ENABLE))
		pthread_exit(PTHREAD_CANCELED);
}

int
pthread_setcancelstate(int state, int *oldstatep)
{
	pthread_t self = pthread_self();
	int oldstate;

	oldstate = self->flags & THREAD_CANCEL_ENABLE ?
	    PTHREAD_CANCEL_ENABLE : PTHREAD_CANCEL_DISABLE;
	if (state == PTHREAD_CANCEL_ENABLE) {
		_rthread_setflag(self, THREAD_CANCEL_ENABLE);
	} else if (state == PTHREAD_CANCEL_DISABLE) {
		_rthread_clearflag(self, THREAD_CANCEL_ENABLE);
	} else {
		return (EINVAL);
	}
	if (oldstatep)
		*oldstatep = oldstate;

	return (0);
}
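
/*
 * Illustrative sketch, kept out of the build: masking cancelation
 * around a region that must not be interrupted, then restoring the
 * caller's previous state.  do_uninterruptible_work is a hypothetical
 * name.
 */
#if 0
static void
do_uninterruptible_work(void)
{
	int oldstate;

	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	/* ... work that must finish even if we have been canceled ... */
	pthread_setcancelstate(oldstate, NULL);
}
#endif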

int
pthread_setcanceltype(int type, int *oldtypep)
{
	pthread_t self = pthread_self();
	int oldtype;

	oldtype = self->flags & THREAD_CANCEL_DEFERRED ?
	    PTHREAD_CANCEL_DEFERRED : PTHREAD_CANCEL_ASYNCHRONOUS;
	if (type == PTHREAD_CANCEL_DEFERRED) {
		_rthread_setflag(self, THREAD_CANCEL_DEFERRED);
	} else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		_rthread_clearflag(self, THREAD_CANCEL_DEFERRED);
	} else {
		return (EINVAL);
	}
	if (oldtypep)
		*oldtypep = oldtype;

	return (0);
}

void
pthread_cleanup_push(void (*fn)(void *), void *arg)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = calloc(1, sizeof(*clfn));
	if (!clfn)
		return;
	clfn->fn = fn;
	clfn->arg = arg;
	clfn->next = self->cleanup_fns;
	self->cleanup_fns = clfn;
}

void
pthread_cleanup_pop(int execute)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = self->cleanup_fns;
	if (clfn) {
		self->cleanup_fns = clfn->next;
		if (execute)
			clfn->fn(clfn->arg);
		free(clfn);
	}
}
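
/*
 * Illustrative sketch, kept out of the build: pairing the cleanup
 * handlers above around a cancelation point so a mutex is released
 * even if the thread is canceled while waiting.  The names are
 * hypothetical; note that in this implementation push/pop are real
 * functions rather than the usual macro pair.
 */
#if 0
static pthread_mutex_t example_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t example_cv = PTHREAD_COND_INITIALIZER;
static int example_ready;

static void
unlock_cb(void *arg)
{
	pthread_mutex_unlock(arg);
}

static void
wait_until_ready(void)
{
	pthread_mutex_lock(&example_mtx);
	pthread_cleanup_push(unlock_cb, &example_mtx);
	while (!example_ready)
		pthread_cond_wait(&example_cv, &example_mtx);
	pthread_cleanup_pop(1);		/* pops and runs unlock_cb() */
}
#endif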

int
pthread_getconcurrency(void)
{
	return (concurrency_level);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	concurrency_level = new_level;
	return (0);
}

/*
 * compat debug stuff
 */
void
_thread_dump_info(void)
{
	pthread_t thread;

	_spinlock(&_thread_lock);
	LIST_FOREACH(thread, &_thread_list, threads)
		printf("thread %d flags %d name %s\n",
		    thread->tid, thread->flags, thread->name);
	_spinunlock(&_thread_lock);
}

#if defined(__ELF__)
/*
 * _rthread_dl_lock() provides the locking for dlopen(), dlclose(), and
 * the function called via atexit() to invoke all destructors.  The latter
 * two call shared-object destructors, which may need to call dlclose(),
 * so this lock needs to permit recursive locking.
 * The specific code here was extracted from _rthread_mutex_lock() and
 * pthread_mutex_unlock() and simplified to use the static variables.
 */
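/*
 * The recursion typically shows up at exit time: the atexit() pass
 * takes this lock and runs shared-object destructors, and a destructor
 * that calls dlclose() then needs the lock again.  The owner/count pair
 * below lets the owning thread re-enter, while other threads queue on
 * "lockers" and sleep in __thrsleep() until the final unlock wakes one.
 */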
void
_rthread_dl_lock(int what)
{
	static struct _spinlock lock = _SPINLOCK_UNLOCKED;
	static pthread_t owner = NULL;
	static struct pthread_queue lockers = TAILQ_HEAD_INITIALIZER(lockers);
	static int count = 0;

	if (what == 0)
	{
		pthread_t self = pthread_self();

		/* lock, possibly recursive */
		_spinlock(&lock);
		if (owner == NULL) {
			owner = self;
		} else if (owner != self) {
			TAILQ_INSERT_TAIL(&lockers, self, waiting);
			while (owner != self) {
				__thrsleep(self, 0 | _USING_TICKETS, NULL,
				    &lock.ticket, NULL);
				_spinlock(&lock);
			}
		}
		count++;
		_spinunlock(&lock);
	}
	else
	{
		/* unlock, possibly recursive */
		if (--count == 0) {
			pthread_t next;

			_spinlock(&lock);
			owner = next = TAILQ_FIRST(&lockers);
			if (next != NULL)
				TAILQ_REMOVE(&lockers, next, waiting);
			_spinunlock(&lock);
			if (next != NULL)
				__thrwakeup(next, 1);
		}
	}
}

void
_rthread_bind_lock(int what)
{
	static struct _spinlock lock = _SPINLOCK_UNLOCKED;

	if (what == 0)
		_spinlock(&lock);
	else
		_spinunlock(&lock);
}
#endif

#ifdef __ELF__
#define CERROR_SYMBOL __cerror
#else
#define CERROR_SYMBOL _cerror
#endif

/*
 * XXX: Bogus type signature, but we only need to be able to emit a
 * reference to it below.
 */
extern void CERROR_SYMBOL(void);

/*
 * All weak references used within libc that are redefined in libpthread
 * MUST be in this table.  This is necessary to force the proper version to
 * be used when linking -static.
 */
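/*
 * Taking each symbol's address below creates a reference from this
 * object, so a -static link that pulls in rthread.o also pulls in
 * libpthread's strong definitions of these symbols rather than falling
 * back to libc's weak ones.
 */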
static void *__libc_overrides[] __used = {
	&CERROR_SYMBOL,
	&__errno,
	&_thread_arc4_lock,
	&_thread_arc4_unlock,
	&_thread_atexit_lock,
	&_thread_atexit_unlock,
	&_thread_malloc_lock,
	&_thread_malloc_unlock,
	&_thread_mutex_destroy,
	&_thread_mutex_lock,
	&_thread_mutex_unlock,
	&_thread_tag_lock,
	&_thread_tag_storage,
	&_thread_tag_unlock,
	&accept,
	&close,
	&closefrom,
	&connect,
	&fcntl,
	&flockfile,
	&fork,
	&fsync,
	&ftrylockfile,
	&funlockfile,
	&msgrcv,
	&msgsnd,
	&msync,
	&nanosleep,
	&open,
	&openat,
	&poll,
	&pread,
	&preadv,
	&pwrite,
	&pwritev,
	&read,
	&readv,
	&recvfrom,
	&recvmsg,
	&select,
	&sendmsg,
	&sendto,
	&sigaction,
	&sigprocmask,
	&sigsuspend,
	&vfork,
	&wait4,
	&write,
	&writev,
};