1 /*
2  * Copyright (C) 2005 Daniel M. Eischen <deischen@freebsd.org>
3  * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
4  * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
5  *
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  * $FreeBSD: head/lib/libthr/thread/thr_private.h 217706 2010-08-23 $
29  */
30 
31 /*
32  * Private thread definitions for the uthread kernel.
33  */
34 
35 #ifndef _THR_PRIVATE_H
36 #define _THR_PRIVATE_H
37 
38 /*
39  * Include files.
40  */
41 #include <sys/types.h>
42 #include <sys/time.h>
43 #include <sys/cdefs.h>
44 #include <sys/queue.h>
45 #include <sys/rtprio.h>
46 #include <sys/mman.h>
47 #include <machine/atomic.h>
48 #include <machine/cpumask.h>
49 #include <errno.h>
50 #include <limits.h>
51 #include <signal.h>
52 #include <sys/sched.h>
53 #include <stdarg.h>
54 #include <unistd.h>
55 #include <pthread.h>
56 #include <pthread_np.h>
57 
58 #if defined(_PTHREADS_DEBUGGING) || defined(_PTHREADS_DEBUGGING2)
59 void	_thr_log(const char *buf, size_t bytes);
60 #endif
61 
62 #include "pthread_md.h"
63 #include "thr_umtx.h"
64 #include "thread_db.h"
65 
/* Signal used internally to deliver thread-cancellation requests. */
#define	SIGCANCEL		32

/*
 * Kernel fatal error handler macro.  Prints file/line context and
 * terminates the process via _thread_exitf() (declared __dead2 below).
 */
#define PANIC(args...)		_thread_exitf(__FILE__, __LINE__, ##args)

/* Output debug messages like this: */
#define stdout_debug(args...)	_thread_printf(STDOUT_FILENO, ##args)
#define stderr_debug(args...)	_thread_printf(STDERR_FILENO, ##args)

/*
 * THR_ASSERT() panics on a failed invariant, but only when the library
 * is built with _PTHREADS_INVARIANTS; otherwise it expands to nothing.
 * Note that in the latter case the condition is not evaluated at all,
 * so never pass an expression with side effects.
 */
#ifdef _PTHREADS_INVARIANTS
#define THR_ASSERT(cond, msg) do {	\
	if (__predict_false(!(cond)))	\
		PANIC(msg);		\
} while (0)
#else
#define THR_ASSERT(cond, msg)
#endif

/*
 * In a static (non-PIC) build, emit a global reference to the named
 * symbol so the object file providing it is pulled out of the static
 * archive at link time.  Unnecessary (and a no-op) for shared builds.
 */
#ifdef PIC
#define STATIC_LIB_REQUIRE(name)
#else
#define STATIC_LIB_REQUIRE(name)	__asm(".globl " #name)
#endif
92 
/*
 * Declare the list-head struct tags used for the global thread list and
 * the pthread_atfork() handler lists.  Only the tags are wanted here:
 * the actual objects (_thread_list, _thread_gc_list, _thr_atfork_list,
 * ...) are declared extern below and defined in exactly one .c file.
 * Naming variables in this header would create a tentative definition
 * in every translation unit that includes it, which fails to link under
 * -fno-common (the default in modern compilers).
 */
TAILQ_HEAD(thread_head, pthread);
TAILQ_HEAD(atfork_head, pthread_atfork);
95 
/*
 * TIMESPEC_ADD/TIMESPEC_SUB: *dst = *src (+|-) *val for struct timespec
 * pointers.  Both assume normalized operands (0 <= tv_nsec < 1e9), so a
 * single carry/borrow step suffices.  Being macros, the arguments may
 * be evaluated more than once; do not pass expressions with side
 * effects.  dst may alias src and/or val.
 */
#define	TIMESPEC_ADD(dst, src, val)				\
	do {							\
		(dst)->tv_sec = (src)->tv_sec + (val)->tv_sec;	\
		(dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \
		if ((dst)->tv_nsec >= 1000000000) {		\
			(dst)->tv_sec++;			\
			(dst)->tv_nsec -= 1000000000;		\
		}						\
	} while (0)

#define	TIMESPEC_SUB(dst, src, val)				\
	do {							\
		(dst)->tv_sec = (src)->tv_sec - (val)->tv_sec;	\
		(dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
		if ((dst)->tv_nsec < 0) {			\
			(dst)->tv_sec--;			\
			(dst)->tv_nsec += 1000000000;		\
		}						\
	} while (0)
115 
/*
 * Mutex.  Statically initialized instances are laid out by
 * PTHREAD_MUTEX_STATIC_INITIALIZER below; the field order here is ABI.
 */
struct pthread_mutex {
	/*
	 * Lock for accesses to this structure.
	 */
	volatile umtx_t			m_lock;
#ifdef _PTHREADS_DEBUGGING2
	/* Record of recent operations, debug builds only -- see _thr_log users. */
	int				m_lastop[32];
#endif
	enum pthread_mutextype		m_type;
	int				m_protocol;	/* PTHREAD_PRIO_* */
	TAILQ_HEAD(mutex_head, pthread)	m_queue;	/* threads blocked on this mutex */
	struct pthread			*m_owner;	/* NULL when unowned */
	long				m_flags;	/* MUTEX_FLAGS_* */
	int				m_count;	/* recursion depth (recursive mutexes) */
	int				m_refcount;

	/*
	 * Used for priority inheritance and protection.
	 *
	 *   m_prio       - For priority inheritance, the highest active
	 *		    priority (threads locking the mutex inherit
	 *		    this priority).  For priority protection, the
	 *		    ceiling priority of this mutex.
	 *   m_saved_prio - mutex owners inherited priority before
	 *		    taking the mutex, restored when the owner
	 *		    unlocks the mutex.
	 */
	int				m_prio;
	int				m_saved_prio;

	/*
	 * Link for list of all mutexes a thread currently owns.
	 */
	TAILQ_ENTRY(pthread_mutex)	m_qe;
};
151 
/*
 * NOTE(review): this differs from TAILQ_HEAD_INITIALIZER(), which points
 * tqh_last back at tqh_first.  A head initialized to { NULL, NULL } is
 * not usable by the TAILQ insertion macros until re-initialized --
 * presumably done when a statically initialized mutex is first locked;
 * TODO confirm against the mutex implementation.
 */
#define TAILQ_INITIALIZER	{ NULL, NULL }

/* Layout must track struct pthread_mutex above. */
#define PTHREAD_MUTEX_STATIC_INITIALIZER   		\
	{	.m_lock = 0,				\
		.m_type = PTHREAD_MUTEX_DEFAULT,	\
		.m_protocol = PTHREAD_PRIO_NONE,	\
		.m_queue = TAILQ_INITIALIZER,		\
		.m_flags = MUTEX_FLAGS_PRIVATE		\
	}
/*
 * Flags for mutexes.
 */
#define MUTEX_FLAGS_PRIVATE	0x01	/* process-private */
#define MUTEX_FLAGS_INITED	0x02	/* fully initialized */

/* Attribute set captured by the pthread_mutexattr_*() functions. */
struct pthread_mutex_attr {
	enum pthread_mutextype	m_type;
	int			m_protocol;	/* PTHREAD_PRIO_* */
	int			m_ceiling;	/* priority ceiling (PRIO_PROTECT) */
	int			m_flags;	/* MUTEX_FLAGS_* */
};

#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
	{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }
176 
/* Per-waiter record; defined in the condvar implementation. */
struct cond_cancel_info;

/* Condition variable. */
struct pthread_cond {
	/*
	 * Lock for accesses to this structure.
	 */
	volatile umtx_t	c_lock;
	volatile int	c_unused01;	/* unused; presumably kept for ABI layout */
	int		c_pshared;	/* process-shared? (from attr c_pshared) */
	int		c_clockid;	/* clock for pthread_cond_timedwait() */
	TAILQ_HEAD(, cond_cancel_info)	c_waitlist;	/* current waiters */
};

/* Attribute set captured by the pthread_condattr_*() functions. */
struct pthread_cond_attr {
	int		c_pshared;
	int		c_clockid;
};

/*
 * Flags for condition variables.
 * NOTE(review): struct pthread_cond above has no flags field; these
 * look unused here -- confirm against the condvar implementation.
 */
#define COND_FLAGS_PRIVATE	0x01
#define COND_FLAGS_INITED	0x02
200 
/*
 * Barrier: waiters block until b_count threads have arrived.
 * b_cycle presumably advances each time the barrier releases, so
 * late wakers can tell generations apart -- confirm in the barrier
 * implementation.
 */
struct pthread_barrier {
	volatile umtx_t	b_lock;
	volatile umtx_t	b_cycle;
	volatile int	b_count;	/* threshold from pthread_barrier_init() */
	volatile int	b_waiters;	/* currently blocked threads */
};

struct pthread_barrierattr {
	int		pshared;	/* PTHREAD_PROCESS_SHARED/PRIVATE */
};

/* Spinlock: a bare umtx word. */
struct pthread_spinlock {
	volatile umtx_t	s_lock;
};

/*
 * Cleanup definitions.
 *
 * One record per pushed cancellation-cleanup handler; records form a
 * LIFO singly-linked list headed at struct pthread::cleanup.
 */
struct pthread_cleanup {
	struct pthread_cleanup	*next;		/* next-outer handler */
	void			(*routine)(void *);
	void			*routine_arg;
	int			onstack;	/* 1 if the record lives on the caller's stack */
};
225 
/*
 * Push/pop a cancellation-cleanup handler onto thread (td).
 *
 * The braces are intentionally unbalanced: PUSH opens a compound
 * statement that POP closes, so the handler record (__cup) lives on
 * the stack of that block.  Every THR_CLEANUP_PUSH must therefore be
 * paired with a THR_CLEANUP_POP in the same function at the same
 * nesting level.  POP runs the handler when (exec) is non-zero.
 */
#define	THR_CLEANUP_PUSH(td, func, arg) {		\
	struct pthread_cleanup __cup;			\
							\
	__cup.routine = func;				\
	__cup.routine_arg = arg;			\
	__cup.onstack = 1;				\
	__cup.next = (td)->cleanup;			\
	(td)->cleanup = &__cup;

#define	THR_CLEANUP_POP(td, exec)			\
	(td)->cleanup = __cup.next;			\
	if ((exec) != 0)				\
		__cup.routine(__cup.routine_arg);	\
}
240 
/* One pthread_atfork() registration (see _thr_atfork_list below). */
struct pthread_atfork {
	TAILQ_ENTRY(pthread_atfork) qe;	/* list linkage */
	void (*prepare)(void);		/* run in parent before fork() */
	void (*parent)(void);		/* run in parent after fork() */
	void (*child)(void);		/* run in child after fork() */
};

/* Thread creation attributes (pthread_attr_*). */
struct pthread_attr {
	int	sched_policy;		/* scheduling policy (SCHED_*) */
	int	sched_inherit;
	int	prio;
	int	suspend;		/* THR_CREATE_RUNNING/SUSPENDED */
#define	THR_STACK_USER		0x100	/* 0xFF reserved for <pthread.h> */
#define THR_CPUMASK		0x200	/* cpumask is valid */
	int	flags;
	void	*stackaddr_attr;	/* user-supplied stack, or NULL */
	size_t	stacksize_attr;
	size_t	guardsize_attr;
	cpumask_t cpumask;		/* valid only when THR_CPUMASK set */
};
261 
262 /*
263  * Thread creation state attributes.
264  */
/* Values for pthread_attr::suspend. */
#define THR_CREATE_RUNNING		0
#define THR_CREATE_SUSPENDED		1

/*
 * Miscellaneous definitions.
 *
 * Default stack size scales with pointer width: 1MB when
 * sizeof(void *) == 4, 2MB when sizeof(void *) == 8.
 */
#define THR_STACK_DEFAULT		(sizeof(void *) / 4 * 1024 * 1024)

/*
 * Maximum size of initial thread's stack.  This perhaps deserves to be larger
 * than the stacks of other threads, since many applications are likely to run
 * almost entirely on this stack.
 */
#define THR_STACK_INITIAL		(THR_STACK_DEFAULT * 2)

/*
 * Define the different priority ranges.  All applications have thread
 * priorities constrained within 0-31.  The threads library raises the
 * priority when delivering signals in order to ensure that signal
 * delivery happens (from the POSIX spec) "as soon as possible".
 * In the future, the threads library will also be able to map specific
 * threads into real-time (cooperating) processes or kernel threads.
 * The RT and SIGNAL priorities will be used internally and added to
 * thread base priorities so that the scheduling queue can handle both
 * normal and RT priority threads with and without signal handling.
 *
 * The approach taken is that, within each class, signal delivery
 * always has priority over thread execution.
 */
#define THR_DEFAULT_PRIORITY		0
#define THR_MUTEX_CEIL_PRIORITY		31	/* dummy */

/*
 * Time slice period in microseconds.
 */
#define TIMESLICE_USEC				20000
301 
struct pthread_rwlockattr {
	int		pshared;	/* PTHREAD_PROCESS_SHARED/PRIVATE */
};

/*
 * Read/write lock built as a classic monitor: a mutex guarding the
 * state word, plus separate reader and writer condition variables.
 */
struct pthread_rwlock {
	pthread_mutex_t	lock;	/* monitor lock */
	pthread_cond_t	read_signal;	/* readers wait here */
	pthread_cond_t	write_signal;	/* writers wait here */
	int		state;	/* 0 = idle  >0 = # of readers  -1 = writer */
	int		blocked_writers;
};
313 
314 /*
315  * Thread states.
316  */
/*
 * Thread states.
 */
enum pthread_state {
	PS_RUNNING,
	PS_DEAD
};

/* One slot of a thread's thread-specific-data (TSD) array. */
struct pthread_specific_elem {
	const void	*data;
	int		seqno;	/* key generation this value belongs to */
};

/*
 * Global per-key bookkeeping for pthread_key_create().  seqno is
 * presumably bumped when a key slot is reused so stale per-thread
 * values are ignored -- confirm in the TSD implementation.
 */
struct pthread_key {
	volatile int	allocated;
	volatile int	count;
	int		seqno;
	void		(*destructor)(void *);
};
333 
334 /*
335  * Thread structure.
336  */
struct pthread {
	/*
	 * Magic value to help recognize a valid thread structure
	 * from an invalid one:
	 */
#define	THR_MAGIC		((u_int32_t) 0xd09ba115)
	u_int32_t		magic;
	char			*name;
	u_int64_t		uniqueid; /* for gdb */

	/*
	 * Lock for accesses to this thread structure.
	 */
	umtx_t			lock;

	/* Thread is terminated in kernel, written by kernel. */
	long			terminated;

	/* Kernel thread id. */
	lwpid_t			tid;

	/* Internal condition variable cycle number. */
	umtx_t			cycle;

	/* How many low level locks the thread held. */
	int			locklevel;

	/*
	 * Set to non-zero when this thread has entered a critical
	 * region.  We allow for recursive entries into critical regions.
	 */
	int			critical_count;

	/* Signal blocked counter. */
	int			sigblock;

	/* Queue entry for list of all threads. */
	TAILQ_ENTRY(pthread)	tle;	/* link for all threads in process */

	/* Queue entry for GC lists. */
	TAILQ_ENTRY(pthread)	gcle;

	/* Hash queue entry. */
	LIST_ENTRY(pthread)	hle;

	/* Threads reference count. */
	int			refcount;

	/*
	 * Thread start routine, argument, stack pointer and thread
	 * attributes.
	 */
	void			*(*start_routine)(void *);
	void			*arg;
	struct pthread_attr	attr;

	/*
	 * Cancelability flags
	 */
#define	THR_CANCEL_DISABLE		0x0001
#define	THR_CANCEL_EXITING		0x0002
#define THR_CANCEL_AT_POINT		0x0004
#define THR_CANCEL_NEEDED		0x0008
	/*
	 * Cancellation was requested, is not disabled, and the thread
	 * is not already exiting.
	 */
#define	SHOULD_CANCEL(val)					\
	(((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |	\
		 THR_CANCEL_NEEDED)) == THR_CANCEL_NEEDED)

	/* As SHOULD_CANCEL, and the thread is at a cancellation point. */
#define	SHOULD_ASYNC_CANCEL(val)				\
	(((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |	\
		 THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT)) ==	\
		 (THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT))
	int			cancelflags;	/* THR_CANCEL_* bits above */

	/* Thread temporary signal mask. */
	sigset_t		sigmask;

	/* Thread state: PS_RUNNING or PS_DEAD (enum pthread_state). */
	umtx_t			state;

	/*
	 * Error variable used instead of errno for internal library
	 * bookkeeping.
	 */
	int			error;

	/*
	 * The joiner is the thread that is joining to this thread.  The
	 * join status keeps track of a join operation to another thread.
	 */
	struct pthread		*joiner;

	/*
	 * The current thread can belong to a priority mutex queue.
	 * This is the synchronization queue link.
	 */
	TAILQ_ENTRY(pthread)	sqe;

	/* Miscellaneous flags; only set with scheduling lock held. */
	int			flags;
#define THR_FLAGS_PRIVATE	0x0001
#define	THR_FLAGS_NEED_SUSPEND	0x0002	/* thread should be suspended */
#define	THR_FLAGS_SUSPENDED	0x0004	/* thread is suspended */

	/* Thread list flags; only set with thread list lock held. */
	int			tlflags;
#define	TLFLAGS_GC_SAFE		0x0001	/* thread safe for cleaning */
#define	TLFLAGS_IN_TDLIST	0x0002	/* thread in all thread list */
#define	TLFLAGS_IN_GCLIST	0x0004	/* thread in gc list */
#define	TLFLAGS_DETACHED	0x0008	/* thread is detached */

	/*
	 * Base priority is the user setable and retrievable priority
	 * of the thread.  It is only affected by explicit calls to
	 * set thread priority and upon thread creation via a thread
	 * attribute or default priority.
	 */
	char			base_priority;

	/*
	 * Inherited priority is the priority a thread inherits by
	 * taking a priority inheritance or protection mutex.  It
	 * is not affected by base priority changes.  Inherited
	 * priority defaults to and remains 0 until a mutex is taken
	 * that is being waited on by any other thread whose priority
	 * is non-zero.
	 */
	char			inherited_priority;

	/*
	 * Active priority is always the maximum of the threads base
	 * priority and inherited priority.  When there is a change
	 * in either the base or inherited priority, the active
	 * priority must be recalculated.
	 */
	char			active_priority;

	/* Number of priority ceiling or protection mutexes owned. */
	int			priority_mutex_count;

	/* Queue of currently owned simple type mutexes. */
	TAILQ_HEAD(, pthread_mutex)	mutexq;

	/* Return value: passed to pthread_exit() or returned by start_routine. */
	void				*ret;
	/* Per-thread TSD slots; parallel to the global pthread_key table. */
	struct pthread_specific_elem	*specific;
	int				specific_data_count;

	/* Number rwlocks rdlocks held. */
	int			rdlock_count;

	/*
	 * Current locks bitmap for rtld. */
	int			rtld_bits;

	/* Thread control block */
	struct tls_tcb		*tcb;

	/* Cleanup handlers Link List (LIFO; see THR_CLEANUP_PUSH/POP). */
	struct pthread_cleanup	*cleanup;

	/* Enable event reporting (for the debugger, see thread_db). */
	int			report_events;

	/* Event mask */
	td_thr_events_t		event_mask;

	/* Event */
	td_event_msg_t		event_buf;
};
504 
/* True while (thrd) holds low-level locks or is inside a critical region. */
#define	THR_IN_CRITICAL(thrd)				\
	(((thrd)->locklevel > 0) ||			\
	((thrd)->critical_count > 0))

/* Thin wrappers passing the owning tid to the umtx primitives. */
#define THR_UMTX_TRYLOCK(thrd, lck)			\
	_thr_umtx_trylock((lck), (thrd)->tid)

#define	THR_UMTX_LOCK(thrd, lck)			\
	_thr_umtx_lock((lck), (thrd)->tid)

#define	THR_UMTX_TIMEDLOCK(thrd, lck, timo)		\
	_thr_umtx_timedlock((lck), (thrd)->tid, (timo))

#define	THR_UMTX_UNLOCK(thrd, lck)			\
	_thr_umtx_unlock((lck), (thrd)->tid)

/*
 * THR_LOCK_ACQUIRE/THR_LOCK_RELEASE bracket a umtx lock with
 * locklevel accounting (see THR_IN_CRITICAL above).  The release
 * side also calls _thr_ast() after dropping the lock -- presumably
 * to process deferred work such as suspension; confirm in _thr_ast().
 */
#define	THR_LOCK_ACQUIRE(thrd, lck)			\
do {							\
	(thrd)->locklevel++;				\
	_thr_umtx_lock((lck), (thrd)->tid);		\
} while (0)

/* Panics if locklevel is not positive (invariants builds only). */
#ifdef	_PTHREADS_INVARIANTS
#define	THR_ASSERT_LOCKLEVEL(thrd)			\
do {							\
	if (__predict_false((thrd)->locklevel <= 0))	\
		_thr_assert_lock_level();		\
} while (0)
#else
#define THR_ASSERT_LOCKLEVEL(thrd)
#endif

#define	THR_LOCK_RELEASE(thrd, lck)			\
do {							\
	THR_ASSERT_LOCKLEVEL(thrd);			\
	_thr_umtx_unlock((lck), (thrd)->tid);		\
	(thrd)->locklevel--;				\
	_thr_ast(thrd);					\
} while (0)

/* Lock/unlock a thread structure; curthrd does the locking. */
#define	THR_LOCK(curthrd)		THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock)
#define	THR_UNLOCK(curthrd)		THR_LOCK_RELEASE(curthrd, &(curthrd)->lock)
#define	THR_THREAD_LOCK(curthrd, thr)	THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
#define	THR_THREAD_UNLOCK(curthrd, thr)	THR_LOCK_RELEASE(curthrd, &(thr)->lock)

/* Guard for _thread_list / _thread_gc_list manipulation (below). */
#define	THREAD_LIST_LOCK(curthrd)				\
do {								\
	THR_LOCK_ACQUIRE((curthrd), &_thr_list_lock);		\
} while (0)

#define	THREAD_LIST_UNLOCK(curthrd)				\
do {								\
	THR_LOCK_RELEASE((curthrd), &_thr_list_lock);		\
} while (0)
559 
560 /*
561  * Macros to insert/remove threads to the all thread list and
562  * the gc list.
563  */
564 #define	THR_LIST_ADD(thrd) do {					\
565 	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) {	\
566 		TAILQ_INSERT_HEAD(&_thread_list, thrd, tle);	\
567 		_thr_hash_add(thrd);				\
568 		(thrd)->tlflags |= TLFLAGS_IN_TDLIST;		\
569 	}							\
570 } while (0)
571 #define	THR_LIST_REMOVE(thrd) do {				\
572 	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) {	\
573 		TAILQ_REMOVE(&_thread_list, thrd, tle);		\
574 		_thr_hash_remove(thrd);				\
575 		(thrd)->tlflags &= ~TLFLAGS_IN_TDLIST;		\
576 	}							\
577 } while (0)
578 #define	THR_GCLIST_ADD(thrd) do {				\
579 	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) {	\
580 		TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
581 		(thrd)->tlflags |= TLFLAGS_IN_GCLIST;		\
582 		_thr_gc_count++;					\
583 	}							\
584 } while (0)
585 #define	THR_GCLIST_REMOVE(thrd) do {				\
586 	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) {	\
587 		TAILQ_REMOVE(&_thread_gc_list, thrd, gcle);	\
588 		(thrd)->tlflags &= ~TLFLAGS_IN_GCLIST;		\
589 		_thr_gc_count--;					\
590 	}							\
591 } while (0)
592 
593 #define GC_NEEDED()	(_thr_gc_count >= 5)
594 
595 #define	THR_IN_SYNCQ(thrd)	(((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
596 
/*
 * True when debugger event (e) should be reported for thread (curthr):
 * event reporting is enabled on the thread and the event bit is set in
 * either the per-thread mask or the global _thread_event_mask.  Both
 * parameters are fully parenthesized so compound argument expressions
 * (e.g. a pointer expression for curthr) expand correctly.
 */
#define SHOULD_REPORT_EVENT(curthr, e)			\
	((curthr)->report_events &&			\
	 ((((curthr)->event_mask | _thread_event_mask) & (e)) != 0))
600 
601 #ifndef __LIBC_ISTHREADED_DECLARED
602 #define __LIBC_ISTHREADED_DECLARED
603 extern int __isthreaded;
604 #endif
605 
606 /*
607  * Global variables for the pthread library.
608  */
609 extern char		*_usrstack;
610 extern struct pthread	*_thr_initial;
611 
612 /* For debugger */
613 extern int		_libthread_xu_debug;
614 extern int		_thread_event_mask;
615 extern struct pthread	*_thread_last_event;
616 
617 /* List of all threads */
618 extern struct thread_head	_thread_list;
619 
620 /* List of threads needing GC */
621 extern struct thread_head	_thread_gc_list;
622 
623 extern int	_thread_active_threads;
624 
625 extern struct	atfork_head	_thr_atfork_list;
626 extern struct	atfork_head	_thr_atfork_kern_list;
627 extern umtx_t	_thr_atfork_lock;
628 
629 /* Default thread attributes */
630 extern struct pthread_attr _pthread_attr_default;
631 
632 /* Default mutex attributes */
633 extern struct pthread_mutex_attr _pthread_mutexattr_default;
634 
635 /* Default condition variable attributes */
636 extern struct pthread_cond_attr _pthread_condattr_default;
637 
638 extern pid_t	_thr_pid;
639 extern size_t	_thr_guard_default;
640 extern size_t	_thr_stack_default;
641 extern size_t	_thr_stack_initial;
642 extern int	_thr_page_size;
643 extern int	_thr_gc_count;
644 
645 extern umtx_t	_mutex_static_lock;
646 extern umtx_t	_cond_static_lock;
647 extern umtx_t	_rwlock_static_lock;
648 extern umtx_t	_keytable_lock;
649 extern umtx_t	_thr_list_lock;
650 extern umtx_t	_thr_event_lock;
651 
652 /*
653  * Function prototype definitions.
654  */
655 __BEGIN_DECLS
656 int	_thr_setthreaded(int);
657 int	_mutex_cv_lock(pthread_mutex_t *, int count);
658 int	_mutex_cv_unlock(pthread_mutex_t *, int *count);
659 void	_mutex_notify_priochange(struct pthread *, struct pthread *, int);
660 void	_mutex_fork(struct pthread *curthread);
661 void	_mutex_unlock_private(struct pthread *);
662 
663 #if 0
664 int	_mutex_reinit(pthread_mutex_t *);
665 void	_cond_reinit(pthread_cond_t pcond);
666 void	_rwlock_reinit(pthread_rwlock_t prwlock);
667 #endif
668 
669 void	_libpthread_init(struct pthread *);
670 struct pthread *_thr_alloc(struct pthread *);
671 void	_thread_exit(const char *, int, const char *) __dead2;
672 void	_thread_exitf(const char *, int, const char *, ...) __dead2
673 	    __printflike(3, 4);
674 void	_thr_exit_cleanup(void);
675 void	_thr_atfork_kern(void (*prepare)(void), void (*parent)(void),
676 			void (*child)(void));
677 int	_thr_ref_add(struct pthread *, struct pthread *, int);
678 void	_thr_ref_delete(struct pthread *, struct pthread *);
679 void	_thr_ref_delete_unlocked(struct pthread *, struct pthread *);
680 int	_thr_find_thread(struct pthread *, struct pthread *, int);
681 void	_thr_malloc_init(void);
682 void	_rtld_setthreaded(int);
683 void	_thr_rtld_init(void);
684 void	_thr_rtld_fini(void);
685 int	_thr_stack_alloc(struct pthread_attr *);
686 void	_thr_stack_free(struct pthread_attr *);
687 void	_thr_stack_cleanup(void);
688 void	_thr_sem_init(void);
689 void	_thr_free(struct pthread *, struct pthread *);
690 void	_thr_gc(struct pthread *);
691 void	_thread_cleanupspecific(void);
692 void	_thread_dump_info(void);
693 void	_thread_printf(int, const char *, ...) __printflike(2, 3);
694 void	_thread_vprintf(int, const char *, va_list);
695 void	_thr_spinlock_init(void);
696 int	_thr_cancel_enter(struct pthread *);
697 void	_thr_cancel_leave(struct pthread *, int);
698 void	_thr_signal_block(struct pthread *);
699 void	_thr_signal_unblock(struct pthread *);
700 void	_thr_signal_init(void);
701 void	_thr_signal_deinit(void);
702 int	_thr_send_sig(struct pthread *, int sig);
703 void	_thr_list_init(void);
704 void	_thr_hash_add(struct pthread *);
705 void	_thr_hash_remove(struct pthread *);
706 struct pthread *_thr_hash_find(struct pthread *);
707 void	_thr_link(struct pthread *curthread, struct pthread *thread);
708 void	_thr_unlink(struct pthread *curthread, struct pthread *thread);
709 void	_thr_suspend_check(struct pthread *curthread);
710 void	_thr_assert_lock_level(void) __dead2;
711 void	_thr_ast(struct pthread *);
712 int	_thr_get_tid(void);
713 void	_thr_report_creation(struct pthread *curthread,
714 			   struct pthread *newthread);
715 void	_thr_report_death(struct pthread *curthread);
716 void	_thread_bp_create(void);
717 void	_thread_bp_death(void);
718 int	_thr_getscheduler(lwpid_t, int *, struct sched_param *);
719 int	_thr_setscheduler(lwpid_t, int, const struct sched_param *);
720 int	_thr_set_sched_other_prio(struct pthread *, int);
721 int	_rtp_to_schedparam(const struct rtprio *rtp, int *policy,
722 	    struct sched_param *param);
723 int	_schedparam_to_rtp(int policy, const struct sched_param *param,
724 	    struct rtprio *rtp);
725 int	_umtx_sleep_err(volatile const int *, int, int);
726 int	_umtx_wakeup_err(volatile const int *, int);
727 
728 /* #include <fcntl.h> */
729 #ifdef  _SYS_FCNTL_H_
730 int     __sys_fcntl(int, int, ...);
731 int     __sys_open(const char *, int, ...);
732 int     __sys_openat(int, const char *, int, ...);
733 #endif
734 
735 /* #include <sys/ioctl.h> */
736 #ifdef _SYS_IOCTL_H_
737 int	__sys_ioctl(int, unsigned long, ...);
738 #endif
739 
740 /* #include <sched.h> */
741 #ifdef	_SCHED_H_
742 int	__sys_sched_yield(void);
743 #endif
744 
745 /* #include <signal.h> */
746 #ifdef _SIGNAL_H_
747 int	__sys_kill(pid_t, int);
748 int     __sys_sigaction(int, const struct sigaction *, struct sigaction *);
749 int     __sys_sigpending(sigset_t *);
750 int     __sys_sigprocmask(int, const sigset_t *, sigset_t *);
751 int     __sys_sigsuspend(const sigset_t *);
752 int     __sys_sigreturn(ucontext_t *);
753 int     __sys_sigaltstack(const stack_t *, stack_t *);
754 #endif
755 
756 /* #include <time.h> */
757 #ifdef	_TIME_H_
758 int	__sys_nanosleep(const struct timespec *, struct timespec *);
759 #endif
760 
761 /* #include <unistd.h> */
762 #ifdef  _UNISTD_H_
763 int	__sys_close(int);
764 int	__sys_execve(const char *, char * const *, char * const *);
765 pid_t	__sys_getpid(void);
766 ssize_t __sys_read(int, void *, size_t);
767 ssize_t __sys_write(int, const void *, size_t);
768 void	__sys_exit(int);
769 int	__sys_sigwait(const sigset_t *, int *);
770 int	__sys_sigtimedwait(const sigset_t *, siginfo_t *,
771 		const struct timespec *);
772 int	__sys_sigwaitinfo(const sigset_t *set, siginfo_t *info);
773 #endif
774 
775 static inline int
776 _thr_isthreaded(void)
777 {
778 	return (__isthreaded != 0);
779 }
780 
781 static inline int
782 _thr_is_inited(void)
783 {
784 	return (_thr_initial != NULL);
785 }
786 
787 static inline void
788 _thr_check_init(void)
789 {
790 	if (_thr_initial == NULL)
791 		_libpthread_init(NULL);
792 }
793 
794 struct dl_phdr_info;
795 void __pthread_cxa_finalize(struct dl_phdr_info *phdr_info);
796 
797 /*
798  * Used in low-level init to directly call libc's malloc implementation
799  * instead of a potentially third-party malloc implementation.  Required
800  * for bootstrapping pthreads.
801  */
802 void *__malloc(size_t bytes);
803 void __free(void *ptr);
804 
805 __END_DECLS
806 
807 #endif  /* !_THR_PRIVATE_H */
808