1 /*
2  * Copyright (C) 2005 Daniel M. Eischen <deischen@freebsd.org>
3  * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
4  * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
5  *
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  * $FreeBSD: head/lib/libthr/thread/thr_private.h 217706 2010-08-23 $
29  */
30 
31 /*
32  * Private thread definitions for the uthread kernel.
33  */
34 
35 #ifndef _THR_PRIVATE_H
36 #define _THR_PRIVATE_H
37 
38 /*
39  * Include files.
40  */
41 #include <sys/types.h>
42 #include <sys/time.h>
43 #include <sys/cdefs.h>
44 #include <sys/queue.h>
45 #include <sys/rtprio.h>
46 #include <machine/atomic.h>
47 #include <machine/cpumask.h>
48 #include <errno.h>
49 #include <limits.h>
50 #include <signal.h>
51 #include <sys/sched.h>
52 #include <stdarg.h>
53 #include <unistd.h>
54 #include <pthread.h>
55 #include <pthread_np.h>
56 
57 #include "pthread_md.h"
58 #include "thr_umtx.h"
59 #include "thread_db.h"
60 
61 /* Signal to do cancellation */
62 #define	SIGCANCEL		32
63 
64 /*
65  * Kernel fatal error handler macro.
66  */
67 #define PANIC(args...)		_thread_exitf(__FILE__, __LINE__, ##args)
68 
69 /* Output debug messages like this: */
70 #define stdout_debug(args...)	_thread_printf(STDOUT_FILENO, ##args)
71 #define stderr_debug(args...)	_thread_printf(STDERR_FILENO, ##args)
72 
73 #ifdef _PTHREADS_INVARIANTS
74 #define THR_ASSERT(cond, msg) do {	\
75 	if (__predict_false(!(cond)))	\
76 		PANIC(msg);		\
77 } while (0)
78 #else
79 #define THR_ASSERT(cond, msg)
80 #endif
81 
82 #ifdef PIC
83 #define STATIC_LIB_REQUIRE(name)
84 #else
85 #define STATIC_LIB_REQUIRE(name)	__asm(".globl " #name)
86 #endif
87 
88 TAILQ_HEAD(thread_head, pthread)	thread_head;
89 TAILQ_HEAD(atfork_head, pthread_atfork)	atfork_head;
90 
91 #define	TIMESPEC_ADD(dst, src, val)				\
92 	do {							\
93 		(dst)->tv_sec = (src)->tv_sec + (val)->tv_sec;	\
94 		(dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \
95 		if ((dst)->tv_nsec >= 1000000000) {		\
96 			(dst)->tv_sec++;			\
97 			(dst)->tv_nsec -= 1000000000;		\
98 		}						\
99 	} while (0)
100 
101 #define	TIMESPEC_SUB(dst, src, val)				\
102 	do {							\
103 		(dst)->tv_sec = (src)->tv_sec - (val)->tv_sec;	\
104 		(dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
105 		if ((dst)->tv_nsec < 0) {			\
106 			(dst)->tv_sec--;			\
107 			(dst)->tv_nsec += 1000000000;		\
108 		}						\
109 	} while (0)
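/*
 * Usage sketch (illustrative only; the variable names are hypothetical):
 * a timed-wait path can turn a relative timeout into an absolute
 * deadline with TIMESPEC_ADD, assuming both operands are normalized.
 *
 *	struct timespec now, rel, abstime;
 *
 *	clock_gettime(CLOCK_REALTIME, &now);
 *	rel.tv_sec = 1;
 *	rel.tv_nsec = 500000000;
 *	TIMESPEC_ADD(&abstime, &now, &rel);
 */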
110 
111 struct pthread_mutex {
112 	/*
113 	 * Lock for accesses to this structure.
114 	 */
115 	volatile umtx_t			m_lock;
116 	enum pthread_mutextype		m_type;
117 	int				m_protocol;
118 	TAILQ_HEAD(mutex_head, pthread)	m_queue;
119 	struct pthread			*m_owner;
120 	long				m_flags;
121 	int				m_count;
122 	int				m_refcount;
123 
124 	/*
	 * Used for priority inheritance and protection.
126 	 *
	 *   m_prio       - For priority inheritance, the highest active
128 	 *		    priority (threads locking the mutex inherit
129 	 *		    this priority).  For priority protection, the
130 	 *		    ceiling priority of this mutex.
	 *   m_saved_prio - The mutex owner's inherited priority before
132 	 *		    taking the mutex, restored when the owner
133 	 *		    unlocks the mutex.
134 	 */
135 	int				m_prio;
136 	int				m_saved_prio;
137 
138 	/*
139 	 * Link for list of all mutexes a thread currently owns.
140 	 */
141 	TAILQ_ENTRY(pthread_mutex)	m_qe;
142 };
143 
144 #define TAILQ_INITIALIZER	{ NULL, NULL }
145 
146 #define PTHREAD_MUTEX_STATIC_INITIALIZER   \
147 	{0, PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \
	NULL, MUTEX_FLAGS_PRIVATE, 0, 0, 0, 0, TAILQ_INITIALIZER }
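/*
 * A minimal sketch of how the initializer might be used (the variable
 * name is hypothetical, not a library symbol):
 *
 *	static struct pthread_mutex example_lock =
 *	    PTHREAD_MUTEX_STATIC_INITIALIZER;
 */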
149 /*
150  * Flags for mutexes.
151  */
152 #define MUTEX_FLAGS_PRIVATE	0x01
153 #define MUTEX_FLAGS_INITED	0x02
154 
155 struct pthread_mutex_attr {
156 	enum pthread_mutextype	m_type;
157 	int			m_protocol;
158 	int			m_ceiling;
159 	int			m_flags;
160 };
161 
162 #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
163 	{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }
164 
165 struct pthread_cond {
166 	/*
167 	 * Lock for accesses to this structure.
168 	 */
169 	volatile umtx_t	c_lock;
170 	volatile umtx_t	c_seqno;
171 	volatile int	c_waiters;
172 	volatile int	c_broadcast;
173 	int		c_pshared;
174 	int		c_clockid;
175 };
176 
177 struct pthread_cond_attr {
178 	int		c_pshared;
179 	int		c_clockid;
180 };
181 
182 /*
183  * Flags for condition variables.
184  */
185 #define COND_FLAGS_PRIVATE	0x01
186 #define COND_FLAGS_INITED	0x02
187 
188 struct pthread_barrier {
189 	volatile umtx_t	b_lock;
190 	volatile umtx_t	b_cycle;
191 	volatile int	b_count;
192 	volatile int	b_waiters;
193 };
194 
195 struct pthread_barrierattr {
196 	int		pshared;
197 };
198 
199 struct pthread_spinlock {
200 	volatile umtx_t	s_lock;
201 };
202 
203 /*
204  * Cleanup definitions.
205  */
206 struct pthread_cleanup {
207 	struct pthread_cleanup	*next;
208 	void			(*routine)(void *);
209 	void			*routine_arg;
210 	int			onstack;
211 };
212 
213 #define	THR_CLEANUP_PUSH(td, func, arg) {		\
214 	struct pthread_cleanup __cup;			\
215 							\
216 	__cup.routine = func;				\
217 	__cup.routine_arg = arg;			\
218 	__cup.onstack = 1;				\
219 	__cup.next = (td)->cleanup;			\
220 	(td)->cleanup = &__cup;
221 
222 #define	THR_CLEANUP_POP(td, exec)			\
223 	(td)->cleanup = __cup.next;			\
224 	if ((exec) != 0)				\
225 		__cup.routine(__cup.routine_arg);	\
226 }
227 
228 struct pthread_atfork {
229 	TAILQ_ENTRY(pthread_atfork) qe;
230 	void (*prepare)(void);
231 	void (*parent)(void);
232 	void (*child)(void);
233 };
234 
235 struct pthread_attr {
236 	int	sched_policy;
237 	int	sched_inherit;
238 	int	prio;
239 	int	suspend;
240 #define	THR_STACK_USER		0x100	/* 0xFF reserved for <pthread.h> */
241 #define THR_CPUMASK		0x200	/* cpumask is valid */
242 	int	flags;
243 	void	*stackaddr_attr;
244 	size_t	stacksize_attr;
245 	size_t	guardsize_attr;
246 	cpumask_t cpumask;
247 };
248 
249 /*
250  * Thread creation state attributes.
251  */
252 #define THR_CREATE_RUNNING		0
253 #define THR_CREATE_SUSPENDED		1
254 
255 /*
256  * Miscellaneous definitions.
257  */
258 #define THR_STACK_DEFAULT		(sizeof(void *) / 4 * 1024 * 1024)
259 
260 /*
261  * Maximum size of initial thread's stack.  This perhaps deserves to be larger
262  * than the stacks of other threads, since many applications are likely to run
263  * almost entirely on this stack.
264  */
265 #define THR_STACK_INITIAL		(THR_STACK_DEFAULT * 2)
266 
267 /*
268  * Define the different priority ranges.  All applications have thread
269  * priorities constrained within 0-31.  The threads library raises the
270  * priority when delivering signals in order to ensure that signal
271  * delivery happens (from the POSIX spec) "as soon as possible".
272  * In the future, the threads library will also be able to map specific
273  * threads into real-time (cooperating) processes or kernel threads.
274  * The RT and SIGNAL priorities will be used internally and added to
275  * thread base priorities so that the scheduling queue can handle both
276  * normal and RT priority threads with and without signal handling.
277  *
278  * The approach taken is that, within each class, signal delivery
279  * always has priority over thread execution.
280  */
281 #define THR_DEFAULT_PRIORITY		0
282 #define THR_MUTEX_CEIL_PRIORITY		31	/* dummy */
283 
284 /*
285  * Time slice period in microseconds.
286  */
287 #define TIMESLICE_USEC				20000
288 
289 struct pthread_rwlockattr {
290 	int		pshared;
291 };
292 
293 struct pthread_rwlock {
294 	pthread_mutex_t	lock;	/* monitor lock */
295 	pthread_cond_t	read_signal;
296 	pthread_cond_t	write_signal;
297 	int		state;	/* 0 = idle  >0 = # of readers  -1 = writer */
298 	int		blocked_writers;
299 };
300 
301 /*
302  * Thread states.
303  */
304 enum pthread_state {
305 	PS_RUNNING,
306 	PS_DEAD
307 };
308 
309 struct pthread_specific_elem {
310 	const void	*data;
311 	int		seqno;
312 };
313 
314 struct pthread_key {
315 	volatile int	allocated;
316 	volatile int	count;
317 	int		seqno;
318 	void		(*destructor)(void *);
319 };
320 
321 /*
322  * Thread structure.
323  */
324 struct pthread {
325 	/*
326 	 * Magic value to help recognize a valid thread structure
327 	 * from an invalid one:
328 	 */
329 #define	THR_MAGIC		((u_int32_t) 0xd09ba115)
330 	u_int32_t		magic;
331 	char			*name;
332 	u_int64_t		uniqueid; /* for gdb */
333 
334 	/*
335 	 * Lock for accesses to this thread structure.
336 	 */
337 	umtx_t			lock;
338 
	/* Thread has terminated in the kernel; written by the kernel. */
340 	long			terminated;
341 
342 	/* Kernel thread id. */
343 	lwpid_t			tid;
344 
345 	/* Internal condition variable cycle number. */
346 	umtx_t			cycle;
347 
	/* Number of low-level locks the thread currently holds. */
349 	int			locklevel;
350 
351 	/*
352 	 * Set to non-zero when this thread has entered a critical
353 	 * region.  We allow for recursive entries into critical regions.
354 	 */
355 	int			critical_count;
356 
357 	/* Signal blocked counter. */
358 	int			sigblock;
359 
360 	/* Queue entry for list of all threads. */
361 	TAILQ_ENTRY(pthread)	tle;	/* link for all threads in process */
362 
363 	/* Queue entry for GC lists. */
364 	TAILQ_ENTRY(pthread)	gcle;
365 
366 	/* Hash queue entry. */
367 	LIST_ENTRY(pthread)	hle;
368 
	/* Thread's reference count. */
370 	int			refcount;
371 
372 	/*
373 	 * Thread start routine, argument, stack pointer and thread
374 	 * attributes.
375 	 */
376 	void			*(*start_routine)(void *);
377 	void			*arg;
378 	struct pthread_attr	attr;
379 
380 	/*
381 	 * Cancelability flags
382 	 */
383 #define	THR_CANCEL_DISABLE		0x0001
384 #define	THR_CANCEL_EXITING		0x0002
385 #define THR_CANCEL_AT_POINT		0x0004
386 #define THR_CANCEL_NEEDED		0x0008
387 #define	SHOULD_CANCEL(val)					\
388 	(((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |	\
389 		 THR_CANCEL_NEEDED)) == THR_CANCEL_NEEDED)
390 
391 #define	SHOULD_ASYNC_CANCEL(val)				\
392 	(((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |	\
393 		 THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT)) ==	\
394 		 (THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT))
395 	int			cancelflags;
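
	/*
	 * Illustrative sketch (not the library's actual cancellation
	 * path): a cancellation point might test the flags roughly
	 * like this:
	 *
	 *	if (SHOULD_CANCEL(curthread->cancelflags))
	 *		pthread_exit(PTHREAD_CANCELED);
	 */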
396 
397 	/* Thread temporary signal mask. */
398 	sigset_t		sigmask;
399 
400 	/* Thread state: */
401 	umtx_t			state;
402 
403 	/*
	 * Error variable used instead of errno for internal library calls.
405 	 */
406 	int			error;
407 
408 	/*
409 	 * The joiner is the thread that is joining to this thread.  The
410 	 * join status keeps track of a join operation to another thread.
411 	 */
412 	struct pthread		*joiner;
413 
414 	/*
415 	 * The current thread can belong to a priority mutex queue.
416 	 * This is the synchronization queue link.
417 	 */
418 	TAILQ_ENTRY(pthread)	sqe;
419 
420 	/* Miscellaneous flags; only set with scheduling lock held. */
421 	int			flags;
422 #define THR_FLAGS_PRIVATE	0x0001
423 #define	THR_FLAGS_NEED_SUSPEND	0x0002	/* thread should be suspended */
424 #define	THR_FLAGS_SUSPENDED	0x0004	/* thread is suspended */
425 
426 	/* Thread list flags; only set with thread list lock held. */
427 	int			tlflags;
428 #define	TLFLAGS_GC_SAFE		0x0001	/* thread safe for cleaning */
429 #define	TLFLAGS_IN_TDLIST	0x0002	/* thread in all thread list */
430 #define	TLFLAGS_IN_GCLIST	0x0004	/* thread in gc list */
431 #define	TLFLAGS_DETACHED	0x0008	/* thread is detached */
432 
433 	/*
	 * Base priority is the user-settable and retrievable priority
435 	 * of the thread.  It is only affected by explicit calls to
436 	 * set thread priority and upon thread creation via a thread
437 	 * attribute or default priority.
438 	 */
439 	char			base_priority;
440 
441 	/*
442 	 * Inherited priority is the priority a thread inherits by
	 * taking a priority inheritance or protection mutex.  It
444 	 * is not affected by base priority changes.  Inherited
445 	 * priority defaults to and remains 0 until a mutex is taken
446 	 * that is being waited on by any other thread whose priority
447 	 * is non-zero.
448 	 */
449 	char			inherited_priority;
450 
451 	/*
	 * Active priority is always the maximum of the thread's base
453 	 * priority and inherited priority.  When there is a change
454 	 * in either the base or inherited priority, the active
455 	 * priority must be recalculated.
456 	 */
457 	char			active_priority;
458 
459 	/* Number of priority ceiling or protection mutexes owned. */
460 	int			priority_mutex_count;
461 
462 	/* Queue of currently owned simple type mutexes. */
463 	TAILQ_HEAD(, pthread_mutex)	mutexq;
464 
465 	void				*ret;
466 	struct pthread_specific_elem	*specific;
467 	int				specific_data_count;
468 
	/* Number of rwlock read locks held. */
470 	int			rdlock_count;
471 
472 	/*
473 	 * Current locks bitmap for rtld. */
474 	int			rtld_bits;
475 
476 	/* Thread control block */
477 	struct tls_tcb		*tcb;
478 
	/* Linked list of cleanup handlers. */
480 	struct pthread_cleanup	*cleanup;
481 
482 	/* Enable event reporting */
483 	int			report_events;
484 
485 	/* Event mask */
486 	td_thr_events_t		event_mask;
487 
488 	/* Event */
489 	td_event_msg_t		event_buf;
490 };
491 
492 #define	THR_IN_CRITICAL(thrd)				\
493 	(((thrd)->locklevel > 0) ||			\
494 	((thrd)->critical_count > 0))
495 
496 #define THR_UMTX_TRYLOCK(thrd, lck)			\
497 	_thr_umtx_trylock((lck), (thrd)->tid)
498 
499 #define	THR_UMTX_LOCK(thrd, lck)			\
500 	_thr_umtx_lock((lck), (thrd)->tid)
501 
502 #define	THR_UMTX_TIMEDLOCK(thrd, lck, timo)		\
503 	_thr_umtx_timedlock((lck), (thrd)->tid, (timo))
504 
505 #define	THR_UMTX_UNLOCK(thrd, lck)			\
506 	_thr_umtx_unlock((lck), (thrd)->tid)
507 
508 #define	THR_LOCK_ACQUIRE(thrd, lck)			\
509 do {							\
510 	(thrd)->locklevel++;				\
511 	_thr_umtx_lock((lck), (thrd)->tid);		\
512 } while (0)
513 
514 #ifdef	_PTHREADS_INVARIANTS
515 #define	THR_ASSERT_LOCKLEVEL(thrd)			\
516 do {							\
517 	if (__predict_false((thrd)->locklevel <= 0))	\
518 		_thr_assert_lock_level();		\
519 } while (0)
520 #else
521 #define THR_ASSERT_LOCKLEVEL(thrd)
522 #endif
523 
524 #define	THR_LOCK_RELEASE(thrd, lck)			\
525 do {							\
526 	THR_ASSERT_LOCKLEVEL(thrd);			\
527 	_thr_umtx_unlock((lck), (thrd)->tid);		\
528 	(thrd)->locklevel--;				\
529 	_thr_ast(thrd);					\
530 } while (0)
531 
532 #define	THR_LOCK(curthrd)		THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock)
533 #define	THR_UNLOCK(curthrd)		THR_LOCK_RELEASE(curthrd, &(curthrd)->lock)
534 #define	THR_THREAD_LOCK(curthrd, thr)	THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
535 #define	THR_THREAD_UNLOCK(curthrd, thr)	THR_LOCK_RELEASE(curthrd, &(thr)->lock)
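
/*
 * A sketch of the intended locking pattern (an assumption drawn from
 * the macros above, not a verbatim excerpt): suspending another thread
 * takes that thread's lock, sets the flag and releases the lock, at
 * which point THR_LOCK_RELEASE also runs _thr_ast().
 *
 *	THR_THREAD_LOCK(curthread, thread);
 *	thread->flags |= THR_FLAGS_NEED_SUSPEND;
 *	THR_THREAD_UNLOCK(curthread, thread);
 */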
536 
537 #define	THREAD_LIST_LOCK(curthrd)				\
538 do {								\
539 	THR_LOCK_ACQUIRE((curthrd), &_thr_list_lock);		\
540 } while (0)
541 
542 #define	THREAD_LIST_UNLOCK(curthrd)				\
543 do {								\
544 	THR_LOCK_RELEASE((curthrd), &_thr_list_lock);		\
545 } while (0)
546 
547 /*
 * Macros to insert/remove threads into/from the all-threads list and
 * the gc list.
550  */
551 #define	THR_LIST_ADD(thrd) do {					\
552 	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) {	\
553 		TAILQ_INSERT_HEAD(&_thread_list, thrd, tle);	\
554 		_thr_hash_add(thrd);				\
555 		(thrd)->tlflags |= TLFLAGS_IN_TDLIST;		\
556 	}							\
557 } while (0)
558 #define	THR_LIST_REMOVE(thrd) do {				\
559 	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) {	\
560 		TAILQ_REMOVE(&_thread_list, thrd, tle);		\
561 		_thr_hash_remove(thrd);				\
562 		(thrd)->tlflags &= ~TLFLAGS_IN_TDLIST;		\
563 	}							\
564 } while (0)
565 #define	THR_GCLIST_ADD(thrd) do {				\
566 	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) {	\
567 		TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
568 		(thrd)->tlflags |= TLFLAGS_IN_GCLIST;		\
569 		_thr_gc_count++;					\
570 	}							\
571 } while (0)
572 #define	THR_GCLIST_REMOVE(thrd) do {				\
573 	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) {	\
574 		TAILQ_REMOVE(&_thread_gc_list, thrd, gcle);	\
575 		(thrd)->tlflags &= ~TLFLAGS_IN_GCLIST;		\
576 		_thr_gc_count--;					\
577 	}							\
578 } while (0)
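
/*
 * Sketch of the expected calling convention (an assumption, not a
 * verbatim excerpt): the list macros are used with the thread-list
 * lock held, e.g. when linking a newly created thread:
 *
 *	THREAD_LIST_LOCK(curthread);
 *	THR_LIST_ADD(newthread);
 *	_thread_active_threads++;
 *	THREAD_LIST_UNLOCK(curthread);
 */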
579 
580 #define GC_NEEDED()	(_thr_gc_count >= 5)
581 
582 #define	THR_IN_SYNCQ(thrd)	(((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
583 
584 #define SHOULD_REPORT_EVENT(curthr, e)			\
585 	(curthr->report_events &&			\
586 	 (((curthr)->event_mask | _thread_event_mask ) & e) != 0)
587 
588 #if !defined(_LIBC_PRIVATE_H_) && !defined(_STDIO_H_)
589 extern int __isthreaded;
590 #endif
591 
592 /*
593  * Global variables for the pthread library.
594  */
595 extern char		*_usrstack;
596 extern struct pthread	*_thr_initial;
597 
598 /* For debugger */
599 extern int		_libthread_xu_debug;
600 extern int		_thread_event_mask;
601 extern struct pthread	*_thread_last_event;
602 
603 /* List of all threads */
604 extern struct thread_head	_thread_list;
605 
606 /* List of threads needing GC */
607 extern struct thread_head	_thread_gc_list;
608 
609 extern int	_thread_active_threads;
610 
611 extern struct	atfork_head	_thr_atfork_list;
612 extern umtx_t	_thr_atfork_lock;
613 
614 /* Default thread attributes */
615 extern struct pthread_attr _pthread_attr_default;
616 
617 /* Default mutex attributes */
618 extern struct pthread_mutex_attr _pthread_mutexattr_default;
619 
620 /* Default condition variable attributes */
621 extern struct pthread_cond_attr _pthread_condattr_default;
622 
623 extern pid_t	_thr_pid;
624 extern size_t	_thr_guard_default;
625 extern size_t	_thr_stack_default;
626 extern size_t	_thr_stack_initial;
627 extern int	_thr_page_size;
628 extern int	_thr_gc_count;
629 
630 extern umtx_t	_mutex_static_lock;
631 extern umtx_t	_cond_static_lock;
632 extern umtx_t	_rwlock_static_lock;
633 extern umtx_t	_keytable_lock;
634 extern umtx_t	_thr_list_lock;
635 extern umtx_t	_thr_event_lock;
636 
637 /*
638  * Function prototype definitions.
639  */
640 __BEGIN_DECLS
641 int	_thr_setthreaded(int);
642 int	_mutex_cv_lock(pthread_mutex_t *, int count);
643 int	_mutex_cv_unlock(pthread_mutex_t *, int *count);
644 void	_mutex_notify_priochange(struct pthread *, struct pthread *, int);
645 int	_mutex_reinit(pthread_mutex_t *);
646 void	_mutex_fork(struct pthread *curthread);
647 void	_mutex_unlock_private(struct pthread *);
648 void	_libpthread_init(struct pthread *);
649 struct pthread *_thr_alloc(struct pthread *);
650 void	_thread_exit(const char *, int, const char *) __dead2;
651 void	_thread_exitf(const char *, int, const char *, ...) __dead2
652 	    __printflike(3, 4);
653 void	_thr_exit_cleanup(void);
654 int	_thr_ref_add(struct pthread *, struct pthread *, int);
655 void	_thr_ref_delete(struct pthread *, struct pthread *);
656 void	_thr_ref_delete_unlocked(struct pthread *, struct pthread *);
657 int	_thr_find_thread(struct pthread *, struct pthread *, int);
658 void	_thr_rtld_init(void);
659 void	_thr_rtld_fini(void);
660 int	_thr_stack_alloc(struct pthread_attr *);
661 void	_thr_stack_free(struct pthread_attr *);
662 void	_thr_stack_cleanup(void);
663 void	_thr_free(struct pthread *, struct pthread *);
664 void	_thr_gc(struct pthread *);
665 void	_thread_cleanupspecific(void);
666 void	_thread_dump_info(void);
667 void	_thread_printf(int, const char *, ...) __printflike(2, 3);
668 void	_thread_vprintf(int, const char *, va_list);
669 void	_thr_spinlock_init(void);
670 int	_thr_cancel_enter(struct pthread *);
671 void	_thr_cancel_leave(struct pthread *, int);
672 void	_thr_signal_block(struct pthread *);
673 void	_thr_signal_unblock(struct pthread *);
674 void	_thr_signal_init(void);
675 void	_thr_signal_deinit(void);
676 int	_thr_send_sig(struct pthread *, int sig);
677 void	_thr_list_init(void);
678 void	_thr_hash_add(struct pthread *);
679 void	_thr_hash_remove(struct pthread *);
680 struct pthread *_thr_hash_find(struct pthread *);
681 void	_thr_link(struct pthread *curthread, struct pthread *thread);
682 void	_thr_unlink(struct pthread *curthread, struct pthread *thread);
683 void	_thr_suspend_check(struct pthread *curthread);
684 void	_thr_assert_lock_level(void) __dead2;
685 void	_thr_ast(struct pthread *);
686 int	_thr_get_tid(void);
687 void	_thr_report_creation(struct pthread *curthread,
688 			   struct pthread *newthread);
689 void	_thr_report_death(struct pthread *curthread);
690 void	_thread_bp_create(void);
691 void	_thread_bp_death(void);
692 int	_thr_getscheduler(lwpid_t, int *, struct sched_param *);
693 int	_thr_setscheduler(lwpid_t, int, const struct sched_param *);
694 int	_thr_set_sched_other_prio(struct pthread *, int);
695 int	_rtp_to_schedparam(const struct rtprio *rtp, int *policy,
696 	    struct sched_param *param);
697 int	_schedparam_to_rtp(int policy, const struct sched_param *param,
698 	    struct rtprio *rtp);
699 int	_umtx_sleep_err(volatile const int *, int, int);
700 int	_umtx_wakeup_err(volatile const int *, int);
701 
702 /* #include <fcntl.h> */
703 #ifdef  _SYS_FCNTL_H_
704 int     __sys_fcntl(int, int, ...);
705 int     __sys_open(const char *, int, ...);
706 int     __sys_openat(int, const char *, int, ...);
707 #endif
708 
709 /* #include <sys/ioctl.h> */
710 #ifdef _SYS_IOCTL_H_
711 int	__sys_ioctl(int, unsigned long, ...);
712 #endif
713 
/* #include <sched.h> */
715 #ifdef	_SCHED_H_
716 int	__sys_sched_yield(void);
717 #endif
718 
719 /* #include <signal.h> */
720 #ifdef _SIGNAL_H_
721 int	__sys_kill(pid_t, int);
722 int     __sys_sigaction(int, const struct sigaction *, struct sigaction *);
723 int     __sys_sigpending(sigset_t *);
724 int     __sys_sigprocmask(int, const sigset_t *, sigset_t *);
725 int     __sys_sigsuspend(const sigset_t *);
726 int     __sys_sigreturn(ucontext_t *);
727 int     __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
728 #endif
729 
730 /* #include <time.h> */
731 #ifdef	_TIME_H_
732 int	__sys_nanosleep(const struct timespec *, struct timespec *);
733 #endif
734 
735 /* #include <unistd.h> */
736 #ifdef  _UNISTD_H_
737 int	__sys_close(int);
738 int	__sys_execve(const char *, char * const *, char * const *);
739 pid_t	__sys_getpid(void);
740 ssize_t __sys_read(int, void *, size_t);
741 ssize_t __sys_write(int, const void *, size_t);
742 void	__sys_exit(int);
743 int	__sys_sigwait(const sigset_t *, int *);
744 int	__sys_sigtimedwait(const sigset_t *, siginfo_t *,
745 		const struct timespec *);
746 int	__sys_sigwaitinfo(const sigset_t *set, siginfo_t *info);
747 #endif
748 
749 static inline int
750 _thr_isthreaded(void)
751 {
752 	return (__isthreaded != 0);
753 }
754 
755 static inline int
756 _thr_is_inited(void)
757 {
758 	return (_thr_initial != NULL);
759 }
760 
761 static inline void
762 _thr_check_init(void)
763 {
764 	if (_thr_initial == NULL)
765 		_libpthread_init(NULL);
766 }
767 
768 struct dl_phdr_info;
769 void __pthread_cxa_finalize(struct dl_phdr_info *phdr_info);
770 
771 __END_DECLS
772 
773 #endif  /* !_THR_PRIVATE_H */
774