1 /*
2  * Copyright (C) 2005 Daniel M. Eischen <deischen@freebsd.org>
3  * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
4  * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
5  *
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  * $FreeBSD: head/lib/libthr/thread/thr_private.h 217706 2010-08-23 $
29  */
30 
31 /*
32  * Private thread definitions for the uthread kernel.
33  */
34 
35 #ifndef _THR_PRIVATE_H
36 #define _THR_PRIVATE_H
37 
38 /*
39  * Include files.
40  */
41 #include <sys/types.h>
42 #include <sys/time.h>
43 #include <sys/cdefs.h>
44 #include <sys/queue.h>
45 #include <sys/rtprio.h>
46 #include <machine/atomic.h>
47 #include <errno.h>
48 #include <limits.h>
49 #include <signal.h>
50 #include <sys/sched.h>
51 #include <stdarg.h>
52 #include <unistd.h>
53 #include <pthread.h>
54 #include <pthread_np.h>
55 
56 #include "pthread_md.h"
57 #include "thr_umtx.h"
58 #include "thread_db.h"
59 
60 /* Signal to do cancellation */
61 #define	SIGCANCEL		32
62 
63 /*
64  * Kernel fatal error handler macro.
65  */
66 #define PANIC(args...)		_thread_exitf(__FILE__, __LINE__, ##args)
67 
68 /* Output debug messages like this: */
69 #define stdout_debug(args...)	_thread_printf(STDOUT_FILENO, ##args)
70 #define stderr_debug(args...)	_thread_printf(STDERR_FILENO, ##args)
71 
72 #ifdef _PTHREADS_INVARIANTS
73 #define THR_ASSERT(cond, msg) do {	\
74 	if (__predict_false(!(cond)))	\
75 		PANIC(msg);		\
76 } while (0)
77 #else
78 #define THR_ASSERT(cond, msg)
79 #endif
80 
81 #ifdef PIC
82 #define STATIC_LIB_REQUIRE(name)
83 #else
84 #define STATIC_LIB_REQUIRE(name)	__asm(".globl " #name)
85 #endif
86 
TAILQ_HEAD(thread_head, pthread);
TAILQ_HEAD(atfork_head, pthread_atfork);
89 
90 #define	TIMESPEC_ADD(dst, src, val)				\
91 	do {							\
92 		(dst)->tv_sec = (src)->tv_sec + (val)->tv_sec;	\
93 		(dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \
94 		if ((dst)->tv_nsec >= 1000000000) {		\
95 			(dst)->tv_sec++;			\
96 			(dst)->tv_nsec -= 1000000000;		\
97 		}						\
98 	} while (0)
99 
100 #define	TIMESPEC_SUB(dst, src, val)				\
101 	do {							\
102 		(dst)->tv_sec = (src)->tv_sec - (val)->tv_sec;	\
103 		(dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
104 		if ((dst)->tv_nsec < 0) {			\
105 			(dst)->tv_sec--;			\
106 			(dst)->tv_nsec += 1000000000;		\
107 		}						\
108 	} while (0)
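
/*
 * Illustrative sketch (not part of the implementation): building an
 * absolute wakeup time as "now + timeout".  Both macros assume the
 * operands are already normalized (0 <= tv_nsec < 1000000000), so a
 * single carry or borrow suffices.
 *
 *	struct timespec abstime, now, timeout = { 1, 500000000 };
 *
 *	clock_gettime(CLOCK_REALTIME, &now);
 *	TIMESPEC_ADD(&abstime, &now, &timeout);
 *	TIMESPEC_SUB(&timeout, &abstime, &now);	(recovers the relative value)
 */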
109 
110 struct pthread_mutex {
111 	/*
112 	 * Lock for accesses to this structure.
113 	 */
114 	volatile umtx_t			m_lock;
115 	enum pthread_mutextype		m_type;
116 	int				m_protocol;
117 	TAILQ_HEAD(mutex_head, pthread)	m_queue;
118 	struct pthread			*m_owner;
119 	long				m_flags;
120 	int				m_count;
121 	int				m_refcount;
122 
123 	/*
	 * Used for priority inheritance and protection.
	 *
	 *   m_prio       - For priority inheritance, the highest active
	 *		    priority (threads locking the mutex inherit
	 *		    this priority).  For priority protection, the
	 *		    ceiling priority of this mutex.
	 *   m_saved_prio - the mutex owner's inherited priority before
	 *		    taking the mutex, restored when the owner
	 *		    unlocks the mutex.
133 	 */
134 	int				m_prio;
135 	int				m_saved_prio;
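
	/*
	 * Illustrative example (not normative): with PTHREAD_PRIO_INHERIT,
	 * an owner whose inherited priority was 0 saves that 0 in
	 * m_saved_prio when it takes the mutex.  If a priority-10 thread
	 * later blocks on the mutex, m_prio is raised to 10 and the owner
	 * runs at inherited priority 10; on unlock the owner's inherited
	 * priority is restored from m_saved_prio.
	 */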
136 
137 	/*
138 	 * Link for list of all mutexes a thread currently owns.
139 	 */
140 	TAILQ_ENTRY(pthread_mutex)	m_qe;
141 };
142 
143 #define TAILQ_INITIALIZER	{ NULL, NULL }
144 
#define PTHREAD_MUTEX_STATIC_INITIALIZER   \
	{0, PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \
	NULL, MUTEX_FLAGS_PRIVATE, 0, 0, 0, 0, TAILQ_INITIALIZER }

148 /*
149  * Flags for mutexes.
150  */
151 #define MUTEX_FLAGS_PRIVATE	0x01
152 #define MUTEX_FLAGS_INITED	0x02
153 
154 struct pthread_mutex_attr {
155 	enum pthread_mutextype	m_type;
156 	int			m_protocol;
157 	int			m_ceiling;
158 	int			m_flags;
159 };
160 
161 #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
162 	{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }
163 
164 struct pthread_cond {
165 	/*
166 	 * Lock for accesses to this structure.
167 	 */
168 	volatile umtx_t	c_lock;
169 	volatile umtx_t	c_seqno;
170 	volatile int	c_waiters;
171 	volatile int	c_broadcast;
172 	int		c_pshared;
173 	int		c_clockid;
174 };
175 
176 struct pthread_cond_attr {
177 	int		c_pshared;
178 	int		c_clockid;
179 };
180 
181 /*
182  * Flags for condition variables.
183  */
184 #define COND_FLAGS_PRIVATE	0x01
185 #define COND_FLAGS_INITED	0x02
186 
187 struct pthread_barrier {
188 	volatile umtx_t	b_lock;
189 	volatile umtx_t	b_cycle;
190 	volatile int	b_count;
191 	volatile int	b_waiters;
192 };
193 
194 struct pthread_barrierattr {
195 	int		pshared;
196 };
197 
198 struct pthread_spinlock {
199 	volatile umtx_t	s_lock;
200 };
201 
202 /*
203  * Cleanup definitions.
204  */
205 struct pthread_cleanup {
206 	struct pthread_cleanup	*next;
207 	void			(*routine)(void *);
208 	void			*routine_arg;
209 	int			onstack;
210 };
211 
212 #define	THR_CLEANUP_PUSH(td, func, arg) {		\
213 	struct pthread_cleanup __cup;			\
214 							\
215 	__cup.routine = func;				\
216 	__cup.routine_arg = arg;			\
217 	__cup.onstack = 1;				\
218 	__cup.next = (td)->cleanup;			\
219 	(td)->cleanup = &__cup;
220 
221 #define	THR_CLEANUP_POP(td, exec)			\
222 	(td)->cleanup = __cup.next;			\
223 	if ((exec) != 0)				\
224 		__cup.routine(__cup.routine_arg);	\
225 }
226 
227 struct pthread_atfork {
228 	TAILQ_ENTRY(pthread_atfork) qe;
229 	void (*prepare)(void);
230 	void (*parent)(void);
231 	void (*child)(void);
232 };
233 
234 struct pthread_attr {
235 	int	sched_policy;
236 	int	sched_inherit;
237 	int	prio;
238 	int	suspend;
239 #define	THR_STACK_USER		0x100	/* 0xFF reserved for <pthread.h> */
240 	int	flags;
241 	void	*stackaddr_attr;
242 	size_t	stacksize_attr;
243 	size_t	guardsize_attr;
244 };
245 
246 /*
247  * Thread creation state attributes.
248  */
249 #define THR_CREATE_RUNNING		0
250 #define THR_CREATE_SUSPENDED		1
251 
252 /*
253  * Miscellaneous definitions.
254  */
/* 1MB on 32-bit (ILP32) platforms, 2MB on 64-bit (LP64) platforms. */
#define THR_STACK_DEFAULT		(sizeof(void *) / 4 * 1024 * 1024)
256 
257 /*
258  * Maximum size of initial thread's stack.  This perhaps deserves to be larger
259  * than the stacks of other threads, since many applications are likely to run
260  * almost entirely on this stack.
261  */
262 #define THR_STACK_INITIAL		(THR_STACK_DEFAULT * 2)
263 
264 /*
265  * Define the different priority ranges.  All applications have thread
266  * priorities constrained within 0-31.  The threads library raises the
267  * priority when delivering signals in order to ensure that signal
268  * delivery happens (from the POSIX spec) "as soon as possible".
269  * In the future, the threads library will also be able to map specific
270  * threads into real-time (cooperating) processes or kernel threads.
271  * The RT and SIGNAL priorities will be used internally and added to
272  * thread base priorities so that the scheduling queue can handle both
273  * normal and RT priority threads with and without signal handling.
274  *
275  * The approach taken is that, within each class, signal delivery
276  * always has priority over thread execution.
277  */
278 #define THR_DEFAULT_PRIORITY		0
279 #define THR_MUTEX_CEIL_PRIORITY		31	/* dummy */
280 
281 /*
282  * Time slice period in microseconds.
283  */
284 #define TIMESLICE_USEC				20000
285 
286 struct pthread_rwlockattr {
287 	int		pshared;
288 };
289 
290 struct pthread_rwlock {
291 	pthread_mutex_t	lock;	/* monitor lock */
292 	pthread_cond_t	read_signal;
293 	pthread_cond_t	write_signal;
294 	int		state;	/* 0 = idle  >0 = # of readers  -1 = writer */
295 	int		blocked_writers;
296 };
297 
298 /*
299  * Thread states.
300  */
301 enum pthread_state {
302 	PS_RUNNING,
303 	PS_DEAD
304 };
305 
306 struct pthread_specific_elem {
307 	const void	*data;
308 	int		seqno;
309 };
310 
311 struct pthread_key {
312 	volatile int	allocated;
313 	volatile int	count;
314 	int		seqno;
315 	void		(*destructor)(void *);
316 };
317 
318 /*
319  * Thread structure.
320  */
321 struct pthread {
322 	/*
323 	 * Magic value to help recognize a valid thread structure
324 	 * from an invalid one:
325 	 */
326 #define	THR_MAGIC		((u_int32_t) 0xd09ba115)
327 	u_int32_t		magic;
328 	char			*name;
329 	u_int64_t		uniqueid; /* for gdb */
330 
331 	/*
332 	 * Lock for accesses to this thread structure.
333 	 */
334 	umtx_t			lock;
335 
336 	/* Thread is terminated in kernel, written by kernel. */
337 	long			terminated;
338 
339 	/* Kernel thread id. */
340 	lwpid_t			tid;
341 
342 	/* Internal condition variable cycle number. */
343 	umtx_t			cycle;
344 
	/* How many low-level locks the thread currently holds. */
346 	int			locklevel;
347 
348 	/*
349 	 * Set to non-zero when this thread has entered a critical
350 	 * region.  We allow for recursive entries into critical regions.
351 	 */
352 	int			critical_count;
353 
354 	/* Signal blocked counter. */
355 	int			sigblock;
356 
357 	/* Queue entry for list of all threads. */
358 	TAILQ_ENTRY(pthread)	tle;	/* link for all threads in process */
359 
360 	/* Queue entry for GC lists. */
361 	TAILQ_ENTRY(pthread)	gcle;
362 
363 	/* Hash queue entry. */
364 	LIST_ENTRY(pthread)	hle;
365 
366 	/* Threads reference count. */
367 	int			refcount;
368 
369 	/*
370 	 * Thread start routine, argument, stack pointer and thread
371 	 * attributes.
372 	 */
373 	void			*(*start_routine)(void *);
374 	void			*arg;
375 	struct pthread_attr	attr;
376 
377 	/*
378 	 * Cancelability flags
379 	 */
380 #define	THR_CANCEL_DISABLE		0x0001
381 #define	THR_CANCEL_EXITING		0x0002
382 #define THR_CANCEL_AT_POINT		0x0004
383 #define THR_CANCEL_NEEDED		0x0008
384 #define	SHOULD_CANCEL(val)					\
385 	(((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |	\
386 		 THR_CANCEL_NEEDED)) == THR_CANCEL_NEEDED)
387 
388 #define	SHOULD_ASYNC_CANCEL(val)				\
389 	(((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |	\
390 		 THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT)) ==	\
391 		 (THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT))
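
	/*
	 * Illustrative reading of the tests above: a thread whose
	 * cancelflags contain only THR_CANCEL_NEEDED satisfies
	 * SHOULD_CANCEL(); setting THR_CANCEL_DISABLE or
	 * THR_CANCEL_EXITING defeats it.  SHOULD_ASYNC_CANCEL()
	 * additionally requires THR_CANCEL_AT_POINT, i.e. the thread
	 * is currently inside a cancellation point.
	 */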
392 	int			cancelflags;
393 
394 	/* Thread temporary signal mask. */
395 	sigset_t		sigmask;
396 
397 	/* Thread state: */
398 	umtx_t			state;
399 
400 	/*
	 * Error variable used instead of errno by internal library routines.
402 	 */
403 	int			error;
404 
405 	/*
	 * The joiner is the thread that is joining this thread.  The
407 	 * join status keeps track of a join operation to another thread.
408 	 */
409 	struct pthread		*joiner;
410 
411 	/*
412 	 * The current thread can belong to a priority mutex queue.
413 	 * This is the synchronization queue link.
414 	 */
415 	TAILQ_ENTRY(pthread)	sqe;
416 
417 	/* Miscellaneous flags; only set with scheduling lock held. */
418 	int			flags;
419 #define THR_FLAGS_PRIVATE	0x0001
420 #define	THR_FLAGS_NEED_SUSPEND	0x0002	/* thread should be suspended */
421 #define	THR_FLAGS_SUSPENDED	0x0004	/* thread is suspended */
422 
423 	/* Thread list flags; only set with thread list lock held. */
424 	int			tlflags;
425 #define	TLFLAGS_GC_SAFE		0x0001	/* thread safe for cleaning */
426 #define	TLFLAGS_IN_TDLIST	0x0002	/* thread in all thread list */
427 #define	TLFLAGS_IN_GCLIST	0x0004	/* thread in gc list */
428 #define	TLFLAGS_DETACHED	0x0008	/* thread is detached */
429 
430 	/*
	 * Base priority is the user-settable and retrievable priority
432 	 * of the thread.  It is only affected by explicit calls to
433 	 * set thread priority and upon thread creation via a thread
434 	 * attribute or default priority.
435 	 */
436 	char			base_priority;
437 
438 	/*
439 	 * Inherited priority is the priority a thread inherits by
	 * taking a priority inheritance or protection mutex.  It
441 	 * is not affected by base priority changes.  Inherited
442 	 * priority defaults to and remains 0 until a mutex is taken
443 	 * that is being waited on by any other thread whose priority
444 	 * is non-zero.
445 	 */
446 	char			inherited_priority;
447 
448 	/*
	 * Active priority is always the maximum of the thread's base
450 	 * priority and inherited priority.  When there is a change
451 	 * in either the base or inherited priority, the active
452 	 * priority must be recalculated.
453 	 */
454 	char			active_priority;
455 
456 	/* Number of priority ceiling or protection mutexes owned. */
457 	int			priority_mutex_count;
458 
459 	/* Queue of currently owned simple type mutexes. */
460 	TAILQ_HEAD(, pthread_mutex)	mutexq;
461 
462 	void				*ret;
463 	struct pthread_specific_elem	*specific;
464 	int				specific_data_count;
465 
	/* Number of rwlock read locks held. */
467 	int			rdlock_count;
468 
469 	/*
470 	 * Current locks bitmap for rtld. */
471 	int			rtld_bits;
472 
473 	/* Thread control block */
474 	struct tls_tcb		*tcb;
475 
	/* Cleanup handler linked list. */
477 	struct pthread_cleanup	*cleanup;
478 
479 	/* Enable event reporting */
480 	int			report_events;
481 
482 	/* Event mask */
483 	td_thr_events_t		event_mask;
484 
485 	/* Event */
486 	td_event_msg_t		event_buf;
487 };
488 
489 #define	THR_IN_CRITICAL(thrd)				\
490 	(((thrd)->locklevel > 0) ||			\
491 	((thrd)->critical_count > 0))
492 
493 #define THR_UMTX_TRYLOCK(thrd, lck)			\
494 	_thr_umtx_trylock((lck), (thrd)->tid)
495 
496 #define	THR_UMTX_LOCK(thrd, lck)			\
497 	_thr_umtx_lock((lck), (thrd)->tid)
498 
499 #define	THR_UMTX_TIMEDLOCK(thrd, lck, timo)		\
500 	_thr_umtx_timedlock((lck), (thrd)->tid, (timo))
501 
502 #define	THR_UMTX_UNLOCK(thrd, lck)			\
503 	_thr_umtx_unlock((lck), (thrd)->tid)
504 
505 #define	THR_LOCK_ACQUIRE(thrd, lck)			\
506 do {							\
507 	(thrd)->locklevel++;				\
508 	_thr_umtx_lock((lck), (thrd)->tid);		\
509 } while (0)
510 
511 #ifdef	_PTHREADS_INVARIANTS
512 #define	THR_ASSERT_LOCKLEVEL(thrd)			\
513 do {							\
514 	if (__predict_false((thrd)->locklevel <= 0))	\
515 		_thr_assert_lock_level();		\
516 } while (0)
517 #else
518 #define THR_ASSERT_LOCKLEVEL(thrd)
519 #endif
520 
521 #define	THR_LOCK_RELEASE(thrd, lck)			\
522 do {							\
523 	THR_ASSERT_LOCKLEVEL(thrd);			\
524 	_thr_umtx_unlock((lck), (thrd)->tid);		\
525 	(thrd)->locklevel--;				\
526 	_thr_ast(thrd);					\
527 } while (0)
528 
529 #define	THR_LOCK(curthrd)		THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock)
530 #define	THR_UNLOCK(curthrd)		THR_LOCK_RELEASE(curthrd, &(curthrd)->lock)
531 #define	THR_THREAD_LOCK(curthrd, thr)	THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
532 #define	THR_THREAD_UNLOCK(curthrd, thr)	THR_LOCK_RELEASE(curthrd, &(thr)->lock)
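
/*
 * Hypothetical usage sketch (target is illustrative, curthread stands for
 * the calling thread's struct pthread *): the first argument always names
 * the caller, which does the locklevel accounting and runs deferred work
 * via _thr_ast() on release, while the second argument selects whose lock
 * is taken.
 *
 *	THR_THREAD_LOCK(curthread, target);
 *	target->flags |= THR_FLAGS_NEED_SUSPEND;
 *	THR_THREAD_UNLOCK(curthread, target);
 */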
533 
534 #define	THREAD_LIST_LOCK(curthrd)				\
535 do {								\
536 	THR_LOCK_ACQUIRE((curthrd), &_thr_list_lock);		\
537 } while (0)
538 
539 #define	THREAD_LIST_UNLOCK(curthrd)				\
540 do {								\
541 	THR_LOCK_RELEASE((curthrd), &_thr_list_lock);		\
542 } while (0)
543 
544 /*
545  * Macros to insert/remove threads to the all thread list and
546  * the gc list.
547  */
548 #define	THR_LIST_ADD(thrd) do {					\
549 	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) {	\
550 		TAILQ_INSERT_HEAD(&_thread_list, thrd, tle);	\
551 		_thr_hash_add(thrd);				\
552 		(thrd)->tlflags |= TLFLAGS_IN_TDLIST;		\
553 	}							\
554 } while (0)
555 #define	THR_LIST_REMOVE(thrd) do {				\
556 	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) {	\
557 		TAILQ_REMOVE(&_thread_list, thrd, tle);		\
558 		_thr_hash_remove(thrd);				\
559 		(thrd)->tlflags &= ~TLFLAGS_IN_TDLIST;		\
560 	}							\
561 } while (0)
562 #define	THR_GCLIST_ADD(thrd) do {				\
563 	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) {	\
564 		TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
565 		(thrd)->tlflags |= TLFLAGS_IN_GCLIST;		\
566 		_thr_gc_count++;					\
567 	}							\
568 } while (0)
569 #define	THR_GCLIST_REMOVE(thrd) do {				\
570 	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) {	\
571 		TAILQ_REMOVE(&_thread_gc_list, thrd, gcle);	\
572 		(thrd)->tlflags &= ~TLFLAGS_IN_GCLIST;		\
573 		_thr_gc_count--;					\
574 	}							\
575 } while (0)
576 
577 #define GC_NEEDED()	(_thr_gc_count >= 5)
578 
579 #define	THR_IN_SYNCQ(thrd)	(((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
580 
#define SHOULD_REPORT_EVENT(curthr, e)			\
	((curthr)->report_events &&			\
	 (((curthr)->event_mask | _thread_event_mask) & (e)) != 0)
584 
585 #if !defined(_LIBC_PRIVATE_H_) && !defined(_STDIO_H_)
586 extern int __isthreaded;
587 #endif
588 
589 /*
590  * Global variables for the pthread library.
591  */
592 extern char		*_usrstack;
593 extern struct pthread	*_thr_initial;
594 
595 /* For debugger */
596 extern int		_libthread_xu_debug;
597 extern int		_thread_event_mask;
598 extern struct pthread	*_thread_last_event;
599 
600 /* List of all threads */
601 extern struct thread_head	_thread_list;
602 
603 /* List of threads needing GC */
604 extern struct thread_head	_thread_gc_list;
605 
606 extern int	_thread_active_threads;
607 
608 extern struct	atfork_head	_thr_atfork_list;
609 extern umtx_t	_thr_atfork_lock;
610 
611 /* Default thread attributes */
612 extern struct pthread_attr _pthread_attr_default;
613 
614 /* Default mutex attributes */
615 extern struct pthread_mutex_attr _pthread_mutexattr_default;
616 
617 /* Default condition variable attributes */
618 extern struct pthread_cond_attr _pthread_condattr_default;
619 
620 extern pid_t	_thr_pid;
621 extern size_t	_thr_guard_default;
622 extern size_t	_thr_stack_default;
623 extern size_t	_thr_stack_initial;
624 extern int	_thr_page_size;
625 extern int	_thr_gc_count;
626 
627 extern umtx_t	_mutex_static_lock;
628 extern umtx_t	_cond_static_lock;
629 extern umtx_t	_rwlock_static_lock;
630 extern umtx_t	_keytable_lock;
631 extern umtx_t	_thr_list_lock;
632 extern umtx_t	_thr_event_lock;
633 
634 /*
635  * Function prototype definitions.
636  */
637 __BEGIN_DECLS
638 int	_thr_setthreaded(int);
639 int	_mutex_cv_lock(pthread_mutex_t *, int count);
640 int	_mutex_cv_unlock(pthread_mutex_t *, int *count);
641 void	_mutex_notify_priochange(struct pthread *, struct pthread *, int);
642 int	_mutex_reinit(pthread_mutex_t *);
643 void	_mutex_fork(struct pthread *curthread);
644 void	_mutex_unlock_private(struct pthread *);
645 void	_libpthread_init(struct pthread *);
646 struct pthread *_thr_alloc(struct pthread *);
647 void	_thread_exit(const char *, int, const char *) __dead2;
648 void	_thread_exitf(const char *, int, const char *, ...) __dead2
649 	    __printflike(3, 4);
650 void	_thr_exit_cleanup(void);
651 int	_thr_ref_add(struct pthread *, struct pthread *, int);
652 void	_thr_ref_delete(struct pthread *, struct pthread *);
653 void	_thr_ref_delete_unlocked(struct pthread *, struct pthread *);
654 int	_thr_find_thread(struct pthread *, struct pthread *, int);
655 void	_thr_rtld_init(void);
656 void	_thr_rtld_fini(void);
657 int	_thr_stack_alloc(struct pthread_attr *);
658 void	_thr_stack_free(struct pthread_attr *);
659 void	_thr_stack_cleanup(void);
660 void	_thr_free(struct pthread *, struct pthread *);
661 void	_thr_gc(struct pthread *);
662 void	_thread_cleanupspecific(void);
663 void	_thread_dump_info(void);
664 void	_thread_printf(int, const char *, ...) __printflike(2, 3);
665 void	_thread_vprintf(int, const char *, va_list);
666 void	_thr_spinlock_init(void);
667 int	_thr_cancel_enter(struct pthread *);
668 void	_thr_cancel_leave(struct pthread *, int);
669 void	_thr_signal_block(struct pthread *);
670 void	_thr_signal_unblock(struct pthread *);
671 void	_thr_signal_init(void);
672 void	_thr_signal_deinit(void);
673 int	_thr_send_sig(struct pthread *, int sig);
674 void	_thr_list_init(void);
675 void	_thr_hash_add(struct pthread *);
676 void	_thr_hash_remove(struct pthread *);
677 struct pthread *_thr_hash_find(struct pthread *);
678 void	_thr_link(struct pthread *curthread, struct pthread *thread);
679 void	_thr_unlink(struct pthread *curthread, struct pthread *thread);
680 void	_thr_suspend_check(struct pthread *curthread);
681 void	_thr_assert_lock_level(void) __dead2;
682 void	_thr_ast(struct pthread *);
683 int	_thr_get_tid(void);
684 void	_thr_report_creation(struct pthread *curthread,
685 			   struct pthread *newthread);
686 void	_thr_report_death(struct pthread *curthread);
687 void	_thread_bp_create(void);
688 void	_thread_bp_death(void);
689 int	_thr_getscheduler(lwpid_t, int *, struct sched_param *);
690 int	_thr_setscheduler(lwpid_t, int, const struct sched_param *);
691 int	_thr_set_sched_other_prio(struct pthread *, int);
692 int	_rtp_to_schedparam(const struct rtprio *rtp, int *policy,
693 	    struct sched_param *param);
694 int	_schedparam_to_rtp(int policy, const struct sched_param *param,
695 	    struct rtprio *rtp);
696 int	_umtx_sleep_err(volatile const int *, int, int);
697 int	_umtx_wakeup_err(volatile const int *, int);
698 
699 /* #include <fcntl.h> */
700 #ifdef  _SYS_FCNTL_H_
701 int     __sys_fcntl(int, int, ...);
702 int     __sys_open(const char *, int, ...);
703 int     __sys_openat(int, const char *, int, ...);
704 #endif
705 
706 /* #include <sys/ioctl.h> */
707 #ifdef _SYS_IOCTL_H_
708 int	__sys_ioctl(int, unsigned long, ...);
709 #endif
710 
/* #include <sched.h> */
712 #ifdef	_SCHED_H_
713 int	__sys_sched_yield(void);
714 #endif
715 
716 /* #include <signal.h> */
717 #ifdef _SIGNAL_H_
718 int	__sys_kill(pid_t, int);
719 int     __sys_sigaction(int, const struct sigaction *, struct sigaction *);
720 int     __sys_sigpending(sigset_t *);
721 int     __sys_sigprocmask(int, const sigset_t *, sigset_t *);
722 int     __sys_sigsuspend(const sigset_t *);
723 int     __sys_sigreturn(ucontext_t *);
724 int     __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
725 #endif
726 
727 /* #include <time.h> */
728 #ifdef	_TIME_H_
729 int	__sys_nanosleep(const struct timespec *, struct timespec *);
730 #endif
731 
732 /* #include <unistd.h> */
733 #ifdef  _UNISTD_H_
734 int	__sys_close(int);
735 int	__sys_execve(const char *, char * const *, char * const *);
736 pid_t	__sys_getpid(void);
737 ssize_t __sys_read(int, void *, size_t);
738 ssize_t __sys_write(int, const void *, size_t);
739 void	__sys_exit(int);
740 int	__sys_sigwait(const sigset_t *, int *);
741 int	__sys_sigtimedwait(const sigset_t *, siginfo_t *,
742 		const struct timespec *);
743 int	__sys_sigwaitinfo(const sigset_t *set, siginfo_t *info);
744 #endif
745 
746 static inline int
747 _thr_isthreaded(void)
748 {
749 	return (__isthreaded != 0);
750 }
751 
752 static inline int
753 _thr_is_inited(void)
754 {
755 	return (_thr_initial != NULL);
756 }
757 
758 static inline void
759 _thr_check_init(void)
760 {
761 	if (_thr_initial == NULL)
762 		_libpthread_init(NULL);
763 }
764 
765 struct dl_phdr_info;
766 void __pthread_cxa_finalize(struct dl_phdr_info *phdr_info);
767 
768 __END_DECLS
769 
770 #endif  /* !_THR_PRIVATE_H */
771