/*
 * Copyright (C) 2005 Daniel M. Eischen <deischen@freebsd.org>
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libthr/thread/thr_private.h 217706 2010-08-23 $
 */

/*
 * Private thread definitions for the uthread kernel.
 */

#ifndef _THR_PRIVATE_H
#define _THR_PRIVATE_H

/*
 * Include files.
 */
#include <sys/types.h>
#include <sys/time.h>
#include <sys/cdefs.h>
#include <sys/queue.h>
#include <sys/rtprio.h>
#include <sys/mman.h>
#include <machine/atomic.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <sys/cpumask.h>
#include <sys/sched.h>
#include <stdarg.h>
#include <unistd.h>
#include <pthread.h>
#include <pthread_np.h>

#if defined(_PTHREADS_DEBUGGING) || defined(_PTHREADS_DEBUGGING2)
void	_thr_log(const char *buf, size_t bytes);
#endif

#include "pthread_md.h"
#include "thr_umtx.h"
#include "thread_db.h"

/* Signal to do cancellation */
#define	SIGCANCEL		32

/*
 * Kernel fatal error handler macro.
 */
#define PANIC(args...)		_thread_exitf(__FILE__, __LINE__, ##args)

/* Output debug messages like this: */
#define stdout_debug(args...)	_thread_printf(STDOUT_FILENO, ##args)
#define stderr_debug(args...)	_thread_printf(STDERR_FILENO, ##args)

#ifdef _PTHREADS_INVARIANTS
#define THR_ASSERT(cond, msg) do {	\
	if (__predict_false(!(cond)))	\
		PANIC(msg);		\
} while (0)
#else
#define THR_ASSERT(cond, msg)
#endif
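
/*
 * Illustrative sketch of how the helpers above are typically used
 * inside the library (curthread and ret are hypothetical locals):
 * PANIC() forwards printf-style arguments to _thread_exitf(), and
 * THR_ASSERT() compiles away unless _PTHREADS_INVARIANTS is defined.
 *
 *	THR_ASSERT(curthread != NULL, "current thread is NULL");
 *	if (ret != 0)
 *		PANIC("cannot lock thread list (%d)", ret);
 *	stderr_debug("tid %d reached %s\n", curthread->tid, __func__);
 */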

#ifdef PIC
#define STATIC_LIB_REQUIRE(name)
#else
#define STATIC_LIB_REQUIRE(name)	__asm(".globl " #name)
#endif

typedef TAILQ_HEAD(thread_head, __pthread_s)	thread_head;
typedef TAILQ_HEAD(atfork_head, pthread_atfork)	atfork_head;

struct __pthread_mutex_s {
	/*
	 * Lock for accesses to this structure.
	 */
	volatile umtx_t			m_lock;
#ifdef _PTHREADS_DEBUGGING2
	int				m_lastop[32];
#endif
	enum pthread_mutextype		m_type;
	int				m_protocol;
	TAILQ_HEAD(mutex_head, __pthread_s) m_queue;
	struct __pthread_s		*m_owner;
	long				m_flags;
	int				m_count;
	int				m_refcount;

	/*
	 * Used for priority inheritance and protection.
	 *
	 *   m_prio       - For priority inheritance, the highest active
	 *		    priority (threads locking the mutex inherit
	 *		    this priority).  For priority protection, the
	 *		    ceiling priority of this mutex.
	 *   m_saved_prio - the mutex owner's inherited priority before
	 *		    taking the mutex, restored when the owner
	 *		    unlocks the mutex.
	 */
	int				m_prio;
	int				m_saved_prio;

	/*
	 * Link for list of all mutexes a thread currently owns.
	 */
	TAILQ_ENTRY(__pthread_mutex_s)	m_qe;
};

#define TAILQ_INITIALIZER	{ NULL, NULL }

#define PTHREAD_MUTEX_STATIC_INITIALIZER   		\
	{	.m_lock = 0,				\
		.m_type = PTHREAD_MUTEX_DEFAULT,	\
		.m_protocol = PTHREAD_PRIO_NONE,	\
		.m_queue = TAILQ_INITIALIZER,		\
		.m_flags = MUTEX_FLAGS_PRIVATE		\
	}
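
/*
 * Illustrative sketch: a library-private mutex could be defined with
 * the static initializer above, e.g.
 *
 *	static struct __pthread_mutex_s _example_lock =
 *	    PTHREAD_MUTEX_STATIC_INITIALIZER;
 *
 * (_example_lock is a hypothetical name, not an existing object.)
 */
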
/*
 * Flags for mutexes.
 */
#define MUTEX_FLAGS_PRIVATE	0x01
#define MUTEX_FLAGS_INITED	0x02

struct __pthread_mutexattr_s {
	enum pthread_mutextype	m_type;
	int			m_protocol;
	int			m_ceiling;
	int			m_flags;
};

#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
	{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }

struct cond_cancel_info;

struct __pthread_cond_s {
	/*
	 * Lock for accesses to this structure.
	 */
	volatile umtx_t	c_lock;
	volatile int	c_unused01;
	int		c_pshared;
	int		c_clockid;
	TAILQ_HEAD(, cond_cancel_info)	c_waitlist;
};

struct __pthread_condattr_s {
	int		c_pshared;
	int		c_clockid;
};

/*
 * Flags for condition variables.
 */
#define COND_FLAGS_PRIVATE	0x01
#define COND_FLAGS_INITED	0x02

struct __pthread_barrier_s {
	volatile umtx_t	b_lock;
	volatile umtx_t	b_cycle;
	volatile int	b_count;
	volatile int	b_waiters;
};

struct __pthread_barrierattr_s {
	int		pshared;
};

struct __pthread_spinlock_s {
	volatile umtx_t	s_lock;
};

/*
 * Cleanup definitions.
 */
struct pthread_cleanup {
	struct pthread_cleanup	*next;
	void			(*routine)(void *);
	void			*routine_arg;
	int			onstack;
};

#define	THR_CLEANUP_PUSH(td, func, arg) {		\
	struct pthread_cleanup __cup;			\
							\
	__cup.routine = func;				\
	__cup.routine_arg = arg;			\
	__cup.onstack = 1;				\
	__cup.next = (td)->cleanup;			\
	(td)->cleanup = &__cup;

#define	THR_CLEANUP_POP(td, exec)			\
	(td)->cleanup = __cup.next;			\
	if ((exec) != 0)				\
		__cup.routine(__cup.routine_arg);	\
}
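
/*
 * Illustrative sketch: THR_CLEANUP_PUSH() opens a brace that
 * THR_CLEANUP_POP() closes, so the two must always be paired within
 * the same function, e.g. (handler and lock names are hypothetical):
 *
 *	THR_CLEANUP_PUSH(curthread, _example_unlock_handler, &lock);
 *	... cancellable work ...
 *	THR_CLEANUP_POP(curthread, 1);	(nonzero exec runs the handler)
 */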

struct pthread_atfork {
	TAILQ_ENTRY(pthread_atfork) qe;
	void (*prepare)(void);
	void (*parent)(void);
	void (*child)(void);
};

struct __pthread_attr_s {
	int	sched_policy;
	int	sched_inherit;
	int	prio;
	int	suspend;
#define	THR_STACK_USER		0x100	/* 0xFF reserved for <pthread.h> */
#define THR_CPUMASK		0x200	/* cpumask is valid */
	int	flags;
	void	*stackaddr_attr;
	size_t	stacksize_attr;
	size_t	guardsize_attr;
	cpumask_t cpumask;
};

/*
 * Thread creation state attributes.
 */
#define THR_CREATE_RUNNING		0
#define THR_CREATE_SUSPENDED		1

/*
 * Miscellaneous definitions.
 */
#define THR_STACK_DEFAULT		(sizeof(void *) / 4 * 1024 * 1024)

/*
 * Maximum size of initial thread's stack.  This perhaps deserves to be larger
 * than the stacks of other threads, since many applications are likely to run
 * almost entirely on this stack.
 */
#define THR_STACK_INITIAL		(THR_STACK_DEFAULT * 2)
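
/*
 * Worked out: THR_STACK_DEFAULT is sizeof(void *) / 4 * 1 MB, i.e.
 * 1 MB on 32-bit platforms (4 / 4 = 1) and 2 MB on 64-bit platforms
 * (8 / 4 = 2), which makes THR_STACK_INITIAL 2 MB and 4 MB
 * respectively.
 */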

/*
 * Define the different priority ranges.  All applications have thread
 * priorities constrained within 0-31.  The threads library raises the
 * priority when delivering signals in order to ensure that signal
 * delivery happens (from the POSIX spec) "as soon as possible".
 * In the future, the threads library will also be able to map specific
 * threads into real-time (cooperating) processes or kernel threads.
 * The RT and SIGNAL priorities will be used internally and added to
 * thread base priorities so that the scheduling queue can handle both
 * normal and RT priority threads with and without signal handling.
 *
 * The approach taken is that, within each class, signal delivery
 * always has priority over thread execution.
 */
#define THR_DEFAULT_PRIORITY		0
#define THR_MUTEX_CEIL_PRIORITY		31	/* dummy */

/*
 * Time slice period in microseconds.
 */
#define TIMESLICE_USEC				20000

struct __pthread_rwlockattr_s {
	int		pshared;
};

struct __pthread_rwlock_s {
	pthread_mutex_t	lock;	/* monitor lock */
	pthread_cond_t	read_signal;
	pthread_cond_t	write_signal;
	int		state;	/* 0 = idle  >0 = # of readers  -1 = writer */
	int		blocked_writers;
};
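
/*
 * For example, state == 0 means the rwlock is idle, state == 3 means
 * three readers currently hold it, and state == -1 means it is held
 * by a single writer; blocked_writers counts writers waiting for the
 * lock.
 */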

/*
 * Thread states.
 */
enum pthread_state {
	PS_RUNNING,
	PS_DEAD
};

struct pthread_specific_elem {
	const void	*data;
	int		seqno;
};

struct pthread_key {
	volatile int	allocated;
	volatile int	count;
	int		seqno;
	void		(*destructor)(void *);
};

/*
 * Thread structure.
 */
struct __pthread_s {
	/*
	 * Magic value to help recognize a valid thread structure
	 * from an invalid one:
	 */
#define	THR_MAGIC		((u_int32_t) 0xd09ba115)
	u_int32_t		magic;
	char			*name;
	u_int64_t		uniqueid; /* for gdb */

	/*
	 * Lock for accesses to this thread structure.
	 */
	umtx_t			lock;

	/* Thread is terminated in kernel, written by kernel. */
	long			terminated;

	/* Kernel thread id. */
	lwpid_t			tid;

	/* Internal condition variable cycle number. */
	umtx_t			cycle;

	/* Number of low-level locks the thread currently holds. */
	int			locklevel;

	/*
	 * Set to non-zero when this thread has entered a critical
	 * region.  We allow for recursive entries into critical regions.
	 */
	int			critical_count;

	/* Signal blocked counter. */
	int			sigblock;

	/* Queue entry for list of all threads. */
	TAILQ_ENTRY(__pthread_s) tle;	/* link for all threads in process */

	/* Queue entry for GC lists. */
	TAILQ_ENTRY(__pthread_s) gcle;

	/* Hash queue entry. */
	LIST_ENTRY(__pthread_s)	hle;

	/* Thread's reference count. */
	int			refcount;

	/*
	 * Thread start routine, argument, stack pointer and thread
	 * attributes.
	 */
	void			*(*start_routine)(void *);
	void			*arg;
	struct __pthread_attr_s	attr;

	/*
	 * Cancelability flags
	 */
#define	THR_CANCEL_DISABLE		0x0001
#define	THR_CANCEL_EXITING		0x0002
#define THR_CANCEL_AT_POINT		0x0004
#define THR_CANCEL_NEEDED		0x0008
#define	SHOULD_CANCEL(val)					\
	(((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |	\
		 THR_CANCEL_NEEDED)) == THR_CANCEL_NEEDED)

#define	SHOULD_ASYNC_CANCEL(val)				\
	(((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |	\
		 THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT)) ==	\
		 (THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT))
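	/*
	 * For example, SHOULD_CANCEL(THR_CANCEL_NEEDED) is true, while
	 * SHOULD_CANCEL(THR_CANCEL_NEEDED | THR_CANCEL_DISABLE) is
	 * false; SHOULD_ASYNC_CANCEL() additionally requires
	 * THR_CANCEL_AT_POINT to be set.
	 */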
	int			cancelflags;

	/* Thread temporary signal mask. */
	sigset_t		sigmask;

	/* Thread state: */
	umtx_t			state;

	/*
	 * Error variable used instead of errno for internal library use.
	 */
	int			error;

	/*
	 * The joiner is the thread that is joining this thread.  The
	 * join status keeps track of a join operation to another thread.
	 */
	struct __pthread_s	*joiner;

	/*
	 * The current thread can belong to a priority mutex queue.
	 * This is the synchronization queue link.
	 */
	TAILQ_ENTRY(__pthread_s) sqe;

	/* Miscellaneous flags; only set with scheduling lock held. */
	int			flags;
#define THR_FLAGS_PRIVATE	0x0001
#define	THR_FLAGS_NEED_SUSPEND	0x0002	/* thread should be suspended */
#define	THR_FLAGS_SUSPENDED	0x0004	/* thread is suspended */

	/* Thread list flags; only set with thread list lock held. */
	int			tlflags;
#define	TLFLAGS_GC_SAFE		0x0001	/* thread safe for cleaning */
#define	TLFLAGS_IN_TDLIST	0x0002	/* thread in all thread list */
#define	TLFLAGS_IN_GCLIST	0x0004	/* thread in gc list */
#define	TLFLAGS_DETACHED	0x0008	/* thread is detached */

	/*
	 * Base priority is the user-settable and retrievable priority
	 * of the thread.  It is only affected by explicit calls to
	 * set thread priority and upon thread creation via a thread
	 * attribute or default priority.
	 */
	char			base_priority;

	/*
	 * Inherited priority is the priority a thread inherits by
	 * taking a priority inheritance or protection mutex.  It
	 * is not affected by base priority changes.  Inherited
	 * priority defaults to and remains 0 until a mutex is taken
	 * that is being waited on by any other thread whose priority
	 * is non-zero.
	 */
	char			inherited_priority;

	/*
	 * Active priority is always the maximum of the thread's base
	 * priority and inherited priority.  When there is a change
	 * in either the base or inherited priority, the active
	 * priority must be recalculated.
	 */
	char			active_priority;
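
	/*
	 * In other words, active_priority = MAX(base_priority,
	 * inherited_priority): e.g. a base-priority-5 thread holding a
	 * mutex contended by a priority-15 thread runs at 15 until it
	 * releases that mutex.
	 */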

	/* Number of priority ceiling or protection mutexes owned. */
	int			priority_mutex_count;

	/* Queue of currently owned simple type mutexes. */
	TAILQ_HEAD(, __pthread_mutex_s)	mutexq;

	void				*ret;
	struct pthread_specific_elem	*specific;
	int				specific_data_count;

	/* Number of rwlock read locks held. */
	int			rdlock_count;

	/* Current locks bitmap for rtld. */
	int			rtld_bits;

	/* Thread control block */
	struct tls_tcb		*tcb;

	/* Cleanup handler linked list. */
	struct pthread_cleanup	*cleanup;

	/* Enable event reporting */
	int			report_events;

	/* Event mask */
	td_thr_events_t		event_mask;

	/* Event */
	td_event_msg_t		event_buf;
};

#define	THR_IN_CRITICAL(thrd)				\
	(((thrd)->locklevel > 0) ||			\
	((thrd)->critical_count > 0))

/*
 * Internal temporary locks without suspend check
 */
#define THR_UMTX_TRYLOCK(thrd, lck)			\
	_thr_umtx_trylock((lck), (thrd)->tid, 1)

#define	THR_UMTX_LOCK(thrd, lck)			\
	_thr_umtx_lock((lck), (thrd)->tid, 1)

#define	THR_UMTX_TIMEDLOCK(thrd, lck, timo)		\
	_thr_umtx_timedlock((lck), (thrd)->tid, (timo), 1)

#define	THR_UMTX_UNLOCK(thrd, lck)			\
	_thr_umtx_unlock((lck), (thrd)->tid, 1)

/*
 * Internal locks without suspend check, used when the lock
 * state needs to persist (i.e. to help implement things
 * like pthread_mutex_lock()).  Non-temporary.
 */
#define THR_UMTX_TRYLOCK_PERSIST(thrd, lck)		\
	_thr_umtx_trylock((lck), (thrd)->tid, 0)

#define	THR_UMTX_LOCK_PERSIST(thrd, lck)		\
	_thr_umtx_lock((lck), (thrd)->tid, 0)

#define	THR_UMTX_TIMEDLOCK_PERSIST(thrd, lck, timo)	\
	_thr_umtx_timedlock((lck), (thrd)->tid, (timo), 0)

#define	THR_UMTX_UNLOCK_PERSIST(thrd, lck)		\
	_thr_umtx_unlock((lck), (thrd)->tid, 0)
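
/*
 * Illustrative sketch of the distinction: an internal helper takes the
 * temporary forms around a short critical section,
 *
 *	THR_UMTX_LOCK(curthread, &_some_internal_lock);
 *	... touch internal state ...
 *	THR_UMTX_UNLOCK(curthread, &_some_internal_lock);
 *
 * while code acquiring a lock on behalf of the application, so that it
 * is still held after the call returns (as in pthread_mutex_lock()),
 * uses the _PERSIST forms.  (_some_internal_lock is a hypothetical
 * umtx_t.)
 */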

/*
 * Internal temporary locks with suspend check
 */
#define	THR_LOCK_ACQUIRE(thrd, lck)			\
do {							\
	(thrd)->locklevel++;				\
	_thr_umtx_lock((lck), (thrd)->tid, 1);		\
} while (0)

#ifdef	_PTHREADS_INVARIANTS
#define	THR_ASSERT_LOCKLEVEL(thrd)			\
do {							\
	if (__predict_false((thrd)->locklevel <= 0))	\
		_thr_assert_lock_level();		\
} while (0)
#else
#define THR_ASSERT_LOCKLEVEL(thrd)
#endif

#define	THR_LOCK_RELEASE(thrd, lck)			\
do {							\
	THR_ASSERT_LOCKLEVEL(thrd);			\
	_thr_umtx_unlock((lck), (thrd)->tid, 1);	\
	(thrd)->locklevel--;				\
	_thr_ast(thrd);					\
} while (0)

#define	THR_LOCK(curthrd)		THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock)
#define	THR_UNLOCK(curthrd)		THR_LOCK_RELEASE(curthrd, &(curthrd)->lock)
#define	THR_THREAD_LOCK(curthrd, thr)	THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
#define	THR_THREAD_UNLOCK(curthrd, thr)	THR_LOCK_RELEASE(curthrd, &(thr)->lock)
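
/*
 * Illustrative sketch: these pair up and, unlike the raw THR_UMTX_*()
 * forms above, bump locklevel and run the deferred suspension check in
 * _thr_ast() on release (curthread and target are hypothetical locals):
 *
 *	THR_THREAD_LOCK(curthread, target);
 *	... inspect or modify target's state ...
 *	THR_THREAD_UNLOCK(curthread, target);
 */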

#define	THREAD_LIST_LOCK(curthrd)				\
do {								\
	THR_LOCK_ACQUIRE((curthrd), &_thr_list_lock);		\
} while (0)

#define	THREAD_LIST_UNLOCK(curthrd)				\
do {								\
	THR_LOCK_RELEASE((curthrd), &_thr_list_lock);		\
} while (0)

/*
 * Macros to insert/remove threads to the all thread list and
 * the gc list.
 */
#define	THR_LIST_ADD(thrd) do {					\
	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) {	\
		TAILQ_INSERT_HEAD(&_thread_list, thrd, tle);	\
		_thr_hash_add(thrd);				\
		(thrd)->tlflags |= TLFLAGS_IN_TDLIST;		\
	}							\
} while (0)
#define	THR_LIST_REMOVE(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) {	\
		TAILQ_REMOVE(&_thread_list, thrd, tle);		\
		_thr_hash_remove(thrd);				\
		(thrd)->tlflags &= ~TLFLAGS_IN_TDLIST;		\
	}							\
} while (0)
#define	THR_GCLIST_ADD(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) {	\
		TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
		(thrd)->tlflags |= TLFLAGS_IN_GCLIST;		\
		_thr_gc_count++;				\
	}							\
} while (0)
#define	THR_GCLIST_REMOVE(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) {	\
		TAILQ_REMOVE(&_thread_gc_list, thrd, gcle);	\
		(thrd)->tlflags &= ~TLFLAGS_IN_GCLIST;		\
		_thr_gc_count--;				\
	}							\
} while (0)

#define GC_NEEDED()	(_thr_gc_count >= 5)

#define	THR_IN_SYNCQ(thrd)	(((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)

#define SHOULD_REPORT_EVENT(curthr, e)			\
	(curthr->report_events &&			\
	 (((curthr)->event_mask | _thread_event_mask) & e) != 0)

#ifndef __LIBC_ISTHREADED_DECLARED
#define __LIBC_ISTHREADED_DECLARED
extern int __isthreaded;
#endif

/*
 * Global variables for the pthread library.
 */
extern char		*_usrstack;
extern pthread_t	_thr_initial;

/* For debugger */
extern int		_libthread_xu_debug;
extern int		_thread_event_mask;
extern pthread_t	_thread_last_event;

/* List of all threads */
extern struct thread_head	_thread_list;

/* List of threads needing GC */
extern struct thread_head	_thread_gc_list;

extern int	_thread_active_threads;

extern struct	atfork_head	_thr_atfork_list;
extern struct	atfork_head	_thr_atfork_kern_list;
extern umtx_t	_thr_atfork_lock;

/* Default thread attributes */
extern struct __pthread_attr_s _pthread_attr_default;

/* Default mutex attributes */
extern struct __pthread_mutexattr_s _pthread_mutexattr_default;

/* Default condition variable attributes */
extern struct __pthread_condattr_s _pthread_condattr_default;

extern pid_t	_thr_pid;
extern size_t	_thr_guard_default;
extern size_t	_thr_stack_default;
extern size_t	_thr_stack_initial;
extern int	_thr_page_size;
extern int	_thr_gc_count;

extern umtx_t	_mutex_static_lock;
extern umtx_t	_cond_static_lock;
extern umtx_t	_rwlock_static_lock;
extern umtx_t	_keytable_lock;
extern umtx_t	_thr_list_lock;
extern umtx_t	_thr_event_lock;

/*
 * Function prototype definitions.
 */
__BEGIN_DECLS
int	_thr_setthreaded(int);
int	_mutex_cv_lock(pthread_mutex_t *, int count);
int	_mutex_cv_unlock(pthread_mutex_t *, int *count);
void	_mutex_notify_priochange(pthread_t, pthread_t, int);
void	_mutex_fork(pthread_t, lwpid_t tid);
void	_mutex_unlock_private(pthread_t);

#if 0
int	_mutex_reinit(pthread_mutex_t *);
void	_cond_reinit(pthread_cond_t pcond);
void	_rwlock_reinit(pthread_rwlock_t prwlock);
#endif

void	_libpthread_init(pthread_t);
pthread_t _thr_alloc(pthread_t);
void	_thread_exit(const char *, int, const char *) __dead2;
void	_thread_exitf(const char *, int, const char *, ...) __dead2
	    __printflike(3, 4);
void	_thr_exit_cleanup(void);
void	_thr_atfork_kern(void (*prepare)(void), void (*parent)(void),
			void (*child)(void));
int	_thr_ref_add(pthread_t, pthread_t, int);
void	_thr_ref_delete(pthread_t, pthread_t);
void	_thr_ref_delete_unlocked(pthread_t, pthread_t);
int	_thr_find_thread(pthread_t, pthread_t, int);
void	_thr_malloc_init(void);
void	_rtld_setthreaded(int);
void	_thr_rtld_init(void);
void	_thr_rtld_fini(void);
int	_thr_stack_alloc(pthread_attr_t);
void	_thr_stack_free(pthread_attr_t);
void	_thr_stack_cleanup(void);
void	_thr_sem_init(void);
void	_thr_free(pthread_t, pthread_t);
void	_thr_gc(pthread_t);
void	_thread_cleanupspecific(void);
void	_thread_dump_info(void);
void	_thread_printf(int, const char *, ...) __printflike(2, 3);
void	_thread_vprintf(int, const char *, va_list);
void	_thr_spinlock_init(void);
int	_thr_cancel_enter(pthread_t);
void	_thr_cancel_leave(pthread_t, int);
void	_thr_signal_block(pthread_t);
void	_thr_signal_unblock(pthread_t);
void	_thr_signal_init(void);
void	_thr_signal_deinit(void);
int	_thr_send_sig(pthread_t, int sig);
void	_thr_list_init(void);
void	_thr_hash_add(pthread_t);
void	_thr_hash_remove(pthread_t);
pthread_t _thr_hash_find(pthread_t);
void	_thr_link(pthread_t, pthread_t);
void	_thr_unlink(pthread_t, pthread_t);
void	_thr_suspend_check(pthread_t);
void	_thr_assert_lock_level(void) __dead2;
void	_thr_ast(pthread_t);
int	_thr_get_tid(void);
void	_thr_report_creation(pthread_t, pthread_t);
void	_thr_report_death(pthread_t);
void	_thread_bp_create(void);
void	_thread_bp_death(void);
int	_thr_getscheduler(lwpid_t, int *, struct sched_param *);
int	_thr_setscheduler(lwpid_t, int, const struct sched_param *);
int	_thr_set_sched_other_prio(pthread_t, int);
int	_rtp_to_schedparam(const struct rtprio *rtp, int *policy,
	    struct sched_param *param);
int	_schedparam_to_rtp(int policy, const struct sched_param *param,
	    struct rtprio *rtp);
int	_umtx_sleep_err(volatile const int *, int, int);
int	_umtx_wakeup_err(volatile const int *, int);

/* #include <fcntl.h> */
#ifdef  _SYS_FCNTL_H_
int     __sys_fcntl(int, int, ...);
int     __sys_open(const char *, int, ...);
int     __sys_openat(int, const char *, int, ...);
#endif

/* #include <sys/ioctl.h> */
#ifdef _SYS_IOCTL_H_
int	__sys_ioctl(int, unsigned long, ...);
#endif

/* #include <sched.h> */
#ifdef	_SYS_SCHED_H_
int	__sys_sched_yield(void);
#endif

/* #include <signal.h> */
#ifdef _SIGNAL_H_
int     __sys_sigaction(int, const struct sigaction *, struct sigaction *);
int     __sys_sigprocmask(int, const sigset_t *, sigset_t *);
int     __sys_sigsuspend(const sigset_t *);
#endif

/* #include <time.h> */
#ifdef	_TIME_H_
int	__sys_nanosleep(const struct timespec *, struct timespec *);
int	__sys_clock_nanosleep(clockid_t, int, const struct timespec *,
		struct timespec *);
#endif

/* #include <unistd.h> */
#ifdef  _UNISTD_H_
int	__sys_close(int);
pid_t	__sys_getpid(void);
ssize_t __sys_read(int, void *, size_t);
ssize_t __sys_write(int, const void *, size_t);
int	__sys_sigtimedwait(const sigset_t *, siginfo_t *,
		const struct timespec *);
int	__sys_sigwaitinfo(const sigset_t *set, siginfo_t *info);
#endif

static inline int
_thr_isthreaded(void)
{
	return (__isthreaded != 0);
}

static inline int
_thr_is_inited(void)
{
	return (_thr_initial != NULL);
}

static inline void
_thr_check_init(void)
{
	if (_thr_initial == NULL)
		_libpthread_init(NULL);
}

struct dl_phdr_info;
void __pthread_cxa_finalize(struct dl_phdr_info *phdr_info);

/*
 * Used in low-level init to directly call libc's malloc implementation
 * instead of a potentially third-party malloc implementation.  Required
 * for bootstrapping pthreads.
 */
void *__malloc(size_t bytes);
void __free(void *ptr);

__END_DECLS

#endif  /* !_THR_PRIVATE_H */