/*
 * Copyright (C) 2005 Daniel M. Eischen <deischen@freebsd.org>
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libthr/thread/thr_private.h 217706 2010-08-23 $
 */

/*
 * Private thread definitions for the uthread kernel.
 */

#ifndef _THR_PRIVATE_H
#define _THR_PRIVATE_H

/*
 * Include files.
 */
#include <sys/types.h>
#include <sys/time.h>
#include <sys/cdefs.h>
#include <sys/queue.h>
#include <sys/rtprio.h>
#include <machine/atomic.h>
#include <machine/cpumask.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <sys/sched.h>
#include <stdarg.h>
#include <unistd.h>
#include <pthread.h>
#include <pthread_np.h>

#if defined(_PTHREADS_DEBUGGING) || defined(_PTHREADS_DEBUGGING2)
void	_thr_log(const char *buf, size_t bytes);
#endif

#include "pthread_md.h"
#include "thr_umtx.h"
#include "thread_db.h"

/* Signal to do cancellation */
#define	SIGCANCEL	32

/*
 * Kernel fatal error handler macro.
 */
#define PANIC(args...)		_thread_exitf(__FILE__, __LINE__, ##args)

/* Output debug messages like this: */
#define stdout_debug(args...)	_thread_printf(STDOUT_FILENO, ##args)
#define stderr_debug(args...)	_thread_printf(STDERR_FILENO, ##args)
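
/*
 * Illustrative note (not part of the original header): PANIC() terminates
 * the process via _thread_exitf() with the current file and line, while
 * stdout_debug() and stderr_debug() format directly to a file descriptor.
 * Hypothetical usage:
 *
 *	stderr_debug("thread %p: unexpected state %d\n", curthread, state);
 *	PANIC("mutex queue corrupted");
 */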

#ifdef _PTHREADS_INVARIANTS
#define THR_ASSERT(cond, msg) do {	\
	if (__predict_false(!(cond)))	\
		PANIC(msg);		\
} while (0)
#else
#define THR_ASSERT(cond, msg)
#endif

#ifdef PIC
#define STATIC_LIB_REQUIRE(name)
#else
#define STATIC_LIB_REQUIRE(name)	__asm(".globl " #name)
#endif

TAILQ_HEAD(thread_head, pthread)	thread_head;
TAILQ_HEAD(atfork_head, pthread_atfork)	atfork_head;

#define	TIMESPEC_ADD(dst, src, val)				\
	do {							\
		(dst)->tv_sec = (src)->tv_sec + (val)->tv_sec;	\
		(dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \
		if ((dst)->tv_nsec >= 1000000000) {		\
			(dst)->tv_sec++;			\
			(dst)->tv_nsec -= 1000000000;		\
		}						\
	} while (0)

#define	TIMESPEC_SUB(dst, src, val)				\
	do {							\
		(dst)->tv_sec = (src)->tv_sec - (val)->tv_sec;	\
		(dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
		if ((dst)->tv_nsec < 0) {			\
			(dst)->tv_sec--;			\
			(dst)->tv_nsec += 1000000000;		\
		}						\
	} while (0)

struct pthread_mutex {
	/*
	 * Lock for accesses to this structure.
	 */
	volatile umtx_t			m_lock;
#ifdef _PTHREADS_DEBUGGING2
	int				m_lastop[32];
#endif
	enum pthread_mutextype		m_type;
	int				m_protocol;
	TAILQ_HEAD(mutex_head, pthread)	m_queue;
	struct pthread			*m_owner;
	long				m_flags;
	int				m_count;
	int				m_refcount;

	/*
	 * Used for priority inheritance and protection.
	 *
	 * m_prio       - For priority inheritance, the highest active
	 *                priority (threads locking the mutex inherit
	 *                this priority).  For priority protection, the
	 *                ceiling priority of this mutex.
	 * m_saved_prio - The mutex owner's inherited priority before
	 *                taking the mutex, restored when the owner
	 *                unlocks the mutex.
	 */
	int				m_prio;
	int				m_saved_prio;

	/*
	 * Link for list of all mutexes a thread currently owns.
	 */
	TAILQ_ENTRY(pthread_mutex)	m_qe;
};

#define TAILQ_INITIALIZER	{ NULL, NULL }

#define PTHREAD_MUTEX_STATIC_INITIALIZER		\
	{ .m_lock = 0,					\
	  .m_type = PTHREAD_MUTEX_DEFAULT,		\
	  .m_protocol = PTHREAD_PRIO_NONE,		\
	  .m_queue = TAILQ_INITIALIZER,			\
	  .m_flags = MUTEX_FLAGS_PRIVATE		\
	}

/*
 * Flags for mutexes.
 */
#define MUTEX_FLAGS_PRIVATE	0x01
#define MUTEX_FLAGS_INITED	0x02

struct pthread_mutex_attr {
	enum pthread_mutextype	m_type;
	int			m_protocol;
	int			m_ceiling;
	int			m_flags;
};

#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
	{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }

struct cond_cancel_info;

struct pthread_cond {
	/*
	 * Lock for accesses to this structure.
	 */
	volatile umtx_t	c_lock;
	volatile int	c_unused01;
	int		c_pshared;
	int		c_clockid;
	TAILQ_HEAD(, cond_cancel_info)	c_waitlist;
};

struct pthread_cond_attr {
	int		c_pshared;
	int		c_clockid;
};

/*
 * Flags for condition variables.
 */
#define COND_FLAGS_PRIVATE	0x01
#define COND_FLAGS_INITED	0x02

struct pthread_barrier {
	volatile umtx_t	b_lock;
	volatile umtx_t	b_cycle;
	volatile int	b_count;
	volatile int	b_waiters;
};

struct pthread_barrierattr {
	int		pshared;
};

struct pthread_spinlock {
	volatile umtx_t	s_lock;
};
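
/*
 * Illustrative sketch (not part of the original header): the TIMESPEC_ADD()
 * and TIMESPEC_SUB() macros defined above take pointers to struct timespec
 * and carry/borrow at most once, so tv_nsec stays in [0, 1000000000) when
 * both operands are already normalized.  A typical use is turning a
 * relative timeout into an absolute one; the function and variable names
 * below are hypothetical.
 */
#if 0
static void
example_abstime(struct timespec *abstime, const struct timespec *rel)
{
	struct timespec now;

	clock_gettime(CLOCK_REALTIME, &now);	/* current time */
	TIMESPEC_ADD(abstime, &now, rel);	/* abstime = now + rel */
}
#endif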

/*
 * Cleanup definitions.
 */
struct pthread_cleanup {
	struct pthread_cleanup	*next;
	void			(*routine)(void *);
	void			*routine_arg;
	int			onstack;
};

#define	THR_CLEANUP_PUSH(td, func, arg) {	\
	struct pthread_cleanup __cup;		\
						\
	__cup.routine = func;			\
	__cup.routine_arg = arg;		\
	__cup.onstack = 1;			\
	__cup.next = (td)->cleanup;		\
	(td)->cleanup = &__cup;

#define	THR_CLEANUP_POP(td, exec)		\
	(td)->cleanup = __cup.next;		\
	if ((exec) != 0)			\
		__cup.routine(__cup.routine_arg); \
}

struct pthread_atfork {
	TAILQ_ENTRY(pthread_atfork) qe;
	void (*prepare)(void);
	void (*parent)(void);
	void (*child)(void);
};

struct pthread_attr {
	int	sched_policy;
	int	sched_inherit;
	int	prio;
	int	suspend;
#define	THR_STACK_USER		0x100	/* 0xFF reserved for <pthread.h> */
#define	THR_CPUMASK		0x200	/* cpumask is valid */
	int	flags;
	void	*stackaddr_attr;
	size_t	stacksize_attr;
	size_t	guardsize_attr;
	cpumask_t cpumask;
};

/*
 * Thread creation state attributes.
 */
#define THR_CREATE_RUNNING		0
#define THR_CREATE_SUSPENDED		1

/*
 * Miscellaneous definitions.
 */
#define THR_STACK_DEFAULT	(sizeof(void *) / 4 * 1024 * 1024)

/*
 * Maximum size of initial thread's stack.  This perhaps deserves to be larger
 * than the stacks of other threads, since many applications are likely to run
 * almost entirely on this stack.
 */
#define THR_STACK_INITIAL	(THR_STACK_DEFAULT * 2)

/*
 * Define the different priority ranges.  All applications have thread
 * priorities constrained within 0-31.  The threads library raises the
 * priority when delivering signals in order to ensure that signal
 * delivery happens (from the POSIX spec) "as soon as possible".
 * In the future, the threads library will also be able to map specific
 * threads into real-time (cooperating) processes or kernel threads.
 * The RT and SIGNAL priorities will be used internally and added to
 * thread base priorities so that the scheduling queue can handle both
 * normal and RT priority threads with and without signal handling.
 *
 * The approach taken is that, within each class, signal delivery
 * always has priority over thread execution.
 */
#define THR_DEFAULT_PRIORITY		0
#define THR_MUTEX_CEIL_PRIORITY		31	/* dummy */

/*
 * Time slice period in microseconds.
 */
#define TIMESLICE_USEC			20000

struct pthread_rwlockattr {
	int	pshared;
};

struct pthread_rwlock {
	pthread_mutex_t	lock;		/* monitor lock */
	pthread_cond_t	read_signal;
	pthread_cond_t	write_signal;
	int		state;		/* 0 = idle  >0 = # of readers  -1 = writer */
	int		blocked_writers;
};

/*
 * Thread states.
 */
enum pthread_state {
	PS_RUNNING,
	PS_DEAD
};

struct pthread_specific_elem {
	const void	*data;
	int		seqno;
};

struct pthread_key {
	volatile int	allocated;
	volatile int	count;
	int		seqno;
	void		(*destructor)(void *);
};

/*
 * Thread structure.
 */
struct pthread {
	/*
	 * Magic value to help recognize a valid thread structure
	 * from an invalid one:
	 */
#define	THR_MAGIC		((u_int32_t) 0xd09ba115)
	u_int32_t		magic;
	char			*name;
	u_int64_t		uniqueid;	/* for gdb */

	/*
	 * Lock for accesses to this thread structure.
	 */
	umtx_t			lock;

	/* Thread is terminated in kernel, written by kernel. */
	long			terminated;

	/* Kernel thread id. */
	lwpid_t			tid;

	/* Internal condition variable cycle number. */
	umtx_t			cycle;

	/* How many low-level locks the thread holds. */
	int			locklevel;

	/*
	 * Set to non-zero when this thread has entered a critical
	 * region.  We allow for recursive entries into critical regions.
	 */
	int			critical_count;

	/* Signal blocked counter. */
	int			sigblock;

	/* Queue entry for list of all threads. */
	TAILQ_ENTRY(pthread)	tle;	/* link for all threads in process */

	/* Queue entry for GC lists. */
	TAILQ_ENTRY(pthread)	gcle;

	/* Hash queue entry. */
	LIST_ENTRY(pthread)	hle;

	/* Thread reference count. */
	int			refcount;

	/*
	 * Thread start routine, argument, stack pointer and thread
	 * attributes.
	 */
	void			*(*start_routine)(void *);
	void			*arg;
	struct pthread_attr	attr;

	/*
	 * Cancelability flags
	 */
#define	THR_CANCEL_DISABLE		0x0001
#define	THR_CANCEL_EXITING		0x0002
#define	THR_CANCEL_AT_POINT		0x0004
#define	THR_CANCEL_NEEDED		0x0008
#define	SHOULD_CANCEL(val)					\
	(((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |	\
		 THR_CANCEL_NEEDED)) == THR_CANCEL_NEEDED)

#define	SHOULD_ASYNC_CANCEL(val)				\
	(((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |	\
		 THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT)) ==	\
		 (THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT))
	int			cancelflags;

	/* Thread temporary signal mask. */
	sigset_t		sigmask;

	/* Thread state: */
	umtx_t			state;

	/*
	 * Error variable used instead of errno; used internally.
	 */
	int			error;

	/*
	 * The joiner is the thread that is joining to this thread.  The
	 * join status keeps track of a join operation to another thread.
	 */
	struct pthread		*joiner;

	/*
	 * The current thread can belong to a priority mutex queue.
	 * This is the synchronization queue link.
	 */
	TAILQ_ENTRY(pthread)	sqe;

	/* Miscellaneous flags; only set with scheduling lock held. */
	int			flags;
#define THR_FLAGS_PRIVATE	0x0001
#define	THR_FLAGS_NEED_SUSPEND	0x0002	/* thread should be suspended */
#define	THR_FLAGS_SUSPENDED	0x0004	/* thread is suspended */

	/* Thread list flags; only set with thread list lock held. */
	int			tlflags;
#define	TLFLAGS_GC_SAFE		0x0001	/* thread safe for cleaning */
#define	TLFLAGS_IN_TDLIST	0x0002	/* thread in all thread list */
#define	TLFLAGS_IN_GCLIST	0x0004	/* thread in gc list */
#define	TLFLAGS_DETACHED	0x0008	/* thread is detached */

	/*
	 * Base priority is the user-settable and retrievable priority
	 * of the thread.  It is only affected by explicit calls to
	 * set thread priority and upon thread creation via a thread
	 * attribute or default priority.
	 */
	char			base_priority;

	/*
	 * Inherited priority is the priority a thread inherits by
	 * taking a priority inheritance or protection mutex.  It
	 * is not affected by base priority changes.  Inherited
	 * priority defaults to and remains 0 until a mutex is taken
	 * that is being waited on by any other thread whose priority
	 * is non-zero.
	 */
	char			inherited_priority;

	/*
	 * Active priority is always the maximum of the thread's base
	 * priority and inherited priority.
	 * When there is a change in either the base or inherited
	 * priority, the active priority must be recalculated.
	 */
	char			active_priority;

	/* Number of priority ceiling or protection mutexes owned. */
	int			priority_mutex_count;

	/* Queue of currently owned simple type mutexes. */
	TAILQ_HEAD(, pthread_mutex)	mutexq;

	void			*ret;
	struct pthread_specific_elem	*specific;
	int			specific_data_count;

	/* Number of rwlock read locks held. */
	int			rdlock_count;

	/*
	 * Current locks bitmap for rtld.
	 */
	int			rtld_bits;

	/* Thread control block */
	struct tls_tcb		*tcb;

	/* Cleanup handlers link list */
	struct pthread_cleanup	*cleanup;

	/* Enable event reporting */
	int			report_events;

	/* Event mask */
	td_thr_events_t		event_mask;

	/* Event */
	td_event_msg_t		event_buf;
};

#define	THR_IN_CRITICAL(thrd)				\
	(((thrd)->locklevel > 0) ||			\
	 ((thrd)->critical_count > 0))

#define THR_UMTX_TRYLOCK(thrd, lck)			\
	_thr_umtx_trylock((lck), (thrd)->tid)

#define THR_UMTX_LOCK(thrd, lck)			\
	_thr_umtx_lock((lck), (thrd)->tid)

#define THR_UMTX_TIMEDLOCK(thrd, lck, timo)		\
	_thr_umtx_timedlock((lck), (thrd)->tid, (timo))

#define THR_UMTX_UNLOCK(thrd, lck)			\
	_thr_umtx_unlock((lck), (thrd)->tid)

#define THR_LOCK_ACQUIRE(thrd, lck)			\
do {							\
	(thrd)->locklevel++;				\
	_thr_umtx_lock((lck), (thrd)->tid);		\
} while (0)

#ifdef	_PTHREADS_INVARIANTS
#define	THR_ASSERT_LOCKLEVEL(thrd)			\
do {							\
	if (__predict_false((thrd)->locklevel <= 0))	\
		_thr_assert_lock_level();		\
} while (0)
#else
#define THR_ASSERT_LOCKLEVEL(thrd)
#endif

#define	THR_LOCK_RELEASE(thrd, lck)			\
do {							\
	THR_ASSERT_LOCKLEVEL(thrd);			\
	_thr_umtx_unlock((lck), (thrd)->tid);		\
	(thrd)->locklevel--;				\
	_thr_ast(thrd);					\
} while (0)

#define	THR_LOCK(curthrd)		THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock)
#define	THR_UNLOCK(curthrd)		THR_LOCK_RELEASE(curthrd, &(curthrd)->lock)
#define	THR_THREAD_LOCK(curthrd, thr)	THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
#define	THR_THREAD_UNLOCK(curthrd, thr)	THR_LOCK_RELEASE(curthrd, &(thr)->lock)

#define	THREAD_LIST_LOCK(curthrd)			\
do {							\
	THR_LOCK_ACQUIRE((curthrd), &_thr_list_lock);	\
} while (0)

#define	THREAD_LIST_UNLOCK(curthrd)			\
do {							\
	THR_LOCK_RELEASE((curthrd), &_thr_list_lock);	\
} while (0)
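
/*
 * Illustrative sketch (not part of the original header): the lock macros
 * above pair an acquire with a matching release and keep the per-thread
 * locklevel count consistent, so THR_IN_CRITICAL() reflects held low-level
 * locks.  A typical pattern for touching another thread's structure is
 * shown below; the function and variable names are hypothetical.
 */
#if 0
static void
example_thread_access(struct pthread *curthread, struct pthread *td)
{
	THR_THREAD_LOCK(curthread, td);		/* bumps curthread->locklevel */
	/* ... examine or update fields of td here ... */
	THR_THREAD_UNLOCK(curthread, td);	/* unlocks and runs _thr_ast() */
}
#endif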

/*
 * Macros to insert/remove threads to the all thread list and
 * the gc list.
 */
#define	THR_LIST_ADD(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) { \
		TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \
		_thr_hash_add(thrd);			\
		(thrd)->tlflags |= TLFLAGS_IN_TDLIST;	\
	}						\
} while (0)
#define	THR_LIST_REMOVE(thrd) do {			\
	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) { \
		TAILQ_REMOVE(&_thread_list, thrd, tle);	\
		_thr_hash_remove(thrd);			\
		(thrd)->tlflags &= ~TLFLAGS_IN_TDLIST;	\
	}						\
} while (0)
#define	THR_GCLIST_ADD(thrd) do {			\
	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) { \
		TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle); \
		(thrd)->tlflags |= TLFLAGS_IN_GCLIST;	\
		_thr_gc_count++;			\
	}						\
} while (0)
#define	THR_GCLIST_REMOVE(thrd) do {			\
	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) { \
		TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \
		(thrd)->tlflags &= ~TLFLAGS_IN_GCLIST;	\
		_thr_gc_count--;			\
	}						\
} while (0)

#define GC_NEEDED()	(_thr_gc_count >= 5)

#define THR_IN_SYNCQ(thrd)	(((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)

#define SHOULD_REPORT_EVENT(curthr, e)			\
	(curthr->report_events &&			\
	 (((curthr)->event_mask | _thread_event_mask) & e) != 0)

#if !defined(_LIBC_PRIVATE_H_) && !defined(_STDIO_H_)
extern int __isthreaded;
#endif

/*
 * Global variables for the pthread library.
 */
extern char		*_usrstack;
extern struct pthread	*_thr_initial;

/* For debugger */
extern int		_libthread_xu_debug;
extern int		_thread_event_mask;
extern struct pthread	*_thread_last_event;

/* List of all threads */
extern struct thread_head	_thread_list;

/* List of threads needing GC */
extern struct thread_head	_thread_gc_list;

extern int	_thread_active_threads;

extern struct atfork_head	_thr_atfork_list;
extern struct atfork_head	_thr_atfork_kern_list;
extern umtx_t	_thr_atfork_lock;

/* Default thread attributes */
extern struct pthread_attr _pthread_attr_default;

/* Default mutex attributes */
extern struct pthread_mutex_attr _pthread_mutexattr_default;

/* Default condition variable attributes */
extern struct pthread_cond_attr _pthread_condattr_default;

extern pid_t	_thr_pid;
extern size_t	_thr_guard_default;
extern size_t	_thr_stack_default;
extern size_t	_thr_stack_initial;
extern int	_thr_page_size;
extern int	_thr_gc_count;

extern umtx_t	_mutex_static_lock;
extern umtx_t	_cond_static_lock;
extern umtx_t	_rwlock_static_lock;
extern umtx_t	_keytable_lock;
extern umtx_t	_thr_list_lock;
extern umtx_t	_thr_event_lock;

/*
 * Function prototype definitions.
 */
__BEGIN_DECLS
int	_thr_setthreaded(int);
int	_mutex_cv_lock(pthread_mutex_t *, int count);
int	_mutex_cv_unlock(pthread_mutex_t *, int *count);
void	_mutex_notify_priochange(struct pthread *, struct pthread *, int);
void	_mutex_fork(struct pthread *curthread);
void	_mutex_unlock_private(struct pthread *);

#if 0
int	_mutex_reinit(pthread_mutex_t *);
void	_cond_reinit(pthread_cond_t pcond);
void	_rwlock_reinit(pthread_rwlock_t prwlock);
#endif

void	_libpthread_init(struct pthread *);
struct pthread *_thr_alloc(struct pthread *);
void	_thread_exit(const char *, int, const char *) __dead2;
void	_thread_exitf(const char *, int, const char *, ...)
	    __dead2 __printflike(3, 4);
void	_thr_exit_cleanup(void);
void	_thr_atfork_kern(void (*prepare)(void), void (*parent)(void),
	    void (*child)(void));
int	_thr_ref_add(struct pthread *, struct pthread *, int);
void	_thr_ref_delete(struct pthread *, struct pthread *);
void	_thr_ref_delete_unlocked(struct pthread *, struct pthread *);
int	_thr_find_thread(struct pthread *, struct pthread *, int);
void	_thr_malloc_init(void);
void	_thr_rtld_init(void);
void	_thr_rtld_fini(void);
int	_thr_stack_alloc(struct pthread_attr *);
void	_thr_stack_free(struct pthread_attr *);
void	_thr_stack_cleanup(void);
void	_thr_sem_init(void);
void	_thr_free(struct pthread *, struct pthread *);
void	_thr_gc(struct pthread *);
void	_thread_cleanupspecific(void);
void	_thread_dump_info(void);
void	_thread_printf(int, const char *, ...) __printflike(2, 3);
void	_thread_vprintf(int, const char *, va_list);
void	_thr_spinlock_init(void);
int	_thr_cancel_enter(struct pthread *);
void	_thr_cancel_leave(struct pthread *, int);
void	_thr_signal_block(struct pthread *);
void	_thr_signal_unblock(struct pthread *);
void	_thr_signal_init(void);
void	_thr_signal_deinit(void);
int	_thr_send_sig(struct pthread *, int sig);
void	_thr_list_init(void);
void	_thr_hash_add(struct pthread *);
void	_thr_hash_remove(struct pthread *);
struct pthread *_thr_hash_find(struct pthread *);
void	_thr_link(struct pthread *curthread, struct pthread *thread);
void	_thr_unlink(struct pthread *curthread, struct pthread *thread);
void	_thr_suspend_check(struct pthread *curthread);
void	_thr_assert_lock_level(void) __dead2;
void	_thr_ast(struct pthread *);
int	_thr_get_tid(void);
void	_thr_report_creation(struct pthread *curthread,
	    struct pthread *newthread);
void	_thr_report_death(struct pthread *curthread);
void	_thread_bp_create(void);
void	_thread_bp_death(void);
int	_thr_getscheduler(lwpid_t, int *, struct sched_param *);
int	_thr_setscheduler(lwpid_t, int, const struct sched_param *);
int	_thr_set_sched_other_prio(struct pthread *, int);
int	_rtp_to_schedparam(const struct rtprio *rtp, int *policy,
	    struct sched_param *param);
int	_schedparam_to_rtp(int policy, const struct sched_param *param,
	    struct rtprio *rtp);
int	_umtx_sleep_err(volatile const int *, int, int);
int	_umtx_wakeup_err(volatile const int *, int);

/* #include <fcntl.h> */
#ifdef _SYS_FCNTL_H_
int	__sys_fcntl(int, int, ...);
int	__sys_open(const char *, int, ...);
int	__sys_openat(int, const char *, int, ...);
#endif

/* #include <sys/ioctl.h> */
#ifdef _SYS_IOCTL_H_
int	__sys_ioctl(int, unsigned long, ...);
#endif

/* #include <sched.h> */
#ifdef _SCHED_H_
int	__sys_sched_yield(void);
#endif

/* #include <signal.h> */
#ifdef _SIGNAL_H_
int	__sys_kill(pid_t, int);
int	__sys_sigaction(int, const struct sigaction *, struct sigaction *);
int	__sys_sigpending(sigset_t *);
int	__sys_sigprocmask(int, const sigset_t *, sigset_t *);
int	__sys_sigsuspend(const sigset_t *);
int	__sys_sigreturn(ucontext_t *);
int	__sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
#endif

/* #include <time.h> */
#ifdef _TIME_H_
int	__sys_nanosleep(const struct timespec *, struct timespec *);
#endif

/* #include <unistd.h> */
#ifdef _UNISTD_H_
int	__sys_close(int);
int	__sys_execve(const char *, char * const *, char * const *);
pid_t	__sys_getpid(void);
ssize_t	__sys_read(int, void *, size_t);
ssize_t	__sys_write(int, const void *, size_t);
void	__sys_exit(int);
int	__sys_sigwait(const sigset_t *, int *);
int	__sys_sigtimedwait(const sigset_t *, siginfo_t *,
	    const struct timespec *);
int	__sys_sigwaitinfo(const sigset_t *set, siginfo_t *info);
#endif

static inline int
_thr_isthreaded(void)
{
	return (__isthreaded != 0);
}

static inline int
_thr_is_inited(void)
{
	return (_thr_initial != NULL);
}

static inline void
_thr_check_init(void)
{
	if (_thr_initial == NULL)
		_libpthread_init(NULL);
}

struct dl_phdr_info;
void	__pthread_cxa_finalize(struct dl_phdr_info *phdr_info);

__END_DECLS

#endif	/* !_THR_PRIVATE_H */