1 /* 2 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by John Birrell. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * Private thread definitions for the uthread kernel. 
33 * 34 * $FreeBSD: src/lib/libpthread/thread/thr_private.h,v 1.120 2004/11/01 10:49:34 davidxu Exp $ 35 * $DragonFly: src/lib/libthread_xu/thread/thr_private.h,v 1.2 2005/02/21 13:47:21 davidxu Exp $ 36 */ 37 38 #ifndef _THR_PRIVATE_H 39 #define _THR_PRIVATE_H 40 41 /* 42 * Include files. 43 */ 44 #include <sys/types.h> 45 #include <sys/time.h> 46 #include <sys/cdefs.h> 47 #include <sys/queue.h> 48 #include <machine/atomic.h> 49 #include <errno.h> 50 #include <limits.h> 51 #include <signal.h> 52 #include <stdio.h> 53 #include <sched.h> 54 #include <unistd.h> 55 #include <pthread.h> 56 #include <pthread_np.h> 57 58 #include "pthread_md.h" 59 #include "thr_umtx.h" 60 61 /* 62 * Evaluate the storage class specifier. 63 */ 64 #ifdef GLOBAL_PTHREAD_PRIVATE 65 #define SCLASS 66 #define SCLASS_PRESET(x...) = x 67 #else 68 #define SCLASS extern 69 #define SCLASS_PRESET(x...) 70 #endif 71 72 /* Signal to do cancellation */ 73 #define SIGCANCEL 32 74 75 /* 76 * Kernel fatal error handler macro. 77 */ 78 #define PANIC(string) _thread_exit(__FILE__,__LINE__,string) 79 80 /* Output debug messages like this: */ 81 #define stdout_debug(args...) _thread_printf(STDOUT_FILENO, ##args) 82 #define stderr_debug(args...) 
_thread_printf(STDOUT_FILENO, ##args) 83 84 #ifdef __DragonFly__ 85 #define __predict_true(exp) (exp) 86 #define __predict_false(exp) (exp) 87 #endif 88 89 #ifdef _PTHREADS_INVARIANTS 90 #define THR_ASSERT(cond, msg) do { \ 91 if (__predict_false(!(cond))) \ 92 PANIC(msg); \ 93 } while (0) 94 #else 95 #define THR_ASSERT(cond, msg) 96 #endif 97 98 #define TIMESPEC_ADD(dst, src, val) \ 99 do { \ 100 (dst)->tv_sec = (src)->tv_sec + (val)->tv_sec; \ 101 (dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \ 102 if ((dst)->tv_nsec > 1000000000) { \ 103 (dst)->tv_sec++; \ 104 (dst)->tv_nsec -= 1000000000; \ 105 } \ 106 } while (0) 107 108 #define TIMESPEC_SUB(dst, src, val) \ 109 do { \ 110 (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \ 111 (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \ 112 if ((dst)->tv_nsec < 0) { \ 113 (dst)->tv_sec--; \ 114 (dst)->tv_nsec += 1000000000; \ 115 } \ 116 } while (0) 117 118 struct pthread_mutex { 119 /* 120 * Lock for accesses to this structure. 121 */ 122 volatile umtx_t m_lock; 123 enum pthread_mutextype m_type; 124 int m_protocol; 125 TAILQ_HEAD(mutex_head, pthread) m_queue; 126 struct pthread *m_owner; 127 long m_flags; 128 int m_count; 129 int m_refcount; 130 131 /* 132 * Used for priority inheritence and protection. 133 * 134 * m_prio - For priority inheritence, the highest active 135 * priority (threads locking the mutex inherit 136 * this priority). For priority protection, the 137 * ceiling priority of this mutex. 138 * m_saved_prio - mutex owners inherited priority before 139 * taking the mutex, restored when the owner 140 * unlocks the mutex. 141 */ 142 int m_prio; 143 int m_saved_prio; 144 145 /* 146 * Link for list of all mutexes a thread currently owns. 
147 */ 148 TAILQ_ENTRY(pthread_mutex) m_qe; 149 }; 150 151 #define TAILQ_INITIALIZER { NULL, NULL } 152 153 #define PTHREAD_MUTEX_STATIC_INITIALIZER \ 154 {0, PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \ 155 NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER } 156 /* 157 * Flags for mutexes. 158 */ 159 #define MUTEX_FLAGS_PRIVATE 0x01 160 #define MUTEX_FLAGS_INITED 0x02 161 #define MUTEX_FLAGS_BUSY 0x04 162 163 struct pthread_mutex_attr { 164 enum pthread_mutextype m_type; 165 int m_protocol; 166 int m_ceiling; 167 long m_flags; 168 }; 169 170 #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ 171 { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } 172 173 struct pthread_cond { 174 /* 175 * Lock for accesses to this structure. 176 */ 177 volatile umtx_t c_lock; 178 volatile umtx_t c_seqno; 179 volatile int c_waiters; 180 volatile int c_wakeups; 181 int c_pshared; 182 int c_clockid; 183 }; 184 185 struct pthread_cond_attr { 186 int c_pshared; 187 int c_clockid; 188 }; 189 190 struct pthread_barrier { 191 volatile umtx_t b_lock; 192 volatile umtx_t b_cycle; 193 volatile int b_count; 194 volatile int b_waiters; 195 }; 196 197 struct pthread_barrierattr { 198 int pshared; 199 }; 200 201 struct pthread_spinlock { 202 volatile umtx_t s_lock; 203 }; 204 205 /* 206 * Flags for condition variables. 207 */ 208 #define COND_FLAGS_PRIVATE 0x01 209 #define COND_FLAGS_INITED 0x02 210 #define COND_FLAGS_BUSY 0x04 211 212 /* 213 * Cleanup definitions. 
 */
/* Per-frame cleanup handler record; see THR_CLEANUP_PUSH/POP below. */
struct pthread_cleanup {
	struct pthread_cleanup	*next;		/* older handler in the chain */
	void			(*routine)();	/* handler to run on pop/cancel */
	void			*routine_arg;	/* argument passed to routine */
	int			onstack;	/* record lives on caller's stack */
};

/*
 * THR_CLEANUP_PUSH opens a brace that THR_CLEANUP_POP closes, so the
 * two macros must be used as a matched pair in the same function; the
 * cleanup record lives in the block's stack frame between them.
 */
#define THR_CLEANUP_PUSH(td, func, arg) { \
	struct pthread_cleanup __cup;	\
					\
	__cup.routine = func;		\
	__cup.routine_arg = arg;	\
	__cup.onstack = 1;		\
	__cup.next = (td)->cleanup;	\
	(td)->cleanup = &__cup;

#define THR_CLEANUP_POP(td, exec)	\
	(td)->cleanup = __cup.next;	\
	if ((exec) != 0)		\
		__cup.routine(__cup.routine_arg); \
}

/* One registered pthread_atfork() triple, queued on _thr_atfork_list. */
struct pthread_atfork {
	TAILQ_ENTRY(pthread_atfork) qe;
	void (*prepare)(void);
	void (*parent)(void);
	void (*child)(void);
};

/* Thread creation attributes; see _pthread_attr_default for defaults. */
struct pthread_attr {
	int	sched_policy;
	int	sched_inherit;
	int	sched_interval;
	int	prio;
	int	suspend;
#define	THR_STACK_USER		0x100	/* 0xFF reserved for <pthread.h> */
	int	flags;
	void	*arg_attr;
	void	(*cleanup_attr)();
	void	*stackaddr_attr;
	size_t	stacksize_attr;
	size_t	guardsize_attr;
};

/*
 * Thread creation state attributes.
 */
#define THR_CREATE_RUNNING		0
#define THR_CREATE_SUSPENDED		1

/*
 * Miscellaneous definitions.
 */
/* Default stack size: 1MB on 32-bit (sizeof(void*)==4), 2MB on 64-bit. */
#define THR_STACK_DEFAULT		(sizeof(void *) / 4 * 1024 * 1024)

/*
 * Maximum size of initial thread's stack.  This perhaps deserves to be larger
 * than the stacks of other threads, since many applications are likely to run
 * almost entirely on this stack.
 */
#define THR_STACK_INITIAL		(THR_STACK_DEFAULT * 2)

/*
 * Define the different priority ranges.  All applications have thread
 * priorities constrained within 0-31.  The threads library raises the
 * priority when delivering signals in order to ensure that signal
 * delivery happens (from the POSIX spec) "as soon as possible".
 * In the future, the threads library will also be able to map specific
 * threads into real-time (cooperating) processes or kernel threads.
 * The RT and SIGNAL priorities will be used internally and added to
 * thread base priorities so that the scheduling queue can handle both
 * normal and RT priority threads with and without signal handling.
 *
 * The approach taken is that, within each class, signal delivery
 * always has priority over thread execution.
 */
#define THR_DEFAULT_PRIORITY		15
#define THR_MIN_PRIORITY		0
#define THR_MAX_PRIORITY		31	/* 0x1F */
#define THR_SIGNAL_PRIORITY		32	/* 0x20 */
#define THR_RT_PRIORITY			64	/* 0x40 */
#define THR_FIRST_PRIORITY		THR_MIN_PRIORITY
#define THR_LAST_PRIORITY	\
	(THR_MAX_PRIORITY + THR_SIGNAL_PRIORITY + THR_RT_PRIORITY)
#define THR_BASE_PRIORITY(prio)	((prio) & THR_MAX_PRIORITY)

/*
 * Time slice period in microseconds.
 */
#define TIMESLICE_USEC				20000

struct pthread_rwlockattr {
	int		pshared;
};

struct pthread_rwlock {
	pthread_mutex_t	lock;	/* monitor lock */
	pthread_cond_t	read_signal;
	pthread_cond_t	write_signal;
	int		state;	/* 0 = idle  >0 = # of readers  -1 = writer */
	int		blocked_writers;
};

/*
 * Thread states.
 */
enum pthread_state {
	PS_RUNNING,
	PS_DEAD
};

/* Object a thread can be blocked on (currently only mutexes). */
union pthread_wait_data {
	pthread_mutex_t	mutex;
};

/* One slot of a thread's thread-specific-data table. */
struct pthread_specific_elem {
	const void	*data;
	int		seqno;	/* must match key's seqno to be valid */
};

/* Global TSD key table entry; seqno detects stale slots after reuse. */
struct pthread_key {
	volatile int	allocated;
	volatile int	count;
	int		seqno;
	void		(*destructor)(void *);
};

/*
 * Thread structure.
 */
struct pthread {
	/*
	 * Magic value to help recognize a valid thread structure
	 * from an invalid one:
	 */
#define	THR_MAGIC		((u_int32_t) 0xd09ba115)
	u_int32_t		magic;
	char			*name;
	u_int64_t		uniqueid; /* for gdb */

	/*
	 * Lock for accesses to this thread structure.
	 */
	umtx_t			lock;

	/* Thread is terminated in kernel, written by kernel. */
	long			terminated;

	/* Kernel thread id. */
	long			tid;

	/* Internal condition variable cycle number. */
	umtx_t			cycle;

	/* How many low level locks the thread held. */
	int			locklevel;

	/* Signal blocked counter. */
	int			sigblock;

	/* Queue entry for list of all threads. */
	TAILQ_ENTRY(pthread)	tle;	/* link for all threads in process */

	/* Queue entry for GC lists. */
	TAILQ_ENTRY(pthread)	gcle;

	/* Hash queue entry. */
	LIST_ENTRY(pthread)	hle;

	/* Threads reference count. */
	int			refcount;

	/*
	 * Thread start routine, argument, stack pointer and thread
	 * attributes.
	 */
	void			*(*start_routine)(void *);
	void			*arg;
	struct pthread_attr	attr;

	/*
	 * Cancelability flags
	 */
#define	THR_CANCEL_DISABLE		0x0001
#define	THR_CANCEL_EXITING		0x0002
#define THR_CANCEL_AT_POINT		0x0004
#define THR_CANCEL_NEEDED		0x0008
/* Cancel is pending, enabled, and the thread is not already exiting. */
#define	SHOULD_CANCEL(val)				\
	(((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |	\
		 THR_CANCEL_NEEDED)) == THR_CANCEL_NEEDED)

/* As above, but additionally requires being at an async-cancel point. */
#define	SHOULD_ASYNC_CANCEL(val)				\
	(((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |	\
		 THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT)) ==	\
		(THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT))
	int			cancelflags;

	/* Thread temporary signal mask. */
	sigset_t		sigmask;

	/* Thread state: */
	umtx_t			state;

	/*
	 * Error variable used instead of errno. The function __error()
	 * returns a pointer to this.
	 */
	int			error;

	/*
	 * The joiner is the thread that is joining to this thread.  The
	 * join status keeps track of a join operation to another thread.
	 */
	struct pthread		*joiner;

	/*
	 * The current thread can belong to a priority mutex queue.
	 * This is the synchronization queue link.
	 */
	TAILQ_ENTRY(pthread)	sqe;

	/* Wait data. */
	union pthread_wait_data	data;

	int			sflags;
#define THR_FLAGS_IN_SYNCQ	0x0001

	/* Miscellaneous flags; only set with scheduling lock held. */
	int			flags;
#define THR_FLAGS_PRIVATE	0x0001
#define	THR_FLAGS_NEED_SUSPEND	0x0002	/* thread should be suspended */
#define	THR_FLAGS_SUSPENDED	0x0004	/* thread is suspended */

	/* Thread list flags; only set with thread list lock held. */
	int			tlflags;
#define	TLFLAGS_GC_SAFE		0x0001	/* thread safe for cleaning */
#define	TLFLAGS_IN_TDLIST	0x0002	/* thread in all thread list */
#define	TLFLAGS_IN_GCLIST	0x0004	/* thread in gc list */
#define	TLFLAGS_DETACHED	0x0008	/* thread is detached */

	/*
	 * Base priority is the user settable and retrievable priority
	 * of the thread.  It is only affected by explicit calls to
	 * set thread priority and upon thread creation via a thread
	 * attribute or default priority.
	 */
	char			base_priority;

	/*
	 * Inherited priority is the priority a thread inherits by
	 * taking a priority inheritance or protection mutex.  It
	 * is not affected by base priority changes.  Inherited
	 * priority defaults to and remains 0 until a mutex is taken
	 * that is being waited on by any other thread whose priority
	 * is non-zero.
	 */
	char			inherited_priority;

	/*
	 * Active priority is always the maximum of the threads base
	 * priority and inherited priority.  When there is a change
	 * in either the base or inherited priority, the active
	 * priority must be recalculated.
	 */
	char			active_priority;

	/* Number of priority ceiling or protection mutexes owned. */
	int			priority_mutex_count;

	/* Queue of currently owned simple type mutexes. */
	TAILQ_HEAD(, pthread_mutex)	mutexq;

	/* Queue of currently owned priority type mutexes. */
	TAILQ_HEAD(, pthread_mutex)	pri_mutexq;

	void				*ret;
	struct pthread_specific_elem	*specific;
	int				specific_data_count;

	/* Number of rwlock read locks held. */
	int			rdlock_count;

	/*
	 * Current locks bitmap for rtld.
	 */
	int			rtld_bits;

	/* Thread control block */
	struct tcb		*tcb;

	/* Cleanup handlers Link List */
	struct pthread_cleanup	*cleanup;
};

/* Low-level umtx lock wrappers keyed by the acting thread's tid. */
#define THR_UMTX_TRYLOCK(thrd, lck)			\
	_thr_umtx_trylock((lck), (thrd)->tid)

#define THR_UMTX_LOCK(thrd, lck)			\
	_thr_umtx_lock((lck), (thrd)->tid)

#define THR_UMTX_TIMEDLOCK(thrd, lck, timo)		\
	_thr_umtx_timedlock((lck), (thrd)->tid, (timo))

#define THR_UMTX_UNLOCK(thrd, lck)			\
	_thr_umtx_unlock((lck), (thrd)->tid)

/* Lock acquire/release that also maintains the thread's lock level. */
#define	THR_LOCK_ACQUIRE(thrd, lck)			\
do {							\
	(thrd)->locklevel++;				\
	_thr_umtx_lock(lck, (thrd)->tid);		\
} while (0)

#define	THR_LOCK_RELEASE(thrd, lck)			\
do {							\
	if ((thrd)->locklevel > 0) {			\
		_thr_umtx_unlock((lck), (thrd)->tid);	\
		(thrd)->locklevel--;			\
	} else { 					\
		_thr_assert_lock_level();		\
	}						\
} while (0)

#define	THR_LOCK(curthrd)		THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock)
#define	THR_UNLOCK(curthrd)		THR_LOCK_RELEASE(curthrd, &(curthrd)->lock)
#define	THR_THREAD_LOCK(curthrd, thr)	THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
#define	THR_THREAD_UNLOCK(curthrd, thr)	THR_LOCK_RELEASE(curthrd, &(thr)->lock)

#define	THREAD_LIST_LOCK(curthrd)			\
do {							\
	THR_LOCK_ACQUIRE((curthrd), &_thr_list_lock);	\
} while (0)

#define	THREAD_LIST_UNLOCK(curthrd)			\
do {							\
	THR_LOCK_RELEASE((curthrd), &_thr_list_lock);	\
} while (0)

/*
 * Macros to insert/remove threads to the all thread list and
 * the gc list.
 */
/* Add to the all-threads list and hash; no-op if already present. */
#define THR_LIST_ADD(thrd) do {	\
	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) { \
		TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \
		_thr_hash_add(thrd); \
		(thrd)->tlflags |= TLFLAGS_IN_TDLIST; \
	} \
} while (0)
/* Remove from the all-threads list and hash; no-op if not present. */
#define THR_LIST_REMOVE(thrd) do { \
	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) { \
		TAILQ_REMOVE(&_thread_list, thrd, tle); \
		_thr_hash_remove(thrd); \
		(thrd)->tlflags &= ~TLFLAGS_IN_TDLIST; \
	} \
} while (0)
/* Queue a dead thread for garbage collection (idempotent). */
#define THR_GCLIST_ADD(thrd) do { \
	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) { \
		TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
		(thrd)->tlflags |= TLFLAGS_IN_GCLIST; \
		_gc_count++; \
	} \
} while (0)
/* Dequeue a thread from the GC list (idempotent). */
#define THR_GCLIST_REMOVE(thrd) do { \
	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) { \
		TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \
		(thrd)->tlflags &= ~TLFLAGS_IN_GCLIST; \
		_gc_count--; \
	} \
} while (0)

/* GC kicks in once at least 5 dead threads are queued. */
#define GC_NEEDED()	(_gc_count >= 5)

#define THR_IN_SYNCQ(thrd)	(((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)

/* libc's "has any thread been created" flag. */
extern int __isthreaded;

/*
 * Global variables for the pthread kernel.
 */

SCLASS void		*_usrstack	SCLASS_PRESET(NULL);
SCLASS struct pthread	*_thr_initial	SCLASS_PRESET(NULL);
/* For debugger */
SCLASS int		_libthread_xu_debug	SCLASS_PRESET(0);
SCLASS int		_thread_scope_system	SCLASS_PRESET(0);

/* List of all threads: */
SCLASS TAILQ_HEAD(, pthread)	_thread_list
	SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_list));

/* List of threads needing GC: */
SCLASS TAILQ_HEAD(, pthread)	_thread_gc_list
	SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list));

SCLASS int	_thread_active_threads	SCLASS_PRESET(1);

/* Registered pthread_atfork() handlers and the lock protecting them. */
SCLASS TAILQ_HEAD(atfork_head, pthread_atfork)	_thr_atfork_list;
SCLASS umtx_t	_thr_atfork_lock;

/* Default thread attributes: */
SCLASS struct pthread_attr _pthread_attr_default
	SCLASS_PRESET({
	.sched_policy = SCHED_RR,
	.sched_inherit = 0,
	.sched_interval = TIMESLICE_USEC,
	.prio = THR_DEFAULT_PRIORITY,
	.suspend = THR_CREATE_RUNNING,
	.flags = 0,
	.arg_attr = NULL,
	.cleanup_attr = NULL,
	.stackaddr_attr = NULL,
	.stacksize_attr = THR_STACK_DEFAULT,
	.guardsize_attr = 0
	});

/* Default mutex attributes: */
SCLASS struct pthread_mutex_attr _pthread_mutexattr_default
	SCLASS_PRESET({
	.m_type = PTHREAD_MUTEX_DEFAULT,
	.m_protocol = PTHREAD_PRIO_NONE,
	.m_ceiling = 0,
	.m_flags = 0
	});

/* Default condition variable attributes: */
SCLASS struct pthread_cond_attr _pthread_condattr_default
	SCLASS_PRESET({
	.c_pshared = PTHREAD_PROCESS_PRIVATE,
	.c_clockid = CLOCK_REALTIME
	});

SCLASS pid_t	_thr_pid		SCLASS_PRESET(0);
SCLASS int	_thr_guard_default;
SCLASS int	_thr_stack_default	SCLASS_PRESET(THR_STACK_DEFAULT);
SCLASS int	_thr_stack_initial	SCLASS_PRESET(THR_STACK_INITIAL);
SCLASS int	_thr_page_size;
/* Garbage thread count.
 */
SCLASS int	_gc_count	SCLASS_PRESET(0);

/* Locks serializing lazy init of statically-initialized sync objects. */
SCLASS umtx_t	_mutex_static_lock;
SCLASS umtx_t	_cond_static_lock;
SCLASS umtx_t	_rwlock_static_lock;
SCLASS umtx_t	_keytable_lock;
SCLASS umtx_t	_thr_list_lock;

/* Undefine the storage class and preset specifiers: */
#undef	SCLASS
#undef	SCLASS_PRESET

/*
 * Function prototype definitions.
 */
__BEGIN_DECLS
int	_thr_setthreaded(int);
int	_mutex_cv_lock(pthread_mutex_t *);
int	_mutex_cv_unlock(pthread_mutex_t *);
void	_mutex_notify_priochange(struct pthread *, struct pthread *, int);
int	_mutex_reinit(pthread_mutex_t *);
void	_mutex_fork(struct pthread *curthread);
void	_mutex_unlock_private(struct pthread *);
void	_libpthread_init(struct pthread *);
void	*_pthread_getspecific(pthread_key_t);
int	_pthread_cond_init(pthread_cond_t *, const pthread_condattr_t *);
int	_pthread_cond_destroy(pthread_cond_t *);
int	_pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *);
int	_pthread_cond_timedwait(pthread_cond_t *, pthread_mutex_t *,
	    const struct timespec *);
int	_pthread_cond_signal(pthread_cond_t *);
int	_pthread_cond_broadcast(pthread_cond_t *);
int	_pthread_key_create(pthread_key_t *, void (*) (void *));
int	_pthread_key_delete(pthread_key_t);
int	_pthread_mutex_destroy(pthread_mutex_t *);
int	_pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *);
int	_pthread_mutex_lock(pthread_mutex_t *);
int	_pthread_mutex_trylock(pthread_mutex_t *);
int	_pthread_mutex_unlock(pthread_mutex_t *);
int	_pthread_mutexattr_init(pthread_mutexattr_t *);
int	_pthread_mutexattr_destroy(pthread_mutexattr_t *);
int	_pthread_mutexattr_settype(pthread_mutexattr_t *, int);
int	_pthread_once(pthread_once_t *, void (*) (void));
int	_pthread_rwlock_init(pthread_rwlock_t *, const pthread_rwlockattr_t *);
int	_pthread_rwlock_destroy (pthread_rwlock_t *);
struct pthread *_pthread_self(void);
int	_pthread_setspecific(pthread_key_t, const void *);
void	_pthread_testcancel(void);
void	_pthread_yield(void);
void	_pthread_cleanup_push(void (*routine) (void *), void *routine_arg);
void	_pthread_cleanup_pop(int execute);
struct pthread *_thr_alloc(struct pthread *);
void	_thread_exit(char *, int, char *) __dead2;
void	_thr_exit_cleanup(void);
int	_thr_ref_add(struct pthread *, struct pthread *, int);
void	_thr_ref_delete(struct pthread *, struct pthread *);
int	_thr_find_thread(struct pthread *, struct pthread *, int);
void	_thr_rtld_init(void);
void	_thr_rtld_fini(void);
int	_thr_stack_alloc(struct pthread_attr *);
void	_thr_stack_free(struct pthread_attr *);
void	_thr_free(struct pthread *, struct pthread *);
void	_thr_gc(struct pthread *);
void	_thread_cleanupspecific(void);
void	_thread_dump_info(void);
void	_thread_printf(int, const char *, ...);
void	_thr_spinlock_init(void);
int	_thr_cancel_enter(struct pthread *);
void	_thr_cancel_leave(struct pthread *, int);
void	_thr_signal_block(struct pthread *);
void	_thr_signal_unblock(struct pthread *);
void	_thr_signal_init(void);
void	_thr_signal_deinit(void);
int	_thr_send_sig(struct pthread *, int sig);
void	_thr_list_init();
void	_thr_hash_add(struct pthread *);
void	_thr_hash_remove(struct pthread *);
struct pthread *_thr_hash_find(struct pthread *);
void	_thr_link(struct pthread *curthread, struct pthread *thread);
void	_thr_unlink(struct pthread *curthread, struct pthread *thread);
void	_thr_suspend_check(struct pthread *curthread);
void	_thr_assert_lock_level() __dead2;
int	_thr_get_tid(void);

/*
 * Aliases for _pthread functions.  Should be called instead of
 * originals if PLT relocation is unwanted at runtime.
738 */ 739 int _thr_cond_broadcast(pthread_cond_t *); 740 int _thr_cond_signal(pthread_cond_t *); 741 int _thr_cond_wait(pthread_cond_t *, pthread_mutex_t *); 742 int _thr_mutex_lock(pthread_mutex_t *); 743 int _thr_mutex_unlock(pthread_mutex_t *); 744 int _thr_rwlock_rdlock(pthread_rwlock_t *); 745 int _thr_rwlock_wrlock(pthread_rwlock_t *); 746 int _thr_rwlock_unlock(pthread_rwlock_t *); 747 748 /* #include <sys/aio.h> */ 749 #ifdef _SYS_AIO_H_ 750 int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *); 751 #endif 752 753 /* #include <fcntl.h> */ 754 #ifdef _SYS_FCNTL_H_ 755 int __sys_fcntl(int, int, ...); 756 int __sys_open(const char *, int, ...); 757 #endif 758 759 /* #include <sys/ioctl.h> */ 760 #ifdef _SYS_IOCTL_H_ 761 int __sys_ioctl(int, unsigned long, ...); 762 #endif 763 764 /* #inclde <sched.h> */ 765 #ifdef _SCHED_H_ 766 int __sys_sched_yield(void); 767 #endif 768 769 /* #include <signal.h> */ 770 #ifdef _SIGNAL_H_ 771 int __sys_kill(pid_t, int); 772 int __sys_sigaction(int, const struct sigaction *, struct sigaction *); 773 int __sys_sigpending(sigset_t *); 774 int __sys_sigprocmask(int, const sigset_t *, sigset_t *); 775 int __sys_sigsuspend(const sigset_t *); 776 int __sys_sigreturn(ucontext_t *); 777 int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *); 778 #endif 779 780 /* #include <sys/socket.h> */ 781 #ifdef _SYS_SOCKET_H_ 782 int __sys_accept(int, struct sockaddr *, socklen_t *); 783 int __sys_connect(int, const struct sockaddr *, socklen_t); 784 ssize_t __sys_recv(int, void *, size_t, int); 785 ssize_t __sys_recvfrom(int, void *, size_t, int, struct sockaddr *, socklen_t *); 786 ssize_t __sys_recvmsg(int, struct msghdr *, int); 787 int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, 788 off_t *, int); 789 ssize_t __sys_sendmsg(int, const struct msghdr *, int); 790 ssize_t __sys_sendto(int, const void *,size_t, int, const struct sockaddr *, socklen_t); 791 #endif 792 793 /* #include 
<sys/uio.h> */ 794 #ifdef _SYS_UIO_H_ 795 ssize_t __sys_readv(int, const struct iovec *, int); 796 ssize_t __sys_writev(int, const struct iovec *, int); 797 #endif 798 799 /* #include <time.h> */ 800 #ifdef _TIME_H_ 801 int __sys_nanosleep(const struct timespec *, struct timespec *); 802 #endif 803 804 /* #include <unistd.h> */ 805 #ifdef _UNISTD_H_ 806 int __sys_close(int); 807 int __sys_execve(const char *, char * const *, char * const *); 808 int __sys_fork(void); 809 int __sys_fsync(int); 810 pid_t __sys_getpid(void); 811 int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *); 812 ssize_t __sys_read(int, void *, size_t); 813 ssize_t __sys_write(int, const void *, size_t); 814 void __sys_exit(int); 815 int __sys_sigwait(const sigset_t *, int *); 816 int __sys_sigtimedwait(const sigset_t *, siginfo_t *, 817 const struct timespec *); 818 int __sys_sigwaitinfo(const sigset_t *set, siginfo_t *info); 819 #endif 820 821 /* #include <poll.h> */ 822 #ifdef _SYS_POLL_H_ 823 int __sys_poll(struct pollfd *, unsigned, int); 824 #endif 825 826 /* #include <sys/mman.h> */ 827 #ifdef _SYS_MMAN_H_ 828 int __sys_msync(void *, size_t, int); 829 #endif 830 831 static inline int 832 _thr_isthreaded(void) 833 { 834 return (__isthreaded != 0); 835 } 836 837 static inline int 838 _thr_is_inited(void) 839 { 840 return (_thr_initial != 0); 841 } 842 843 static inline void 844 _thr_check_init(void) 845 { 846 if (_thr_initial == 0) 847 _libpthread_init(0); 848 } 849 850 __END_DECLS 851 852 #endif /* !_THR_PRIVATE_H */ 853