/*
 * SYS/THREAD.H
 *
 * Implements the architecture independent portion of the LWKT
 * subsystem.
 *
 * Types which must already be defined when this header is included by
 * userland: struct md_thread
 */

#ifndef _SYS_THREAD_H_
#define _SYS_THREAD_H_

#ifndef _SYS_STDINT_H_
#include <sys/stdint.h>		/* __int types */
#endif
#ifndef _SYS_PARAM_H_
#include <sys/param.h>		/* MAXCOMLEN */
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>		/* TAILQ_* macros */
#endif
#ifndef _SYS_MSGPORT_H_
#include <sys/msgport.h>	/* lwkt_port */
#endif
#ifndef _SYS_TIME_H_
#include <sys/time.h>		/* struct timeval */
#endif
#ifndef _SYS_LOCK_H
#include <sys/lock.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif
#ifndef _SYS_IOSCHED_H_
#include <sys/iosched.h>
#endif
#include <machine/thread.h>

struct globaldata;
struct lwp;
struct proc;
struct thread;
struct lwkt_queue;
struct lwkt_token;
struct lwkt_tokref;
struct lwkt_ipiq;
struct lwkt_cpu_msg;
struct lwkt_cpu_port;
struct lwkt_cpusync;
union sysunion;

typedef struct lwkt_queue	*lwkt_queue_t;
typedef struct lwkt_token	*lwkt_token_t;
typedef struct lwkt_tokref	*lwkt_tokref_t;
typedef struct lwkt_cpu_msg	*lwkt_cpu_msg_t;
typedef struct lwkt_cpu_port	*lwkt_cpu_port_t;
typedef struct lwkt_ipiq	*lwkt_ipiq_t;
typedef struct lwkt_cpusync	*lwkt_cpusync_t;
typedef struct thread		*thread_t;

typedef TAILQ_HEAD(lwkt_queue, thread) lwkt_queue;

/*
 * Differentiation between kernel threads and user threads.  Userland
 * programs which want access to kernel structures have to define
 * _KERNEL_STRUCTURES.  This is a kind of safety valve to prevent badly
 * written user programs from getting an LWKT thread that is neither the
 * kernel nor the user version.
 */
#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
#ifndef _CPU_FRAME_H_
#include <machine/frame.h>
#endif
#else
struct intrframe;
#endif

/*
 * Tokens are used to serialize access to information.  They are 'soft'
 * serialization entities that only stay in effect while a thread is
 * running.  If the thread blocks, other threads can run holding the same
 * token(s).  The tokens are reacquired when the original thread resumes.
 *
 * Tokens guarantee that no deadlock can happen regardless of type or
 * ordering.  However, obtaining the same token first shared, then
 * stacking exclusive, is not allowed and will panic.
 *
 * A thread can depend on its serialization remaining intact through a
 * preemption.  An interrupt which attempts to use the same token as the
 * thread being preempted will reschedule itself for non-preemptive
 * operation, so the new token code is capable of interlocking against
 * interrupts as well as other cpus.  This means that your token can only
 * be (temporarily) lost if you *explicitly* block.
 *
 * Tokens are managed through a helper reference structure, lwkt_tokref.
 * Each thread has a stack of tokref's to keep track of acquired tokens.
 * Multiple tokref's may reference the same token.
 *
 * EXCLUSIVE TOKENS
 *	Acquiring an exclusive token requires acquiring the EXCLUSIVE bit
 *	with count == 0.  If the exclusive bit cannot be acquired, EXCLREQ
 *	is set.  Once acquired, EXCLREQ is cleared (but could get set by
 *	another thread also trying for an exclusive lock at any time).
 *
 * SHARED TOKENS
 *	Acquiring a shared token requires waiting for the EXCLUSIVE bit
 *	to be cleared and then acquiring a count.  A shared lock request
 *	can temporarily acquire a count and then back it out if it is
 *	unable to obtain the EXCLUSIVE bit, allowing fetchadd to be used.
 *
 *	A thread attempting to get a single shared token will defer to
 *	pending exclusive requesters.  However, a thread already holding
 *	one or more tokens and trying to get an additional shared token
 *	cannot defer to exclusive requesters because doing so can lead
 *	to a deadlock.
 *
 * Multiple exclusive tokens are handled by treating the additional tokens
 * as a special case of the shared token, incrementing the count value.
 * This reduces the complexity of the token release code.
 */
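/*
 * Example (illustrative sketch, not part of the original header; the
 * foo_token/foo names are hypothetical):  a subsystem typically declares
 * a token statically and brackets access to the data it protects with
 * lwkt_gettoken()/lwkt_reltoken(), declared later in this file.  Readers
 * that can tolerate shared access use lwkt_gettoken_shared().
 *
 *	static struct lwkt_token foo_token = LWKT_TOKEN_INITIALIZER(foo_token);
 *
 *	void
 *	foo_modify(struct foo *fp)
 *	{
 *		lwkt_gettoken(&foo_token);	-- exclusive, may block
 *		fp->value++;			-- serialized while we run
 *		lwkt_reltoken(&foo_token);
 *	}
 *
 *	void
 *	foo_read(const struct foo *fp)
 *	{
 *		lwkt_gettoken_shared(&foo_token);
 *		-- read-only access to the protected data
 *		lwkt_reltoken(&foo_token);
 *	}
 *
 * Remember from the comment above that the serialization only holds while
 * the thread runs; if it blocks, the token is temporarily lost and then
 * reacquired before the thread resumes.
 */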
typedef struct lwkt_token {
    long		t_count;	/* Shared/exclreq/exclusive access */
    struct lwkt_tokref	*t_ref;		/* Exclusive ref */
    long		t_collisions;	/* Collision counter */
    const char		*t_desc;	/* Descriptive name */
} lwkt_token;

#define TOK_EXCLUSIVE	0x00000001	/* Exclusive lock held */
#define TOK_EXCLREQ	0x00000002	/* Exclusive request pending */
#define TOK_INCR	4		/* Shared count increment */
#define TOK_COUNTMASK	(~(long)(TOK_EXCLUSIVE|TOK_EXCLREQ))

/*
 * Static initialization for a lwkt_token.
 */
#define LWKT_TOKEN_INITIALIZER(name)	\
{					\
	.t_count = 0,			\
	.t_ref = NULL,			\
	.t_collisions = 0,		\
	.t_desc = #name			\
}

/*
 * Assert that a particular token is held
 */
#define LWKT_TOKEN_HELD_ANY(tok)	_lwkt_token_held_any(tok, curthread)
#define LWKT_TOKEN_HELD_EXCL(tok)	_lwkt_token_held_excl(tok, curthread)

#define ASSERT_LWKT_TOKEN_HELD(tok)		\
	KKASSERT(LWKT_TOKEN_HELD_ANY(tok))

#define ASSERT_LWKT_TOKEN_HELD_EXCL(tok)	\
	KKASSERT(LWKT_TOKEN_HELD_EXCL(tok))

#define ASSERT_NO_TOKENS_HELD(td)	\
	KKASSERT((td)->td_toks_stop == &td->td_toks_array[0])

struct lwkt_tokref {
    lwkt_token_t	tr_tok;		/* token in question */
    long		tr_count;	/* TOK_EXCLUSIVE|TOK_EXCLREQ or 0 */
    struct thread	*tr_owner;	/* me */
};

#define MAXCPUFIFO	32		/* power of 2 */
#define MAXCPUFIFO_MASK	(MAXCPUFIFO - 1)
#define LWKT_MAXTOKENS	32		/* max tokens beneficially held by thread */

/*
 * Always cast to ipifunc3_t when registering an ipi.  The actual ipi function
 * is called with both the data and an interrupt frame, but the ipi function
 * that is registered might only declare a data argument.
 */
typedef void (*ipifunc1_t)(void *arg);
typedef void (*ipifunc2_t)(void *arg, int arg2);
typedef void (*ipifunc3_t)(void *arg, int arg2, struct intrframe *frame);

struct lwkt_ipiq {
    int		ip_rindex;	/* only written by target cpu */
    int		ip_xindex;	/* written by target, indicates completion */
    int		ip_windex;	/* only written by source cpu */
    int		ip_drain;	/* drain source limit */
    struct {
	ipifunc3_t	func;
	void		*arg1;
	int		arg2;
	char		filler[32 - sizeof(int) - sizeof(void *) * 2];
    } ip_info[MAXCPUFIFO];
};
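/*
 * Example (sketch, not from the original header; remote_gd and
 * do_something are hypothetical):  a handler written with fewer arguments
 * is cast to ipifunc3_t when handed to the ipiq routines declared later
 * in this file, and the returned sequence number can be handed to
 * lwkt_wait_ipiq() if the sender needs to wait for completion.
 *
 *	static void
 *	do_something(void *arg)
 *	{
 *		-- runs on the target cpu
 *	}
 *
 *	int seq;
 *
 *	seq = lwkt_send_ipiq3(remote_gd, (ipifunc3_t)do_something, arg, 0);
 *	lwkt_wait_ipiq(remote_gd, seq);	-- optional, waits for the target
 */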
/*
 * CPU Synchronization structure.  See lwkt_cpusync_interlock() and
 * lwkt_cpusync_deinterlock() below for more information.
 */
typedef void (*cpusync_func_t)(void *arg);

struct lwkt_cpusync {
    cpumask_t	cs_mask;	/* cpus running the sync */
    cpumask_t	cs_mack;	/* mask acknowledge */
    cpusync_func_t cs_func;	/* function to execute */
    void	*cs_data;	/* function data */
};
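/*
 * Example (sketch; the mask/function names are hypothetical and the exact
 * form of lwkt_cpusync_init() is the inline documented in sys/thread2.h,
 * noted near the cpusync prototypes below).  A cpu typically interlocks
 * the target cpus, performs the operation the other cpus must not race,
 * and then deinterlocks, which runs cs_func on the cpus in the mask:
 *
 *	struct lwkt_cpusync cs;
 *
 *	lwkt_cpusync_init(&cs, target_mask, invalidate_func, data);
 *	lwkt_cpusync_interlock(&cs);	-- target cpus held
 *	-- perform the racy operation here
 *	lwkt_cpusync_deinterlock(&cs);	-- run cs_func, release the cpus
 */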
/*
 * The standard message and queue structure used for communications between
 * cpus.  Messages are typically queued via a machine-specific non-linked
 * FIFO matrix allowing any cpu to send a message to any other cpu without
 * blocking.
 */
typedef struct lwkt_cpu_msg {
    void	(*cm_func)(lwkt_cpu_msg_t msg);	/* primary dispatch function */
    int		cm_code;		/* request code if applicable */
    int		cm_cpu;			/* reply to cpu */
    thread_t	cm_originator;		/* originating thread for wakeup */
} lwkt_cpu_msg;

/*
 * Thread structure.  Note that ownership of a thread structure is special
 * cased and there is no 'token'.  A thread is always owned by the cpu
 * represented by td_gd.  Any manipulation of the thread by some other cpu
 * must be done through cpu_*msg() functions.  e.g. you could request
 * ownership of a thread that way, or hand a thread off to another cpu.
 *
 * NOTE: td_ucred is synchronized from the p_ucred on user->kernel syscall,
 *	 trap, and AST/signal transitions to provide a stable ucred for
 *	 (primarily) system calls.  This field will be NULL for pure kernel
 *	 threads.
 */
struct md_intr_info;

struct thread {
    TAILQ_ENTRY(thread) td_threadq;
    TAILQ_ENTRY(thread) td_allq;
    TAILQ_ENTRY(thread) td_sleepq;
    lwkt_port	td_msgport;	/* built-in message port for replies */
    struct lwp	*td_lwp;	/* (optional) associated lwp */
    struct proc	*td_proc;	/* (optional) associated process */
    struct pcb	*td_pcb;	/* points to pcb and top of kstack */
    struct globaldata *td_gd;	/* associated with this cpu */
    const char	*td_wmesg;	/* string name for blockage */
    const volatile void	*td_wchan;	/* waiting on channel */
    int		td_pri;		/* 0-31, 31=highest priority (note 1) */
    int		td_critcount;	/* critical section priority */
    u_int	td_flags;	/* TDF flags */
    int		td_wdomain;	/* domain for wchan address (typ 0) */
    void	(*td_preemptable)(struct thread *td, int critcount);
    void	(*td_release)(struct thread *td);
    char	*td_kstack;	/* kernel stack */
    int		td_kstack_size;	/* size of kernel stack */
    char	*td_sp;		/* kernel stack pointer for LWKT restore */
    thread_t	(*td_switch)(struct thread *ntd);
    __uint64_t	td_uticks;	/* Statclock hits in user mode (uS) */
    __uint64_t	td_sticks;	/* Statclock hits in system mode (uS) */
    __uint64_t	td_iticks;	/* Statclock hits processing intr (uS) */
    int		td_locks;	/* lockmgr lock debugging */
    void	*td_unused01;	/* (future I/O scheduler heuristic) */
    int		td_refs;	/* hold position in gd_tdallq / hold free */
    int		td_nest_count;	/* prevent splz nesting */
    u_int	td_contended;	/* token contention count */
    u_int	td_mpflags;	/* flags can be set by foreign cpus */
    int		td_cscount;	/* cpu synchronization master */
    int		td_wakefromcpu;	/* who woke me up? */
    int		td_upri;	/* user priority (sub-priority under td_pri) */
    int		td_type;	/* thread type, TD_TYPE_ */
    int		td_tracker;	/* for callers to debug lock counts */
    int		td_unused03[4];	/* for future fields */
    struct iosched_data td_iosdata;	/* Dynamic I/O scheduling data */
    struct timeval td_start;	/* start time for a thread/process */
    char	td_comm[MAXCOMLEN+1];	/* typ 16+1 bytes */
    struct thread *td_preempted;	/* we preempted this thread */
    struct ucred *td_ucred;	/* synchronized from p_ucred */
    void	*td_vmm;	/* vmm private data */
    lwkt_tokref_t td_toks_have;	/* tokens we own */
    lwkt_tokref_t td_toks_stop;	/* tokens we want */
    struct lwkt_tokref td_toks_array[LWKT_MAXTOKENS];
    int		td_fairq_load;	/* fairq */
    int		td_fairq_count;	/* fairq */
    struct globaldata *td_migrate_gd;	/* target gd for thread migration */
#ifdef DEBUG_CRIT_SECTIONS
#define CRIT_DEBUG_ARRAY_SIZE	32
#define CRIT_DEBUG_ARRAY_MASK	(CRIT_DEBUG_ARRAY_SIZE - 1)
    const char	*td_crit_debug_array[CRIT_DEBUG_ARRAY_SIZE];
    int		td_crit_debug_index;
    int		td_in_crit_report;
#endif
    struct md_thread td_mach;
#ifdef DEBUG_LOCKS
#define SPINLOCK_DEBUG_ARRAY_SIZE	32
    int		td_spinlock_stack_id[SPINLOCK_DEBUG_ARRAY_SIZE];
    struct spinlock *td_spinlock_stack[SPINLOCK_DEBUG_ARRAY_SIZE];
    void	*td_spinlock_caller_pc[SPINLOCK_DEBUG_ARRAY_SIZE];

    /*
     * Track lockmgr locks held; lk->lk_filename:lk->lk_lineno is the holder
     */
#define LOCKMGR_DEBUG_ARRAY_SIZE	8
    int		td_lockmgr_stack_id[LOCKMGR_DEBUG_ARRAY_SIZE];
    struct lock	*td_lockmgr_stack[LOCKMGR_DEBUG_ARRAY_SIZE];
#endif
};

#define td_toks_base	td_toks_array[0]
#define td_toks_end	td_toks_array[LWKT_MAXTOKENS]

#define TD_TOKS_HELD(td)	((td)->td_toks_stop != &(td)->td_toks_base)
#define TD_TOKS_NOT_HELD(td)	((td)->td_toks_stop == &(td)->td_toks_base)
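/*
 * Example (sketch; foo_token is hypothetical):  code that requires its
 * caller to hold a token can assert it with the macros above, and
 * debugging code can walk the active portion of the tokref stack between
 * td_toks_base and td_toks_stop:
 *
 *	ASSERT_LWKT_TOKEN_HELD(&foo_token);
 *
 *	lwkt_tokref_t ref;
 *
 *	for (ref = &td->td_toks_base; ref < td->td_toks_stop; ++ref)
 *		kprintf("held token %s\n", ref->tr_tok->t_desc);
 */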
/*
 * Thread flags.  Note that TDF_RUNNING is cleared on the old thread after
 * we switch to the new one, which is necessary because LWKTs don't need
 * to hold the BGL.  This flag is used by the exit code and the managed
 * thread migration code.  Note in addition that preemption will cause
 * TDF_RUNNING to be cleared temporarily, so any code checking TDF_RUNNING
 * must also check TDF_PREEMPT_LOCK.
 *
 * LWKT threads stay on their (per-cpu) run queue while running, not to
 * be confused with user processes which are removed from the user scheduling
 * run queue while actually running.
 *
 * td_threadq can represent the thread on one of three queues... the LWKT
 * run queue, a tsleep queue, or an lwkt blocking queue.  The LWKT subsystem
 * does not allow a thread to be scheduled if it already resides on some
 * queue.
 */
#define TDF_RUNNING		0x00000001	/* thread still active */
#define TDF_RUNQ		0x00000002	/* on an LWKT run queue */
#define TDF_PREEMPT_LOCK	0x00000004	/* I have been preempted */
#define TDF_PREEMPT_DONE	0x00000008	/* acknowledge preemption complete */
#define TDF_NOSTART		0x00000010	/* do not schedule on create */
#define TDF_MIGRATING		0x00000020	/* thread is being migrated */
#define TDF_SINTR		0x00000040	/* interruptability for 'ps' */
#define TDF_TSLEEPQ		0x00000080	/* on a tsleep wait queue */

#define TDF_SYSTHREAD		0x00000100	/* reserve memory may be used */
#define TDF_ALLOCATED_THREAD	0x00000200	/* objcache allocated thread */
#define TDF_ALLOCATED_STACK	0x00000400	/* objcache allocated stack */
#define TDF_VERBOSE		0x00000800	/* verbose on exit */
#define TDF_DEADLKTREAT		0x00001000	/* special lockmgr treatment */
#define TDF_MARKER		0x00002000	/* tdallq list scan marker */
#define TDF_TIMEOUT_RUNNING	0x00004000	/* tsleep timeout race */
#define TDF_TIMEOUT		0x00008000	/* tsleep timeout */
#define TDF_INTTHREAD		0x00010000	/* interrupt thread */
#define TDF_TSLEEP_DESCHEDULED	0x00020000	/* tsleep core deschedule */
#define TDF_BLOCKED		0x00040000	/* Thread is blocked */
#define TDF_PANICWARN		0x00080000	/* panic warning in switch */
#define TDF_BLOCKQ		0x00100000	/* on block queue */
#define TDF_FORCE_SPINPORT	0x00200000
#define TDF_EXITING		0x00400000	/* thread exiting */
#define TDF_USINGFP		0x00800000	/* thread using fp coproc */
#define TDF_KERNELFP		0x01000000	/* kernel using fp coproc */
#define TDF_DELAYED_WAKEUP	0x02000000
#define TDF_FIXEDCPU		0x04000000	/* running cpu is fixed */
#define TDF_USERMODE		0x08000000	/* in or entering user mode */
#define TDF_NOFAULT		0x10000000	/* force onfault on fault */
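/*
 * Example (sketch):  per the comment above, a wait loop that needs to know
 * whether a thread is truly off-cpu must treat a preempted thread as still
 * running, so it tests both flags:
 *
 *	while (td->td_flags & (TDF_RUNNING | TDF_PREEMPT_LOCK))
 *		;	-- spin/retry until the thread is really off-cpu
 */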
#define TDF_MP_STOPREQ		0x00000001	/* suspend_kproc */
#define TDF_MP_WAKEREQ		0x00000002	/* resume_kproc */
#define TDF_MP_EXITWAIT		0x00000004	/* reaper, see lwp_wait() */
#define TDF_MP_EXITSIG		0x00000008	/* reaper, see lwp_wait() */
#define TDF_MP_BATCH_DEMARC	0x00000010	/* batch mode handling */
#define TDF_MP_DIDYIELD		0x00000020	/* affects scheduling */

#define TD_TYPE_GENERIC		0		/* generic thread */
#define TD_TYPE_CRYPTO		1		/* crypto thread */
#define TD_TYPE_NETISR		2		/* netisr thread */

/*
 * Thread priorities.  Typically only one thread from any given
 * user process scheduling queue is on the LWKT run queue at a time.
 * Remember that there is one LWKT run queue per cpu.
 *
 * Critical sections are handled by bumping td_pri above TDPRI_MAX, which
 * causes interrupts to be masked as they occur.  When this occurs a
 * rollup flag will be set in mycpu->gd_reqflags.
 */
#define TDPRI_IDLE_THREAD	0	/* the idle thread */
#define TDPRI_IDLE_WORK		1	/* idle work (page zero, etc) */
#define TDPRI_USER_SCHEDULER	2	/* user scheduler helper */
#define TDPRI_USER_IDLE		4	/* user scheduler idle */
#define TDPRI_USER_NORM		6	/* user scheduler normal */
#define TDPRI_USER_REAL		8	/* user scheduler real time */
#define TDPRI_KERN_LPSCHED	9	/* (comparison point only) */
#define TDPRI_KERN_USER		10	/* kernel / block in syscall */
#define TDPRI_KERN_DAEMON	12	/* kernel daemon (pageout, etc) */
#define TDPRI_SOFT_NORM		14	/* kernel / normal */
#define TDPRI_SOFT_TIMER	16	/* kernel / timer */
#define TDPRI_UNUSED19		19
#define TDPRI_INT_SUPPORT	20	/* kernel / high priority support */
#define TDPRI_INT_LOW		27	/* low priority interrupt */
#define TDPRI_INT_MED		28	/* medium priority interrupt */
#define TDPRI_INT_HIGH		29	/* high priority interrupt */
#define TDPRI_MAX		31

#define LWKT_THREAD_STACK	(UPAGES * PAGE_SIZE)

#define IN_CRITICAL_SECT(td)	((td)->td_critcount)
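/*
 * Example (sketch; the worker function and thread name are hypothetical):
 * a kernel thread is typically created with lwkt_create(), declared below,
 * and terminates itself with lwkt_exit():
 *
 *	static void
 *	foo_worker(void *arg)
 *	{
 *		-- do work, block on tokens/tsleep as needed
 *		lwkt_exit();
 *	}
 *
 *	struct thread *foo_td;
 *
 *	lwkt_create(foo_worker, NULL, &foo_td, NULL, 0, -1, "fooworker");
 *
 * The integer arguments supply the TDF flags and the target cpu (a
 * negative cpu is commonly passed when the thread need not be bound to a
 * specific cpu), followed by a printf-style thread name.
 */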
#ifdef _KERNEL

/*
 * Global tokens
 */
extern struct lwkt_token mp_token;
extern struct lwkt_token pmap_token;
extern struct lwkt_token dev_token;
extern struct lwkt_token vm_token;
extern struct lwkt_token vmspace_token;
extern struct lwkt_token kvm_token;
extern struct lwkt_token sigio_token;
extern struct lwkt_token tty_token;
extern struct lwkt_token vnode_token;
extern struct lwkt_token revoke_token;

/*
 * Procedures
 */
extern struct thread *lwkt_alloc_thread(struct thread *, int, int, int);
extern void lwkt_init_thread(struct thread *, void *, int, int,
			     struct globaldata *);
extern void lwkt_set_interrupt_support_thread(void);
extern void lwkt_set_comm(thread_t, const char *, ...) __printflike(2, 3);
extern void lwkt_free_thread(struct thread *);
extern void lwkt_gdinit(struct globaldata *);
extern void lwkt_switch(void);
extern void lwkt_switch_return(struct thread *);
extern void lwkt_preempt(thread_t, int);
extern void lwkt_schedule(thread_t);
extern void lwkt_schedule_noresched(thread_t);
extern void lwkt_schedule_self(thread_t);
extern void lwkt_deschedule(thread_t);
extern void lwkt_deschedule_self(thread_t);
extern void lwkt_yield(void);
extern void lwkt_yield_quick(void);
extern void lwkt_user_yield(void);
extern void lwkt_hold(thread_t);
extern void lwkt_rele(thread_t);
extern void lwkt_passive_release(thread_t);
extern void lwkt_maybe_splz(thread_t);

extern void lwkt_gettoken(lwkt_token_t);
extern void lwkt_gettoken_shared(lwkt_token_t);
extern int  lwkt_trytoken(lwkt_token_t);
extern void lwkt_reltoken(lwkt_token_t);
extern int  lwkt_cnttoken(lwkt_token_t, thread_t);
extern int  lwkt_getalltokens(thread_t, int);
extern void lwkt_relalltokens(thread_t);
extern void lwkt_token_init(lwkt_token_t, const char *);
extern void lwkt_token_uninit(lwkt_token_t);

extern void lwkt_token_pool_init(void);
extern lwkt_token_t lwkt_token_pool_lookup(void *);
extern lwkt_token_t lwkt_getpooltoken(void *);
extern void lwkt_relpooltoken(void *);

extern void lwkt_token_swap(void);

extern void lwkt_setpri(thread_t, int);
extern void lwkt_setpri_initial(thread_t, int);
extern void lwkt_setpri_self(int);
extern void lwkt_schedulerclock(thread_t td);
extern void lwkt_setcpu_self(struct globaldata *);
extern void lwkt_migratecpu(int);

extern void lwkt_giveaway(struct thread *);
extern void lwkt_acquire(struct thread *);
extern int  lwkt_send_ipiq3(struct globaldata *, ipifunc3_t, void *, int);
extern int  lwkt_send_ipiq3_passive(struct globaldata *, ipifunc3_t,
				    void *, int);
extern int  lwkt_send_ipiq3_bycpu(int, ipifunc3_t, void *, int);
extern int  lwkt_send_ipiq3_mask(cpumask_t, ipifunc3_t, void *, int);
extern void lwkt_wait_ipiq(struct globaldata *, int);
extern void lwkt_process_ipiq(void);
extern void lwkt_process_ipiq_frame(struct intrframe *);
extern void lwkt_smp_stopped(void);
extern void lwkt_synchronize_ipiqs(const char *);

/* lwkt_cpusync_init() - inline function in sys/thread2.h */
extern void lwkt_cpusync_simple(cpumask_t, cpusync_func_t, void *);
extern void lwkt_cpusync_interlock(lwkt_cpusync_t);
extern void lwkt_cpusync_deinterlock(lwkt_cpusync_t);
extern void lwkt_cpusync_quick(lwkt_cpusync_t);

extern void crit_panic(void) __dead2;
extern struct lwp *lwkt_preempted_proc(void);

extern int  lwkt_create (void (*func)(void *), void *, struct thread **,
			 struct thread *, int, int,
			 const char *, ...) __printflike(7, 8);
extern void lwkt_exit (void) __dead2;
extern void lwkt_remove_tdallq (struct thread *);

#endif	/* _KERNEL */

#endif	/* _SYS_THREAD_H_ */