/*
 * SYS/THREAD.H
 *
 * Implements the architecture-independent portion of the LWKT
 * subsystem.
 *
 * Types which must already be defined when this header is included by
 * userland: struct md_thread
 */

#ifndef _SYS_THREAD_H_
#define _SYS_THREAD_H_

#ifndef _SYS_STDINT_H_
#include <sys/stdint.h>		/* __int types */
#endif
#ifndef _SYS_PARAM_H_
#include <sys/param.h>		/* MAXCOMLEN */
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>		/* TAILQ_* macros */
#endif
#ifndef _SYS_MSGPORT_H_
#include <sys/msgport.h>	/* lwkt_port */
#endif
#ifndef _SYS_TIME_H_
#include <sys/time.h>		/* struct timeval */
#endif
#ifndef _SYS_LOCK_H
#include <sys/lock.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif
#ifndef _SYS_IOSCHED_H_
#include <sys/iosched.h>
#endif
#include <machine/thread.h>

struct globaldata;
struct lwp;
struct proc;
struct thread;
struct lwkt_queue;
struct lwkt_token;
struct lwkt_tokref;
struct lwkt_ipiq;
struct lwkt_cpu_msg;
struct lwkt_cpu_port;
struct lwkt_cpusync;
struct fdnode;
union sysunion;

typedef struct lwkt_queue	*lwkt_queue_t;
typedef struct lwkt_token	*lwkt_token_t;
typedef struct lwkt_tokref	*lwkt_tokref_t;
typedef struct lwkt_cpu_msg	*lwkt_cpu_msg_t;
typedef struct lwkt_cpu_port	*lwkt_cpu_port_t;
typedef struct lwkt_ipiq	*lwkt_ipiq_t;
typedef struct lwkt_cpusync	*lwkt_cpusync_t;
typedef struct thread		*thread_t;

typedef TAILQ_HEAD(lwkt_queue, thread) lwkt_queue;

/*
 * Differentiation between kernel threads and user threads.  Userland
 * programs which want access to kernel structures have to define
 * _KERNEL_STRUCTURES.  This is a kind of safety valve to prevent badly
 * written user programs from getting an LWKT thread that is neither the
 * kernel nor the user version.
 */
#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
#ifndef _CPU_FRAME_H_
#include <machine/frame.h>
#endif
#else
struct intrframe;
#endif
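
/*
 * Usage sketch (hypothetical example): a userland program that needs
 * the kernel-side structure layouts, such as a kvm-based inspection
 * tool, defines _KERNEL_STRUCTURES before pulling in this header:
 *
 *	#define _KERNEL_STRUCTURES
 *	#include <sys/thread.h>
 */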

/*
 * Tokens are used to serialize access to information.  They are 'soft'
 * serialization entities that only stay in effect while a thread is
 * running.  If the thread blocks, other threads can run holding the same
 * token(s).  The tokens are reacquired when the original thread resumes.
 *
 * Tokens guarantee that no deadlock can happen regardless of type or
 * ordering.  However, obtaining the same token first shared, then
 * stacking exclusive, is not allowed and will panic.
 *
 * A thread can depend on its serialization remaining intact through a
 * preemption.  An interrupt which attempts to use the same token as the
 * thread being preempted will reschedule itself for non-preemptive
 * operation, so the new token code is capable of interlocking against
 * interrupts as well as other cpus.  This means that your token can only
 * be (temporarily) lost if you *explicitly* block.
 *
 * Tokens are managed through a helper reference structure, lwkt_tokref.  Each
 * thread has a stack of tokref's to keep track of acquired tokens.  Multiple
 * tokref's may reference the same token.
 *
 * EXCLUSIVE TOKENS
 *	Acquiring an exclusive token requires acquiring the EXCLUSIVE bit
 *	with count == 0.  If the exclusive bit cannot be acquired, EXCLREQ
 *	is set.  Once acquired, EXCLREQ is cleared (but could get set by
 *	another thread also trying for an exclusive lock at any time).
 *
 * SHARED TOKENS
 *	Acquiring a shared token requires waiting for the EXCLUSIVE bit
 *	to be cleared and then acquiring a count.  A shared lock request
 *	can temporarily acquire a count and then back it out if it is
 *	unable to obtain the EXCLUSIVE bit, allowing fetchadd to be used.
 *
 *	A thread attempting to get a single shared token will defer to
 *	pending exclusive requesters.  However, a thread already holding
 *	one or more tokens and trying to get an additional shared token
 *	cannot defer to exclusive requesters because doing so can lead
 *	to a deadlock.
 *
 * Multiple exclusive tokens are handled by treating the additional tokens
 * as a special case of the shared token, incrementing the count value.  This
 * reduces the complexity of the token release code.
 */

typedef struct lwkt_token {
    long		t_count;	/* Shared/exclreq/exclusive access */
    struct lwkt_tokref	*t_ref;		/* Exclusive ref */
    long		t_collisions;	/* Collision counter */
    const char		*t_desc;	/* Descriptive name */
} lwkt_token;

#define TOK_EXCLUSIVE	0x00000001	/* Exclusive lock held */
#define TOK_EXCLREQ	0x00000002	/* Exclusive request pending */
#define TOK_INCR	4		/* Shared count increment */
#define TOK_COUNTMASK	(~(long)(TOK_EXCLUSIVE|TOK_EXCLREQ))

/*
 * Static initialization for a lwkt_token.
 */
#define LWKT_TOKEN_INITIALIZER(name)	\
{					\
	.t_count = 0,			\
	.t_ref = NULL,			\
	.t_collisions = 0,		\
	.t_desc = #name			\
}

/*
 * Assert that a particular token is held
 */
#define LWKT_TOKEN_HELD_ANY(tok)	_lwkt_token_held_any(tok, curthread)
#define LWKT_TOKEN_HELD_EXCL(tok)	_lwkt_token_held_excl(tok, curthread)

#define ASSERT_LWKT_TOKEN_HELD(tok)		\
	KKASSERT(LWKT_TOKEN_HELD_ANY(tok))

#define ASSERT_LWKT_TOKEN_HELD_EXCL(tok)	\
	KKASSERT(LWKT_TOKEN_HELD_EXCL(tok))

#define ASSERT_NO_TOKENS_HELD(td)	\
	KKASSERT((td)->td_toks_stop == &td->td_toks_array[0])

struct lwkt_tokref {
    lwkt_token_t	tr_tok;		/* token in question */
    long		tr_count;	/* TOK_EXCLUSIVE|TOK_EXCLREQ or 0 */
    struct thread	*tr_owner;	/* me */
};

#define MAXCPUFIFO	32	/* power of 2 */
#define MAXCPUFIFO_MASK	(MAXCPUFIFO - 1)
#define LWKT_MAXTOKENS	32	/* max tokens beneficially held by thread */
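
/*
 * Usage sketch (hypothetical example; the xyz_* names are made up):
 * serializing access to a shared structure with a token, using
 * lwkt_gettoken(), lwkt_gettoken_shared() and lwkt_reltoken(), which
 * are declared later in this file.  Because a token is released while
 * its holder sleeps and reacquired before the holder resumes,
 * token-protected state can only change underneath the caller across
 * an explicit blocking point.
 *
 *	static struct lwkt_token xyz_token =
 *		LWKT_TOKEN_INITIALIZER(xyz_token);
 *
 *	static void
 *	xyz_modify(struct xyz *x)
 *	{
 *		lwkt_gettoken(&xyz_token);
 *		ASSERT_LWKT_TOKEN_HELD(&xyz_token);
 *		... modify *x exclusively ...
 *		lwkt_reltoken(&xyz_token);
 *	}
 *
 *	static void
 *	xyz_read(const struct xyz *x)
 *	{
 *		lwkt_gettoken_shared(&xyz_token);
 *		... read *x, other shared holders may run ...
 *		lwkt_reltoken(&xyz_token);
 *	}
 */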

/*
 * Always cast to ipifunc_t when registering an ipi.  The actual ipi function
 * is called with both the data and an interrupt frame, but the ipi function
 * that is registered might only declare a data argument.
 */
typedef void (*ipifunc1_t)(void *arg);
typedef void (*ipifunc2_t)(void *arg, int arg2);
typedef void (*ipifunc3_t)(void *arg, int arg2, struct intrframe *frame);

struct lwkt_ipiq {
    int		ip_rindex;	/* only written by target cpu */
    int		ip_xindex;	/* written by target, indicates completion */
    int		ip_windex;	/* only written by source cpu */
    int		ip_drain;	/* drain source limit */
    struct {
	ipifunc3_t	func;
	void		*arg1;
	int		arg2;
	char		filler[32 - sizeof(int) - sizeof(void *) * 2];
    } ip_info[MAXCPUFIFO];
};

/*
 * CPU Synchronization structure.  See lwkt_cpusync_interlock() and
 * lwkt_cpusync_deinterlock() below for more information.
 */
typedef void (*cpusync_func_t)(void *arg);

struct lwkt_cpusync {
    cpumask_t	cs_mask;	/* cpus running the sync */
    cpumask_t	cs_mack;	/* mask acknowledge */
    cpusync_func_t cs_func;	/* function to execute */
    void	*cs_data;	/* function data */
};
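
/*
 * Usage sketch (hypothetical example): running a small function on a
 * set of cpus through the simple cpusync interface declared later in
 * this file.  This assumes lwkt_cpusync_simple() executes the callback
 * on the cpus in the mask and returns once they have acknowledged;
 * the callback and wrapper names are made up.
 *
 *	static void
 *	flush_local_state(void *arg)
 *	{
 *		... per-cpu work, runs on each cpu in the mask ...
 *	}
 *
 *	static void
 *	flush_all_state(cpumask_t mask)
 *	{
 *		lwkt_cpusync_simple(mask, flush_local_state, NULL);
 *	}
 */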

/*
 * The standard message and queue structure used for communications between
 * cpus.  Messages are typically queued via a machine-specific non-linked
 * FIFO matrix allowing any cpu to send a message to any other cpu without
 * blocking.
 */
typedef struct lwkt_cpu_msg {
    void	(*cm_func)(lwkt_cpu_msg_t msg);	/* primary dispatch function */
    int		cm_code;		/* request code if applicable */
    int		cm_cpu;			/* reply to cpu */
    thread_t	cm_originator;		/* originating thread for wakeup */
} lwkt_cpu_msg;

/*
 * per-thread file descriptor cache
 */
struct fdcache {
    int		fd;		/* descriptor being cached */
    int		locked;
    struct file	*fp;		/* cached referenced fp */
    int		lru;
    int		unused[3];
} __cachealign;

#define NFDCACHE	4	/* max fd's cached by a thread */

/*
 * Thread structure.  Note that ownership of a thread structure is special
 * cased and there is no 'token'.  A thread is always owned by the cpu
 * represented by td_gd; any manipulation of the thread by some other cpu
 * must be done through cpu_*msg() functions, e.g. you could request
 * ownership of a thread that way, or hand a thread off to another cpu.
 *
 * NOTE: td_ucred is synchronized from the p_ucred on user->kernel syscall,
 *	 trap, and AST/signal transitions to provide a stable ucred for
 *	 (primarily) system calls.  This field will be NULL for pure kernel
 *	 threads.
 */
struct md_intr_info;

struct thread {
    TAILQ_ENTRY(thread) td_threadq;
    TAILQ_ENTRY(thread) td_allq;
    TAILQ_ENTRY(thread) td_sleepq;
    lwkt_port	td_msgport;	/* built-in message port for replies */
    struct lwp	*td_lwp;	/* (optional) associated lwp */
    struct proc	*td_proc;	/* (optional) associated process */
    struct pcb	*td_pcb;	/* points to pcb and top of kstack */
    struct globaldata *td_gd;	/* associated with this cpu */
    const char	*td_wmesg;	/* string name for blockage */
    const volatile void	*td_wchan;	/* waiting on channel */
    int		td_pri;		/* 0-31, 31=highest priority (note 1) */
    int		td_critcount;	/* critical section priority */
    u_int	td_flags;	/* TDF flags */
    int		td_wdomain;	/* domain for wchan address (typ 0) */
    void	(*td_preemptable)(struct thread *td, int critcount);
    void	(*td_release)(struct thread *td);
    char	*td_kstack;	/* kernel stack */
    int		td_kstack_size;	/* size of kernel stack */
    char	*td_sp;		/* kernel stack pointer for LWKT restore */
    thread_t	(*td_switch)(struct thread *ntd);
    __uint64_t	td_uticks;	/* Statclock hits in user mode (uS) */
    __uint64_t	td_sticks;	/* Statclock hits in system mode (uS) */
    __uint64_t	td_iticks;	/* Statclock hits processing intr (uS) */
    int		td_locks;	/* lockmgr lock debugging */
    struct plimit *td_limit;	/* synchronized from proc->p_limit */
    int		td_refs;	/* hold position in gd_tdallq / hold free */
    int		td_nest_count;	/* prevent splz nesting */
    u_int	td_contended;	/* token contention count */
    u_int	td_mpflags;	/* flags can be set by foreign cpus */
    int		td_cscount;	/* cpu synchronization master */
    int		td_wakefromcpu;	/* who woke me up? */
    int		td_upri;	/* user priority (sub-priority under td_pri) */
    int		td_type;	/* thread type, TD_TYPE_ */
    int		td_tracker;	/* for callers to debug lock counts */
    int		td_fdcache_lru;
    int		td_unused03[3];	/* for future fields */
    struct iosched_data td_iosdata;	/* Dynamic I/O scheduling data */
    struct timeval td_start;	/* start time for a thread/process */
    char	td_comm[MAXCOMLEN+1];	/* typ 16+1 bytes */
    struct thread *td_preempted;	/* we preempted this thread */
    struct ucred *td_ucred;	/* synchronized from proc->p_ucred */
    void	*td_vmm;	/* vmm private data */
    lwkt_tokref_t td_toks_have;	/* tokens we own */
    lwkt_tokref_t td_toks_stop;	/* tokens we want */
    struct lwkt_tokref td_toks_array[LWKT_MAXTOKENS];
    int		td_fairq_load;	/* fairq */
    int		td_fairq_count;	/* fairq */
    struct globaldata *td_migrate_gd;	/* target gd for thread migration */
    struct fdcache td_fdcache[NFDCACHE];
#ifdef DEBUG_CRIT_SECTIONS
#define CRIT_DEBUG_ARRAY_SIZE	32
#define CRIT_DEBUG_ARRAY_MASK	(CRIT_DEBUG_ARRAY_SIZE - 1)
    const char	*td_crit_debug_array[CRIT_DEBUG_ARRAY_SIZE];
    int		td_crit_debug_index;
    int		td_in_crit_report;
#endif
    struct md_thread td_mach;
#ifdef DEBUG_LOCKS
#define SPINLOCK_DEBUG_ARRAY_SIZE	32
    int		td_spinlock_stack_id[SPINLOCK_DEBUG_ARRAY_SIZE];
    struct spinlock *td_spinlock_stack[SPINLOCK_DEBUG_ARRAY_SIZE];
    void	*td_spinlock_caller_pc[SPINLOCK_DEBUG_ARRAY_SIZE];

    /*
     * Track lockmgr locks held; lk->lk_filename:lk->lk_lineno is the holder
     */
#define LOCKMGR_DEBUG_ARRAY_SIZE	8
    int		td_lockmgr_stack_id[LOCKMGR_DEBUG_ARRAY_SIZE];
    struct lock	*td_lockmgr_stack[LOCKMGR_DEBUG_ARRAY_SIZE];
#endif
};

#define td_toks_base		td_toks_array[0]
#define td_toks_end		td_toks_array[LWKT_MAXTOKENS]

#define TD_TOKS_HELD(td)	((td)->td_toks_stop != &(td)->td_toks_base)
#define TD_TOKS_NOT_HELD(td)	((td)->td_toks_stop == &(td)->td_toks_base)
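
/*
 * Usage sketch (hypothetical example): the token reference stack runs
 * from &td->td_toks_base up to, but not including, td->td_toks_stop;
 * TD_TOKS_HELD() simply tests whether that range is non-empty.
 * Walking the tokref stack of a thread therefore looks roughly like
 * this; the function name is made up.
 *
 *	static void
 *	dump_thread_tokens(thread_t td)
 *	{
 *		lwkt_tokref_t ref;
 *
 *		if (TD_TOKS_NOT_HELD(td))
 *			return;
 *		for (ref = &td->td_toks_base; ref < td->td_toks_stop; ++ref)
 *			kprintf("tok %s count %ld\n",
 *				ref->tr_tok->t_desc, ref->tr_count);
 *	}
 */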

/*
 * Thread flags.  Note that TDF_RUNNING is cleared on the old thread after
 * we switch to the new one, which is necessary because LWKTs don't need
 * to hold the BGL.  This flag is used by the exit code and the managed
 * thread migration code.  Note in addition that preemption will cause
 * TDF_RUNNING to be cleared temporarily, so any code checking TDF_RUNNING
 * must also check TDF_PREEMPT_LOCK.
 *
 * LWKT threads stay on their (per-cpu) run queue while running, not to
 * be confused with user processes which are removed from the user scheduling
 * run queue while actually running.
 *
 * td_threadq can represent the thread on one of three queues... the LWKT
 * run queue, a tsleep queue, or an lwkt blocking queue.  The LWKT subsystem
 * does not allow a thread to be scheduled if it already resides on some
 * queue.
 */
#define TDF_RUNNING		0x00000001	/* thread still active */
#define TDF_RUNQ		0x00000002	/* on an LWKT run queue */
#define TDF_PREEMPT_LOCK	0x00000004	/* I have been preempted */
#define TDF_PREEMPT_DONE	0x00000008	/* ac preemption complete */
#define TDF_NOSTART		0x00000010	/* do not schedule on create */
#define TDF_MIGRATING		0x00000020	/* thread is being migrated */
#define TDF_SINTR		0x00000040	/* interruptibility for 'ps' */
#define TDF_TSLEEPQ		0x00000080	/* on a tsleep wait queue */

#define TDF_SYSTHREAD		0x00000100	/* reserve memory may be used */
#define TDF_ALLOCATED_THREAD	0x00000200	/* objcache allocated thread */
#define TDF_ALLOCATED_STACK	0x00000400	/* objcache allocated stack */
#define TDF_UNUSED0800		0x00000800
#define TDF_DEADLKTREAT		0x00001000	/* special lockmgr treatment */
#define TDF_MARKER		0x00002000	/* tdallq list scan marker */
#define TDF_TIMEOUT_RUNNING	0x00004000	/* tsleep timeout race */
#define TDF_TIMEOUT		0x00008000	/* tsleep timeout */
#define TDF_INTTHREAD		0x00010000	/* interrupt thread */
#define TDF_TSLEEP_DESCHEDULED	0x00020000	/* tsleep core deschedule */
#define TDF_BLOCKED		0x00040000	/* Thread is blocked */
#define TDF_PANICWARN		0x00080000	/* panic warning in switch */
#define TDF_BLOCKQ		0x00100000	/* on block queue */
#define TDF_FORCE_SPINPORT	0x00200000
#define TDF_EXITING		0x00400000	/* thread exiting */
#define TDF_USINGFP		0x00800000	/* thread using fp coproc */
#define TDF_KERNELFP		0x01000000	/* kernel using fp coproc */
#define TDF_DELAYED_WAKEUP	0x02000000
#define TDF_FIXEDCPU		0x04000000	/* running cpu is fixed */
#define TDF_USERMODE		0x08000000	/* in or entering user mode */
#define TDF_NOFAULT		0x10000000	/* force onfault on fault */

#define TDF_MP_STOPREQ		0x00000001	/* suspend_kproc */
#define TDF_MP_WAKEREQ		0x00000002	/* resume_kproc */
#define TDF_MP_EXITWAIT		0x00000004	/* reaper, see lwp_wait() */
#define TDF_MP_EXITSIG		0x00000008	/* reaper, see lwp_wait() */
#define TDF_MP_BATCH_DEMARC	0x00000010	/* batch mode handling */
#define TDF_MP_DIDYIELD		0x00000020	/* affects scheduling */

#define TD_TYPE_GENERIC		0		/* generic thread */
#define TD_TYPE_CRYPTO		1		/* crypto thread */
#define TD_TYPE_NETISR		2		/* netisr thread */
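
/*
 * Usage sketch (hypothetical example): as noted above, preemption
 * clears TDF_RUNNING temporarily while TDF_PREEMPT_LOCK remains set,
 * so code deciding whether a thread has really vacated its cpu has to
 * test both flags; the helper name is made up.
 *
 *	static int
 *	thread_off_cpu(thread_t td)
 *	{
 *		return ((td->td_flags & (TDF_RUNNING | TDF_PREEMPT_LOCK)) == 0);
 *	}
 */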

/*
 * Thread priorities.  Typically only one thread from any given
 * user process scheduling queue is on the LWKT run queue at a time.
 * Remember that there is one LWKT run queue per cpu.
 *
 * Critical sections are handled by bumping td_pri above TDPRI_MAX, which
 * causes interrupts to be masked as they occur.  When this occurs a
 * rollup flag will be set in mycpu->gd_reqflags.
 */
#define TDPRI_IDLE_THREAD	0	/* the idle thread */
#define TDPRI_IDLE_WORK		1	/* idle work (page zero, etc) */
#define TDPRI_USER_SCHEDULER	2	/* user scheduler helper */
#define TDPRI_USER_IDLE		4	/* user scheduler idle */
#define TDPRI_USER_NORM		6	/* user scheduler normal */
#define TDPRI_USER_REAL		8	/* user scheduler real time */
#define TDPRI_KERN_LPSCHED	9	/* (comparison point only) */
#define TDPRI_KERN_USER		10	/* kernel / block in syscall */
#define TDPRI_KERN_DAEMON	12	/* kernel daemon (pageout, etc) */
#define TDPRI_SOFT_NORM		14	/* kernel / normal */
#define TDPRI_SOFT_TIMER	16	/* kernel / timer */
#define TDPRI_UNUSED19		19
#define TDPRI_INT_SUPPORT	20	/* kernel / high priority support */
#define TDPRI_INT_LOW		27	/* low priority interrupt */
#define TDPRI_INT_MED		28	/* medium priority interrupt */
#define TDPRI_INT_HIGH		29	/* high priority interrupt */
#define TDPRI_MAX		31

#define LWKT_THREAD_STACK	(UPAGES * PAGE_SIZE)

#define IN_CRITICAL_SECT(td)	((td)->td_critcount)
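
/*
 * Usage sketch (hypothetical example): IN_CRITICAL_SECT() just tests
 * td_critcount, i.e. whether the thread is inside any
 * crit_enter()/crit_exit() nesting (those inlines live in
 * sys/thread2.h, not here).  A blocking path might use it defensively;
 * the helper name is made up.
 *
 *	static void
 *	assert_can_block(void)
 *	{
 *		KKASSERT(!IN_CRITICAL_SECT(curthread));
 *	}
 */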

#ifdef _KERNEL

/*
 * Global tokens
 */
extern struct lwkt_token mp_token;
extern struct lwkt_token pmap_token;
extern struct lwkt_token dev_token;
extern struct lwkt_token vm_token;
extern struct lwkt_token vmspace_token;
extern struct lwkt_token kvm_token;
extern struct lwkt_token sigio_token;
extern struct lwkt_token tty_token;
extern struct lwkt_token vnode_token;
extern struct lwkt_token revoke_token;

/*
 * Procedures
 */
extern struct thread *lwkt_alloc_thread(struct thread *, int, int, int);
extern void lwkt_init_thread(struct thread *, void *, int, int,
			     struct globaldata *);
extern void lwkt_set_interrupt_support_thread(void);
extern void lwkt_set_comm(thread_t, const char *, ...) __printflike(2, 3);
extern void lwkt_free_thread(struct thread *);
extern void lwkt_gdinit(struct globaldata *);
extern void lwkt_switch(void);
extern void lwkt_switch_return(struct thread *);
extern void lwkt_preempt(thread_t, int);
extern void lwkt_schedule(thread_t);
extern void lwkt_schedule_noresched(thread_t);
extern void lwkt_schedule_self(thread_t);
extern void lwkt_deschedule(thread_t);
extern void lwkt_deschedule_self(thread_t);
extern void lwkt_yield(void);
extern void lwkt_yield_quick(void);
extern void lwkt_user_yield(void);
extern void lwkt_hold(thread_t);
extern void lwkt_rele(thread_t);
extern void lwkt_passive_release(thread_t);
extern void lwkt_maybe_splz(thread_t);

extern void lwkt_gettoken(lwkt_token_t);
extern void lwkt_gettoken_shared(lwkt_token_t);
extern int  lwkt_trytoken(lwkt_token_t);
extern void lwkt_reltoken(lwkt_token_t);
extern int  lwkt_cnttoken(lwkt_token_t, thread_t);
extern int  lwkt_getalltokens(thread_t, int);
extern void lwkt_relalltokens(thread_t);
extern void lwkt_token_init(lwkt_token_t, const char *);
extern void lwkt_token_uninit(lwkt_token_t);

extern void lwkt_token_pool_init(void);
extern lwkt_token_t lwkt_token_pool_lookup(void *);
extern lwkt_token_t lwkt_getpooltoken(void *);
extern void lwkt_relpooltoken(void *);

extern void lwkt_token_swap(void);

extern void lwkt_setpri(thread_t, int);
extern void lwkt_setpri_initial(thread_t, int);
extern void lwkt_setpri_self(int);
extern void lwkt_schedulerclock(thread_t td);
extern void lwkt_setcpu_self(struct globaldata *);
extern void lwkt_migratecpu(int);

extern void lwkt_giveaway(struct thread *);
extern void lwkt_acquire(struct thread *);
extern int  lwkt_send_ipiq3(struct globaldata *, ipifunc3_t, void *, int);
extern int  lwkt_send_ipiq3_passive(struct globaldata *, ipifunc3_t,
				    void *, int);
extern int  lwkt_send_ipiq3_bycpu(int, ipifunc3_t, void *, int);
extern int  lwkt_send_ipiq3_mask(cpumask_t, ipifunc3_t, void *, int);
extern void lwkt_wait_ipiq(struct globaldata *, int);
extern void lwkt_process_ipiq(void);
extern void lwkt_process_ipiq_frame(struct intrframe *);
extern void lwkt_smp_stopped(void);
extern void lwkt_synchronize_ipiqs(const char *);

/* lwkt_cpusync_init() - inline function in sys/thread2.h */
extern void lwkt_cpusync_simple(cpumask_t, cpusync_func_t, void *);
extern void lwkt_cpusync_interlock(lwkt_cpusync_t);
extern void lwkt_cpusync_deinterlock(lwkt_cpusync_t);
extern void lwkt_cpusync_quick(lwkt_cpusync_t);

extern void crit_panic(void) __dead2;
extern struct lwp *lwkt_preempted_proc(void);

extern int  lwkt_create (void (*func)(void *), void *, struct thread **,
			 struct thread *, int, int,
			 const char *, ...) __printflike(7, 8);
extern void lwkt_exit (void) __dead2;
extern void lwkt_remove_tdallq (struct thread *);

#endif

#endif