/*
 * SYS/THREAD.H
 *
 * Implements the architecture-independent portion of the LWKT
 * subsystem.
 */

#ifndef _SYS_THREAD_H_
#define _SYS_THREAD_H_

#ifndef _SYS_PARAM_H_
#include <sys/param.h>		/* MAXCOMLEN */
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>		/* TAILQ_* macros */
#endif
#ifndef _SYS_MSGPORT_H_
#include <sys/msgport.h>	/* lwkt_port */
#endif
#ifndef _SYS_TIME_H_
#include <sys/time.h>		/* struct timeval */
#endif
#ifndef _SYS_LOCK_H_
#include <sys/lock.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif
#ifndef _SYS_IOSCHED_H_
#include <sys/iosched.h>
#endif
#include <machine/thread.h>	/* md_thread */
#include <machine/stdint.h>

struct globaldata;
struct lwp;
struct proc;
struct thread;
struct lwkt_queue;
struct lwkt_token;
struct lwkt_tokref;
struct lwkt_ipiq;
#if 0
struct lwkt_cpu_msg;
struct lwkt_cpu_port;
#endif
struct lwkt_cpusync;
struct fdnode;
union sysunion;

typedef struct lwkt_queue	*lwkt_queue_t;
typedef struct lwkt_token	*lwkt_token_t;
typedef struct lwkt_tokref	*lwkt_tokref_t;
#if 0
typedef struct lwkt_cpu_msg	*lwkt_cpu_msg_t;
typedef struct lwkt_cpu_port	*lwkt_cpu_port_t;
#endif
typedef struct lwkt_ipiq	*lwkt_ipiq_t;
typedef struct lwkt_cpusync	*lwkt_cpusync_t;
typedef struct thread		*thread_t;

typedef TAILQ_HEAD(lwkt_queue, thread) lwkt_queue;

/*
 * Differentiation between kernel threads and user threads.  Userland
 * programs which want access to kernel structures have to define
 * _KERNEL_STRUCTURES.  This is a kind of safety valve to prevent badly
 * written user programs from getting an LWKT thread that is neither the
 * kernel nor the user version.
 */
#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
#ifndef _SYS_CPUMASK_H_
#include <sys/cpumask.h>	/* cpumask_t */
#endif
#ifndef _CPU_FRAME_H_
#include <machine/frame.h>
#endif
#else
struct intrframe;
#endif

/*
 * Tokens are used to serialize access to information.  They are 'soft'
 * serialization entities that only stay in effect while a thread is
 * running.  If the thread blocks, other threads can run holding the same
 * token(s).  The tokens are reacquired when the original thread resumes.
 *
 * Tokens guarantee that no deadlock can happen regardless of type or
 * ordering.  However, obtaining the same token first shared, then
 * stacking exclusive, is not allowed and will panic.
 *
 * A thread can depend on its serialization remaining intact through a
 * preemption.  An interrupt which attempts to use the same token as the
 * thread being preempted will reschedule itself for non-preemptive
 * operation, so the new token code is capable of interlocking against
 * interrupts as well as other cpus.  This means that your token can only
 * be (temporarily) lost if you *explicitly* block.
 *
 * Tokens are managed through a helper reference structure, lwkt_tokref.  Each
 * thread has a stack of tokref's to keep track of acquired tokens.  Multiple
 * tokref's may reference the same token.
 *
 * EXCLUSIVE TOKENS
 *	Acquiring an exclusive token requires acquiring the EXCLUSIVE bit
 *	with count == 0.  If the exclusive bit cannot be acquired, EXCLREQ
 *	is set.  Once acquired, EXCLREQ is cleared (but could get set by
 *	another thread also trying for an exclusive lock at any time).
 *
 * SHARED TOKENS
 *	Acquiring a shared token requires waiting for the EXCLUSIVE bit
 *	to be cleared and then acquiring a count.  A shared lock request
 *	can temporarily acquire a count and then back it out if it is
 *	unable to obtain the EXCLUSIVE bit, allowing fetchadd to be used.
 *
 *	A thread attempting to get a single shared token will defer to
 *	pending exclusive requesters.  However, a thread already holding
 *	one or more tokens and trying to get an additional shared token
 *	cannot defer to exclusive requesters because doing so can lead
 *	to a deadlock.
 *
 * Multiple exclusive tokens are handled by treating the additional tokens
 * as a special case of the shared token, incrementing the count value.  This
 * reduces the complexity of the token release code.
 */

struct lwkt_token {
    long		t_count;	/* Shared/exclreq/exclusive access */
    struct lwkt_tokref	*t_ref;		/* Exclusive ref */
    long		t_collisions;	/* Collision counter */
    const char		*t_desc;	/* Descriptive name */
};

#define TOK_EXCLUSIVE	0x00000001	/* Exclusive lock held */
#define TOK_EXCLREQ	0x00000002	/* Exclusive request pending */
#define TOK_INCR	4		/* Shared count increment */
#define TOK_COUNTMASK	(~(long)(TOK_EXCLUSIVE|TOK_EXCLREQ))

/*
 * Static initialization for a lwkt_token.
 */
#define LWKT_TOKEN_INITIALIZER(name)	\
{					\
	.t_count = 0,			\
	.t_ref = NULL,			\
	.t_collisions = 0,		\
	.t_desc = #name			\
}

/*
 * Assert that a particular token is held
 */
#define LWKT_TOKEN_HELD_ANY(tok)	_lwkt_token_held_any(tok, curthread)
#define LWKT_TOKEN_HELD_EXCL(tok)	_lwkt_token_held_excl(tok, curthread)

#define ASSERT_LWKT_TOKEN_HELD(tok)		\
	KKASSERT(LWKT_TOKEN_HELD_ANY(tok))

#define ASSERT_LWKT_TOKEN_HELD_EXCL(tok)	\
	KKASSERT(LWKT_TOKEN_HELD_EXCL(tok))

#define ASSERT_NO_TOKENS_HELD(td)	\
	KKASSERT((td)->td_toks_stop == &(td)->td_toks_array[0])

struct lwkt_tokref {
    lwkt_token_t	tr_tok;		/* token in question */
    long		tr_count;	/* TOK_EXCLUSIVE|TOK_EXCLREQ or 0 */
    struct thread	*tr_owner;	/* me */
};

#define MAXCPUFIFO	256	/* power of 2 */
#define MAXCPUFIFO_MASK	(MAXCPUFIFO - 1)
#define LWKT_MAXTOKENS	32	/* max tokens beneficially held by thread */
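/*
 * Example: declaring and acquiring a token.  This is a minimal usage
 * sketch; the identifiers my_token/my_state/my_update are illustrative
 * only.  lwkt_gettoken(), lwkt_gettoken_shared() and lwkt_reltoken()
 * are prototyped near the end of this file.  Remember that a token only
 * serializes while the holder is running; it is released if the holder
 * blocks and reacquired before the holder resumes.
 *
 *	static struct lwkt_token my_token =
 *			LWKT_TOKEN_INITIALIZER(my_token);
 *	static int my_state;
 *
 *	static void
 *	my_update(int v)
 *	{
 *		lwkt_gettoken(&my_token);
 *		my_state = v;
 *		lwkt_reltoken(&my_token);
 *	}
 */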
#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
/*
 * Always cast to ipifunc3_t when registering an ipi.  The actual ipi
 * function is called with both the data and an interrupt frame, but the
 * ipi function that is registered might only declare a data argument.
 */
typedef void (*ipifunc1_t)(void *arg);
typedef void (*ipifunc2_t)(void *arg, int arg2);
typedef void (*ipifunc3_t)(void *arg, int arg2, struct intrframe *frame);

struct lwkt_ipiq {
    int		ip_rindex;	/* only written by target cpu */
    int		ip_xindex;	/* written by target, indicates completion */
    int		ip_windex;	/* only written by source cpu */
    int		ip_drain;	/* drain source limit */
    struct {
	ipifunc3_t	func;
	void		*arg1;
	int		arg2;
	char		filler[32 - sizeof(int) - sizeof(void *) * 2];
    } ip_info[MAXCPUFIFO];
};
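/*
 * Example: queueing a function call on another cpu.  A hypothetical
 * sketch; my_remote_incr, my_counter and target_gd are illustrative.
 * The handler declares only the arguments it needs and is cast to
 * ipifunc3_t at the call site, per the note above.  lwkt_send_ipiq3()
 * is prototyped near the end of this file.
 *
 *	static u_int my_counter;
 *
 *	static void
 *	my_remote_incr(void *arg)
 *	{
 *		atomic_add_int((u_int *)arg, 1);
 *	}
 *
 *	lwkt_send_ipiq3(target_gd, (ipifunc3_t)my_remote_incr,
 *			&my_counter, 0);
 */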
/*
 * CPU Synchronization structure.  See lwkt_cpusync_init() and
 * lwkt_cpusync_interlock() for more information.
 */
typedef void (*cpusync_func_t)(void *arg);

struct lwkt_cpusync {
    cpumask_t	cs_mask;	/* cpus running the sync */
    cpumask_t	cs_mack;	/* mask acknowledge */
    cpusync_func_t cs_func;	/* function to execute */
    void	*cs_data;	/* function data */
};
#endif	/* _KERNEL || _KERNEL_STRUCTURES */
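/*
 * Example: running a function on a set of cpus.  A hypothetical sketch;
 * my_invl and mask are illustrative, and lwkt_cpusync_simple()
 * (prototyped near the end of this file) is assumed to run the function
 * on every cpu in the mask before returning.
 *
 *	static void
 *	my_invl(void *data)
 *	{
 *		cpu_invltlb();
 *	}
 *
 *	lwkt_cpusync_simple(mask, my_invl, NULL);
 */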
/*
 * The standard message and queue structure used for communications between
 * cpus.  Messages are typically queued via a machine-specific non-linked
 * FIFO matrix allowing any cpu to send a message to any other cpu without
 * blocking.
 */
#if 0
typedef struct lwkt_cpu_msg {
    void	(*cm_func)(lwkt_cpu_msg_t msg);	/* primary dispatch function */
    int		cm_code;		/* request code if applicable */
    int		cm_cpu;			/* reply to cpu */
    thread_t	cm_originator;		/* originating thread for wakeup */
} lwkt_cpu_msg;
#endif

/*
 * per-thread file descriptor cache
 */
struct fdcache {
    int		fd;		/* descriptor being cached */
    int		locked;
    struct file	*fp;		/* cached referenced fp */
    int		lru;
    int		unused[3];
} __cachealign;

#define NFDCACHE	4	/* max fd's cached by a thread */

/*
 * Thread structure.  Note that ownership of a thread structure is special
 * cased and there is no 'token'.  A thread is always owned by the cpu
 * represented by td_gd; any manipulation of the thread by some other cpu
 * must be done through cpu_*msg() functions, e.g. you could request
 * ownership of a thread that way, or hand a thread off to another cpu.
 *
 * NOTE: td_ucred is synchronized from the p_ucred on user->kernel syscall,
 *	 trap, and AST/signal transitions to provide a stable ucred for
 *	 (primarily) system calls.  This field will be NULL for pure kernel
 *	 threads.
 */
struct md_intr_info;

struct thread {
    TAILQ_ENTRY(thread) td_threadq;
    TAILQ_ENTRY(thread) td_allq;
    TAILQ_ENTRY(thread) td_sleepq;
    lwkt_port	td_msgport;	/* built-in message port for replies */
    struct lwp	*td_lwp;	/* (optional) associated lwp */
    struct proc	*td_proc;	/* (optional) associated process */
    struct pcb	*td_pcb;	/* points to pcb and top of kstack */
    struct globaldata *td_gd;	/* associated with this cpu */
    const char	*td_wmesg;	/* string name for blockage */
    const volatile void	*td_wchan;	/* waiting on channel */
    int		td_pri;		/* 0-31, 31=highest priority (note 1) */
    int		td_critcount;	/* critical section priority */
    u_int	td_flags;	/* TDF flags */
    int		td_wdomain;	/* domain for wchan address (typ 0) */
    void	(*td_preemptable)(struct thread *td, int critcount);
    void	(*td_release)(struct thread *td);
    char	*td_kstack;	/* kernel stack */
    int		td_kstack_size;	/* size of kernel stack */
    char	*td_sp;		/* kernel stack pointer for LWKT restore */
    thread_t	(*td_switch)(struct thread *ntd);
    __uint64_t	td_uticks;	/* Statclock hits in user mode (uS) */
    __uint64_t	td_sticks;	/* Statclock hits in system mode (uS) */
    __uint64_t	td_iticks;	/* Statclock hits processing intr (uS) */
    int		td_locks;	/* lockmgr lock debugging */
    struct plimit *td_limit;	/* synchronized from proc->p_limit */
    int		td_refs;	/* hold position in gd_tdallq / hold free */
    int		td_nest_count;	/* prevent splz nesting */
    u_int	td_contended;	/* token contention count */
    u_int	td_mpflags;	/* flags can be set by foreign cpus */
    int		td_cscount;	/* cpu synchronization master */
    int		td_wakefromcpu;	/* who woke me up? */
    int		td_upri;	/* user priority (sub-priority under td_pri) */
    int		td_type;	/* thread type, TD_TYPE_ */
    int		td_tracker;	/* misc use (base value 0), recursion count */
    int		td_fdcache_lru;
    int		td_unused03[3];	/* for future fields */
    struct iosched_data td_iosdata;	/* Dynamic I/O scheduling data */
    struct timeval td_start;	/* start time for a thread/process */
    char	td_comm[MAXCOMLEN+1];	/* typ 16+1 bytes */
    struct thread *td_preempted;	/* we preempted this thread */
    struct ucred *td_ucred;	/* synchronized from proc->p_ucred */
    void	*td_vmm;	/* vmm private data */
    lwkt_tokref_t td_toks_have;	/* tokens we own */
    lwkt_tokref_t td_toks_stop;	/* tokens we want */
    struct lwkt_tokref td_toks_array[LWKT_MAXTOKENS];
    int		td_fairq_load;	/* fairq */
    int		td_fairq_count;	/* fairq */
    struct globaldata *td_migrate_gd;	/* target gd for thread migration */
    struct fdcache td_fdcache[NFDCACHE];
    void	*td_linux_task;	/* drm/linux support */
#ifdef DEBUG_CRIT_SECTIONS
#define CRIT_DEBUG_ARRAY_SIZE	32
#define CRIT_DEBUG_ARRAY_MASK	(CRIT_DEBUG_ARRAY_SIZE - 1)
    const char	*td_crit_debug_array[CRIT_DEBUG_ARRAY_SIZE];
    int		td_crit_debug_index;
    int		td_in_crit_report;
#endif
    struct md_thread td_mach;
#ifdef DEBUG_LOCKS
#define SPINLOCK_DEBUG_ARRAY_SIZE	32
    int		td_spinlock_stack_id[SPINLOCK_DEBUG_ARRAY_SIZE];
    struct spinlock *td_spinlock_stack[SPINLOCK_DEBUG_ARRAY_SIZE];
    void	*td_spinlock_caller_pc[SPINLOCK_DEBUG_ARRAY_SIZE];

    /*
     * Track lockmgr locks held; lk->lk_filename:lk->lk_lineno is the holder
     */
#define LOCKMGR_DEBUG_ARRAY_SIZE	8
    int		td_lockmgr_stack_id[LOCKMGR_DEBUG_ARRAY_SIZE];
    struct lock	*td_lockmgr_stack[LOCKMGR_DEBUG_ARRAY_SIZE];
#endif
};

#define td_toks_base	td_toks_array[0]
#define td_toks_end	td_toks_array[LWKT_MAXTOKENS]

#define TD_TOKS_HELD(td)	((td)->td_toks_stop != &(td)->td_toks_base)
#define TD_TOKS_NOT_HELD(td)	((td)->td_toks_stop == &(td)->td_toks_base)
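/*
 * The tokref stack grows upward from td_toks_base and td_toks_stop
 * points one past the last acquired tokref.  A minimal illustrative
 * check at a point where the caller must not hold any tokens:
 *
 *	KKASSERT(TD_TOKS_NOT_HELD(curthread));
 */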
/*
 * Thread flags.  Note that TDF_RUNNING is cleared on the old thread after
 * we switch to the new one, which is necessary because LWKTs don't need
 * to hold the BGL.  This flag is used by the exit code and the managed
 * thread migration code.  Note in addition that preemption will cause
 * TDF_RUNNING to be cleared temporarily, so any code checking TDF_RUNNING
 * must also check TDF_PREEMPT_LOCK.
 *
 * LWKT threads stay on their (per-cpu) run queue while running, not to
 * be confused with user processes which are removed from the user scheduling
 * run queue while actually running.
 *
 * td_threadq can represent the thread on one of three queues... the LWKT
 * run queue, a tsleep queue, or an lwkt blocking queue.  The LWKT subsystem
 * does not allow a thread to be scheduled if it already resides on some
 * queue.
 */
#define TDF_RUNNING		0x00000001	/* thread still active */
#define TDF_RUNQ		0x00000002	/* on an LWKT run queue */
#define TDF_PREEMPT_LOCK	0x00000004	/* I have been preempted */
#define TDF_PREEMPT_DONE	0x00000008	/* ac preemption complete */
#define TDF_NOSTART		0x00000010	/* do not schedule on create */
#define TDF_MIGRATING		0x00000020	/* thread is being migrated */
#define TDF_SINTR		0x00000040	/* interruptability for 'ps' */
#define TDF_TSLEEPQ		0x00000080	/* on a tsleep wait queue */

#define TDF_SYSTHREAD		0x00000100	/* reserve memory may be used */
#define TDF_ALLOCATED_THREAD	0x00000200	/* objcache allocated thread */
#define TDF_ALLOCATED_STACK	0x00000400	/* objcache allocated stack */
#define TDF_FPU_HEUR		0x00000800	/* active restore on switch */
#define TDF_DEADLKTREAT		0x00001000	/* special lockmgr treatment */
#define TDF_MARKER		0x00002000	/* tdallq list scan marker */
#define TDF_TIMEOUT_RUNNING	0x00004000	/* tsleep timeout race */
#define TDF_TIMEOUT		0x00008000	/* tsleep timeout */
#define TDF_INTTHREAD		0x00010000	/* interrupt thread */
#define TDF_TSLEEP_DESCHEDULED	0x00020000	/* tsleep core deschedule */
#define TDF_BLOCKED		0x00040000	/* Thread is blocked */
#define TDF_PANICWARN		0x00080000	/* panic warning in switch */
#define TDF_BLOCKQ		0x00100000	/* on block queue */
#define TDF_FORCE_SPINPORT	0x00200000
#define TDF_EXITING		0x00400000	/* thread exiting */
#define TDF_USINGFP		0x00800000	/* thread using fp coproc */
#define TDF_KERNELFP		0x01000000	/* kernel using fp coproc */
#define TDF_DELAYED_WAKEUP	0x02000000
#define TDF_FIXEDCPU		0x04000000	/* running cpu is fixed */
#define TDF_USERMODE		0x08000000	/* in or entering user mode */
#define TDF_NOFAULT		0x10000000	/* force onfault on fault */
#define TDF_CLKTHREAD		0x20000000	/* detect INTTHREAD clock */

#define TDF_MP_STOPREQ		0x00000001	/* suspend_kproc */
#define TDF_MP_WAKEREQ		0x00000002	/* resume_kproc */
#define TDF_MP_EXITWAIT		0x00000004	/* reaper, see lwp_wait() */
#define TDF_MP_EXITSIG		0x00000008	/* reaper, see lwp_wait() */
#define TDF_MP_BATCH_DEMARC	0x00000010	/* batch mode handling */
#define TDF_MP_DIDYIELD		0x00000020	/* affects scheduling */

#define TD_TYPE_GENERIC		0		/* generic thread */
#define TD_TYPE_CRYPTO		1		/* crypto thread */
#define TD_TYPE_NETISR		2		/* netisr thread */

/*
 * Thread priorities.  Typically only one thread from any given
 * user process scheduling queue is on the LWKT run queue at a time.
 * Remember that there is one LWKT run queue per cpu.
 *
 * Critical sections are handled by bumping td_pri above TDPRI_MAX, which
 * causes interrupts to be masked as they occur.  When this occurs a
 * rollup flag will be set in mycpu->gd_reqflags.
 */
#define TDPRI_IDLE_THREAD	0	/* the idle thread */
#define TDPRI_IDLE_WORK		1	/* idle work (page zero, etc) */
#define TDPRI_USER_SCHEDULER	2	/* user scheduler helper */
#define TDPRI_USER_IDLE		4	/* user scheduler idle */
#define TDPRI_USER_NORM		6	/* user scheduler normal */
#define TDPRI_USER_REAL		8	/* user scheduler real time */
#define TDPRI_KERN_LPSCHED	9	/* (comparison point only) */
#define TDPRI_KERN_USER		10	/* kernel / block in syscall */
#define TDPRI_KERN_DAEMON	12	/* kernel daemon (pageout, etc) */
#define TDPRI_SOFT_NORM		14	/* kernel / normal */
#define TDPRI_SOFT_TIMER	16	/* kernel / timer */
#define TDPRI_UNUSED19		19
#define TDPRI_INT_SUPPORT	20	/* kernel / high priority support */
#define TDPRI_INT_LOW		27	/* low priority interrupt */
#define TDPRI_INT_MED		28	/* medium priority interrupt */
#define TDPRI_INT_HIGH		29	/* high priority interrupt */
#define TDPRI_MAX		31

#define LWKT_THREAD_STACK	(UPAGES * PAGE_SIZE)

#define IN_CRITICAL_SECT(td)	((td)->td_critcount)

#ifdef _KERNEL

extern void (*linux_task_drop_callback)(struct thread *);
extern void (*linux_proc_drop_callback)(struct proc *);

/*
 * Global tokens
 */
extern struct lwkt_token mp_token;
extern struct lwkt_token pmap_token;
extern struct lwkt_token dev_token;
extern struct lwkt_token vm_token;
extern struct lwkt_token vmspace_token;
extern struct lwkt_token kvm_token;
extern struct lwkt_token sigio_token;
extern struct lwkt_token tty_token;
extern struct lwkt_token vnode_token;
extern struct lwkt_token revoke_token;
extern struct lwkt_token kbd_token;
extern struct lwkt_token vga_token;

/*
 * Procedures
 */
struct thread *lwkt_alloc_thread(struct thread *, int, int, int);
void lwkt_init_thread(struct thread *, void *, int, int, struct globaldata *);
void lwkt_set_interrupt_support_thread(void);
void lwkt_set_comm(thread_t, const char *, ...) __printflike(2, 3);
void lwkt_free_thread(struct thread *);
void lwkt_gdinit(struct globaldata *);
void lwkt_switch(void);
void lwkt_switch_return(struct thread *);
void lwkt_preempt(thread_t, int);
void lwkt_schedule(thread_t);
void lwkt_schedule_noresched(thread_t);
void lwkt_schedule_self(thread_t);
void lwkt_deschedule(thread_t);
void lwkt_deschedule_self(thread_t);
void lwkt_yield(void);
void lwkt_yield_quick(void);
void lwkt_user_yield(void);
void lwkt_hold(thread_t);
void lwkt_rele(thread_t);
void lwkt_passive_release(thread_t);
void lwkt_maybe_splz(thread_t);

void lwkt_gettoken(lwkt_token_t);
void lwkt_gettoken_shared(lwkt_token_t);
int  lwkt_trytoken(lwkt_token_t);
void lwkt_reltoken(lwkt_token_t);
int  lwkt_cnttoken(lwkt_token_t, thread_t);
int  lwkt_getalltokens(thread_t, int);
void lwkt_relalltokens(thread_t);
void lwkt_token_init(lwkt_token_t, const char *);
void lwkt_token_uninit(lwkt_token_t);

void lwkt_token_pool_init(void);
lwkt_token_t lwkt_token_pool_lookup(void *);
lwkt_token_t lwkt_getpooltoken(void *);
void lwkt_relpooltoken(void *);

void lwkt_token_swap(void);
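/*
 * Example: pool tokens.  A hypothetical sketch; 'obj' is illustrative.
 * lwkt_getpooltoken() hashes the supplied address to one of a fixed set
 * of pre-existing tokens, which is useful when embedding a dedicated
 * token in every object would be too expensive.
 *
 *	lwkt_getpooltoken(obj);
 *	...operate on obj...
 *	lwkt_relpooltoken(obj);
 */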
void lwkt_setpri(thread_t, int);
void lwkt_setpri_initial(thread_t, int);
void lwkt_setpri_self(int);
void lwkt_schedulerclock(thread_t td);
void lwkt_setcpu_self(struct globaldata *);
void lwkt_migratecpu(int);

void lwkt_giveaway(struct thread *);
void lwkt_acquire(struct thread *);
int  lwkt_send_ipiq3(struct globaldata *, ipifunc3_t, void *, int);
int  lwkt_send_ipiq3_passive(struct globaldata *, ipifunc3_t, void *, int);
int  lwkt_send_ipiq3_bycpu(int, ipifunc3_t, void *, int);
int  lwkt_send_ipiq3_mask(cpumask_t, ipifunc3_t, void *, int);
void lwkt_wait_ipiq(struct globaldata *, int);
void lwkt_process_ipiq(void);
void lwkt_process_ipiq_frame(struct intrframe *);
void lwkt_smp_stopped(void);
void lwkt_synchronize_ipiqs(const char *);

/* lwkt_cpusync_init() - inline function in sys/thread2.h */
void lwkt_cpusync_simple(cpumask_t, cpusync_func_t, void *);
void lwkt_cpusync_interlock(lwkt_cpusync_t);
void lwkt_cpusync_deinterlock(lwkt_cpusync_t);
void lwkt_cpusync_quick(lwkt_cpusync_t);

void crit_panic(void) __dead2;
struct lwp *lwkt_preempted_proc(void);

int  lwkt_create(void (*)(void *), void *, struct thread **, struct thread *,
		 int, int, const char *, ...) __printflike(7, 8);
void lwkt_exit(void) __dead2;
void lwkt_remove_tdallq(struct thread *);
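/*
 * Example: creating and terminating a kernel thread.  A hypothetical
 * sketch; my_kthread, my_td and my_shutdown are illustrative.  A cpu
 * argument of -1 requests no particular cpu; the thread function
 * receives the arg pointer and must terminate itself via lwkt_exit().
 *
 *	static struct thread *my_td;
 *	static int my_shutdown;
 *
 *	static void
 *	my_kthread(void *arg)
 *	{
 *		while (my_shutdown == 0)
 *			tsleep(&my_shutdown, 0, "idle", hz);
 *		lwkt_exit();
 *	}
 *
 *	lwkt_create(my_kthread, NULL, &my_td, NULL, 0, -1, "mythread");
 */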
#endif	/* _KERNEL */

#endif	/* !_SYS_THREAD_H_ */