/*
 * SYS/THREAD.H
 *
 * Implements the architecture-independent portion of the LWKT
 * subsystem.
 */

#ifndef _SYS_THREAD_H_
#define _SYS_THREAD_H_

#ifndef _SYS_PARAM_H_
#include <sys/param.h>		/* MAXCOMLEN */
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>		/* TAILQ_* macros */
#endif
#ifndef _SYS_MSGPORT_H_
#include <sys/msgport.h>	/* lwkt_port */
#endif
#ifndef _SYS_TIME_H_
#include <sys/time.h>		/* struct timeval */
#endif
#ifndef _SYS_LOCK_H_
#include <sys/lock.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif
#ifndef _SYS_IOSCHED_H_
#include <sys/iosched.h>
#endif
#include <machine/thread.h>	/* md_thread */
#include <machine/stdint.h>
#include <machine/ucontext.h>

struct globaldata;
struct lwp;
struct proc;
struct thread;
struct lwkt_queue;
struct lwkt_token;
struct lwkt_tokref;
struct lwkt_ipiq;
#if 0
struct lwkt_cpu_msg;
struct lwkt_cpu_port;
#endif
struct lwkt_cpusync;
struct fdnode;
union sysunion;

typedef struct lwkt_queue	*lwkt_queue_t;
typedef struct lwkt_token	*lwkt_token_t;
typedef struct lwkt_tokref	*lwkt_tokref_t;
#if 0
typedef struct lwkt_cpu_msg	*lwkt_cpu_msg_t;
typedef struct lwkt_cpu_port	*lwkt_cpu_port_t;
#endif
typedef struct lwkt_ipiq	*lwkt_ipiq_t;
typedef struct lwkt_cpusync	*lwkt_cpusync_t;
typedef struct thread		*thread_t;

typedef TAILQ_HEAD(lwkt_queue, thread) lwkt_queue;

/*
 * Differentiation between kernel threads and user threads.  Userland
 * programs which want access to kernel structures have to define
 * _KERNEL_STRUCTURES.  This is a safety valve to prevent badly written
 * user programs from getting an LWKT thread that is neither the kernel
 * nor the user version.
 */
#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
#ifndef _SYS_CPUMASK_H_
#include <sys/cpumask.h>	/* cpumask_t */
#endif
#ifndef _CPU_FRAME_H_
#include <machine/frame.h>
#endif
#else
struct intrframe;
#endif

/*
 * Tokens are used to serialize access to information.  They are 'soft'
 * serialization entities that only stay in effect while a thread is
 * running.  If the thread blocks, other threads can run holding the same
 * token(s).  The tokens are reacquired when the original thread resumes.
 *
 * Tokens guarantee that no deadlock can happen regardless of type or
 * ordering.  However, obtaining the same token first shared, then
 * stacking exclusive, is not allowed and will panic.
 *
 * A thread can depend on its serialization remaining intact through a
 * preemption.  An interrupt which attempts to use the same token as the
 * thread being preempted will reschedule itself for non-preemptive
 * operation, so the new token code is capable of interlocking against
 * interrupts as well as other cpus.  This means that your token can only
 * be (temporarily) lost if you *explicitly* block.
 *
 * Tokens are managed through a helper reference structure, lwkt_tokref.
 * Each thread has a stack of tokref's to keep track of acquired tokens.
 * Multiple tokref's may reference the same token.
 *
 * EXCLUSIVE TOKENS
 *	Acquiring an exclusive token requires acquiring the EXCLUSIVE bit
 *	with count == 0.  If the exclusive bit cannot be acquired, EXCLREQ
 *	is set.  Once acquired, EXCLREQ is cleared (but could get set by
 *	another thread also trying for an exclusive lock at any time).
 *
 * SHARED TOKENS
 *	Acquiring a shared token requires waiting for the EXCLUSIVE bit
 *	to be cleared and then acquiring a count.  A shared lock request
 *	can temporarily acquire a count and then back it out if the
 *	EXCLUSIVE bit turns out to be set, which allows fetchadd to be
 *	used for the acquisition.
 *
 *	A thread attempting to get a single shared token will defer to
 *	pending exclusive requesters.  However, a thread already holding
 *	one or more tokens and trying to get an additional shared token
 *	cannot defer to exclusive requesters because doing so can lead
 *	to a deadlock.
 *
 * Multiple exclusive tokens are handled by treating the additional tokens
 * as a special case of the shared token, incrementing the count value.
 * This reduces the complexity of the token release code.
 */

struct lwkt_token {
    long		t_count;	/* Shared/exclreq/exclusive access */
    struct lwkt_tokref	*t_ref;		/* Exclusive ref */
    long		t_collisions;	/* Collision counter */
    const char		*t_desc;	/* Descriptive name */
};

#define TOK_EXCLUSIVE	0x00000001	/* Exclusive lock held */
#define TOK_EXCLREQ	0x00000002	/* Exclusive request pending */
#define TOK_INCR	4		/* Shared count increment */
#define TOK_COUNTMASK	(~(long)(TOK_EXCLUSIVE|TOK_EXCLREQ))

/*
 * Static initialization for a lwkt_token.
 */
#define LWKT_TOKEN_INITIALIZER(name)	\
{					\
	.t_count = 0,			\
	.t_ref = NULL,			\
	.t_collisions = 0,		\
	.t_desc = #name			\
}

/*
 * Assert that a particular token is held.
 */
#define LWKT_TOKEN_HELD_ANY(tok)	_lwkt_token_held_any(tok, curthread)
#define LWKT_TOKEN_HELD_EXCL(tok)	_lwkt_token_held_excl(tok, curthread)

#define ASSERT_LWKT_TOKEN_HELD(tok)		\
	KKASSERT(LWKT_TOKEN_HELD_ANY(tok))

#define ASSERT_LWKT_TOKEN_HELD_EXCL(tok)	\
	KKASSERT(LWKT_TOKEN_HELD_EXCL(tok))

#define ASSERT_NO_TOKENS_HELD(td)		\
	KKASSERT((td)->td_toks_stop == &(td)->td_toks_array[0])
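
/*
 * Illustrative sketch only: the typical serialization pattern with a
 * statically initialized token.  Remember that the token is
 * transparently dropped and reacquired around any explicit blocking
 * point.  The names my_token and my_data are hypothetical.
 *
 *	static struct lwkt_token my_token =
 *		LWKT_TOKEN_INITIALIZER(my_token);
 *
 *	lwkt_gettoken(&my_token);		(exclusive access)
 *	ASSERT_LWKT_TOKEN_HELD(&my_token);
 *	... modify my_data ...
 *	lwkt_reltoken(&my_token);
 *
 *	lwkt_gettoken_shared(&my_token);	(shared, read-only access)
 *	... read my_data ...
 *	lwkt_reltoken(&my_token);
 */
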
struct lwkt_tokref {
    lwkt_token_t	tr_tok;		/* token in question */
    long		tr_count;	/* TOK_EXCLUSIVE|TOK_EXCLREQ or 0 */
    struct thread	*tr_owner;	/* me */
};

#define MAXCPUFIFO	256	/* power of 2 */
#define MAXCPUFIFO_MASK	(MAXCPUFIFO - 1)
#define LWKT_MAXTOKENS	32	/* max tokens beneficially held by thread */

#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
/*
 * Always cast to ipifunc_t when registering an ipi.  The actual ipi
 * function is called with both the data and an interrupt frame, but the
 * ipi function that is registered might only declare a data argument.
 */
typedef void (*ipifunc1_t)(void *arg);
typedef void (*ipifunc2_t)(void *arg, int arg2);
typedef void (*ipifunc3_t)(void *arg, int arg2, struct intrframe *frame);

struct lwkt_ipiq {
    int		ip_rindex;	/* only written by target cpu */
    int		ip_xindex;	/* written by target, indicates completion */
    int		ip_windex;	/* only written by source cpu */
    int		ip_drain;	/* drain source limit */
    struct {
	ipifunc3_t	func;
	void		*arg1;
	int		arg2;
	char		filler[32 - sizeof(int) - sizeof(void *) * 2];
    } ip_info[MAXCPUFIFO];
};
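
/*
 * Illustrative sketch only: queueing a remote function call through the
 * FIFO above.  Per the note on the ipifunc typedefs, a handler that only
 * declares the data argument is cast when registered.  my_ipi_handler
 * and target_gd are hypothetical names.
 *
 *	static void
 *	my_ipi_handler(void *arg)
 *	{
 *		... runs from the IPI on the target cpu ...
 *	}
 *
 *	lwkt_send_ipiq3(target_gd, (ipifunc3_t)my_ipi_handler, arg, 0);
 */
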
/*
 * CPU Synchronization structure.  See lwkt_cpusync_init() and
 * lwkt_cpusync_interlock() for more information.
 */
typedef void (*cpusync_func_t)(void *arg);

struct lwkt_cpusync {
    cpumask_t	cs_mask;	/* cpus running the sync */
    cpumask_t	cs_mack;	/* mask acknowledge */
    cpusync_func_t cs_func;	/* function to execute */
    void	*cs_data;	/* function data */
};
#endif	/* _KERNEL || _KERNEL_STRUCTURES */
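
/*
 * Illustrative sketch only: the usual cpusync interlock pattern.  The
 * initialization function is an inline in sys/thread2.h (see the note
 * near its prototype below); the argument order shown here is an
 * assumption and my_sync_func is a hypothetical name.
 *
 *	struct lwkt_cpusync cs;
 *
 *	lwkt_cpusync_init(&cs, mask, my_sync_func, data);
 *	lwkt_cpusync_interlock(&cs);	(target cpus enter the sync)
 *	... perform the critical modification ...
 *	lwkt_cpusync_deinterlock(&cs);	(cpus run my_sync_func and resume)
 */
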
/*
 * The standard message and queue structure used for communications between
 * cpus.  Messages are typically queued via a machine-specific non-linked
 * FIFO matrix allowing any cpu to send a message to any other cpu without
 * blocking.
 */
#if 0
typedef struct lwkt_cpu_msg {
    void	(*cm_func)(lwkt_cpu_msg_t msg);	/* primary dispatch function */
    int		cm_code;	/* request code if applicable */
    int		cm_cpu;		/* reply to cpu */
    thread_t	cm_originator;	/* originating thread for wakeup */
} lwkt_cpu_msg;
#endif

/*
 * Per-thread file descriptor cache.
 */
struct fdcache {
    int		fd;		/* descriptor being cached */
    int		locked;
    struct file	*fp;		/* cached referenced fp */
    int		lru;
    int		unused[3];
} __cachealign;

#define NFDCACHE	4	/* max fd's cached by a thread */

/*
 * Thread structure.  Note that ownership of a thread structure is special
 * cased and there is no 'token'.  A thread is always owned by the cpu
 * represented by td_gd; any manipulation of the thread by some other cpu
 * must be done through cpu_*msg() functions.  E.g. you could request
 * ownership of a thread that way, or hand a thread off to another cpu.
 *
 * NOTE: td_ucred is synchronized from the p_ucred on user->kernel syscall,
 *	 trap, and AST/signal transitions to provide a stable ucred for
 *	 (primarily) system calls.  This field will be NULL for pure kernel
 *	 threads.
 */
struct md_intr_info;

struct thread {
    TAILQ_ENTRY(thread) td_threadq;
    TAILQ_ENTRY(thread) td_allq;
    TAILQ_ENTRY(thread) td_sleepq;
    lwkt_port	td_msgport;	/* built-in message port for replies */
    struct lwp	*td_lwp;	/* (optional) associated lwp */
    struct proc	*td_proc;	/* (optional) associated process */
    struct pcb	*td_pcb;	/* points to pcb and top of kstack */
    struct globaldata *td_gd;	/* associated with this cpu */
    const char	*td_wmesg;	/* string name for blockage */
    const volatile void	*td_wchan;	/* waiting on channel */
    int		td_pri;		/* 0-31, 31=highest priority (note 1) */
    int		td_critcount;	/* critical section priority */
    u_int	td_flags;	/* TDF flags */
    int		td_wdomain;	/* domain for wchan address (typ 0) */
    void	(*td_preemptable)(struct thread *td, int critcount);
    void	(*td_release)(struct thread *td);
    char	*td_kstack;	/* kernel stack */
    int		td_kstack_size;	/* size of kernel stack */
    char	*td_sp;		/* kernel stack pointer for LWKT restore */
    thread_t	(*td_switch)(struct thread *ntd);
    __uint64_t	td_uticks;	/* Statclock hits in user mode (uS) */
    __uint64_t	td_sticks;	/* Statclock hits in system mode (uS) */
    __uint64_t	td_iticks;	/* Statclock hits processing intr (uS) */
    int		td_locks;	/* lockmgr lock debugging */
    struct plimit *td_limit;	/* synchronized from proc->p_limit */
    int		td_refs;	/* hold position in gd_tdallq / hold free */
    int		td_nest_count;	/* prevent splz nesting */
    u_int	td_contended;	/* token contention count */
    u_int	td_mpflags;	/* flags can be set by foreign cpus */
    int		td_cscount;	/* cpu synchronization master */
    int		td_wakefromcpu;	/* who woke me up? */
    int		td_upri;	/* user priority (sub-priority under td_pri) */
    int		td_type;	/* thread type, TD_TYPE_ */
    int		td_tracker;	/* misc use (base value 0), recursion count */
    int		td_fdcache_lru;
    int		td_unused03[3];	/* for future fields */
    struct iosched_data td_iosdata;	/* Dynamic I/O scheduling data */
    struct timeval td_start;	/* start time for a thread/process */
    char	td_comm[MAXCOMLEN+1];	/* typ 16+1 bytes */
    struct thread *td_preempted;	/* we preempted this thread */
    struct ucred *td_ucred;	/* synchronized from proc->p_ucred */
    mcontext_t	*td_kfpuctx;	/* kernel_fpu_begin()/kernel_fpu_end() */
    lwkt_tokref_t td_toks_have;	/* tokens we own */
    lwkt_tokref_t td_toks_stop;	/* tokens we want */
    struct lwkt_tokref td_toks_array[LWKT_MAXTOKENS];
    int		td_fairq_load;	/* fairq */
    int		td_fairq_count;	/* fairq */
    struct globaldata *td_migrate_gd;	/* target gd for thread migration */
    struct fdcache td_fdcache[NFDCACHE];
    void	*td_linux_task;	/* drm/linux support */
#ifdef DEBUG_CRIT_SECTIONS
#define CRIT_DEBUG_ARRAY_SIZE	32
#define CRIT_DEBUG_ARRAY_MASK	(CRIT_DEBUG_ARRAY_SIZE - 1)
    const char	*td_crit_debug_array[CRIT_DEBUG_ARRAY_SIZE];
    int		td_crit_debug_index;
    int		td_in_crit_report;
#endif
    struct md_thread td_mach;
#ifdef DEBUG_LOCKS
#define SPINLOCK_DEBUG_ARRAY_SIZE	32
    int		td_spinlock_stack_id[SPINLOCK_DEBUG_ARRAY_SIZE];
    struct spinlock *td_spinlock_stack[SPINLOCK_DEBUG_ARRAY_SIZE];
    void	*td_spinlock_caller_pc[SPINLOCK_DEBUG_ARRAY_SIZE];

    /*
     * Track lockmgr locks held; lk->lk_filename:lk->lk_lineno is the
     * holder.
     */
#define LOCKMGR_DEBUG_ARRAY_SIZE	8
    int		td_lockmgr_stack_id[LOCKMGR_DEBUG_ARRAY_SIZE];
    struct lock	*td_lockmgr_stack[LOCKMGR_DEBUG_ARRAY_SIZE];
#endif
};

#define td_toks_base	td_toks_array[0]
#define td_toks_end	td_toks_array[LWKT_MAXTOKENS]

#define TD_TOKS_HELD(td)	((td)->td_toks_stop != &(td)->td_toks_base)
#define TD_TOKS_NOT_HELD(td)	((td)->td_toks_stop == &(td)->td_toks_base)
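
/*
 * Illustrative sketch only: td_toks_stop points one past the last
 * acquired tokref, with the stack growing up from td_toks_base, so a
 * thread's held token references can be scanned like this (td is a
 * hypothetical thread pointer):
 *
 *	lwkt_tokref_t scan;
 *
 *	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan)
 *		... inspect scan->tr_tok and scan->tr_count ...
 */
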
/*
 * Thread flags.  Note that TDF_RUNNING is cleared on the old thread after
 * we switch to the new one, which is necessary because LWKTs don't need
 * to hold the BGL.  This flag is used by the exit code and the managed
 * thread migration code.  Note in addition that preemption will cause
 * TDF_RUNNING to be cleared temporarily, so any code checking TDF_RUNNING
 * must also check TDF_PREEMPT_LOCK.
 *
 * LWKT threads stay on their (per-cpu) run queue while running, not to
 * be confused with user processes which are removed from the user scheduling
 * run queue while actually running.
 *
 * td_threadq can represent the thread on one of three queues... the LWKT
 * run queue, a tsleep queue, or an lwkt blocking queue.  The LWKT subsystem
 * does not allow a thread to be scheduled if it already resides on some
 * queue.
 */
#define TDF_RUNNING		0x00000001	/* thread still active */
#define TDF_RUNQ		0x00000002	/* on an LWKT run queue */
#define TDF_PREEMPT_LOCK	0x00000004	/* I have been preempted */
#define TDF_PREEMPT_DONE	0x00000008	/* ack preemption complete */
#define TDF_NOSTART		0x00000010	/* do not schedule on create */
#define TDF_MIGRATING		0x00000020	/* thread is being migrated */
#define TDF_SINTR		0x00000040	/* interruptability for 'ps' */
#define TDF_TSLEEPQ		0x00000080	/* on a tsleep wait queue */

#define TDF_SYSTHREAD		0x00000100	/* reserve memory may be used */
#define TDF_ALLOCATED_THREAD	0x00000200	/* objcache allocated thread */
#define TDF_ALLOCATED_STACK	0x00000400	/* objcache allocated stack */
#define TDF_FPU_HEUR		0x00000800	/* active restore on switch */
#define TDF_DEADLKTREAT		0x00001000	/* special lockmgr treatment */
#define TDF_MARKER		0x00002000	/* tdallq list scan marker */
#define TDF_TIMEOUT_RUNNING	0x00004000	/* tsleep timeout race */
#define TDF_TIMEOUT		0x00008000	/* tsleep timeout */
#define TDF_INTTHREAD		0x00010000	/* interrupt thread */
#define TDF_TSLEEP_DESCHEDULED	0x00020000	/* tsleep core deschedule */
#define TDF_BLOCKED		0x00040000	/* Thread is blocked */
#define TDF_PANICWARN		0x00080000	/* panic warning in switch */
#define TDF_BLOCKQ		0x00100000	/* on block queue */
#define TDF_FORCE_SPINPORT	0x00200000
#define TDF_EXITING		0x00400000	/* thread exiting */
#define TDF_USINGFP		0x00800000	/* thread using fp coproc */
#define TDF_KERNELFP		0x01000000	/* kernel using fp coproc */
#define TDF_DELAYED_WAKEUP	0x02000000
#define TDF_FIXEDCPU		0x04000000	/* running cpu is fixed */
#define TDF_USERMODE		0x08000000	/* in or entering user mode */
#define TDF_NOFAULT		0x10000000	/* force onfault on fault */
#define TDF_CLKTHREAD		0x20000000	/* detect INTTHREAD clock */
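
/*
 * Illustrative sketch only, per the note above the flag definitions:
 * because preemption clears TDF_RUNNING temporarily, code waiting for a
 * thread to be fully switched out should check both flags:
 *
 *	while (td->td_flags & (TDF_RUNNING | TDF_PREEMPT_LOCK))
 *		... wait or spin ...
 */
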
#define TDF_MP_STOPREQ		0x00000001	/* suspend_kproc */
#define TDF_MP_WAKEREQ		0x00000002	/* resume_kproc */
#define TDF_MP_EXITWAIT		0x00000004	/* reaper, see lwp_wait() */
#define TDF_MP_EXITSIG		0x00000008	/* reaper, see lwp_wait() */
#define TDF_MP_BATCH_DEMARC	0x00000010	/* batch mode handling */
#define TDF_MP_DIDYIELD		0x00000020	/* affects scheduling */

#define TD_TYPE_GENERIC		0	/* generic thread */
#define TD_TYPE_CRYPTO		1	/* crypto thread */
#define TD_TYPE_NETISR		2	/* netisr thread */

/*
 * Thread priorities.  Typically only one thread from any given
 * user process scheduling queue is on the LWKT run queue at a time.
 * Remember that there is one LWKT run queue per cpu.
 *
 * Critical sections are handled by bumping td_pri above TDPRI_MAX, which
 * causes interrupts to be masked as they occur.  When this occurs a
 * rollup flag will be set in mycpu->gd_reqflags.
 */
#define TDPRI_IDLE_THREAD	0	/* the idle thread */
#define TDPRI_IDLE_WORK		1	/* idle work (page zero, etc) */
#define TDPRI_USER_SCHEDULER	2	/* user scheduler helper */
#define TDPRI_USER_IDLE		4	/* user scheduler idle */
#define TDPRI_USER_NORM		6	/* user scheduler normal */
#define TDPRI_USER_REAL		8	/* user scheduler real time */
#define TDPRI_KERN_LPSCHED	9	/* (comparison point only) */
#define TDPRI_KERN_USER		10	/* kernel / block in syscall */
#define TDPRI_KERN_DAEMON	12	/* kernel daemon (pageout, etc) */
#define TDPRI_SOFT_NORM		14	/* kernel / normal */
#define TDPRI_SOFT_TIMER	16	/* kernel / timer */
#define TDPRI_UNUSED19		19
#define TDPRI_INT_SUPPORT	20	/* kernel / high priority support */
#define TDPRI_INT_LOW		27	/* low priority interrupt */
#define TDPRI_INT_MED		28	/* medium priority interrupt */
#define TDPRI_INT_HIGH		29	/* high priority interrupt */
#define TDPRI_MAX		31

#define LWKT_THREAD_STACK	(UPAGES * PAGE_SIZE)

#define IN_CRITICAL_SECT(td)	((td)->td_critcount)

#ifdef _KERNEL

extern void (*linux_task_drop_callback)(struct thread *);
extern void (*linux_proc_drop_callback)(struct proc *);

/*
 * Global tokens
 */
extern struct lwkt_token mp_token;
extern struct lwkt_token pmap_token;
extern struct lwkt_token dev_token;
extern struct lwkt_token vm_token;
extern struct lwkt_token vmspace_token;
extern struct lwkt_token kvm_token;
extern struct lwkt_token sigio_token;
extern struct lwkt_token tty_token;
extern struct lwkt_token vnode_token;
extern struct lwkt_token revoke_token;
extern struct lwkt_token kbd_token;
extern struct lwkt_token vga_token;

/*
 * Procedures
 */
struct thread *lwkt_alloc_thread(struct thread *, int, int, int);
void lwkt_init_thread(struct thread *, void *, int, int, struct globaldata *);
void lwkt_set_interrupt_support_thread(void);
void lwkt_set_comm(thread_t, const char *, ...) __printflike(2, 3);
void lwkt_free_thread(struct thread *);
void lwkt_gdinit(struct globaldata *);
void lwkt_switch(void);
void lwkt_switch_return(struct thread *);
void lwkt_preempt(thread_t, int);
void lwkt_schedule(thread_t);
void lwkt_schedule_noresched(thread_t);
void lwkt_schedule_self(thread_t);
void lwkt_deschedule(thread_t);
void lwkt_deschedule_self(thread_t);
void lwkt_yield(void);
void lwkt_yield_quick(void);
void lwkt_user_yield(void);
void lwkt_hold(thread_t);
void lwkt_rele(thread_t);
void lwkt_passive_release(thread_t);
void lwkt_maybe_splz(thread_t);

void lwkt_gettoken(lwkt_token_t);
void lwkt_gettoken_shared(lwkt_token_t);
int lwkt_trytoken(lwkt_token_t);
void lwkt_reltoken(lwkt_token_t);
int lwkt_cnttoken(lwkt_token_t, thread_t);
int lwkt_getalltokens(thread_t, int);
void lwkt_relalltokens(thread_t);
void lwkt_token_init(lwkt_token_t, const char *);
void lwkt_token_uninit(lwkt_token_t);

void lwkt_token_pool_init(void);
lwkt_token_t lwkt_token_pool_lookup(void *);
lwkt_token_t lwkt_getpooltoken(void *);
void lwkt_relpooltoken(void *);

void lwkt_token_swap(void);

void lwkt_setpri(thread_t, int);
void lwkt_setpri_initial(thread_t, int);
void lwkt_setpri_self(int);
void lwkt_schedulerclock(thread_t td);
void lwkt_setcpu_self(struct globaldata *);
void lwkt_migratecpu(int);

void lwkt_giveaway(struct thread *);
void lwkt_acquire(struct thread *);
int lwkt_send_ipiq3(struct globaldata *, ipifunc3_t, void *, int);
int lwkt_send_ipiq3_passive(struct globaldata *, ipifunc3_t, void *, int);
int lwkt_send_ipiq3_bycpu(int, ipifunc3_t, void *, int);
int lwkt_send_ipiq3_mask(cpumask_t, ipifunc3_t, void *, int);
void lwkt_wait_ipiq(struct globaldata *, int);
void lwkt_process_ipiq(void);
void lwkt_process_ipiq_frame(struct intrframe *);
void lwkt_smp_stopped(void);
void lwkt_synchronize_ipiqs(const char *);

/* lwkt_cpusync_init() - inline function in sys/thread2.h */
void lwkt_cpusync_simple(cpumask_t, cpusync_func_t, void *);
void lwkt_cpusync_interlock(lwkt_cpusync_t);
void lwkt_cpusync_deinterlock(lwkt_cpusync_t);
void lwkt_cpusync_quick(lwkt_cpusync_t);

void crit_panic(void) __dead2;
struct lwp *lwkt_preempted_proc(void);

int lwkt_create(void (*)(void *), void *, struct thread **, struct thread *,
	int, int, const char *, ...) __printflike(7, 8);
void lwkt_exit(void) __dead2;
void lwkt_remove_tdallq(struct thread *);
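
/*
 * Illustrative sketch only: creating a kernel thread with the
 * interfaces above.  The argument meanings (function, argument,
 * returned thread, thread template, TDF flags, target cpu, then a
 * printf-style name) are inferred from common in-tree usage and the
 * __printflike annotation; my_thread_main is a hypothetical name.
 *
 *	static void
 *	my_thread_main(void *arg)
 *	{
 *		... do work, blocking as needed ...
 *		lwkt_exit();
 *	}
 *
 *	thread_t td;
 *
 *	lwkt_create(my_thread_main, NULL, &td, NULL, 0, -1, "mythread");
 */
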
#endif	/* _KERNEL */

#endif	/* !_SYS_THREAD_H_ */