Lines Matching refs:td

134 struct thread *td __ktrace_used; in _sleep()
141 td = curthread; in _sleep()
143 if (KTRPOINT(td, KTR_CSW)) in _sleep()
152 KASSERT(TD_IS_RUNNING(td), ("_sleep: curthread not running")); in _sleep()
169 KASSERT(!TD_ON_SLEEPQ(td), ("recursive sleep")); in _sleep()
181 td->td_tid, td->td_proc->p_pid, td->td_name, wmesg, ident); in _sleep()
225 if (KTRPOINT(td, KTR_CSW)) in _sleep()
241 struct thread *td __ktrace_used; in msleep_spin_sbt()
245 td = curthread; in msleep_spin_sbt()
248 KASSERT(TD_IS_RUNNING(td), ("msleep_spin_sbt: curthread not running")); in msleep_spin_sbt()
255 td->td_tid, td->td_proc->p_pid, td->td_name, wmesg, ident); in msleep_spin_sbt()
277 if (KTRPOINT(td, KTR_CSW)) { in msleep_spin_sbt()
296 if (KTRPOINT(td, KTR_CSW)) in msleep_spin_sbt()
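The _sleep() and msleep_spin_sbt() matches above share one shape: snapshot curthread into a local td, emit a KTR_CSW ktrace record when KTRPOINT(td, KTR_CSW) is set, and KASSERT that the thread is running and not already on a sleep queue before it blocks on ident. As a rough userland analogue of that sleep/wakeup pairing (a sketch only, not kernel code; the lock, cond, and ready names are invented for the example):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static bool ready = false;

    /* Sleep side: re-test the predicate after every wakeup, the same way
     * _sleep() callers wrap the sleep in a loop. */
    void
    wait_for_ready(void)
    {
            pthread_mutex_lock(&lock);
            while (!ready)
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);
    }

    /* Wakeup side: the userland counterpart of wakeup(ident). */
    void
    post_ready(void)
    {
            pthread_mutex_lock(&lock);
            ready = true;
            pthread_cond_broadcast(&cond);
            pthread_mutex_unlock(&lock);
    }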
490 struct thread *td; in mi_switch() local
492 td = curthread; /* XXX */ in mi_switch()
493 THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED); in mi_switch()
494 KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code")); in mi_switch()
496 if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td)) in mi_switch()
500 KASSERT(td->td_critnest == 1 || KERNEL_PANICKED(), in mi_switch()
517 td->td_ru.ru_nvcsw++; in mi_switch()
518 td->td_swvoltick = ticks; in mi_switch()
520 td->td_ru.ru_nivcsw++; in mi_switch()
521 td->td_swinvoltick = ticks; in mi_switch()
532 td->td_runtime += runtime; in mi_switch()
533 td->td_incruntime += runtime; in mi_switch()
535 td->td_generation++; /* bump preempt-detect counter */ in mi_switch()
539 td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name); in mi_switch()
546 sched_switch(td, flags); in mi_switch()
548 td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name); in mi_switch()
553 if ((td = PCPU_GET(deadthread))) { in mi_switch()
555 thread_stash(td); in mi_switch()
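The mi_switch() matches show the per-thread accounting done on every context switch: ru_nvcsw is bumped for a voluntary switch and ru_nivcsw for an involuntary one, the elapsed runtime is folded into td_runtime/td_incruntime, and td_generation is incremented so callers can detect that they were preempted. Those counters are what getrusage(2) later reports; a small userland check of that accounting, assuming sched_yield() costs at least one voluntary switch, might look like:

    #include <sys/resource.h>

    #include <sched.h>
    #include <stdio.h>

    int
    main(void)
    {
            struct rusage ru;

            sched_yield();          /* usually counted as a voluntary switch */
            if (getrusage(RUSAGE_SELF, &ru) != 0) {
                    perror("getrusage");
                    return (1);
            }
            /* Fed by the ru_nvcsw/ru_nivcsw updates seen in mi_switch(). */
            printf("voluntary: %ld  involuntary: %ld\n",
                ru.ru_nvcsw, ru.ru_nivcsw);
            return (0);
    }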
568 setrunnable(struct thread *td, int srqflags) in setrunnable() argument
572 THREAD_LOCK_ASSERT(td, MA_OWNED); in setrunnable()
573 KASSERT(td->td_proc->p_state != PRS_ZOMBIE, in setrunnable()
574 ("setrunnable: pid %d is a zombie", td->td_proc->p_pid)); in setrunnable()
577 switch (TD_GET_STATE(td)) { in setrunnable()
582 KASSERT((td->td_flags & TDF_INMEM) != 0, in setrunnable()
584 td, td->td_flags, td->td_inhibitors)); in setrunnable()
586 sched_wakeup(td, srqflags); in setrunnable()
593 if (td->td_inhibitors == TDI_SWAPPED && in setrunnable()
594 (td->td_flags & TDF_SWAPINREQ) == 0) { in setrunnable()
595 td->td_flags |= TDF_SWAPINREQ; in setrunnable()
600 panic("setrunnable: state 0x%x", TD_GET_STATE(td)); in setrunnable()
603 thread_unlock(td); in setrunnable()
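setrunnable() itself is a short state machine over TD_GET_STATE(td): a sleeping thread that is resident (TDF_INMEM) is handed straight to sched_wakeup(), a swapped-out thread only gets TDF_SWAPINREQ set so the swapper will page it back in and retry, and any other state panics. A stripped-down sketch of that dispatch, with hypothetical names standing in for the kernel's, could read:

    /* Illustration only; the types, states, and flags below are invented. */
    #include <assert.h>

    enum thr_state { THR_RUNNING, THR_SLEEPING, THR_SWAPPED };

    #define THRF_INMEM      0x01    /* thread is resident in memory */
    #define THRF_SWAPINREQ  0x02    /* swap-in already requested */

    struct thr {
            enum thr_state  state;
            unsigned        flags;
    };

    void
    make_runnable(struct thr *t)
    {
            switch (t->state) {
            case THR_SLEEPING:
                    /* Resident thread: hand it to the scheduler directly,
                     * mirroring the KASSERT on TDF_INMEM plus sched_wakeup(). */
                    assert((t->flags & THRF_INMEM) != 0);
                    t->state = THR_RUNNING;
                    break;
            case THR_SWAPPED:
                    /* Not resident: just ask for a swap-in; we get called
                     * again once the thread is back in memory. */
                    if ((t->flags & THRF_SWAPINREQ) == 0)
                            t->flags |= THRF_SWAPINREQ;
                    break;
            default:
                    /* Mirrors panic("setrunnable: state 0x%x", ...). */
                    break;
            }
    }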
637 ast_scheduler(struct thread *td, int tda __unused) in ast_scheduler() argument
640 if (KTRPOINT(td, KTR_CSW)) in ast_scheduler()
643 thread_lock(td); in ast_scheduler()
644 sched_prio(td, td->td_user_pri); in ast_scheduler()
647 if (KTRPOINT(td, KTR_CSW)) in ast_scheduler()
680 struct thread *td; in kern_yield() local
682 td = curthread; in kern_yield()
684 thread_lock(td); in kern_yield()
686 prio = td->td_user_pri; in kern_yield()
688 sched_prio(td, prio); in kern_yield()
697 sys_yield(struct thread *td, struct yield_args *uap) in sys_yield() argument
700 thread_lock(td); in sys_yield()
701 if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) in sys_yield()
702 sched_prio(td, PRI_MAX_TIMESHARE); in sys_yield()
704 td->td_retval[0] = 0; in sys_yield()
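kern_yield() and sys_yield() both lower the thread's priority (to its user priority, or to PRI_MAX_TIMESHARE for a timesharing thread) before rescheduling, so the yield genuinely lets other runnable threads go first. The portable userland request for the same behaviour is sched_yield(2); a minimal use as a back-off in a polling loop (poll_done() is a placeholder predicate, not a real API):

    #include <sched.h>
    #include <stdbool.h>

    extern bool poll_done(void);    /* placeholder predicate */

    void
    poll_until_done(void)
    {
            while (!poll_done())
                    (void)sched_yield();    /* step aside for other runnable threads */
    }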
709 sys_sched_getcpu(struct thread *td, struct sched_getcpu_args *uap) in sys_sched_getcpu() argument
711 td->td_retval[0] = td->td_oncpu; in sys_sched_getcpu()
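sys_sched_getcpu() simply returns td_oncpu, the CPU the calling thread is currently running on. Assuming a libc that exposes sched_getcpu(3) (recent FreeBSD and glibc do), the userland side is a one-liner:

    #define _GNU_SOURCE             /* glibc hides sched_getcpu() otherwise */
    #include <sched.h>
    #include <stdio.h>

    int
    main(void)
    {
            /* The CPU this thread happens to be on right now; the answer
             * can be stale as soon as the next context switch. */
            printf("running on CPU %d\n", sched_getcpu());
            return (0);
    }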