Lines Matching refs:kq — cross-reference hits for the symbol kq. Each entry gives the source line number, the matched line, and the enclosing function; "local" and "argument" tag the lines where the symbol is declared.

391 #define	KQ_FLUX_WAIT(kq)	(void)cv_wait(&kq->kq_cv, &kq->kq_lock)  argument
392 #define KQ_FLUX_WAKEUP(kq) cv_broadcast(&kq->kq_cv) argument
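These two macros wrap the kqueue's condvar: KQ_FLUX_WAIT sleeps on kq_cv (atomically releasing kq_lock), and KQ_FLUX_WAKEUP broadcasts to all sleepers. A minimal sketch of how such a pair is typically used to wait out a transient ("in flux") state; the kn_influx field name is an assumption for illustration only, not taken from the listing:

    static void
    kq_flux_wait_sketch(struct kqueue *kq, struct knote *kn)
    {
            mutex_spin_enter(&kq->kq_lock);
            while (kn->kn_influx > 0)       /* assumed field name, illustration only */
                    KQ_FLUX_WAIT(kq);       /* cv_wait: sleeps, dropping kq_lock */
            mutex_spin_exit(&kq->kq_lock);
    }

The wakeup side runs KQ_FLUX_WAKEUP(kq) under the same lock once the state settles, as kqueue_scan() does at lines 2370 and 2392 below.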
460 struct kqueue *kq = kn->kn_kq; in knote_detach_quiesce() local
461 filedesc_t *fdp = kq->kq_fdp; in knote_detach_quiesce()
465 mutex_spin_enter(&kq->kq_lock); in knote_detach_quiesce()
498 mutex_spin_exit(&kq->kq_lock); in knote_detach_quiesce()
501 mutex_spin_exit(&kq->kq_lock); in knote_detach_quiesce()
531 mutex_spin_exit(&kq->kq_lock); in knote_detach_quiesce()
534 mutex_spin_exit(&kq->kq_lock); in knote_detach_quiesce()
870 struct kqueue *kq; in filt_kqdetach() local
872 kq = ((file_t *)kn->kn_obj)->f_kqueue; in filt_kqdetach()
874 mutex_spin_enter(&kq->kq_lock); in filt_kqdetach()
875 selremove_knote(&kq->kq_sel, kn); in filt_kqdetach()
876 mutex_spin_exit(&kq->kq_lock); in filt_kqdetach()
886 struct kqueue *kq; in filt_kqueue() local
889 kq = ((file_t *)kn->kn_obj)->f_kqueue; in filt_kqueue()
892 mutex_spin_enter(&kq->kq_lock); in filt_kqueue()
893 kn->kn_data = KQ_COUNT(kq); in filt_kqueue()
896 mutex_spin_exit(&kq->kq_lock); in filt_kqueue()
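Lines 886-896 show the canonical shape of a filter event routine: sample the protected state under kq_lock, publish it through kn_data, and report whether the knote is active. Reassembled as a sketch; the return convention is an inference from the visible fragment:

    static int
    filt_kqueue_sketch(struct knote *kn, long hint)
    {
            struct kqueue *kq = ((file_t *)kn->kn_obj)->f_kqueue;
            int rv;

            mutex_spin_enter(&kq->kq_lock);
            kn->kn_data = KQ_COUNT(kq);     /* pending events on the watched kqueue */
            rv = (kn->kn_data > 0);         /* active iff anything is queued */
            mutex_spin_exit(&kq->kq_lock);

            return rv;
    }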
956 struct kqueue *kq = kn->kn_kq; in filt_procdetach() local
966 mutex_spin_enter(&kq->kq_lock); in filt_procdetach()
970 mutex_spin_exit(&kq->kq_lock); in filt_procdetach()
978 mutex_spin_exit(&kq->kq_lock); in filt_procdetach()
991 struct kqueue *kq = kn->kn_kq; in filt_proc() local
1001 mutex_spin_enter(&kq->kq_lock); in filt_proc()
1003 mutex_spin_exit(&kq->kq_lock); in filt_proc()
1012 struct kqueue *kq; in knote_proc_exec() local
1024 kq = kn->kn_kq; in knote_proc_exec()
1025 mutex_spin_enter(&kq->kq_lock); in knote_proc_exec()
1030 mutex_spin_exit(&kq->kq_lock); in knote_proc_exec()
1039 struct kqueue *kq = okn->kn_kq; in knote_proc_fork_track() local
1041 KASSERT(mutex_owned(&kq->kq_lock)); in knote_proc_fork_track()
1054 mutex_spin_exit(&kq->kq_lock); in knote_proc_fork_track()
1095 filedesc_t *fdp = kq->kq_fdp; in knote_proc_fork_track()
1108 kntrack->kn_kq = kq; in knote_proc_fork_track()
1145 if (__predict_false(kq->kq_count & KQ_CLOSING)) { in knote_proc_fork_track()
1194 mutex_spin_enter(&kq->kq_lock); in knote_proc_fork_track()
1197 KQ_FLUX_WAKEUP(kq); in knote_proc_fork_track()
1207 struct kqueue *kq; in knote_proc_fork() local
1230 kq = kn->kn_kq; in knote_proc_fork()
1231 mutex_spin_enter(&kq->kq_lock); in knote_proc_fork()
1242 KASSERT(mutex_owned(&kq->kq_lock)); in knote_proc_fork()
1248 mutex_spin_exit(&kq->kq_lock); in knote_proc_fork()
1258 struct kqueue *kq; in knote_proc_exit() local
1264 kq = kn->kn_kq; in knote_proc_exit()
1268 mutex_spin_enter(&kq->kq_lock); in knote_proc_exit()
1298 mutex_spin_exit(&kq->kq_lock); in knote_proc_exit()
1399 struct kqueue *kq = kn->kn_kq; in filt_timerexpire() local
1401 mutex_spin_enter(&kq->kq_lock); in filt_timerexpire()
1410 mutex_spin_exit(&kq->kq_lock); in filt_timerexpire()
1433 struct kqueue *kq; in filt_timerattach() local
1455 kq = kn->kn_kq; in filt_timerattach()
1456 mutex_spin_enter(&kq->kq_lock); in filt_timerattach()
1465 mutex_spin_exit(&kq->kq_lock); in filt_timerattach()
1474 struct kqueue *kq = kn->kn_kq; in filt_timerdetach() local
1477 mutex_spin_enter(&kq->kq_lock); in filt_timerdetach()
1479 mutex_spin_exit(&kq->kq_lock); in filt_timerdetach()
1497 struct kqueue *kq = kn->kn_kq; in filt_timertouch() local
1502 KASSERT(mutex_owned(&kq->kq_lock)); in filt_timertouch()
1520 KASSERT(mutex_owned(&kq->kq_fdp->fd_lock)); in filt_timertouch()
1523 callout_halt(calloutp, &kq->kq_lock); in filt_timertouch()
1524 KASSERT(mutex_owned(&kq->kq_lock)); in filt_timertouch()
1552 struct kqueue *kq = kn->kn_kq; in filt_timer() local
1555 mutex_spin_enter(&kq->kq_lock); in filt_timer()
1557 mutex_spin_exit(&kq->kq_lock); in filt_timer()
1565 struct kqueue *kq = kn->kn_kq; in filt_userattach() local
1570 mutex_spin_enter(&kq->kq_lock); in filt_userattach()
1576 mutex_spin_exit(&kq->kq_lock); in filt_userattach()
1592 struct kqueue *kq = kn->kn_kq; in filt_user() local
1595 mutex_spin_enter(&kq->kq_lock); in filt_user()
1597 mutex_spin_exit(&kq->kq_lock); in filt_user()
1720 struct kqueue *kq; in kqueue1() local
1729 kq = kmem_zalloc(sizeof(*kq), KM_SLEEP); in kqueue1()
1730 mutex_init(&kq->kq_lock, MUTEX_DEFAULT, IPL_SCHED); in kqueue1()
1731 cv_init(&kq->kq_cv, "kqueue"); in kqueue1()
1732 selinit(&kq->kq_sel); in kqueue1()
1733 TAILQ_INIT(&kq->kq_head); in kqueue1()
1734 fp->f_kqueue = kq; in kqueue1()
1736 kq->kq_fdp = curlwp->l_fd; in kqueue1()
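Lines 1729-1736 contain the whole allocation sequence for a new kqueue. Reassembled below; the surrounding file-descriptor allocation and error paths are omitted, so treat this as a sketch rather than the full function:

    static struct kqueue *
    kqueue_alloc_sketch(file_t *fp)
    {
            struct kqueue *kq;

            kq = kmem_zalloc(sizeof(*kq), KM_SLEEP);        /* zeroed, may sleep */
            mutex_init(&kq->kq_lock, MUTEX_DEFAULT, IPL_SCHED);
            cv_init(&kq->kq_cv, "kqueue");                  /* wchan message */
            selinit(&kq->kq_sel);                           /* poll/select bookkeeping */
            TAILQ_INIT(&kq->kq_head);                       /* pending-knote queue */
            fp->f_kqueue = kq;
            kq->kq_fdp = curlwp->l_fd;                      /* owning fd table */
            return kq;
    }

Note the IPL_SCHED spin mutex: every kq_lock acquisition in this file correspondingly uses mutex_spin_enter/mutex_spin_exit.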
1813 struct kqueue *kq; in kevent1() local
1837 kq = fp->f_kqueue; in kevent1()
1852 error = kqueue_register(kq, kevp); in kevent1()
1888 kqueue_register(struct kqueue *kq, struct kevent *kev) in kqueue_register() argument
1898 fdp = kq->kq_fdp; in kqueue_register()
1933 if (kq == kn->kn_kq && in kqueue_register()
1949 kq == kn->kn_kq && in kqueue_register()
1958 KASSERT((kq->kq_count & KQ_CLOSING) == 0); in kqueue_register()
1970 kn->kn_kq = kq; in kqueue_register()
2046 mutex_enter(&kq->kq_lock); in kqueue_register()
2049 mutex_exit(&kq->kq_lock); in kqueue_register()
2068 mutex_spin_enter(&kq->kq_lock); in kqueue_register()
2074 mutex_spin_exit(&kq->kq_lock); in kqueue_register()
2087 mutex_spin_exit(&kq->kq_lock); in kqueue_register()
2090 mutex_spin_exit(&kq->kq_lock); in kqueue_register()
2108 mutex_spin_enter(&kq->kq_lock); in kqueue_register()
2110 mutex_spin_exit(&kq->kq_lock); in kqueue_register()
2137 mutex_spin_enter(&kq->kq_lock); in kqueue_register()
2140 mutex_spin_exit(&kq->kq_lock); in kqueue_register()
2163 kqueue_printit(struct kqueue *kq, bool full, void (*pr)(const char *, ...)) in kqueue_printit() argument
2173 (*pr)("kqueue %p (restart=%d count=%u):\n", kq, in kqueue_printit()
2174 !!(kq->kq_count & KQ_RESTART), KQ_COUNT(kq)); in kqueue_printit()
2176 TAILQ_FOREACH(kn, &kq->kq_head, kn_tqe) { in kqueue_printit()
2186 if (kn->kn_kq != kq) { in kqueue_printit()
2190 if (count != KQ_COUNT(kq)) { in kqueue_printit()
2192 count, KQ_COUNT(kq)); in kqueue_printit()
2199 kqueue_check(const char *func, size_t line, const struct kqueue *kq) in kqueue_check() argument
2206 KASSERT(mutex_owned(&kq->kq_lock)); in kqueue_check()
2210 TAILQ_FOREACH(kn, &kq->kq_head, kn_tqe) { in kqueue_check()
2213 func, line, kq, kn, KN_FMT(buf, kn)); in kqueue_check()
2216 if (kn->kn_kq != kq) { in kqueue_check()
2218 func, line, kq, kn, kn->kn_kq, in kqueue_check()
2223 func, line, kq, kn, KN_FMT(buf, kn)); in kqueue_check()
2226 if (count > KQ_COUNT(kq)) { in kqueue_check()
2229 func, line, kq, KQ_COUNT(kq), count, in kqueue_check()
2245 struct kqueue *kq = fp->f_kqueue; in kqueue_restart() local
2246 KASSERT(kq != NULL); in kqueue_restart()
2248 mutex_spin_enter(&kq->kq_lock); in kqueue_restart()
2249 kq->kq_count |= KQ_RESTART; in kqueue_restart()
2250 cv_broadcast(&kq->kq_cv); in kqueue_restart()
2251 mutex_spin_exit(&kq->kq_lock); in kqueue_restart()
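Lines 2245-2251 are essentially the whole of kqueue_restart(): it sets the KQ_RESTART bit and wakes any thread sleeping in kqueue_scan(). Note that kq_count doubles as a flag word, with KQ_RESTART and KQ_CLOSING OR-ed into it, which is presumably why readers go through the KQ_COUNT() accessor rather than reading kq_count directly. Reassembled:

    static void
    kqueue_restart_sketch(file_t *fp)
    {
            struct kqueue *kq = fp->f_kqueue;

            KASSERT(kq != NULL);
            mutex_spin_enter(&kq->kq_lock);
            kq->kq_count |= KQ_RESTART;     /* flag bit packed into the count word */
            cv_broadcast(&kq->kq_cv);       /* kick sleepers in kqueue_scan() */
            mutex_spin_exit(&kq->kq_lock);
    }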
2273 struct kqueue *kq; in kqueue_scan() local
2283 kq = fp->f_kqueue; in kqueue_scan()
2307 marker->kn_kq = kq; in kqueue_scan()
2309 mutex_spin_enter(&kq->kq_lock); in kqueue_scan()
2312 if (KQ_COUNT(kq) == 0) { in kqueue_scan()
2314 error = cv_timedwait_sig(&kq->kq_cv, in kqueue_scan()
2315 &kq->kq_lock, timeout); in kqueue_scan()
2317 if (KQ_COUNT(kq) == 0 && in kqueue_scan()
2318 (kq->kq_count & KQ_RESTART)) { in kqueue_scan()
2333 mutex_spin_exit(&kq->kq_lock); in kqueue_scan()
2338 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe); in kqueue_scan()
2345 mutex_spin_exit(&kq->kq_lock); in kqueue_scan()
2348 mutex_spin_enter(&kq->kq_lock); in kqueue_scan()
2355 kn = TAILQ_FIRST(&kq->kq_head); in kqueue_scan()
2370 KQ_FLUX_WAKEUP(kq); in kqueue_scan()
2374 KQ_FLUX_WAIT(kq); in kqueue_scan()
2375 mutex_spin_exit(&kq->kq_lock); in kqueue_scan()
2383 mutex_spin_exit(&kq->kq_lock); in kqueue_scan()
2389 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); in kqueue_scan()
2392 KQ_FLUX_WAKEUP(kq); in kqueue_scan()
2401 kq_check(kq); in kqueue_scan()
2404 kq_check(kq); in kqueue_scan()
2407 kq->kq_count--; in kqueue_scan()
2412 mutex_spin_exit(&kq->kq_lock); in kqueue_scan()
2417 mutex_spin_enter(&kq->kq_lock); in kqueue_scan()
2422 kq->kq_count--; in kqueue_scan()
2432 kq->kq_count--; in kqueue_scan()
2459 kq->kq_count--; in kqueue_scan()
2463 mutex_spin_exit(&kq->kq_lock); in kqueue_scan()
2466 mutex_spin_enter(&kq->kq_lock); in kqueue_scan()
2480 kq->kq_count--; in kqueue_scan()
2484 kq->kq_count--; in kqueue_scan()
2487 kq_check(kq); in kqueue_scan()
2490 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); in kqueue_scan()
2491 kq_check(kq); in kqueue_scan()
2497 KQ_FLUX_WAKEUP(kq); in kqueue_scan()
2498 mutex_spin_exit(&kq->kq_lock); in kqueue_scan()
2504 mutex_spin_enter(&kq->kq_lock); in kqueue_scan()
2512 TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe); in kqueue_scan()
2516 KQ_FLUX_WAKEUP(kq); in kqueue_scan()
2517 mutex_spin_exit(&kq->kq_lock); in kqueue_scan()
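kqueue_scan() (lines 2273-2517) walks kq_head with a private marker knote: the marker is appended at line 2338, real knotes are popped from the head until the scanner meets its own marker, and the marker is removed again at line 2512. A heavily simplified sketch of that traversal, with timeout handling, in-flux waits, and event delivery elided:

    static void
    kqueue_scan_sketch(struct kqueue *kq, struct knote *marker)
    {
            struct knote *kn;

            marker->kn_kq = kq;
            mutex_spin_enter(&kq->kq_lock);
            TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
            while ((kn = TAILQ_FIRST(&kq->kq_head)) != marker) {
                    TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
                    kq->kq_count--;
                    /* ... copy the event out; re-queue kn if not oneshot ... */
            }
            TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
            KQ_FLUX_WAKEUP(kq);             /* let waiters re-examine the queue */
            mutex_spin_exit(&kq->kq_lock);
    }

The marker is what lets the real code drop and retake kq_lock mid-walk (as it does around the copyout at lines 2412-2417) without losing its place in the queue.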
2606 struct kqueue *kq; in kqueue_poll() local
2609 kq = fp->f_kqueue; in kqueue_poll()
2613 mutex_spin_enter(&kq->kq_lock); in kqueue_poll()
2614 if (KQ_COUNT(kq) != 0) { in kqueue_poll()
2617 selrecord(curlwp, &kq->kq_sel); in kqueue_poll()
2619 kq_check(kq); in kqueue_poll()
2620 mutex_spin_exit(&kq->kq_lock); in kqueue_poll()
2633 struct kqueue *kq; in kqueue_stat() local
2635 kq = fp->f_kqueue; in kqueue_stat()
2638 st->st_size = KQ_COUNT(kq); in kqueue_stat()
2649 kqueue_doclose(struct kqueue *kq, struct klist *list, int fd) in kqueue_doclose() argument
2654 fdp = kq->kq_fdp; in kqueue_doclose()
2660 if (kq != kn->kn_kq) { in kqueue_doclose()
2680 struct kqueue *kq; in kqueue_close() local
2685 kq = fp->f_kqueue; in kqueue_close()
2690 KASSERT(kq->kq_fdp == fdp); in kqueue_close()
2707 mutex_enter(&kq->kq_lock); in kqueue_close()
2708 kq->kq_count |= KQ_CLOSING; in kqueue_close()
2709 mutex_exit(&kq->kq_lock); in kqueue_close()
2714 kqueue_doclose(kq, (struct klist *)&ff->ff_knlist, i); in kqueue_close()
2718 kqueue_doclose(kq, &fdp->fd_knhash[i], -1); in kqueue_close()
2725 mutex_enter(&kq->kq_lock); in kqueue_close()
2726 kq_check(kq); in kqueue_close()
2727 mutex_exit(&kq->kq_lock); in kqueue_close()
2729 KASSERT(TAILQ_EMPTY(&kq->kq_head)); in kqueue_close()
2730 KASSERT(KQ_COUNT(kq) == 0); in kqueue_close()
2731 mutex_destroy(&kq->kq_lock); in kqueue_close()
2732 cv_destroy(&kq->kq_cv); in kqueue_close()
2733 seldestroy(&kq->kq_sel); in kqueue_close()
2734 kmem_free(kq, sizeof(*kq)); in kqueue_close()
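The tail of kqueue_close() (lines 2725-2734) is the mirror image of the kqueue1() setup above: assert that no knotes remain, then destroy the lock, condvar, and selinfo before freeing the structure. As a sketch:

    static void
    kqueue_free_sketch(struct kqueue *kq)
    {
            KASSERT(TAILQ_EMPTY(&kq->kq_head));     /* all knotes gone */
            KASSERT(KQ_COUNT(kq) == 0);             /* KQ_CLOSING may still be set;
                                                       the masked count must be zero */
            mutex_destroy(&kq->kq_lock);
            cv_destroy(&kq->kq_cv);
            seldestroy(&kq->kq_sel);
            kmem_free(kq, sizeof(*kq));
    }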
2746 struct kqueue *kq; in kqueue_kqfilter() local
2748 kq = ((file_t *)kn->kn_obj)->f_kqueue; in kqueue_kqfilter()
2756 mutex_enter(&kq->kq_lock); in kqueue_kqfilter()
2757 selrecord_knote(&kq->kq_sel, kn); in kqueue_kqfilter()
2758 mutex_exit(&kq->kq_lock); in kqueue_kqfilter()
2821 struct kqueue *kq; in knote_detach() local
2823 kq = kn->kn_kq; in knote_detach()
2847 mutex_spin_enter(&kq->kq_lock); in knote_detach()
2850 kq_check(kq); in knote_detach()
2851 KASSERT(KQ_COUNT(kq) != 0); in knote_detach()
2852 kq->kq_count--; in knote_detach()
2853 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); in knote_detach()
2855 kq_check(kq); in knote_detach()
2857 mutex_spin_exit(&kq->kq_lock); in knote_detach()
2860 mutex_spin_exit(&kq->kq_lock); in knote_detach()
2875 struct kqueue *kq; in knote_enqueue() local
2879 kq = kn->kn_kq; in knote_enqueue()
2881 mutex_spin_enter(&kq->kq_lock); in knote_enqueue()
2890 kq_check(kq); in knote_enqueue()
2892 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); in knote_enqueue()
2893 KASSERT(KQ_COUNT(kq) < KQ_MAXCOUNT); in knote_enqueue()
2894 kq->kq_count++; in knote_enqueue()
2895 kq_check(kq); in knote_enqueue()
2896 cv_broadcast(&kq->kq_cv); in knote_enqueue()
2897 selnotify(&kq->kq_sel, 0, NOTE_SUBMIT); in knote_enqueue()
2900 mutex_spin_exit(&kq->kq_lock); in knote_enqueue()
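knote_enqueue() and knote_activate_locked() (below) share the same delivery step: insert at the queue tail, bump the count, then wake both kinds of waiters. A sketch of that common step; in the real functions it is guarded by state checks the listing only hints at:

    static void
    knote_deliver_sketch(struct knote *kn)
    {
            struct kqueue *kq = kn->kn_kq;

            mutex_spin_enter(&kq->kq_lock);
            TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
            KASSERT(KQ_COUNT(kq) < KQ_MAXCOUNT);    /* keep the packed count in bounds */
            kq->kq_count++;
            cv_broadcast(&kq->kq_cv);               /* wake kqueue_scan() sleepers */
            selnotify(&kq->kq_sel, 0, NOTE_SUBMIT); /* wake poll/select and nested kqueues */
            mutex_spin_exit(&kq->kq_lock);
    }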
2908 struct kqueue *kq; in knote_activate_locked() local
2912 kq = kn->kn_kq; in knote_activate_locked()
2920 kq_check(kq); in knote_activate_locked()
2922 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); in knote_activate_locked()
2923 KASSERT(KQ_COUNT(kq) < KQ_MAXCOUNT); in knote_activate_locked()
2924 kq->kq_count++; in knote_activate_locked()
2925 kq_check(kq); in knote_activate_locked()
2926 cv_broadcast(&kq->kq_cv); in knote_activate_locked()
2927 selnotify(&kq->kq_sel, 0, NOTE_SUBMIT); in knote_activate_locked()
2934 struct kqueue *kq = kn->kn_kq; in knote_activate() local
2936 mutex_spin_enter(&kq->kq_lock); in knote_activate()
2938 mutex_spin_exit(&kq->kq_lock); in knote_activate()
2944 struct kqueue *kq = kn->kn_kq; in knote_deactivate_locked() local
2947 kq_check(kq); in knote_deactivate_locked()
2949 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); in knote_deactivate_locked()
2950 KASSERT(KQ_COUNT(kq) > 0); in knote_deactivate_locked()
2951 kq->kq_count--; in knote_deactivate_locked()
2952 kq_check(kq); in knote_deactivate_locked()
2964 struct kqueue *kq = kn->kn_kq; in knote_set_eof() local
2966 mutex_spin_enter(&kq->kq_lock); in knote_set_eof()
2968 mutex_spin_exit(&kq->kq_lock); in knote_set_eof()
2977 struct kqueue *kq = kn->kn_kq; in knote_clear_eof() local
2979 mutex_spin_enter(&kq->kq_lock); in knote_clear_eof()
2981 mutex_spin_exit(&kq->kq_lock); in knote_clear_eof()