xref: /dragonfly/sys/kern/kern_event.c (revision fcf53d9b)
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 * $DragonFly: src/sys/kern/kern_event.c,v 1.33 2007/02/03 17:05:57 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/ktr.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>

/*
 * Global token for kqueue subsystem
 */
struct lwkt_token kq_token = LWKT_TOKEN_INITIALIZER(kq_token);
SYSCTL_LONG(_lwkt, OID_AUTO, kq_collisions,
    CTLFLAG_RW, &kq_token.t_collisions, 0,
    "Collision counter of kq_token");

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
	struct kevent_args	*ka;
	int			pchanges;
};

static int	kqueue_sleep(struct kqueue *kq, struct timespec *tsp);
static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct knote *marker);
static int 	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int 	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int 	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int 	kqueue_close(struct file *fp);
static void	kqueue_wakeup(struct kqueue *kq);
static int	filter_attach(struct knote *kn);
static int	filter_event(struct knote *kn, long hint);

/*
 * MPSAFE
 */
static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};

static void 	knote_attach(struct knote *kn);
static void 	knote_drop(struct knote *kn);
static void	knote_detach_and_drop(struct knote *kn);
static void 	knote_enqueue(struct knote *kn);
static void 	knote_dequeue(struct knote *kn);
static struct 	knote *knote_alloc(void);
static void 	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);

static struct filterops file_filtops =
	{ FILTEROP_ISFD, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ FILTEROP_ISFD, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };

static int 		kq_ncallouts = 0;
static int 		kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int		kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of loops for kqueue scan");

#define KNOTE_ACTIVATE(kn) do { 					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

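/*
 * Worked example (illustrative): for a pid-keyed knote with
 * kn_id == 0x1234 and the initial hash mask of 63 that hashinit()
 * produces for KN_HASHSIZE 64, KN_HASH folds the low bytes together:
 *
 *	0x1234 ^ (0x1234 >> 8) == 0x1234 ^ 0x12 == 0x1226
 *	0x1226 & 63 == 38
 *
 * so the knote lands in bucket 38 of kq_knhash.
 */
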
extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_EXCEPT */
};
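
/*
 * Illustrative note: user-visible filter numbers are small negative
 * values (EVFILT_READ is -1 in <sys/event.h>) and kqueue_register()
 * below indexes this table with the one's complement of the filter:
 *
 *	sysfilt_ops[~EVFILT_READ] == sysfilt_ops[0] == &file_filtops
 *
 * which is why the table is laid out in -1, -2, ... order.
 */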

static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}

/*
 * MPSAFE
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EOPNOTSUPP);

	kn->kn_fop = &kqread_filtops;
	knote_insert(&kq->kq_kqinfo.ki_note, kn);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	knote_remove(&kq->kq_kqinfo.ki_note, kn);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL) {
		return (ESRCH);
	}
	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		if (p)
			PRELE(p);
		return (EACCES);
	}

	lwkt_gettoken(&p->p_token);
	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;		/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	knote_insert(&p->p_klist, kn);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (0);
}
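
/*
 * Userland usage sketch (illustrative, not part of this file): watch
 * a child for exit with EVFILT_PROC.  Because zombies are matched via
 * zpfind() above, NOTE_EXIT is delivered even if the child died
 * before this registration ran:
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *	pid_t pid = fork();
 *	if (pid == 0)
 *		_exit(0);
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(register only)
 *	kevent(kq, NULL, 0, &kev, 1, NULL);	(kev.data == p_xstat)
 */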

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	/* XXX locking? take proc_token here? */
	p = kn->kn_ptr.p_proc;
	knote_remove(&p->p_klist, kn);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will be poof,
	 * gone later on.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;
		if ((kn->kn_status & KN_DETACHED) == 0) {
			knote_remove(&p->p_klist, kn);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
		}
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
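
/*
 * Usage sketch (illustrative): adding NOTE_TRACK to the fflags of an
 * EVFILT_PROC registration makes the fork case above auto-attach a
 * knote to each new child, which then reports NOTE_CHILD with the
 * parent pid in kev.data:
 *
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
 *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *
 * If the auto-attach fails the parent's knote reports NOTE_TRACKERR.
 */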

/*
 * The callout interlocks with callout_stop() (or should), so the
 * knote should still be a valid structure.  However the timeout
 * can race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	lwkt_gettoken(&kq_token);
	if ((kn->kn_status & KN_DELETING) == 0) {
		kn->kn_data++;
		KNOTE_ACTIVATE(kn);

		if ((kn->kn_flags & EV_ONESHOT) == 0) {
			tv.tv_sec = kn->kn_sdata / 1000;
			tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
			tticks = tvtohz_high(&tv);
			calloutp = (struct callout *)kn->kn_hook;
			callout_reset(calloutp, tticks, filt_timerexpire, kn);
		}
	}
	lwkt_reltoken(&kq_token);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	if (kq_ncallouts >= kq_calloutmax) {
		kn->kn_hook = NULL;
		return (ENOMEM);
	}
	kq_ncallouts++;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	MALLOC(calloutp, struct callout *, sizeof(*calloutp),
	    M_KQUEUE, M_WAITOK);
	callout_init(calloutp);
	kn->kn_hook = (caddr_t)calloutp;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_stop(calloutp);
	FREE(calloutp, M_KQUEUE);
	kq_ncallouts--;
}

static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}
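
/*
 * Userland usage sketch (illustrative, not part of this file): a
 * periodic timer.  kn_sdata is the period in milliseconds, EV_CLEAR
 * is forced on above, and kev.data returns the number of expirations
 * since the last retrieval:
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	for (;;)
 *		kevent(kq, NULL, 0, &kev, 1, NULL);	(fires ~every 500ms)
 *
 * Add EV_ONESHOT to get a single expiration instead.
 */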

/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 */
static __inline
int
knote_acquire(struct knote *kn)
{
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_WAITING | KN_REPROCESS;
		tsleep(kn, 0, "kqepts", hz);
		/* knote may be stale now */
		return(0);
	}
	kn->kn_status |= KN_PROCESSING;
	return(1);
}

/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Non-zero is returned if the knote is destroyed.
 */
static __inline
int
knote_release(struct knote *kn)
{
	while (kn->kn_status & KN_REPROCESS) {
		kn->kn_status &= ~KN_REPROCESS;
		if (kn->kn_status & KN_WAITING) {
			kn->kn_status &= ~KN_WAITING;
			wakeup(kn);
		}
		if (kn->kn_status & KN_DELETING) {
			knote_detach_and_drop(kn);
			return(1);
			/* NOT REACHED */
		}
		if (filter_event(kn, 0))
			KNOTE_ACTIVATE(kn);
	}
	kn->kn_status &= ~KN_PROCESSING;
	return(0);
}
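
/*
 * Typical acquire/release pattern used throughout this file (sketch):
 *
 *	restart:
 *	SLIST_FOREACH(kn, list, kn_link) {
 *		if (knote_acquire(kn) == 0)
 *			goto restart;		(knote may be stale)
 *		... operate on kn ...
 *		if (knote_release(kn))
 *			goto restart;		(knote was destroyed)
 *	}
 */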

/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 *
 * MPSAFE
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
	TAILQ_INIT(&kq->kq_knpend);
	TAILQ_INIT(&kq->kq_knlist);
	kq->kq_count = 0;
	kq->kq_fdp = fdp;
	SLIST_INIT(&kq->kq_kqinfo.ki_note);
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
		if (knote_acquire(kn))
			knote_detach_and_drop(kn);
	}
	if (kq->kq_knhash) {
		kfree(kq->kq_knhash, M_KQUEUE);
		kq->kq_knhash = NULL;
		kq->kq_knhashmask = 0;
	}
	lwkt_reltoken(&kq_token);
}

/*
 * MPSAFE
 */
int
sys_kqueue(struct kqueue_args *uap)
{
	struct thread *td = curthread;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq, td->td_proc->p_fd);
	fp->f_data = kq;

	fsetfd(kq->kq_fdp, fp, fd);
	uap->sysmsg_result = fd;
	fdrop(fp);
	return (error);
}
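
/*
 * Userland usage sketch (illustrative, not part of this file): the
 * descriptor returned here is consumed by kevent(2), e.g. waiting for
 * an assumed socket sock_fd to become readable:
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *	EV_SET(&kev, sock_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(register)
 *	kevent(kq, NULL, 0, &kev, 1, NULL);	(block; kev.data = bytes ready)
 */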

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct kevent_copyin_args *kap;
	int error;

	kap = (struct kevent_copyin_args *)arg;

	error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
	if (error == 0) {
		kap->ka->eventlist += count;
		*res += count;
	} else {
		*res = -1;
	}

	return (error);
}

/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
	struct kevent_copyin_args *kap;
	int error, count;

	kap = (struct kevent_copyin_args *)arg;

	count = min(kap->ka->nchanges - kap->pchanges, max);
	error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
	if (error == 0) {
		kap->ka->changelist += count;
		kap->pchanges += count;
		*events = count;
	}

	return (error);
}

/*
 * MPSAFE
 */
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
	    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
	    struct timespec *tsp_in)
{
	struct kevent *kevp;
	struct timespec *tsp;
	int i, n, total, error, nerrors = 0;
	int lres;
	int limit = kq_checkloop;
	struct kevent kev[KQ_NEVENTS];
	struct knote marker;

	tsp = tsp_in;
	*res = 0;

	lwkt_gettoken(&kq_token);
	for ( ;; ) {
		n = 0;
		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
		if (error)
			goto done;
		if (n == 0)
			break;
		for (i = 0; i < n; i++) {
			kevp = &kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp);

			/*
			 * If a registration returns an error we
			 * immediately post the error.  The kevent()
			 * call itself will fail with the error if
			 * no space is available for posting.
			 *
			 * Such errors normally bypass the timeout/blocking
			 * code.  However, if the copyoutfn function refuses
			 * to post the error (see sys_poll()), then we
			 * ignore it too.
			 */
			if (error) {
				kevp->flags = EV_ERROR;
				kevp->data = error;
				lres = *res;
				kevent_copyoutfn(uap, kevp, 1, res);
				if (lres != *res) {
					nevents--;
					nerrors++;
				}
			}
		}
	}
	if (nerrors) {
		error = 0;
		goto done;
	}

	/*
	 * Acquire/wait for events - setup timeout
	 */
	if (tsp != NULL) {
		struct timespec ats;

		if (tsp->tv_sec || tsp->tv_nsec) {
			nanouptime(&ats);
			timespecadd(tsp, &ats);		/* tsp = target time */
		}
	}

	/*
	 * Loop as required.
	 *
	 * Collect as many events as we can. Sleeping on successive
	 * loops is disabled if copyoutfn has incremented (*res).
	 *
	 * The loop stops if an error occurs, all events have been
	 * scanned (the marker has been reached), or fewer than the
	 * maximum number of events is found.
	 *
	 * The copyoutfn function does not have to increment (*res) in
	 * order for the loop to continue.
	 *
	 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
	 */
	total = 0;
	error = 0;
	marker.kn_filter = EVFILT_MARKER;
	marker.kn_status = KN_PROCESSING;
	TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
	while ((n = nevents - total) > 0) {
		if (n > KQ_NEVENTS)
			n = KQ_NEVENTS;

		/*
		 * If no events are pending sleep until timeout (if any)
		 * or an event occurs.
		 *
		 * After the sleep completes the marker is moved to the
		 * end of the list, making any received events available
		 * to our scan.
		 */
		if (kq->kq_count == 0 && *res == 0) {
			error = kqueue_sleep(kq, tsp);
			if (error)
				break;

			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
		}

		/*
		 * Process all received events
		 * Account for all non-spurious events in our total
		 */
		i = kqueue_scan(kq, kev, n, &marker);
		if (i) {
			lres = *res;
			error = kevent_copyoutfn(uap, kev, i, res);
			total += *res - lres;
			if (error)
				break;
		}
		if (limit && --limit == 0)
			panic("kqueue: checkloop failed i=%d", i);

		/*
		 * Normally when fewer events are returned than requested
		 * we can stop.  However, if only spurious events were
		 * collected the copyout will not bump (*res) and we have
		 * to continue.
		 */
		if (i < n && *res)
			break;

		/*
		 * Deal with an edge case where spurious events can cause
		 * a loop to occur without moving the marker.  This can
		 * prevent kqueue_scan() from picking up new events which
		 * race us.  We must be sure to move the marker for this
		 * case.
		 *
		 * NOTE: We do not want to move the marker if events
		 *	 were scanned because normal kqueue operations
		 *	 may reactivate events.  Moving the marker in
		 *	 that case could result in duplicates for the
		 *	 same event.
		 */
		if (i == 0) {
			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
		}
	}
	TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);

	/* Timeouts do not return EWOULDBLOCK. */
	if (error == EWOULDBLOCK)
		error = 0;

done:
	lwkt_reltoken(&kq_token);
	return (error);
}
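
/*
 * Timeout semantics sketch (illustrative): the tsp argument above is
 * relative on entry and converted to an absolute uptime target, so
 * callers get the usual kevent(2) behavior:
 *
 *	kevent(kq, NULL, 0, evs, n, NULL);	(block indefinitely)
 *	struct timespec ts = { 0, 0 };
 *	kevent(kq, NULL, 0, evs, n, &ts);	(poll, never block)
 *	ts.tv_sec = 2;
 *	kevent(kq, NULL, 0, evs, n, &ts);	(wait at most ~2 seconds)
 */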

/*
 * MPALMOSTSAFE
 */
int
sys_kevent(struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct timespec ts, *tsp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct kevent_copyin_args *kap, ka;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	fp = holdfp(p->p_fd, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		fdrop(fp);
		return (EBADF);
	}

	kq = (struct kqueue *)fp->f_data;

	kap = &ka;
	kap->ka = uap;
	kap->pchanges = 0;

	error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
			    kevent_copyin, kevent_copyout, tsp);

	fdrop(fp);

	return (error);
}

int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for insuring that
		 * the identifier can be attached to it.
		 */
		kprintf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	lwkt_gettoken(&kq_token);
	if (fops->f_flags & FILTEROP_ISFD) {
		/* validate descriptor */
		fp = holdfp(fdp, kev->ident, -1);
		if (fp == NULL) {
			lwkt_reltoken(&kq_token);
			return (EBADF);
		}

again1:
		SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
			if (kn->kn_kq == kq &&
			    kn->kn_filter == kev->filter &&
			    kn->kn_id == kev->ident) {
				if (knote_acquire(kn) == 0)
					goto again1;
				break;
			}
		}
	} else {
		if (kq->kq_knhashmask) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
again2:
			SLIST_FOREACH(kn, list, kn_link) {
				if (kn->kn_id == kev->ident &&
				    kn->kn_filter == kev->filter) {
					if (knote_acquire(kn) == 0)
						goto again2;
					break;
				}
			}
		}
	}

	/*
	 * NOTE: At this point if kn is non-NULL we will have acquired
	 *	 it and set KN_PROCESSING.
	 */
	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			/*
			 * KN_PROCESSING prevents the knote from getting
			 * ripped out from under us while we are trying
			 * to attach it, in case the attach blocks.
			 */
			kn->kn_status = KN_PROCESSING;
			knote_attach(kn);
			if ((error = filter_attach(kn)) != 0) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
				knote_drop(kn);
				goto done;
			}

			/*
			 * Interlock against close races which either tried
			 * to remove our knote while we were blocked or missed
			 * it entirely prior to our attachment.  We do not
			 * want to end up with a knote on a closed descriptor.
			 */
			if ((fops->f_flags & FILTEROP_ISFD) &&
			    checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			KKASSERT(kn->kn_status & KN_PROCESSING);
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	} else if (kev->flags & EV_DELETE) {
		/*
		 * Delete the existing knote
		 */
		knote_detach_and_drop(kn);
		goto done;
	}

	/*
	 * Disablement does not deactivate a knote here.
	 */
	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	/*
	 * Re-enablement may have to immediately enqueue an active knote.
	 */
	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0)) {
			knote_enqueue(kn);
		}
	}

	/*
	 * Handle any required reprocessing
	 */
	knote_release(kn);
	/* kn may be invalid now */

done:
	lwkt_reltoken(&kq_token);
	if (fp != NULL)
		fdrop(fp);
	return (error);
}
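
/*
 * Userland usage sketch (illustrative): the same EV_SET/kevent pattern
 * drives the other branches above; an existing knote is modified by
 * re-issuing EV_ADD, and the delete/disable paths look like:
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_DISABLE, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(keep but stop delivery)
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ENABLE, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(resume delivery)
 *	EV_SET(&kev, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(tear the knote down)
 */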

/*
 * Block as necessary until the target time is reached.
 * If tsp is NULL we block indefinitely.  If tsp->tv_sec/tv_nsec are both
 * 0 we do not block at all.
 */
static int
kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
{
	int error = 0;

	if (tsp == NULL) {
		kq->kq_state |= KQ_SLEEP;
		error = tsleep(kq, PCATCH, "kqread", 0);
	} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
		error = EWOULDBLOCK;
	} else {
		struct timespec ats;
		struct timespec atx = *tsp;
		int timeout;

		nanouptime(&ats);
		timespecsub(&atx, &ats);
		if (atx.tv_sec < 0) {
			/* target time already passed */
			error = EWOULDBLOCK;
		} else {
			timeout = atx.tv_sec > 24 * 60 * 60 ?
				24 * 60 * 60 * hz : tstohz_high(&atx);
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PCATCH, "kqread", timeout);
		}
	}

	/* don't restart after signals... */
	if (error == ERESTART)
		return (EINTR);

	return (error);
}
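
/*
 * Worked example (illustrative): with a 2.5s relative timeout,
 * kern_kevent() converts it to an absolute uptime target, so if
 * nanouptime() read 100.0s the target is 102.5s.  On entry here
 * atx = 102.5 - 100.0 = 2.5s, which tstohz_high() converts to
 * roughly 2.5 * hz ticks; anything over 24 hours is clamped.
 */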

/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
	    struct knote *marker)
{
	struct knote *kn, local_marker;
	int total;

	total = 0;
	local_marker.kn_filter = EVFILT_MARKER;
	local_marker.kn_status = KN_PROCESSING;

	/*
	 * Collect events.
	 */
	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
	while (count) {
		kn = TAILQ_NEXT(&local_marker, kn_tqe);
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Marker reached, we are done */
			if (kn == marker)
				break;

			/* Move local marker past some other thread's marker */
			kn = TAILQ_NEXT(kn, kn_tqe);
			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
			continue;
		}

		/*
		 * We can't skip a knote undergoing processing, otherwise
		 * we risk not returning it when the user process expects
		 * it should be returned.  Sleep and retry.
		 */
		if (knote_acquire(kn) == 0)
			continue;

		/*
		 * Remove the event for processing.
		 *
		 * WARNING!  We must leave KN_QUEUED set to prevent the
		 *	     event from being KNOTE_ACTIVATE()d while
		 *	     the queue state is in limbo, in case we
		 *	     block.
		 *
		 * WARNING!  We must set KN_PROCESSING to avoid races
		 *	     against deletion or another thread's
		 *	     processing.
		 */
		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
		kq->kq_count--;

		/*
		 * We have to deal with an extremely important race against
		 * file descriptor close()s here.  The file descriptor can
		 * disappear MPSAFE, and there is a small window of
		 * opportunity between that and the call to knote_fdclose().
		 *
		 * If we hit that window here while doselect or dopoll is
		 * trying to delete a spurious event they will not be able
		 * to match up the event against a knote and will go haywire.
		 */
		if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
		    checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident, kn->kn_fp)) {
			kn->kn_status |= KN_DELETING | KN_REPROCESS;
		}

		if (kn->kn_status & KN_DISABLED) {
			/*
			 * If disabled we ensure the event is not queued
			 * but leave its active bit set.  On re-enablement
			 * the event may be immediately triggered.
			 */
			kn->kn_status &= ~KN_QUEUED;
		} else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
			   (kn->kn_status & KN_DELETING) == 0 &&
			   filter_event(kn, 0) == 0) {
			/*
			 * If not running in one-shot mode and the event
			 * is no longer present we ensure it is removed
			 * from the queue and ignore it.
			 */
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
		} else {
			/*
			 * Post the event
			 */
			*kevp++ = kn->kn_kevent;
			++total;
			--count;

			if (kn->kn_flags & EV_ONESHOT) {
				kn->kn_status &= ~KN_QUEUED;
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			} else if (kn->kn_flags & EV_CLEAR) {
				kn->kn_data = 0;
				kn->kn_fflags = 0;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			} else {
				TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
				kq->kq_count++;
			}
		}

		/*
		 * Handle any post-processing states
		 */
		knote_release(kn);
	}
	TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

	return (total);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 *
 * MPSAFE
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPSAFE
 */
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPALMOSTSAFE
 */
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	     struct ucred *cred, struct sysmsg *msg)
{
	struct kqueue *kq;
	int error;

	lwkt_gettoken(&kq_token);
	kq = (struct kqueue *)fp->f_data;

	switch(com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&kq_token);
	return (error);
}

/*
 * MPSAFE
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

/*
 * MPSAFE
 */
static int
kqueue_close(struct file *fp)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	kqueue_terminate(kq);

	fp->f_data = NULL;
	funsetown(&kq->kq_sigio);

	kfree(kq, M_KQUEUE);
	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	KNOTE(&kq->kq_kqinfo.ki_note, 0);
}

/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 */
static int
filter_attach(struct knote *kn)
{
	int ret;

	if (!(kn->kn_fop->f_flags & FILTEROP_MPSAFE)) {
		get_mplock();
		ret = kn->kn_fop->f_attach(kn);
		rel_mplock();
	} else {
		ret = kn->kn_fop->f_attach(kn);
	}

	return (ret);
}

/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 */
static void
knote_detach_and_drop(struct knote *kn)
{
	kn->kn_status |= KN_DELETING | KN_REPROCESS;
	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		kn->kn_fop->f_detach(kn);
	} else {
		get_mplock();
		kn->kn_fop->f_detach(kn);
		rel_mplock();
	}
	knote_drop(kn);
}

/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 */
static int
filter_event(struct knote *kn, long hint)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_event(kn, hint);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_event(kn, hint);
		rel_mplock();
	}
	return (ret);
}

/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * sticky.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
restart:
	SLIST_FOREACH(kn, list, kn_next) {
		if (kn->kn_status & KN_PROCESSING) {
			/*
			 * Someone else is processing the knote, ask the
			 * other thread to reprocess it and don't mess
			 * with it otherwise.
			 */
			if (hint == 0) {
				kn->kn_status |= KN_REPROCESS;
				continue;
			}

			/*
			 * If the hint is non-zero we have to wait or risk
			 * losing the state the caller is trying to update.
			 *
			 * XXX This is a real problem, certain process
			 *     and signal filters will bump kn_data for
			 *     already-processed notes more than once if
			 *     we restart the list scan.  FIXME.
			 */
			kn->kn_status |= KN_WAITING | KN_REPROCESS;
			tsleep(kn, 0, "knotec", hz);
			goto restart;
		}

		/*
		 * Become the reprocessing master ourselves.
		 *
		 * If hint is non-zero running the event is mandatory
		 * when not deleting so do it whether reprocessing is
		 * set or not.
		 */
		kn->kn_status |= KN_PROCESSING;
		if ((kn->kn_status & KN_DELETING) == 0) {
			if (filter_event(kn, hint))
				KNOTE_ACTIVATE(kn);
		}
		if (knote_release(kn))
			goto restart;
	}
	lwkt_reltoken(&kq_token);
}
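
/*
 * Caller sketch (illustrative): event sources deliver hints through
 * the KNOTE() wrapper around this function, e.g. kqueue_wakeup()
 * above does KNOTE(&kq->kq_kqinfo.ki_note, 0), and the process exit
 * path reports an exit with something like:
 *
 *	KNOTE(&p->p_klist, NOTE_EXIT);
 *
 * which lands in filt_proc() via filter_event() with hint == NOTE_EXIT.
 */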

/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
	KKASSERT(kn->kn_status & KN_PROCESSING);
	ASSERT_LWKT_TOKEN_HELD(&kq_token);
	SLIST_INSERT_HEAD(klist, kn, kn_next);
}

/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
	KKASSERT(kn->kn_status & KN_PROCESSING);
	ASSERT_LWKT_TOKEN_HELD(&kq_token);
	SLIST_REMOVE(klist, kn, knote, kn_next);
}

/*
 * Remove all knotes from a specified klist
 *
 * Only called from aio.
 */
void
knote_empty(struct klist *list)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
	while ((kn = SLIST_FIRST(list)) != NULL) {
		if (knote_acquire(kn))
			knote_detach_and_drop(kn);
	}
	lwkt_reltoken(&kq_token);
}

void
knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
		    struct filterops *ops, void *hook)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
	while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
		if (knote_acquire(kn)) {
			knote_remove(&src->ki_note, kn);
			kn->kn_fop = ops;
			kn->kn_hook = hook;
			knote_insert(&dst->ki_note, kn);
			knote_release(kn);
			/* kn may be invalid now */
		}
	}
	lwkt_reltoken(&kq_token);
}

/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
restart:
	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
			if (knote_acquire(kn))
				knote_detach_and_drop(kn);
			goto restart;
		}
	}
	lwkt_reltoken(&kq_token);
}

/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 */
static void
knote_attach(struct knote *kn)
{
	struct klist *list;
	struct kqueue *kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		KKASSERT(kn->kn_fp);
		list = &kn->kn_fp->f_klist;
	} else {
		if (kq->kq_knhashmask == 0)
			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
						 &kq->kq_knhashmask);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	SLIST_INSERT_HEAD(list, kn, kn_link);
	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
}

/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 */
static void
knote_drop(struct knote *kn)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		list = &kn->kn_fp->f_klist;
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		fdrop(kn->kn_fp);
		kn->kn_fp = NULL;
	}
	knote_free(kn);
}

/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	++kq->kq_count;

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);

	kqueue_wakeup(kq);
}

/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 */
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static struct knote *
knote_alloc(void)
{
	return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
}

static void
knote_free(struct knote *kn)
{
	kfree(kn, M_KQUEUE);
}