xref: /dragonfly/sys/kern/kern_event.c (revision f2c43266)
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/ktr.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>

#define EVENT_REGISTER	1
#define EVENT_PROCESS	2

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
	struct kevent_args	*ka;
	int			pchanges;
};

#define KNOTE_CACHE_MAX		8

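/*
 * Small per-cpu cache of free knotes.  kqueue_register() primes it and
 * knote_free() returns knotes to it (up to KNOTE_CACHE_MAX) so that most
 * registrations avoid a kmalloc/kfree round trip.
 */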
struct knote_cache_list {
	struct klist		knote_cache;
	int			knote_cache_cnt;
} __cachealign;

static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct knote *marker);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);
static void	kqueue_wakeup(struct kqueue *kq);
static int	filter_attach(struct knote *kn);
static int	filter_event(struct knote *kn, long hint);

/*
 * MPSAFE
 */
static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};

static void	knote_attach(struct knote *kn);
static void	knote_drop(struct knote *kn);
static void	knote_detach_and_drop(struct knote *kn);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static struct knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
				u_long type);

static struct filterops file_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ FILTEROP_MPSAFE, filt_timerattach, filt_timerdetach, filt_timer };
static struct filterops user_filtops =
	{ FILTEROP_MPSAFE, filt_userattach, filt_userdetach, filt_user };

static int		kq_ncallouts = 0;
static int		kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int		kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of loops for kqueue scan");

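/*
 * Flag a knote as active and enqueue it on its kqueue's pending list,
 * unless it is already queued or currently disabled.  A disabled knote
 * keeps KN_ACTIVE and may be enqueued later on re-enablement.
 */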
#define KNOTE_ACTIVATE(kn) do {						\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ ((val) >> 8)) & (mask))

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_EXCEPT */
	&user_filtops,			/* EVFILT_USER */
};

static struct knote_cache_list	knote_cache_lists[MAXCPU];

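/*
 * Illustrative acquire/release pattern (a sketch, not a fixed API
 * contract): callers scanning a list typically do
 *
 *	again:
 *		SLIST_FOREACH(kn, list, kn_link) {
 *			if (matches(kn) && knote_acquire(kn) == 0)
 *				goto again;	(slept; kn may be stale)
 *		}
 *
 * where matches() stands for whatever test the caller applies.  The
 * scan must restart whenever knote_acquire() returns 0.
 */
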
/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 *
 * Related kq token must be held.
 */
static __inline int
knote_acquire(struct knote *kn)
{
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_WAITING | KN_REPROCESS;
		tsleep(kn, 0, "kqepts", hz);
		/* knote may be stale now */
		return(0);
	}
	kn->kn_status |= KN_PROCESSING;
	return(1);
}

/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Caller must be holding the related kq token
 *
 * Non-zero is returned if the knote is destroyed or detached.
 */
static __inline int
knote_release(struct knote *kn)
{
	int ret;

	while (kn->kn_status & KN_REPROCESS) {
		kn->kn_status &= ~KN_REPROCESS;
		if (kn->kn_status & KN_WAITING) {
			kn->kn_status &= ~KN_WAITING;
			wakeup(kn);
		}
		if (kn->kn_status & KN_DELETING) {
			knote_detach_and_drop(kn);
			return(1);
			/* NOT REACHED */
		}
		if (filter_event(kn, 0))
			KNOTE_ACTIVATE(kn);
	}
	if (kn->kn_status & KN_DETACHED)
		ret = 1;
	else
		ret = 0;
	kn->kn_status &= ~KN_PROCESSING;
	/* kn should not be accessed anymore */
	return ret;
}

static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}

/*
 * MPSAFE
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EOPNOTSUPP);

	kn->kn_fop = &kqread_filtops;
	knote_insert(&kq->kq_kqinfo.ki_note, kn);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	knote_remove(&kq->kq_kqinfo.ki_note, kn);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL) {
		return (ESRCH);
	}
	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		PRELE(p);	/* p cannot be NULL here */
		return (EACCES);
	}

	lwkt_gettoken(&p->p_token);
	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;		/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	knote_insert(&p->p_klist, kn);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	p = kn->kn_ptr.p_proc;
	knote_remove(&p->p_klist, kn);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will be poof,
	 * gone later on.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;
		if ((kn->kn_status & KN_DETACHED) == 0) {
			PHOLD(p);
			knote_remove(&p->p_klist, kn);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
			PRELE(p);
		}
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}

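/*
 * Convert the requested period in kn_sdata (milliseconds) to a tick
 * count and (re)arm the callout to run filt_timerexpire() when it fires.
 */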
static void
filt_timerreset(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);
	calloutp = (struct callout *)kn->kn_hook;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);
}

/*
 * The callout interlocks with callout_terminate() but can still
 * race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct kqueue *kq = kn->kn_kq;

	lwkt_getpooltoken(kq);

	/*
	 * Open-code knote_acquire() since we can't sleep in a callout;
	 * however, we do need to record this expiration.
	 */
	kn->kn_data++;
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_REPROCESS;
		if ((kn->kn_status & KN_DELETING) == 0 &&
		    (kn->kn_flags & EV_ONESHOT) == 0)
			filt_timerreset(kn);
		lwkt_relpooltoken(kq);
		return;
	}
	KASSERT((kn->kn_status & KN_DELETING) == 0,
	    ("acquire a deleting knote %#x", kn->kn_status));
	kn->kn_status |= KN_PROCESSING;

	KNOTE_ACTIVATE(kn);
	if ((kn->kn_flags & EV_ONESHOT) == 0)
		filt_timerreset(kn);

	knote_release(kn);

	lwkt_relpooltoken(kq);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	int prev_ncallouts;

	prev_ncallouts = atomic_fetchadd_int(&kq_ncallouts, 1);
	if (prev_ncallouts >= kq_calloutmax) {
		atomic_subtract_int(&kq_ncallouts, 1);
		kn->kn_hook = NULL;
		return (ENOMEM);
	}

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
	callout_init_mp(calloutp);
	kn->kn_hook = (caddr_t)calloutp;

	filt_timerreset(kn);
	return (0);
}

/*
 * This function is called with the knote flagged locked but it is
 * still possible to race a callout event due to the callback blocking.
 * We must call callout_terminate() instead of callout_stop() to deal
 * with the race.
 */
static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_terminate(calloutp);
	kfree(calloutp, M_KQUEUE);
	atomic_subtract_int(&kq_ncallouts, 1);
}

static int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}

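/*
 * Illustrative userland usage of EVFILT_TIMER: a periodic 500ms timer
 * can be requested with
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *
 * EV_CLEAR is set automatically on attach and kn_data accumulates the
 * number of expirations between scans.
 */
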
/*
 * EVFILT_USER
 */
static int
filt_userattach(struct knote *kn)
{
	kn->kn_hook = NULL;
	if (kn->kn_fflags & NOTE_TRIGGER)
		kn->kn_ptr.hookid = 1;
	else
		kn->kn_ptr.hookid = 0;
	return 0;
}

static void
filt_userdetach(struct knote *kn)
{
	/* nothing to do */
}

static int
filt_user(struct knote *kn, long hint)
{
	return (kn->kn_ptr.hookid);
}

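/*
 * Apply user-requested side effects to an EVFILT_USER knote, either at
 * registration time (EVENT_REGISTER: NOTE_TRIGGER plus the NOTE_FF*
 * fflags control operations) or when the event is being returned to
 * userland (EVENT_PROCESS).
 */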
static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_ptr.hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_sfflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_sfflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		kn->kn_sdata = kev->data;

		/*
		 * This is not the correct use of EV_CLEAR in an event
		 * modification, it should have been passed as a NOTE instead.
		 * But we need to maintain compatibility with Apple & FreeBSD.
		 *
		 * Note however that EV_CLEAR can still be used when doing
		 * the initial registration of the event and works as expected
		 * (clears the event on reception).
		 */
		if (kev->flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			/* kn_data, kn_fflags handled by parent */
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%lu)", type);
		break;
	}
}

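/*
 * Illustrative EVFILT_USER usage from userland (a sketch):
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);		(register)
 *	...
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);		(fire it)
 */
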
/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 *
 * MPSAFE
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
	TAILQ_INIT(&kq->kq_knpend);
	TAILQ_INIT(&kq->kq_knlist);
	kq->kq_count = 0;
	kq->kq_fdp = fdp;
	SLIST_INIT(&kq->kq_kqinfo.ki_note);
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
	struct knote *kn;

	lwkt_getpooltoken(kq);
	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
		if (knote_acquire(kn))
			knote_detach_and_drop(kn);
	}
	lwkt_relpooltoken(kq);

	if (kq->kq_knhash) {
		hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask);
		kq->kq_knhash = NULL;
		kq->kq_knhashmask = 0;
	}
}

/*
 * MPSAFE
 */
int
sys_kqueue(struct kqueue_args *uap)
{
	struct thread *td = curthread;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq, td->td_proc->p_fd);
	fp->f_data = kq;

	fsetfd(kq->kq_fdp, fp, fd);
	uap->sysmsg_result = fd;
	fdrop(fp);
	return (error);
}

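/*
 * From userland the descriptor returned by kqueue() is consumed by
 * kevent(2), e.g. (illustrative):
 *
 *	int kq = kqueue();
 *	struct kevent ev;
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	(register change)
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	(wait for one event)
 */
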
/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct kevent_copyin_args *kap;
	int error;

	kap = (struct kevent_copyin_args *)arg;

	error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
	if (error == 0) {
		kap->ka->eventlist += count;
		*res += count;
	} else {
		*res = -1;
	}

	return (error);
}

/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
	struct kevent_copyin_args *kap;
	int error, count;

	kap = (struct kevent_copyin_args *)arg;

	count = min(kap->ka->nchanges - kap->pchanges, max);
	error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
	if (error == 0) {
		kap->ka->changelist += count;
		kap->pchanges += count;
		*events = count;
	}

	return (error);
}

/*
 * MPSAFE
 */
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
	    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
	    struct timespec *tsp_in)
{
	struct kevent *kevp;
	struct timespec *tsp, ats;
	int i, n, total, error, nerrors = 0;
	int lres;
	int limit = kq_checkloop;
	struct kevent kev[KQ_NEVENTS];
	struct knote marker;
	struct lwkt_token *tok;

	if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec)
		atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);

	tsp = tsp_in;
	*res = 0;

	for (;;) {
		n = 0;
		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
		if (error)
			return error;
		if (n == 0)
			break;
		for (i = 0; i < n; i++) {
			kevp = &kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp);

			/*
			 * If a registration returns an error we
			 * immediately post the error.  The kevent()
			 * call itself will fail with the error if
			 * no space is available for posting.
			 *
			 * Such errors normally bypass the timeout/blocking
			 * code.  However, if the copyoutfn function refuses
			 * to post the error (see sys_poll()), then we
			 * ignore it too.
			 */
			if (error || (kevp->flags & EV_RECEIPT)) {
				kevp->flags = EV_ERROR;
				kevp->data = error;
				lres = *res;
				kevent_copyoutfn(uap, kevp, 1, res);
				if (*res < 0) {
					return error;
				} else if (lres != *res) {
					nevents--;
					nerrors++;
				}
			}
		}
	}
	if (nerrors)
		return 0;

	/*
	 * Acquire/wait for events - setup timeout
	 */
	if (tsp != NULL) {
		if (tsp->tv_sec || tsp->tv_nsec) {
			getnanouptime(&ats);
			timespecadd(tsp, &ats);		/* tsp = target time */
		}
	}

	/*
	 * Loop as required.
	 *
	 * Collect as many events as we can. Sleeping on successive
	 * loops is disabled if copyoutfn has incremented (*res).
	 *
	 * The loop stops if an error occurs, all events have been
	 * scanned (the marker has been reached), or fewer than the
	 * maximum number of events are found.
	 *
	 * The copyoutfn function does not have to increment (*res) in
	 * order for the loop to continue.
	 *
	 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
	 */
	total = 0;
	error = 0;
	marker.kn_filter = EVFILT_MARKER;
	marker.kn_status = KN_PROCESSING;
	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);
	TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
	lwkt_reltoken(tok);
	while ((n = nevents - total) > 0) {
		if (n > KQ_NEVENTS)
			n = KQ_NEVENTS;

		/*
		 * If no events are pending sleep until timeout (if any)
		 * or an event occurs.
		 *
		 * After the sleep completes the marker is moved to the
		 * end of the list, making any received events available
		 * to our scan.
		 */
		if (kq->kq_count == 0 && *res == 0) {
			int timeout;

			if (tsp == NULL) {
				timeout = 0;
			} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
				error = EWOULDBLOCK;
				break;
			} else {
				struct timespec atx = *tsp;

				getnanouptime(&ats);
				timespecsub(&atx, &ats);
				if (atx.tv_sec < 0) {
					error = EWOULDBLOCK;
					break;
				} else {
					timeout = atx.tv_sec > 24 * 60 * 60 ?
					    24 * 60 * 60 * hz :
					    tstohz_high(&atx);
				}
			}

			lwkt_gettoken(tok);
			if (kq->kq_count == 0) {
				kq->kq_sleep_cnt++;
				if (__predict_false(kq->kq_sleep_cnt == 0)) {
					/*
					 * Guard against possible wrapping.  And
					 * set it to 2, so that kqueue_wakeup()
					 * can wake everyone up.
					 */
					kq->kq_sleep_cnt = 2;
				}
				error = tsleep(kq, PCATCH, "kqread", timeout);

				/* don't restart after signals... */
				if (error == ERESTART)
					error = EINTR;
				if (error) {
					lwkt_reltoken(tok);
					break;
				}

				TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
				TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker,
				    kn_tqe);
			}
			lwkt_reltoken(tok);
		}

		/*
		 * Process all received events
		 * Account for all non-spurious events in our total
		 */
		i = kqueue_scan(kq, kev, n, &marker);
		if (i) {
			lres = *res;
			error = kevent_copyoutfn(uap, kev, i, res);
			total += *res - lres;
			if (error)
				break;
		}
		if (limit && --limit == 0)
			panic("kqueue: checkloop failed i=%d", i);

		/*
		 * Normally when fewer events are returned than requested
		 * we can stop.  However, if only spurious events were
		 * collected the copyout will not bump (*res) and we have
		 * to continue.
		 */
		if (i < n && *res)
			break;

		/*
		 * Deal with an edge case where spurious events can cause
		 * a loop to occur without moving the marker.  This can
		 * prevent kqueue_scan() from picking up new events which
		 * race us.  We must be sure to move the marker for this
		 * case.
		 *
		 * NOTE: We do not want to move the marker if events
		 *	 were scanned because normal kqueue operations
		 *	 may reactivate events.  Moving the marker in
		 *	 that case could result in duplicates for the
		 *	 same event.
		 */
		if (i == 0) {
			lwkt_gettoken(tok);
			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
			lwkt_reltoken(tok);
		}
	}
	lwkt_gettoken(tok);
	TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
	lwkt_reltoken(tok);

	/* Timeouts do not return EWOULDBLOCK. */
	if (error == EWOULDBLOCK)
		error = 0;
	return error;
}

/*
 * MPALMOSTSAFE
 */
int
sys_kevent(struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct timespec ts, *tsp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct kevent_copyin_args *kap, ka;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else {
		tsp = NULL;
	}
	fp = holdfp(p->p_fd, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		fdrop(fp);
		return (EBADF);
	}

	kq = (struct kqueue *)fp->f_data;

	kap = &ka;
	kap->ka = uap;
	kap->pchanges = 0;

	error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
			    kevent_copyin, kevent_copyout, tsp);

	fdrop(fp);

	return (error);
}

int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct klist *list = NULL;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	struct thread *td;
	int error = 0;
	struct knote_cache_list *cache_list;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		return (EINVAL);
	}

	if (fops->f_flags & FILTEROP_ISFD) {
		/* validate descriptor */
		fp = holdfp(fdp, kev->ident, -1);
		if (fp == NULL)
			return (EBADF);
	}

	cache_list = &knote_cache_lists[mycpuid];
	if (SLIST_EMPTY(&cache_list->knote_cache)) {
		struct knote *new_kn;

		new_kn = knote_alloc();
		crit_enter();
		SLIST_INSERT_HEAD(&cache_list->knote_cache, new_kn, kn_link);
		cache_list->knote_cache_cnt++;
		crit_exit();
	}

	td = curthread;
	lwkt_getpooltoken(kq);

	/*
	 * Make sure that only one thread can register events on this kqueue,
	 * so that we would not suffer any race, even if the registration
	 * blocked, i.e. kq token was released, and the kqueue was shared
	 * between threads (this should be rare though).
	 */
	while (__predict_false(kq->kq_regtd != NULL && kq->kq_regtd != td)) {
		kq->kq_state |= KQ_REGWAIT;
		tsleep(&kq->kq_regtd, 0, "kqreg", 0);
	}
	if (__predict_false(kq->kq_regtd != NULL)) {
		/* Recursive calling of kqueue_register() */
		td = NULL;
	} else {
		/* Owner of the kq_regtd, i.e. td != NULL */
		kq->kq_regtd = td;
	}

	if (fp != NULL) {
		list = &fp->f_klist;
	} else if (kq->kq_knhashmask) {
		list = &kq->kq_knhash[
		    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
	}
	if (list != NULL) {
		lwkt_getpooltoken(list);
again:
		SLIST_FOREACH(kn, list, kn_link) {
			if (kn->kn_kq == kq &&
			    kn->kn_filter == kev->filter &&
			    kn->kn_id == kev->ident) {
				if (knote_acquire(kn) == 0)
					goto again;
				break;
			}
		}
		lwkt_relpooltoken(list);
	}

	/*
	 * NOTE: At this point if kn is non-NULL we will have acquired
	 *	 it and set KN_PROCESSING.
	 */
	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			crit_enter();
			kn = SLIST_FIRST(&cache_list->knote_cache);
			if (kn == NULL) {
				crit_exit();
				kn = knote_alloc();
			} else {
				SLIST_REMOVE_HEAD(&cache_list->knote_cache,
				    kn_link);
				cache_list->knote_cache_cnt--;
				crit_exit();
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			/*
			 * KN_PROCESSING prevents the knote from getting
			 * ripped out from under us while we are trying
			 * to attach it, in case the attach blocks.
			 */
			kn->kn_status = KN_PROCESSING;
			knote_attach(kn);
			if ((error = filter_attach(kn)) != 0) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
				knote_drop(kn);
				goto done;
			}

			/*
			 * Interlock against close races which either tried
			 * to remove our knote while we were blocked or missed
			 * it entirely prior to our attachment.  We do not
			 * want to end up with a knote on a closed descriptor.
			 */
			if ((fops->f_flags & FILTEROP_ISFD) &&
			    checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			KKASSERT(kn->kn_status & KN_PROCESSING);
			if (fops == &user_filtops) {
				filt_usertouch(kn, kev, EVENT_REGISTER);
			} else {
				kn->kn_sfflags = kev->fflags;
				kn->kn_sdata = kev->data;
				kn->kn_kevent.udata = kev->udata;
			}
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	} else if (kev->flags & EV_DELETE) {
		/*
		 * Delete the existing knote
		 */
		knote_detach_and_drop(kn);
		goto done;
	} else {
		/*
		 * Modify an existing event.
		 *
		 * The user may change some filter values after the
		 * initial EV_ADD, but doing so will not reset any
		 * filters which have already been triggered.
		 */
		KKASSERT(kn->kn_status & KN_PROCESSING);
		if (fops == &user_filtops) {
			filt_usertouch(kn, kev, EVENT_REGISTER);
		} else {
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	}

	/*
	 * Disablement does not deactivate a knote here.
	 */
	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	/*
	 * Re-enablement may have to immediately enqueue an active knote.
	 */
	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0)) {
			knote_enqueue(kn);
		}
	}

	/*
	 * Handle any required reprocessing
	 */
	knote_release(kn);
	/* kn may be invalid now */

done:
	if (td != NULL) { /* Owner of the kq_regtd */
		kq->kq_regtd = NULL;
		if (__predict_false(kq->kq_state & KQ_REGWAIT)) {
			kq->kq_state &= ~KQ_REGWAIT;
			wakeup(&kq->kq_regtd);
		}
	}
	lwkt_relpooltoken(kq);
	if (fp != NULL)
		fdrop(fp);
	return (error);
}

/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
	    struct knote *marker)
{
	struct knote *kn, local_marker;
	int total;

	total = 0;
	local_marker.kn_filter = EVFILT_MARKER;
	local_marker.kn_status = KN_PROCESSING;

	lwkt_getpooltoken(kq);

	/*
	 * Collect events.
	 */
	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
	while (count) {
		kn = TAILQ_NEXT(&local_marker, kn_tqe);
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Marker reached, we are done */
			if (kn == marker)
				break;

			/* Move local marker past some other thread's marker */
			kn = TAILQ_NEXT(kn, kn_tqe);
			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
			continue;
		}

		/*
		 * We can't skip a knote undergoing processing, otherwise
		 * we risk not returning it when the user process expects
		 * it should be returned.  Sleep and retry.
		 */
		if (knote_acquire(kn) == 0)
			continue;

		/*
		 * Remove the event for processing.
		 *
		 * WARNING!  We must leave KN_QUEUED set to prevent the
		 *	     event from being KNOTE_ACTIVATE()d while
		 *	     the queue state is in limbo, in case we
		 *	     block.
		 */
		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
		kq->kq_count--;

		/*
		 * We have to deal with an extremely important race against
		 * file descriptor close()s here.  The file descriptor can
		 * disappear MPSAFE, and there is a small window of
		 * opportunity between that and the call to knote_fdclose().
		 *
		 * If we hit that window here while doselect or dopoll is
		 * trying to delete a spurious event they will not be able
		 * to match up the event against a knote and will go haywire.
		 */
		if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
		    checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident, kn->kn_fp)) {
			kn->kn_status |= KN_DELETING | KN_REPROCESS;
		}

		if (kn->kn_status & KN_DISABLED) {
			/*
			 * If disabled we ensure the event is not queued
			 * but leave its active bit set.  On re-enablement
			 * the event may be immediately triggered.
			 */
			kn->kn_status &= ~KN_QUEUED;
		} else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
			   (kn->kn_status & KN_DELETING) == 0 &&
			   filter_event(kn, 0) == 0) {
			/*
			 * If not running in one-shot mode and the event
			 * is no longer present we ensure it is removed
			 * from the queue and ignore it.
			 */
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
		} else {
			/*
			 * Post the event
			 */
			if (kn->kn_fop == &user_filtops)
				filt_usertouch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			++kevp;
			++total;
			--count;

			if (kn->kn_flags & EV_ONESHOT) {
				kn->kn_status &= ~KN_QUEUED;
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			} else {
				if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
					if (kn->kn_flags & EV_CLEAR) {
						kn->kn_data = 0;
						kn->kn_fflags = 0;
					}
					if (kn->kn_flags & EV_DISPATCH) {
						kn->kn_status |= KN_DISABLED;
					}
					kn->kn_status &= ~(KN_QUEUED |
							   KN_ACTIVE);
				} else {
					TAILQ_INSERT_TAIL(&kq->kq_knpend, kn,
							  kn_tqe);
					kq->kq_count++;
				}
			}
		}

		/*
		 * Handle any post-processing states
		 */
		knote_release(kn);
	}
	TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

	lwkt_relpooltoken(kq);
	return (total);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 *
 * MPSAFE
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPSAFE
 */
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPALMOSTSAFE
 */
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	     struct ucred *cred, struct sysmsg *msg)
{
	struct kqueue *kq;
	int error;

	kq = (struct kqueue *)fp->f_data;
	lwkt_getpooltoken(kq);
	switch(com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_relpooltoken(kq);
	return (error);
}

/*
 * MPSAFE
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

/*
 * MPSAFE
 */
static int
kqueue_close(struct file *fp)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	kqueue_terminate(kq);

	fp->f_data = NULL;
	funsetown(&kq->kq_sigio);

	kfree(kq, M_KQUEUE);
	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_sleep_cnt) {
		u_int sleep_cnt = kq->kq_sleep_cnt;

		kq->kq_sleep_cnt = 0;
		if (sleep_cnt == 1)
			wakeup_one(kq);
		else
			wakeup(kq);
	}
	KNOTE(&kq->kq_kqinfo.ki_note, 0);
}

/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static int
filter_attach(struct knote *kn)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_attach(kn);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_attach(kn);
		rel_mplock();
	}
	return (ret);
}

/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static void
knote_detach_and_drop(struct knote *kn)
{
	kn->kn_status |= KN_DELETING | KN_REPROCESS;
	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		kn->kn_fop->f_detach(kn);
	} else {
		get_mplock();
		kn->kn_fop->f_detach(kn);
		rel_mplock();
	}
	knote_drop(kn);
}

/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 *
 * Caller must be holding the related kq token
 */
static int
filter_event(struct knote *kn, long hint)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_event(kn, hint);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_event(kn, hint);
		rel_mplock();
	}
	return (ret);
}

/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * sticky.
 */
void
knote(struct klist *list, long hint)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(list);
restart:
	SLIST_FOREACH(kn, list, kn_next) {
		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);

		/* temporary verification hack */
		SLIST_FOREACH(kntmp, list, kn_next) {
			if (kn == kntmp)
				break;
		}
		if (kn != kntmp || kn->kn_kq != kq) {
			lwkt_relpooltoken(kq);
			goto restart;
		}

		if (kn->kn_status & KN_PROCESSING) {
			/*
			 * Someone else is processing the knote, ask the
			 * other thread to reprocess it and don't mess
			 * with it otherwise.
			 */
			if (hint == 0) {
				kn->kn_status |= KN_REPROCESS;
				lwkt_relpooltoken(kq);
				continue;
			}

			/*
			 * If the hint is non-zero we have to wait or risk
			 * losing the state the caller is trying to update.
			 *
			 * XXX This is a real problem, certain process
			 *     and signal filters will bump kn_data for
			 *     already-processed notes more than once if
			 *     we restart the list scan.  FIXME.
			 */
			kn->kn_status |= KN_WAITING | KN_REPROCESS;
			tsleep(kn, 0, "knotec", hz);
			lwkt_relpooltoken(kq);
			goto restart;
		}

		/*
		 * Become the reprocessing master ourselves.
		 *
		 * If hint is non-zero running the event is mandatory
		 * when not deleting so do it whether reprocessing is
		 * set or not.
		 */
		kn->kn_status |= KN_PROCESSING;
		if ((kn->kn_status & KN_DELETING) == 0) {
			if (filter_event(kn, hint))
				KNOTE_ACTIVATE(kn);
		}
		if (knote_release(kn)) {
			lwkt_relpooltoken(kq);
			goto restart;
		}
		lwkt_relpooltoken(kq);
	}
	lwkt_relpooltoken(list);
}

/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_INSERT_HEAD(klist, kn, kn_next);
	lwkt_relpooltoken(klist);
}

/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_REMOVE(klist, kn, knote, kn_next);
	lwkt_relpooltoken(klist);
}

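/*
 * Move every knote on the src kqinfo over to the dst kqinfo, retargeting
 * each to the given filterops and hook.  Knotes that cannot be acquired
 * are left for the next pass; the loop runs until src is empty.
 */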
void
knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
		    struct filterops *ops, void *hook)
{
	struct kqueue *kq;
	struct knote *kn;

	lwkt_getpooltoken(&src->ki_note);
	lwkt_getpooltoken(&dst->ki_note);
	while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);
		if (SLIST_FIRST(&src->ki_note) != kn || kn->kn_kq != kq) {
			lwkt_relpooltoken(kq);
			continue;
		}
		if (knote_acquire(kn)) {
			knote_remove(&src->ki_note, kn);
			kn->kn_fop = ops;
			kn->kn_hook = hook;
			knote_insert(&dst->ki_note, kn);
			knote_release(kn);
			/* kn may be invalid now */
		}
		lwkt_relpooltoken(kq);
	}
	lwkt_relpooltoken(&dst->ki_note);
	lwkt_relpooltoken(&src->ki_note);
}

/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(&fp->f_klist);
restart:
	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
			kq = kn->kn_kq;
			lwkt_getpooltoken(kq);

			/* temporary verification hack */
			SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
				if (kn == kntmp)
					break;
			}
			if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
			    kn->kn_id != fd || kn->kn_kq != kq) {
				lwkt_relpooltoken(kq);
				goto restart;
			}
			if (knote_acquire(kn))
				knote_detach_and_drop(kn);
			lwkt_relpooltoken(kq);
			goto restart;
		}
	}
	lwkt_relpooltoken(&fp->f_klist);
}

/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_attach(struct knote *kn)
{
	struct klist *list;
	struct kqueue *kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		KKASSERT(kn->kn_fp);
		list = &kn->kn_fp->f_klist;
	} else {
		if (kq->kq_knhashmask == 0)
			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
						 &kq->kq_knhashmask);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	lwkt_getpooltoken(list);
	SLIST_INSERT_HEAD(list, kn, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
}

/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_drop(struct knote *kn)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		list = &kn->kn_fp->f_klist;
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	lwkt_getpooltoken(list);
	SLIST_REMOVE(list, kn, knote, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		fdrop(kn->kn_fp);
		kn->kn_fp = NULL;
	}
	knote_free(kn);
}

/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	++kq->kq_count;

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);

	kqueue_wakeup(kq);
}

/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static struct knote *
knote_alloc(void)
{
	return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
}

static void
knote_free(struct knote *kn)
{
	struct knote_cache_list *cache_list;

	cache_list = &knote_cache_lists[mycpuid];
	if (cache_list->knote_cache_cnt < KNOTE_CACHE_MAX) {
		crit_enter();
		SLIST_INSERT_HEAD(&cache_list->knote_cache, kn, kn_link);
		cache_list->knote_cache_cnt++;
		crit_exit();
		return;
	}
	kfree(kn, M_KQUEUE);
}