xref: /dragonfly/sys/kern/kern_event.c (revision 9f47dde1)
1 /*-
2  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
27  */
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/kernel.h>
32 #include <sys/proc.h>
33 #include <sys/malloc.h>
34 #include <sys/unistd.h>
35 #include <sys/file.h>
36 #include <sys/lock.h>
37 #include <sys/fcntl.h>
38 #include <sys/queue.h>
39 #include <sys/event.h>
40 #include <sys/eventvar.h>
41 #include <sys/protosw.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/stat.h>
45 #include <sys/sysctl.h>
46 #include <sys/sysproto.h>
47 #include <sys/thread.h>
48 #include <sys/uio.h>
49 #include <sys/signalvar.h>
50 #include <sys/filio.h>
51 #include <sys/ktr.h>
52 #include <sys/spinlock.h>
53 
54 #include <sys/thread2.h>
55 #include <sys/file2.h>
56 #include <sys/mplock2.h>
57 #include <sys/spinlock2.h>
58 
59 #define EVENT_REGISTER	1
60 #define EVENT_PROCESS	2
61 
62 MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
63 
64 struct kevent_copyin_args {
65 	struct kevent_args	*ka;
66 	int			pchanges;
67 };
68 
69 #define KNOTE_CACHE_MAX		8
70 
71 struct knote_cache_list {
72 	struct klist		knote_cache;
73 	int			knote_cache_cnt;
74 } __cachealign;
75 
76 static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
77 		    struct knote *marker, int closedcounter);
78 static int 	kqueue_read(struct file *fp, struct uio *uio,
79 		    struct ucred *cred, int flags);
80 static int	kqueue_write(struct file *fp, struct uio *uio,
81 		    struct ucred *cred, int flags);
82 static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
83 		    struct ucred *cred, struct sysmsg *msg);
84 static int 	kqueue_kqfilter(struct file *fp, struct knote *kn);
85 static int 	kqueue_stat(struct file *fp, struct stat *st,
86 		    struct ucred *cred);
87 static int 	kqueue_close(struct file *fp);
88 static void	kqueue_wakeup(struct kqueue *kq);
89 static int	filter_attach(struct knote *kn);
90 static int	filter_event(struct knote *kn, long hint);
91 
92 /*
93  * MPSAFE
94  */
95 static struct fileops kqueueops = {
96 	.fo_read = kqueue_read,
97 	.fo_write = kqueue_write,
98 	.fo_ioctl = kqueue_ioctl,
99 	.fo_kqfilter = kqueue_kqfilter,
100 	.fo_stat = kqueue_stat,
101 	.fo_close = kqueue_close,
102 	.fo_shutdown = nofo_shutdown
103 };
104 
105 static void 	knote_attach(struct knote *kn);
106 static void 	knote_drop(struct knote *kn);
107 static void	knote_detach_and_drop(struct knote *kn);
108 static void 	knote_enqueue(struct knote *kn);
109 static void 	knote_dequeue(struct knote *kn);
110 static struct knote *knote_alloc(void);
111 static void 	knote_free(struct knote *kn);
112 
113 static void	precise_sleep_intr(systimer_t info, int in_ipi,
114 				   struct intrframe *frame);
115 static int	precise_sleep(void *ident, int flags, const char *wmesg,
116 			      int us);
117 
118 static void	filt_kqdetach(struct knote *kn);
119 static int	filt_kqueue(struct knote *kn, long hint);
120 static int	filt_procattach(struct knote *kn);
121 static void	filt_procdetach(struct knote *kn);
122 static int	filt_proc(struct knote *kn, long hint);
123 static int	filt_fileattach(struct knote *kn);
124 static void	filt_timerexpire(void *knx);
125 static int	filt_timerattach(struct knote *kn);
126 static void	filt_timerdetach(struct knote *kn);
127 static int	filt_timer(struct knote *kn, long hint);
128 static int	filt_userattach(struct knote *kn);
129 static void	filt_userdetach(struct knote *kn);
130 static int	filt_user(struct knote *kn, long hint);
131 static void	filt_usertouch(struct knote *kn, struct kevent *kev,
132 				u_long type);
133 static int	filt_fsattach(struct knote *kn);
134 static void	filt_fsdetach(struct knote *kn);
135 static int	filt_fs(struct knote *kn, long hint);
136 
137 static struct filterops file_filtops =
138 	{ FILTEROP_ISFD | FILTEROP_MPSAFE, filt_fileattach, NULL, NULL };
139 static struct filterops kqread_filtops =
140 	{ FILTEROP_ISFD | FILTEROP_MPSAFE, NULL, filt_kqdetach, filt_kqueue };
141 static struct filterops proc_filtops =
142 	{ FILTEROP_MPSAFE, filt_procattach, filt_procdetach, filt_proc };
143 static struct filterops timer_filtops =
144 	{ FILTEROP_MPSAFE, filt_timerattach, filt_timerdetach, filt_timer };
145 static struct filterops user_filtops =
146 	{ FILTEROP_MPSAFE, filt_userattach, filt_userdetach, filt_user };
147 static struct filterops fs_filtops =
148 	{ FILTEROP_MPSAFE, filt_fsattach, filt_fsdetach, filt_fs };
149 
150 static int 		kq_ncallouts = 0;
151 static int 		kq_calloutmax = 65536;
152 SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
153     &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
154 static int		kq_checkloop = 1000000;
155 SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
156     &kq_checkloop, 0, "Maximum number of loops for kqueue scan");
157 static int		kq_sleep_threshold = 20000;
158 SYSCTL_INT(_kern, OID_AUTO, kq_sleep_threshold, CTLFLAG_RW,
159     &kq_sleep_threshold, 0, "Minimum sleep duration without busy-looping");
160 
161 #define KNOTE_ACTIVATE(kn) do { 					\
162 	kn->kn_status |= KN_ACTIVE;					\
163 	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
164 		knote_enqueue(kn);					\
165 } while(0)
166 
167 #define	KN_HASHSIZE		64		/* XXX should be tunable */
168 #define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
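
/*
 * Worked example (illustrative, not in the original source): with the
 * default KN_HASHSIZE of 64, hashinit() yields a mask of 63, so
 * KN_HASH(0x1234, 63) = (0x1234 ^ 0x12) & 0x3f = 0x1226 & 0x3f = 0x26.
 */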
169 
170 extern struct filterops aio_filtops;
171 extern struct filterops sig_filtops;
172 
173 /*
174  * Table for for all system-defined filters.
175  */
176 static struct filterops *sysfilt_ops[] = {
177 	&file_filtops,			/* EVFILT_READ */
178 	&file_filtops,			/* EVFILT_WRITE */
179 	&aio_filtops,			/* EVFILT_AIO */
180 	&file_filtops,			/* EVFILT_VNODE */
181 	&proc_filtops,			/* EVFILT_PROC */
182 	&sig_filtops,			/* EVFILT_SIGNAL */
183 	&timer_filtops,			/* EVFILT_TIMER */
184 	&file_filtops,			/* EVFILT_EXCEPT */
185 	&user_filtops,			/* EVFILT_USER */
186 	&fs_filtops,			/* EVFILT_FS */
187 };
188 
189 static struct knote_cache_list	knote_cache_lists[MAXCPU];
190 
191 /*
192  * Acquire a knote, return non-zero on success, 0 on failure.
193  *
194  * If we cannot acquire the knote we sleep and return 0.  The knote
195  * may be stale on return in this case and the caller must restart
196  * whatever loop they are in.
197  *
198  * Related kq token must be held.
199  */
200 static __inline int
201 knote_acquire(struct knote *kn)
202 {
203 	if (kn->kn_status & KN_PROCESSING) {
204 		kn->kn_status |= KN_WAITING | KN_REPROCESS;
205 		tsleep(kn, 0, "kqepts", hz);
206 		/* knote may be stale now */
207 		return(0);
208 	}
209 	kn->kn_status |= KN_PROCESSING;
210 	return(1);
211 }
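
/*
 * Illustrative sketch (not in the original source) of the canonical
 * caller pattern: since a zero return means we slept and the knote may
 * be stale, callers restart their scan from the top, e.g.:
 *
 *	again:
 *		SLIST_FOREACH(kn, list, kn_link) {
 *			if (kn->kn_id == ident) {
 *				if (knote_acquire(kn) == 0)
 *					goto again;
 *				break;
 *			}
 *		}
 *
 * (kn->kn_id == ident stands in for whatever match the caller needs.)
 * kqueue_register() below uses exactly this retry loop.
 */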
212 
213 /*
214  * Release an acquired knote, clearing KN_PROCESSING and handling any
215  * KN_REPROCESS events.
216  *
217  * Caller must be holding the related kq token
218  *
219  * Non-zero is returned if the knote is destroyed or detached.
220  */
221 static __inline int
222 knote_release(struct knote *kn)
223 {
224 	int ret;
225 
226 	while (kn->kn_status & KN_REPROCESS) {
227 		kn->kn_status &= ~KN_REPROCESS;
228 		if (kn->kn_status & KN_WAITING) {
229 			kn->kn_status &= ~KN_WAITING;
230 			wakeup(kn);
231 		}
232 		if (kn->kn_status & KN_DELETING) {
233 			knote_detach_and_drop(kn);
234 			return(1);
235 			/* NOT REACHED */
236 		}
237 		if (filter_event(kn, 0))
238 			KNOTE_ACTIVATE(kn);
239 	}
240 	if (kn->kn_status & KN_DETACHED)
241 		ret = 1;
242 	else
243 		ret = 0;
244 	kn->kn_status &= ~KN_PROCESSING;
245 	/* kn should not be accessed anymore */
246 	return ret;
247 }
248 
249 static int
250 filt_fileattach(struct knote *kn)
251 {
252 	return (fo_kqfilter(kn->kn_fp, kn));
253 }
254 
255 /*
256  * MPSAFE
257  */
258 static int
259 kqueue_kqfilter(struct file *fp, struct knote *kn)
260 {
261 	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
262 
263 	if (kn->kn_filter != EVFILT_READ)
264 		return (EOPNOTSUPP);
265 
266 	kn->kn_fop = &kqread_filtops;
267 	knote_insert(&kq->kq_kqinfo.ki_note, kn);
268 	return (0);
269 }
270 
271 static void
272 filt_kqdetach(struct knote *kn)
273 {
274 	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
275 
276 	knote_remove(&kq->kq_kqinfo.ki_note, kn);
277 }
278 
279 /*ARGSUSED*/
280 static int
281 filt_kqueue(struct knote *kn, long hint)
282 {
283 	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
284 
285 	kn->kn_data = kq->kq_count;
286 	return (kn->kn_data > 0);
287 }
288 
289 static int
290 filt_procattach(struct knote *kn)
291 {
292 	struct proc *p;
293 	int immediate;
294 
295 	immediate = 0;
296 	p = pfind(kn->kn_id);
297 	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
298 		p = zpfind(kn->kn_id);
299 		immediate = 1;
300 	}
301 	if (p == NULL) {
302 		return (ESRCH);
303 	}
304 	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
305 		if (p)
306 			PRELE(p);
307 		return (EACCES);
308 	}
309 
310 	lwkt_gettoken(&p->p_token);
311 	kn->kn_ptr.p_proc = p;
312 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
313 
314 	/*
315 	 * internal flag indicating registration done by kernel
316 	 */
317 	if (kn->kn_flags & EV_FLAG1) {
318 		kn->kn_data = kn->kn_sdata;		/* ppid */
319 		kn->kn_fflags = NOTE_CHILD;
320 		kn->kn_flags &= ~EV_FLAG1;
321 	}
322 
323 	knote_insert(&p->p_klist, kn);
324 
325 	/*
326 	 * Immediately activate any exit notes if the target process is a
327 	 * zombie.  This is necessary to handle the case where the target
328  * process, e.g. a child, dies before the kevent is registered.
329 	 */
330 	if (immediate && filt_proc(kn, NOTE_EXIT))
331 		KNOTE_ACTIVATE(kn);
332 	lwkt_reltoken(&p->p_token);
333 	PRELE(p);
334 
335 	return (0);
336 }
337 
338 /*
339  * The knote may be attached to a different process, which may exit,
340  * leaving nothing for the knote to be attached to.  So when the process
341  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
342  * it will be deleted when read out.  However, as part of the knote deletion,
343  * this routine is called, so a check is needed to avoid actually performing
344  * a detach, because the original process does not exist any more.
345  */
346 static void
347 filt_procdetach(struct knote *kn)
348 {
349 	struct proc *p;
350 
351 	if (kn->kn_status & KN_DETACHED)
352 		return;
353 	p = kn->kn_ptr.p_proc;
354 	knote_remove(&p->p_klist, kn);
355 }
356 
357 static int
358 filt_proc(struct knote *kn, long hint)
359 {
360 	u_int event;
361 
362 	/*
363 	 * mask off extra data
364 	 */
365 	event = (u_int)hint & NOTE_PCTRLMASK;
366 
367 	/*
368 	 * if the user is interested in this event, record it.
369 	 */
370 	if (kn->kn_sfflags & event)
371 		kn->kn_fflags |= event;
372 
373 	/*
374 	 * Process is gone, so flag the event as finished.  Detach the
375  * knote from the process now, because the process will be
376  * gone later on.
377 	 */
378 	if (event == NOTE_EXIT) {
379 		struct proc *p = kn->kn_ptr.p_proc;
380 		if ((kn->kn_status & KN_DETACHED) == 0) {
381 			PHOLD(p);
382 			knote_remove(&p->p_klist, kn);
383 			kn->kn_status |= KN_DETACHED;
384 			kn->kn_data = p->p_xstat;
385 			kn->kn_ptr.p_proc = NULL;
386 			PRELE(p);
387 		}
388 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
389 		return (1);
390 	}
391 
392 	/*
393 	 * process forked, and user wants to track the new process,
394 	 * so attach a new knote to it, and immediately report an
395 	 * event with the parent's pid.
396 	 */
397 	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
398 		struct kevent kev;
399 		int error;
400 		int n;
401 
402 		/*
403 		 * register knote with new process.
404 		 */
405 		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
406 		kev.filter = kn->kn_filter;
407 		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
408 		kev.fflags = kn->kn_sfflags;
409 		kev.data = kn->kn_id;			/* parent */
410 		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
411 		n = 1;
412 		error = kqueue_register(kn->kn_kq, &kev, &n);
413 		if (error)
414 			kn->kn_fflags |= NOTE_TRACKERR;
415 	}
416 
417 	return (kn->kn_fflags != 0);
418 }
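
/*
 * Example (illustrative userland sketch, not part of this file):
 * tracking a child and all of its future descendants via NOTE_TRACK:
 *
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
 *	       NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * kev, kq and pid are assumed to be set up by the caller.  On each fork
 * the EV_FLAG1 re-registration above attaches a knote to the new child,
 * which is then reported with NOTE_CHILD set and the parent's pid in
 * the returned data field.
 */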
419 
420 static void
421 filt_timerreset(struct knote *kn)
422 {
423 	struct callout *calloutp;
424 	struct timeval tv;
425 	int tticks;
426 
427 	tv.tv_sec = kn->kn_sdata / 1000;
428 	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
429 	tticks = tvtohz_high(&tv);
430 	calloutp = (struct callout *)kn->kn_hook;
431 	callout_reset(calloutp, tticks, filt_timerexpire, kn);
432 }
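
/*
 * Worked example (illustrative): kn_sdata = 1500 (milliseconds) above
 * splits into tv = { .tv_sec = 1, .tv_usec = 500000 }, which
 * tvtohz_high() converts to roughly 150 ticks at hz = 100 (it rounds
 * up to at least one tick).
 */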
433 
434 /*
435  * The callout interlocks with callout_stop() but can still
436  * race a deletion so if KN_DELETING is set we just don't touch
437  * the knote.
438  */
439 static void
440 filt_timerexpire(void *knx)
441 {
442 	struct knote *kn = knx;
443 	struct kqueue *kq = kn->kn_kq;
444 
445 	lwkt_getpooltoken(kq);
446 
447 	/*
448 	 * Open-code knote_acquire(), since we can't sleep in a callout;
449 	 * however, we do need to record this expiration.
450 	 */
451 	kn->kn_data++;
452 	if (kn->kn_status & KN_PROCESSING) {
453 		kn->kn_status |= KN_REPROCESS;
454 		if ((kn->kn_status & KN_DELETING) == 0 &&
455 		    (kn->kn_flags & EV_ONESHOT) == 0)
456 			filt_timerreset(kn);
457 		lwkt_relpooltoken(kq);
458 		return;
459 	}
460 	KASSERT((kn->kn_status & KN_DELETING) == 0,
461 	    ("acquire a deleting knote %#x", kn->kn_status));
462 	kn->kn_status |= KN_PROCESSING;
463 
464 	KNOTE_ACTIVATE(kn);
465 	if ((kn->kn_flags & EV_ONESHOT) == 0)
466 		filt_timerreset(kn);
467 
468 	knote_release(kn);
469 
470 	lwkt_relpooltoken(kq);
471 }
472 
473 /*
474  * data contains amount of time to sleep, in milliseconds
475  */
476 static int
477 filt_timerattach(struct knote *kn)
478 {
479 	struct callout *calloutp;
480 	int prev_ncallouts;
481 
482 	prev_ncallouts = atomic_fetchadd_int(&kq_ncallouts, 1);
483 	if (prev_ncallouts >= kq_calloutmax) {
484 		atomic_subtract_int(&kq_ncallouts, 1);
485 		kn->kn_hook = NULL;
486 		return (ENOMEM);
487 	}
488 
489 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
490 	calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
491 	callout_init_mp(calloutp);
492 	kn->kn_hook = (caddr_t)calloutp;
493 
494 	filt_timerreset(kn);
495 	return (0);
496 }
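
/*
 * Example (illustrative userland sketch, not part of this file): a
 * periodic 500ms timer is registered via
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * where kq is an assumed kqueue descriptor.  Because EV_CLEAR is forced
 * above, the expiration count accumulated in kn_data by
 * filt_timerexpire() is reset each time the event is returned.
 */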
497 
498 /*
499  * This function is called with the knote flagged as locked, but it is
500  * still possible to race a callout event due to the callback blocking.
501  *
502  * NOTE: Even though the note is locked via KN_PROCESSING, filt_timerexpire()
503  *	 can still race us and requeue the callout due to potential token
504  *	 cycling from various blocking conditions.  If this situation arises,
505  *	 callout_stop_sync() will always return non-zero and we can simply
506  *	 retry the operation.
507  */
508 static void
509 filt_timerdetach(struct knote *kn)
510 {
511 	struct callout *calloutp;
512 
513 	calloutp = (struct callout *)kn->kn_hook;
514 	while (callout_stop_sync(calloutp)) {
515 		kprintf("debug: kqueue timer race fixed, pid %d %s\n",
516 			(curthread->td_proc ? curthread->td_proc->p_pid : 0),
517 			curthread->td_comm);
518 	}
519 	kn->kn_hook = NULL;
520 	kfree(calloutp, M_KQUEUE);
521 	atomic_subtract_int(&kq_ncallouts, 1);
522 }
523 
524 static int
525 filt_timer(struct knote *kn, long hint)
526 {
527 	return (kn->kn_data != 0);
528 }
529 
530 /*
531  * EVFILT_USER
532  */
533 static int
534 filt_userattach(struct knote *kn)
535 {
536 	u_int ffctrl;
537 
538 	kn->kn_hook = NULL;
539 	if (kn->kn_sfflags & NOTE_TRIGGER)
540 		kn->kn_ptr.hookid = 1;
541 	else
542 		kn->kn_ptr.hookid = 0;
543 
544 	ffctrl = kn->kn_sfflags & NOTE_FFCTRLMASK;
545 	kn->kn_sfflags &= NOTE_FFLAGSMASK;
546 	switch (ffctrl) {
547 	case NOTE_FFNOP:
548 		break;
549 
550 	case NOTE_FFAND:
551 		kn->kn_fflags &= kn->kn_sfflags;
552 		break;
553 
554 	case NOTE_FFOR:
555 		kn->kn_fflags |= kn->kn_sfflags;
556 		break;
557 
558 	case NOTE_FFCOPY:
559 		kn->kn_fflags = kn->kn_sfflags;
560 		break;
561 
562 	default:
563 		/* XXX Return error? */
564 		break;
565 	}
566 	/* We just happen to copy this value as well. Undocumented. */
567 	kn->kn_data = kn->kn_sdata;
568 
569 	return 0;
570 }
571 
572 static void
573 filt_userdetach(struct knote *kn)
574 {
575 	/* nothing to do */
576 }
577 
578 static int
579 filt_user(struct knote *kn, long hint)
580 {
581 	return (kn->kn_ptr.hookid);
582 }
583 
584 static void
585 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
586 {
587 	u_int ffctrl;
588 
589 	switch (type) {
590 	case EVENT_REGISTER:
591 		if (kev->fflags & NOTE_TRIGGER)
592 			kn->kn_ptr.hookid = 1;
593 
594 		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
595 		kev->fflags &= NOTE_FFLAGSMASK;
596 		switch (ffctrl) {
597 		case NOTE_FFNOP:
598 			break;
599 
600 		case NOTE_FFAND:
601 			kn->kn_fflags &= kev->fflags;
602 			break;
603 
604 		case NOTE_FFOR:
605 			kn->kn_fflags |= kev->fflags;
606 			break;
607 
608 		case NOTE_FFCOPY:
609 			kn->kn_fflags = kev->fflags;
610 			break;
611 
612 		default:
613 			/* XXX Return error? */
614 			break;
615 		}
616 		/* We just happen to copy this value as well. Undocumented. */
617 		kn->kn_data = kev->data;
618 
619 		/*
620 		 * This is not the correct use of EV_CLEAR in an event
621 		 * modification, it should have been passed as a NOTE instead.
622 		 * But we need to maintain compatibility with Apple & FreeBSD.
623 		 *
624 		 * Note however that EV_CLEAR can still be used when doing
625 		 * the initial registration of the event and works as expected
626 		 * (clears the event on reception).
627 		 */
628 		if (kev->flags & EV_CLEAR) {
629 			kn->kn_ptr.hookid = 0;
630 			/*
631 			 * Clearing kn->kn_data is fine, since it gets set
632 			 * every time anyway. We just shouldn't clear
633 			 * kn->kn_fflags here, since that would limit the
634 			 * possible uses of this API. NOTE_FFAND or
635 			 * NOTE_FFCOPY should be used for explicitly clearing
636 			 * kn->kn_fflags.
637 			 */
638 			kn->kn_data = 0;
639 		}
640 		break;
641 
642 	case EVENT_PROCESS:
643 		*kev = kn->kn_kevent;
644 		kev->fflags = kn->kn_fflags;
645 		kev->data = kn->kn_data;
646 		if (kn->kn_flags & EV_CLEAR) {
647 			kn->kn_ptr.hookid = 0;
648 			/* kn_data, kn_fflags handled by parent */
649 		}
650 		break;
651 
652 	default:
653 		panic("filt_usertouch() - invalid type (%ld)", type);
654 		break;
655 	}
656 }
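
/*
 * Example (illustrative userland sketch, not part of this file): one
 * thread registers an EVFILT_USER event and another triggers it:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(register: hookid = 0)
 *
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(trigger: hookid = 1)
 *
 * kq is an assumed kqueue descriptor.  Because EV_CLEAR was set at
 * registration, EVENT_PROCESS above resets hookid when the event is
 * harvested, re-arming the trigger.
 */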
657 
658 /*
659  * EVFILT_FS
660  */
661 struct klist fs_klist = SLIST_HEAD_INITIALIZER(&fs_klist);
662 
663 static int
664 filt_fsattach(struct knote *kn)
665 {
666 	kn->kn_flags |= EV_CLEAR;
667 	knote_insert(&fs_klist, kn);
668 
669 	return (0);
670 }
671 
672 static void
673 filt_fsdetach(struct knote *kn)
674 {
675 	knote_remove(&fs_klist, kn);
676 }
677 
678 static int
679 filt_fs(struct knote *kn, long hint)
680 {
681 	kn->kn_fflags |= hint;
682 	return (kn->kn_fflags != 0);
683 }
684 
685 /*
686  * Initialize a kqueue.
687  *
688  * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
689  *
690  * MPSAFE
691  */
692 void
693 kqueue_init(struct kqueue *kq, struct filedesc *fdp)
694 {
695 	TAILQ_INIT(&kq->kq_knpend);
696 	TAILQ_INIT(&kq->kq_knlist);
697 	kq->kq_count = 0;
698 	kq->kq_fdp = fdp;
699 	SLIST_INIT(&kq->kq_kqinfo.ki_note);
700 }
701 
702 /*
703  * Terminate a kqueue.  Freeing the actual kq itself is left up to the
704  * caller (it might be embedded in an lwp, so we don't do it here).
705  *
706  * The kq's knlist must be completely eradicated, so block on any
707  * processing races.
708  */
709 void
710 kqueue_terminate(struct kqueue *kq)
711 {
712 	struct knote *kn;
713 
714 	lwkt_getpooltoken(kq);
715 	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
716 		if (knote_acquire(kn))
717 			knote_detach_and_drop(kn);
718 	}
719 	lwkt_relpooltoken(kq);
720 
721 	if (kq->kq_knhash) {
722 		hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask);
723 		kq->kq_knhash = NULL;
724 		kq->kq_knhashmask = 0;
725 	}
726 }
727 
728 /*
729  * MPSAFE
730  */
731 int
732 sys_kqueue(struct kqueue_args *uap)
733 {
734 	struct thread *td = curthread;
735 	struct kqueue *kq;
736 	struct file *fp;
737 	int fd, error;
738 
739 	error = falloc(td->td_lwp, &fp, &fd);
740 	if (error)
741 		return (error);
742 	fp->f_flag = FREAD | FWRITE;
743 	fp->f_type = DTYPE_KQUEUE;
744 	fp->f_ops = &kqueueops;
745 
746 	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
747 	kqueue_init(kq, td->td_proc->p_fd);
748 	fp->f_data = kq;
749 
750 	fsetfd(kq->kq_fdp, fp, fd);
751 	uap->sysmsg_result = fd;
752 	fdrop(fp);
753 	return (error);
754 }
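
/*
 * Example (illustrative userland sketch): the returned descriptor is
 * consumed by kevent(2), e.g. waiting for a socket to become readable:
 *
 *	struct kevent kev, ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, sock, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(register)
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	(block; ev.data = bytes ready)
 *
 * sock is an assumed, already-open socket descriptor.
 */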
755 
756 /*
757  * Copy 'count' items into the destination list pointed to by uap->eventlist.
758  */
759 static int
760 kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
761 {
762 	struct kevent_copyin_args *kap;
763 	int error;
764 
765 	kap = (struct kevent_copyin_args *)arg;
766 
767 	error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
768 	if (error == 0) {
769 		kap->ka->eventlist += count;
770 		*res += count;
771 	} else {
772 		*res = -1;
773 	}
774 
775 	return (error);
776 }
777 
778 /*
779  * Copy at most 'max' items from the list pointed to by kap->changelist,
780  * return number of items in 'events'.
781  */
782 static int
783 kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
784 {
785 	struct kevent_copyin_args *kap;
786 	int error, count;
787 
788 	kap = (struct kevent_copyin_args *)arg;
789 
790 	count = min(kap->ka->nchanges - kap->pchanges, max);
791 	error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
792 	if (error == 0) {
793 		kap->ka->changelist += count;
794 		kap->pchanges += count;
795 		*events = count;
796 	}
797 
798 	return (error);
799 }
800 
801 /*
802  * MPSAFE
803  */
804 int
805 kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
806 	    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
807 	    struct timespec *tsp_in, int flags)
808 {
809 	struct kevent *kevp;
810 	struct timespec *tsp, ats;
811 	int i, n, total, error, nerrors = 0;
812 	int gobbled;
813 	int lres;
814 	int limit = kq_checkloop;
815 	int closedcounter;
816 	struct kevent kev[KQ_NEVENTS];
817 	struct knote marker;
818 	struct lwkt_token *tok;
819 
820 	if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec)
821 		atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);
822 
823 	tsp = tsp_in;
824 	*res = 0;
825 
826 	closedcounter = kq->kq_fdp->fd_closedcounter;
827 
828 	for (;;) {
829 		n = 0;
830 		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
831 		if (error)
832 			return error;
833 		if (n == 0)
834 			break;
835 		for (i = 0; i < n; ++i)
836 			kev[i].flags &= ~EV_SYSFLAGS;
837 		for (i = 0; i < n; ++i) {
838 			gobbled = n - i;
839 			error = kqueue_register(kq, &kev[i], &gobbled);
840 			i += gobbled - 1;
841 			kevp = &kev[i];
842 
843 			/*
844 			 * If a registration returns an error we
845 			 * immediately post the error.  The kevent()
846 			 * call itself will fail with the error if
847 			 * no space is available for posting.
848 			 *
849 			 * Such errors normally bypass the timeout/blocking
850 			 * code.  However, if the copyoutfn function refuses
851 			 * to post the error (see sys_poll()), then we
852 			 * ignore it too.
853 			 */
854 			if (error || (kevp->flags & EV_RECEIPT)) {
855 				kevp->flags = EV_ERROR;
856 				kevp->data = error;
857 				lres = *res;
858 				kevent_copyoutfn(uap, kevp, 1, res);
859 				if (*res < 0) {
860 					return error;
861 				} else if (lres != *res) {
862 					nevents--;
863 					nerrors++;
864 				}
865 			}
866 		}
867 	}
868 	if (nerrors)
869 		return 0;
870 
871 	/*
872 	 * Acquire/wait for events - setup timeout
873 	 */
874 	if (tsp != NULL) {
875 		if (tsp->tv_sec || tsp->tv_nsec) {
876 			getnanouptime(&ats);
877 			timespecadd(tsp, &ats);		/* tsp = target time */
878 		}
879 	}
880 
881 	/*
882 	 * Loop as required.
883 	 *
884 	 * Collect as many events as we can. Sleeping on successive
885 	 * loops is disabled if copyoutfn has incremented (*res).
886 	 *
887 	 * The loop stops if an error occurs, all events have been
888 	 * scanned (the marker has been reached), or fewer than the
889 	 * maximum number of events is found.
890 	 *
891 	 * The copyoutfn function does not have to increment (*res) in
892 	 * order for the loop to continue.
893 	 *
894 	 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
895 	 */
896 	total = 0;
897 	error = 0;
898 	marker.kn_filter = EVFILT_MARKER;
899 	marker.kn_status = KN_PROCESSING;
900 	tok = lwkt_token_pool_lookup(kq);
901 	lwkt_gettoken(tok);
902 	TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
903 	lwkt_reltoken(tok);
904 	while ((n = nevents - total) > 0) {
905 		if (n > KQ_NEVENTS)
906 			n = KQ_NEVENTS;
907 
908 		/*
909 		 * If no events are pending sleep until timeout (if any)
910 		 * or an event occurs.
911 		 *
912 		 * After the sleep completes the marker is moved to the
913 		 * end of the list, making any received events available
914 		 * to our scan.
915 		 */
916 		if (kq->kq_count == 0 && *res == 0) {
917 			int timeout, ustimeout = 0;
918 
919 			if (tsp == NULL) {
920 				timeout = 0;
921 			} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
922 				error = EWOULDBLOCK;
923 				break;
924 			} else {
925 				struct timespec atx = *tsp;
926 
927 				getnanouptime(&ats);
928 				timespecsub(&atx, &ats);
929 				if (atx.tv_sec < 0) {
930 					error = EWOULDBLOCK;
931 					break;
932 				} else {
933 					timeout = atx.tv_sec > 24 * 60 * 60 ?
934 					    24 * 60 * 60 * hz :
935 					    tstohz_high(&atx);
936 				}
937 				if (flags & KEVENT_TIMEOUT_PRECISE &&
938 				    timeout != 0) {
939 					if (atx.tv_sec == 0 &&
940 					    atx.tv_nsec < kq_sleep_threshold) {
941 						DELAY(atx.tv_nsec / 1000);
942 						error = EWOULDBLOCK;
943 						break;
944 					} else if (atx.tv_sec < 2000) {
945 						ustimeout = atx.tv_sec *
946 						    1000000 + atx.tv_nsec/1000;
947 					} else {
948 						ustimeout = 2000000000;
949 					}
950 				}
951 			}
952 
953 			lwkt_gettoken(tok);
954 			if (kq->kq_count == 0) {
955 				kq->kq_sleep_cnt++;
956 				if (__predict_false(kq->kq_sleep_cnt == 0)) {
957 					/*
958 					 * Guard against possible wrap-around;
959 					 * set it to 2 so that kqueue_wakeup()
960 					 * can wake everyone up.
961 					 */
962 					kq->kq_sleep_cnt = 2;
963 				}
964 				if ((flags & KEVENT_TIMEOUT_PRECISE) &&
965 				    timeout != 0) {
966 					error = precise_sleep(kq, PCATCH,
967 					    "kqread", ustimeout);
968 				} else {
969 					error = tsleep(kq, PCATCH, "kqread",
970 					    timeout);
971 				}
972 
973 				/* don't restart after signals... */
974 				if (error == ERESTART)
975 					error = EINTR;
976 				if (error) {
977 					lwkt_reltoken(tok);
978 					break;
979 				}
980 
981 				TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
982 				TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker,
983 				    kn_tqe);
984 			}
985 			lwkt_reltoken(tok);
986 		}
987 
988 		/*
989 		 * Process all received events.  Account for all
990 		 * non-spurious events in our total.
991 		 */
992 		i = kqueue_scan(kq, kev, n, &marker, closedcounter);
993 		if (i) {
994 			lres = *res;
995 			error = kevent_copyoutfn(uap, kev, i, res);
996 			total += *res - lres;
997 			if (error)
998 				break;
999 		}
1000 		if (limit && --limit == 0)
1001 			panic("kqueue: checkloop failed i=%d", i);
1002 
1003 		/*
1004 		 * Normally when fewer events are returned than requested
1005 		 * we can stop.  However, if only spurious events were
1006 		 * collected the copyout will not bump (*res) and we have
1007 		 * to continue.
1008 		 */
1009 		if (i < n && *res)
1010 			break;
1011 
1012 		/*
1013 		 * Deal with an edge case where spurious events can cause
1014 		 * a loop to occur without moving the marker.  This can
1015 		 * prevent kqueue_scan() from picking up new events which
1016 		 * race us.  We must be sure to move the marker for this
1017 		 * case.
1018 		 *
1019 		 * NOTE: We do not want to move the marker if events
1020 		 *	 were scanned because normal kqueue operations
1021 		 *	 may reactivate events.  Moving the marker in
1022 		 *	 that case could result in duplicates for the
1023 		 *	 same event.
1024 		 */
1025 		if (i == 0) {
1026 			lwkt_gettoken(tok);
1027 			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
1028 			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
1029 			lwkt_reltoken(tok);
1030 		}
1031 	}
1032 	lwkt_gettoken(tok);
1033 	TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
1034 	lwkt_reltoken(tok);
1035 
1036 	/* Timeouts do not return EWOULDBLOCK. */
1037 	if (error == EWOULDBLOCK)
1038 		error = 0;
1039 	return error;
1040 }
1041 
1042 /*
1043  * MPALMOSTSAFE
1044  */
1045 int
1046 sys_kevent(struct kevent_args *uap)
1047 {
1048 	struct thread *td = curthread;
1049 	struct timespec ts, *tsp;
1050 	struct kqueue *kq;
1051 	struct file *fp = NULL;
1052 	struct kevent_copyin_args *kap, ka;
1053 	int error;
1054 
1055 	if (uap->timeout) {
1056 		error = copyin(uap->timeout, &ts, sizeof(ts));
1057 		if (error)
1058 			return (error);
1059 		tsp = &ts;
1060 	} else {
1061 		tsp = NULL;
1062 	}
1063 	fp = holdfp(td, uap->fd, -1);
1064 	if (fp == NULL)
1065 		return (EBADF);
1066 	if (fp->f_type != DTYPE_KQUEUE) {
1067 		fdrop(fp);
1068 		return (EBADF);
1069 	}
1070 
1071 	kq = (struct kqueue *)fp->f_data;
1072 
1073 	kap = &ka;
1074 	kap->ka = uap;
1075 	kap->pchanges = 0;
1076 
1077 	error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
1078 			    kevent_copyin, kevent_copyout, tsp, 0);
1079 
1080 	dropfp(td, uap->fd, fp);
1081 
1082 	return (error);
1083 }
1084 
1085 /*
1086  * Efficiently load multiple file pointers.  This significantly reduces
1087  * threaded overhead.  When doing simple polling we can depend on the
1088  * per-thread (fd,fp) cache.  With more descriptors, we batch.
1089  */
1090 static
1091 void
1092 floadkevfps(thread_t td, struct filedesc *fdp, struct kevent *kev,
1093 	    struct file **fp, int climit)
1094 {
1095 	struct filterops *fops;
1096 	int tdcache;
1097 
1098 	if (climit <= 2 && td->td_proc && td->td_proc->p_fd == fdp) {
1099 		tdcache = 1;
1100 	} else {
1101 		tdcache = 0;
1102 		spin_lock_shared(&fdp->fd_spin);
1103 	}
1104 
1105 	while (climit) {
1106 		*fp = NULL;
1107 		if (kev->filter < 0 &&
1108 		    kev->filter + EVFILT_SYSCOUNT >= 0) {
1109 			fops = sysfilt_ops[~kev->filter];
1110 			if (fops->f_flags & FILTEROP_ISFD) {
1111 				if (tdcache) {
1112 					*fp = holdfp(td, kev->ident, -1);
1113 				} else {
1114 					*fp = holdfp_fdp_locked(fdp,
1115 								kev->ident, -1);
1116 				}
1117 			}
1118 		}
1119 		--climit;
1120 		++fp;
1121 		++kev;
1122 	}
1123 	if (tdcache == 0)
1124 		spin_unlock_shared(&fdp->fd_spin);
1125 }
1126 
1127 /*
1128  * Register up to *countp kev's.  Always registers at least 1.
1129  *
1130  * The number registered is returned in *countp.
1131  *
1132  * If an error occurs or a kev is flagged EV_RECEIPT, it is
1133  * processed and included in *countp, and processing then
1134  * stops.
1135  */
1136 int
1137 kqueue_register(struct kqueue *kq, struct kevent *kev, int *countp)
1138 {
1139 	struct filedesc *fdp = kq->kq_fdp;
1140 	struct klist *list = NULL;
1141 	struct filterops *fops;
1142 	struct file *fp[KQ_NEVENTS];
1143 	struct knote *kn = NULL;
1144 	struct thread *td;
1145 	int error;
1146 	int count;
1147 	int climit;
1148 	int closedcounter;
1149 	struct knote_cache_list *cache_list;
1150 
1151 	td = curthread;
1152 	climit = *countp;
1153 	if (climit > KQ_NEVENTS)
1154 		climit = KQ_NEVENTS;
1155 	closedcounter = fdp->fd_closedcounter;
1156 	floadkevfps(td, fdp, kev, fp, climit);
1157 
1158 	lwkt_getpooltoken(kq);
1159 	count = 0;
1160 
1161 	/*
1162 	 * To avoid races, only one thread can register events on this
1163 	 * kqueue at a time.
1164 	 */
1165 	while (__predict_false(kq->kq_regtd != NULL && kq->kq_regtd != td)) {
1166 		kq->kq_state |= KQ_REGWAIT;
1167 		tsleep(&kq->kq_regtd, 0, "kqreg", 0);
1168 	}
1169 	if (__predict_false(kq->kq_regtd != NULL)) {
1170 		/* Recursive calling of kqueue_register() */
1171 		td = NULL;
1172 	} else {
1173 		/* Owner of the kq_regtd, i.e. td != NULL */
1174 		kq->kq_regtd = td;
1175 	}
1176 
1177 loop:
1178 	if (kev->filter < 0) {
1179 		if (kev->filter + EVFILT_SYSCOUNT < 0) {
1180 			error = EINVAL;
1181 			++count;
1182 			goto done;
1183 		}
1184 		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
1185 	} else {
1186 		/*
1187 		 * XXX
1188 		 * filter attach routine is responsible for ensuring that
1189 		 * the identifier can be attached to it.
1190 		 */
1191 		error = EINVAL;
1192 		++count;
1193 		goto done;
1194 	}
1195 
1196 	if (fops->f_flags & FILTEROP_ISFD) {
1197 		/* validate descriptor */
1198 		if (fp[count] == NULL) {
1199 			error = EBADF;
1200 			++count;
1201 			goto done;
1202 		}
1203 	}
1204 
1205 	cache_list = &knote_cache_lists[mycpuid];
1206 	if (SLIST_EMPTY(&cache_list->knote_cache)) {
1207 		struct knote *new_kn;
1208 
1209 		new_kn = knote_alloc();
1210 		crit_enter();
1211 		SLIST_INSERT_HEAD(&cache_list->knote_cache, new_kn, kn_link);
1212 		cache_list->knote_cache_cnt++;
1213 		crit_exit();
1214 	}
1215 
1216 	if (fp[count] != NULL) {
1217 		list = &fp[count]->f_klist;
1218 	} else if (kq->kq_knhashmask) {
1219 		list = &kq->kq_knhash[
1220 			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
1221 	}
1222 	if (list != NULL) {
1223 		lwkt_getpooltoken(list);
1224 again:
1225 		SLIST_FOREACH(kn, list, kn_link) {
1226 			if (kn->kn_kq == kq &&
1227 			    kn->kn_filter == kev->filter &&
1228 			    kn->kn_id == kev->ident) {
1229 				if (knote_acquire(kn) == 0)
1230 					goto again;
1231 				break;
1232 			}
1233 		}
1234 		lwkt_relpooltoken(list);
1235 	}
1236 
1237 	/*
1238 	 * NOTE: At this point if kn is non-NULL we will have acquired
1239 	 *	 it and set KN_PROCESSING.
1240 	 */
1241 	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
1242 		error = ENOENT;
1243 		++count;
1244 		goto done;
1245 	}
1246 
1247 	/*
1248 	 * kn now contains the matching knote, or NULL if no match
1249 	 */
1250 	if (kev->flags & EV_ADD) {
1251 		if (kn == NULL) {
1252 			crit_enter();
1253 			kn = SLIST_FIRST(&cache_list->knote_cache);
1254 			if (kn == NULL) {
1255 				crit_exit();
1256 				kn = knote_alloc();
1257 			} else {
1258 				SLIST_REMOVE_HEAD(&cache_list->knote_cache,
1259 				    kn_link);
1260 				cache_list->knote_cache_cnt--;
1261 				crit_exit();
1262 			}
1263 			kn->kn_fp = fp[count];
1264 			kn->kn_kq = kq;
1265 			kn->kn_fop = fops;
1266 
1267 			/*
1268 			 * apply reference count to knote structure, and
1269 			 * do not release it at the end of this routine.
1270 			 */
1271 			fp[count] = NULL;	/* safety */
1272 
1273 			kn->kn_sfflags = kev->fflags;
1274 			kn->kn_sdata = kev->data;
1275 			kev->fflags = 0;
1276 			kev->data = 0;
1277 			kn->kn_kevent = *kev;
1278 
1279 			/*
1280 			 * KN_PROCESSING prevents the knote from getting
1281 			 * ripped out from under us while we are trying
1282 			 * to attach it, in case the attach blocks.
1283 			 */
1284 			kn->kn_status = KN_PROCESSING;
1285 			knote_attach(kn);
1286 			if ((error = filter_attach(kn)) != 0) {
1287 				kn->kn_status |= KN_DELETING | KN_REPROCESS;
1288 				knote_drop(kn);
1289 				++count;
1290 				goto done;
1291 			}
1292 
1293 			/*
1294 			 * Interlock against close races which either tried
1295 			 * to remove our knote while we were blocked or missed
1296 			 * it entirely prior to our attachment.  We do not
1297 			 * want to end up with a knote on a closed descriptor.
1298 			 */
1299 			if ((fops->f_flags & FILTEROP_ISFD) &&
1300 			    checkfdclosed(curthread, fdp, kev->ident, kn->kn_fp,
1301 					  closedcounter)) {
1302 				kn->kn_status |= KN_DELETING | KN_REPROCESS;
1303 			}
1304 		} else {
1305 			/*
1306 			 * The user may change some filter values after the
1307 			 * initial EV_ADD, but doing so will not reset any
1308 			 * filters that have already been triggered.
1309 			 */
1310 			KKASSERT(kn->kn_status & KN_PROCESSING);
1311 			if (fops == &user_filtops) {
1312 				filt_usertouch(kn, kev, EVENT_REGISTER);
1313 			} else {
1314 				kn->kn_sfflags = kev->fflags;
1315 				kn->kn_sdata = kev->data;
1316 				kn->kn_kevent.udata = kev->udata;
1317 			}
1318 		}
1319 
1320 		/*
1321 		 * Execute the filter event to immediately activate the
1322 		 * knote if necessary.  If reprocessing events are pending
1323 		 * due to blocking above we do not run the filter here
1324 		 * but instead let knote_release() do it.  Otherwise we
1325 		 * might run the filter on a deleted event.
1326 		 */
1327 		if ((kn->kn_status & KN_REPROCESS) == 0) {
1328 			if (filter_event(kn, 0))
1329 				KNOTE_ACTIVATE(kn);
1330 		}
1331 	} else if (kev->flags & EV_DELETE) {
1332 		/*
1333 		 * Delete the existing knote
1334 		 */
1335 		knote_detach_and_drop(kn);
1336 		error = 0;
1337 		++count;
1338 		goto done;
1339 	} else {
1340 		/*
1341 		 * Modify an existing event.
1342 		 *
1343 		 * The user may change some filter values after the
1344 		 * initial EV_ADD, but doing so will not reset any
1345 		 * filters that have already been triggered.
1346 		 */
1347 		KKASSERT(kn->kn_status & KN_PROCESSING);
1348 		if (fops == &user_filtops) {
1349 			filt_usertouch(kn, kev, EVENT_REGISTER);
1350 		} else {
1351 			kn->kn_sfflags = kev->fflags;
1352 			kn->kn_sdata = kev->data;
1353 			kn->kn_kevent.udata = kev->udata;
1354 		}
1355 
1356 		/*
1357 		 * Execute the filter event to immediately activate the
1358 		 * knote if necessary.  If reprocessing events are pending
1359 		 * due to blocking above we do not run the filter here
1360 		 * but instead let knote_release() do it.  Otherwise we
1361 		 * might run the filter on a deleted event.
1362 		 */
1363 		if ((kn->kn_status & KN_REPROCESS) == 0) {
1364 			if (filter_event(kn, 0))
1365 				KNOTE_ACTIVATE(kn);
1366 		}
1367 	}
1368 
1369 	/*
1370 	 * Disablement does not deactivate a knote here.
1371 	 */
1372 	if ((kev->flags & EV_DISABLE) &&
1373 	    ((kn->kn_status & KN_DISABLED) == 0)) {
1374 		kn->kn_status |= KN_DISABLED;
1375 	}
1376 
1377 	/*
1378 	 * Re-enablement may have to immediately enqueue an active knote.
1379 	 */
1380 	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
1381 		kn->kn_status &= ~KN_DISABLED;
1382 		if ((kn->kn_status & KN_ACTIVE) &&
1383 		    ((kn->kn_status & KN_QUEUED) == 0)) {
1384 			knote_enqueue(kn);
1385 		}
1386 	}
1387 
1388 	/*
1389 	 * Handle any required reprocessing
1390 	 */
1391 	knote_release(kn);
1392 	/* kn may be invalid now */
1393 
1394 	/*
1395 	 * Loop control.  We stop on errors (above), and also stop after
1396 	 * processing EV_RECEIPT, so the caller can process it.
1397 	 */
1398 	++count;
1399 	if (kev->flags & EV_RECEIPT) {
1400 		error = 0;
1401 		goto done;
1402 	}
1403 	++kev;
1404 	if (count < climit) {
1405 		if (fp[count-1])		/* drop unprocessed fp */
1406 			fdrop(fp[count-1]);
1407 		goto loop;
1408 	}
1409 
1410 	/*
1411 	 * Cleanup
1412 	 */
1413 done:
1414 	if (td != NULL) { /* Owner of the kq_regtd */
1415 		kq->kq_regtd = NULL;
1416 		if (__predict_false(kq->kq_state & KQ_REGWAIT)) {
1417 			kq->kq_state &= ~KQ_REGWAIT;
1418 			wakeup(&kq->kq_regtd);
1419 		}
1420 	}
1421 	lwkt_relpooltoken(kq);
1422 
1423 	/*
1424 	 * Drop unprocessed file pointers
1425 	 */
1426 	*countp = count;
1427 	if (count && fp[count-1])
1428 		fdrop(fp[count-1]);
1429 	while (count < climit) {
1430 		if (fp[count])
1431 			fdrop(fp[count]);
1432 		++count;
1433 	}
1434 	return (error);
1435 }
1436 
1437 /*
1438  * Scan the kqueue, return the number of active events placed in kevp up
1439  * to count.
1440  *
1441  * Continuous mode events may get recycled, do not continue scanning past
1442  * marker unless no events have been collected.
1443  */
1444 static int
1445 kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
1446             struct knote *marker, int closedcounter)
1447 {
1448 	struct knote *kn, local_marker;
1449 	thread_t td = curthread;
1450 	int total;
1451 
1452 	total = 0;
1453 	local_marker.kn_filter = EVFILT_MARKER;
1454 	local_marker.kn_status = KN_PROCESSING;
1455 
1456 	lwkt_getpooltoken(kq);
1457 
1458 	/*
1459 	 * Collect events.
1460 	 */
1461 	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
1462 	while (count) {
1463 		kn = TAILQ_NEXT(&local_marker, kn_tqe);
1464 		if (kn->kn_filter == EVFILT_MARKER) {
1465 			/* Marker reached, we are done */
1466 			if (kn == marker)
1467 				break;
1468 
1469 			/* Move local marker past some other thread's marker */
1470 			kn = TAILQ_NEXT(kn, kn_tqe);
1471 			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
1472 			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
1473 			continue;
1474 		}
1475 
1476 		/*
1477 		 * We can't skip a knote undergoing processing, otherwise
1478 		 * we risk not returning it when the user process expects
1479 		 * it should be returned.  Sleep and retry.
1480 		 */
1481 		if (knote_acquire(kn) == 0)
1482 			continue;
1483 
1484 		/*
1485 		 * Remove the event for processing.
1486 		 *
1487 		 * WARNING!  We must leave KN_QUEUED set to prevent the
1488 		 *	     event from being KNOTE_ACTIVATE()d while
1489 		 *	     the queue state is in limbo, in case we
1490 		 *	     block.
1491 		 */
1492 		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
1493 		kq->kq_count--;
1494 
1495 		/*
1496 		 * We have to deal with an extremely important race against
1497 		 * file descriptor close()s here.  The file descriptor can
1498 		 * disappear out from under us (MPSAFE), and there is a small
1499 		 * window between that and the call to knote_fdclose().
1500 		 *
1501 		 * If we hit that window here while doselect or dopoll is
1502 		 * trying to delete a spurious event they will not be able
1503 		 * to match up the event against a knote and will go haywire.
1504 		 */
1505 		if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
1506 		    checkfdclosed(td, kq->kq_fdp, kn->kn_kevent.ident,
1507 				  kn->kn_fp, closedcounter)) {
1508 			kn->kn_status |= KN_DELETING | KN_REPROCESS;
1509 		}
1510 
1511 		if (kn->kn_status & KN_DISABLED) {
1512 			/*
1513 			 * If disabled we ensure the event is not queued
1514 			 * but leave its active bit set.  On re-enablement
1515 			 * the event may be immediately triggered.
1516 			 */
1517 			kn->kn_status &= ~KN_QUEUED;
1518 		} else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
1519 			   (kn->kn_status & KN_DELETING) == 0 &&
1520 			   filter_event(kn, 0) == 0) {
1521 			/*
1522 			 * If not running in one-shot mode and the event
1523 			 * is no longer present we ensure it is removed
1524 			 * from the queue and ignore it.
1525 			 */
1526 			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
1527 		} else {
1528 			/*
1529 			 * Post the event
1530 			 */
1531 			if (kn->kn_fop == &user_filtops)
1532 				filt_usertouch(kn, kevp, EVENT_PROCESS);
1533 			else
1534 				*kevp = kn->kn_kevent;
1535 			++kevp;
1536 			++total;
1537 			--count;
1538 
1539 			if (kn->kn_flags & EV_ONESHOT) {
1540 				kn->kn_status &= ~KN_QUEUED;
1541 				kn->kn_status |= KN_DELETING | KN_REPROCESS;
1542 			} else {
1543 				if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
1544 					if (kn->kn_flags & EV_CLEAR) {
1545 						kn->kn_data = 0;
1546 						kn->kn_fflags = 0;
1547 					}
1548 					if (kn->kn_flags & EV_DISPATCH) {
1549 						kn->kn_status |= KN_DISABLED;
1550 					}
1551 					kn->kn_status &= ~(KN_QUEUED |
1552 							   KN_ACTIVE);
1553 				} else {
1554 					TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
1555 					kq->kq_count++;
1556 				}
1557 			}
1558 		}
1559 
1560 		/*
1561 		 * Handle any post-processing states
1562 		 */
1563 		knote_release(kn);
1564 	}
1565 	TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
1566 
1567 	lwkt_relpooltoken(kq);
1568 	return (total);
1569 }
1570 
1571 /*
1572  * XXX
1573  * This could be expanded to call kqueue_scan, if desired.
1574  *
1575  * MPSAFE
1576  */
1577 static int
1578 kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
1579 {
1580 	return (ENXIO);
1581 }
1582 
1583 /*
1584  * MPSAFE
1585  */
1586 static int
1587 kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
1588 {
1589 	return (ENXIO);
1590 }
1591 
1592 /*
1593  * MPALMOSTSAFE
1594  */
1595 static int
1596 kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
1597 	     struct ucred *cred, struct sysmsg *msg)
1598 {
1599 	struct kqueue *kq;
1600 	int error;
1601 
1602 	kq = (struct kqueue *)fp->f_data;
1603 	lwkt_getpooltoken(kq);
1604 	switch(com) {
1605 	case FIOASYNC:
1606 		if (*(int *)data)
1607 			kq->kq_state |= KQ_ASYNC;
1608 		else
1609 			kq->kq_state &= ~KQ_ASYNC;
1610 		error = 0;
1611 		break;
1612 	case FIOSETOWN:
1613 		error = fsetown(*(int *)data, &kq->kq_sigio);
1614 		break;
1615 	default:
1616 		error = ENOTTY;
1617 		break;
1618 	}
1619 	lwkt_relpooltoken(kq);
1620 	return (error);
1621 }
1622 
1623 /*
1624  * MPSAFE
1625  */
1626 static int
1627 kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
1628 {
1629 	struct kqueue *kq = (struct kqueue *)fp->f_data;
1630 
1631 	bzero((void *)st, sizeof(*st));
1632 	st->st_size = kq->kq_count;
1633 	st->st_blksize = sizeof(struct kevent);
1634 	st->st_mode = S_IFIFO;
1635 	return (0);
1636 }
1637 
1638 /*
1639  * MPSAFE
1640  */
1641 static int
1642 kqueue_close(struct file *fp)
1643 {
1644 	struct kqueue *kq = (struct kqueue *)fp->f_data;
1645 
1646 	kqueue_terminate(kq);
1647 
1648 	fp->f_data = NULL;
1649 	funsetown(&kq->kq_sigio);
1650 
1651 	kfree(kq, M_KQUEUE);
1652 	return (0);
1653 }
1654 
1655 static void
1656 kqueue_wakeup(struct kqueue *kq)
1657 {
1658 	if (kq->kq_sleep_cnt) {
1659 		u_int sleep_cnt = kq->kq_sleep_cnt;
1660 
1661 		kq->kq_sleep_cnt = 0;
1662 		if (sleep_cnt == 1)
1663 			wakeup_one(kq);
1664 		else
1665 			wakeup(kq);
1666 	}
1667 	KNOTE(&kq->kq_kqinfo.ki_note, 0);
1668 }
1669 
1670 /*
1671  * Calls filterops f_attach function, acquiring mplock if filter is not
1672  * marked as FILTEROP_MPSAFE.
1673  *
1674  * Caller must be holding the related kq token
1675  */
1676 static int
1677 filter_attach(struct knote *kn)
1678 {
1679 	int ret;
1680 
1681 	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
1682 		ret = kn->kn_fop->f_attach(kn);
1683 	} else {
1684 		get_mplock();
1685 		ret = kn->kn_fop->f_attach(kn);
1686 		rel_mplock();
1687 	}
1688 	return (ret);
1689 }
1690 
1691 /*
1692  * Detach the knote and drop it, destroying the knote.
1693  *
1694  * Calls filterops f_detach function, acquiring mplock if filter is not
1695  * marked as FILTEROP_MPSAFE.
1696  *
1697  * Caller must be holding the related kq token
1698  */
1699 static void
1700 knote_detach_and_drop(struct knote *kn)
1701 {
1702 	kn->kn_status |= KN_DELETING | KN_REPROCESS;
1703 	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
1704 		kn->kn_fop->f_detach(kn);
1705 	} else {
1706 		get_mplock();
1707 		kn->kn_fop->f_detach(kn);
1708 		rel_mplock();
1709 	}
1710 	knote_drop(kn);
1711 }
1712 
1713 /*
1714  * Calls filterops f_event function, acquiring mplock if filter is not
1715  * marked as FILTEROP_MPSAFE.
1716  *
1717  * If the knote is in the middle of being created or deleted we cannot
1718  * safely call the filter op.
1719  *
1720  * Caller must be holding the related kq token
1721  */
1722 static int
1723 filter_event(struct knote *kn, long hint)
1724 {
1725 	int ret;
1726 
1727 	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
1728 		ret = kn->kn_fop->f_event(kn, hint);
1729 	} else {
1730 		get_mplock();
1731 		ret = kn->kn_fop->f_event(kn, hint);
1732 		rel_mplock();
1733 	}
1734 	return (ret);
1735 }
1736 
1737 /*
1738  * Walk down a list of knotes, activating them if their event has triggered.
1739  *
1740  * If we encounter any knotes which are undergoing processing we just mark
1741  * them for reprocessing and do not try to [re]activate the knote.  However,
1742  * if a hint is being passed we have to wait and that makes things a bit
1743  * sticky.
1744  */
1745 void
1746 knote(struct klist *list, long hint)
1747 {
1748 	struct kqueue *kq;
1749 	struct knote *kn;
1750 	struct knote *kntmp;
1751 
1752 	lwkt_getpooltoken(list);
1753 restart:
1754 	SLIST_FOREACH(kn, list, kn_next) {
1755 		kq = kn->kn_kq;
1756 		lwkt_getpooltoken(kq);
1757 
1758 		/* temporary verification hack */
1759 		SLIST_FOREACH(kntmp, list, kn_next) {
1760 			if (kn == kntmp)
1761 				break;
1762 		}
1763 		if (kn != kntmp || kn->kn_kq != kq) {
1764 			lwkt_relpooltoken(kq);
1765 			goto restart;
1766 		}
1767 
1768 		if (kn->kn_status & KN_PROCESSING) {
1769 			/*
1770 			 * Someone else is processing the knote, ask the
1771 			 * other thread to reprocess it and don't mess
1772 			 * with it otherwise.
1773 			 */
1774 			if (hint == 0) {
1775 				kn->kn_status |= KN_REPROCESS;
1776 				lwkt_relpooltoken(kq);
1777 				continue;
1778 			}
1779 
1780 			/*
1781 			 * If the hint is non-zero we have to wait or risk
1782 			 * losing the state the caller is trying to update.
1783 			 *
1784 			 * XXX This is a real problem, certain process
1785 			 *     and signal filters will bump kn_data for
1786 			 *     already-processed notes more than once if
1787 			 *     we restart the list scan.  FIXME.
1788 			 */
1789 			kn->kn_status |= KN_WAITING | KN_REPROCESS;
1790 			tsleep(kn, 0, "knotec", hz);
1791 			lwkt_relpooltoken(kq);
1792 			goto restart;
1793 		}
1794 
1795 		/*
1796 		 * Become the reprocessing master ourselves.
1797 		 *
1798 		 * If hint is non-zero running the event is mandatory
1799 		 * when not deleting so do it whether reprocessing is
1800 		 * set or not.
1801 		 */
1802 		kn->kn_status |= KN_PROCESSING;
1803 		if ((kn->kn_status & KN_DELETING) == 0) {
1804 			if (filter_event(kn, hint))
1805 				KNOTE_ACTIVATE(kn);
1806 		}
1807 		if (knote_release(kn)) {
1808 			lwkt_relpooltoken(kq);
1809 			goto restart;
1810 		}
1811 		lwkt_relpooltoken(kq);
1812 	}
1813 	lwkt_relpooltoken(list);
1814 }
1815 
1816 /*
1817  * Insert knote at head of klist.
1818  *
1819  * This function may only be called via a filter function and thus
1820  * kq_token should already be held and marked for processing.
1821  */
1822 void
1823 knote_insert(struct klist *klist, struct knote *kn)
1824 {
1825 	lwkt_getpooltoken(klist);
1826 	KKASSERT(kn->kn_status & KN_PROCESSING);
1827 	SLIST_INSERT_HEAD(klist, kn, kn_next);
1828 	lwkt_relpooltoken(klist);
1829 }
1830 
1831 /*
1832  * Remove knote from a klist
1833  *
1834  * This function may only be called via a filter function and thus
1835  * kq_token should already be held and marked for processing.
1836  */
1837 void
1838 knote_remove(struct klist *klist, struct knote *kn)
1839 {
1840 	lwkt_getpooltoken(klist);
1841 	KKASSERT(kn->kn_status & KN_PROCESSING);
1842 	SLIST_REMOVE(klist, kn, knote, kn_next);
1843 	lwkt_relpooltoken(klist);
1844 }
1845 
1846 void
1847 knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
1848 		    struct filterops *ops, void *hook)
1849 {
1850 	struct kqueue *kq;
1851 	struct knote *kn;
1852 
1853 	lwkt_getpooltoken(&src->ki_note);
1854 	lwkt_getpooltoken(&dst->ki_note);
1855 	while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
1856 		kq = kn->kn_kq;
1857 		lwkt_getpooltoken(kq);
1858 		if (SLIST_FIRST(&src->ki_note) != kn || kn->kn_kq != kq) {
1859 			lwkt_relpooltoken(kq);
1860 			continue;
1861 		}
1862 		if (knote_acquire(kn)) {
1863 			knote_remove(&src->ki_note, kn);
1864 			kn->kn_fop = ops;
1865 			kn->kn_hook = hook;
1866 			knote_insert(&dst->ki_note, kn);
1867 			knote_release(kn);
1868 			/* kn may be invalid now */
1869 		}
1870 		lwkt_relpooltoken(kq);
1871 	}
1872 	lwkt_relpooltoken(&dst->ki_note);
1873 	lwkt_relpooltoken(&src->ki_note);
1874 }
1875 
1876 /*
1877  * Remove all knotes referencing a specified fd
1878  */
1879 void
1880 knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
1881 {
1882 	struct kqueue *kq;
1883 	struct knote *kn;
1884 	struct knote *kntmp;
1885 
1886 	lwkt_getpooltoken(&fp->f_klist);
1887 restart:
1888 	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
1889 		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
1890 			kq = kn->kn_kq;
1891 			lwkt_getpooltoken(kq);
1892 
1893 			/* temporary verification hack */
1894 			SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
1895 				if (kn == kntmp)
1896 					break;
1897 			}
1898 			if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
1899 			    kn->kn_id != fd || kn->kn_kq != kq) {
1900 				lwkt_relpooltoken(kq);
1901 				goto restart;
1902 			}
1903 			if (knote_acquire(kn))
1904 				knote_detach_and_drop(kn);
1905 			lwkt_relpooltoken(kq);
1906 			goto restart;
1907 		}
1908 	}
1909 	lwkt_relpooltoken(&fp->f_klist);
1910 }
1911 
1912 /*
1913  * Low level attach function.
1914  *
1915  * The knote should already be marked for processing.
1916  * Caller must hold the related kq token.
1917  */
1918 static void
1919 knote_attach(struct knote *kn)
1920 {
1921 	struct klist *list;
1922 	struct kqueue *kq = kn->kn_kq;
1923 
1924 	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
1925 		KKASSERT(kn->kn_fp);
1926 		list = &kn->kn_fp->f_klist;
1927 	} else {
1928 		if (kq->kq_knhashmask == 0)
1929 			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
1930 						 &kq->kq_knhashmask);
1931 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
1932 	}
1933 	lwkt_getpooltoken(list);
1934 	SLIST_INSERT_HEAD(list, kn, kn_link);
1935 	lwkt_relpooltoken(list);
1936 	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
1937 }
1938 
1939 /*
1940  * Low level drop function.
1941  *
1942  * The knote should already be marked for processing.
1943  * Caller must hold the related kq token.
1944  */
1945 static void
1946 knote_drop(struct knote *kn)
1947 {
1948 	struct kqueue *kq;
1949 	struct klist *list;
1950 
1951 	kq = kn->kn_kq;
1952 
1953 	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
1954 		list = &kn->kn_fp->f_klist;
1955 	else
1956 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
1957 
1958 	lwkt_getpooltoken(list);
1959 	SLIST_REMOVE(list, kn, knote, kn_link);
1960 	lwkt_relpooltoken(list);
1961 	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
1962 	if (kn->kn_status & KN_QUEUED)
1963 		knote_dequeue(kn);
1964 	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
1965 		fdrop(kn->kn_fp);
1966 		kn->kn_fp = NULL;
1967 	}
1968 	knote_free(kn);
1969 }
1970 
1971 /*
1972  * Low level enqueue function.
1973  *
1974  * The knote should already be marked for processing.
1975  * Caller must be holding the kq token
1976  */
1977 static void
1978 knote_enqueue(struct knote *kn)
1979 {
1980 	struct kqueue *kq = kn->kn_kq;
1981 
1982 	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
1983 	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
1984 	kn->kn_status |= KN_QUEUED;
1985 	++kq->kq_count;
1986 
1987 	/*
1988 	 * Send SIGIO on request (typically set up as a mailbox signal)
1989 	 */
1990 	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
1991 		pgsigio(kq->kq_sigio, SIGIO, 0);
1992 
1993 	kqueue_wakeup(kq);
1994 }
1995 
1996 /*
1997  * Low level dequeue function.
1998  *
1999  * The knote should already be marked for processing.
2000  * Caller must be holding the kq token
2001  */
2002 static void
2003 knote_dequeue(struct knote *kn)
2004 {
2005 	struct kqueue *kq = kn->kn_kq;
2006 
2007 	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
2008 	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
2009 	kn->kn_status &= ~KN_QUEUED;
2010 	kq->kq_count--;
2011 }
2012 
2013 static struct knote *
2014 knote_alloc(void)
2015 {
2016 	return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
2017 }
2018 
2019 static void
2020 knote_free(struct knote *kn)
2021 {
2022 	struct knote_cache_list *cache_list;
2023 
2024 	cache_list = &knote_cache_lists[mycpuid];
2025 	if (cache_list->knote_cache_cnt < KNOTE_CACHE_MAX) {
2026 		crit_enter();
2027 		SLIST_INSERT_HEAD(&cache_list->knote_cache, kn, kn_link);
2028 		cache_list->knote_cache_cnt++;
2029 		crit_exit();
2030 		return;
2031 	}
2032 	kfree(kn, M_KQUEUE);
2033 }
2034 
2035 struct sleepinfo {
2036 	void *ident;
2037 	int timedout;
2038 };
2039 
2040 static void
2041 precise_sleep_intr(systimer_t info, int in_ipi, struct intrframe *frame)
2042 {
2043 	struct sleepinfo *si;
2044 
2045 	si = info->data;
2046 	si->timedout = 1;
2047 	wakeup(si->ident);
2048 }
2049 
2050 static int
2051 precise_sleep(void *ident, int flags, const char *wmesg, int us)
2052 {
2053 	struct systimer info;
2054 	struct sleepinfo si = {
2055 		.ident = ident,
2056 		.timedout = 0,
2057 	};
2058 	int r;
2059 
2060 	tsleep_interlock(ident, flags);
2061 	systimer_init_oneshot(&info, precise_sleep_intr, &si,
2062 	    us == 0 ? 1 : us);
2063 	r = tsleep(ident, flags | PINTERLOCKED, wmesg, 0);
2064 	systimer_del(&info);
2065 	if (si.timedout)
2066 		r = EWOULDBLOCK;
2067 
2068 	return r;
2069 }
2070