xref: /dragonfly/sys/kern/kern_event.c (revision dcb5d66b)
1 /*-
2  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
27  */
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/kernel.h>
32 #include <sys/proc.h>
33 #include <sys/malloc.h>
34 #include <sys/unistd.h>
35 #include <sys/file.h>
36 #include <sys/lock.h>
37 #include <sys/fcntl.h>
38 #include <sys/queue.h>
39 #include <sys/event.h>
40 #include <sys/eventvar.h>
41 #include <sys/protosw.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/stat.h>
45 #include <sys/sysctl.h>
46 #include <sys/sysproto.h>
47 #include <sys/thread.h>
48 #include <sys/uio.h>
49 #include <sys/signalvar.h>
50 #include <sys/filio.h>
51 #include <sys/ktr.h>
52 #include <sys/spinlock.h>
53 
54 #include <sys/thread2.h>
55 #include <sys/file2.h>
56 #include <sys/mplock2.h>
57 #include <sys/spinlock2.h>
58 
59 #define EVENT_REGISTER	1
60 #define EVENT_PROCESS	2
61 
62 MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
63 
64 struct kevent_copyin_args {
65 	struct kevent_args	*ka;
66 	int			pchanges;
67 };
68 
69 #define KNOTE_CACHE_MAX		8
70 
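/*
 * Small per-cpu cache of recently freed knotes; the structure is
 * cache-line aligned so the per-cpu lists do not false-share.
 */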
71 struct knote_cache_list {
72 	struct klist		knote_cache;
73 	int			knote_cache_cnt;
74 } __cachealign;
75 
76 static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
77 		    struct knote *marker, int closedcounter);
78 static int 	kqueue_read(struct file *fp, struct uio *uio,
79 		    struct ucred *cred, int flags);
80 static int	kqueue_write(struct file *fp, struct uio *uio,
81 		    struct ucred *cred, int flags);
82 static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
83 		    struct ucred *cred, struct sysmsg *msg);
84 static int 	kqueue_kqfilter(struct file *fp, struct knote *kn);
85 static int 	kqueue_stat(struct file *fp, struct stat *st,
86 		    struct ucred *cred);
87 static int 	kqueue_close(struct file *fp);
88 static void	kqueue_wakeup(struct kqueue *kq);
89 static int	filter_attach(struct knote *kn);
90 static int	filter_event(struct knote *kn, long hint);
91 
92 /*
93  * MPSAFE
94  */
95 static struct fileops kqueueops = {
96 	.fo_read = kqueue_read,
97 	.fo_write = kqueue_write,
98 	.fo_ioctl = kqueue_ioctl,
99 	.fo_kqfilter = kqueue_kqfilter,
100 	.fo_stat = kqueue_stat,
101 	.fo_close = kqueue_close,
102 	.fo_shutdown = nofo_shutdown
103 };
104 
105 static void 	knote_attach(struct knote *kn);
106 static void 	knote_drop(struct knote *kn);
107 static void	knote_detach_and_drop(struct knote *kn);
108 static void 	knote_enqueue(struct knote *kn);
109 static void 	knote_dequeue(struct knote *kn);
110 static struct 	knote *knote_alloc(void);
111 static void 	knote_free(struct knote *kn);
112 
113 static void	precise_sleep_intr(systimer_t info, int in_ipi,
114 				   struct intrframe *frame);
115 static int	precise_sleep(void *ident, int flags, const char *wmesg,
116 			      int us);
117 
118 static void	filt_kqdetach(struct knote *kn);
119 static int	filt_kqueue(struct knote *kn, long hint);
120 static int	filt_procattach(struct knote *kn);
121 static void	filt_procdetach(struct knote *kn);
122 static int	filt_proc(struct knote *kn, long hint);
123 static int	filt_fileattach(struct knote *kn);
124 static void	filt_timerexpire(void *knx);
125 static int	filt_timerattach(struct knote *kn);
126 static void	filt_timerdetach(struct knote *kn);
127 static int	filt_timer(struct knote *kn, long hint);
128 static int	filt_userattach(struct knote *kn);
129 static void	filt_userdetach(struct knote *kn);
130 static int	filt_user(struct knote *kn, long hint);
131 static void	filt_usertouch(struct knote *kn, struct kevent *kev,
132 				u_long type);
133 static int	filt_fsattach(struct knote *kn);
134 static void	filt_fsdetach(struct knote *kn);
135 static int	filt_fs(struct knote *kn, long hint);
136 
137 static struct filterops file_filtops =
138 	{ FILTEROP_ISFD | FILTEROP_MPSAFE, filt_fileattach, NULL, NULL };
139 static struct filterops kqread_filtops =
140 	{ FILTEROP_ISFD | FILTEROP_MPSAFE, NULL, filt_kqdetach, filt_kqueue };
141 static struct filterops proc_filtops =
142 	{ FILTEROP_MPSAFE, filt_procattach, filt_procdetach, filt_proc };
143 static struct filterops timer_filtops =
144 	{ FILTEROP_MPSAFE, filt_timerattach, filt_timerdetach, filt_timer };
145 static struct filterops user_filtops =
146 	{ FILTEROP_MPSAFE, filt_userattach, filt_userdetach, filt_user };
147 static struct filterops fs_filtops =
148 	{ FILTEROP_MPSAFE, filt_fsattach, filt_fsdetach, filt_fs };
149 
150 static int 		kq_ncallouts = 0;
151 static int 		kq_calloutmax = (4 * 1024);
152 SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
153     &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
154 static int		kq_checkloop = 1000000;
155 SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
156     &kq_checkloop, 0, "Maximum number of loops for kqueue scan");
157 static int		kq_sleep_threshold = 20000;
158 SYSCTL_INT(_kern, OID_AUTO, kq_sleep_threshold, CTLFLAG_RW,
159     &kq_sleep_threshold, 0, "Minimum sleep duration without busy-looping");
160 
161 #define KNOTE_ACTIVATE(kn) do { 					\
162 	kn->kn_status |= KN_ACTIVE;					\
163 	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
164 		knote_enqueue(kn);					\
165 } while(0)
166 
167 #define	KN_HASHSIZE		64		/* XXX should be tunable */
168 #define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
169 
170 extern struct filterops aio_filtops;
171 extern struct filterops sig_filtops;
172 
173 /*
174  * Table for all system-defined filters.
175  */
176 static struct filterops *sysfilt_ops[] = {
177 	&file_filtops,			/* EVFILT_READ */
178 	&file_filtops,			/* EVFILT_WRITE */
179 	&aio_filtops,			/* EVFILT_AIO */
180 	&file_filtops,			/* EVFILT_VNODE */
181 	&proc_filtops,			/* EVFILT_PROC */
182 	&sig_filtops,			/* EVFILT_SIGNAL */
183 	&timer_filtops,			/* EVFILT_TIMER */
184 	&file_filtops,			/* EVFILT_EXCEPT */
185 	&user_filtops,			/* EVFILT_USER */
186 	&fs_filtops,			/* EVFILT_FS */
187 };
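
/*
 * System filter numbers are negative, so the table above is indexed
 * with ~kev->filter to produce a 0-based slot (see kqueue_register()
 * and floadkevfps()).
 */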
188 
189 static struct knote_cache_list	knote_cache_lists[MAXCPU];
190 
191 /*
192  * Acquire a knote, return non-zero on success, 0 on failure.
193  *
194  * If we cannot acquire the knote we sleep and return 0.  The knote
195  * may be stale on return in this case and the caller must restart
196  * whatever loop they are in.
197  *
198  * Related kq token must be held.
199  */
200 static __inline int
201 knote_acquire(struct knote *kn)
202 {
203 	if (kn->kn_status & KN_PROCESSING) {
204 		kn->kn_status |= KN_WAITING | KN_REPROCESS;
205 		tsleep(kn, 0, "kqepts", hz);
206 		/* knote may be stale now */
207 		return(0);
208 	}
209 	kn->kn_status |= KN_PROCESSING;
210 	return(1);
211 }
212 
213 /*
214  * Release an acquired knote, clearing KN_PROCESSING and handling any
215  * KN_REPROCESS events.
216  *
217  * Caller must be holding the related kq token
218  *
219  * Non-zero is returned if the knote is destroyed or detached.
220  */
221 static __inline int
222 knote_release(struct knote *kn)
223 {
224 	int ret;
225 
226 	while (kn->kn_status & KN_REPROCESS) {
227 		kn->kn_status &= ~KN_REPROCESS;
228 		if (kn->kn_status & KN_WAITING) {
229 			kn->kn_status &= ~KN_WAITING;
230 			wakeup(kn);
231 		}
232 		if (kn->kn_status & KN_DELETING) {
233 			knote_detach_and_drop(kn);
234 			return(1);
235 			/* NOT REACHED */
236 		}
237 		if (filter_event(kn, 0))
238 			KNOTE_ACTIVATE(kn);
239 	}
240 	if (kn->kn_status & KN_DETACHED)
241 		ret = 1;
242 	else
243 		ret = 0;
244 	kn->kn_status &= ~KN_PROCESSING;
245 	/* kn should not be accessed anymore */
246 	return ret;
247 }
248 
249 static int
250 filt_fileattach(struct knote *kn)
251 {
252 	return (fo_kqfilter(kn->kn_fp, kn));
253 }
254 
255 /*
256  * MPSAFE
257  */
258 static int
259 kqueue_kqfilter(struct file *fp, struct knote *kn)
260 {
261 	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
262 
263 	if (kn->kn_filter != EVFILT_READ)
264 		return (EOPNOTSUPP);
265 
266 	kn->kn_fop = &kqread_filtops;
267 	knote_insert(&kq->kq_kqinfo.ki_note, kn);
268 	return (0);
269 }
270 
271 static void
272 filt_kqdetach(struct knote *kn)
273 {
274 	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
275 
276 	knote_remove(&kq->kq_kqinfo.ki_note, kn);
277 }
278 
279 /*ARGSUSED*/
280 static int
281 filt_kqueue(struct knote *kn, long hint)
282 {
283 	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
284 
285 	kn->kn_data = kq->kq_count;
286 	return (kn->kn_data > 0);
287 }
288 
289 static int
290 filt_procattach(struct knote *kn)
291 {
292 	struct proc *p;
293 	int immediate;
294 
295 	immediate = 0;
296 	p = pfind(kn->kn_id);
297 	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
298 		p = zpfind(kn->kn_id);
299 		immediate = 1;
300 	}
301 	if (p == NULL) {
302 		return (ESRCH);
303 	}
304 	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
305 		if (p)
306 			PRELE(p);
307 		return (EACCES);
308 	}
309 
310 	lwkt_gettoken(&p->p_token);
311 	kn->kn_ptr.p_proc = p;
312 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
313 
314 	/*
315 	 * internal flag indicating registration done by kernel
316 	 */
317 	if (kn->kn_flags & EV_FLAG1) {
318 		kn->kn_data = kn->kn_sdata;		/* ppid */
319 		kn->kn_fflags = NOTE_CHILD;
320 		kn->kn_flags &= ~EV_FLAG1;
321 	}
322 
323 	knote_insert(&p->p_klist, kn);
324 
325 	/*
326 	 * Immediately activate any exit notes if the target process is a
327 	 * zombie.  This is necessary to handle the case where the target
328 	 * process, e.g. a child, dies before the kevent is registered.
329 	 */
330 	if (immediate && filt_proc(kn, NOTE_EXIT))
331 		KNOTE_ACTIVATE(kn);
332 	lwkt_reltoken(&p->p_token);
333 	PRELE(p);
334 
335 	return (0);
336 }
337 
338 /*
339  * The knote may be attached to a different process, which may exit,
340  * leaving nothing for the knote to be attached to.  So when the process
341  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
342  * it will be deleted when read out.  However, as part of the knote deletion,
343  * this routine is called, so a check is needed to avoid actually performing
344  * a detach, because the original process does not exist any more.
345  */
346 static void
347 filt_procdetach(struct knote *kn)
348 {
349 	struct proc *p;
350 
351 	if (kn->kn_status & KN_DETACHED)
352 		return;
353 	p = kn->kn_ptr.p_proc;
354 	knote_remove(&p->p_klist, kn);
355 }
356 
357 static int
358 filt_proc(struct knote *kn, long hint)
359 {
360 	u_int event;
361 
362 	/*
363 	 * mask off extra data
364 	 */
365 	event = (u_int)hint & NOTE_PCTRLMASK;
366 
367 	/*
368 	 * if the user is interested in this event, record it.
369 	 */
370 	if (kn->kn_sfflags & event)
371 		kn->kn_fflags |= event;
372 
373 	/*
374 	 * Process is gone, so flag the event as finished.  Detach the
375 	 * knote from the process now because the process will be poof,
376 	 * gone later on.
377 	 */
378 	if (event == NOTE_EXIT) {
379 		struct proc *p = kn->kn_ptr.p_proc;
380 		if ((kn->kn_status & KN_DETACHED) == 0) {
381 			PHOLD(p);
382 			knote_remove(&p->p_klist, kn);
383 			kn->kn_status |= KN_DETACHED;
384 			kn->kn_data = p->p_xstat;
385 			kn->kn_ptr.p_proc = NULL;
386 			PRELE(p);
387 		}
388 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
389 		return (1);
390 	}
391 
392 	/*
393 	 * process forked, and user wants to track the new process,
394 	 * so attach a new knote to it, and immediately report an
395 	 * event with the parent's pid.
396 	 */
397 	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
398 		struct kevent kev;
399 		int error;
400 		int n;
401 
402 		/*
403 		 * register knote with new process.
404 		 */
405 		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
406 		kev.filter = kn->kn_filter;
407 		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
408 		kev.fflags = kn->kn_sfflags;
409 		kev.data = kn->kn_id;			/* parent */
410 		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
411 		n = 1;
412 		error = kqueue_register(kn->kn_kq, &kev, &n);
413 		if (error)
414 			kn->kn_fflags |= NOTE_TRACKERR;
415 	}
416 
417 	return (kn->kn_fflags != 0);
418 }
419 
420 static void
421 filt_timerreset(struct knote *kn)
422 {
423 	struct callout *calloutp;
424 	struct timeval tv;
425 	int tticks;
426 
427 	tv.tv_sec = kn->kn_sdata / 1000;
428 	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
429 	tticks = tvtohz_high(&tv);
430 	calloutp = (struct callout *)kn->kn_hook;
431 	callout_reset(calloutp, tticks, filt_timerexpire, kn);
432 }
433 
434 /*
435  * The callout interlocks with callout_terminate() but can still
436  * race a deletion so if KN_DELETING is set we just don't touch
437  * the knote.
438  */
439 static void
440 filt_timerexpire(void *knx)
441 {
442 	struct knote *kn = knx;
443 	struct kqueue *kq = kn->kn_kq;
444 
445 	lwkt_getpooltoken(kq);
446 
447 	/*
448 	 * Open-code knote_acquire() since we can't sleep in a callout;
449 	 * however, we do need to record this expiration.
450 	 */
451 	kn->kn_data++;
452 	if (kn->kn_status & KN_PROCESSING) {
453 		kn->kn_status |= KN_REPROCESS;
454 		if ((kn->kn_status & KN_DELETING) == 0 &&
455 		    (kn->kn_flags & EV_ONESHOT) == 0)
456 			filt_timerreset(kn);
457 		lwkt_relpooltoken(kq);
458 		return;
459 	}
460 	KASSERT((kn->kn_status & KN_DELETING) == 0,
461 	    ("acquire a deleting knote %#x", kn->kn_status));
462 	kn->kn_status |= KN_PROCESSING;
463 
464 	KNOTE_ACTIVATE(kn);
465 	if ((kn->kn_flags & EV_ONESHOT) == 0)
466 		filt_timerreset(kn);
467 
468 	knote_release(kn);
469 
470 	lwkt_relpooltoken(kq);
471 }
472 
473 /*
474  * data contains amount of time to sleep, in milliseconds
475  */
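/*
 * Illustrative userland example (sketch): EV_SET(&kev, 1, EVFILT_TIMER,
 * EV_ADD, 0, 500, NULL) arms a periodic 500ms timer; filt_timerreset()
 * below converts the millisecond count into ticks via tvtohz_high().
 */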
476 static int
477 filt_timerattach(struct knote *kn)
478 {
479 	struct callout *calloutp;
480 	int prev_ncallouts;
481 
482 	prev_ncallouts = atomic_fetchadd_int(&kq_ncallouts, 1);
483 	if (prev_ncallouts >= kq_calloutmax) {
484 		atomic_subtract_int(&kq_ncallouts, 1);
485 		kn->kn_hook = NULL;
486 		return (ENOMEM);
487 	}
488 
489 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
490 	calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
491 	callout_init_mp(calloutp);
492 	kn->kn_hook = (caddr_t)calloutp;
493 
494 	filt_timerreset(kn);
495 	return (0);
496 }
497 
498 /*
499  * This function is called with the knote flagged locked but it is
500  * still possible to race a callout event due to the callback blocking.
501  * We must call callout_terminate() instead of callout_stop() to deal
502  * with the race.
503  */
504 static void
505 filt_timerdetach(struct knote *kn)
506 {
507 	struct callout *calloutp;
508 
509 	calloutp = (struct callout *)kn->kn_hook;
510 	callout_terminate(calloutp);
511 	kn->kn_hook = NULL;
512 	kfree(calloutp, M_KQUEUE);
513 	atomic_subtract_int(&kq_ncallouts, 1);
514 }
515 
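/*
 * kn_data accumulates the number of timer expirations since the event
 * was last delivered; EV_CLEAR is forced at attach time, so the count
 * is reset when the event is read.
 */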
516 static int
517 filt_timer(struct knote *kn, long hint)
518 {
519 	return (kn->kn_data != 0);
520 }
521 
522 /*
523  * EVFILT_USER
524  */
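/*
 * Illustrative userland usage (sketch, kq being a descriptor returned
 * by kqueue() and kev a struct kevent): the event is added once and
 * later fired by a second kevent() call passing NOTE_TRIGGER in fflags:
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * The trigger is recorded in kn_ptr.hookid and reported by filt_user().
 */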
525 static int
526 filt_userattach(struct knote *kn)
527 {
528 	u_int ffctrl;
529 
530 	kn->kn_hook = NULL;
531 	if (kn->kn_sfflags & NOTE_TRIGGER)
532 		kn->kn_ptr.hookid = 1;
533 	else
534 		kn->kn_ptr.hookid = 0;
535 
536 	ffctrl = kn->kn_sfflags & NOTE_FFCTRLMASK;
537 	kn->kn_sfflags &= NOTE_FFLAGSMASK;
538 	switch (ffctrl) {
539 	case NOTE_FFNOP:
540 		break;
541 
542 	case NOTE_FFAND:
543 		kn->kn_fflags &= kn->kn_sfflags;
544 		break;
545 
546 	case NOTE_FFOR:
547 		kn->kn_fflags |= kn->kn_sfflags;
548 		break;
549 
550 	case NOTE_FFCOPY:
551 		kn->kn_fflags = kn->kn_sfflags;
552 		break;
553 
554 	default:
555 		/* XXX Return error? */
556 		break;
557 	}
558 	/* We just happen to copy this value as well. Undocumented. */
559 	kn->kn_data = kn->kn_sdata;
560 
561 	return 0;
562 }
563 
564 static void
565 filt_userdetach(struct knote *kn)
566 {
567 	/* nothing to do */
568 }
569 
570 static int
571 filt_user(struct knote *kn, long hint)
572 {
573 	return (kn->kn_ptr.hookid);
574 }
575 
576 static void
577 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
578 {
579 	u_int ffctrl;
580 
581 	switch (type) {
582 	case EVENT_REGISTER:
583 		if (kev->fflags & NOTE_TRIGGER)
584 			kn->kn_ptr.hookid = 1;
585 
586 		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
587 		kev->fflags &= NOTE_FFLAGSMASK;
588 		switch (ffctrl) {
589 		case NOTE_FFNOP:
590 			break;
591 
592 		case NOTE_FFAND:
593 			kn->kn_fflags &= kev->fflags;
594 			break;
595 
596 		case NOTE_FFOR:
597 			kn->kn_fflags |= kev->fflags;
598 			break;
599 
600 		case NOTE_FFCOPY:
601 			kn->kn_fflags = kev->fflags;
602 			break;
603 
604 		default:
605 			/* XXX Return error? */
606 			break;
607 		}
608 		/* We just happen to copy this value as well. Undocumented. */
609 		kn->kn_data = kev->data;
610 
611 		/*
612 		 * This is not the correct use of EV_CLEAR in an event
613 		 * modification, it should have been passed as a NOTE instead.
614 		 * But we need to maintain compatibility with Apple & FreeBSD.
615 		 *
616 		 * Note however that EV_CLEAR can still be used when doing
617 		 * the initial registration of the event and works as expected
618 		 * (clears the event on reception).
619 		 */
620 		if (kev->flags & EV_CLEAR) {
621 			kn->kn_ptr.hookid = 0;
622 			/*
623 			 * Clearing kn->kn_data is fine, since it gets set
624 			 * every time anyway. We just shouldn't clear
625 			 * kn->kn_fflags here, since that would limit the
626 			 * possible uses of this API. NOTE_FFAND or
627 			 * NOTE_FFCOPY should be used for explicitly clearing
628 			 * kn->kn_fflags.
629 			 */
630 			kn->kn_data = 0;
631 		}
632 		break;
633 
634         case EVENT_PROCESS:
635 		*kev = kn->kn_kevent;
636 		kev->fflags = kn->kn_fflags;
637 		kev->data = kn->kn_data;
638 		if (kn->kn_flags & EV_CLEAR) {
639 			kn->kn_ptr.hookid = 0;
640 			/* kn_data, kn_fflags handled by parent */
641 		}
642 		break;
643 
644 	default:
645 		panic("filt_usertouch() - invalid type (%ld)", type);
646 		break;
647 	}
648 }
649 
650 /*
651  * EVFILT_FS
652  */
653 struct klist fs_klist = SLIST_HEAD_INITIALIZER(&fs_klist);
654 
655 static int
656 filt_fsattach(struct knote *kn)
657 {
658 	kn->kn_flags |= EV_CLEAR;
659 	knote_insert(&fs_klist, kn);
660 
661 	return (0);
662 }
663 
664 static void
665 filt_fsdetach(struct knote *kn)
666 {
667 	knote_remove(&fs_klist, kn);
668 }
669 
670 static int
671 filt_fs(struct knote *kn, long hint)
672 {
673 	kn->kn_fflags |= hint;
674 	return (kn->kn_fflags != 0);
675 }
676 
677 /*
678  * Initialize a kqueue.
679  *
680  * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
681  *
682  * MPSAFE
683  */
684 void
685 kqueue_init(struct kqueue *kq, struct filedesc *fdp)
686 {
687 	TAILQ_INIT(&kq->kq_knpend);
688 	TAILQ_INIT(&kq->kq_knlist);
689 	kq->kq_count = 0;
690 	kq->kq_fdp = fdp;
691 	SLIST_INIT(&kq->kq_kqinfo.ki_note);
692 }
693 
694 /*
695  * Terminate a kqueue.  Freeing the actual kq itself is left up to the
696  * caller (it might be embedded in a lwp so we don't do it here).
697  *
698  * The kq's knlist must be completely eradicated so block on any
699  * processing races.
700  */
701 void
702 kqueue_terminate(struct kqueue *kq)
703 {
704 	struct knote *kn;
705 
706 	lwkt_getpooltoken(kq);
707 	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
708 		if (knote_acquire(kn))
709 			knote_detach_and_drop(kn);
710 	}
711 	lwkt_relpooltoken(kq);
712 
713 	if (kq->kq_knhash) {
714 		hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask);
715 		kq->kq_knhash = NULL;
716 		kq->kq_knhashmask = 0;
717 	}
718 }
719 
720 /*
721  * MPSAFE
722  */
723 int
724 sys_kqueue(struct kqueue_args *uap)
725 {
726 	struct thread *td = curthread;
727 	struct kqueue *kq;
728 	struct file *fp;
729 	int fd, error;
730 
731 	error = falloc(td->td_lwp, &fp, &fd);
732 	if (error)
733 		return (error);
734 	fp->f_flag = FREAD | FWRITE;
735 	fp->f_type = DTYPE_KQUEUE;
736 	fp->f_ops = &kqueueops;
737 
738 	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
739 	kqueue_init(kq, td->td_proc->p_fd);
740 	fp->f_data = kq;
741 
742 	fsetfd(kq->kq_fdp, fp, fd);
743 	uap->sysmsg_result = fd;
744 	fdrop(fp);
745 	return (error);
746 }
747 
748 /*
749  * Copy 'count' items into the destination list pointed to by uap->eventlist.
750  */
751 static int
752 kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
753 {
754 	struct kevent_copyin_args *kap;
755 	int error;
756 
757 	kap = (struct kevent_copyin_args *)arg;
758 
759 	error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
760 	if (error == 0) {
761 		kap->ka->eventlist += count;
762 		*res += count;
763 	} else {
764 		*res = -1;
765 	}
766 
767 	return (error);
768 }
769 
770 /*
771  * Copy at most 'max' items from the list pointed to by kap->changelist,
772  * return number of items in 'events'.
773  */
774 static int
775 kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
776 {
777 	struct kevent_copyin_args *kap;
778 	int error, count;
779 
780 	kap = (struct kevent_copyin_args *)arg;
781 
782 	count = min(kap->ka->nchanges - kap->pchanges, max);
783 	error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
784 	if (error == 0) {
785 		kap->ka->changelist += count;
786 		kap->pchanges += count;
787 		*events = count;
788 	}
789 
790 	return (error);
791 }
792 
793 /*
794  * MPSAFE
795  */
796 int
797 kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
798 	    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
799 	    struct timespec *tsp_in, int flags)
800 {
801 	struct kevent *kevp;
802 	struct timespec *tsp, ats;
803 	int i, n, total, error, nerrors = 0;
804 	int gobbled;
805 	int lres;
806 	int limit = kq_checkloop;
807 	int closedcounter;
808 	struct kevent kev[KQ_NEVENTS];
809 	struct knote marker;
810 	struct lwkt_token *tok;
811 
812 	if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec)
813 		atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);
814 
815 	tsp = tsp_in;
816 	*res = 0;
817 
818 	closedcounter = kq->kq_fdp->fd_closedcounter;
819 
820 	for (;;) {
821 		n = 0;
822 		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
823 		if (error)
824 			return error;
825 		if (n == 0)
826 			break;
827 		for (i = 0; i < n; ++i)
828 			kev[i].flags &= ~EV_SYSFLAGS;
829 		for (i = 0; i < n; ++i) {
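			/*
			 * kqueue_register() may consume more than one
			 * kevent per call; it reports how many it
			 * actually registered via 'gobbled' so the loop
			 * can skip past them.
			 */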
830 			gobbled = n - i;
831 			error = kqueue_register(kq, &kev[i], &gobbled);
832 			i += gobbled - 1;
833 			kevp = &kev[i];
834 
835 			/*
836 			 * If a registration returns an error we
837 			 * immediately post the error.  The kevent()
838 			 * call itself will fail with the error if
839 			 * no space is available for posting.
840 			 *
841 			 * Such errors normally bypass the timeout/blocking
842 			 * code.  However, if the copyoutfn function refuses
843 			 * to post the error (see sys_poll()), then we
844 			 * ignore it too.
845 			 */
846 			if (error || (kevp->flags & EV_RECEIPT)) {
847 				kevp->flags = EV_ERROR;
848 				kevp->data = error;
849 				lres = *res;
850 				kevent_copyoutfn(uap, kevp, 1, res);
851 				if (*res < 0) {
852 					return error;
853 				} else if (lres != *res) {
854 					nevents--;
855 					nerrors++;
856 				}
857 			}
858 		}
859 	}
860 	if (nerrors)
861 		return 0;
862 
863 	/*
864 	 * Acquire/wait for events - setup timeout
865 	 */
866 	if (tsp != NULL) {
867 		if (tsp->tv_sec || tsp->tv_nsec) {
868 			getnanouptime(&ats);
869 			timespecadd(tsp, &ats);		/* tsp = target time */
870 		}
871 	}
872 
873 	/*
874 	 * Loop as required.
875 	 *
876 	 * Collect as many events as we can. Sleeping on successive
877 	 * loops is disabled if copyoutfn has incremented (*res).
878 	 *
879 	 * The loop stops if an error occurs, all events have been
880 	 * scanned (the marker has been reached), or fewer than the
881 	 * maximum number of events is found.
882 	 *
883 	 * The copyoutfn function does not have to increment (*res) in
884 	 * order for the loop to continue.
885 	 *
886 	 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
887 	 */
888 	total = 0;
889 	error = 0;
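	/*
	 * The marker knote bounds our scans of kq_knpend; kqueue_scan()
	 * stops when it reaches it.
	 */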
890 	marker.kn_filter = EVFILT_MARKER;
891 	marker.kn_status = KN_PROCESSING;
892 	tok = lwkt_token_pool_lookup(kq);
893 	lwkt_gettoken(tok);
894 	TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
895 	lwkt_reltoken(tok);
896 	while ((n = nevents - total) > 0) {
897 		if (n > KQ_NEVENTS)
898 			n = KQ_NEVENTS;
899 
900 		/*
901 		 * If no events are pending sleep until timeout (if any)
902 		 * or an event occurs.
903 		 *
904 		 * After the sleep completes the marker is moved to the
905 		 * end of the list, making any received events available
906 		 * to our scan.
907 		 */
908 		if (kq->kq_count == 0 && *res == 0) {
909 			int timeout, ustimeout = 0;
910 
911 			if (tsp == NULL) {
912 				timeout = 0;
913 			} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
914 				error = EWOULDBLOCK;
915 				break;
916 			} else {
917 				struct timespec atx = *tsp;
918 
919 				getnanouptime(&ats);
920 				timespecsub(&atx, &ats);
921 				if (atx.tv_sec < 0) {
922 					error = EWOULDBLOCK;
923 					break;
924 				} else {
925 					timeout = atx.tv_sec > 24 * 60 * 60 ?
926 					    24 * 60 * 60 * hz :
927 					    tstohz_high(&atx);
928 				}
929 				if (flags & KEVENT_TIMEOUT_PRECISE &&
930 				    timeout != 0) {
931 					if (atx.tv_sec == 0 &&
932 					    atx.tv_nsec < kq_sleep_threshold) {
933 						DELAY(atx.tv_nsec / 1000);
934 						error = EWOULDBLOCK;
935 						break;
936 					} else if (atx.tv_sec < 2000) {
937 						ustimeout = atx.tv_sec *
938 						    1000000 + atx.tv_nsec/1000;
939 					} else {
940 						ustimeout = 2000000000;
941 					}
942 				}
943 			}
944 
945 			lwkt_gettoken(tok);
946 			if (kq->kq_count == 0) {
947 				kq->kq_sleep_cnt++;
948 				if (__predict_false(kq->kq_sleep_cnt == 0)) {
949 					/*
950 					 * Guard against possible wrapping.  And
951 					 * set it to 2, so that kqueue_wakeup()
952 					 * can wake everyone up.
953 					 */
954 					kq->kq_sleep_cnt = 2;
955 				}
956 				if ((flags & KEVENT_TIMEOUT_PRECISE) &&
957 				    timeout != 0) {
958 					error = precise_sleep(kq, PCATCH,
959 					    "kqread", ustimeout);
960 				} else {
961 					error = tsleep(kq, PCATCH, "kqread",
962 					    timeout);
963 				}
964 
965 				/* don't restart after signals... */
966 				if (error == ERESTART)
967 					error = EINTR;
968 				if (error) {
969 					lwkt_reltoken(tok);
970 					break;
971 				}
972 
973 				TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
974 				TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker,
975 				    kn_tqe);
976 			}
977 			lwkt_reltoken(tok);
978 		}
979 
980 		/*
981 		 * Process all received events
982 		 * Account for all non-spurious events in our total
983 		 */
984 		i = kqueue_scan(kq, kev, n, &marker, closedcounter);
985 		if (i) {
986 			lres = *res;
987 			error = kevent_copyoutfn(uap, kev, i, res);
988 			total += *res - lres;
989 			if (error)
990 				break;
991 		}
992 		if (limit && --limit == 0)
993 			panic("kqueue: checkloop failed i=%d", i);
994 
995 		/*
996 		 * Normally when fewer events are returned than requested
997 		 * we can stop.  However, if only spurious events were
998 		 * collected the copyout will not bump (*res) and we have
999 		 * to continue.
1000 		 */
1001 		if (i < n && *res)
1002 			break;
1003 
1004 		/*
1005 		 * Deal with an edge case where spurious events can cause
1006 		 * a loop to occur without moving the marker.  This can
1007 		 * prevent kqueue_scan() from picking up new events which
1008 		 * race us.  We must be sure to move the marker for this
1009 		 * case.
1010 		 *
1011 		 * NOTE: We do not want to move the marker if events
1012 		 *	 were scanned because normal kqueue operations
1013 		 *	 may reactivate events.  Moving the marker in
1014 		 *	 that case could result in duplicates for the
1015 		 *	 same event.
1016 		 */
1017 		if (i == 0) {
1018 			lwkt_gettoken(tok);
1019 			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
1020 			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
1021 			lwkt_reltoken(tok);
1022 		}
1023 	}
1024 	lwkt_gettoken(tok);
1025 	TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
1026 	lwkt_reltoken(tok);
1027 
1028 	/* Timeouts do not return EWOULDBLOCK. */
1029 	if (error == EWOULDBLOCK)
1030 		error = 0;
1031 	return error;
1032 }
1033 
1034 /*
1035  * MPALMOSTSAFE
1036  */
1037 int
1038 sys_kevent(struct kevent_args *uap)
1039 {
1040 	struct thread *td = curthread;
1041 	struct timespec ts, *tsp;
1042 	struct kqueue *kq;
1043 	struct file *fp = NULL;
1044 	struct kevent_copyin_args *kap, ka;
1045 	int error;
1046 
1047 	if (uap->timeout) {
1048 		error = copyin(uap->timeout, &ts, sizeof(ts));
1049 		if (error)
1050 			return (error);
1051 		tsp = &ts;
1052 	} else {
1053 		tsp = NULL;
1054 	}
1055 	fp = holdfp(td, uap->fd, -1);
1056 	if (fp == NULL)
1057 		return (EBADF);
1058 	if (fp->f_type != DTYPE_KQUEUE) {
1059 		fdrop(fp);
1060 		return (EBADF);
1061 	}
1062 
1063 	kq = (struct kqueue *)fp->f_data;
1064 
1065 	kap = &ka;
1066 	kap->ka = uap;
1067 	kap->pchanges = 0;
1068 
1069 	error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
1070 			    kevent_copyin, kevent_copyout, tsp, 0);
1071 
1072 	dropfp(td, uap->fd, fp);
1073 
1074 	return (error);
1075 }
1076 
1077 /*
1078  * Efficiently load multiple file pointers.  This significantly reduces
1079  * threaded overhead.  When doing simple polling we can depend on the
1080  * per-thread (fd,fp) cache.  With more descriptors, we batch.
1081  */
1082 static
1083 void
1084 floadkevfps(thread_t td, struct filedesc *fdp, struct kevent *kev,
1085 	    struct file **fp, int climit)
1086 {
1087 	struct filterops *fops;
1088 	int tdcache;
1089 
1090 	if (climit <= 2 && td->td_proc && td->td_proc->p_fd == fdp) {
1091 		tdcache = 1;
1092 	} else {
1093 		tdcache = 0;
1094 		spin_lock_shared(&fdp->fd_spin);
1095 	}
1096 
1097 	while (climit) {
1098 		*fp = NULL;
1099 		if (kev->filter < 0 &&
1100 		    kev->filter + EVFILT_SYSCOUNT >= 0) {
1101 			fops = sysfilt_ops[~kev->filter];
1102 			if (fops->f_flags & FILTEROP_ISFD) {
1103 				if (tdcache) {
1104 					*fp = holdfp(td, kev->ident, -1);
1105 				} else {
1106 					*fp = holdfp_fdp_locked(fdp,
1107 								kev->ident, -1);
1108 				}
1109 			}
1110 		}
1111 		--climit;
1112 		++fp;
1113 		++kev;
1114 	}
1115 	if (tdcache == 0)
1116 		spin_unlock_shared(&fdp->fd_spin);
1117 }
1118 
1119 /*
1120  * Register up to *countp kev's.  Always registers at least 1.
1121  *
1122  * The number registered is returned in *countp.
1123  *
1124  * If an error occurs or a kev is flagged EV_RECEIPT, it is
1125  * processed and included in *countp, and processing then
1126  * stops.
1127  */
1128 int
1129 kqueue_register(struct kqueue *kq, struct kevent *kev, int *countp)
1130 {
1131 	struct filedesc *fdp = kq->kq_fdp;
1132 	struct klist *list = NULL;
1133 	struct filterops *fops;
1134 	struct file *fp[KQ_NEVENTS];
1135 	struct knote *kn = NULL;
1136 	struct thread *td;
1137 	int error;
1138 	int count;
1139 	int climit;
1140 	int closedcounter;
1141 	struct knote_cache_list *cache_list;
1142 
1143 	td = curthread;
1144 	climit = *countp;
1145 	if (climit > KQ_NEVENTS)
1146 		climit = KQ_NEVENTS;
1147 	closedcounter = fdp->fd_closedcounter;
1148 	floadkevfps(td, fdp, kev, fp, climit);
1149 
1150 	lwkt_getpooltoken(kq);
1151 	count = 0;
1152 
1153 	/*
1154 	 * To avoid races, only one thread can register events on this
1155 	 * kqueue at a time.
1156 	 */
1157 	while (__predict_false(kq->kq_regtd != NULL && kq->kq_regtd != td)) {
1158 		kq->kq_state |= KQ_REGWAIT;
1159 		tsleep(&kq->kq_regtd, 0, "kqreg", 0);
1160 	}
1161 	if (__predict_false(kq->kq_regtd != NULL)) {
1162 		/* Recursive calling of kqueue_register() */
1163 		td = NULL;
1164 	} else {
1165 		/* Owner of the kq_regtd, i.e. td != NULL */
1166 		kq->kq_regtd = td;
1167 	}
1168 
1169 loop:
1170 	if (kev->filter < 0) {
1171 		if (kev->filter + EVFILT_SYSCOUNT < 0) {
1172 			error = EINVAL;
1173 			++count;
1174 			goto done;
1175 		}
1176 		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
1177 	} else {
1178 		/*
1179 		 * XXX
1180 		 * the filter attach routine is responsible for ensuring that
1181 		 * the identifier can be attached to it.
1182 		 */
1183 		error = EINVAL;
1184 		++count;
1185 		goto done;
1186 	}
1187 
1188 	if (fops->f_flags & FILTEROP_ISFD) {
1189 		/* validate descriptor */
1190 		if (fp[count] == NULL) {
1191 			error = EBADF;
1192 			++count;
1193 			goto done;
1194 		}
1195 	}
1196 
1197 	cache_list = &knote_cache_lists[mycpuid];
1198 	if (SLIST_EMPTY(&cache_list->knote_cache)) {
1199 		struct knote *new_kn;
1200 
1201 		new_kn = knote_alloc();
1202 		crit_enter();
1203 		SLIST_INSERT_HEAD(&cache_list->knote_cache, new_kn, kn_link);
1204 		cache_list->knote_cache_cnt++;
1205 		crit_exit();
1206 	}
1207 
1208 	if (fp[count] != NULL) {
1209 		list = &fp[count]->f_klist;
1210 	} else if (kq->kq_knhashmask) {
1211 		list = &kq->kq_knhash[
1212 			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
1213 	}
1214 	if (list != NULL) {
1215 		lwkt_getpooltoken(list);
1216 again:
1217 		SLIST_FOREACH(kn, list, kn_link) {
1218 			if (kn->kn_kq == kq &&
1219 			    kn->kn_filter == kev->filter &&
1220 			    kn->kn_id == kev->ident) {
1221 				if (knote_acquire(kn) == 0)
1222 					goto again;
1223 				break;
1224 			}
1225 		}
1226 		lwkt_relpooltoken(list);
1227 	}
1228 
1229 	/*
1230 	 * NOTE: At this point if kn is non-NULL we will have acquired
1231 	 *	 it and set KN_PROCESSING.
1232 	 */
1233 	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
1234 		error = ENOENT;
1235 		++count;
1236 		goto done;
1237 	}
1238 
1239 	/*
1240 	 * kn now contains the matching knote, or NULL if no match
1241 	 */
1242 	if (kev->flags & EV_ADD) {
1243 		if (kn == NULL) {
1244 			crit_enter();
1245 			kn = SLIST_FIRST(&cache_list->knote_cache);
1246 			if (kn == NULL) {
1247 				crit_exit();
1248 				kn = knote_alloc();
1249 			} else {
1250 				SLIST_REMOVE_HEAD(&cache_list->knote_cache,
1251 				    kn_link);
1252 				cache_list->knote_cache_cnt--;
1253 				crit_exit();
1254 			}
1255 			kn->kn_fp = fp[count];
1256 			kn->kn_kq = kq;
1257 			kn->kn_fop = fops;
1258 
1259 			/*
1260 			 * apply reference count to knote structure, and
1261 			 * do not release it at the end of this routine.
1262 			 */
1263 			fp[count] = NULL;	/* safety */
1264 
1265 			kn->kn_sfflags = kev->fflags;
1266 			kn->kn_sdata = kev->data;
1267 			kev->fflags = 0;
1268 			kev->data = 0;
1269 			kn->kn_kevent = *kev;
1270 
1271 			/*
1272 			 * KN_PROCESSING prevents the knote from getting
1273 			 * ripped out from under us while we are trying
1274 			 * to attach it, in case the attach blocks.
1275 			 */
1276 			kn->kn_status = KN_PROCESSING;
1277 			knote_attach(kn);
1278 			if ((error = filter_attach(kn)) != 0) {
1279 				kn->kn_status |= KN_DELETING | KN_REPROCESS;
1280 				knote_drop(kn);
1281 				++count;
1282 				goto done;
1283 			}
1284 
1285 			/*
1286 			 * Interlock against close races which either tried
1287 			 * to remove our knote while we were blocked or missed
1288 			 * it entirely prior to our attachment.  We do not
1289 			 * want to end up with a knote on a closed descriptor.
1290 			 */
1291 			if ((fops->f_flags & FILTEROP_ISFD) &&
1292 			    checkfdclosed(curthread, fdp, kev->ident, kn->kn_fp,
1293 					  closedcounter)) {
1294 				kn->kn_status |= KN_DELETING | KN_REPROCESS;
1295 			}
1296 		} else {
1297 			/*
1298 			 * The user may change some filter values after the
1299 			 * initial EV_ADD, but doing so will not reset any
1300 			 * filters which have already been triggered.
1301 			 */
1302 			KKASSERT(kn->kn_status & KN_PROCESSING);
1303 			if (fops == &user_filtops) {
1304 				filt_usertouch(kn, kev, EVENT_REGISTER);
1305 			} else {
1306 				kn->kn_sfflags = kev->fflags;
1307 				kn->kn_sdata = kev->data;
1308 				kn->kn_kevent.udata = kev->udata;
1309 			}
1310 		}
1311 
1312 		/*
1313 		 * Execute the filter event to immediately activate the
1314 		 * knote if necessary.  If reprocessing events are pending
1315 		 * due to blocking above we do not run the filter here
1316 		 * but instead let knote_release() do it.  Otherwise we
1317 		 * might run the filter on a deleted event.
1318 		 */
1319 		if ((kn->kn_status & KN_REPROCESS) == 0) {
1320 			if (filter_event(kn, 0))
1321 				KNOTE_ACTIVATE(kn);
1322 		}
1323 	} else if (kev->flags & EV_DELETE) {
1324 		/*
1325 		 * Delete the existing knote
1326 		 */
1327 		knote_detach_and_drop(kn);
1328 		error = 0;
1329 		++count;
1330 		goto done;
1331 	} else {
1332 		/*
1333 		 * Modify an existing event.
1334 		 *
1335 		 * The user may change some filter values after the
1336 		 * initial EV_ADD, but doing so will not reset any
1337 		 * filters which have already been triggered.
1338 		 */
1339 		KKASSERT(kn->kn_status & KN_PROCESSING);
1340 		if (fops == &user_filtops) {
1341 			filt_usertouch(kn, kev, EVENT_REGISTER);
1342 		} else {
1343 			kn->kn_sfflags = kev->fflags;
1344 			kn->kn_sdata = kev->data;
1345 			kn->kn_kevent.udata = kev->udata;
1346 		}
1347 
1348 		/*
1349 		 * Execute the filter event to immediately activate the
1350 		 * knote if necessary.  If reprocessing events are pending
1351 		 * due to blocking above we do not run the filter here
1352 		 * but instead let knote_release() do it.  Otherwise we
1353 		 * might run the filter on a deleted event.
1354 		 */
1355 		if ((kn->kn_status & KN_REPROCESS) == 0) {
1356 			if (filter_event(kn, 0))
1357 				KNOTE_ACTIVATE(kn);
1358 		}
1359 	}
1360 
1361 	/*
1362 	 * Disablement does not deactivate a knote here.
1363 	 */
1364 	if ((kev->flags & EV_DISABLE) &&
1365 	    ((kn->kn_status & KN_DISABLED) == 0)) {
1366 		kn->kn_status |= KN_DISABLED;
1367 	}
1368 
1369 	/*
1370 	 * Re-enablement may have to immediately enqueue an active knote.
1371 	 */
1372 	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
1373 		kn->kn_status &= ~KN_DISABLED;
1374 		if ((kn->kn_status & KN_ACTIVE) &&
1375 		    ((kn->kn_status & KN_QUEUED) == 0)) {
1376 			knote_enqueue(kn);
1377 		}
1378 	}
1379 
1380 	/*
1381 	 * Handle any required reprocessing
1382 	 */
1383 	knote_release(kn);
1384 	/* kn may be invalid now */
1385 
1386 	/*
1387 	 * Loop control.  We stop on errors (above), and also stop after
1388 	 * processing EV_RECEIPT, so the caller can process it.
1389 	 */
1390 	++count;
1391 	if (kev->flags & EV_RECEIPT) {
1392 		error = 0;
1393 		goto done;
1394 	}
1395 	++kev;
1396 	if (count < climit) {
1397 		if (fp[count-1])		/* drop unprocessed fp */
1398 			fdrop(fp[count-1]);
1399 		goto loop;
1400 	}
1401 
1402 	/*
1403 	 * Cleanup
1404 	 */
1405 done:
1406 	if (td != NULL) { /* Owner of the kq_regtd */
1407 		kq->kq_regtd = NULL;
1408 		if (__predict_false(kq->kq_state & KQ_REGWAIT)) {
1409 			kq->kq_state &= ~KQ_REGWAIT;
1410 			wakeup(&kq->kq_regtd);
1411 		}
1412 	}
1413 	lwkt_relpooltoken(kq);
1414 
1415 	/*
1416 	 * Drop unprocessed file pointers
1417 	 */
1418 	*countp = count;
1419 	if (count && fp[count-1])
1420 		fdrop(fp[count-1]);
1421 	while (count < climit) {
1422 		if (fp[count])
1423 			fdrop(fp[count]);
1424 		++count;
1425 	}
1426 	return (error);
1427 }
1428 
1429 /*
1430  * Scan the kqueue, return the number of active events placed in kevp up
1431  * to count.
1432  *
1433  * Continuous mode events may get recycled, do not continue scanning past
1434  * marker unless no events have been collected.
1435  */
1436 static int
1437 kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
1438             struct knote *marker, int closedcounter)
1439 {
1440         struct knote *kn, local_marker;
1441 	thread_t td = curthread;
1442         int total;
1443 
1444 	total = 0;
1445 	local_marker.kn_filter = EVFILT_MARKER;
1446 	local_marker.kn_status = KN_PROCESSING;
1447 
1448 	lwkt_getpooltoken(kq);
1449 
1450 	/*
1451 	 * Collect events.
1452 	 */
1453 	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
1454 	while (count) {
1455 		kn = TAILQ_NEXT(&local_marker, kn_tqe);
1456 		if (kn->kn_filter == EVFILT_MARKER) {
1457 			/* Marker reached, we are done */
1458 			if (kn == marker)
1459 				break;
1460 
1461 			/* Move local marker past some other thread's marker */
1462 			kn = TAILQ_NEXT(kn, kn_tqe);
1463 			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
1464 			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
1465 			continue;
1466 		}
1467 
1468 		/*
1469 		 * We can't skip a knote undergoing processing, otherwise
1470 		 * we risk not returning it when the user process expects
1471 		 * it should be returned.  Sleep and retry.
1472 		 */
1473 		if (knote_acquire(kn) == 0)
1474 			continue;
1475 
1476 		/*
1477 		 * Remove the event for processing.
1478 		 *
1479 		 * WARNING!  We must leave KN_QUEUED set to prevent the
1480 		 *	     event from being KNOTE_ACTIVATE()d while
1481 		 *	     the queue state is in limbo, in case we
1482 		 *	     block.
1483 		 */
1484 		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
1485 		kq->kq_count--;
1486 
1487 		/*
1488 		 * We have to deal with an extremely important race against
1489 		 * file descriptor close()s here.  The file descriptor can
1490 		 * disappear MPSAFE, and there is a small window of
1491 		 * disappear out from under us (MPSAFE), and there is a small window of
1492 		 *
1493 		 * If we hit that window here while doselect or dopoll is
1494 		 * trying to delete a spurious event they will not be able
1495 		 * to match up the event against a knote and will go haywire.
1496 		 */
1497 		if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
1498 		    checkfdclosed(td, kq->kq_fdp, kn->kn_kevent.ident,
1499 				  kn->kn_fp, closedcounter)) {
1500 			kn->kn_status |= KN_DELETING | KN_REPROCESS;
1501 		}
1502 
1503 		if (kn->kn_status & KN_DISABLED) {
1504 			/*
1505 			 * If disabled we ensure the event is not queued
1506 			 * but leave its active bit set.  On re-enablement
1507 			 * the event may be immediately triggered.
1508 			 */
1509 			kn->kn_status &= ~KN_QUEUED;
1510 		} else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
1511 			   (kn->kn_status & KN_DELETING) == 0 &&
1512 			   filter_event(kn, 0) == 0) {
1513 			/*
1514 			 * If not running in one-shot mode and the event
1515 			 * is no longer present we ensure it is removed
1516 			 * from the queue and ignore it.
1517 			 */
1518 			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
1519 		} else {
1520 			/*
1521 			 * Post the event
1522 			 */
1523 			if (kn->kn_fop == &user_filtops)
1524 				filt_usertouch(kn, kevp, EVENT_PROCESS);
1525 			else
1526 				*kevp = kn->kn_kevent;
1527 			++kevp;
1528 			++total;
1529 			--count;
1530 
1531 			if (kn->kn_flags & EV_ONESHOT) {
1532 				kn->kn_status &= ~KN_QUEUED;
1533 				kn->kn_status |= KN_DELETING | KN_REPROCESS;
1534 			} else {
1535 				if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
1536 					if (kn->kn_flags & EV_CLEAR) {
1537 						kn->kn_data = 0;
1538 						kn->kn_fflags = 0;
1539 					}
1540 					if (kn->kn_flags & EV_DISPATCH) {
1541 						kn->kn_status |= KN_DISABLED;
1542 					}
1543 					kn->kn_status &= ~(KN_QUEUED |
1544 							   KN_ACTIVE);
1545 				} else {
1546 					TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
1547 					kq->kq_count++;
1548 				}
1549 			}
1550 		}
1551 
1552 		/*
1553 		 * Handle any post-processing states
1554 		 */
1555 		knote_release(kn);
1556 	}
1557 	TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
1558 
1559 	lwkt_relpooltoken(kq);
1560 	return (total);
1561 }
1562 
1563 /*
1564  * XXX
1565  * This could be expanded to call kqueue_scan, if desired.
1566  *
1567  * MPSAFE
1568  */
1569 static int
1570 kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
1571 {
1572 	return (ENXIO);
1573 }
1574 
1575 /*
1576  * MPSAFE
1577  */
1578 static int
1579 kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
1580 {
1581 	return (ENXIO);
1582 }
1583 
1584 /*
1585  * MPALMOSTSAFE
1586  */
1587 static int
1588 kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
1589 	     struct ucred *cred, struct sysmsg *msg)
1590 {
1591 	struct kqueue *kq;
1592 	int error;
1593 
1594 	kq = (struct kqueue *)fp->f_data;
1595 	lwkt_getpooltoken(kq);
1596 	switch(com) {
1597 	case FIOASYNC:
1598 		if (*(int *)data)
1599 			kq->kq_state |= KQ_ASYNC;
1600 		else
1601 			kq->kq_state &= ~KQ_ASYNC;
1602 		error = 0;
1603 		break;
1604 	case FIOSETOWN:
1605 		error = fsetown(*(int *)data, &kq->kq_sigio);
1606 		break;
1607 	default:
1608 		error = ENOTTY;
1609 		break;
1610 	}
1611 	lwkt_relpooltoken(kq);
1612 	return (error);
1613 }
1614 
1615 /*
1616  * MPSAFE
1617  */
1618 static int
1619 kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
1620 {
1621 	struct kqueue *kq = (struct kqueue *)fp->f_data;
1622 
1623 	bzero((void *)st, sizeof(*st));
1624 	st->st_size = kq->kq_count;
1625 	st->st_blksize = sizeof(struct kevent);
1626 	st->st_mode = S_IFIFO;
1627 	return (0);
1628 }
1629 
1630 /*
1631  * MPSAFE
1632  */
1633 static int
1634 kqueue_close(struct file *fp)
1635 {
1636 	struct kqueue *kq = (struct kqueue *)fp->f_data;
1637 
1638 	kqueue_terminate(kq);
1639 
1640 	fp->f_data = NULL;
1641 	funsetown(&kq->kq_sigio);
1642 
1643 	kfree(kq, M_KQUEUE);
1644 	return (0);
1645 }
1646 
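/*
 * Wake up threads sleeping in kern_kevent().  kq_sleep_cnt allows the
 * cheaper wakeup_one() when only a single thread is known to be asleep;
 * the counter is forced to 2 on wrap so that everyone is woken.
 */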
1647 static void
1648 kqueue_wakeup(struct kqueue *kq)
1649 {
1650 	if (kq->kq_sleep_cnt) {
1651 		u_int sleep_cnt = kq->kq_sleep_cnt;
1652 
1653 		kq->kq_sleep_cnt = 0;
1654 		if (sleep_cnt == 1)
1655 			wakeup_one(kq);
1656 		else
1657 			wakeup(kq);
1658 	}
1659 	KNOTE(&kq->kq_kqinfo.ki_note, 0);
1660 }
1661 
1662 /*
1663  * Calls filterops f_attach function, acquiring mplock if filter is not
1664  * marked as FILTEROP_MPSAFE.
1665  *
1666  * Caller must be holding the related kq token
1667  */
1668 static int
1669 filter_attach(struct knote *kn)
1670 {
1671 	int ret;
1672 
1673 	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
1674 		ret = kn->kn_fop->f_attach(kn);
1675 	} else {
1676 		get_mplock();
1677 		ret = kn->kn_fop->f_attach(kn);
1678 		rel_mplock();
1679 	}
1680 	return (ret);
1681 }
1682 
1683 /*
1684  * Detach the knote and drop it, destroying the knote.
1685  *
1686  * Calls filterops f_detach function, acquiring mplock if filter is not
1687  * marked as FILTEROP_MPSAFE.
1688  *
1689  * Caller must be holding the related kq token
1690  */
1691 static void
1692 knote_detach_and_drop(struct knote *kn)
1693 {
1694 	kn->kn_status |= KN_DELETING | KN_REPROCESS;
1695 	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
1696 		kn->kn_fop->f_detach(kn);
1697 	} else {
1698 		get_mplock();
1699 		kn->kn_fop->f_detach(kn);
1700 		rel_mplock();
1701 	}
1702 	knote_drop(kn);
1703 }
1704 
1705 /*
1706  * Calls filterops f_event function, acquiring mplock if filter is not
1707  * marked as FILTEROP_MPSAFE.
1708  *
1709  * If the knote is in the middle of being created or deleted we cannot
1710  * safely call the filter op.
1711  *
1712  * Caller must be holding the related kq token
1713  */
1714 static int
1715 filter_event(struct knote *kn, long hint)
1716 {
1717 	int ret;
1718 
1719 	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
1720 		ret = kn->kn_fop->f_event(kn, hint);
1721 	} else {
1722 		get_mplock();
1723 		ret = kn->kn_fop->f_event(kn, hint);
1724 		rel_mplock();
1725 	}
1726 	return (ret);
1727 }
1728 
1729 /*
1730  * Walk down a list of knotes, activating them if their event has triggered.
1731  *
1732  * If we encounter any knotes which are undergoing processing we just mark
1733  * them for reprocessing and do not try to [re]activate the knote.  However,
1734  * if a hint is being passed we have to wait and that makes things a bit
1735  * sticky.
1736  */
1737 void
1738 knote(struct klist *list, long hint)
1739 {
1740 	struct kqueue *kq;
1741 	struct knote *kn;
1742 	struct knote *kntmp;
1743 
1744 	lwkt_getpooltoken(list);
1745 restart:
1746 	SLIST_FOREACH(kn, list, kn_next) {
1747 		kq = kn->kn_kq;
1748 		lwkt_getpooltoken(kq);
1749 
1750 		/* temporary verification hack */
1751 		SLIST_FOREACH(kntmp, list, kn_next) {
1752 			if (kn == kntmp)
1753 				break;
1754 		}
1755 		if (kn != kntmp || kn->kn_kq != kq) {
1756 			lwkt_relpooltoken(kq);
1757 			goto restart;
1758 		}
1759 
1760 		if (kn->kn_status & KN_PROCESSING) {
1761 			/*
1762 			 * Someone else is processing the knote, ask the
1763 			 * other thread to reprocess it and don't mess
1764 			 * with it otherwise.
1765 			 */
1766 			if (hint == 0) {
1767 				kn->kn_status |= KN_REPROCESS;
1768 				lwkt_relpooltoken(kq);
1769 				continue;
1770 			}
1771 
1772 			/*
1773 			 * If the hint is non-zero we have to wait or risk
1774 			 * losing the state the caller is trying to update.
1775 			 *
1776 			 * XXX This is a real problem, certain process
1777 			 *     and signal filters will bump kn_data for
1778 			 *     already-processed notes more than once if
1779 			 *     we restart the list scan.  FIXME.
1780 			 */
1781 			kn->kn_status |= KN_WAITING | KN_REPROCESS;
1782 			tsleep(kn, 0, "knotec", hz);
1783 			lwkt_relpooltoken(kq);
1784 			goto restart;
1785 		}
1786 
1787 		/*
1788 		 * Become the reprocessing master ourselves.
1789 		 *
1790 		 * If hint is non-zero running the event is mandatory
1791 		 * when not deleting so do it whether reprocessing is
1792 		 * set or not.
1793 		 */
1794 		kn->kn_status |= KN_PROCESSING;
1795 		if ((kn->kn_status & KN_DELETING) == 0) {
1796 			if (filter_event(kn, hint))
1797 				KNOTE_ACTIVATE(kn);
1798 		}
1799 		if (knote_release(kn)) {
1800 			lwkt_relpooltoken(kq);
1801 			goto restart;
1802 		}
1803 		lwkt_relpooltoken(kq);
1804 	}
1805 	lwkt_relpooltoken(list);
1806 }
1807 
1808 /*
1809  * Insert knote at head of klist.
1810  *
1811  * This function may only be called via a filter function and thus
1812  * kq_token should already be held and marked for processing.
1813  */
1814 void
1815 knote_insert(struct klist *klist, struct knote *kn)
1816 {
1817 	lwkt_getpooltoken(klist);
1818 	KKASSERT(kn->kn_status & KN_PROCESSING);
1819 	SLIST_INSERT_HEAD(klist, kn, kn_next);
1820 	lwkt_relpooltoken(klist);
1821 }
1822 
1823 /*
1824  * Remove knote from a klist
1825  *
1826  * This function may only be called via a filter function and thus
1827  * kq_token should already be held and marked for processing.
1828  */
1829 void
1830 knote_remove(struct klist *klist, struct knote *kn)
1831 {
1832 	lwkt_getpooltoken(klist);
1833 	KKASSERT(kn->kn_status & KN_PROCESSING);
1834 	SLIST_REMOVE(klist, kn, knote, kn_next);
1835 	lwkt_relpooltoken(klist);
1836 }
1837 
1838 void
1839 knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
1840 		    struct filterops *ops, void *hook)
1841 {
1842 	struct kqueue *kq;
1843 	struct knote *kn;
1844 
1845 	lwkt_getpooltoken(&src->ki_note);
1846 	lwkt_getpooltoken(&dst->ki_note);
1847 	while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
1848 		kq = kn->kn_kq;
1849 		lwkt_getpooltoken(kq);
1850 		if (SLIST_FIRST(&src->ki_note) != kn || kn->kn_kq != kq) {
1851 			lwkt_relpooltoken(kq);
1852 			continue;
1853 		}
1854 		if (knote_acquire(kn)) {
1855 			knote_remove(&src->ki_note, kn);
1856 			kn->kn_fop = ops;
1857 			kn->kn_hook = hook;
1858 			knote_insert(&dst->ki_note, kn);
1859 			knote_release(kn);
1860 			/* kn may be invalid now */
1861 		}
1862 		lwkt_relpooltoken(kq);
1863 	}
1864 	lwkt_relpooltoken(&dst->ki_note);
1865 	lwkt_relpooltoken(&src->ki_note);
1866 }
1867 
1868 /*
1869  * Remove all knotes referencing a specified fd
1870  */
1871 void
1872 knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
1873 {
1874 	struct kqueue *kq;
1875 	struct knote *kn;
1876 	struct knote *kntmp;
1877 
1878 	lwkt_getpooltoken(&fp->f_klist);
1879 restart:
1880 	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
1881 		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
1882 			kq = kn->kn_kq;
1883 			lwkt_getpooltoken(kq);
1884 
1885 			/* temporary verification hack */
1886 			SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
1887 				if (kn == kntmp)
1888 					break;
1889 			}
1890 			if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
1891 			    kn->kn_id != fd || kn->kn_kq != kq) {
1892 				lwkt_relpooltoken(kq);
1893 				goto restart;
1894 			}
1895 			if (knote_acquire(kn))
1896 				knote_detach_and_drop(kn);
1897 			lwkt_relpooltoken(kq);
1898 			goto restart;
1899 		}
1900 	}
1901 	lwkt_relpooltoken(&fp->f_klist);
1902 }
1903 
1904 /*
1905  * Low level attach function.
1906  *
1907  * The knote should already be marked for processing.
1908  * Caller must hold the related kq token.
1909  */
1910 static void
1911 knote_attach(struct knote *kn)
1912 {
1913 	struct klist *list;
1914 	struct kqueue *kq = kn->kn_kq;
1915 
1916 	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
1917 		KKASSERT(kn->kn_fp);
1918 		list = &kn->kn_fp->f_klist;
1919 	} else {
1920 		if (kq->kq_knhashmask == 0)
1921 			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
1922 						 &kq->kq_knhashmask);
1923 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
1924 	}
1925 	lwkt_getpooltoken(list);
1926 	SLIST_INSERT_HEAD(list, kn, kn_link);
1927 	lwkt_relpooltoken(list);
1928 	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
1929 }
1930 
1931 /*
1932  * Low level drop function.
1933  *
1934  * The knote should already be marked for processing.
1935  * Caller must hold the related kq token.
1936  */
1937 static void
1938 knote_drop(struct knote *kn)
1939 {
1940 	struct kqueue *kq;
1941 	struct klist *list;
1942 
1943 	kq = kn->kn_kq;
1944 
1945 	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
1946 		list = &kn->kn_fp->f_klist;
1947 	else
1948 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
1949 
1950 	lwkt_getpooltoken(list);
1951 	SLIST_REMOVE(list, kn, knote, kn_link);
1952 	lwkt_relpooltoken(list);
1953 	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
1954 	if (kn->kn_status & KN_QUEUED)
1955 		knote_dequeue(kn);
1956 	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
1957 		fdrop(kn->kn_fp);
1958 		kn->kn_fp = NULL;
1959 	}
1960 	knote_free(kn);
1961 }
1962 
1963 /*
1964  * Low level enqueue function.
1965  *
1966  * The knote should already be marked for processing.
1967  * Caller must be holding the kq token
1968  */
1969 static void
1970 knote_enqueue(struct knote *kn)
1971 {
1972 	struct kqueue *kq = kn->kn_kq;
1973 
1974 	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
1975 	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
1976 	kn->kn_status |= KN_QUEUED;
1977 	++kq->kq_count;
1978 
1979 	/*
1980 	 * Send SIGIO on request (typically set up as a mailbox signal)
1981 	 */
1982 	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
1983 		pgsigio(kq->kq_sigio, SIGIO, 0);
1984 
1985 	kqueue_wakeup(kq);
1986 }
1987 
1988 /*
1989  * Low level dequeue function.
1990  *
1991  * The knote should already be marked for processing.
1992  * Caller must be holding the kq token
1993  */
1994 static void
1995 knote_dequeue(struct knote *kn)
1996 {
1997 	struct kqueue *kq = kn->kn_kq;
1998 
1999 	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
2000 	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
2001 	kn->kn_status &= ~KN_QUEUED;
2002 	kq->kq_count--;
2003 }
2004 
2005 static struct knote *
2006 knote_alloc(void)
2007 {
2008 	return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
2009 }
2010 
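/*
 * Free a knote, preferring the per-cpu knote cache (consumed by
 * kqueue_register()) over kfree() when the cache has room.
 */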
2011 static void
2012 knote_free(struct knote *kn)
2013 {
2014 	struct knote_cache_list *cache_list;
2015 
2016 	cache_list = &knote_cache_lists[mycpuid];
2017 	if (cache_list->knote_cache_cnt < KNOTE_CACHE_MAX) {
2018 		crit_enter();
2019 		SLIST_INSERT_HEAD(&cache_list->knote_cache, kn, kn_link);
2020 		cache_list->knote_cache_cnt++;
2021 		crit_exit();
2022 		return;
2023 	}
2024 	kfree(kn, M_KQUEUE);
2025 }
2026 
2027 struct sleepinfo {
2028 	void *ident;
2029 	int timedout;
2030 };
2031 
2032 static void
2033 precise_sleep_intr(systimer_t info, int in_ipi, struct intrframe *frame)
2034 {
2035 	struct sleepinfo *si;
2036 
2037 	si = info->data;
2038 	si->timedout = 1;
2039 	wakeup(si->ident);
2040 }
2041 
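/*
 * Sleep for at most 'us' microseconds, using a one-shot systimer for
 * the wakeup to get finer-than-tick resolution.  Returns EWOULDBLOCK
 * if the timer expired.
 */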
2042 static int
2043 precise_sleep(void *ident, int flags, const char *wmesg, int us)
2044 {
2045 	struct systimer info;
2046 	struct sleepinfo si = {
2047 		.ident = ident,
2048 		.timedout = 0,
2049 	};
2050 	int r;
2051 
2052 	tsleep_interlock(ident, flags);
2053 	systimer_init_oneshot(&info, precise_sleep_intr, &si,
2054 	    us == 0 ? 1 : us);
2055 	r = tsleep(ident, flags | PINTERLOCKED, wmesg, 0);
2056 	systimer_del(&info);
2057 	if (si.timedout)
2058 		r = EWOULDBLOCK;
2059 
2060 	return r;
2061 }
2062