/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysmsg.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/ktr.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>
#include <sys/spinlock2.h>

#define EVENT_REGISTER	1
#define EVENT_PROCESS	2

static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
	const struct kevent_args *ka;
	struct kevent		*eventlist;
	const struct kevent	*changelist;
	int			pchanges;
};

#define KNOTE_CACHE_MAX		64

struct knote_cache_list {
	struct klist		knote_cache;
	int			knote_cache_cnt;
} __cachealign;

static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct knote *marker, int closedcounter, int flags);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);
static void	kqueue_wakeup(struct kqueue *kq);
static int	filter_attach(struct knote *kn);
static int	filter_event(struct knote *kn, long hint);

/*
 * MPSAFE
 */
static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};

static void	knote_attach(struct knote *kn);
static void	knote_drop(struct knote *kn);
static void	knote_detach_and_drop(struct knote *kn);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static struct knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	precise_sleep_intr(systimer_t info, int in_ipi,
		    struct intrframe *frame);
static int	precise_sleep(void *ident, int flags, const char *wmesg,
		    int us);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);
static int	filt_fsattach(struct knote *kn);
static void	filt_fsdetach(struct knote *kn);
static int	filt_fs(struct knote *kn, long hint);

static struct filterops file_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ FILTEROP_MPSAFE, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ FILTEROP_MPSAFE, filt_timerattach, filt_timerdetach, filt_timer };
static struct filterops user_filtops =
	{ FILTEROP_MPSAFE, filt_userattach, filt_userdetach, filt_user };
static struct filterops fs_filtops =
	{ FILTEROP_MPSAFE, filt_fsattach, filt_fsdetach, filt_fs };

static int		kq_ncallouts = 0;
static int		kq_calloutmax = 65536;
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int		kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of loops for kqueue scan");
static int		kq_sleep_threshold = 20000;
SYSCTL_INT(_kern, OID_AUTO, kq_sleep_threshold, CTLFLAG_RW,
    &kq_sleep_threshold, 0, "Minimum sleep duration without busy-looping");

#define KNOTE_ACTIVATE(kn) do { 					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_EXCEPT */
	&user_filtops,			/* EVFILT_USER */
	&fs_filtops,			/* EVFILT_FS */
};

static struct knote_cache_list	knote_cache_lists[MAXCPU];
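/*
 * Illustrative note (sketch, not compiled): system filters are small
 * negative numbers, so ~kev->filter maps a filter id onto a 0-based
 * index into sysfilt_ops[].  Assuming the usual EVFILT_READ == -1 and
 * EVFILT_WRITE == -2:
 *
 *	sysfilt_ops[~EVFILT_READ]  == sysfilt_ops[0] == &file_filtops
 *	sysfilt_ops[~EVFILT_WRITE] == sysfilt_ops[1] == &file_filtops
 *
 * kqueue_register() and floadkevfps() below rely on this mapping when
 * validating kev->filter.
 */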
/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 *
 * Related kq token must be held.
 */
static __inline int
knote_acquire(struct knote *kn)
{
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_WAITING | KN_REPROCESS;
		tsleep(kn, 0, "kqepts", hz);
		/* knote may be stale now */
		return(0);
	}
	kn->kn_status |= KN_PROCESSING;
	return(1);
}

/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Caller must be holding the related kq token
 *
 * Non-zero is returned if the knote is destroyed or detached.
 */
static __inline int
knote_release(struct knote *kn)
{
	int ret;

	while (kn->kn_status & KN_REPROCESS) {
		kn->kn_status &= ~KN_REPROCESS;
		if (kn->kn_status & KN_WAITING) {
			kn->kn_status &= ~KN_WAITING;
			wakeup(kn);
		}
		if (kn->kn_status & KN_DELETING) {
			knote_detach_and_drop(kn);
			return(1);
			/* NOT REACHED */
		}
		if (filter_event(kn, 0))
			KNOTE_ACTIVATE(kn);
	}
	if (kn->kn_status & KN_DETACHED)
		ret = 1;
	else
		ret = 0;
	kn->kn_status &= ~KN_PROCESSING;
	/* kn should not be accessed anymore */
	return ret;
}

static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}

/*
 * MPSAFE
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EOPNOTSUPP);

	kn->kn_fop = &kqread_filtops;
	knote_insert(&kq->kq_kqinfo.ki_note, kn);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	knote_remove(&kq->kq_kqinfo.ki_note, kn);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL) {
		return (ESRCH);
	}
	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		if (p)
			PRELE(p);
		return (EACCES);
	}

	lwkt_gettoken(&p->p_token);
	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	knote_insert(&p->p_klist, kn);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (0);
}
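/*
 * Userland usage sketch (illustrative only, not part of this file):
 * watching a process for exit and fork, assuming kq is an open kqueue
 * descriptor and pid names the target process:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
 *	       NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 *
 * With NOTE_TRACK, filt_proc() below auto-attaches a new knote to each
 * forked child and reports NOTE_CHILD on it.
 */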
/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process no longer exists.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	p = kn->kn_ptr.p_proc;
	knote_remove(&p->p_klist, kn);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will be poof,
	 * gone later on.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;
		if ((kn->kn_status & KN_DETACHED) == 0) {
			PHOLD(p);
			knote_remove(&p->p_klist, kn);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
			PRELE(p);
		}
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;
		int n;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		n = 1;
		error = kqueue_register(kn->kn_kq, &kev, &n, 0);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}

static void
filt_timerreset(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);
	calloutp = (struct callout *)kn->kn_hook;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);
}

/*
 * The callout interlocks with callout_stop() but can still
 * race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct kqueue *kq = kn->kn_kq;

	lwkt_getpooltoken(kq);

	/*
	 * Open-code knote_acquire(), since we can't sleep in a callout;
	 * however, we do need to record this expiration.
	 */
	kn->kn_data++;
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_REPROCESS;
		if ((kn->kn_status & KN_DELETING) == 0 &&
		    (kn->kn_flags & EV_ONESHOT) == 0)
			filt_timerreset(kn);
		lwkt_relpooltoken(kq);
		return;
	}
	KASSERT((kn->kn_status & KN_DELETING) == 0,
	    ("acquire a deleting knote %#x", kn->kn_status));
	kn->kn_status |= KN_PROCESSING;

	KNOTE_ACTIVATE(kn);
	if ((kn->kn_flags & EV_ONESHOT) == 0)
		filt_timerreset(kn);

	knote_release(kn);

	lwkt_relpooltoken(kq);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	int prev_ncallouts;

	prev_ncallouts = atomic_fetchadd_int(&kq_ncallouts, 1);
	if (prev_ncallouts >= kq_calloutmax) {
		atomic_subtract_int(&kq_ncallouts, 1);
		kn->kn_hook = NULL;
		return (ENOMEM);
	}

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
	callout_init_mp(calloutp);
	kn->kn_hook = (caddr_t)calloutp;

	filt_timerreset(kn);
	return (0);
}

/*
 * This function is called with the knote flagged locked but it is
 * still possible to race a callout event due to the callback blocking.
 */
static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_terminate(calloutp);
	kn->kn_hook = NULL;
	kfree(calloutp, M_KQUEUE);
	atomic_subtract_int(&kq_ncallouts, 1);
}

static int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}
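/*
 * Userland usage sketch (illustrative only): a periodic 500ms timer,
 * assuming kq is an open kqueue descriptor.  filt_timerattach() above
 * interprets kev->data as milliseconds, and kn_data accumulates the
 * number of expirations between retrievals:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * Because the filter forces EV_CLEAR, kn_data resets each time the
 * event is delivered.
 */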
/*
 * EVFILT_USER
 */
static int
filt_userattach(struct knote *kn)
{
	u_int ffctrl;

	kn->kn_hook = NULL;
	if (kn->kn_sfflags & NOTE_TRIGGER)
		kn->kn_ptr.hookid = 1;
	else
		kn->kn_ptr.hookid = 0;

	ffctrl = kn->kn_sfflags & NOTE_FFCTRLMASK;
	kn->kn_sfflags &= NOTE_FFLAGSMASK;
	switch (ffctrl) {
	case NOTE_FFNOP:
		break;

	case NOTE_FFAND:
		kn->kn_fflags &= kn->kn_sfflags;
		break;

	case NOTE_FFOR:
		kn->kn_fflags |= kn->kn_sfflags;
		break;

	case NOTE_FFCOPY:
		kn->kn_fflags = kn->kn_sfflags;
		break;

	default:
		/* XXX Return error? */
		break;
	}
	/* We just happen to copy this value as well.  Undocumented. */
	kn->kn_data = kn->kn_sdata;

	return 0;
}

static void
filt_userdetach(struct knote *kn)
{
	/* nothing to do */
}

static int
filt_user(struct knote *kn, long hint)
{
	return (kn->kn_ptr.hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_ptr.hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_fflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_fflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_fflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		/* We just happen to copy this value as well.  Undocumented. */
		kn->kn_data = kev->data;

		/*
		 * This is not the correct use of EV_CLEAR in an event
		 * modification, it should have been passed as a NOTE instead.
		 * But we need to maintain compatibility with Apple & FreeBSD.
		 *
		 * Note however that EV_CLEAR can still be used when doing
		 * the initial registration of the event and works as expected
		 * (clears the event on reception).
		 */
		if (kev->flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			/*
			 * Clearing kn->kn_data is fine, since it gets set
			 * every time anyway.  We just shouldn't clear
			 * kn->kn_fflags here, since that would limit the
			 * possible uses of this API.  NOTE_FFAND or
			 * NOTE_FFCOPY should be used for explicitly clearing
			 * kn->kn_fflags.
			 */
			kn->kn_data = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_fflags;
		kev->data = kn->kn_data;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			/* kn_data, kn_fflags handled by parent */
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%ld)", type);
		break;
	}
}
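/*
 * Userland usage sketch (illustrative only): one thread arms a user
 * event, another triggers it to wake a kevent() waiter.  kq is assumed
 * to be an open kqueue descriptor shared between the threads:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	...
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * NOTE_TRIGGER sets hookid via filt_usertouch() above, activating the
 * note; EV_CLEAR rearms it when the event is read back.
 */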
/*
 * EVFILT_FS
 */
struct klist fs_klist = SLIST_HEAD_INITIALIZER(&fs_klist);

static int
filt_fsattach(struct knote *kn)
{
	kn->kn_flags |= EV_CLEAR;
	knote_insert(&fs_klist, kn);

	return (0);
}

static void
filt_fsdetach(struct knote *kn)
{
	knote_remove(&fs_klist, kn);
}

static int
filt_fs(struct knote *kn, long hint)
{
	kn->kn_fflags |= hint;
	return (kn->kn_fflags != 0);
}

/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
	bzero(kq, sizeof(*kq));
	TAILQ_INIT(&kq->kq_knpend);
	TAILQ_INIT(&kq->kq_knlist);
	kq->kq_fdp = fdp;
	SLIST_INIT(&kq->kq_kqinfo.ki_note);
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
	struct knote *kn;

	lwkt_getpooltoken(kq);
	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
		if (knote_acquire(kn))
			knote_detach_and_drop(kn);
	}
	lwkt_relpooltoken(kq);

	if (kq->kq_knhash) {
		hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask);
		kq->kq_knhash = NULL;
		kq->kq_knhashmask = 0;
	}
}

/*
 * MPSAFE
 */
int
sys_kqueue(struct sysmsg *sysmsg, const struct kqueue_args *uap)
{
	struct thread *td = curthread;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq, td->td_proc->p_fd);
	fp->f_data = kq;

	fsetfd(kq->kq_fdp, fp, fd);
	sysmsg->sysmsg_result = fd;
	fdrop(fp);
	return (0);
}
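/*
 * Userland usage sketch (illustrative only): the canonical create/
 * register/wait cycle against the descriptor returned by sys_kqueue()
 * above, assuming fd is an open, readable descriptor being watched:
 *
 *	struct kevent change, event;
 *	int kq, n;
 *
 *	if ((kq = kqueue()) == -1)
 *		err(1, "kqueue");
 *	EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	n = kevent(kq, &change, 1, &event, 1, NULL);
 *
 * A single kevent() call may both register changes and collect pending
 * events; kern_kevent() below implements that combined pass.
 */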
/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct kevent_copyin_args *kap;
	int error;

	kap = (struct kevent_copyin_args *)arg;

	error = copyout(kevp, kap->eventlist, count * sizeof(*kevp));
	if (error == 0) {
		kap->eventlist += count;
		*res += count;
	} else {
		*res = -1;
	}

	return (error);
}

/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
	struct kevent_copyin_args *kap;
	int error, count;

	kap = (struct kevent_copyin_args *)arg;

	count = min(kap->ka->nchanges - kap->pchanges, max);
	error = copyin(kap->changelist, kevp, count * sizeof *kevp);
	if (error == 0) {
		kap->changelist += count;
		kap->pchanges += count;
		*events = count;
	}

	return (error);
}

/*
 * MPSAFE
 */
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
	    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
	    struct timespec *tsp_in, int flags)
{
	struct kevent *kevp;
	struct timespec *tsp, ats;
	int i, n, total, error, nerrors = 0;
	int gobbled;
	int lres;
	int limit = kq_checkloop;
	int closedcounter;
	struct kevent kev[KQ_NEVENTS];
	struct knote marker;
	struct lwkt_token *tok;

	if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec)
		atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);

	tsp = tsp_in;
	*res = 0;

	closedcounter = kq->kq_fdp->fd_closedcounter;

	for (;;) {
		n = 0;
		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
		if (error)
			return error;
		if (n == 0)
			break;
		for (i = 0; i < n; ++i)
			kev[i].flags &= ~EV_SYSFLAGS;
		for (i = 0; i < n; ++i) {
			gobbled = n - i;

			error = kqueue_register(kq, &kev[i], &gobbled, flags);
			i += gobbled - 1;
			kevp = &kev[i];

			/*
			 * If a registration returns an error we
			 * immediately post the error.  The kevent()
			 * call itself will fail with the error if
			 * no space is available for posting.
			 *
			 * Such errors normally bypass the timeout/blocking
			 * code.  However, if the copyoutfn function refuses
			 * to post the error (see sys_poll()), then we
			 * ignore it too.
			 */
			if (error || (kevp->flags & EV_RECEIPT)) {
				kevp->flags = EV_ERROR;
				kevp->data = error;
				lres = *res;
				kevent_copyoutfn(uap, kevp, 1, res);
				if (*res < 0) {
					return error;
				} else if (lres != *res) {
					nevents--;
					nerrors++;
				}
			}
		}
	}
	if (nerrors)
		return 0;

	/*
	 * Acquire/wait for events - setup timeout
	 *
	 * If no timeout is specified clean up the run path by clearing
	 * the PRECISE flag.
	 */
	if (tsp != NULL) {
		if (tsp->tv_sec || tsp->tv_nsec) {
			getnanouptime(&ats);
			timespecadd(tsp, &ats, tsp);	/* tsp = target time */
		}
	} else {
		flags &= ~KEVENT_TIMEOUT_PRECISE;
	}
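	/*
	 * Timeout semantics sketch (illustrative only), as handled above
	 * and in the scan loop below:
	 *
	 *	kevent(kq, NULL, 0, evs, nevs, NULL);	block until an event
	 *	ts = (struct timespec){ 0, 0 };
	 *	kevent(kq, NULL, 0, evs, nevs, &ts);	poll, do not block
	 *	ts = (struct timespec){ 1, 500000000 };
	 *	kevent(kq, NULL, 0, evs, nevs, &ts);	wait at most 1.5s
	 *
	 * A non-zero timeout is converted to an absolute deadline ("tsp =
	 * target time") so that interrupted or partial sleeps do not
	 * extend the total wait.
	 */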
	/*
	 * Loop as required.
	 *
	 * Collect as many events as we can.  Sleeping on successive
	 * loops is disabled if copyoutfn has incremented (*res).
	 *
	 * The loop stops if an error occurs, all events have been
	 * scanned (the marker has been reached), or fewer than the
	 * maximum number of events are found.
	 *
	 * The copyoutfn function does not have to increment (*res) in
	 * order for the loop to continue.
	 *
	 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
	 */
	total = 0;
	error = 0;
	marker.kn_filter = EVFILT_MARKER;
	marker.kn_status = KN_PROCESSING;

	tok = lwkt_token_pool_lookup(kq);
	flags = (flags & ~KEVENT_SCAN_MASK) | KEVENT_SCAN_INSERT_MARKER;

	while ((n = nevents - total) > 0) {
		if (n > KQ_NEVENTS)
			n = KQ_NEVENTS;

		/*
		 * Process all received events
		 * Account for all non-spurious events in our total
		 */
		i = kqueue_scan(kq, kev, n, &marker, closedcounter, flags);
		flags = (flags & ~KEVENT_SCAN_MASK) | KEVENT_SCAN_KEEP_MARKER;
		if (i) {
			lres = *res;
			error = kevent_copyoutfn(uap, kev, i, res);
			total += *res - lres;
			if (error)
				break;
		}
		if (limit && --limit == 0)
			panic("kqueue: checkloop failed i=%d", i);

		/*
		 * Normally when fewer events are returned than requested
		 * we can stop.  However, if only spurious events were
		 * collected the copyout will not bump (*res) and we have
		 * to continue.
		 */
		if (i < n && *res)
			break;

		/*
		 * If no events were recorded (no events happened or the events
		 * that did happen were all spurious), block until an event
		 * occurs or the timeout occurs and reload the marker.
		 *
		 * If we saturated n (i == n) loop up without sleeping to
		 * continue processing the list.
		 */
		if (i != n && kq->kq_count == 0 && *res == 0) {
			int timeout;
			int ustimeout;

			if (tsp == NULL) {
				timeout = 0;
				ustimeout = 0;
			} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
				error = EWOULDBLOCK;
				break;
			} else {
				struct timespec atx = *tsp;

				getnanouptime(&ats);
				timespecsub(&atx, &ats, &atx);
				if (atx.tv_sec < 0 ||
				    (atx.tv_sec == 0 && atx.tv_nsec <= 0)) {
					error = EWOULDBLOCK;
					break;
				}
				if (flags & KEVENT_TIMEOUT_PRECISE) {
					if (atx.tv_sec == 0 &&
					    atx.tv_nsec < kq_sleep_threshold) {
						ustimeout = kq_sleep_threshold /
							    1000;
					} else if (atx.tv_sec < 60) {
						ustimeout =
							atx.tv_sec * 1000000 +
							atx.tv_nsec / 1000;
					} else {
						ustimeout = 60 * 1000000;
					}
					if (ustimeout == 0)
						ustimeout = 1;
					timeout = 0;
				} else if (atx.tv_sec > 60 * 60) {
					timeout = 60 * 60 * hz;
					ustimeout = 0;
				} else {
					timeout = tstohz_high(&atx);
					ustimeout = 0;
				}
			}

			lwkt_gettoken(tok);
			if (kq->kq_count == 0) {
				kq->kq_sleep_cnt++;
				if (__predict_false(kq->kq_sleep_cnt == 0)) {
					/*
					 * Guard against possible wrapping.  And
					 * set it to 2, so that kqueue_wakeup()
					 * can wake everyone up.
					 */
					kq->kq_sleep_cnt = 2;
				}
				if (flags & KEVENT_TIMEOUT_PRECISE) {
					error = precise_sleep(kq, PCATCH,
							      "kqread",
							      ustimeout);
				} else {
					error = tsleep(kq, PCATCH,
						       "kqread", timeout);
				}

				/* don't restart after signals... */
				if (error == ERESTART)
					error = EINTR;
				if (error == EWOULDBLOCK)
					error = 0;
				if (error) {
					lwkt_reltoken(tok);
					break;
				}
				flags = (flags & ~KEVENT_SCAN_MASK) |
					KEVENT_SCAN_RELOAD_MARKER;
			}
			lwkt_reltoken(tok);
		}
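		/*
		 * Worked example (illustrative only) of the precise-timeout
		 * conversion above, assuming the default kq_sleep_threshold
		 * of 20000ns:
		 *
		 *	atx = 0.000005s	-> tv_nsec (5000) < threshold, so
		 *			   ustimeout is clamped to
		 *			   20000 / 1000 = 20us
		 *	atx = 1.5s	-> ustimeout = 1 * 1000000 +
		 *			   500000000 / 1000 = 1500000us
		 *	atx = 2 hours	-> capped at 60 * 1000000 = 60s per
		 *			   sleep; the outer loop re-checks
		 *			   the deadline afterwards
		 *
		 * Without KEVENT_TIMEOUT_PRECISE the remaining time is
		 * simply converted to ticks via tstohz_high().
		 */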
		/*
		 * Deal with an edge case where spurious events can cause
		 * a loop to occur without moving the marker.  This can
		 * prevent kqueue_scan() from picking up new events which
		 * race us.  We must be sure to move the marker for this
		 * case.
		 *
		 * NOTE: We do not want to move the marker if events
		 *	 were scanned because normal kqueue operations
		 *	 may reactivate events.  Moving the marker in
		 *	 that case could result in duplicates for the
		 *	 same event.
		 */
		if (i == 0) {
			flags = (flags & ~KEVENT_SCAN_MASK) |
				KEVENT_SCAN_RELOAD_MARKER;
		}
	}

	/*
	 * Remove the marker
	 */
	if ((flags & KEVENT_SCAN_INSERT_MARKER) == 0) {
		lwkt_gettoken(tok);
		TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
		lwkt_reltoken(tok);
	}

	/* Timeouts do not return EWOULDBLOCK. */
	if (error == EWOULDBLOCK)
		error = 0;
	return error;
}

/*
 * MPALMOSTSAFE
 */
int
sys_kevent(struct sysmsg *sysmsg, const struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct timespec ts, *tsp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct kevent_copyin_args *kap, ka;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else {
		tsp = NULL;
	}
	fp = holdfp(td, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		fdrop(fp);
		return (EBADF);
	}

	kq = (struct kqueue *)fp->f_data;

	kap = &ka;
	kap->ka = uap;
	kap->pchanges = 0;
	kap->eventlist = uap->eventlist;
	kap->changelist = uap->changelist;

	error = kern_kevent(kq, uap->nevents, &sysmsg->sysmsg_result, kap,
			    kevent_copyin, kevent_copyout, tsp, 0);

	dropfp(td, uap->fd, fp);

	return (error);
}

/*
 * Efficiently load multiple file pointers.  This significantly reduces
 * threaded overhead.  When doing simple polling we can depend on the
 * per-thread (fd,fp) cache.  With more descriptors, we batch.
 */
static
void
floadkevfps(thread_t td, struct filedesc *fdp, struct kevent *kev,
	    struct file **fp, int climit)
{
	struct filterops *fops;
	int tdcache;

	if (climit <= 2 && td->td_proc && td->td_proc->p_fd == fdp) {
		tdcache = 1;
	} else {
		tdcache = 0;
		spin_lock_shared(&fdp->fd_spin);
	}

	while (climit) {
		*fp = NULL;
		if (kev->filter < 0 &&
		    kev->filter + EVFILT_SYSCOUNT >= 0) {
			fops = sysfilt_ops[~kev->filter];
			if (fops->f_flags & FILTEROP_ISFD) {
				if (tdcache) {
					*fp = holdfp(td, kev->ident, -1);
				} else {
					*fp = holdfp_fdp_locked(fdp,
								kev->ident,
								-1);
				}
			}
		}
		--climit;
		++fp;
		++kev;
	}
	if (tdcache == 0)
		spin_unlock_shared(&fdp->fd_spin);
}
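/*
 * Illustrative note: KEVENT_UNIQUE_NOTES exists for the *poll() family,
 * where two pollfd entries may name the same descriptor and event.  Both
 * would otherwise collapse onto a single knote (same kq, ident, and
 * filter), so the poll code passes a per-slot identifier in kev->data
 * which kqueue_register() below stores as kn_uniqifier, keeping the
 * knotes distinct.
 */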
/*
 * Register up to *countp kev's.  Always registers at least 1.
 *
 * The number registered is returned in *countp.
 *
 * If an error occurs or a kev is flagged EV_RECEIPT, it is
 * processed and included in *countp, and processing then
 * stops.
 *
 * If flags contains KEVENT_UNIQUE_NOTES, kev->data contains an identifier
 * to further distinguish knotes which might otherwise have the same kq,
 * ident, and filter (used by *poll() because multiple pfds are allowed to
 * reference the same descriptor and implied kq filter).  kev->data is
 * implied to be zero for event processing when this flag is set.
 */
int
kqueue_register(struct kqueue *kq, struct kevent *kev, int *countp, int flags)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct klist *list = NULL;
	struct filterops *fops;
	struct file *fp[KQ_NEVENTS];
	struct knote *kn = NULL;
	struct thread *td;
	int error;
	int count;
	int climit;
	int closedcounter;
	int uniqifier = 0;
	struct knote_cache_list *cache_list;

	td = curthread;
	climit = *countp;
	if (climit > KQ_NEVENTS)
		climit = KQ_NEVENTS;
	closedcounter = fdp->fd_closedcounter;
	floadkevfps(td, fdp, kev, fp, climit);

	lwkt_getpooltoken(kq);
	count = 0;
	error = 0;

	/*
	 * To avoid races, only one thread can register events on this
	 * kqueue at a time.
	 */
	while (__predict_false(kq->kq_regtd != NULL && kq->kq_regtd != td)) {
		kq->kq_state |= KQ_REGWAIT;
		tsleep(&kq->kq_regtd, 0, "kqreg", 0);
	}
	if (__predict_false(kq->kq_regtd != NULL)) {
		/* Recursive calling of kqueue_register() */
		td = NULL;
	} else {
		/* Owner of the kq_regtd, i.e. td != NULL */
		kq->kq_regtd = td;
	}

loop:
	/*
	 * knote uniqifiers are used by *poll() because there may be
	 * multiple pfd[] entries for the same descriptor and filter.
	 * The unique id is stored in kev->data and kev->data for the
	 * kevent is implied to be zero.
	 */
	if (flags & KEVENT_UNIQUE_NOTES) {
		uniqifier = kev->data;
		kev->data = 0;
	}

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0) {
			error = EINVAL;
			++count;
			goto done;
		}
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		error = EINVAL;
		++count;
		goto done;
	}

	if (fops->f_flags & FILTEROP_ISFD) {
		/* validate descriptor */
		if (fp[count] == NULL) {
			error = EBADF;
			++count;
			goto done;
		}
	}

	cache_list = &knote_cache_lists[mycpuid];
	if (SLIST_EMPTY(&cache_list->knote_cache)) {
		struct knote *new_kn;

		new_kn = knote_alloc();
		crit_enter();
		SLIST_INSERT_HEAD(&cache_list->knote_cache, new_kn, kn_link);
		cache_list->knote_cache_cnt++;
		crit_exit();
	}

	if (fp[count] != NULL) {
		list = &fp[count]->f_klist;
	} else if (kq->kq_knhashmask) {
		list = &kq->kq_knhash[
		    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
	}
	if (list != NULL) {
		lwkt_getpooltoken(list);
again:
		SLIST_FOREACH(kn, list, kn_link) {
			if (kn->kn_kq == kq &&
			    kn->kn_filter == kev->filter &&
			    kn->kn_id == kev->ident &&
			    kn->kn_uniqifier == uniqifier) {
				if (knote_acquire(kn) == 0)
					goto again;
				break;
			}
		}
		lwkt_relpooltoken(list);
	}
	/*
	 * NOTE: At this point if kn is non-NULL we will have acquired
	 *	 it and set KN_PROCESSING.
	 */
	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		++count;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			crit_enter();
			kn = SLIST_FIRST(&cache_list->knote_cache);
			if (kn == NULL) {
				crit_exit();
				kn = knote_alloc();
			} else {
				SLIST_REMOVE_HEAD(&cache_list->knote_cache,
						  kn_link);
				cache_list->knote_cache_cnt--;
				crit_exit();
			}
			kn->kn_fp = fp[count];
			kn->kn_kq = kq;
			kn->kn_fop = fops;
			kn->kn_uniqifier = uniqifier;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp[count] = NULL;	/* safety */

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			/*
			 * KN_PROCESSING prevents the knote from getting
			 * ripped out from under us while we are trying
			 * to attach it, in case the attach blocks.
			 */
			kn->kn_status = KN_PROCESSING;
			knote_attach(kn);
			if ((error = filter_attach(kn)) != 0) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
				knote_drop(kn);
				++count;
				goto done;
			}

			/*
			 * Interlock against close races which either tried
			 * to remove our knote while we were blocked or missed
			 * it entirely prior to our attachment.  We do not
			 * want to end up with a knote on a closed descriptor.
			 */
			if ((fops->f_flags & FILTEROP_ISFD) &&
			    checkfdclosed(curthread, fdp, kev->ident,
					  kn->kn_fp, closedcounter)) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			KKASSERT(kn->kn_status & KN_PROCESSING);
			if (fops == &user_filtops) {
				filt_usertouch(kn, kev, EVENT_REGISTER);
			} else {
				kn->kn_sfflags = kev->fflags;
				kn->kn_sdata = kev->data;
				kn->kn_kevent.udata = kev->udata;
			}
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	} else if (kev->flags & EV_DELETE) {
		/*
		 * Delete the existing knote
		 */
		knote_detach_and_drop(kn);
		error = 0;
		++count;
		goto done;
	} else {
		/*
		 * Modify an existing event.
		 *
		 * The user may change some filter values after the
		 * initial EV_ADD, but doing so will not reset any
		 * filters which have already been triggered.
		 */
		KKASSERT(kn->kn_status & KN_PROCESSING);
		if (fops == &user_filtops) {
			filt_usertouch(kn, kev, EVENT_REGISTER);
		} else {
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.
		 * Otherwise we might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	}

	/*
	 * Disablement does not deactivate a knote here.
	 */
	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	/*
	 * Re-enablement may have to immediately enqueue an active knote.
	 */
	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0)) {
			knote_enqueue(kn);
		}
	}

	/*
	 * Handle any required reprocessing
	 */
	knote_release(kn);
	/* kn may be invalid now */

	/*
	 * Loop control.  We stop on errors (above), and also stop after
	 * processing EV_RECEIPT, so the caller can process it.
	 */
	++count;
	if (kev->flags & EV_RECEIPT) {
		error = 0;
		goto done;
	}
	++kev;
	if (count < climit) {
		if (fp[count-1])	/* drop unprocessed fp */
			fdrop(fp[count-1]);
		goto loop;
	}

	/*
	 * Cleanup
	 */
done:
	if (td != NULL) {	/* Owner of the kq_regtd */
		kq->kq_regtd = NULL;
		if (__predict_false(kq->kq_state & KQ_REGWAIT)) {
			kq->kq_state &= ~KQ_REGWAIT;
			wakeup(&kq->kq_regtd);
		}
	}
	lwkt_relpooltoken(kq);

	/*
	 * Drop unprocessed file pointers
	 */
	*countp = count;
	if (count && fp[count-1])
		fdrop(fp[count-1]);
	while (count < climit) {
		if (fp[count])
			fdrop(fp[count]);
		++count;
	}
	return (error);
}

/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
	    struct knote *marker, int closedcounter, int flags)
{
	struct knote *kn, local_marker;
	thread_t td = curthread;
	int total;

	total = 0;
	local_marker.kn_filter = EVFILT_MARKER;
	local_marker.kn_status = KN_PROCESSING;

	lwkt_getpooltoken(kq);

	/*
	 * Adjust marker, insert initial marker, or leave the marker alone.
	 *
	 * Also setup our local_marker.
	 */
	switch(flags & KEVENT_SCAN_MASK) {
	case KEVENT_SCAN_RELOAD_MARKER:
		TAILQ_REMOVE(&kq->kq_knpend, marker, kn_tqe);
		/* fall through */
	case KEVENT_SCAN_INSERT_MARKER:
		TAILQ_INSERT_TAIL(&kq->kq_knpend, marker, kn_tqe);
		break;
	}
	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);

	/*
	 * Collect events.
	 */
	while (count) {
		kn = TAILQ_NEXT(&local_marker, kn_tqe);
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Marker reached, we are done */
			if (kn == marker)
				break;

			/* Move local marker past some other thread's marker */
			kn = TAILQ_NEXT(kn, kn_tqe);
			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
			continue;
		}
		/*
		 * We can't skip a knote undergoing processing, otherwise
		 * we risk not returning it when the user process expects
		 * it to be returned.  Sleep and retry.
		 */
		if (knote_acquire(kn) == 0)
			continue;

		/*
		 * Remove the event for processing.
		 *
		 * WARNING!  We must leave KN_QUEUED set to prevent the
		 *	     event from being KNOTE_ACTIVATE()d while
		 *	     the queue state is in limbo, in case we
		 *	     block.
		 */
		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
		kq->kq_count--;

		/*
		 * Kernel select() and poll() functions cache previous
		 * operations on the assumption that future operations
		 * will use similar descriptor sets.  This removes any
		 * stale entries in a way that does not require a descriptor
		 * lookup and is thus not affected by close() races.
		 *
		 * Do not report to *_copyout()
		 */
		if (flags & KEVENT_AUTO_STALE) {
			if ((uint64_t)kn->kn_kevent.udata <
			    curthread->td_lwp->lwp_kqueue_serial) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS |
						 KN_DISABLED;
			}
		}

		/*
		 * If a descriptor is close()d out from under a poll/select,
		 * we want to report the event but delete the note because
		 * the note can wind up being 'stuck' on kq_knpend.
		 */
		if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
		    checkfdclosed(td, kq->kq_fdp, kn->kn_kevent.ident,
				  kn->kn_fp, closedcounter)) {
			kn->kn_status |= KN_DELETING | KN_REPROCESS;
		}

		if (kn->kn_status & KN_DISABLED) {
			/*
			 * If disabled we ensure the event is not queued
			 * but leave its active bit set.  On re-enablement
			 * the event may be immediately triggered.
			 */
			kn->kn_status &= ~KN_QUEUED;
		} else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
			   (kn->kn_status & KN_DELETING) == 0 &&
			   filter_event(kn, 0) == 0) {
			/*
			 * If not running in one-shot mode and the event
			 * is no longer present we ensure it is removed
			 * from the queue and ignore it.
			 */
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
		} else {
			/*
			 * Post the event
			 */
			if (kn->kn_fop == &user_filtops)
				filt_usertouch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			++kevp;
			++total;
			--count;

			if (kn->kn_flags & EV_ONESHOT) {
				kn->kn_status &= ~KN_QUEUED;
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			} else {
				if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
					if (kn->kn_flags & EV_CLEAR) {
						kn->kn_data = 0;
						kn->kn_fflags = 0;
					}
					if (kn->kn_flags & EV_DISPATCH) {
						kn->kn_status |= KN_DISABLED;
					}
					kn->kn_status &= ~(KN_QUEUED |
							   KN_ACTIVE);
				} else {
					TAILQ_INSERT_TAIL(&kq->kq_knpend,
							  kn,
							  kn_tqe);
					kq->kq_count++;
				}
			}
		}

		/*
		 * Handle any post-processing states
		 */
		knote_release(kn);
	}
	TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

	lwkt_relpooltoken(kq);
	return (total);
}
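/*
 * Userland usage sketch (illustrative only) of the requeue policies
 * applied by kqueue_scan() above, assuming fd is a watched descriptor:
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *		level-triggered: requeued while data remains
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *		edge-triggered: kn_data/kn_fflags cleared on delivery
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_DISPATCH, 0, 0, NULL);
 *		disabled on delivery until re-enabled with EV_ENABLE
 *
 * EV_ONESHOT events are marked KN_DELETING after a single delivery.
 */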
/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 *
 * MPSAFE
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPSAFE
 */
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPALMOSTSAFE
 */
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	     struct ucred *cred, struct sysmsg *msg)
{
	struct kqueue *kq;
	int error;

	kq = (struct kqueue *)fp->f_data;
	lwkt_getpooltoken(kq);
	switch(com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_relpooltoken(kq);
	return (error);
}

/*
 * MPSAFE
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

/*
 * MPSAFE
 */
static int
kqueue_close(struct file *fp)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	kqueue_terminate(kq);

	fp->f_data = NULL;
	funsetown(&kq->kq_sigio);

	kfree(kq, M_KQUEUE);
	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_sleep_cnt) {
		u_int sleep_cnt = kq->kq_sleep_cnt;

		kq->kq_sleep_cnt = 0;
		if (sleep_cnt == 1)
			wakeup_one(kq);
		else
			wakeup(kq);
	}
	KNOTE(&kq->kq_kqinfo.ki_note, 0);
}

/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static int
filter_attach(struct knote *kn)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_attach(kn);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_attach(kn);
		rel_mplock();
	}
	return (ret);
}

/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static void
knote_detach_and_drop(struct knote *kn)
{
	kn->kn_status |= KN_DELETING | KN_REPROCESS;
	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		kn->kn_fop->f_detach(kn);
	} else {
		get_mplock();
		kn->kn_fop->f_detach(kn);
		rel_mplock();
	}
	knote_drop(kn);
}
/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 *
 * Caller must be holding the related kq token
 */
static int
filter_event(struct knote *kn, long hint)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_event(kn, hint);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_event(kn, hint);
		rel_mplock();
	}
	return (ret);
}

/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * sticky.
 */
void
knote(struct klist *list, long hint)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(list);
restart:
	SLIST_FOREACH(kn, list, kn_next) {
		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);

		/* temporary verification hack */
		SLIST_FOREACH(kntmp, list, kn_next) {
			if (kn == kntmp)
				break;
		}
		if (kn != kntmp || kn->kn_kq != kq) {
			lwkt_relpooltoken(kq);
			goto restart;
		}

		if (kn->kn_status & KN_PROCESSING) {
			/*
			 * Someone else is processing the knote, ask the
			 * other thread to reprocess it and don't mess
			 * with it otherwise.
			 */
			if (hint == 0) {
				kn->kn_status |= KN_REPROCESS;
				lwkt_relpooltoken(kq);
				continue;
			}

			/*
			 * If the hint is non-zero we have to wait or risk
			 * losing the state the caller is trying to update.
			 *
			 * XXX This is a real problem, certain process
			 *     and signal filters will bump kn_data for
			 *     already-processed notes more than once if
			 *     we restart the list scan.  FIXME.
			 */
			kn->kn_status |= KN_WAITING | KN_REPROCESS;
			tsleep(kn, 0, "knotec", hz);
			lwkt_relpooltoken(kq);
			goto restart;
		}

		/*
		 * Become the reprocessing master ourselves.
		 *
		 * If hint is non-zero running the event is mandatory
		 * when not deleting so do it whether reprocessing is
		 * set or not.
		 */
		kn->kn_status |= KN_PROCESSING;
		if ((kn->kn_status & KN_DELETING) == 0) {
			if (filter_event(kn, hint))
				KNOTE_ACTIVATE(kn);
		}
		if (knote_release(kn)) {
			lwkt_relpooltoken(kq);
			goto restart;
		}
		lwkt_relpooltoken(kq);
	}
	lwkt_relpooltoken(list);
}
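/*
 * Kernel-side usage sketch (illustrative only): a driver's event source
 * typically pairs knote_insert()/knote_remove() in its filterops with a
 * KNOTE() call from the notification path.  foo_softc and its kqinfo
 * are hypothetical:
 *
 *	static int
 *	filt_fooattach(struct knote *kn)
 *	{
 *		knote_insert(&foo_softc.sc_kqinfo.ki_note, kn);
 *		return (0);
 *	}
 *
 * and, from the interrupt or state-change path:
 *
 *	KNOTE(&foo_softc.sc_kqinfo.ki_note, 0);
 *
 * KNOTE() expands to a call to knote() above, which runs the filter and
 * activates any triggered notes.
 */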
/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_INSERT_HEAD(klist, kn, kn_next);
	lwkt_relpooltoken(klist);
}

/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_REMOVE(klist, kn, knote, kn_next);
	lwkt_relpooltoken(klist);
}

void
knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
		    struct filterops *ops, void *hook)
{
	struct kqueue *kq;
	struct knote *kn;

	lwkt_getpooltoken(&src->ki_note);
	lwkt_getpooltoken(&dst->ki_note);
	while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);
		if (SLIST_FIRST(&src->ki_note) != kn || kn->kn_kq != kq) {
			lwkt_relpooltoken(kq);
			continue;
		}
		if (knote_acquire(kn)) {
			knote_remove(&src->ki_note, kn);
			kn->kn_fop = ops;
			kn->kn_hook = hook;
			knote_insert(&dst->ki_note, kn);
			knote_release(kn);
			/* kn may be invalid now */
		}
		lwkt_relpooltoken(kq);
	}
	lwkt_relpooltoken(&dst->ki_note);
	lwkt_relpooltoken(&src->ki_note);
}

/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(&fp->f_klist);
restart:
	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
			kq = kn->kn_kq;
			lwkt_getpooltoken(kq);

			/* temporary verification hack */
			SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
				if (kn == kntmp)
					break;
			}
			if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
			    kn->kn_id != fd || kn->kn_kq != kq) {
				lwkt_relpooltoken(kq);
				goto restart;
			}
			if (knote_acquire(kn))
				knote_detach_and_drop(kn);
			lwkt_relpooltoken(kq);
			goto restart;
		}
	}
	lwkt_relpooltoken(&fp->f_klist);
}

/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_attach(struct knote *kn)
{
	struct klist *list;
	struct kqueue *kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		KKASSERT(kn->kn_fp);
		list = &kn->kn_fp->f_klist;
	} else {
		if (kq->kq_knhashmask == 0)
			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
						 &kq->kq_knhashmask);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	lwkt_getpooltoken(list);
	SLIST_INSERT_HEAD(list, kn, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
}
/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_drop(struct knote *kn)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		list = &kn->kn_fp->f_klist;
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	lwkt_getpooltoken(list);
	SLIST_REMOVE(list, kn, knote, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		fdrop(kn->kn_fp);
		kn->kn_fp = NULL;
	}
	knote_free(kn);
}

/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	++kq->kq_count;

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);

	kqueue_wakeup(kq);
}

/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static struct knote *
knote_alloc(void)
{
	return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
}

static void
knote_free(struct knote *kn)
{
	struct knote_cache_list *cache_list;

	cache_list = &knote_cache_lists[mycpuid];
	if (cache_list->knote_cache_cnt < KNOTE_CACHE_MAX) {
		crit_enter();
		SLIST_INSERT_HEAD(&cache_list->knote_cache, kn, kn_link);
		cache_list->knote_cache_cnt++;
		crit_exit();
		return;
	}
	kfree(kn, M_KQUEUE);
}

struct sleepinfo {
	void *ident;
	int timedout;
};

static void
precise_sleep_intr(systimer_t info, int in_ipi, struct intrframe *frame)
{
	struct sleepinfo *si;

	si = info->data;
	si->timedout = 1;
	wakeup(si->ident);
}

static int
precise_sleep(void *ident, int flags, const char *wmesg, int us)
{
	struct systimer info;
	struct sleepinfo si = {
		.ident = ident,
		.timedout = 0,
	};
	int r;

	tsleep_interlock(ident, flags);
	systimer_init_oneshot(&info, precise_sleep_intr, &si, us);
	r = tsleep(ident, flags | PINTERLOCKED, wmesg, 0);
	systimer_del(&info);
	if (si.timedout)
		r = EWOULDBLOCK;

	return r;
}
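/*
 * Usage note (illustrative only): a call such as
 * precise_sleep(kq, PCATCH, "kqread", 20), as issued from kern_kevent(),
 * arms a one-shot systimer roughly 20us out and blocks on the
 * interlocked ident.  A timer-driven wakeup is reported as EWOULDBLOCK,
 * which the caller's scan loop treats as "re-check the deadline" rather
 * than as an error.
 */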