1 /*- 2 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $ 27 */ 28 29 #include <sys/param.h> 30 #include <sys/systm.h> 31 #include <sys/kernel.h> 32 #include <sys/proc.h> 33 #include <sys/malloc.h> 34 #include <sys/unistd.h> 35 #include <sys/file.h> 36 #include <sys/lock.h> 37 #include <sys/fcntl.h> 38 #include <sys/queue.h> 39 #include <sys/event.h> 40 #include <sys/eventvar.h> 41 #include <sys/protosw.h> 42 #include <sys/socket.h> 43 #include <sys/socketvar.h> 44 #include <sys/stat.h> 45 #include <sys/sysctl.h> 46 #include <sys/sysproto.h> 47 #include <sys/thread.h> 48 #include <sys/uio.h> 49 #include <sys/signalvar.h> 50 #include <sys/filio.h> 51 #include <sys/ktr.h> 52 #include <sys/spinlock.h> 53 54 #include <sys/thread2.h> 55 #include <sys/file2.h> 56 #include <sys/mplock2.h> 57 #include <sys/spinlock2.h> 58 59 #define EVENT_REGISTER 1 60 #define EVENT_PROCESS 2 61 62 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system"); 63 64 struct kevent_copyin_args { 65 struct kevent_args *ka; 66 int pchanges; 67 }; 68 69 #define KNOTE_CACHE_MAX 64 70 71 struct knote_cache_list { 72 struct klist knote_cache; 73 int knote_cache_cnt; 74 } __cachealign; 75 76 static int kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count, 77 struct knote *marker, int closedcounter, int scan_flags); 78 static int kqueue_read(struct file *fp, struct uio *uio, 79 struct ucred *cred, int flags); 80 static int kqueue_write(struct file *fp, struct uio *uio, 81 struct ucred *cred, int flags); 82 static int kqueue_ioctl(struct file *fp, u_long com, caddr_t data, 83 struct ucred *cred, struct sysmsg *msg); 84 static int kqueue_kqfilter(struct file *fp, struct knote *kn); 85 static int kqueue_stat(struct file *fp, struct stat *st, 86 struct ucred *cred); 87 static int kqueue_close(struct file *fp); 88 static void kqueue_wakeup(struct kqueue *kq); 89 static int filter_attach(struct knote *kn); 90 static int filter_event(struct knote *kn, long hint); 91 92 /* 93 * MPSAFE 94 */ 95 static struct fileops kqueueops = { 96 .fo_read = kqueue_read, 97 .fo_write = kqueue_write, 98 .fo_ioctl 
= kqueue_ioctl,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};

static void	knote_attach(struct knote *kn);
static void	knote_drop(struct knote *kn);
static void	knote_detach_and_drop(struct knote *kn);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static struct knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	precise_sleep_intr(systimer_t info, int in_ipi,
			struct intrframe *frame);
static int	precise_sleep(void *ident, int flags, const char *wmesg,
			int us);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
			u_long type);
static int	filt_fsattach(struct knote *kn);
static void	filt_fsdetach(struct knote *kn);
static int	filt_fs(struct knote *kn, long hint);

static struct filterops file_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ FILTEROP_MPSAFE, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ FILTEROP_MPSAFE, filt_timerattach, filt_timerdetach, filt_timer };
static struct filterops user_filtops =
	{ FILTEROP_MPSAFE, filt_userattach, filt_userdetach, filt_user };
static struct filterops fs_filtops =
	{ FILTEROP_MPSAFE, filt_fsattach, filt_fsdetach, filt_fs };

static int	kq_ncallouts = 0;
static int	kq_calloutmax = 65536;
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int	kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of loops for kqueue scan");
static int	kq_sleep_threshold = 20000;
SYSCTL_INT(_kern, OID_AUTO, kq_sleep_threshold, CTLFLAG_RW,
    &kq_sleep_threshold, 0, "Minimum sleep duration without busy-looping");

#define KNOTE_ACTIVATE(kn) do { 					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
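 *
 * EVFILT_* codes are negative, so the table is indexed with the one's
 * complement of the filter (sysfilt_ops[~kev->filter] in kqueue_register()
 * below); e.g. EVFILT_READ (-1) selects slot 0.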
175 */ 176 static struct filterops *sysfilt_ops[] = { 177 &file_filtops, /* EVFILT_READ */ 178 &file_filtops, /* EVFILT_WRITE */ 179 &aio_filtops, /* EVFILT_AIO */ 180 &file_filtops, /* EVFILT_VNODE */ 181 &proc_filtops, /* EVFILT_PROC */ 182 &sig_filtops, /* EVFILT_SIGNAL */ 183 &timer_filtops, /* EVFILT_TIMER */ 184 &file_filtops, /* EVFILT_EXCEPT */ 185 &user_filtops, /* EVFILT_USER */ 186 &fs_filtops, /* EVFILT_FS */ 187 }; 188 189 static struct knote_cache_list knote_cache_lists[MAXCPU]; 190 191 /* 192 * Acquire a knote, return non-zero on success, 0 on failure. 193 * 194 * If we cannot acquire the knote we sleep and return 0. The knote 195 * may be stale on return in this case and the caller must restart 196 * whatever loop they are in. 197 * 198 * Related kq token must be held. 199 */ 200 static __inline int 201 knote_acquire(struct knote *kn) 202 { 203 if (kn->kn_status & KN_PROCESSING) { 204 kn->kn_status |= KN_WAITING | KN_REPROCESS; 205 tsleep(kn, 0, "kqepts", hz); 206 /* knote may be stale now */ 207 return(0); 208 } 209 kn->kn_status |= KN_PROCESSING; 210 return(1); 211 } 212 213 /* 214 * Release an acquired knote, clearing KN_PROCESSING and handling any 215 * KN_REPROCESS events. 216 * 217 * Caller must be holding the related kq token 218 * 219 * Non-zero is returned if the knote is destroyed or detached. 220 */ 221 static __inline int 222 knote_release(struct knote *kn) 223 { 224 int ret; 225 226 while (kn->kn_status & KN_REPROCESS) { 227 kn->kn_status &= ~KN_REPROCESS; 228 if (kn->kn_status & KN_WAITING) { 229 kn->kn_status &= ~KN_WAITING; 230 wakeup(kn); 231 } 232 if (kn->kn_status & KN_DELETING) { 233 knote_detach_and_drop(kn); 234 return(1); 235 /* NOT REACHED */ 236 } 237 if (filter_event(kn, 0)) 238 KNOTE_ACTIVATE(kn); 239 } 240 if (kn->kn_status & KN_DETACHED) 241 ret = 1; 242 else 243 ret = 0; 244 kn->kn_status &= ~KN_PROCESSING; 245 /* kn should not be accessed anymore */ 246 return ret; 247 } 248 249 static int 250 filt_fileattach(struct knote *kn) 251 { 252 return (fo_kqfilter(kn->kn_fp, kn)); 253 } 254 255 /* 256 * MPSAFE 257 */ 258 static int 259 kqueue_kqfilter(struct file *fp, struct knote *kn) 260 { 261 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data; 262 263 if (kn->kn_filter != EVFILT_READ) 264 return (EOPNOTSUPP); 265 266 kn->kn_fop = &kqread_filtops; 267 knote_insert(&kq->kq_kqinfo.ki_note, kn); 268 return (0); 269 } 270 271 static void 272 filt_kqdetach(struct knote *kn) 273 { 274 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data; 275 276 knote_remove(&kq->kq_kqinfo.ki_note, kn); 277 } 278 279 /*ARGSUSED*/ 280 static int 281 filt_kqueue(struct knote *kn, long hint) 282 { 283 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data; 284 285 kn->kn_data = kq->kq_count; 286 return (kn->kn_data > 0); 287 } 288 289 static int 290 filt_procattach(struct knote *kn) 291 { 292 struct proc *p; 293 int immediate; 294 295 immediate = 0; 296 p = pfind(kn->kn_id); 297 if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) { 298 p = zpfind(kn->kn_id); 299 immediate = 1; 300 } 301 if (p == NULL) { 302 return (ESRCH); 303 } 304 if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) { 305 if (p) 306 PRELE(p); 307 return (EACCES); 308 } 309 310 lwkt_gettoken(&p->p_token); 311 kn->kn_ptr.p_proc = p; 312 kn->kn_flags |= EV_CLEAR; /* automatically set */ 313 314 /* 315 * internal flag indicating registration done by kernel 316 */ 317 if (kn->kn_flags & EV_FLAG1) { 318 kn->kn_data = kn->kn_sdata; /* ppid */ 319 kn->kn_fflags = NOTE_CHILD; 320 kn->kn_flags &= ~EV_FLAG1; 321 } 
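
	/*
	 * Hook the knote onto the target process's klist.  For a
	 * NOTE_TRACK child registration (the EV_FLAG1 case above) the
	 * knote already carries NOTE_CHILD in kn_fflags and the parent
	 * pid in kn_data.
	 */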
	knote_insert(&p->p_klist, kn);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	p = kn->kn_ptr.p_proc;
	knote_remove(&p->p_klist, kn);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will be poof,
	 * gone later on.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;
		if ((kn->kn_status & KN_DETACHED) == 0) {
			PHOLD(p);
			knote_remove(&p->p_klist, kn);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
			PRELE(p);
		}
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;
		int n;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		n = 1;
		error = kqueue_register(kn->kn_kq, &kev, &n);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}

static void
filt_timerreset(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);
	calloutp = (struct callout *)kn->kn_hook;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);
}

/*
 * The callout interlocks with callout_stop() but can still
 * race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct kqueue *kq = kn->kn_kq;

	lwkt_getpooltoken(kq);

	/*
	 * Open knote_acquire(), since we can't sleep in callout,
	 * however, we do need to record this expiration.
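	 *
	 * The acquire handshake is open-coded here: if the knote is
	 * already being processed we only set KN_REPROCESS (and, for a
	 * periodic timer, rearm it) rather than tsleep()ing the way
	 * knote_acquire() does.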
450 */ 451 kn->kn_data++; 452 if (kn->kn_status & KN_PROCESSING) { 453 kn->kn_status |= KN_REPROCESS; 454 if ((kn->kn_status & KN_DELETING) == 0 && 455 (kn->kn_flags & EV_ONESHOT) == 0) 456 filt_timerreset(kn); 457 lwkt_relpooltoken(kq); 458 return; 459 } 460 KASSERT((kn->kn_status & KN_DELETING) == 0, 461 ("acquire a deleting knote %#x", kn->kn_status)); 462 kn->kn_status |= KN_PROCESSING; 463 464 KNOTE_ACTIVATE(kn); 465 if ((kn->kn_flags & EV_ONESHOT) == 0) 466 filt_timerreset(kn); 467 468 knote_release(kn); 469 470 lwkt_relpooltoken(kq); 471 } 472 473 /* 474 * data contains amount of time to sleep, in milliseconds 475 */ 476 static int 477 filt_timerattach(struct knote *kn) 478 { 479 struct callout *calloutp; 480 int prev_ncallouts; 481 482 prev_ncallouts = atomic_fetchadd_int(&kq_ncallouts, 1); 483 if (prev_ncallouts >= kq_calloutmax) { 484 atomic_subtract_int(&kq_ncallouts, 1); 485 kn->kn_hook = NULL; 486 return (ENOMEM); 487 } 488 489 kn->kn_flags |= EV_CLEAR; /* automatically set */ 490 calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK); 491 callout_init_mp(calloutp); 492 kn->kn_hook = (caddr_t)calloutp; 493 494 filt_timerreset(kn); 495 return (0); 496 } 497 498 /* 499 * This function is called with the knote flagged locked but it is 500 * still possible to race a callout event due to the callback blocking. 501 */ 502 static void 503 filt_timerdetach(struct knote *kn) 504 { 505 struct callout *calloutp; 506 507 calloutp = (struct callout *)kn->kn_hook; 508 callout_terminate(calloutp); 509 kn->kn_hook = NULL; 510 kfree(calloutp, M_KQUEUE); 511 atomic_subtract_int(&kq_ncallouts, 1); 512 } 513 514 static int 515 filt_timer(struct knote *kn, long hint) 516 { 517 return (kn->kn_data != 0); 518 } 519 520 /* 521 * EVFILT_USER 522 */ 523 static int 524 filt_userattach(struct knote *kn) 525 { 526 u_int ffctrl; 527 528 kn->kn_hook = NULL; 529 if (kn->kn_sfflags & NOTE_TRIGGER) 530 kn->kn_ptr.hookid = 1; 531 else 532 kn->kn_ptr.hookid = 0; 533 534 ffctrl = kn->kn_sfflags & NOTE_FFCTRLMASK; 535 kn->kn_sfflags &= NOTE_FFLAGSMASK; 536 switch (ffctrl) { 537 case NOTE_FFNOP: 538 break; 539 540 case NOTE_FFAND: 541 kn->kn_fflags &= kn->kn_sfflags; 542 break; 543 544 case NOTE_FFOR: 545 kn->kn_fflags |= kn->kn_sfflags; 546 break; 547 548 case NOTE_FFCOPY: 549 kn->kn_fflags = kn->kn_sfflags; 550 break; 551 552 default: 553 /* XXX Return error? */ 554 break; 555 } 556 /* We just happen to copy this value as well. Undocumented. */ 557 kn->kn_data = kn->kn_sdata; 558 559 return 0; 560 } 561 562 static void 563 filt_userdetach(struct knote *kn) 564 { 565 /* nothing to do */ 566 } 567 568 static int 569 filt_user(struct knote *kn, long hint) 570 { 571 return (kn->kn_ptr.hookid); 572 } 573 574 static void 575 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type) 576 { 577 u_int ffctrl; 578 579 switch (type) { 580 case EVENT_REGISTER: 581 if (kev->fflags & NOTE_TRIGGER) 582 kn->kn_ptr.hookid = 1; 583 584 ffctrl = kev->fflags & NOTE_FFCTRLMASK; 585 kev->fflags &= NOTE_FFLAGSMASK; 586 switch (ffctrl) { 587 case NOTE_FFNOP: 588 break; 589 590 case NOTE_FFAND: 591 kn->kn_fflags &= kev->fflags; 592 break; 593 594 case NOTE_FFOR: 595 kn->kn_fflags |= kev->fflags; 596 break; 597 598 case NOTE_FFCOPY: 599 kn->kn_fflags = kev->fflags; 600 break; 601 602 default: 603 /* XXX Return error? */ 604 break; 605 } 606 /* We just happen to copy this value as well. Undocumented. 
*/ 607 kn->kn_data = kev->data; 608 609 /* 610 * This is not the correct use of EV_CLEAR in an event 611 * modification, it should have been passed as a NOTE instead. 612 * But we need to maintain compatibility with Apple & FreeBSD. 613 * 614 * Note however that EV_CLEAR can still be used when doing 615 * the initial registration of the event and works as expected 616 * (clears the event on reception). 617 */ 618 if (kev->flags & EV_CLEAR) { 619 kn->kn_ptr.hookid = 0; 620 /* 621 * Clearing kn->kn_data is fine, since it gets set 622 * every time anyway. We just shouldn't clear 623 * kn->kn_fflags here, since that would limit the 624 * possible uses of this API. NOTE_FFAND or 625 * NOTE_FFCOPY should be used for explicitly clearing 626 * kn->kn_fflags. 627 */ 628 kn->kn_data = 0; 629 } 630 break; 631 632 case EVENT_PROCESS: 633 *kev = kn->kn_kevent; 634 kev->fflags = kn->kn_fflags; 635 kev->data = kn->kn_data; 636 if (kn->kn_flags & EV_CLEAR) { 637 kn->kn_ptr.hookid = 0; 638 /* kn_data, kn_fflags handled by parent */ 639 } 640 break; 641 642 default: 643 panic("filt_usertouch() - invalid type (%ld)", type); 644 break; 645 } 646 } 647 648 /* 649 * EVFILT_FS 650 */ 651 struct klist fs_klist = SLIST_HEAD_INITIALIZER(&fs_klist); 652 653 static int 654 filt_fsattach(struct knote *kn) 655 { 656 kn->kn_flags |= EV_CLEAR; 657 knote_insert(&fs_klist, kn); 658 659 return (0); 660 } 661 662 static void 663 filt_fsdetach(struct knote *kn) 664 { 665 knote_remove(&fs_klist, kn); 666 } 667 668 static int 669 filt_fs(struct knote *kn, long hint) 670 { 671 kn->kn_fflags |= hint; 672 return (kn->kn_fflags != 0); 673 } 674 675 /* 676 * Initialize a kqueue. 677 * 678 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops. 679 */ 680 void 681 kqueue_init(struct kqueue *kq, struct filedesc *fdp) 682 { 683 bzero(kq, sizeof(*kq)); 684 TAILQ_INIT(&kq->kq_knpend); 685 TAILQ_INIT(&kq->kq_knlist); 686 kq->kq_fdp = fdp; 687 SLIST_INIT(&kq->kq_kqinfo.ki_note); 688 } 689 690 /* 691 * Terminate a kqueue. Freeing the actual kq itself is left up to the 692 * caller (it might be embedded in a lwp so we don't do it here). 693 * 694 * The kq's knlist must be completely eradicated so block on any 695 * processing races. 696 */ 697 void 698 kqueue_terminate(struct kqueue *kq) 699 { 700 struct knote *kn; 701 702 lwkt_getpooltoken(kq); 703 while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) { 704 if (knote_acquire(kn)) 705 knote_detach_and_drop(kn); 706 } 707 lwkt_relpooltoken(kq); 708 709 if (kq->kq_knhash) { 710 hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask); 711 kq->kq_knhash = NULL; 712 kq->kq_knhashmask = 0; 713 } 714 } 715 716 /* 717 * MPSAFE 718 */ 719 int 720 sys_kqueue(struct kqueue_args *uap) 721 { 722 struct thread *td = curthread; 723 struct kqueue *kq; 724 struct file *fp; 725 int fd, error; 726 727 error = falloc(td->td_lwp, &fp, &fd); 728 if (error) 729 return (error); 730 fp->f_flag = FREAD | FWRITE; 731 fp->f_type = DTYPE_KQUEUE; 732 fp->f_ops = &kqueueops; 733 734 kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO); 735 kqueue_init(kq, td->td_proc->p_fd); 736 fp->f_data = kq; 737 738 fsetfd(kq->kq_fdp, fp, fd); 739 uap->sysmsg_result = fd; 740 fdrop(fp); 741 return (error); 742 } 743 744 /* 745 * Copy 'count' items into the destination list pointed to by uap->eventlist. 
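 *
 * On a copyout failure *res is set to -1 so that kern_kevent() can tell
 * that the events (or a posted error) never made it out to userland.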
746 */ 747 static int 748 kevent_copyout(void *arg, struct kevent *kevp, int count, int *res) 749 { 750 struct kevent_copyin_args *kap; 751 int error; 752 753 kap = (struct kevent_copyin_args *)arg; 754 755 error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp)); 756 if (error == 0) { 757 kap->ka->eventlist += count; 758 *res += count; 759 } else { 760 *res = -1; 761 } 762 763 return (error); 764 } 765 766 /* 767 * Copy at most 'max' items from the list pointed to by kap->changelist, 768 * return number of items in 'events'. 769 */ 770 static int 771 kevent_copyin(void *arg, struct kevent *kevp, int max, int *events) 772 { 773 struct kevent_copyin_args *kap; 774 int error, count; 775 776 kap = (struct kevent_copyin_args *)arg; 777 778 count = min(kap->ka->nchanges - kap->pchanges, max); 779 error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp); 780 if (error == 0) { 781 kap->ka->changelist += count; 782 kap->pchanges += count; 783 *events = count; 784 } 785 786 return (error); 787 } 788 789 /* 790 * MPSAFE 791 */ 792 int 793 kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap, 794 k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn, 795 struct timespec *tsp_in, int flags) 796 { 797 struct kevent *kevp; 798 struct timespec *tsp, ats; 799 int i, n, total, error, nerrors = 0; 800 int gobbled; 801 int lres; 802 int limit = kq_checkloop; 803 int closedcounter; 804 int scan_flags; 805 struct kevent kev[KQ_NEVENTS]; 806 struct knote marker; 807 struct lwkt_token *tok; 808 809 if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec) 810 atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC); 811 812 tsp = tsp_in; 813 *res = 0; 814 815 closedcounter = kq->kq_fdp->fd_closedcounter; 816 817 for (;;) { 818 n = 0; 819 error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n); 820 if (error) 821 return error; 822 if (n == 0) 823 break; 824 for (i = 0; i < n; ++i) 825 kev[i].flags &= ~EV_SYSFLAGS; 826 for (i = 0; i < n; ++i) { 827 gobbled = n - i; 828 error = kqueue_register(kq, &kev[i], &gobbled); 829 i += gobbled - 1; 830 kevp = &kev[i]; 831 832 /* 833 * If a registration returns an error we 834 * immediately post the error. The kevent() 835 * call itself will fail with the error if 836 * no space is available for posting. 837 * 838 * Such errors normally bypass the timeout/blocking 839 * code. However, if the copyoutfn function refuses 840 * to post the error (see sys_poll()), then we 841 * ignore it too. 842 */ 843 if (error || (kevp->flags & EV_RECEIPT)) { 844 kevp->flags = EV_ERROR; 845 kevp->data = error; 846 lres = *res; 847 kevent_copyoutfn(uap, kevp, 1, res); 848 if (*res < 0) { 849 return error; 850 } else if (lres != *res) { 851 nevents--; 852 nerrors++; 853 } 854 } 855 } 856 } 857 if (nerrors) 858 return 0; 859 860 /* 861 * Acquire/wait for events - setup timeout 862 * 863 * If no timeout specified clean up the run path by clearing the 864 * PRECISE flag. 865 */ 866 if (tsp != NULL) { 867 if (tsp->tv_sec || tsp->tv_nsec) { 868 getnanouptime(&ats); 869 timespecadd(tsp, &ats, tsp); /* tsp = target time */ 870 } 871 } else { 872 flags &= ~KEVENT_TIMEOUT_PRECISE; 873 } 874 875 /* 876 * Loop as required. 877 * 878 * Collect as many events as we can. Sleeping on successive 879 * loops is disabled if copyoutfn has incremented (*res). 880 * 881 * The loop stops if an error occurs, all events have been 882 * scanned (the marker has been reached), or fewer than the 883 * maximum number of events is found. 
884 * 885 * The copyoutfn function does not have to increment (*res) in 886 * order for the loop to continue. 887 * 888 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents. 889 */ 890 total = 0; 891 error = 0; 892 marker.kn_filter = EVFILT_MARKER; 893 marker.kn_status = KN_PROCESSING; 894 895 tok = lwkt_token_pool_lookup(kq); 896 scan_flags = KEVENT_SCAN_INSERT_MARKER; 897 898 while ((n = nevents - total) > 0) { 899 if (n > KQ_NEVENTS) 900 n = KQ_NEVENTS; 901 902 /* 903 * Process all received events 904 * Account for all non-spurious events in our total 905 */ 906 i = kqueue_scan(kq, kev, n, &marker, closedcounter, scan_flags); 907 scan_flags = KEVENT_SCAN_KEEP_MARKER; 908 if (i) { 909 lres = *res; 910 error = kevent_copyoutfn(uap, kev, i, res); 911 total += *res - lres; 912 if (error) 913 break; 914 } 915 if (limit && --limit == 0) 916 panic("kqueue: checkloop failed i=%d", i); 917 918 /* 919 * Normally when fewer events are returned than requested 920 * we can stop. However, if only spurious events were 921 * collected the copyout will not bump (*res) and we have 922 * to continue. 923 */ 924 if (i < n && *res) 925 break; 926 927 /* 928 * If no events were recorded (no events happened or the events 929 * that did happen were all spurious), block until an event 930 * occurs or the timeout occurs and reload the marker. 931 * 932 * If we saturated n (i == n) loop up without sleeping to 933 * continue processing the list. 934 */ 935 if (i != n && kq->kq_count == 0 && *res == 0) { 936 int timeout; 937 int ustimeout; 938 939 if (tsp == NULL) { 940 timeout = 0; 941 ustimeout = 0; 942 } else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) { 943 error = EWOULDBLOCK; 944 break; 945 } else { 946 struct timespec atx = *tsp; 947 948 getnanouptime(&ats); 949 timespecsub(&atx, &ats, &atx); 950 if (atx.tv_sec < 0 || 951 (atx.tv_sec == 0 && atx.tv_nsec <= 0)) { 952 error = EWOULDBLOCK; 953 break; 954 } 955 if (flags & KEVENT_TIMEOUT_PRECISE) { 956 if (atx.tv_sec == 0 && 957 atx.tv_nsec < kq_sleep_threshold) { 958 ustimeout = kq_sleep_threshold / 959 1000; 960 } else if (atx.tv_sec < 60) { 961 ustimeout = 962 atx.tv_sec * 1000000 + 963 atx.tv_nsec / 1000; 964 } else { 965 ustimeout = 60 * 1000000; 966 } 967 if (ustimeout == 0) 968 ustimeout = 1; 969 timeout = 0; 970 } else if (atx.tv_sec > 60 * 60) { 971 timeout = 60 * 60 * hz; 972 ustimeout = 0; 973 } else { 974 timeout = tstohz_high(&atx); 975 ustimeout = 0; 976 } 977 } 978 979 lwkt_gettoken(tok); 980 if (kq->kq_count == 0) { 981 kq->kq_sleep_cnt++; 982 if (__predict_false(kq->kq_sleep_cnt == 0)) { 983 /* 984 * Guard against possible wrapping. And 985 * set it to 2, so that kqueue_wakeup() 986 * can wake everyone up. 987 */ 988 kq->kq_sleep_cnt = 2; 989 } 990 if (flags & KEVENT_TIMEOUT_PRECISE) { 991 error = precise_sleep(kq, PCATCH, 992 "kqread", ustimeout); 993 } else { 994 error = tsleep(kq, PCATCH, 995 "kqread", timeout); 996 } 997 998 /* don't restart after signals... */ 999 if (error == ERESTART) 1000 error = EINTR; 1001 if (error == EWOULDBLOCK) 1002 error = 0; 1003 if (error) { 1004 lwkt_reltoken(tok); 1005 break; 1006 } 1007 scan_flags = KEVENT_SCAN_RELOAD_MARKER; 1008 } 1009 lwkt_reltoken(tok); 1010 } 1011 1012 /* 1013 * Deal with an edge case where spurious events can cause 1014 * a loop to occur without moving the marker. This can 1015 * prevent kqueue_scan() from picking up new events which 1016 * race us. We must be sure to move the marker for this 1017 * case. 
1018 * 1019 * NOTE: We do not want to move the marker if events 1020 * were scanned because normal kqueue operations 1021 * may reactivate events. Moving the marker in 1022 * that case could result in duplicates for the 1023 * same event. 1024 */ 1025 if (i == 0) 1026 scan_flags = KEVENT_SCAN_RELOAD_MARKER; 1027 } 1028 1029 /* 1030 * Remove the marker 1031 */ 1032 if (scan_flags != KEVENT_SCAN_INSERT_MARKER) { 1033 lwkt_gettoken(tok); 1034 TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe); 1035 lwkt_reltoken(tok); 1036 } 1037 1038 /* Timeouts do not return EWOULDBLOCK. */ 1039 if (error == EWOULDBLOCK) 1040 error = 0; 1041 return error; 1042 } 1043 1044 /* 1045 * MPALMOSTSAFE 1046 */ 1047 int 1048 sys_kevent(struct kevent_args *uap) 1049 { 1050 struct thread *td = curthread; 1051 struct timespec ts, *tsp; 1052 struct kqueue *kq; 1053 struct file *fp = NULL; 1054 struct kevent_copyin_args *kap, ka; 1055 int error; 1056 1057 if (uap->timeout) { 1058 error = copyin(uap->timeout, &ts, sizeof(ts)); 1059 if (error) 1060 return (error); 1061 tsp = &ts; 1062 } else { 1063 tsp = NULL; 1064 } 1065 fp = holdfp(td, uap->fd, -1); 1066 if (fp == NULL) 1067 return (EBADF); 1068 if (fp->f_type != DTYPE_KQUEUE) { 1069 fdrop(fp); 1070 return (EBADF); 1071 } 1072 1073 kq = (struct kqueue *)fp->f_data; 1074 1075 kap = &ka; 1076 kap->ka = uap; 1077 kap->pchanges = 0; 1078 1079 error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap, 1080 kevent_copyin, kevent_copyout, tsp, 0); 1081 1082 dropfp(td, uap->fd, fp); 1083 1084 return (error); 1085 } 1086 1087 /* 1088 * Efficiently load multiple file pointers. This significantly reduces 1089 * threaded overhead. When doing simple polling we can depend on the 1090 * per-thread (fd,fp) cache. With more descriptors, we batch. 1091 */ 1092 static 1093 void 1094 floadkevfps(thread_t td, struct filedesc *fdp, struct kevent *kev, 1095 struct file **fp, int climit) 1096 { 1097 struct filterops *fops; 1098 int tdcache; 1099 1100 if (climit <= 2 && td->td_proc && td->td_proc->p_fd == fdp) { 1101 tdcache = 1; 1102 } else { 1103 tdcache = 0; 1104 spin_lock_shared(&fdp->fd_spin); 1105 } 1106 1107 while (climit) { 1108 *fp = NULL; 1109 if (kev->filter < 0 && 1110 kev->filter + EVFILT_SYSCOUNT >= 0) { 1111 fops = sysfilt_ops[~kev->filter]; 1112 if (fops->f_flags & FILTEROP_ISFD) { 1113 if (tdcache) { 1114 *fp = holdfp(td, kev->ident, -1); 1115 } else { 1116 *fp = holdfp_fdp_locked(fdp, 1117 kev->ident, -1); 1118 } 1119 } 1120 } 1121 --climit; 1122 ++fp; 1123 ++kev; 1124 } 1125 if (tdcache == 0) 1126 spin_unlock_shared(&fdp->fd_spin); 1127 } 1128 1129 /* 1130 * Register up to *countp kev's. Always registers at least 1. 1131 * 1132 * The number registered is returned in *countp. 1133 * 1134 * If an error occurs or a kev is flagged EV_RECEIPT, it is 1135 * processed and included in *countp, and processing then 1136 * stops. 
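 *
 * For example, with *countp == 3 on entry and kev[1] failing, *countp
 * is 2 on return and the returned error applies to kev[1]; the caller
 * (see kern_kevent()) posts that error and resumes with the remaining
 * changes.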
1137 */ 1138 int 1139 kqueue_register(struct kqueue *kq, struct kevent *kev, int *countp) 1140 { 1141 struct filedesc *fdp = kq->kq_fdp; 1142 struct klist *list = NULL; 1143 struct filterops *fops; 1144 struct file *fp[KQ_NEVENTS]; 1145 struct knote *kn = NULL; 1146 struct thread *td; 1147 int error; 1148 int count; 1149 int climit; 1150 int closedcounter; 1151 struct knote_cache_list *cache_list; 1152 1153 td = curthread; 1154 climit = *countp; 1155 if (climit > KQ_NEVENTS) 1156 climit = KQ_NEVENTS; 1157 closedcounter = fdp->fd_closedcounter; 1158 floadkevfps(td, fdp, kev, fp, climit); 1159 1160 lwkt_getpooltoken(kq); 1161 count = 0; 1162 1163 /* 1164 * To avoid races, only one thread can register events on this 1165 * kqueue at a time. 1166 */ 1167 while (__predict_false(kq->kq_regtd != NULL && kq->kq_regtd != td)) { 1168 kq->kq_state |= KQ_REGWAIT; 1169 tsleep(&kq->kq_regtd, 0, "kqreg", 0); 1170 } 1171 if (__predict_false(kq->kq_regtd != NULL)) { 1172 /* Recursive calling of kqueue_register() */ 1173 td = NULL; 1174 } else { 1175 /* Owner of the kq_regtd, i.e. td != NULL */ 1176 kq->kq_regtd = td; 1177 } 1178 1179 loop: 1180 if (kev->filter < 0) { 1181 if (kev->filter + EVFILT_SYSCOUNT < 0) { 1182 error = EINVAL; 1183 ++count; 1184 goto done; 1185 } 1186 fops = sysfilt_ops[~kev->filter]; /* to 0-base index */ 1187 } else { 1188 /* 1189 * XXX 1190 * filter attach routine is responsible for insuring that 1191 * the identifier can be attached to it. 1192 */ 1193 error = EINVAL; 1194 ++count; 1195 goto done; 1196 } 1197 1198 if (fops->f_flags & FILTEROP_ISFD) { 1199 /* validate descriptor */ 1200 if (fp[count] == NULL) { 1201 error = EBADF; 1202 ++count; 1203 goto done; 1204 } 1205 } 1206 1207 cache_list = &knote_cache_lists[mycpuid]; 1208 if (SLIST_EMPTY(&cache_list->knote_cache)) { 1209 struct knote *new_kn; 1210 1211 new_kn = knote_alloc(); 1212 crit_enter(); 1213 SLIST_INSERT_HEAD(&cache_list->knote_cache, new_kn, kn_link); 1214 cache_list->knote_cache_cnt++; 1215 crit_exit(); 1216 } 1217 1218 if (fp[count] != NULL) { 1219 list = &fp[count]->f_klist; 1220 } else if (kq->kq_knhashmask) { 1221 list = &kq->kq_knhash[ 1222 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)]; 1223 } 1224 if (list != NULL) { 1225 lwkt_getpooltoken(list); 1226 again: 1227 SLIST_FOREACH(kn, list, kn_link) { 1228 if (kn->kn_kq == kq && 1229 kn->kn_filter == kev->filter && 1230 kn->kn_id == kev->ident) { 1231 if (knote_acquire(kn) == 0) 1232 goto again; 1233 break; 1234 } 1235 } 1236 lwkt_relpooltoken(list); 1237 } 1238 1239 /* 1240 * NOTE: At this point if kn is non-NULL we will have acquired 1241 * it and set KN_PROCESSING. 1242 */ 1243 if (kn == NULL && ((kev->flags & EV_ADD) == 0)) { 1244 error = ENOENT; 1245 ++count; 1246 goto done; 1247 } 1248 1249 /* 1250 * kn now contains the matching knote, or NULL if no match 1251 */ 1252 if (kev->flags & EV_ADD) { 1253 if (kn == NULL) { 1254 crit_enter(); 1255 kn = SLIST_FIRST(&cache_list->knote_cache); 1256 if (kn == NULL) { 1257 crit_exit(); 1258 kn = knote_alloc(); 1259 } else { 1260 SLIST_REMOVE_HEAD(&cache_list->knote_cache, 1261 kn_link); 1262 cache_list->knote_cache_cnt--; 1263 crit_exit(); 1264 } 1265 kn->kn_fp = fp[count]; 1266 kn->kn_kq = kq; 1267 kn->kn_fop = fops; 1268 1269 /* 1270 * apply reference count to knote structure, and 1271 * do not release it at the end of this routine. 
1272 */ 1273 fp[count] = NULL; /* safety */ 1274 1275 kn->kn_sfflags = kev->fflags; 1276 kn->kn_sdata = kev->data; 1277 kev->fflags = 0; 1278 kev->data = 0; 1279 kn->kn_kevent = *kev; 1280 1281 /* 1282 * KN_PROCESSING prevents the knote from getting 1283 * ripped out from under us while we are trying 1284 * to attach it, in case the attach blocks. 1285 */ 1286 kn->kn_status = KN_PROCESSING; 1287 knote_attach(kn); 1288 if ((error = filter_attach(kn)) != 0) { 1289 kn->kn_status |= KN_DELETING | KN_REPROCESS; 1290 knote_drop(kn); 1291 ++count; 1292 goto done; 1293 } 1294 1295 /* 1296 * Interlock against close races which either tried 1297 * to remove our knote while we were blocked or missed 1298 * it entirely prior to our attachment. We do not 1299 * want to end up with a knote on a closed descriptor. 1300 */ 1301 if ((fops->f_flags & FILTEROP_ISFD) && 1302 checkfdclosed(curthread, fdp, kev->ident, kn->kn_fp, 1303 closedcounter)) { 1304 kn->kn_status |= KN_DELETING | KN_REPROCESS; 1305 } 1306 } else { 1307 /* 1308 * The user may change some filter values after the 1309 * initial EV_ADD, but doing so will not reset any 1310 * filter which have already been triggered. 1311 */ 1312 KKASSERT(kn->kn_status & KN_PROCESSING); 1313 if (fops == &user_filtops) { 1314 filt_usertouch(kn, kev, EVENT_REGISTER); 1315 } else { 1316 kn->kn_sfflags = kev->fflags; 1317 kn->kn_sdata = kev->data; 1318 kn->kn_kevent.udata = kev->udata; 1319 } 1320 } 1321 1322 /* 1323 * Execute the filter event to immediately activate the 1324 * knote if necessary. If reprocessing events are pending 1325 * due to blocking above we do not run the filter here 1326 * but instead let knote_release() do it. Otherwise we 1327 * might run the filter on a deleted event. 1328 */ 1329 if ((kn->kn_status & KN_REPROCESS) == 0) { 1330 if (filter_event(kn, 0)) 1331 KNOTE_ACTIVATE(kn); 1332 } 1333 } else if (kev->flags & EV_DELETE) { 1334 /* 1335 * Delete the existing knote 1336 */ 1337 knote_detach_and_drop(kn); 1338 error = 0; 1339 ++count; 1340 goto done; 1341 } else { 1342 /* 1343 * Modify an existing event. 1344 * 1345 * The user may change some filter values after the 1346 * initial EV_ADD, but doing so will not reset any 1347 * filter which have already been triggered. 1348 */ 1349 KKASSERT(kn->kn_status & KN_PROCESSING); 1350 if (fops == &user_filtops) { 1351 filt_usertouch(kn, kev, EVENT_REGISTER); 1352 } else { 1353 kn->kn_sfflags = kev->fflags; 1354 kn->kn_sdata = kev->data; 1355 kn->kn_kevent.udata = kev->udata; 1356 } 1357 1358 /* 1359 * Execute the filter event to immediately activate the 1360 * knote if necessary. If reprocessing events are pending 1361 * due to blocking above we do not run the filter here 1362 * but instead let knote_release() do it. Otherwise we 1363 * might run the filter on a deleted event. 1364 */ 1365 if ((kn->kn_status & KN_REPROCESS) == 0) { 1366 if (filter_event(kn, 0)) 1367 KNOTE_ACTIVATE(kn); 1368 } 1369 } 1370 1371 /* 1372 * Disablement does not deactivate a knote here. 1373 */ 1374 if ((kev->flags & EV_DISABLE) && 1375 ((kn->kn_status & KN_DISABLED) == 0)) { 1376 kn->kn_status |= KN_DISABLED; 1377 } 1378 1379 /* 1380 * Re-enablement may have to immediately enqueue an active knote. 
1381 */ 1382 if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) { 1383 kn->kn_status &= ~KN_DISABLED; 1384 if ((kn->kn_status & KN_ACTIVE) && 1385 ((kn->kn_status & KN_QUEUED) == 0)) { 1386 knote_enqueue(kn); 1387 } 1388 } 1389 1390 /* 1391 * Handle any required reprocessing 1392 */ 1393 knote_release(kn); 1394 /* kn may be invalid now */ 1395 1396 /* 1397 * Loop control. We stop on errors (above), and also stop after 1398 * processing EV_RECEIPT, so the caller can process it. 1399 */ 1400 ++count; 1401 if (kev->flags & EV_RECEIPT) { 1402 error = 0; 1403 goto done; 1404 } 1405 ++kev; 1406 if (count < climit) { 1407 if (fp[count-1]) /* drop unprocessed fp */ 1408 fdrop(fp[count-1]); 1409 goto loop; 1410 } 1411 1412 /* 1413 * Cleanup 1414 */ 1415 done: 1416 if (td != NULL) { /* Owner of the kq_regtd */ 1417 kq->kq_regtd = NULL; 1418 if (__predict_false(kq->kq_state & KQ_REGWAIT)) { 1419 kq->kq_state &= ~KQ_REGWAIT; 1420 wakeup(&kq->kq_regtd); 1421 } 1422 } 1423 lwkt_relpooltoken(kq); 1424 1425 /* 1426 * Drop unprocessed file pointers 1427 */ 1428 *countp = count; 1429 if (count && fp[count-1]) 1430 fdrop(fp[count-1]); 1431 while (count < climit) { 1432 if (fp[count]) 1433 fdrop(fp[count]); 1434 ++count; 1435 } 1436 return (error); 1437 } 1438 1439 /* 1440 * Scan the kqueue, return the number of active events placed in kevp up 1441 * to count. 1442 * 1443 * Continuous mode events may get recycled, do not continue scanning past 1444 * marker unless no events have been collected. 1445 */ 1446 static int 1447 kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count, 1448 struct knote *marker, int closedcounter, int scan_flags) 1449 { 1450 struct knote *kn, local_marker; 1451 thread_t td = curthread; 1452 int total; 1453 1454 total = 0; 1455 local_marker.kn_filter = EVFILT_MARKER; 1456 local_marker.kn_status = KN_PROCESSING; 1457 1458 lwkt_getpooltoken(kq); 1459 1460 /* 1461 * Adjust marker, insert initial marker, or leave the marker alone. 1462 * 1463 * Also setup our local_marker. 1464 */ 1465 switch(scan_flags) { 1466 case KEVENT_SCAN_RELOAD_MARKER: 1467 TAILQ_REMOVE(&kq->kq_knpend, marker, kn_tqe); 1468 /* fall through */ 1469 case KEVENT_SCAN_INSERT_MARKER: 1470 TAILQ_INSERT_TAIL(&kq->kq_knpend, marker, kn_tqe); 1471 break; 1472 } 1473 TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe); 1474 1475 /* 1476 * Collect events. 1477 */ 1478 while (count) { 1479 kn = TAILQ_NEXT(&local_marker, kn_tqe); 1480 if (kn->kn_filter == EVFILT_MARKER) { 1481 /* Marker reached, we are done */ 1482 if (kn == marker) 1483 break; 1484 1485 /* Move local marker past some other threads marker */ 1486 kn = TAILQ_NEXT(kn, kn_tqe); 1487 TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe); 1488 TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe); 1489 continue; 1490 } 1491 1492 /* 1493 * We can't skip a knote undergoing processing, otherwise 1494 * we risk not returning it when the user process expects 1495 * it should be returned. Sleep and retry. 1496 */ 1497 if (knote_acquire(kn) == 0) 1498 continue; 1499 1500 /* 1501 * Remove the event for processing. 1502 * 1503 * WARNING! We must leave KN_QUEUED set to prevent the 1504 * event from being KNOTE_ACTIVATE()d while 1505 * the queue state is in limbo, in case we 1506 * block. 1507 */ 1508 TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe); 1509 kq->kq_count--; 1510 1511 /* 1512 * We have to deal with an extremely important race against 1513 * file descriptor close()s here. 
The file descriptor can 1514 * disappear MPSAFE, and there is a small window of 1515 * opportunity between that and the call to knote_fdclose(). 1516 * 1517 * If we hit that window here while doselect or dopoll is 1518 * trying to delete a spurious event they will not be able 1519 * to match up the event against a knote and will go haywire. 1520 */ 1521 if ((kn->kn_fop->f_flags & FILTEROP_ISFD) && 1522 checkfdclosed(td, kq->kq_fdp, kn->kn_kevent.ident, 1523 kn->kn_fp, closedcounter)) { 1524 kn->kn_status |= KN_DELETING | KN_REPROCESS; 1525 } 1526 1527 if (kn->kn_status & KN_DISABLED) { 1528 /* 1529 * If disabled we ensure the event is not queued 1530 * but leave its active bit set. On re-enablement 1531 * the event may be immediately triggered. 1532 */ 1533 kn->kn_status &= ~KN_QUEUED; 1534 } else if ((kn->kn_flags & EV_ONESHOT) == 0 && 1535 (kn->kn_status & KN_DELETING) == 0 && 1536 filter_event(kn, 0) == 0) { 1537 /* 1538 * If not running in one-shot mode and the event 1539 * is no longer present we ensure it is removed 1540 * from the queue and ignore it. 1541 */ 1542 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE); 1543 } else { 1544 /* 1545 * Post the event 1546 */ 1547 if (kn->kn_fop == &user_filtops) 1548 filt_usertouch(kn, kevp, EVENT_PROCESS); 1549 else 1550 *kevp = kn->kn_kevent; 1551 ++kevp; 1552 ++total; 1553 --count; 1554 1555 if (kn->kn_flags & EV_ONESHOT) { 1556 kn->kn_status &= ~KN_QUEUED; 1557 kn->kn_status |= KN_DELETING | KN_REPROCESS; 1558 } else { 1559 if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) { 1560 if (kn->kn_flags & EV_CLEAR) { 1561 kn->kn_data = 0; 1562 kn->kn_fflags = 0; 1563 } 1564 if (kn->kn_flags & EV_DISPATCH) { 1565 kn->kn_status |= KN_DISABLED; 1566 } 1567 kn->kn_status &= ~(KN_QUEUED | 1568 KN_ACTIVE); 1569 } else { 1570 TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe); 1571 kq->kq_count++; 1572 } 1573 } 1574 } 1575 1576 /* 1577 * Handle any post-processing states 1578 */ 1579 knote_release(kn); 1580 } 1581 TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe); 1582 1583 lwkt_relpooltoken(kq); 1584 return (total); 1585 } 1586 1587 /* 1588 * XXX 1589 * This could be expanded to call kqueue_scan, if desired. 
1590 * 1591 * MPSAFE 1592 */ 1593 static int 1594 kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags) 1595 { 1596 return (ENXIO); 1597 } 1598 1599 /* 1600 * MPSAFE 1601 */ 1602 static int 1603 kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags) 1604 { 1605 return (ENXIO); 1606 } 1607 1608 /* 1609 * MPALMOSTSAFE 1610 */ 1611 static int 1612 kqueue_ioctl(struct file *fp, u_long com, caddr_t data, 1613 struct ucred *cred, struct sysmsg *msg) 1614 { 1615 struct kqueue *kq; 1616 int error; 1617 1618 kq = (struct kqueue *)fp->f_data; 1619 lwkt_getpooltoken(kq); 1620 switch(com) { 1621 case FIOASYNC: 1622 if (*(int *)data) 1623 kq->kq_state |= KQ_ASYNC; 1624 else 1625 kq->kq_state &= ~KQ_ASYNC; 1626 error = 0; 1627 break; 1628 case FIOSETOWN: 1629 error = fsetown(*(int *)data, &kq->kq_sigio); 1630 break; 1631 default: 1632 error = ENOTTY; 1633 break; 1634 } 1635 lwkt_relpooltoken(kq); 1636 return (error); 1637 } 1638 1639 /* 1640 * MPSAFE 1641 */ 1642 static int 1643 kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred) 1644 { 1645 struct kqueue *kq = (struct kqueue *)fp->f_data; 1646 1647 bzero((void *)st, sizeof(*st)); 1648 st->st_size = kq->kq_count; 1649 st->st_blksize = sizeof(struct kevent); 1650 st->st_mode = S_IFIFO; 1651 return (0); 1652 } 1653 1654 /* 1655 * MPSAFE 1656 */ 1657 static int 1658 kqueue_close(struct file *fp) 1659 { 1660 struct kqueue *kq = (struct kqueue *)fp->f_data; 1661 1662 kqueue_terminate(kq); 1663 1664 fp->f_data = NULL; 1665 funsetown(&kq->kq_sigio); 1666 1667 kfree(kq, M_KQUEUE); 1668 return (0); 1669 } 1670 1671 static void 1672 kqueue_wakeup(struct kqueue *kq) 1673 { 1674 if (kq->kq_sleep_cnt) { 1675 u_int sleep_cnt = kq->kq_sleep_cnt; 1676 1677 kq->kq_sleep_cnt = 0; 1678 if (sleep_cnt == 1) 1679 wakeup_one(kq); 1680 else 1681 wakeup(kq); 1682 } 1683 KNOTE(&kq->kq_kqinfo.ki_note, 0); 1684 } 1685 1686 /* 1687 * Calls filterops f_attach function, acquiring mplock if filter is not 1688 * marked as FILTEROP_MPSAFE. 1689 * 1690 * Caller must be holding the related kq token 1691 */ 1692 static int 1693 filter_attach(struct knote *kn) 1694 { 1695 int ret; 1696 1697 if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) { 1698 ret = kn->kn_fop->f_attach(kn); 1699 } else { 1700 get_mplock(); 1701 ret = kn->kn_fop->f_attach(kn); 1702 rel_mplock(); 1703 } 1704 return (ret); 1705 } 1706 1707 /* 1708 * Detach the knote and drop it, destroying the knote. 1709 * 1710 * Calls filterops f_detach function, acquiring mplock if filter is not 1711 * marked as FILTEROP_MPSAFE. 1712 * 1713 * Caller must be holding the related kq token 1714 */ 1715 static void 1716 knote_detach_and_drop(struct knote *kn) 1717 { 1718 kn->kn_status |= KN_DELETING | KN_REPROCESS; 1719 if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) { 1720 kn->kn_fop->f_detach(kn); 1721 } else { 1722 get_mplock(); 1723 kn->kn_fop->f_detach(kn); 1724 rel_mplock(); 1725 } 1726 knote_drop(kn); 1727 } 1728 1729 /* 1730 * Calls filterops f_event function, acquiring mplock if filter is not 1731 * marked as FILTEROP_MPSAFE. 1732 * 1733 * If the knote is in the middle of being created or deleted we cannot 1734 * safely call the filter op. 
1735 * 1736 * Caller must be holding the related kq token 1737 */ 1738 static int 1739 filter_event(struct knote *kn, long hint) 1740 { 1741 int ret; 1742 1743 if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) { 1744 ret = kn->kn_fop->f_event(kn, hint); 1745 } else { 1746 get_mplock(); 1747 ret = kn->kn_fop->f_event(kn, hint); 1748 rel_mplock(); 1749 } 1750 return (ret); 1751 } 1752 1753 /* 1754 * Walk down a list of knotes, activating them if their event has triggered. 1755 * 1756 * If we encounter any knotes which are undergoing processing we just mark 1757 * them for reprocessing and do not try to [re]activate the knote. However, 1758 * if a hint is being passed we have to wait and that makes things a bit 1759 * sticky. 1760 */ 1761 void 1762 knote(struct klist *list, long hint) 1763 { 1764 struct kqueue *kq; 1765 struct knote *kn; 1766 struct knote *kntmp; 1767 1768 lwkt_getpooltoken(list); 1769 restart: 1770 SLIST_FOREACH(kn, list, kn_next) { 1771 kq = kn->kn_kq; 1772 lwkt_getpooltoken(kq); 1773 1774 /* temporary verification hack */ 1775 SLIST_FOREACH(kntmp, list, kn_next) { 1776 if (kn == kntmp) 1777 break; 1778 } 1779 if (kn != kntmp || kn->kn_kq != kq) { 1780 lwkt_relpooltoken(kq); 1781 goto restart; 1782 } 1783 1784 if (kn->kn_status & KN_PROCESSING) { 1785 /* 1786 * Someone else is processing the knote, ask the 1787 * other thread to reprocess it and don't mess 1788 * with it otherwise. 1789 */ 1790 if (hint == 0) { 1791 kn->kn_status |= KN_REPROCESS; 1792 lwkt_relpooltoken(kq); 1793 continue; 1794 } 1795 1796 /* 1797 * If the hint is non-zero we have to wait or risk 1798 * losing the state the caller is trying to update. 1799 * 1800 * XXX This is a real problem, certain process 1801 * and signal filters will bump kn_data for 1802 * already-processed notes more than once if 1803 * we restart the list scan. FIXME. 1804 */ 1805 kn->kn_status |= KN_WAITING | KN_REPROCESS; 1806 tsleep(kn, 0, "knotec", hz); 1807 lwkt_relpooltoken(kq); 1808 goto restart; 1809 } 1810 1811 /* 1812 * Become the reprocessing master ourselves. 1813 * 1814 * If hint is non-zero running the event is mandatory 1815 * when not deleting so do it whether reprocessing is 1816 * set or not. 1817 */ 1818 kn->kn_status |= KN_PROCESSING; 1819 if ((kn->kn_status & KN_DELETING) == 0) { 1820 if (filter_event(kn, hint)) 1821 KNOTE_ACTIVATE(kn); 1822 } 1823 if (knote_release(kn)) { 1824 lwkt_relpooltoken(kq); 1825 goto restart; 1826 } 1827 lwkt_relpooltoken(kq); 1828 } 1829 lwkt_relpooltoken(list); 1830 } 1831 1832 /* 1833 * Insert knote at head of klist. 1834 * 1835 * This function may only be called via a filter function and thus 1836 * kq_token should already be held and marked for processing. 1837 */ 1838 void 1839 knote_insert(struct klist *klist, struct knote *kn) 1840 { 1841 lwkt_getpooltoken(klist); 1842 KKASSERT(kn->kn_status & KN_PROCESSING); 1843 SLIST_INSERT_HEAD(klist, kn, kn_next); 1844 lwkt_relpooltoken(klist); 1845 } 1846 1847 /* 1848 * Remove knote from a klist 1849 * 1850 * This function may only be called via a filter function and thus 1851 * kq_token should already be held and marked for processing. 
1852 */ 1853 void 1854 knote_remove(struct klist *klist, struct knote *kn) 1855 { 1856 lwkt_getpooltoken(klist); 1857 KKASSERT(kn->kn_status & KN_PROCESSING); 1858 SLIST_REMOVE(klist, kn, knote, kn_next); 1859 lwkt_relpooltoken(klist); 1860 } 1861 1862 void 1863 knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst, 1864 struct filterops *ops, void *hook) 1865 { 1866 struct kqueue *kq; 1867 struct knote *kn; 1868 1869 lwkt_getpooltoken(&src->ki_note); 1870 lwkt_getpooltoken(&dst->ki_note); 1871 while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) { 1872 kq = kn->kn_kq; 1873 lwkt_getpooltoken(kq); 1874 if (SLIST_FIRST(&src->ki_note) != kn || kn->kn_kq != kq) { 1875 lwkt_relpooltoken(kq); 1876 continue; 1877 } 1878 if (knote_acquire(kn)) { 1879 knote_remove(&src->ki_note, kn); 1880 kn->kn_fop = ops; 1881 kn->kn_hook = hook; 1882 knote_insert(&dst->ki_note, kn); 1883 knote_release(kn); 1884 /* kn may be invalid now */ 1885 } 1886 lwkt_relpooltoken(kq); 1887 } 1888 lwkt_relpooltoken(&dst->ki_note); 1889 lwkt_relpooltoken(&src->ki_note); 1890 } 1891 1892 /* 1893 * Remove all knotes referencing a specified fd 1894 */ 1895 void 1896 knote_fdclose(struct file *fp, struct filedesc *fdp, int fd) 1897 { 1898 struct kqueue *kq; 1899 struct knote *kn; 1900 struct knote *kntmp; 1901 1902 lwkt_getpooltoken(&fp->f_klist); 1903 restart: 1904 SLIST_FOREACH(kn, &fp->f_klist, kn_link) { 1905 if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) { 1906 kq = kn->kn_kq; 1907 lwkt_getpooltoken(kq); 1908 1909 /* temporary verification hack */ 1910 SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) { 1911 if (kn == kntmp) 1912 break; 1913 } 1914 if (kn != kntmp || kn->kn_kq->kq_fdp != fdp || 1915 kn->kn_id != fd || kn->kn_kq != kq) { 1916 lwkt_relpooltoken(kq); 1917 goto restart; 1918 } 1919 if (knote_acquire(kn)) 1920 knote_detach_and_drop(kn); 1921 lwkt_relpooltoken(kq); 1922 goto restart; 1923 } 1924 } 1925 lwkt_relpooltoken(&fp->f_klist); 1926 } 1927 1928 /* 1929 * Low level attach function. 1930 * 1931 * The knote should already be marked for processing. 1932 * Caller must hold the related kq token. 1933 */ 1934 static void 1935 knote_attach(struct knote *kn) 1936 { 1937 struct klist *list; 1938 struct kqueue *kq = kn->kn_kq; 1939 1940 if (kn->kn_fop->f_flags & FILTEROP_ISFD) { 1941 KKASSERT(kn->kn_fp); 1942 list = &kn->kn_fp->f_klist; 1943 } else { 1944 if (kq->kq_knhashmask == 0) 1945 kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE, 1946 &kq->kq_knhashmask); 1947 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 1948 } 1949 lwkt_getpooltoken(list); 1950 SLIST_INSERT_HEAD(list, kn, kn_link); 1951 lwkt_relpooltoken(list); 1952 TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink); 1953 } 1954 1955 /* 1956 * Low level drop function. 1957 * 1958 * The knote should already be marked for processing. 1959 * Caller must hold the related kq token. 
1960 */ 1961 static void 1962 knote_drop(struct knote *kn) 1963 { 1964 struct kqueue *kq; 1965 struct klist *list; 1966 1967 kq = kn->kn_kq; 1968 1969 if (kn->kn_fop->f_flags & FILTEROP_ISFD) 1970 list = &kn->kn_fp->f_klist; 1971 else 1972 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 1973 1974 lwkt_getpooltoken(list); 1975 SLIST_REMOVE(list, kn, knote, kn_link); 1976 lwkt_relpooltoken(list); 1977 TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink); 1978 if (kn->kn_status & KN_QUEUED) 1979 knote_dequeue(kn); 1980 if (kn->kn_fop->f_flags & FILTEROP_ISFD) { 1981 fdrop(kn->kn_fp); 1982 kn->kn_fp = NULL; 1983 } 1984 knote_free(kn); 1985 } 1986 1987 /* 1988 * Low level enqueue function. 1989 * 1990 * The knote should already be marked for processing. 1991 * Caller must be holding the kq token 1992 */ 1993 static void 1994 knote_enqueue(struct knote *kn) 1995 { 1996 struct kqueue *kq = kn->kn_kq; 1997 1998 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); 1999 TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe); 2000 kn->kn_status |= KN_QUEUED; 2001 ++kq->kq_count; 2002 2003 /* 2004 * Send SIGIO on request (typically set up as a mailbox signal) 2005 */ 2006 if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1) 2007 pgsigio(kq->kq_sigio, SIGIO, 0); 2008 2009 kqueue_wakeup(kq); 2010 } 2011 2012 /* 2013 * Low level dequeue function. 2014 * 2015 * The knote should already be marked for processing. 2016 * Caller must be holding the kq token 2017 */ 2018 static void 2019 knote_dequeue(struct knote *kn) 2020 { 2021 struct kqueue *kq = kn->kn_kq; 2022 2023 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued")); 2024 TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe); 2025 kn->kn_status &= ~KN_QUEUED; 2026 kq->kq_count--; 2027 } 2028 2029 static struct knote * 2030 knote_alloc(void) 2031 { 2032 return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK); 2033 } 2034 2035 static void 2036 knote_free(struct knote *kn) 2037 { 2038 struct knote_cache_list *cache_list; 2039 2040 cache_list = &knote_cache_lists[mycpuid]; 2041 if (cache_list->knote_cache_cnt < KNOTE_CACHE_MAX) { 2042 crit_enter(); 2043 SLIST_INSERT_HEAD(&cache_list->knote_cache, kn, kn_link); 2044 cache_list->knote_cache_cnt++; 2045 crit_exit(); 2046 return; 2047 } 2048 kfree(kn, M_KQUEUE); 2049 } 2050 2051 struct sleepinfo { 2052 void *ident; 2053 int timedout; 2054 }; 2055 2056 static void 2057 precise_sleep_intr(systimer_t info, int in_ipi, struct intrframe *frame) 2058 { 2059 struct sleepinfo *si; 2060 2061 si = info->data; 2062 si->timedout = 1; 2063 wakeup(si->ident); 2064 } 2065 2066 static int 2067 precise_sleep(void *ident, int flags, const char *wmesg, int us) 2068 { 2069 struct systimer info; 2070 struct sleepinfo si = { 2071 .ident = ident, 2072 .timedout = 0, 2073 }; 2074 int r; 2075 2076 tsleep_interlock(ident, flags); 2077 systimer_init_oneshot(&info, precise_sleep_intr, &si, us); 2078 r = tsleep(ident, flags | PINTERLOCKED, wmesg, 0); 2079 systimer_del(&info); 2080 if (si.timedout) 2081 r = EWOULDBLOCK; 2082 2083 return r; 2084 } 2085
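
/*
 * Illustrative userland sketch (kept under #if 0, never compiled): a
 * minimal example, using the standard kqueue(2)/kevent(2) interface, of
 * driving the EVFILT_TIMER and EVFILT_USER filters implemented above.
 * The program is an assumed example for reference only and is not
 * referenced by anything in this file.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
	struct kevent change[2], ev;
	int kq, n;

	if ((kq = kqueue()) == -1)
		err(1, "kqueue");

	/* 500ms periodic timer; EVFILT_TIMER data is in milliseconds */
	EV_SET(&change[0], 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
	/* user-triggered event, cleared after each delivery */
	EV_SET(&change[1], 2, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
	if (kevent(kq, change, 2, NULL, 0, NULL) == -1)
		err(1, "kevent register");

	/* fire the user event; handled by filt_usertouch(EVENT_REGISTER) */
	EV_SET(&change[0], 2, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
	if (kevent(kq, change, 1, NULL, 0, NULL) == -1)
		err(1, "kevent trigger");

	/* block until either event fires */
	n = kevent(kq, NULL, 0, &ev, 1, NULL);
	if (n > 0)
		printf("filter %d ident %lu fired\n",
		    ev.filter, (unsigned long)ev.ident);
	return (0);
}
#endif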