/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/ktr.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>
#include <sys/spinlock2.h>

#define EVENT_REGISTER	1
#define EVENT_PROCESS	2

static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
	struct kevent_args	*ka;
	int			pchanges;
};

#define KNOTE_CACHE_MAX		8

struct knote_cache_list {
	struct klist		knote_cache;
	int			knote_cache_cnt;
} __cachealign;

static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct knote *marker, int closedcounter);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);
static void	kqueue_wakeup(struct kqueue *kq);
static int	filter_attach(struct knote *kn);
static int	filter_event(struct knote *kn, long hint);

/*
 * MPSAFE
 */
static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};

static void	knote_attach(struct knote *kn);
static void	knote_drop(struct knote *kn);
static void	knote_detach_and_drop(struct knote *kn);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static struct knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	precise_sleep_intr(systimer_t info, int in_ipi,
		    struct intrframe *frame);
static int	precise_sleep(void *ident, int flags, const char *wmesg,
		    int us);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);
static int	filt_fsattach(struct knote *kn);
static void	filt_fsdetach(struct knote *kn);
static int	filt_fs(struct knote *kn, long hint);

static struct filterops file_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ FILTEROP_MPSAFE, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ FILTEROP_MPSAFE, filt_timerattach, filt_timerdetach, filt_timer };
static struct filterops user_filtops =
	{ FILTEROP_MPSAFE, filt_userattach, filt_userdetach, filt_user };
static struct filterops fs_filtops =
	{ FILTEROP_MPSAFE, filt_fsattach, filt_fsdetach, filt_fs };

static int	kq_ncallouts = 0;
static int	kq_calloutmax = 65536;
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int	kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of loops for kqueue scan");
static int	kq_sleep_threshold = 20000;
SYSCTL_INT(_kern, OID_AUTO, kq_sleep_threshold, CTLFLAG_RW,
    &kq_sleep_threshold, 0, "Minimum sleep duration without busy-looping");

#define KNOTE_ACTIVATE(kn) do { 					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_EXCEPT */
	&user_filtops,			/* EVFILT_USER */
	&fs_filtops,			/* EVFILT_FS */
};

static struct knote_cache_list	knote_cache_lists[MAXCPU];

/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 *
 * Related kq token must be held.
 */
static __inline int
knote_acquire(struct knote *kn)
{
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_WAITING | KN_REPROCESS;
		tsleep(kn, 0, "kqepts", hz);
		/* knote may be stale now */
		return(0);
	}
	kn->kn_status |= KN_PROCESSING;
	return(1);
}

/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Caller must be holding the related kq token.
 *
 * Non-zero is returned if the knote is destroyed or detached.
 */
static __inline int
knote_release(struct knote *kn)
{
	int ret;

	while (kn->kn_status & KN_REPROCESS) {
		kn->kn_status &= ~KN_REPROCESS;
		if (kn->kn_status & KN_WAITING) {
			kn->kn_status &= ~KN_WAITING;
			wakeup(kn);
		}
		if (kn->kn_status & KN_DELETING) {
			knote_detach_and_drop(kn);
			return(1);
			/* NOT REACHED */
		}
		if (filter_event(kn, 0))
			KNOTE_ACTIVATE(kn);
	}
	if (kn->kn_status & KN_DETACHED)
		ret = 1;
	else
		ret = 0;
	kn->kn_status &= ~KN_PROCESSING;
	/* kn should not be accessed anymore */
	return ret;
}

static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}

/*
 * MPSAFE
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EOPNOTSUPP);

	kn->kn_fop = &kqread_filtops;
	knote_insert(&kq->kq_kqinfo.ki_note, kn);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	knote_remove(&kq->kq_kqinfo.ki_note, kn);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL) {
		return (ESRCH);
	}
	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		if (p)
			PRELE(p);
		return (EACCES);
	}

	lwkt_gettoken(&p->p_token);
	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	knote_insert(&p->p_klist, kn);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	p = kn->kn_ptr.p_proc;
	knote_remove(&p->p_klist, kn);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will be poof,
	 * gone later on.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;
		if ((kn->kn_status & KN_DETACHED) == 0) {
			PHOLD(p);
			knote_remove(&p->p_klist, kn);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
			PRELE(p);
		}
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;
		int n;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		n = 1;
		error = kqueue_register(kn->kn_kq, &kev, &n);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}

static void
filt_timerreset(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);
	calloutp = (struct callout *)kn->kn_hook;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);
}

/*
 * The callout interlocks with callout_stop() but can still
 * race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct kqueue *kq = kn->kn_kq;

	lwkt_getpooltoken(kq);

	/*
	 * Open-coded knote_acquire(): we can't sleep in a callout,
	 * but we still need to record this expiration.
450 */ 451 kn->kn_data++; 452 if (kn->kn_status & KN_PROCESSING) { 453 kn->kn_status |= KN_REPROCESS; 454 if ((kn->kn_status & KN_DELETING) == 0 && 455 (kn->kn_flags & EV_ONESHOT) == 0) 456 filt_timerreset(kn); 457 lwkt_relpooltoken(kq); 458 return; 459 } 460 KASSERT((kn->kn_status & KN_DELETING) == 0, 461 ("acquire a deleting knote %#x", kn->kn_status)); 462 kn->kn_status |= KN_PROCESSING; 463 464 KNOTE_ACTIVATE(kn); 465 if ((kn->kn_flags & EV_ONESHOT) == 0) 466 filt_timerreset(kn); 467 468 knote_release(kn); 469 470 lwkt_relpooltoken(kq); 471 } 472 473 /* 474 * data contains amount of time to sleep, in milliseconds 475 */ 476 static int 477 filt_timerattach(struct knote *kn) 478 { 479 struct callout *calloutp; 480 int prev_ncallouts; 481 482 prev_ncallouts = atomic_fetchadd_int(&kq_ncallouts, 1); 483 if (prev_ncallouts >= kq_calloutmax) { 484 atomic_subtract_int(&kq_ncallouts, 1); 485 kn->kn_hook = NULL; 486 return (ENOMEM); 487 } 488 489 kn->kn_flags |= EV_CLEAR; /* automatically set */ 490 calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK); 491 callout_init_mp(calloutp); 492 kn->kn_hook = (caddr_t)calloutp; 493 494 filt_timerreset(kn); 495 return (0); 496 } 497 498 /* 499 * This function is called with the knote flagged locked but it is 500 * still possible to race a callout event due to the callback blocking. 501 */ 502 static void 503 filt_timerdetach(struct knote *kn) 504 { 505 struct callout *calloutp; 506 507 calloutp = (struct callout *)kn->kn_hook; 508 callout_terminate(calloutp); 509 kn->kn_hook = NULL; 510 kfree(calloutp, M_KQUEUE); 511 atomic_subtract_int(&kq_ncallouts, 1); 512 } 513 514 static int 515 filt_timer(struct knote *kn, long hint) 516 { 517 return (kn->kn_data != 0); 518 } 519 520 /* 521 * EVFILT_USER 522 */ 523 static int 524 filt_userattach(struct knote *kn) 525 { 526 u_int ffctrl; 527 528 kn->kn_hook = NULL; 529 if (kn->kn_sfflags & NOTE_TRIGGER) 530 kn->kn_ptr.hookid = 1; 531 else 532 kn->kn_ptr.hookid = 0; 533 534 ffctrl = kn->kn_sfflags & NOTE_FFCTRLMASK; 535 kn->kn_sfflags &= NOTE_FFLAGSMASK; 536 switch (ffctrl) { 537 case NOTE_FFNOP: 538 break; 539 540 case NOTE_FFAND: 541 kn->kn_fflags &= kn->kn_sfflags; 542 break; 543 544 case NOTE_FFOR: 545 kn->kn_fflags |= kn->kn_sfflags; 546 break; 547 548 case NOTE_FFCOPY: 549 kn->kn_fflags = kn->kn_sfflags; 550 break; 551 552 default: 553 /* XXX Return error? */ 554 break; 555 } 556 /* We just happen to copy this value as well. Undocumented. */ 557 kn->kn_data = kn->kn_sdata; 558 559 return 0; 560 } 561 562 static void 563 filt_userdetach(struct knote *kn) 564 { 565 /* nothing to do */ 566 } 567 568 static int 569 filt_user(struct knote *kn, long hint) 570 { 571 return (kn->kn_ptr.hookid); 572 } 573 574 static void 575 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type) 576 { 577 u_int ffctrl; 578 579 switch (type) { 580 case EVENT_REGISTER: 581 if (kev->fflags & NOTE_TRIGGER) 582 kn->kn_ptr.hookid = 1; 583 584 ffctrl = kev->fflags & NOTE_FFCTRLMASK; 585 kev->fflags &= NOTE_FFLAGSMASK; 586 switch (ffctrl) { 587 case NOTE_FFNOP: 588 break; 589 590 case NOTE_FFAND: 591 kn->kn_fflags &= kev->fflags; 592 break; 593 594 case NOTE_FFOR: 595 kn->kn_fflags |= kev->fflags; 596 break; 597 598 case NOTE_FFCOPY: 599 kn->kn_fflags = kev->fflags; 600 break; 601 602 default: 603 /* XXX Return error? */ 604 break; 605 } 606 /* We just happen to copy this value as well. Undocumented. 
*/ 607 kn->kn_data = kev->data; 608 609 /* 610 * This is not the correct use of EV_CLEAR in an event 611 * modification, it should have been passed as a NOTE instead. 612 * But we need to maintain compatibility with Apple & FreeBSD. 613 * 614 * Note however that EV_CLEAR can still be used when doing 615 * the initial registration of the event and works as expected 616 * (clears the event on reception). 617 */ 618 if (kev->flags & EV_CLEAR) { 619 kn->kn_ptr.hookid = 0; 620 /* 621 * Clearing kn->kn_data is fine, since it gets set 622 * every time anyway. We just shouldn't clear 623 * kn->kn_fflags here, since that would limit the 624 * possible uses of this API. NOTE_FFAND or 625 * NOTE_FFCOPY should be used for explicitly clearing 626 * kn->kn_fflags. 627 */ 628 kn->kn_data = 0; 629 } 630 break; 631 632 case EVENT_PROCESS: 633 *kev = kn->kn_kevent; 634 kev->fflags = kn->kn_fflags; 635 kev->data = kn->kn_data; 636 if (kn->kn_flags & EV_CLEAR) { 637 kn->kn_ptr.hookid = 0; 638 /* kn_data, kn_fflags handled by parent */ 639 } 640 break; 641 642 default: 643 panic("filt_usertouch() - invalid type (%ld)", type); 644 break; 645 } 646 } 647 648 /* 649 * EVFILT_FS 650 */ 651 struct klist fs_klist = SLIST_HEAD_INITIALIZER(&fs_klist); 652 653 static int 654 filt_fsattach(struct knote *kn) 655 { 656 kn->kn_flags |= EV_CLEAR; 657 knote_insert(&fs_klist, kn); 658 659 return (0); 660 } 661 662 static void 663 filt_fsdetach(struct knote *kn) 664 { 665 knote_remove(&fs_klist, kn); 666 } 667 668 static int 669 filt_fs(struct knote *kn, long hint) 670 { 671 kn->kn_fflags |= hint; 672 return (kn->kn_fflags != 0); 673 } 674 675 /* 676 * Initialize a kqueue. 677 * 678 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops. 679 * 680 * MPSAFE 681 */ 682 void 683 kqueue_init(struct kqueue *kq, struct filedesc *fdp) 684 { 685 TAILQ_INIT(&kq->kq_knpend); 686 TAILQ_INIT(&kq->kq_knlist); 687 kq->kq_count = 0; 688 kq->kq_fdp = fdp; 689 SLIST_INIT(&kq->kq_kqinfo.ki_note); 690 } 691 692 /* 693 * Terminate a kqueue. Freeing the actual kq itself is left up to the 694 * caller (it might be embedded in a lwp so we don't do it here). 695 * 696 * The kq's knlist must be completely eradicated so block on any 697 * processing races. 698 */ 699 void 700 kqueue_terminate(struct kqueue *kq) 701 { 702 struct knote *kn; 703 704 lwkt_getpooltoken(kq); 705 while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) { 706 if (knote_acquire(kn)) 707 knote_detach_and_drop(kn); 708 } 709 lwkt_relpooltoken(kq); 710 711 if (kq->kq_knhash) { 712 hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask); 713 kq->kq_knhash = NULL; 714 kq->kq_knhashmask = 0; 715 } 716 } 717 718 /* 719 * MPSAFE 720 */ 721 int 722 sys_kqueue(struct kqueue_args *uap) 723 { 724 struct thread *td = curthread; 725 struct kqueue *kq; 726 struct file *fp; 727 int fd, error; 728 729 error = falloc(td->td_lwp, &fp, &fd); 730 if (error) 731 return (error); 732 fp->f_flag = FREAD | FWRITE; 733 fp->f_type = DTYPE_KQUEUE; 734 fp->f_ops = &kqueueops; 735 736 kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO); 737 kqueue_init(kq, td->td_proc->p_fd); 738 fp->f_data = kq; 739 740 fsetfd(kq->kq_fdp, fp, fd); 741 uap->sysmsg_result = fd; 742 fdrop(fp); 743 return (error); 744 } 745 746 /* 747 * Copy 'count' items into the destination list pointed to by uap->eventlist. 
748 */ 749 static int 750 kevent_copyout(void *arg, struct kevent *kevp, int count, int *res) 751 { 752 struct kevent_copyin_args *kap; 753 int error; 754 755 kap = (struct kevent_copyin_args *)arg; 756 757 error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp)); 758 if (error == 0) { 759 kap->ka->eventlist += count; 760 *res += count; 761 } else { 762 *res = -1; 763 } 764 765 return (error); 766 } 767 768 /* 769 * Copy at most 'max' items from the list pointed to by kap->changelist, 770 * return number of items in 'events'. 771 */ 772 static int 773 kevent_copyin(void *arg, struct kevent *kevp, int max, int *events) 774 { 775 struct kevent_copyin_args *kap; 776 int error, count; 777 778 kap = (struct kevent_copyin_args *)arg; 779 780 count = min(kap->ka->nchanges - kap->pchanges, max); 781 error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp); 782 if (error == 0) { 783 kap->ka->changelist += count; 784 kap->pchanges += count; 785 *events = count; 786 } 787 788 return (error); 789 } 790 791 /* 792 * MPSAFE 793 */ 794 int 795 kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap, 796 k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn, 797 struct timespec *tsp_in, int flags) 798 { 799 struct kevent *kevp; 800 struct timespec *tsp, ats; 801 int i, n, total, error, nerrors = 0; 802 int gobbled; 803 int lres; 804 int limit = kq_checkloop; 805 int closedcounter; 806 struct kevent kev[KQ_NEVENTS]; 807 struct knote marker; 808 struct lwkt_token *tok; 809 810 if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec) 811 atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC); 812 813 tsp = tsp_in; 814 *res = 0; 815 816 closedcounter = kq->kq_fdp->fd_closedcounter; 817 818 for (;;) { 819 n = 0; 820 error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n); 821 if (error) 822 return error; 823 if (n == 0) 824 break; 825 for (i = 0; i < n; ++i) 826 kev[i].flags &= ~EV_SYSFLAGS; 827 for (i = 0; i < n; ++i) { 828 gobbled = n - i; 829 error = kqueue_register(kq, &kev[i], &gobbled); 830 i += gobbled - 1; 831 kevp = &kev[i]; 832 833 /* 834 * If a registration returns an error we 835 * immediately post the error. The kevent() 836 * call itself will fail with the error if 837 * no space is available for posting. 838 * 839 * Such errors normally bypass the timeout/blocking 840 * code. However, if the copyoutfn function refuses 841 * to post the error (see sys_poll()), then we 842 * ignore it too. 843 */ 844 if (error || (kevp->flags & EV_RECEIPT)) { 845 kevp->flags = EV_ERROR; 846 kevp->data = error; 847 lres = *res; 848 kevent_copyoutfn(uap, kevp, 1, res); 849 if (*res < 0) { 850 return error; 851 } else if (lres != *res) { 852 nevents--; 853 nerrors++; 854 } 855 } 856 } 857 } 858 if (nerrors) 859 return 0; 860 861 /* 862 * Acquire/wait for events - setup timeout 863 */ 864 if (tsp != NULL) { 865 if (tsp->tv_sec || tsp->tv_nsec) { 866 getnanouptime(&ats); 867 timespecadd(tsp, &ats, tsp); /* tsp = target time */ 868 } 869 } 870 871 /* 872 * Loop as required. 873 * 874 * Collect as many events as we can. Sleeping on successive 875 * loops is disabled if copyoutfn has incremented (*res). 876 * 877 * The loop stops if an error occurs, all events have been 878 * scanned (the marker has been reached), or fewer than the 879 * maximum number of events is found. 880 * 881 * The copyoutfn function does not have to increment (*res) in 882 * order for the loop to continue. 883 * 884 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents. 
885 */ 886 total = 0; 887 error = 0; 888 marker.kn_filter = EVFILT_MARKER; 889 marker.kn_status = KN_PROCESSING; 890 tok = lwkt_token_pool_lookup(kq); 891 lwkt_gettoken(tok); 892 TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe); 893 lwkt_reltoken(tok); 894 while ((n = nevents - total) > 0) { 895 if (n > KQ_NEVENTS) 896 n = KQ_NEVENTS; 897 898 /* 899 * If no events are pending sleep until timeout (if any) 900 * or an event occurs. 901 * 902 * After the sleep completes the marker is moved to the 903 * end of the list, making any received events available 904 * to our scan. 905 */ 906 if (kq->kq_count == 0 && *res == 0) { 907 int timeout, ustimeout = 0; 908 909 if (tsp == NULL) { 910 timeout = 0; 911 } else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) { 912 error = EWOULDBLOCK; 913 break; 914 } else { 915 struct timespec atx = *tsp; 916 917 getnanouptime(&ats); 918 timespecsub(&atx, &ats, &atx); 919 if (atx.tv_sec < 0) { 920 error = EWOULDBLOCK; 921 break; 922 } else { 923 timeout = atx.tv_sec > 24 * 60 * 60 ? 924 24 * 60 * 60 * hz : 925 tstohz_high(&atx); 926 } 927 if (flags & KEVENT_TIMEOUT_PRECISE && 928 timeout != 0) { 929 if (atx.tv_sec == 0 && 930 atx.tv_nsec < kq_sleep_threshold) { 931 DELAY(atx.tv_nsec / 1000); 932 error = EWOULDBLOCK; 933 break; 934 } else if (atx.tv_sec < 2000) { 935 ustimeout = atx.tv_sec * 936 1000000 + atx.tv_nsec/1000; 937 } else { 938 ustimeout = 2000000000; 939 } 940 } 941 } 942 943 lwkt_gettoken(tok); 944 if (kq->kq_count == 0) { 945 kq->kq_sleep_cnt++; 946 if (__predict_false(kq->kq_sleep_cnt == 0)) { 947 /* 948 * Guard against possible wrapping. And 949 * set it to 2, so that kqueue_wakeup() 950 * can wake everyone up. 951 */ 952 kq->kq_sleep_cnt = 2; 953 } 954 if ((flags & KEVENT_TIMEOUT_PRECISE) && 955 timeout != 0) { 956 error = precise_sleep(kq, PCATCH, 957 "kqread", ustimeout); 958 } else { 959 error = tsleep(kq, PCATCH, "kqread", 960 timeout); 961 } 962 963 /* don't restart after signals... */ 964 if (error == ERESTART) 965 error = EINTR; 966 if (error) { 967 lwkt_reltoken(tok); 968 break; 969 } 970 971 TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe); 972 TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, 973 kn_tqe); 974 } 975 lwkt_reltoken(tok); 976 } 977 978 /* 979 * Process all received events 980 * Account for all non-spurious events in our total 981 */ 982 i = kqueue_scan(kq, kev, n, &marker, closedcounter); 983 if (i) { 984 lres = *res; 985 error = kevent_copyoutfn(uap, kev, i, res); 986 total += *res - lres; 987 if (error) 988 break; 989 } 990 if (limit && --limit == 0) 991 panic("kqueue: checkloop failed i=%d", i); 992 993 /* 994 * Normally when fewer events are returned than requested 995 * we can stop. However, if only spurious events were 996 * collected the copyout will not bump (*res) and we have 997 * to continue. 998 */ 999 if (i < n && *res) 1000 break; 1001 1002 /* 1003 * Deal with an edge case where spurious events can cause 1004 * a loop to occur without moving the marker. This can 1005 * prevent kqueue_scan() from picking up new events which 1006 * race us. We must be sure to move the marker for this 1007 * case. 1008 * 1009 * NOTE: We do not want to move the marker if events 1010 * were scanned because normal kqueue operations 1011 * may reactivate events. Moving the marker in 1012 * that case could result in duplicates for the 1013 * same event. 
1014 */ 1015 if (i == 0) { 1016 lwkt_gettoken(tok); 1017 TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe); 1018 TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe); 1019 lwkt_reltoken(tok); 1020 } 1021 } 1022 lwkt_gettoken(tok); 1023 TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe); 1024 lwkt_reltoken(tok); 1025 1026 /* Timeouts do not return EWOULDBLOCK. */ 1027 if (error == EWOULDBLOCK) 1028 error = 0; 1029 return error; 1030 } 1031 1032 /* 1033 * MPALMOSTSAFE 1034 */ 1035 int 1036 sys_kevent(struct kevent_args *uap) 1037 { 1038 struct thread *td = curthread; 1039 struct timespec ts, *tsp; 1040 struct kqueue *kq; 1041 struct file *fp = NULL; 1042 struct kevent_copyin_args *kap, ka; 1043 int error; 1044 1045 if (uap->timeout) { 1046 error = copyin(uap->timeout, &ts, sizeof(ts)); 1047 if (error) 1048 return (error); 1049 tsp = &ts; 1050 } else { 1051 tsp = NULL; 1052 } 1053 fp = holdfp(td, uap->fd, -1); 1054 if (fp == NULL) 1055 return (EBADF); 1056 if (fp->f_type != DTYPE_KQUEUE) { 1057 fdrop(fp); 1058 return (EBADF); 1059 } 1060 1061 kq = (struct kqueue *)fp->f_data; 1062 1063 kap = &ka; 1064 kap->ka = uap; 1065 kap->pchanges = 0; 1066 1067 error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap, 1068 kevent_copyin, kevent_copyout, tsp, 0); 1069 1070 dropfp(td, uap->fd, fp); 1071 1072 return (error); 1073 } 1074 1075 /* 1076 * Efficiently load multiple file pointers. This significantly reduces 1077 * threaded overhead. When doing simple polling we can depend on the 1078 * per-thread (fd,fp) cache. With more descriptors, we batch. 1079 */ 1080 static 1081 void 1082 floadkevfps(thread_t td, struct filedesc *fdp, struct kevent *kev, 1083 struct file **fp, int climit) 1084 { 1085 struct filterops *fops; 1086 int tdcache; 1087 1088 if (climit <= 2 && td->td_proc && td->td_proc->p_fd == fdp) { 1089 tdcache = 1; 1090 } else { 1091 tdcache = 0; 1092 spin_lock_shared(&fdp->fd_spin); 1093 } 1094 1095 while (climit) { 1096 *fp = NULL; 1097 if (kev->filter < 0 && 1098 kev->filter + EVFILT_SYSCOUNT >= 0) { 1099 fops = sysfilt_ops[~kev->filter]; 1100 if (fops->f_flags & FILTEROP_ISFD) { 1101 if (tdcache) { 1102 *fp = holdfp(td, kev->ident, -1); 1103 } else { 1104 *fp = holdfp_fdp_locked(fdp, 1105 kev->ident, -1); 1106 } 1107 } 1108 } 1109 --climit; 1110 ++fp; 1111 ++kev; 1112 } 1113 if (tdcache == 0) 1114 spin_unlock_shared(&fdp->fd_spin); 1115 } 1116 1117 /* 1118 * Register up to *countp kev's. Always registers at least 1. 1119 * 1120 * The number registered is returned in *countp. 1121 * 1122 * If an error occurs or a kev is flagged EV_RECEIPT, it is 1123 * processed and included in *countp, and processing then 1124 * stops. 1125 */ 1126 int 1127 kqueue_register(struct kqueue *kq, struct kevent *kev, int *countp) 1128 { 1129 struct filedesc *fdp = kq->kq_fdp; 1130 struct klist *list = NULL; 1131 struct filterops *fops; 1132 struct file *fp[KQ_NEVENTS]; 1133 struct knote *kn = NULL; 1134 struct thread *td; 1135 int error; 1136 int count; 1137 int climit; 1138 int closedcounter; 1139 struct knote_cache_list *cache_list; 1140 1141 td = curthread; 1142 climit = *countp; 1143 if (climit > KQ_NEVENTS) 1144 climit = KQ_NEVENTS; 1145 closedcounter = fdp->fd_closedcounter; 1146 floadkevfps(td, fdp, kev, fp, climit); 1147 1148 lwkt_getpooltoken(kq); 1149 count = 0; 1150 1151 /* 1152 * To avoid races, only one thread can register events on this 1153 * kqueue at a time. 
1154 */ 1155 while (__predict_false(kq->kq_regtd != NULL && kq->kq_regtd != td)) { 1156 kq->kq_state |= KQ_REGWAIT; 1157 tsleep(&kq->kq_regtd, 0, "kqreg", 0); 1158 } 1159 if (__predict_false(kq->kq_regtd != NULL)) { 1160 /* Recursive calling of kqueue_register() */ 1161 td = NULL; 1162 } else { 1163 /* Owner of the kq_regtd, i.e. td != NULL */ 1164 kq->kq_regtd = td; 1165 } 1166 1167 loop: 1168 if (kev->filter < 0) { 1169 if (kev->filter + EVFILT_SYSCOUNT < 0) { 1170 error = EINVAL; 1171 ++count; 1172 goto done; 1173 } 1174 fops = sysfilt_ops[~kev->filter]; /* to 0-base index */ 1175 } else { 1176 /* 1177 * XXX 1178 * filter attach routine is responsible for insuring that 1179 * the identifier can be attached to it. 1180 */ 1181 error = EINVAL; 1182 ++count; 1183 goto done; 1184 } 1185 1186 if (fops->f_flags & FILTEROP_ISFD) { 1187 /* validate descriptor */ 1188 if (fp[count] == NULL) { 1189 error = EBADF; 1190 ++count; 1191 goto done; 1192 } 1193 } 1194 1195 cache_list = &knote_cache_lists[mycpuid]; 1196 if (SLIST_EMPTY(&cache_list->knote_cache)) { 1197 struct knote *new_kn; 1198 1199 new_kn = knote_alloc(); 1200 crit_enter(); 1201 SLIST_INSERT_HEAD(&cache_list->knote_cache, new_kn, kn_link); 1202 cache_list->knote_cache_cnt++; 1203 crit_exit(); 1204 } 1205 1206 if (fp[count] != NULL) { 1207 list = &fp[count]->f_klist; 1208 } else if (kq->kq_knhashmask) { 1209 list = &kq->kq_knhash[ 1210 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)]; 1211 } 1212 if (list != NULL) { 1213 lwkt_getpooltoken(list); 1214 again: 1215 SLIST_FOREACH(kn, list, kn_link) { 1216 if (kn->kn_kq == kq && 1217 kn->kn_filter == kev->filter && 1218 kn->kn_id == kev->ident) { 1219 if (knote_acquire(kn) == 0) 1220 goto again; 1221 break; 1222 } 1223 } 1224 lwkt_relpooltoken(list); 1225 } 1226 1227 /* 1228 * NOTE: At this point if kn is non-NULL we will have acquired 1229 * it and set KN_PROCESSING. 1230 */ 1231 if (kn == NULL && ((kev->flags & EV_ADD) == 0)) { 1232 error = ENOENT; 1233 ++count; 1234 goto done; 1235 } 1236 1237 /* 1238 * kn now contains the matching knote, or NULL if no match 1239 */ 1240 if (kev->flags & EV_ADD) { 1241 if (kn == NULL) { 1242 crit_enter(); 1243 kn = SLIST_FIRST(&cache_list->knote_cache); 1244 if (kn == NULL) { 1245 crit_exit(); 1246 kn = knote_alloc(); 1247 } else { 1248 SLIST_REMOVE_HEAD(&cache_list->knote_cache, 1249 kn_link); 1250 cache_list->knote_cache_cnt--; 1251 crit_exit(); 1252 } 1253 kn->kn_fp = fp[count]; 1254 kn->kn_kq = kq; 1255 kn->kn_fop = fops; 1256 1257 /* 1258 * apply reference count to knote structure, and 1259 * do not release it at the end of this routine. 1260 */ 1261 fp[count] = NULL; /* safety */ 1262 1263 kn->kn_sfflags = kev->fflags; 1264 kn->kn_sdata = kev->data; 1265 kev->fflags = 0; 1266 kev->data = 0; 1267 kn->kn_kevent = *kev; 1268 1269 /* 1270 * KN_PROCESSING prevents the knote from getting 1271 * ripped out from under us while we are trying 1272 * to attach it, in case the attach blocks. 1273 */ 1274 kn->kn_status = KN_PROCESSING; 1275 knote_attach(kn); 1276 if ((error = filter_attach(kn)) != 0) { 1277 kn->kn_status |= KN_DELETING | KN_REPROCESS; 1278 knote_drop(kn); 1279 ++count; 1280 goto done; 1281 } 1282 1283 /* 1284 * Interlock against close races which either tried 1285 * to remove our knote while we were blocked or missed 1286 * it entirely prior to our attachment. We do not 1287 * want to end up with a knote on a closed descriptor. 
			 */
			if ((fops->f_flags & FILTEROP_ISFD) &&
			    checkfdclosed(curthread, fdp, kev->ident,
					  kn->kn_fp, closedcounter)) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			KKASSERT(kn->kn_status & KN_PROCESSING);
			if (fops == &user_filtops) {
				filt_usertouch(kn, kev, EVENT_REGISTER);
			} else {
				kn->kn_sfflags = kev->fflags;
				kn->kn_sdata = kev->data;
				kn->kn_kevent.udata = kev->udata;
			}
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	} else if (kev->flags & EV_DELETE) {
		/*
		 * Delete the existing knote
		 */
		knote_detach_and_drop(kn);
		error = 0;
		++count;
		goto done;
	} else {
		/*
		 * Modify an existing event.
		 *
		 * The user may change some filter values after the
		 * initial EV_ADD, but doing so will not reset any
		 * filters which have already been triggered.
		 */
		KKASSERT(kn->kn_status & KN_PROCESSING);
		if (fops == &user_filtops) {
			filt_usertouch(kn, kev, EVENT_REGISTER);
		} else {
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	}

	/*
	 * Disablement does not deactivate a knote here.
	 */
	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	/*
	 * Re-enablement may have to immediately enqueue an active knote.
	 */
	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0)) {
			knote_enqueue(kn);
		}
	}

	/*
	 * Handle any required reprocessing
	 */
	knote_release(kn);
	/* kn may be invalid now */

	/*
	 * Loop control.  We stop on errors (above), and also stop after
	 * processing EV_RECEIPT, so the caller can process it.
	 */
	++count;
	if (kev->flags & EV_RECEIPT) {
		error = 0;
		goto done;
	}
	++kev;
	if (count < climit) {
		if (fp[count-1])		/* drop unprocessed fp */
			fdrop(fp[count-1]);
		goto loop;
	}

	/*
	 * Cleanup
	 */
done:
	if (td != NULL) {		/* Owner of the kq_regtd */
		kq->kq_regtd = NULL;
		if (__predict_false(kq->kq_state & KQ_REGWAIT)) {
			kq->kq_state &= ~KQ_REGWAIT;
			wakeup(&kq->kq_regtd);
		}
	}
	lwkt_relpooltoken(kq);

	/*
	 * Drop unprocessed file pointers
	 */
	*countp = count;
	if (count && fp[count-1])
		fdrop(fp[count-1]);
	while (count < climit) {
		if (fp[count])
			fdrop(fp[count]);
		++count;
	}
	return (error);
}

/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled; do not continue scanning past
 * the marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
	    struct knote *marker, int closedcounter)
{
	struct knote *kn, local_marker;
	thread_t td = curthread;
	int total;

	total = 0;
	local_marker.kn_filter = EVFILT_MARKER;
	local_marker.kn_status = KN_PROCESSING;

	lwkt_getpooltoken(kq);

	/*
	 * Collect events.
	 */
	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
	while (count) {
		kn = TAILQ_NEXT(&local_marker, kn_tqe);
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Marker reached, we are done */
			if (kn == marker)
				break;

			/* Move local marker past some other thread's marker */
			kn = TAILQ_NEXT(kn, kn_tqe);
			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
			continue;
		}

		/*
		 * We can't skip a knote undergoing processing, otherwise
		 * we risk not returning it when the user process expects
		 * it to be returned.  Sleep and retry.
		 */
		if (knote_acquire(kn) == 0)
			continue;

		/*
		 * Remove the event for processing.
		 *
		 * WARNING!  We must leave KN_QUEUED set to prevent the
		 *	     event from being KNOTE_ACTIVATE()d while
		 *	     the queue state is in limbo, in case we
		 *	     block.
		 */
		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
		kq->kq_count--;

		/*
		 * We have to deal with an extremely important race against
		 * file descriptor close()s here.  The file descriptor can
		 * disappear MPSAFE, and there is a small window of
		 * opportunity between that and the call to knote_fdclose().
		 *
		 * If we hit that window here while doselect or dopoll is
		 * trying to delete a spurious event they will not be able
		 * to match up the event against a knote and will go haywire.
		 */
		if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
		    checkfdclosed(td, kq->kq_fdp, kn->kn_kevent.ident,
				  kn->kn_fp, closedcounter)) {
			kn->kn_status |= KN_DELETING | KN_REPROCESS;
		}

		if (kn->kn_status & KN_DISABLED) {
			/*
			 * If disabled we ensure the event is not queued
			 * but leave its active bit set.  On re-enablement
			 * the event may be immediately triggered.
1506 */ 1507 kn->kn_status &= ~KN_QUEUED; 1508 } else if ((kn->kn_flags & EV_ONESHOT) == 0 && 1509 (kn->kn_status & KN_DELETING) == 0 && 1510 filter_event(kn, 0) == 0) { 1511 /* 1512 * If not running in one-shot mode and the event 1513 * is no longer present we ensure it is removed 1514 * from the queue and ignore it. 1515 */ 1516 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE); 1517 } else { 1518 /* 1519 * Post the event 1520 */ 1521 if (kn->kn_fop == &user_filtops) 1522 filt_usertouch(kn, kevp, EVENT_PROCESS); 1523 else 1524 *kevp = kn->kn_kevent; 1525 ++kevp; 1526 ++total; 1527 --count; 1528 1529 if (kn->kn_flags & EV_ONESHOT) { 1530 kn->kn_status &= ~KN_QUEUED; 1531 kn->kn_status |= KN_DELETING | KN_REPROCESS; 1532 } else { 1533 if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) { 1534 if (kn->kn_flags & EV_CLEAR) { 1535 kn->kn_data = 0; 1536 kn->kn_fflags = 0; 1537 } 1538 if (kn->kn_flags & EV_DISPATCH) { 1539 kn->kn_status |= KN_DISABLED; 1540 } 1541 kn->kn_status &= ~(KN_QUEUED | 1542 KN_ACTIVE); 1543 } else { 1544 TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe); 1545 kq->kq_count++; 1546 } 1547 } 1548 } 1549 1550 /* 1551 * Handle any post-processing states 1552 */ 1553 knote_release(kn); 1554 } 1555 TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe); 1556 1557 lwkt_relpooltoken(kq); 1558 return (total); 1559 } 1560 1561 /* 1562 * XXX 1563 * This could be expanded to call kqueue_scan, if desired. 1564 * 1565 * MPSAFE 1566 */ 1567 static int 1568 kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags) 1569 { 1570 return (ENXIO); 1571 } 1572 1573 /* 1574 * MPSAFE 1575 */ 1576 static int 1577 kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags) 1578 { 1579 return (ENXIO); 1580 } 1581 1582 /* 1583 * MPALMOSTSAFE 1584 */ 1585 static int 1586 kqueue_ioctl(struct file *fp, u_long com, caddr_t data, 1587 struct ucred *cred, struct sysmsg *msg) 1588 { 1589 struct kqueue *kq; 1590 int error; 1591 1592 kq = (struct kqueue *)fp->f_data; 1593 lwkt_getpooltoken(kq); 1594 switch(com) { 1595 case FIOASYNC: 1596 if (*(int *)data) 1597 kq->kq_state |= KQ_ASYNC; 1598 else 1599 kq->kq_state &= ~KQ_ASYNC; 1600 error = 0; 1601 break; 1602 case FIOSETOWN: 1603 error = fsetown(*(int *)data, &kq->kq_sigio); 1604 break; 1605 default: 1606 error = ENOTTY; 1607 break; 1608 } 1609 lwkt_relpooltoken(kq); 1610 return (error); 1611 } 1612 1613 /* 1614 * MPSAFE 1615 */ 1616 static int 1617 kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred) 1618 { 1619 struct kqueue *kq = (struct kqueue *)fp->f_data; 1620 1621 bzero((void *)st, sizeof(*st)); 1622 st->st_size = kq->kq_count; 1623 st->st_blksize = sizeof(struct kevent); 1624 st->st_mode = S_IFIFO; 1625 return (0); 1626 } 1627 1628 /* 1629 * MPSAFE 1630 */ 1631 static int 1632 kqueue_close(struct file *fp) 1633 { 1634 struct kqueue *kq = (struct kqueue *)fp->f_data; 1635 1636 kqueue_terminate(kq); 1637 1638 fp->f_data = NULL; 1639 funsetown(&kq->kq_sigio); 1640 1641 kfree(kq, M_KQUEUE); 1642 return (0); 1643 } 1644 1645 static void 1646 kqueue_wakeup(struct kqueue *kq) 1647 { 1648 if (kq->kq_sleep_cnt) { 1649 u_int sleep_cnt = kq->kq_sleep_cnt; 1650 1651 kq->kq_sleep_cnt = 0; 1652 if (sleep_cnt == 1) 1653 wakeup_one(kq); 1654 else 1655 wakeup(kq); 1656 } 1657 KNOTE(&kq->kq_kqinfo.ki_note, 0); 1658 } 1659 1660 /* 1661 * Calls filterops f_attach function, acquiring mplock if filter is not 1662 * marked as FILTEROP_MPSAFE. 
1663 * 1664 * Caller must be holding the related kq token 1665 */ 1666 static int 1667 filter_attach(struct knote *kn) 1668 { 1669 int ret; 1670 1671 if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) { 1672 ret = kn->kn_fop->f_attach(kn); 1673 } else { 1674 get_mplock(); 1675 ret = kn->kn_fop->f_attach(kn); 1676 rel_mplock(); 1677 } 1678 return (ret); 1679 } 1680 1681 /* 1682 * Detach the knote and drop it, destroying the knote. 1683 * 1684 * Calls filterops f_detach function, acquiring mplock if filter is not 1685 * marked as FILTEROP_MPSAFE. 1686 * 1687 * Caller must be holding the related kq token 1688 */ 1689 static void 1690 knote_detach_and_drop(struct knote *kn) 1691 { 1692 kn->kn_status |= KN_DELETING | KN_REPROCESS; 1693 if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) { 1694 kn->kn_fop->f_detach(kn); 1695 } else { 1696 get_mplock(); 1697 kn->kn_fop->f_detach(kn); 1698 rel_mplock(); 1699 } 1700 knote_drop(kn); 1701 } 1702 1703 /* 1704 * Calls filterops f_event function, acquiring mplock if filter is not 1705 * marked as FILTEROP_MPSAFE. 1706 * 1707 * If the knote is in the middle of being created or deleted we cannot 1708 * safely call the filter op. 1709 * 1710 * Caller must be holding the related kq token 1711 */ 1712 static int 1713 filter_event(struct knote *kn, long hint) 1714 { 1715 int ret; 1716 1717 if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) { 1718 ret = kn->kn_fop->f_event(kn, hint); 1719 } else { 1720 get_mplock(); 1721 ret = kn->kn_fop->f_event(kn, hint); 1722 rel_mplock(); 1723 } 1724 return (ret); 1725 } 1726 1727 /* 1728 * Walk down a list of knotes, activating them if their event has triggered. 1729 * 1730 * If we encounter any knotes which are undergoing processing we just mark 1731 * them for reprocessing and do not try to [re]activate the knote. However, 1732 * if a hint is being passed we have to wait and that makes things a bit 1733 * sticky. 1734 */ 1735 void 1736 knote(struct klist *list, long hint) 1737 { 1738 struct kqueue *kq; 1739 struct knote *kn; 1740 struct knote *kntmp; 1741 1742 lwkt_getpooltoken(list); 1743 restart: 1744 SLIST_FOREACH(kn, list, kn_next) { 1745 kq = kn->kn_kq; 1746 lwkt_getpooltoken(kq); 1747 1748 /* temporary verification hack */ 1749 SLIST_FOREACH(kntmp, list, kn_next) { 1750 if (kn == kntmp) 1751 break; 1752 } 1753 if (kn != kntmp || kn->kn_kq != kq) { 1754 lwkt_relpooltoken(kq); 1755 goto restart; 1756 } 1757 1758 if (kn->kn_status & KN_PROCESSING) { 1759 /* 1760 * Someone else is processing the knote, ask the 1761 * other thread to reprocess it and don't mess 1762 * with it otherwise. 1763 */ 1764 if (hint == 0) { 1765 kn->kn_status |= KN_REPROCESS; 1766 lwkt_relpooltoken(kq); 1767 continue; 1768 } 1769 1770 /* 1771 * If the hint is non-zero we have to wait or risk 1772 * losing the state the caller is trying to update. 1773 * 1774 * XXX This is a real problem, certain process 1775 * and signal filters will bump kn_data for 1776 * already-processed notes more than once if 1777 * we restart the list scan. FIXME. 1778 */ 1779 kn->kn_status |= KN_WAITING | KN_REPROCESS; 1780 tsleep(kn, 0, "knotec", hz); 1781 lwkt_relpooltoken(kq); 1782 goto restart; 1783 } 1784 1785 /* 1786 * Become the reprocessing master ourselves. 1787 * 1788 * If hint is non-zero running the event is mandatory 1789 * when not deleting so do it whether reprocessing is 1790 * set or not. 
1791 */ 1792 kn->kn_status |= KN_PROCESSING; 1793 if ((kn->kn_status & KN_DELETING) == 0) { 1794 if (filter_event(kn, hint)) 1795 KNOTE_ACTIVATE(kn); 1796 } 1797 if (knote_release(kn)) { 1798 lwkt_relpooltoken(kq); 1799 goto restart; 1800 } 1801 lwkt_relpooltoken(kq); 1802 } 1803 lwkt_relpooltoken(list); 1804 } 1805 1806 /* 1807 * Insert knote at head of klist. 1808 * 1809 * This function may only be called via a filter function and thus 1810 * kq_token should already be held and marked for processing. 1811 */ 1812 void 1813 knote_insert(struct klist *klist, struct knote *kn) 1814 { 1815 lwkt_getpooltoken(klist); 1816 KKASSERT(kn->kn_status & KN_PROCESSING); 1817 SLIST_INSERT_HEAD(klist, kn, kn_next); 1818 lwkt_relpooltoken(klist); 1819 } 1820 1821 /* 1822 * Remove knote from a klist 1823 * 1824 * This function may only be called via a filter function and thus 1825 * kq_token should already be held and marked for processing. 1826 */ 1827 void 1828 knote_remove(struct klist *klist, struct knote *kn) 1829 { 1830 lwkt_getpooltoken(klist); 1831 KKASSERT(kn->kn_status & KN_PROCESSING); 1832 SLIST_REMOVE(klist, kn, knote, kn_next); 1833 lwkt_relpooltoken(klist); 1834 } 1835 1836 void 1837 knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst, 1838 struct filterops *ops, void *hook) 1839 { 1840 struct kqueue *kq; 1841 struct knote *kn; 1842 1843 lwkt_getpooltoken(&src->ki_note); 1844 lwkt_getpooltoken(&dst->ki_note); 1845 while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) { 1846 kq = kn->kn_kq; 1847 lwkt_getpooltoken(kq); 1848 if (SLIST_FIRST(&src->ki_note) != kn || kn->kn_kq != kq) { 1849 lwkt_relpooltoken(kq); 1850 continue; 1851 } 1852 if (knote_acquire(kn)) { 1853 knote_remove(&src->ki_note, kn); 1854 kn->kn_fop = ops; 1855 kn->kn_hook = hook; 1856 knote_insert(&dst->ki_note, kn); 1857 knote_release(kn); 1858 /* kn may be invalid now */ 1859 } 1860 lwkt_relpooltoken(kq); 1861 } 1862 lwkt_relpooltoken(&dst->ki_note); 1863 lwkt_relpooltoken(&src->ki_note); 1864 } 1865 1866 /* 1867 * Remove all knotes referencing a specified fd 1868 */ 1869 void 1870 knote_fdclose(struct file *fp, struct filedesc *fdp, int fd) 1871 { 1872 struct kqueue *kq; 1873 struct knote *kn; 1874 struct knote *kntmp; 1875 1876 lwkt_getpooltoken(&fp->f_klist); 1877 restart: 1878 SLIST_FOREACH(kn, &fp->f_klist, kn_link) { 1879 if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) { 1880 kq = kn->kn_kq; 1881 lwkt_getpooltoken(kq); 1882 1883 /* temporary verification hack */ 1884 SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) { 1885 if (kn == kntmp) 1886 break; 1887 } 1888 if (kn != kntmp || kn->kn_kq->kq_fdp != fdp || 1889 kn->kn_id != fd || kn->kn_kq != kq) { 1890 lwkt_relpooltoken(kq); 1891 goto restart; 1892 } 1893 if (knote_acquire(kn)) 1894 knote_detach_and_drop(kn); 1895 lwkt_relpooltoken(kq); 1896 goto restart; 1897 } 1898 } 1899 lwkt_relpooltoken(&fp->f_klist); 1900 } 1901 1902 /* 1903 * Low level attach function. 1904 * 1905 * The knote should already be marked for processing. 1906 * Caller must hold the related kq token. 
1907 */ 1908 static void 1909 knote_attach(struct knote *kn) 1910 { 1911 struct klist *list; 1912 struct kqueue *kq = kn->kn_kq; 1913 1914 if (kn->kn_fop->f_flags & FILTEROP_ISFD) { 1915 KKASSERT(kn->kn_fp); 1916 list = &kn->kn_fp->f_klist; 1917 } else { 1918 if (kq->kq_knhashmask == 0) 1919 kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE, 1920 &kq->kq_knhashmask); 1921 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 1922 } 1923 lwkt_getpooltoken(list); 1924 SLIST_INSERT_HEAD(list, kn, kn_link); 1925 lwkt_relpooltoken(list); 1926 TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink); 1927 } 1928 1929 /* 1930 * Low level drop function. 1931 * 1932 * The knote should already be marked for processing. 1933 * Caller must hold the related kq token. 1934 */ 1935 static void 1936 knote_drop(struct knote *kn) 1937 { 1938 struct kqueue *kq; 1939 struct klist *list; 1940 1941 kq = kn->kn_kq; 1942 1943 if (kn->kn_fop->f_flags & FILTEROP_ISFD) 1944 list = &kn->kn_fp->f_klist; 1945 else 1946 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 1947 1948 lwkt_getpooltoken(list); 1949 SLIST_REMOVE(list, kn, knote, kn_link); 1950 lwkt_relpooltoken(list); 1951 TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink); 1952 if (kn->kn_status & KN_QUEUED) 1953 knote_dequeue(kn); 1954 if (kn->kn_fop->f_flags & FILTEROP_ISFD) { 1955 fdrop(kn->kn_fp); 1956 kn->kn_fp = NULL; 1957 } 1958 knote_free(kn); 1959 } 1960 1961 /* 1962 * Low level enqueue function. 1963 * 1964 * The knote should already be marked for processing. 1965 * Caller must be holding the kq token 1966 */ 1967 static void 1968 knote_enqueue(struct knote *kn) 1969 { 1970 struct kqueue *kq = kn->kn_kq; 1971 1972 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); 1973 TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe); 1974 kn->kn_status |= KN_QUEUED; 1975 ++kq->kq_count; 1976 1977 /* 1978 * Send SIGIO on request (typically set up as a mailbox signal) 1979 */ 1980 if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1) 1981 pgsigio(kq->kq_sigio, SIGIO, 0); 1982 1983 kqueue_wakeup(kq); 1984 } 1985 1986 /* 1987 * Low level dequeue function. 1988 * 1989 * The knote should already be marked for processing. 
1990 * Caller must be holding the kq token 1991 */ 1992 static void 1993 knote_dequeue(struct knote *kn) 1994 { 1995 struct kqueue *kq = kn->kn_kq; 1996 1997 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued")); 1998 TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe); 1999 kn->kn_status &= ~KN_QUEUED; 2000 kq->kq_count--; 2001 } 2002 2003 static struct knote * 2004 knote_alloc(void) 2005 { 2006 return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK); 2007 } 2008 2009 static void 2010 knote_free(struct knote *kn) 2011 { 2012 struct knote_cache_list *cache_list; 2013 2014 cache_list = &knote_cache_lists[mycpuid]; 2015 if (cache_list->knote_cache_cnt < KNOTE_CACHE_MAX) { 2016 crit_enter(); 2017 SLIST_INSERT_HEAD(&cache_list->knote_cache, kn, kn_link); 2018 cache_list->knote_cache_cnt++; 2019 crit_exit(); 2020 return; 2021 } 2022 kfree(kn, M_KQUEUE); 2023 } 2024 2025 struct sleepinfo { 2026 void *ident; 2027 int timedout; 2028 }; 2029 2030 static void 2031 precise_sleep_intr(systimer_t info, int in_ipi, struct intrframe *frame) 2032 { 2033 struct sleepinfo *si; 2034 2035 si = info->data; 2036 si->timedout = 1; 2037 wakeup(si->ident); 2038 } 2039 2040 static int 2041 precise_sleep(void *ident, int flags, const char *wmesg, int us) 2042 { 2043 struct systimer info; 2044 struct sleepinfo si = { 2045 .ident = ident, 2046 .timedout = 0, 2047 }; 2048 int r; 2049 2050 tsleep_interlock(ident, flags); 2051 systimer_init_oneshot(&info, precise_sleep_intr, &si, 2052 us == 0 ? 1 : us); 2053 r = tsleep(ident, flags | PINTERLOCKED, wmesg, 0); 2054 systimer_del(&info); 2055 if (si.timedout) 2056 r = EWOULDBLOCK; 2057 2058 return r; 2059 } 2060
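
/*
 * Illustrative userland sketch (not part of the kernel build): roughly how
 * the kqueue()/kevent() system calls implemented above are typically driven
 * from a program.  The function name kq_example(), the use of stdin, and the
 * timer ident/period (1, 1500ms) are arbitrary example values, not anything
 * mandated by this file.  EVFILT_TIMER interprets kevent data as milliseconds
 * (see filt_timerreset() above), and EV_CLEAR is set automatically for it.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	kq_example(void)
 *	{
 *		struct kevent chg[2], ev[2];
 *		int kq, i, n;
 *
 *		if ((kq = kqueue()) < 0)
 *			return (-1);
 *
 *		EV_SET(&chg[0], STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *		EV_SET(&chg[1], 1, EVFILT_TIMER, EV_ADD, 0, 1500, NULL);
 *
 *		n = kevent(kq, chg, 2, ev, 2, NULL);
 *		for (i = 0; i < n; ++i) {
 *			printf("filter %d ident %lu data %ld\n",
 *			    (int)ev[i].filter, (u_long)ev[i].ident,
 *			    (long)ev[i].data);
 *		}
 *		close(kq);
 *		return (0);
 *	}
 */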