/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysmsg.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/ktr.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>
#include <sys/spinlock2.h>

#define EVENT_REGISTER  1
#define EVENT_PROCESS   2

static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
        const struct kevent_args *ka;
        struct kevent *eventlist;
        const struct kevent *changelist;
        int pchanges;
};

#define KNOTE_CACHE_MAX         64

struct knote_cache_list {
        struct klist knote_cache;
        int knote_cache_cnt;
} __cachealign;

static int kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
            struct knote *marker, int closedcounter, int flags);
static int kqueue_read(struct file *fp, struct uio *uio,
            struct ucred *cred, int flags);
static int kqueue_write(struct file *fp, struct uio *uio,
            struct ucred *cred, int flags);
static int kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
            struct ucred *cred, struct sysmsg *msg);
static int kqueue_kqfilter(struct file *fp, struct knote *kn);
static int kqueue_stat(struct file *fp, struct stat *st,
            struct ucred *cred);
static int kqueue_close(struct file *fp);
static void kqueue_wakeup(struct kqueue *kq);
static int filter_attach(struct knote *kn);
static int filter_event(struct knote *kn, long hint);
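/*
 * File ops for kqueue descriptors.  A kqueue is not readable or
 * writable in the conventional sense (kqueue_read() and kqueue_write()
 * below simply return ENXIO), but a kqueue is itself pollable via
 * EVFILT_READ through kqueue_kqfilter(), where filt_kqueue() reports
 * the number of pending events (kq_count).
 */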
/*
 * MPSAFE
 */
static struct fileops kqueueops = {
        .fo_read = kqueue_read,
        .fo_write = kqueue_write,
        .fo_ioctl = kqueue_ioctl,
        .fo_kqfilter = kqueue_kqfilter,
        .fo_stat = kqueue_stat,
        .fo_close = kqueue_close,
        .fo_shutdown = nofo_shutdown
};

static void knote_attach(struct knote *kn);
static void knote_drop(struct knote *kn);
static void knote_detach_and_drop(struct knote *kn);
static void knote_enqueue(struct knote *kn);
static void knote_dequeue(struct knote *kn);
static struct knote *knote_alloc(void);
static void knote_free(struct knote *kn);

static void precise_sleep_intr(systimer_t info, int in_ipi,
            struct intrframe *frame);
static int precise_sleep(void *ident, int flags, const char *wmesg,
            int us);

static void filt_kqdetach(struct knote *kn);
static int filt_kqueue(struct knote *kn, long hint);
static int filt_procattach(struct knote *kn);
static void filt_procdetach(struct knote *kn);
static int filt_proc(struct knote *kn, long hint);
static int filt_fileattach(struct knote *kn);
static void filt_timerexpire(void *knx);
static int filt_timerattach(struct knote *kn);
static void filt_timerdetach(struct knote *kn);
static int filt_timer(struct knote *kn, long hint);
static int filt_userattach(struct knote *kn);
static void filt_userdetach(struct knote *kn);
static int filt_user(struct knote *kn, long hint);
static void filt_usertouch(struct knote *kn, struct kevent *kev,
            u_long type);
static int filt_fsattach(struct knote *kn);
static void filt_fsdetach(struct knote *kn);
static int filt_fs(struct knote *kn, long hint);

static struct filterops file_filtops =
        { FILTEROP_ISFD | FILTEROP_MPSAFE, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
        { FILTEROP_ISFD | FILTEROP_MPSAFE, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
        { FILTEROP_MPSAFE, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
        { FILTEROP_MPSAFE, filt_timerattach, filt_timerdetach, filt_timer };
static struct filterops user_filtops =
        { FILTEROP_MPSAFE, filt_userattach, filt_userdetach, filt_user };
static struct filterops fs_filtops =
        { FILTEROP_MPSAFE, filt_fsattach, filt_fsdetach, filt_fs };

static int kq_ncallouts = 0;
static int kq_calloutmax = 65536;
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of loops for kqueue scan");
static int kq_sleep_threshold = 20000;
SYSCTL_INT(_kern, OID_AUTO, kq_sleep_threshold, CTLFLAG_RW,
    &kq_sleep_threshold, 0, "Minimum sleep duration without busy-looping");

#define KNOTE_ACTIVATE(kn) do {                                         \
        kn->kn_status |= KN_ACTIVE;                                     \
        if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)           \
                knote_enqueue(kn);                                      \
} while(0)

#define KN_HASHSIZE             64              /* XXX should be tunable */
#define KN_HASH(val, mask)      (((val) ^ (val >> 8)) & (mask))

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;
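/*
 * User-visible filter numbers are small negative integers (EVFILT_READ
 * is -1, EVFILT_WRITE -2, and so on through EVFILT_SYSCOUNT entries),
 * so the table below is indexed by the one's complement of the filter,
 * e.g. sysfilt_ops[~EVFILT_READ] == sysfilt_ops[0].  See the
 * sysfilt_ops[~kev->filter] lookups in floadkevfps() and
 * kqueue_register().
 */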
/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
        &file_filtops,                  /* EVFILT_READ */
        &file_filtops,                  /* EVFILT_WRITE */
        &aio_filtops,                   /* EVFILT_AIO */
        &file_filtops,                  /* EVFILT_VNODE */
        &proc_filtops,                  /* EVFILT_PROC */
        &sig_filtops,                   /* EVFILT_SIGNAL */
        &timer_filtops,                 /* EVFILT_TIMER */
        &file_filtops,                  /* EVFILT_EXCEPT */
        &user_filtops,                  /* EVFILT_USER */
        &fs_filtops,                    /* EVFILT_FS */
};

static struct knote_cache_list knote_cache_lists[MAXCPU];

/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 *
 * Related kq token must be held.
 */
static __inline int
knote_acquire(struct knote *kn)
{
        if (kn->kn_status & KN_PROCESSING) {
                kn->kn_status |= KN_WAITING | KN_REPROCESS;
                tsleep(kn, 0, "kqepts", hz);
                /* knote may be stale now */
                return(0);
        }
        kn->kn_status |= KN_PROCESSING;
        return(1);
}

/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Caller must be holding the related kq token
 *
 * Non-zero is returned if the knote is destroyed or detached.
 */
static __inline int
knote_release(struct knote *kn)
{
        int ret;

        while (kn->kn_status & KN_REPROCESS) {
                kn->kn_status &= ~KN_REPROCESS;
                if (kn->kn_status & KN_WAITING) {
                        kn->kn_status &= ~KN_WAITING;
                        wakeup(kn);
                }
                if (kn->kn_status & KN_DELETING) {
                        knote_detach_and_drop(kn);
                        return(1);
                        /* NOT REACHED */
                }
                if (filter_event(kn, 0))
                        KNOTE_ACTIVATE(kn);
        }
        if (kn->kn_status & KN_DETACHED)
                ret = 1;
        else
                ret = 0;
        kn->kn_status &= ~KN_PROCESSING;
        /* kn should not be accessed anymore */
        return ret;
}

static int
filt_fileattach(struct knote *kn)
{
        return (fo_kqfilter(kn->kn_fp, kn));
}

/*
 * MPSAFE
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        if (kn->kn_filter != EVFILT_READ)
                return (EOPNOTSUPP);

        kn->kn_fop = &kqread_filtops;
        knote_insert(&kq->kq_kqinfo.ki_note, kn);
        return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        knote_remove(&kq->kq_kqinfo.ki_note, kn);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        kn->kn_data = kq->kq_count;
        return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
        struct proc *p;
        int immediate;

        immediate = 0;
        p = pfind(kn->kn_id);
        if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
                p = zpfind(kn->kn_id);
                immediate = 1;
        }
        if (p == NULL) {
                return (ESRCH);
        }
        if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
                if (p)
                        PRELE(p);
                return (EACCES);
        }

        lwkt_gettoken(&p->p_token);
        kn->kn_ptr.p_proc = p;
        kn->kn_flags |= EV_CLEAR;               /* automatically set */

        /*
         * internal flag indicating registration done by kernel
         */
        if (kn->kn_flags & EV_FLAG1) {
                kn->kn_data = kn->kn_sdata;     /* ppid */
                kn->kn_fflags = NOTE_CHILD;
                kn->kn_flags &= ~EV_FLAG1;
        }

        knote_insert(&p->p_klist, kn);

        /*
         * Immediately activate any exit notes if the target process is a
         * zombie.  This is necessary to handle the case where the target
         * process, e.g. a child, dies before the kevent is registered.
         */
        if (immediate && filt_proc(kn, NOTE_EXIT))
                KNOTE_ACTIVATE(kn);
        lwkt_reltoken(&p->p_token);
        PRELE(p);

        return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
        struct proc *p;

        if (kn->kn_status & KN_DETACHED)
                return;
        p = kn->kn_ptr.p_proc;
        knote_remove(&p->p_klist, kn);
}

static int
filt_proc(struct knote *kn, long hint)
{
        u_int event;

        /*
         * mask off extra data
         */
        event = (u_int)hint & NOTE_PCTRLMASK;

        /*
         * if the user is interested in this event, record it.
         */
        if (kn->kn_sfflags & event)
                kn->kn_fflags |= event;

        /*
         * Process is gone, so flag the event as finished.  Detach the
         * knote from the process now because the process will be poof,
         * gone later on.
         */
        if (event == NOTE_EXIT) {
                struct proc *p = kn->kn_ptr.p_proc;
                if ((kn->kn_status & KN_DETACHED) == 0) {
                        PHOLD(p);
                        knote_remove(&p->p_klist, kn);
                        kn->kn_status |= KN_DETACHED;
                        kn->kn_data = p->p_xstat;
                        kn->kn_ptr.p_proc = NULL;
                        PRELE(p);
                }
                kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
                return (1);
        }

        /*
         * process forked, and user wants to track the new process,
         * so attach a new knote to it, and immediately report an
         * event with the parent's pid.
         */
        if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
                struct kevent kev;
                int error;
                int n;

                /*
                 * register knote with new process.
                 */
                kev.ident = hint & NOTE_PDATAMASK;      /* pid */
                kev.filter = kn->kn_filter;
                kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
                kev.fflags = kn->kn_sfflags;
                kev.data = kn->kn_id;                   /* parent */
                kev.udata = kn->kn_kevent.udata;        /* preserve udata */
                n = 1;
                error = kqueue_register(kn->kn_kq, &kev, &n);
                if (error)
                        kn->kn_fflags |= NOTE_TRACKERR;
        }

        return (kn->kn_fflags != 0);
}

static void
filt_timerreset(struct knote *kn)
{
        struct callout *calloutp;
        struct timeval tv;
        int tticks;

        tv.tv_sec = kn->kn_sdata / 1000;
        tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
        tticks = tvtohz_high(&tv);
        calloutp = (struct callout *)kn->kn_hook;
        callout_reset(calloutp, tticks, filt_timerexpire, kn);
}
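/*
 * Worked example for filt_timerreset() above: kn_sdata is the timer
 * period in milliseconds, so kn_sdata = 1500 yields
 * tv = { .tv_sec = 1, .tv_usec = 500000 } before tvtohz_high()
 * converts it to callout ticks.
 */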
/*
 * The callout interlocks with callout_stop() but can still
 * race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
        struct knote *kn = knx;
        struct kqueue *kq = kn->kn_kq;

        lwkt_getpooltoken(kq);

        /*
         * Open-code knote_acquire(), since we can't sleep in a callout;
         * however, we do need to record this expiration.
         */
        kn->kn_data++;
        if (kn->kn_status & KN_PROCESSING) {
                kn->kn_status |= KN_REPROCESS;
                if ((kn->kn_status & KN_DELETING) == 0 &&
                    (kn->kn_flags & EV_ONESHOT) == 0)
                        filt_timerreset(kn);
                lwkt_relpooltoken(kq);
                return;
        }
        KASSERT((kn->kn_status & KN_DELETING) == 0,
            ("acquire a deleting knote %#x", kn->kn_status));
        kn->kn_status |= KN_PROCESSING;

        KNOTE_ACTIVATE(kn);
        if ((kn->kn_flags & EV_ONESHOT) == 0)
                filt_timerreset(kn);

        knote_release(kn);

        lwkt_relpooltoken(kq);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
        struct callout *calloutp;
        int prev_ncallouts;

        prev_ncallouts = atomic_fetchadd_int(&kq_ncallouts, 1);
        if (prev_ncallouts >= kq_calloutmax) {
                atomic_subtract_int(&kq_ncallouts, 1);
                kn->kn_hook = NULL;
                return (ENOMEM);
        }

        kn->kn_flags |= EV_CLEAR;               /* automatically set */
        calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
        callout_init_mp(calloutp);
        kn->kn_hook = (caddr_t)calloutp;

        filt_timerreset(kn);
        return (0);
}

/*
 * This function is called with the knote flagged locked, but it is
 * still possible to race a callout event due to the callback blocking.
 */
static void
filt_timerdetach(struct knote *kn)
{
        struct callout *calloutp;

        calloutp = (struct callout *)kn->kn_hook;
        callout_terminate(calloutp);
        kn->kn_hook = NULL;
        kfree(calloutp, M_KQUEUE);
        atomic_subtract_int(&kq_ncallouts, 1);
}

static int
filt_timer(struct knote *kn, long hint)
{
        return (kn->kn_data != 0);
}

/*
 * EVFILT_USER
 */
static int
filt_userattach(struct knote *kn)
{
        u_int ffctrl;

        kn->kn_hook = NULL;
        if (kn->kn_sfflags & NOTE_TRIGGER)
                kn->kn_ptr.hookid = 1;
        else
                kn->kn_ptr.hookid = 0;

        ffctrl = kn->kn_sfflags & NOTE_FFCTRLMASK;
        kn->kn_sfflags &= NOTE_FFLAGSMASK;
        switch (ffctrl) {
        case NOTE_FFNOP:
                break;

        case NOTE_FFAND:
                kn->kn_fflags &= kn->kn_sfflags;
                break;

        case NOTE_FFOR:
                kn->kn_fflags |= kn->kn_sfflags;
                break;

        case NOTE_FFCOPY:
                kn->kn_fflags = kn->kn_sfflags;
                break;

        default:
                /* XXX Return error? */
                break;
        }
        /* We just happen to copy this value as well.  Undocumented. */
        kn->kn_data = kn->kn_sdata;

        return 0;
}

static void
filt_userdetach(struct knote *kn)
{
        /* nothing to do */
}

static int
filt_user(struct knote *kn, long hint)
{
        return (kn->kn_ptr.hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
        u_int ffctrl;

        switch (type) {
        case EVENT_REGISTER:
                if (kev->fflags & NOTE_TRIGGER)
                        kn->kn_ptr.hookid = 1;

                ffctrl = kev->fflags & NOTE_FFCTRLMASK;
                kev->fflags &= NOTE_FFLAGSMASK;
                switch (ffctrl) {
                case NOTE_FFNOP:
                        break;

                case NOTE_FFAND:
                        kn->kn_fflags &= kev->fflags;
                        break;

                case NOTE_FFOR:
                        kn->kn_fflags |= kev->fflags;
                        break;

                case NOTE_FFCOPY:
                        kn->kn_fflags = kev->fflags;
                        break;

                default:
                        /* XXX Return error? */
                        break;
                }
                /* We just happen to copy this value as well.  Undocumented. */
                kn->kn_data = kev->data;

                /*
                 * This is not the correct use of EV_CLEAR in an event
                 * modification, it should have been passed as a NOTE instead.
                 * But we need to maintain compatibility with Apple & FreeBSD.
                 *
                 * Note however that EV_CLEAR can still be used when doing
                 * the initial registration of the event and works as expected
                 * (clears the event on reception).
                 */
                if (kev->flags & EV_CLEAR) {
                        kn->kn_ptr.hookid = 0;
                        /*
                         * Clearing kn->kn_data is fine, since it gets set
                         * every time anyway.  We just shouldn't clear
                         * kn->kn_fflags here, since that would limit the
                         * possible uses of this API.  NOTE_FFAND or
                         * NOTE_FFCOPY should be used for explicitly clearing
                         * kn->kn_fflags.
                         */
                        kn->kn_data = 0;
                }
                break;

        case EVENT_PROCESS:
                *kev = kn->kn_kevent;
                kev->fflags = kn->kn_fflags;
                kev->data = kn->kn_data;
                if (kn->kn_flags & EV_CLEAR) {
                        kn->kn_ptr.hookid = 0;
                        /* kn_data, kn_fflags handled by parent */
                }
                break;

        default:
                panic("filt_usertouch() - invalid type (%ld)", type);
                break;
        }
}

/*
 * EVFILT_FS
 */
struct klist fs_klist = SLIST_HEAD_INITIALIZER(&fs_klist);

static int
filt_fsattach(struct knote *kn)
{
        kn->kn_flags |= EV_CLEAR;
        knote_insert(&fs_klist, kn);

        return (0);
}

static void
filt_fsdetach(struct knote *kn)
{
        knote_remove(&fs_klist, kn);
}

static int
filt_fs(struct knote *kn, long hint)
{
        kn->kn_fflags |= hint;
        return (kn->kn_fflags != 0);
}

/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
        bzero(kq, sizeof(*kq));
        TAILQ_INIT(&kq->kq_knpend);
        TAILQ_INIT(&kq->kq_knlist);
        kq->kq_fdp = fdp;
        SLIST_INIT(&kq->kq_kqinfo.ki_note);
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
        struct knote *kn;

        lwkt_getpooltoken(kq);
        while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
                if (knote_acquire(kn))
                        knote_detach_and_drop(kn);
        }
        lwkt_relpooltoken(kq);

        if (kq->kq_knhash) {
                hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask);
                kq->kq_knhash = NULL;
                kq->kq_knhashmask = 0;
        }
}

/*
 * MPSAFE
 */
int
sys_kqueue(struct sysmsg *sysmsg, const struct kqueue_args *uap)
{
        struct thread *td = curthread;
        struct kqueue *kq;
        struct file *fp;
        int fd, error;

        error = falloc(td->td_lwp, &fp, &fd);
        if (error)
                return (error);
        fp->f_flag = FREAD | FWRITE;
        fp->f_type = DTYPE_KQUEUE;
        fp->f_ops = &kqueueops;

        kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
        kqueue_init(kq, td->td_proc->p_fd);
        fp->f_data = kq;

        fsetfd(kq->kq_fdp, fp, fd);
        sysmsg->sysmsg_result = fd;
        fdrop(fp);
        return (0);
}
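/*
 * Typical userland usage of the kqueue()/kevent() pair implemented in
 * this file (illustrative sketch only; error handling omitted):
 *
 *      struct kevent ev;
 *      int kq = kqueue();
 *
 *      EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *      kevent(kq, &ev, 1, NULL, 0, NULL);      (register the change)
 *      kevent(kq, NULL, 0, &ev, 1, NULL);      (wait for one event)
 */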
/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
        struct kevent_copyin_args *kap;
        int error;

        kap = (struct kevent_copyin_args *)arg;

        error = copyout(kevp, kap->eventlist, count * sizeof(*kevp));
        if (error == 0) {
                kap->eventlist += count;
                *res += count;
        } else {
                *res = -1;
        }

        return (error);
}

/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
        struct kevent_copyin_args *kap;
        int error, count;

        kap = (struct kevent_copyin_args *)arg;

        count = min(kap->ka->nchanges - kap->pchanges, max);
        error = copyin(kap->changelist, kevp, count * sizeof *kevp);
        if (error == 0) {
                kap->changelist += count;
                kap->pchanges += count;
                *events = count;
        }

        return (error);
}

/*
 * MPSAFE
 */
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
            k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
            struct timespec *tsp_in, int flags)
{
        struct kevent *kevp;
        struct timespec *tsp, ats;
        int i, n, total, error, nerrors = 0;
        int gobbled;
        int lres;
        int limit = kq_checkloop;
        int closedcounter;
        struct kevent kev[KQ_NEVENTS];
        struct knote marker;
        struct lwkt_token *tok;

        if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec)
                atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);

        tsp = tsp_in;
        *res = 0;

        closedcounter = kq->kq_fdp->fd_closedcounter;

        for (;;) {
                n = 0;
                error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
                if (error)
                        return error;
                if (n == 0)
                        break;
                for (i = 0; i < n; ++i)
                        kev[i].flags &= ~EV_SYSFLAGS;
                for (i = 0; i < n; ++i) {
                        gobbled = n - i;
                        error = kqueue_register(kq, &kev[i], &gobbled);
                        i += gobbled - 1;
                        kevp = &kev[i];

                        /*
                         * If a registration returns an error we
                         * immediately post the error.  The kevent()
                         * call itself will fail with the error if
                         * no space is available for posting.
                         *
                         * Such errors normally bypass the timeout/blocking
                         * code.  However, if the copyoutfn function refuses
                         * to post the error (see sys_poll()), then we
                         * ignore it too.
                         */
                        if (error || (kevp->flags & EV_RECEIPT)) {
                                kevp->flags = EV_ERROR;
                                kevp->data = error;
                                lres = *res;
                                kevent_copyoutfn(uap, kevp, 1, res);
                                if (*res < 0) {
                                        return error;
                                } else if (lres != *res) {
                                        nevents--;
                                        nerrors++;
                                }
                        }
                }
        }
        if (nerrors)
                return 0;

        /*
         * Acquire/wait for events - setup timeout
         *
         * If no timeout specified clean up the run path by clearing the
         * PRECISE flag.
         */
        if (tsp != NULL) {
                if (tsp->tv_sec || tsp->tv_nsec) {
                        getnanouptime(&ats);
                        timespecadd(tsp, &ats, tsp);    /* tsp = target time */
                }
        } else {
                flags &= ~KEVENT_TIMEOUT_PRECISE;
        }

        /*
         * Loop as required.
         *
         * Collect as many events as we can.  Sleeping on successive
         * loops is disabled if copyoutfn has incremented (*res).
         *
         * The loop stops if an error occurs, all events have been
         * scanned (the marker has been reached), or fewer than the
         * maximum number of events is found.
         *
         * The copyoutfn function does not have to increment (*res) in
         * order for the loop to continue.
         *
         * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
         */
        total = 0;
        error = 0;
        marker.kn_filter = EVFILT_MARKER;
        marker.kn_status = KN_PROCESSING;

        tok = lwkt_token_pool_lookup(kq);
        flags = (flags & ~KEVENT_SCAN_MASK) | KEVENT_SCAN_INSERT_MARKER;

        while ((n = nevents - total) > 0) {
                if (n > KQ_NEVENTS)
                        n = KQ_NEVENTS;

                /*
                 * Process all received events
                 * Account for all non-spurious events in our total
                 */
                i = kqueue_scan(kq, kev, n, &marker, closedcounter, flags);
                flags = (flags & ~KEVENT_SCAN_MASK) | KEVENT_SCAN_KEEP_MARKER;
                if (i) {
                        lres = *res;
                        error = kevent_copyoutfn(uap, kev, i, res);
                        total += *res - lres;
                        if (error)
                                break;
                }
                if (limit && --limit == 0)
                        panic("kqueue: checkloop failed i=%d", i);

                /*
                 * Normally when fewer events are returned than requested
                 * we can stop.  However, if only spurious events were
                 * collected the copyout will not bump (*res) and we have
                 * to continue.
                 */
                if (i < n && *res)
                        break;

                /*
                 * If no events were recorded (no events happened or the events
                 * that did happen were all spurious), block until an event
                 * occurs or the timeout occurs and reload the marker.
                 *
                 * If we saturated n (i == n) loop up without sleeping to
                 * continue processing the list.
                 */
                if (i != n && kq->kq_count == 0 && *res == 0) {
                        int timeout;
                        int ustimeout;

                        if (tsp == NULL) {
                                timeout = 0;
                                ustimeout = 0;
                        } else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
                                error = EWOULDBLOCK;
                                break;
                        } else {
                                struct timespec atx = *tsp;

                                getnanouptime(&ats);
                                timespecsub(&atx, &ats, &atx);
                                if (atx.tv_sec < 0 ||
                                    (atx.tv_sec == 0 && atx.tv_nsec <= 0)) {
                                        error = EWOULDBLOCK;
                                        break;
                                }
                                if (flags & KEVENT_TIMEOUT_PRECISE) {
                                        if (atx.tv_sec == 0 &&
                                            atx.tv_nsec < kq_sleep_threshold) {
                                                ustimeout = kq_sleep_threshold /
                                                            1000;
                                        } else if (atx.tv_sec < 60) {
                                                ustimeout =
                                                        atx.tv_sec * 1000000 +
                                                        atx.tv_nsec / 1000;
                                        } else {
                                                ustimeout = 60 * 1000000;
                                        }
                                        if (ustimeout == 0)
                                                ustimeout = 1;
                                        timeout = 0;
                                } else if (atx.tv_sec > 60 * 60) {
                                        timeout = 60 * 60 * hz;
                                        ustimeout = 0;
                                } else {
                                        timeout = tstohz_high(&atx);
                                        ustimeout = 0;
                                }
                        }

                        lwkt_gettoken(tok);
                        if (kq->kq_count == 0) {
                                kq->kq_sleep_cnt++;
                                if (__predict_false(kq->kq_sleep_cnt == 0)) {
                                        /*
                                         * Guard against possible wrapping.  And
                                         * set it to 2, so that kqueue_wakeup()
                                         * can wake everyone up.
                                         */
                                        kq->kq_sleep_cnt = 2;
                                }
                                if (flags & KEVENT_TIMEOUT_PRECISE) {
                                        error = precise_sleep(kq, PCATCH,
                                            "kqread", ustimeout);
                                } else {
                                        error = tsleep(kq, PCATCH,
                                            "kqread", timeout);
                                }

                                /* don't restart after signals... */
                                if (error == ERESTART)
                                        error = EINTR;
                                if (error == EWOULDBLOCK)
                                        error = 0;
                                if (error) {
                                        lwkt_reltoken(tok);
                                        break;
                                }
                                flags = (flags & ~KEVENT_SCAN_MASK) |
                                    KEVENT_SCAN_RELOAD_MARKER;
                        }
                        lwkt_reltoken(tok);
                }

                /*
                 * Deal with an edge case where spurious events can cause
                 * a loop to occur without moving the marker.  This can
                 * prevent kqueue_scan() from picking up new events which
                 * race us.  We must be sure to move the marker for this
                 * case.
                 *
                 * NOTE: We do not want to move the marker if events
                 *       were scanned because normal kqueue operations
                 *       may reactivate events.  Moving the marker in
                 *       that case could result in duplicates for the
                 *       same event.
                 */
                if (i == 0) {
                        flags = (flags & ~KEVENT_SCAN_MASK) |
                            KEVENT_SCAN_RELOAD_MARKER;
                }
        }

        /*
         * Remove the marker
         */
        if ((flags & KEVENT_SCAN_INSERT_MARKER) == 0) {
                lwkt_gettoken(tok);
                TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
                lwkt_reltoken(tok);
        }

        /* Timeouts do not return EWOULDBLOCK. */
        if (error == EWOULDBLOCK)
                error = 0;
        return error;
}

/*
 * MPALMOSTSAFE
 */
int
sys_kevent(struct sysmsg *sysmsg, const struct kevent_args *uap)
{
        struct thread *td = curthread;
        struct timespec ts, *tsp;
        struct kqueue *kq;
        struct file *fp = NULL;
        struct kevent_copyin_args *kap, ka;
        int error;

        if (uap->timeout) {
                error = copyin(uap->timeout, &ts, sizeof(ts));
                if (error)
                        return (error);
                tsp = &ts;
        } else {
                tsp = NULL;
        }
        fp = holdfp(td, uap->fd, -1);
        if (fp == NULL)
                return (EBADF);
        if (fp->f_type != DTYPE_KQUEUE) {
                fdrop(fp);
                return (EBADF);
        }

        kq = (struct kqueue *)fp->f_data;

        kap = &ka;
        kap->ka = uap;
        kap->pchanges = 0;
        kap->eventlist = uap->eventlist;
        kap->changelist = uap->changelist;

        error = kern_kevent(kq, uap->nevents, &sysmsg->sysmsg_result, kap,
            kevent_copyin, kevent_copyout, tsp, 0);

        dropfp(td, uap->fd, fp);

        return (error);
}

/*
 * Efficiently load multiple file pointers.  This significantly reduces
 * threaded overhead.  When doing simple polling we can depend on the
 * per-thread (fd,fp) cache.  With more descriptors, we batch.
 */
static
void
floadkevfps(thread_t td, struct filedesc *fdp, struct kevent *kev,
            struct file **fp, int climit)
{
        struct filterops *fops;
        int tdcache;

        if (climit <= 2 && td->td_proc && td->td_proc->p_fd == fdp) {
                tdcache = 1;
        } else {
                tdcache = 0;
                spin_lock_shared(&fdp->fd_spin);
        }

        while (climit) {
                *fp = NULL;
                if (kev->filter < 0 &&
                    kev->filter + EVFILT_SYSCOUNT >= 0) {
                        fops = sysfilt_ops[~kev->filter];
                        if (fops->f_flags & FILTEROP_ISFD) {
                                if (tdcache) {
                                        *fp = holdfp(td, kev->ident, -1);
                                } else {
                                        *fp = holdfp_fdp_locked(fdp,
                                            kev->ident, -1);
                                }
                        }
                }
                --climit;
                ++fp;
                ++kev;
        }
        if (tdcache == 0)
                spin_unlock_shared(&fdp->fd_spin);
}
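/*
 * Illustrative note on EV_RECEIPT (see kern_kevent() above and the
 * EV_RECEIPT handling in kqueue_register() below): each change flagged
 * EV_RECEIPT is posted back to the caller as an EV_ERROR kevent with
 * data set to the registration error (0 on success), letting userland
 * batch registrations while still collecting per-change status.
 */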
/*
 * Register up to *countp kev's.  Always registers at least 1.
 *
 * The number registered is returned in *countp.
 *
 * If an error occurs or a kev is flagged EV_RECEIPT, it is
 * processed and included in *countp, and processing then
 * stops.
 */
int
kqueue_register(struct kqueue *kq, struct kevent *kev, int *countp)
{
        struct filedesc *fdp = kq->kq_fdp;
        struct klist *list = NULL;
        struct filterops *fops;
        struct file *fp[KQ_NEVENTS];
        struct knote *kn = NULL;
        struct thread *td;
        int error;
        int count;
        int climit;
        int closedcounter;
        struct knote_cache_list *cache_list;

        td = curthread;
        climit = *countp;
        if (climit > KQ_NEVENTS)
                climit = KQ_NEVENTS;
        closedcounter = fdp->fd_closedcounter;
        floadkevfps(td, fdp, kev, fp, climit);

        lwkt_getpooltoken(kq);
        count = 0;
        error = 0;

        /*
         * To avoid races, only one thread can register events on this
         * kqueue at a time.
         */
        while (__predict_false(kq->kq_regtd != NULL && kq->kq_regtd != td)) {
                kq->kq_state |= KQ_REGWAIT;
                tsleep(&kq->kq_regtd, 0, "kqreg", 0);
        }
        if (__predict_false(kq->kq_regtd != NULL)) {
                /* Recursive calling of kqueue_register() */
                td = NULL;
        } else {
                /* Owner of the kq_regtd, i.e. td != NULL */
                kq->kq_regtd = td;
        }

loop:
        if (kev->filter < 0) {
                if (kev->filter + EVFILT_SYSCOUNT < 0) {
                        error = EINVAL;
                        ++count;
                        goto done;
                }
                fops = sysfilt_ops[~kev->filter];       /* to 0-base index */
        } else {
                /*
                 * XXX
                 * filter attach routine is responsible for ensuring that
                 * the identifier can be attached to it.
                 */
                error = EINVAL;
                ++count;
                goto done;
        }

        if (fops->f_flags & FILTEROP_ISFD) {
                /* validate descriptor */
                if (fp[count] == NULL) {
                        error = EBADF;
                        ++count;
                        goto done;
                }
        }

        cache_list = &knote_cache_lists[mycpuid];
        if (SLIST_EMPTY(&cache_list->knote_cache)) {
                struct knote *new_kn;

                new_kn = knote_alloc();
                crit_enter();
                SLIST_INSERT_HEAD(&cache_list->knote_cache, new_kn, kn_link);
                cache_list->knote_cache_cnt++;
                crit_exit();
        }

        if (fp[count] != NULL) {
                list = &fp[count]->f_klist;
        } else if (kq->kq_knhashmask) {
                list = &kq->kq_knhash[
                    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
        }
        if (list != NULL) {
                lwkt_getpooltoken(list);
again:
                SLIST_FOREACH(kn, list, kn_link) {
                        if (kn->kn_kq == kq &&
                            kn->kn_filter == kev->filter &&
                            kn->kn_id == kev->ident) {
                                if (knote_acquire(kn) == 0)
                                        goto again;
                                break;
                        }
                }
                lwkt_relpooltoken(list);
        }

        /*
         * NOTE: At this point if kn is non-NULL we will have acquired
         *       it and set KN_PROCESSING.
         */
        if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
                error = ENOENT;
                ++count;
                goto done;
        }

        /*
         * kn now contains the matching knote, or NULL if no match
         */
        if (kev->flags & EV_ADD) {
                if (kn == NULL) {
                        crit_enter();
                        kn = SLIST_FIRST(&cache_list->knote_cache);
                        if (kn == NULL) {
                                crit_exit();
                                kn = knote_alloc();
                        } else {
                                SLIST_REMOVE_HEAD(&cache_list->knote_cache,
                                    kn_link);
                                cache_list->knote_cache_cnt--;
                                crit_exit();
                        }
                        kn->kn_fp = fp[count];
                        kn->kn_kq = kq;
                        kn->kn_fop = fops;

                        /*
                         * apply reference count to knote structure, and
                         * do not release it at the end of this routine.
                         */
1279 */ 1280 fp[count] = NULL; /* safety */ 1281 1282 kn->kn_sfflags = kev->fflags; 1283 kn->kn_sdata = kev->data; 1284 kev->fflags = 0; 1285 kev->data = 0; 1286 kn->kn_kevent = *kev; 1287 1288 /* 1289 * KN_PROCESSING prevents the knote from getting 1290 * ripped out from under us while we are trying 1291 * to attach it, in case the attach blocks. 1292 */ 1293 kn->kn_status = KN_PROCESSING; 1294 knote_attach(kn); 1295 if ((error = filter_attach(kn)) != 0) { 1296 kn->kn_status |= KN_DELETING | KN_REPROCESS; 1297 knote_drop(kn); 1298 ++count; 1299 goto done; 1300 } 1301 1302 /* 1303 * Interlock against close races which either tried 1304 * to remove our knote while we were blocked or missed 1305 * it entirely prior to our attachment. We do not 1306 * want to end up with a knote on a closed descriptor. 1307 */ 1308 if ((fops->f_flags & FILTEROP_ISFD) && 1309 checkfdclosed(curthread, fdp, kev->ident, kn->kn_fp, 1310 closedcounter)) { 1311 kn->kn_status |= KN_DELETING | KN_REPROCESS; 1312 } 1313 } else { 1314 /* 1315 * The user may change some filter values after the 1316 * initial EV_ADD, but doing so will not reset any 1317 * filter which have already been triggered. 1318 */ 1319 KKASSERT(kn->kn_status & KN_PROCESSING); 1320 if (fops == &user_filtops) { 1321 filt_usertouch(kn, kev, EVENT_REGISTER); 1322 } else { 1323 kn->kn_sfflags = kev->fflags; 1324 kn->kn_sdata = kev->data; 1325 kn->kn_kevent.udata = kev->udata; 1326 } 1327 } 1328 1329 /* 1330 * Execute the filter event to immediately activate the 1331 * knote if necessary. If reprocessing events are pending 1332 * due to blocking above we do not run the filter here 1333 * but instead let knote_release() do it. Otherwise we 1334 * might run the filter on a deleted event. 1335 */ 1336 if ((kn->kn_status & KN_REPROCESS) == 0) { 1337 if (filter_event(kn, 0)) 1338 KNOTE_ACTIVATE(kn); 1339 } 1340 } else if (kev->flags & EV_DELETE) { 1341 /* 1342 * Delete the existing knote 1343 */ 1344 knote_detach_and_drop(kn); 1345 error = 0; 1346 ++count; 1347 goto done; 1348 } else { 1349 /* 1350 * Modify an existing event. 1351 * 1352 * The user may change some filter values after the 1353 * initial EV_ADD, but doing so will not reset any 1354 * filter which have already been triggered. 1355 */ 1356 KKASSERT(kn->kn_status & KN_PROCESSING); 1357 if (fops == &user_filtops) { 1358 filt_usertouch(kn, kev, EVENT_REGISTER); 1359 } else { 1360 kn->kn_sfflags = kev->fflags; 1361 kn->kn_sdata = kev->data; 1362 kn->kn_kevent.udata = kev->udata; 1363 } 1364 1365 /* 1366 * Execute the filter event to immediately activate the 1367 * knote if necessary. If reprocessing events are pending 1368 * due to blocking above we do not run the filter here 1369 * but instead let knote_release() do it. Otherwise we 1370 * might run the filter on a deleted event. 1371 */ 1372 if ((kn->kn_status & KN_REPROCESS) == 0) { 1373 if (filter_event(kn, 0)) 1374 KNOTE_ACTIVATE(kn); 1375 } 1376 } 1377 1378 /* 1379 * Disablement does not deactivate a knote here. 1380 */ 1381 if ((kev->flags & EV_DISABLE) && 1382 ((kn->kn_status & KN_DISABLED) == 0)) { 1383 kn->kn_status |= KN_DISABLED; 1384 } 1385 1386 /* 1387 * Re-enablement may have to immediately enqueue an active knote. 

        /*
         * Re-enablement may have to immediately enqueue an active knote.
         */
        if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
                kn->kn_status &= ~KN_DISABLED;
                if ((kn->kn_status & KN_ACTIVE) &&
                    ((kn->kn_status & KN_QUEUED) == 0)) {
                        knote_enqueue(kn);
                }
        }

        /*
         * Handle any required reprocessing
         */
        knote_release(kn);
        /* kn may be invalid now */

        /*
         * Loop control.  We stop on errors (above), and also stop after
         * processing EV_RECEIPT, so the caller can process it.
         */
        ++count;
        if (kev->flags & EV_RECEIPT) {
                error = 0;
                goto done;
        }
        ++kev;
        if (count < climit) {
                if (fp[count-1])                /* drop unprocessed fp */
                        fdrop(fp[count-1]);
                goto loop;
        }

        /*
         * Cleanup
         */
done:
        if (td != NULL) {               /* Owner of the kq_regtd */
                kq->kq_regtd = NULL;
                if (__predict_false(kq->kq_state & KQ_REGWAIT)) {
                        kq->kq_state &= ~KQ_REGWAIT;
                        wakeup(&kq->kq_regtd);
                }
        }
        lwkt_relpooltoken(kq);

        /*
         * Drop unprocessed file pointers
         */
        *countp = count;
        if (count && fp[count-1])
                fdrop(fp[count-1]);
        while (count < climit) {
                if (fp[count])
                        fdrop(fp[count]);
                ++count;
        }
        return (error);
}

/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
            struct knote *marker, int closedcounter, int flags)
{
        struct knote *kn, local_marker;
        thread_t td = curthread;
        int total;

        total = 0;
        local_marker.kn_filter = EVFILT_MARKER;
        local_marker.kn_status = KN_PROCESSING;

        lwkt_getpooltoken(kq);

        /*
         * Adjust marker, insert initial marker, or leave the marker alone.
         *
         * Also setup our local_marker.
         */
        switch(flags & KEVENT_SCAN_MASK) {
        case KEVENT_SCAN_RELOAD_MARKER:
                TAILQ_REMOVE(&kq->kq_knpend, marker, kn_tqe);
                /* fall through */
        case KEVENT_SCAN_INSERT_MARKER:
                TAILQ_INSERT_TAIL(&kq->kq_knpend, marker, kn_tqe);
                break;
        }
        TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);

        /*
         * Collect events.
         */
        while (count) {
                kn = TAILQ_NEXT(&local_marker, kn_tqe);
                if (kn->kn_filter == EVFILT_MARKER) {
                        /* Marker reached, we are done */
                        if (kn == marker)
                                break;

                        /* Move local marker past some other thread's marker */
                        kn = TAILQ_NEXT(kn, kn_tqe);
                        TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
                        TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
                        continue;
                }

                /*
                 * We can't skip a knote undergoing processing, otherwise
                 * we risk not returning it when the user process expects
                 * it should be returned.  Sleep and retry.
                 */
                if (knote_acquire(kn) == 0)
                        continue;

                /*
                 * Remove the event for processing.
                 *
                 * WARNING!  We must leave KN_QUEUED set to prevent the
                 *           event from being KNOTE_ACTIVATE()d while
                 *           the queue state is in limbo, in case we
                 *           block.
                 */
                TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
                kq->kq_count--;

                /*
                 * Kernel select() and poll() functions cache previous
                 * operations on the assumption that future operations
                 * will use similar descriptor sets.  This removes any
                 * stale entries in a way that does not require a descriptor
                 * lookup and is thus not affected by close() races.
                 *
                 * Do not report to *_copyout()
                 */
                if (flags & KEVENT_AUTO_STALE) {
                        if ((uint64_t)kn->kn_kevent.udata <
                            curthread->td_lwp->lwp_kqueue_serial)
                        {
                                kn->kn_status |= KN_DELETING | KN_REPROCESS |
                                                 KN_DISABLED;
                        }
                }

                /*
                 * If a descriptor is close()d out from under a poll/select,
                 * we want to report the event but delete the note because
                 * the note can wind up being 'stuck' on kq_knpend.
                 */
                if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
                    checkfdclosed(td, kq->kq_fdp, kn->kn_kevent.ident,
                                  kn->kn_fp, closedcounter))
                {
                        kn->kn_status |= KN_DELETING | KN_REPROCESS;
                }

                if (kn->kn_status & KN_DISABLED) {
                        /*
                         * If disabled we ensure the event is not queued
                         * but leave its active bit set.  On re-enablement
                         * the event may be immediately triggered.
                         */
                        kn->kn_status &= ~KN_QUEUED;
                } else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
                           (kn->kn_status & KN_DELETING) == 0 &&
                           filter_event(kn, 0) == 0) {
                        /*
                         * If not running in one-shot mode and the event
                         * is no longer present we ensure it is removed
                         * from the queue and ignore it.
                         */
                        kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
                } else {
                        /*
                         * Post the event
                         */
                        if (kn->kn_fop == &user_filtops)
                                filt_usertouch(kn, kevp, EVENT_PROCESS);
                        else
                                *kevp = kn->kn_kevent;
                        ++kevp;
                        ++total;
                        --count;

                        if (kn->kn_flags & EV_ONESHOT) {
                                kn->kn_status &= ~KN_QUEUED;
                                kn->kn_status |= KN_DELETING | KN_REPROCESS;
                        } else {
                                if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
                                        if (kn->kn_flags & EV_CLEAR) {
                                                kn->kn_data = 0;
                                                kn->kn_fflags = 0;
                                        }
                                        if (kn->kn_flags & EV_DISPATCH) {
                                                kn->kn_status |= KN_DISABLED;
                                        }
                                        kn->kn_status &= ~(KN_QUEUED |
                                                           KN_ACTIVE);
                                } else {
                                        TAILQ_INSERT_TAIL(&kq->kq_knpend,
                                                          kn,
                                                          kn_tqe);
                                        kq->kq_count++;
                                }
                        }
                }

                /*
                 * Handle any post-processing states
                 */
                knote_release(kn);
        }
        TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

        lwkt_relpooltoken(kq);
        return (total);
}
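/*
 * Summary of the marker protocol used by kqueue_scan() above: the
 * caller's persistent 'marker' bounds the scan so continuously
 * re-queued events (EV_CLEAR-style recycling) cannot cause an endless
 * loop, while the on-stack 'local_marker' acts as the scan cursor and
 * may step past markers belonging to other threads scanning the same
 * kqueue concurrently.
 */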
/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 *
 * MPSAFE
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        return (ENXIO);
}

/*
 * MPSAFE
 */
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        return (ENXIO);
}

/*
 * MPALMOSTSAFE
 */
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
             struct ucred *cred, struct sysmsg *msg)
{
        struct kqueue *kq;
        int error;

        kq = (struct kqueue *)fp->f_data;
        lwkt_getpooltoken(kq);
        switch(com) {
        case FIOASYNC:
                if (*(int *)data)
                        kq->kq_state |= KQ_ASYNC;
                else
                        kq->kq_state &= ~KQ_ASYNC;
                error = 0;
                break;
        case FIOSETOWN:
                error = fsetown(*(int *)data, &kq->kq_sigio);
                break;
        default:
                error = ENOTTY;
                break;
        }
        lwkt_relpooltoken(kq);
        return (error);
}

/*
 * MPSAFE
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
        struct kqueue *kq = (struct kqueue *)fp->f_data;

        bzero((void *)st, sizeof(*st));
        st->st_size = kq->kq_count;
        st->st_blksize = sizeof(struct kevent);
        st->st_mode = S_IFIFO;
        return (0);
}

/*
 * MPSAFE
 */
static int
kqueue_close(struct file *fp)
{
        struct kqueue *kq = (struct kqueue *)fp->f_data;

        kqueue_terminate(kq);

        fp->f_data = NULL;
        funsetown(&kq->kq_sigio);

        kfree(kq, M_KQUEUE);
        return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
        if (kq->kq_sleep_cnt) {
                u_int sleep_cnt = kq->kq_sleep_cnt;

                kq->kq_sleep_cnt = 0;
                if (sleep_cnt == 1)
                        wakeup_one(kq);
                else
                        wakeup(kq);
        }
        KNOTE(&kq->kq_kqinfo.ki_note, 0);
}

/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static int
filter_attach(struct knote *kn)
{
        int ret;

        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                ret = kn->kn_fop->f_attach(kn);
        } else {
                get_mplock();
                ret = kn->kn_fop->f_attach(kn);
                rel_mplock();
        }
        return (ret);
}

/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static void
knote_detach_and_drop(struct knote *kn)
{
        kn->kn_status |= KN_DELETING | KN_REPROCESS;
        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                kn->kn_fop->f_detach(kn);
        } else {
                get_mplock();
                kn->kn_fop->f_detach(kn);
                rel_mplock();
        }
        knote_drop(kn);
}
/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 *
 * Caller must be holding the related kq token
 */
static int
filter_event(struct knote *kn, long hint)
{
        int ret;

        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                ret = kn->kn_fop->f_event(kn, hint);
        } else {
                get_mplock();
                ret = kn->kn_fop->f_event(kn, hint);
                rel_mplock();
        }
        return (ret);
}

/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * sticky.
 */
void
knote(struct klist *list, long hint)
{
        struct kqueue *kq;
        struct knote *kn;
        struct knote *kntmp;

        lwkt_getpooltoken(list);
restart:
        SLIST_FOREACH(kn, list, kn_next) {
                kq = kn->kn_kq;
                lwkt_getpooltoken(kq);

                /* temporary verification hack */
                SLIST_FOREACH(kntmp, list, kn_next) {
                        if (kn == kntmp)
                                break;
                }
                if (kn != kntmp || kn->kn_kq != kq) {
                        lwkt_relpooltoken(kq);
                        goto restart;
                }

                if (kn->kn_status & KN_PROCESSING) {
                        /*
                         * Someone else is processing the knote, ask the
                         * other thread to reprocess it and don't mess
                         * with it otherwise.
                         */
                        if (hint == 0) {
                                kn->kn_status |= KN_REPROCESS;
                                lwkt_relpooltoken(kq);
                                continue;
                        }

                        /*
                         * If the hint is non-zero we have to wait or risk
                         * losing the state the caller is trying to update.
                         *
                         * XXX This is a real problem, certain process
                         *     and signal filters will bump kn_data for
                         *     already-processed notes more than once if
                         *     we restart the list scan.  FIXME.
                         */
                        kn->kn_status |= KN_WAITING | KN_REPROCESS;
                        tsleep(kn, 0, "knotec", hz);
                        lwkt_relpooltoken(kq);
                        goto restart;
                }

                /*
                 * Become the reprocessing master ourselves.
                 *
                 * If hint is non-zero running the event is mandatory
                 * when not deleting so do it whether reprocessing is
                 * set or not.
                 */
                kn->kn_status |= KN_PROCESSING;
                if ((kn->kn_status & KN_DELETING) == 0) {
                        if (filter_event(kn, hint))
                                KNOTE_ACTIVATE(kn);
                }
                if (knote_release(kn)) {
                        lwkt_relpooltoken(kq);
                        goto restart;
                }
                lwkt_relpooltoken(kq);
        }
        lwkt_relpooltoken(list);
}

/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
        lwkt_getpooltoken(klist);
        KKASSERT(kn->kn_status & KN_PROCESSING);
        SLIST_INSERT_HEAD(klist, kn, kn_next);
        lwkt_relpooltoken(klist);
}
/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
        lwkt_getpooltoken(klist);
        KKASSERT(kn->kn_status & KN_PROCESSING);
        SLIST_REMOVE(klist, kn, knote, kn_next);
        lwkt_relpooltoken(klist);
}

void
knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
                    struct filterops *ops, void *hook)
{
        struct kqueue *kq;
        struct knote *kn;

        lwkt_getpooltoken(&src->ki_note);
        lwkt_getpooltoken(&dst->ki_note);
        while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
                kq = kn->kn_kq;
                lwkt_getpooltoken(kq);
                if (SLIST_FIRST(&src->ki_note) != kn || kn->kn_kq != kq) {
                        lwkt_relpooltoken(kq);
                        continue;
                }
                if (knote_acquire(kn)) {
                        knote_remove(&src->ki_note, kn);
                        kn->kn_fop = ops;
                        kn->kn_hook = hook;
                        knote_insert(&dst->ki_note, kn);
                        knote_release(kn);
                        /* kn may be invalid now */
                }
                lwkt_relpooltoken(kq);
        }
        lwkt_relpooltoken(&dst->ki_note);
        lwkt_relpooltoken(&src->ki_note);
}

/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
        struct kqueue *kq;
        struct knote *kn;
        struct knote *kntmp;

        lwkt_getpooltoken(&fp->f_klist);
restart:
        SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
                if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
                        kq = kn->kn_kq;
                        lwkt_getpooltoken(kq);

                        /* temporary verification hack */
                        SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
                                if (kn == kntmp)
                                        break;
                        }
                        if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
                            kn->kn_id != fd || kn->kn_kq != kq) {
                                lwkt_relpooltoken(kq);
                                goto restart;
                        }
                        if (knote_acquire(kn))
                                knote_detach_and_drop(kn);
                        lwkt_relpooltoken(kq);
                        goto restart;
                }
        }
        lwkt_relpooltoken(&fp->f_klist);
}

/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_attach(struct knote *kn)
{
        struct klist *list;
        struct kqueue *kq = kn->kn_kq;

        if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
                KKASSERT(kn->kn_fp);
                list = &kn->kn_fp->f_klist;
        } else {
                if (kq->kq_knhashmask == 0)
                        kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
                                                 &kq->kq_knhashmask);
                list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
        }
        lwkt_getpooltoken(list);
        SLIST_INSERT_HEAD(list, kn, kn_link);
        lwkt_relpooltoken(list);
        TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
}
/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_drop(struct knote *kn)
{
        struct kqueue *kq;
        struct klist *list;

        kq = kn->kn_kq;

        if (kn->kn_fop->f_flags & FILTEROP_ISFD)
                list = &kn->kn_fp->f_klist;
        else
                list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

        lwkt_getpooltoken(list);
        SLIST_REMOVE(list, kn, knote, kn_link);
        lwkt_relpooltoken(list);
        TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
        if (kn->kn_status & KN_QUEUED)
                knote_dequeue(kn);
        if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
                fdrop(kn->kn_fp);
                kn->kn_fp = NULL;
        }
        knote_free(kn);
}

/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_enqueue(struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;

        KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
        TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status |= KN_QUEUED;
        ++kq->kq_count;

        /*
         * Send SIGIO on request (typically set up as a mailbox signal)
         */
        if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
                pgsigio(kq->kq_sigio, SIGIO, 0);

        kqueue_wakeup(kq);
}

/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_dequeue(struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;

        KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
        TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status &= ~KN_QUEUED;
        kq->kq_count--;
}

static struct knote *
knote_alloc(void)
{
        return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
}

static void
knote_free(struct knote *kn)
{
        struct knote_cache_list *cache_list;

        cache_list = &knote_cache_lists[mycpuid];
        if (cache_list->knote_cache_cnt < KNOTE_CACHE_MAX) {
                crit_enter();
                SLIST_INSERT_HEAD(&cache_list->knote_cache, kn, kn_link);
                cache_list->knote_cache_cnt++;
                crit_exit();
                return;
        }
        kfree(kn, M_KQUEUE);
}

struct sleepinfo {
        void *ident;
        int timedout;
};

static void
precise_sleep_intr(systimer_t info, int in_ipi, struct intrframe *frame)
{
        struct sleepinfo *si;

        si = info->data;
        si->timedout = 1;
        wakeup(si->ident);
}

static int
precise_sleep(void *ident, int flags, const char *wmesg, int us)
{
        struct systimer info;
        struct sleepinfo si = {
                .ident = ident,
                .timedout = 0,
        };
        int r;

        tsleep_interlock(ident, flags);
        systimer_init_oneshot(&info, precise_sleep_intr, &si, us);
        r = tsleep(ident, flags | PINTERLOCKED, wmesg, 0);
        systimer_del(&info);
        if (si.timedout)
                r = EWOULDBLOCK;

        return r;
}
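/*
 * NOTE: precise_sleep() backs the KEVENT_TIMEOUT_PRECISE path in
 * kern_kevent().  A one-shot systimer fires after 'us' microseconds
 * and wakes 'ident'; a timeout is reported as EWOULDBLOCK, which
 * kern_kevent() converts to a normal (0) return.
 */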