/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/ktr.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>

#define EVENT_REGISTER	1
#define EVENT_PROCESS	2

/*
 * Global token for kqueue subsystem
 */
#if 0
struct lwkt_token kq_token = LWKT_TOKEN_INITIALIZER(kq_token);
SYSCTL_LONG(_lwkt, OID_AUTO, kq_collisions,
    CTLFLAG_RW, &kq_token.t_collisions, 0,
    "Collision counter of kq_token");
#endif

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
	struct kevent_args	*ka;
	int			pchanges;
};

static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct knote *marker);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);
static void	kqueue_wakeup(struct kqueue *kq);
static int	filter_attach(struct knote *kn);
static int	filter_event(struct knote *kn, long hint);

/*
 * MPSAFE
 */
static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};

static void	knote_attach(struct knote *kn);
static void	knote_drop(struct knote *kn);
static void	knote_detach_and_drop(struct knote *kn);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);

static struct filterops file_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ FILTEROP_MPSAFE, filt_timerattach, filt_timerdetach, filt_timer };
static struct filterops user_filtops =
	{ FILTEROP_MPSAFE, filt_userattach, filt_userdetach, filt_user };

static int		kq_ncallouts = 0;
static int		kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int		kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of loops for kqueue scan");
static int		kq_wakeup_one = 1;
SYSCTL_INT(_kern, OID_AUTO, kq_wakeup_one, CTLFLAG_RW,
    &kq_wakeup_one, 0, "Wakeup only one kqueue scanner");

#define KNOTE_ACTIVATE(kn) do { 					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
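/*
 * Filter values from userland are negative; kqueue_register() below
 * indexes this table with ~kev->filter, so EVFILT_READ (-1) maps to
 * slot 0, EVFILT_WRITE (-2) to slot 1, and so on.
 */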
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_EXCEPT */
	&user_filtops,			/* EVFILT_USER */
};

static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}

/*
 * MPSAFE
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EOPNOTSUPP);

	kn->kn_fop = &kqread_filtops;
	knote_insert(&kq->kq_kqinfo.ki_note, kn);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	knote_remove(&kq->kq_kqinfo.ki_note, kn);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL) {
		return (ESRCH);
	}
	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		if (p)
			PRELE(p);
		return (EACCES);
	}

	lwkt_gettoken(&p->p_token);
	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	knote_insert(&p->p_klist, kn);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	p = kn->kn_ptr.p_proc;
	knote_remove(&p->p_klist, kn);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will be poof,
	 * gone later on.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;
		if ((kn->kn_status & KN_DETACHED) == 0) {
			PHOLD(p);
			knote_remove(&p->p_klist, kn);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
			PRELE(p);
		}
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}

/*
 * The callout interlocks with callout_terminate() but can still
 * race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
	struct lwkt_token *tok;
	struct knote *kn = knx;
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	tok = lwkt_token_pool_lookup(kn->kn_kq);
	lwkt_gettoken(tok);
	if ((kn->kn_status & KN_DELETING) == 0) {
		kn->kn_data++;
		KNOTE_ACTIVATE(kn);

		if ((kn->kn_flags & EV_ONESHOT) == 0) {
			tv.tv_sec = kn->kn_sdata / 1000;
			tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
			tticks = tvtohz_high(&tv);
			calloutp = (struct callout *)kn->kn_hook;
			callout_reset(calloutp, tticks, filt_timerexpire, kn);
		}
	}
	lwkt_reltoken(tok);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;
	int prev_ncallouts;

	prev_ncallouts = atomic_fetchadd_int(&kq_ncallouts, 1);
	if (prev_ncallouts >= kq_calloutmax) {
		atomic_subtract_int(&kq_ncallouts, 1);
		kn->kn_hook = NULL;
		return (ENOMEM);
	}

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
	callout_init_mp(calloutp);
	kn->kn_hook = (caddr_t)calloutp;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);

	return (0);
}
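/*
 * Illustrative userland usage of the timer filter (a sketch, not part
 * of this file): data is the period in milliseconds and EV_CLEAR is
 * set automatically by filt_timerattach() above, so the event below
 * fires every 500ms until deleted.
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */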
/*
 * This function is called with the knote flagged locked but it is
 * still possible to race a callout event due to the callback blocking.
 * We must call callout_terminate() instead of callout_stop() to deal
 * with the race.
 */
static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_terminate(calloutp);
	kfree(calloutp, M_KQUEUE);
	atomic_subtract_int(&kq_ncallouts, 1);
}

static int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}

/*
 * EVFILT_USER
 */
static int
filt_userattach(struct knote *kn)
{
	kn->kn_hook = NULL;
	if (kn->kn_fflags & NOTE_TRIGGER)
		kn->kn_ptr.hookid = 1;
	else
		kn->kn_ptr.hookid = 0;
	return 0;
}

static void
filt_userdetach(struct knote *kn)
{
	/* nothing to do */
}

static int
filt_user(struct knote *kn, long hint)
{
	return (kn->kn_ptr.hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_ptr.hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_sfflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_sfflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		kn->kn_sdata = kev->data;

		/*
		 * This is not the correct use of EV_CLEAR in an event
		 * modification, it should have been passed as a NOTE instead.
		 * But we need to maintain compatibility with Apple & FreeBSD.
		 *
		 * Note however that EV_CLEAR can still be used when doing
		 * the initial registration of the event and works as expected
		 * (clears the event on reception).
		 */
		if (kev->flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			/* kn_data, kn_fflags handled by parent */
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%ld)", type);
		break;
	}
}

/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 *
 * Related kq token must be held.
 */
static __inline int
knote_acquire(struct knote *kn)
{
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_WAITING | KN_REPROCESS;
		tsleep(kn, 0, "kqepts", hz);
		/* knote may be stale now */
		return(0);
	}
	kn->kn_status |= KN_PROCESSING;
	return(1);
}
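/*
 * Canonical lookup/retry pattern for knote_acquire(), mirroring the
 * again1/again2 loops in kqueue_register() below (a sketch; the match
 * test stands in for whatever identity check the caller needs):
 *
 *	again:
 *	SLIST_FOREACH(kn, list, kn_link) {
 *		if (kn->kn_id == ident && kn->kn_filter == filter) {
 *			if (knote_acquire(kn) == 0)
 *				goto again;
 *			break;
 *		}
 *	}
 */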
/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Caller must be holding the related kq token
 *
 * Non-zero is returned if the knote is destroyed or detached.
 */
static __inline int
knote_release(struct knote *kn)
{
	while (kn->kn_status & KN_REPROCESS) {
		kn->kn_status &= ~KN_REPROCESS;
		if (kn->kn_status & KN_WAITING) {
			kn->kn_status &= ~KN_WAITING;
			wakeup(kn);
		}
		if (kn->kn_status & KN_DELETING) {
			knote_detach_and_drop(kn);
			return(1);
			/* NOT REACHED */
		}
		if (filter_event(kn, 0))
			KNOTE_ACTIVATE(kn);
	}
	kn->kn_status &= ~KN_PROCESSING;
	if (kn->kn_status & KN_DETACHED)
		return(1);
	else
		return(0);
}

/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 *
 * MPSAFE
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
	TAILQ_INIT(&kq->kq_knpend);
	TAILQ_INIT(&kq->kq_knlist);
	kq->kq_count = 0;
	kq->kq_fdp = fdp;
	SLIST_INIT(&kq->kq_kqinfo.ki_note);
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
	struct lwkt_token *tok;
	struct knote *kn;

	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);
	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
		if (knote_acquire(kn))
			knote_detach_and_drop(kn);
	}
	lwkt_reltoken(tok);

	if (kq->kq_knhash) {
		hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask);
		kq->kq_knhash = NULL;
		kq->kq_knhashmask = 0;
	}
}

/*
 * MPSAFE
 */
int
sys_kqueue(struct kqueue_args *uap)
{
	struct thread *td = curthread;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq, td->td_proc->p_fd);
	fp->f_data = kq;

	fsetfd(kq->kq_fdp, fp, fd);
	uap->sysmsg_result = fd;
	fdrop(fp);
	return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct kevent_copyin_args *kap;
	int error;

	kap = (struct kevent_copyin_args *)arg;

	error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
	if (error == 0) {
		kap->ka->eventlist += count;
		*res += count;
	} else {
		*res = -1;
	}

	return (error);
}
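/*
 * NOTE: kern_kevent() is parameterized with these copyin/copyout
 *	 callbacks so that other facilities (doselect() and sys_poll()
 *	 are referenced in the comments below) can feed changes from
 *	 and post events to kernel-side arrays instead of user memory.
 */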
/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
	struct kevent_copyin_args *kap;
	int error, count;

	kap = (struct kevent_copyin_args *)arg;

	count = min(kap->ka->nchanges - kap->pchanges, max);
	error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
	if (error == 0) {
		kap->ka->changelist += count;
		kap->pchanges += count;
		*events = count;
	}

	return (error);
}

/*
 * MPSAFE
 */
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
	    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
	    struct timespec *tsp_in)
{
	struct kevent *kevp;
	struct timespec *tsp, ats;
	int i, n, total, error, nerrors = 0;
	int lres;
	int limit = kq_checkloop;
	struct kevent kev[KQ_NEVENTS];
	struct knote marker;
	struct lwkt_token *tok;

	if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec)
		atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);

	tsp = tsp_in;
	*res = 0;

	for (;;) {
		n = 0;
		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
		if (error)
			return error;
		if (n == 0)
			break;
		for (i = 0; i < n; i++) {
			kevp = &kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp);

			/*
			 * If a registration returns an error we
			 * immediately post the error.  The kevent()
			 * call itself will fail with the error if
			 * no space is available for posting.
			 *
			 * Such errors normally bypass the timeout/blocking
			 * code.  However, if the copyoutfn function refuses
			 * to post the error (see sys_poll()), then we
			 * ignore it too.
			 */
			if (error || (kevp->flags & EV_RECEIPT)) {
				kevp->flags = EV_ERROR;
				kevp->data = error;
				lres = *res;
				kevent_copyoutfn(uap, kevp, 1, res);
				if (*res < 0) {
					return error;
				} else if (lres != *res) {
					nevents--;
					nerrors++;
				}
			}
		}
	}
	if (nerrors)
		return 0;

	/*
	 * Acquire/wait for events - setup timeout
	 */
	if (tsp != NULL) {
		if (tsp->tv_sec || tsp->tv_nsec) {
			getnanouptime(&ats);
			timespecadd(tsp, &ats);		/* tsp = target time */
		}
	}

	/*
	 * Loop as required.
	 *
	 * Collect as many events as we can.  Sleeping on successive
	 * loops is disabled if copyoutfn has incremented (*res).
	 *
	 * The loop stops if an error occurs, all events have been
	 * scanned (the marker has been reached), or fewer than the
	 * maximum number of events is found.
	 *
	 * The copyoutfn function does not have to increment (*res) in
	 * order for the loop to continue.
	 *
	 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
	 */
	total = 0;
	error = 0;
	marker.kn_filter = EVFILT_MARKER;
	marker.kn_status = KN_PROCESSING;
	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);
	TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
	lwkt_reltoken(tok);
	while ((n = nevents - total) > 0) {
		if (n > KQ_NEVENTS)
			n = KQ_NEVENTS;

		/*
		 * If no events are pending sleep until timeout (if any)
		 * or an event occurs.
		 *
		 * After the sleep completes the marker is moved to the
		 * end of the list, making any received events available
		 * to our scan.
		 */
		if (kq->kq_count == 0 && *res == 0) {
			int timeout;

			if (tsp == NULL) {
				timeout = 0;
			} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
				error = EWOULDBLOCK;
				break;
			} else {
				struct timespec atx = *tsp;

				getnanouptime(&ats);
				timespecsub(&atx, &ats);
				if (atx.tv_sec < 0) {
					error = EWOULDBLOCK;
					break;
				} else {
					timeout = atx.tv_sec > 24 * 60 * 60 ?
					    24 * 60 * 60 * hz :
					    tstohz_high(&atx);
				}
			}

			lwkt_gettoken(tok);
			if (kq->kq_count == 0) {
				kq->kq_state |= KQ_SLEEP;
				error = tsleep(kq, PCATCH, "kqread", timeout);

				/* don't restart after signals... */
				if (error == ERESTART)
					error = EINTR;
				if (error) {
					lwkt_reltoken(tok);
					break;
				}

				TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
				TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker,
				    kn_tqe);
			}
			lwkt_reltoken(tok);
		}

		/*
		 * Process all received events
		 * Account for all non-spurious events in our total
		 */
		i = kqueue_scan(kq, kev, n, &marker);
		if (i) {
			lres = *res;
			error = kevent_copyoutfn(uap, kev, i, res);
			total += *res - lres;
			if (error)
				break;
		}
		if (limit && --limit == 0)
			panic("kqueue: checkloop failed i=%d", i);

		/*
		 * Normally when fewer events are returned than requested
		 * we can stop.  However, if only spurious events were
		 * collected the copyout will not bump (*res) and we have
		 * to continue.
		 */
		if (i < n && *res)
			break;

		/*
		 * Deal with an edge case where spurious events can cause
		 * a loop to occur without moving the marker.  This can
		 * prevent kqueue_scan() from picking up new events which
		 * race us.  We must be sure to move the marker for this
		 * case.
		 *
		 * NOTE: We do not want to move the marker if events
		 *	 were scanned because normal kqueue operations
		 *	 may reactivate events.  Moving the marker in
		 *	 that case could result in duplicates for the
		 *	 same event.
		 */
		if (i == 0) {
			lwkt_gettoken(tok);
			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
			lwkt_reltoken(tok);
		}
	}
	lwkt_gettoken(tok);
	TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
	lwkt_reltoken(tok);

	/* Timeouts do not return EWOULDBLOCK. */
	if (error == EWOULDBLOCK)
		error = 0;
	return error;
}
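/*
 * Illustrative userland view of the timeout semantics implemented
 * above (a sketch, not part of this file): a NULL timeout pointer
 * blocks until an event or signal arrives, a zeroed timespec polls,
 * and an expired timeout returns 0 rather than an error.
 *
 *	struct timespec ts = { 0, 0 };
 *
 *	n = kevent(kq, NULL, 0, evlist, nevents, &ts);	(poll)
 *	n = kevent(kq, NULL, 0, evlist, nevents, NULL);	(block)
 */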
/*
 * MPALMOSTSAFE
 */
int
sys_kevent(struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct timespec ts, *tsp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct kevent_copyin_args *kap, ka;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else {
		tsp = NULL;
	}
	fp = holdfp(p->p_fd, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		fdrop(fp);
		return (EBADF);
	}

	kq = (struct kqueue *)fp->f_data;

	kap = &ka;
	kap->ka = uap;
	kap->pchanges = 0;

	error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
			    kevent_copyin, kevent_copyout, tsp);

	fdrop(fp);

	return (error);
}

int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
	struct lwkt_token *tok;
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		return (EINVAL);
	}

	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);
	if (fops->f_flags & FILTEROP_ISFD) {
		/* validate descriptor */
		fp = holdfp(fdp, kev->ident, -1);
		if (fp == NULL) {
			lwkt_reltoken(tok);
			return (EBADF);
		}
		lwkt_getpooltoken(&fp->f_klist);
again1:
		SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
			if (kn->kn_kq == kq &&
			    kn->kn_filter == kev->filter &&
			    kn->kn_id == kev->ident) {
				if (knote_acquire(kn) == 0)
					goto again1;
				break;
			}
		}
		lwkt_relpooltoken(&fp->f_klist);
	} else {
		if (kq->kq_knhashmask) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			lwkt_getpooltoken(list);
again2:
			SLIST_FOREACH(kn, list, kn_link) {
				if (kn->kn_id == kev->ident &&
				    kn->kn_filter == kev->filter) {
					if (knote_acquire(kn) == 0)
						goto again2;
					break;
				}
			}
			lwkt_relpooltoken(list);
		}
	}

	/*
	 * NOTE: At this point if kn is non-NULL we will have acquired
	 *	 it and set KN_PROCESSING.
	 */
	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = knote_alloc();
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			/*
			 * KN_PROCESSING prevents the knote from getting
			 * ripped out from under us while we are trying
			 * to attach it, in case the attach blocks.
			 */
			kn->kn_status = KN_PROCESSING;
			knote_attach(kn);
			if ((error = filter_attach(kn)) != 0) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
				knote_drop(kn);
				goto done;
			}

			/*
			 * Interlock against close races which either tried
			 * to remove our knote while we were blocked or missed
			 * it entirely prior to our attachment.  We do not
			 * want to end up with a knote on a closed descriptor.
			 */
			if ((fops->f_flags & FILTEROP_ISFD) &&
			    checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			KKASSERT(kn->kn_status & KN_PROCESSING);
			if (fops == &user_filtops) {
				filt_usertouch(kn, kev, EVENT_REGISTER);
			} else {
				kn->kn_sfflags = kev->fflags;
				kn->kn_sdata = kev->data;
				kn->kn_kevent.udata = kev->udata;
			}
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	} else if (kev->flags & EV_DELETE) {
		/*
		 * Delete the existing knote
		 */
		knote_detach_and_drop(kn);
		goto done;
	} else {
		/*
		 * Modify an existing event.
		 *
		 * The user may change some filter values after the
		 * initial EV_ADD, but doing so will not reset any
		 * filters which have already been triggered.
		 */
		KKASSERT(kn->kn_status & KN_PROCESSING);
		if (fops == &user_filtops) {
			filt_usertouch(kn, kev, EVENT_REGISTER);
		} else {
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	}

	/*
	 * Disablement does not deactivate a knote here.
	 */
	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	/*
	 * Re-enablement may have to immediately enqueue an active knote.
	 */
	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0)) {
			knote_enqueue(kn);
		}
	}

	/*
	 * Handle any required reprocessing
	 */
	knote_release(kn);
	/* kn may be invalid now */

done:
	lwkt_reltoken(tok);
	if (fp != NULL)
		fdrop(fp);
	return (error);
}
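/*
 * Illustrative userland usage of EVFILT_USER as handled by
 * kqueue_register() above and filt_usertouch() (a sketch, not part of
 * this file): the event is armed once with EV_ADD, then fired from any
 * thread with NOTE_TRIGGER, waking a kevent() waiter.
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */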
/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
	    struct knote *marker)
{
	struct knote *kn, local_marker;
	int total;

	total = 0;
	local_marker.kn_filter = EVFILT_MARKER;
	local_marker.kn_status = KN_PROCESSING;

	lwkt_getpooltoken(kq);

	/*
	 * Collect events.
	 */
	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
	while (count) {
		kn = TAILQ_NEXT(&local_marker, kn_tqe);
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Marker reached, we are done */
			if (kn == marker)
				break;

			/* Move local marker past some other thread's marker */
			kn = TAILQ_NEXT(kn, kn_tqe);
			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
			continue;
		}

		/*
		 * We can't skip a knote undergoing processing, otherwise
		 * we risk not returning it when the user process expects
		 * it should be returned.  Sleep and retry.
		 */
		if (knote_acquire(kn) == 0)
			continue;

		/*
		 * Remove the event for processing.
		 *
		 * WARNING!  We must leave KN_QUEUED set to prevent the
		 *	     event from being KNOTE_ACTIVATE()d while
		 *	     the queue state is in limbo, in case we
		 *	     block.
		 */
		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
		kq->kq_count--;

		/*
		 * We have to deal with an extremely important race against
		 * file descriptor close()s here.  The file descriptor can
		 * disappear MPSAFE, and there is a small window of
		 * opportunity between that and the call to knote_fdclose().
		 *
		 * If we hit that window here while doselect or dopoll is
		 * trying to delete a spurious event they will not be able
		 * to match up the event against a knote and will go haywire.
		 */
		if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
		    checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident, kn->kn_fp)) {
			kn->kn_status |= KN_DELETING | KN_REPROCESS;
		}

		if (kn->kn_status & KN_DISABLED) {
			/*
			 * If disabled we ensure the event is not queued
			 * but leave its active bit set.  On re-enablement
			 * the event may be immediately triggered.
			 */
			kn->kn_status &= ~KN_QUEUED;
		} else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
			   (kn->kn_status & KN_DELETING) == 0 &&
			   filter_event(kn, 0) == 0) {
			/*
			 * If not running in one-shot mode and the event
			 * is no longer present we ensure it is removed
			 * from the queue and ignore it.
			 */
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
		} else {
			/*
			 * Post the event
			 */
			if (kn->kn_fop == &user_filtops)
				filt_usertouch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			++kevp;
			++total;
			--count;

			if (kn->kn_flags & EV_ONESHOT) {
				kn->kn_status &= ~KN_QUEUED;
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			} else {
				if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
					if (kn->kn_flags & EV_CLEAR) {
						kn->kn_data = 0;
						kn->kn_fflags = 0;
					}
					if (kn->kn_flags & EV_DISPATCH) {
						kn->kn_status |= KN_DISABLED;
					}
					kn->kn_status &= ~(KN_QUEUED |
							   KN_ACTIVE);
				} else {
					TAILQ_INSERT_TAIL(&kq->kq_knpend, kn,
							  kn_tqe);
					kq->kq_count++;
				}
			}
		}

		/*
		 * Handle any post-processing states
		 */
		knote_release(kn);
	}
	TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

	lwkt_relpooltoken(kq);
	return (total);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 *
 * MPSAFE
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPSAFE
 */
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPALMOSTSAFE
 */
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	     struct ucred *cred, struct sysmsg *msg)
{
	struct lwkt_token *tok;
	struct kqueue *kq;
	int error;

	kq = (struct kqueue *)fp->f_data;
	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);

	switch(com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(tok);
	return (error);
}

/*
 * MPSAFE
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

/*
 * MPSAFE
 */
static int
kqueue_close(struct file *fp)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	kqueue_terminate(kq);

	fp->f_data = NULL;
	funsetown(&kq->kq_sigio);

	kfree(kq, M_KQUEUE);
	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		if (kq_wakeup_one)
			wakeup_one(kq);
		else
			wakeup(kq);
	}
	KNOTE(&kq->kq_kqinfo.ki_note, 0);
}
/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static int
filter_attach(struct knote *kn)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_attach(kn);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_attach(kn);
		rel_mplock();
	}
	return (ret);
}

/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static void
knote_detach_and_drop(struct knote *kn)
{
	kn->kn_status |= KN_DELETING | KN_REPROCESS;
	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		kn->kn_fop->f_detach(kn);
	} else {
		get_mplock();
		kn->kn_fop->f_detach(kn);
		rel_mplock();
	}
	knote_drop(kn);
}

/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 *
 * Caller must be holding the related kq token
 */
static int
filter_event(struct knote *kn, long hint)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_event(kn, hint);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_event(kn, hint);
		rel_mplock();
	}
	return (ret);
}

/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * sticky.
 */
void
knote(struct klist *list, long hint)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(list);
restart:
	SLIST_FOREACH(kn, list, kn_next) {
		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);

		/* temporary verification hack */
		SLIST_FOREACH(kntmp, list, kn_next) {
			if (kn == kntmp)
				break;
		}
		if (kn != kntmp || kn->kn_kq != kq) {
			lwkt_relpooltoken(kq);
			goto restart;
		}

		if (kn->kn_status & KN_PROCESSING) {
			/*
			 * Someone else is processing the knote, ask the
			 * other thread to reprocess it and don't mess
			 * with it otherwise.
			 */
			if (hint == 0) {
				kn->kn_status |= KN_REPROCESS;
				lwkt_relpooltoken(kq);
				continue;
			}

			/*
			 * If the hint is non-zero we have to wait or risk
			 * losing the state the caller is trying to update.
			 *
			 * XXX This is a real problem, certain process
			 *     and signal filters will bump kn_data for
			 *     already-processed notes more than once if
			 *     we restart the list scan.  FIXME.
			 */
			kn->kn_status |= KN_WAITING | KN_REPROCESS;
			tsleep(kn, 0, "knotec", hz);
			lwkt_relpooltoken(kq);
			goto restart;
		}

		/*
		 * Become the reprocessing master ourselves.
		 *
		 * If hint is non-zero running the event is mandatory
		 * when not deleting so do it whether reprocessing is
		 * set or not.
		 */
		kn->kn_status |= KN_PROCESSING;
		if ((kn->kn_status & KN_DELETING) == 0) {
			if (filter_event(kn, hint))
				KNOTE_ACTIVATE(kn);
		}
		if (knote_release(kn)) {
			lwkt_relpooltoken(kq);
			goto restart;
		}
		lwkt_relpooltoken(kq);
	}
	lwkt_relpooltoken(list);
}

/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_INSERT_HEAD(klist, kn, kn_next);
	lwkt_relpooltoken(klist);
}

/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_REMOVE(klist, kn, knote, kn_next);
	lwkt_relpooltoken(klist);
}

#if 0
/*
 * Remove all knotes from a specified klist
 *
 * Only called from aio.
 */
void
knote_empty(struct klist *list)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
	while ((kn = SLIST_FIRST(list)) != NULL) {
		if (knote_acquire(kn))
			knote_detach_and_drop(kn);
	}
	lwkt_reltoken(&kq_token);
}
#endif

void
knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
		    struct filterops *ops, void *hook)
{
	struct kqueue *kq;
	struct knote *kn;

	lwkt_getpooltoken(&src->ki_note);
	lwkt_getpooltoken(&dst->ki_note);
	while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);
		if (SLIST_FIRST(&src->ki_note) != kn || kn->kn_kq != kq) {
			lwkt_relpooltoken(kq);
			continue;
		}
		if (knote_acquire(kn)) {
			knote_remove(&src->ki_note, kn);
			kn->kn_fop = ops;
			kn->kn_hook = hook;
			knote_insert(&dst->ki_note, kn);
			knote_release(kn);
			/* kn may be invalid now */
		}
		lwkt_relpooltoken(kq);
	}
	lwkt_relpooltoken(&dst->ki_note);
	lwkt_relpooltoken(&src->ki_note);
}

/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(&fp->f_klist);
restart:
	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
			kq = kn->kn_kq;
			lwkt_getpooltoken(kq);

			/* temporary verification hack */
			SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
				if (kn == kntmp)
					break;
			}
			if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
			    kn->kn_id != fd || kn->kn_kq != kq) {
				lwkt_relpooltoken(kq);
				goto restart;
			}
			if (knote_acquire(kn))
				knote_detach_and_drop(kn);
			lwkt_relpooltoken(kq);
			goto restart;
		}
	}
	lwkt_relpooltoken(&fp->f_klist);
}
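/*
 * Typical driver-side use of knote_insert()/knote_remove(), as a sketch
 * modeled on kqueue_kqfilter() and filt_kqdetach() earlier in this file
 * (foo_softc and fooread_filtops are hypothetical):
 *
 *	static int
 *	foo_kqfilter(struct file *fp, struct knote *kn)
 *	{
 *		struct foo_softc *sc = fp->f_data;
 *
 *		kn->kn_fop = &fooread_filtops;
 *		knote_insert(&sc->sc_kqinfo.ki_note, kn);
 *		return (0);
 *	}
 */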
/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_attach(struct knote *kn)
{
	struct klist *list;
	struct kqueue *kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		KKASSERT(kn->kn_fp);
		list = &kn->kn_fp->f_klist;
	} else {
		if (kq->kq_knhashmask == 0)
			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
						 &kq->kq_knhashmask);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	lwkt_getpooltoken(list);
	SLIST_INSERT_HEAD(list, kn, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
}

/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_drop(struct knote *kn)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		list = &kn->kn_fp->f_klist;
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	lwkt_getpooltoken(list);
	SLIST_REMOVE(list, kn, knote, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		fdrop(kn->kn_fp);
		kn->kn_fp = NULL;
	}
	knote_free(kn);
}

/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	++kq->kq_count;

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);

	kqueue_wakeup(kq);
}

/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static struct knote *
knote_alloc(void)
{
	return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
}

static void
knote_free(struct knote *kn)
{
	kfree(kn, M_KQUEUE);
}