/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/ktr.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>

/*
 * Global token for kqueue subsystem
 */
#if 0
struct lwkt_token kq_token = LWKT_TOKEN_INITIALIZER(kq_token);
SYSCTL_LONG(_lwkt, OID_AUTO, kq_collisions,
    CTLFLAG_RW, &kq_token.t_collisions, 0,
    "Collision counter of kq_token");
#endif

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
        struct kevent_args      *ka;
        int                     pchanges;
};

static int      kqueue_sleep(struct kqueue *kq, struct timespec *tsp);
static int      kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
                    struct knote *marker);
static int      kqueue_read(struct file *fp, struct uio *uio,
                    struct ucred *cred, int flags);
static int      kqueue_write(struct file *fp, struct uio *uio,
                    struct ucred *cred, int flags);
static int      kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
                    struct ucred *cred, struct sysmsg *msg);
static int      kqueue_kqfilter(struct file *fp, struct knote *kn);
static int      kqueue_stat(struct file *fp, struct stat *st,
                    struct ucred *cred);
static int      kqueue_close(struct file *fp);
static void     kqueue_wakeup(struct kqueue *kq);
static int      filter_attach(struct knote *kn);
static int      filter_event(struct knote *kn, long hint);

/*
 * MPSAFE
 */
static struct fileops kqueueops = {
        .fo_read = kqueue_read,
        .fo_write = kqueue_write,
        .fo_ioctl = kqueue_ioctl,
        .fo_kqfilter = kqueue_kqfilter,
        .fo_stat = kqueue_stat,
        .fo_close = kqueue_close,
        .fo_shutdown = nofo_shutdown
};

static void     knote_attach(struct knote *kn);
static void     knote_drop(struct knote *kn);
static void     knote_detach_and_drop(struct knote *kn);
static void     knote_enqueue(struct knote *kn);
static void     knote_dequeue(struct knote *kn);
static struct   knote *knote_alloc(void);
static void     knote_free(struct knote *kn);

static void     filt_kqdetach(struct knote *kn);
static int      filt_kqueue(struct knote *kn, long hint);
static int      filt_procattach(struct knote *kn);
static void     filt_procdetach(struct knote *kn);
static int      filt_proc(struct knote *kn, long hint);
static int      filt_fileattach(struct knote *kn);
static void     filt_timerexpire(void *knx);
static int      filt_timerattach(struct knote *kn);
static void     filt_timerdetach(struct knote *kn);
static int      filt_timer(struct knote *kn, long hint);

static struct filterops file_filtops =
        { FILTEROP_ISFD, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
        { FILTEROP_ISFD, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
        { 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
        { 0, filt_timerattach, filt_timerdetach, filt_timer };

static int              kq_ncallouts = 0;
static int              kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int              kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of kevent scan loops before a sanity panic");

#define KNOTE_ACTIVATE(kn) do {                                         \
        kn->kn_status |= KN_ACTIVE;                                     \
        if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)           \
                knote_enqueue(kn);                                      \
} while(0)

#define KN_HASHSIZE             64              /* XXX should be tunable */
#define KN_HASH(val, mask)      (((val) ^ (val >> 8)) & (mask))

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
        &file_filtops,                  /* EVFILT_READ */
        &file_filtops,                  /* EVFILT_WRITE */
        &aio_filtops,                   /* EVFILT_AIO */
        &file_filtops,                  /* EVFILT_VNODE */
        &proc_filtops,                  /* EVFILT_PROC */
        &sig_filtops,                   /* EVFILT_SIGNAL */
        &timer_filtops,                 /* EVFILT_TIMER */
        &file_filtops,                  /* EVFILT_EXCEPT */
};
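/*
 * Example (not compiled): a minimal userland sketch of the kevent API
 * served by the filter table above, waiting for a descriptor to become
 * readable.  The descriptor "fd" is a placeholder and error handling is
 * abbreviated; this is illustrative only and does not belong in the kernel.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <unistd.h>

int
wait_readable(int fd)
{
        struct kevent change;
        struct kevent result;
        int kq, n;

        kq = kqueue();          /* allocate a kqueue descriptor */
        if (kq < 0)
                return (-1);
        EV_SET(&change, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
        /* register the change and block for one event in a single call */
        n = kevent(kq, &change, 1, &result, 1, NULL);
        close(kq);
        return ((n > 0 && (result.flags & EV_ERROR) == 0) ? 0 : -1);
}
#endif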
static int
filt_fileattach(struct knote *kn)
{
        return (fo_kqfilter(kn->kn_fp, kn));
}

/*
 * MPSAFE
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        if (kn->kn_filter != EVFILT_READ)
                return (EOPNOTSUPP);

        kn->kn_fop = &kqread_filtops;
        knote_insert(&kq->kq_kqinfo.ki_note, kn);
        return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        knote_remove(&kq->kq_kqinfo.ki_note, kn);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        kn->kn_data = kq->kq_count;
        return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
        struct proc *p;
        int immediate;

        immediate = 0;
        p = pfind(kn->kn_id);
        if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
                p = zpfind(kn->kn_id);
                immediate = 1;
        }
        if (p == NULL) {
                return (ESRCH);
        }
        if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
                if (p)
                        PRELE(p);
                return (EACCES);
        }

        lwkt_gettoken(&p->p_token);
        kn->kn_ptr.p_proc = p;
        kn->kn_flags |= EV_CLEAR;               /* automatically set */

        /*
         * internal flag indicating registration done by kernel
         */
        if (kn->kn_flags & EV_FLAG1) {
                kn->kn_data = kn->kn_sdata;     /* ppid */
                kn->kn_fflags = NOTE_CHILD;
                kn->kn_flags &= ~EV_FLAG1;
        }

        knote_insert(&p->p_klist, kn);

        /*
         * Immediately activate any exit notes if the target process is a
         * zombie.  This is necessary to handle the case where the target
         * process, e.g. a child, dies before the kevent is registered.
         */
        if (immediate && filt_proc(kn, NOTE_EXIT))
                KNOTE_ACTIVATE(kn);
        lwkt_reltoken(&p->p_token);
        PRELE(p);

        return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
        struct proc *p;

        if (kn->kn_status & KN_DETACHED)
                return;
        /* XXX locking?  take proc_token here? */
        p = kn->kn_ptr.p_proc;
        knote_remove(&p->p_klist, kn);
}
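/*
 * Example (not compiled): a hypothetical userland sketch of the process
 * filter implemented below, watching a child for exit.  Per filt_proc(),
 * NOTE_EXIT delivers the exit status in kev.data.  "pid" is assumed valid
 * and error handling is abbreviated; illustrative only.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>

int
wait_for_exit(int kq, pid_t pid)
{
        struct kevent kev;

        /* one-shot exit watch; auto-deleted after delivery */
        EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT,
            0, NULL);
        if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
                return (-1);
        /* block until the exit event arrives */
        return (kevent(kq, NULL, 0, &kev, 1, NULL) == 1 ? 0 : -1);
}
#endif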
static int
filt_proc(struct knote *kn, long hint)
{
        u_int event;

        /*
         * mask off extra data
         */
        event = (u_int)hint & NOTE_PCTRLMASK;

        /*
         * if the user is interested in this event, record it.
         */
        if (kn->kn_sfflags & event)
                kn->kn_fflags |= event;

        /*
         * Process is gone, so flag the event as finished.  Detach the
         * knote from the process now because the process will be poof,
         * gone later on.
         */
        if (event == NOTE_EXIT) {
                struct proc *p = kn->kn_ptr.p_proc;
                if ((kn->kn_status & KN_DETACHED) == 0) {
                        PHOLD(p);
                        knote_remove(&p->p_klist, kn);
                        kn->kn_status |= KN_DETACHED;
                        kn->kn_data = p->p_xstat;
                        kn->kn_ptr.p_proc = NULL;
                        PRELE(p);
                }
                kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
                return (1);
        }

        /*
         * process forked, and user wants to track the new process,
         * so attach a new knote to it, and immediately report an
         * event with the parent's pid.
         */
        if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
                struct kevent kev;
                int error;

                /*
                 * register knote with new process.
                 */
                kev.ident = hint & NOTE_PDATAMASK;      /* pid */
                kev.filter = kn->kn_filter;
                kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
                kev.fflags = kn->kn_sfflags;
                kev.data = kn->kn_id;                   /* parent */
                kev.udata = kn->kn_kevent.udata;        /* preserve udata */
                error = kqueue_register(kn->kn_kq, &kev);
                if (error)
                        kn->kn_fflags |= NOTE_TRACKERR;
        }

        return (kn->kn_fflags != 0);
}

/*
 * The callout interlocks with callout_terminate() but can still
 * race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
        struct lwkt_token *tok;
        struct knote *kn = knx;
        struct callout *calloutp;
        struct timeval tv;
        int tticks;

        tok = lwkt_token_pool_lookup(kn->kn_kq);
        lwkt_gettoken(tok);
        if ((kn->kn_status & KN_DELETING) == 0) {
                kn->kn_data++;
                KNOTE_ACTIVATE(kn);

                if ((kn->kn_flags & EV_ONESHOT) == 0) {
                        tv.tv_sec = kn->kn_sdata / 1000;
                        tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
                        tticks = tvtohz_high(&tv);
                        calloutp = (struct callout *)kn->kn_hook;
                        callout_reset(calloutp, tticks, filt_timerexpire, kn);
                }
        }
        lwkt_reltoken(tok);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
        struct callout *calloutp;
        struct timeval tv;
        int tticks;

        if (kq_ncallouts >= kq_calloutmax) {
                kn->kn_hook = NULL;
                return (ENOMEM);
        }
        kq_ncallouts++;

        tv.tv_sec = kn->kn_sdata / 1000;
        tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
        tticks = tvtohz_high(&tv);

        kn->kn_flags |= EV_CLEAR;               /* automatically set */
        calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
        callout_init(calloutp);
        kn->kn_hook = (caddr_t)calloutp;
        callout_reset(calloutp, tticks, filt_timerexpire, kn);

        return (0);
}

/*
 * This function is called with the knote flagged locked but it is
 * still possible to race a callout event due to the callback blocking.
 * We must call callout_terminate() instead of callout_stop() to deal
 * with the race.
 */
static void
filt_timerdetach(struct knote *kn)
{
        struct callout *calloutp;

        calloutp = (struct callout *)kn->kn_hook;
        callout_terminate(calloutp);
        kfree(calloutp, M_KQUEUE);
        kq_ncallouts--;
}

static int
filt_timer(struct knote *kn, long hint)
{
        return (kn->kn_data != 0);
}
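/*
 * Example (not compiled): a hypothetical userland sketch of the timer
 * filter above.  Per filt_timerattach(), kev.data is the period in
 * milliseconds and EV_CLEAR is set automatically, so each scan reports
 * the number of expirations since the last scan in kev.data.  The names
 * are illustrative only.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>

int
add_periodic_timer(int kq, uintptr_t ident, int period_ms)
{
        struct kevent kev;

        EV_SET(&kev, ident, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0,
            period_ms, NULL);
        return (kevent(kq, &kev, 1, NULL, 0, NULL));
}
#endif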
/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 *
 * Related kq token must be held.
 */
static __inline
int
knote_acquire(struct knote *kn)
{
        if (kn->kn_status & KN_PROCESSING) {
                kn->kn_status |= KN_WAITING | KN_REPROCESS;
                tsleep(kn, 0, "kqepts", hz);
                /* knote may be stale now */
                return(0);
        }
        kn->kn_status |= KN_PROCESSING;
        return(1);
}

/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Caller must be holding the related kq token
 *
 * Non-zero is returned if the knote is destroyed or detached.
 */
static __inline
int
knote_release(struct knote *kn)
{
        while (kn->kn_status & KN_REPROCESS) {
                kn->kn_status &= ~KN_REPROCESS;
                if (kn->kn_status & KN_WAITING) {
                        kn->kn_status &= ~KN_WAITING;
                        wakeup(kn);
                }
                if (kn->kn_status & KN_DELETING) {
                        knote_detach_and_drop(kn);
                        return(1);
                        /* NOT REACHED */
                }
                if (filter_event(kn, 0))
                        KNOTE_ACTIVATE(kn);
        }
        if (kn->kn_status & KN_DETACHED) {
                kn->kn_status &= ~KN_PROCESSING;
                return(1);
        } else {
                kn->kn_status &= ~KN_PROCESSING;
                return(0);
        }
}

/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 *
 * MPSAFE
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
        TAILQ_INIT(&kq->kq_knpend);
        TAILQ_INIT(&kq->kq_knlist);
        kq->kq_count = 0;
        kq->kq_fdp = fdp;
        SLIST_INIT(&kq->kq_kqinfo.ki_note);
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
        struct lwkt_token *tok;
        struct knote *kn;

        tok = lwkt_token_pool_lookup(kq);
        lwkt_gettoken(tok);
        while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
                if (knote_acquire(kn))
                        knote_detach_and_drop(kn);
        }
        if (kq->kq_knhash) {
                kfree(kq->kq_knhash, M_KQUEUE);
                kq->kq_knhash = NULL;
                kq->kq_knhashmask = 0;
        }
        lwkt_reltoken(tok);
}

/*
 * MPSAFE
 */
int
sys_kqueue(struct kqueue_args *uap)
{
        struct thread *td = curthread;
        struct kqueue *kq;
        struct file *fp;
        int fd, error;

        error = falloc(td->td_lwp, &fp, &fd);
        if (error)
                return (error);
        fp->f_flag = FREAD | FWRITE;
        fp->f_type = DTYPE_KQUEUE;
        fp->f_ops = &kqueueops;

        kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
        kqueue_init(kq, td->td_proc->p_fd);
        fp->f_data = kq;

        fsetfd(kq->kq_fdp, fp, fd);
        uap->sysmsg_result = fd;
        fdrop(fp);
        return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
        struct kevent_copyin_args *kap;
        int error;

        kap = (struct kevent_copyin_args *)arg;

        error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
        if (error == 0) {
                kap->ka->eventlist += count;
                *res += count;
        } else {
                *res = -1;
        }

        return (error);
}
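/*
 * Example (not compiled): kern_kevent() below takes its copyin/copyout
 * functions as parameters, so an in-kernel consumer can supply its own
 * pair instead of the user-memory versions used by sys_kevent().  A
 * hypothetical sketch feeding changes from a kernel-resident array; the
 * struct and names are illustrative only.
 */
#if 0
struct kevent_kern_args {
        struct kevent   *changes;       /* kernel-resident changelist */
        int             nchanges;
        int             pchanges;       /* progress index */
};

static int
kevent_copyin_kern(void *arg, struct kevent *kevp, int max, int *events)
{
        struct kevent_kern_args *ka = arg;
        int count;

        count = min(ka->nchanges - ka->pchanges, max);
        /* no copyin needed, the source is already kernel memory */
        bcopy(ka->changes + ka->pchanges, kevp, count * sizeof(*kevp));
        ka->pchanges += count;
        *events = count;
        return (0);
}
#endif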
/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
        struct kevent_copyin_args *kap;
        int error, count;

        kap = (struct kevent_copyin_args *)arg;

        count = min(kap->ka->nchanges - kap->pchanges, max);
        error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
        if (error == 0) {
                kap->ka->changelist += count;
                kap->pchanges += count;
                *events = count;
        }

        return (error);
}

/*
 * MPSAFE
 */
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
            k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
            struct timespec *tsp_in)
{
        struct kevent *kevp;
        struct timespec *tsp;
        int i, n, total, error, nerrors = 0;
        int lres;
        int limit = kq_checkloop;
        struct kevent kev[KQ_NEVENTS];
        struct knote marker;
        struct lwkt_token *tok;

        tsp = tsp_in;
        *res = 0;

        tok = lwkt_token_pool_lookup(kq);
        lwkt_gettoken(tok);
        for ( ;; ) {
                n = 0;
                error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
                if (error)
                        goto done;
                if (n == 0)
                        break;
                for (i = 0; i < n; i++) {
                        kevp = &kev[i];
                        kevp->flags &= ~EV_SYSFLAGS;
                        error = kqueue_register(kq, kevp);

                        /*
                         * If a registration returns an error we
                         * immediately post the error.  The kevent()
                         * call itself will fail with the error if
                         * no space is available for posting.
                         *
                         * Such errors normally bypass the timeout/blocking
                         * code.  However, if the copyoutfn function refuses
                         * to post the error (see sys_poll()), then we
                         * ignore it too.
                         */
                        if (error) {
                                kevp->flags = EV_ERROR;
                                kevp->data = error;
                                lres = *res;
                                kevent_copyoutfn(uap, kevp, 1, res);
                                if (*res < 0) {
                                        goto done;
                                } else if (lres != *res) {
                                        nevents--;
                                        nerrors++;
                                }
                        }
                }
        }
        if (nerrors) {
                error = 0;
                goto done;
        }

        /*
         * Acquire/wait for events - setup timeout
         */
        if (tsp != NULL) {
                struct timespec ats;

                if (tsp->tv_sec || tsp->tv_nsec) {
                        nanouptime(&ats);
                        timespecadd(tsp, &ats);         /* tsp = target time */
                }
        }

        /*
         * Loop as required.
         *
         * Collect as many events as we can.  Sleeping on successive
         * loops is disabled if copyoutfn has incremented (*res).
         *
         * The loop stops if an error occurs, all events have been
         * scanned (the marker has been reached), or fewer than the
         * maximum number of events is found.
         *
         * The copyoutfn function does not have to increment (*res) in
         * order for the loop to continue.
         *
         * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
         */
        total = 0;
        error = 0;
        marker.kn_filter = EVFILT_MARKER;
        marker.kn_status = KN_PROCESSING;
        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
        while ((n = nevents - total) > 0) {
                if (n > KQ_NEVENTS)
                        n = KQ_NEVENTS;

                /*
                 * If no events are pending sleep until timeout (if any)
                 * or an event occurs.
                 *
                 * After the sleep completes the marker is moved to the
                 * end of the list, making any received events available
                 * to our scan.
                 */
                if (kq->kq_count == 0 && *res == 0) {
                        error = kqueue_sleep(kq, tsp);
                        if (error)
                                break;

                        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
                        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
                }

                /*
                 * Process all received events
                 * Account for all non-spurious events in our total
                 */
                i = kqueue_scan(kq, kev, n, &marker);
                if (i) {
                        lres = *res;
                        error = kevent_copyoutfn(uap, kev, i, res);
                        total += *res - lres;
                        if (error)
                                break;
                }
                if (limit && --limit == 0)
                        panic("kqueue: checkloop failed i=%d", i);

                /*
                 * Normally when fewer events are returned than requested
                 * we can stop.  However, if only spurious events were
                 * collected the copyout will not bump (*res) and we have
                 * to continue.
                 */
                if (i < n && *res)
                        break;

                /*
                 * Deal with an edge case where spurious events can cause
                 * a loop to occur without moving the marker.  This can
                 * prevent kqueue_scan() from picking up new events which
                 * race us.  We must be sure to move the marker for this
                 * case.
                 *
                 * NOTE: We do not want to move the marker if events
                 *       were scanned because normal kqueue operations
                 *       may reactivate events.  Moving the marker in
                 *       that case could result in duplicates for the
                 *       same event.
                 */
                if (i == 0) {
                        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
                        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
                }
        }
        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);

        /* Timeouts do not return EWOULDBLOCK. */
        if (error == EWOULDBLOCK)
                error = 0;

done:
        lwkt_reltoken(tok);
        return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_kevent(struct kevent_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct timespec ts, *tsp;
        struct kqueue *kq;
        struct file *fp = NULL;
        struct kevent_copyin_args *kap, ka;
        int error;

        if (uap->timeout) {
                error = copyin(uap->timeout, &ts, sizeof(ts));
                if (error)
                        return (error);
                tsp = &ts;
        } else {
                tsp = NULL;
        }

        fp = holdfp(p->p_fd, uap->fd, -1);
        if (fp == NULL)
                return (EBADF);
        if (fp->f_type != DTYPE_KQUEUE) {
                fdrop(fp);
                return (EBADF);
        }

        kq = (struct kqueue *)fp->f_data;

        kap = &ka;
        kap->ka = uap;
        kap->pchanges = 0;

        error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
                            kevent_copyin, kevent_copyout, tsp);

        fdrop(fp);

        return (error);
}
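/*
 * Example (not compiled): as kern_kevent() shows above, per-change
 * registration errors are posted back to the eventlist as receipts with
 * EV_ERROR set and the errno in kev.data.  A hypothetical userland caller
 * checking for such receipts with a zero timeout so the call never blocks;
 * names are illustrative only.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

int
apply_changes(int kq, struct kevent *changes, int n)
{
        struct kevent results[64];
        struct timespec zero = { 0, 0 };
        int i, r;

        if (n > 64)
                return (-1);
        /* zero timeout: post receipts/pending events, do not sleep */
        r = kevent(kq, changes, n, results, n, &zero);
        for (i = 0; i < r; i++) {
                if (results[i].flags & EV_ERROR)
                        return ((int)results[i].data);  /* first errno */
        }
        return (0);
}
#endif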
/*
 * Caller must be holding the kq token
 */
int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
        struct lwkt_token *tok;
        struct filedesc *fdp = kq->kq_fdp;
        struct filterops *fops;
        struct file *fp = NULL;
        struct knote *kn = NULL;
        int error = 0;

        if (kev->filter < 0) {
                if (kev->filter + EVFILT_SYSCOUNT < 0)
                        return (EINVAL);
                fops = sysfilt_ops[~kev->filter];       /* to 0-base index */
        } else {
                /*
                 * XXX
                 * filter attach routine is responsible for ensuring that
                 * the identifier can be attached to it.
                 */
                kprintf("unknown filter: %d\n", kev->filter);
                return (EINVAL);
        }

        tok = lwkt_token_pool_lookup(kq);
        lwkt_gettoken(tok);
        if (fops->f_flags & FILTEROP_ISFD) {
                /* validate descriptor */
                fp = holdfp(fdp, kev->ident, -1);
                if (fp == NULL) {
                        lwkt_reltoken(tok);
                        return (EBADF);
                }
                lwkt_getpooltoken(&fp->f_klist);
again1:
                SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
                        if (kn->kn_kq == kq &&
                            kn->kn_filter == kev->filter &&
                            kn->kn_id == kev->ident) {
                                if (knote_acquire(kn) == 0)
                                        goto again1;
                                break;
                        }
                }
                lwkt_relpooltoken(&fp->f_klist);
        } else {
                if (kq->kq_knhashmask) {
                        struct klist *list;

                        list = &kq->kq_knhash[
                            KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
                        lwkt_getpooltoken(list);
again2:
                        SLIST_FOREACH(kn, list, kn_link) {
                                if (kn->kn_id == kev->ident &&
                                    kn->kn_filter == kev->filter) {
                                        if (knote_acquire(kn) == 0)
                                                goto again2;
                                        break;
                                }
                        }
                        lwkt_relpooltoken(list);
                }
        }

        /*
         * NOTE: At this point if kn is non-NULL we will have acquired
         *       it and set KN_PROCESSING.
         */
        if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
                error = ENOENT;
                goto done;
        }

        /*
         * kn now contains the matching knote, or NULL if no match
         */
        if (kev->flags & EV_ADD) {
                if (kn == NULL) {
                        kn = knote_alloc();
                        if (kn == NULL) {
                                error = ENOMEM;
                                goto done;
                        }
                        kn->kn_fp = fp;
                        kn->kn_kq = kq;
                        kn->kn_fop = fops;

                        /*
                         * apply reference count to knote structure, and
                         * do not release it at the end of this routine.
                         */
                        fp = NULL;

                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
                        kev->fflags = 0;
                        kev->data = 0;
                        kn->kn_kevent = *kev;

                        /*
                         * KN_PROCESSING prevents the knote from getting
                         * ripped out from under us while we are trying
                         * to attach it, in case the attach blocks.
                         */
                        kn->kn_status = KN_PROCESSING;
                        knote_attach(kn);
                        if ((error = filter_attach(kn)) != 0) {
                                kn->kn_status |= KN_DELETING | KN_REPROCESS;
                                knote_drop(kn);
                                goto done;
                        }

                        /*
                         * Interlock against close races which either tried
                         * to remove our knote while we were blocked or missed
                         * it entirely prior to our attachment.  We do not
                         * want to end up with a knote on a closed descriptor.
                         */
                        if ((fops->f_flags & FILTEROP_ISFD) &&
                            checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
                                kn->kn_status |= KN_DELETING | KN_REPROCESS;
                        }
                } else {
                        /*
                         * The user may change some filter values after the
                         * initial EV_ADD, but doing so will not reset any
                         * filters which have already been triggered.
                         */
                        KKASSERT(kn->kn_status & KN_PROCESSING);
                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
                        kn->kn_kevent.udata = kev->udata;
                }

                /*
                 * Execute the filter event to immediately activate the
                 * knote if necessary.  If reprocessing events are pending
                 * due to blocking above we do not run the filter here
                 * but instead let knote_release() do it.  Otherwise we
                 * might run the filter on a deleted event.
                 */
                if ((kn->kn_status & KN_REPROCESS) == 0) {
                        if (filter_event(kn, 0))
                                KNOTE_ACTIVATE(kn);
                }
        } else if (kev->flags & EV_DELETE) {
                /*
                 * Delete the existing knote
                 */
                knote_detach_and_drop(kn);
                goto done;
        }

        /*
         * Disablement does not deactivate a knote here.
         */
        if ((kev->flags & EV_DISABLE) &&
            ((kn->kn_status & KN_DISABLED) == 0)) {
                kn->kn_status |= KN_DISABLED;
        }

        /*
         * Re-enablement may have to immediately enqueue an active knote.
         */
        if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
                kn->kn_status &= ~KN_DISABLED;
                if ((kn->kn_status & KN_ACTIVE) &&
                    ((kn->kn_status & KN_QUEUED) == 0)) {
                        knote_enqueue(kn);
                }
        }

        /*
         * Handle any required reprocessing
         */
        knote_release(kn);
        /* kn may be invalid now */

done:
        lwkt_reltoken(tok);
        if (fp != NULL)
                fdrop(fp);
        return (error);
}

/*
 * Block as necessary until the target time is reached.
 * If tsp is NULL we block indefinitely.  If tsp->tv_sec/tv_nsec are both
 * 0 we do not block at all.
 *
 * Caller must be holding the kq token.
 */
static int
kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
{
        int error = 0;

        if (tsp == NULL) {
                kq->kq_state |= KQ_SLEEP;
                error = tsleep(kq, PCATCH, "kqread", 0);
        } else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
                error = EWOULDBLOCK;
        } else {
                struct timespec ats;
                struct timespec atx = *tsp;
                int timeout;

                nanouptime(&ats);
                timespecsub(&atx, &ats);
                if (atx.tv_sec < 0) {
                        error = EWOULDBLOCK;
                } else {
                        timeout = atx.tv_sec > 24 * 60 * 60 ?
                            24 * 60 * 60 * hz : tstohz_high(&atx);
                        kq->kq_state |= KQ_SLEEP;
                        error = tsleep(kq, PCATCH, "kqread", timeout);
                }
        }

        /* don't restart after signals... */
        if (error == ERESTART)
                return (EINTR);

        return (error);
}

/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 *
 * Caller must be holding the kq token
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
            struct knote *marker)
{
        struct knote *kn, local_marker;
        int total;

        total = 0;
        local_marker.kn_filter = EVFILT_MARKER;
        local_marker.kn_status = KN_PROCESSING;

        /*
         * Collect events.
         */
        TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
        while (count) {
                kn = TAILQ_NEXT(&local_marker, kn_tqe);
                if (kn->kn_filter == EVFILT_MARKER) {
                        /* Marker reached, we are done */
                        if (kn == marker)
                                break;

                        /* Move local marker past another thread's marker */
                        kn = TAILQ_NEXT(kn, kn_tqe);
                        TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
                        TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
                        continue;
                }

                /*
                 * We can't skip a knote undergoing processing, otherwise
                 * we risk not returning it when the user process expects
                 * it should be returned.  Sleep and retry.
                 */
                if (knote_acquire(kn) == 0)
                        continue;

                /*
                 * Remove the event for processing.
                 *
                 * WARNING!  We must leave KN_QUEUED set to prevent the
                 *           event from being KNOTE_ACTIVATE()d while
                 *           the queue state is in limbo, in case we
                 *           block.
                 *
                 * WARNING!  We must set KN_PROCESSING to avoid races
                 *           against deletion or another thread's
                 *           processing.
                 */
                TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
                kq->kq_count--;

                /*
                 * We have to deal with an extremely important race against
                 * file descriptor close()s here.  The file descriptor can
                 * disappear MPSAFE, and there is a small window of
                 * opportunity between that and the call to knote_fdclose().
                 *
                 * If we hit that window here while doselect or dopoll is
                 * trying to delete a spurious event they will not be able
                 * to match up the event against a knote and will go haywire.
                 */
                if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
                    checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident, kn->kn_fp)) {
                        kn->kn_status |= KN_DELETING | KN_REPROCESS;
                }

                if (kn->kn_status & KN_DISABLED) {
                        /*
                         * If disabled we ensure the event is not queued
                         * but leave its active bit set.  On re-enablement
                         * the event may be immediately triggered.
                         */
                        kn->kn_status &= ~KN_QUEUED;
                } else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
                           (kn->kn_status & KN_DELETING) == 0 &&
                           filter_event(kn, 0) == 0) {
                        /*
                         * If not running in one-shot mode and the event
                         * is no longer present we ensure it is removed
                         * from the queue and ignore it.
                         */
                        kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
                } else {
                        /*
                         * Post the event
                         */
                        *kevp++ = kn->kn_kevent;
                        ++total;
                        --count;

                        if (kn->kn_flags & EV_ONESHOT) {
                                kn->kn_status &= ~KN_QUEUED;
                                kn->kn_status |= KN_DELETING | KN_REPROCESS;
                        } else if (kn->kn_flags & EV_CLEAR) {
                                kn->kn_data = 0;
                                kn->kn_fflags = 0;
                                kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
                        } else {
                                TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
                                kq->kq_count++;
                        }
                }

                /*
                 * Handle any post-processing states
                 */
                knote_release(kn);
        }
        TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

        return (total);
}
/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 *
 * MPSAFE
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        return (ENXIO);
}

/*
 * MPSAFE
 */
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        return (ENXIO);
}

/*
 * MPALMOSTSAFE
 */
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
             struct ucred *cred, struct sysmsg *msg)
{
        struct lwkt_token *tok;
        struct kqueue *kq;
        int error;

        kq = (struct kqueue *)fp->f_data;
        tok = lwkt_token_pool_lookup(kq);
        lwkt_gettoken(tok);

        switch(com) {
        case FIOASYNC:
                if (*(int *)data)
                        kq->kq_state |= KQ_ASYNC;
                else
                        kq->kq_state &= ~KQ_ASYNC;
                error = 0;
                break;
        case FIOSETOWN:
                error = fsetown(*(int *)data, &kq->kq_sigio);
                break;
        default:
                error = ENOTTY;
                break;
        }
        lwkt_reltoken(tok);
        return (error);
}

/*
 * MPSAFE
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
        struct kqueue *kq = (struct kqueue *)fp->f_data;

        bzero((void *)st, sizeof(*st));
        st->st_size = kq->kq_count;
        st->st_blksize = sizeof(struct kevent);
        st->st_mode = S_IFIFO;
        return (0);
}

/*
 * MPSAFE
 */
static int
kqueue_close(struct file *fp)
{
        struct kqueue *kq = (struct kqueue *)fp->f_data;

        kqueue_terminate(kq);

        fp->f_data = NULL;
        funsetown(&kq->kq_sigio);

        kfree(kq, M_KQUEUE);
        return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
        if (kq->kq_state & KQ_SLEEP) {
                kq->kq_state &= ~KQ_SLEEP;
                wakeup(kq);
        }
        KNOTE(&kq->kq_kqinfo.ki_note, 0);
}

/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static int
filter_attach(struct knote *kn)
{
        int ret;

        if (!(kn->kn_fop->f_flags & FILTEROP_MPSAFE)) {
                get_mplock();
                ret = kn->kn_fop->f_attach(kn);
                rel_mplock();
        } else {
                ret = kn->kn_fop->f_attach(kn);
        }

        return (ret);
}

/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static void
knote_detach_and_drop(struct knote *kn)
{
        kn->kn_status |= KN_DELETING | KN_REPROCESS;
        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                kn->kn_fop->f_detach(kn);
        } else {
                get_mplock();
                kn->kn_fop->f_detach(kn);
                rel_mplock();
        }
        knote_drop(kn);
}
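/*
 * Example (not compiled): a hypothetical driver-side filter showing the
 * shape the filter_attach()/filter_event() dispatchers above expect.  The
 * softc structure, lookup helper and field names are illustrative only;
 * FILTEROP_MPSAFE tells the dispatchers to skip the mplock.
 */
#if 0
static int
filt_mydevattach(struct knote *kn)
{
        struct mydev_softc *sc = mydev_lookup(kn->kn_id);

        if (sc == NULL)
                return (ENXIO);
        kn->kn_hook = (caddr_t)sc;
        knote_insert(&sc->sc_kqinfo.ki_note, kn);
        return (0);
}

static void
filt_mydevdetach(struct knote *kn)
{
        struct mydev_softc *sc = (struct mydev_softc *)kn->kn_hook;

        knote_remove(&sc->sc_kqinfo.ki_note, kn);
}

static int
filt_mydevread(struct knote *kn, long hint)
{
        struct mydev_softc *sc = (struct mydev_softc *)kn->kn_hook;

        /* report readable bytes; non-zero activates the knote */
        kn->kn_data = sc->sc_bytes_avail;
        return (kn->kn_data > 0);
}

static struct filterops mydev_filtops =
        { FILTEROP_ISFD | FILTEROP_MPSAFE,
          filt_mydevattach, filt_mydevdetach, filt_mydevread };
#endif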
/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 *
 * Caller must be holding the related kq token
 */
static int
filter_event(struct knote *kn, long hint)
{
        int ret;

        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                ret = kn->kn_fop->f_event(kn, hint);
        } else {
                get_mplock();
                ret = kn->kn_fop->f_event(kn, hint);
                rel_mplock();
        }
        return (ret);
}

/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * sticky.
 */
void
knote(struct klist *list, long hint)
{
        struct kqueue *kq;
        struct knote *kn;
        struct knote *kntmp;

        lwkt_getpooltoken(list);
restart:
        SLIST_FOREACH(kn, list, kn_next) {
                kq = kn->kn_kq;
                lwkt_getpooltoken(kq);

                /* temporary verification hack */
                SLIST_FOREACH(kntmp, list, kn_next) {
                        if (kn == kntmp)
                                break;
                }
                if (kn != kntmp || kn->kn_kq != kq) {
                        lwkt_relpooltoken(kq);
                        goto restart;
                }

                if (kn->kn_status & KN_PROCESSING) {
                        /*
                         * Someone else is processing the knote, ask the
                         * other thread to reprocess it and don't mess
                         * with it otherwise.
                         */
                        if (hint == 0) {
                                kn->kn_status |= KN_REPROCESS;
                                lwkt_relpooltoken(kq);
                                continue;
                        }

                        /*
                         * If the hint is non-zero we have to wait or risk
                         * losing the state the caller is trying to update.
                         *
                         * XXX This is a real problem, certain process
                         *     and signal filters will bump kn_data for
                         *     already-processed notes more than once if
                         *     we restart the list scan.  FIXME.
                         */
                        kn->kn_status |= KN_WAITING | KN_REPROCESS;
                        tsleep(kn, 0, "knotec", hz);
                        lwkt_relpooltoken(kq);
                        goto restart;
                }

                /*
                 * Become the reprocessing master ourselves.
                 *
                 * If hint is non-zero running the event is mandatory
                 * when not deleting so do it whether reprocessing is
                 * set or not.
                 */
                kn->kn_status |= KN_PROCESSING;
                if ((kn->kn_status & KN_DELETING) == 0) {
                        if (filter_event(kn, hint))
                                KNOTE_ACTIVATE(kn);
                }
                if (knote_release(kn)) {
                        lwkt_relpooltoken(kq);
                        goto restart;
                }
                lwkt_relpooltoken(kq);
        }
        lwkt_relpooltoken(list);
}

/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
        lwkt_getpooltoken(klist);
        KKASSERT(kn->kn_status & KN_PROCESSING);
        SLIST_INSERT_HEAD(klist, kn, kn_next);
        lwkt_relpooltoken(klist);
}

/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
        lwkt_getpooltoken(klist);
        KKASSERT(kn->kn_status & KN_PROCESSING);
        SLIST_REMOVE(klist, kn, knote, kn_next);
        lwkt_relpooltoken(klist);
}
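/*
 * Example (not compiled): the producer side of the knote() entry point
 * above.  A hypothetical driver interrupt path reports new data on its
 * klist; kqueue_wakeup() above uses the same KNOTE() pattern.  The softc
 * and field names are illustrative only.
 */
#if 0
static void
mydev_intr(void *arg)
{
        struct mydev_softc *sc = arg;

        sc->sc_bytes_avail += MYDEV_BURST;
        /* run each attached filter's f_event and activate triggered knotes */
        KNOTE(&sc->sc_kqinfo.ki_note, 0);
}
#endif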
#if 0
/*
 * Remove all knotes from a specified klist
 *
 * Only called from aio.
 */
void
knote_empty(struct klist *list)
{
        struct knote *kn;

        lwkt_gettoken(&kq_token);
        while ((kn = SLIST_FIRST(list)) != NULL) {
                if (knote_acquire(kn))
                        knote_detach_and_drop(kn);
        }
        lwkt_reltoken(&kq_token);
}
#endif

void
knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
                    struct filterops *ops, void *hook)
{
        struct kqueue *kq;
        struct knote *kn;

        lwkt_getpooltoken(&src->ki_note);
        lwkt_getpooltoken(&dst->ki_note);
        while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
                kq = kn->kn_kq;
                lwkt_getpooltoken(kq);
                if (SLIST_FIRST(&src->ki_note) != kn || kn->kn_kq != kq) {
                        lwkt_relpooltoken(kq);
                        continue;
                }
                if (knote_acquire(kn)) {
                        knote_remove(&src->ki_note, kn);
                        kn->kn_fop = ops;
                        kn->kn_hook = hook;
                        knote_insert(&dst->ki_note, kn);
                        knote_release(kn);
                        /* kn may be invalid now */
                }
                lwkt_relpooltoken(kq);
        }
        lwkt_relpooltoken(&dst->ki_note);
        lwkt_relpooltoken(&src->ki_note);
}

/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
        struct kqueue *kq;
        struct knote *kn;
        struct knote *kntmp;

        lwkt_getpooltoken(&fp->f_klist);
restart:
        SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
                if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
                        kq = kn->kn_kq;
                        lwkt_getpooltoken(kq);

                        /* temporary verification hack */
                        SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
                                if (kn == kntmp)
                                        break;
                        }
                        if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
                            kn->kn_id != fd || kn->kn_kq != kq) {
                                lwkt_relpooltoken(kq);
                                goto restart;
                        }
                        if (knote_acquire(kn))
                                knote_detach_and_drop(kn);
                        lwkt_relpooltoken(kq);
                        goto restart;
                }
        }
        lwkt_relpooltoken(&fp->f_klist);
}

/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_attach(struct knote *kn)
{
        struct klist *list;
        struct kqueue *kq = kn->kn_kq;

        if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
                KKASSERT(kn->kn_fp);
                list = &kn->kn_fp->f_klist;
        } else {
                if (kq->kq_knhashmask == 0)
                        kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
                                                 &kq->kq_knhashmask);
                list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
        }
        lwkt_getpooltoken(list);
        SLIST_INSERT_HEAD(list, kn, kn_link);
        TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
        lwkt_relpooltoken(list);
}
/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_drop(struct knote *kn)
{
        struct kqueue *kq;
        struct klist *list;

        kq = kn->kn_kq;

        if (kn->kn_fop->f_flags & FILTEROP_ISFD)
                list = &kn->kn_fp->f_klist;
        else
                list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

        lwkt_getpooltoken(list);
        SLIST_REMOVE(list, kn, knote, kn_link);
        TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
        if (kn->kn_status & KN_QUEUED)
                knote_dequeue(kn);
        if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
                fdrop(kn->kn_fp);
                kn->kn_fp = NULL;
        }
        knote_free(kn);
        lwkt_relpooltoken(list);
}

/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_enqueue(struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;

        KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
        TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status |= KN_QUEUED;
        ++kq->kq_count;

        /*
         * Send SIGIO on request (typically set up as a mailbox signal)
         */
        if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
                pgsigio(kq->kq_sigio, SIGIO, 0);

        kqueue_wakeup(kq);
}

/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_dequeue(struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;

        KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
        TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status &= ~KN_QUEUED;
        kq->kq_count--;
}

static struct knote *
knote_alloc(void)
{
        return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
}

static void
knote_free(struct knote *kn)
{
        kfree(kn, M_KQUEUE);
}