/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 * $DragonFly: src/sys/kern/kern_event.c,v 1.33 2007/02/03 17:05:57 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/ktr.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>

#include <vm/vm_zone.h>

/*
 * Global token for kqueue subsystem
 */
struct lwkt_token kq_token = LWKT_TOKEN_UP_INITIALIZER(kq_token);
SYSCTL_INT(_lwkt, OID_AUTO, kq_mpsafe,
	CTLFLAG_RW, &kq_token.t_flags, 0,
	"Require MP lock for kq_token");
SYSCTL_LONG(_lwkt, OID_AUTO, kq_collisions,
	CTLFLAG_RW, &kq_token.t_collisions, 0,
	"Collision counter of kq_token");

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
	struct kevent_args	*ka;
	int			pchanges;
};

static int	kqueue_sleep(struct kqueue *kq, struct timespec *tsp);
static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct knote *marker);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);
static void	kqueue_wakeup(struct kqueue *kq);
static int	filter_attach(struct knote *kn);
static int	filter_event(struct knote *kn, long hint);

/*
 * MPSAFE
 */
static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};

static void	knote_attach(struct knote *kn);
static void	knote_drop(struct knote *kn);
static void	knote_detach_and_drop(struct knote *kn);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);

static struct filterops file_filtops =
	{ FILTEROP_ISFD, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ FILTEROP_ISFD, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };

static vm_zone_t	knote_zone;
static int		kq_ncallouts = 0;
static int		kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int		kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of loops in a kqueue scan");

#define KNOTE_ACTIVATE(kn) do { 					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_EXCEPT */
};
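
/*
 * System filters are identified by negative numbers in <sys/event.h>
 * (EVFILT_READ is -1, EVFILT_WRITE is -2, and so on), and
 * kqueue_register() converts them to 0-based indices into the table
 * above with the one's complement operator.  A worked example of the
 * mapping:
 *
 *	~(EVFILT_READ)  == ~(-1) == 0  ->  sysfilt_ops[0] == &file_filtops
 *	~(EVFILT_TIMER) == ~(-7) == 6  ->  sysfilt_ops[6] == &timer_filtops
 */
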
static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}

/*
 * MPSAFE
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EOPNOTSUPP);

	kn->kn_fop = &kqread_filtops;
	knote_insert(&kq->kq_kqinfo.ki_note, kn);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	knote_remove(&kq->kq_kqinfo.ki_note, kn);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	lwkt_gettoken(&proc_token);
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL) {
		lwkt_reltoken(&proc_token);
		return (ESRCH);
	}
	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		lwkt_reltoken(&proc_token);
		return (EACCES);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	knote_insert(&p->p_klist, kn);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);
	lwkt_reltoken(&proc_token);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	/* XXX locking?  take proc_token here? */
	p = kn->kn_ptr.p_proc;
	knote_remove(&p->p_klist, kn);
}
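
/*
 * Illustrative userland sketch (not part of this file): watching a
 * child with EVFILT_PROC/NOTE_EXIT.  Because filt_procattach() also
 * checks the zombie list and immediately activates exit notes, the
 * event is still delivered if the child died before the kevent was
 * registered.  Error handling is omitted for brevity.
 *
 *	struct kevent kev, ev;
 *	int kq = kqueue();
 *	pid_t pid = fork();
 *
 *	if (pid == 0)
 *		_exit(0);		(child may already be a zombie below)
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(register)
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	(ev.data holds p_xstat)
 */
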
static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will be poof,
	 * gone later on.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;
		if ((kn->kn_status & KN_DETACHED) == 0) {
			knote_remove(&p->p_klist, kn);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
		}
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}

/*
 * The callout interlocks with callout_stop() (or should), so the
 * knote should still be a valid structure.  However the timeout
 * can race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	lwkt_gettoken(&kq_token);
	if ((kn->kn_status & KN_DELETING) == 0) {
		kn->kn_data++;
		KNOTE_ACTIVATE(kn);

		if ((kn->kn_flags & EV_ONESHOT) == 0) {
			tv.tv_sec = kn->kn_sdata / 1000;
			tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
			tticks = tvtohz_high(&tv);
			calloutp = (struct callout *)kn->kn_hook;
			callout_reset(calloutp, tticks, filt_timerexpire, kn);
		}
	}
	lwkt_reltoken(&kq_token);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	if (kq_ncallouts >= kq_calloutmax)
		return (ENOMEM);
	kq_ncallouts++;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	MALLOC(calloutp, struct callout *, sizeof(*calloutp),
	    M_KQUEUE, M_WAITOK);
	callout_init(calloutp);
	kn->kn_hook = (caddr_t)calloutp;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_stop(calloutp);
	FREE(calloutp, M_KQUEUE);
	kq_ncallouts--;
}

static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}
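
/*
 * Illustrative userland sketch (not part of this file): a periodic
 * EVFILT_TIMER.  The registration data is the period in milliseconds,
 * EV_CLEAR is set automatically by filt_timerattach(), and kn_data
 * counts expirations, so ev.data reports how many periods elapsed
 * since the event was last retrieved.  Error handling omitted.
 *
 *	struct kevent kev, ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(fires every 500ms)
 *	for (;;) {
 *		kevent(kq, NULL, 0, &ev, 1, NULL);
 *		(ev.data == expirations since the last retrieval)
 *	}
 */
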
/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 */
static __inline
int
knote_acquire(struct knote *kn)
{
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_WAITING | KN_REPROCESS;
		tsleep(kn, 0, "kqepts", hz);
		/* knote may be stale now */
		return(0);
	}
	kn->kn_status |= KN_PROCESSING;
	return(1);
}

/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Non-zero is returned if the knote is destroyed.
 */
static __inline
int
knote_release(struct knote *kn)
{
	while (kn->kn_status & KN_REPROCESS) {
		kn->kn_status &= ~KN_REPROCESS;
		if (kn->kn_status & KN_WAITING) {
			kn->kn_status &= ~KN_WAITING;
			wakeup(kn);
		}
		if (kn->kn_status & KN_DELETING) {
			knote_detach_and_drop(kn);
			return(1);
			/* NOT REACHED */
		}
		if (filter_event(kn, 0))
			KNOTE_ACTIVATE(kn);
	}
	kn->kn_status &= ~KN_PROCESSING;
	return(0);
}

/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 *
 * MPSAFE
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
	TAILQ_INIT(&kq->kq_knpend);
	TAILQ_INIT(&kq->kq_knlist);
	kq->kq_count = 0;
	kq->kq_fdp = fdp;
	SLIST_INIT(&kq->kq_kqinfo.ki_note);
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
		if (knote_acquire(kn))
			knote_detach_and_drop(kn);
	}
	if (kq->kq_knhash) {
		kfree(kq->kq_knhash, M_KQUEUE);
		kq->kq_knhash = NULL;
		kq->kq_knhashmask = 0;
	}
	lwkt_reltoken(&kq_token);
}

/*
 * MPSAFE
 */
int
sys_kqueue(struct kqueue_args *uap)
{
	struct thread *td = curthread;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq, td->td_proc->p_fd);
	fp->f_data = kq;

	fsetfd(kq->kq_fdp, fp, fd);
	uap->sysmsg_result = fd;
	fdrop(fp);
	return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct kevent_copyin_args *kap;
	int error;

	kap = (struct kevent_copyin_args *)arg;

	error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
	if (error == 0) {
		kap->ka->eventlist += count;
		*res += count;
	} else {
		*res = -1;
	}

	return (error);
}

/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
	struct kevent_copyin_args *kap;
	int error, count;

	kap = (struct kevent_copyin_args *)arg;

	count = min(kap->ka->nchanges - kap->pchanges, max);
	error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
	if (error == 0) {
		kap->ka->changelist += count;
		kap->pchanges += count;
		*events = count;
	}

	return (error);
}
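
/*
 * kern_kevent() below takes the copyin/copyout behavior as function
 * pointers, which is how callers other than sys_kevent() (e.g. the
 * select/poll emulation) feed changes in and accept events without
 * touching userspace buffers.  A minimal hypothetical in-kernel
 * copyin half might look like this (the kkev_* names and struct are
 * illustrative only):
 *
 *	static int
 *	kkev_copyin(void *arg, struct kevent *kevp, int max, int *events)
 *	{
 *		struct kkev_args *ka = arg;	(hypothetical container)
 *		int n = min(ka->nchanges, max);
 *
 *		bcopy(ka->changes, kevp, n * sizeof(*kevp));
 *		ka->changes += n;
 *		ka->nchanges -= n;
 *		*events = n;
 *		return (0);
 *	}
 *
 * with a matching copyout function that bcopy()s results out and
 * bumps (*res) for each event it accepts.
 */
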
/*
 * MPSAFE
 */
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
	    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
	    struct timespec *tsp_in)
{
	struct kevent *kevp;
	struct timespec *tsp;
	int i, n, total, error, nerrors = 0;
	int lres;
	int limit = kq_checkloop;
	struct kevent kev[KQ_NEVENTS];
	struct knote marker;

	tsp = tsp_in;
	*res = 0;

	lwkt_gettoken(&kq_token);
	for ( ;; ) {
		n = 0;
		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
		if (error)
			goto done;
		if (n == 0)
			break;
		for (i = 0; i < n; i++) {
			kevp = &kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp);

			/*
			 * If a registration returns an error we
			 * immediately post the error.  The kevent()
			 * call itself will fail with the error if
			 * no space is available for posting.
			 *
			 * Such errors normally bypass the timeout/blocking
			 * code.  However, if the copyoutfn function refuses
			 * to post the error (see sys_poll()), then we
			 * ignore it too.
			 */
			if (error) {
				kevp->flags = EV_ERROR;
				kevp->data = error;
				lres = *res;
				kevent_copyoutfn(uap, kevp, 1, res);
				if (lres != *res) {
					nevents--;
					nerrors++;
				}
			}
		}
	}
	if (nerrors) {
		error = 0;
		goto done;
	}

	/*
	 * Acquire/wait for events - setup timeout
	 */
	if (tsp != NULL) {
		struct timespec ats;

		if (tsp->tv_sec || tsp->tv_nsec) {
			nanouptime(&ats);
			timespecadd(tsp, &ats);		/* tsp = target time */
		}
	}

	/*
	 * Loop as required.
	 *
	 * Collect as many events as we can.  Sleeping on successive
	 * loops is disabled if copyoutfn has incremented (*res).
	 *
	 * The loop stops if an error occurs, all events have been
	 * scanned (the marker has been reached), or fewer than the
	 * maximum number of events is found.
	 *
	 * The copyoutfn function does not have to increment (*res) in
	 * order for the loop to continue.
	 *
	 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
	 */
	total = 0;
	error = 0;
	marker.kn_filter = EVFILT_MARKER;
	marker.kn_status = KN_PROCESSING;
	TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
	while ((n = nevents - total) > 0) {
		if (n > KQ_NEVENTS)
			n = KQ_NEVENTS;

		/*
		 * If no events are pending sleep until timeout (if any)
		 * or an event occurs.
		 *
		 * After the sleep completes the marker is moved to the
		 * end of the list, making any received events available
		 * to our scan.
		 */
		if (kq->kq_count == 0 && *res == 0) {
			error = kqueue_sleep(kq, tsp);
			if (error)
				break;

			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
		}

		/*
		 * Process all received events
		 * Account for all non-spurious events in our total
		 */
		i = kqueue_scan(kq, kev, n, &marker);
		if (i) {
			lres = *res;
			error = kevent_copyoutfn(uap, kev, i, res);
			total += *res - lres;
			if (error)
				break;
		}
		if (limit && --limit == 0)
			panic("kqueue: checkloop failed i=%d", i);

		/*
		 * Normally when fewer events are returned than requested
		 * we can stop.  However, if only spurious events were
		 * collected the copyout will not bump (*res) and we have
		 * to continue.
		 */
		if (i < n && *res)
			break;

		/*
		 * Deal with an edge case where spurious events can cause
		 * a loop to occur without moving the marker.  This can
		 * prevent kqueue_scan() from picking up new events which
		 * race us.  We must be sure to move the marker for this
		 * case.
		 *
		 * NOTE: We do not want to move the marker if events
		 *	 were scanned because normal kqueue operations
		 *	 may reactivate events.  Moving the marker in
		 *	 that case could result in duplicates for the
		 *	 same event.
		 */
		if (i == 0) {
			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
		}
	}
	TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);

	/* Timeouts do not return EWOULDBLOCK. */
	if (error == EWOULDBLOCK)
		error = 0;

done:
	lwkt_reltoken(&kq_token);
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_kevent(struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct timespec ts, *tsp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct kevent_copyin_args *kap, ka;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	fp = holdfp(p->p_fd, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		fdrop(fp);
		return (EBADF);
	}

	kq = (struct kqueue *)fp->f_data;

	kap = &ka;
	kap->ka = uap;
	kap->pchanges = 0;

	error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
			    kevent_copyin, kevent_copyout, tsp);

	fdrop(fp);

	return (error);
}
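
/*
 * Illustrative userland sketch (not part of this file) of the syscall
 * pair implemented above: create a kqueue, register a read filter on
 * a descriptor, then block for events.  Error handling omitted; 'sock'
 * is assumed to be an existing socket or other kqfilter-capable
 * descriptor.
 *
 *	struct kevent kev, ev;
 *	int n, kq = kqueue();
 *
 *	EV_SET(&kev, sock, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);		(register only)
 *	for (;;) {
 *		n = kevent(kq, NULL, 0, &ev, 1, NULL);	(wait)
 *		if (n == 1)
 *			(ev.ident == sock, ev.data == bytes readable)
 *	}
 */
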
int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		kprintf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	lwkt_gettoken(&kq_token);
	if (fops->f_flags & FILTEROP_ISFD) {
		/* validate descriptor */
		fp = holdfp(fdp, kev->ident, -1);
		if (fp == NULL) {
			lwkt_reltoken(&kq_token);
			return (EBADF);
		}

again1:
		SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
			if (kn->kn_kq == kq &&
			    kn->kn_filter == kev->filter &&
			    kn->kn_id == kev->ident) {
				if (knote_acquire(kn) == 0)
					goto again1;
				break;
			}
		}
	} else {
		if (kq->kq_knhashmask) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
again2:
			SLIST_FOREACH(kn, list, kn_link) {
				if (kn->kn_id == kev->ident &&
				    kn->kn_filter == kev->filter) {
					if (knote_acquire(kn) == 0)
						goto again2;
					break;
				}
			}
		}
	}

	/*
	 * NOTE: At this point if kn is non-NULL we will have acquired
	 *	 it and set KN_PROCESSING.
	 */
	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			/*
			 * KN_PROCESSING prevents the knote from getting
			 * ripped out from under us while we are trying
			 * to attach it, in case the attach blocks.
			 */
			kn->kn_status = KN_PROCESSING;
			knote_attach(kn);
			if ((error = filter_attach(kn)) != 0) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
				knote_drop(kn);
				goto done;
			}

			/*
			 * Interlock against close races which either tried
			 * to remove our knote while we were blocked or missed
			 * it entirely prior to our attachment.  We do not
			 * want to end up with a knote on a closed descriptor.
			 */
			if ((fops->f_flags & FILTEROP_ISFD) &&
			    checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters that have already been triggered.
			 */
			KKASSERT(kn->kn_status & KN_PROCESSING);
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	} else if (kev->flags & EV_DELETE) {
		/*
		 * Delete the existing knote
		 */
		knote_detach_and_drop(kn);
		goto done;
	}

	/*
	 * Disablement does not deactivate a knote here.
	 */
	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	/*
	 * Re-enablement may have to immediately enqueue an active knote.
	 */
	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0)) {
			knote_enqueue(kn);
		}
	}

	/*
	 * Handle any required reprocessing
	 */
	knote_release(kn);
	/* kn may be invalid now */

done:
	lwkt_reltoken(&kq_token);
	if (fp != NULL)
		fdrop(fp);
	return (error);
}
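
/*
 * Illustrative userland sketch (not part of this file) of the
 * disable/enable handling above: a disabled knote stays registered
 * and may go active internally, but is not queued for delivery until
 * re-enabled, at which point kqueue_register() enqueues it immediately.
 *
 *	EV_SET(&kev, sock, EVFILT_READ, EV_DISABLE, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(activity accumulates
 *						 silently)
 *	...
 *	EV_SET(&kev, sock, EVFILT_READ, EV_ENABLE, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(pending activity delivered
 *						 on the next retrieval)
 */
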
/*
 * Block as necessary until the target time is reached.
 * If tsp is NULL we block indefinitely.  If tsp->tv_sec/tv_nsec are both
 * 0 we do not block at all.
 */
static int
kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
{
	int error = 0;

	if (tsp == NULL) {
		kq->kq_state |= KQ_SLEEP;
		error = tsleep(kq, PCATCH, "kqread", 0);
	} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
		error = EWOULDBLOCK;
	} else {
		struct timespec ats;
		struct timespec atx = *tsp;
		int timeout;

		nanouptime(&ats);
		timespecsub(&atx, &ats);
		if (atx.tv_sec < 0) {
			error = EWOULDBLOCK;
		} else {
			timeout = atx.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tstohz_high(&atx);
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PCATCH, "kqread", timeout);
		}
	}

	/* don't restart after signals... */
	if (error == ERESTART)
		return (EINTR);

	return (error);
}
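
/*
 * Illustrative userland sketch (not part of this file): a zero-valued
 * timespec turns the retrieval into a non-blocking poll.  The internal
 * EWOULDBLOCK from kqueue_sleep() is converted to a normal return by
 * kern_kevent(), so the caller simply sees 0 events.
 *
 *	struct timespec ts = { 0, 0 };
 *	int n = kevent(kq, NULL, 0, evs, 8, &ts);
 *	(n == 0 means nothing was pending; no sleep occurred)
 */
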
/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
	    struct knote *marker)
{
	struct knote *kn, local_marker;
	int total;

	total = 0;
	local_marker.kn_filter = EVFILT_MARKER;
	local_marker.kn_status = KN_PROCESSING;

	/*
	 * Collect events.
	 */
	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
	while (count) {
		kn = TAILQ_NEXT(&local_marker, kn_tqe);
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Marker reached, we are done */
			if (kn == marker)
				break;

			/* Move local marker past some other thread's marker */
			kn = TAILQ_NEXT(kn, kn_tqe);
			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
			continue;
		}

		/*
		 * We can't skip a knote undergoing processing, otherwise
		 * we risk not returning it when the user process expects
		 * it should be returned.  Sleep and retry.
		 */
		if (knote_acquire(kn) == 0)
			continue;

		/*
		 * Remove the event for processing.
		 *
		 * WARNING!  We must leave KN_QUEUED set to prevent the
		 *	     event from being KNOTE_ACTIVATE()d while
		 *	     the queue state is in limbo, in case we
		 *	     block.
		 *
		 * WARNING!  We must set KN_PROCESSING to avoid races
		 *	     against deletion or another thread's
		 *	     processing.
		 */
		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
		kq->kq_count--;

		/*
		 * We have to deal with an extremely important race against
		 * file descriptor close()s here.  The file descriptor can
		 * disappear MPSAFE, and there is a small window of
		 * opportunity between that and the call to knote_fdclose().
		 *
		 * If we hit that window here while doselect or dopoll is
		 * trying to delete a spurious event they will not be able
		 * to match up the event against a knote and will go haywire.
		 */
		if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
		    checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident, kn->kn_fp)) {
			kn->kn_status |= KN_DELETING | KN_REPROCESS;
		}

		if (kn->kn_status & KN_DISABLED) {
			/*
			 * If disabled we ensure the event is not queued
			 * but leave its active bit set.  On re-enablement
			 * the event may be immediately triggered.
			 */
			kn->kn_status &= ~KN_QUEUED;
		} else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
			   (kn->kn_status & KN_DELETING) == 0 &&
			   filter_event(kn, 0) == 0) {
			/*
			 * If not running in one-shot mode and the event
			 * is no longer present we ensure it is removed
			 * from the queue and ignore it.
			 */
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
		} else {
			/*
			 * Post the event
			 */
			*kevp++ = kn->kn_kevent;
			++total;
			--count;

			if (kn->kn_flags & EV_ONESHOT) {
				kn->kn_status &= ~KN_QUEUED;
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			} else if (kn->kn_flags & EV_CLEAR) {
				kn->kn_data = 0;
				kn->kn_fflags = 0;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			} else {
				TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
				kq->kq_count++;
			}
		}

		/*
		 * Handle any post-processing states
		 */
		knote_release(kn);
	}
	TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

	return (total);
}
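
/*
 * Illustrative userland sketch (not part of this file) of the event
 * dispositions above: without EV_CLEAR a still-true event is requeued
 * after delivery (level-triggered), while EV_CLEAR resets kn_data and
 * deactivates the knote so it only reports new activity.
 *
 *	EV_SET(&kev, sock, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	(an event is reported when data arrives; if the data is left
 *	 unread, the event does not fire again until more arrives)
 */
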
/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 *
 * MPSAFE
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPSAFE
 */
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPALMOSTSAFE
 */
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	     struct ucred *cred, struct sysmsg *msg)
{
	struct kqueue *kq;
	int error;

	lwkt_gettoken(&kq_token);
	kq = (struct kqueue *)fp->f_data;

	switch(com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&kq_token);
	return (error);
}

/*
 * MPSAFE
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

/*
 * MPSAFE
 */
static int
kqueue_close(struct file *fp)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	kqueue_terminate(kq);

	fp->f_data = NULL;
	funsetown(kq->kq_sigio);

	kfree(kq, M_KQUEUE);
	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	KNOTE(&kq->kq_kqinfo.ki_note, 0);
}

/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 */
static int
filter_attach(struct knote *kn)
{
	int ret;

	if (!(kn->kn_fop->f_flags & FILTEROP_MPSAFE)) {
		get_mplock();
		ret = kn->kn_fop->f_attach(kn);
		rel_mplock();
	} else {
		ret = kn->kn_fop->f_attach(kn);
	}

	return (ret);
}

/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 */
static void
knote_detach_and_drop(struct knote *kn)
{
	kn->kn_status |= KN_DELETING | KN_REPROCESS;
	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		kn->kn_fop->f_detach(kn);
	} else {
		get_mplock();
		kn->kn_fop->f_detach(kn);
		rel_mplock();
	}
	knote_drop(kn);
}

/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 */
static int
filter_event(struct knote *kn, long hint)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_event(kn, hint);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_event(kn, hint);
		rel_mplock();
	}
	return (ret);
}
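
/*
 * A filter opts out of the mplock wrappers above by setting
 * FILTEROP_MPSAFE in its filterops flags.  A minimal hypothetical
 * example (the filt_example* names are illustrative only):
 *
 *	static struct filterops example_filtops =
 *	    { FILTEROP_MPSAFE, filt_exampleattach, filt_exampledetach,
 *	      filt_example };
 *
 * With the flag set, f_attach/f_detach/f_event run without the MP lock
 * and must provide their own synchronization; without it, the wrappers
 * bracket every call with get_mplock()/rel_mplock().
 */
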
/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * sticky.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
restart:
	SLIST_FOREACH(kn, list, kn_next) {
		if (kn->kn_status & KN_PROCESSING) {
			/*
			 * Someone else is processing the knote, ask the
			 * other thread to reprocess it and don't mess
			 * with it otherwise.
			 */
			if (hint == 0) {
				kn->kn_status |= KN_REPROCESS;
				continue;
			}

			/*
			 * If the hint is non-zero we have to wait or risk
			 * losing the state the caller is trying to update.
			 *
			 * XXX This is a real problem, certain process
			 *     and signal filters will bump kn_data for
			 *     already-processed notes more than once if
			 *     we restart the list scan.  FIXME.
			 */
			kn->kn_status |= KN_WAITING | KN_REPROCESS;
			tsleep(kn, 0, "knotec", hz);
			goto restart;
		}

		/*
		 * Become the reprocessing master ourselves.
		 *
		 * If the hint is non-zero running the event is mandatory
		 * when not deleting so do it whether reprocessing is
		 * set or not.
		 */
		kn->kn_status |= KN_PROCESSING;
		if ((kn->kn_status & KN_DELETING) == 0) {
			if (filter_event(kn, hint))
				KNOTE_ACTIVATE(kn);
		}
		if (knote_release(kn))
			goto restart;
	}
	lwkt_reltoken(&kq_token);
}

/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
	KKASSERT(kn->kn_status & KN_PROCESSING);
	ASSERT_LWKT_TOKEN_HELD(&kq_token);
	SLIST_INSERT_HEAD(klist, kn, kn_next);
}

/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
	KKASSERT(kn->kn_status & KN_PROCESSING);
	ASSERT_LWKT_TOKEN_HELD(&kq_token);
	SLIST_REMOVE(klist, kn, knote, kn_next);
}
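
/*
 * The typical producer-side pattern for the API above, sketched for a
 * hypothetical driver (the example_* names and softc layout are
 * illustrative only): the driver's kqfilter handler inserts the knote
 * into its own klist, its filter detach handler removes it, and the
 * code paths that change state call knote() via the KNOTE() macro to
 * activate any waiters.
 *
 *	static int
 *	example_kqfilter(struct file *fp, struct knote *kn)
 *	{
 *		struct example_softc *sc = fp->f_data;
 *
 *		kn->kn_fop = &example_filtops;
 *		knote_insert(&sc->sc_kqinfo.ki_note, kn);
 *		return (0);
 *	}
 *
 *	(later, when data arrives)
 *	KNOTE(&sc->sc_kqinfo.ki_note, 0);
 */
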
/*
 * Remove all knotes from a specified klist
 *
 * Only called from aio.
 */
void
knote_empty(struct klist *list)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
	while ((kn = SLIST_FIRST(list)) != NULL) {
		if (knote_acquire(kn))
			knote_detach_and_drop(kn);
	}
	lwkt_reltoken(&kq_token);
}

void
knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
		    struct filterops *ops, void *hook)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
	while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
		if (knote_acquire(kn)) {
			knote_remove(&src->ki_note, kn);
			kn->kn_fop = ops;
			kn->kn_hook = hook;
			knote_insert(&dst->ki_note, kn);
			knote_release(kn);
			/* kn may be invalid now */
		}
	}
	lwkt_reltoken(&kq_token);
}

/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
restart:
	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
			if (knote_acquire(kn))
				knote_detach_and_drop(kn);
			goto restart;
		}
	}
	lwkt_reltoken(&kq_token);
}

/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 */
static void
knote_attach(struct knote *kn)
{
	struct klist *list;
	struct kqueue *kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		KKASSERT(kn->kn_fp);
		list = &kn->kn_fp->f_klist;
	} else {
		if (kq->kq_knhashmask == 0)
			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
						 &kq->kq_knhashmask);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	SLIST_INSERT_HEAD(list, kn, kn_link);
	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
}

/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 */
static void
knote_drop(struct knote *kn)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		list = &kn->kn_fp->f_klist;
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		fdrop(kn->kn_fp);
		kn->kn_fp = NULL;
	}
	knote_free(kn);
}

/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	++kq->kq_count;

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);

	kqueue_wakeup(kq);
}
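
/*
 * Illustrative userland sketch (not part of this file) of the SIGIO
 * path above, using the FIOSETOWN/FIOASYNC ioctls handled by
 * kqueue_ioctl(): once KQ_ASYNC is set, the first knote queued on an
 * empty kqueue raises SIGIO at the configured owner.
 *
 *	int on = 1;
 *	pid_t pid = getpid();
 *
 *	ioctl(kq, FIOSETOWN, &pid);	(direct SIGIO at this process)
 *	ioctl(kq, FIOASYNC, &on);	(arm async notification)
 *	(a SIGIO handler can then poll with a zero timeout)
 */
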
/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 */
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static void
knote_init(void)
{
	knote_zone = zinit("KNOTE", sizeof(struct knote), 0, 0, 1);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
	return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
	zfree(knote_zone, kn);
}