/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)sys_generic.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/sys_generic.c,v 1.55.2.10 2001/03/17 10:39:32 peter Exp $
 * $DragonFly: src/sys/kern/sys_generic.c,v 1.49 2008/05/05 22:09:44 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/event.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/socketvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/kern_syscall.h>
#include <sys/malloc.h>
#include <sys/mapped_ioctl.h>
#include <sys/poll.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/socketops.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/buf.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <vm/vm.h>
#include <vm/vm_page.h>

#include <sys/file2.h>
#include <sys/mplock2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>

static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_IOCTLMAP, "ioctlmap", "mapped ioctl handler buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");

typedef struct kfd_set {
	fd_mask	fds_bits[2];
} kfd_set;

enum select_copyin_states {
	COPYIN_READ, COPYIN_WRITE, COPYIN_EXCEPT, COPYIN_DONE
};

struct select_kevent_copyin_args {
	kfd_set		*read_set;
	kfd_set		*write_set;
	kfd_set		*except_set;
	int		active_set;	/* One of select_copyin_states */
	struct lwp	*lwp;		/* Pointer to our lwp */
	int		num_fds;	/* Number of file descriptors (syscall arg) */
	int		proc_fds;	/* Processed fd's (wraps) */
	int		error;		/* Returned to userland */
};

struct poll_kevent_copyin_args {
	struct lwp	*lwp;
	struct pollfd	*fds;
	int		nfds;
	int		pfds;
	int		error;
};

static struct lwkt_token mioctl_token = LWKT_TOKEN_INITIALIZER(mioctl_token);

static int	doselect(int nd, fd_set *in, fd_set *ou, fd_set *ex,
			 struct timespec *ts, int *res);
static int	dopoll(int nfds, struct pollfd *fds, struct timespec *ts,
		       int *res);
static int	dofileread(int, struct file *, struct uio *, int, size_t *);
static int	dofilewrite(int, struct file *, struct uio *, int, size_t *);
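
/*
 * All of the read/write entry points below share one pattern: describe
 * the user buffer(s) with a struct uio (one struct iovec per buffer),
 * then hand the uio to kern_preadv()/kern_pwritev().  Illustrative
 * sketch of what a plain read(fd, buf, nbyte) boils down to:
 *
 *	struct iovec aiov = { .iov_base = buf, .iov_len = nbyte };
 *	struct uio auio;	... point auio at &aiov, resid = nbyte ...
 *	error = kern_preadv(fd, &auio, 0, &bytes_read);
 *
 * readv()/writev() differ only in copying an entire iovec array in
 * from userland via iovec_copyin() first.
 */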

/*
 * Read system call.
 *
 * MPSAFE
 */
int
sys_read(struct read_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ssize_t)uap->nbyte < 0)
		return (EINVAL);

	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = -1;
	auio.uio_resid = uap->nbyte;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	error = kern_preadv(uap->fd, &auio, 0, &uap->sysmsg_szresult);
	return (error);
}

/*
 * Positioned (pread) read system call.
 *
 * MPSAFE
 */
int
sys_extpread(struct extpread_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	int error;
	int flags;

	if ((ssize_t)uap->nbyte < 0)
		return (EINVAL);

	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = uap->offset;
	auio.uio_resid = uap->nbyte;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	flags = uap->flags & O_FMASK;
	if (uap->offset != (off_t)-1)
		flags |= O_FOFFSET;

	error = kern_preadv(uap->fd, &auio, flags, &uap->sysmsg_szresult);
	return (error);
}

/*
 * Scatter read system call.
 *
 * MPSAFE
 */
int
sys_readv(struct readv_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	int error;

	error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
			     &auio.uio_resid);
	if (error)
		return (error);
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_offset = -1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	error = kern_preadv(uap->fd, &auio, 0, &uap->sysmsg_szresult);

	iovec_free(&iov, aiov);
	return (error);
}

/*
 * Scatter positioned read system call.
 *
 * MPSAFE
 */
int
sys_extpreadv(struct extpreadv_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	int error;
	int flags;

	error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
			     &auio.uio_resid);
	if (error)
		return (error);
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_offset = uap->offset;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	flags = uap->flags & O_FMASK;
	if (uap->offset != (off_t)-1)
		flags |= O_FOFFSET;

	error = kern_preadv(uap->fd, &auio, flags, &uap->sysmsg_szresult);

	iovec_free(&iov, aiov);
	return (error);
}

/*
 * MPSAFE
 */
int
kern_preadv(int fd, struct uio *auio, int flags, size_t *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	KKASSERT(p);

	fp = holdfp(p->p_fd, fd, FREAD);
	if (fp == NULL)
		return (EBADF);
	if ((flags & O_FOFFSET) && fp->f_type != DTYPE_VNODE) {
		error = ESPIPE;
	} else {
		error = dofileread(fd, fp, auio, flags, res);
	}
	fdrop(fp);
	return (error);
}
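
/*
 * Note on the ktrace handling in dofileread()/dofilewrite() below:
 * fo_read()/fo_write() consume the uio in place (uio_iov and uio_resid
 * are advanced as data transfers), so when KTR_GENIO tracing is active
 * the iovec array is snapshotted first and the transfer size is
 * recovered afterwards as (len - uio_resid) before handing the saved
 * copy to ktrgenio().
 */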

/*
 * Common code for readv and preadv that reads data in
 * from a file using the passed in uio, offset, and flags.
 *
 * MPALMOSTSAFE - ktrace needs help
 */
static int
dofileread(int fd, struct file *fp, struct uio *auio, int flags, size_t *res)
{
	int error;
	size_t len;
#ifdef KTRACE
	struct thread *td = curthread;
	struct iovec *ktriov = NULL;
	struct uio ktruio;
#endif

#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(td, KTR_GENIO)) {
		int iovlen = auio->uio_iovcnt * sizeof(struct iovec);

		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
		bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
		ktruio = *auio;
	}
#endif
	len = auio->uio_resid;
	error = fo_read(fp, auio, fp->f_cred, flags);
	if (error) {
		if (auio->uio_resid != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0) {
			ktruio.uio_iov = ktriov;
			ktruio.uio_resid = len - auio->uio_resid;
			get_mplock();
			ktrgenio(td->td_lwp, fd, UIO_READ, &ktruio, error);
			rel_mplock();
		}
		FREE(ktriov, M_TEMP);
	}
#endif
	if (error == 0)
		*res = len - auio->uio_resid;

	return (error);
}

/*
 * Write system call.
 *
 * MPSAFE
 */
int
sys_write(struct write_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ssize_t)uap->nbyte < 0)
		return (EINVAL);

	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = -1;
	auio.uio_resid = uap->nbyte;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	error = kern_pwritev(uap->fd, &auio, 0, &uap->sysmsg_szresult);

	return (error);
}

/*
 * Positioned (pwrite) write system call.
 *
 * MPSAFE
 */
int
sys_extpwrite(struct extpwrite_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	int error;
	int flags;

	if ((ssize_t)uap->nbyte < 0)
		return (EINVAL);

	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = uap->offset;
	auio.uio_resid = uap->nbyte;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	flags = uap->flags & O_FMASK;
	if (uap->offset != (off_t)-1)
		flags |= O_FOFFSET;
	error = kern_pwritev(uap->fd, &auio, flags, &uap->sysmsg_szresult);
	return (error);
}

/*
 * Gather write system call.
 *
 * MPSAFE
 */
int
sys_writev(struct writev_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	int error;

	error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
			     &auio.uio_resid);
	if (error)
		return (error);
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_offset = -1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	error = kern_pwritev(uap->fd, &auio, 0, &uap->sysmsg_szresult);

	iovec_free(&iov, aiov);
	return (error);
}

/*
 * Gather positioned write system call.
 *
 * MPSAFE
 */
int
sys_extpwritev(struct extpwritev_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	int error;
	int flags;

	error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
			     &auio.uio_resid);
	if (error)
		return (error);
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_offset = uap->offset;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	flags = uap->flags & O_FMASK;
	if (uap->offset != (off_t)-1)
		flags |= O_FOFFSET;

	error = kern_pwritev(uap->fd, &auio, flags, &uap->sysmsg_szresult);

	iovec_free(&iov, aiov);
	return (error);
}

/*
 * MPSAFE
 */
int
kern_pwritev(int fd, struct uio *auio, int flags, size_t *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	KKASSERT(p);

	fp = holdfp(p->p_fd, fd, FWRITE);
	if (fp == NULL)
		return (EBADF);
	if ((flags & O_FOFFSET) && fp->f_type != DTYPE_VNODE) {
		error = ESPIPE;
	} else {
		error = dofilewrite(fd, fp, auio, flags, res);
	}

	fdrop(fp);
	return (error);
}

/*
 * Common code for writev and pwritev that writes data to
 * a file using the passed in uio, offset, and flags.
 *
 * MPALMOSTSAFE - ktrace needs help
 */
static int
dofilewrite(int fd, struct file *fp, struct uio *auio, int flags, size_t *res)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	int error;
	size_t len;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
	struct uio ktruio;
#endif

#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec and uio
	 */
	if (KTRPOINT(td, KTR_GENIO)) {
		int iovlen = auio->uio_iovcnt * sizeof(struct iovec);

		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
		bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
		ktruio = *auio;
	}
#endif
	len = auio->uio_resid;
	error = fo_write(fp, auio, fp->f_cred, flags);
	if (error) {
		if (auio->uio_resid != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/* Socket layer is responsible for issuing SIGPIPE. */
		if (error == EPIPE)
			lwpsignal(lp->lwp_proc, lp, SIGPIPE);
	}
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0) {
			ktruio.uio_iov = ktriov;
			ktruio.uio_resid = len - auio->uio_resid;
			get_mplock();
			ktrgenio(lp, fd, UIO_WRITE, &ktruio, error);
			rel_mplock();
		}
		FREE(ktriov, M_TEMP);
	}
#endif
	if (error == 0)
		*res = len - auio->uio_resid;

	return (error);
}
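
/*
 * The EPIPE -> SIGPIPE conversion in dofilewrite() is what gives
 * userland the classic broken-pipe behavior; an illustrative caller:
 *
 *	signal(SIGPIPE, SIG_IGN);		// opt out of the signal
 *	if (write(fd, buf, len) < 0 && errno == EPIPE)
 *		... peer is gone ...		// plain error instead
 *
 * Without the SIG_IGN the process is signalled (and by default killed)
 * rather than seeing the EPIPE return.
 */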

/*
 * Ioctl system call.
 *
 * MPSAFE
 */
int
sys_ioctl(struct ioctl_args *uap)
{
	int error;

	error = mapped_ioctl(uap->fd, uap->com, uap->data, NULL, &uap->sysmsg);
	return (error);
}

struct ioctl_map_entry {
	const char *subsys;
	struct ioctl_map_range *cmd_ranges;
	LIST_ENTRY(ioctl_map_entry) entries;
};

/*
 * The true heart of all ioctl syscall handlers (native, emulation).
 * If map != NULL, it will be searched for a matching entry for com,
 * and appropriate conversions/conversion functions will be utilized.
 *
 * MPSAFE
 */
int
mapped_ioctl(int fd, u_long com, caddr_t uspc_data, struct ioctl_map *map,
	     struct sysmsg *msg)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct ucred *cred;
	struct file *fp;
	struct ioctl_map_range *iomc = NULL;
	int error;
	u_int size;
	u_long ocom = com;
	caddr_t data, memp;
	int tmp;
#define STK_PARAMS	128
	union {
		char stkbuf[STK_PARAMS];
		long align;
	} ubuf;

	KKASSERT(p);
	cred = td->td_ucred;

	fp = holdfp(p->p_fd, fd, FREAD|FWRITE);
	if (fp == NULL)
		return (EBADF);

	if (map != NULL) {	/* obey translation map */
		u_long maskcmd;
		struct ioctl_map_entry *e;

		maskcmd = com & map->mask;

		lwkt_gettoken(&mioctl_token);
		LIST_FOREACH(e, &map->mapping, entries) {
			for (iomc = e->cmd_ranges; iomc->start != 0 ||
			     iomc->maptocmd != 0 || iomc->wrapfunc != NULL ||
			     iomc->mapfunc != NULL;
			     iomc++) {
				if (maskcmd >= iomc->start &&
				    maskcmd <= iomc->end)
					break;
			}

			/* Did we find a match? */
			if (iomc->start != 0 || iomc->maptocmd != 0 ||
			    iomc->wrapfunc != NULL || iomc->mapfunc != NULL)
				break;
		}
		lwkt_reltoken(&mioctl_token);

		if (iomc == NULL ||
		    (iomc->start == 0 && iomc->maptocmd == 0 &&
		     iomc->wrapfunc == NULL && iomc->mapfunc == NULL)) {
			kprintf("%s: 'ioctl' fd=%d, cmd=0x%lx ('%c',%d) not implemented\n",
				map->sys, fd, maskcmd,
				(int)((maskcmd >> 8) & 0xff),
				(int)(maskcmd & 0xff));
			error = EINVAL;
			goto done;
		}

		/*
		 * If it's a non-range one to one mapping, maptocmd should be
		 * correct.  If it's a ranged one to one mapping, we pass the
		 * original value of com, and for a range mapped to a different
		 * range, we always need a mapping function to translate the
		 * ioctl to our native ioctl.  Ex. 6500-65ff <-> 9500-95ff
		 */
		if (iomc->start == iomc->end && iomc->maptocmd == iomc->maptoend) {
			com = iomc->maptocmd;
		} else if (iomc->start == iomc->maptocmd && iomc->end == iomc->maptoend) {
			if (iomc->mapfunc != NULL)
				com = iomc->mapfunc(iomc->start, iomc->end,
						    iomc->start, iomc->end,
						    com, com);
		} else {
			if (iomc->mapfunc != NULL) {
				com = iomc->mapfunc(iomc->start, iomc->end,
						    iomc->maptocmd, iomc->maptoend,
						    com, ocom);
			} else {
				kprintf("%s: Invalid mapping for fd=%d, cmd=%#lx ('%c',%d)\n",
					map->sys, fd, maskcmd,
					(int)((maskcmd >> 8) & 0xff),
					(int)(maskcmd & 0xff));
				error = EINVAL;
				goto done;
			}
		}
	}

	switch (com) {
	case FIONCLEX:
		error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
		goto done;
	case FIOCLEX:
		error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
		goto done;
	}
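
	/*
	 * Worked example of the command-word layout interpreted below
	 * (per the standard BSD <sys/ioccom.h> encoding): FIONBIO is
	 * _IOW('f', 126, int), which packs the direction bit IOC_IN,
	 * the parameter length sizeof(int), the group 'f' and the
	 * command number 126 into the single word 'com'.  IOCPARM_LEN()
	 * simply extracts that embedded length field.
	 */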

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX) {
		error = ENOTTY;
		goto done;
	}

	if (size > sizeof(ubuf.stkbuf)) {
		memp = kmalloc(size, M_IOCTLOPS, M_WAITOK);
		data = memp;
	} else {
		memp = NULL;
		data = ubuf.stkbuf;
	}
	if ((com & IOC_IN) != 0) {
		if (size != 0) {
			error = copyin(uspc_data, data, (size_t)size);
			if (error) {
				if (memp != NULL)
					kfree(memp, M_IOCTLOPS);
				goto done;
			}
		} else {
			*(caddr_t *)data = uspc_data;
		}
	} else if ((com & IOC_OUT) != 0 && size) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(data, (size_t)size);
	} else if ((com & IOC_VOID) != 0) {
		*(caddr_t *)data = uspc_data;
	}

	switch (com) {
	case FIONBIO:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FNONBLOCK);
		else
			atomic_clear_int(&fp->f_flag, FNONBLOCK);
		error = 0;
		break;

	case FIOASYNC:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FASYNC);
		else
			atomic_clear_int(&fp->f_flag, FASYNC);
		error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, cred, msg);
		break;

	default:
		/*
		 * If there is an override function,
		 * call it instead of directly routing the call.
		 */
		if (map != NULL && iomc->wrapfunc != NULL)
			error = iomc->wrapfunc(fp, com, ocom, data, cred);
		else
			error = fo_ioctl(fp, com, data, cred, msg);
		/*
		 * Copy any data to user, size was
		 * already set and checked above.
		 */
		if (error == 0 && (com & IOC_OUT) != 0 && size != 0)
			error = copyout(data, uspc_data, (size_t)size);
		break;
	}
	if (memp != NULL)
		kfree(memp, M_IOCTLOPS);
done:
	fdrop(fp);
	return (error);
}

/*
 * MPSAFE
 */
int
mapped_ioctl_register_handler(struct ioctl_map_handler *he)
{
	struct ioctl_map_entry *ne;

	KKASSERT(he != NULL && he->map != NULL && he->cmd_ranges != NULL &&
		 he->subsys != NULL && *he->subsys != '\0');

	ne = kmalloc(sizeof(struct ioctl_map_entry), M_IOCTLMAP,
		     M_WAITOK | M_ZERO);

	ne->subsys = he->subsys;
	ne->cmd_ranges = he->cmd_ranges;

	lwkt_gettoken(&mioctl_token);
	LIST_INSERT_HEAD(&he->map->mapping, ne, entries);
	lwkt_reltoken(&mioctl_token);

	return (0);
}

/*
 * MPSAFE
 */
int
mapped_ioctl_unregister_handler(struct ioctl_map_handler *he)
{
	struct ioctl_map_entry *ne;
	int error = EINVAL;

	KKASSERT(he != NULL && he->map != NULL && he->cmd_ranges != NULL);

	lwkt_gettoken(&mioctl_token);
	LIST_FOREACH(ne, &he->map->mapping, entries) {
		if (ne->cmd_ranges == he->cmd_ranges) {
			LIST_REMOVE(ne, entries);
			kfree(ne, M_IOCTLMAP);
			error = 0;
			break;
		}
	}
	lwkt_reltoken(&mioctl_token);
	return (error);
}
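
/*
 * Hypothetical registration sketch (illustrative only; the command
 * values echo the 6500-65ff <-> 9500-95ff example above and are not
 * real).  An emulation layer describes its foreign ioctl space with a
 * zero-terminated ioctl_map_range table and registers it once at load:
 *
 *	static struct ioctl_map_range example_ranges[] = {
 *		{ .start = 0x6500, .end = 0x65ff,	// foreign range
 *		  .maptocmd = 0x9500, .maptoend = 0x95ff,
 *		  .mapfunc = example_mapfunc },	// range remap needs one
 *		{ 0 }	// all-zero terminator, see the scan loop above
 *	};
 *	static struct ioctl_map_handler example_handler = {
 *		.map = &example_map,		// the emulation's ioctl_map
 *		.subsys = "example",
 *		.cmd_ranges = example_ranges,
 *	};
 *	mapped_ioctl_register_handler(&example_handler);
 */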

static int	nselcoll;	/* Select collisions since boot */
int	selwait;
SYSCTL_INT(_kern, OID_AUTO, nselcoll, CTLFLAG_RD, &nselcoll, 0, "");
static int	nseldebug;
SYSCTL_INT(_kern, OID_AUTO, nseldebug, CTLFLAG_RW, &nseldebug, 0, "");

/*
 * Select system call.
 *
 * MPSAFE
 */
int
sys_select(struct select_args *uap)
{
	struct timeval ktv;
	struct timespec *ktsp, kts;
	int error;

	/*
	 * Get timeout if any.
	 */
	if (uap->tv != NULL) {
		error = copyin(uap->tv, &ktv, sizeof (ktv));
		if (error)
			return (error);
		TIMEVAL_TO_TIMESPEC(&ktv, &kts);
		ktsp = &kts;
	} else {
		ktsp = NULL;
	}

	/*
	 * Do real work.
	 */
	error = doselect(uap->nd, uap->in, uap->ou, uap->ex, ktsp,
			 &uap->sysmsg_result);

	return (error);
}

/*
 * Pselect system call.
 */
int
sys_pselect(struct pselect_args *uap)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct timespec *ktsp, kts;
	sigset_t sigmask;
	int error;

	/*
	 * Get timeout if any.
	 */
	if (uap->ts != NULL) {
		error = copyin(uap->ts, &kts, sizeof (kts));
		if (error)
			return (error);
		ktsp = &kts;
	} else {
		ktsp = NULL;
	}

	/*
	 * Install temporary signal mask if any provided.
	 */
	if (uap->sigmask != NULL) {
		error = copyin(uap->sigmask, &sigmask, sizeof(sigmask));
		if (error)
			return (error);
		lwkt_gettoken(&lp->lwp_proc->p_token);
		lp->lwp_oldsigmask = lp->lwp_sigmask;
		SIG_CANTMASK(sigmask);
		lp->lwp_sigmask = sigmask;
		lwkt_reltoken(&lp->lwp_proc->p_token);
	}

	/*
	 * Do real job.
	 */
	error = doselect(uap->nd, uap->in, uap->ou, uap->ex, ktsp,
			 &uap->sysmsg_result);

	if (uap->sigmask != NULL) {
		lwkt_gettoken(&lp->lwp_proc->p_token);
		/* doselect() responsible for turning ERESTART into EINTR */
		KKASSERT(error != ERESTART);
		if (error == EINTR) {
			/*
			 * We can't restore the previous signal mask now
			 * because it could block the signal that interrupted
			 * us.  So make a note to restore it after executing
			 * the handler.
			 */
			lp->lwp_flag |= LWP_OLDMASK;
		} else {
			/*
			 * No handler to run.  Restore previous mask immediately.
			 */
			lp->lwp_sigmask = lp->lwp_oldsigmask;
		}
		lwkt_reltoken(&lp->lwp_proc->p_token);
	}

	return (error);
}
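
/*
 * The sigmask dance above is what makes the classic userland idiom
 * race-free (illustrative):
 *
 *	sigprocmask(SIG_BLOCK, &set, &oset);	// block the signal
 *	if (!event_flag)			// flag set by the handler
 *		pselect(nfds, &rfds, NULL, NULL, NULL, &oset);
 *
 * The old mask is installed and the sleep begins atomically with
 * respect to signal delivery, so a signal arriving between the check
 * and the call cannot be lost.
 */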

static int
select_copyin(void *arg, struct kevent *kevp, int maxevents, int *events)
{
	struct select_kevent_copyin_args *skap = NULL;
	struct kevent *kev;
	int fd;
	kfd_set *fdp = NULL;
	short filter = 0;
	u_int fflags = 0;

	skap = (struct select_kevent_copyin_args *)arg;

	if (*events == maxevents)
		return (0);

	while (skap->active_set < COPYIN_DONE) {
		switch (skap->active_set) {
		case COPYIN_READ:
			/*
			 * Register descriptors for the read filter
			 */
			fdp = skap->read_set;
			filter = EVFILT_READ;
			fflags = NOTE_OLDAPI;
			if (fdp)
				break;
			++skap->active_set;
			skap->proc_fds = 0;
			/* fall through */
		case COPYIN_WRITE:
			/*
			 * Register descriptors for the write filter
			 */
			fdp = skap->write_set;
			filter = EVFILT_WRITE;
			fflags = NOTE_OLDAPI;
			if (fdp)
				break;
			++skap->active_set;
			skap->proc_fds = 0;
			/* fall through */
		case COPYIN_EXCEPT:
			/*
			 * Register descriptors for the exception filter
			 */
			fdp = skap->except_set;
			filter = EVFILT_EXCEPT;
			fflags = NOTE_OLDAPI | NOTE_OOB;
			if (fdp)
				break;
			++skap->active_set;
			skap->proc_fds = 0;
			/* fall through */
		case COPYIN_DONE:
			/*
			 * Nothing left to register
			 */
			return (0);
			/* NOT REACHED */
		}

		while (skap->proc_fds < skap->num_fds) {
			fd = skap->proc_fds;
			if (FD_ISSET(fd, fdp)) {
				kev = &kevp[*events];
				EV_SET(kev, fd, filter,
				       EV_ADD|EV_ENABLE,
				       fflags, 0,
				       (void *)(uintptr_t)
					skap->lwp->lwp_kqueue_serial);
				FD_CLR(fd, fdp);
				++*events;

				if (nseldebug)
					kprintf("select fd %d filter %d serial %d\n",
						fd, filter,
						skap->lwp->lwp_kqueue_serial);
			}
			++skap->proc_fds;
			if (*events == maxevents)
				return (0);
		}
		skap->active_set++;
		skap->proc_fds = 0;
	}

	return (0);
}
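
/*
 * Stale-event detection: each kevent registered above carries the
 * lwp's current lwp_kqueue_serial in udata.  doselect() advances the
 * serial by num_fds after every call, so leftovers from a previous
 * select() on the same per-lwp kqueue show up with a mismatched udata
 * and are disabled/deleted by select_copyout() below.
 */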

static int
select_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct select_kevent_copyin_args *skap;
	struct kevent kev;
	int i = 0;

	skap = (struct select_kevent_copyin_args *)arg;

	for (i = 0; i < count; ++i) {
		/*
		 * Filter out and delete spurious events
		 */
		if ((u_int)(uintptr_t)kevp[i].udata !=
		    skap->lwp->lwp_kqueue_serial) {
			kev = kevp[i];
			kev.flags = EV_DISABLE|EV_DELETE;
			kqueue_register(&skap->lwp->lwp_kqueue, &kev);
			if (nseldebug)
				kprintf("select fd %ju mismatched serial %d\n",
					(uintmax_t)kevp[i].ident,
					skap->lwp->lwp_kqueue_serial);
			continue;
		}

		/*
		 * Handle errors
		 */
		if (kevp[i].flags & EV_ERROR) {
			switch(kevp[i].data) {
			case EBADF:
				/*
				 * A bad file descriptor is considered a
				 * fatal error for select, bail out.
				 */
				skap->error = EBADF;
				*res = 0;
				return (1);
			default:
				/*
				 * Select silently swallows any unknown errors
				 * for descriptors in the read or write sets.
				 *
				 * ALWAYS filter out EOPNOTSUPP errors from
				 * filters (at least until all filters support
				 * EVFILT_EXCEPT).
				 */
				if (kevp[i].filter != EVFILT_READ &&
				    kevp[i].filter != EVFILT_WRITE &&
				    kevp[i].data != EOPNOTSUPP) {
					skap->error = kevp[i].data;
					*res = 0;
					return (1);
				}
				break;
			}
			if (nseldebug)
				kprintf("select fd %ju filter %d error %jd\n",
					(uintmax_t)kevp[i].ident,
					kevp[i].filter,
					(intmax_t)kevp[i].data);
			continue;
		}

		switch (kevp[i].filter) {
		case EVFILT_READ:
			FD_SET(kevp[i].ident, skap->read_set);
			break;
		case EVFILT_WRITE:
			FD_SET(kevp[i].ident, skap->write_set);
			break;
		case EVFILT_EXCEPT:
			FD_SET(kevp[i].ident, skap->except_set);
			break;
		}

		++*res;
	}

	return (0);
}

/*
 * Copy select bits in from userland.  Allocate kernel memory if the
 * set is large.
 */
static int
getbits(int bytes, fd_set *in_set, kfd_set **out_set, kfd_set *tmp_set)
{
	int error;

	if (in_set) {
		if (bytes < sizeof(*tmp_set))
			*out_set = tmp_set;
		else
			*out_set = kmalloc(bytes, M_SELECT, M_WAITOK);
		error = copyin(in_set, *out_set, bytes);
	} else {
		*out_set = NULL;
		error = 0;
	}
	return (error);
}

/*
 * Copy returned select bits back out to userland.
 */
static int
putbits(int bytes, kfd_set *in_set, fd_set *out_set)
{
	int error;

	if (in_set) {
		error = copyout(in_set, out_set, bytes);
	} else {
		error = 0;
	}
	return (error);
}
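
/*
 * Sizing note (assuming the common LP64 case of a 64 bit fd_mask):
 * kfd_set holds two fd_masks, i.e. 128 descriptors.  For nd = 64,
 * doselect() computes bytes = howmany(64, 64) * 8 = 8, which is below
 * sizeof(kfd_set), so getbits() uses the caller's stack tmp_set; a
 * larger set such as nd = 200 (32 bytes) falls through to kmalloc().
 */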

/*
 * Common code for sys_select() and sys_pselect().
 *
 * in, out and ex are userland pointers.  ts must point to validated
 * kernel-side timeout value or NULL for infinite timeout.  res must
 * point to syscall return value.
 */
static int
doselect(int nd, fd_set *read, fd_set *write, fd_set *except,
	 struct timespec *ts, int *res)
{
	struct proc *p = curproc;
	struct select_kevent_copyin_args *kap, ka;
	int bytes, error;
	kfd_set read_tmp;
	kfd_set write_tmp;
	kfd_set except_tmp;

	*res = 0;
	if (nd < 0)
		return (EINVAL);
	if (nd > p->p_fd->fd_nfiles)	/* limit kmalloc */
		nd = p->p_fd->fd_nfiles;

	kap = &ka;
	kap->lwp = curthread->td_lwp;
	kap->num_fds = nd;
	kap->proc_fds = 0;
	kap->error = 0;
	kap->active_set = COPYIN_READ;

	/*
	 * Calculate bytes based on the number of __fd_mask[] array entries
	 * multiplied by the size of __fd_mask.
	 */
	bytes = howmany(nd, __NFDBITS) * sizeof(__fd_mask);

	/* kap->read_set = NULL; not needed */
	kap->write_set = NULL;
	kap->except_set = NULL;

	error = getbits(bytes, read, &kap->read_set, &read_tmp);
	if (error == 0)
		error = getbits(bytes, write, &kap->write_set, &write_tmp);
	if (error == 0)
		error = getbits(bytes, except, &kap->except_set, &except_tmp);
	if (error)
		goto done;

	/*
	 * NOTE: Make sure the max events passed to kern_kevent() is
	 *	 effectively unlimited; passing 0x7FFFFFFF accomplishes
	 *	 this.
	 *
	 *	 (*res) continues to increment as returned events are
	 *	 loaded in.
	 */
	error = kern_kevent(&kap->lwp->lwp_kqueue, 0x7FFFFFFF, res, kap,
			    select_copyin, select_copyout, ts);
	if (error == 0)
		error = putbits(bytes, kap->read_set, read);
	if (error == 0)
		error = putbits(bytes, kap->write_set, write);
	if (error == 0)
		error = putbits(bytes, kap->except_set, except);

	/*
	 * An error from an individual event that should be passed
	 * back to userland (EBADF)
	 */
	if (kap->error)
		error = kap->error;

	/*
	 * Clean up.
	 */
done:
	if (kap->read_set && kap->read_set != &read_tmp)
		kfree(kap->read_set, M_SELECT);
	if (kap->write_set && kap->write_set != &write_tmp)
		kfree(kap->write_set, M_SELECT);
	if (kap->except_set && kap->except_set != &except_tmp)
		kfree(kap->except_set, M_SELECT);

	kap->lwp->lwp_kqueue_serial += kap->num_fds;

	return (error);
}

/*
 * Poll system call.
 *
 * MPSAFE
 */
int
sys_poll(struct poll_args *uap)
{
	struct timespec ts, *tsp;
	int error;

	if (uap->timeout != INFTIM) {
		ts.tv_sec = uap->timeout / 1000;
		ts.tv_nsec = (uap->timeout % 1000) * 1000 * 1000;
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	error = dopoll(uap->nfds, uap->fds, tsp, &uap->sysmsg_result);

	return (error);
}
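
/*
 * Worked example of the timeout conversion above: poll()'s timeout is
 * plain milliseconds, so timeout = 2500 becomes ts.tv_sec = 2 and
 * ts.tv_nsec = 500000000, while INFTIM (-1) selects the NULL timespec,
 * i.e. sleep until an event arrives.
 */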

static int
poll_copyin(void *arg, struct kevent *kevp, int maxevents, int *events)
{
	struct poll_kevent_copyin_args *pkap;
	struct pollfd *pfd;
	struct kevent *kev;
	int kev_count;

	pkap = (struct poll_kevent_copyin_args *)arg;

	while (pkap->pfds < pkap->nfds) {
		pfd = &pkap->fds[pkap->pfds];

		/* Clear return events */
		pfd->revents = 0;

		/* Do not check if fd is equal to -1 */
		if (pfd->fd == -1) {
			++pkap->pfds;
			continue;
		}

		kev_count = 0;
		if (pfd->events & (POLLIN | POLLRDNORM))
			kev_count++;
		if (pfd->events & (POLLOUT | POLLWRNORM))
			kev_count++;
		if (pfd->events & (POLLPRI | POLLRDBAND))
			kev_count++;

		if (*events + kev_count > maxevents)
			return (0);

		/*
		 * NOTE: A combined serial number and poll array index is
		 *	 stored in kev->udata.
		 */
		kev = &kevp[*events];
		if (pfd->events & (POLLIN | POLLRDNORM)) {
			EV_SET(kev++, pfd->fd, EVFILT_READ, EV_ADD|EV_ENABLE,
			       NOTE_OLDAPI, 0, (void *)(uintptr_t)
				(pkap->lwp->lwp_kqueue_serial + pkap->pfds));
		}
		if (pfd->events & (POLLOUT | POLLWRNORM)) {
			EV_SET(kev++, pfd->fd, EVFILT_WRITE, EV_ADD|EV_ENABLE,
			       NOTE_OLDAPI, 0, (void *)(uintptr_t)
				(pkap->lwp->lwp_kqueue_serial + pkap->pfds));
		}
		if (pfd->events & (POLLPRI | POLLRDBAND)) {
			EV_SET(kev++, pfd->fd, EVFILT_EXCEPT, EV_ADD|EV_ENABLE,
			       NOTE_OLDAPI | NOTE_OOB, 0,
			       (void *)(uintptr_t)
				(pkap->lwp->lwp_kqueue_serial + pkap->pfds));
		}

		if (nseldebug) {
			kprintf("poll index %d/%d fd %d events %08x serial %d\n",
				pkap->pfds, pkap->nfds - 1, pfd->fd,
				pfd->events, pkap->lwp->lwp_kqueue_serial);
		}

		++pkap->pfds;
		(*events) += kev_count;
	}

	return (0);
}
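
/*
 * Unlike select, poll must map each returned event back to a specific
 * pollfd slot, so the udata stored above is serial + array index.
 * Example: with lwp_kqueue_serial = 1000, the kevents for fds[3] carry
 * udata 1003; poll_copyout() below recovers pi = 1003 - 1000 = 3 and
 * treats any pi >= nfds as a stale event from an earlier call.
 */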

static int
poll_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct poll_kevent_copyin_args *pkap;
	struct pollfd *pfd;
	struct kevent kev;
	int count_res;
	int i;
	u_int pi;

	pkap = (struct poll_kevent_copyin_args *)arg;

	for (i = 0; i < count; ++i) {
		/*
		 * Extract the poll array index and delete spurious events.
		 * We can easily tell if the serial number is incorrect
		 * by checking whether the extracted index is out of range.
		 */
		pi = (u_int)(uintptr_t)kevp[i].udata -
		     (u_int)pkap->lwp->lwp_kqueue_serial;

		if (pi >= pkap->nfds) {
			kev = kevp[i];
			kev.flags = EV_DISABLE|EV_DELETE;
			kqueue_register(&pkap->lwp->lwp_kqueue, &kev);
			if (nseldebug)
				kprintf("poll index %d out of range against serial %d\n",
					pi, pkap->lwp->lwp_kqueue_serial);
			continue;
		}
		pfd = &pkap->fds[pi];
		if (kevp[i].ident == pfd->fd) {
			/*
			 * A single descriptor may generate an error against
			 * more than one filter, make sure to set the
			 * appropriate flags but do not increment (*res)
			 * more than once.
			 */
			count_res = (pfd->revents == 0);
			if (kevp[i].flags & EV_ERROR) {
				switch(kevp[i].data) {
				case EBADF:
					/* Bad file descriptor */
					if (count_res)
						++*res;
					pfd->revents |= POLLNVAL;
					break;
				default:
					/*
					 * Poll silently swallows any unknown
					 * errors except in the case of POLLPRI
					 * (OOB/urgent data).
					 *
					 * ALWAYS filter out EOPNOTSUPP errors
					 * from filters, common applications
					 * set POLLPRI|POLLRDBAND and most
					 * filters do not support EVFILT_EXCEPT.
					 */
					if (kevp[i].filter != EVFILT_READ &&
					    kevp[i].filter != EVFILT_WRITE &&
					    kevp[i].data != EOPNOTSUPP) {
						if (count_res)
							++*res;
						pfd->revents |= POLLERR;
					}
					break;
				}
				if (nseldebug) {
					kprintf("poll index %d fd %d "
						"filter %d error %jd\n",
						pi, pfd->fd,
						kevp[i].filter,
						(intmax_t)kevp[i].data);
				}
				continue;
			}

			switch (kevp[i].filter) {
			case EVFILT_READ:
#if 0
				/*
				 * EOF on the read side can indicate a
				 * half-closed situation and not necessarily
				 * a disconnect, so depend on the user
				 * issuing a read() and getting 0 bytes back.
				 */
				if (kevp[i].flags & EV_EOF)
					pfd->revents |= POLLHUP;
#endif
				if (pfd->events & POLLIN)
					pfd->revents |= POLLIN;
				if (pfd->events & POLLRDNORM)
					pfd->revents |= POLLRDNORM;
				break;
			case EVFILT_WRITE:
				/*
				 * As per the OpenGroup POLLHUP is mutually
				 * exclusive with the writability flags.  I
				 * consider this a bit broken but...
				 *
				 * In this case a disconnect is implied even
				 * for a half-closed (write side) situation.
				 */
				if (kevp[i].flags & EV_EOF) {
					pfd->revents |= POLLHUP;
				} else {
					if (pfd->events & POLLOUT)
						pfd->revents |= POLLOUT;
					if (pfd->events & POLLWRNORM)
						pfd->revents |= POLLWRNORM;
				}
				break;
			case EVFILT_EXCEPT:
				/*
				 * EV_EOF should never be tagged for this
				 * filter.
				 */
				if (pfd->events & POLLPRI)
					pfd->revents |= POLLPRI;
				if (pfd->events & POLLRDBAND)
					pfd->revents |= POLLRDBAND;
				break;
			}

			if (nseldebug) {
				kprintf("poll index %d/%d fd %d revents %08x\n",
					pi, pkap->nfds, pfd->fd, pfd->revents);
			}

			if (count_res && pfd->revents)
				++*res;
		} else {
			if (nseldebug) {
				kprintf("poll index %d mismatch %ju/%d\n",
					pi, (uintmax_t)kevp[i].ident, pfd->fd);
			}
		}
	}

	return (0);
}

static int
dopoll(int nfds, struct pollfd *fds, struct timespec *ts, int *res)
{
	struct poll_kevent_copyin_args ka;
	struct pollfd sfds[64];
	int bytes;
	int error;

	*res = 0;
	if (nfds < 0)
		return (EINVAL);

	/*
	 * This is a bit arbitrary but we need to limit internal kmallocs.
	 */
	if (nfds > maxfilesperproc * 2)
		nfds = maxfilesperproc * 2;
	bytes = sizeof(struct pollfd) * nfds;

	ka.lwp = curthread->td_lwp;
	ka.nfds = nfds;
	ka.pfds = 0;
	ka.error = 0;

	if (ka.nfds < 64)
		ka.fds = sfds;
	else
		ka.fds = kmalloc(bytes, M_SELECT, M_WAITOK);

	error = copyin(fds, ka.fds, bytes);
	if (error == 0)
		error = kern_kevent(&ka.lwp->lwp_kqueue, 0x7FFFFFFF, res, &ka,
				    poll_copyin, poll_copyout, ts);

	if (error == 0)
		error = copyout(ka.fds, fds, bytes);

	if (ka.fds != sfds)
		kfree(ka.fds, M_SELECT);

	ka.lwp->lwp_kqueue_serial += nfds;

	return (error);
}

static int
socket_wait_copyin(void *arg, struct kevent *kevp, int maxevents, int *events)
{
	return (0);
}

static int
socket_wait_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	++*res;
	return (0);
}

extern struct fileops socketops;
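
/*
 * Illustrative socket_wait() usage (hypothetical caller): a kernel
 * consumer already holding a socket reference can block until the
 * socket becomes readable, with an optional cap on the sleep:
 *
 *	struct timespec ts = { 5, 0 };	// wait at most five seconds
 *	int res;
 *	error = socket_wait(so, &ts, &res);
 */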

/*
 * NOTE: Callers of socket_wait() must already have a reference on the
 *	 socket.
 */
int
socket_wait(struct socket *so, struct timespec *ts, int *res)
{
	struct thread *td = curthread;
	struct file *fp;
	struct kqueue kq;
	struct kevent kev;
	int error, fd;

	if ((error = falloc(td->td_lwp, &fp, &fd)) != 0)
		return (error);

	fp->f_type = DTYPE_SOCKET;
	fp->f_flag = FREAD | FWRITE;
	fp->f_ops = &socketops;
	fp->f_data = so;
	fsetfd(td->td_lwp->lwp_proc->p_fd, fp, fd);

	kqueue_init(&kq, td->td_lwp->lwp_proc->p_fd);
	EV_SET(&kev, fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, NULL);
	if ((error = kqueue_register(&kq, &kev)) != 0) {
		fdrop(fp);
		return (error);
	}

	error = kern_kevent(&kq, 1, res, NULL, socket_wait_copyin,
			    socket_wait_copyout, ts);

	EV_SET(&kev, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
	kqueue_register(&kq, &kev);
	fp->f_ops = &badfileops;
	fdrop(fp);

	return (error);
}

/*
 * OpenBSD poll system call.
 * XXX this isn't quite a true representation..  OpenBSD uses select ops.
 *
 * MPSAFE
 */
int
sys_openbsd_poll(struct openbsd_poll_args *uap)
{
	return (sys_poll((struct poll_args *)uap));
}

/*ARGSUSED*/
int
seltrue(cdev_t dev, int events)
{
	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}