/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/sys_generic.c,v 1.55.2.10 2001/03/17 10:39:32 peter Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/event.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/socketvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/kern_syscall.h>
#include <sys/malloc.h>
#include <sys/mapped_ioctl.h>
#include <sys/poll.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/socketops.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/buf.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <vm/vm.h>
#include <vm/vm_page.h>

#include <sys/file2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>

static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_IOCTLMAP, "ioctlmap", "mapped ioctl handler buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");

typedef struct kfd_set {
	fd_mask	fds_bits[2];
} kfd_set;

enum select_copyin_states {
	COPYIN_READ, COPYIN_WRITE, COPYIN_EXCEPT, COPYIN_DONE
};

struct select_kevent_copyin_args {
	kfd_set		*read_set;
	kfd_set		*write_set;
	kfd_set		*except_set;
	int		active_set;	/* One of select_copyin_states */
	struct lwp	*lwp;		/* Pointer to our lwp */
	int		num_fds;	/* Number of file descriptors (syscall arg) */
	int		proc_fds;	/* Processed fd's (wraps) */
	int		error;		/* Returned to userland */
};

struct poll_kevent_copyin_args {
	struct lwp	*lwp;
	struct pollfd	*fds;
	int		nfds;
	int		pfds;
	int		error;
};

static struct lwkt_token mioctl_token = LWKT_TOKEN_INITIALIZER(mioctl_token);

static int	doselect(int nd, fd_set *in, fd_set *ou, fd_set *ex,
			 struct timespec *ts, int *res);
static int	dopoll(int nfds, struct pollfd *fds, struct timespec *ts,
		       int *res, int flags);
static int	dofileread(int, struct file *, struct uio *, int, size_t *);
static int	dofilewrite(int, struct file *, struct uio *, int, size_t *);
/*
 * Read system call.
 *
 * MPSAFE
 */
int
sys_read(struct read_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ssize_t)uap->nbyte < 0)
		return (EINVAL);

	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = -1;
	auio.uio_resid = uap->nbyte;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	error = kern_preadv(uap->fd, &auio, 0, &uap->sysmsg_szresult);
	return (error);
}

/*
 * Positioned (Pread) read system call.
 *
 * MPSAFE
 */
int
sys_extpread(struct extpread_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	int error;
	int flags;

	if ((ssize_t)uap->nbyte < 0)
		return (EINVAL);

	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = uap->offset;
	auio.uio_resid = uap->nbyte;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	flags = uap->flags & O_FMASK;
	if (uap->offset != (off_t)-1)
		flags |= O_FOFFSET;

	error = kern_preadv(uap->fd, &auio, flags, &uap->sysmsg_szresult);
	return (error);
}

/*
 * Scatter read system call.
 *
 * MPSAFE
 */
int
sys_readv(struct readv_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	int error;

	error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
			     &auio.uio_resid);
	if (error)
		return (error);
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_offset = -1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	error = kern_preadv(uap->fd, &auio, 0, &uap->sysmsg_szresult);

	iovec_free(&iov, aiov);
	return (error);
}

/*
 * Scatter positioned read system call.
 *
 * MPSAFE
 */
int
sys_extpreadv(struct extpreadv_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	int error;
	int flags;

	error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
			     &auio.uio_resid);
	if (error)
		return (error);
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_offset = uap->offset;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	flags = uap->flags & O_FMASK;
	if (uap->offset != (off_t)-1)
		flags |= O_FOFFSET;

	error = kern_preadv(uap->fd, &auio, flags, &uap->sysmsg_szresult);

	iovec_free(&iov, aiov);
	return (error);
}

/*
 * MPSAFE
 */
int
kern_preadv(int fd, struct uio *auio, int flags, size_t *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	KKASSERT(p);

	fp = holdfp(p->p_fd, fd, FREAD);
	if (fp == NULL)
		return (EBADF);
	if ((flags & O_FOFFSET) && fp->f_type != DTYPE_VNODE) {
		error = ESPIPE;
	} else {
		error = dofileread(fd, fp, auio, flags, res);
	}
	fdrop(fp);
	return (error);
}
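/*
 * A sketch of the uio construction pattern the read/write paths above all
 * share, shown for a positioned read.  Illustrative only (the function name
 * is hypothetical); not compiled.
 */
#if 0
static int
example_positioned_read(int fd, void *buf, size_t nbyte, off_t offset,
			size_t *res)
{
	struct uio auio;
	struct iovec aiov;

	aiov.iov_base = buf;		/* single segment: base and length */
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = offset;	/* explicit offset for this transfer */
	auio.uio_resid = nbyte;		/* bytes remaining; updated by fo_read */
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE; /* buf is a userland address */
	auio.uio_td = curthread;

	/* O_FOFFSET marks a positioned transfer (cf. sys_extpread() above) */
	return (kern_preadv(fd, &auio, O_FOFFSET, res));
}
#endif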
/*
 * Common code for readv and preadv that reads data in
 * from a file using the passed in uio, offset, and flags.
 *
 * MPALMOSTSAFE - ktrace needs help
 */
static int
dofileread(int fd, struct file *fp, struct uio *auio, int flags, size_t *res)
{
	int error;
	size_t len;
#ifdef KTRACE
	struct thread *td = curthread;
	struct iovec *ktriov = NULL;
	struct uio ktruio;
#endif

#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(td, KTR_GENIO)) {
		int iovlen = auio->uio_iovcnt * sizeof(struct iovec);

		ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
		bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
		ktruio = *auio;
	}
#endif
	len = auio->uio_resid;
	error = fo_read(fp, auio, fp->f_cred, flags);
	if (error) {
		if (auio->uio_resid != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0) {
			ktruio.uio_iov = ktriov;
			ktruio.uio_resid = len - auio->uio_resid;
			ktrgenio(td->td_lwp, fd, UIO_READ, &ktruio, error);
		}
		kfree(ktriov, M_TEMP);
	}
#endif
	if (error == 0)
		*res = len - auio->uio_resid;

	return (error);
}

/*
 * Write system call.
 *
 * MPSAFE
 */
int
sys_write(struct write_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ssize_t)uap->nbyte < 0)
		return (EINVAL);

	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = -1;
	auio.uio_resid = uap->nbyte;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	error = kern_pwritev(uap->fd, &auio, 0, &uap->sysmsg_szresult);

	return (error);
}

/*
 * Pwrite system call.
 *
 * MPSAFE
 */
int
sys_extpwrite(struct extpwrite_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	int error;
	int flags;

	if ((ssize_t)uap->nbyte < 0)
		return (EINVAL);

	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = uap->offset;
	auio.uio_resid = uap->nbyte;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	flags = uap->flags & O_FMASK;
	if (uap->offset != (off_t)-1)
		flags |= O_FOFFSET;
	error = kern_pwritev(uap->fd, &auio, flags, &uap->sysmsg_szresult);
	return (error);
}

/*
 * Gather write system call.
 *
 * MPSAFE
 */
int
sys_writev(struct writev_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	int error;

	error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
			     &auio.uio_resid);
	if (error)
		return (error);
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_offset = -1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	error = kern_pwritev(uap->fd, &auio, 0, &uap->sysmsg_szresult);

	iovec_free(&iov, aiov);
	return (error);
}
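/*
 * Note on the iovec handling above and below: iovec_copyin() is expected to
 * copy the user's iovec array into the caller's on-stack aiov[]
 * (UIO_SMALLIOV entries) when it fits, falling back to a kmalloc'ed array
 * (M_IOV) for larger counts, and to compute the total transfer size into
 * uio_resid.  iovec_free() then frees only the kmalloc'ed case.
 */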
/*
 * Gather positioned write system call.
 *
 * MPSAFE
 */
int
sys_extpwritev(struct extpwritev_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	int error;
	int flags;

	error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
			     &auio.uio_resid);
	if (error)
		return (error);
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_offset = uap->offset;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	flags = uap->flags & O_FMASK;
	if (uap->offset != (off_t)-1)
		flags |= O_FOFFSET;

	error = kern_pwritev(uap->fd, &auio, flags, &uap->sysmsg_szresult);

	iovec_free(&iov, aiov);
	return (error);
}

/*
 * MPSAFE
 */
int
kern_pwritev(int fd, struct uio *auio, int flags, size_t *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	KKASSERT(p);

	fp = holdfp(p->p_fd, fd, FWRITE);
	if (fp == NULL)
		return (EBADF);
	if ((flags & O_FOFFSET) && fp->f_type != DTYPE_VNODE) {
		error = ESPIPE;
	} else {
		error = dofilewrite(fd, fp, auio, flags, res);
	}

	fdrop(fp);
	return (error);
}

/*
 * Common code for writev and pwritev that writes data to
 * a file using the passed in uio, offset, and flags.
 *
 * MPALMOSTSAFE - ktrace needs help
 */
static int
dofilewrite(int fd, struct file *fp, struct uio *auio, int flags, size_t *res)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	int error;
	size_t len;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
	struct uio ktruio;
#endif

#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec and uio
	 */
	if (KTRPOINT(td, KTR_GENIO)) {
		int iovlen = auio->uio_iovcnt * sizeof(struct iovec);

		ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
		bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
		ktruio = *auio;
	}
#endif
	len = auio->uio_resid;
	error = fo_write(fp, auio, fp->f_cred, flags);
	if (error) {
		if (auio->uio_resid != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/* Socket layer is responsible for issuing SIGPIPE. */
		if (error == EPIPE && fp->f_type != DTYPE_SOCKET)
			lwpsignal(lp->lwp_proc, lp, SIGPIPE);
	}
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0) {
			ktruio.uio_iov = ktriov;
			ktruio.uio_resid = len - auio->uio_resid;
			ktrgenio(lp, fd, UIO_WRITE, &ktruio, error);
		}
		kfree(ktriov, M_TEMP);
	}
#endif
	if (error == 0)
		*res = len - auio->uio_resid;

	return (error);
}

/*
 * Ioctl system call.
 *
 * MPSAFE
 */
int
sys_ioctl(struct ioctl_args *uap)
{
	int error;

	error = mapped_ioctl(uap->fd, uap->com, uap->data, NULL, &uap->sysmsg);
	return (error);
}

struct ioctl_map_entry {
	const char *subsys;
	struct ioctl_map_range *cmd_ranges;
	LIST_ENTRY(ioctl_map_entry) entries;
};
/*
 * The true heart of all ioctl syscall handlers (native, emulation).
 * If map != NULL, it will be searched for a matching entry for com,
 * and appropriate conversions/conversion functions will be utilized.
 *
 * MPSAFE
 */
int
mapped_ioctl(int fd, u_long com, caddr_t uspc_data, struct ioctl_map *map,
	     struct sysmsg *msg)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct ucred *cred;
	struct file *fp;
	struct ioctl_map_range *iomc = NULL;
	int error;
	u_int size;
	u_long ocom = com;
	caddr_t data, memp;
	int tmp;
#define STK_PARAMS	128
	union {
		char stkbuf[STK_PARAMS];
		long align;
	} ubuf;

	KKASSERT(p);
	cred = td->td_ucred;
	memp = NULL;

	fp = holdfp(p->p_fd, fd, FREAD|FWRITE);
	if (fp == NULL)
		return (EBADF);

	if (map != NULL) {	/* obey translation map */
		u_long maskcmd;
		struct ioctl_map_entry *e;

		maskcmd = com & map->mask;

		lwkt_gettoken(&mioctl_token);
		LIST_FOREACH(e, &map->mapping, entries) {
			for (iomc = e->cmd_ranges; iomc->start != 0 ||
			     iomc->maptocmd != 0 || iomc->wrapfunc != NULL ||
			     iomc->mapfunc != NULL;
			     iomc++) {
				if (maskcmd >= iomc->start &&
				    maskcmd <= iomc->end)
					break;
			}

			/* Did we find a match? */
			if (iomc->start != 0 || iomc->maptocmd != 0 ||
			    iomc->wrapfunc != NULL || iomc->mapfunc != NULL)
				break;
		}
		lwkt_reltoken(&mioctl_token);

		if (iomc == NULL ||
		    (iomc->start == 0 && iomc->maptocmd == 0 &&
		     iomc->wrapfunc == NULL && iomc->mapfunc == NULL)) {
			kprintf("%s: 'ioctl' fd=%d, cmd=0x%lx ('%c',%d) not implemented\n",
				map->sys, fd, maskcmd,
				(int)((maskcmd >> 8) & 0xff),
				(int)(maskcmd & 0xff));
			error = EINVAL;
			goto done;
		}

		/*
		 * If it's a non-range one to one mapping, maptocmd should be
		 * correct. If it's a ranged one to one mapping, we pass the
		 * original value of com, and for a range mapped to a different
		 * range, we always need a mapping function to translate the
		 * ioctl to our native ioctl. Ex. 6500-65ff <-> 9500-95ff
		 */
		if (iomc->start == iomc->end && iomc->maptocmd == iomc->maptoend) {
			com = iomc->maptocmd;
		} else if (iomc->start == iomc->maptocmd && iomc->end == iomc->maptoend) {
			if (iomc->mapfunc != NULL)
				com = iomc->mapfunc(iomc->start, iomc->end,
						    iomc->start, iomc->end,
						    com, com);
		} else {
			if (iomc->mapfunc != NULL) {
				com = iomc->mapfunc(iomc->start, iomc->end,
						    iomc->maptocmd, iomc->maptoend,
						    com, ocom);
			} else {
				kprintf("%s: Invalid mapping for fd=%d, cmd=%#lx ('%c',%d)\n",
					map->sys, fd, maskcmd,
					(int)((maskcmd >> 8) & 0xff),
					(int)(maskcmd & 0xff));
				error = EINVAL;
				goto done;
			}
		}
	}

	switch (com) {
	case FIONCLEX:
		error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
		goto done;
	case FIOCLEX:
		error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
		goto done;
	}

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
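	/*
	 * For example, FIONBIO is defined in <sys/filio.h> as
	 * _IOW('f', 126, int): the encoding has IOC_IN set and
	 * IOCPARM_LEN(FIONBIO) == sizeof(int), so the code below copies a
	 * single int in from userland before dispatching.  An IOC_VOID
	 * command carries no buffer at all and the user pointer is passed
	 * through by value.
	 */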
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX) {
		error = ENOTTY;
		goto done;
	}

	if ((com & IOC_VOID) == 0 && size > sizeof(ubuf.stkbuf)) {
		memp = kmalloc(size, M_IOCTLOPS, M_WAITOK);
		data = memp;
	} else {
		memp = NULL;
		data = ubuf.stkbuf;
	}
	if (com & IOC_VOID) {
		*(caddr_t *)data = uspc_data;
	} else if (com & IOC_IN) {
		if (size != 0) {
			error = copyin(uspc_data, data, (size_t)size);
			if (error)
				goto done;
		} else {
			*(caddr_t *)data = uspc_data;
		}
	} else if ((com & IOC_OUT) != 0 && size) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(data, (size_t)size);
	}

	switch (com) {
	case FIONBIO:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FNONBLOCK);
		else
			atomic_clear_int(&fp->f_flag, FNONBLOCK);
		error = 0;
		break;

	case FIOASYNC:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FASYNC);
		else
			atomic_clear_int(&fp->f_flag, FASYNC);
		error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, cred, msg);
		break;

	default:
		/*
		 * If there is an override function,
		 * call it instead of directly routing the call.
		 */
		if (map != NULL && iomc->wrapfunc != NULL)
			error = iomc->wrapfunc(fp, com, ocom, data, cred);
		else
			error = fo_ioctl(fp, com, data, cred, msg);
		/*
		 * Copy any data to user, size was
		 * already set and checked above.
		 */
		if (error == 0 && (com & IOC_OUT) != 0 && size != 0)
			error = copyout(data, uspc_data, (size_t)size);
		break;
	}
done:
	if (memp != NULL)
		kfree(memp, M_IOCTLOPS);
	fdrop(fp);
	return (error);
}
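/*
 * A sketch of how an emulation layer might describe and register a ranged
 * mapping like the "6500-65ff <-> 9500-95ff" example above.  Illustrative
 * only: the names are hypothetical, the structure layouts are assumed to
 * match <sys/mapped_ioctl.h> (designated initializers are used so only the
 * field names referenced by the code above are relied upon), and real
 * consumers live in the emulation code, not in this file.
 */
#if 0
static u_long
example_mapfunc(u_long start, u_long end, u_long maptocmd, u_long maptoend,
		u_long com, u_long ocom)
{
	/* Shift the foreign command into the native range. */
	return (com - start + maptocmd);
}

static struct ioctl_map_range example_ranges[] = {
	{ .start = 0x6500, .end = 0x65ff,
	  .maptocmd = 0x9500, .maptoend = 0x95ff,
	  .wrapfunc = NULL, .mapfunc = example_mapfunc },
	{ 0 }	/* all-zero entry terminates the range scan */
};

static struct ioctl_map example_map = {
	.mask = 0xffff,		/* bits of com compared against the ranges */
	.sys = "example",	/* name used in diagnostics */
	.mapping = LIST_HEAD_INITIALIZER(example_map.mapping)
};

static struct ioctl_map_handler example_handler = {
	.map = &example_map,
	.subsys = "example_subsys",
	.cmd_ranges = example_ranges
};

/* mapped_ioctl_register_handler(&example_handler); */
#endif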
/*
 * MPSAFE
 */
int
mapped_ioctl_register_handler(struct ioctl_map_handler *he)
{
	struct ioctl_map_entry *ne;

	KKASSERT(he != NULL && he->map != NULL && he->cmd_ranges != NULL &&
		 he->subsys != NULL && *he->subsys != '\0');

	ne = kmalloc(sizeof(struct ioctl_map_entry), M_IOCTLMAP,
		     M_WAITOK | M_ZERO);

	ne->subsys = he->subsys;
	ne->cmd_ranges = he->cmd_ranges;

	lwkt_gettoken(&mioctl_token);
	LIST_INSERT_HEAD(&he->map->mapping, ne, entries);
	lwkt_reltoken(&mioctl_token);

	return (0);
}

/*
 * MPSAFE
 */
int
mapped_ioctl_unregister_handler(struct ioctl_map_handler *he)
{
	struct ioctl_map_entry *ne;
	int error = EINVAL;

	KKASSERT(he != NULL && he->map != NULL && he->cmd_ranges != NULL);

	lwkt_gettoken(&mioctl_token);
	LIST_FOREACH(ne, &he->map->mapping, entries) {
		if (ne->cmd_ranges == he->cmd_ranges) {
			LIST_REMOVE(ne, entries);
			kfree(ne, M_IOCTLMAP);
			error = 0;
			break;
		}
	}
	lwkt_reltoken(&mioctl_token);
	return (error);
}

static int	nselcoll;	/* Select collisions since boot */
int	selwait;
SYSCTL_INT(_kern, OID_AUTO, nselcoll, CTLFLAG_RD, &nselcoll, 0, "");
static int	nseldebug;
SYSCTL_INT(_kern, OID_AUTO, nseldebug, CTLFLAG_RW, &nseldebug, 0, "");

/*
 * Select system call.
 *
 * MPSAFE
 */
int
sys_select(struct select_args *uap)
{
	struct timeval ktv;
	struct timespec *ktsp, kts;
	int error;

	/*
	 * Get timeout if any.
	 */
	if (uap->tv != NULL) {
		error = copyin(uap->tv, &ktv, sizeof (ktv));
		if (error)
			return (error);
		TIMEVAL_TO_TIMESPEC(&ktv, &kts);
		ktsp = &kts;
	} else {
		ktsp = NULL;
	}

	/*
	 * Do real work.
	 */
	error = doselect(uap->nd, uap->in, uap->ou, uap->ex, ktsp,
			 &uap->sysmsg_result);

	return (error);
}
/*
 * Pselect system call.
 */
int
sys_pselect(struct pselect_args *uap)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct timespec *ktsp, kts;
	sigset_t sigmask;
	int error;

	/*
	 * Get timeout if any.
	 */
	if (uap->ts != NULL) {
		error = copyin(uap->ts, &kts, sizeof (kts));
		if (error)
			return (error);
		ktsp = &kts;
	} else {
		ktsp = NULL;
	}

	/*
	 * Install temporary signal mask if any provided.
	 */
	if (uap->sigmask != NULL) {
		error = copyin(uap->sigmask, &sigmask, sizeof(sigmask));
		if (error)
			return (error);
		lwkt_gettoken(&lp->lwp_proc->p_token);
		lp->lwp_oldsigmask = lp->lwp_sigmask;
		SIG_CANTMASK(sigmask);
		lp->lwp_sigmask = sigmask;
		lwkt_reltoken(&lp->lwp_proc->p_token);
	}

	/*
	 * Do real job.
	 */
	error = doselect(uap->nd, uap->in, uap->ou, uap->ex, ktsp,
			 &uap->sysmsg_result);

	if (uap->sigmask != NULL) {
		lwkt_gettoken(&lp->lwp_proc->p_token);
		/* doselect() responsible for turning ERESTART into EINTR */
		KKASSERT(error != ERESTART);
		if (error == EINTR) {
			/*
			 * We can't restore the previous signal mask now
			 * because it could block the signal that interrupted
			 * us.  So make a note to restore it after executing
			 * the handler.
			 */
			lp->lwp_flags |= LWP_OLDMASK;
		} else {
			/*
			 * No handler to run.  Restore previous mask
			 * immediately.
			 */
			lp->lwp_sigmask = lp->lwp_oldsigmask;
		}
		lwkt_reltoken(&lp->lwp_proc->p_token);
	}

	return (error);
}

static int
select_copyin(void *arg, struct kevent *kevp, int maxevents, int *events)
{
	struct select_kevent_copyin_args *skap = NULL;
	struct kevent *kev;
	int fd;
	kfd_set *fdp = NULL;
	short filter = 0;
	u_int fflags = 0;

	skap = (struct select_kevent_copyin_args *)arg;

	if (*events == maxevents)
		return (0);

	while (skap->active_set < COPYIN_DONE) {
		switch (skap->active_set) {
		case COPYIN_READ:
			/*
			 * Register descriptors for the read filter
			 */
			fdp = skap->read_set;
			filter = EVFILT_READ;
			fflags = NOTE_OLDAPI;
			if (fdp)
				break;
			++skap->active_set;
			skap->proc_fds = 0;
			/* fall through */
		case COPYIN_WRITE:
			/*
			 * Register descriptors for the write filter
			 */
			fdp = skap->write_set;
			filter = EVFILT_WRITE;
			fflags = NOTE_OLDAPI;
			if (fdp)
				break;
			++skap->active_set;
			skap->proc_fds = 0;
			/* fall through */
		case COPYIN_EXCEPT:
			/*
			 * Register descriptors for the exception filter
			 */
			fdp = skap->except_set;
			filter = EVFILT_EXCEPT;
			fflags = NOTE_OLDAPI | NOTE_OOB;
			if (fdp)
				break;
			++skap->active_set;
			skap->proc_fds = 0;
			/* fall through */
		case COPYIN_DONE:
			/*
			 * Nothing left to register
			 */
			return (0);
			/* NOT REACHED */
		}

		while (skap->proc_fds < skap->num_fds) {
			fd = skap->proc_fds;
			if (FD_ISSET(fd, fdp)) {
				kev = &kevp[*events];
				EV_SET(kev, fd, filter,
				       EV_ADD|EV_ENABLE,
				       fflags, 0,
				       (void *)(uintptr_t)
					skap->lwp->lwp_kqueue_serial);
				FD_CLR(fd, fdp);
				++*events;

				if (nseldebug)
					kprintf("select fd %d filter %d serial %d\n",
						fd, filter,
						skap->lwp->lwp_kqueue_serial);
			}
			++skap->proc_fds;
			if (*events == maxevents)
				return (0);
		}
		skap->active_set++;
		skap->proc_fds = 0;
	}

	return (0);
}
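/*
 * Note on the serial numbers above: each registered kevent carries the
 * lwp's current lwp_kqueue_serial in its udata.  doselect() bumps the
 * serial by num_fds once the call completes, so events left over from a
 * previous select() identify themselves by a mismatched udata and are
 * deleted in select_copyout() below rather than being reported to userland.
 */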
static int
select_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct select_kevent_copyin_args *skap;
	struct kevent kev;
	int i = 0;

	skap = (struct select_kevent_copyin_args *)arg;

	for (i = 0; i < count; ++i) {
		/*
		 * Filter out and delete spurious events
		 */
		if ((u_int)(uintptr_t)kevp[i].udata !=
		    skap->lwp->lwp_kqueue_serial) {
			kev = kevp[i];
			kev.flags = EV_DISABLE|EV_DELETE;
			kqueue_register(&skap->lwp->lwp_kqueue, &kev);
			if (nseldebug)
				kprintf("select fd %ju mismatched serial %d\n",
					(uintmax_t)kevp[i].ident,
					skap->lwp->lwp_kqueue_serial);
			continue;
		}

		/*
		 * Handle errors
		 */
		if (kevp[i].flags & EV_ERROR) {
			int error = kevp[i].data;

			switch (error) {
			case EBADF:
				/*
				 * A bad file descriptor is considered a
				 * fatal error for select, bail out.
				 */
				skap->error = error;
				*res = -1;
				return error;

			default:
				/*
				 * Select silently swallows any unknown errors
				 * for descriptors in the read or write sets.
				 *
				 * ALWAYS filter out EOPNOTSUPP errors from
				 * filters (at least until all filters support
				 * EVFILT_EXCEPT).
				 *
				 * We also filter out ENODEV since dev_dkqfilter
				 * returns ENODEV if EOPNOTSUPP is returned in an
				 * inner call.
				 *
				 * XXX: fix this
				 */
				if (kevp[i].filter != EVFILT_READ &&
				    kevp[i].filter != EVFILT_WRITE &&
				    error != EOPNOTSUPP &&
				    error != ENODEV) {
					skap->error = error;
					*res = -1;
					return error;
				}
				break;
			}
			if (nseldebug)
				kprintf("select fd %ju filter %d error %d\n",
					(uintmax_t)kevp[i].ident,
					kevp[i].filter, error);
			continue;
		}

		switch (kevp[i].filter) {
		case EVFILT_READ:
			FD_SET(kevp[i].ident, skap->read_set);
			break;
		case EVFILT_WRITE:
			FD_SET(kevp[i].ident, skap->write_set);
			break;
		case EVFILT_EXCEPT:
			FD_SET(kevp[i].ident, skap->except_set);
			break;
		}

		++*res;
	}

	return (0);
}

/*
 * Copy select bits in from userland.  Allocate kernel memory if the
 * set is large.
 */
static int
getbits(int bytes, fd_set *in_set, kfd_set **out_set, kfd_set *tmp_set)
{
	int error;

	if (in_set) {
		if (bytes < sizeof(*tmp_set))
			*out_set = tmp_set;
		else
			*out_set = kmalloc(bytes, M_SELECT, M_WAITOK);
		error = copyin(in_set, *out_set, bytes);
	} else {
		*out_set = NULL;
		error = 0;
	}
	return (error);
}

/*
 * Copy returned select bits back out to userland.
 */
static int
putbits(int bytes, kfd_set *in_set, fd_set *out_set)
{
	int error;

	if (in_set) {
		error = copyout(in_set, out_set, bytes);
	} else {
		error = 0;
	}
	return (error);
}

static int
dotimeout_only(struct timespec *ts)
{
	return (nanosleep1(ts, NULL));
}

/*
 * Common code for sys_select() and sys_pselect().
 *
 * in, out and ex are userland pointers.  ts must point to validated
 * kernel-side timeout value or NULL for infinite timeout.  res must
 * point to syscall return value.
 */
static int
doselect(int nd, fd_set *read, fd_set *write, fd_set *except,
	 struct timespec *ts, int *res)
{
	struct proc *p = curproc;
	struct select_kevent_copyin_args *kap, ka;
	int bytes, error;
	kfd_set read_tmp;
	kfd_set write_tmp;
	kfd_set except_tmp;

	*res = 0;
	if (nd < 0)
		return (EINVAL);
	if (nd == 0 && ts)
		return (dotimeout_only(ts));

	if (nd > p->p_fd->fd_nfiles)		/* limit kmalloc */
		nd = p->p_fd->fd_nfiles;

	kap = &ka;
	kap->lwp = curthread->td_lwp;
	kap->num_fds = nd;
	kap->proc_fds = 0;
	kap->error = 0;
	kap->active_set = COPYIN_READ;

	/*
	 * Calculate bytes based on the number of __fd_mask[] array entries
	 * multiplied by the size of __fd_mask.
	 */
	bytes = howmany(nd, __NFDBITS) * sizeof(__fd_mask);
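	/*
	 * For example, assuming a 64-bit __fd_mask: nd = 64 rounds to one
	 * mask word (8 bytes) and fits the on-stack kfd_set buffers passed
	 * to getbits() below, while nd = 128 needs two words (16 bytes) and
	 * makes getbits() fall back to a kmalloc'ed set.
	 */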
	/* kap->read_set = NULL; not needed */
	kap->write_set = NULL;
	kap->except_set = NULL;

	error = getbits(bytes, read, &kap->read_set, &read_tmp);
	if (error == 0)
		error = getbits(bytes, write, &kap->write_set, &write_tmp);
	if (error == 0)
		error = getbits(bytes, except, &kap->except_set, &except_tmp);
	if (error)
		goto done;

	/*
	 * NOTE: Make sure the max events passed to kern_kevent() is
	 *	 effectively unlimited; passing 0x7FFFFFFF accomplishes
	 *	 this.
	 *
	 *	 (*res) continues to increment as returned events are
	 *	 loaded in.
	 */
	error = kern_kevent(&kap->lwp->lwp_kqueue, 0x7FFFFFFF, res, kap,
			    select_copyin, select_copyout, ts, 0);
	if (error == 0)
		error = putbits(bytes, kap->read_set, read);
	if (error == 0)
		error = putbits(bytes, kap->write_set, write);
	if (error == 0)
		error = putbits(bytes, kap->except_set, except);

	/*
	 * An error from an individual event that should be passed
	 * back to userland (EBADF)
	 */
	if (kap->error)
		error = kap->error;

	/*
	 * Clean up.
	 */
done:
	if (kap->read_set && kap->read_set != &read_tmp)
		kfree(kap->read_set, M_SELECT);
	if (kap->write_set && kap->write_set != &write_tmp)
		kfree(kap->write_set, M_SELECT);
	if (kap->except_set && kap->except_set != &except_tmp)
		kfree(kap->except_set, M_SELECT);

	kap->lwp->lwp_kqueue_serial += kap->num_fds;

	return (error);
}

/*
 * Poll system call.
 *
 * MPSAFE
 */
int
sys_poll(struct poll_args *uap)
{
	struct timespec ts, *tsp;
	int error;

	if (uap->timeout != INFTIM) {
		if (uap->timeout < 0)
			return (EINVAL);
		ts.tv_sec = uap->timeout / 1000;
		ts.tv_nsec = (uap->timeout % 1000) * 1000 * 1000;
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	error = dopoll(uap->nfds, uap->fds, tsp, &uap->sysmsg_result, 0);

	return (error);
}
/*
 * Ppoll system call.
 *
 * MPSAFE
 */
int
sys_ppoll(struct ppoll_args *uap)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct timespec *ktsp, kts;
	sigset_t sigmask;
	int error;

	/*
	 * Get timeout if any.
	 */
	if (uap->ts != NULL) {
		error = copyin(uap->ts, &kts, sizeof (kts));
		if (error)
			return (error);
		ktsp = &kts;
	} else {
		ktsp = NULL;
	}

	/*
	 * Install temporary signal mask if any provided.
	 */
	if (uap->sigmask != NULL) {
		error = copyin(uap->sigmask, &sigmask, sizeof(sigmask));
		if (error)
			return (error);
		lwkt_gettoken(&lp->lwp_proc->p_token);
		lp->lwp_oldsigmask = lp->lwp_sigmask;
		SIG_CANTMASK(sigmask);
		lp->lwp_sigmask = sigmask;
		lwkt_reltoken(&lp->lwp_proc->p_token);
	}

	error = dopoll(uap->nfds, uap->fds, ktsp, &uap->sysmsg_result,
		       ktsp != NULL ? KEVENT_TIMEOUT_PRECISE : 0);

	if (uap->sigmask != NULL) {
		lwkt_gettoken(&lp->lwp_proc->p_token);
		/* dopoll() responsible for turning ERESTART into EINTR */
		KKASSERT(error != ERESTART);
		if (error == EINTR) {
			/*
			 * We can't restore the previous signal mask now
			 * because it could block the signal that interrupted
			 * us.  So make a note to restore it after executing
			 * the handler.
			 */
			lp->lwp_flags |= LWP_OLDMASK;
		} else {
			/*
			 * No handler to run.  Restore previous mask
			 * immediately.
			 */
			lp->lwp_sigmask = lp->lwp_oldsigmask;
		}
		lwkt_reltoken(&lp->lwp_proc->p_token);
	}

	return (error);
}

static int
poll_copyin(void *arg, struct kevent *kevp, int maxevents, int *events)
{
	struct poll_kevent_copyin_args *pkap;
	struct pollfd *pfd;
	struct kevent *kev;
	int kev_count;

	pkap = (struct poll_kevent_copyin_args *)arg;

	while (pkap->pfds < pkap->nfds) {
		pfd = &pkap->fds[pkap->pfds];

		/* Clear return events */
		pfd->revents = 0;

		/* Do not check if fd is equal to -1 */
		if (pfd->fd == -1) {
			++pkap->pfds;
			continue;
		}

		kev_count = 0;
		if (pfd->events & (POLLIN | POLLRDNORM))
			kev_count++;
		if (pfd->events & (POLLOUT | POLLWRNORM))
			kev_count++;
		if (pfd->events & (POLLPRI | POLLRDBAND))
			kev_count++;

		if (*events + kev_count > maxevents)
			return (0);

		/*
		 * NOTE: A combined serial number and poll array index is
		 *	 stored in kev->udata.
		 */
		kev = &kevp[*events];
		if (pfd->events & (POLLIN | POLLRDNORM)) {
			EV_SET(kev++, pfd->fd, EVFILT_READ, EV_ADD|EV_ENABLE,
			       NOTE_OLDAPI, 0, (void *)(uintptr_t)
				(pkap->lwp->lwp_kqueue_serial + pkap->pfds));
		}
		if (pfd->events & (POLLOUT | POLLWRNORM)) {
			EV_SET(kev++, pfd->fd, EVFILT_WRITE, EV_ADD|EV_ENABLE,
			       NOTE_OLDAPI, 0, (void *)(uintptr_t)
				(pkap->lwp->lwp_kqueue_serial + pkap->pfds));
		}
		if (pfd->events & (POLLPRI | POLLRDBAND)) {
			EV_SET(kev++, pfd->fd, EVFILT_EXCEPT, EV_ADD|EV_ENABLE,
			       NOTE_OLDAPI | NOTE_OOB, 0,
			       (void *)(uintptr_t)
				(pkap->lwp->lwp_kqueue_serial + pkap->pfds));
		}

		if (nseldebug) {
			kprintf("poll index %d/%d fd %d events %08x serial %d\n",
				pkap->pfds, pkap->nfds - 1, pfd->fd,
				pfd->events, pkap->lwp->lwp_kqueue_serial);
		}

		++pkap->pfds;
		(*events) += kev_count;
	}

	return (0);
}
static int
poll_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct poll_kevent_copyin_args *pkap;
	struct pollfd *pfd;
	struct kevent kev;
	int count_res;
	int i;
	u_int pi;

	pkap = (struct poll_kevent_copyin_args *)arg;

	for (i = 0; i < count; ++i) {
		/*
		 * Extract the poll array index and delete spurious events.
		 * We can easily tell if the serial number is incorrect
		 * by checking whether the extracted index is out of range.
		 */
		pi = (u_int)(uintptr_t)kevp[i].udata -
		     (u_int)pkap->lwp->lwp_kqueue_serial;

		if (pi >= pkap->nfds) {
			kev = kevp[i];
			kev.flags = EV_DISABLE|EV_DELETE;
			kqueue_register(&pkap->lwp->lwp_kqueue, &kev);
			if (nseldebug)
				kprintf("poll index %d out of range against serial %d\n",
					pi, pkap->lwp->lwp_kqueue_serial);
			continue;
		}
		pfd = &pkap->fds[pi];
		if (kevp[i].ident == pfd->fd) {
			/*
			 * A single descriptor may generate an error against
			 * more than one filter, make sure to set the
			 * appropriate flags but do not increment (*res)
			 * more than once.
			 */
			count_res = (pfd->revents == 0);
			if (kevp[i].flags & EV_ERROR) {
				switch(kevp[i].data) {
				case EBADF:
				case POLLNVAL:
					/* Bad file descriptor */
					if (count_res)
						++*res;
					pfd->revents |= POLLNVAL;
					break;
				default:
					/*
					 * Poll silently swallows any unknown
					 * errors except in the case of POLLPRI
					 * (OOB/urgent data).
					 *
					 * ALWAYS filter out EOPNOTSUPP errors
					 * from filters, common applications
					 * set POLLPRI|POLLRDBAND and most
					 * filters do not support EVFILT_EXCEPT.
					 *
					 * We also filter out ENODEV since
					 * dev_dkqfilter returns ENODEV if
					 * EOPNOTSUPP is returned in an
					 * inner call.
					 *
					 * XXX: fix this
					 */
					if (kevp[i].filter != EVFILT_READ &&
					    kevp[i].filter != EVFILT_WRITE &&
					    kevp[i].data != EOPNOTSUPP &&
					    kevp[i].data != ENODEV) {
						if (count_res == 0)
							++*res;
						pfd->revents |= POLLERR;
					}
					break;
				}
				if (nseldebug) {
					kprintf("poll index %d fd %d "
						"filter %d error %jd\n",
						pi, pfd->fd,
						kevp[i].filter,
						(intmax_t)kevp[i].data);
				}
				continue;
			}

			switch (kevp[i].filter) {
			case EVFILT_READ:
#if 0
				/*
				 * NODATA on the read side can indicate a
				 * half-closed situation and not necessarily
				 * a disconnect, so depend on the user
				 * issuing a read() and getting 0 bytes back.
				 */
				if (kevp[i].flags & EV_NODATA)
					pfd->revents |= POLLHUP;
#endif
				if ((kevp[i].flags & EV_EOF) &&
				    kevp[i].fflags != 0)
					pfd->revents |= POLLERR;
				if (pfd->events & POLLIN)
					pfd->revents |= POLLIN;
				if (pfd->events & POLLRDNORM)
					pfd->revents |= POLLRDNORM;
				break;
			case EVFILT_WRITE:
				/*
				 * As per the OpenGroup POLLHUP is mutually
				 * exclusive with the writability flags.  I
				 * consider this a bit broken but...
				 *
				 * In this case a disconnect is implied even
				 * for a half-closed (write side) situation.
				 */
				if (kevp[i].flags & EV_EOF) {
					pfd->revents |= POLLHUP;
					if (kevp[i].fflags != 0)
						pfd->revents |= POLLERR;
				} else {
					if (pfd->events & POLLOUT)
						pfd->revents |= POLLOUT;
					if (pfd->events & POLLWRNORM)
						pfd->revents |= POLLWRNORM;
				}
				break;
			case EVFILT_EXCEPT:
				/*
				 * EV_NODATA should never be tagged for this
				 * filter.
				 */
				if (pfd->events & POLLPRI)
					pfd->revents |= POLLPRI;
				if (pfd->events & POLLRDBAND)
					pfd->revents |= POLLRDBAND;
				break;
			}

			if (nseldebug) {
				kprintf("poll index %d/%d fd %d revents %08x\n",
					pi, pkap->nfds, pfd->fd, pfd->revents);
			}

			if (count_res && pfd->revents)
				++*res;
		} else {
			if (nseldebug) {
				kprintf("poll index %d mismatch %ju/%d\n",
					pi, (uintmax_t)kevp[i].ident, pfd->fd);
			}
		}
	}

	return (0);
}
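/*
 * Note on the udata encoding used by poll_copyin()/poll_copyout(): the
 * stored value is lwp_kqueue_serial plus the pollfd array index, so a
 * single subtraction in poll_copyout() recovers the index.  dopoll()
 * advances the serial by nfds after each call, which pushes any stale
 * event's computed index out of the [0, nfds) range and causes the event
 * to be deleted instead of reported.
 */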
static int
dopoll(int nfds, struct pollfd *fds, struct timespec *ts, int *res, int flags)
{
	struct poll_kevent_copyin_args ka;
	struct pollfd sfds[64];
	int bytes;
	int error;

	*res = 0;
	if (nfds < 0)
		return (EINVAL);

	if (nfds == 0 && ts)
		return (dotimeout_only(ts));

	/*
	 * This is a bit arbitrary but we need to limit internal kmallocs.
	 */
	if (nfds > maxfilesperproc * 2)
		nfds = maxfilesperproc * 2;
	bytes = sizeof(struct pollfd) * nfds;

	ka.lwp = curthread->td_lwp;
	ka.nfds = nfds;
	ka.pfds = 0;
	ka.error = 0;

	if (ka.nfds < 64)
		ka.fds = sfds;
	else
		ka.fds = kmalloc(bytes, M_SELECT, M_WAITOK);

	error = copyin(fds, ka.fds, bytes);
	if (error == 0)
		error = kern_kevent(&ka.lwp->lwp_kqueue, 0x7FFFFFFF, res, &ka,
				    poll_copyin, poll_copyout, ts, flags);

	if (error == 0)
		error = copyout(ka.fds, fds, bytes);

	if (ka.fds != sfds)
		kfree(ka.fds, M_SELECT);

	ka.lwp->lwp_kqueue_serial += nfds;

	return (error);
}

static int
socket_wait_copyin(void *arg, struct kevent *kevp, int maxevents, int *events)
{
	return (0);
}

static int
socket_wait_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	++*res;
	return (0);
}

extern struct fileops socketops;

/*
 * NOTE: Callers of socket_wait() must already have a reference on the
 *	 socket.
 */
int
socket_wait(struct socket *so, struct timespec *ts, int *res)
{
	struct thread *td = curthread;
	struct file *fp;
	struct kqueue kq;
	struct kevent kev;
	int error, fd;

	if ((error = falloc(td->td_lwp, &fp, &fd)) != 0)
		return (error);

	fp->f_type = DTYPE_SOCKET;
	fp->f_flag = FREAD | FWRITE;
	fp->f_ops = &socketops;
	fp->f_data = so;
	fsetfd(td->td_lwp->lwp_proc->p_fd, fp, fd);
	fsetfdflags(td->td_proc->p_fd, fd, UF_EXCLOSE);

	bzero(&kq, sizeof(kq));
	kqueue_init(&kq, td->td_lwp->lwp_proc->p_fd);
	EV_SET(&kev, fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, NULL);
	if ((error = kqueue_register(&kq, &kev)) != 0) {
		fdrop(fp);
		return (error);
	}

	error = kern_kevent(&kq, 1, res, NULL, socket_wait_copyin,
			    socket_wait_copyout, ts, 0);

	EV_SET(&kev, fd, EVFILT_READ, EV_DELETE|EV_DISABLE, 0, 0, NULL);
	kqueue_register(&kq, &kev);
	fp->f_ops = &badfileops;
	fdrop(fp);

	return (error);
}

/*
 * OpenBSD poll system call.
 * XXX this isn't quite a true representation..  OpenBSD uses select ops.
 *
 * MPSAFE
 */
int
sys_openbsd_poll(struct openbsd_poll_args *uap)
{
	return (sys_poll((struct poll_args *)uap));
}

/*ARGSUSED*/
int
seltrue(cdev_t dev, int events)
{
	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}