/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/sys_generic.c,v 1.55.2.10 2001/03/17 10:39:32 peter Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/event.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/socketvar.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/kern_syscall.h>
#include <sys/mapped_ioctl.h>
#include <sys/poll.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/socketops.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/buf.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <vm/vm.h>
#include <vm/vm_page.h>

#include <sys/file2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>

static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_IOCTLMAP, "ioctlmap", "mapped ioctl handler buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");

/*
 * Small kernel-side fd_set.  Used as an on-stack buffer for small
 * descriptor sets; getbits() kmallocs larger sets by byte count.
 */
typedef struct kfd_set {
	fd_mask	fds_bits[2];
} kfd_set;

enum select_copyin_states {
    COPYIN_READ, COPYIN_WRITE, COPYIN_EXCEPT, COPYIN_DONE };

struct select_kevent_copyin_args {
	kfd_set		*read_set;
	kfd_set		*write_set;
	kfd_set		*except_set;
	int		active_set;	/* One of select_copyin_states */
	struct lwp	*lwp;		/* Pointer to our lwp */
	int		num_fds;	/* Number of file descriptors (syscall arg) */
	int		proc_fds;	/* Processed fd's (wraps) */
	int		error;		/* Returned to userland */
};

struct poll_kevent_copyin_args {
	struct lwp	*lwp;
	struct pollfd	*fds;
	int		nfds;
	int		pfds;
	int		error;
};

static struct lwkt_token mioctl_token = LWKT_TOKEN_INITIALIZER(mioctl_token);

static int	doselect(int nd, fd_set *in, fd_set *ou, fd_set *ex,
			 struct timespec *ts, int *res);
static int	dopoll(int nfds, struct pollfd *fds, struct timespec *ts,
		       int *res, int flags);
static int	dofileread(int, struct file *, struct uio *, int, size_t *);
static int	dofilewrite(int, struct file *, struct uio *, int, size_t *);
/*
 * Read system call.
 *
 * MPSAFE
 */
int
sys_read(struct read_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ssize_t)uap->nbyte < 0)
		return (EINVAL);

	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = -1;
	auio.uio_resid = uap->nbyte;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	error = kern_preadv(uap->fd, &auio, 0, &uap->sysmsg_szresult);
	return(error);
}

/*
 * Positioned (Pread) read system call
 *
 * MPSAFE
 */
int
sys_extpread(struct extpread_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	int error;
	int flags;

	if ((ssize_t)uap->nbyte < 0)
		return(EINVAL);

	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = uap->offset;
	auio.uio_resid = uap->nbyte;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	flags = uap->flags & O_FMASK;
	if (uap->offset != (off_t)-1)
		flags |= O_FOFFSET;

	error = kern_preadv(uap->fd, &auio, flags, &uap->sysmsg_szresult);
	return(error);
}

/*
 * Scatter read system call.
 *
 * MPSAFE
 */
int
sys_readv(struct readv_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	int error;

	error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
			     &auio.uio_resid);
	if (error)
		return (error);
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_offset = -1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	error = kern_preadv(uap->fd, &auio, 0, &uap->sysmsg_szresult);

	iovec_free(&iov, aiov);
	return (error);
}


/*
 * Scatter positioned read system call.
 *
 * MPSAFE
 */
int
sys_extpreadv(struct extpreadv_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	int error;
	int flags;

	error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
			     &auio.uio_resid);
	if (error)
		return (error);
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_offset = uap->offset;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	flags = uap->flags & O_FMASK;
	if (uap->offset != (off_t)-1)
		flags |= O_FOFFSET;

	error = kern_preadv(uap->fd, &auio, flags, &uap->sysmsg_szresult);

	iovec_free(&iov, aiov);
	return(error);
}

/*
 * MPSAFE
 */
int
kern_preadv(int fd, struct uio *auio, int flags, size_t *res)
{
	struct thread *td = curthread;
	struct file *fp;
	int error;

	fp = holdfp(td, fd, FREAD);
	if (fp == NULL)
		return (EBADF);
	if (flags & O_FOFFSET && fp->f_type != DTYPE_VNODE) {
		error = ESPIPE;
	} else {
		error = dofileread(fd, fp, auio, flags, res);
	}
	dropfp(td, fd, fp);

	return(error);
}
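/*
 * Illustrative userland usage of the positioned read calls above (a
 * sketch, not part of the original source; it assumes DragonFly's libc
 * wrapper extpread(int fd, void *buf, size_t nbyte, int flags,
 * off_t offset)).  Passing an offset of -1 skips O_FOFFSET, so the read
 * uses, and advances, the current file position:
 *
 *	char buf[512];
 *	ssize_t n;
 *
 *	n = extpread(fd, buf, sizeof(buf), 0, 4096);	read at offset 4096
 *	n = extpread(fd, buf, sizeof(buf), 0, -1);	read at current position
 */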
/*
 * Common code for readv and preadv that reads data in
 * from a file using the passed in uio, offset, and flags.
 *
 * MPALMOSTSAFE - ktrace needs help
 */
static int
dofileread(int fd, struct file *fp, struct uio *auio, int flags, size_t *res)
{
	int error;
	size_t len;
#ifdef KTRACE
	struct thread *td = curthread;
	struct iovec *ktriov = NULL;
	struct uio ktruio;
#endif

#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec
	 */
	if (KTRPOINT(td, KTR_GENIO)) {
		int iovlen = auio->uio_iovcnt * sizeof(struct iovec);

		ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
		bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
		ktruio = *auio;
	}
#endif
	len = auio->uio_resid;
	error = fo_read(fp, auio, fp->f_cred, flags);
	if (error) {
		if (auio->uio_resid != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0) {
			ktruio.uio_iov = ktriov;
			ktruio.uio_resid = len - auio->uio_resid;
			ktrgenio(td->td_lwp, fd, UIO_READ, &ktruio, error);
		}
		kfree(ktriov, M_TEMP);
	}
#endif
	if (error == 0)
		*res = len - auio->uio_resid;

	return(error);
}

/*
 * Write system call
 *
 * MPSAFE
 */
int
sys_write(struct write_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ssize_t)uap->nbyte < 0)
		return (EINVAL);

	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = -1;
	auio.uio_resid = uap->nbyte;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	error = kern_pwritev(uap->fd, &auio, 0, &uap->sysmsg_szresult);

	return(error);
}

/*
 * Pwrite system call
 *
 * MPSAFE
 */
int
sys_extpwrite(struct extpwrite_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	int error;
	int flags;

	if ((ssize_t)uap->nbyte < 0)
		return (EINVAL);

	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = uap->offset;
	auio.uio_resid = uap->nbyte;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	flags = uap->flags & O_FMASK;
	if (uap->offset != (off_t)-1)
		flags |= O_FOFFSET;
	error = kern_pwritev(uap->fd, &auio, flags, &uap->sysmsg_szresult);
	return(error);
}

/*
 * MPSAFE
 */
int
sys_writev(struct writev_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	int error;

	error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
			     &auio.uio_resid);
	if (error)
		return (error);
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_offset = -1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	error = kern_pwritev(uap->fd, &auio, 0, &uap->sysmsg_szresult);

	iovec_free(&iov, aiov);
	return (error);
}

/*
 * Gather positioned write system call
 *
 * MPSAFE
 */
int
sys_extpwritev(struct extpwritev_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	int error;
	int flags;

	error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt,
			     &auio.uio_resid);
	if (error)
		return (error);
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_offset = uap->offset;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	flags = uap->flags & O_FMASK;
	if (uap->offset != (off_t)-1)
		flags |= O_FOFFSET;

	error = kern_pwritev(uap->fd, &auio, flags, &uap->sysmsg_szresult);

	iovec_free(&iov, aiov);
	return(error);
}

/*
 * MPSAFE
 */
int
kern_pwritev(int fd, struct uio *auio, int flags, size_t *res)
{
	struct thread *td = curthread;
	struct file *fp;
	int error;

	fp = holdfp(td, fd, FWRITE);
	if (fp == NULL)
		return (EBADF);
	else if ((flags & O_FOFFSET) && fp->f_type != DTYPE_VNODE) {
		error = ESPIPE;
	} else {
		error = dofilewrite(fd, fp, auio, flags, res);
	}
	dropfp(td, fd, fp);

	return(error);
}

/*
 * Common code for writev and pwritev that writes data to
 * a file using the passed in uio, offset, and flags.
 *
 * MPALMOSTSAFE - ktrace needs help
 */
static int
dofilewrite(int fd, struct file *fp, struct uio *auio, int flags, size_t *res)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	int error;
	size_t len;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
	struct uio ktruio;
#endif

#ifdef KTRACE
	/*
	 * if tracing, save a copy of iovec and uio
	 */
	if (KTRPOINT(td, KTR_GENIO)) {
		int iovlen = auio->uio_iovcnt * sizeof(struct iovec);

		ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
		bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
		ktruio = *auio;
	}
#endif
	len = auio->uio_resid;
	error = fo_write(fp, auio, fp->f_cred, flags);
	if (error) {
		if (auio->uio_resid != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/* Socket layer is responsible for issuing SIGPIPE. */
		if (error == EPIPE && fp->f_type != DTYPE_SOCKET)
			lwpsignal(lp->lwp_proc, lp, SIGPIPE);
	}
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0) {
			ktruio.uio_iov = ktriov;
			ktruio.uio_resid = len - auio->uio_resid;
			ktrgenio(lp, fd, UIO_WRITE, &ktruio, error);
		}
		kfree(ktriov, M_TEMP);
	}
#endif
	if (error == 0)
		*res = len - auio->uio_resid;

	return(error);
}

/*
 * Ioctl system call
 *
 * MPSAFE
 */
int
sys_ioctl(struct ioctl_args *uap)
{
	int error;

	error = mapped_ioctl(uap->fd, uap->com, uap->data, NULL, &uap->sysmsg);
	return (error);
}

struct ioctl_map_entry {
	const char *subsys;
	struct ioctl_map_range *cmd_ranges;
	LIST_ENTRY(ioctl_map_entry) entries;
};
/*
 * The true heart of all ioctl syscall handlers (native, emulation).
 * If map != NULL, it will be searched for a matching entry for com,
 * and appropriate conversions/conversion functions will be utilized.
 *
 * MPSAFE
 */
int
mapped_ioctl(int fd, u_long com, caddr_t uspc_data, struct ioctl_map *map,
	     struct sysmsg *msg)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct ucred *cred;
	struct file *fp;
	struct ioctl_map_range *iomc = NULL;
	int error;
	u_int size;
	u_long ocom = com;
	caddr_t data, memp;
	int tmp;
#define STK_PARAMS	128
	union {
	    char stkbuf[STK_PARAMS];
	    long align;
	} ubuf;

	KKASSERT(p);
	cred = td->td_ucred;
	memp = NULL;

	fp = holdfp(td, fd, FREAD|FWRITE);
	if (fp == NULL)
		return(EBADF);

	if (map != NULL) {	/* obey translation map */
		u_long maskcmd;
		struct ioctl_map_entry *e;

		maskcmd = com & map->mask;

		lwkt_gettoken(&mioctl_token);
		LIST_FOREACH(e, &map->mapping, entries) {
			for (iomc = e->cmd_ranges; iomc->start != 0 ||
			     iomc->maptocmd != 0 || iomc->wrapfunc != NULL ||
			     iomc->mapfunc != NULL;
			     iomc++) {
				if (maskcmd >= iomc->start &&
				    maskcmd <= iomc->end)
					break;
			}

			/* Did we find a match? */
			if (iomc->start != 0 || iomc->maptocmd != 0 ||
			    iomc->wrapfunc != NULL || iomc->mapfunc != NULL)
				break;
		}
		lwkt_reltoken(&mioctl_token);

		if (iomc == NULL ||
		    (iomc->start == 0 && iomc->maptocmd == 0
		     && iomc->wrapfunc == NULL && iomc->mapfunc == NULL)) {
			kprintf("%s: 'ioctl' fd=%d, cmd=0x%lx ('%c',%d) not implemented\n",
			       map->sys, fd, maskcmd,
			       (int)((maskcmd >> 8) & 0xff),
			       (int)(maskcmd & 0xff));
			error = EINVAL;
			goto done;
		}

		/*
		 * If it's a non-range one to one mapping, maptocmd should be
		 * correct.  If it's a ranged one to one mapping, we pass the
		 * original value of com, and for a range mapped to a different
		 * range, we always need a mapping function to translate the
		 * ioctl to our native ioctl.  Ex. 6500-65ff <-> 9500-95ff
		 */
		if (iomc->start == iomc->end && iomc->maptocmd == iomc->maptoend) {
			com = iomc->maptocmd;
		} else if (iomc->start == iomc->maptocmd && iomc->end == iomc->maptoend) {
			if (iomc->mapfunc != NULL)
				com = iomc->mapfunc(iomc->start, iomc->end,
						    iomc->start, iomc->end,
						    com, com);
		} else {
			if (iomc->mapfunc != NULL) {
				com = iomc->mapfunc(iomc->start, iomc->end,
						    iomc->maptocmd, iomc->maptoend,
						    com, ocom);
			} else {
				kprintf("%s: Invalid mapping for fd=%d, cmd=%#lx ('%c',%d)\n",
				       map->sys, fd, maskcmd,
				       (int)((maskcmd >> 8) & 0xff),
				       (int)(maskcmd & 0xff));
				error = EINVAL;
				goto done;
			}
		}
	}

	switch (com) {
	case FIONCLEX:
		error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
		goto done;
	case FIOCLEX:
		error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
		goto done;
	}

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
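	/*
	 * Concrete instance (illustrative): FIONBIO is _IOW('f', 126, int),
	 * so IOCPARM_LEN(FIONBIO) is sizeof(int) and the IOC_IN bit asks
	 * for the int argument to be copied in below before the FIONBIO
	 * case is handled.
	 */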
	size = IOCPARM_LEN(com);
	if (size > IOCPARM_MAX) {
		error = ENOTTY;
		goto done;
	}

	if ((com & IOC_VOID) == 0 && size > sizeof(ubuf.stkbuf)) {
		memp = kmalloc(size, M_IOCTLOPS, M_WAITOK);
		data = memp;
	} else {
		memp = NULL;
		data = ubuf.stkbuf;
	}
	if (com & IOC_VOID) {
		*(caddr_t *)data = uspc_data;
	} else if (com & IOC_IN) {
		if (size != 0) {
			error = copyin(uspc_data, data, (size_t)size);
			if (error)
				goto done;
		} else {
			*(caddr_t *)data = uspc_data;
		}
	} else if ((com & IOC_OUT) != 0 && size) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(data, (size_t)size);
	}

	switch (com) {
	case FIONBIO:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FNONBLOCK);
		else
			atomic_clear_int(&fp->f_flag, FNONBLOCK);
		error = 0;
		break;

	case FIOASYNC:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FASYNC);
		else
			atomic_clear_int(&fp->f_flag, FASYNC);
		error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, cred, msg);
		break;

	default:
		/*
		 * If there is an override function,
		 * call it instead of directly routing the call
		 */
		if (map != NULL && iomc->wrapfunc != NULL)
			error = iomc->wrapfunc(fp, com, ocom, data, cred);
		else
			error = fo_ioctl(fp, com, data, cred, msg);
		/*
		 * Copy any data to user, size was
		 * already set and checked above.
		 */
		if (error == 0 && (com & IOC_OUT) != 0 && size != 0)
			error = copyout(data, uspc_data, (size_t)size);
		break;
	}
done:
	if (memp != NULL)
		kfree(memp, M_IOCTLOPS);
	dropfp(td, fd, fp);

	return(error);
}

/*
 * MPSAFE
 */
int
mapped_ioctl_register_handler(struct ioctl_map_handler *he)
{
	struct ioctl_map_entry *ne;

	KKASSERT(he != NULL && he->map != NULL && he->cmd_ranges != NULL &&
		 he->subsys != NULL && *he->subsys != '\0');

	ne = kmalloc(sizeof(struct ioctl_map_entry), M_IOCTLMAP,
		     M_WAITOK | M_ZERO);

	ne->subsys = he->subsys;
	ne->cmd_ranges = he->cmd_ranges;

	lwkt_gettoken(&mioctl_token);
	LIST_INSERT_HEAD(&he->map->mapping, ne, entries);
	lwkt_reltoken(&mioctl_token);

	return(0);
}

/*
 * MPSAFE
 */
int
mapped_ioctl_unregister_handler(struct ioctl_map_handler *he)
{
	struct ioctl_map_entry *ne;
	int error = EINVAL;

	KKASSERT(he != NULL && he->map != NULL && he->cmd_ranges != NULL);

	lwkt_gettoken(&mioctl_token);
	LIST_FOREACH(ne, &he->map->mapping, entries) {
		if (ne->cmd_ranges == he->cmd_ranges) {
			LIST_REMOVE(ne, entries);
			kfree(ne, M_IOCTLMAP);
			error = 0;
			break;
		}
	}
	lwkt_reltoken(&mioctl_token);
	return(error);
}
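/*
 * Illustrative registration sketch (hypothetical names, not part of the
 * original source): an emulation layer maps a foreign ioctl range onto
 * native commands, terminating cmd_ranges with an all-zero sentinel as
 * required by the table walk in mapped_ioctl():
 *
 *	static struct ioctl_map_range foo_cmd_ranges[] = {
 *		{ .start = 0x6500, .end = 0x65ff,
 *		  .maptocmd = 0x9500, .maptoend = 0x95ff,
 *		  .mapfunc = foo_map_cmd, .wrapfunc = NULL },
 *		{ 0 }
 *	};
 *	static struct ioctl_map_handler foo_handler = {
 *		.map = &foo_ioctl_map,	(the emulation's struct ioctl_map)
 *		.subsys = "foo",
 *		.cmd_ranges = foo_cmd_ranges
 *	};
 *
 *	mapped_ioctl_register_handler(&foo_handler);
 */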
static int	nselcoll;	/* Select collisions since boot */
int	selwait;
SYSCTL_INT(_kern, OID_AUTO, nselcoll, CTLFLAG_RD, &nselcoll, 0, "");
static int	nseldebug;
SYSCTL_INT(_kern, OID_AUTO, nseldebug, CTLFLAG_RW, &nseldebug, 0, "");

/*
 * Select system call.
 *
 * MPSAFE
 */
int
sys_select(struct select_args *uap)
{
	struct timeval ktv;
	struct timespec *ktsp, kts;
	int error;

	/*
	 * Get timeout if any.
	 */
	if (uap->tv != NULL) {
		error = copyin(uap->tv, &ktv, sizeof (ktv));
		if (error)
			return (error);
		TIMEVAL_TO_TIMESPEC(&ktv, &kts);
		ktsp = &kts;
	} else {
		ktsp = NULL;
	}

	/*
	 * Do real work.
	 */
	error = doselect(uap->nd, uap->in, uap->ou, uap->ex, ktsp,
			 &uap->sysmsg_result);

	return (error);
}


/*
 * Pselect system call.
 */
int
sys_pselect(struct pselect_args *uap)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct timespec *ktsp, kts;
	sigset_t sigmask;
	int error;

	/*
	 * Get timeout if any.
	 */
	if (uap->ts != NULL) {
		error = copyin(uap->ts, &kts, sizeof (kts));
		if (error)
			return (error);
		ktsp = &kts;
	} else {
		ktsp = NULL;
	}

	/*
	 * Install temporary signal mask if any provided.
	 */
	if (uap->sigmask != NULL) {
		error = copyin(uap->sigmask, &sigmask, sizeof(sigmask));
		if (error)
			return (error);
		lwkt_gettoken(&lp->lwp_proc->p_token);
		lp->lwp_oldsigmask = lp->lwp_sigmask;
		SIG_CANTMASK(sigmask);
		lp->lwp_sigmask = sigmask;
		lwkt_reltoken(&lp->lwp_proc->p_token);
	}

	/*
	 * Do real job.
	 */
	error = doselect(uap->nd, uap->in, uap->ou, uap->ex, ktsp,
			 &uap->sysmsg_result);

	if (uap->sigmask != NULL) {
		lwkt_gettoken(&lp->lwp_proc->p_token);
		/* doselect() responsible for turning ERESTART into EINTR */
		KKASSERT(error != ERESTART);
		if (error == EINTR) {
			/*
			 * We can't restore the previous signal mask now
			 * because it could block the signal that interrupted
			 * us.  So make a note to restore it after executing
			 * the handler.
			 */
			lp->lwp_flags |= LWP_OLDMASK;
		} else {
			/*
			 * No handler to run.  Restore previous mask
			 * immediately.
			 */
			lp->lwp_sigmask = lp->lwp_oldsigmask;
		}
		lwkt_reltoken(&lp->lwp_proc->p_token);
	}

	return (error);
}
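/*
 * Illustrative userland pattern (a sketch, not part of the original
 * source): pselect() exists so the sigmask swap above and the sleep
 * happen atomically, closing the race where a signal arrives after an
 * application unblocks it but before it calls select():
 *
 *	sigset_t blockmask, waitmask;
 *
 *	sigemptyset(&blockmask);
 *	sigaddset(&blockmask, SIGINT);
 *	sigprocmask(SIG_BLOCK, &blockmask, &waitmask);
 *	...
 *	(SIGINT is unblocked only for the duration of the wait)
 *	n = pselect(maxfd + 1, &rset, NULL, NULL, NULL, &waitmask);
 */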
static int
select_copyin(void *arg, struct kevent *kevp, int maxevents, int *events)
{
	struct select_kevent_copyin_args *skap = NULL;
	struct kevent *kev;
	int fd;
	kfd_set *fdp = NULL;
	short filter = 0;
	u_int fflags = 0;

	skap = (struct select_kevent_copyin_args *)arg;

	if (*events == maxevents)
		return (0);

	while (skap->active_set < COPYIN_DONE) {
		switch (skap->active_set) {
		case COPYIN_READ:
			/*
			 * Register descriptors for the read filter
			 */
			fdp = skap->read_set;
			filter = EVFILT_READ;
			fflags = NOTE_OLDAPI;
			if (fdp)
				break;
			++skap->active_set;
			skap->proc_fds = 0;
			/* fall through */
		case COPYIN_WRITE:
			/*
			 * Register descriptors for the write filter
			 */
			fdp = skap->write_set;
			filter = EVFILT_WRITE;
			fflags = NOTE_OLDAPI;
			if (fdp)
				break;
			++skap->active_set;
			skap->proc_fds = 0;
			/* fall through */
		case COPYIN_EXCEPT:
			/*
			 * Register descriptors for the exception filter
			 */
			fdp = skap->except_set;
			filter = EVFILT_EXCEPT;
			fflags = NOTE_OLDAPI | NOTE_OOB;
			if (fdp)
				break;
			++skap->active_set;
			skap->proc_fds = 0;
			/* fall through */
		case COPYIN_DONE:
			/*
			 * Nothing left to register
			 */
			return(0);
			/* NOT REACHED */
		}

		while (skap->proc_fds < skap->num_fds) {
			fd = skap->proc_fds;
			if (FD_ISSET(fd, fdp)) {
				kev = &kevp[*events];
				EV_SET(kev, fd, filter,
				       EV_ADD|EV_ENABLE,
				       fflags, 0,
				       (void *)(uintptr_t)
					skap->lwp->lwp_kqueue_serial);
				FD_CLR(fd, fdp);
				++*events;

				if (nseldebug) {
					kprintf("select fd %d filter %d "
						"serial %ju\n", fd, filter,
						(uintmax_t)
						skap->lwp->lwp_kqueue_serial);
				}
			}
			++skap->proc_fds;
			if (*events == maxevents)
				return (0);
		}
		skap->active_set++;
		skap->proc_fds = 0;
	}

	return (0);
}
static int
select_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct select_kevent_copyin_args *skap;
	struct kevent kev;
	int i;
	int n;

	skap = (struct select_kevent_copyin_args *)arg;

	for (i = 0; i < count; ++i) {
		/*
		 * Filter out and delete spurious events
		 */
		if ((uint64_t)(uintptr_t)kevp[i].udata !=
		    skap->lwp->lwp_kqueue_serial) {
			kev = kevp[i];
			kev.flags = EV_DISABLE|EV_DELETE;
			n = 1;
			kqueue_register(&skap->lwp->lwp_kqueue, &kev, &n);
			if (nseldebug) {
				kprintf("select fd %ju mismatched serial %ju\n",
					(uintmax_t)kevp[i].ident,
					(uintmax_t)skap->lwp->lwp_kqueue_serial);
			}
			continue;
		}

		/*
		 * Handle errors
		 */
		if (kevp[i].flags & EV_ERROR) {
			int error = kevp[i].data;

			switch (error) {
			case EBADF:
				/*
				 * A bad file descriptor is considered a
				 * fatal error for select, bail out.
				 */
				skap->error = error;
				*res = -1;
				return error;

			default:
				/*
				 * Select silently swallows any unknown errors
				 * for descriptors in the read or write sets.
				 *
				 * ALWAYS filter out EOPNOTSUPP errors from
				 * filters (at least until all filters support
				 * EVFILT_EXCEPT).
				 *
				 * We also filter out ENODEV since dev_dkqfilter
				 * returns ENODEV if EOPNOTSUPP is returned in
				 * an inner call.
				 *
				 * XXX: fix this
				 */
				if (kevp[i].filter != EVFILT_READ &&
				    kevp[i].filter != EVFILT_WRITE &&
				    error != EOPNOTSUPP &&
				    error != ENODEV) {
					skap->error = error;
					*res = -1;
					return error;
				}
				break;
			}
			if (nseldebug)
				kprintf("select fd %ju filter %d error %d\n",
					(uintmax_t)kevp[i].ident,
					kevp[i].filter, error);
			continue;
		}

		switch (kevp[i].filter) {
		case EVFILT_READ:
			FD_SET(kevp[i].ident, skap->read_set);
			break;
		case EVFILT_WRITE:
			FD_SET(kevp[i].ident, skap->write_set);
			break;
		case EVFILT_EXCEPT:
			FD_SET(kevp[i].ident, skap->except_set);
			break;
		}

		++*res;
	}

	return (0);
}

/*
 * Copy select bits in from userland.  Allocate kernel memory if the
 * set is large.
 */
static int
getbits(int bytes, fd_set *in_set, kfd_set **out_set, kfd_set *tmp_set)
{
	int error;

	if (in_set) {
		if (bytes < sizeof(*tmp_set))
			*out_set = tmp_set;
		else
			*out_set = kmalloc(bytes, M_SELECT, M_WAITOK);
		error = copyin(in_set, *out_set, bytes);
	} else {
		*out_set = NULL;
		error = 0;
	}
	return (error);
}

/*
 * Copy returned select bits back out to userland.
 */
static int
putbits(int bytes, kfd_set *in_set, fd_set *out_set)
{
	int error;

	if (in_set) {
		error = copyout(in_set, out_set, bytes);
	} else {
		error = 0;
	}
	return (error);
}

static int
dotimeout_only(struct timespec *ts)
{
	return(nanosleep1(ts, NULL));
}

/*
 * Common code for sys_select() and sys_pselect().
 *
 * in, out and ex are userland pointers.  ts must point to validated
 * kernel-side timeout value or NULL for infinite timeout.  res must
 * point to syscall return value.
 */
static int
doselect(int nd, fd_set *read, fd_set *write, fd_set *except,
	 struct timespec *ts, int *res)
{
	struct proc *p = curproc;
	struct select_kevent_copyin_args *kap, ka;
	int bytes, error;
	kfd_set read_tmp;
	kfd_set write_tmp;
	kfd_set except_tmp;

	*res = 0;
	if (nd < 0)
		return (EINVAL);
	if (nd == 0 && ts)
		return (dotimeout_only(ts));

	if (nd > p->p_fd->fd_nfiles)		/* limit kmalloc */
		nd = p->p_fd->fd_nfiles;

	kap = &ka;
	kap->lwp = curthread->td_lwp;
	kap->num_fds = nd;
	kap->proc_fds = 0;
	kap->error = 0;
	kap->active_set = COPYIN_READ;

	/*
	 * Calculate bytes based on the number of __fd_mask[] array entries
	 * multiplied by the size of __fd_mask.  For example, with a 64-bit
	 * __fd_mask and nd = 100, howmany(100, 64) yields two masks, so
	 * bytes = 16.
	 */
	bytes = howmany(nd, __NFDBITS) * sizeof(__fd_mask);

	/* kap->read_set = NULL; not needed */
	kap->write_set = NULL;
	kap->except_set = NULL;

	error = getbits(bytes, read, &kap->read_set, &read_tmp);
	if (error == 0)
		error = getbits(bytes, write, &kap->write_set, &write_tmp);
	if (error == 0)
		error = getbits(bytes, except, &kap->except_set, &except_tmp);
	if (error)
		goto done;

	/*
	 * NOTE: Make sure the max events passed to kern_kevent() is
	 *	 effectively unlimited; passing 0x7FFFFFFF accomplishes this.
	 *
	 *	 (*res) continues to increment as returned events are
	 *	 loaded in.
	 */
	error = kern_kevent(&kap->lwp->lwp_kqueue, 0x7FFFFFFF, res, kap,
			    select_copyin, select_copyout, ts, 0);
	if (error == 0)
		error = putbits(bytes, kap->read_set, read);
	if (error == 0)
		error = putbits(bytes, kap->write_set, write);
	if (error == 0)
		error = putbits(bytes, kap->except_set, except);

	/*
	 * An error from an individual event that should be passed
	 * back to userland (EBADF)
	 */
	if (kap->error)
		error = kap->error;

	/*
	 * Clean up.
	 */
done:
	if (kap->read_set && kap->read_set != &read_tmp)
		kfree(kap->read_set, M_SELECT);
	if (kap->write_set && kap->write_set != &write_tmp)
		kfree(kap->write_set, M_SELECT);
	if (kap->except_set && kap->except_set != &except_tmp)
		kfree(kap->except_set, M_SELECT);

	kap->lwp->lwp_kqueue_serial += kap->num_fds;

	return (error);
}

/*
 * Poll system call.
 *
 * MPSAFE
 */
int
sys_poll(struct poll_args *uap)
{
	struct timespec ts, *tsp;
	int error;

	if (uap->timeout != INFTIM) {
		if (uap->timeout < 0)
			return (EINVAL);
		ts.tv_sec = uap->timeout / 1000;
		ts.tv_nsec = (uap->timeout % 1000) * 1000 * 1000;
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	error = dopoll(uap->nfds, uap->fds, tsp, &uap->sysmsg_result, 0);

	return (error);
}
/*
 * Ppoll system call.
 *
 * MPSAFE
 */
int
sys_ppoll(struct ppoll_args *uap)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct timespec *ktsp, kts;
	sigset_t sigmask;
	int error;

	/*
	 * Get timeout if any.
	 */
	if (uap->ts != NULL) {
		error = copyin(uap->ts, &kts, sizeof (kts));
		if (error)
			return (error);
		ktsp = &kts;
	} else {
		ktsp = NULL;
	}

	/*
	 * Install temporary signal mask if any provided.
	 */
	if (uap->sigmask != NULL) {
		error = copyin(uap->sigmask, &sigmask, sizeof(sigmask));
		if (error)
			return (error);
		lwkt_gettoken(&lp->lwp_proc->p_token);
		lp->lwp_oldsigmask = lp->lwp_sigmask;
		SIG_CANTMASK(sigmask);
		lp->lwp_sigmask = sigmask;
		lwkt_reltoken(&lp->lwp_proc->p_token);
	}

	error = dopoll(uap->nfds, uap->fds, ktsp, &uap->sysmsg_result,
		       ktsp != NULL ? KEVENT_TIMEOUT_PRECISE : 0);

	if (uap->sigmask != NULL) {
		lwkt_gettoken(&lp->lwp_proc->p_token);
		/* dopoll() responsible for turning ERESTART into EINTR */
		KKASSERT(error != ERESTART);
		if (error == EINTR) {
			/*
			 * We can't restore the previous signal mask now
			 * because it could block the signal that interrupted
			 * us.  So make a note to restore it after executing
			 * the handler.
			 */
			lp->lwp_flags |= LWP_OLDMASK;
		} else {
			/*
			 * No handler to run.  Restore previous mask
			 * immediately.
			 */
			lp->lwp_sigmask = lp->lwp_oldsigmask;
		}
		lwkt_reltoken(&lp->lwp_proc->p_token);
	}

	return (error);
}

static int
poll_copyin(void *arg, struct kevent *kevp, int maxevents, int *events)
{
	struct poll_kevent_copyin_args *pkap;
	struct pollfd *pfd;
	struct kevent *kev;
	int kev_count;

	pkap = (struct poll_kevent_copyin_args *)arg;

	while (pkap->pfds < pkap->nfds) {
		pfd = &pkap->fds[pkap->pfds];

		/* Clear return events */
		pfd->revents = 0;

		/* Do not check if fd is equal to -1 */
		if (pfd->fd == -1) {
			++pkap->pfds;
			continue;
		}

		kev_count = 0;
		if (pfd->events & (POLLIN | POLLRDNORM))
			kev_count++;
		if (pfd->events & (POLLOUT | POLLWRNORM))
			kev_count++;
		if (pfd->events & (POLLPRI | POLLRDBAND))
			kev_count++;

		if (*events + kev_count > maxevents)
			return (0);

		/*
		 * NOTE: A combined serial number and poll array index is
		 *	 stored in kev->udata.
		 */
		kev = &kevp[*events];
		if (pfd->events & (POLLIN | POLLRDNORM)) {
			EV_SET(kev++, pfd->fd, EVFILT_READ, EV_ADD|EV_ENABLE,
			       NOTE_OLDAPI, 0, (void *)(uintptr_t)
				(pkap->lwp->lwp_kqueue_serial + pkap->pfds));
		}
		if (pfd->events & (POLLOUT | POLLWRNORM)) {
			EV_SET(kev++, pfd->fd, EVFILT_WRITE, EV_ADD|EV_ENABLE,
			       NOTE_OLDAPI, 0, (void *)(uintptr_t)
				(pkap->lwp->lwp_kqueue_serial + pkap->pfds));
		}
		if (pfd->events & (POLLPRI | POLLRDBAND)) {
			EV_SET(kev++, pfd->fd, EVFILT_EXCEPT, EV_ADD|EV_ENABLE,
			       NOTE_OLDAPI | NOTE_OOB, 0,
			       (void *)(uintptr_t)
				(pkap->lwp->lwp_kqueue_serial + pkap->pfds));
		}

		if (nseldebug) {
			kprintf("poll index %d/%d fd %d events %08x "
				"serial %ju\n", pkap->pfds, pkap->nfds - 1,
				pfd->fd, pfd->events,
				(uintmax_t)pkap->lwp->lwp_kqueue_serial);
		}

		++pkap->pfds;
		(*events) += kev_count;
	}

	return (0);
}
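/*
 * Worked example of the udata encoding used above (values illustrative):
 * with lwp_kqueue_serial == 1000 and nfds == 3, poll_copyin() tags events
 * with udata 1000, 1001 and 1002.  poll_copyout() below recovers the poll
 * array index as pi = udata - serial; a stale event left over from an
 * earlier poll/select call decodes to pi >= nfds and is deleted as
 * spurious.  dopoll() bumps lwp_kqueue_serial by nfds afterward so the
 * next call uses a fresh range.
 */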
static int
poll_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct poll_kevent_copyin_args *pkap;
	struct pollfd *pfd;
	struct kevent kev;
	int count_res;
	int i;
	int n;
	uint64_t pi;

	pkap = (struct poll_kevent_copyin_args *)arg;

	for (i = 0; i < count; ++i) {
		/*
		 * Extract the poll array index and delete spurious events.
		 * We can easily tell if the serial number is incorrect
		 * by checking whether the extracted index is out of range.
		 */
		pi = (uint64_t)(uintptr_t)kevp[i].udata -
		     pkap->lwp->lwp_kqueue_serial;

		if (pi >= pkap->nfds) {
			kev = kevp[i];
			kev.flags = EV_DISABLE|EV_DELETE;
			n = 1;
			kqueue_register(&pkap->lwp->lwp_kqueue, &kev, &n);
			if (nseldebug) {
				kprintf("poll index %ju out of range against "
					"serial %ju\n", (uintmax_t)pi,
					(uintmax_t)pkap->lwp->lwp_kqueue_serial);
			}
			continue;
		}
		pfd = &pkap->fds[pi];
		if (kevp[i].ident == pfd->fd) {
			/*
			 * A single descriptor may generate an error against
			 * more than one filter, make sure to set the
			 * appropriate flags but do not increment (*res)
			 * more than once.
			 */
			count_res = (pfd->revents == 0);
			if (kevp[i].flags & EV_ERROR) {
				switch(kevp[i].data) {
				case EBADF:
				case POLLNVAL:
					/* Bad file descriptor */
					if (count_res)
						++*res;
					pfd->revents |= POLLNVAL;
					break;
				default:
					/*
					 * Poll silently swallows any unknown
					 * errors except in the case of POLLPRI
					 * (OOB/urgent data).
					 *
					 * ALWAYS filter out EOPNOTSUPP errors
					 * from filters; common applications
					 * set POLLPRI|POLLRDBAND and most
					 * filters do not support
					 * EVFILT_EXCEPT.
					 *
					 * We also filter out ENODEV since
					 * dev_dkqfilter returns ENODEV if
					 * EOPNOTSUPP is returned in an
					 * inner call.
					 *
					 * XXX: fix this
					 */
					if (kevp[i].filter != EVFILT_READ &&
					    kevp[i].filter != EVFILT_WRITE &&
					    kevp[i].data != EOPNOTSUPP &&
					    kevp[i].data != ENODEV) {
						if (count_res)
							++*res;
						pfd->revents |= POLLERR;
					}
					break;
				}
				if (nseldebug) {
					kprintf("poll index %ju fd %d "
						"filter %d error %jd\n",
						(uintmax_t)pi, pfd->fd,
						kevp[i].filter,
						(intmax_t)kevp[i].data);
				}
				continue;
			}

			switch (kevp[i].filter) {
			case EVFILT_READ:
				/*
				 * NODATA on the read side can indicate a
				 * half-closed situation and not necessarily
				 * a disconnect, so depend on the user
				 * issuing a read() and getting 0 bytes back.
				 *
				 * If EV_HUP is set the peer completely
				 * disconnected and we can set POLLHUP
				 * once data is exhausted.
				 */
				if (kevp[i].flags & EV_NODATA) {
					if (kevp[i].flags & EV_HUP)
						pfd->revents |= POLLHUP;
				}
				if ((kevp[i].flags & EV_EOF) &&
				    kevp[i].fflags != 0)
					pfd->revents |= POLLERR;
				if (pfd->events & POLLIN)
					pfd->revents |= POLLIN;
				if (pfd->events & POLLRDNORM)
					pfd->revents |= POLLRDNORM;
				break;
			case EVFILT_WRITE:
				/*
				 * As per the OpenGroup POLLHUP is mutually
				 * exclusive with the writability flags.  I
				 * consider this a bit broken but...
				 *
				 * In this case a disconnect is implied even
				 * for a half-closed (write side) situation.
				 */
				if (kevp[i].flags & EV_EOF) {
					pfd->revents |= POLLHUP;
					if (kevp[i].fflags != 0)
						pfd->revents |= POLLERR;
				} else {
					if (pfd->events & POLLOUT)
						pfd->revents |= POLLOUT;
					if (pfd->events & POLLWRNORM)
						pfd->revents |= POLLWRNORM;
				}
				break;
			case EVFILT_EXCEPT:
				/*
				 * EV_NODATA should never be tagged for this
				 * filter.
				 */
				if (pfd->events & POLLPRI)
					pfd->revents |= POLLPRI;
				if (pfd->events & POLLRDBAND)
					pfd->revents |= POLLRDBAND;
				break;
			}

			if (nseldebug) {
				kprintf("poll index %ju/%d fd %d "
					"revents %08x\n", (uintmax_t)pi,
					pkap->nfds, pfd->fd, pfd->revents);
			}

			if (count_res && pfd->revents)
				++*res;
		} else {
			if (nseldebug) {
				kprintf("poll index %ju mismatch %ju/%d\n",
					(uintmax_t)pi, (uintmax_t)kevp[i].ident,
					pfd->fd);
			}
		}
	}

	return (0);
}

static int
dopoll(int nfds, struct pollfd *fds, struct timespec *ts, int *res, int flags)
{
	struct poll_kevent_copyin_args ka;
	struct pollfd sfds[64];
	int bytes;
	int error;

	*res = 0;
	if (nfds < 0)
		return (EINVAL);

	if (nfds == 0 && ts)
		return (dotimeout_only(ts));

	/*
	 * This is a bit arbitrary but we need to limit internal kmallocs.
	 */
	if (nfds > maxfilesperproc * 2)
		nfds = maxfilesperproc * 2;
	bytes = sizeof(struct pollfd) * nfds;

	ka.lwp = curthread->td_lwp;
	ka.nfds = nfds;
	ka.pfds = 0;
	ka.error = 0;

	if (ka.nfds < 64)
		ka.fds = sfds;
	else
		ka.fds = kmalloc(bytes, M_SELECT, M_WAITOK);

	error = copyin(fds, ka.fds, bytes);
	if (error == 0)
		error = kern_kevent(&ka.lwp->lwp_kqueue, 0x7FFFFFFF, res, &ka,
				    poll_copyin, poll_copyout, ts, flags);

	if (error == 0)
		error = copyout(ka.fds, fds, bytes);

	if (ka.fds != sfds)
		kfree(ka.fds, M_SELECT);

	ka.lwp->lwp_kqueue_serial += nfds;

	return (error);
}

static int
socket_wait_copyin(void *arg, struct kevent *kevp, int maxevents, int *events)
{
	return (0);
}

static int
socket_wait_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	++*res;
	return (0);
}

extern	struct fileops socketops;

/*
 * NOTE: Callers of socket_wait() must already have a reference on the
 *	 socket.
 */
int
socket_wait(struct socket *so, struct timespec *ts, int *res)
{
	struct thread *td = curthread;
	struct file *fp;
	struct kqueue kq;
	struct kevent kev;
	int error, fd;
	int n;

	if ((error = falloc(td->td_lwp, &fp, &fd)) != 0)
		return (error);

	fp->f_type = DTYPE_SOCKET;
	fp->f_flag = FREAD | FWRITE;
	fp->f_ops = &socketops;
	fp->f_data = so;
	fsetfd(td->td_lwp->lwp_proc->p_fd, fp, fd);
	fsetfdflags(td->td_proc->p_fd, fd, UF_EXCLOSE);

	bzero(&kq, sizeof(kq));
	kqueue_init(&kq, td->td_lwp->lwp_proc->p_fd);
	EV_SET(&kev, fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, NULL);
	n = 1;
	if ((error = kqueue_register(&kq, &kev, &n)) != 0) {
		fdrop(fp);
		return (error);
	}

	error = kern_kevent(&kq, 1, res, NULL, socket_wait_copyin,
			    socket_wait_copyout, ts, 0);

	EV_SET(&kev, fd, EVFILT_READ, EV_DELETE|EV_DISABLE, 0, 0, NULL);
	n = 1;
	kqueue_register(&kq, &kev, &n);
	fp->f_ops = &badfileops;
	fdrop(fp);

	return (error);
}

/*
 * OpenBSD poll system call.
 * XXX this isn't quite a true representation..  OpenBSD uses select ops.
 *
 * MPSAFE
 */
int
sys_openbsd_poll(struct openbsd_poll_args *uap)
{
	return (sys_poll((struct poll_args *)uap));
}

/*ARGSUSED*/
int
seltrue(cdev_t dev, int events)
{
	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}