1 /* 2 * (MPSAFE) 3 * 4 * Copyright (c) 1988 University of Utah. 5 * Copyright (c) 1991, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * This code is derived from software contributed to Berkeley by 9 * the Systems Programming Group of the University of Utah Computer 10 * Science Department. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$ 37 * 38 * @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94 39 * $FreeBSD: src/sys/vm/vm_mmap.c,v 1.108.2.6 2002/07/02 20:06:19 dillon Exp $ 40 */ 41 42 /* 43 * Mapped file (mmap) interface to VM 44 */ 45 46 #include <sys/param.h> 47 #include <sys/kernel.h> 48 #include <sys/systm.h> 49 #include <sys/sysmsg.h> 50 #include <sys/filedesc.h> 51 #include <sys/kern_syscall.h> 52 #include <sys/proc.h> 53 #include <sys/priv.h> 54 #include <sys/resource.h> 55 #include <sys/resourcevar.h> 56 #include <sys/vnode.h> 57 #include <sys/fcntl.h> 58 #include <sys/file.h> 59 #include <sys/mman.h> 60 #include <sys/conf.h> 61 #include <sys/stat.h> 62 #include <sys/vmmeter.h> 63 #include <sys/sysctl.h> 64 65 #include <vm/vm.h> 66 #include <vm/vm_param.h> 67 #include <sys/lock.h> 68 #include <vm/pmap.h> 69 #include <vm/vm_map.h> 70 #include <vm/vm_object.h> 71 #include <vm/vm_page.h> 72 #include <vm/vm_pager.h> 73 #include <vm/vm_pageout.h> 74 #include <vm/vm_extern.h> 75 #include <vm/vm_kern.h> 76 77 #include <sys/file2.h> 78 #include <sys/thread.h> 79 #include <vm/vm_page2.h> 80 81 static int max_proc_mmap = 1000000; 82 SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, ""); 83 int vkernel_enable; 84 SYSCTL_INT(_vm, OID_AUTO, vkernel_enable, CTLFLAG_RW, &vkernel_enable, 0, ""); 85 86 /* 87 * sstk_args(int incr) 88 * 89 * MPSAFE 90 */ 91 int 92 sys_sstk(struct sysmsg *sysmsg, const struct sstk_args *uap) 93 { 94 /* Not yet implemented */ 95 return (EOPNOTSUPP); 96 } 97 98 /* 99 * mmap_args(void *addr, size_t len, int prot, int flags, int fd, 100 * long pad, off_t pos) 101 * 102 * Memory Map (mmap) system call. Note that the file offset 103 * and address are allowed to be NOT page aligned, though if 104 * the MAP_FIXED flag it set, both must have the same remainder 105 * modulo the PAGE_SIZE (POSIX 1003.1b). If the address is not 106 * page-aligned, the actual mapping starts at trunc_page(addr) 107 * and the return value is adjusted up by the page offset. 108 * 109 * Generally speaking, only character devices which are themselves 110 * memory-based, such as a video framebuffer, can be mmap'd. Otherwise 111 * there would be no cache coherency between a descriptor and a VM mapping 112 * both to the same character device. 113 * 114 * Block devices can be mmap'd no matter what they represent. Cache coherency 115 * is maintained as long as you do not write directly to the underlying 116 * character device. 117 * 118 * No requirements 119 */ 120 int 121 kern_mmap(struct vmspace *vms, caddr_t uaddr, size_t ulen, 122 int uprot, int uflags, int fd, off_t upos, void **res) 123 { 124 struct thread *td = curthread; 125 struct proc *p = td->td_proc; 126 struct file *fp = NULL; 127 struct vnode *vp; 128 vm_offset_t addr; 129 vm_offset_t tmpaddr; 130 vm_size_t size, pageoff; 131 vm_prot_t prot, maxprot; 132 void *handle; 133 int flags, error; 134 off_t pos; 135 vm_object_t obj; 136 137 KKASSERT(p); 138 139 addr = (vm_offset_t) uaddr; 140 size = ulen; 141 prot = uprot & VM_PROT_ALL; 142 flags = uflags; 143 pos = upos; 144 145 /* 146 * Make sure mapping fits into numeric range etc. 147 * 148 * NOTE: We support the full unsigned range for size now. 149 */ 150 if (((flags & MAP_ANON) && (fd != -1 || pos != 0))) 151 return (EINVAL); 152 153 if (size == 0) 154 return (EINVAL); 155 156 if (flags & MAP_STACK) { 157 if (fd != -1) 158 return (EINVAL); 159 if ((prot & (PROT_READ|PROT_WRITE)) != (PROT_READ|PROT_WRITE)) 160 return (EINVAL); 161 flags |= MAP_ANON; 162 pos = 0; 163 } 164 165 /* 166 * Virtual page tables cannot be used with MAP_STACK. Apart from 167 * it not making any sense, the aux union is used by both 168 * types. 169 * 170 * Because the virtual page table is stored in the backing object 171 * and might be updated by the kernel, the mapping must be R+W. 172 */ 173 if (flags & MAP_VPAGETABLE) { 174 if (vkernel_enable == 0) 175 return (EOPNOTSUPP); 176 if (flags & MAP_STACK) 177 return (EINVAL); 178 if ((prot & (PROT_READ|PROT_WRITE)) != (PROT_READ|PROT_WRITE)) 179 return (EINVAL); 180 } 181 182 /* 183 * Align the file position to a page boundary, 184 * and save its page offset component. 185 */ 186 pageoff = (pos & PAGE_MASK); 187 pos -= pageoff; 188 189 /* Adjust size for rounding (on both ends). */ 190 size += pageoff; /* low end... */ 191 size = (vm_size_t) round_page(size); /* hi end */ 192 if (size < ulen) /* wrap */ 193 return(EINVAL); 194 195 /* 196 * Check for illegal addresses. Watch out for address wrap... Note 197 * that VM_*_ADDRESS are not constants due to casts (argh). 198 */ 199 if (flags & (MAP_FIXED | MAP_TRYFIXED)) { 200 /* 201 * The specified address must have the same remainder 202 * as the file offset taken modulo PAGE_SIZE, so it 203 * should be aligned after adjustment by pageoff. 204 */ 205 addr -= pageoff; 206 if (addr & PAGE_MASK) 207 return (EINVAL); 208 209 /* 210 * Address range must be all in user VM space and not wrap. 211 */ 212 tmpaddr = addr + size; 213 if (tmpaddr < addr) 214 return (EINVAL); 215 if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS) 216 return (EINVAL); 217 if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS) 218 return (EINVAL); 219 } else { 220 /* 221 * Get a hint of where to map. It also provides mmap offset 222 * randomization if enabled. 223 */ 224 addr = vm_map_hint(p, addr, prot); 225 } 226 227 if (flags & MAP_ANON) { 228 /* 229 * Mapping blank space is trivial. 230 */ 231 handle = NULL; 232 maxprot = VM_PROT_ALL; 233 } else { 234 /* 235 * Mapping file, get fp for validation. Obtain vnode and make 236 * sure it is of appropriate type. 237 */ 238 fp = holdfp(td, fd, -1); 239 if (fp == NULL) 240 return (EBADF); 241 if (fp->f_type != DTYPE_VNODE) { 242 error = EINVAL; 243 goto done; 244 } 245 /* 246 * POSIX shared-memory objects are defined to have 247 * kernel persistence, and are not defined to support 248 * read(2)/write(2) -- or even open(2). Thus, we can 249 * use MAP_ASYNC to trade on-disk coherence for speed. 250 * The shm_open(3) library routine turns on the FPOSIXSHM 251 * flag to request this behavior. 252 */ 253 if (fp->f_flag & FPOSIXSHM) 254 flags |= MAP_NOSYNC; 255 vp = (struct vnode *) fp->f_data; 256 257 /* 258 * Validate the vnode for the operation. 259 */ 260 switch(vp->v_type) { 261 case VREG: 262 /* 263 * Get the proper underlying object 264 */ 265 if ((obj = vp->v_object) == NULL) { 266 error = EINVAL; 267 goto done; 268 } 269 KKASSERT((struct vnode *)obj->handle == vp); 270 break; 271 case VCHR: 272 /* 273 * Make sure a device has not been revoked. 274 * Mappability is handled by the device layer. 275 */ 276 if (vp->v_rdev == NULL) { 277 error = EBADF; 278 goto done; 279 } 280 break; 281 default: 282 /* 283 * Nothing else is mappable. 284 */ 285 error = EINVAL; 286 goto done; 287 } 288 289 /* 290 * XXX hack to handle use of /dev/zero to map anon memory (ala 291 * SunOS). 292 */ 293 if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) { 294 handle = NULL; 295 maxprot = VM_PROT_ALL; 296 flags |= MAP_ANON; 297 pos = 0; 298 } else { 299 /* 300 * cdevs does not provide private mappings of any kind. 301 */ 302 if (vp->v_type == VCHR && 303 (flags & (MAP_PRIVATE|MAP_COPY))) { 304 error = EINVAL; 305 goto done; 306 } 307 /* 308 * Ensure that file and memory protections are 309 * compatible. Note that we only worry about 310 * writability if mapping is shared; in this case, 311 * current and max prot are dictated by the open file. 312 * XXX use the vnode instead? Problem is: what 313 * credentials do we use for determination? What if 314 * proc does a setuid? 315 */ 316 maxprot = VM_PROT_EXECUTE; 317 if (fp->f_flag & FREAD) { 318 maxprot |= VM_PROT_READ; 319 } else if (prot & PROT_READ) { 320 error = EACCES; 321 goto done; 322 } 323 /* 324 * If we are sharing potential changes (either via 325 * MAP_SHARED or via the implicit sharing of character 326 * device mappings), and we are trying to get write 327 * permission although we opened it without asking 328 * for it, bail out. Check for superuser, only if 329 * we're at securelevel < 1, to allow the XIG X server 330 * to continue to work. 331 * 332 * PROT_WRITE + MAP_SHARED 333 */ 334 if ((flags & MAP_SHARED) != 0 || vp->v_type == VCHR) { 335 if ((fp->f_flag & FWRITE) != 0) { 336 struct vattr va; 337 if ((error = VOP_GETATTR(vp, &va))) { 338 goto done; 339 } 340 if ((va.va_flags & 341 (IMMUTABLE|APPEND)) == 0) { 342 maxprot |= VM_PROT_WRITE; 343 344 /* 345 * SHARED+RW regular file mmap() 346 * updates v_lastwrite_ts. 347 */ 348 if ((prot & PROT_WRITE) && 349 vp->v_type == VREG && 350 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY) == 0) { 351 vfs_timestamp(&vp->v_lastwrite_ts); 352 vsetflags(vp, VLASTWRITETS); 353 vn_unlock(vp); 354 } 355 } else if (prot & PROT_WRITE) { 356 error = EPERM; 357 goto done; 358 } 359 } else if ((prot & PROT_WRITE) != 0) { 360 error = EACCES; 361 goto done; 362 } 363 } else { 364 maxprot |= VM_PROT_WRITE; 365 } 366 handle = (void *)vp; 367 } 368 } 369 370 lwkt_gettoken(&vms->vm_map.token); 371 372 /* 373 * Do not allow more then a certain number of vm_map_entry structures 374 * per process. 0 to disable. 375 */ 376 if (max_proc_mmap && vms->vm_map.nentries >= max_proc_mmap) { 377 error = ENOMEM; 378 lwkt_reltoken(&vms->vm_map.token); 379 goto done; 380 } 381 382 error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot, 383 flags, handle, pos, fp); 384 if (error == 0) 385 *res = (void *)(addr + pageoff); 386 387 lwkt_reltoken(&vms->vm_map.token); 388 done: 389 if (fp) 390 dropfp(td, fd, fp); 391 392 return (error); 393 } 394 395 /* 396 * mmap system call handler 397 * 398 * No requirements. 399 */ 400 int 401 sys_mmap(struct sysmsg *sysmsg, const struct mmap_args *uap) 402 { 403 int error; 404 int flags = uap->flags; 405 off_t upos = uap->pos; 406 407 /* 408 * Work around fairly serious problems with trying to have an 409 * auto-grow stack segment related to other unrelated calls to 410 * mmap() potentially getting addresses within such segments. 411 * 412 * Our attempt to use TRYFIXED to mediate the problem basically 413 * failed. For example, rtld-elf uses it to try to optimize 414 * shlib placement, but could run afoul of this issue. 415 * 416 * The only remaining true MAP_STACK we allow is the user stack as 417 * created by the exec code. All userland MAP_STACK's are converted 418 * to normal mmap()s right here. 419 */ 420 if (flags & MAP_STACK) { 421 if (uap->fd != -1) 422 return (EINVAL); 423 if ((uap->prot & (PROT_READ|PROT_WRITE)) != 424 (PROT_READ|PROT_WRITE)) { 425 return (EINVAL); 426 } 427 flags &= ~MAP_STACK; 428 flags |= MAP_ANON; 429 upos = 0; 430 } 431 432 error = kern_mmap(curproc->p_vmspace, uap->addr, uap->len, 433 uap->prot, flags, 434 uap->fd, upos, &sysmsg->sysmsg_resultp); 435 436 return (error); 437 } 438 439 /* 440 * msync system call handler 441 * 442 * msync_args(void *addr, size_t len, int flags) 443 * 444 * No requirements 445 */ 446 int 447 sys_msync(struct sysmsg *sysmsg, const struct msync_args *uap) 448 { 449 struct proc *p = curproc; 450 vm_offset_t addr; 451 vm_offset_t tmpaddr; 452 vm_size_t size, pageoff; 453 int flags; 454 vm_map_t map; 455 int rv; 456 457 addr = (vm_offset_t) uap->addr; 458 size = uap->len; 459 flags = uap->flags; 460 461 pageoff = (addr & PAGE_MASK); 462 addr -= pageoff; 463 size += pageoff; 464 size = (vm_size_t) round_page(size); 465 if (size < uap->len) /* wrap */ 466 return(EINVAL); 467 tmpaddr = addr + size; /* workaround gcc4 opt */ 468 if (tmpaddr < addr) /* wrap */ 469 return(EINVAL); 470 471 if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE)) 472 return (EINVAL); 473 474 map = &p->p_vmspace->vm_map; 475 476 /* 477 * map->token serializes extracting the address range for size == 0 478 * msyncs with the vm_map_clean call; if the token were not held 479 * across the two calls, an intervening munmap/mmap pair, for example, 480 * could cause msync to occur on a wrong region. 481 */ 482 lwkt_gettoken(&map->token); 483 484 /* 485 * XXX Gak! If size is zero we are supposed to sync "all modified 486 * pages with the region containing addr". Unfortunately, we don't 487 * really keep track of individual mmaps so we approximate by flushing 488 * the range of the map entry containing addr. This can be incorrect 489 * if the region splits or is coalesced with a neighbor. 490 */ 491 if (size == 0) { 492 vm_map_entry_t entry; 493 494 vm_map_lock_read(map); 495 rv = vm_map_lookup_entry(map, addr, &entry); 496 if (rv == FALSE) { 497 vm_map_unlock_read(map); 498 rv = KERN_INVALID_ADDRESS; 499 goto done; 500 } 501 addr = entry->ba.start; 502 size = entry->ba.end - entry->ba.start; 503 vm_map_unlock_read(map); 504 } 505 506 /* 507 * Clean the pages and interpret the return value. 508 */ 509 rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0, 510 (flags & MS_INVALIDATE) != 0); 511 done: 512 lwkt_reltoken(&map->token); 513 514 switch (rv) { 515 case KERN_SUCCESS: 516 break; 517 case KERN_INVALID_ADDRESS: 518 return (EINVAL); /* Sun returns ENOMEM? */ 519 case KERN_FAILURE: 520 return (EIO); 521 default: 522 return (EINVAL); 523 } 524 525 return (0); 526 } 527 528 /* 529 * munmap system call handler 530 * 531 * munmap_args(void *addr, size_t len) 532 * 533 * No requirements 534 */ 535 int 536 sys_munmap(struct sysmsg *sysmsg, const struct munmap_args *uap) 537 { 538 struct proc *p = curproc; 539 vm_offset_t addr; 540 vm_offset_t tmpaddr; 541 vm_size_t size, pageoff; 542 vm_map_t map; 543 544 addr = (vm_offset_t) uap->addr; 545 size = uap->len; 546 547 pageoff = (addr & PAGE_MASK); 548 addr -= pageoff; 549 size += pageoff; 550 size = (vm_size_t) round_page(size); 551 if (size < uap->len) /* wrap */ 552 return(EINVAL); 553 tmpaddr = addr + size; /* workaround gcc4 opt */ 554 if (tmpaddr < addr) /* wrap */ 555 return(EINVAL); 556 557 if (size == 0) 558 return (0); 559 560 /* 561 * Check for illegal addresses. Watch out for address wrap... Note 562 * that VM_*_ADDRESS are not constants due to casts (argh). 563 */ 564 if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS) 565 return (EINVAL); 566 if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS) 567 return (EINVAL); 568 569 map = &p->p_vmspace->vm_map; 570 571 /* map->token serializes between the map check and the actual unmap */ 572 lwkt_gettoken(&map->token); 573 574 /* 575 * Make sure entire range is allocated. 576 */ 577 if (!vm_map_check_protection(map, addr, addr + size, 578 VM_PROT_NONE, FALSE)) { 579 lwkt_reltoken(&map->token); 580 return (EINVAL); 581 } 582 /* returns nothing but KERN_SUCCESS anyway */ 583 vm_map_remove(map, addr, addr + size); 584 lwkt_reltoken(&map->token); 585 return (0); 586 } 587 588 /* 589 * mprotect_args(const void *addr, size_t len, int prot) 590 * 591 * No requirements. 592 */ 593 int 594 sys_mprotect(struct sysmsg *sysmsg, const struct mprotect_args *uap) 595 { 596 struct proc *p = curproc; 597 vm_offset_t addr; 598 vm_offset_t tmpaddr; 599 vm_size_t size, pageoff; 600 vm_prot_t prot; 601 int error; 602 603 addr = (vm_offset_t) uap->addr; 604 size = uap->len; 605 prot = uap->prot & VM_PROT_ALL; 606 607 pageoff = (addr & PAGE_MASK); 608 addr -= pageoff; 609 size += pageoff; 610 size = (vm_size_t) round_page(size); 611 if (size < uap->len) /* wrap */ 612 return(EINVAL); 613 tmpaddr = addr + size; /* workaround gcc4 opt */ 614 if (tmpaddr < addr) /* wrap */ 615 return(EINVAL); 616 617 switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, 618 prot, FALSE)) { 619 case KERN_SUCCESS: 620 error = 0; 621 break; 622 case KERN_PROTECTION_FAILURE: 623 error = EACCES; 624 break; 625 default: 626 error = EINVAL; 627 break; 628 } 629 return (error); 630 } 631 632 /* 633 * minherit system call handler 634 * 635 * minherit_args(void *addr, size_t len, int inherit) 636 * 637 * No requirements. 638 */ 639 int 640 sys_minherit(struct sysmsg *sysmsg, const struct minherit_args *uap) 641 { 642 struct proc *p = curproc; 643 vm_offset_t addr; 644 vm_offset_t tmpaddr; 645 vm_size_t size, pageoff; 646 vm_inherit_t inherit; 647 int error; 648 649 addr = (vm_offset_t)uap->addr; 650 size = uap->len; 651 inherit = uap->inherit; 652 653 pageoff = (addr & PAGE_MASK); 654 addr -= pageoff; 655 size += pageoff; 656 size = (vm_size_t) round_page(size); 657 if (size < uap->len) /* wrap */ 658 return(EINVAL); 659 tmpaddr = addr + size; /* workaround gcc4 opt */ 660 if (tmpaddr < addr) /* wrap */ 661 return(EINVAL); 662 663 switch (vm_map_inherit(&p->p_vmspace->vm_map, addr, 664 addr + size, inherit)) { 665 case KERN_SUCCESS: 666 error = 0; 667 break; 668 case KERN_PROTECTION_FAILURE: 669 error = EACCES; 670 break; 671 default: 672 error = EINVAL; 673 break; 674 } 675 return (error); 676 } 677 678 /* 679 * madvise system call handler 680 * 681 * madvise_args(void *addr, size_t len, int behav) 682 * 683 * No requirements. 684 */ 685 int 686 sys_madvise(struct sysmsg *sysmsg, const struct madvise_args *uap) 687 { 688 struct proc *p = curproc; 689 vm_offset_t start, end; 690 vm_offset_t tmpaddr = (vm_offset_t)uap->addr + uap->len; 691 int error; 692 693 /* 694 * Check for illegal behavior 695 */ 696 if (uap->behav < 0 || uap->behav >= MADV_CONTROL_END) 697 return (EINVAL); 698 /* 699 * Check for illegal addresses. Watch out for address wrap... Note 700 * that VM_*_ADDRESS are not constants due to casts (argh). 701 */ 702 if (tmpaddr < (vm_offset_t)uap->addr) 703 return (EINVAL); 704 if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS) 705 return (EINVAL); 706 if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS) 707 return (EINVAL); 708 709 /* 710 * Since this routine is only advisory, we default to conservative 711 * behavior. 712 */ 713 start = trunc_page((vm_offset_t)uap->addr); 714 end = round_page(tmpaddr); 715 716 error = vm_map_madvise(&p->p_vmspace->vm_map, start, end, 717 uap->behav, 0); 718 return (error); 719 } 720 721 /* 722 * mcontrol system call handler 723 * 724 * mcontrol_args(void *addr, size_t len, int behav, off_t value) 725 * 726 * No requirements 727 */ 728 int 729 sys_mcontrol(struct sysmsg *sysmsg, const struct mcontrol_args *uap) 730 { 731 struct proc *p = curproc; 732 vm_offset_t start, end; 733 vm_offset_t tmpaddr = (vm_offset_t)uap->addr + uap->len; 734 int error; 735 736 /* 737 * Check for illegal behavior 738 */ 739 if (uap->behav < 0 || uap->behav > MADV_CONTROL_END) 740 return (EINVAL); 741 /* 742 * Check for illegal addresses. Watch out for address wrap... Note 743 * that VM_*_ADDRESS are not constants due to casts (argh). 744 */ 745 if (tmpaddr < (vm_offset_t) uap->addr) 746 return (EINVAL); 747 if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS) 748 return (EINVAL); 749 if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS) 750 return (EINVAL); 751 752 /* 753 * Since this routine is only advisory, we default to conservative 754 * behavior. 755 */ 756 start = trunc_page((vm_offset_t)uap->addr); 757 end = round_page(tmpaddr); 758 759 error = vm_map_madvise(&p->p_vmspace->vm_map, start, end, 760 uap->behav, uap->value); 761 return (error); 762 } 763 764 765 /* 766 * mincore system call handler 767 * 768 * mincore_args(const void *addr, size_t len, char *vec) 769 * 770 * No requirements 771 */ 772 int 773 sys_mincore(struct sysmsg *sysmsg, const struct mincore_args *uap) 774 { 775 struct proc *p = curproc; 776 vm_offset_t addr, first_addr; 777 vm_offset_t end, cend; 778 pmap_t pmap; 779 vm_map_t map; 780 char *vec; 781 int error; 782 int vecindex, lastvecindex; 783 vm_map_entry_t current; 784 vm_map_entry_t entry; 785 int mincoreinfo; 786 unsigned int timestamp; 787 788 /* 789 * Make sure that the addresses presented are valid for user 790 * mode. 791 */ 792 first_addr = addr = trunc_page((vm_offset_t) uap->addr); 793 end = addr + (vm_size_t)round_page(uap->len); 794 if (end < addr) 795 return (EINVAL); 796 if (VM_MAX_USER_ADDRESS > 0 && end > VM_MAX_USER_ADDRESS) 797 return (EINVAL); 798 799 /* 800 * Address of byte vector 801 */ 802 vec = uap->vec; 803 804 map = &p->p_vmspace->vm_map; 805 pmap = vmspace_pmap(p->p_vmspace); 806 807 lwkt_gettoken(&map->token); 808 vm_map_lock_read(map); 809 RestartScan: 810 timestamp = map->timestamp; 811 812 if (!vm_map_lookup_entry(map, addr, &entry)) 813 entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 814 815 /* 816 * Do this on a map entry basis so that if the pages are not 817 * in the current processes address space, we can easily look 818 * up the pages elsewhere. 819 */ 820 lastvecindex = -1; 821 for (current = entry; 822 current && current->ba.start < end; 823 current = vm_map_rb_tree_RB_NEXT(current)) { 824 /* 825 * ignore submaps (for now) or null objects 826 */ 827 if (current->maptype != VM_MAPTYPE_NORMAL && 828 current->maptype != VM_MAPTYPE_VPAGETABLE) { 829 continue; 830 } 831 if (current->ba.object == NULL) 832 continue; 833 834 /* 835 * limit this scan to the current map entry and the 836 * limits for the mincore call 837 */ 838 if (addr < current->ba.start) 839 addr = current->ba.start; 840 cend = current->ba.end; 841 if (cend > end) 842 cend = end; 843 844 /* 845 * scan this entry one page at a time 846 */ 847 while (addr < cend) { 848 /* 849 * Check pmap first, it is likely faster, also 850 * it can provide info as to whether we are the 851 * one referencing or modifying the page. 852 * 853 * If we have to check the VM object, only mess 854 * around with normal maps. Do not mess around 855 * with virtual page tables (XXX). 856 */ 857 mincoreinfo = pmap_mincore(pmap, addr); 858 if (mincoreinfo == 0 && 859 current->maptype == VM_MAPTYPE_NORMAL) { 860 vm_pindex_t pindex; 861 vm_ooffset_t offset; 862 vm_page_t m; 863 864 /* 865 * calculate the page index into the object 866 */ 867 offset = current->ba.offset + 868 (addr - current->ba.start); 869 pindex = OFF_TO_IDX(offset); 870 871 /* 872 * if the page is resident, then gather 873 * information about it. spl protection is 874 * required to maintain the object 875 * association. And XXX what if the page is 876 * busy? What's the deal with that? 877 * 878 * XXX vm_token - legacy for pmap_ts_referenced 879 * in x86 and vkernel pmap code. 880 */ 881 lwkt_gettoken(&vm_token); 882 vm_object_hold(current->ba.object); 883 m = vm_page_lookup(current->ba.object, pindex); 884 if (m && m->valid) { 885 mincoreinfo = MINCORE_INCORE; 886 if (m->dirty || pmap_is_modified(m)) 887 mincoreinfo |= MINCORE_MODIFIED_OTHER; 888 if ((m->flags & PG_REFERENCED) || 889 pmap_ts_referenced(m)) { 890 vm_page_flag_set(m, PG_REFERENCED); 891 mincoreinfo |= MINCORE_REFERENCED_OTHER; 892 } 893 } 894 vm_object_drop(current->ba.object); 895 lwkt_reltoken(&vm_token); 896 } 897 898 /* 899 * subyte may page fault. In case it needs to modify 900 * the map, we release the lock. 901 */ 902 vm_map_unlock_read(map); 903 904 /* 905 * calculate index into user supplied byte vector 906 */ 907 vecindex = OFF_TO_IDX(addr - first_addr); 908 909 /* 910 * If we have skipped map entries, we need to make sure that 911 * the byte vector is zeroed for those skipped entries. 912 */ 913 while((lastvecindex + 1) < vecindex) { 914 error = subyte( vec + lastvecindex, 0); 915 if (error) { 916 error = EFAULT; 917 goto done; 918 } 919 ++lastvecindex; 920 } 921 922 /* 923 * Pass the page information to the user 924 */ 925 error = subyte(vec + vecindex, mincoreinfo); 926 if (error) { 927 error = EFAULT; 928 goto done; 929 } 930 931 /* 932 * If the map has changed, due to the subyte, 933 * the previous output may be invalid. 934 */ 935 vm_map_lock_read(map); 936 if (timestamp != map->timestamp) 937 goto RestartScan; 938 939 lastvecindex = vecindex; 940 addr += PAGE_SIZE; 941 } 942 } 943 944 /* 945 * subyte may page fault. In case it needs to modify 946 * the map, we release the lock. 947 */ 948 vm_map_unlock_read(map); 949 950 /* 951 * Zero the last entries in the byte vector. 952 */ 953 vecindex = OFF_TO_IDX(end - first_addr); 954 while((lastvecindex + 1) < vecindex) { 955 error = subyte( vec + lastvecindex, 0); 956 if (error) { 957 error = EFAULT; 958 goto done; 959 } 960 ++lastvecindex; 961 } 962 963 /* 964 * If the map has changed, due to the subyte, the previous 965 * output may be invalid. 966 */ 967 vm_map_lock_read(map); 968 if (timestamp != map->timestamp) 969 goto RestartScan; 970 vm_map_unlock_read(map); 971 972 error = 0; 973 done: 974 lwkt_reltoken(&map->token); 975 return (error); 976 } 977 978 /* 979 * mlock system call handler 980 * 981 * mlock_args(const void *addr, size_t len) 982 * 983 * No requirements 984 */ 985 int 986 sys_mlock(struct sysmsg *sysmsg, const struct mlock_args *uap) 987 { 988 vm_offset_t addr; 989 vm_offset_t tmpaddr; 990 vm_size_t size, pageoff; 991 struct thread *td = curthread; 992 struct proc *p = td->td_proc; 993 int error; 994 995 addr = (vm_offset_t) uap->addr; 996 size = uap->len; 997 998 pageoff = (addr & PAGE_MASK); 999 addr -= pageoff; 1000 size += pageoff; 1001 size = (vm_size_t) round_page(size); 1002 if (size < uap->len) /* wrap */ 1003 return (EINVAL); 1004 if (size == 0) /* silently allow 0 size */ 1005 return (0); 1006 tmpaddr = addr + size; /* workaround gcc4 opt */ 1007 if (tmpaddr < addr) /* wrap */ 1008 return (EINVAL); 1009 1010 if (atop(size) + vmstats.v_wire_count > vm_page_max_wired) 1011 return (EAGAIN); 1012 1013 /* 1014 * We do not need to synchronize against other threads updating ucred; 1015 * they update p->ucred, which is synchronized into td_ucred ourselves. 1016 */ 1017 #ifdef pmap_wired_count 1018 if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) > 1019 p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur) { 1020 return (ENOMEM); 1021 } 1022 #else 1023 error = priv_check_cred(td->td_ucred, PRIV_ROOT, 0); 1024 if (error) { 1025 return (error); 1026 } 1027 #endif 1028 error = vm_map_unwire(&p->p_vmspace->vm_map, addr, addr + size, FALSE); 1029 return (error == KERN_SUCCESS ? 0 : ENOMEM); 1030 } 1031 1032 /* 1033 * mlockall(int how) 1034 * 1035 * No requirements 1036 */ 1037 int 1038 sys_mlockall(struct sysmsg *sysmsg, const struct mlockall_args *uap) 1039 { 1040 struct thread *td = curthread; 1041 struct proc *p = td->td_proc; 1042 vm_map_t map = &p->p_vmspace->vm_map; 1043 vm_map_entry_t entry; 1044 int how = uap->how; 1045 int rc = KERN_SUCCESS; 1046 1047 if (((how & MCL_CURRENT) == 0) && ((how & MCL_FUTURE) == 0)) 1048 return (EINVAL); 1049 1050 rc = priv_check_cred(td->td_ucred, PRIV_ROOT, 0); 1051 if (rc) 1052 return (rc); 1053 1054 vm_map_lock(map); 1055 do { 1056 if (how & MCL_CURRENT) { 1057 RB_FOREACH(entry, vm_map_rb_tree, &map->rb_root) { 1058 ; /* NOT IMPLEMENTED YET */ 1059 } 1060 rc = ENOSYS; 1061 break; 1062 } 1063 if (how & MCL_FUTURE) 1064 map->flags |= MAP_WIREFUTURE; 1065 } while(0); 1066 vm_map_unlock(map); 1067 1068 return (rc); 1069 } 1070 1071 /* 1072 * munlockall(void) 1073 * 1074 * Unwire all user-wired map entries, cancel MCL_FUTURE. 1075 * 1076 * No requirements 1077 */ 1078 int 1079 sys_munlockall(struct sysmsg *sysmsg, const struct munlockall_args *uap) 1080 { 1081 struct thread *td = curthread; 1082 struct proc *p = td->td_proc; 1083 vm_map_t map = &p->p_vmspace->vm_map; 1084 vm_map_entry_t entry; 1085 int rc = KERN_SUCCESS; 1086 1087 vm_map_lock(map); 1088 1089 /* Clear MAP_WIREFUTURE to cancel mlockall(MCL_FUTURE) */ 1090 map->flags &= ~MAP_WIREFUTURE; 1091 1092 retry: 1093 RB_FOREACH(entry, vm_map_rb_tree, &map->rb_root) { 1094 if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) 1095 continue; 1096 1097 /* 1098 * If we encounter an in-transition entry, we release the 1099 * map lock and retry the scan; we do not decrement any 1100 * wired_count more than once because we do not touch 1101 * any entries with MAP_ENTRY_USER_WIRED not set. 1102 * 1103 * There is a potential interleaving with concurrent 1104 * mlockall()s here -- if we abort a scan, an mlockall() 1105 * could start, wire a number of entries before our 1106 * current position in, and then stall itself on this 1107 * or any other in-transition entry. If that occurs, when 1108 * we resume, we will unwire those entries. 1109 */ 1110 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 1111 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 1112 ++mycpu->gd_cnt.v_intrans_coll; 1113 ++mycpu->gd_cnt.v_intrans_wait; 1114 vm_map_transition_wait(map, 1); 1115 goto retry; 1116 } 1117 1118 KASSERT(entry->wired_count > 0, 1119 ("wired_count was 0 with USER_WIRED set! %p", entry)); 1120 1121 /* Drop wired count, if it hits zero, unwire the entry */ 1122 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 1123 entry->wired_count--; 1124 if (entry->wired_count == 0) 1125 vm_fault_unwire(map, entry); 1126 } 1127 1128 vm_map_unlock(map); 1129 1130 return (rc); 1131 } 1132 1133 /* 1134 * munlock system call handler 1135 * 1136 * munlock_args(const void *addr, size_t len) 1137 * 1138 * No requirements 1139 */ 1140 int 1141 sys_munlock(struct sysmsg *sysmsg, const struct munlock_args *uap) 1142 { 1143 struct thread *td = curthread; 1144 struct proc *p = td->td_proc; 1145 vm_offset_t addr; 1146 vm_offset_t tmpaddr; 1147 vm_size_t size, pageoff; 1148 int error; 1149 1150 addr = (vm_offset_t) uap->addr; 1151 size = uap->len; 1152 1153 pageoff = (addr & PAGE_MASK); 1154 addr -= pageoff; 1155 size += pageoff; 1156 size = (vm_size_t) round_page(size); 1157 1158 tmpaddr = addr + size; 1159 if (tmpaddr < addr) /* wrap */ 1160 return (EINVAL); 1161 if (size == 0) /* silently allow 0 size */ 1162 return (0); 1163 1164 #ifndef pmap_wired_count 1165 error = priv_check(td, PRIV_ROOT); 1166 if (error) 1167 return (error); 1168 #endif 1169 1170 error = vm_map_unwire(&p->p_vmspace->vm_map, addr, addr + size, TRUE); 1171 return (error == KERN_SUCCESS ? 0 : ENOMEM); 1172 } 1173 1174 /* 1175 * Internal version of mmap. 1176 * Currently used by mmap, exec, and sys5 shared memory. 1177 * Handle is either a vnode pointer or NULL for MAP_ANON. 1178 * 1179 * No requirements 1180 */ 1181 int 1182 vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot, 1183 vm_prot_t maxprot, int flags, void *handle, vm_ooffset_t foff, 1184 struct file *fp) 1185 { 1186 boolean_t fitit; 1187 vm_object_t object; 1188 vm_offset_t eaddr; 1189 vm_size_t esize; 1190 vm_size_t align; 1191 int (*uksmap)(vm_map_backing_t ba, int op, cdev_t dev, vm_page_t fake); 1192 struct vnode *vp; 1193 struct thread *td = curthread; 1194 struct proc *p; 1195 int rv = KERN_SUCCESS; 1196 off_t objsize; 1197 int docow; 1198 int error; 1199 1200 if (size == 0) 1201 return (0); 1202 1203 objsize = round_page(size); 1204 if (objsize < size) 1205 return (EINVAL); 1206 size = objsize; 1207 1208 lwkt_gettoken(&map->token); 1209 1210 /* 1211 * XXX messy code, fixme 1212 * 1213 * NOTE: Overflow checks require discrete statements or GCC4 1214 * will optimize it out. 1215 */ 1216 if ((p = curproc) != NULL && map == &p->p_vmspace->vm_map) { 1217 esize = map->size + size; /* workaround gcc4 opt */ 1218 if (esize < map->size || 1219 esize > p->p_rlimit[RLIMIT_VMEM].rlim_cur) { 1220 lwkt_reltoken(&map->token); 1221 return(ENOMEM); 1222 } 1223 } 1224 1225 /* 1226 * We currently can only deal with page aligned file offsets. 1227 * The check is here rather than in the syscall because the 1228 * kernel calls this function internally for other mmaping 1229 * operations (such as in exec) and non-aligned offsets will 1230 * cause pmap inconsistencies...so we want to be sure to 1231 * disallow this in all cases. 1232 * 1233 * NOTE: Overflow checks require discrete statements or GCC4 1234 * will optimize it out. 1235 */ 1236 if (foff & PAGE_MASK) { 1237 lwkt_reltoken(&map->token); 1238 return (EINVAL); 1239 } 1240 1241 /* 1242 * Handle alignment. For large memory maps it is possible 1243 * that the MMU can optimize the page table so align anything 1244 * that is a multiple of SEG_SIZE to SEG_SIZE. 1245 * 1246 * Also align any large mapping (bigger than 16x SG_SIZE) to a 1247 * SEG_SIZE address boundary. 1248 */ 1249 if (flags & MAP_SIZEALIGN) { 1250 align = size; 1251 if ((align ^ (align - 1)) != (align << 1) - 1) { 1252 lwkt_reltoken(&map->token); 1253 return (EINVAL); 1254 } 1255 } else if ((flags & MAP_FIXED) == 0 && 1256 ((size & SEG_MASK) == 0 || size > SEG_SIZE * 16)) { 1257 align = SEG_SIZE; 1258 } else { 1259 align = PAGE_SIZE; 1260 } 1261 1262 if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) { 1263 fitit = TRUE; 1264 *addr = round_page(*addr); 1265 } else { 1266 if (*addr != trunc_page(*addr)) { 1267 lwkt_reltoken(&map->token); 1268 return (EINVAL); 1269 } 1270 eaddr = *addr + size; 1271 if (eaddr < *addr) { 1272 lwkt_reltoken(&map->token); 1273 return (EINVAL); 1274 } 1275 fitit = FALSE; 1276 if ((flags & MAP_TRYFIXED) == 0) 1277 vm_map_remove(map, *addr, *addr + size); 1278 } 1279 1280 uksmap = NULL; 1281 1282 /* 1283 * Lookup/allocate object. 1284 */ 1285 if (flags & MAP_ANON) { 1286 /* 1287 * Unnamed anonymous regions always start at 0. 1288 */ 1289 if (handle) { 1290 /* 1291 * Default memory object 1292 */ 1293 object = default_pager_alloc(handle, objsize, 1294 prot, foff); 1295 if (object == NULL) { 1296 lwkt_reltoken(&map->token); 1297 return(ENOMEM); 1298 } 1299 docow = MAP_PREFAULT_PARTIAL; 1300 } else { 1301 /* 1302 * Implicit single instance of a default memory 1303 * object, so we don't need a VM object yet. 1304 */ 1305 foff = 0; 1306 object = NULL; 1307 docow = 0; 1308 } 1309 vp = NULL; 1310 } else { 1311 vp = (struct vnode *)handle; 1312 1313 /* 1314 * Non-anonymous mappings of VCHR (aka not /dev/zero) 1315 * cannot specify MAP_STACK or MAP_VPAGETABLE. 1316 */ 1317 if (vp->v_type == VCHR) { 1318 if (flags & (MAP_STACK | MAP_VPAGETABLE)) { 1319 lwkt_reltoken(&map->token); 1320 return(EINVAL); 1321 } 1322 } 1323 1324 if (vp->v_type == VCHR && vp->v_rdev->si_ops->d_uksmap) { 1325 /* 1326 * Device mappings without a VM object, typically 1327 * sharing permanently allocated kernel memory or 1328 * process-context-specific (per-process) data. 1329 * 1330 * The object offset for uksmap represents the 1331 * lwp_tid that did the mapping. 1332 * 1333 * Force them to be shared. 1334 */ 1335 uksmap = vp->v_rdev->si_ops->d_uksmap; 1336 object = NULL; 1337 docow = MAP_PREFAULT_PARTIAL; 1338 flags &= ~(MAP_PRIVATE|MAP_COPY); 1339 flags |= MAP_SHARED; 1340 } else if (vp->v_type == VCHR) { 1341 /* 1342 * Device mappings (device size unknown?). 1343 * Force them to be shared. 1344 */ 1345 error = dev_dmmap_single(vp->v_rdev, &foff, objsize, 1346 &object, prot, fp); 1347 1348 if (error == ENODEV) { 1349 handle = (void *)(intptr_t)vp->v_rdev; 1350 object = dev_pager_alloc(handle, objsize, prot, foff); 1351 if (object == NULL) { 1352 lwkt_reltoken(&map->token); 1353 return(EINVAL); 1354 } 1355 } else if (error) { 1356 lwkt_reltoken(&map->token); 1357 return(error); 1358 } 1359 1360 docow = MAP_PREFAULT_PARTIAL; 1361 flags &= ~(MAP_PRIVATE|MAP_COPY); 1362 flags |= MAP_SHARED; 1363 } else { 1364 /* 1365 * Regular file mapping (typically). The attribute 1366 * check is for the link count test only. mmapable 1367 * vnodes must already have a VM object assigned. 1368 */ 1369 struct vattr vat; 1370 int error; 1371 1372 error = VOP_GETATTR(vp, &vat); 1373 if (error) { 1374 lwkt_reltoken(&map->token); 1375 return (error); 1376 } 1377 docow = MAP_PREFAULT_PARTIAL; 1378 object = vnode_pager_reference(vp); 1379 if (object == NULL && vp->v_type == VREG) { 1380 lwkt_reltoken(&map->token); 1381 kprintf("Warning: cannot mmap vnode %p, no " 1382 "object\n", vp); 1383 return(EINVAL); 1384 } 1385 1386 /* 1387 * If it is a regular file without any references 1388 * we do not need to sync it. 1389 */ 1390 if (vp->v_type == VREG && vat.va_nlink == 0) { 1391 flags |= MAP_NOSYNC; 1392 } 1393 } 1394 } 1395 1396 /* 1397 * Deal with the adjusted flags 1398 */ 1399 if ((flags & (MAP_ANON|MAP_SHARED)) == 0) 1400 docow |= MAP_COPY_ON_WRITE; 1401 if (flags & MAP_NOSYNC) 1402 docow |= MAP_DISABLE_SYNCER; 1403 if (flags & MAP_NOCORE) 1404 docow |= MAP_DISABLE_COREDUMP; 1405 1406 /* 1407 * This may place the area in its own page directory if (size) is 1408 * large enough, otherwise it typically returns its argument. 1409 * 1410 * (object can be NULL) 1411 */ 1412 if (fitit) { 1413 *addr = pmap_addr_hint(object, *addr, size); 1414 } 1415 1416 /* 1417 * Stack mappings need special attention. 1418 * 1419 * Mappings that use virtual page tables will default to storing 1420 * the page table at offset 0. 1421 */ 1422 if (uksmap) { 1423 rv = vm_map_find(map, uksmap, vp->v_rdev, 1424 foff, addr, size, 1425 align, fitit, 1426 VM_MAPTYPE_UKSMAP, VM_SUBSYS_MMAP, 1427 prot, maxprot, docow); 1428 } else if (flags & MAP_STACK) { 1429 rv = vm_map_stack(map, addr, size, flags, 1430 prot, maxprot, docow); 1431 } else if (flags & MAP_VPAGETABLE) { 1432 rv = vm_map_find(map, object, NULL, 1433 foff, addr, size, 1434 align, fitit, 1435 VM_MAPTYPE_VPAGETABLE, VM_SUBSYS_MMAP, 1436 prot, maxprot, docow); 1437 } else { 1438 rv = vm_map_find(map, object, NULL, 1439 foff, addr, size, 1440 align, fitit, 1441 VM_MAPTYPE_NORMAL, VM_SUBSYS_MMAP, 1442 prot, maxprot, docow); 1443 } 1444 1445 if (rv != KERN_SUCCESS) { 1446 /* 1447 * Lose the object reference. Will destroy the 1448 * object if it's an unnamed anonymous mapping 1449 * or named anonymous without other references. 1450 * 1451 * (NOTE: object can be NULL) 1452 */ 1453 vm_object_deallocate(object); 1454 goto out; 1455 } 1456 1457 /* 1458 * Shared memory is also shared with children. 1459 */ 1460 if (flags & (MAP_SHARED|MAP_INHERIT)) { 1461 rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE); 1462 if (rv != KERN_SUCCESS) { 1463 vm_map_remove(map, *addr, *addr + size); 1464 goto out; 1465 } 1466 } 1467 1468 /* If a process has marked all future mappings for wiring, do so */ 1469 if ((rv == KERN_SUCCESS) && (map->flags & MAP_WIREFUTURE)) 1470 vm_map_unwire(map, *addr, *addr + size, FALSE); 1471 1472 /* 1473 * Set the access time on the vnode 1474 */ 1475 if (vp != NULL) 1476 vn_mark_atime(vp, td); 1477 out: 1478 lwkt_reltoken(&map->token); 1479 1480 switch (rv) { 1481 case KERN_SUCCESS: 1482 return (0); 1483 case KERN_INVALID_ADDRESS: 1484 case KERN_NO_SPACE: 1485 return (ENOMEM); 1486 case KERN_PROTECTION_FAILURE: 1487 return (EACCES); 1488 default: 1489 return (EINVAL); 1490 } 1491 } 1492 1493 /* 1494 * Translate a Mach VM return code to zero on success or the appropriate errno 1495 * on failure. 1496 */ 1497 int 1498 vm_mmap_to_errno(int rv) 1499 { 1500 1501 switch (rv) { 1502 case KERN_SUCCESS: 1503 return (0); 1504 case KERN_INVALID_ADDRESS: 1505 case KERN_NO_SPACE: 1506 return (ENOMEM); 1507 case KERN_PROTECTION_FAILURE: 1508 return (EACCES); 1509 default: 1510 return (EINVAL); 1511 } 1512 } 1513