/*
 * (MPSAFE)
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 * $FreeBSD: src/sys/vm/vm_mmap.c,v 1.108.2.6 2002/07/02 20:06:19 dillon Exp $
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kern_syscall.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <sys/file2.h>
#include <sys/thread.h>
#include <sys/thread2.h>

static int max_proc_mmap;
SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");
int vkernel_enable;
SYSCTL_INT(_vm, OID_AUTO, vkernel_enable, CTLFLAG_RW, &vkernel_enable, 0, "");
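
/*
 * Note (exposition, not from the original source): vm.vkernel_enable must
 * be set to a non-zero value (e.g. "sysctl vm.vkernel_enable=1") before
 * MAP_VPAGETABLE mappings are accepted; the check in kern_mmap() below
 * otherwise returns EOPNOTSUPP.
 */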

/*
 * Set the maximum number of vm_map_entry structures per process.  Roughly
 * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100
 * of our KVM malloc space still results in generous limits.  We want a
 * default that is good enough to prevent the kernel from running out of
 * resources if attacked from a compromised user account, but generous
 * enough such that multi-threaded processes are not unduly inconvenienced.
 */

static void vmmapentry_rsrc_init (void *);
SYSINIT(vmmersrc, SI_BOOT1_POST, SI_ORDER_ANY, vmmapentry_rsrc_init, NULL)

static void
vmmapentry_rsrc_init(void *dummy)
{
	max_proc_mmap = KvaSize / sizeof(struct vm_map_entry);
	max_proc_mmap /= 100;
}

/*
 * MPSAFE
 */
int
sys_sbrk(struct sbrk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * sstk_args(int incr)
 *
 * MPSAFE
 */
int
sys_sstk(struct sstk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * mmap_args(void *addr, size_t len, int prot, int flags, int fd,
 *	     long pad, off_t pos)
 *
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 *
 * Block devices can be mmap'd no matter what they represent.  Cache coherency
 * is maintained as long as you do not write directly to the underlying
 * character device.
 *
 * No requirements
 */
int
kern_mmap(struct vmspace *vms, caddr_t uaddr, size_t ulen,
	  int uprot, int uflags, int fd, off_t upos, void **res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp = NULL;
	struct vnode *vp;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	int flags, error;
	off_t pos;
	vm_object_t obj;

	KKASSERT(p);

	addr = (vm_offset_t) uaddr;
	size = ulen;
	prot = uprot & VM_PROT_ALL;
	flags = uflags;
	pos = upos;

	/*
	 * Make sure mapping fits into numeric range etc.
	 *
	 * NOTE: We support the full unsigned range for size now.
	 */
	if (((flags & MAP_ANON) && (fd != -1 || pos != 0)))
		return (EINVAL);

	if (size == 0)
		return (EINVAL);

	if (flags & MAP_STACK) {
		if ((fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Virtual page tables cannot be used with MAP_STACK.  Apart from
	 * it not making any sense, the aux union is used by both
	 * types.
	 *
	 * Because the virtual page table is stored in the backing object
	 * and might be updated by the kernel, the mapping must be R+W.
	 */
	if (flags & MAP_VPAGETABLE) {
		if (vkernel_enable == 0)
			return (EOPNOTSUPP);
		if (flags & MAP_STACK)
			return (EINVAL);
		if ((prot & (PROT_READ|PROT_WRITE)) != (PROT_READ|PROT_WRITE))
			return (EINVAL);
	}
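
	/*
	 * Illustrative example (assuming PAGE_SIZE is 4096; exposition only):
	 * a request with pos = 0x12345 and len = 0x100 yields pageoff = 0x345;
	 * pos is rounded down to 0x12000 and size becomes
	 * round_page(0x100 + 0x345) = 0x1000.  The address returned to the
	 * caller is adjusted back up by pageoff at the end of this function.
	 */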
	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */
	if (size < ulen)			/* wrap */
		return(EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & (MAP_FIXED | MAP_TRYFIXED)) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		/*
		 * Address range must be all in user VM space and not wrap.
		 */
		tmpaddr = addr + size;
		if (tmpaddr < addr)
			return (EINVAL);
		if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
			return (EINVAL);
		if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
			return (EINVAL);
	} else {
		/*
		 * Get a hint of where to map.  It also provides mmap offset
		 * randomization if enabled.
		 */
		addr = vm_map_hint(p, addr, prot);
	}

	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
	} else {
		/*
		 * Mapping file, get fp for validation.  Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		fp = holdfp(p->p_fd, fd, -1);
		if (fp == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE) {
			error = EINVAL;
			goto done;
		}
		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_ASYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
		vp = (struct vnode *) fp->f_data;

		/*
		 * Validate the vnode for the operation.
		 */
		switch(vp->v_type) {
		case VREG:
			/*
			 * Get the proper underlying object
			 */
			if ((obj = vp->v_object) == NULL) {
				error = EINVAL;
				goto done;
			}
			KKASSERT((struct vnode *)obj->handle == vp);
			break;
		case VCHR:
			/*
			 * Make sure a device has not been revoked.
			 * Mappability is handled by the device layer.
			 */
			if (vp->v_rdev == NULL) {
				error = EBADF;
				goto done;
			}
			break;
		default:
			/*
			 * Nothing else is mappable.
			 */
			error = EINVAL;
			goto done;
		}

		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
			handle = NULL;
			maxprot = VM_PROT_ALL;
			flags |= MAP_ANON;
			pos = 0;
		} else {
			/*
			 * cdevs do not provide private mappings of any kind.
			 */
			if (vp->v_type == VCHR &&
			    (flags & (MAP_PRIVATE|MAP_COPY))) {
				error = EINVAL;
				goto done;
			}
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination?  What if
			 * proc does a setuid?
			 */
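			/*
			 * For example (illustrative): a descriptor opened
			 * O_RDONLY yields maxprot = VM_PROT_EXECUTE |
			 * VM_PROT_READ below, so a MAP_SHARED request for
			 * PROT_WRITE on it fails with EACCES.
			 */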
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_flag & FREAD) {
				maxprot |= VM_PROT_READ;
			} else if (prot & PROT_READ) {
				error = EACCES;
				goto done;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.  Check for superuser, only if
			 * we're at securelevel < 1, to allow the XIG X server
			 * to continue to work.
			 */
			if ((flags & MAP_SHARED) != 0 || vp->v_type == VCHR) {
				if ((fp->f_flag & FWRITE) != 0) {
					struct vattr va;
					if ((error = VOP_GETATTR(vp, &va))) {
						goto done;
					}
					if ((va.va_flags &
					    (IMMUTABLE|APPEND)) == 0) {
						maxprot |= VM_PROT_WRITE;
					} else if (prot & PROT_WRITE) {
						error = EPERM;
						goto done;
					}
				} else if ((prot & PROT_WRITE) != 0) {
					error = EACCES;
					goto done;
				}
			} else {
				maxprot |= VM_PROT_WRITE;
			}
			handle = (void *)vp;
		}
	}

	lwkt_gettoken(&vms->vm_map.token);

	/*
	 * Do not allow more than a certain number of vm_map_entry structures
	 * per process.  Scale with the number of rforks sharing the map
	 * to make the limit reasonable for threads.
	 */
	if (max_proc_mmap &&
	    vms->vm_map.nentries >= max_proc_mmap * vms->vm_sysref.refcnt) {
		error = ENOMEM;
		lwkt_reltoken(&vms->vm_map.token);
		goto done;
	}

	error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
			flags, handle, pos);
	if (error == 0)
		*res = (void *)(addr + pageoff);

	lwkt_reltoken(&vms->vm_map.token);
done:
	if (fp)
		fdrop(fp);

	return (error);
}

/*
 * mmap system call handler
 *
 * No requirements.
 */
int
sys_mmap(struct mmap_args *uap)
{
	int error;

	error = kern_mmap(curproc->p_vmspace, uap->addr, uap->len,
			  uap->prot, uap->flags,
			  uap->fd, uap->pos, &uap->sysmsg_resultp);

	return (error);
}
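
/*
 * Illustrative userland usage (exposition only, assuming 4 KB pages):
 * because the file offset does not have to be page aligned here, a call
 * such as
 *
 *	p = mmap(NULL, 100, PROT_READ, MAP_SHARED, fd, 0x1234);
 *
 * maps the file starting at trunc_page(0x1234) == 0x1000 and returns a
 * pointer already advanced by the 0x234 page offset, so p[0] corresponds
 * to file offset 0x1234.  With MAP_FIXED the supplied address must share
 * that same remainder modulo PAGE_SIZE or EINVAL is returned.
 */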

/*
 * msync system call handler
 *
 * msync_args(void *addr, size_t len, int flags)
 *
 * No requirements
 */
int
sys_msync(struct msync_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &p->p_vmspace->vm_map;

	/*
	 * map->token serializes extracting the address range for size == 0
	 * msyncs with the vm_map_clean call; if the token were not held
	 * across the two calls, an intervening munmap/mmap pair, for example,
	 * could cause msync to occur on a wrong region.
	 */
	lwkt_gettoken(&map->token);

	/*
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we don't
	 * really keep track of individual mmaps so we approximate by flushing
	 * the range of the map entry containing addr.  This can be incorrect
	 * if the region splits or is coalesced with a neighbor.
	 */
	if (size == 0) {
		vm_map_entry_t entry;

		vm_map_lock_read(map);
		rv = vm_map_lookup_entry(map, addr, &entry);
		if (rv == FALSE) {
			vm_map_unlock_read(map);
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}
		addr = entry->start;
		size = entry->end - entry->start;
		vm_map_unlock_read(map);
	}

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
			  (flags & MS_INVALIDATE) != 0);
done:
	lwkt_reltoken(&map->token);

	switch (rv) {
	case KERN_SUCCESS:
		break;
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}

	return (0);
}

/*
 * munmap system call handler
 *
 * munmap_args(void *addr, size_t len)
 *
 * No requirements
 */
int
sys_munmap(struct munmap_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	if (size == 0)
		return (0);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);

	map = &p->p_vmspace->vm_map;

	/* map->token serializes between the map check and the actual unmap */
	lwkt_gettoken(&map->token);

	/*
	 * Make sure entire range is allocated.
	 */
	if (!vm_map_check_protection(map, addr, addr + size,
				     VM_PROT_NONE, FALSE)) {
		lwkt_reltoken(&map->token);
		return (EINVAL);
	}
	/* returns nothing but KERN_SUCCESS anyway */
	vm_map_remove(map, addr, addr + size);
	lwkt_reltoken(&map->token);
	return (0);
}

/*
 * mprotect_args(const void *addr, size_t len, int prot)
 *
 * No requirements.
 */
int
sys_mprotect(struct mprotect_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_prot_t prot;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr + size,
			       prot, FALSE)) {
	case KERN_SUCCESS:
		error = 0;
		break;
	case KERN_PROTECTION_FAILURE:
		error = EACCES;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
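
/*
 * Illustrative userland usage (exposition only): a process can carve a
 * guard page out of an anonymous mapping by revoking all access to it,
 * e.g.
 *
 *	base = mmap(NULL, 4 * PAGE_SIZE, PROT_READ | PROT_WRITE,
 *		    MAP_ANON | MAP_PRIVATE, -1, 0);
 *	mprotect(base, PAGE_SIZE, PROT_NONE);
 *
 * Subsequent access to the first page then faults, while the remaining
 * pages keep their original protection.
 */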

/*
 * minherit system call handler
 *
 * minherit_args(void *addr, size_t len, int inherit)
 *
 * No requirements.
 */
int
sys_minherit(struct minherit_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;
	int error;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	switch (vm_map_inherit(&p->p_vmspace->vm_map, addr,
			       addr + size, inherit)) {
	case KERN_SUCCESS:
		error = 0;
		break;
	case KERN_PROTECTION_FAILURE:
		error = EACCES;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

/*
 * madvise system call handler
 *
 * madvise_args(void *addr, size_t len, int behav)
 *
 * No requirements.
 */
int
sys_madvise(struct madvise_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t start, end;
	vm_offset_t tmpaddr = (vm_offset_t)uap->addr + uap->len;
	int error;

	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav >= MADV_CONTROL_END)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (tmpaddr < (vm_offset_t)uap->addr)
		return (EINVAL);
	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t)uap->addr);
	end = round_page(tmpaddr);

	error = vm_map_madvise(&p->p_vmspace->vm_map, start, end,
			       uap->behav, 0);
	return (error);
}
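
/*
 * Note (exposition): sys_madvise() is the standard madvise(2) entry point
 * and always passes a zero value argument to vm_map_madvise();
 * sys_mcontrol() below is the extended variant whose extra 64-bit value
 * argument is forwarded for behaviors that need one.
 */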

/*
 * mcontrol system call handler
 *
 * mcontrol_args(void *addr, size_t len, int behav, off_t value)
 *
 * No requirements
 */
int
sys_mcontrol(struct mcontrol_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t start, end;
	vm_offset_t tmpaddr = (vm_offset_t)uap->addr + uap->len;
	int error;

	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav > MADV_CONTROL_END)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (tmpaddr < (vm_offset_t) uap->addr)
		return (EINVAL);
	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t)uap->addr);
	end = round_page(tmpaddr);

	error = vm_map_madvise(&p->p_vmspace->vm_map, start, end,
			       uap->behav, uap->value);
	return (error);
}
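
/*
 * Illustrative userland usage of mincore() (exposition only): the caller
 * supplies one byte per page of the queried range and each byte is filled
 * in with MINCORE_* flags, e.g.
 *
 *	char vec[npages];
 *	mincore(base, npages * PAGE_SIZE, vec);
 *	if (vec[0] & MINCORE_INCORE)
 *		... the first page of the range is resident ...
 *
 * The implementation below walks the map entries and consults the pmap
 * and the backing VM object for each page.
 */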

/*
 * mincore system call handler
 *
 * mincore_args(const void *addr, size_t len, char *vec)
 *
 * No requirements
 */
int
sys_mincore(struct mincore_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int mincoreinfo;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	if (end < addr)
		return (EINVAL);
	if (VM_MAX_USER_ADDRESS > 0 && end > VM_MAX_USER_ADDRESS)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	map = &p->p_vmspace->vm_map;
	pmap = vmspace_pmap(p->p_vmspace);

	lwkt_gettoken(&map->token);
	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry))
		entry = entry->next;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	     (current != &map->header) && (current->start < end);
	     current = current->next) {
		/*
		 * ignore submaps (for now) or null objects
		 */
		if (current->maptype != VM_MAPTYPE_NORMAL &&
		    current->maptype != VM_MAPTYPE_VPAGETABLE) {
			continue;
		}
		if (current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 *
			 * If we have to check the VM object, only mess
			 * around with normal maps.  Do not mess around
			 * with virtual page tables (XXX).
			 */
			mincoreinfo = pmap_mincore(pmap, addr);
			if (mincoreinfo == 0 &&
			    current->maptype == VM_MAPTYPE_NORMAL) {
				vm_pindex_t pindex;
				vm_ooffset_t offset;
				vm_page_t m;

				/*
				 * calculate the page index into the object
				 */
				offset = current->offset +
					 (addr - current->start);
				pindex = OFF_TO_IDX(offset);

				/*
				 * if the page is resident, then gather
				 * information about it.  spl protection is
				 * required to maintain the object
				 * association.  And XXX what if the page is
				 * busy?  What's the deal with that?
				 *
				 * XXX vm_token - legacy for pmap_ts_referenced
				 *     in i386 and vkernel pmap code.
				 */
				lwkt_gettoken(&vm_token);
				vm_object_hold(current->object.vm_object);
				m = vm_page_lookup(current->object.vm_object,
						   pindex);
				if (m && m->valid) {
					mincoreinfo = MINCORE_INCORE;
					if (m->dirty ||
					    pmap_is_modified(m))
						mincoreinfo |= MINCORE_MODIFIED_OTHER;
					if ((m->flags & PG_REFERENCED) ||
					    pmap_ts_referenced(m)) {
						vm_page_flag_set(m, PG_REFERENCED);
						mincoreinfo |= MINCORE_REFERENCED_OTHER;
					}
				}
				vm_object_drop(current->object.vm_object);
				lwkt_reltoken(&vm_token);
			}

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make
			 * sure that the byte vector is zeroed for those
			 * skipped entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done;
				}
				++lastvecindex;
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done;
			}

			/*
			 * If the map has changed, due to the subyte, the
			 * previous output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done;
		}
		++lastvecindex;
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);

	error = 0;
done:
	lwkt_reltoken(&map->token);
	return (error);
}

/*
 * mlock system call handler
 *
 * mlock_args(const void *addr, size_t len)
 *
 * No requirements
 */
int
sys_mlock(struct mlock_args *uap)
{
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return (EINVAL);

	if (atop(size) + vmstats.v_wire_count > vm_page_max_wired)
		return (EAGAIN);

	/*
	 * We do not need to synchronize against other threads updating ucred;
	 * they update p->ucred, which we synchronize into td_ucred ourselves.
	 */
#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur) {
		return (ENOMEM);
	}
#else
	error = priv_check_cred(td->td_ucred, PRIV_ROOT, 0);
	if (error) {
		return (error);
	}
#endif
	error = vm_map_unwire(&p->p_vmspace->vm_map, addr, addr + size, FALSE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
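
/*
 * Note (exposition): mlockall(MCL_CURRENT) is not implemented below and
 * returns ENOSYS; only MCL_FUTURE is honored, which sets MAP_WIREFUTURE
 * on the map so that mappings created later are wired by vm_mmap().
 */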

/*
 * mlockall(int how)
 *
 * No requirements
 */
int
sys_mlockall(struct mlockall_args *uap)
{
#ifdef _P1003_1B_VISIBLE
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;
	int how = uap->how;
	int rc = KERN_SUCCESS;

	if (((how & MCL_CURRENT) == 0) && ((how & MCL_FUTURE) == 0))
		return (EINVAL);

	rc = priv_check_cred(td->td_ucred, PRIV_ROOT, 0);
	if (rc)
		return (rc);

	vm_map_lock(map);
	do {
		if (how & MCL_CURRENT) {
			for (entry = map->header.next;
			     entry != &map->header;
			     entry = entry->next)
				;

			rc = ENOSYS;
			break;
		}

		if (how & MCL_FUTURE)
			map->flags |= MAP_WIREFUTURE;
	} while(0);
	vm_map_unlock(map);

	return (rc);
#else /* !_P1003_1B_VISIBLE */
	return (ENOSYS);
#endif /* _P1003_1B_VISIBLE */
}

/*
 * munlockall(void)
 *
 * Unwire all user-wired map entries, cancel MCL_FUTURE.
 *
 * No requirements
 */
int
sys_munlockall(struct munlockall_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;
	int rc = KERN_SUCCESS;

	vm_map_lock(map);

	/* Clear MAP_WIREFUTURE to cancel mlockall(MCL_FUTURE) */
	map->flags &= ~MAP_WIREFUTURE;

retry:
	for (entry = map->header.next;
	     entry != &map->header;
	     entry = entry->next) {
		if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
			continue;

		/*
		 * If we encounter an in-transition entry, we release the
		 * map lock and retry the scan; we do not decrement any
		 * wired_count more than once because we do not touch
		 * any entries with MAP_ENTRY_USER_WIRED not set.
		 *
		 * There is a potential interleaving with concurrent
		 * mlockall()s here -- if we abort a scan, an mlockall()
		 * could start and wire a number of entries before our
		 * current position, and then stall itself on this
		 * or any other in-transition entry.  If that occurs, when
		 * we resume, we will unwire those entries.
		 */
		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			++mycpu->gd_cnt.v_intrans_coll;
			++mycpu->gd_cnt.v_intrans_wait;
			vm_map_transition_wait(map);
			goto retry;
		}

		KASSERT(entry->wired_count > 0,
			("wired_count was 0 with USER_WIRED set! %p", entry));

		/* Drop wired count, if it hits zero, unwire the entry */
		entry->eflags &= ~MAP_ENTRY_USER_WIRED;
		entry->wired_count--;
		if (entry->wired_count == 0)
			vm_fault_unwire(map, entry);
	}

	map->timestamp++;
	vm_map_unlock(map);

	return (rc);
}

/*
 * munlock system call handler
 *
 * munlock_args(const void *addr, size_t len)
 *
 * No requirements
 */
int
sys_munlock(struct munlock_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	tmpaddr = addr + size;
	if (tmpaddr < addr)		/* wrap */
		return (EINVAL);

#ifndef pmap_wired_count
	error = priv_check(td, PRIV_ROOT);
	if (error)
		return (error);
#endif

	error = vm_map_unwire(&p->p_vmspace->vm_map, addr, addr + size, TRUE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is either a vnode pointer or NULL for MAP_ANON.
 *
 * No requirements
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags, void *handle, vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object;
	vm_offset_t eaddr;
	vm_size_t esize;
	vm_size_t align;
	struct vnode *vp;
	struct thread *td = curthread;
	struct proc *p;
	int rv = KERN_SUCCESS;
	off_t objsize;
	int docow;

	if (size == 0)
		return (0);

	objsize = round_page(size);
	if (objsize < size)
		return (EINVAL);
	size = objsize;

	lwkt_gettoken(&map->token);

	/*
	 * XXX messy code, fixme
	 *
	 * NOTE: Overflow checks require discrete statements or GCC4
	 *	 will optimize it out.
	 */
	if ((p = curproc) != NULL && map == &p->p_vmspace->vm_map) {
		esize = map->size + size;	/* workaround gcc4 opt */
		if (esize < map->size ||
		    esize > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
			lwkt_reltoken(&map->token);
			return(ENOMEM);
		}
	}

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmapping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 *
	 * NOTE: Overflow checks require discrete statements or GCC4
	 *	 will optimize it out.
	 */
	if (foff & PAGE_MASK) {
		lwkt_reltoken(&map->token);
		return (EINVAL);
	}

	/*
	 * Handle alignment.  For large memory maps it is possible
	 * that the MMU can optimize the page table so align anything
	 * that is a multiple of SEG_SIZE to SEG_SIZE.
	 *
	 * Also align any large mapping (bigger than 16x SEG_SIZE) to a
	 * SEG_SIZE address boundary.
	 */
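	/*
	 * Note (exposition): the expression below accepts only power-of-two
	 * sizes.  For a power of two such as 0x1000,
	 * (align ^ (align - 1)) == 0x1fff == (align << 1) - 1, while any
	 * other value fails the test and the request is rejected.
	 */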
	if (flags & MAP_SIZEALIGN) {
		align = size;
		if ((align ^ (align - 1)) != (align << 1) - 1) {
			lwkt_reltoken(&map->token);
			return (EINVAL);
		}
	} else if ((flags & MAP_FIXED) == 0 &&
		   ((size & SEG_MASK) == 0 || size > SEG_SIZE * 16)) {
		align = SEG_SIZE;
	} else {
		align = PAGE_SIZE;
	}

	if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr)) {
			lwkt_reltoken(&map->token);
			return (EINVAL);
		}
		eaddr = *addr + size;
		if (eaddr < *addr) {
			lwkt_reltoken(&map->token);
			return (EINVAL);
		}
		fitit = FALSE;
		if ((flags & MAP_TRYFIXED) == 0)
			vm_map_remove(map, *addr, *addr + size);
	}

	/*
	 * Lookup/allocate object.
	 */
	if (flags & MAP_ANON) {
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle) {
			/*
			 * Default memory object
			 */
			object = default_pager_alloc(handle, objsize,
						     prot, foff);
			if (object == NULL) {
				lwkt_reltoken(&map->token);
				return(ENOMEM);
			}
			docow = MAP_PREFAULT_PARTIAL;
		} else {
			/*
			 * Implicit single instance of a default memory
			 * object, so we don't need a VM object yet.
			 */
			foff = 0;
			object = NULL;
			docow = 0;
		}
		vp = NULL;
	} else {
		vp = (struct vnode *)handle;
		if (vp->v_type == VCHR) {
			/*
			 * Device mappings (device size unknown?).
			 * Force them to be shared.
			 */
			handle = (void *)(intptr_t)vp->v_rdev;
			object = dev_pager_alloc(handle, objsize, prot, foff);
			if (object == NULL) {
				lwkt_reltoken(&map->token);
				return(EINVAL);
			}
			docow = MAP_PREFAULT_PARTIAL;
			flags &= ~(MAP_PRIVATE|MAP_COPY);
			flags |= MAP_SHARED;
		} else {
			/*
			 * Regular file mapping (typically).  The attribute
			 * check is for the link count test only.  Mmapable
			 * vnodes must already have a VM object assigned.
			 */
			struct vattr vat;
			int error;

			error = VOP_GETATTR(vp, &vat);
			if (error) {
				lwkt_reltoken(&map->token);
				return (error);
			}
			docow = MAP_PREFAULT_PARTIAL;
			object = vnode_pager_reference(vp);
			if (object == NULL && vp->v_type == VREG) {
				lwkt_reltoken(&map->token);
				kprintf("Warning: cannot mmap vnode %p, no "
					"object\n", vp);
				return(EINVAL);
			}

			/*
			 * If it is a regular file without any references
			 * we do not need to sync it.
			 */
			if (vp->v_type == VREG && vat.va_nlink == 0) {
				flags |= MAP_NOSYNC;
			}
		}
	}

	/*
	 * Deal with the adjusted flags
	 */
	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;

#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;

	if (maxprot & VM_PROT_READ)
		maxprot |= VM_PROT_EXECUTE;
#endif

	/*
	 * This may place the area in its own page directory if (size) is
	 * large enough, otherwise it typically returns its argument.
	 */
	if (fitit) {
		*addr = pmap_addr_hint(object, *addr, size);
	}

	/*
	 * Stack mappings need special attention.
	 *
	 * Mappings that use virtual page tables will default to storing
	 * the page table at offset 0.
	 */
	if (flags & MAP_STACK) {
		rv = vm_map_stack(map, *addr, size, flags,
				  prot, maxprot, docow);
	} else if (flags & MAP_VPAGETABLE) {
		rv = vm_map_find(map, object, foff, addr, size, align,
				 fitit, VM_MAPTYPE_VPAGETABLE,
				 prot, maxprot, docow);
	} else {
		rv = vm_map_find(map, object, foff, addr, size, align,
				 fitit, VM_MAPTYPE_NORMAL,
				 prot, maxprot, docow);
	}

	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference.  Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 */
		vm_object_deallocate(object);
		goto out;
	}

	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & (MAP_SHARED|MAP_INHERIT)) {
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			vm_map_remove(map, *addr, *addr + size);
			goto out;
		}
	}

	/* If a process has marked all future mappings for wiring, do so */
	if ((rv == KERN_SUCCESS) && (map->flags & MAP_WIREFUTURE))
		vm_map_unwire(map, *addr, *addr + size, FALSE);

	/*
	 * Set the access time on the vnode
	 */
	if (vp != NULL)
		vn_mark_atime(vp, td);
out:
	lwkt_reltoken(&map->token);

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}