/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	7.21 (Berkeley) 02/26/93
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/conf.h>

#include <miscfs/specfs/specdev.h>

#include <vm/vm.h>
#include <vm/vm_pager.h>
#include <vm/vm_prot.h>

#ifdef DEBUG
int mmapdebug = 0;
#define MDB_FOLLOW	0x01
#define MDB_SYNC	0x02
#define MDB_MAPIT	0x04
#endif

struct sbrk_args {
	int	incr;
};
/* ARGSUSED */
int
sbrk(p, uap, retval)
	struct proc *p;
	struct sbrk_args *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

struct sstk_args {
	int	incr;
};
/* ARGSUSED */
int
sstk(p, uap, retval)
	struct proc *p;
	struct sstk_args *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

struct mmap_args {
	caddr_t	addr;
	int	len;
	int	prot;
	int	flags;
	int	fd;
	long	pad;
	off_t	pos;
};

#ifdef COMPAT_43
struct getpagesize_args {
	int	dummy;
};
/* ARGSUSED */
int
getpagesize(p, uap, retval)
	struct proc *p;
	struct getpagesize_args *uap;
	int *retval;
{

	*retval = PAGE_SIZE;
	return (0);
}

struct osmmap_args {
	caddr_t	addr;
	int	len;
	int	prot;
	int	flags;
	int	fd;
	long	pos;
};
int
osmmap(p, uap, retval)
	struct proc *p;
	register struct osmmap_args *uap;
	int *retval;
{
	struct mmap_args nargs;
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC|PROT_WRITE,
		PROT_READ,
		PROT_EXEC|PROT_READ,
		PROT_WRITE|PROT_READ,
		PROT_EXEC|PROT_WRITE|PROT_READ,
	};
#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100
#define	OMAP_INHERIT	0x0800

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot&0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	if (uap->flags & OMAP_INHERIT)
		nargs.flags |= MAP_INHERIT;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (smmap(p, &nargs, retval));
}
#endif

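/*
 * Note on the conversion above: the cvtbsdprot[] table maps the old
 * 4.3BSD protection encoding (bit 1 = execute, bit 2 = write,
 * bit 4 = read) onto the current PROT_* values.  For example, an old
 * binary passing prot == 5 (read|exec in the old encoding) is handed
 * to smmap() with PROT_EXEC|PROT_READ.
 */
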
int
smmap(p, uap, retval)
	struct proc *p;
	register struct mmap_args *uap;
	int *retval;
{
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size;
	vm_prot_t prot;
	caddr_t handle;
	int flags, error;

	flags = uap->flags;
#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mmap(%d): addr %x len %x pro %x flg %x fd %d pos %x\n",
		       p->p_pid, uap->addr, uap->len, uap->prot,
		       flags, uap->fd, uap->pos);
#endif
	/*
	 * Address (if FIXED) must be page aligned.
	 * Size is implicitly rounded to a page boundary.
	 */
	addr = (vm_offset_t) uap->addr;
	if (((flags & MAP_FIXED) && (addr & PAGE_MASK)) || uap->len < 0 ||
	    ((flags & MAP_ANON) && uap->fd != -1))
		return (EINVAL);
	size = (vm_size_t) round_page(uap->len);
	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 * Note that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		if (VM_MAXUSER_ADDRESS > 0 && addr + size >= VM_MAXUSER_ADDRESS)
			return (EINVAL);
		if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
			return (EINVAL);
		if (addr > addr + size)
			return (EINVAL);
	}
	/*
	 * XXX if no hint is provided for a non-fixed mapping, place it
	 * after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	if (addr == 0 && (flags & MAP_FIXED) == 0)
		addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
	/*
	 * If we are mapping a file we need to check various
	 * file/vnode related things.
	 */
	if (flags & MAP_ANON)
		handle = NULL;
	else {
		/*
		 * Mapping a file, so get the fp for validation.
		 * Obtain the vnode and make sure it is of an
		 * appropriate type.
		 */
		if (((unsigned)uap->fd) >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
			return(EBADF);
		if (fp->f_type != DTYPE_VNODE)
			return(EINVAL);
		vp = (struct vnode *)fp->f_data;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			return(EINVAL);
		/*
		 * Ensure that file protection and desired protection
		 * are compatible.  Note that we only worry about
		 * writability if the mapping is shared.
		 */
		if ((uap->prot & PROT_READ) && (fp->f_flag & FREAD) == 0 ||
		    ((flags & MAP_SHARED) &&
		     (uap->prot & PROT_WRITE) && (fp->f_flag & FWRITE) == 0))
			return(EACCES);
		if ((flags & MAP_SHARED) && (fp->f_flag & FWRITE) == 0)
			flags = (flags & ~MAP_SHARED) | MAP_PRIVATE;
		handle = (caddr_t)vp;
	}
	/*
	 * Map protections to MACH style
	 */
	prot = uap->prot & VM_PROT_ALL;
	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot,
			flags, handle, (vm_offset_t)uap->pos);
	if (error == 0)
		*retval = (int) addr;
	return(error);
}

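/*
 * For example, a typical user-level call
 *
 *	fd = open("/some/file", O_RDONLY);
 *	p = mmap((caddr_t)0, len, PROT_READ, MAP_PRIVATE, fd, (off_t)0);
 *
 * arrives above with addr == 0 and MAP_FIXED clear, so the hint is
 * placed beyond the largest possible data segment and vm_mmap()
 * chooses the final address, which is returned to the user via
 * *retval.
 */
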
struct msync_args {
	caddr_t	addr;
	int	len;
};
int
msync(p, uap, retval)
	struct proc *p;
	struct msync_args *uap;
	int *retval;
{
	vm_offset_t addr, objoff, oaddr;
	vm_size_t size, osize;
	vm_prot_t prot, mprot;
	vm_inherit_t inherit;
	vm_object_t object;
	boolean_t shared;
	int rv;

#ifdef DEBUG
	if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
		printf("msync(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif
	if (((int)uap->addr & PAGE_MASK) || uap->len < 0)
		return(EINVAL);
	addr = oaddr = (vm_offset_t)uap->addr;
	osize = (vm_size_t)uap->len;
	/*
	 * Region must be entirely contained in a single entry
	 */
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+osize,
	    TRUE))
		return(EINVAL);
	/*
	 * Determine the object associated with that entry
	 * (object is returned locked on KERN_SUCCESS)
	 */
	rv = vm_region(&p->p_vmspace->vm_map, &addr, &size, &prot, &mprot,
		       &inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return(EINVAL);
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: region: object %x addr %x size %d objoff %d\n",
		       object, addr, size, objoff);
#endif
	/*
	 * Do not msync non-vnode-backed objects.
	 */
	if ((object->flags & OBJ_INTERNAL) || object->pager == NULL ||
	    object->pager->pg_type != PG_VNODE) {
		vm_object_unlock(object);
		return(EINVAL);
	}
	objoff += oaddr - addr;
	if (osize == 0)
		osize = size;
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: cleaning/flushing object range [%x-%x)\n",
		       objoff, objoff+osize);
#endif
	if (prot & VM_PROT_WRITE)
		vm_object_page_clean(object, objoff, objoff+osize, FALSE);
	/*
	 * (XXX)
	 * Bummer, gotta flush all cached pages to ensure
	 * consistency with the file system cache.
	 */
	vm_object_page_remove(object, objoff, objoff+osize);
	vm_object_unlock(object);
	return(0);
}

struct munmap_args {
	caddr_t	addr;
	int	len;
};
int
munmap(p, uap, retval)
	register struct proc *p;
	register struct munmap_args *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmap(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif

	addr = (vm_offset_t) uap->addr;
	if ((addr & PAGE_MASK) || uap->len < 0)
		return(EINVAL);
	size = (vm_size_t) round_page(uap->len);
	if (size == 0)
		return(0);
	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 * Note that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 && addr + size >= VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
		return (EINVAL);
	if (addr > addr + size)
		return (EINVAL);
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr + size,
	    FALSE))
		return(EINVAL);
	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(&p->p_vmspace->vm_map, addr, addr+size);
	return(0);
}

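/*
 * Note that since munmap() requires the entire range to be allocated
 * (the vm_map_is_allocated() call above, with single_entry == FALSE),
 * a range spanning an unmapped hole fails with EINVAL rather than
 * unmapping just the allocated pieces.
 */
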
void
munmapfd(fd)
	int fd;
{
#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmapfd(%d): fd %d\n", curproc->p_pid, fd);
#endif

	/*
	 * XXX -- should vm_deallocate any regions mapped to this file
	 */
	curproc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}

struct mprotect_args {
	caddr_t	addr;
	int	len;
	int	prot;
};
int
mprotect(p, uap, retval)
	struct proc *p;
	struct mprotect_args *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	register vm_prot_t prot;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mprotect(%d): addr %x len %x prot %d\n",
		       p->p_pid, uap->addr, uap->len, uap->prot);
#endif

	addr = (vm_offset_t)uap->addr;
	if ((addr & PAGE_MASK) || uap->len < 0)
		return(EINVAL);
	size = (vm_size_t)uap->len;
	/*
	 * Map protections
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot,
	    FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

struct madvise_args {
	caddr_t	addr;
	int	len;
	int	behav;
};
/* ARGSUSED */
int
madvise(p, uap, retval)
	struct proc *p;
	struct madvise_args *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

struct mincore_args {
	caddr_t	addr;
	int	len;
	char	*vec;
};
/* ARGSUSED */
int
mincore(p, uap, retval)
	struct proc *p;
	struct mincore_args *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is either a vnode pointer or NULL for MAP_ANON.
 */
int
vm_mmap(map, addr, size, prot, flags, handle, foff)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	vm_prot_t prot;
	register int flags;
	caddr_t handle;		/* XXX should be vp */
	vm_offset_t foff;
{
	register vm_pager_t pager;
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp;
	int type;
	int rv = KERN_SUCCESS;

	if (size == 0)
		return (0);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		fitit = FALSE;
		(void)vm_deallocate(map, *addr, size);
	}

	/*
	 * Lookup/allocate pager.  All except an unnamed anonymous lookup
	 * gain a reference to ensure continued existence of the object.
	 * (XXX the exception is to appease the pageout daemon)
	 */
	if (flags & MAP_ANON)
		type = PG_DFLT;
	else {
		vp = (struct vnode *)handle;
		if (vp->v_type == VCHR) {
			type = PG_DEVICE;
			handle = (caddr_t)vp->v_rdev;
		} else
			type = PG_VNODE;
	}
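	/*
	 * Note: for a character special file (e.g. a mapping of
	 * /dev/mem) the pager lookup below is keyed on the device
	 * number (v_rdev) rather than on the vnode itself.
	 */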
	pager = vm_pager_allocate(type, handle, size, prot);
	if (pager == NULL)
		return (type == PG_DEVICE ? EINVAL : ENOMEM);
	/*
	 * Find object and release extra reference gained by lookup
	 */
	object = vm_object_lookup(pager);
	vm_object_deallocate(object);

	/*
	 * Anonymous memory.
	 */
	if (flags & MAP_ANON) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, TRUE);
		if (rv != KERN_SUCCESS) {
			if (handle == NULL)
				vm_pager_deallocate(pager);
			else
				vm_object_deallocate(object);
			goto out;
		}
		/*
		 * Don't cache anonymous objects.
		 * Loses the reference gained by vm_pager_allocate.
		 */
		(void) pager_cache(object, FALSE);
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Must be a mapped file.
	 * Distinguish between character special and regular files.
	 */
	else if (vp->v_type == VCHR) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, FALSE);
		/*
		 * Uncache the object and lose the reference gained
		 * by vm_pager_allocate().  If the call to
		 * vm_allocate_with_pager() was successful, then we
		 * gained an additional reference ensuring the object
		 * will continue to exist.  If the call failed then
		 * the deallocate call below will terminate the
		 * object which is fine.
		 */
		(void) pager_cache(object, FALSE);
		if (rv != KERN_SUCCESS)
			goto out;
	}
	/*
	 * A regular file
	 */
	else {
#ifdef DEBUG
		if (object == NULL)
			printf("vm_mmap: no object: vp %x, pager %x\n",
			       vp, pager);
#endif
		/*
		 * Map it directly.
		 * Allows modifications to go out to the vnode.
		 */
		if (flags & MAP_SHARED) {
			rv = vm_allocate_with_pager(map, addr, size,
						    fitit, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			/*
			 * Don't cache the object.  This is the easiest way
			 * of ensuring that data gets back to the filesystem
			 * because vnode_pager_deallocate() will fsync the
			 * vnode.  pager_cache() will lose the extra ref.
			 */
			if (prot & VM_PROT_WRITE)
				pager_cache(object, FALSE);
			else
				vm_object_deallocate(object);
		}
		/*
		 * Copy-on-write of file.  Two flavors.
		 * MAP_COPY is true COW; you essentially get a snapshot of
		 * the region at the time of mapping.  MAP_PRIVATE means only
		 * that your changes are not reflected back to the object.
		 * Changes made by others will be seen.
		 */
		else {
			vm_map_t tmap;
			vm_offset_t off;

			/* locate and allocate the target address space */
			rv = vm_map_find(map, NULL, (vm_offset_t)0,
					 addr, size, fitit);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
					     VM_MIN_ADDRESS+size, TRUE);
			off = VM_MIN_ADDRESS;
			rv = vm_allocate_with_pager(tmap, &off, size,
						    TRUE, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				vm_map_deallocate(tmap);
				goto out;
			}
			/*
			 * (XXX)
			 * MAP_PRIVATE implies that we see changes made by
			 * others.  To ensure that, we need to guarantee that
			 * no copy object is created (otherwise original
			 * pages would be pushed to the copy object and we
			 * would never see changes made by others).  We
			 * totally sleaze it right now by marking the object
			 * internal temporarily.
			 */
			if ((flags & MAP_COPY) == 0)
				object->flags |= OBJ_INTERNAL;
			rv = vm_map_copy(map, tmap, *addr, size, off,
					 FALSE, FALSE);
			object->flags &= ~OBJ_INTERNAL;
			/*
			 * (XXX)
			 * My oh my, this only gets worse...
			 * Force creation of a shadow object so that
			 * vm_map_fork will do the right thing.
			 */
			if ((flags & MAP_COPY) == 0) {
				vm_map_t tmap;
				vm_map_entry_t tentry;
				vm_object_t tobject;
				vm_offset_t toffset;
				vm_prot_t tprot;
				boolean_t twired, tsu;

				tmap = map;
				vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
					      &tentry, &tobject, &toffset,
					      &tprot, &twired, &tsu);
				vm_map_lookup_done(tmap, tentry);
			}
			/*
			 * (XXX)
			 * Map copy code cannot detect sharing unless a
			 * sharing map is involved.  So we cheat and write
			 * protect everything ourselves.
			 */
			vm_object_pmap_copy(object, (vm_offset_t)foff,
					    (vm_offset_t)foff+size);
			vm_object_deallocate(object);
			vm_map_deallocate(tmap);
			if (rv != KERN_SUCCESS)
				goto out;
		}
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
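	/*
	 * At this point the object is mapped at *addr by one of three
	 * paths: anonymous memory through the default pager, a character
	 * device through a PG_DEVICE pager, or a regular file mapped
	 * either shared or copy-on-write via the temporary map above.
	 * All that remains is to narrow the protection and set the
	 * inheritance attribute.
	 */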
	/*
	 * Correct protection (default is VM_PROT_ALL).
	 * Note that we set the maximum protection.  This may not be
	 * entirely correct.  Maybe the maximum protection should be based
	 * on the object permissions where it makes sense (e.g. a vnode).
	 *
	 * Changed my mind: leave max prot at VM_PROT_ALL.
	 */
	if (prot != VM_PROT_ALL) {
		rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & MAP_SHARED) {
		rv = vm_inherit(map, *addr, size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
out:
#ifdef DEBUG
	if (mmapdebug & MDB_MAPIT)
		printf("vm_mmap: rv %d\n", rv);
#endif
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}

/*
 * Internal bastardized version of MACH's vm_region system call.
 * Given an address and size it returns map attributes as well
 * as the (locked) object mapped at that location.
 */
int
vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff)
	vm_map_t map;
	vm_offset_t *addr;		/* IN/OUT */
	vm_size_t *size;		/* OUT */
	vm_prot_t *prot;		/* OUT */
	vm_prot_t *max_prot;		/* OUT */
	vm_inherit_t *inheritance;	/* OUT */
	boolean_t *shared;		/* OUT */
	vm_object_t *object;		/* OUT */
	vm_offset_t *objoff;		/* OUT */
{
	vm_map_entry_t tmp_entry;
	register vm_map_entry_t entry;
	register vm_offset_t tmp_offset;
	vm_offset_t start;

	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	start = *addr;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->next) == &map->header) {
			vm_map_unlock_read(map);
			return(KERN_NO_SPACE);
		}
		start = entry->start;
		*addr = start;
	} else
		entry = tmp_entry;

	*prot = entry->protection;
	*max_prot = entry->max_protection;
	*inheritance = entry->inheritance;

	tmp_offset = entry->offset + (start - entry->start);
	*size = (entry->end - start);

	if (entry->is_a_map) {
		register vm_map_t share_map;
		vm_size_t share_size;

		share_map = entry->object.share_map;

		vm_map_lock_read(share_map);
		(void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry);

		if ((share_size = (tmp_entry->end - tmp_offset)) < *size)
			*size = share_size;

		vm_object_lock(tmp_entry->object);
		*object = tmp_entry->object.vm_object;
		*objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start);

		*shared = (share_map->ref_count != 1);
		vm_map_unlock_read(share_map);
	} else {
		vm_object_lock(entry->object);
		*object = entry->object.vm_object;
		*objoff = tmp_offset;

		*shared = FALSE;
	}

	vm_map_unlock_read(map);

	return(KERN_SUCCESS);
}

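/*
 * Note: msync() is the only caller of vm_region() in this file; it
 * passes the page-aligned user address and uses the returned
 * object/objoff pair to locate the pages to clean.  On KERN_SUCCESS
 * the object comes back locked, so the caller must eventually
 * vm_object_unlock() it.
 */
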
/*
 * Yet another bastard routine.
 */
int
vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	boolean_t fitit;
	vm_pager_t pager;
	vm_offset_t poffset;
	boolean_t internal;
{
	register vm_object_t object;
	register int result;

	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	*addr = trunc_page(*addr);
	size = round_page(size);

	/*
	 * Lookup the pager/paging-space in the object cache.
	 * If it's not there, then create a new object and cache
	 * it.
	 */
	object = vm_object_lookup(pager);
	cnt.v_lookups++;
	if (object == NULL) {
		object = vm_object_allocate(size);
		/*
		 * From Mike Hibler: "unnamed anonymous objects should never
		 * be on the hash list ... For now you can just change
		 * vm_allocate_with_pager to not do vm_object_enter if this
		 * is an internal object ..."
		 */
		if (!internal)
			vm_object_enter(object, pager);
	} else
		cnt.v_hits++;
	if (internal)
		object->flags |= OBJ_INTERNAL;
	else
		object->flags &= ~OBJ_INTERNAL;

	result = vm_map_find(map, object, poffset, addr, size, fitit);
	if (result != KERN_SUCCESS)
		vm_object_deallocate(object);
	else if (pager != NULL)
		vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
	return(result);
}

/*
 * XXX: this routine belongs in vm_map.c.
 *
 * Returns TRUE if the range [start, end) is allocated in either
 * a single entry (single_entry == TRUE) or multiple contiguous
 * entries (single_entry == FALSE).
 *
 * start and end should be page aligned.
 */
boolean_t
vm_map_is_allocated(map, start, end, single_entry)
	vm_map_t map;
	vm_offset_t start, end;
	boolean_t single_entry;
{
	vm_map_entry_t mapent;
	register vm_offset_t nend;

	vm_map_lock_read(map);

	/*
	 * Start address not in any entry
	 */
	if (!vm_map_lookup_entry(map, start, &mapent)) {
		vm_map_unlock_read(map);
		return (FALSE);
	}
	/*
	 * Find the maximum stretch of contiguously allocated space
	 */
	nend = mapent->end;
	if (!single_entry) {
		mapent = mapent->next;
		while (mapent != &map->header && mapent->start == nend) {
			nend = mapent->end;
			mapent = mapent->next;
		}
	}

	vm_map_unlock_read(map);
	return (end <= nend);
}

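/*
 * Usage note: msync() calls vm_map_is_allocated() with single_entry
 * TRUE since the region to be synced must lie within a single map
 * entry, while munmap() passes FALSE and thus accepts a range that
 * spans several contiguous entries.
 */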