/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	7.20 (Berkeley) 02/20/93
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/conf.h>

#include <miscfs/specfs/specdev.h>

#include <vm/vm.h>
#include <vm/vm_pager.h>
#include <vm/vm_prot.h>

#ifdef DEBUG
int mmapdebug = 0;
#define MDB_FOLLOW	0x01
#define MDB_SYNC	0x02
#define MDB_MAPIT	0x04
#endif

struct sbrk_args {
	int	incr;
};
/* ARGSUSED */
int
sbrk(p, uap, retval)
	struct proc *p;
	struct sbrk_args *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

struct sstk_args {
	int	incr;
};
/* ARGSUSED */
int
sstk(p, uap, retval)
	struct proc *p;
	struct sstk_args *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

struct mmap_args {
	caddr_t	addr;
	int	len;
	int	prot;
	int	flags;
	int	fd;
	long	pad;
	off_t	pos;
};

#ifdef COMPAT_43
struct getpagesize_args {
	int	dummy;
};
/* ARGSUSED */
int
getpagesize(p, uap, retval)
	struct proc *p;
	struct getpagesize_args *uap;
	int *retval;
{

	*retval = PAGE_SIZE;
	return (0);
}

struct osmmap_args {
	caddr_t	addr;
	int	len;
	int	prot;
	int	flags;
	int	fd;
	long	pos;
};
int
osmmap(p, uap, retval)
	struct proc *p;
	register struct osmmap_args *uap;
	int *retval;
{
	struct mmap_args nargs;
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC|PROT_WRITE,
		PROT_READ,
		PROT_EXEC|PROT_READ,
		PROT_WRITE|PROT_READ,
		PROT_EXEC|PROT_WRITE|PROT_READ,
	};
#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100
#define	OMAP_INHERIT	0x0800

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot&0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	if (uap->flags & OMAP_INHERIT)
		nargs.flags |= MAP_INHERIT;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (smmap(p, &nargs, retval));
}
#endif
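/*
 * Worked example of the translation above (illustrative only): an
 * old-style call with prot 5 and flags (OMAP_ANON|OMAP_FIXED) becomes
 *
 *	nargs.prot  = cvtbsdprot[5];	 i.e. PROT_EXEC|PROT_READ
 *	nargs.flags = MAP_ANON|MAP_PRIVATE|MAP_FIXED;
 *
 * Note that MAP_PRIVATE is supplied by default whenever OMAP_SHARED
 * is not set.
 */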
int
smmap(p, uap, retval)
	struct proc *p;
	register struct mmap_args *uap;
	int *retval;
{
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size;
	vm_prot_t prot;
	caddr_t handle;
	int error;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mmap(%d): addr %x len %x prot %x flg %x fd %d pos %x\n",
		       p->p_pid, uap->addr, uap->len, uap->prot,
		       uap->flags, uap->fd, uap->pos);
#endif
	/*
	 * Address (if FIXED) must be page aligned.
	 * Size is implicitly rounded to a page boundary.
	 */
	addr = (vm_offset_t) uap->addr;
	if (((uap->flags & MAP_FIXED) && (addr & PAGE_MASK)) || uap->len < 0 ||
	    ((uap->flags & MAP_ANON) && uap->fd != -1))
		return (EINVAL);
	size = (vm_size_t) round_page(uap->len);
	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 * Note that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (uap->flags & MAP_FIXED) {
		if (VM_MAXUSER_ADDRESS > 0 && addr + size >= VM_MAXUSER_ADDRESS)
			return (EINVAL);
		if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
			return (EINVAL);
		if (addr > addr + size)
			return (EINVAL);
	}
	/*
	 * XXX if no hint provided for a non-fixed mapping place it after
	 * the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	if (addr == 0 && (uap->flags & MAP_FIXED) == 0)
		addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
	/*
	 * If we are mapping a file we need to check various
	 * file/vnode related things.
	 */
	if (uap->flags & MAP_ANON)
		handle = NULL;
	else {
		/*
		 * Mapping file, get fp for validation.
		 * Obtain vnode and make sure it is of appropriate type.
		 */
		if (((unsigned)uap->fd) >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE)
			return (EINVAL);
		vp = (struct vnode *)fp->f_data;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			return (EINVAL);
		/*
		 * Ensure that file protection and desired protection
		 * are compatible.  Note that we only worry about
		 * writability if the mapping is shared.
		 */
		if (((uap->prot & PROT_READ) && (fp->f_flag & FREAD) == 0) ||
		    ((uap->flags & MAP_SHARED) &&
		     (uap->prot & PROT_WRITE) && (fp->f_flag & FWRITE) == 0))
			return (EACCES);
		handle = (caddr_t)vp;
	}
	/*
	 * Map protections to MACH style
	 */
	prot = uap->prot & VM_PROT_ALL;
	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot,
	    uap->flags, handle, (vm_offset_t)uap->pos);
	if (error == 0)
		*retval = (int) addr;
	return (error);
}

struct msync_args {
	caddr_t	addr;
	int	len;
};
int
msync(p, uap, retval)
	struct proc *p;
	struct msync_args *uap;
	int *retval;
{
	vm_offset_t addr, objoff, oaddr;
	vm_size_t size, osize;
	vm_prot_t prot, mprot;
	vm_inherit_t inherit;
	vm_object_t object;
	boolean_t shared;
	int rv;

#ifdef DEBUG
	if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
		printf("msync(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif
	if (((int)uap->addr & PAGE_MASK) || uap->len < 0)
		return (EINVAL);
	addr = oaddr = (vm_offset_t)uap->addr;
	osize = (vm_size_t)uap->len;
	/*
	 * Region must be entirely contained in a single entry
	 */
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+osize,
	    TRUE))
		return (EINVAL);
	/*
	 * Determine the object associated with that entry
	 * (object is returned locked on KERN_SUCCESS)
	 */
	rv = vm_region(&p->p_vmspace->vm_map, &addr, &size, &prot, &mprot,
	    &inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: region: object %x addr %x size %d objoff %d\n",
		       object, addr, size, objoff);
#endif
	/*
	 * Do not msync non-vnode-backed objects.
	 */
	if ((object->flags & OBJ_INTERNAL) || object->pager == NULL ||
	    object->pager->pg_type != PG_VNODE) {
		vm_object_unlock(object);
		return (EINVAL);
	}
	objoff += oaddr - addr;
	if (osize == 0)
		osize = size;
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: cleaning/flushing object range [%x-%x)\n",
		       objoff, objoff+osize);
#endif
	if (prot & VM_PROT_WRITE)
		vm_object_page_clean(object, objoff, objoff+osize, FALSE);
	/*
	 * (XXX)
	 * Bummer, gotta flush all cached pages to ensure
	 * consistency with the file system cache.
	 */
	vm_object_page_remove(object, objoff, objoff+osize);
	vm_object_unlock(object);
	return (0);
}
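/*
 * Hypothetical user-level use of the call above (this old interface
 * takes no flags argument):
 *
 *	char *p = mmap(...);
 *	... dirty some pages through p ...
 *	msync(p, len);
 *
 * If the region is writable the dirty pages are cleaned back to the
 * vnode; in either case the cached pages are then removed, so the next
 * touch refaults them from the filesystem.  A zero length syncs from
 * addr to the end of the containing region.
 */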
struct munmap_args {
	caddr_t	addr;
	int	len;
};
int
munmap(p, uap, retval)
	register struct proc *p;
	register struct munmap_args *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmap(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif

	addr = (vm_offset_t) uap->addr;
	if ((addr & PAGE_MASK) || uap->len < 0)
		return (EINVAL);
	size = (vm_size_t) round_page(uap->len);
	if (size == 0)
		return (0);
	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 * Note that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 && addr + size >= VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
		return (EINVAL);
	if (addr > addr + size)
		return (EINVAL);
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr + size,
	    FALSE))
		return (EINVAL);
	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(&p->p_vmspace->vm_map, addr, addr+size);
	return (0);
}

void
munmapfd(fd)
	int fd;
{
#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmapfd(%d): fd %d\n", curproc->p_pid, fd);
#endif

	/*
	 * XXX -- should vm_deallocate any regions mapped to this file
	 */
	curproc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}

struct mprotect_args {
	caddr_t	addr;
	int	len;
	int	prot;
};
int
mprotect(p, uap, retval)
	struct proc *p;
	struct mprotect_args *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	register vm_prot_t prot;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mprotect(%d): addr %x len %x prot %d\n",
		       p->p_pid, uap->addr, uap->len, uap->prot);
#endif

	addr = (vm_offset_t)uap->addr;
	if ((addr & PAGE_MASK) || uap->len < 0)
		return (EINVAL);
	size = (vm_size_t)uap->len;
	/*
	 * Map protections
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot,
	    FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

struct madvise_args {
	caddr_t	addr;
	int	len;
	int	behav;
};
/* ARGSUSED */
int
madvise(p, uap, retval)
	struct proc *p;
	struct madvise_args *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

struct mincore_args {
	caddr_t	addr;
	int	len;
	char	*vec;
};
/* ARGSUSED */
int
mincore(p, uap, retval)
	struct proc *p;
	struct mincore_args *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}
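/*
 * Sketch of the protection mapping performed by mprotect() above
 * (illustrative only): a call such as
 *
 *	mprotect(addr, len, PROT_READ)
 *
 * becomes
 *
 *	vm_map_protect(&p->p_vmspace->vm_map, addr, addr+len,
 *	    VM_PROT_READ, FALSE);
 *
 * which returns KERN_PROTECTION_FAILURE (mapped to EACCES) if the
 * requested protection exceeds the maximum protection of some entry
 * in the range.
 */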
/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is either a vnode pointer or NULL for MAP_ANON.
 */
int
vm_mmap(map, addr, size, prot, flags, handle, foff)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	vm_prot_t prot;
	register int flags;
	caddr_t handle;		/* XXX should be vp */
	vm_offset_t foff;
{
	register vm_pager_t pager;
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp;
	int type;
	int rv = KERN_SUCCESS;

	if (size == 0)
		return (0);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		fitit = FALSE;
		(void)vm_deallocate(map, *addr, size);
	}

	/*
	 * Lookup/allocate pager.  All except an unnamed anonymous lookup
	 * gain a reference to ensure continued existence of the object.
	 * (XXX the exception is to appease the pageout daemon)
	 */
	if (flags & MAP_ANON)
		type = PG_DFLT;
	else {
		vp = (struct vnode *)handle;
		if (vp->v_type == VCHR) {
			type = PG_DEVICE;
			handle = (caddr_t)vp->v_rdev;
		} else
			type = PG_VNODE;
	}
	pager = vm_pager_allocate(type, handle, size, prot);
	if (pager == NULL)
		return (type == PG_DEVICE ? EINVAL : ENOMEM);
	/*
	 * Find object and release extra reference gained by lookup
	 */
	object = vm_object_lookup(pager);
	vm_object_deallocate(object);

	/*
	 * Anonymous memory.
	 */
	if (flags & MAP_ANON) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
		    pager, (vm_offset_t)foff, TRUE);
		if (rv != KERN_SUCCESS) {
			if (handle == NULL)
				vm_pager_deallocate(pager);
			else
				vm_object_deallocate(object);
			goto out;
		}
		/*
		 * Don't cache anonymous objects.
		 * Loses the reference gained by vm_pager_allocate.
		 */
		(void) pager_cache(object, FALSE);
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
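	/*
	 * Reference note for the mapped-file cases below (a summary of
	 * the existing protocol, not new behavior): the object currently
	 * holds the reference gained by vm_pager_allocate(); the extra
	 * reference from vm_object_lookup() was dropped above.  Each
	 * branch must either hand that reference to the cache (via
	 * pager_cache(object, FALSE), which drops it) or drop it
	 * explicitly with vm_object_deallocate().
	 */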
	/*
	 * Must be a mapped file.
	 * Distinguish between character special and regular files.
	 */
	else if (vp->v_type == VCHR) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
		    pager, (vm_offset_t)foff, FALSE);
		/*
		 * Uncache the object and lose the reference gained
		 * by vm_pager_allocate().  If the call to
		 * vm_allocate_with_pager() was successful, then we
		 * gained an additional reference ensuring the object
		 * will continue to exist.  If the call failed then
		 * the deallocate call below will terminate the
		 * object which is fine.
		 */
		(void) pager_cache(object, FALSE);
		if (rv != KERN_SUCCESS)
			goto out;
	}
	/*
	 * A regular file
	 */
	else {
#ifdef DEBUG
		if (object == NULL)
			printf("vm_mmap: no object: vp %x, pager %x\n",
			       vp, pager);
#endif
		/*
		 * Map it directly.
		 * Allows modifications to go out to the vnode.
		 */
		if (flags & MAP_SHARED) {
			rv = vm_allocate_with_pager(map, addr, size,
			    fitit, pager, (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			/*
			 * Don't cache the object.  This is the easiest way
			 * of ensuring that data gets back to the filesystem
			 * because vnode_pager_deallocate() will fsync the
			 * vnode.  pager_cache() will lose the extra ref.
			 */
			if (prot & VM_PROT_WRITE)
				pager_cache(object, FALSE);
			else
				vm_object_deallocate(object);
		}
		/*
		 * Copy-on-write of file.  Two flavors.
		 * MAP_COPY is true COW, you essentially get a snapshot of
		 * the region at the time of mapping.  MAP_PRIVATE means only
		 * that your changes are not reflected back to the object.
		 * Changes made by others will be seen.
		 */
		else {
			vm_map_t tmap;
			vm_offset_t off;

			/* locate and allocate the target address space */
			rv = vm_map_find(map, NULL, (vm_offset_t)0,
			    addr, size, fitit);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
			    VM_MIN_ADDRESS+size, TRUE);
			off = VM_MIN_ADDRESS;
			rv = vm_allocate_with_pager(tmap, &off, size,
			    TRUE, pager, (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				vm_map_deallocate(tmap);
				goto out;
			}
			/*
			 * (XXX)
			 * MAP_PRIVATE implies that we see changes made by
			 * others.  To ensure that, we need to guarantee that
			 * no copy object is created (otherwise original
			 * pages would be pushed to the copy object and we
			 * would never see changes made by others).  We
			 * totally sleaze it right now by marking the object
			 * internal temporarily.
			 */
			if ((flags & MAP_COPY) == 0)
				object->flags |= OBJ_INTERNAL;
			rv = vm_map_copy(map, tmap, *addr, size, off,
			    FALSE, FALSE);
			object->flags &= ~OBJ_INTERNAL;
			/*
			 * (XXX)
			 * My oh my, this only gets worse...
			 * Force creation of a shadow object so that
			 * vm_map_fork will do the right thing.
			 */
			if ((flags & MAP_COPY) == 0) {
				vm_map_t tmap;
				vm_map_entry_t tentry;
				vm_object_t tobject;
				vm_offset_t toffset;
				vm_prot_t tprot;
				boolean_t twired, tsu;

				tmap = map;
				vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
				    &tentry, &tobject, &toffset,
				    &tprot, &twired, &tsu);
				vm_map_lookup_done(tmap, tentry);
			}
			/*
			 * (XXX)
			 * Map copy code cannot detect sharing unless a
			 * sharing map is involved.  So we cheat and write
			 * protect everything ourselves.
			 */
			vm_object_pmap_copy(object, (vm_offset_t)foff,
			    (vm_offset_t)foff+size);
			vm_object_deallocate(object);
			vm_map_deallocate(tmap);
			if (rv != KERN_SUCCESS)
				goto out;
		}
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Correct protection (default is VM_PROT_ALL).
	 * Note that we set the maximum protection.  This may not be
	 * entirely correct.  Maybe the maximum protection should be based
	 * on the object permissions where it makes sense (e.g. a vnode).
	 *
	 * Changed my mind: leave max prot at VM_PROT_ALL.
	 */
	if (prot != VM_PROT_ALL) {
		rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & MAP_SHARED) {
		rv = vm_inherit(map, *addr, size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
out:
#ifdef DEBUG
	if (mmapdebug & MDB_MAPIT)
		printf("vm_mmap: rv %d\n", rv);
#endif
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
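/*
 * Typical in-kernel use of vm_mmap() above (a sketch; compare the
 * call in smmap()):
 *
 *	vm_offset_t addr = 0;
 *	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size,
 *	    VM_PROT_READ|VM_PROT_WRITE, MAP_ANON|MAP_PRIVATE,
 *	    (caddr_t)NULL, (vm_offset_t)0);
 *
 * A NULL handle selects the default (anonymous) pager; a vnode pointer
 * selects the vnode or device pager according to the vnode type.
 */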
/*
 * Internal bastardized version of MACH's vm_region system call.
 * Given address and size it returns map attributes as well
 * as the (locked) object mapped at that location.
 */
int
vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff)
	vm_map_t map;
	vm_offset_t *addr;		/* IN/OUT */
	vm_size_t *size;		/* OUT */
	vm_prot_t *prot;		/* OUT */
	vm_prot_t *max_prot;		/* OUT */
	vm_inherit_t *inheritance;	/* OUT */
	boolean_t *shared;		/* OUT */
	vm_object_t *object;		/* OUT */
	vm_offset_t *objoff;		/* OUT */
{
	vm_map_entry_t tmp_entry;
	register vm_map_entry_t entry;
	register vm_offset_t tmp_offset;
	vm_offset_t start;

	if (map == NULL)
		return (KERN_INVALID_ARGUMENT);

	start = *addr;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->next) == &map->header) {
			vm_map_unlock_read(map);
			return (KERN_NO_SPACE);
		}
		start = entry->start;
		*addr = start;
	} else
		entry = tmp_entry;

	*prot = entry->protection;
	*max_prot = entry->max_protection;
	*inheritance = entry->inheritance;

	tmp_offset = entry->offset + (start - entry->start);
	*size = (entry->end - start);

	if (entry->is_a_map) {
		register vm_map_t share_map;
		vm_size_t share_size;

		share_map = entry->object.share_map;

		vm_map_lock_read(share_map);
		(void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry);

		if ((share_size = (tmp_entry->end - tmp_offset)) < *size)
			*size = share_size;

		vm_object_lock(tmp_entry->object);
		*object = tmp_entry->object.vm_object;
		*objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start);

		*shared = (share_map->ref_count != 1);
		vm_map_unlock_read(share_map);
	} else {
		vm_object_lock(entry->object);
		*object = entry->object.vm_object;
		*objoff = tmp_offset;

		*shared = FALSE;
	}

	vm_map_unlock_read(map);

	return (KERN_SUCCESS);
}

/*
 * Yet another bastard routine.
 */
int
vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	boolean_t fitit;
	vm_pager_t pager;
	vm_offset_t poffset;
	boolean_t internal;
{
	register vm_object_t object;
	register int result;

	if (map == NULL)
		return (KERN_INVALID_ARGUMENT);

	*addr = trunc_page(*addr);
	size = round_page(size);

	/*
	 * Lookup the pager/paging-space in the object cache.
	 * If it's not there, then create a new object and cache
	 * it.
	 */
	object = vm_object_lookup(pager);
	cnt.v_lookups++;
	if (object == NULL) {
		object = vm_object_allocate(size);
		/*
		 * From Mike Hibler: "unnamed anonymous objects should never
		 * be on the hash list ...  For now you can just change
		 * vm_allocate_with_pager to not do vm_object_enter if this
		 * is an internal object ..."
		 */
		if (!internal)
			vm_object_enter(object, pager);
	} else
		cnt.v_hits++;
	if (internal)
		object->flags |= OBJ_INTERNAL;
	else
		object->flags &= ~OBJ_INTERNAL;

	result = vm_map_find(map, object, poffset, addr, size, fitit);
	if (result != KERN_SUCCESS)
		vm_object_deallocate(object);
	else if (pager != NULL)
		vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
	return (result);
}
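/*
 * Sketch of how the internal flag above is used by vm_mmap()
 * (illustrative only): the anonymous case passes TRUE, keeping the
 * object off the name hash,
 *
 *	rv = vm_allocate_with_pager(map, addr, size, fitit,
 *	    pager, (vm_offset_t)foff, TRUE);
 *
 * while the vnode and device cases pass FALSE so the named object is
 * entered in (or found in) the object cache.
 */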
/*
 * XXX: this routine belongs in vm_map.c.
 *
 * Returns TRUE if the range [start, end) is allocated in either
 * a single entry (single_entry == TRUE) or multiple contiguous
 * entries (single_entry == FALSE).
 *
 * start and end should be page aligned.
 */
boolean_t
vm_map_is_allocated(map, start, end, single_entry)
	vm_map_t map;
	vm_offset_t start, end;
	boolean_t single_entry;
{
	vm_map_entry_t mapent;
	register vm_offset_t nend;

	vm_map_lock_read(map);

	/*
	 * Start address not in any entry
	 */
	if (!vm_map_lookup_entry(map, start, &mapent)) {
		vm_map_unlock_read(map);
		return (FALSE);
	}
	/*
	 * Find the maximum stretch of contiguously allocated space
	 */
	nend = mapent->end;
	if (!single_entry) {
		mapent = mapent->next;
		while (mapent != &map->header && mapent->start == nend) {
			nend = mapent->end;
			mapent = mapent->next;
		}
	}

	vm_map_unlock_read(map);
	return (end <= nend);
}
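/*
 * Example of the single_entry distinction above (illustrative only):
 * munmap() accepts a range spanning several contiguous entries,
 *
 *	vm_map_is_allocated(map, addr, addr + size, FALSE)
 *
 * while msync() passes TRUE and so requires the entire range to lie
 * within one map entry.
 */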