/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	7.9 (Berkeley) 10/21/91
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include "param.h"
#include "systm.h"
#include "filedesc.h"
#include "proc.h"
#include "vnode.h"
#include "specdev.h"
#include "file.h"
#include "mman.h"
#include "conf.h"

#include "vm.h"
#include "vm_pager.h"
#include "vm_prot.h"

#ifdef DEBUG
int mmapdebug = 0;
#define MDB_FOLLOW	0x01
#define MDB_SYNC	0x02
#define MDB_MAPIT	0x04
#endif

/* ARGSUSED */
getpagesize(p, uap, retval)
	struct proc *p;
	void *uap;
	int *retval;
{

	*retval = PAGE_SIZE;
	return (0);
}

/* ARGSUSED */
sbrk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
sstk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

smmap(p, uap, retval)
	struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
		int	prot;
		int	flags;
		int	fd;
		off_t	pos;
	} *uap;
	int *retval;
{
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size;
	vm_prot_t prot;
	caddr_t handle;
	int mtype, error;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mmap(%d): addr %x len %x prot %x flg %x fd %d pos %x\n",
		       p->p_pid, uap->addr, uap->len, uap->prot,
		       uap->flags, uap->fd, uap->pos);
#endif
	/*
	 * Make sure one of the sharing types is specified
	 */
	mtype = uap->flags & MAP_TYPE;
	switch (mtype) {
	case MAP_FILE:
	case MAP_ANON:
		break;
	default:
		return(EINVAL);
	}
	/*
	 * Address (if FIXED) must be page aligned.
	 * Size is implicitly rounded to a page boundary.
	 */
	addr = (vm_offset_t) uap->addr;
	if ((uap->flags & MAP_FIXED) && (addr & PAGE_MASK) || uap->len < 0)
		return(EINVAL);
	size = (vm_size_t) round_page(uap->len);
	/*
	 * XXX if no hint provided for a non-fixed mapping, place it after
	 * the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	if (addr == 0 && (uap->flags & MAP_FIXED) == 0)
		addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
	/*
	 * Mapping file or named anonymous, get fp for validation
	 */
	if (mtype == MAP_FILE || uap->fd != -1) {
		if (((unsigned)uap->fd) >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
			return(EBADF);
	}
	/*
	 * If we are mapping a file we need to check various
	 * file/vnode related things.
	 */
	if (mtype == MAP_FILE) {
		/*
		 * Obtain vnode and make sure it is of appropriate type
		 */
		if (fp->f_type != DTYPE_VNODE)
			return(EINVAL);
		vp = (struct vnode *)fp->f_data;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			return(EINVAL);
		/*
		 * Ensure that file protection and desired protection
		 * are compatible.  Note that we only worry about
		 * writability if the mapping is shared.
		 */
		if ((uap->prot & PROT_READ) && (fp->f_flag & FREAD) == 0 ||
		    ((uap->flags & MAP_SHARED) &&
		     (uap->prot & PROT_WRITE) && (fp->f_flag & FWRITE) == 0))
			return(EACCES);
		handle = (caddr_t)vp;
	} else if (uap->fd != -1)
		handle = (caddr_t)fp;
	else
		handle = NULL;
	/*
	 * Map protections to MACH style
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot,
			uap->flags, handle, (vm_offset_t)uap->pos);
	if (error == 0)
		*retval = (int) addr;
	return(error);
}
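
/*
 * Illustrative helper (hypothetical; nothing in this file uses it):
 * the PROT_* to VM_PROT_* translation above is repeated verbatim in
 * mprotect() below and could be factored out along these lines.
 * Left under "notdef" so it is never compiled in.
 */
#ifdef notdef
static vm_prot_t
prot_to_vmprot(prot)
	int prot;
{
	/* hypothetical helper, illustration only */
	register vm_prot_t vmprot = VM_PROT_NONE;

	if (prot & PROT_READ)
		vmprot |= VM_PROT_READ;
	if (prot & PROT_WRITE)
		vmprot |= VM_PROT_WRITE;
	if (prot & PROT_EXEC)
		vmprot |= VM_PROT_EXECUTE;
	return (vmprot);
}
#endif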

msync(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr, objoff, oaddr;
	vm_size_t size, osize;
	vm_prot_t prot, mprot;
	vm_inherit_t inherit;
	vm_object_t object;
	boolean_t shared;
	int rv;

#ifdef DEBUG
	if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
		printf("msync(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif
	if (((int)uap->addr & PAGE_MASK) || uap->len < 0)
		return(EINVAL);
	addr = oaddr = (vm_offset_t)uap->addr;
	osize = (vm_size_t)uap->len;
	/*
	 * Region must be entirely contained in a single entry
	 */
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+osize,
	    TRUE))
		return(EINVAL);
	/*
	 * Determine the object associated with that entry
	 * (object is returned locked on KERN_SUCCESS)
	 */
	rv = vm_region(&p->p_vmspace->vm_map, &addr, &size, &prot, &mprot,
		       &inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return(EINVAL);
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: region: object %x addr %x size %d objoff %d\n",
		       object, addr, size, objoff);
#endif
	/*
	 * Do not msync objects that are not backed by a vnode.
	 */
	if ((object->flags & OBJ_INTERNAL) || object->pager == NULL ||
	    object->pager->pg_type != PG_VNODE) {
		vm_object_unlock(object);
		return(EINVAL);
	}
	objoff += oaddr - addr;
	if (osize == 0)
		osize = size;
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: cleaning/flushing object range [%x-%x)\n",
		       objoff, objoff+osize);
#endif
	if (prot & VM_PROT_WRITE)
		vm_object_page_clean(object, objoff, objoff+osize, FALSE);
	/*
	 * (XXX)
	 * Bummer, gotta flush all cached pages to ensure
	 * consistency with the file system cache.
	 */
	vm_object_page_remove(object, objoff, objoff+osize);
	vm_object_unlock(object);
	return(0);
}
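
/*
 * Worked example of the page arithmetic used by msync() above and
 * munmap() below, assuming (hypothetically) PAGE_SIZE 4096 and thus
 * PAGE_MASK 0xfff:
 *
 *	addr 0x2000:	(addr & PAGE_MASK) == 0		accepted
 *	addr 0x2345:	(addr & PAGE_MASK) == 0x345	rejected (EINVAL)
 *	len  5000:	round_page(5000) == 8192	(two pages)
 */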

munmap(p, uap, retval)
	register struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmap(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif

	addr = (vm_offset_t) uap->addr;
	if ((addr & PAGE_MASK) || uap->len < 0)
		return(EINVAL);
	size = (vm_size_t) round_page(uap->len);
	if (size == 0)
		return(0);
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+size,
	    FALSE))
		return(EINVAL);
	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(&p->p_vmspace->vm_map, addr, addr+size);
	return(0);
}

munmapfd(fd)
{
#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmapfd(%d): fd %d\n", curproc->p_pid, fd);
#endif

	/*
	 * XXX -- should vm_deallocate any regions mapped to this file
	 */
	curproc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}

mprotect(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
		int	prot;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	register vm_prot_t prot;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mprotect(%d): addr %x len %x prot %d\n",
		       p->p_pid, uap->addr, uap->len, uap->prot);
#endif

	addr = (vm_offset_t) uap->addr;
	if ((addr & PAGE_MASK) || uap->len < 0)
		return(EINVAL);
	size = (vm_size_t) uap->len;
	/*
	 * Map protections
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot,
	    FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
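
/*
 * Usage sketch for mprotect() above (hypothetical addresses): a
 * process that mapped a page read/write can later drop write access
 * with
 *
 *	mprotect(addr, PAGE_SIZE, PROT_READ);
 *
 * after which stores to the page fault.  A request exceeding the
 * region's maximum protection comes back from vm_map_protect() as
 * KERN_PROTECTION_FAILURE and is returned to the user as EACCES.
 */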

/* ARGSUSED */
madvise(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
		int	behav;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
mincore(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
		char	*vec;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is:
 *	MAP_FILE: a vnode pointer
 *	MAP_ANON: NULL or a file pointer
 */
vm_mmap(map, addr, size, prot, flags, handle, foff)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	vm_prot_t prot;
	register int flags;
	caddr_t handle;		/* XXX should be vp */
	vm_offset_t foff;
{
	register vm_pager_t pager;
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp;
	int type;
	int rv = KERN_SUCCESS;

	if (size == 0)
		return (0);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		fitit = FALSE;
		(void) vm_deallocate(map, *addr, size);
	}

	/*
	 * Lookup/allocate pager.  All except an unnamed anonymous lookup
	 * gain a reference to ensure continued existence of the object.
	 * (XXX the exception is to appease the pageout daemon)
	 */
	if ((flags & MAP_TYPE) == MAP_ANON)
		type = PG_DFLT;
	else {
		vp = (struct vnode *)handle;
		if (vp->v_type == VCHR) {
			type = PG_DEVICE;
			handle = (caddr_t)vp->v_rdev;
		} else
			type = PG_VNODE;
	}
	pager = vm_pager_allocate(type, handle, size, prot);
	if (pager == NULL)
		return (type == PG_DEVICE ? EINVAL : ENOMEM);
	/*
	 * Find object and release extra reference gained by lookup
	 */
	object = vm_object_lookup(pager);
	vm_object_deallocate(object);

	/*
	 * Anonymous memory.
	 */
	if ((flags & MAP_TYPE) == MAP_ANON) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, TRUE);
		if (rv != KERN_SUCCESS) {
			if (handle == NULL)
				vm_pager_deallocate(pager);
			else
				vm_object_deallocate(object);
			goto out;
		}
		/*
		 * Don't cache anonymous objects.
		 * Loses the reference gained by vm_pager_allocate.
		 */
		(void) pager_cache(object, FALSE);
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Must be type MAP_FILE.
	 * Distinguish between character special and regular files.
	 */
	else if (vp->v_type == VCHR) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, FALSE);
		/*
		 * Uncache the object and lose the reference gained
		 * by vm_pager_allocate().  If the call to
		 * vm_allocate_with_pager() was successful, then we
		 * gained an additional reference ensuring the object
		 * will continue to exist.  If the call failed then
		 * the deallocate call below will terminate the
		 * object, which is fine.
		 */
		(void) pager_cache(object, FALSE);
		if (rv != KERN_SUCCESS)
			goto out;
	}
	/*
	 * A regular file
	 */
	else {
#ifdef DEBUG
		if (object == NULL)
			printf("vm_mmap: no object: vp %x, pager %x\n",
			       vp, pager);
#endif
		/*
		 * Map it directly.
		 * Allows modifications to go out to the vnode.
		 */
		if (flags & MAP_SHARED) {
			rv = vm_allocate_with_pager(map, addr, size,
						    fitit, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			/*
			 * Don't cache the object.  This is the easiest way
			 * of ensuring that data gets back to the filesystem
			 * because vnode_pager_deallocate() will fsync the
			 * vnode.  pager_cache() will lose the extra ref.
			 */
			if (prot & VM_PROT_WRITE)
				pager_cache(object, FALSE);
			else
				vm_object_deallocate(object);
		}
		/*
		 * Copy-on-write of file.  Two flavors.
		 * MAP_COPY is true COW, you essentially get a snapshot of
		 * the region at the time of mapping.  MAP_PRIVATE means only
		 * that your changes are not reflected back to the object.
		 * Changes made by others will be seen.
		 */
		else {
			vm_map_t tmap;
			vm_offset_t off;

			/* locate and allocate the target address space */
			rv = vm_map_find(map, NULL, (vm_offset_t)0,
					 addr, size, fitit);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
					     VM_MIN_ADDRESS+size, TRUE);
			off = VM_MIN_ADDRESS;
			rv = vm_allocate_with_pager(tmap, &off, size,
						    TRUE, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				vm_map_deallocate(tmap);
				goto out;
			}
			/*
			 * (XXX)
			 * MAP_PRIVATE implies that we see changes made by
			 * others.  To ensure that, we need to guarantee that
			 * no copy object is created (otherwise original
			 * pages would be pushed to the copy object and we
			 * would never see changes made by others).  We
			 * totally sleaze it right now by marking the object
			 * internal temporarily.
			 */
			if ((flags & MAP_COPY) == 0)
				object->flags |= OBJ_INTERNAL;
			rv = vm_map_copy(map, tmap, *addr, size, off,
					 FALSE, FALSE);
			object->flags &= ~OBJ_INTERNAL;
			/*
			 * (XXX)
			 * My oh my, this only gets worse...
			 * Force creation of a shadow object so that
			 * vm_map_fork will do the right thing.
			 */
			if ((flags & MAP_COPY) == 0) {
				vm_map_t tmap;
				vm_map_entry_t tentry;
				vm_object_t tobject;
				vm_offset_t toffset;
				vm_prot_t tprot;
				boolean_t twired, tsu;

				tmap = map;
				vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
					      &tentry, &tobject, &toffset,
					      &tprot, &twired, &tsu);
				vm_map_lookup_done(tmap, tentry);
			}
			/*
			 * (XXX)
			 * Map copy code cannot detect sharing unless a
			 * sharing map is involved.  So we cheat and write
			 * protect everything ourselves.
			 */
			vm_object_pmap_copy(object, (vm_offset_t)foff,
					    (vm_offset_t)foff+size);
			vm_object_deallocate(object);
			vm_map_deallocate(tmap);
			if (rv != KERN_SUCCESS)
				goto out;
		}
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
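	/*
	 * Worked example of the MAP_PRIVATE/MAP_COPY distinction noted
	 * above (hypothetical scenario): process A maps a file with
	 * MAP_PRIVATE and process B then writes to the file via
	 * write(2).  Pages A has not yet modified show B's changes;
	 * pages A has already written to do not.  Under MAP_COPY, A
	 * would continue to see the file as it was at map time in
	 * both cases.
	 */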
	/*
	 * Correct protection (default is VM_PROT_ALL).
	 * Note that we set the maximum protection.  This may not be
	 * entirely correct.  Maybe the maximum protection should be based
	 * on the object permissions where it makes sense (e.g. a vnode).
	 *
	 * Changed my mind: leave max prot at VM_PROT_ALL.
	 */
	if (prot != VM_PROT_ALL) {
		rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & MAP_SHARED) {
		rv = vm_inherit(map, *addr, size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
out:
#ifdef DEBUG
	if (mmapdebug & MDB_MAPIT)
		printf("vm_mmap: rv %d\n", rv);
#endif
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
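
/*
 * Caller sketch for vm_mmap() (illustrative only; smmap() above is
 * the real in-kernel example): mapping `size' bytes of a vnode `vp'
 * read-only at a kernel-chosen address in `map' might look like
 *
 *	vm_offset_t addr = 0;
 *	int error;
 *
 *	error = vm_mmap(map, &addr, size, VM_PROT_READ,
 *	    MAP_FILE|MAP_PRIVATE, (caddr_t)vp, (vm_offset_t)0);
 *
 * On success, addr holds the address actually chosen; failures have
 * already been translated to errno values by the switch above.
 */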

/*
 * Internal bastardized version of MACH's vm_region system call.
 * Given address and size it returns map attributes as well
 * as the (locked) object mapped at that location.
 */
vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff)
	vm_map_t map;
	vm_offset_t *addr;		/* IN/OUT */
	vm_size_t *size;		/* OUT */
	vm_prot_t *prot;		/* OUT */
	vm_prot_t *max_prot;		/* OUT */
	vm_inherit_t *inheritance;	/* OUT */
	boolean_t *shared;		/* OUT */
	vm_object_t *object;		/* OUT */
	vm_offset_t *objoff;		/* OUT */
{
	vm_map_entry_t tmp_entry;
	register
	vm_map_entry_t entry;
	register
	vm_offset_t tmp_offset;
	vm_offset_t start;

	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	start = *addr;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->next) == &map->header) {
			vm_map_unlock_read(map);
			return(KERN_NO_SPACE);
		}
		start = entry->start;
		*addr = start;
	} else
		entry = tmp_entry;

	*prot = entry->protection;
	*max_prot = entry->max_protection;
	*inheritance = entry->inheritance;

	tmp_offset = entry->offset + (start - entry->start);
	*size = (entry->end - start);

	if (entry->is_a_map) {
		register vm_map_t share_map;
		vm_size_t share_size;

		share_map = entry->object.share_map;

		vm_map_lock_read(share_map);
		(void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry);

		if ((share_size = (tmp_entry->end - tmp_offset)) < *size)
			*size = share_size;

		vm_object_lock(tmp_entry->object);
		*object = tmp_entry->object.vm_object;
		*objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start);

		*shared = (share_map->ref_count != 1);
		vm_map_unlock_read(share_map);
	} else {
		vm_object_lock(entry->object);
		*object = entry->object.vm_object;
		*objoff = tmp_offset;

		*shared = FALSE;
	}

	vm_map_unlock_read(map);

	return(KERN_SUCCESS);
}
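
/*
 * Caller sketch for vm_region() (msync() above is the real user):
 * given a page-aligned user address, it hands back the backing
 * object, locked, along with the offset of that address within it:
 *
 *	rv = vm_region(&p->p_vmspace->vm_map, &addr, &size, &prot,
 *	    &mprot, &inherit, &shared, &object, &objoff);
 *	if (rv == KERN_SUCCESS) {
 *		... examine object ...
 *		vm_object_unlock(object);
 *	}
 */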

/*
 * Yet another bastard routine.
 */
vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	boolean_t fitit;
	vm_pager_t pager;
	vm_offset_t poffset;
	boolean_t internal;
{
	register vm_object_t object;
	register int result;

	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	*addr = trunc_page(*addr);
	size = round_page(size);

	/*
	 * Lookup the pager/paging-space in the object cache.
	 * If it's not there, then create a new object and cache
	 * it.
	 */
	object = vm_object_lookup(pager);
	cnt.v_lookups++;
	if (object == NULL) {
		object = vm_object_allocate(size);
		vm_object_enter(object, pager);
	} else
		cnt.v_hits++;
	if (internal)
		object->flags |= OBJ_INTERNAL;
	else
		object->flags &= ~OBJ_INTERNAL;

	result = vm_map_find(map, object, poffset, addr, size, fitit);
	if (result != KERN_SUCCESS)
		vm_object_deallocate(object);
	else if (pager != NULL)
		vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
	return(result);
}

/*
 * XXX: this routine belongs in vm_map.c.
 *
 * Returns TRUE if the range [start - end) is allocated in either
 * a single entry (single_entry == TRUE) or multiple contiguous
 * entries (single_entry == FALSE).
 *
 * start and end should be page aligned.
 */
boolean_t
vm_map_is_allocated(map, start, end, single_entry)
	vm_map_t map;
	vm_offset_t start, end;
	boolean_t single_entry;
{
	vm_map_entry_t mapent;
	register vm_offset_t nend;

	vm_map_lock_read(map);

	/*
	 * Start address not in any entry
	 */
	if (!vm_map_lookup_entry(map, start, &mapent)) {
		vm_map_unlock_read(map);
		return (FALSE);
	}
	/*
	 * Find the maximum stretch of contiguously allocated space
	 */
	nend = mapent->end;
	if (!single_entry) {
		mapent = mapent->next;
		while (mapent != &map->header && mapent->start == nend) {
			nend = mapent->end;
			mapent = mapent->next;
		}
	}

	vm_map_unlock_read(map);
	return (end <= nend);
}
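
/*
 * Usage note for vm_map_is_allocated(): msync() above demands that
 * its range lie within a single map entry (single_entry == TRUE),
 * while munmap() accepts any contiguously allocated range:
 *
 *	vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+size,
 *	    FALSE)
 */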