/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	7.10 (Berkeley) 05/04/92
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/specdev.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/conf.h>

#include <vm/vm.h>
#include <vm/vm_pager.h>
#include <vm/vm_prot.h>

#ifdef DEBUG
int mmapdebug = 0;
#define MDB_FOLLOW	0x01
#define MDB_SYNC	0x02
#define MDB_MAPIT	0x04
#endif

/* ARGSUSED */
int
getpagesize(p, uap, retval)
	struct proc *p;
	void *uap;
	int *retval;
{

	*retval = PAGE_SIZE;
	return (0);
}

/* ARGSUSED */
int
sbrk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
int
sstk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

int
smmap(p, uap, retval)
	struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
		int	prot;
		int	flags;
		int	fd;
		off_t	pos;
	} *uap;
	int *retval;
{
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size;
	vm_prot_t prot;
	caddr_t handle;
	int mtype, error;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mmap(%d): addr %x len %x prot %x flg %x fd %d pos %x\n",
		       p->p_pid, uap->addr, uap->len, uap->prot,
		       uap->flags, uap->fd, uap->pos);
#endif
	/*
	 * Make sure one of the sharing types is specified
	 */
	mtype = uap->flags & MAP_TYPE;
	switch (mtype) {
	case MAP_FILE:
	case MAP_ANON:
		break;
	default:
		return (EINVAL);
	}
	/*
	 * Address (if FIXED) must be page aligned.
	 * Size is implicitly rounded to a page boundary.
	 */
	addr = (vm_offset_t) uap->addr;
	if (((uap->flags & MAP_FIXED) && (addr & PAGE_MASK)) || uap->len < 0)
		return (EINVAL);
	size = (vm_size_t) round_page(uap->len);
	/*
	 * XXX if no hint provided for a non-fixed mapping place it after
	 * the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	if (addr == 0 && (uap->flags & MAP_FIXED) == 0)
		addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
	/*
	 * Mapping file or named anonymous, get fp for validation
	 */
	if (mtype == MAP_FILE || uap->fd != -1) {
		if (((unsigned)uap->fd) >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
			return (EBADF);
	}
	/*
	 * If we are mapping a file we need to check various
	 * file/vnode related things.
	 */
	if (mtype == MAP_FILE) {
		/*
		 * Obtain vnode and make sure it is of appropriate type
		 */
		if (fp->f_type != DTYPE_VNODE)
			return (EINVAL);
		vp = (struct vnode *)fp->f_data;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			return (EINVAL);
		/*
		 * Ensure that file protection and desired protection
		 * are compatible.  Note that we only worry about writability
		 * if mapping is shared.
		 */
		if (((uap->prot & PROT_READ) && (fp->f_flag & FREAD) == 0) ||
		    ((uap->flags & MAP_SHARED) &&
		     (uap->prot & PROT_WRITE) && (fp->f_flag & FWRITE) == 0))
			return (EACCES);
		handle = (caddr_t)vp;
	} else if (uap->fd != -1)
		handle = (caddr_t)fp;
	else
		handle = NULL;
	/*
	 * Map protections to MACH style
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot,
			uap->flags, handle, (vm_offset_t)uap->pos);
	if (error == 0)
		*retval = (int) addr;
	return (error);
}
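/*
 * Usage sketch (illustrative only): a user process reaches smmap()
 * through the mmap(2) system call stub.  Assuming a regular file
 * opened for reading:
 *
 *	int fd = open("/etc/motd", O_RDONLY);
 *	caddr_t va = mmap((caddr_t)0, 4096, PROT_READ,
 *	    MAP_FILE|MAP_PRIVATE, fd, (off_t)0);
 *
 * With a zero hint and no MAP_FIXED the kernel places the mapping
 * past the largest possible heap; with MAP_FIXED a hint that is
 * not page aligned fails with EINVAL.
 */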
int
msync(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr, objoff, oaddr;
	vm_size_t size, osize;
	vm_prot_t prot, mprot;
	vm_inherit_t inherit;
	vm_object_t object;
	boolean_t shared;
	int rv;

#ifdef DEBUG
	if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
		printf("msync(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif
	if (((int)uap->addr & PAGE_MASK) || uap->len < 0)
		return (EINVAL);
	addr = oaddr = (vm_offset_t)uap->addr;
	osize = (vm_size_t)uap->len;
	/*
	 * Region must be entirely contained in a single entry
	 */
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+osize,
	    TRUE))
		return (EINVAL);
	/*
	 * Determine the object associated with that entry
	 * (object is returned locked on KERN_SUCCESS)
	 */
	rv = vm_region(&p->p_vmspace->vm_map, &addr, &size, &prot, &mprot,
		       &inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: region: object %x addr %x size %d objoff %d\n",
		       object, addr, size, objoff);
#endif
	/*
	 * Do not msync objects that are not vnode-backed.
	 */
	if ((object->flags & OBJ_INTERNAL) || object->pager == NULL ||
	    object->pager->pg_type != PG_VNODE) {
		vm_object_unlock(object);
		return (EINVAL);
	}
	objoff += oaddr - addr;
	if (osize == 0)
		osize = size;
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: cleaning/flushing object range [%x-%x)\n",
		       objoff, objoff+osize);
#endif
	if (prot & VM_PROT_WRITE)
		vm_object_page_clean(object, objoff, objoff+osize, FALSE);
	/*
	 * (XXX)
	 * Bummer, gotta flush all cached pages to ensure
	 * consistency with the file system cache.
	 */
	vm_object_page_remove(object, objoff, objoff+osize);
	vm_object_unlock(object);
	return (0);
}
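/*
 * Usage sketch (illustrative only): msync() treats a zero length
 * as "the rest of the region", since an osize of 0 is replaced by
 * the size that vm_region() reports:
 *
 *	msync(va, 0);		sync the whole region mapped at va
 *	msync(va, 8192);	sync the first two pages (4K pages)
 *
 * Either call fails with EINVAL if [va, va+len) is not contained
 * in a single map entry.
 */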
int
munmap(p, uap, retval)
	register struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmap(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif

	addr = (vm_offset_t) uap->addr;
	if ((addr & PAGE_MASK) || uap->len < 0)
		return (EINVAL);
	size = (vm_size_t) round_page(uap->len);
	if (size == 0)
		return (0);
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+size,
	    FALSE))
		return (EINVAL);
	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(&p->p_vmspace->vm_map, addr, addr+size);
	return (0);
}

void
munmapfd(fd)
	int fd;
{
#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmapfd(%d): fd %d\n", curproc->p_pid, fd);
#endif

	/*
	 * XXX -- should vm_deallocate any regions mapped to this file
	 */
	curproc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}

int
mprotect(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
		int	prot;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	register vm_prot_t prot;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mprotect(%d): addr %x len %x prot %d\n",
		       p->p_pid, uap->addr, uap->len, uap->prot);
#endif

	addr = (vm_offset_t) uap->addr;
	if ((addr & PAGE_MASK) || uap->len < 0)
		return (EINVAL);
	size = (vm_size_t) uap->len;
	/*
	 * Map protections
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot,
	    FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

/* ARGSUSED */
int
madvise(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
		int	behav;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
int
mincore(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
		char	*vec;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}
/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is:
 *	MAP_FILE: a vnode pointer
 *	MAP_ANON: NULL or a file pointer
 */
int
vm_mmap(map, addr, size, prot, flags, handle, foff)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	vm_prot_t prot;
	register int flags;
	caddr_t handle;		/* XXX should be vp */
	vm_offset_t foff;
{
	register vm_pager_t pager;
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp;
	int type;
	int rv = KERN_SUCCESS;

	if (size == 0)
		return (0);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		fitit = FALSE;
		(void) vm_deallocate(map, *addr, size);
	}

	/*
	 * Lookup/allocate pager.  All except an unnamed anonymous lookup
	 * gain a reference to ensure continued existence of the object.
	 * (XXX the exception is to appease the pageout daemon)
	 */
	if ((flags & MAP_TYPE) == MAP_ANON)
		type = PG_DFLT;
	else {
		vp = (struct vnode *)handle;
		if (vp->v_type == VCHR) {
			type = PG_DEVICE;
			handle = (caddr_t)vp->v_rdev;
		} else
			type = PG_VNODE;
	}
	pager = vm_pager_allocate(type, handle, size, prot);
	if (pager == NULL)
		return (type == PG_DEVICE ? EINVAL : ENOMEM);
	/*
	 * Find object and release extra reference gained by lookup
	 */
	object = vm_object_lookup(pager);
	vm_object_deallocate(object);

	/*
	 * Anonymous memory.
	 */
	if ((flags & MAP_TYPE) == MAP_ANON) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, TRUE);
		if (rv != KERN_SUCCESS) {
			if (handle == NULL)
				vm_pager_deallocate(pager);
			else
				vm_object_deallocate(object);
			goto out;
		}
		/*
		 * Don't cache anonymous objects.
		 * Loses the reference gained by vm_pager_allocate.
		 */
		(void) pager_cache(object, FALSE);
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Must be type MAP_FILE.
	 * Distinguish between character special and regular files.
	 */
	else if (vp->v_type == VCHR) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, FALSE);
		/*
		 * Uncache the object and lose the reference gained
		 * by vm_pager_allocate().  If the call to
		 * vm_allocate_with_pager() was successful, then we
		 * gained an additional reference ensuring the object
		 * will continue to exist.  If the call failed then
		 * the deallocate call below will terminate the
		 * object which is fine.
		 */
		(void) pager_cache(object, FALSE);
		if (rv != KERN_SUCCESS)
			goto out;
	}
	/*
	 * A regular file
	 */
	else {
#ifdef DEBUG
		if (object == NULL)
			printf("vm_mmap: no object: vp %x, pager %x\n",
			       vp, pager);
#endif
		/*
		 * Map it directly.
		 * Allows modifications to go out to the vnode.
		 */
		if (flags & MAP_SHARED) {
			rv = vm_allocate_with_pager(map, addr, size,
						    fitit, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			/*
			 * Don't cache the object.  This is the easiest way
			 * of ensuring that data gets back to the filesystem
			 * because vnode_pager_deallocate() will fsync the
			 * vnode.  pager_cache() will lose the extra ref.
			 */
			if (prot & VM_PROT_WRITE)
				pager_cache(object, FALSE);
			else
				vm_object_deallocate(object);
		}
		/*
		 * Copy-on-write of file.  Two flavors.
		 * MAP_COPY is true COW, you essentially get a snapshot of
		 * the region at the time of mapping.  MAP_PRIVATE means only
		 * that your changes are not reflected back to the object.
		 * Changes made by others will be seen.
		 */
		else {
			vm_map_t tmap;
			vm_offset_t off;

			/* locate and allocate the target address space */
			rv = vm_map_find(map, NULL, (vm_offset_t)0,
					 addr, size, fitit);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
					     VM_MIN_ADDRESS+size, TRUE);
			off = VM_MIN_ADDRESS;
			rv = vm_allocate_with_pager(tmap, &off, size,
						    TRUE, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				vm_map_deallocate(tmap);
				goto out;
			}
			/*
			 * (XXX)
			 * MAP_PRIVATE implies that we see changes made by
			 * others.  To ensure that, we need to guarantee that
			 * no copy object is created (otherwise original
			 * pages would be pushed to the copy object and we
			 * would never see changes made by others).  We
			 * totally sleaze it right now by marking the object
			 * internal temporarily.
			 */
			if ((flags & MAP_COPY) == 0)
				object->flags |= OBJ_INTERNAL;
			rv = vm_map_copy(map, tmap, *addr, size, off,
					 FALSE, FALSE);
			object->flags &= ~OBJ_INTERNAL;
			/*
			 * (XXX)
			 * My oh my, this only gets worse...
			 * Force creation of a shadow object so that
			 * vm_map_fork will do the right thing.
			 */
			if ((flags & MAP_COPY) == 0) {
				vm_map_t tmap;
				vm_map_entry_t tentry;
				vm_object_t tobject;
				vm_offset_t toffset;
				vm_prot_t tprot;
				boolean_t twired, tsu;

				tmap = map;
				vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
					      &tentry, &tobject, &toffset,
					      &tprot, &twired, &tsu);
				vm_map_lookup_done(tmap, tentry);
			}
			/*
			 * (XXX)
			 * Map copy code cannot detect sharing unless a
			 * sharing map is involved.  So we cheat and write
			 * protect everything ourselves.
			 */
			vm_object_pmap_copy(object, (vm_offset_t)foff,
					    (vm_offset_t)foff+size);
			vm_object_deallocate(object);
			vm_map_deallocate(tmap);
			if (rv != KERN_SUCCESS)
				goto out;
		}
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Correct protection (default is VM_PROT_ALL).
	 * Note that we set the maximum protection.  This may not be
	 * entirely correct.  Maybe the maximum protection should be based
	 * on the object permissions where it makes sense (e.g. a vnode).
	 *
	 * Changed my mind: leave max prot at VM_PROT_ALL.
	 */
	if (prot != VM_PROT_ALL) {
		rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & MAP_SHARED) {
		rv = vm_inherit(map, *addr, size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
out:
#ifdef DEBUG
	if (mmapdebug & MDB_MAPIT)
		printf("vm_mmap: rv %d\n", rv);
#endif
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
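/*
 * Usage sketch (illustrative only): an in-kernel caller such as
 * exec maps a vnode by passing it as the handle, with protections
 * already converted to Mach style.  Assuming vp, size and foff
 * have been validated by the caller:
 *
 *	vm_offset_t va = 0;
 *	error = vm_mmap(&p->p_vmspace->vm_map, &va, size,
 *	    VM_PROT_READ|VM_PROT_EXECUTE, MAP_FILE|MAP_PRIVATE,
 *	    (caddr_t)vp, foff);
 *
 * For MAP_ANON the handle is instead NULL (unnamed) or a file
 * pointer (named, e.g. sys5 shared memory).
 */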
/*
 * Internal bastardized version of Mach's vm_region system call.
 * Given address and size it returns map attributes as well
 * as the (locked) object mapped at that location.
 */
int
vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff)
	vm_map_t map;
	vm_offset_t *addr;		/* IN/OUT */
	vm_size_t *size;		/* OUT */
	vm_prot_t *prot;		/* OUT */
	vm_prot_t *max_prot;		/* OUT */
	vm_inherit_t *inheritance;	/* OUT */
	boolean_t *shared;		/* OUT */
	vm_object_t *object;		/* OUT */
	vm_offset_t *objoff;		/* OUT */
{
	vm_map_entry_t tmp_entry;
	register vm_map_entry_t entry;
	register vm_offset_t tmp_offset;
	vm_offset_t start;

	if (map == NULL)
		return (KERN_INVALID_ARGUMENT);

	start = *addr;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->next) == &map->header) {
			vm_map_unlock_read(map);
			return (KERN_NO_SPACE);
		}
		start = entry->start;
		*addr = start;
	} else
		entry = tmp_entry;

	*prot = entry->protection;
	*max_prot = entry->max_protection;
	*inheritance = entry->inheritance;

	tmp_offset = entry->offset + (start - entry->start);
	*size = (entry->end - start);

	if (entry->is_a_map) {
		register vm_map_t share_map;
		vm_size_t share_size;

		share_map = entry->object.share_map;

		vm_map_lock_read(share_map);
		(void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry);

		if ((share_size = (tmp_entry->end - tmp_offset)) < *size)
			*size = share_size;

		vm_object_lock(tmp_entry->object.vm_object);
		*object = tmp_entry->object.vm_object;
		*objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start);

		*shared = (share_map->ref_count != 1);
		vm_map_unlock_read(share_map);
	} else {
		vm_object_lock(entry->object.vm_object);
		*object = entry->object.vm_object;
		*objoff = tmp_offset;

		*shared = FALSE;
	}

	vm_map_unlock_read(map);

	return (KERN_SUCCESS);
}
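/*
 * Calling sketch (illustrative only; msync() above is the in-tree
 * caller): the object comes back locked on KERN_SUCCESS and the
 * caller must unlock it:
 *
 *	rv = vm_region(map, &addr, &size, &prot, &mprot,
 *	    &inherit, &shared, &object, &objoff);
 *	if (rv == KERN_SUCCESS) {
 *		... use object ...
 *		vm_object_unlock(object);
 *	}
 */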
/*
 * Yet another bastard routine.
 */
int
vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	boolean_t fitit;
	vm_pager_t pager;
	vm_offset_t poffset;
	boolean_t internal;
{
	register vm_object_t object;
	register int result;

	if (map == NULL)
		return (KERN_INVALID_ARGUMENT);

	*addr = trunc_page(*addr);
	size = round_page(size);

	/*
	 * Lookup the pager/paging-space in the object cache.
	 * If it's not there, then create a new object and cache
	 * it.
	 */
	object = vm_object_lookup(pager);
	cnt.v_lookups++;
	if (object == NULL) {
		object = vm_object_allocate(size);
		vm_object_enter(object, pager);
	} else
		cnt.v_hits++;
	if (internal)
		object->flags |= OBJ_INTERNAL;
	else
		object->flags &= ~OBJ_INTERNAL;

	result = vm_map_find(map, object, poffset, addr, size, fitit);
	if (result != KERN_SUCCESS)
		vm_object_deallocate(object);
	else if (pager != NULL)
		vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
	return (result);
}

/*
 * XXX: this routine belongs in vm_map.c.
 *
 * Returns TRUE if the range [start, end) is allocated in either
 * a single entry (single_entry == TRUE) or multiple contiguous
 * entries (single_entry == FALSE).
 *
 * start and end should be page aligned.
 */
boolean_t
vm_map_is_allocated(map, start, end, single_entry)
	vm_map_t map;
	vm_offset_t start, end;
	boolean_t single_entry;
{
	vm_map_entry_t mapent;
	register vm_offset_t nend;

	vm_map_lock_read(map);

	/*
	 * Start address not in any entry
	 */
	if (!vm_map_lookup_entry(map, start, &mapent)) {
		vm_map_unlock_read(map);
		return (FALSE);
	}
	/*
	 * Find the maximum stretch of contiguously allocated space
	 */
	nend = mapent->end;
	if (!single_entry) {
		mapent = mapent->next;
		while (mapent != &map->header && mapent->start == nend) {
			nend = mapent->end;
			mapent = mapent->next;
		}
	}

	vm_map_unlock_read(map);
	return (end <= nend);
}