xref: /original-bsd/sys/vm/vm_mmap.c (revision 860e07fc)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1991 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
13  *
14  *	@(#)vm_mmap.c	7.17 (Berkeley) 07/12/92
15  */
16 
17 /*
18  * Mapped file (mmap) interface to VM
19  */
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/filedesc.h>
24 #include <sys/proc.h>
25 #include <sys/vnode.h>
26 #include <sys/file.h>
27 #include <sys/mman.h>
28 #include <sys/conf.h>
29 
30 #include <miscfs/specfs/specdev.h>
31 
32 #include <vm/vm.h>
33 #include <vm/vm_pager.h>
34 #include <vm/vm_prot.h>
35 
36 #ifdef DEBUG
37 int mmapdebug = 0;
38 #define MDB_FOLLOW	0x01
39 #define MDB_SYNC	0x02
40 #define MDB_MAPIT	0x04
41 #endif
42 
43 struct getpagesize_args {
44 	int	dummy;
45 };
46 /* ARGSUSED */
47 int
48 getpagesize(p, uap, retval)
49 	struct proc *p;
50 	struct getpagesize_args *uap;
51 	int *retval;
52 {
53 
54 	*retval = PAGE_SIZE;
55 	return (0);
56 }
57 
58 struct sbrk_args {
59 	int	incr;
60 };
61 /* ARGSUSED */
62 int
63 sbrk(p, uap, retval)
64 	struct proc *p;
65 	struct sbrk_args *uap;
66 	int *retval;
67 {
68 
69 	/* Not yet implemented */
70 	return (EOPNOTSUPP);
71 }
72 
73 struct sstk_args {
74 	int	incr;
75 };
76 /* ARGSUSED */
77 int
78 sstk(p, uap, retval)
79 	struct proc *p;
80 	struct sstk_args *uap;
81 	int *retval;
82 {
83 
84 	/* Not yet implemented */
85 	return (EOPNOTSUPP);
86 }
87 
88 struct mmap_args {
89 	caddr_t	addr;
90 	int	len;
91 	int	prot;
92 	int	flags;
93 	int	fd;
94 	long	pad;
95 	off_t	pos;
96 };
97 
98 #ifdef COMPAT_43
99 struct osmmap_args {
100 	caddr_t	addr;
101 	int	len;
102 	int	prot;
103 	int	flags;
104 	int	fd;
105 	long	pos;
106 };
107 int
108 osmmap(p, uap, retval)
109 	struct proc *p;
110 	register struct osmmap_args *uap;
111 	int *retval;
112 {
113 	struct mmap_args nargs;
114 	static const char cvtbsdprot[8] = {
115 		0,
116 		PROT_EXEC,
117 		PROT_WRITE,
118 		PROT_EXEC|PROT_WRITE,
119 		PROT_READ,
120 		PROT_EXEC|PROT_READ,
121 		PROT_WRITE|PROT_READ,
122 		PROT_EXEC|PROT_WRITE|PROT_READ,
123 	};
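	/*
	 * The old (4.3BSD-compatible) protection value indexes the table
	 * above: bit 0 was execute, bit 1 was write, and bit 2 was read,
	 * so cvtbsdprot[uap->prot & 7] yields the equivalent PROT_* bits.
	 */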
124 #define	OMAP_ANON	0x0002
125 #define	OMAP_COPY	0x0020
126 #define	OMAP_SHARED	0x0010
127 #define	OMAP_FIXED	0x0100
128 #define	OMAP_INHERIT	0x0800
129 
130 	nargs.addr = uap->addr;
131 	nargs.len = uap->len;
132 	nargs.prot = cvtbsdprot[uap->prot&0x7];
133 	nargs.flags = 0;
134 	if (uap->flags & OMAP_ANON)
135 		nargs.flags |= MAP_ANON;
136 	if (uap->flags & OMAP_COPY)
137 		nargs.flags |= MAP_COPY;
138 	if (uap->flags & OMAP_SHARED)
139 		nargs.flags |= MAP_SHARED;
140 	else
141 		nargs.flags |= MAP_PRIVATE;
142 	if (uap->flags & OMAP_FIXED)
143 		nargs.flags |= MAP_FIXED;
144 	if (uap->flags & OMAP_INHERIT)
145 		nargs.flags |= MAP_INHERIT;
146 	nargs.fd = uap->fd;
147 	nargs.pos = uap->pos;
148 	return (smmap(p, &nargs, retval));
149 }
150 #endif
151 
152 int
153 smmap(p, uap, retval)
154 	struct proc *p;
155 	register struct mmap_args *uap;
156 	int *retval;
157 {
158 	register struct filedesc *fdp = p->p_fd;
159 	register struct file *fp;
160 	struct vnode *vp;
161 	vm_offset_t addr;
162 	vm_size_t size;
163 	vm_prot_t prot;
164 	caddr_t handle;
165 	int error;
166 
167 #ifdef DEBUG
168 	if (mmapdebug & MDB_FOLLOW)
169 		printf("mmap(%d): addr %x len %x pro %x flg %x fd %d pos %x\n",
170 		       p->p_pid, uap->addr, uap->len, uap->prot,
171 		       uap->flags, uap->fd, uap->pos);
172 #endif
173 	/*
174 	 * Address (if FIXED) must be page aligned.
175 	 * Size is implicitly rounded to a page boundary.
176 	 */
177 	addr = (vm_offset_t) uap->addr;
178 	if (((uap->flags & MAP_FIXED) && (addr & PAGE_MASK)) || uap->len < 0 ||
179 	    ((uap->flags & MAP_ANON) && uap->fd != -1))
180 		return (EINVAL);
181 	size = (vm_size_t) round_page(uap->len);
182 	/*
183 	 * XXX if no hint is provided for a non-fixed mapping, place it after
184 	 * the end of the largest possible heap.
185 	 *
186 	 * There should really be a pmap call to determine a reasonable
187 	 * location.
188 	 */
189 	if (addr == 0 && (uap->flags & MAP_FIXED) == 0)
190 		addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
191 	/*
192 	 * If we are mapping a file we need to check various
193 	 * file/vnode related things.
194 	 */
195 	if (uap->flags & MAP_ANON)
196 		handle = NULL;
197 	else {
198 		/*
199 		 * Mapping file, get fp for validation.
200 		 * Obtain vnode and make sure it is of appropriate type
201 		 */
202 		if (((unsigned)uap->fd) >= fdp->fd_nfiles ||
203 		    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
204 			return(EBADF);
205 		if (fp->f_type != DTYPE_VNODE)
206 			return(EINVAL);
207 		vp = (struct vnode *)fp->f_data;
208 		if (vp->v_type != VREG && vp->v_type != VCHR)
209 			return(EINVAL);
210 		/*
211 		 * Ensure that file protection and desired protection
212 		 * are compatible.  Note that we only worry about writability
213 		 * if mapping is shared.
214 		 */
215 		if ((uap->prot & PROT_READ) && (fp->f_flag & FREAD) == 0 ||
216 		    ((uap->flags & MAP_SHARED) &&
217 		     (uap->prot & PROT_WRITE) && (fp->f_flag & FWRITE) == 0))
218 			return(EACCES);
219 		handle = (caddr_t)vp;
220 	}
221 	/*
222 	 * Map protections to MACH style
223 	 */
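	/*
	 * PROT_READ/WRITE/EXEC use the same bit values as
	 * VM_PROT_READ/WRITE/EXECUTE, so masking with VM_PROT_ALL
	 * suffices here; compare mprotect() below, which maps each
	 * bit explicitly.
	 */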
224 	prot = uap->prot & VM_PROT_ALL;
225 	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot,
226 			uap->flags, handle, (vm_offset_t)uap->pos);
227 	if (error == 0)
228 		*retval = (int) addr;
229 	return(error);
230 }
231 
232 struct msync_args {
233 	caddr_t	addr;
234 	int	len;
235 };
236 int
237 msync(p, uap, retval)
238 	struct proc *p;
239 	struct msync_args *uap;
240 	int *retval;
241 {
242 	vm_offset_t addr, objoff, oaddr;
243 	vm_size_t size, osize;
244 	vm_prot_t prot, mprot;
245 	vm_inherit_t inherit;
246 	vm_object_t object;
247 	boolean_t shared;
248 	int rv;
249 
250 #ifdef DEBUG
251 	if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
252 		printf("msync(%d): addr %x len %x\n",
253 		       p->p_pid, uap->addr, uap->len);
254 #endif
255 	if (((int)uap->addr & PAGE_MASK) || uap->len < 0)
256 		return(EINVAL);
257 	addr = oaddr = (vm_offset_t)uap->addr;
258 	osize = (vm_size_t)uap->len;
259 	/*
260 	 * Region must be entirely contained in a single entry
261 	 */
262 	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+osize,
263 	    TRUE))
264 		return(EINVAL);
265 	/*
266 	 * Determine the object associated with that entry
267 	 * (object is returned locked on KERN_SUCCESS)
268 	 */
269 	rv = vm_region(&p->p_vmspace->vm_map, &addr, &size, &prot, &mprot,
270 		       &inherit, &shared, &object, &objoff);
271 	if (rv != KERN_SUCCESS)
272 		return(EINVAL);
273 #ifdef DEBUG
274 	if (mmapdebug & MDB_SYNC)
275 		printf("msync: region: object %x addr %x size %d objoff %d\n",
276 		       object, addr, size, objoff);
277 #endif
278 	/*
279 	 * Do not msync non-vnode-backed objects.
280 	 */
281 	if ((object->flags & OBJ_INTERNAL) || object->pager == NULL ||
282 	    object->pager->pg_type != PG_VNODE) {
283 		vm_object_unlock(object);
284 		return(EINVAL);
285 	}
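	/*
	 * Convert the offset within the region returned by vm_region()
	 * back into an offset for the caller's original address; a
	 * length of zero means sync the remainder of the region.
	 */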
286 	objoff += oaddr - addr;
287 	if (osize == 0)
288 		osize = size;
289 #ifdef DEBUG
290 	if (mmapdebug & MDB_SYNC)
291 		printf("msync: cleaning/flushing object range [%x-%x)\n",
292 		       objoff, objoff+osize);
293 #endif
294 	if (prot & VM_PROT_WRITE)
295 		vm_object_page_clean(object, objoff, objoff+osize, FALSE);
296 	/*
297 	 * (XXX)
298 	 * Bummer, gotta flush all cached pages to ensure
299 	 * consistency with the file system cache.
300 	 */
301 	vm_object_page_remove(object, objoff, objoff+osize);
302 	vm_object_unlock(object);
303 	return(0);
304 }
305 
306 struct munmap_args {
307 	caddr_t	addr;
308 	int	len;
309 };
310 int
311 munmap(p, uap, retval)
312 	register struct proc *p;
313 	register struct munmap_args *uap;
314 	int *retval;
315 {
316 	vm_offset_t addr;
317 	vm_size_t size;
318 
319 #ifdef DEBUG
320 	if (mmapdebug & MDB_FOLLOW)
321 		printf("munmap(%d): addr %x len %x\n",
322 		       p->p_pid, uap->addr, uap->len);
323 #endif
324 
325 	addr = (vm_offset_t) uap->addr;
326 	if ((addr & PAGE_MASK) || uap->len < 0)
327 		return(EINVAL);
328 	size = (vm_size_t) round_page(uap->len);
329 	if (size == 0)
330 		return(0);
331 	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+size,
332 	    FALSE))
333 		return(EINVAL);
334 	/* returns nothing but KERN_SUCCESS anyway */
335 	(void) vm_map_remove(&p->p_vmspace->vm_map, addr, addr+size);
336 	return(0);
337 }
338 
339 void
340 munmapfd(fd)
341 	int fd;
342 {
343 #ifdef DEBUG
344 	if (mmapdebug & MDB_FOLLOW)
345 		printf("munmapfd(%d): fd %d\n", curproc->p_pid, fd);
346 #endif
347 
348 	/*
349 	 * XXX -- should vm_deallocate any regions mapped to this file
350 	 */
351 	curproc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
352 }
353 
354 struct mprotect_args {
355 	caddr_t	addr;
356 	int	len;
357 	int	prot;
358 };
359 int
360 mprotect(p, uap, retval)
361 	struct proc *p;
362 	struct mprotect_args *uap;
363 	int *retval;
364 {
365 	vm_offset_t addr;
366 	vm_size_t size;
367 	register vm_prot_t prot;
368 
369 #ifdef DEBUG
370 	if (mmapdebug & MDB_FOLLOW)
371 		printf("mprotect(%d): addr %x len %x prot %d\n",
372 		       p->p_pid, uap->addr, uap->len, uap->prot);
373 #endif
374 
375 	addr = (vm_offset_t)uap->addr;
376 	if ((addr & PAGE_MASK) || uap->len < 0)
377 		return(EINVAL);
378 	size = (vm_size_t)uap->len;
379 	/*
380 	 * Map protections
381 	 */
382 	prot = VM_PROT_NONE;
383 	if (uap->prot & PROT_READ)
384 		prot |= VM_PROT_READ;
385 	if (uap->prot & PROT_WRITE)
386 		prot |= VM_PROT_WRITE;
387 	if (uap->prot & PROT_EXEC)
388 		prot |= VM_PROT_EXECUTE;
389 
390 	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot,
391 	    FALSE)) {
392 	case KERN_SUCCESS:
393 		return (0);
394 	case KERN_PROTECTION_FAILURE:
395 		return (EACCES);
396 	}
397 	return (EINVAL);
398 }
399 
400 struct madvise_args {
401 	caddr_t	addr;
402 	int	len;
403 	int	behav;
404 };
405 /* ARGSUSED */
406 int
407 madvise(p, uap, retval)
408 	struct proc *p;
409 	struct madvise_args *uap;
410 	int *retval;
411 {
412 
413 	/* Not yet implemented */
414 	return (EOPNOTSUPP);
415 }
416 
417 struct mincore_args {
418 	caddr_t	addr;
419 	int	len;
420 	char	*vec;
421 };
422 /* ARGSUSED */
423 int
424 mincore(p, uap, retval)
425 	struct proc *p;
426 	struct mincore_args *uap;
427 	int *retval;
428 {
429 
430 	/* Not yet implemented */
431 	return (EOPNOTSUPP);
432 }
433 
434 /*
435  * Internal version of mmap.
436  * Currently used by mmap, exec, and sys5 shared memory.
437  * Handle is either a vnode pointer or NULL for MAP_ANON.
438  */
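/*
 * A sketch of a typical call, modeled on smmap() above (a shared,
 * writable mapping of vnode vp at a kernel-chosen address; size is
 * assumed to be page-rounded, as smmap() does):
 *
 *	addr = 0;
 *	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size,
 *			VM_PROT_READ|VM_PROT_WRITE, MAP_SHARED,
 *			(caddr_t)vp, (vm_offset_t)0);
 */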
439 int
440 vm_mmap(map, addr, size, prot, flags, handle, foff)
441 	register vm_map_t map;
442 	register vm_offset_t *addr;
443 	register vm_size_t size;
444 	vm_prot_t prot;
445 	register int flags;
446 	caddr_t handle;		/* XXX should be vp */
447 	vm_offset_t foff;
448 {
449 	register vm_pager_t pager;
450 	boolean_t fitit;
451 	vm_object_t object;
452 	struct vnode *vp;
453 	int type;
454 	int rv = KERN_SUCCESS;
455 
456 	if (size == 0)
457 		return (0);
458 
459 	if ((flags & MAP_FIXED) == 0) {
460 		fitit = TRUE;
461 		*addr = round_page(*addr);
462 	} else {
463 		fitit = FALSE;
464 		(void)vm_deallocate(map, *addr, size);
465 	}
466 
467 	/*
468 	 * Lookup/allocate pager.  All except an unnamed anonymous lookup
469 	 * gain a reference to ensure continued existence of the object.
470 	 * (XXX the exception is to appease the pageout daemon)
471 	 */
472 	if (flags & MAP_ANON)
473 		type = PG_DFLT;
474 	else {
475 		vp = (struct vnode *)handle;
476 		if (vp->v_type == VCHR) {
477 			type = PG_DEVICE;
478 			handle = (caddr_t)vp->v_rdev;
479 		} else
480 			type = PG_VNODE;
481 	}
482 	pager = vm_pager_allocate(type, handle, size, prot);
483 	if (pager == NULL)
484 		return (type == PG_DEVICE ? EINVAL : ENOMEM);
485 	/*
486 	 * Find object and release extra reference gained by lookup
487 	 */
488 	object = vm_object_lookup(pager);
489 	vm_object_deallocate(object);
490 
491 	/*
492 	 * Anonymous memory.
493 	 */
494 	if (flags & MAP_ANON) {
495 		rv = vm_allocate_with_pager(map, addr, size, fitit,
496 					    pager, (vm_offset_t)foff, TRUE);
497 		if (rv != KERN_SUCCESS) {
498 			if (handle == NULL)
499 				vm_pager_deallocate(pager);
500 			else
501 				vm_object_deallocate(object);
502 			goto out;
503 		}
504 		/*
505 		 * Don't cache anonymous objects.
506 		 * Loses the reference gained by vm_pager_allocate.
507 		 */
508 		(void) pager_cache(object, FALSE);
509 #ifdef DEBUG
510 		if (mmapdebug & MDB_MAPIT)
511 			printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
512 			       curproc->p_pid, *addr, size, pager);
513 #endif
514 	}
515 	/*
516 	 * Must be a mapped file.
517 	 * Distinguish between character special and regular files.
518 	 */
519 	else if (vp->v_type == VCHR) {
520 		rv = vm_allocate_with_pager(map, addr, size, fitit,
521 					    pager, (vm_offset_t)foff, FALSE);
522 		/*
523 		 * Uncache the object and lose the reference gained
524 		 * by vm_pager_allocate().  If the call to
525 		 * vm_allocate_with_pager() was successful, then we
526 		 * gained an additional reference ensuring the object
527 		 * will continue to exist.  If the call failed then
528 		 * the deallocate call below will terminate the
529 		 * object, which is fine.
530 		 */
531 		(void) pager_cache(object, FALSE);
532 		if (rv != KERN_SUCCESS)
533 			goto out;
534 	}
535 	/*
536 	 * A regular file
537 	 */
538 	else {
539 #ifdef DEBUG
540 		if (object == NULL)
541 			printf("vm_mmap: no object: vp %x, pager %x\n",
542 			       vp, pager);
543 #endif
544 		/*
545 		 * Map it directly.
546 		 * Allows modifications to go out to the vnode.
547 		 */
548 		if (flags & MAP_SHARED) {
549 			rv = vm_allocate_with_pager(map, addr, size,
550 						    fitit, pager,
551 						    (vm_offset_t)foff, FALSE);
552 			if (rv != KERN_SUCCESS) {
553 				vm_object_deallocate(object);
554 				goto out;
555 			}
556 			/*
557 			 * Don't cache the object.  This is the easiest way
558 			 * of ensuring that data gets back to the filesystem
559 			 * because vnode_pager_deallocate() will fsync the
560 			 * vnode.  pager_cache() will lose the extra ref.
561 			 */
562 			if (prot & VM_PROT_WRITE)
563 				pager_cache(object, FALSE);
564 			else
565 				vm_object_deallocate(object);
566 		}
567 		/*
568 		 * Copy-on-write of file.  Two flavors.
569 		 * MAP_COPY is true COW, you essentially get a snapshot of
570 		 * the region at the time of mapping.  MAP_PRIVATE means only
571 		 * that your changes are not reflected back to the object.
572 		 * Changes made by others will be seen.
573 		 */
574 		else {
575 			vm_map_t tmap;
576 			vm_offset_t off;
577 
578 			/* locate and allocate the target address space */
579 			rv = vm_map_find(map, NULL, (vm_offset_t)0,
580 					 addr, size, fitit);
581 			if (rv != KERN_SUCCESS) {
582 				vm_object_deallocate(object);
583 				goto out;
584 			}
585 			tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
586 					     VM_MIN_ADDRESS+size, TRUE);
587 			off = VM_MIN_ADDRESS;
588 			rv = vm_allocate_with_pager(tmap, &off, size,
589 						    TRUE, pager,
590 						    (vm_offset_t)foff, FALSE);
591 			if (rv != KERN_SUCCESS) {
592 				vm_object_deallocate(object);
593 				vm_map_deallocate(tmap);
594 				goto out;
595 			}
596 			/*
597 			 * (XXX)
598 			 * MAP_PRIVATE implies that we see changes made by
599 			 * others.  To ensure that, we need to guarantee that
600 			 * no copy object is created (otherwise original
601 			 * pages would be pushed to the copy object and we
602 			 * would never see changes made by others).  We
603 			 * totally sleaze it right now by marking the object
604 			 * internal temporarily.
605 			 */
606 			if ((flags & MAP_COPY) == 0)
607 				object->flags |= OBJ_INTERNAL;
608 			rv = vm_map_copy(map, tmap, *addr, size, off,
609 					 FALSE, FALSE);
610 			object->flags &= ~OBJ_INTERNAL;
611 			/*
612 			 * (XXX)
613 			 * My oh my, this only gets worse...
614 			 * Force creation of a shadow object so that
615 			 * vm_map_fork will do the right thing.
616 			 */
617 			if ((flags & MAP_COPY) == 0) {
618 				vm_map_t tmap;
619 				vm_map_entry_t tentry;
620 				vm_object_t tobject;
621 				vm_offset_t toffset;
622 				vm_prot_t tprot;
623 				boolean_t twired, tsu;
624 
625 				tmap = map;
626 				vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
627 					      &tentry, &tobject, &toffset,
628 					      &tprot, &twired, &tsu);
629 				vm_map_lookup_done(tmap, tentry);
630 			}
631 			/*
632 			 * (XXX)
633 			 * Map copy code cannot detect sharing unless a
634 			 * sharing map is involved.  So we cheat and write
635 			 * protect everything ourselves.
636 			 */
637 			vm_object_pmap_copy(object, (vm_offset_t)foff,
638 					    (vm_offset_t)foff+size);
639 			vm_object_deallocate(object);
640 			vm_map_deallocate(tmap);
641 			if (rv != KERN_SUCCESS)
642 				goto out;
643 		}
644 #ifdef DEBUG
645 		if (mmapdebug & MDB_MAPIT)
646 			printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
647 			       curproc->p_pid, *addr, size, pager);
648 #endif
649 	}
650 	/*
651 	 * Correct protection (default is VM_PROT_ALL).
652 	 * Note that we set the maximum protection.  This may not be
653 	 * entirely correct.  Maybe the maximum protection should be based
654 	 * on the object permissions where it makes sense (e.g. a vnode).
655 	 *
656 	 * Changed my mind: leave max prot at VM_PROT_ALL.
657 	 */
658 	if (prot != VM_PROT_ALL) {
659 		rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE);
660 		if (rv != KERN_SUCCESS) {
661 			(void) vm_deallocate(map, *addr, size);
662 			goto out;
663 		}
664 	}
665 	/*
666 	 * Shared memory is also shared with children.
667 	 */
668 	if (flags & MAP_SHARED) {
669 		rv = vm_inherit(map, *addr, size, VM_INHERIT_SHARE);
670 		if (rv != KERN_SUCCESS) {
671 			(void) vm_deallocate(map, *addr, size);
672 			goto out;
673 		}
674 	}
675 out:
676 #ifdef DEBUG
677 	if (mmapdebug & MDB_MAPIT)
678 		printf("vm_mmap: rv %d\n", rv);
679 #endif
680 	switch (rv) {
681 	case KERN_SUCCESS:
682 		return (0);
683 	case KERN_INVALID_ADDRESS:
684 	case KERN_NO_SPACE:
685 		return (ENOMEM);
686 	case KERN_PROTECTION_FAILURE:
687 		return (EACCES);
688 	default:
689 		return (EINVAL);
690 	}
691 }
692 
693 /*
694  * Internal bastardized version of MACH's vm_region system call.
695  * Given address and size it returns map attributes as well
696  * as the (locked) object mapped at that location.
697  */
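/*
 * For an example of use, see msync() above: on KERN_SUCCESS the
 * returned object must be released with vm_object_unlock() once the
 * caller is done with it.
 */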
698 int
699 vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff)
700 	vm_map_t	map;
701 	vm_offset_t	*addr;		/* IN/OUT */
702 	vm_size_t	*size;		/* OUT */
703 	vm_prot_t	*prot;		/* OUT */
704 	vm_prot_t	*max_prot;	/* OUT */
705 	vm_inherit_t	*inheritance;	/* OUT */
706 	boolean_t	*shared;	/* OUT */
707 	vm_object_t	*object;	/* OUT */
708 	vm_offset_t	*objoff;	/* OUT */
709 {
710 	vm_map_entry_t	tmp_entry;
711 	register
712 	vm_map_entry_t	entry;
713 	register
714 	vm_offset_t	tmp_offset;
715 	vm_offset_t	start;
716 
717 	if (map == NULL)
718 		return(KERN_INVALID_ARGUMENT);
719 
720 	start = *addr;
721 
722 	vm_map_lock_read(map);
723 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
724 		if ((entry = tmp_entry->next) == &map->header) {
725 			vm_map_unlock_read(map);
726 		   	return(KERN_NO_SPACE);
727 		}
728 		start = entry->start;
729 		*addr = start;
730 	} else
731 		entry = tmp_entry;
732 
733 	*prot = entry->protection;
734 	*max_prot = entry->max_protection;
735 	*inheritance = entry->inheritance;
736 
737 	tmp_offset = entry->offset + (start - entry->start);
738 	*size = (entry->end - start);
739 
740 	if (entry->is_a_map) {
741 		register vm_map_t share_map;
742 		vm_size_t share_size;
743 
744 		share_map = entry->object.share_map;
745 
746 		vm_map_lock_read(share_map);
747 		(void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry);
748 
749 		if ((share_size = (tmp_entry->end - tmp_offset)) < *size)
750 			*size = share_size;
751 
752 		vm_object_lock(tmp_entry->object);
753 		*object = tmp_entry->object.vm_object;
754 		*objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start);
755 
756 		*shared = (share_map->ref_count != 1);
757 		vm_map_unlock_read(share_map);
758 	} else {
759 		vm_object_lock(entry->object);
760 		*object = entry->object.vm_object;
761 		*objoff = tmp_offset;
762 
763 		*shared = FALSE;
764 	}
765 
766 	vm_map_unlock_read(map);
767 
768 	return(KERN_SUCCESS);
769 }
770 
771 /*
772  * Yet another bastard routine.
773  */
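/*
 * Looks up (or creates) the object backing `pager', maps it into `map'
 * at *addr (letting the map choose the address if fitit is set), and
 * attaches the pager to the object.  Internal (anonymous) objects are
 * kept out of the object cache hash list; see the note from Mike
 * Hibler below.
 */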
774 int
775 vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal)
776 	register vm_map_t	map;
777 	register vm_offset_t	*addr;
778 	register vm_size_t	size;
779 	boolean_t		fitit;
780 	vm_pager_t		pager;
781 	vm_offset_t		poffset;
782 	boolean_t		internal;
783 {
784 	register vm_object_t	object;
785 	register int		result;
786 
787 	if (map == NULL)
788 		return(KERN_INVALID_ARGUMENT);
789 
790 	*addr = trunc_page(*addr);
791 	size = round_page(size);
792 
793 	/*
794 	 *	Lookup the pager/paging-space in the object cache.
795 	 *	If it's not there, then create a new object and cache
796 	 *	it.
797 	 */
798 	object = vm_object_lookup(pager);
799 	cnt.v_lookups++;
800 	if (object == NULL) {
801 		object = vm_object_allocate(size);
802 		/*
803 		 * From Mike Hibler: "unnamed anonymous objects should never
804 		 * be on the hash list ... For now you can just change
805 		 * vm_allocate_with_pager to not do vm_object_enter if this
806 		 * is an internal object ..."
807 		 */
808 		if (!internal)
809 			vm_object_enter(object, pager);
810 	} else
811 		cnt.v_hits++;
812 	if (internal)
813 		object->flags |= OBJ_INTERNAL;
814 	else
815 		object->flags &= ~OBJ_INTERNAL;
816 
817 	result = vm_map_find(map, object, poffset, addr, size, fitit);
818 	if (result != KERN_SUCCESS)
819 		vm_object_deallocate(object);
820 	else if (pager != NULL)
821 		vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
822 	return(result);
823 }
824 
825 /*
826  * XXX: this routine belongs in vm_map.c.
827  *
828  * Returns TRUE if the range [start, end) is allocated in either
829  * a single entry (single_entry == TRUE) or multiple contiguous
830  * entries (single_entry == FALSE).
831  *
832  * start and end should be page aligned.
833  */
834 boolean_t
835 vm_map_is_allocated(map, start, end, single_entry)
836 	vm_map_t map;
837 	vm_offset_t start, end;
838 	boolean_t single_entry;
839 {
840 	vm_map_entry_t mapent;
841 	register vm_offset_t nend;
842 
843 	vm_map_lock_read(map);
844 
845 	/*
846 	 * Start address not in any entry
847 	 */
848 	if (!vm_map_lookup_entry(map, start, &mapent)) {
849 		vm_map_unlock_read(map);
850 		return (FALSE);
851 	}
852 	/*
853 	 * Find the maximum stretch of contiguously allocated space
854 	 */
855 	nend = mapent->end;
856 	if (!single_entry) {
857 		mapent = mapent->next;
858 		while (mapent != &map->header && mapent->start == nend) {
859 			nend = mapent->end;
860 			mapent = mapent->next;
861 		}
862 	}
863 
864 	vm_map_unlock_read(map);
865 	return (end <= nend);
866 }
867