/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_mmap.c 1.3 90/01/21$
 *
 *	@(#)vm_mmap.c	7.3 (Berkeley) 04/20/91
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include "param.h"
#include "systm.h"
#include "filedesc.h"
#include "proc.h"
#include "vnode.h"
#include "specdev.h"
#include "file.h"
#include "mman.h"
#include "conf.h"

#include "vm.h"
#include "vm_pager.h"
#include "vm_prot.h"
#include "vm_statistics.h"

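/*
 * mmapdebug below is a bitmask of the MDB_* flags; on DEBUG kernels,
 * setting it nonzero (e.g. from a kernel debugger) enables the tracing
 * printfs in the routines that follow.
 */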
#ifdef DEBUG
int mmapdebug = 0;
#define MDB_FOLLOW	0x01
#define MDB_SYNC	0x02
#define MDB_MAPIT	0x04
#endif

/* ARGSUSED */
getpagesize(p, uap, retval)
	struct proc *p;
	void *uap;
	int *retval;
{

	*retval = NBPG * CLSIZE;
	return (0);
}

/* ARGSUSED */
sbrk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
sstk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

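/*
 * The mmap() system call handler.  A user-level call of roughly the form
 *
 *	mmap(addr, len, PROT_READ|PROT_WRITE, MAP_FILE|MAP_SHARED, fd, pos)
 *
 * (argument names follow the args struct below) lands here.  MAP_ANON
 * requests normally pass fd == -1, or a descriptor for "named" anonymous
 * memory.
 */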
smmap(p, uap, retval)
	struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
		int	prot;
		int	flags;
		int	fd;
		off_t	pos;
	} *uap;
	int *retval;
{
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size;
	vm_prot_t prot;
	caddr_t handle;
	int mtype, error;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mmap(%d): addr %x len %x pro %x flg %x fd %d pos %x\n",
		       p->p_pid, uap->addr, uap->len, uap->prot,
		       uap->flags, uap->fd, uap->pos);
#endif
	/*
	 * Make sure one of the sharing types is specified
	 */
	mtype = uap->flags & MAP_TYPE;
	switch (mtype) {
	case MAP_FILE:
	case MAP_ANON:
		break;
	default:
		return(EINVAL);
	}
	/*
	 * Address (if FIXED) and size must be page aligned
	 */
	size = (vm_size_t)uap->len;
	addr = (vm_offset_t)uap->addr;
	if ((size & page_mask) ||
	    ((uap->flags & MAP_FIXED) && (addr & page_mask)))
		return(EINVAL);
	/*
	 * Mapping file or named anonymous, get fp for validation
	 */
	if (mtype == MAP_FILE || uap->fd != -1) {
		if (((unsigned)uap->fd) >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
			return(EBADF);
	}
	/*
	 * If we are mapping a file we need to check various
	 * file/vnode related things.
	 */
	if (mtype == MAP_FILE) {
		/*
		 * Obtain vnode and make sure it is of appropriate type
		 */
		if (fp->f_type != DTYPE_VNODE)
			return(EINVAL);
		vp = (struct vnode *)fp->f_data;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			return(EINVAL);
		/*
		 * Ensure that file protection and desired protection
		 * are compatible.  Note that we only worry about writability
		 * if mapping is shared.
		 */
		if (((uap->prot & PROT_READ) && (fp->f_flag & FREAD) == 0) ||
		    ((uap->flags & MAP_SHARED) &&
		     (uap->prot & PROT_WRITE) && (fp->f_flag & FWRITE) == 0))
			return(EACCES);
		handle = (caddr_t)vp;
	} else if (uap->fd != -1)
		handle = (caddr_t)fp;
	else
		handle = NULL;
	/*
	 * Map protections to MACH style
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot,
			uap->flags, handle, (vm_offset_t)uap->pos);
	if (error == 0)
		*retval = (int) addr;
	return(error);
}

msync(p, uap, retval)
	struct proc *p;
	struct args {
		char	*addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr, objoff, oaddr;
	vm_size_t size, osize;
	vm_prot_t prot, mprot;
	vm_inherit_t inherit;
	vm_object_t object;
	boolean_t shared;
	int rv;

#ifdef DEBUG
	if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
		printf("msync(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif
	if (((int)uap->addr & page_mask) || (uap->len & page_mask))
		return(EINVAL);
	addr = oaddr = (vm_offset_t)uap->addr;
	osize = (vm_size_t)uap->len;
	/*
	 * Region must be entirely contained in a single entry
	 */
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+osize,
	    TRUE))
		return(EINVAL);
	/*
	 * Determine the object associated with that entry
	 * (object is returned locked on KERN_SUCCESS)
	 */
	rv = vm_region(&p->p_vmspace->vm_map, &addr, &size, &prot, &mprot,
		       &inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return(EINVAL);
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: region: object %x addr %x size %d objoff %d\n",
		       object, addr, size, objoff);
#endif
	/*
	 * Do not msync non-vnode-backed objects.
	 */
	if (object->internal || object->pager == NULL ||
	    object->pager->pg_type != PG_VNODE) {
		vm_object_unlock(object);
		return(EINVAL);
	}
	objoff += oaddr - addr;
	if (osize == 0)
		osize = size;
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: cleaning/flushing object range [%x-%x)\n",
		       objoff, objoff+osize);
#endif
	if (prot & VM_PROT_WRITE)
		vm_object_page_clean(object, objoff, objoff+osize);
	/*
	 * (XXX)
	 * Bummer, gotta flush all cached pages to ensure
	 * consistency with the file system cache.
	 */
	vm_object_page_remove(object, objoff, objoff+osize);
	vm_object_unlock(object);
	return(0);
}

munmap(p, uap, retval)
	register struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmap(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif

	addr = (vm_offset_t) uap->addr;
	size = (vm_size_t) uap->len;
	if ((addr & page_mask) || (size & page_mask))
		return(EINVAL);
	if (size == 0)
		return(0);
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+size,
	    FALSE))
		return(EINVAL);
	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(&p->p_vmspace->vm_map, addr, addr+size);
	return(0);
}

munmapfd(fd)
{
#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmapfd(%d): fd %d\n", curproc->p_pid, fd);
#endif

	/*
	 * XXX -- should vm_deallocate any regions mapped to this file
	 */
	curproc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}

mprotect(p, uap, retval)
	struct proc *p;
	struct args {
		char	*addr;
		int	len;
		int	prot;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	register vm_prot_t prot;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mprotect(%d): addr %x len %x prot %d\n",
		       p->p_pid, uap->addr, uap->len, uap->prot);
#endif

	addr = (vm_offset_t) uap->addr;
	size = (vm_size_t) uap->len;
	if ((addr & page_mask) || (size & page_mask))
		return(EINVAL);
	/*
	 * Map protections
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot,
	    FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

/* ARGSUSED */
madvise(p, uap, retval)
	struct proc *p;
	struct args {
		char	*addr;
		int	len;
		int	behav;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
mincore(p, uap, retval)
	struct proc *p;
	struct args {
		char	*addr;
		int	len;
		char	*vec;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is:
 *	MAP_FILE: a vnode pointer
 *	MAP_ANON: NULL or a file pointer
 */
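/*
 * For instance, the smmap() handler above calls this as
 *
 *	vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot,
 *		uap->flags, handle, (vm_offset_t)uap->pos);
 *
 * with handle the vnode for a MAP_FILE mapping and the file pointer
 * (possibly NULL) for MAP_ANON.
 */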
vm_mmap(map, addr, size, prot, flags, handle, foff)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	vm_prot_t prot;
	register int flags;
	caddr_t handle;		/* XXX should be vp */
	vm_offset_t foff;
{
	register vm_pager_t pager;
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp;
	int type;
	int rv = KERN_SUCCESS;

	if (size == 0)
		return (0);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		fitit = FALSE;
		(void) vm_deallocate(map, *addr, size);
	}

	/*
	 * Lookup/allocate pager.  All except an unnamed anonymous lookup
	 * gain a reference to ensure continued existence of the object.
	 * (XXX the exception is to appease the pageout daemon)
	 */
	if ((flags & MAP_TYPE) == MAP_ANON)
		type = PG_DFLT;
	else {
		vp = (struct vnode *)handle;
		if (vp->v_type == VCHR) {
			type = PG_DEVICE;
			handle = (caddr_t)vp->v_rdev;
		} else
			type = PG_VNODE;
	}
	pager = vm_pager_allocate(type, handle, size, prot);
	if (pager == NULL)
		return (type == PG_DEVICE ? EINVAL : ENOMEM);
	/*
	 * Find object and release extra reference gained by lookup
	 */
	object = vm_object_lookup(pager);
	vm_object_deallocate(object);

	/*
	 * Anonymous memory.
	 */
	if ((flags & MAP_TYPE) == MAP_ANON) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, TRUE);
		if (rv != KERN_SUCCESS) {
			if (handle == NULL)
				vm_pager_deallocate(pager);
			else
				vm_object_deallocate(object);
			goto out;
		}
		/*
		 * Don't cache anonymous objects.
		 * Loses the reference gained by vm_pager_allocate.
		 */
		(void) pager_cache(object, FALSE);
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Must be type MAP_FILE.
	 * Distinguish between character special and regular files.
	 */
	else if (vp->v_type == VCHR) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, FALSE);
		/*
		 * Uncache the object and lose the reference gained
		 * by vm_pager_allocate().  If the call to
		 * vm_allocate_with_pager() was successful, then we
		 * gained an additional reference ensuring the object
		 * will continue to exist.  If the call failed, then
		 * the deallocate call below will terminate the
		 * object, which is fine.
		 */
		(void) pager_cache(object, FALSE);
		if (rv != KERN_SUCCESS)
			goto out;
	}
	/*
	 * A regular file
	 */
	else {
#ifdef DEBUG
		if (object == NULL)
			printf("vm_mmap: no object: vp %x, pager %x\n",
			       vp, pager);
#endif
		/*
		 * Map it directly.
		 * Allows modifications to go out to the vnode.
		 */
		if (flags & MAP_SHARED) {
			rv = vm_allocate_with_pager(map, addr, size,
						    fitit, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			/*
			 * Don't cache the object.  This is the easiest way
			 * of ensuring that data gets back to the filesystem
			 * because vnode_pager_deallocate() will fsync the
			 * vnode.  pager_cache() will lose the extra ref.
			 */
			if (prot & VM_PROT_WRITE)
				pager_cache(object, FALSE);
			else
				vm_object_deallocate(object);
		}
		/*
		 * Copy-on-write of file.  Two flavors.
		 * MAP_COPY is true COW, you essentially get a snapshot of
		 * the region at the time of mapping.  MAP_PRIVATE means only
		 * that your changes are not reflected back to the object.
		 * Changes made by others will be seen.
		 */
		else {
			vm_map_t tmap;
			vm_offset_t off;

			/* locate and allocate the target address space */
			rv = vm_map_find(map, NULL, (vm_offset_t)0,
					 addr, size, fitit);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
					     VM_MIN_ADDRESS+size, TRUE);
			off = VM_MIN_ADDRESS;
			rv = vm_allocate_with_pager(tmap, &off, size,
						    TRUE, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				vm_map_deallocate(tmap);
				goto out;
			}
			/*
			 * (XXX)
			 * MAP_PRIVATE implies that we see changes made by
			 * others.  To ensure that, we need to guarantee that
			 * no copy object is created (otherwise original
			 * pages would be pushed to the copy object and we
			 * would never see changes made by others).  We
			 * totally sleaze it right now by marking the object
			 * internal temporarily.
			 */
			if ((flags & MAP_COPY) == 0)
				object->internal = TRUE;
			rv = vm_map_copy(map, tmap, *addr, size, off,
					 FALSE, FALSE);
			object->internal = FALSE;
			/*
			 * (XXX)
			 * My oh my, this only gets worse...
			 * Force creation of a shadow object so that
			 * vm_map_fork will do the right thing.
			 */
			if ((flags & MAP_COPY) == 0) {
				vm_map_t tmap;
				vm_map_entry_t tentry;
				vm_object_t tobject;
				vm_offset_t toffset;
				vm_prot_t tprot;
				boolean_t twired, tsu;

				tmap = map;
				vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
					      &tentry, &tobject, &toffset,
					      &tprot, &twired, &tsu);
				vm_map_lookup_done(tmap, tentry);
			}
			/*
			 * (XXX)
			 * Map copy code cannot detect sharing unless a
			 * sharing map is involved.  So we cheat and write
			 * protect everything ourselves.  Note we cannot
			 * use vm_object_pmap_copy() because that relies
			 * on the page copy_on_write bit which isn't
			 * always accurate with shared objects.
			 */
			vm_object_pmap_force_copy(object, (vm_offset_t)foff,
					    (vm_offset_t)foff+size);
			vm_object_deallocate(object);
			vm_map_deallocate(tmap);
			if (rv != KERN_SUCCESS)
				goto out;
		}
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Correct protection (default is VM_PROT_ALL).
	 * Note that we set the maximum protection.  This may not be
	 * entirely correct.  Maybe the maximum protection should be based
	 * on the object permissions where it makes sense (e.g. a vnode).
	 *
	 * Changed my mind: leave max prot at VM_PROT_ALL.
	 */
	if (prot != VM_PROT_ALL) {
		rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & MAP_SHARED) {
		rv = vm_inherit(map, *addr, size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
out:
#ifdef DEBUG
	if (mmapdebug & MDB_MAPIT)
		printf("vm_mmap: rv %d\n", rv);
#endif
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}

/*
 * Internal bastardized version of MACH's vm_region system call.
 * Given address and size it returns map attributes as well
 * as the (locked) object mapped at that location.
 */
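/*
 * Within this file it is used only by msync() above, which relies on it
 * to find the object (and the offset within it) backing the region being
 * synced.
 */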
vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff)
	vm_map_t	map;
	vm_offset_t	*addr;		/* IN/OUT */
	vm_size_t	*size;		/* OUT */
	vm_prot_t	*prot;		/* OUT */
	vm_prot_t	*max_prot;	/* OUT */
	vm_inherit_t	*inheritance;	/* OUT */
	boolean_t	*shared;	/* OUT */
	vm_object_t	*object;	/* OUT */
	vm_offset_t	*objoff;	/* OUT */
{
	vm_map_entry_t	tmp_entry;
	register
	vm_map_entry_t	entry;
	register
	vm_offset_t	tmp_offset;
	vm_offset_t	start;

	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	start = *addr;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->next) == &map->header) {
			vm_map_unlock_read(map);
			return(KERN_NO_SPACE);
		}
		start = entry->start;
		*addr = start;
	} else
		entry = tmp_entry;

	*prot = entry->protection;
	*max_prot = entry->max_protection;
	*inheritance = entry->inheritance;

	tmp_offset = entry->offset + (start - entry->start);
	*size = (entry->end - start);

	if (entry->is_a_map) {
		register vm_map_t share_map;
		vm_size_t share_size;

		share_map = entry->object.share_map;

		vm_map_lock_read(share_map);
		(void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry);

		if ((share_size = (tmp_entry->end - tmp_offset)) < *size)
			*size = share_size;

		vm_object_lock(tmp_entry->object.vm_object);
		*object = tmp_entry->object.vm_object;
		*objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start);

		*shared = (share_map->ref_count != 1);
		vm_map_unlock_read(share_map);
	} else {
		vm_object_lock(entry->object.vm_object);
		*object = entry->object.vm_object;
		*objoff = tmp_offset;

		*shared = FALSE;
	}

	vm_map_unlock_read(map);

	return(KERN_SUCCESS);
}

/*
 * Yet another bastard routine.
 */
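/*
 * Looks up (or allocates and caches) the object backing `pager', flags it
 * internal or not as requested, and enters it in the map at *addr via
 * vm_map_find().  The object reference is dropped if the map insertion
 * fails.
 */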
vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal)
	register vm_map_t	map;
	register vm_offset_t	*addr;
	register vm_size_t	size;
	boolean_t		fitit;
	vm_pager_t		pager;
	vm_offset_t		poffset;
	boolean_t		internal;
{
	register vm_object_t	object;
	register int		result;

	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	*addr = trunc_page(*addr);
	size = round_page(size);

	/*
	 *	Lookup the pager/paging-space in the object cache.
	 *	If it's not there, then create a new object and cache
	 *	it.
	 */
	object = vm_object_lookup(pager);
	vm_stat.lookups++;
	if (object == NULL) {
		object = vm_object_allocate(size);
		vm_object_enter(object, pager);
	} else
		vm_stat.hits++;
	object->internal = internal;

	result = vm_map_find(map, object, poffset, addr, size, fitit);
	if (result != KERN_SUCCESS)
		vm_object_deallocate(object);
	else if (pager != NULL)
		vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
	return(result);
}

/*
 * XXX: this routine belongs in vm_map.c.
 *
 * Returns TRUE if the range [start - end) is allocated in either
 * a single entry (single_entry == TRUE) or multiple contiguous
 * entries (single_entry == FALSE).
 *
 * start and end should be page aligned.
 */
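/*
 * msync() above uses the single_entry == TRUE form; munmap() uses the
 * multiple-entry (FALSE) form.
 */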
boolean_t
vm_map_is_allocated(map, start, end, single_entry)
	vm_map_t map;
	vm_offset_t start, end;
	boolean_t single_entry;
{
	vm_map_entry_t mapent;
	register vm_offset_t nend;

	vm_map_lock_read(map);

	/*
	 * Start address not in any entry
	 */
	if (!vm_map_lookup_entry(map, start, &mapent)) {
		vm_map_unlock_read(map);
		return (FALSE);
	}
	/*
	 * Find the maximum stretch of contiguously allocated space
	 */
	nend = mapent->end;
	if (!single_entry) {
		mapent = mapent->next;
		while (mapent != &map->header && mapent->start == nend) {
			nend = mapent->end;
			mapent = mapent->next;
		}
	}

	vm_map_unlock_read(map);
	return (end <= nend);
}

#include "../vm/vm_page.h"

/*
 * Doesn't trust the COW bit in the page structure.
 * vm_fault can improperly set it.
 */
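/*
 * Write-protects (via pmap_copy_on_write) every resident page of the
 * object in the range [start, end) and marks it copy_on_write.  Used by
 * the copy-on-write file mapping path in vm_mmap() above.
 */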
vm_object_pmap_force_copy(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p;

	if (object == NULL)
		return;

	vm_object_lock(object);
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if (start <= p->offset && p->offset < end) {
			pmap_copy_on_write(VM_PAGE_TO_PHYS(p));
			p->copy_on_write = TRUE;
		}
		p = (vm_page_t) queue_next(&p->listq);
	}
	vm_object_unlock(object);
}