/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_mmap.c 1.3 90/01/21$
 *
 *	@(#)vm_mmap.c	7.1 (Berkeley) 12/05/90
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include "param.h"
#include "systm.h"
#include "user.h"
#include "proc.h"
#include "vnode.h"
#include "specdev.h"
#include "file.h"
#include "mman.h"
#include "conf.h"

#include "../vm/vm_param.h"
#include "../vm/vm_map.h"
#include "../vm/vm_pager.h"
#include "../vm/vm_prot.h"

#ifdef DEBUG
int mmapdebug = 0;
#define MDB_FOLLOW	0x01
#define MDB_SYNC	0x02
#define MDB_MAPIT	0x04
#endif

/* ARGSUSED */
getpagesize(p, uap, retval)
	struct proc *p;
	struct args *uap;
	int *retval;
{

	*retval = NBPG * CLSIZE;
	return (0);
}

/* ARGSUSED */
sbrk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
sstk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

smmap(p, uap, retval)
	register struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
		int	prot;
		int	flags;
		int	fd;
		off_t	pos;
	} *uap;
	int *retval;
{
	struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size;
	vm_prot_t prot;
	caddr_t handle;
	int mtype, error;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mmap(%d): addr %x len %x prot %x flg %x fd %d pos %x\n",
		       p->p_pid, uap->addr, uap->len, uap->prot,
		       uap->flags, uap->fd, uap->pos);
#endif
	/*
	 * Make sure one of the sharing types is specified
	 */
	mtype = uap->flags & MAP_TYPE;
	switch (mtype) {
	case MAP_FILE:
	case MAP_ANON:
		break;
	default:
		return(EINVAL);
	}
	/*
	 * Address (if FIXED) and size must be page aligned
	 */
	size = (vm_size_t)uap->len;
	addr = (vm_offset_t)uap->addr;
	if ((size & page_mask) ||
	    (uap->flags & MAP_FIXED) && (addr & page_mask))
		return(EINVAL);
	/*
	 * Mapping file or named anonymous, get fp for validation
	 */
	if (mtype == MAP_FILE || uap->fd != -1) {
		if ((unsigned)uap->fd >= NOFILE ||
		    (fp = u.u_ofile[uap->fd]) == NULL)
			return(EBADF);
	}
	/*
	 * If we are mapping a file we need to check various
	 * file/vnode related things.
	 */
	if (mtype == MAP_FILE) {
		/*
		 * Obtain vnode and make sure it is of appropriate type
		 */
		if (fp->f_type != DTYPE_VNODE)
			return(EINVAL);
		vp = (struct vnode *)fp->f_data;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			return(EINVAL);
		/*
		 * Ensure that file protection and desired protection
		 * are compatible.  Note that we only worry about writability
		 * if mapping is shared.
		 */
		if ((uap->prot & PROT_READ) && (fp->f_flag & FREAD) == 0 ||
		    ((uap->flags & MAP_SHARED) &&
		     (uap->prot & PROT_WRITE) && (fp->f_flag & FWRITE) == 0))
			return(EACCES);
		handle = (caddr_t)vp;
	} else if (uap->fd != -1)
		handle = (caddr_t)fp;
	else
		handle = NULL;
	/*
	 * Map protections to MACH style
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	error = vm_mmap(p->p_map, &addr, size, prot,
			uap->flags, handle, (vm_offset_t)uap->pos);
	if (error == 0)
		*retval = (int) addr;
	return(error);
}
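
/*
 * Example (illustrative sketch, not part of the original source):
 * a process mapping the first four pages of a file opened for
 * reading, at a kernel-chosen address, might issue
 *
 *	addr = mmap((caddr_t)0, 4 * NBPG * CLSIZE, PROT_READ,
 *		    MAP_FILE|MAP_PRIVATE, fd, (off_t)0);
 *
 * which arrives here with uap->flags = MAP_FILE|MAP_PRIVATE and
 * uap->prot = PROT_READ, passes the MAP_TYPE and alignment checks
 * above, and is handed to vm_mmap() with the file's vnode as the
 * handle.
 */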

msync(p, uap, retval)
	struct proc *p;
	struct args {
		char	*addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr, objoff, oaddr;
	vm_size_t size, osize;
	vm_prot_t prot, mprot;
	vm_inherit_t inherit;
	vm_object_t object;
	boolean_t shared;
	int rv;

#ifdef DEBUG
	if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
		printf("msync(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif
	if (((int)uap->addr & page_mask) || (uap->len & page_mask))
		return(EINVAL);
	addr = oaddr = (vm_offset_t)uap->addr;
	osize = (vm_size_t)uap->len;
	/*
	 * Region must be entirely contained in a single entry
	 */
	if (!vm_map_is_allocated(p->p_map, addr, addr+osize, TRUE))
		return(EINVAL);
	/*
	 * Determine the object associated with that entry
	 * (object is returned locked on KERN_SUCCESS)
	 */
	rv = vm_region(p->p_map, &addr, &size, &prot, &mprot,
		       &inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return(EINVAL);
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: region: object %x addr %x size %d objoff %d\n",
		       object, addr, size, objoff);
#endif
	/*
	 * Do not msync objects that are not vnode-backed.
	 */
	if (object->internal || object->pager == vm_pager_null ||
	    object->pager->pg_type != PG_VNODE) {
		vm_object_unlock(object);
		return(EINVAL);
	}
	objoff += oaddr - addr;
	if (osize == 0)
		osize = size;
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: cleaning/flushing object range [%x-%x)\n",
		       objoff, objoff+osize);
#endif
	if (prot & VM_PROT_WRITE)
		vm_object_page_clean(object, objoff, objoff+osize);
	/*
	 * (XXX)
	 * Bummer, gotta flush all cached pages to ensure
	 * consistency with the file system cache.
	 */
	vm_object_page_remove(object, objoff, objoff+osize);
	vm_object_unlock(object);
	return(0);
}
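
/*
 * Example (illustrative sketch): after modifying a MAP_SHARED file
 * mapping, a process can push the changes to the filesystem with
 *
 *	msync(addr, len);
 *
 * For a writable region this cleans the backing object's pages via
 * vm_object_page_clean() and then removes the cached pages so that
 * later faults go back to the filesystem.  A len of 0 covers the
 * rest of the map entry containing addr; both addr and len must be
 * page aligned.
 */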

munmap(p, uap, retval)
	register struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmap(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif

	addr = (vm_offset_t) uap->addr;
	size = (vm_size_t) uap->len;
	if ((addr & page_mask) || (size & page_mask))
		return(EINVAL);
	if (size == 0)
		return(0);
	if (!vm_map_is_allocated(p->p_map, addr, addr+size, FALSE))
		return(EINVAL);
	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(p->p_map, addr, addr+size);
	return(0);
}

munmapfd(fd)
{
#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmapfd(%d): fd %d\n", u.u_procp->p_pid, fd);
#endif

	/*
	 * XXX -- should vm_deallocate any regions mapped to this file
	 */
	u.u_pofile[fd] &= ~UF_MAPPED;
}

mprotect(p, uap, retval)
	struct proc *p;
	struct args {
		char	*addr;
		int	len;
		int	prot;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	register vm_prot_t prot;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mprotect(%d): addr %x len %x prot %d\n",
		       p->p_pid, uap->addr, uap->len, uap->prot);
#endif

	addr = (vm_offset_t) uap->addr;
	size = (vm_size_t) uap->len;
	if ((addr & page_mask) || (size & page_mask))
		return(EINVAL);
	/*
	 * Map protections
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	switch (vm_map_protect(p->p_map, addr, addr+size, prot, FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
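
/*
 * Example (illustrative sketch): revoking write access from a
 * previously writable mapping,
 *
 *	mprotect(addr, len, PROT_READ);
 *
 * translates PROT_READ to VM_PROT_READ above and applies it to
 * [addr, addr+len) with vm_map_protect(); asking for more than the
 * region's maximum protection comes back as EACCES.
 */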

/* ARGSUSED */
madvise(p, uap, retval)
	struct proc *p;
	struct args {
		char	*addr;
		int	len;
		int	behav;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
mincore(p, uap, retval)
	struct proc *p;
	struct args {
		char	*addr;
		int	len;
		char	*vec;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is:
 *	MAP_FILE: a vnode pointer
 *	MAP_ANON: NULL or a file pointer
 */
vm_mmap(map, addr, size, prot, flags, handle, foff)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	vm_prot_t prot;
	register int flags;
	caddr_t handle;		/* XXX should be vp */
	vm_offset_t foff;
{
	register vm_pager_t pager;
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp;
	int type;
	int rv = KERN_SUCCESS;

	if (size == 0)
		return (0);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		fitit = FALSE;
		(void) vm_deallocate(map, *addr, size);
	}

	/*
	 * Lookup/allocate pager.  All except an unnamed anonymous lookup
	 * gain a reference to ensure continued existence of the object.
	 * (XXX the exception is to appease the pageout daemon)
	 */
	if ((flags & MAP_TYPE) == MAP_ANON)
		type = PG_DFLT;
	else {
		vp = (struct vnode *)handle;
		if (vp->v_type == VCHR) {
			type = PG_DEVICE;
			handle = (caddr_t)vp->v_rdev;
		} else
			type = PG_VNODE;
	}
	pager = vm_pager_allocate(type, handle, size, prot);
	if (pager == VM_PAGER_NULL)
		return (type == PG_DEVICE ? EINVAL : ENOMEM);
	/*
	 * Find object and release extra reference gained by lookup
	 */
	object = vm_object_lookup(pager);
	vm_object_deallocate(object);

	/*
	 * Anonymous memory.
	 */
	if ((flags & MAP_TYPE) == MAP_ANON) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, TRUE);
		if (rv != KERN_SUCCESS) {
			if (handle == NULL)
				vm_pager_deallocate(pager);
			else
				vm_object_deallocate(object);
			goto out;
		}
		/*
		 * Don't cache anonymous objects.
		 * Loses the reference gained by vm_pager_allocate.
		 */
		(void) pager_cache(object, FALSE);
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
			       u.u_procp->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Must be type MAP_FILE.
	 * Distinguish between character special and regular files.
	 */
	else if (vp->v_type == VCHR) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, FALSE);
		/*
		 * Uncache the object and lose the reference gained
		 * by vm_pager_allocate().  If the call to
		 * vm_allocate_with_pager() was successful, then we
		 * gained an additional reference ensuring the object
		 * will continue to exist.  If the call failed then
		 * the deallocate call below will terminate the
		 * object, which is fine.
		 */
		(void) pager_cache(object, FALSE);
		if (rv != KERN_SUCCESS)
			goto out;
	}
	/*
	 * A regular file
	 */
	else {
#ifdef DEBUG
		if (object == VM_OBJECT_NULL)
			printf("vm_mmap: no object: vp %x, pager %x\n",
			       vp, pager);
#endif
		/*
		 * Map it directly.
		 * Allows modifications to go out to the vnode.
		 */
		if (flags & MAP_SHARED) {
			rv = vm_allocate_with_pager(map, addr, size,
						    fitit, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			/*
			 * Don't cache the object.  This is the easiest way
			 * of ensuring that data gets back to the filesystem
			 * because vnode_pager_deallocate() will fsync the
			 * vnode.  pager_cache() will lose the extra ref.
			 */
			if (prot & VM_PROT_WRITE)
				pager_cache(object, FALSE);
			else
				vm_object_deallocate(object);
		}
		/*
		 * Copy-on-write of file.  Two flavors:
		 * MAP_COPY is true COW; you essentially get a snapshot of
		 * the region at the time of mapping.  MAP_PRIVATE means only
		 * that your changes are not reflected back to the object.
		 * Changes made by others will be seen.
		 */
		else {
			vm_map_t tmap;
			vm_offset_t off;

			/* locate and allocate the target address space */
			rv = vm_map_find(map, VM_OBJECT_NULL, (vm_offset_t)0,
					 addr, size, fitit);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
					     VM_MIN_ADDRESS+size, TRUE);
			off = VM_MIN_ADDRESS;
			rv = vm_allocate_with_pager(tmap, &off, size,
						    TRUE, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				vm_map_deallocate(tmap);
				goto out;
			}
			/*
			 * (XXX)
			 * MAP_PRIVATE implies that we see changes made by
			 * others.  To ensure that, we need to guarantee that
			 * no copy object is created (otherwise original
			 * pages would be pushed to the copy object and we
			 * would never see changes made by others).  We
			 * totally sleaze it right now by marking the object
			 * internal temporarily.
			 */
			if ((flags & MAP_COPY) == 0)
				object->internal = TRUE;
			rv = vm_map_copy(map, tmap, *addr, size, off,
					 FALSE, FALSE);
			object->internal = FALSE;
			/*
			 * (XXX)
			 * My oh my, this only gets worse...
			 * Force creation of a shadow object so that
			 * vm_map_fork will do the right thing.
			 */
			if ((flags & MAP_COPY) == 0) {
				vm_map_t tmap;
				vm_map_entry_t tentry;
				vm_object_t tobject;
				vm_offset_t toffset;
				vm_prot_t tprot;
				boolean_t twired, tsu;

				tmap = map;
				vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
					      &tentry, &tobject, &toffset,
					      &tprot, &twired, &tsu);
				vm_map_lookup_done(tmap, tentry);
			}
			/*
			 * (XXX)
			 * Map copy code cannot detect sharing unless a
			 * sharing map is involved.  So we cheat and write
			 * protect everything ourselves.  Note we cannot
			 * use vm_object_pmap_copy() because that relies
			 * on the page copy_on_write bit which isn't
			 * always accurate with shared objects.
			 */
			vm_object_pmap_force_copy(object, (vm_offset_t)foff,
					    (vm_offset_t)foff+size);
			vm_object_deallocate(object);
			vm_map_deallocate(tmap);
			if (rv != KERN_SUCCESS)
				goto out;
		}
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
			       u.u_procp->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Correct protection (default is VM_PROT_ALL).
	 * Note that we set the maximum protection.  This may not be
	 * entirely correct.  Maybe the maximum protection should be based
	 * on the object permissions where it makes sense (e.g. a vnode).
	 *
	 * Changed my mind: leave max prot at VM_PROT_ALL.
	 */
	if (prot != VM_PROT_ALL) {
		rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & MAP_SHARED) {
		rv = vm_inherit(map, *addr, size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
out:
#ifdef DEBUG
	if (mmapdebug & MDB_MAPIT)
		printf("vm_mmap: rv %d\n", rv);
#endif
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
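
/*
 * Example (hypothetical sketch of an internal caller; the exact
 * arguments used by exec or System V shared memory are not shown in
 * this file): an anonymous, shared, fixed mapping might be set up
 * with something like
 *
 *	addr = fixedva;
 *	error = vm_mmap(p->p_map, &addr, size, VM_PROT_ALL,
 *			MAP_ANON|MAP_SHARED|MAP_FIXED, (caddr_t)NULL,
 *			(vm_offset_t)0);
 *
 * which takes the PG_DFLT pager path above and marks the region
 * VM_INHERIT_SHARE so children share it.
 */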

/*
 * Internal bastardized version of Mach's vm_region system call.
 * Given an address and size, it returns map attributes as well
 * as the (locked) object mapped at that location.
 */
vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff)
	vm_map_t	map;
	vm_offset_t	*addr;		/* IN/OUT */
	vm_size_t	*size;		/* OUT */
	vm_prot_t	*prot;		/* OUT */
	vm_prot_t	*max_prot;	/* OUT */
	vm_inherit_t	*inheritance;	/* OUT */
	boolean_t	*shared;	/* OUT */
	vm_object_t	*object;	/* OUT */
	vm_offset_t	*objoff;	/* OUT */
{
	vm_map_entry_t	tmp_entry;
	register
	vm_map_entry_t	entry;
	register
	vm_offset_t	tmp_offset;
	vm_offset_t	start;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	start = *addr;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->next) == &map->header) {
			vm_map_unlock_read(map);
			return(KERN_NO_SPACE);
		}
		start = entry->start;
		*addr = start;
	} else
		entry = tmp_entry;

	*prot = entry->protection;
	*max_prot = entry->max_protection;
	*inheritance = entry->inheritance;

	tmp_offset = entry->offset + (start - entry->start);
	*size = (entry->end - start);

	if (entry->is_a_map) {
		register vm_map_t share_map;
		vm_size_t share_size;

		share_map = entry->object.share_map;

		vm_map_lock_read(share_map);
		(void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry);

		if ((share_size = (tmp_entry->end - tmp_offset)) < *size)
			*size = share_size;

		vm_object_lock(tmp_entry->object);
		*object = tmp_entry->object.vm_object;
		*objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start);

		*shared = (share_map->ref_count != 1);
		vm_map_unlock_read(share_map);
	} else {
		vm_object_lock(entry->object);
		*object = entry->object.vm_object;
		*objoff = tmp_offset;

		*shared = FALSE;
	}

	vm_map_unlock_read(map);

	return(KERN_SUCCESS);
}
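
/*
 * Example (illustrative only): msync() above uses this to go from a
 * user address to the backing object,
 *
 *	rv = vm_region(p->p_map, &addr, &size, &prot, &mprot,
 *		       &inherit, &shared, &object, &objoff);
 *
 * On KERN_SUCCESS the object is returned locked, objoff is the
 * object offset corresponding to addr, and size runs from addr to
 * the end of the containing map entry (or share map entry).
 */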

/*
 * Yet another bastard routine.
 */
vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal)
	register vm_map_t	map;
	register vm_offset_t	*addr;
	register vm_size_t	size;
	boolean_t		fitit;
	vm_pager_t		pager;
	vm_offset_t		poffset;
	boolean_t		internal;
{
	register vm_object_t	object;
	register int		result;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	*addr = trunc_page(*addr);
	size = round_page(size);

	/*
	 *	Lookup the pager/paging-space in the object cache.
	 *	If it's not there, then create a new object and cache
	 *	it.
	 */
	object = vm_object_lookup(pager);
	vm_stat.lookups++;
	if (object == VM_OBJECT_NULL) {
		object = vm_object_allocate(size);
		vm_object_enter(object, pager);
	} else
		vm_stat.hits++;
	object->internal = internal;

	result = vm_map_find(map, object, poffset, addr, size, fitit);
	if (result != KERN_SUCCESS)
		vm_object_deallocate(object);
	else if (pager != vm_pager_null)
		vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
	return(result);
}

/*
 * XXX: this routine belongs in vm_map.c.
 *
 * Returns TRUE if the range [start, end) is allocated in either
 * a single entry (single_entry == TRUE) or multiple contiguous
 * entries (single_entry == FALSE).
 *
 * start and end should be page aligned.
 */
boolean_t
vm_map_is_allocated(map, start, end, single_entry)
	vm_map_t map;
	vm_offset_t start, end;
	boolean_t single_entry;
{
	vm_map_entry_t mapent;
	register vm_offset_t nend;

	vm_map_lock_read(map);

	/*
	 * Start address not in any entry
	 */
	if (!vm_map_lookup_entry(map, start, &mapent)) {
		vm_map_unlock_read(map);
		return (FALSE);
	}
	/*
	 * Find the maximum stretch of contiguously allocated space
	 */
	nend = mapent->end;
	if (!single_entry) {
		mapent = mapent->next;
		while (mapent != &map->header && mapent->start == nend) {
			nend = mapent->end;
			mapent = mapent->next;
		}
	}

	vm_map_unlock_read(map);
	return (end <= nend);
}
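
/*
 * Example (illustrative only): with two adjacent entries covering
 * [A,B) and [B,C),
 *
 *	vm_map_is_allocated(map, A, C, FALSE)	is TRUE
 *	vm_map_is_allocated(map, A, C, TRUE)	is FALSE
 *
 * since the range spans more than one entry; a hole anywhere in
 * [A,C) makes both calls return FALSE.
 */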

#include "../vm/vm_page.h"

/*
 * Doesn't trust the COW bit in the page structure.
 * vm_fault can improperly set it.
 */
void
vm_object_pmap_force_copy(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p;

	if (object == VM_OBJECT_NULL)
		return;

	vm_object_lock(object);
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if (start <= p->offset && p->offset < end) {
			pmap_copy_on_write(VM_PAGE_TO_PHYS(p));
			p->copy_on_write = TRUE;
		}
		p = (vm_page_t) queue_next(&p->listq);
	}
	vm_object_unlock(object);
}
819