1 /*	$OpenBSD: uvm_mmap.c,v 1.161 2020/03/04 21:15:39 kettenis Exp $	*/
2 /*	$NetBSD: uvm_mmap.c,v 1.49 2001/02/18 21:19:08 chs Exp $	*/
3 
4 /*
5  * Copyright (c) 1997 Charles D. Cranor and Washington University.
6  * Copyright (c) 1991, 1993 The Regents of the University of California.
7  * Copyright (c) 1988 University of Utah.
8  *
9  * All rights reserved.
10  *
11  * This code is derived from software contributed to Berkeley by
12  * the Systems Programming Group of the University of Utah Computer
13  * Science Department.
14  *
15  * Redistribution and use in source and binary forms, with or without
16  * modification, are permitted provided that the following conditions
17  * are met:
18  * 1. Redistributions of source code must retain the above copyright
19  *    notice, this list of conditions and the following disclaimer.
20  * 2. Redistributions in binary form must reproduce the above copyright
21  *    notice, this list of conditions and the following disclaimer in the
22  *    documentation and/or other materials provided with the distribution.
23  * 3. All advertising materials mentioning features or use of this software
24  *    must display the following acknowledgement:
25  *      This product includes software developed by the Charles D. Cranor,
26  *	Washington University, University of California, Berkeley and
27  *	its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  *
44  * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
45  *      @(#)vm_mmap.c   8.5 (Berkeley) 5/19/94
46  * from: Id: uvm_mmap.c,v 1.1.2.14 1998/01/05 21:04:26 chuck Exp
47  */
48 
49 /*
50  * uvm_mmap.c: system call interface into VM system, plus kernel vm_mmap
51  * function.
52  */
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/fcntl.h>
56 #include <sys/file.h>
57 #include <sys/filedesc.h>
58 #include <sys/resourcevar.h>
59 #include <sys/mman.h>
60 #include <sys/mount.h>
61 #include <sys/proc.h>
62 #include <sys/malloc.h>
63 #include <sys/vnode.h>
64 #include <sys/conf.h>
65 #include <sys/signalvar.h>
66 #include <sys/syslog.h>
67 #include <sys/stat.h>
68 #include <sys/specdev.h>
69 #include <sys/stdint.h>
70 #include <sys/pledge.h>
71 #include <sys/unistd.h>		/* for KBIND* */
72 #include <sys/user.h>
73 
74 #include <machine/exec.h>	/* for __LDPGSZ */
75 
76 #include <sys/syscallargs.h>
77 
78 #include <uvm/uvm.h>
79 #include <uvm/uvm_device.h>
80 #include <uvm/uvm_vnode.h>
81 
82 int uvm_mmapanon(vm_map_t, vaddr_t *, vsize_t, vm_prot_t, vm_prot_t, int,
83     vsize_t, struct proc *);
84 int uvm_mmapfile(vm_map_t, vaddr_t *, vsize_t, vm_prot_t, vm_prot_t, int,
85     struct vnode *, voff_t, vsize_t, struct proc *);
86 
87 
88 /*
89  * Page align addr and size, returning EINVAL on wraparound.
90  */
91 #define ALIGN_ADDR(addr, size, pageoff)	do {				\
92 	pageoff = (addr & PAGE_MASK);					\
93 	if (pageoff != 0) {						\
94 		if (size > SIZE_MAX - pageoff)				\
95 			return (EINVAL);	/* wraparound */	\
96 		addr -= pageoff;					\
97 		size += pageoff;					\
98 	}								\
99 	if (size != 0) {						\
100 		size = (vsize_t)round_page(size);			\
101 		if (size == 0)						\
102 			return (EINVAL);	/* wraparound */	\
103 	}								\
104 } while (0)
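
/*
 * Illustration (not part of the original source, assumes a 4096-byte
 * page): ALIGN_ADDR(0x1003, 0x20, pageoff) leaves pageoff = 3, rewinds
 * addr to 0x1000 and grows size to 0x23, which round_page() then takes
 * to 0x1000, so the whole page containing the request is covered.  The
 * two EINVAL cases catch size overflowing SIZE_MAX, either when pageoff
 * is added or when round_page() wraps the result to 0.
 */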
105 
106 /*
107  * sys_mquery: provide mapping hints to applications that do fixed mappings
108  *
109  * flags: 0 or MAP_FIXED (MAP_FIXED means we insist on exactly this addr
110  *	and ignore PMAP_PREFER and the like)
111  * addr: hint where we'd like to place the mapping.
112  * size: size of the mapping
113  * fd: fd of the file we want to map
114  * off: offset within the file
115  */
116 int
117 sys_mquery(struct proc *p, void *v, register_t *retval)
118 {
119 	struct sys_mquery_args /* {
120 		syscallarg(void *) addr;
121 		syscallarg(size_t) len;
122 		syscallarg(int) prot;
123 		syscallarg(int) flags;
124 		syscallarg(int) fd;
125 		syscallarg(long) pad;
126 		syscallarg(off_t) pos;
127 	} */ *uap = v;
128 	struct file *fp;
129 	voff_t uoff;
130 	int error;
131 	vaddr_t vaddr;
132 	int flags = 0;
133 	vsize_t size;
134 	vm_prot_t prot;
135 	int fd;
136 
137 	vaddr = (vaddr_t) SCARG(uap, addr);
138 	prot = SCARG(uap, prot);
139 	size = (vsize_t) SCARG(uap, len);
140 	fd = SCARG(uap, fd);
141 
142 	if ((prot & PROT_MASK) != prot)
143 		return (EINVAL);
144 
145 	if (SCARG(uap, flags) & MAP_FIXED)
146 		flags |= UVM_FLAG_FIXED;
147 
148 	if (fd >= 0) {
149 		if ((error = getvnode(p, fd, &fp)) != 0)
150 			return (error);
151 		uoff = SCARG(uap, pos);
152 	} else {
153 		fp = NULL;
154 		uoff = UVM_UNKNOWN_OFFSET;
155 	}
156 
157 	if (vaddr == 0)
158 		vaddr = uvm_map_hint(p->p_vmspace, prot, VM_MIN_ADDRESS,
159 		    VM_MAXUSER_ADDRESS);
160 
161 	error = uvm_map_mquery(&p->p_vmspace->vm_map, &vaddr, size, uoff,
162 	    flags);
163 	if (error == 0)
164 		*retval = (register_t)(vaddr);
165 
166 	if (fp != NULL)
167 		FRELE(fp, p);
168 	return (error);
169 }
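
/*
 * Usage sketch (userland, illustrative only): mquery(2) lets a caller
 * such as ld.so ask where a mapping of a given size could go before
 * committing to it with MAP_FIXED.  Assuming the documented prototype
 * void *mquery(void *addr, size_t len, int prot, int flags, int fd,
 * off_t off), a probe might look like:
 *
 *	void *hint = mquery(want, len, PROT_READ, MAP_FIXED, fd, off);
 *	if (hint != MAP_FAILED)
 *		p = mmap(hint, len, PROT_READ, MAP_FIXED, fd, off);
 *
 * With MAP_FIXED the kernel only reports whether "want" itself is
 * usable; without it, the nearest suitable address is returned.
 */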
170 
171 int	uvm_wxabort;
172 
173 /*
174  * W^X violations are only allowed on permitted filesystems.
175  */
176 static inline int
177 uvm_wxcheck(struct proc *p, char *call)
178 {
179 	struct process *pr = p->p_p;
180 	int wxallowed = (pr->ps_textvp->v_mount &&
181 	    (pr->ps_textvp->v_mount->mnt_flag & MNT_WXALLOWED));
182 
183 	if (wxallowed && (pr->ps_flags & PS_WXNEEDED))
184 		return (0);
185 
186 	if (uvm_wxabort) {
187 		/* Report W^X failures */
188 		if (pr->ps_wxcounter++ == 0)
189 			log(LOG_NOTICE, "%s(%d): %s W^X violation\n",
190 			    pr->ps_comm, pr->ps_pid, call);
191 		/* Send uncatchable SIGABRT for coredump */
192 		sigexit(p, SIGABRT);
193 	}
194 
195 	return (ENOTSUP);
196 }
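
/*
 * Consequence (illustrative): a request such as
 *
 *	mmap(NULL, len, PROT_READ|PROT_WRITE|PROT_EXEC,
 *	    MAP_ANON|MAP_PRIVATE, -1, 0);
 *
 * fails with ENOTSUP unless the executable is marked wxneeded
 * (PS_WXNEEDED) and lives on a filesystem mounted "wxallowed".  When
 * the uvm_wxabort knob is set, the offender is also killed with an
 * uncatchable SIGABRT so that a core dump is produced.
 */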
197 
198 /*
199  * sys_mmap: mmap system call.
200  *
201  * => file offset and address may not be page aligned
202  *    - if MAP_FIXED, offset and address must have the same remainder mod PAGE_SIZE
203  *    - if address isn't page aligned the mapping starts at trunc_page(addr)
204  *      and the return value is adjusted up by the page offset.
205  */
206 int
207 sys_mmap(struct proc *p, void *v, register_t *retval)
208 {
209 	struct sys_mmap_args /* {
210 		syscallarg(void *) addr;
211 		syscallarg(size_t) len;
212 		syscallarg(int) prot;
213 		syscallarg(int) flags;
214 		syscallarg(int) fd;
215 		syscallarg(long) pad;
216 		syscallarg(off_t) pos;
217 	} */ *uap = v;
218 	vaddr_t addr;
219 	struct vattr va;
220 	off_t pos;
221 	vsize_t limit, pageoff, size;
222 	vm_prot_t prot, maxprot;
223 	int flags, fd;
224 	vaddr_t vm_min_address = VM_MIN_ADDRESS;
225 	struct filedesc *fdp = p->p_fd;
226 	struct file *fp = NULL;
227 	struct vnode *vp;
228 	int error;
229 
230 	/* first, extract syscall args from the uap. */
231 	addr = (vaddr_t) SCARG(uap, addr);
232 	size = (vsize_t) SCARG(uap, len);
233 	prot = SCARG(uap, prot);
234 	flags = SCARG(uap, flags);
235 	fd = SCARG(uap, fd);
236 	pos = SCARG(uap, pos);
237 
238 	/*
239 	 * Validate the flags.
240 	 */
241 	if ((prot & PROT_MASK) != prot)
242 		return (EINVAL);
243 	if ((prot & (PROT_WRITE | PROT_EXEC)) == (PROT_WRITE | PROT_EXEC) &&
244 	    (error = uvm_wxcheck(p, "mmap")))
245 		return (error);
246 
247 	if ((flags & MAP_FLAGMASK) != flags)
248 		return (EINVAL);
249 	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
250 		return (EINVAL);
251 	if ((flags & (MAP_FIXED|__MAP_NOREPLACE)) == __MAP_NOREPLACE)
252 		return (EINVAL);
253 	if (flags & MAP_STACK) {
254 		if ((flags & (MAP_ANON|MAP_PRIVATE)) != (MAP_ANON|MAP_PRIVATE))
255 			return (EINVAL);
256 		if (flags & ~(MAP_STACK|MAP_FIXED|MAP_ANON|MAP_PRIVATE))
257 			return (EINVAL);
258 		if (pos != 0)
259 			return (EINVAL);
260 		if ((prot & (PROT_READ|PROT_WRITE)) != (PROT_READ|PROT_WRITE))
261 			return (EINVAL);
262 	}
263 	if (size == 0)
264 		return (EINVAL);
265 
266 	error = pledge_protexec(p, prot);
267 	if (error)
268 		return (error);
269 
270 	/* align file position and save offset.  adjust size. */
271 	ALIGN_ADDR(pos, size, pageoff);
272 
273 	/* now check (MAP_FIXED) or get (!MAP_FIXED) the "addr" */
274 	if (flags & MAP_FIXED) {
275 		/* adjust address by the same amount as we did the offset */
276 		addr -= pageoff;
277 		if (addr & PAGE_MASK)
278 			return (EINVAL);		/* not page aligned */
279 
280 		if (addr > SIZE_MAX - size)
281 			return (EINVAL);		/* no wrapping! */
282 		if (VM_MAXUSER_ADDRESS > 0 &&
283 		    (addr + size) > VM_MAXUSER_ADDRESS)
284 			return (EINVAL);
285 		if (vm_min_address > 0 && addr < vm_min_address)
286 			return (EINVAL);
287 	}
288 
289 	/* check for file mappings (i.e. not anonymous) and verify file. */
290 	if ((flags & MAP_ANON) == 0) {
291 		if ((fp = fd_getfile(fdp, fd)) == NULL)
292 			return (EBADF);
293 
294 		if (fp->f_type != DTYPE_VNODE) {
295 			error = ENODEV;		/* only mmap vnodes! */
296 			goto out;
297 		}
298 		vp = (struct vnode *)fp->f_data;	/* convert to vnode */
299 
300 		if (vp->v_type != VREG && vp->v_type != VCHR &&
301 		    vp->v_type != VBLK) {
302 			error = ENODEV; /* only REG/CHR/BLK support mmap */
303 			goto out;
304 		}
305 
306 		if (vp->v_type == VREG && (pos + size) < pos) {
307 			error = EINVAL;		/* no offset wrapping */
308 			goto out;
309 		}
310 
311 		/* special case: catch SunOS style /dev/zero */
312 		if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
313 			flags |= MAP_ANON;
314 			FRELE(fp, p);
315 			fp = NULL;
316 			goto is_anon;
317 		}
318 
319 		/*
320 		 * Old programs may not select a specific sharing type, so
321 		 * default to an appropriate one.
322 		 */
323 		if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
324 #if defined(DEBUG)
325 			printf("WARNING: defaulted mmap() share type to"
326 			    " %s (pid %d comm %s)\n",
327 			    vp->v_type == VCHR ? "MAP_SHARED" : "MAP_PRIVATE",
328 			    p->p_p->ps_pid, p->p_p->ps_comm);
329 #endif
330 			if (vp->v_type == VCHR)
331 				flags |= MAP_SHARED;	/* for a device */
332 			else
333 				flags |= MAP_PRIVATE;	/* for a file */
334 		}
335 
336 		/*
337 		 * MAP_PRIVATE device mappings don't make sense (and aren't
338 		 * supported anyway).  However, some programs rely on this,
339 		 * so just change it to MAP_SHARED.
340 		 */
341 		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
342 			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
343 		}
344 
345 		/* now check protection */
346 		maxprot = PROT_EXEC;
347 
348 		/* check read access */
349 		if (fp->f_flag & FREAD)
350 			maxprot |= PROT_READ;
351 		else if (prot & PROT_READ) {
352 			error = EACCES;
353 			goto out;
354 		}
355 
356 		/* check write access, shared case first */
357 		if (flags & MAP_SHARED) {
358 			/*
359 			 * If the file is writable, only add PROT_WRITE to
360 			 * maxprot if the file is neither immutable nor
361 			 * append-only.  Otherwise, if PROT_WRITE was
362 			 * requested, return EPERM.
363 			 */
364 			if (fp->f_flag & FWRITE) {
365 				KERNEL_LOCK();
366 				error = VOP_GETATTR(vp, &va, p->p_ucred, p);
367 				KERNEL_UNLOCK();
368 				if (error)
369 					goto out;
370 				if ((va.va_flags & (IMMUTABLE|APPEND)) == 0)
371 					maxprot |= PROT_WRITE;
372 				else if (prot & PROT_WRITE) {
373 					error = EPERM;
374 					goto out;
375 				}
376 			} else if (prot & PROT_WRITE) {
377 				error = EACCES;
378 				goto out;
379 			}
380 		} else {
381 			/* MAP_PRIVATE mappings can always be written to */
382 			maxprot |= PROT_WRITE;
383 		}
384 		if ((flags & __MAP_NOFAULT) != 0 ||
385 		    ((flags & MAP_PRIVATE) != 0 && (prot & PROT_WRITE) != 0)) {
386 			limit = lim_cur(RLIMIT_DATA);
387 			if (limit < size ||
388 			    limit - size < ptoa(p->p_vmspace->vm_dused)) {
389 				error = ENOMEM;
390 				goto out;
391 			}
392 		}
393 		KERNEL_LOCK();
394 		error = uvm_mmapfile(&p->p_vmspace->vm_map, &addr, size, prot,
395 		    maxprot, flags, vp, pos, lim_cur(RLIMIT_MEMLOCK), p);
396 		KERNEL_UNLOCK();
397 	} else {		/* MAP_ANON case */
398 		if (fd != -1)
399 			return EINVAL;
400 
401 is_anon:	/* label for SunOS style /dev/zero */
402 
403 		/* __MAP_NOFAULT only makes sense with a backing object */
404 		if ((flags & __MAP_NOFAULT) != 0)
405 			return EINVAL;
406 
407 		if (prot != PROT_NONE) {
408 			limit = lim_cur(RLIMIT_DATA);
409 			if (limit < size ||
410 			    limit - size < ptoa(p->p_vmspace->vm_dused)) {
411 				return ENOMEM;
412 			}
413 		}
414 
415 		/*
416 		 * We've been treating (MAP_SHARED|MAP_PRIVATE) == 0 as
417 		 * MAP_PRIVATE, so make that clear.
418 		 */
419 		if ((flags & MAP_SHARED) == 0)
420 			flags |= MAP_PRIVATE;
421 
422 		maxprot = PROT_MASK;
423 		error = uvm_mmapanon(&p->p_vmspace->vm_map, &addr, size, prot,
424 		    maxprot, flags, lim_cur(RLIMIT_MEMLOCK), p);
425 	}
426 
427 	if (error == 0)
428 		/* remember to add offset */
429 		*retval = (register_t)(addr + pageoff);
430 
431 out:
432 	if (fp)
433 		FRELE(fp, p);
434 	return (error);
435 }
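
/*
 * Usage sketches (userland, illustrative only): the two common shapes
 * this handler serves are an anonymous private mapping and a
 * file-backed mapping:
 *
 *	buf = mmap(NULL, len, PROT_READ|PROT_WRITE,
 *	    MAP_ANON|MAP_PRIVATE, -1, 0);
 *	img = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 * In the anonymous case fd must be -1; in the file case only VREG,
 * VCHR and VBLK vnodes are accepted and maxprot is derived from how
 * the descriptor was opened (FREAD/FWRITE) and the file's flags.
 */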
436 
437 /*
438  * sys_msync: the msync system call (a front-end for flush)
439  */
440 
441 int
442 sys_msync(struct proc *p, void *v, register_t *retval)
443 {
444 	struct sys_msync_args /* {
445 		syscallarg(void *) addr;
446 		syscallarg(size_t) len;
447 		syscallarg(int) flags;
448 	} */ *uap = v;
449 	vaddr_t addr;
450 	vsize_t size, pageoff;
451 	vm_map_t map;
452 	int flags, uvmflags;
453 
454 	/* extract syscall args from the uap */
455 	addr = (vaddr_t)SCARG(uap, addr);
456 	size = (vsize_t)SCARG(uap, len);
457 	flags = SCARG(uap, flags);
458 
459 	/* sanity check flags */
460 	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
461 			(flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
462 			(flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
463 		return (EINVAL);
464 	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
465 		flags |= MS_SYNC;
466 
467 	/* align the address to a page boundary, and adjust the size accordingly */
468 	ALIGN_ADDR(addr, size, pageoff);
469 	if (addr > SIZE_MAX - size)
470 		return (EINVAL);		/* disallow wrap-around. */
471 
472 	/* get map */
473 	map = &p->p_vmspace->vm_map;
474 
475 	/* translate MS_ flags into PGO_ flags */
476 	uvmflags = PGO_CLEANIT;
477 	if (flags & MS_INVALIDATE)
478 		uvmflags |= PGO_FREE;
479 	if (flags & MS_SYNC)
480 		uvmflags |= PGO_SYNCIO;
481 	else
482 		uvmflags |= PGO_SYNCIO;	 /* XXXCDC: force sync for now! */
483 
484 	return (uvm_map_clean(map, addr, addr+size, uvmflags));
485 }
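
/*
 * Usage sketch (illustrative only): after modifying a MAP_SHARED file
 * mapping, the changes can be pushed back to the underlying vnode with
 *
 *	msync(addr, len, MS_SYNC);
 *
 * MS_ASYNC is accepted but, as noted above, is currently forced to
 * behave synchronously; MS_INVALIDATE additionally frees the cached
 * pages (PGO_FREE) after cleaning them.
 */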
486 
487 /*
488  * sys_munmap: unmap a user's memory
489  */
490 int
491 sys_munmap(struct proc *p, void *v, register_t *retval)
492 {
493 	struct sys_munmap_args /* {
494 		syscallarg(void *) addr;
495 		syscallarg(size_t) len;
496 	} */ *uap = v;
497 	vaddr_t addr;
498 	vsize_t size, pageoff;
499 	vm_map_t map;
500 	vaddr_t vm_min_address = VM_MIN_ADDRESS;
501 	struct uvm_map_deadq dead_entries;
502 
503 	/* get syscall args... */
504 	addr = (vaddr_t) SCARG(uap, addr);
505 	size = (vsize_t) SCARG(uap, len);
506 
507 	/* align address to a page boundary, and adjust size accordingly */
508 	ALIGN_ADDR(addr, size, pageoff);
509 
510 	/*
511 	 * Check for illegal addresses.  Watch out for address wrap...
512 	 * Note that VM_*_ADDRESS are not constants due to casts (argh).
513 	 */
514 	if (addr > SIZE_MAX - size)
515 		return (EINVAL);
516 	if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
517 		return (EINVAL);
518 	if (vm_min_address > 0 && addr < vm_min_address)
519 		return (EINVAL);
520 	map = &p->p_vmspace->vm_map;
521 
522 
523 	vm_map_lock(map);	/* lock map so we can checkprot */
524 
525 	/*
526 	 * interesting system call semantic: make sure entire range is
527 	 * allocated before allowing an unmap.
528 	 */
529 	if (!uvm_map_checkprot(map, addr, addr + size, PROT_NONE)) {
530 		vm_map_unlock(map);
531 		return (EINVAL);
532 	}
533 
534 	TAILQ_INIT(&dead_entries);
535 	uvm_unmap_remove(map, addr, addr + size, &dead_entries, FALSE, TRUE);
536 	vm_map_unlock(map);	/* and unlock */
537 
538 	uvm_unmap_detach(&dead_entries, 0);
539 
540 	return (0);
541 }
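
/*
 * Note (illustrative): this munmap() only succeeds when the whole
 * [addr, addr+len) range is currently mapped; a request that spans an
 * unmapped gap fails with EINVAL rather than silently skipping the gap.
 */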
542 
543 /*
544  * sys_mprotect: the mprotect system call
545  */
546 int
547 sys_mprotect(struct proc *p, void *v, register_t *retval)
548 {
549 	struct sys_mprotect_args /* {
550 		syscallarg(void *) addr;
551 		syscallarg(size_t) len;
552 		syscallarg(int) prot;
553 	} */ *uap = v;
554 	vaddr_t addr;
555 	vsize_t size, pageoff;
556 	vm_prot_t prot;
557 	int error;
558 
559 	/*
560 	 * extract syscall args from uap
561 	 */
562 
563 	addr = (vaddr_t)SCARG(uap, addr);
564 	size = (vsize_t)SCARG(uap, len);
565 	prot = SCARG(uap, prot);
566 
567 	if ((prot & PROT_MASK) != prot)
568 		return (EINVAL);
569 	if ((prot & (PROT_WRITE | PROT_EXEC)) == (PROT_WRITE | PROT_EXEC) &&
570 	    (error = uvm_wxcheck(p, "mprotect")))
571 		return (error);
572 
573 	error = pledge_protexec(p, prot);
574 	if (error)
575 		return (error);
576 
577 	/*
578 	 * align the address to a page boundary, and adjust the size accordingly
579 	 */
580 	ALIGN_ADDR(addr, size, pageoff);
581 	if (addr > SIZE_MAX - size)
582 		return (EINVAL);		/* disallow wrap-around. */
583 
584 	return (uvm_map_protect(&p->p_vmspace->vm_map, addr, addr+size,
585 	    prot, FALSE));
586 }
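
/*
 * Usage sketch (illustrative only): code that has finished writing
 * into a PROT_READ|PROT_WRITE region would typically flip it to
 * executable with
 *
 *	mprotect(code, len, PROT_READ|PROT_EXEC);
 *
 * Asking for PROT_WRITE|PROT_EXEC together goes through the same
 * uvm_wxcheck() as mmap() and normally fails with ENOTSUP.
 */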
587 
588 /*
589  * sys_msyscall: the msyscall system call
590  */
591 int
592 sys_msyscall(struct proc *p, void *v, register_t *retval)
593 {
594 	struct sys_msyscall_args /* {
595 		syscallarg(void *) addr;
596 		syscallarg(size_t) len;
597 	} */ *uap = v;
598 	vaddr_t addr;
599 	vsize_t size, pageoff;
600 
601 	addr = (vaddr_t)SCARG(uap, addr);
602 	size = (vsize_t)SCARG(uap, len);
603 
604 	/*
605 	 * align the address to a page boundary, and adjust the size accordingly
606 	 */
607 	ALIGN_ADDR(addr, size, pageoff);
608 	if (addr > SIZE_MAX - size)
609 		return (EINVAL);		/* disallow wrap-around. */
610 
611 	return (uvm_map_syscall(&p->p_vmspace->vm_map, addr, addr+size));
612 }
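
/*
 * Note (illustrative): msyscall(2) is issued by ld.so to register the
 * region (libc's text) from which system call instructions are
 * permitted; uvm_map_syscall() records that range so syscall entry can
 * later reject calls originating elsewhere.
 */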
613 
614 /*
615  * sys_minherit: the minherit system call
616  */
617 int
618 sys_minherit(struct proc *p, void *v, register_t *retval)
619 {
620 	struct sys_minherit_args /* {
621 		syscallarg(void *) addr;
622 		syscallarg(size_t) len;
623 		syscallarg(int) inherit;
624 	} */ *uap = v;
625 	vaddr_t addr;
626 	vsize_t size, pageoff;
627 	vm_inherit_t inherit;
628 
629 	addr = (vaddr_t)SCARG(uap, addr);
630 	size = (vsize_t)SCARG(uap, len);
631 	inherit = SCARG(uap, inherit);
632 
633 	/*
634 	 * align the address to a page boundary, and adjust the size accordingly
635 	 */
636 	ALIGN_ADDR(addr, size, pageoff);
637 	if (addr > SIZE_MAX - size)
638 		return (EINVAL);		/* disallow wrap-around. */
639 
640 	return (uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr+size,
641 	    inherit));
642 }
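
/*
 * Usage sketch (illustrative only): a buffer holding secrets can be
 * kept out of future children with
 *
 *	minherit(buf, len, MAP_INHERIT_NONE);
 *
 * or handed to them as freshly zeroed memory with MAP_INHERIT_ZERO.
 */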
643 
644 /*
645  * sys_madvise: give advice about memory usage.
646  */
647 /* ARGSUSED */
648 int
649 sys_madvise(struct proc *p, void *v, register_t *retval)
650 {
651 	struct sys_madvise_args /* {
652 		syscallarg(void *) addr;
653 		syscallarg(size_t) len;
654 		syscallarg(int) behav;
655 	} */ *uap = v;
656 	vaddr_t addr;
657 	vsize_t size, pageoff;
658 	int advice, error;
659 
660 	addr = (vaddr_t)SCARG(uap, addr);
661 	size = (vsize_t)SCARG(uap, len);
662 	advice = SCARG(uap, behav);
663 
664 	/*
665 	 * align the address to a page boundary, and adjust the size accordingly
666 	 */
667 	ALIGN_ADDR(addr, size, pageoff);
668 	if (addr > SIZE_MAX - size)
669 		return (EINVAL);		/* disallow wrap-around. */
670 
671 	switch (advice) {
672 	case MADV_NORMAL:
673 	case MADV_RANDOM:
674 	case MADV_SEQUENTIAL:
675 		error = uvm_map_advice(&p->p_vmspace->vm_map, addr,
676 		    addr + size, advice);
677 		break;
678 
679 	case MADV_WILLNEED:
680 		/*
681 		 * Activate all these pages, pre-faulting them in if
682 		 * necessary.
683 		 */
684 		/*
685 		 * XXX IMPLEMENT ME.
686 		 * Should invent a "weak" mode for uvm_fault()
687 		 * which would only do the PGO_LOCKED pgo_get().
688 		 */
689 		return (0);
690 
691 	case MADV_DONTNEED:
692 		/*
693 		 * Deactivate all these pages.  We don't need them
694 		 * any more.  We don't, however, toss the data in
695 		 * the pages.
696 		 */
697 		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
698 		    PGO_DEACTIVATE);
699 		break;
700 
701 	case MADV_FREE:
702 		/*
703 		 * These pages contain no valid data, and may be
704 		 * garbage-collected.  Toss all resources, including
705 		 * any swap space in use.
706 		 */
707 		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
708 		    PGO_FREE);
709 		break;
710 
711 	case MADV_SPACEAVAIL:
712 		/*
713 		 * XXXMRG What is this?  I think it's:
714 		 *
715 		 *	Ensure that we have allocated backing-store
716 		 *	for these pages.
717 		 *
718 		 * This is going to require changes to the page daemon,
719 		 * as it will free swap space allocated to pages in core.
720 		 * There's also what to do for device/file/anonymous memory.
721 		 */
722 		return (EINVAL);
723 
724 	default:
725 		return (EINVAL);
726 	}
727 
728 	return (error);
729 }
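
/*
 * Usage sketch (illustrative only):
 *
 *	madvise(buf, len, MADV_DONTNEED);	deactivate, keep contents
 *	madvise(buf, len, MADV_FREE);		contents may be discarded
 *
 * MADV_WILLNEED is accepted but currently performs no work, and
 * MADV_SPACEAVAIL is rejected with EINVAL.
 */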
730 
731 /*
732  * sys_mlock: memory lock
733  */
734 
735 int
736 sys_mlock(struct proc *p, void *v, register_t *retval)
737 {
738 	struct sys_mlock_args /* {
739 		syscallarg(const void *) addr;
740 		syscallarg(size_t) len;
741 	} */ *uap = v;
742 	vaddr_t addr;
743 	vsize_t size, pageoff;
744 	int error;
745 
746 	/* extract syscall args from uap */
747 	addr = (vaddr_t)SCARG(uap, addr);
748 	size = (vsize_t)SCARG(uap, len);
749 
750 	/* align address to a page boundary and adjust size accordingly */
751 	ALIGN_ADDR(addr, size, pageoff);
752 	if (addr > SIZE_MAX - size)
753 		return (EINVAL);		/* disallow wrap-around. */
754 
755 	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
756 		return (EAGAIN);
757 
758 #ifdef pmap_wired_count
759 	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
760 			lim_cur(RLIMIT_MEMLOCK))
761 		return (EAGAIN);
762 #else
763 	if ((error = suser(p)) != 0)
764 		return (error);
765 #endif
766 
767 	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE,
768 	    0);
769 	return (error == 0 ? 0 : ENOMEM);
770 }
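
/*
 * Usage sketch (illustrative only): wiring a page that holds key
 * material so it is never written to swap:
 *
 *	mlock(key, keylen);
 *
 * The request is bounded by the global uvmexp.wiredmax limit and,
 * where pmap_wired_count is available, by the caller's RLIMIT_MEMLOCK;
 * otherwise root privilege is required.
 */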
771 
772 /*
773  * sys_munlock: unlock wired pages
774  */
775 
776 int
777 sys_munlock(struct proc *p, void *v, register_t *retval)
778 {
779 	struct sys_munlock_args /* {
780 		syscallarg(const void *) addr;
781 		syscallarg(size_t) len;
782 	} */ *uap = v;
783 	vaddr_t addr;
784 	vsize_t size, pageoff;
785 	int error;
786 
787 	/* extract syscall args from uap */
788 	addr = (vaddr_t)SCARG(uap, addr);
789 	size = (vsize_t)SCARG(uap, len);
790 
791 	/* align address to a page boundary, and adjust size accordingly */
792 	ALIGN_ADDR(addr, size, pageoff);
793 	if (addr > SIZE_MAX - size)
794 		return (EINVAL);		/* disallow wrap-around. */
795 
796 #ifndef pmap_wired_count
797 	if ((error = suser(p)) != 0)
798 		return (error);
799 #endif
800 
801 	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE,
802 	    0);
803 	return (error == 0 ? 0 : ENOMEM);
804 }
805 
806 /*
807  * sys_mlockall: lock all pages mapped into an address space.
808  */
809 int
810 sys_mlockall(struct proc *p, void *v, register_t *retval)
811 {
812 	struct sys_mlockall_args /* {
813 		syscallarg(int) flags;
814 	} */ *uap = v;
815 	int error, flags;
816 
817 	flags = SCARG(uap, flags);
818 
819 	if (flags == 0 ||
820 	    (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
821 		return (EINVAL);
822 
823 #ifndef pmap_wired_count
824 	if ((error = suser(p)) != 0)
825 		return (error);
826 #endif
827 
828 	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
829 	    lim_cur(RLIMIT_MEMLOCK));
830 	if (error != 0 && error != ENOMEM)
831 		return (EAGAIN);
832 	return (error);
833 }
834 
835 /*
836  * sys_munlockall: unlock all pages mapped into an address space.
837  */
838 int
839 sys_munlockall(struct proc *p, void *v, register_t *retval)
840 {
841 
842 	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
843 	return (0);
844 }
845 
846 /*
847  * common code for mmapanon and mmapfile to lock a mapping
848  */
849 int
850 uvm_mmaplock(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
851     vsize_t locklimit)
852 {
853 	int error;
854 
855 	/*
856 	 * POSIX 1003.1b -- if our address space was configured
857 	 * to lock all future mappings, wire the one we just made.
858 	 */
859 	if (prot == PROT_NONE) {
860 		/*
861 		 * No more work to do in this case.
862 		 */
863 		return (0);
864 	}
865 
866 	vm_map_lock(map);
867 	if (map->flags & VM_MAP_WIREFUTURE) {
868 		KERNEL_LOCK();
869 		if ((atop(size) + uvmexp.wired) > uvmexp.wiredmax
870 #ifdef pmap_wired_count
871 		    || (locklimit != 0 && (size +
872 			 ptoa(pmap_wired_count(vm_map_pmap(map)))) >
873 			locklimit)
874 #endif
875 		) {
876 			error = ENOMEM;
877 			vm_map_unlock(map);
878 			/* unmap the region! */
879 			uvm_unmap(map, *addr, *addr + size);
880 			KERNEL_UNLOCK();
881 			return (error);
882 		}
883 		/*
884 		 * uvm_map_pageable() always returns the map
885 		 * unlocked.
886 		 */
887 		error = uvm_map_pageable(map, *addr, *addr + size,
888 		    FALSE, UVM_LK_ENTER);
889 		if (error != 0) {
890 			/* unmap the region! */
891 			uvm_unmap(map, *addr, *addr + size);
892 			KERNEL_UNLOCK();
893 			return (error);
894 		}
895 		KERNEL_UNLOCK();
896 		return (0);
897 	}
898 	vm_map_unlock(map);
899 	return (0);
900 }
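
/*
 * Note (illustrative): this path only does real work when the process
 * has previously called mlockall(MCL_FUTURE), which sets
 * VM_MAP_WIREFUTURE on the map; each later mapping is then wired here,
 * and the mapping is torn down again if wiring it would exceed the
 * wired-page limits.
 */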
901 
902 /*
903  * uvm_mmapanon: internal version of mmap for anons
904  *
905  * - used by sys_mmap
906  */
907 int
908 uvm_mmapanon(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
909     vm_prot_t maxprot, int flags, vsize_t locklimit, struct proc *p)
910 {
911 	int error;
912 	int advice = MADV_NORMAL;
913 	unsigned int uvmflag = 0;
914 	vsize_t align = 0;	/* userland page size */
915 
916 	/*
917 	 * for non-fixed mappings, round off the suggested address.
918 	 * for fixed mappings, check alignment and zap old mappings.
919 	 */
920 	if ((flags & MAP_FIXED) == 0) {
921 		*addr = round_page(*addr);	/* round */
922 	} else {
923 		if (*addr & PAGE_MASK)
924 			return(EINVAL);
925 
926 		uvmflag |= UVM_FLAG_FIXED;
927 		if ((flags & __MAP_NOREPLACE) == 0)
928 			uvmflag |= UVM_FLAG_UNMAP;
929 	}
930 
931 	if ((flags & MAP_FIXED) == 0 && size >= __LDPGSZ)
932 		align = __LDPGSZ;
933 	if ((flags & MAP_SHARED) == 0)
934 		/* XXX: defer amap create */
935 		uvmflag |= UVM_FLAG_COPYONW;
936 	else
937 		/* shared: create amap now */
938 		uvmflag |= UVM_FLAG_OVERLAY;
939 	if (flags & MAP_STACK)
940 		uvmflag |= UVM_FLAG_STACK;
941 	if (flags & MAP_CONCEAL)
942 		uvmflag |= UVM_FLAG_CONCEAL;
943 
944 	/* set up mapping flags */
945 	uvmflag = UVM_MAPFLAG(prot, maxprot,
946 	    (flags & MAP_SHARED) ? MAP_INHERIT_SHARE : MAP_INHERIT_COPY,
947 	    advice, uvmflag);
948 
949 	error = uvm_mapanon(map, addr, size, align, uvmflag);
950 
951 	if (error == 0)
952 		error = uvm_mmaplock(map, addr, size, prot, locklimit);
953 	return error;
954 }
955 
956 /*
957  * uvm_mmapfile: internal version of mmap for non-anons
958  *
959  * - used by sys_mmap
960  * - caller must page-align the file offset
961  */
962 int
963 uvm_mmapfile(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
964     vm_prot_t maxprot, int flags, struct vnode *vp, voff_t foff,
965     vsize_t locklimit, struct proc *p)
966 {
967 	struct uvm_object *uobj;
968 	int error;
969 	int advice = MADV_NORMAL;
970 	unsigned int uvmflag = 0;
971 	vsize_t align = 0;	/* userland page size */
972 
973 	/*
974 	 * for non-fixed mappings, round off the suggested address.
975 	 * for fixed mappings, check alignment and zap old mappings.
976 	 */
977 	if ((flags & MAP_FIXED) == 0) {
978 		*addr = round_page(*addr);	/* round */
979 	} else {
980 		if (*addr & PAGE_MASK)
981 			return(EINVAL);
982 
983 		uvmflag |= UVM_FLAG_FIXED;
984 		if ((flags & __MAP_NOREPLACE) == 0)
985 			uvmflag |= UVM_FLAG_UNMAP;
986 	}
987 
988 	/*
989 	 * attach to underlying vm object.
990 	 */
991 	if (vp->v_type != VCHR) {
992 		uobj = uvn_attach(vp, (flags & MAP_SHARED) ?
993 		   maxprot : (maxprot & ~PROT_WRITE));
994 
995 		/*
996 		 * XXXCDC: hack from old code
997 		 * don't allow vnodes which have been mapped
998 		 * shared-writeable to persist [forces them to be
999 		 * flushed out when last reference goes].
1000 		 * XXXCDC: interesting side effect: avoids a bug.
1001 		 * note that in WRITE [ufs_readwrite.c] that we
1002 		 * allocate buffer, uncache, and then do the write.
1003 		 * the problem with this is that if the uncache causes
1004 		 * VM data to be flushed to the same area of the file
1005 		 * we are writing to... in that case we've got the
1006 		 * buffer locked and our process goes to sleep forever.
1007 		 *
1008 		 * XXXCDC: checking maxprot protects us from the
1009 		 * "persistbug" program but this is not a long term
1010 		 * solution.
1011 		 *
1012 		 * XXXCDC: we don't bother calling uncache with the vp
1013 		 * VOP_LOCKed since we know that we are already
1014 		 * holding a valid reference to the uvn (from the
1015 		 * uvn_attach above), and thus it is impossible for
1016 		 * the uncache to kill the uvn and trigger I/O.
1017 		 */
1018 		if (flags & MAP_SHARED) {
1019 			if ((prot & PROT_WRITE) ||
1020 			    (maxprot & PROT_WRITE)) {
1021 				uvm_vnp_uncache(vp);
1022 			}
1023 		}
1024 	} else {
1025 		uobj = udv_attach(vp->v_rdev,
1026 		    (flags & MAP_SHARED) ? maxprot :
1027 		    (maxprot & ~PROT_WRITE), foff, size);
1028 		/*
1029 		 * XXX Some devices don't like to be mapped with
1030 		 * XXX PROT_EXEC, but we don't really have a
1031 		 * XXX better way of handling this, right now
1032 		 */
1033 		if (uobj == NULL && (prot & PROT_EXEC) == 0) {
1034 			maxprot &= ~PROT_EXEC;
1035 			uobj = udv_attach(vp->v_rdev,
1036 			    (flags & MAP_SHARED) ? maxprot :
1037 			    (maxprot & ~PROT_WRITE), foff, size);
1038 		}
1039 		advice = MADV_RANDOM;
1040 	}
1041 
1042 	if (uobj == NULL)
1043 		return((vp->v_type == VREG) ? ENOMEM : EINVAL);
1044 
1045 	if ((flags & MAP_SHARED) == 0)
1046 		uvmflag |= UVM_FLAG_COPYONW;
1047 	if (flags & __MAP_NOFAULT)
1048 		uvmflag |= (UVM_FLAG_NOFAULT | UVM_FLAG_OVERLAY);
1049 	if (flags & MAP_STACK)
1050 		uvmflag |= UVM_FLAG_STACK;
1051 	if (flags & MAP_CONCEAL)
1052 		uvmflag |= UVM_FLAG_CONCEAL;
1053 
1054 	/* set up mapping flags */
1055 	uvmflag = UVM_MAPFLAG(prot, maxprot,
1056 	    (flags & MAP_SHARED) ? MAP_INHERIT_SHARE : MAP_INHERIT_COPY,
1057 	    advice, uvmflag);
1058 
1059 	error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
1060 
1061 	if (error == 0)
1062 		return uvm_mmaplock(map, addr, size, prot, locklimit);
1063 
1064 	/* errors: first detach from the uobj, if any.  */
1065 	if (uobj)
1066 		uobj->pgops->pgo_detach(uobj);
1067 
1068 	return (error);
1069 }
1070 
1071 /* an address that can't be in userspace or kernelspace */
1072 #define	BOGO_PC	(u_long)-1
1073 int
1074 sys_kbind(struct proc *p, void *v, register_t *retval)
1075 {
1076 	struct sys_kbind_args /* {
1077 		syscallarg(const struct __kbind *) param;
1078 		syscallarg(size_t) psize;
1079 		syscallarg(uint64_t) proc_cookie;
1080 	} */ *uap = v;
1081 	const struct __kbind *paramp;
1082 	union {
1083 		struct __kbind uk[KBIND_BLOCK_MAX];
1084 		char upad[KBIND_BLOCK_MAX * sizeof(*paramp) + KBIND_DATA_MAX];
1085 	} param;
1086 	struct uvm_map_deadq dead_entries;
1087 	struct process *pr = p->p_p;
1088 	const char *data;
1089 	vaddr_t baseva, last_baseva, endva, pageoffset, kva;
1090 	size_t psize, s;
1091 	u_long pc;
1092 	int count, i;
1093 	int error;
1094 
1095 	/*
1096 	 * extract syscall args from uap
1097 	 */
1098 	paramp = SCARG(uap, param);
1099 	psize = SCARG(uap, psize);
1100 
1101 	/* a NULL paramp disables the syscall for the process */
1102 	if (paramp == NULL) {
1103 		pr->ps_kbind_addr = BOGO_PC;
1104 		return (0);
1105 	}
1106 
1107 	/* security checks */
1108 	pc = PROC_PC(p);
1109 	if (pr->ps_kbind_addr == 0) {
1110 		pr->ps_kbind_addr = pc;
1111 		pr->ps_kbind_cookie = SCARG(uap, proc_cookie);
1112 	} else if (pc != pr->ps_kbind_addr || pc == BOGO_PC)
1113 		sigexit(p, SIGILL);
1114 	else if (pr->ps_kbind_cookie != SCARG(uap, proc_cookie))
1115 		sigexit(p, SIGILL);
1116 	if (psize < sizeof(struct __kbind) || psize > sizeof(param))
1117 		return (EINVAL);
1118 	if ((error = copyin(paramp, &param, psize)))
1119 		return (error);
1120 
1121 	/*
1122 	 * The param argument points to an array of __kbind structures
1123 	 * followed by the corresponding new data areas for them.  Verify
1124 	 * that the sizes in the __kbind structures add up to the total
1125 	 * size and find the start of the new area.
1126 	 */
1127 	paramp = &param.uk[0];
1128 	s = psize;
1129 	for (count = 0; s > 0 && count < KBIND_BLOCK_MAX; count++) {
1130 		if (s < sizeof(*paramp))
1131 			return (EINVAL);
1132 		s -= sizeof(*paramp);
1133 
1134 		baseva = (vaddr_t)paramp[count].kb_addr;
1135 		endva = baseva + paramp[count].kb_size - 1;
1136 		if (paramp[count].kb_addr == NULL ||
1137 		    paramp[count].kb_size == 0 ||
1138 		    paramp[count].kb_size > KBIND_DATA_MAX ||
1139 		    baseva >= VM_MAXUSER_ADDRESS ||
1140 		    endva >= VM_MAXUSER_ADDRESS ||
1141 		    trunc_page(baseva) != trunc_page(endva) ||
1142 		    s < paramp[count].kb_size)
1143 			return (EINVAL);
1144 
1145 		s -= paramp[count].kb_size;
1146 	}
1147 	if (s > 0)
1148 		return (EINVAL);
1149 	data = (const char *)&paramp[count];
1150 
1151 	/* all looks good, so do the bindings */
1152 	last_baseva = VM_MAXUSER_ADDRESS;
1153 	kva = 0;
1154 	TAILQ_INIT(&dead_entries);
1155 	for (i = 0; i < count; i++) {
1156 		baseva = (vaddr_t)paramp[i].kb_addr;
1157 		pageoffset = baseva & PAGE_MASK;
1158 		baseva = trunc_page(baseva);
1159 
1160 		/* make sure the desired page is mapped into kernel_map */
1161 		if (baseva != last_baseva) {
1162 			if (kva != 0) {
1163 				vm_map_lock(kernel_map);
1164 				uvm_unmap_remove(kernel_map, kva,
1165 				    kva+PAGE_SIZE, &dead_entries, FALSE, TRUE);
1166 				vm_map_unlock(kernel_map);
1167 				kva = 0;
1168 			}
1169 			if ((error = uvm_map_extract(&p->p_vmspace->vm_map,
1170 			    baseva, PAGE_SIZE, &kva, UVM_EXTRACT_FIXPROT)))
1171 				break;
1172 			last_baseva = baseva;
1173 		}
1174 
1175 		/* do the update */
1176 		if ((error = kcopy(data, (char *)kva + pageoffset,
1177 		    paramp[i].kb_size)))
1178 			break;
1179 		data += paramp[i].kb_size;
1180 	}
1181 
1182 	if (kva != 0) {
1183 		vm_map_lock(kernel_map);
1184 		uvm_unmap_remove(kernel_map, kva, kva+PAGE_SIZE,
1185 		    &dead_entries, FALSE, TRUE);
1186 		vm_map_unlock(kernel_map);
1187 	}
1188 	uvm_unmap_detach(&dead_entries, AMAP_REFALL);
1189 
1190 	return (error);
1191 }
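
/*
 * Illustration (not part of the original source): the param blob that
 * ld.so passes to kbind(2) is laid out as all the __kbind headers
 * first, followed by the replacement bytes for each header in the same
 * order, e.g. for two bindings:
 *
 *	struct __kbind kb[2];			headers: kb_addr, kb_size
 *	char data[kb[0].kb_size +
 *	    kb[1].kb_size];			new contents, concatenated
 *
 * The first call locks in the caller's program counter and the supplied
 * cookie; any later call from a different PC or with a different cookie
 * is fatal (SIGILL), and a NULL param disables the syscall for the
 * process entirely.
 */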
1192