/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vm/vm_vmspace.c,v 1.13 2007/07/01 01:11:37 dillon Exp $
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kern_syscall.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vkernel.h>
#include <sys/vmspace.h>

#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <ddb/ddb.h>

#include <machine/vmparam.h>

#include <sys/spinlock2.h>
#include <sys/sysref2.h>

static struct vmspace_entry *vkernel_find_vmspace(struct vkernel_proc *vkp,
						  void *id);
static void vmspace_entry_delete(struct vmspace_entry *ve,
				 struct vkernel_proc *vkp);

static MALLOC_DEFINE(M_VKERNEL, "vkernel", "VKernel structures");

/*
 * vmspace_create (void *id, int type, void *data)
 *
 * Create a VMSPACE under the control of the caller with the specified id.
 * An id of NULL cannot be used.  The type and data fields must currently
 * be 0.
 *
 * The vmspace starts out completely empty.  Memory may be mapped into the
 * VMSPACE with vmspace_mmap() and MAP_VPAGETABLE section(s) controlled
 * with vmspace_mcontrol().
 */
int
sys_vmspace_create(struct vmspace_create_args *uap)
{
	struct vmspace_entry *ve;
	struct vkernel_proc *vkp;

	if (vkernel_enable == 0)
		return (EOPNOTSUPP);

	/*
	 * Create a virtual kernel side-structure for the process if one
	 * does not exist.
	 */
	if ((vkp = curproc->p_vkernel) == NULL) {
		vkp = kmalloc(sizeof(*vkp), M_VKERNEL, M_WAITOK|M_ZERO);
		vkp->refs = 1;
		spin_init(&vkp->spin);
		RB_INIT(&vkp->root);
		curproc->p_vkernel = vkp;
	}

	/*
	 * Create a new VMSPACE
	 */
	if (vkernel_find_vmspace(vkp, uap->id))
		return (EEXIST);
	ve = kmalloc(sizeof(struct vmspace_entry), M_VKERNEL, M_WAITOK|M_ZERO);
	ve->vmspace = vmspace_alloc(VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	ve->id = uap->id;
	pmap_pinit2(vmspace_pmap(ve->vmspace));
	RB_INSERT(vmspace_rb_tree, &vkp->root, ve);
	return (0);
}
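
/*
 * Usage sketch (illustrative only, not compiled here): a user process
 * such as a vkernel creates an empty VM space by passing any unique,
 * non-NULL token as the id.  The variable name and error handling below
 * are assumptions made for the example.
 *
 *	void *id = &guest_ctx;			(any unique non-NULL token)
 *	if (vmspace_create(id, 0, NULL) < 0)
 *		err(1, "vmspace_create");	(fails with EEXIST on a dup id)
 */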

/*
 * vmspace_destroy (void *id)
 *
 * Destroy a VMSPACE.
 */
int
sys_vmspace_destroy(struct vmspace_destroy_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);
	if (ve->refs)
		return (EBUSY);
	vmspace_entry_delete(ve, vkp);
	return(0);
}

/*
 * vmspace_ctl (void *id, int cmd, struct trapframe *tframe,
 *		struct vextframe *vframe);
 *
 * Transfer control to a VMSPACE.  Control is returned after the specified
 * number of microseconds or if a page fault, signal, trap, or system call
 * occurs.  The context is updated as appropriate.
 */
int
sys_vmspace_ctl(struct vmspace_ctl_args *uap)
{
	struct vkernel_proc *vkp;
	struct vkernel_lwp *vklp;
	struct vmspace_entry *ve;
	struct lwp *lp;
	struct proc *p;
	int framesz;
	int error;

	lp = curthread->td_lwp;
	p = lp->lwp_proc;

	if ((vkp = p->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);

	/*
	 * Signal mailbox interlock
	 */
	if (p->p_flag & P_MAILBOX) {
		p->p_flag &= ~P_MAILBOX;
		return (EINTR);
	}

	switch(uap->cmd) {
	case VMSPACE_CTL_RUN:
		/*
		 * Save the caller's register context, swap VM spaces, and
		 * install the passed register context.  Return with
		 * EJUSTRETURN so the syscall code doesn't adjust the context.
		 */
		atomic_add_int(&ve->refs, 1);
		framesz = sizeof(struct trapframe);
		if ((vklp = lp->lwp_vkernel) == NULL) {
			vklp = kmalloc(sizeof(*vklp), M_VKERNEL,
				       M_WAITOK|M_ZERO);
			lp->lwp_vkernel = vklp;
		}
		vklp->user_trapframe = uap->tframe;
		vklp->user_vextframe = uap->vframe;
		bcopy(uap->sysmsg_frame, &vklp->save_trapframe, framesz);
		bcopy(&curthread->td_tls, &vklp->save_vextframe.vx_tls,
		      sizeof(vklp->save_vextframe.vx_tls));
		error = copyin(uap->tframe, uap->sysmsg_frame, framesz);
		if (error == 0)
			error = copyin(&uap->vframe->vx_tls,
				       &curthread->td_tls,
				       sizeof(struct savetls));
		if (error == 0)
			error = cpu_sanitize_frame(uap->sysmsg_frame);
		if (error == 0)
			error = cpu_sanitize_tls(&curthread->td_tls);
		if (error) {
			bcopy(&vklp->save_trapframe, uap->sysmsg_frame, framesz);
			bcopy(&vklp->save_vextframe.vx_tls, &curthread->td_tls,
			      sizeof(vklp->save_vextframe.vx_tls));
			set_user_TLS();
			atomic_subtract_int(&ve->refs, 1);
		} else {
			vklp->ve = ve;
			pmap_setlwpvm(lp, ve->vmspace);
			set_user_TLS();
			set_vkernel_fp(uap->sysmsg_frame);
			error = EJUSTRETURN;
		}
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return(error);
}
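
/*
 * Usage sketch (illustrative only, not compiled here): the vkernel's
 * event loop runs a guest context with VMSPACE_CTL_RUN and regains
 * control when the guest faults, traps, or issues a system call.  The
 * frame variables and the dispatch routine are assumptions made for the
 * example.
 *
 *	struct trapframe tf;		(guest register state to install)
 *	struct vextframe vxf;		(guest TLS state)
 *	vmspace_ctl(id, VMSPACE_CTL_RUN, &tf, &vxf);
 *	(on return, tf holds the guest state at the fault/trap/syscall)
 *	dispatch_guest_event(&tf);	(hypothetical vkernel handler)
 */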

/*
 * vmspace_mmap(id, addr, len, prot, flags, fd, offset)
 *
 * Map memory within a VMSPACE.  This function is just like a normal mmap()
 * but operates on the vmspace's memory map.  Most callers use this to create
 * a MAP_VPAGETABLE mapping.
 */
int
sys_vmspace_mmap(struct vmspace_mmap_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	int error;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);
	error = kern_mmap(ve->vmspace, uap->addr, uap->len,
			  uap->prot, uap->flags,
			  uap->fd, uap->offset, &uap->sysmsg_resultp);
	return (error);
}
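
/*
 * Usage sketch (illustrative only, not compiled here): a vkernel
 * typically backs guest memory with an anonymous MAP_VPAGETABLE mapping
 * so the guest's page table can be emulated.  The ram_size variable and
 * the exact flag combination are assumptions made for the example.
 *
 *	void *base = vmspace_mmap(id, NULL, ram_size,
 *				  PROT_READ|PROT_WRITE,
 *				  MAP_ANON|MAP_VPAGETABLE, -1, 0);
 */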

/*
 * vmspace_munmap(id, addr, len)
 *
 * Unmap memory within a VMSPACE.
 */
int
sys_vmspace_munmap(struct vmspace_munmap_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);

	/*
	 * Copied from sys_munmap()
	 */
	addr = (vm_offset_t)uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t)round_page(size);
	if (addr + size < addr)
		return (EINVAL);
	if (size == 0)
		return (0);

	if (VM_MAX_USER_ADDRESS > 0 && addr + size > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);
	map = &ve->vmspace->vm_map;
	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE))
		return (EINVAL);
	vm_map_remove(map, addr, addr + size);
	return (0);
}

/*
 * vmspace_pread(id, buf, nbyte, flags, offset)
 *
 * Read data from a vmspace.  The number of bytes read is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes read is
 * less than the requested size, a page fault occurred in the VMSPACE
 * which the caller must resolve in order to proceed.
 */
int
sys_vmspace_pread(struct vmspace_pread_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);
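
	/*
	 * Not yet implemented; currently always fails with EINVAL.
	 */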
	return (EINVAL);
}

/*
 * vmspace_pwrite(id, buf, nbyte, flags, offset)
 *
 * Write data to a vmspace.  The number of bytes written is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes written
 * is less than the requested size, a page fault occurred in the VMSPACE
 * which the caller must resolve in order to proceed.
 */
int
sys_vmspace_pwrite(struct vmspace_pwrite_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);
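
	/*
	 * Not yet implemented; currently always fails with EINVAL.
	 */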
	return (EINVAL);
}

/*
 * vmspace_mcontrol(id, addr, len, behav, value)
 *
 * madvise/mcontrol support for a vmspace.
 */
int
sys_vmspace_mcontrol(struct vmspace_mcontrol_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	vm_offset_t start, end;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);

	/*
	 * This code is basically copied from sys_mcontrol()
	 */
	if (uap->behav < 0 || uap->behav > MADV_CONTROL_END)
		return (EINVAL);

	if (VM_MAX_USER_ADDRESS > 0 &&
	    ((vm_offset_t) uap->addr + uap->len) > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);
	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (EINVAL);

	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

	return (vm_map_madvise(&ve->vmspace->vm_map, start, end,
			       uap->behav, uap->value));
}
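
/*
 * Usage sketch (illustrative only, not compiled here): after creating a
 * MAP_VPAGETABLE mapping with vmspace_mmap(), the vkernel can point it
 * at the emulated page directory via MADV_SETMAP.  The pde value and the
 * VPTE flag combination are assumptions made for the example.
 *
 *	vmspace_mcontrol(id, base, ram_size, MADV_SETMAP,
 *			 pde | VPTE_R | VPTE_W | VPTE_V);
 */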

/*
 * Red black tree functions
 */
static int rb_vmspace_compare(struct vmspace_entry *, struct vmspace_entry *);
RB_GENERATE(vmspace_rb_tree, vmspace_entry, rb_entry, rb_vmspace_compare);

/* a->id is the comparison key; it is the only field that must be initialized */
static int
rb_vmspace_compare(struct vmspace_entry *a, struct vmspace_entry *b)
{
	if ((char *)a->id < (char *)b->id)
		return(-1);
	else if ((char *)a->id > (char *)b->id)
		return(1);
	return(0);
}

static
int
rb_vmspace_delete(struct vmspace_entry *ve, void *data)
{
	struct vkernel_proc *vkp = data;

	KKASSERT(ve->refs == 0);
	vmspace_entry_delete(ve, vkp);
	return(0);
}

/*
 * Remove a vmspace_entry from the RB tree and destroy it.  We have to clean
 * up the pmap, the vm_map, then destroy the vmspace.
 */
static
void
vmspace_entry_delete(struct vmspace_entry *ve, struct vkernel_proc *vkp)
{
	RB_REMOVE(vmspace_rb_tree, &vkp->root, ve);

	pmap_remove_pages(vmspace_pmap(ve->vmspace),
			  VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	vm_map_remove(&ve->vmspace->vm_map,
		      VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	sysref_put(&ve->vmspace->vm_sysref);
	kfree(ve, M_VKERNEL);
}


static
struct vmspace_entry *
vkernel_find_vmspace(struct vkernel_proc *vkp, void *id)
{
	struct vmspace_entry *ve;
	struct vmspace_entry key;

	key.id = id;
	ve = RB_FIND(vmspace_rb_tree, &vkp->root, &key);
	return (ve);
}

/*
 * Manage vkernel refs, used by the kernel when fork()ing or exit()ing
 * a vkernel process.
 */
void
vkernel_inherit(struct proc *p1, struct proc *p2)
{
	struct vkernel_proc *vkp;

	vkp = p1->p_vkernel;
	KKASSERT(vkp->refs > 0);
	atomic_add_int(&vkp->refs, 1);
	p2->p_vkernel = vkp;
}

void
vkernel_exit(struct proc *p)
{
	struct vkernel_proc *vkp;
	struct lwp *lp;
	int freeme = 0;

	vkp = p->p_vkernel;
	/*
	 * Restore the original VM context if we are killed while running
	 * a different one.
	 *
	 * This isn't supposed to happen.  What is supposed to happen is
	 * that the process should enter vkernel_trap() before handling
	 * the signal.
	 */
	LIST_FOREACH(lp, &p->p_lwps, lwp_list) {
		vkernel_lwp_exit(lp);
	}

	/*
	 * Dereference the common area
	 */
	p->p_vkernel = NULL;
	KKASSERT(vkp->refs > 0);
	spin_lock_wr(&vkp->spin);
	if (--vkp->refs == 0)
		freeme = 1;
	spin_unlock_wr(&vkp->spin);

	if (freeme) {
		RB_SCAN(vmspace_rb_tree, &vkp->root, NULL,
			rb_vmspace_delete, vkp);
		kfree(vkp, M_VKERNEL);
	}
}

void
vkernel_lwp_exit(struct lwp *lp)
{
	struct vkernel_lwp *vklp;
	struct vmspace_entry *ve;

	if ((vklp = lp->lwp_vkernel) != NULL) {
		if ((ve = vklp->ve) != NULL) {
			kprintf("Warning, pid %d killed with "
				"active VC!\n", lp->lwp_proc->p_pid);
#ifdef DDB
			db_print_backtrace();
#endif
			pmap_setlwpvm(lp, lp->lwp_proc->p_vmspace);
			vklp->ve = NULL;
			KKASSERT(ve->refs > 0);
			atomic_subtract_int(&ve->refs, 1);
		}
		lp->lwp_vkernel = NULL;
		kfree(vklp, M_VKERNEL);
	}
}

/*
 * A VM space under virtual kernel control trapped out, made a system call,
 * or otherwise needs to return control to the virtual kernel context.
 */
int
vkernel_trap(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;
	struct vmspace_entry *ve;
	struct vkernel_lwp *vklp;
	int error;

	/*
	 * Which vmspace entry was running?
	 */
	vklp = lp->lwp_vkernel;
	KKASSERT(vklp);
	ve = vklp->ve;
	KKASSERT(ve != NULL);

	/*
	 * Switch the LWP vmspace back to the virtual kernel's VM space.
	 */
	vklp->ve = NULL;
	pmap_setlwpvm(lp, p->p_vmspace);
	KKASSERT(ve->refs > 0);
	atomic_subtract_int(&ve->refs, 1);

	/*
	 * Copy the emulated process frame to the virtual kernel process.
	 * The emulated process cannot change TLS descriptors, so don't
	 * bother saving them; we already have a copy.
	 *
	 * Restore the virtual kernel's saved context so the virtual kernel
	 * process can resume.
	 */
	error = copyout(frame, vklp->user_trapframe, sizeof(*frame));
	bcopy(&vklp->save_trapframe, frame, sizeof(*frame));
	bcopy(&vklp->save_vextframe.vx_tls, &curthread->td_tls,
	      sizeof(vklp->save_vextframe.vx_tls));
	set_user_TLS();
	return(error);
}
541