xref: /dragonfly/sys/vm/vm_glue.c (revision c8860c9a)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
33  *
34  *
35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36  * All rights reserved.
37  *
38  * Permission to use, copy, modify and distribute this software and
39  * its documentation is hereby granted, provided that both the copyright
40  * notice and this permission notice appear in all copies of the
41  * software, derivative works or modified versions, and any portions
42  * thereof, and that both notices appear in supporting documentation.
43  *
44  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
46  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47  *
48  * Carnegie Mellon requests users of this software to return to
49  *
50  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
51  *  School of Computer Science
52  *  Carnegie Mellon University
53  *  Pittsburgh PA 15213-3890
54  *
55  * any improvements or extensions that they make and grant Carnegie the
56  * rights to redistribute these changes.
57  *
58  * $FreeBSD: src/sys/vm/vm_glue.c,v 1.94.2.4 2003/01/13 22:51:17 dillon Exp $
59  */
60 
61 #include "opt_vm.h"
62 
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/proc.h>
66 #include <sys/resourcevar.h>
67 #include <sys/buf.h>
68 #include <sys/shm.h>
69 #include <sys/vmmeter.h>
70 #include <sys/sysctl.h>
71 
72 #include <sys/kernel.h>
73 #include <sys/unistd.h>
74 
75 #include <machine/limits.h>
76 #include <machine/vmm.h>
77 
78 #include <vm/vm.h>
79 #include <vm/vm_param.h>
80 #include <sys/lock.h>
81 #include <vm/pmap.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_page.h>
84 #include <vm/vm_page2.h>
85 #include <vm/vm_pageout.h>
86 #include <vm/vm_kern.h>
87 #include <vm/vm_extern.h>
88 
89 /*
90  * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
91  *
92  * Process 0 falls into this function, just loop on nothing.
93  */
94 
/* Forward declaration; scheduler() is defined at the bottom of this file. */
static void scheduler(void *);
/* Run last: proc0 enters scheduler() after all other SYSINITs complete. */
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL);

#ifdef INVARIANTS

/* Debug knob (vm.swap_debug); only present in INVARIANTS kernels. */
static int swap_debug = 0;
SYSCTL_INT(_vm, OID_AUTO, swap_debug, CTLFLAG_RW, &swap_debug, 0, "");

#endif
104 
105 /*
106  * No requirements.
107  */
108 int
109 kernacc(c_caddr_t addr, int len, int rw)
110 {
111 	boolean_t rv;
112 	vm_offset_t saddr, eaddr;
113 	vm_prot_t prot;
114 
115 	KASSERT((rw & (~VM_PROT_ALL)) == 0,
116 	    ("illegal ``rw'' argument to kernacc (%x)", rw));
117 
118 	/*
119 	 * The globaldata space is not part of the kernel_map proper,
120 	 * check access separately.
121 	 */
122 	if (is_globaldata_space((vm_offset_t)addr, (vm_offset_t)(addr + len)))
123 		return (TRUE);
124 
125 	/*
126 	 * Nominal kernel memory access - check access via kernel_map.
127 	 */
128 	if ((vm_offset_t)addr + len > vm_map_max(&kernel_map) ||
129 	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
130 		return (FALSE);
131 	}
132 	prot = rw;
133 	saddr = trunc_page((vm_offset_t)addr);
134 	eaddr = round_page((vm_offset_t)addr + len);
135 	rv = vm_map_check_protection(&kernel_map, saddr, eaddr, prot, FALSE);
136 
137 	return (rv == TRUE);
138 }
139 
140 /*
141  * No requirements.
142  */
143 int
144 useracc(c_caddr_t addr, int len, int rw)
145 {
146 	boolean_t rv;
147 	vm_prot_t prot;
148 	vm_map_t map;
149 	vm_offset_t wrap;
150 	vm_offset_t gpa;
151 
152 	KASSERT((rw & (~VM_PROT_ALL)) == 0,
153 	    ("illegal ``rw'' argument to useracc (%x)", rw));
154 	prot = rw;
155 
156 	if (curthread->td_vmm) {
157 		if (vmm_vm_get_gpa(curproc, (register_t *)&gpa, (register_t) addr))
158 			panic("%s: could not get GPA\n", __func__);
159 		addr = (c_caddr_t) gpa;
160 	}
161 
162 	/*
163 	 * XXX - check separately to disallow access to user area and user
164 	 * page tables - they are in the map.
165 	 */
166 	wrap = (vm_offset_t)addr + len;
167 	if (wrap > VM_MAX_USER_ADDRESS || wrap < (vm_offset_t)addr) {
168 		return (FALSE);
169 	}
170 	map = &curproc->p_vmspace->vm_map;
171 	vm_map_lock_read(map);
172 
173 	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
174 				     round_page(wrap), prot, TRUE);
175 	vm_map_unlock_read(map);
176 
177 	return (rv == TRUE);
178 }
179 
180 /*
181  * No requirements.
182  */
183 void
184 vslock(caddr_t addr, u_int len)
185 {
186 	if (len) {
187 		vm_map_wire(&curproc->p_vmspace->vm_map,
188 			    trunc_page((vm_offset_t)addr),
189 			    round_page((vm_offset_t)addr + len), 0);
190 	}
191 }
192 
193 /*
194  * No requirements.
195  */
196 void
197 vsunlock(caddr_t addr, u_int len)
198 {
199 	if (len) {
200 		vm_map_wire(&curproc->p_vmspace->vm_map,
201 			    trunc_page((vm_offset_t)addr),
202 			    round_page((vm_offset_t)addr + len),
203 			    KM_PAGEABLE);
204 	}
205 }
206 
/*
 * Implement fork's actions on an address space.  Here we arrange for the
 * address space to be copied or referenced, allocate a user struct (pcb
 * and kernel stack), then call the machine-dependent layer to fill those
 * in and make the new process ready to run.  The new process is set up
 * so that it returns directly to user mode to avoid stack copying and
 * relocation problems.
 *
 * If p2 is NULL and RFPROC is 0 we are just divorcing parts of the process
 * from itself.
 *
 * Otherwise if p2 is NULL the new vmspace is not to be associated with any
 * process or thread (so things like /dev/upmap and /dev/lpmap are not
 * retained).
 *
 * Otherwise if p2 is not NULL then process specific mappings will be forked.
 * If lp2 is not NULL only the thread-specific mappings for lp2 are forked,
 * otherwise no thread-specific mappings are forked.
 *
 * No requirements.
 */
void
vm_fork(struct proc *p1, struct proc *p2, struct lwp *lp2, int flags)
{
	/*
	 * No new process: we are only adjusting p1's own address space.
	 */
	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared, essentially
		 * this changes shared memory amongst threads, into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (vmspace_getrefs(p1->p_vmspace) > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(ONLY_LWP_IN_PROC(p1), NULL, flags);
		return;
	}

	/*
	 * RFMEM: child shares the parent's vmspace; just take a
	 * reference rather than copying anything.
	 */
	if (flags & RFMEM) {
		vmspace_ref(p1->p_vmspace);
		p2->p_vmspace = p1->p_vmspace;
	}

	/*
	 * Throttle forks while the system is critically low on
	 * memory, letting the pageout daemon catch up.
	 */
	while (vm_paging_severe()) {
		vm_wait(0);
	}

	/*
	 * Non-shared case: give the child its own (COW) copy of the
	 * parent's vmspace, finish pmap setup, and propagate any
	 * SysV shared memory attachments.
	 */
	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace, p2, lp2);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/* Machine-dependent per-process pmap initialization for p2. */
	pmap_init_proc(p2);
}
266 
267 /*
268  * Set default limits for VM system.  Call during proc0's initialization.
269  *
270  * Called from the low level boot code only.
271  */
272 void
273 vm_init_limits(struct proc *p)
274 {
275 	int rss_limit;
276 
277 	/*
278 	 * Set up the initial limits on process VM. Set the maximum resident
279 	 * set size to be half of (reasonably) available memory.  Since this
280 	 * is a soft limit, it comes into effect only when the system is out
281 	 * of memory - half of main memory helps to favor smaller processes,
282 	 * and reduces thrashing of the object cache.
283 	 */
284 	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
285 	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
286 	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
287 	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
288 	/* limit the limit to no less than 2MB */
289 	rss_limit = max(vmstats.v_free_count, 512);
290 	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
291 	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
292 }
293 
294 /*
295  * process 0 winds up here after all kernel initialization sysinits have
296  * run.
297  */
298 static void
299 scheduler(void *dummy)
300 {
301 	for (;;)
302 		tsleep(&proc0, 0, "idle", 0);
303 }
304