xref: /dragonfly/sys/vm/vm_glue.c (revision 7d3e9a5b)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
33  *
34  *
35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36  * All rights reserved.
37  *
38  * Permission to use, copy, modify and distribute this software and
39  * its documentation is hereby granted, provided that both the copyright
40  * notice and this permission notice appear in all copies of the
41  * software, derivative works or modified versions, and any portions
42  * thereof, and that both notices appear in supporting documentation.
43  *
44  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
46  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47  *
48  * Carnegie Mellon requests users of this software to return to
49  *
50  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
51  *  School of Computer Science
52  *  Carnegie Mellon University
53  *  Pittsburgh PA 15213-3890
54  *
55  * any improvements or extensions that they make and grant Carnegie the
56  * rights to redistribute these changes.
57  *
58  * $FreeBSD: src/sys/vm/vm_glue.c,v 1.94.2.4 2003/01/13 22:51:17 dillon Exp $
59  */
60 
61 #include "opt_vm.h"
62 
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/proc.h>
66 #include <sys/resourcevar.h>
67 #include <sys/buf.h>
68 #include <sys/shm.h>
69 #include <sys/vmmeter.h>
70 #include <sys/sysctl.h>
71 
72 #include <sys/kernel.h>
73 #include <sys/unistd.h>
74 
75 #include <machine/limits.h>
76 
77 #include <vm/vm.h>
78 #include <vm/vm_param.h>
79 #include <sys/lock.h>
80 #include <vm/pmap.h>
81 #include <vm/vm_map.h>
82 #include <vm/vm_page.h>
83 #include <vm/vm_page2.h>
84 #include <vm/vm_pageout.h>
85 #include <vm/vm_kern.h>
86 #include <vm/vm_extern.h>
87 
88 /*
89  * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
90  *
91  * Process 0 falls into this function and just loops, doing nothing.
92  */
93 
94 static void scheduler(void *);
95 SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL);
96 
97 #ifdef INVARIANTS
98 
99 static int swap_debug = 0;
100 SYSCTL_INT(_vm, OID_AUTO, swap_debug, CTLFLAG_RW, &swap_debug, 0, "");
101 
102 #endif
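
/*
 * Editorial note (not part of the original file): on kernels built with
 * INVARIANTS the knob above is exposed read-write as the sysctl
 * "vm.swap_debug", so it can be toggled at runtime, e.g. with
 * "sysctl vm.swap_debug=1".
 */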
103 
104 /*
105  * No requirements.
106  */
107 int
108 kernacc(c_caddr_t addr, int len, int rw)
109 {
110 	boolean_t rv;
111 	vm_offset_t saddr, eaddr;
112 	vm_prot_t prot;
113 
114 	KASSERT((rw & (~VM_PROT_ALL)) == 0,
115 	    ("illegal ``rw'' argument to kernacc (%x)", rw));
116 
117 	/*
118 	 * The globaldata space is not part of the kernel_map proper,
119 	 * so check access to it separately.
120 	 */
121 	if (is_globaldata_space((vm_offset_t)addr, (vm_offset_t)(addr + len)))
122 		return (TRUE);
123 
124 	/*
125 	 * Nominal kernel memory access - check access via kernel_map.
126 	 */
127 	if ((vm_offset_t)addr + len > vm_map_max(kernel_map) ||
128 	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
129 		return (FALSE);
130 	}
131 	prot = rw;
132 	saddr = trunc_page((vm_offset_t)addr);
133 	eaddr = round_page((vm_offset_t)addr + len);
134 	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot, FALSE);
135 
136 	return (rv == TRUE);
137 }
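
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a hypothetical caller using kernacc() to verify that a kernel buffer is
 * readable before touching it.  The function name is made up for
 * illustration only.
 */
#if 0
static void
example_check_kernel_buf(const char *buf, int len)
{
	/* kernacc() takes a VM_PROT_* mask as its "rw" argument */
	if (kernacc(buf, len, VM_PROT_READ)) {
		/* the range [buf, buf + len) is mapped readable */
		kprintf("buffer at %p is readable\n", buf);
	}
}
#endif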
138 
139 /*
140  * No requirements.
141  */
142 int
143 useracc(c_caddr_t addr, int len, int rw)
144 {
145 	boolean_t rv;
146 	vm_prot_t prot;
147 	vm_map_t map;
148 	vm_offset_t wrap;
149 
150 	KASSERT((rw & (~VM_PROT_ALL)) == 0,
151 	    ("illegal ``rw'' argument to useracc (%x)", rw));
152 	prot = rw;
153 
154 	/*
155 	 * XXX - check separately to disallow access to user area and user
156 	 * page tables - they are in the map.
157 	 */
158 	wrap = (vm_offset_t)addr + len;
159 	if (wrap > VM_MAX_USER_ADDRESS || wrap < (vm_offset_t)addr) {
160 		return (FALSE);
161 	}
162 	map = &curproc->p_vmspace->vm_map;
163 	vm_map_lock_read(map);
164 
165 	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
166 				     round_page(wrap), prot, TRUE);
167 	vm_map_unlock_read(map);
168 
169 	return (rv == TRUE);
170 }
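
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the classic pattern of probing a user buffer with useracc() before
 * copying into it.  The helper name is hypothetical; code can also simply
 * call copyout() and check its return value.
 */
#if 0
static int
example_copy_to_user(void *uaddr, const void *kaddr, size_t len)
{
	/* make sure the whole destination range is user-writable */
	if (!useracc((c_caddr_t)uaddr, (int)len, VM_PROT_WRITE))
		return (EFAULT);
	return (copyout(kaddr, uaddr, len));
}
#endif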
171 
172 /*
173  * No requirements.
174  */
175 void
176 vslock(caddr_t addr, u_int len)
177 {
178 	if (len) {
179 		/* wire the pages */
180 		vm_map_kernel_wiring(&curproc->p_vmspace->vm_map,
181 				     trunc_page((vm_offset_t)addr),
182 				     round_page((vm_offset_t)addr + len),
183 				     0);
184 	}
185 }
186 
187 /*
188  * No requirements.
189  */
190 void
191 vsunlock(caddr_t addr, u_int len)
192 {
193 	if (len) {
194 		/* unwire the pages */
195 		vm_map_kernel_wiring(&curproc->p_vmspace->vm_map,
196 				     trunc_page((vm_offset_t)addr),
197 				     round_page((vm_offset_t)addr + len),
198 				     KM_PAGEABLE);
199 	}
200 }
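
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * vslock()/vsunlock() bracket a window in which a user buffer must stay
 * resident, e.g. while it is accessed by code that cannot tolerate a page
 * fault.  The helper name is hypothetical.
 */
#if 0
static void
example_wired_user_window(caddr_t uaddr, u_int len)
{
	vslock(uaddr, len);		/* wire the backing pages */
	/*
	 * ... access the buffer here without risking that its pages get
	 * paged out ...
	 */
	vsunlock(uaddr, len);		/* unwire them again */
}
#endif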
201 
202 /*
203  * Implement fork's actions on an address space.  Here we arrange for the
204  * address space to be copied or referenced, allocate a user struct (pcb
205  * and kernel stack), then call the machine-dependent layer to fill those
206  * in and make the new process ready to run.  The new process is set up
207  * so that it returns directly to user mode to avoid stack copying and
208  * relocation problems.
209  *
210  * If p2 is NULL and RFPROC is 0 we are just divorcing parts of the process
211  * from itself.
212  *
213  * Otherwise if p2 is NULL the new vmspace is not to be associated with any
214  * process or thread (so things like /dev/upmap and /dev/lpmap are not
215  * retained).
216  *
217  * Otherwise if p2 is not NULL then process specific mappings will be forked.
218  * If lp2 is not NULL only the thread-specific mappings for lp2 are forked,
219  * otherwise no thread-specific mappings are forked.
220  *
221  * No requirements.
222  */
223 void
224 vm_fork(struct proc *p1, struct proc *p2, struct lwp *lp2, int flags)
225 {
226 	if ((flags & RFPROC) == 0) {
227 		/*
228 		 * Divorce the memory if it is shared.  Essentially this
229 		 * changes memory shared amongst threads into memory that
230 		 * is copied-on-write (COW) locally.
231 		 */
232 		if ((flags & RFMEM) == 0) {
233 			if (vmspace_getrefs(p1->p_vmspace) > 1) {
234 				vmspace_unshare(p1);
235 			}
236 		}
237 		cpu_fork(ONLY_LWP_IN_PROC(p1), NULL, flags);
238 		return;
239 	}
240 
241 	if (flags & RFMEM) {
242 		vmspace_ref(p1->p_vmspace);
243 		p2->p_vmspace = p1->p_vmspace;
244 	}
245 
246 	while (vm_paging_severe()) {
247 		vm_wait(0);
248 	}
249 
250 	if ((flags & RFMEM) == 0) {
251 		p2->p_vmspace = vmspace_fork(p1->p_vmspace, p2, lp2);
252 
253 		pmap_pinit2(vmspace_pmap(p2->p_vmspace));
254 
255 		if (p1->p_vmspace->vm_shm)
256 			shmfork(p1, p2);
257 	}
258 
259 	pmap_init_proc(p2);
260 }
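
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the flag combinations handled above, written out as calls a hypothetical
 * caller might make once p1, p2 and lp2 have been set up.
 */
#if 0
static void
example_vm_fork_usage(struct proc *p1, struct proc *p2, struct lwp *lp2)
{
	/* no RFPROC: un-share p1's vmspace in place if it is shared */
	vm_fork(p1, NULL, NULL, 0);

	/* RFPROC | RFMEM: p2 shares p1's vmspace */
	vm_fork(p1, p2, NULL, RFPROC | RFMEM);

	/*
	 * RFPROC only: p2 gets a copy-on-write copy of p1's vmspace and
	 * lp2's thread-specific mappings are forked as well.
	 */
	vm_fork(p1, p2, lp2, RFPROC);
}
#endif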
261 
262 /*
263  * Set default limits for VM system.  Call during proc0's initialization.
264  *
265  * Called from the low level boot code only.
266  */
267 void
268 vm_init_limits(struct proc *p)
269 {
270 	int rss_limit;
271 
272 	/*
273 	 * Set up the initial limits on process VM.  Set the maximum resident
274 	 * set size to the amount of (reasonably) available memory, i.e. the
275 	 * current free page count.  Since this is a soft limit, it comes into
276 	 * effect only when the system is out of memory, where it helps to
277 	 * favor smaller processes and reduces thrashing of the object cache.
278 	 */
279 	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
280 	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
281 	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
282 	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
283 	/* limit the limit to no less than 2MB (512 pages at the 4KB page size) */
284 	rss_limit = max(vmstats.v_free_count, 512);
285 	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
286 	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
287 }
288 
289 /*
290  * Process 0 winds up here after all kernel initialization sysinits have
291  * run.
292  */
293 static void
294 scheduler(void *dummy)
295 {
296 	for (;;)
297 		tsleep(&proc0, 0, "idle", 0);
298 }
299