xref: /dragonfly/sys/vm/vm_glue.c (revision f9993810)
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_glue.c,v 1.94.2.4 2003/01/13 22:51:17 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_page2.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Process 0 falls into this function, just loop on nothing.
 */

static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL);
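
/*
 * For reference, SYSINIT() hooks a function into the kernel initialization
 * sequence at a given subsystem/order; SI_SUB_RUN_SCHEDULER is the last
 * subsystem, which is what makes scheduler() above the final item.  A
 * minimal sketch of hooking an earlier stage might look like the following
 * (the names "mymod" and mymod_init() are hypothetical, not part of this
 * file):
 *
 *	static void
 *	mymod_init(void *dummy __unused)
 *	{
 *		kprintf("mymod: initialized\n");
 *	}
 *	SYSINIT(mymodinit, SI_SUB_DRIVERS, SI_ORDER_ANY, mymod_init, NULL);
 */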

#ifdef INVARIANTS

static int swap_debug = 0;
SYSCTL_INT(_vm, OID_AUTO, swap_debug, CTLFLAG_RW, &swap_debug, 0, "");

#endif

/*
 * No requirements.
 */
int
kernacc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)", rw));

	/*
	 * The globaldata space is not part of the kernel_map proper,
	 * check access separately.
	 */
	if (is_globaldata_space((vm_offset_t)addr, (vm_offset_t)(addr + len)))
		return (TRUE);

	/*
	 * Nominal kernel memory access - check access via kernel_map.
	 */
	if ((vm_offset_t)addr + len > vm_map_max(kernel_map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot, FALSE);

	return (rv == TRUE);
}
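
/*
 * Illustrative usage sketch (not part of the original source): a caller
 * wanting to verify read/write access to a kernel buffer before touching
 * it could do something like the following, where "buf" and "len" are
 * hypothetical:
 *
 *	if (!kernacc((c_caddr_t)buf, len, VM_PROT_READ | VM_PROT_WRITE))
 *		return (EFAULT);
 *	bzero(buf, len);
 */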

/*
 * No requirements.
 */
int
useracc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;
	vm_offset_t wrap;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to useracc (%x)", rw));
	prot = rw;

	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 */
	wrap = (vm_offset_t)addr + len;
	if (wrap > VM_MAX_USER_ADDRESS || wrap < (vm_offset_t)addr) {
		return (FALSE);
	}
	map = &curproc->p_vmspace->vm_map;
	vm_map_lock_read(map);

	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
				     round_page(wrap), prot, TRUE);
	vm_map_unlock_read(map);

	return (rv == TRUE);
}
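
/*
 * Illustrative usage sketch (not part of the original source): a syscall
 * or driver path could pre-check that a user-supplied range is writable
 * in the current process; "uaddr" and "ulen" are hypothetical.  This is
 * only an advisory check - the mapping can still change before the data
 * is actually copied in or out.
 *
 *	if (!useracc((c_caddr_t)uaddr, ulen, VM_PROT_WRITE))
 *		return (EFAULT);
 */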

/*
 * No requirements.
 */
void
vslock(caddr_t addr, u_int len)
{
	if (len) {
		/* wire the pages */
		vm_map_kernel_wiring(&curproc->p_vmspace->vm_map,
				     trunc_page((vm_offset_t)addr),
				     round_page((vm_offset_t)addr + len),
				     0);
	}
}

/*
 * No requirements.
 */
void
vsunlock(caddr_t addr, u_int len)
{
	if (len) {
		/* unwire the pages */
		vm_map_kernel_wiring(&curproc->p_vmspace->vm_map,
				     trunc_page((vm_offset_t)addr),
				     round_page((vm_offset_t)addr + len),
				     KM_PAGEABLE);
	}
}
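
/*
 * Illustrative usage sketch (not part of the original source): code that
 * needs a user buffer to remain resident while it is being accessed (for
 * example around a physio-style transfer) brackets the access with these
 * two calls; "uaddr" and "ulen" are hypothetical.
 *
 *	vslock(uaddr, ulen);
 *	... access the now-wired pages ...
 *	vsunlock(uaddr, ulen);
 */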

/*
 * Implement fork's actions on an address space.  Here we arrange for the
 * address space to be copied or referenced, allocate a user struct (pcb
 * and kernel stack), then call the machine-dependent layer to fill those
 * in and make the new process ready to run.  The new process is set up
 * so that it returns directly to user mode to avoid stack copying and
 * relocation problems.
 *
 * If p2 is NULL and RFPROC is 0 we are just divorcing parts of the process
 * from itself.
 *
 * Otherwise if p2 is NULL the new vmspace is not to be associated with any
 * process or thread (so things like /dev/upmap and /dev/lpmap are not
 * retained).
 *
 * Otherwise if p2 is not NULL then process specific mappings will be forked.
 * If lp2 is not NULL only the thread-specific mappings for lp2 are forked,
 * otherwise no thread-specific mappings are forked.
 *
 * No requirements.
 */
void
vm_fork(struct proc *p1, struct proc *p2, struct lwp *lp2, int flags)
{
	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory if it is shared.  Essentially
		 * this converts memory shared amongst threads into
		 * memory that is copy-on-write locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (vmspace_getrefs(p1->p_vmspace) > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(ONLY_LWP_IN_PROC(p1), NULL, flags);
		return;
	}

	if (flags & RFMEM) {
		vmspace_ref(p1->p_vmspace);
		p2->p_vmspace = p1->p_vmspace;
	}

	while (vm_paging_severe()) {
		vm_wait(0);
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace, p2, lp2);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	pmap_init_proc(p2);
}
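
/*
 * Illustrative sketch (not part of the original source): the paths above
 * are selected by the caller's rfork-style flags.  A hypothetical caller
 * with a freshly allocated p2/lp2 might use:
 *
 *	vm_fork(p1, p2, lp2, RFPROC);		normal fork, COW copy
 *	vm_fork(p1, p2, lp2, RFPROC | RFMEM);	share p1's vmspace
 *	vm_fork(p1, NULL, NULL, 0);		divorce shared memory back
 *						into a private copy
 */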

/*
 * Set default limits for VM system.  Call during proc0's initialization.
 *
 * Called from the low level boot code only.
 */
void
vm_init_limits(struct proc *p)
{
	int rss_limit;

	/*
	 * Set up the initial limits on process VM. Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* clamp the RSS soft limit so it never falls below 2MB (512 pages) */
	rss_limit = max(vmstats.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
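
/*
 * For reference (not part of the original source): with the usual 4KB
 * page size, ptoa(512) == 512 * 4096 == 2MB, so the max() above keeps the
 * default RSS soft limit at or above 2MB even if very little memory is
 * free when proc0 is initialized; otherwise the soft limit defaults to
 * roughly the amount of free memory at that point.
 */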

/*
 * process 0 winds up here after all kernel initialization sysinits have
 * run.
 */
static void
scheduler(void *dummy)
{
	for (;;)
		tsleep(&proc0, 0, "idle", 0);
}
297