xref: /openbsd/sys/uvm/uvm_glue.c (revision 8932bfb7)
/*	$OpenBSD: uvm_glue.c,v 1.58 2011/04/15 21:47:24 oga Exp $	*/
/*	$NetBSD: uvm_glue.c,v 1.44 2001/02/06 19:54:44 eeh Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif
#include <sys/sched.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>

/*
 * uvm_kernacc: can the kernel access a region of memory
 *
 * - called from malloc [DIAGNOSTIC], and /dev/kmem driver (mem.c)
 */

boolean_t
uvm_kernacc(caddr_t addr, size_t len, int rw)
{
	boolean_t rv;
	vaddr_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page((vaddr_t)addr);
	eaddr = round_page((vaddr_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);

	return (rv);
}

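/*
 * Illustrative sketch (not part of UVM): how a /dev/kmem-style read
 * routine might use uvm_kernacc() to validate a kernel range before
 * copying from it.  The caller shown here is hypothetical; see mem.c
 * for the real consumer.
 */
#if 0
static int
kmem_read_sketch(struct uio *uio)
{
	caddr_t va = (caddr_t)(vaddr_t)uio->uio_offset;
	size_t c = uio->uio_resid;

	/* refuse the transfer unless the whole range is readable */
	if (!uvm_kernacc(va, c, B_READ))
		return (EFAULT);
	return (uiomove(va, c, uio));
}
#endif
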
#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so a debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect(), a change to allow writing would be lazily
 * applied, meaning we would still take a protection fault, something
 * we really don't want.  It would also fragment the kernel map
 * unnecessarily.  We cannot use pmap_protect() since it won't
 * enforce a write-enable request either.  Using pmap_enter() is the
 * only way we can ensure the change takes place properly.
 */
void
uvm_chgkprot(caddr_t addr, size_t len, int rw)
{
	vm_prot_t prot;
	paddr_t pa;
	vaddr_t sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page((vaddr_t)addr + len);
	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract physical address for the page.
		 * We use a cheesy hack to differentiate physical
		 * page 0 from an invalid mapping, not that it
		 * really matters...
		 */
		if (pmap_extract(pmap_kernel(), sva, &pa) == FALSE)
			panic("chgkprot: invalid page");
		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
	}
	pmap_update(pmap_kernel());
}
#endif

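/*
 * Illustrative sketch (hypothetical, KGDB only): how a kernel debugger
 * might use uvm_chgkprot() to plant a breakpoint in read-only text.
 * bkpt_t and BKPT_INST are placeholders for the MD breakpoint encoding.
 */
#if 0
static void
kgdb_plant_breakpoint_sketch(caddr_t addr)
{
	bkpt_t old, inst = BKPT_INST;		/* placeholder names */

	uvm_chgkprot(addr, sizeof(inst), B_WRITE); /* make it writable */
	memcpy(&old, addr, sizeof(old));	/* save original insn */
	memcpy(addr, &inst, sizeof(inst));	/* write breakpoint */
	uvm_chgkprot(addr, sizeof(inst), B_READ); /* restore read-only */
}
#endif
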
/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 */

int
uvm_vslock(struct proc *p, caddr_t addr, size_t len, vm_prot_t access_type)
{
	struct vm_map *map;
	vaddr_t start, end;
	int rv;

	map = &p->p_vmspace->vm_map;
	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	if (end <= start)
		return (EINVAL);

	rv = uvm_fault_wire(map, start, end, access_type);

	return (rv);
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock()
 *
 * - called from physio and sys___sysctl
 */

void
uvm_vsunlock(struct proc *p, caddr_t addr, size_t len)
{
	vaddr_t start, end;

	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	if (end <= start)
		return;

	uvm_fault_unwire(&p->p_vmspace->vm_map, start, end);
}

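/*
 * Illustrative sketch (not part of UVM): the wire/transfer/unwire
 * pairing that physio-style callers follow.  mydev_transfer() is a
 * hypothetical transfer routine; note that a device read writes into
 * the user buffer, hence VM_PROT_WRITE in that case.
 */
#if 0
static int
mydev_rw_sketch(struct proc *p, caddr_t uaddr, size_t len, int rw)
{
	vm_prot_t prot = (rw == B_READ) ?
	    VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
	int error;

	/* wire the pages so they cannot be paged out during the I/O */
	error = uvm_vslock(p, uaddr, len, prot);
	if (error)
		return (error);
	error = mydev_transfer(uaddr, len, rw);	/* hypothetical */
	uvm_vsunlock(p, uaddr, len);		/* always unwire */
	return (error);
}
#endif
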
/*
 * uvm_vslock_device: wire user memory, make sure it is device reachable
 *  and bounce it through a DMA-reachable kernel buffer if it is not.
 */
int
uvm_vslock_device(struct proc *p, void *addr, size_t len,
    vm_prot_t access_type, void **retp)
{
	struct vm_page *pg;
	struct pglist pgl;
	int npages;
	vaddr_t start, end, off;
	vaddr_t sva, va;
	vsize_t sz;
	int error, i;

	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	sz = end - start;
	off = (vaddr_t)addr - start;
	if (end <= start)
		return (EINVAL);

	if ((error = uvm_fault_wire(&p->p_vmspace->vm_map, start, end,
	    access_type))) {
		return (error);
	}

	npages = atop(sz);
	for (i = 0; i < npages; i++) {
		paddr_t pa;

		if (!pmap_extract(p->p_vmspace->vm_map.pmap,
		    start + ptoa(i), &pa)) {
			error = EFAULT;
			goto out_unwire;
		}
		if (!PADDR_IS_DMA_REACHABLE(pa))
			break;
	}
	if (i == npages) {
		/* every page is device reachable; no bounce buffer needed */
		*retp = NULL;
		return (0);
	}

	if ((va = uvm_km_valloc(kernel_map, sz)) == 0) {
		error = ENOMEM;
		goto out_unwire;
	}
	sva = va;

	TAILQ_INIT(&pgl);
	error = uvm_pglistalloc(npages * PAGE_SIZE, dma_constraint.ucr_low,
	    dma_constraint.ucr_high, 0, 0, &pgl, npages, UVM_PLA_WAITOK);
	if (error)
		goto out_unmap;

	while ((pg = TAILQ_FIRST(&pgl)) != NULL) {
		TAILQ_REMOVE(&pgl, pg, pageq);
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ|VM_PROT_WRITE);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	KASSERT(va == sva + sz);
	*retp = (void *)(sva + off);

	if ((error = copyin(addr, *retp, len)) == 0)
		return (0);

	uvm_km_pgremove_intrsafe(sva, sva + sz);
	pmap_kremove(sva, sz);
	pmap_update(pmap_kernel());
out_unmap:
	uvm_km_free(kernel_map, sva, sz);
out_unwire:
	uvm_fault_unwire(&p->p_vmspace->vm_map, start, end);
	return (error);
}

void
uvm_vsunlock_device(struct proc *p, void *addr, size_t len, void *map)
{
	vaddr_t start, end;
	vaddr_t kva;
	vsize_t sz;

	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	sz = end - start;
	if (end <= start)
		return;

	if (map)
		copyout(map, addr, len);
	uvm_fault_unwire(&p->p_vmspace->vm_map, start, end);

	if (!map)
		return;

	kva = trunc_page((vaddr_t)map);
	uvm_km_pgremove_intrsafe(kva, kva + sz);
	pmap_kremove(kva, sz);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, kva, sz);
}

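/*
 * Illustrative sketch (not part of UVM): pairing uvm_vslock_device()
 * with uvm_vsunlock_device().  When a bounce buffer was set up,
 * "bounce" comes back non-NULL and the device must use it instead of
 * the user address; uvm_vsunlock_device() then copies the result back
 * out and tears the buffer down.  mydev_dma() is a hypothetical DMA
 * routine.
 */
#if 0
static int
mydev_dma_rw_sketch(struct proc *p, void *uaddr, size_t len)
{
	void *bounce;
	int error;

	error = uvm_vslock_device(p, uaddr, len,
	    VM_PROT_READ | VM_PROT_WRITE, &bounce);
	if (error)
		return (error);
	/* DMA to the bounce buffer if one was needed, else directly */
	error = mydev_dma(bounce != NULL ? bounce : uaddr, len);
	uvm_vsunlock_device(p, uaddr, len, bounce);
	return (error);
}
#endif
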
/*
 * uvm_fork: fork a virtual address space
 *
 * - the address space is copied according to the parent map's inherit values
 * - a new "user" structure is allocated for the child process
 *	[filled in by MD layer...]
 * - if specified, the child gets a new user stack described by
 *	stack and stacksize
 * - NOTE: the kernel stack may be at a different location in the child
 *	process, and thus addresses of automatic variables may be invalid
 *	after cpu_fork returns in the child process.  We do nothing here
 *	after cpu_fork returns.
 * - XXXCDC: we need a way for this to return a failure value rather
 *   than just hang
 */
void
uvm_fork(struct proc *p1, struct proc *p2, boolean_t shared, void *stack,
    size_t stacksize, void (*func)(void *), void *arg)
{
	struct user *up = p2->p_addr;

	if (shared == TRUE) {
		p2->p_vmspace = NULL;
		uvmspace_share(p1, p2);			/* share vmspace */
	} else
		p2->p_vmspace = uvmspace_fork(p1->p_vmspace); /* fork vmspace */

#ifdef PMAP_UAREA
	/* Tell the pmap this is a u-area mapping */
	PMAP_UAREA((vaddr_t)up);
#endif

	/*
	 * p_stats currently points at a field in the user struct.  Copy
	 * parts of p_stats, and zero out the rest.
	 */
	p2->p_stats = &up->u_stats;
	memset(&up->u_stats.pstat_startzero, 0,
	       ((caddr_t)&up->u_stats.pstat_endzero -
		(caddr_t)&up->u_stats.pstat_startzero));
	memcpy(&up->u_stats.pstat_startcopy, &p1->p_stats->pstat_startcopy,
	       ((caddr_t)&up->u_stats.pstat_endcopy -
		(caddr_t)&up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork() copies and updates the pcb, and makes the child ready
	 * to run.  If this is a normal user fork, the child will exit
	 * directly to user mode via child_return() on its first time
	 * slice and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_fork(p1, p2, stack, stacksize, func, arg);
}

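/*
 * The memset/memcpy above relies on marker fields in struct pstats
 * (sys/resourcevar.h) that bracket the members to be zeroed and the
 * members to be copied from the parent on fork.  A minimal sketch of
 * the idiom, with hypothetical member names:
 */
#if 0
struct pstats_sketch {
#define	ps_startzero	ps_ru
	struct rusage	 ps_ru;		/* zeroed in the child */
#define	ps_endzero	ps_startcopy
#define	ps_startcopy	ps_timer
	struct itimerval ps_timer[3];	/* copied from the parent */
#define	ps_endcopy	ps_dummy
	int		 ps_dummy;
};
#endif
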
/*
 * uvm_exit: exit a virtual address space
 *
 * - the process passed to us is a dead (pre-zombie) process; we
 *   are running on a different context now (the reaper).
 * - we must run in a separate thread because freeing the vmspace
 *   of the dead process may block.
 */
void
uvm_exit(struct proc *p)
{
	uvmspace_free(p->p_vmspace);
	p->p_vmspace = NULL;
	uvm_km_free(kernel_map, (vaddr_t)p->p_addr, USPACE);
	p->p_addr = NULL;
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */
void
uvm_init_limits(struct proc *p)
{

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be all of (reasonably) available memory.
	 * This causes any single, large process to start random page
	 * replacement once it fills memory.
	 */

	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
}

#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define SDB_SWAPIN	2
#define SDB_SWAPOUT	4
#endif

/*
 * swappable: is process "p" swappable?
 */

#define	swappable(p) (((p)->p_flag & (P_SYSTEM | P_WEXIT)) == 0)

/*
 * swapout_threads: find threads that can be swapped
 *
 * - called by the pagedaemon
 * - try to swap at least one process
 * - processes that have been sleeping or stopped for maxslp or more
 *   seconds are swapped; otherwise the longest-sleeping or stopped
 *   process is swapped, failing that the longest-resident process.
 */
void
uvm_swapout_threads(void)
{
	struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;
	extern int maxslp;
	/* XXXCDC: should move off to uvmexp. or uvm., also in uvm_meter */

#ifdef DEBUG
	if (!enableswap)
		return;
#endif

	/*
	 * outp/outpri  : stop/sleep process with largest sleeptime < maxslp
	 * outp2/outpri2: the longest resident process (its swap time)
	 */
	outp = outp2 = NULL;
	outpri = outpri2 = 0;
	LIST_FOREACH(p, &allproc, p_list) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		case SRUN:
			if (p->p_swtime > outpri2) {
				outp2 = p;
				outpri2 = p->p_swtime;
			}
			continue;

		case SSLEEP:
		case SSTOP:
			if (p->p_slptime >= maxslp) {
				pmap_collect(p->p_vmspace->vm_map.pmap);
				didswap++;
			} else if (p->p_slptime > outpri) {
				outp = p;
				outpri = p->p_slptime;
			}
			continue;
		}
	}

	/*
	 * If we didn't get rid of any real duds, toss out the next most
	 * likely sleeping/stopped or running candidate.  We only do this
	 * if we are really low on memory since we don't gain much by
	 * doing it.
	 */
	if (didswap == 0 && uvmexp.free <= atop(round_page(USPACE))) {
		if ((p = outp) == NULL)
			p = outp2;
#ifdef DEBUG
		if (swapdebug & SDB_SWAPOUT)
			printf("swapout_threads: no duds, try procp %p\n", p);
#endif
		if (p)
			pmap_collect(p->p_vmspace->vm_map.pmap);
	}
}

/*
 * uvm_atopg: convert a kernel virtual address back to its vm_page.
 */
struct vm_page *
uvm_atopg(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	boolean_t rv;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}
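
/*
 * Illustrative sketch (not part of UVM): recovering the physical
 * address behind a mapped kernel virtual address via uvm_atopg(),
 * e.g. for a page obtained from the kernel map.
 */
#if 0
static paddr_t
kva_to_paddr_sketch(vaddr_t kva)
{
	struct vm_page *pg;

	pg = uvm_atopg(trunc_page(kva));	/* must be a mapped KVA */
	return (VM_PAGE_TO_PHYS(pg) | (kva & PAGE_MASK));
}
#endif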