xref: /dragonfly/sys/vm/vm_glue.c (revision 3170ffd7)
/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_glue.c,v 1.94.2.4 2003/01/13 22:51:17 dillon Exp $
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <vm/vm_page2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler (void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

#ifdef INVARIANTS

static int swap_debug = 0;
SYSCTL_INT(_vm, OID_AUTO, swap_debug,
	CTLFLAG_RW, &swap_debug, 0, "");

#endif

static int scheduler_notify;

static void swapout (struct proc *);

/*
 * No requirements.
 */
int
kernacc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)", rw));

	/*
	 * The globaldata space is not part of the kernel_map proper,
	 * check access separately.
	 */
	if (is_globaldata_space((vm_offset_t)addr, (vm_offset_t)(addr + len)))
		return (TRUE);

	/*
	 * Nominal kernel memory access - check access via kernel_map.
	 */
	if ((vm_offset_t)addr + len > kernel_map.max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(&kernel_map, saddr, eaddr, prot, FALSE);

	return (rv == TRUE);
}
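
/*
 * Example (hypothetical sketch, not part of the original file): a
 * kernel debugger-style routine might use kernacc() to validate a
 * kernel address range before dereferencing it.  `kaddr', `kbuf' and
 * `len' are assumed names used only for this illustration:
 *
 *	if (!kernacc((c_caddr_t)kaddr, len, VM_PROT_READ))
 *		return (EFAULT);
 *	bcopy((const void *)kaddr, kbuf, len);
 */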

/*
 * No requirements.
 */
int
useracc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;
	vm_map_entry_t save_hint;
	vm_offset_t wrap;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to useracc (%x)", rw));
	prot = rw;
	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 */
	wrap = (vm_offset_t)addr + len;
	if (wrap > VM_MAX_USER_ADDRESS || wrap < (vm_offset_t)addr) {
		return (FALSE);
	}
	map = &curproc->p_vmspace->vm_map;
	vm_map_lock_read(map);
	/*
	 * We save the map hint, and restore it.  Useracc appears to distort
	 * the map hint unnecessarily.
	 */
	save_hint = map->hint;
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
				     round_page(wrap), prot, TRUE);
	map->hint = save_hint;
	vm_map_unlock_read(map);

	return (rv == TRUE);
}
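
/*
 * Example (hypothetical sketch): a driver validating a user buffer
 * before copying from it.  The check is only a snapshot of the map;
 * the copyin() itself can still fail and must be checked.  `ubuf',
 * `kbuf' and `len' are assumed names used only for this illustration:
 *
 *	if (!useracc(ubuf, len, VM_PROT_READ))
 *		return (EFAULT);
 *	error = copyin(ubuf, kbuf, len);
 *	if (error)
 *		return (error);
 */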

/*
 * No requirements.
 */
void
vslock(caddr_t addr, u_int len)
{
	if (len) {
		vm_map_wire(&curproc->p_vmspace->vm_map,
			    trunc_page((vm_offset_t)addr),
			    round_page((vm_offset_t)addr + len), 0);
	}
}

/*
 * No requirements.
 */
void
vsunlock(caddr_t addr, u_int len)
{
	if (len) {
		vm_map_wire(&curproc->p_vmspace->vm_map,
			    trunc_page((vm_offset_t)addr),
			    round_page((vm_offset_t)addr + len),
			    KM_PAGEABLE);
	}
}
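
/*
 * Example (hypothetical sketch): vslock()/vsunlock() bracket a user
 * buffer around an operation that must not take page faults, such as
 * a physio-style transfer directly into user memory.  The calls must
 * pair exactly over the same range; do_raw_transfer() is an assumed
 * helper used only for this illustration:
 *
 *	vslock(udata, ulen);
 *	error = do_raw_transfer(udata, ulen);
 *	vsunlock(udata, ulen);
 */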

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 *
 * No requirements.
 */
void
vm_fork(struct proc *p1, struct proc *p2, int flags)
{
	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory if it is shared: essentially this
		 * turns memory shared amongst threads into memory that
		 * is copy-on-write locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_sysref.refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(ONLY_LWP_IN_PROC(p1), NULL, flags);
		return;
	}

	if (flags & RFMEM) {
		vmspace_ref(p1->p_vmspace);
		p2->p_vmspace = p1->p_vmspace;
	}

	while (vm_page_count_severe()) {
		vm_wait(0);
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	pmap_init_proc(p2);
}
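
/*
 * Example (illustrative only; the exact flag combinations live in the
 * fork1() callers): RFMEM selects sharing the parent's vmspace by
 * reference, roughly the vfork(2) behavior, while omitting it produces
 * a copy-on-write duplicate via vmspace_fork(), roughly fork(2):
 *
 *	vm_fork(p1, p2, RFPROC);		COW copy, fork-style
 *	vm_fork(p1, p2, RFPROC | RFMEM);	shared vmspace, vfork-style
 */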

/*
 * Set default limits for VM system.  Call during proc0's initialization.
 *
 * Called from the low level boot code only.
 */
void
vm_init_limits(struct proc *p)
{
	int rss_limit;

	/*
	 * Set up the initial limits on process VM. Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB (512 pages with 4KB pages) */
	rss_limit = max(vmstats.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

/*
 * Faultin the specified process.  Note that the process can be in any
 * state.  Just clear P_SWAPPEDOUT and call wakeup in case the process is
 * sleeping.
 *
 * No requirements.
 */
void
faultin(struct proc *p)
{
	if (p->p_flags & P_SWAPPEDOUT) {
		/*
		 * The process is waiting in the kernel to return to user
		 * mode but cannot until P_SWAPPEDOUT gets cleared.
		 */
		lwkt_gettoken(&p->p_token);
		p->p_flags &= ~(P_SWAPPEDOUT | P_SWAPWAIT);
#ifdef INVARIANTS
		if (swap_debug)
			kprintf("swapping in %d (%s)\n", p->p_pid, p->p_comm);
#endif
		wakeup(p);
		lwkt_reltoken(&p->p_token);
	}
}
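
/*
 * Counterpart sketch (hypothetical; the real logic lives in the
 * trap/userret path): a thread that finds P_SWAPPEDOUT set on its way
 * back to user mode requests a swapin and sleeps until faultin()
 * above clears the flag and wakes it:
 *
 *	while (p->p_flags & P_SWAPPEDOUT) {
 *		p->p_flags |= P_SWAPWAIT;
 *		swapin_request();
 *		tsleep(p, 0, "swout", 0);
 *	}
 */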

/*
 * Kernel initialization eventually falls through to this function,
 * which is process 0.
 *
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
struct scheduler_info {
	struct proc *pp;
	int ppri;
};

static int scheduler_callback(struct proc *p, void *data);

static void
scheduler(void *dummy)
{
	struct scheduler_info info;
	struct proc *p;

	KKASSERT(!IN_CRITICAL_SECT(curthread));
loop:
	scheduler_notify = 0;
	/*
	 * Don't try to swap anything in if we are low on memory.
	 */
	if (vm_page_count_severe()) {
		vm_wait(0);
		goto loop;
	}

	/*
	 * Look for a good candidate to wake up
	 */
	info.pp = NULL;
	info.ppri = INT_MIN;
	allproc_scan(scheduler_callback, &info);

	/*
	 * Nothing to do, back to sleep for at least 1/10 of a second.  If
	 * we are woken up, immediately process the next request.  If
	 * multiple requests have built up, the first is processed
	 * immediately and the rest are staggered.
	 */
	if ((p = info.pp) == NULL) {
		tsleep(&proc0, 0, "nowork", hz / 10);
		if (scheduler_notify == 0)
			tsleep(&scheduler_notify, 0, "nowork", 0);
		goto loop;
	}

	/*
	 * Fault the selected process in, then wait for a short period of
	 * time and loop up.
	 *
	 * XXX we need a heuristic to get a measure of system stress and
	 * then adjust our stagger wakeup delay accordingly.
	 */
	lwkt_gettoken(&proc_token);
	faultin(p);
	p->p_swtime = 0;
	PRELE(p);
	lwkt_reltoken(&proc_token);
	tsleep(&proc0, 0, "swapin", hz / 10);
	goto loop;
}

/*
 * The caller must hold proc_token.
 */
static int
scheduler_callback(struct proc *p, void *data)
{
	struct scheduler_info *info = data;
	struct lwp *lp;
	segsz_t pgs;
	int pri;

	if (p->p_flags & P_SWAPWAIT) {
		pri = 0;
		FOREACH_LWP_IN_PROC(lp, p) {
			/* XXX lwp might need a different metric */
			pri += lp->lwp_slptime;
		}
		pri += p->p_swtime - p->p_nice * 8;

		/*
		 * The more pages paged out while we were swapped,
		 * the more work we have to do to get up and running
		 * again and the lower our wakeup priority.
		 *
		 * Each second of sleep time is worth ~1MB
		 */
		lwkt_gettoken(&p->p_vmspace->vm_map.token);
		pgs = vmspace_resident_count(p->p_vmspace);
		if (pgs < p->p_vmspace->vm_swrss) {
			pri -= (p->p_vmspace->vm_swrss - pgs) /
				(1024 * 1024 / PAGE_SIZE);
		}
		lwkt_reltoken(&p->p_vmspace->vm_map.token);

		/*
		 * If this process is higher priority and there is
		 * enough space, then select this process instead of
		 * the previous selection.
		 */
		if (pri > info->ppri) {
			if (info->pp)
				PRELE(info->pp);
			PHOLD(p);
			info->pp = p;
			info->ppri = pri;
		}
	}
	return(0);
}
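
/*
 * Worked example of the priority arithmetic above, assuming a 4KB
 * PAGE_SIZE: (1024 * 1024 / PAGE_SIZE) is 256 pages, so each 256-page
 * (~1MB) deficit between vm_swrss (the resident set at swapout time)
 * and the current resident count costs one priority point - the same
 * amount one extra second of lwp_slptime earns.  A process that lost
 * 10MB of its resident set while swapped out therefore needs roughly
 * 10 more seconds of sleep time to compete evenly for swapin.
 */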

/*
 * SMP races ok.
 * No requirements.
 */
void
swapin_request(void)
{
	if (scheduler_notify == 0) {
		scheduler_notify = 1;
		wakeup(&scheduler_notify);
	}
}

#ifndef NO_SWAPPING

#define	swappable(p) \
	(((p)->p_lock == 0) && \
	((p)->p_flags & (P_TRACED|P_SYSTEM|P_SWAPPEDOUT|P_WEXIT)) == 0)


/*
 * Swap_idle_threshold1 is the guaranteed swapped-in time for a process
 */
static int swap_idle_threshold1 = 15;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0, "Guaranteed process resident time (sec)");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.  Default is
 * one minute.
 */
static int swap_idle_threshold2 = 60;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0, "Time (sec) a process can idle before being swapped");

/*
 * Swapout is driven by the pageout daemon.  Very simple: we find eligible
 * procs and mark them as being swapped out.  This will cause the kernel
 * to prefer to page out those procs' pages first and the procs in question
 * will not return to user mode until the swapper tells them they can.
 *
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Otherwise we swap out the longest-sleeping or
 * stopped process, if any, and failing that the longest-resident
 * process.
 */

static int swapout_procs_callback(struct proc *p, void *data);

/*
 * No requirements.
 */
void
swapout_procs(int action)
{
	allproc_scan(swapout_procs_callback, &action);
}
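
/*
 * Example (hypothetical sketch): the pageout daemon might drive this
 * under memory pressure, or periodically when idle swapping is
 * enabled.  `memory_is_tight' and `idle_swapping_enabled' are assumed
 * predicates used only for this illustration; the real policy lives
 * in the pageout code:
 *
 *	if (memory_is_tight)
 *		swapout_procs(VM_SWAP_NORMAL);
 *	else if (idle_swapping_enabled)
 *		swapout_procs(VM_SWAP_IDLE);
 */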

/*
 * The caller must hold proc_token
 */
static int
swapout_procs_callback(struct proc *p, void *data)
{
	struct lwp *lp;
	int action = *(int *)data;
	int minslp = -1;

	if (!swappable(p))
		return(0);

	lwkt_gettoken(&p->p_token);

	/*
	 * We only consider active processes.
	 */
	if (p->p_stat != SACTIVE && p->p_stat != SSTOP) {
		lwkt_reltoken(&p->p_token);
		return(0);
	}

	FOREACH_LWP_IN_PROC(lp, p) {
		/*
		 * do not swap out a realtime process
		 */
		if (RTP_PRIO_IS_REALTIME(lp->lwp_rtprio.type)) {
			lwkt_reltoken(&p->p_token);
			return(0);
		}

		/*
		 * Guarantee swap_idle_threshold1 time in memory
		 */
		if (lp->lwp_slptime < swap_idle_threshold1) {
			lwkt_reltoken(&p->p_token);
			return(0);
		}

		/*
		 * If the system is under memory stress, or if we
		 * are swapping idle processes >= swap_idle_threshold2,
		 * then swap the process out.
		 */
		if (((action & VM_SWAP_NORMAL) == 0) &&
		    (((action & VM_SWAP_IDLE) == 0) ||
		     (lp->lwp_slptime < swap_idle_threshold2))) {
			lwkt_reltoken(&p->p_token);
			return(0);
		}

		if (minslp == -1 || lp->lwp_slptime < minslp)
			minslp = lp->lwp_slptime;
	}

	/*
	 * If the process has been asleep for a while, swap
	 * it out.
	 */
	if ((action & VM_SWAP_NORMAL) ||
	    ((action & VM_SWAP_IDLE) &&
	     (minslp > swap_idle_threshold2))) {
		swapout(p);
	}

	/*
	 * cleanup our reference
	 */
	lwkt_reltoken(&p->p_token);

	return(0);
}

/*
 * The caller must hold proc_token and p->p_token
 */
static void
swapout(struct proc *p)
{
#ifdef INVARIANTS
	if (swap_debug)
		kprintf("swapping out %d (%s)\n", p->p_pid, p->p_comm);
#endif
	++p->p_ru.ru_nswap;

	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
	p->p_flags |= P_SWAPPEDOUT;
	p->p_swtime = 0;
}

#endif /* !NO_SWAPPING */