/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_glue.c,v 1.94.2.4 2003/01/13 22:51:17 dillon Exp $
 * $DragonFly: src/sys/vm/vm_glue.c,v 1.32 2005/06/25 20:03:32 dillon Exp $
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <vm/vm_page2.h>
#include <sys/thread2.h>

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */

static void vm_init_limits (void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler (void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

static void swapout (struct proc *);

/*
 * Test whether the kernel map allows the given access (VM_PROT_* bits
 * in "rw") over the byte range [addr, addr + len).
 */
int
kernacc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	/*
	 * The per-cpu globaldata space is not part of the kernel_map
	 * proper, so check it separately.
	 */
	if (rv == FALSE && is_globaldata_space(saddr, eaddr))
		rv = TRUE;
	return (rv == TRUE);
}
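
/*
 * A minimal usage sketch (hypothetical, compiled out): a kernel consumer
 * that wants to verify a kernel buffer is readable and writable before
 * touching it might check it with kernacc() like this.  The function name
 * and error handling here are illustrative only.
 */
#if 0
static int
example_check_kernel_buffer(void *buf, int size)
{
	/* kernacc() takes VM_PROT_* bits as its "rw" argument */
	if (!kernacc((c_caddr_t)buf, size, VM_PROT_READ | VM_PROT_WRITE))
		return (EFAULT);	/* some mapping in the range denies access */
	return (0);
}
#endif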
/*
 * Test whether the current process's user map allows the given access
 * over the byte range [addr, addr + len).
 */
int
useracc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;
	vm_map_entry_t save_hint;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was once
	 * only used (as an end address) in trap.c.  Use it as an end address
	 * here too.  This bogusness has spread.  I just fixed where it was
	 * used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}
	map = &curproc->p_vmspace->vm_map;
	vm_map_lock_read(map);
	/*
	 * We save the map hint, and restore it.  Useracc appears to distort
	 * the map hint unnecessarily.
	 */
	save_hint = map->hint;
	rv = vm_map_check_protection(map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	map->hint = save_hint;
	vm_map_unlock_read(map);

	return (rv == TRUE);
}

/*
 * Wire the pages backing the specified user buffer so they cannot be
 * paged out while the kernel operates on them.
 */
void
vslock(caddr_t addr, u_int len)
{
	vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), 0);
}

/*
 * Unwire the pages wired by a previous vslock().
 */
void
vsunlock(caddr_t addr, u_int len)
{
	vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), KM_PAGEABLE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_fork(struct proc *p1, struct proc *p2, int flags)
{
	struct user *up;
	struct thread *td2;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory if it is shared.  Essentially this
		 * turns memory shared amongst threads into COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(p1, p2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	while (vm_page_count_severe()) {
		vm_wait();
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	td2 = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, -1);
	pmap_init_proc(p2, td2);
	lwkt_setpri(td2, TDPRI_KERN_USER);
	lwkt_set_comm(td2, "%s", p1->p_comm);

	up = p2->p_addr;

	/*
	 * p_stats currently points at fields in the user struct
	 * but not at &u, instead at p_addr.  Copy parts of
	 * p_stats; zero the rest of p_stats (statistics).
	 *
	 * If procsig->ps_refcnt is 1 and p2->p_sigacts is NULL we don't need
	 * to share sigacts, so we use the up->u_sigacts.
	 */
	p2->p_stats = &up->u_stats;
	if (p2->p_sigacts == NULL) {
		if (p2->p_procsig->ps_refcnt != 1)
			printf("PID:%d NULL sigacts with refcnt not 1!\n",
			    p2->p_pid);
		p2->p_sigacts = &up->u_sigacts;
		up->u_sigacts = *p1->p_sigacts;
	}

	bzero(&up->u_stats, sizeof(struct pstats));
	bcopy(&p1->p_stats->p_prof, &up->u_stats.p_prof,
	    sizeof(struct uprof));
	bcopy(&p1->p_thread->td_start, &p2->p_thread->td_start,
	    sizeof(struct timeval));

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(p1, p2, flags);
}

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{
	p->p_stats = NULL;
	cpu_proc_wait(p);
	vmspace_exitfree(p);	/* and clean-out the vmspace */
}
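
/*
 * Hypothetical illustration (compiled out): how the rfork(2)-style flag
 * bits select among the vm_fork() paths above.  The calls are schematic;
 * real callers live in the fork paths of kern_fork.c.  When RFPROC is
 * clear and RFMEM is clear, vm_fork() divorces a shared vmspace into a
 * local COW copy instead of creating a new process.
 */
#if 0
static void
example_vm_fork_flags(struct proc *parent, struct proc *child)
{
	/* fork(2): new process, address space copied copy-on-write */
	vm_fork(parent, child, RFPROC);

	/* vfork(2)/threading style: new process shares the parent's vmspace */
	vm_fork(parent, child, RFPROC | RFMEM);
}
#endif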
/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(void *udata)
{
	struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the RSS limit to no less than 2MB (512 pages) */
	rss_limit = max(vmstats.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

/*
 * Bring a swapped-out process back into memory and, if it is runnable,
 * reschedule its thread.
 */
void
faultin(struct proc *p)
{
	if ((p->p_flag & P_INMEM) == 0) {
		++p->p_lock;	/* hold the process in memory while we work */

		pmap_swapin_proc(p);

		crit_enter();

		/*
		 * The process is in the kernel and controlled by LWKT,
		 * so we just schedule it rather than call setrunqueue().
		 */
		if (p->p_stat == SRUN)
			lwkt_schedule(p->p_thread);

		p->p_flag |= P_INMEM;

		/* undo the hold on p_lock set above */
		--p->p_lock;
		crit_exit();
	}
}

/*
 * Kernel initialization eventually falls through to this function,
 * which is process 0.
 *
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
/* ARGSUSED*/
static void
scheduler(void *dummy)
{
	struct proc *p;
	int pri;
	struct proc *pp;
	int ppri;

	KKASSERT(!IN_CRITICAL_SECT(curthread));
loop:
	if (vm_page_count_min()) {
		vm_wait();
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_stat == SRUN &&
		    (p->p_flag & (P_INMEM | P_SWAPPING)) == 0) {
			pri = p->p_swtime + p->p_slptime;
			if ((p->p_flag & P_SWAPINREQ) == 0) {
				pri -= p->p_nice * 8;
			}

			/*
			 * if this process is higher priority and there is
			 * enough space, then select this process instead of
			 * the previous selection.
			 */
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
	}

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, 0, "sched", 0);
		goto loop;
	}
	p->p_flag &= ~P_SWAPINREQ;

	/*
	 * We would like to bring someone in.  (only if there is space).
	 */
	faultin(p);
	p->p_swtime = 0;
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * A process is eligible for swapout only if it is not locked in memory,
 * not being traced, not a system process, currently resident, not
 * exiting, and not already being swapped.
 */
#define	swappable(p) \
	(((p)->p_lock == 0) && \
	((p)->p_flag & (P_TRACED|P_SYSTEM|P_INMEM|P_WEXIT|P_SWAPPING)) == P_INMEM)

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
    CTLFLAG_RW, &swap_idle_threshold1, 0, "");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
    CTLFLAG_RW, &swap_idle_threshold2, 0, "");
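
/*
 * The two knobs above are exported read-write, so they can be tuned at
 * run time from userland.  A hedged sketch (illustration only, not part
 * of the kernel build) using sysctlbyname(3); the new value of 30 is
 * arbitrary:
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int oldval, newval = 30;	/* 30 seconds idle before swapout */
	size_t len = sizeof(oldval);

	/* read the current value and install the new one in a single call */
	if (sysctlbyname("vm.swap_idle_threshold2", &oldval, &len,
	    &newval, sizeof(newval)) == 0)
		printf("vm.swap_idle_threshold2: %d -> %d\n", oldval, newval);
	return (0);
}
#endif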
/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(int action)
{
	struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;

	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
retry:
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		struct vmspace *vm;
		if (!swappable(p))
			continue;

		vm = p->p_vmspace;

		switch (p->p_stat) {
		default:
			continue;

		case SSLEEP:
		case SSTOP:
			/*
			 * do not swapout a realtime process
			 */
			if (RTP_PRIO_IS_REALTIME(p->p_rtprio.type))
				continue;

			/*
			 * YYY do not swapout a proc waiting on a critical
			 * event.
			 *
			 * Guarantee swap_idle_threshold1 time in memory
			 */
			if (p->p_slptime < swap_idle_threshold1)
				continue;

			/*
			 * If the system is under memory stress, or if we
			 * are swapping idle processes >= swap_idle_threshold2,
			 * then swap the process out.
			 */
			if (((action & VM_SWAP_NORMAL) == 0) &&
			    (((action & VM_SWAP_IDLE) == 0) ||
			    (p->p_slptime < swap_idle_threshold2)))
				continue;

			++vm->vm_refcnt;
			/*
			 * do not swapout a process that is waiting for VM
			 * data structures; there is a possible deadlock.
			 */
			if (lockmgr(&vm->vm_map.lock,
			    LK_EXCLUSIVE | LK_NOWAIT,
			    NULL, curthread)) {
				vmspace_free(vm);
				continue;
			}
			vm_map_unlock(&vm->vm_map);
			/*
			 * If the process has been asleep for awhile and had
			 * most of its pages taken away already, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (p->p_slptime > swap_idle_threshold2))) {
				swapout(p);
				vmspace_free(vm);
				didswap++;
				goto retry;
			}

			/*
			 * cleanup our reference
			 */
			vmspace_free(vm);
		}
	}
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapout(struct proc *p)
{
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif
	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	crit_enter();
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPING;
	if (p->p_flag & P_ONRUNQ)
		p->p_usched->remrunqueue(p);
	crit_exit();

	pmap_swapout_proc(p);

	p->p_flag &= ~P_SWAPPING;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */
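
/*
 * Hypothetical caller sketch (compiled out): the pageout daemon in
 * vm_pageout.c drives swapout_procs() roughly along these lines -
 * normal swapouts under memory pressure, idle swapouts when the idle
 * swapping policy is enabled.  Simplified; the flag name below is an
 * assumption, so consult the real pageout loop for the actual conditions.
 */
#if 0
static void
example_pageout_swap_policy(int swap_idle_enabled)
{
	if (vm_page_count_severe())
		swapout_procs(VM_SWAP_NORMAL);	/* memory stress: swap eligible procs */
	else if (swap_idle_enabled)
		swapout_procs(VM_SWAP_IDLE);	/* only long-idle processes */
}
#endif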