/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_glue.c,v 1.94.2.4 2003/01/13 22:51:17 dillon Exp $
 * $DragonFly: src/sys/vm/vm_glue.c,v 1.56 2008/07/01 02:02:56 dillon Exp $
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <vm/vm_page2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler (void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

#ifdef INVARIANTS

static int swap_debug = 0;
SYSCTL_INT(_vm, OID_AUTO, swap_debug,
	CTLFLAG_RW, &swap_debug, 0, "");

#endif

static int scheduler_notify;

static void swapout (struct proc *);

/*
 * No requirements.
 */
int
kernacc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	/*
	 * The globaldata space is not part of the kernel_map proper,
	 * check access separately.
	 */
	if (is_globaldata_space((vm_offset_t)addr, (vm_offset_t)(addr + len)))
		return (TRUE);

	/*
	 * Nominal kernel memory access - check access via kernel_map.
	 */
	if ((vm_offset_t)addr + len > kernel_map.max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(&kernel_map, saddr, eaddr, prot, FALSE);

	return (rv == TRUE);
}

/*
 * No requirements.
 */
int
useracc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;
	vm_map_entry_t save_hint;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAX_USER_ADDRESS is an end address, not a max.  It was
	 * once only used (as an end address) in trap.c.  Use it as an end
	 * address here too.  This bogusness has spread.  I just fixed where
	 * it was used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t)addr + len > /* XXX */ VM_MAX_USER_ADDRESS ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	map = &curproc->p_vmspace->vm_map;
	vm_map_lock_read(map);
	/*
	 * We save the map hint, and restore it.  Useracc appears to distort
	 * the map hint unnecessarily.
	 */
	save_hint = map->hint;
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
				     round_page((vm_offset_t)addr + len),
				     prot, TRUE);
	map->hint = save_hint;
	vm_map_unlock_read(map);

	return (rv == TRUE);
}
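
/*
 * Illustrative use of useracc() (a hypothetical caller, not part of
 * this file): a syscall that wants to pre-validate a user buffer
 * before touching it might do
 *
 *	if (!useracc(uap->buf, uap->len, VM_PROT_READ))
 *		return (EFAULT);
 *
 * The check is advisory - the mapping can change after the check -
 * so copyin()/copyout() must still handle faults.
 */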

/*
 * No requirements.
 */
void
vslock(caddr_t addr, u_int len)
{
	if (len) {
		vm_map_wire(&curproc->p_vmspace->vm_map,
			    trunc_page((vm_offset_t)addr),
			    round_page((vm_offset_t)addr + len), 0);
	}
}

/*
 * No requirements.
 */
void
vsunlock(caddr_t addr, u_int len)
{
	if (len) {
		vm_map_wire(&curproc->p_vmspace->vm_map,
			    trunc_page((vm_offset_t)addr),
			    round_page((vm_offset_t)addr + len),
			    KM_PAGEABLE);
	}
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 *
 * No requirements.
 */
void
vm_fork(struct proc *p1, struct proc *p2, int flags)
{
	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared.  Essentially
		 * this converts memory shared amongst threads into
		 * copy-on-write locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_sysref.refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(ONLY_LWP_IN_PROC(p1), NULL, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		sysref_get(&p1->p_vmspace->vm_sysref);
	}

	while (vm_page_count_severe()) {
		vm_wait(0);
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	pmap_init_proc(p2);
}

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 *
 * No requirements.
 */
void
vm_waitproc(struct proc *p)
{
	cpu_proc_wait(p);
	vmspace_exitfree(p);	/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.  Called during proc0's initialization.
 *
 * Called from the low level boot code only.
 */
void
vm_init_limits(struct proc *p)
{
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be half of (reasonably) available memory.
	 * Since this is a soft limit, it comes into effect only when the
	 * system is out of memory - half of main memory helps to favor
	 * smaller processes, and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(vmstats.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
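
/*
 * Worked example of the RSS floor above (assuming the common 4KB
 * PAGE_SIZE): max(v_free_count, 512) guarantees at least 512 pages,
 * and ptoa(512) == 512 * 4096 == 2MB, which is exactly the "no less
 * than 2MB" floor the in-line comment refers to.
 */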

/*
 * Fault in the specified process.  Note that the process can be in any
 * state.  Just clear P_SWAPPEDOUT and call wakeup in case the process
 * is sleeping.
 *
 * No requirements.
 */
void
faultin(struct proc *p)
{
	if (p->p_flag & P_SWAPPEDOUT) {
		/*
		 * The process is waiting in the kernel to return to user
		 * mode but cannot until P_SWAPPEDOUT gets cleared.
		 */
		crit_enter();
		lwkt_gettoken(&proc_token);
		p->p_flag &= ~(P_SWAPPEDOUT | P_SWAPWAIT);
#ifdef INVARIANTS
		if (swap_debug)
			kprintf("swapping in %d (%s)\n", p->p_pid, p->p_comm);
#endif
		wakeup(p);
		lwkt_reltoken(&proc_token);
		crit_exit();
	}
}

/*
 * Kernel initialization eventually falls through to this function,
 * which is process 0.
 *
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
struct scheduler_info {
	struct proc *pp;
	int ppri;
};

static int scheduler_callback(struct proc *p, void *data);

static void
scheduler(void *dummy)
{
	struct scheduler_info info;
	struct proc *p;

	KKASSERT(!IN_CRITICAL_SECT(curthread));
loop:
	scheduler_notify = 0;
	/*
	 * Don't try to swap anything in if we are low on memory.
	 */
	if (vm_page_count_severe()) {
		vm_wait(0);
		goto loop;
	}

	/*
	 * Look for a good candidate to wake up.
	 */
	info.pp = NULL;
	info.ppri = INT_MIN;
	allproc_scan(scheduler_callback, &info);

	/*
	 * Nothing to do, back to sleep for at least 1/10 of a second.  If
	 * we are woken up, immediately process the next request.  If
	 * multiple requests have built up the first is processed
	 * immediately and the rest are staggered.
	 */
	if ((p = info.pp) == NULL) {
		tsleep(&proc0, 0, "nowork", hz / 10);
		if (scheduler_notify == 0)
			tsleep(&scheduler_notify, 0, "nowork", 0);
		goto loop;
	}

	/*
	 * Fault the selected process in, then wait for a short period of
	 * time and loop back up.
	 *
	 * XXX we need a heuristic to get a measure of system stress and
	 * then adjust our stagger wakeup delay accordingly.
	 */
	lwkt_gettoken(&proc_token);
	faultin(p);
	p->p_swtime = 0;
	PRELE(p);
	lwkt_reltoken(&proc_token);
	tsleep(&proc0, 0, "swapin", hz / 10);
	goto loop;
}

/*
 * The caller must hold proc_token.
 */
static int
scheduler_callback(struct proc *p, void *data)
{
	struct scheduler_info *info = data;
	struct lwp *lp;
	segsz_t pgs;
	int pri;

	if (p->p_flag & P_SWAPWAIT) {
		pri = 0;
		FOREACH_LWP_IN_PROC(lp, p) {
			/* XXX lwp might need a different metric */
			pri += lp->lwp_slptime;
		}
		pri += p->p_swtime - p->p_nice * 8;

		/*
		 * The more pages paged out while we were swapped out,
		 * the more work we have to do to get up and running
		 * again and the lower our wakeup priority.
		 *
		 * Each second of sleep time is worth ~1MB.
		 */
		pgs = vmspace_resident_count(p->p_vmspace);
		if (pgs < p->p_vmspace->vm_swrss) {
			pri -= (p->p_vmspace->vm_swrss - pgs) /
			       (1024 * 1024 / PAGE_SIZE);
		}

		/*
		 * If this process is higher priority and there is
		 * enough space, then select this process instead of
		 * the previous selection.
		 */
		if (pri > info->ppri) {
			if (info->pp)
				PRELE(info->pp);
			PHOLD(p);
			info->pp = p;
			info->ppri = pri;
		}
	}
	return(0);
}
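
/*
 * Worked example of the wakeup priority above (illustrative numbers,
 * assuming a 4KB PAGE_SIZE): a single-lwp process that has slept 10
 * seconds, has been swapped out for 20 seconds (p_swtime), and runs at
 * nice 0 starts at 10 + 20 - 0 = 30.  If 10MB were paged out while it
 * was swapped out (vm_swrss exceeds the resident count by 2560 pages),
 * the penalty is 2560 / (1024 * 1024 / 4096) = 10, for a final wakeup
 * priority of 20.
 */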

/*
 * SMP races ok.
 * No requirements.
 */
void
swapin_request(void)
{
	if (scheduler_notify == 0) {
		scheduler_notify = 1;
		wakeup(&scheduler_notify);
	}
}

#ifndef NO_SWAPPING

#define	swappable(p) \
	(((p)->p_lock == 0) && \
	 ((p)->p_flag & (P_TRACED|P_SYSTEM|P_SWAPPEDOUT|P_WEXIT)) == 0)

/*
 * swap_idle_threshold1 is the minimum time (in seconds) a process is
 * guaranteed to stay swapped in.
 */
static int swap_idle_threshold1 = 15;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0,
	"Guaranteed process resident time (sec)");

/*
 * swap_idle_threshold2 is the time (in seconds) a process can be idle
 * before it will be swapped out, if idle swapping is enabled.  The
 * default is one minute.
 */
static int swap_idle_threshold2 = 60;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0,
	"Time (sec) a process can idle before being swapped");

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and mark them as being swapped out.  This will cause the kernel
 * to prefer to pageout those proc's pages first and the procs in question
 * will not return to user mode until the swapper tells them they can.
 *
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped
 * process, if any, otherwise the longest-resident process.
 */

static int swapout_procs_callback(struct proc *p, void *data);

/*
 * No requirements.
 */
void
swapout_procs(int action)
{
	lwkt_gettoken(&vmspace_token);
	allproc_scan(swapout_procs_callback, &action);
	lwkt_reltoken(&vmspace_token);
}

/*
 * The caller must hold proc_token and vmspace_token.
 */
static int
swapout_procs_callback(struct proc *p, void *data)
{
	struct vmspace *vm;
	struct lwp *lp;
	int action = *(int *)data;
	int minslp = -1;

	if (!swappable(p))
		return(0);

	vm = p->p_vmspace;

	/*
	 * We only consider active processes.
	 */
	if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
		return(0);

	FOREACH_LWP_IN_PROC(lp, p) {
		/*
		 * do not swap out a realtime process
		 */
		if (RTP_PRIO_IS_REALTIME(lp->lwp_rtprio.type))
			return(0);

		/*
		 * Guarantee swap_idle_threshold1 time in memory
		 */
		if (lp->lwp_slptime < swap_idle_threshold1)
			return(0);

		/*
		 * If the system is under memory stress, or if we
		 * are swapping idle processes >= swap_idle_threshold2,
		 * then swap the process out.
		 */
		if (((action & VM_SWAP_NORMAL) == 0) &&
		    (((action & VM_SWAP_IDLE) == 0) ||
		     (lp->lwp_slptime < swap_idle_threshold2))) {
			return(0);
		}

		if (minslp == -1 || lp->lwp_slptime < minslp)
			minslp = lp->lwp_slptime;
	}

	sysref_get(&vm->vm_sysref);

	/*
	 * If the process has been asleep for awhile, swap
	 * it out.
	 */
	if ((action & VM_SWAP_NORMAL) ||
	    ((action & VM_SWAP_IDLE) &&
	     (minslp > swap_idle_threshold2))) {
		swapout(p);
	}

	/*
	 * cleanup our reference
	 */
	sysref_put(&vm->vm_sysref);

	return(0);
}
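
/*
 * Illustrative callers of swapout_procs() (hypothetical; the real call
 * sites live in the pageout daemon, not in this file):
 *
 *	swapout_procs(VM_SWAP_NORMAL);	(memory pressure path)
 *	swapout_procs(VM_SWAP_IDLE);	(periodic idle-swapping path)
 *
 * VM_SWAP_NORMAL bypasses the swap_idle_threshold2 check, while
 * VM_SWAP_IDLE only swaps processes that have been idle at least
 * that long.
 */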

/*
 * The caller must hold proc_token and vmspace_token.
 */
static void
swapout(struct proc *p)
{
#ifdef INVARIANTS
	if (swap_debug)
		kprintf("swapping out %d (%s)\n", p->p_pid, p->p_comm);
#endif
	++p->p_ru.ru_nswap;

	/*
	 * Remember the resident page count so the swapin scheduler can
	 * later estimate how much was paged out while we were gone.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
	p->p_flag |= P_SWAPPEDOUT;
	p->p_swtime = 0;
}

#endif /* !NO_SWAPPING */