/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_glue.c,v 1.94.2.4 2003/01/13 22:51:17 dillon Exp $
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/unistd.h>

#include <machine/limits.h>
#include <machine/vmm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_page2.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler (void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL);

#ifdef INVARIANTS

static int swap_debug = 0;
SYSCTL_INT(_vm, OID_AUTO, swap_debug,
	CTLFLAG_RW, &swap_debug, 0, "");

#endif

static int scheduler_notify;

static void swapout (struct proc *);

/*
 * No requirements.
 */
int
kernacc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
		("illegal ``rw'' argument to kernacc (%x)", rw));

	/*
	 * The globaldata space is not part of the kernel_map proper,
	 * check access separately.
	 */
	if (is_globaldata_space((vm_offset_t)addr, (vm_offset_t)(addr + len)))
		return (TRUE);

	/*
	 * Nominal kernel memory access - check access via kernel_map.
	 */
	if ((vm_offset_t)addr + len > vm_map_max(&kernel_map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(&kernel_map, saddr, eaddr, prot, FALSE);

	return (rv == TRUE);
}

/*
 * No requirements.
 */
int
useracc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;
	vm_offset_t wrap;
	vm_offset_t gpa;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
		("illegal ``rw'' argument to useracc (%x)", rw));
	prot = rw;

	if (curthread->td_vmm) {
		if (vmm_vm_get_gpa(curproc, (register_t *)&gpa, (register_t) addr))
			panic("%s: could not get GPA\n", __func__);
		addr = (c_caddr_t) gpa;
	}

	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 */
	wrap = (vm_offset_t)addr + len;
	if (wrap > VM_MAX_USER_ADDRESS || wrap < (vm_offset_t)addr) {
		return (FALSE);
	}
	map = &curproc->p_vmspace->vm_map;
	vm_map_lock_read(map);

	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
				     round_page(wrap), prot, TRUE);
	vm_map_unlock_read(map);

	return (rv == TRUE);
}

/*
 * No requirements.
 */
void
vslock(caddr_t addr, u_int len)
{
	if (len) {
		vm_map_wire(&curproc->p_vmspace->vm_map,
			    trunc_page((vm_offset_t)addr),
			    round_page((vm_offset_t)addr + len), 0);
	}
}
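
/*
 * A minimal usage sketch of the helpers above, kept under #if 0.  The
 * function name, the caller and the copyin() error path are illustrative
 * assumptions, not part of this file: a driver that wants to read a user
 * buffer can validate it with useracc(), wire it with vslock() for the
 * duration of the copy, and unwire it with vsunlock().
 */
#if 0
static int
example_read_user_buf(c_caddr_t uaddr, u_int len, void *kbuf)
{
	int error;

	if (!useracc(uaddr, len, VM_PROT_READ))
		return (EFAULT);		/* not readable by curproc */
	vslock((caddr_t)uaddr, len);		/* wire the backing pages */
	error = copyin(uaddr, kbuf, len);
	vsunlock((caddr_t)uaddr, len);		/* unwire when done */
	return (error);
}
#endif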

/*
 * No requirements.
 */
void
vsunlock(caddr_t addr, u_int len)
{
	if (len) {
		vm_map_wire(&curproc->p_vmspace->vm_map,
			    trunc_page((vm_offset_t)addr),
			    round_page((vm_offset_t)addr + len),
			    KM_PAGEABLE);
	}
}

/*
 * Implement fork's actions on an address space.  Here we arrange for the
 * address space to be copied or referenced, allocate a user struct (pcb
 * and kernel stack), then call the machine-dependent layer to fill those
 * in and make the new process ready to run.  The new process is set up
 * so that it returns directly to user mode to avoid stack copying and
 * relocation problems.
 *
 * If p2 is NULL and RFPROC is 0 we are just divorcing parts of the process
 * from itself.
 *
 * Otherwise if p2 is NULL the new vmspace is not to be associated with any
 * process or thread (so things like /dev/upmap and /dev/lpmap are not
 * retained).
 *
 * Otherwise if p2 is not NULL then process specific mappings will be forked.
 * If lp2 is not NULL only the thread-specific mappings for lp2 are forked,
 * otherwise no thread-specific mappings are forked.
 *
 * No requirements.
 */
void
vm_fork(struct proc *p1, struct proc *p2, struct lwp *lp2, int flags)
{
	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared, essentially
		 * this changes shared memory amongst threads, into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (vmspace_getrefs(p1->p_vmspace) > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(ONLY_LWP_IN_PROC(p1), NULL, flags);
		return;
	}

	if (flags & RFMEM) {
		vmspace_ref(p1->p_vmspace);
		p2->p_vmspace = p1->p_vmspace;
	}

	while (vm_page_count_severe()) {
		vm_wait(0);
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace, p2, lp2);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	pmap_init_proc(p2);
}

/*
 * Set default limits for VM system.  Call during proc0's initialization.
 *
 * Called from the low level boot code only.
 */
void
vm_init_limits(struct proc *p)
{
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to the amount of currently free memory.  Since this is a
	 * soft limit, it comes into effect only when the system is out of
	 * memory - basing it on free memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(vmstats.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
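
/*
 * Illustrative userland sketch (an assumption, not kernel code): the
 * vm_fork() paths above are normally reached through fork(2), vfork(2)
 * and rfork(2).  Roughly, rfork(RFPROC | RFMEM) takes the vmspace_ref()
 * branch, so the child shares the parent's address space, while a plain
 * fork() takes the vmspace_fork() branch and gets a copy-on-write copy.
 */
#if 0
#include <sys/types.h>
#include <unistd.h>		/* fork(), rfork() and RF* flags on the BSDs */

static void
example_fork_modes(void)
{
	pid_t pid;

	pid = rfork(RFPROC | RFMEM);	/* child shares the parent's vmspace */
	pid = fork();			/* child gets a COW copy of the vmspace */
	(void)pid;
}
#endif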

/*
 * Faultin the specified process.  Note that the process can be in any
 * state.  Just clear P_SWAPPEDOUT and call wakeup in case the process is
 * sleeping.
 *
 * No requirements.
 */
void
faultin(struct proc *p)
{
	if (p->p_flags & P_SWAPPEDOUT) {
		/*
		 * The process is waiting in the kernel to return to user
		 * mode but cannot until P_SWAPPEDOUT gets cleared.
		 */
		lwkt_gettoken(&p->p_token);
		p->p_flags &= ~(P_SWAPPEDOUT | P_SWAPWAIT);
#ifdef INVARIANTS
		if (swap_debug)
			kprintf("swapping in %d (%s)\n", p->p_pid, p->p_comm);
#endif
		wakeup(p);
		lwkt_reltoken(&p->p_token);
	}
}

/*
 * Kernel initialization eventually falls through to this function,
 * which is process 0.
 *
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
struct scheduler_info {
	struct proc *pp;
	int ppri;
};

static int scheduler_callback(struct proc *p, void *data);

static void
scheduler(void *dummy)
{
	struct scheduler_info info;
	struct proc *p;

	KKASSERT(!IN_CRITICAL_SECT(curthread));
loop:
	scheduler_notify = 0;
	/*
	 * Don't try to swap anything in if we are low on memory.
	 */
	if (vm_page_count_severe()) {
		vm_wait(0);
		goto loop;
	}

	/*
	 * Look for a good candidate to wake up.
	 *
	 * XXX we should make the scheduler thread pcpu and then use a
	 * segmented allproc scan.
	 */
	info.pp = NULL;
	info.ppri = INT_MIN;
	allproc_scan(scheduler_callback, &info, 0);

	/*
	 * Nothing to do, back to sleep for at least 1/10 of a second.  If
	 * we are woken up, immediately process the next request.  If
	 * multiple requests have built up the first is processed
	 * immediately and the rest are staggered.
	 */
	if ((p = info.pp) == NULL) {
		tsleep(&proc0, 0, "nowork", hz / 10);
		if (scheduler_notify == 0)
			tsleep(&scheduler_notify, 0, "nowork", 0);
		goto loop;
	}

	/*
	 * Fault the selected process in, then wait for a short period of
	 * time and loop back up.
	 *
	 * XXX we need a heuristic to get a measure of system stress and
	 * then adjust our stagger wakeup delay accordingly.
	 */
	lwkt_gettoken(&p->p_token);
	faultin(p);
	p->p_swtime = 0;
	lwkt_reltoken(&p->p_token);
	PRELE(p);
	tsleep(&proc0, 0, "swapin", hz / 10);
	goto loop;
}
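
/*
 * Worked example (values hypothetical) of the swap-in priority computed
 * by scheduler_callback() below: one lwp asleep for 10 seconds, the
 * process swapped out for 30 seconds at nice 0; its RSS was 2048 pages
 * when it was swapped out and 1024 pages remain resident, i.e. 4MB was
 * paged out.  With 4K pages each megabyte of deficit costs one point:
 *
 *	pri  = 10 + 30 - 0 * 8				 = 40
 *	pri -= (2048 - 1024) / (1024 * 1024 / PAGE_SIZE) = 40 - 4 = 36
 */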

/*
 * Process only has its hold count bumped; we need the token
 * to safely scan the LWPs.
 */
static int
scheduler_callback(struct proc *p, void *data)
{
	struct scheduler_info *info = data;
	struct vmspace *vm;
	struct lwp *lp;
	segsz_t pgs;
	int pri;

	/*
	 * We only care about processes in swap-wait.  Interlock test with
	 * token if the flag is found set.
	 */
	if ((p->p_flags & P_SWAPWAIT) == 0)
		return 0;
	lwkt_gettoken_shared(&p->p_token);
	if ((p->p_flags & P_SWAPWAIT) == 0) {
		lwkt_reltoken(&p->p_token);
		return 0;
	}

	/*
	 * Calculate priority for swap-in
	 */
	pri = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		/* XXX lwp might need a different metric */
		pri += lp->lwp_slptime;
	}
	pri += p->p_swtime - p->p_nice * 8;

	/*
	 * The more pages paged out while we were swapped,
	 * the more work we have to do to get up and running
	 * again and the lower our wakeup priority.
	 *
	 * Each second of sleep time is worth ~1MB
	 */
	if ((vm = p->p_vmspace) != NULL) {
		vmspace_hold(vm);
		pgs = vmspace_resident_count(vm);
		if (pgs < vm->vm_swrss) {
			pri -= (vm->vm_swrss - pgs) /
			       (1024 * 1024 / PAGE_SIZE);
		}
		vmspace_drop(vm);
	}
	lwkt_reltoken(&p->p_token);

	/*
	 * If this process is higher priority and there is
	 * enough space, then select this process instead of
	 * the previous selection.
	 */
	if (pri > info->ppri) {
		if (info->pp)
			PRELE(info->pp);
		PHOLD(p);
		info->pp = p;
		info->ppri = pri;
	}
	return(0);
}

/*
 * SMP races ok.
 * No requirements.
 */
void
swapin_request(void)
{
	if (scheduler_notify == 0) {
		scheduler_notify = 1;
		wakeup(&scheduler_notify);
	}
}

#ifndef NO_SWAPPING

#define swappable(p) \
	(((p)->p_lock == 0) && \
	 ((p)->p_flags & (P_TRACED|P_SYSTEM|P_SWAPPEDOUT|P_WEXIT)) == 0)

/*
 * Swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 15;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0, "Guaranteed process resident time (sec)");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.  Default is
 * one minute.
 */
static int swap_idle_threshold2 = 60;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0, "Time (sec) a process can idle before being swapped");

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and mark them as being swapped out.  This will cause the kernel
 * to prefer to pageout those proc's pages first and the procs in question
 * will not return to user mode until the swapper tells them they can.
 *
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */

static int swapout_procs_callback(struct proc *p, void *data);

/*
 * No requirements.
 */
void
swapout_procs(int action)
{
	allproc_scan(swapout_procs_callback, &action, 0);
}
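
/*
 * Hypothetical caller's view (a sketch; the real call site lives in the
 * pageout daemon, not in this file, and the vm_swap_idle_enabled name is
 * an assumption): under memory stress everything eligible is considered,
 * while idle swapping only targets long-idle processes.
 */
#if 0
static void
example_pageout_policy(void)
{
	if (vm_page_count_severe())
		swapout_procs(VM_SWAP_NORMAL);	/* memory stress */
	else if (vm_swap_idle_enabled)
		swapout_procs(VM_SWAP_IDLE);	/* only long-idle procs */
}
#endif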

static int
swapout_procs_callback(struct proc *p, void *data)
{
	struct lwp *lp;
	int action = *(int *)data;
	int minslp = -1;

	if (!swappable(p))
		return(0);

	lwkt_gettoken(&p->p_token);

	/*
	 * We only consider active processes.
	 */
	if (p->p_stat != SACTIVE && p->p_stat != SSTOP) {
		lwkt_reltoken(&p->p_token);
		return(0);
	}

	FOREACH_LWP_IN_PROC(lp, p) {
		/*
		 * do not swap out a realtime process
		 */
		if (RTP_PRIO_IS_REALTIME(lp->lwp_rtprio.type)) {
			lwkt_reltoken(&p->p_token);
			return(0);
		}

		/*
		 * Guarantee swap_idle_threshold time in memory
		 */
		if (lp->lwp_slptime < swap_idle_threshold1) {
			lwkt_reltoken(&p->p_token);
			return(0);
		}

		/*
		 * If the system is under memory stress, or if we
		 * are swapping idle processes >= swap_idle_threshold2,
		 * then swap the process out.
		 */
		if (((action & VM_SWAP_NORMAL) == 0) &&
		    (((action & VM_SWAP_IDLE) == 0) ||
		     (lp->lwp_slptime < swap_idle_threshold2))) {
			lwkt_reltoken(&p->p_token);
			return(0);
		}

		if (minslp == -1 || lp->lwp_slptime < minslp)
			minslp = lp->lwp_slptime;
	}

	/*
	 * If the process has been asleep for a while, swap
	 * it out.
	 */
	if ((action & VM_SWAP_NORMAL) ||
	    ((action & VM_SWAP_IDLE) &&
	     (minslp > swap_idle_threshold2))) {
		swapout(p);
	}

	/*
	 * cleanup our reference
	 */
	lwkt_reltoken(&p->p_token);

	return(0);
}

/*
 * The caller must hold p->p_token
 */
static void
swapout(struct proc *p)
{
#ifdef INVARIANTS
	if (swap_debug)
		kprintf("swapping out %d (%s)\n", p->p_pid, p->p_comm);
#endif
	++p->p_ru.ru_nswap;

	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
	p->p_flags |= P_SWAPPEDOUT;
	p->p_swtime = 0;
}

#endif /* !NO_SWAPPING */
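
/*
 * Tuning sketch (illustrative shell commands, not part of this file):
 * the vm.swap_idle_threshold1/2 sysctls declared above, and vm.swap_debug
 * on INVARIANTS kernels, can be inspected and adjusted at runtime, e.g.:
 *
 *	sysctl vm.swap_idle_threshold1=30
 *	sysctl vm.swap_idle_threshold2=120
 *	sysctl vm.swap_debug=1
 */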