/*	$NetBSD: uvm_glue.c,v 1.58 2002/05/15 06:57:49 matt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.58 2002/05/15 06:57:49 matt Exp $");

#include "opt_kgdb.h"
#include "opt_sysv.h"
#include "opt_uvmhist.h"

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <uvm/uvm.h>

#include <machine/cpu.h>

/*
 * local prototypes
 */

static void uvm_swapout __P((struct proc *));

/*
 * XXXCDC: do these really belong here?
 */

int readbuffers = 0;		/* allow KGDB to read kern buffer pool */
				/* XXX: see uvm_kernacc */


/*
 * uvm_kernacc: can the kernel access a region of memory
 *
 * - called from malloc [DIAGNOSTIC], and /dev/kmem driver (mem.c)
 */

boolean_t
uvm_kernacc(addr, len, rw)
	caddr_t addr;
	size_t len;
	int rw;
{
	boolean_t rv;
	vaddr_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page((vaddr_t)addr);
	eaddr = round_page((vaddr_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);

	/*
	 * XXX there are still some things (e.g. the buffer cache) that
	 * are managed behind the VM system's back so even though an
	 * address is accessible in the mind of the VM system, there may
	 * not be physical pages where the VM thinks there are.  This can
	 * lead to bogus allocation of pages in the kernel address space
	 * or worse, inconsistencies at the pmap level.  We only worry
	 * about the buffer cache for now.
	 */
	if (!readbuffers && rv && (eaddr > (vaddr_t)buffers &&
	    saddr < (vaddr_t)buffers + MAXBSIZE * nbuf))
		rv = FALSE;
	return(rv);
}

/*
 * uvm_useracc: can the user access it?
 *
 * - called from physio() and sys___sysctl().
 */

boolean_t
uvm_useracc(addr, len, rw)
	caddr_t addr;
	size_t len;
	int rw;
{
	struct vm_map *map;
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	/* XXX curproc */
	map = &curproc->p_vmspace->vm_map;

	vm_map_lock_read(map);
	rv = uvm_map_checkprot(map, trunc_page((vaddr_t)addr),
	    round_page((vaddr_t)addr + len), prot);
	vm_map_unlock_read(map);

	return(rv);
}
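
/*
 * Example (illustrative sketch; "ubuf", "len" and the error handling are
 * hypothetical): a caller such as physio() would typically check access
 * with uvm_useracc() and wire the buffer with uvm_vslock()/uvm_vsunlock()
 * (below) around the transfer, roughly:
 *
 *	if (!uvm_useracc(ubuf, len, B_WRITE))
 *		return (EFAULT);
 *	error = uvm_vslock(p, ubuf, len, VM_PROT_WRITE);
 *	if (error)
 *		return (error);
 *	(do the device transfer into ubuf)
 *	uvm_vsunlock(p, ubuf, len);
 */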

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect a change to allow writing would be lazily
 * applied, meaning we would still take a protection fault, something
 * we really don't want to do.  It would also fragment the kernel
 * map unnecessarily.  We cannot use pmap_protect since it also won't
 * enforce a write-enable request.  Using pmap_enter is the only way
 * we can ensure the change takes place properly.
 */
void
uvm_chgkprot(addr, len, rw)
	caddr_t addr;
	size_t len;
	int rw;
{
	vm_prot_t prot;
	paddr_t pa;
	vaddr_t sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page((vaddr_t)addr + len);
	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract physical address for the page.
		 */
		if (pmap_extract(pmap_kernel(), sva, &pa) == FALSE)
			panic("chgkprot: invalid page");
		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
	}
	pmap_update(pmap_kernel());
}
#endif

/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

int
uvm_vslock(p, addr, len, access_type)
	struct proc *p;
	caddr_t addr;
	size_t len;
	vm_prot_t access_type;
{
	struct vm_map *map;
	vaddr_t start, end;
	int error;

	map = &p->p_vmspace->vm_map;
	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	error = uvm_fault_wire(map, start, end, VM_FAULT_WIRE, access_type);
	return error;
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock()
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vsunlock(p, addr, len)
	struct proc *p;
	caddr_t addr;
	size_t len;
{
	uvm_fault_unwire(&p->p_vmspace->vm_map, trunc_page((vaddr_t)addr),
	    round_page((vaddr_t)addr + len));
}

/*
 * uvm_fork: fork a virtual address space
 *
 * - the address space is copied as per parent map's inherit values
 * - a new "user" structure is allocated for the child process
 *   [filled in by MD layer...]
 * - if specified, the child gets a new user stack described by
 *	stack and stacksize
 * - NOTE: the kernel stack may be at a different location in the child
 *	process, and thus addresses of automatic variables may be invalid
 *	after cpu_fork returns in the child process.  We do nothing here
 *	after cpu_fork returns.
 * - XXXCDC: we need a way for this to return a failure value rather
 *   than just hang
 */
void
uvm_fork(p1, p2, shared, stack, stacksize, func, arg)
	struct proc *p1, *p2;
	boolean_t shared;
	void *stack;
	size_t stacksize;
	void (*func) __P((void *));
	void *arg;
{
	struct user *up = p2->p_addr;
	int error;

	if (shared == TRUE) {
		p2->p_vmspace = NULL;
		uvmspace_share(p1, p2);
	} else
		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);

	/*
	 * Wire down the U-area for the process, which contains the PCB
	 * and the kernel stack.  Wired state is stored in p->p_flag's
	 * P_INMEM bit rather than in the vm_map_entry's wired count
	 * to prevent kernel_map fragmentation.
	 *
	 * Note the kernel stack gets read/write accesses right off
	 * the bat.
	 */
	error = uvm_fault_wire(kernel_map, (vaddr_t)up, (vaddr_t)up + USPACE,
	    VM_FAULT_WIRE, VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		panic("uvm_fork: uvm_fault_wire failed: %d", error);

	/*
	 * p_stats currently points at a field in the user struct.  Copy
	 * parts of p_stats, and zero out the rest.
	 */
	p2->p_stats = &up->u_stats;
	memset(&up->u_stats.pstat_startzero, 0,
	    ((caddr_t)&up->u_stats.pstat_endzero -
	     (caddr_t)&up->u_stats.pstat_startzero));
	memcpy(&up->u_stats.pstat_startcopy, &p1->p_stats->pstat_startcopy,
	    ((caddr_t)&up->u_stats.pstat_endcopy -
	     (caddr_t)&up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork() copies and updates the pcb, and makes the child ready
	 * to run.  If this is a normal user fork, the child will exit
	 * directly to user mode via child_return() on its first time
	 * slice and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_fork(p1, p2, stack, stacksize, func, arg);
}

/*
 * uvm_exit: exit a virtual address space
 *
 * - the process passed to us is a dead (pre-zombie) process; we
 *   are running on a different context now (the reaper).
 * - we must run in a separate thread because freeing the vmspace
 *   of the dead process may block.
 */
void
uvm_exit(p)
	struct proc *p;
{
	vaddr_t va = (vaddr_t)p->p_addr;

	uvmspace_free(p->p_vmspace);
	p->p_flag &= ~P_INMEM;
	uvm_km_free(kernel_map, va, USPACE);
	p->p_addr = NULL;
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */
void
uvm_init_limits(p)
	struct proc *p;
{

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be all of (reasonably) available memory.
	 * This causes any single, large process to start random page
	 * replacement once it fills memory.
	 */

	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
}

#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define	SDB_SWAPIN	2
#define	SDB_SWAPOUT	4
#endif

/*
 * uvm_swapin: swap in a process's u-area.
 */

void
uvm_swapin(p)
	struct proc *p;
{
	vaddr_t addr;
	int s, error;

	addr = (vaddr_t)p->p_addr;
	/* make P_INMEM true */
	error = uvm_fault_wire(kernel_map, addr, addr + USPACE, VM_FAULT_WIRE,
	    VM_PROT_READ | VM_PROT_WRITE);
	if (error) {
		panic("uvm_swapin: rewiring stack failed: %d", error);
	}

	/*
	 * Some architectures need to be notified when the user area has
	 * moved to new physical page(s) (e.g. see mips/mips/vm_machdep.c).
	 */
	cpu_swapin(p);
	SCHED_LOCK(s);
	if (p->p_stat == SRUN)
		setrunqueue(p);
	p->p_flag |= P_INMEM;
	SCHED_UNLOCK(s);
	p->p_swtime = 0;
	++uvmexp.swapins;
}
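
/*
 * The swap-in priority used by uvm_scheduler() below is
 *
 *	pri = p_swtime + p_slptime - (p_nice - NZERO) * 8
 *
 * i.e. processes that have been swapped out and asleep the longest are
 * preferred, and each nice level above the default (NZERO) costs 8
 * points.  For example, a process swapped out for 20 seconds that has
 * also slept for 10 seconds at the default nice value scores
 * 20 + 10 - 0 = 30.
 */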

/*
 * uvm_scheduler: process zero main loop
 *
 * - attempt to swap in every swapped-out, runnable process in order of
 *	priority.
 * - if not enough memory, wake the pagedaemon and let it clear space.
 */

void
uvm_scheduler()
{
	struct proc *p;
	int pri;
	struct proc *pp;
	int ppri;

loop:
#ifdef DEBUG
	while (!enableswap)
		tsleep(&proc0, PVM, "noswap", 0);
#endif
	pp = NULL;		/* process to choose */
	ppri = INT_MIN;		/* its priority */
	proclist_lock_read();
	LIST_FOREACH(p, &allproc, p_list) {

		/* is it a runnable swapped out process? */
		if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
			pri = p->p_swtime + p->p_slptime -
			    (p->p_nice - NZERO) * 8;
			if (pri > ppri) {   /* higher priority?  remember it. */
				pp = p;
				ppri = pri;
			}
		}
	}
	/*
	 * XXXSMP: possible unlock/sleep race between here and the
	 * "scheduler" tsleep below.
	 */
	proclist_unlock_read();

#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: running, procp %p pri %d\n", pp, ppri);
#endif
	/*
	 * Nothing to do, back to sleep
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "scheduler", 0);
		goto loop;
	}

	/*
	 * we have found a swapped-out process which we would like to bring
	 * back in.
	 *
	 * XXX: this part is really bogus because we could deadlock on memory
	 * despite our feeble check
	 */
	if (uvmexp.free > atop(USPACE)) {
#ifdef DEBUG
		if (swapdebug & SDB_SWAPIN)
			printf("swapin: pid %d(%s)@%p, pri %d free %d\n",
			    p->p_pid, p->p_comm, p->p_addr, ppri, uvmexp.free);
#endif
		uvm_swapin(p);
		goto loop;
	}
	/*
	 * not enough memory, jab the pageout daemon and wait until the coast
	 * is clear
	 */
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: no room for pid %d(%s), free %d\n",
		    p->p_pid, p->p_comm, uvmexp.free);
#endif
	uvm_wait("schedpwait");
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: room again, free %d\n", uvmexp.free);
#endif
	goto loop;
}

/*
 * swappable: is process "p" swappable?
 */

#define	swappable(p)							\
	(((p)->p_flag & (P_SYSTEM | P_INMEM | P_WEXIT)) == P_INMEM &&	\
	 (p)->p_holdcnt == 0)

/*
 * swapout_threads: find threads that can be swapped and unwire their
 *	u-areas.
 *
 * - called by the pagedaemon
 * - try to swap at least one process
 * - processes that are sleeping or stopped for maxslp or more seconds
 *   are swapped... otherwise the longest-sleeping or stopped process
 *   is swapped, otherwise the longest resident process...
 */
void
uvm_swapout_threads()
{
	struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;
	extern int maxslp;
	/* XXXCDC: should move off to uvmexp. or uvm., also in uvm_meter */

#ifdef DEBUG
	if (!enableswap)
		return;
#endif

	/*
	 * outp/outpri  : stop/sleep process with largest sleeptime < maxslp
	 * outp2/outpri2: the longest resident process (its swap time)
	 */
	outp = outp2 = NULL;
	outpri = outpri2 = 0;
	proclist_lock_read();
	LIST_FOREACH(p, &allproc, p_list) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		case SRUN:
		case SONPROC:
			if (p->p_swtime > outpri2) {
				outp2 = p;
				outpri2 = p->p_swtime;
			}
			continue;

		case SSLEEP:
		case SSTOP:
			if (p->p_slptime >= maxslp) {
				uvm_swapout(p);
				didswap++;
			} else if (p->p_slptime > outpri) {
				outp = p;
				outpri = p->p_slptime;
			}
			continue;
		}
	}
	proclist_unlock_read();

	/*
	 * If we didn't get rid of any real duds, toss out the next most
	 * likely sleeping/stopped or running candidate.  We only do this
	 * if we are really low on memory since we don't gain much by doing
	 * it (USPACE bytes).
	 */
	if (didswap == 0 && uvmexp.free <= atop(round_page(USPACE))) {
		if ((p = outp) == NULL)
			p = outp2;
#ifdef DEBUG
		if (swapdebug & SDB_SWAPOUT)
			printf("swapout_threads: no duds, try procp %p\n", p);
#endif
		if (p)
			uvm_swapout(p);
	}
}

/*
 * uvm_swapout: swap out process "p"
 *
 * - currently "swapout" means "unwire U-area" and "pmap_collect()"
 *   the pmap.
 * - XXXCDC: should deactivate all process' private anonymous memory
 */

static void
uvm_swapout(p)
	struct proc *p;
{
	vaddr_t addr;
	int s;

#ifdef DEBUG
	if (swapdebug & SDB_SWAPOUT)
		printf("swapout: pid %d(%s)@%p, stat %x pri %d free %d\n",
		    p->p_pid, p->p_comm, p->p_addr, p->p_stat,
		    p->p_slptime, uvmexp.free);
#endif

	/*
	 * Do any machine-specific actions necessary before swapout.
	 * This can include saving floating point state, etc.
	 */
	cpu_swapout(p);

	/*
	 * Mark it as (potentially) swapped out.
	 */
	SCHED_LOCK(s);
	p->p_flag &= ~P_INMEM;
	if (p->p_stat == SRUN)
		remrunqueue(p);
	SCHED_UNLOCK(s);
	p->p_swtime = 0;
	p->p_stats->p_ru.ru_nswap++;
	++uvmexp.swapouts;

	/*
	 * Unwire the to-be-swapped process's user struct and kernel stack.
	 */
	addr = (vaddr_t)p->p_addr;
	uvm_fault_unwire(kernel_map, addr, addr + USPACE); /* !P_INMEM */
	pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
}

/*
 * uvm_coredump_walkmap: walk a process's map for the purpose of dumping
 * a core file.
 */

int
uvm_coredump_walkmap(p, vp, cred, func, cookie)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
	int (*func)(struct proc *, struct vnode *, struct ucred *,
	    struct uvm_coredump_state *);
	void *cookie;
{
	struct uvm_coredump_state state;
	struct vmspace *vm = p->p_vmspace;
	struct vm_map *map = &vm->vm_map;
	struct vm_map_entry *entry;
	vaddr_t maxstack;
	int error;

	maxstack = trunc_page(USRSTACK - ctob(vm->vm_ssize));

	for (entry = map->header.next; entry != &map->header;
	     entry = entry->next) {
		/* Should never happen for a user process. */
		if (UVM_ET_ISSUBMAP(entry))
			panic("uvm_coredump_walkmap: user process with "
			    "submap?");

		state.cookie = cookie;
		state.start = entry->start;
		state.end = entry->end;
		state.prot = entry->protection;
		state.flags = 0;

		if (state.start >= VM_MAXUSER_ADDRESS)
			continue;

		if (state.end > VM_MAXUSER_ADDRESS)
			state.end = VM_MAXUSER_ADDRESS;

		if (state.start >= (vaddr_t)vm->vm_maxsaddr) {
			if (state.end <= maxstack)
				continue;
			if (state.start < maxstack)
				state.start = maxstack;
			state.flags |= UVM_COREDUMP_STACK;
		}

		if ((entry->protection & VM_PROT_WRITE) == 0)
			state.flags |= UVM_COREDUMP_NODUMP;

		if (entry->object.uvm_obj != NULL &&
		    entry->object.uvm_obj->pgops == &uvm_deviceops)
			state.flags |= UVM_COREDUMP_NODUMP;

		error = (*func)(p, vp, cred, &state);
		if (error)
			return (error);
	}

	return (0);
}
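
/*
 * Illustrative sketch (hypothetical; "coredump_writeseg" and the cookie
 * handling are not from this file): a machine-independent core dump
 * routine is expected to drive uvm_coredump_walkmap() with a callback of
 * the shape above, roughly:
 *
 *	static int
 *	coredump_writeseg(struct proc *p, struct vnode *vp,
 *	    struct ucred *cred, struct uvm_coredump_state *s)
 *	{
 *		if (s->flags & UVM_COREDUMP_NODUMP)
 *			return (0);
 *		(write the range [s->start, s->end) to vp, using s->cookie
 *		 to carry per-dump state such as the current file offset)
 *		return (0);
 *	}
 *
 *	error = uvm_coredump_walkmap(p, vp, cred, coredump_writeseg, cookie);
 */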