/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_glue.c	8.9 (Berkeley) 03/04/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <machine/cpu.h>

int	avefree = 0;		/* XXX */
unsigned	maxdmap = MAXDSIZ;	/* XXX */
int	readbuffers = 0;	/* XXX allow kgdb to read kernel buffer pool */

int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page(addr);
	eaddr = round_page(addr+len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	/*
	 * XXX there are still some things (e.g. the buffer cache) that
	 * are managed behind the VM system's back, so even though an
	 * address is accessible in the mind of the VM system, there may
	 * not be physical pages where the VM thinks there are.  This can
	 * lead to bogus allocation of pages in the kernel address space
	 * or worse, inconsistencies at the pmap level.  We only worry
	 * about the buffer cache for now.
	 */
	if (!readbuffers && rv && (eaddr > (vm_offset_t)buffers &&
	    saddr < (vm_offset_t)buffers + MAXBSIZE * nbuf))
		rv = FALSE;
	return(rv == TRUE);
}

int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr+len), prot);
	return(rv == TRUE);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so a debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect, a change to allow writing would be lazily
 * applied, meaning we would still take a protection fault; something
 * we really don't want to do.  It would also fragment the kernel
 * map unnecessarily.  We cannot use pmap_protect since it also won't
 * enforce a write-enable request.  Using pmap_enter is the only way
 * we can ensure the change takes place properly.
 */
void
chgkprot(addr, len, rw)
	register caddr_t addr;
	int len, rw;
{
	vm_prot_t prot;
	vm_offset_t pa, sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page(addr + len);
	for (sva = trunc_page(addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract the physical address for the page.
		 * We use a cheezy hack (setting the low bit of the
		 * virtual address) to differentiate physical page 0
		 * from an invalid mapping, not that it really
		 * matters...
		 */
		pa = pmap_extract(kernel_pmap, sva|1);
		if (pa == 0)
			panic("chgkprot: invalid page");
		pmap_enter(kernel_pmap, sva, pa&~1, prot, TRUE);
	}
}
#endif

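/*
 * Wire down the named range of the current process's address
 * space so that the underlying pages stay resident, e.g. for
 * the duration of physical I/O.
 */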
void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr+len), FALSE);
}

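/*
 * Undo vslock(): make the range pageable again.  The "dirtied"
 * argument is accepted for symmetry but is currently unused here.
 */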
void
vsunlock(addr, len, dirtied)
	caddr_t addr;
	u_int len;
	int dirtied;
{
#ifdef	lint
	dirtied++;
#endif
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr+len), TRUE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.
 * NOTE: the kernel stack may be at a different location in the child
 * process, and thus addresses of automatic variables may be invalid
 * after cpu_fork returns in the child process.  We do nothing here
 * after cpu_fork returns.
 */
int
vm_fork(p1, p2, isvfork)
	register struct proc *p1, *p2;
	int isvfork;
{
	register struct user *up;
	vm_offset_t addr;

#ifdef i386
	/*
	 * Avoid copying any of the parent's pagetables or other
	 * per-process objects that reside in the map by marking
	 * all of them non-inheritable.
	 */
	(void)vm_map_inherit(&p1->p_vmspace->vm_map,
	    UPT_MIN_ADDRESS-UPAGES*NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE);
#endif
	p2->p_vmspace = vmspace_fork(p1->p_vmspace);

#ifdef SYSVSHM
	if (p1->p_vmspace->vm_shm)
		shmfork(p1, p2, isvfork);
#endif

#ifndef	i386
	/*
	 * Allocate a wired-down (for now) pcb and kernel stack for the process
	 */
	addr = kmem_alloc_pageable(kernel_map, ctob(UPAGES));
	if (addr == 0)
		panic("vm_fork: no more kernel virtual memory");
	vm_map_pageable(kernel_map, addr, addr + ctob(UPAGES), FALSE);
#else
	/*
	 * XXX somehow, on the 386, pageout occasionally removes the
	 * active, wired-down kstack and pagetables WITHOUT going
	 * through vm_page_unwire!  Why this appears to work is not
	 * yet clear, yet it does...
	 */
	addr = kmem_alloc(kernel_map, ctob(UPAGES));
	if (addr == 0)
		panic("vm_fork: no more kernel virtual memory");
#endif
	up = (struct user *)addr;
	p2->p_addr = up;

	/*
	 * p_stats and p_sigacts currently point at fields within the
	 * user struct, reached through p_addr rather than the fixed
	 * address &u.  Copy p_sigacts and the "copy" region of p_stats;
	 * zero the rest of p_stats (the statistics).
	 */
	p2->p_stats = &up->u_stats;
	p2->p_sigacts = &up->u_sigacts;
	up->u_sigacts = *p1->p_sigacts;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t)&up->u_stats.pstat_endzero -
	    (caddr_t)&up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t)&up->u_stats.pstat_endcopy -
	    (caddr_t)&up->u_stats.pstat_startcopy));

#ifdef i386
	{
		u_int addr = UPT_MIN_ADDRESS - UPAGES*NBPG;
		struct vm_map *vp;

		vp = &p2->p_vmspace->vm_map;
		(void)vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr);
		(void)vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);
		(void)vm_map_inherit(vp, addr, UPT_MAX_ADDRESS, VM_INHERIT_NONE);
	}
#endif
	/*
	 * cpu_fork will copy and update the kernel stack and pcb,
	 * and make the child ready to run.  It marks the child
	 * so that it can return differently than the parent.
	 * It returns twice, once in the parent process and
	 * once in the child.
	 */
	return (cpu_fork(p1, p2));
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 */
void
vm_init_limits(p)
	register struct proc *p;
{

	/*
	 * Set up the initial limits on process VM.
	 * Set the maximum resident set size to be all
	 * of (reasonably) available memory.  This causes
	 * any single, large process to start random page
	 * replacement once it fills memory.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
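	/* ptoa() converts the free page count into bytes */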
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(cnt.v_free_count);
}

#include <vm/vm_pageout.h>

#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define	SDB_SWAPIN	2
#define	SDB_SWAPOUT	4
#endif

/*
 * Brutally simple:
 *	1. Attempt to swap in every swapped-out, runnable process in
 *	   order of priority.
 *	2. If not enough memory, wake the pageout daemon and let it
 *	   clear some space.
 */
void
scheduler()
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;
	vm_offset_t addr;
	vm_size_t size;

loop:
#ifdef DEBUG
	while (!enableswap)
		tsleep((caddr_t)&proc0, PVM, "noswap", 0);
#endif
	pp = NULL;
	ppri = INT_MIN;
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
			/* XXX should also penalize based on vm_swrss */
			pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
	}
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: running, procp %x pri %d\n", pp, ppri);
#endif
	/*
	 * Nothing to do, back to sleep
	 */
	if ((p = pp) == NULL) {
		tsleep((caddr_t)&proc0, PVM, "scheduler", 0);
		goto loop;
	}

	/*
	 * We would like to bring someone in.
	 * This part is really bogus cuz we could deadlock on memory
	 * despite our feeble check.
	 * XXX should require at least vm_swrss / 2
	 */
	size = round_page(ctob(UPAGES));
	addr = (vm_offset_t) p->p_addr;
	if (cnt.v_free_count > atop(size)) {
#ifdef DEBUG
		if (swapdebug & SDB_SWAPIN)
			printf("swapin: pid %d(%s)@%x, pri %d free %d\n",
			    p->p_pid, p->p_comm, p->p_addr,
			    ppri, cnt.v_free_count);
#endif
		vm_map_pageable(kernel_map, addr, addr+size, FALSE);
		/*
		 * Some architectures need to be notified when the
		 * user area has moved to new physical page(s) (e.g.
		 * see pmax/pmax/vm_machdep.c).
		 */
		cpu_swapin(p);
		(void) splstatclock();
		if (p->p_stat == SRUN)
			setrunqueue(p);
		p->p_flag |= P_INMEM;
		(void) spl0();
		p->p_swtime = 0;
		goto loop;
	}
	/*
	 * Not enough memory, jab the pageout daemon and wait til the
	 * coast is clear.
	 */
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: no room for pid %d(%s), free %d\n",
		    p->p_pid, p->p_comm, cnt.v_free_count);
#endif
	(void) splhigh();
	VM_WAIT;
	(void) spl0();
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: room again, free %d\n", cnt.v_free_count);
#endif
	goto loop;
}

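/*
 * A process is eligible for swapping only if it is resident
 * (P_INMEM set) and none of P_SYSTEM, P_NOSWAP, P_WEXIT or
 * P_PHYSIO are set.
 */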
#define	swappable(p)							\
	(((p)->p_flag &							\
	    (P_SYSTEM | P_INMEM | P_NOSWAP | P_WEXIT | P_PHYSIO)) == P_INMEM)

/*
 * Swapout is driven by the pageout daemon.  Very simple: we find
 * eligible procs and unwire their u-areas.  We try to always "swap"
 * at least one process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Otherwise, we swap the longest-sleeping or
 * stopped process, if any, else the longest-resident process.
 */
void
swapout_threads()
{
	register struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;
	extern int maxslp;

#ifdef DEBUG
	if (!enableswap)
		return;
#endif
	outp = outp2 = NULL;
	outpri = outpri2 = 0;
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		case SRUN:
			if (p->p_swtime > outpri2) {
				outp2 = p;
				outpri2 = p->p_swtime;
			}
			continue;

		case SSLEEP:
		case SSTOP:
			if (p->p_slptime >= maxslp) {
				swapout(p);
				didswap++;
			} else if (p->p_slptime > outpri) {
				outp = p;
				outpri = p->p_slptime;
			}
			continue;
		}
	}
	/*
	 * If we didn't get rid of any real duds, toss out the next most
	 * likely sleeping/stopped or running candidate.  We only do this
	 * if we are real low on memory since we don't gain much by doing
	 * it (UPAGES pages).
	 */
	if (didswap == 0 &&
	    cnt.v_free_count <= atop(round_page(ctob(UPAGES)))) {
		if ((p = outp) == 0)
			p = outp2;
#ifdef DEBUG
		if (swapdebug & SDB_SWAPOUT)
			printf("swapout_threads: no duds, try procp %x\n", p);
#endif
		if (p)
			swapout(p);
	}
}

void
swapout(p)
	register struct proc *p;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (swapdebug & SDB_SWAPOUT)
		printf("swapout: pid %d(%s)@%x, stat %x pri %d free %d\n",
		    p->p_pid, p->p_comm, p->p_addr, p->p_stat,
		    p->p_slptime, cnt.v_free_count);
#endif
	size = round_page(ctob(UPAGES));
	addr = (vm_offset_t) p->p_addr;
#if defined(hp300) || defined(luna68k)
	/*
	 * Ugh!  u-area is double mapped to a fixed address behind the
	 * back of the VM system and accesses are usually through that
	 * address rather than the per-process address.  Hence reference
	 * and modify information are recorded at the fixed address and
	 * lost at context switch time.  We assume the u-struct and
	 * kernel stack are always accessed/modified and force it to be so.
	 */
	{
		register int i;
		volatile long tmp;

		for (i = 0; i < UPAGES; i++) {
			tmp = *(long *)addr; *(long *)addr = tmp;
			addr += NBPG;
		}
		addr = (vm_offset_t) p->p_addr;
	}
#endif
#ifdef mips
	/*
	 * Be sure to save the floating point coprocessor state before
	 * paging out the u-struct.
	 */
	{
		extern struct proc *machFPCurProcPtr;

		if (p == machFPCurProcPtr) {
			MachSaveCurFPState(p);
			machFPCurProcPtr = (struct proc *)0;
		}
	}
#endif
#ifndef	i386 /* temporary measure till we find spontaneous unwire of kstack */
	vm_map_pageable(kernel_map, addr, addr+size, TRUE);
	/* let the pmap module reclaim any unused page-table pages */
	pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
#endif
	(void) splhigh();
	p->p_flag &= ~P_INMEM;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();
	p->p_swtime = 0;
}

/*
 * The rest of these routines fake thread handling: they map the
 * Mach VM code's thread wait/wakeup primitives onto tsleep/wakeup,
 * recording the wait event in curproc->p_thread.
 */

void
assert_wait(event, ruptible)
	void *event;
	boolean_t ruptible;
{
#ifdef lint
	ruptible++;
#endif
	curproc->p_thread = event;
}

void
thread_block()
{
	int s = splhigh();

	if (curproc->p_thread)
		tsleep(curproc->p_thread, PVM, "thrd_block", 0);
	splx(s);
}

void
thread_sleep(event, lock, ruptible)
	void *event;
	simple_lock_t lock;
	boolean_t ruptible;
{
	int s = splhigh();

#ifdef lint
	ruptible++;
#endif
	curproc->p_thread = event;
	simple_unlock(lock);
	if (curproc->p_thread)
		tsleep(event, PVM, "thrd_sleep", 0);
	splx(s);
}

void
thread_wakeup(event)
	void *event;
{
	int s = splhigh();

	wakeup(event);
	splx(s);
}

/*
 * DEBUG stuff
 */

int indent = 0;

#include <machine/stdarg.h>		/* see subr_prf.c */

/*ARGSUSED2*/
void
#if __STDC__
iprintf(const char *fmt, ...)
#else
iprintf(fmt /* , va_alist */)
	char *fmt;
	/* va_dcl */
#endif
{
	register int i;
	va_list ap;

	for (i = indent; i >= 8; i -= 8)
		printf("\t");
	while (--i >= 0)
		printf(" ");
	va_start(ap, fmt);
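	/*
	 * %r is the kernel printf's recursive-format extension (see
	 * subr_prf.c): "fmt" is expanded with the argument list "ap".
	 */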
	printf("%r", fmt, ap);
	va_end(ap);
}