xref: /original-bsd/sys/vm/vm_glue.c (revision 333da485)
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 01/05/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <machine/cpu.h>

int	avefree = 0;		/* XXX */
unsigned maxdmap = MAXDSIZ;	/* XXX */
int	readbuffers = 0;	/* XXX allow kgdb to read kernel buffer pool */

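/*
 * kernacc: check whether the kernel virtual address range [addr, addr+len)
 * is accessible for the given operation (B_READ checks read protection,
 * anything else checks write protection).  Returns 1 if accessible, 0 if not.
 */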
int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page(addr);
	eaddr = round_page(addr+len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	/*
	 * XXX there are still some things (e.g. the buffer cache) that
	 * are managed behind the VM system's back so even though an
	 * address is accessible in the mind of the VM system, there may
	 * not be physical pages where the VM thinks there is.  This can
	 * lead to bogus allocation of pages in the kernel address space
	 * or worse, inconsistencies at the pmap level.  We only worry
	 * about the buffer cache for now.
	 */
	if (!readbuffers && rv && (eaddr > (vm_offset_t)buffers &&
		   saddr < (vm_offset_t)buffers + MAXBSIZE * nbuf))
		rv = FALSE;
	return(rv == TRUE);
}

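/*
 * useracc: like kernacc, but checks the named range against the current
 * process's user address space instead of the kernel map.
 */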
int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr+len), prot);
	return(rv == TRUE);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect, a change to allow writing would be applied
 * lazily, meaning we would still take a protection fault, something
 * we really don't want to do.  It would also fragment the kernel
 * map unnecessarily.  We cannot use pmap_protect since it won't
 * enforce a write-enable request either.  Using pmap_enter is the
 * only way we can ensure the change takes place properly.
 */
void
chgkprot(addr, len, rw)
	register caddr_t addr;
	int len, rw;
{
	vm_prot_t prot;
	vm_offset_t pa, sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page(addr + len);
	for (sva = trunc_page(addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract physical address for the page.
		 * We use a cheesy hack to differentiate physical
		 * page 0 from an invalid mapping, not that it
		 * really matters...
		 */
		pa = pmap_extract(kernel_pmap, sva|1);
		if (pa == 0)
			panic("chgkprot: invalid page");
		pmap_enter(kernel_pmap, sva, pa&~1, prot, TRUE);
	}
}
#endif

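/*
 * vslock: wire down (fault in and lock) the user pages spanning
 * [addr, addr+len) in the current process's map so they cannot be
 * paged out, typically for the duration of raw I/O.  An illustrative
 * caller (not verbatim from any particular driver) would bracket a
 * transfer with:
 *
 *	vslock(base, count);
 *	... perform the transfer to/from the wired buffer ...
 *	vsunlock(base, count, rw == B_READ);
 */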
void
vslock(addr, len)
	caddr_t	addr;
	u_int	len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
			round_page(addr+len), FALSE);
}

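/*
 * vsunlock: undo a previous vslock, making the pages pageable again.
 * The "dirtied" argument is currently unused (see the lint workaround).
 */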
void
vsunlock(addr, len, dirtied)
	caddr_t	addr;
	u_int	len;
	int dirtied;
{
#ifdef	lint
	dirtied++;
#endif
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
			round_page(addr+len), TRUE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.
 * NOTE: the kernel stack may be at a different location in the child
 * process, and thus addresses of automatic variables may be invalid
 * after cpu_fork returns in the child process.  We do nothing here
 * after cpu_fork returns.
 */
int
vm_fork(p1, p2, isvfork)
	register struct proc *p1, *p2;
	int isvfork;
{
	register struct user *up;
	vm_offset_t addr;

#ifdef i386
	/*
	 * Avoid copying any of the parent's pagetables or other per-process
	 * objects that reside in the map by marking all of them
	 * non-inheritable.
	 */
	(void)vm_map_inherit(&p1->p_vmspace->vm_map,
		UPT_MIN_ADDRESS-UPAGES*NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE);
#endif
	p2->p_vmspace = vmspace_fork(p1->p_vmspace);

#ifdef SYSVSHM
	if (p1->p_vmspace->vm_shm)
		shmfork(p1, p2, isvfork);
#endif

#ifndef	i386
	/*
	 * Allocate a wired-down (for now) pcb and kernel stack for the process
	 */
	addr = kmem_alloc_pageable(kernel_map, ctob(UPAGES));
	if (addr == 0)
		panic("vm_fork: no more kernel virtual memory");
	vm_map_pageable(kernel_map, addr, addr + ctob(UPAGES), FALSE);
#else
/* XXX somehow, on 386, occasionally pageout removes the active, wired-down
kstack and pagetables WITHOUT going through vm_page_unwire!  Why this appears
to work is not yet clear, yet it does... */
	addr = kmem_alloc(kernel_map, ctob(UPAGES));
	if (addr == 0)
		panic("vm_fork: no more kernel virtual memory");
#endif
	up = (struct user *)addr;
	p2->p_addr = up;

	/*
	 * p_stats and p_sigacts currently point at fields in the user
	 * struct; they are referenced through p_addr rather than &u.
	 * Copy p_sigacts and parts of p_stats; zero the rest
	 * of p_stats (statistics).
	 */
	p2->p_stats = &up->u_stats;
	p2->p_sigacts = &up->u_sigacts;
	up->u_sigacts = *p1->p_sigacts;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t)&up->u_stats.pstat_endzero -
	    (caddr_t)&up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t)&up->u_stats.pstat_endcopy -
	     (caddr_t)&up->u_stats.pstat_startcopy));

#ifdef i386
	{ u_int addr = UPT_MIN_ADDRESS - UPAGES*NBPG; struct vm_map *vp;

	vp = &p2->p_vmspace->vm_map;
	(void)vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr);
	(void)vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);
	(void)vm_map_inherit(vp, addr, UPT_MAX_ADDRESS, VM_INHERIT_NONE);
	}
#endif
	/*
	 * cpu_fork will copy and update the kernel stack and pcb,
	 * and make the child ready to run.  It marks the child
	 * so that it can return differently than the parent.
	 * It returns twice, once in the parent process and
	 * once in the child.
	 */
	return (cpu_fork(p1, p2));
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 */
void
vm_init_limits(p)
	register struct proc *p;
{

	/*
	 * Set up the initial limits on process VM.
	 * Set the maximum resident set size to be all
	 * of (reasonably) available memory.  This causes
	 * any single, large process to start random page
	 * replacement once it fills memory.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(cnt.v_free_count);
}

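/*
 * vm_pageout.h is pulled in here for the pageout daemon interface
 * (notably VM_WAIT) used by the swap scheduler below.
 */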
#include <vm/vm_pageout.h>

#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define SDB_SWAPIN	2
#define SDB_SWAPOUT	4
#endif

/*
 * Brutally simple:
 *	1. Attempt to swap in every swapped-out, runnable process in
 *	   order of priority.
 *	2. If there is not enough memory, wake the pageout daemon and
 *	   let it clear some space.
 */
void
scheduler()
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;
	vm_offset_t addr;
	vm_size_t size;

loop:
#ifdef DEBUG
	while (!enableswap)
		sleep((caddr_t)&proc0, PVM);
#endif
	pp = NULL;
	ppri = INT_MIN;
	for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
		if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
			pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
	}
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("sched: running, procp %x pri %d\n", pp, ppri);
#endif
	/*
	 * Nothing to do, back to sleep
	 */
	if ((p = pp) == NULL) {
		sleep((caddr_t)&proc0, PVM);
		goto loop;
	}

	/*
	 * We would like to bring someone in.
	 * This part is really bogus because we could deadlock on memory
	 * despite our feeble check.
	 */
	size = round_page(ctob(UPAGES));
	addr = (vm_offset_t) p->p_addr;
	if (cnt.v_free_count > atop(size)) {
#ifdef DEBUG
		if (swapdebug & SDB_SWAPIN)
			printf("swapin: pid %d(%s)@%x, pri %d free %d\n",
			       p->p_pid, p->p_comm, p->p_addr,
			       ppri, cnt.v_free_count);
#endif
		vm_map_pageable(kernel_map, addr, addr+size, FALSE);
		/*
		 * Some architectures need to be notified when the
		 * user area has moved to new physical page(s) (e.g.
		 * see pmax/pmax/vm_machdep.c).
		 */
		cpu_swapin(p);
		(void) splstatclock();
		if (p->p_stat == SRUN)
			setrunqueue(p);
		p->p_flag |= P_INMEM;
		(void) spl0();
		p->p_swtime = 0;
		goto loop;
	}
	/*
	 * Not enough memory, jab the pageout daemon and wait until the
	 * coast is clear.
	 */
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("sched: no room for pid %d(%s), free %d\n",
		       p->p_pid, p->p_comm, cnt.v_free_count);
#endif
	(void) splhigh();
	VM_WAIT;
	(void) spl0();
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("sched: room again, free %d\n", cnt.v_free_count);
#endif
	goto loop;
}

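/*
 * A process is swappable only if it is resident (P_INMEM) and none of
 * the flags that pin it in memory (system process, no-swap, exiting,
 * or in the middle of physical I/O) are set.
 */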
#define	swappable(p)							\
	(((p)->p_flag &							\
	    (P_SYSTEM | P_INMEM | P_NOSWAP | P_WEXIT | P_PHYSIO)) == P_INMEM)

/*
 * Swapout is driven by the pageout daemon.  Very simple: we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Otherwise, we swap the longest-sleeping or stopped
 * process, if any, or failing that the longest-resident process.
 */
void
swapout_threads()
{
	register struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;
	extern int maxslp;

#ifdef DEBUG
	if (!enableswap)
		return;
#endif
	outp = outp2 = NULL;
	outpri = outpri2 = 0;
	for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		case SRUN:
			if (p->p_swtime > outpri2) {
				outp2 = p;
				outpri2 = p->p_swtime;
			}
			continue;

		case SSLEEP:
		case SSTOP:
			if (p->p_slptime >= maxslp) {
				swapout(p);
				didswap++;
			} else if (p->p_slptime > outpri) {
				outp = p;
				outpri = p->p_slptime;
			}
			continue;
		}
	}
	/*
	 * If we didn't get rid of any real duds, toss out the next most
	 * likely sleeping/stopped or running candidate.  We only do this
	 * if we are really low on memory since we don't gain much by doing
	 * it (UPAGES pages).
	 */
	if (didswap == 0 &&
	    cnt.v_free_count <= atop(round_page(ctob(UPAGES)))) {
		if ((p = outp) == 0)
			p = outp2;
#ifdef DEBUG
		if (swapdebug & SDB_SWAPOUT)
			printf("swapout_threads: no duds, try procp %x\n", p);
#endif
		if (p)
			swapout(p);
	}
}

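/*
 * swapout: release a single process's u-area (pcb and kernel stack) by
 * unwiring it, and mark the process as no longer resident.  The pageout
 * daemon can then reclaim those pages as it sees fit.
 */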
void
swapout(p)
	register struct proc *p;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (swapdebug & SDB_SWAPOUT)
		printf("swapout: pid %d(%s)@%x, stat %x pri %d free %d\n",
		       p->p_pid, p->p_comm, p->p_addr, p->p_stat,
		       p->p_slptime, cnt.v_free_count);
#endif
	size = round_page(ctob(UPAGES));
	addr = (vm_offset_t) p->p_addr;
#if defined(hp300) || defined(luna68k)
	/*
	 * Ugh!  The u-area is double mapped to a fixed address behind the
	 * back of the VM system and accesses are usually through that
	 * address rather than the per-process address.  Hence reference
	 * and modify information are recorded at the fixed address and
	 * lost at context switch time.  We assume the u-struct and
	 * kernel stack are always accessed/modified and force it to be so.
	 */
	{
		register int i;
		volatile long tmp;

		for (i = 0; i < UPAGES; i++) {
			tmp = *(long *)addr; *(long *)addr = tmp;
			addr += NBPG;
		}
		addr = (vm_offset_t) p->p_addr;
	}
#endif
#ifdef mips
	/*
	 * Be sure to save the floating point coprocessor state before
	 * paging out the u-struct.
	 */
	{
		extern struct proc *machFPCurProcPtr;

		if (p == machFPCurProcPtr) {
			MachSaveCurFPState(p);
			machFPCurProcPtr = (struct proc *)0;
		}
	}
#endif
#ifndef	i386 /* temporary measure until we find the spontaneous unwire of kstack */
	vm_map_pageable(kernel_map, addr, addr+size, TRUE);
	pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
#endif
	(void) splhigh();
	p->p_flag &= ~P_INMEM;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();
	p->p_swtime = 0;
}

/*
 * The rest of these routines fake thread handling: they map the Mach
 * VM code's thread primitives onto the BSD sleep()/wakeup() mechanism.
 */

void
assert_wait(event, ruptible)
	int event;
	boolean_t ruptible;
{
#ifdef lint
	ruptible++;
#endif
	curproc->p_thread = event;
}

void
thread_block()
{
	int s = splhigh();

	if (curproc->p_thread)
		sleep((caddr_t)curproc->p_thread, PVM);
	splx(s);
}

void
thread_sleep(event, lock, ruptible)
	int event;
	simple_lock_t lock;
	boolean_t ruptible;
{
	int s = splhigh();

#ifdef lint
	ruptible++;
#endif
	curproc->p_thread = event;
	simple_unlock(lock);
	if (curproc->p_thread)
		sleep((caddr_t)event, PVM);
	splx(s);
}

void
thread_wakeup(event)
	int event;
{
	int s = splhigh();

	wakeup((caddr_t)event);
	splx(s);
}

/*
 * DEBUG stuff
 */

int indent = 0;

#include <machine/stdarg.h>		/* see subr_prf.c */

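/*
 * iprintf: like printf, but first indents the output by 'indent' columns
 * (a tab for every eight columns, then spaces).  The "%r" directive hands
 * fmt and the argument list on to the kernel printf (see subr_prf.c).
 */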
/*ARGSUSED2*/
void
#if __STDC__
iprintf(const char *fmt, ...)
#else
iprintf(fmt /* , va_alist */)
	char *fmt;
	/* va_dcl */
#endif
{
	register int i;
	va_list ap;

	for (i = indent; i >= 8; i -= 8)
		printf("\t");
	while (--i >= 0)
		printf(" ");
	va_start(ap, fmt);
	printf("%r", fmt, ap);
	va_end(ap);
}