xref: /original-bsd/sys/vm/vm_glue.c (revision 753853ba)
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_glue.c	7.14 (Berkeley) 02/19/92
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "param.h"
#include "systm.h"
#include "proc.h"
#include "resourcevar.h"
#include "buf.h"
#include "user.h"

#include "vm.h"
#include "vm_page.h"
#include "vm_kern.h"

int	avefree = 0;		/* XXX */
unsigned maxdmap = MAXDSIZ;	/* XXX */
int	readbuffers = 0;	/* XXX allow kgdb to read kernel buffer pool */

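/*
 * Check that the kernel addresses [addr, addr+len) are accessible
 * with the protection implied by rw (B_READ checks for read access,
 * anything else for write).
 */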
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page(addr);
	eaddr = round_page(addr+len-1);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	/*
	 * XXX there are still some things (e.g. the buffer cache) that
	 * are managed behind the VM system's back so even though an
	 * address is accessible in the mind of the VM system, there may
	 * not be physical pages where the VM thinks there is.  This can
	 * lead to bogus allocation of pages in the kernel address space
	 * or worse, inconsistencies at the pmap level.  We only worry
	 * about the buffer cache for now.
	 */
	if (!readbuffers && rv && (eaddr > (vm_offset_t)buffers &&
		   saddr < (vm_offset_t)buffers + MAXBSIZE * nbuf))
		rv = FALSE;
	return(rv == TRUE);
}

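/*
 * Likewise, check that the current process may access the user
 * addresses [addr, addr+len) for read or write as indicated by rw.
 */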
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr+len-1), prot);
	return(rv == TRUE);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so a debugger can plant a breakpoint).
 * All addresses are assumed to reside in the Sysmap.
 */
chgkprot(addr, len, rw)
	register caddr_t addr;
	int len, rw;
{
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	vm_map_protect(kernel_map, trunc_page(addr),
		       round_page(addr+len-1), prot, FALSE);
}
#endif

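/*
 * Wire down the current process's pages covering [addr, addr+len)
 * so they cannot be paged out while in use.
 */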
vslock(addr, len)
	caddr_t	addr;
	u_int	len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
			round_page(addr+len-1), FALSE);
}

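/*
 * Undo a previous vslock: unwire the pages covering [addr, addr+len).
 * The "dirtied" argument is currently unused.
 */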
vsunlock(addr, len, dirtied)
	caddr_t	addr;
	u_int	len;
	int dirtied;
{
#ifdef	lint
	dirtied++;
#endif	/* lint */
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
			round_page(addr+len-1), TRUE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.
 * NOTE: the kernel stack may be at a different location in the child
 * process, and thus addresses of automatic variables may be invalid
 * after cpu_fork returns in the child process.  We do nothing here
 * after cpu_fork returns.
 */
vm_fork(p1, p2, isvfork)
	register struct proc *p1, *p2;
	int isvfork;
{
	register struct user *up;
	vm_offset_t addr;

#ifdef i386
	/*
	 * Avoid copying any of the parent's pagetables or other per-process
	 * objects that reside in the map by marking all of them
	 * non-inheritable.
	 */
	(void)vm_map_inherit(&p1->p_vmspace->vm_map,
		UPT_MIN_ADDRESS-UPAGES*NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE);
#endif
	p2->p_vmspace = vmspace_fork(p1->p_vmspace);

#ifdef SYSVSHM
	if (p1->p_vmspace->vm_shm)
		shmfork(p1, p2, isvfork);
#endif

#ifndef	i386
	/*
	 * Allocate a wired-down (for now) pcb and kernel stack for the process
	 */
	addr = kmem_alloc_pageable(kernel_map, ctob(UPAGES));
	vm_map_pageable(kernel_map, addr, addr + ctob(UPAGES), FALSE);
#else
	/*
	 * XXX somehow, on the 386, pageout occasionally removes the active,
	 * wired-down kstack and pagetables WITHOUT going through
	 * vm_page_unwire!  Why this appears to work is not yet clear,
	 * yet it does...
	 */
	addr = kmem_alloc(kernel_map, ctob(UPAGES));
#endif
	up = (struct user *)addr;
	p2->p_addr = up;

	/*
	 * p_stats and p_sigacts currently point at fields within the user
	 * struct; they are reached through p_addr rather than through &u.
	 * Copy p_sigacts and the "copy" portion of p_stats; zero the rest
	 * of p_stats (statistics).
	 */
	p2->p_stats = &up->u_stats;
	p2->p_sigacts = &up->u_sigacts;
	up->u_sigacts = *p1->p_sigacts;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t)&up->u_stats.pstat_endzero -
	    (caddr_t)&up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t)&up->u_stats.pstat_endcopy -
	     (caddr_t)&up->u_stats.pstat_startcopy));

#ifdef i386
	{ u_int addr = UPT_MIN_ADDRESS - UPAGES*NBPG; struct vm_map *vp;

	vp = &p2->p_vmspace->vm_map;
	(void)vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr);
	(void)vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);
	(void)vm_map_inherit(vp, addr, UPT_MAX_ADDRESS, VM_INHERIT_NONE);
	}
#endif
	/*
	 * cpu_fork will copy and update the kernel stack and pcb,
	 * and make the child ready to run.  It marks the child
	 * so that it can return differently than the parent.
	 * It returns twice, once in the parent process and
	 * once in the child.
	 */
	return (cpu_fork(p1, p2));
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 */
vm_init_limits(p)
	register struct proc *p;
{

	/*
	 * Set up the initial limits on process VM.
	 * Set the maximum resident set size to be all
	 * of (reasonably) available memory.  This causes
	 * any single, large process to start random page
	 * replacement once it fills memory.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = p->p_rlimit[RLIMIT_RSS].rlim_max =
		ptoa(cnt.v_free_count);
}

#include "../vm/vm_pageout.h"

#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define SDB_SWAPIN	2
#define SDB_SWAPOUT	4
#endif

/*
 * Brutally simple:
 *	1. Attempt to swap in every swapped-out, runnable process in
 *	   order of priority.
 *	2. If not enough memory, wake the pageout daemon and let it
 *	   clear some space.
 */
sched()
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;
	vm_offset_t addr;
	vm_size_t size;

loop:
#ifdef DEBUG
	while (!enableswap)
		sleep((caddr_t)&proc0, PVM);
#endif
	pp = NULL;
	ppri = INT_MIN;
	for (p = allproc; p != NULL; p = p->p_nxt) {
		if (p->p_stat == SRUN && (p->p_flag & SLOAD) == 0) {
			pri = p->p_time + p->p_slptime - p->p_nice * 8;
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
	}
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("sched: running, procp %x pri %d\n", pp, ppri);
#endif
	/*
	 * Nothing to do, back to sleep
	 */
	if ((p = pp) == NULL) {
		sleep((caddr_t)&proc0, PVM);
		goto loop;
	}

	/*
	 * We would like to bring someone in.
	 * This part is really bogus because we could deadlock on memory
	 * despite our feeble check.
	 */
	size = round_page(ctob(UPAGES));
	addr = (vm_offset_t) p->p_addr;
	if (cnt.v_free_count > atop(size)) {
#ifdef DEBUG
		if (swapdebug & SDB_SWAPIN)
			printf("swapin: pid %d(%s)@%x, pri %d free %d\n",
			       p->p_pid, p->p_comm, p->p_addr,
			       ppri, cnt.v_free_count);
#endif
		vm_map_pageable(kernel_map, addr, addr+size, FALSE);
		(void) splclock();
		if (p->p_stat == SRUN)
			setrq(p);
		p->p_flag |= SLOAD;
		(void) spl0();
		p->p_time = 0;
		goto loop;
	}
	/*
	 * Not enough memory, jab the pageout daemon and wait until the
	 * coast is clear.
	 */
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("sched: no room for pid %d(%s), free %d\n",
		       p->p_pid, p->p_comm, cnt.v_free_count);
#endif
	(void) splhigh();
	VM_WAIT;
	(void) spl0();
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("sched: room again, free %d\n", cnt.v_free_count);
#endif
	goto loop;
}

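/*
 * A process is swappable only if it is loaded (SLOAD) and none of
 * SSYS, SKEEP, SWEXIT, or SPHYSIO is set.
 */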
#define	swappable(p) \
	(((p)->p_flag & (SSYS|SLOAD|SKEEP|SWEXIT|SPHYSIO)) == SLOAD)

/*
 * Swapout is driven by the pageout daemon.  Very simple: we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Otherwise we swap the longest-sleeping or stopped
 * process, if any, and failing that the longest-resident process.
 */
swapout_threads()
{
	register struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;
	extern int maxslp;

#ifdef DEBUG
	if (!enableswap)
		return;
#endif
	outp = outp2 = NULL;
	outpri = outpri2 = 0;
	for (p = allproc; p != NULL; p = p->p_nxt) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		case SRUN:
			if (p->p_time > outpri2) {
				outp2 = p;
				outpri2 = p->p_time;
			}
			continue;

		case SSLEEP:
		case SSTOP:
			if (p->p_slptime > maxslp) {
				swapout(p);
				didswap++;
			} else if (p->p_slptime > outpri) {
				outp = p;
				outpri = p->p_slptime;
			}
			continue;
		}
	}
	/*
	 * If we didn't get rid of any real duds, toss out the next most
	 * likely sleeping/stopped or running candidate.  We only do this
	 * if we are really low on memory since we don't gain much by doing
	 * it (UPAGES pages).
	 */
	if (didswap == 0 &&
	    cnt.v_free_count <= atop(round_page(ctob(UPAGES)))) {
		if ((p = outp) == 0)
			p = outp2;
#ifdef DEBUG
		if (swapdebug & SDB_SWAPOUT)
			printf("swapout_threads: no duds, try procp %x\n", p);
#endif
		if (p)
			swapout(p);
	}
}

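/*
 * Swap out a single process: unwire its u-area (pcb and kernel stack)
 * and mark it as no longer loaded (clear SLOAD).
 */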
swapout(p)
	register struct proc *p;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (swapdebug & SDB_SWAPOUT)
		printf("swapout: pid %d(%s)@%x, stat %x pri %d free %d\n",
		       p->p_pid, p->p_comm, p->p_addr, p->p_stat,
		       p->p_slptime, cnt.v_free_count);
#endif
	size = round_page(ctob(UPAGES));
	addr = (vm_offset_t) p->p_addr;
#ifdef hp300
	/*
	 * Ugh!  u-area is double mapped to a fixed address behind the
	 * back of the VM system and accesses are usually through that
	 * address rather than the per-process address.  Hence reference
	 * and modify information are recorded at the fixed address and
	 * lost at context switch time.  We assume the u-struct and
	 * kernel stack are always accessed/modified and force it to be so.
	 */
	{
		register int i;
		volatile long tmp;

		for (i = 0; i < UPAGES; i++) {
			tmp = *(long *)addr; *(long *)addr = tmp;
			addr += NBPG;
		}
		addr = (vm_offset_t) p->p_addr;
	}
#endif
#ifdef mips
	/*
	 * Be sure to save the floating point coprocessor state before
	 * paging out the u-struct.
	 */
	{
		extern struct proc *machFPCurProcPtr;

		if (p == machFPCurProcPtr) {
			MachSaveCurFPState(p);
			machFPCurProcPtr = (struct proc *)0;
		}
	}
#endif
#ifndef	i386 /* temporary measure until we find the spontaneous kstack unwire */
	vm_map_pageable(kernel_map, addr, addr+size, TRUE);
	pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
#endif
	(void) splhigh();
	p->p_flag &= ~SLOAD;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();
	p->p_time = 0;
}

/*
 * The rest of these routines fake thread handling
 */

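/*
 * Record the event the current process will wait on; thread_block
 * or thread_sleep actually puts the process to sleep.
 */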
void
assert_wait(event, ruptible)
	int event;
	boolean_t ruptible;
{
#ifdef lint
	ruptible++;
#endif
	curproc->p_thread = event;
}

void
thread_block()
{
	int s = splhigh();

	if (curproc->p_thread)
		sleep((caddr_t)curproc->p_thread, PVM);
	splx(s);
}

void
thread_sleep(event, lock, ruptible)
	int event;
	simple_lock_t lock;
	boolean_t ruptible;
{
	int s = splhigh();

#ifdef lint
	ruptible++;
#endif
	curproc->p_thread = event;
	simple_unlock(lock);
	if (curproc->p_thread)
		sleep((caddr_t)event, PVM);
	splx(s);
}

void
thread_wakeup(event)
	int event;
{
	int s = splhigh();

	wakeup((caddr_t)event);
	splx(s);
}

/*
 * DEBUG stuff
 */

int indent = 0;

#include <machine/stdarg.h>		/* see subr_prf.c */

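/*
 * Indented printf: emits "indent" spaces (in tab-sized chunks) before
 * formatting its arguments.
 */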
/*ARGSUSED2*/
#if __STDC__
iprintf(const char *fmt, ...)
#else
iprintf(fmt /* , va_alist */)
	char *fmt;
	/* va_dcl */
#endif
{
	register int i;
	va_list ap;

	for (i = indent; i >= 8; i -= 8)
		printf("\t");
	while (--i >= 0)
		printf(" ");
	va_start(ap, fmt);
	printf("%r", fmt, ap);
	va_end(ap);
}
537