xref: /original-bsd/sys/vm/vm_glue.c (revision 56b48dd2)
/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * The CMU software License Agreement specifies the terms and conditions
 * for use and redistribution.
 *
 *	@(#)vm_glue.c	7.2 (Berkeley) 04/20/91
 */

#include "param.h"
#include "systm.h"
#include "proc.h"
#include "resourcevar.h"
#include "buf.h"
#include "user.h"

#include "vm.h"
#include "vm_page.h"
#include "vm_kern.h"

int	avefree = 0;		/* XXX */
unsigned maxdmap = MAXDSIZ;	/* XXX */

kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	rv = vm_map_check_protection(kernel_map, trunc_page(addr),
				     round_page(addr+len-1), prot);
	return(rv == TRUE);
}

useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr+len-1), prot);
	return(rv == TRUE);
}
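
/*
 * Illustrative example (not part of the original file): a driver doing
 * raw I/O would typically validate the user's buffer with useracc()
 * before starting the transfer.  Note the direction flip: a B_READ
 * transfer stores into the buffer, so it needs write access.  The
 * function below is a hypothetical sketch, kept out of compilation.
 */
#ifdef notdef
examp_checkbuf(addr, len, rw)
	caddr_t addr;
	int len, rw;
{

	if (!useracc(addr, len, rw == B_READ ? B_WRITE : B_READ))
		return (EFAULT);
	return (0);
}
#endif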

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+size
 * (presumably so a debugger can plant a breakpoint).
 * All addresses are assumed to reside in the Sysmap.
 */
chgkprot(addr, len, rw)
	register caddr_t addr;
	int len, rw;
{
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	vm_map_protect(kernel_map, trunc_page(addr),
		       round_page(addr+len-1), prot, FALSE);
}
#endif

vslock(addr, len)
	caddr_t	addr;
	u_int	len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
			round_page(addr+len-1), FALSE);
}

vsunlock(addr, len, dirtied)
	caddr_t	addr;
	u_int	len;
	int dirtied;
{
#ifdef	lint
	dirtied++;
#endif	/* lint */
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
			round_page(addr+len-1), TRUE);
}
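
/*
 * Illustrative example (not part of the original file): physio-style
 * code brackets a raw transfer with these two calls, wiring the user
 * buffer down for the duration of the I/O:
 *
 *	vslock(base, count);
 *	... start the device I/O and sleep until it completes ...
 *	vsunlock(base, count, rw == B_READ);
 *
 * The dirtied argument says whether the pages may have been modified
 * (a B_READ transfer writes into the buffer); this implementation
 * currently ignores it.
 */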

vm_fork(p1, p2, isvfork)
	register struct proc *p1, *p2;
	int isvfork;
{
	register struct user *up;
	vm_offset_t addr;
	vm_size_t size;

	p2->p_vmspace = vmspace_fork(p1->p_vmspace);

#ifdef SYSVSHM
	if (p1->p_vmspace->vm_shm)
		shmfork(p1, p2, isvfork);
#endif

	/*
	 * Allocate a wired-down (for now) u-area for the process.
	 */
	size = round_page(ctob(UPAGES));
	addr = kmem_alloc_pageable(kernel_map, size);
	vm_map_pageable(kernel_map, addr, addr + size, FALSE);
	p2->p_addr = (caddr_t)addr;
	up = (struct user *)addr;

	/*
	 * Update the current u-area and copy it to the new one.
	 * THIS SHOULD BE DONE DIFFERENTLY, probably with a single
	 * machine-dependent call that copies and updates the pcb+stack,
	 * replacing the resume and savectx.
	 */
	resume(pcbb(p1));
	bcopy(p1->p_addr, p2->p_addr, size);
	/*
	 * p_stats and p_sigacts currently point at fields in the user
	 * struct; they are referenced through p_addr rather than
	 * through &u.
	 */
	p2->p_stats = &((struct user *)p2->p_addr)->u_stats;
	p2->p_sigacts = &((struct user *)p2->p_addr)->u_sigacts;

	/*
	 * Clear vm statistics of new process.
	 */
	bzero((caddr_t)&up->u_stats.p_ru, sizeof (struct rusage));
	bzero((caddr_t)&up->u_stats.p_cru, sizeof (struct rusage));

	PMAP_ACTIVATE(&p2->p_vmspace->vm_pmap, (struct pcb *)p2->p_addr, 0);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
	up->u_pcb.pcb_sswap = (int *)&u.u_ssave;
	if (savectx(&up->u_ssave)) {
		/*
		 * Return 1 in the child.
		 */
		return (1);
	}
	return (0);
}
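
/*
 * Illustrative example (not part of the original file): as with setjmp,
 * a fork-style caller distinguishes the two return paths of vm_fork():
 *
 *	if (vm_fork(p1, p2, isvfork)) {
 *		... child: finish setup and return to user mode ...
 *	}
 *	... parent continues here ...
 */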

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 */
vm_init_limits(p)
	register struct proc *p;
{

	/*
	 * Set up the initial limits on process VM.
	 * Set the maximum resident set size to be all
	 * of (reasonably) available memory.  This causes
	 * any single, large process to start random page
	 * replacement once it fills memory.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = p->p_rlimit[RLIMIT_RSS].rlim_max =
		ptoa(vm_page_free_count);
}
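
/*
 * Illustrative example (not part of the original file): code that grows
 * a segment checks the corresponding soft limit before expanding, e.g.
 *
 *	if (newsize > p->p_rlimit[RLIMIT_DATA].rlim_cur)
 *		return (ENOMEM);
 *
 * where newsize is a hypothetical byte count for the enlarged segment.
 */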

#include "../vm/vm_pageout.h"

#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define SDB_SWAPIN	2
#define SDB_SWAPOUT	4
#endif

/*
 * Brutally simple:
 *	1. Attempt to swap in every swapped-out, runnable process in
 *	   order of priority.
 *	2. If there is not enough memory, wake the pageout daemon and
 *	   let it clear some space.
 */
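/*
 * The swapin priority computed below is p_time + p_slptime - p_nice * 8.
 * As an illustration (numbers invented): a process swapped out for 10
 * seconds (p_time) with p_slptime 5 and a nice value of 0 scores 15,
 * so it is chosen over a freshly swapped-out process that scores near 0.
 */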
sched()
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;
	vm_offset_t addr;
	vm_size_t size;

loop:
#ifdef DEBUG
	if (!enableswap) {
		pp = NULL;
		goto noswap;
	}
#endif
	pp = NULL;
	ppri = INT_MIN;
	for (p = allproc; p != NULL; p = p->p_nxt)
		if (p->p_stat == SRUN && (p->p_flag & SLOAD) == 0) {
			pri = p->p_time + p->p_slptime - p->p_nice * 8;
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("sched: running, procp %x pri %d\n", pp, ppri);
noswap:
#endif
	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		sleep((caddr_t)&proc0, PVM);
		goto loop;
	}

	/*
	 * We would like to bring someone in.
	 * This part is really bogus because we could deadlock on memory
	 * despite our feeble check.
	 */
	size = round_page(ctob(UPAGES));
	addr = (vm_offset_t) p->p_addr;
	if (vm_page_free_count > atop(size)) {
#ifdef DEBUG
		if (swapdebug & SDB_SWAPIN)
			printf("swapin: pid %d(%s)@%x, pri %d free %d\n",
			       p->p_pid, p->p_comm, p->p_addr,
			       ppri, vm_page_free_count);
#endif
		vm_map_pageable(kernel_map, addr, addr+size, FALSE);
		(void) splclock();
		if (p->p_stat == SRUN)
			setrq(p);
		p->p_flag |= SLOAD;
		(void) spl0();
		p->p_time = 0;
		goto loop;
	}
	/*
	 * Not enough memory, jab the pageout daemon and wait until the
	 * coast is clear.
	 */
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("sched: no room for pid %d(%s), free %d\n",
		       p->p_pid, p->p_comm, vm_page_free_count);
#endif
	(void) splhigh();
	VM_WAIT;
	(void) spl0();
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("sched: room again, free %d\n", vm_page_free_count);
#endif
	goto loop;
}

#define	swappable(p) \
	(((p)->p_flag & (SSYS|SLOAD|SKEEP|SWEXIT|SPHYSIO)) == SLOAD)
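
/*
 * In other words, a process is swappable exactly when SLOAD is the only
 * one of these flags set: it is resident, but is not a system process
 * (SSYS), is not temporarily held in core (SKEEP), is not exiting
 * (SWEXIT), and is not engaged in raw/physical I/O (SPHYSIO).
 */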

/*
 * Swapout is driven by the pageout daemon.  Very simple: we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping or stopped for at least maxslp seconds,
 * they are swapped.  Otherwise we swap the longest-sleeping or stopped
 * process, if any, and failing that the longest-resident process.
 */
swapout_threads()
{
	register struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;
	extern int maxslp;

#ifdef DEBUG
	if (!enableswap)
		return;
#endif
	outp = outp2 = NULL;
	outpri = outpri2 = 0;
	for (p = allproc; p != NULL; p = p->p_nxt) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		case SRUN:
			if (p->p_time > outpri2) {
				outp2 = p;
				outpri2 = p->p_time;
			}
			continue;

		case SSLEEP:
		case SSTOP:
			if (p->p_slptime > maxslp) {
				swapout(p);
				didswap++;
			} else if (p->p_slptime > outpri) {
				outp = p;
				outpri = p->p_slptime;
			}
			continue;
		}
	}
	/*
	 * If we didn't get rid of any real duds, toss out the next most
	 * likely sleeping/stopped or running candidate.  We only do this
	 * if we are really low on memory since we don't gain much by
	 * doing it (UPAGES pages).
	 */
	if (didswap == 0 &&
	    vm_page_free_count <= atop(round_page(ctob(UPAGES)))) {
		if ((p = outp) == 0)
			p = outp2;
#ifdef DEBUG
		if (swapdebug & SDB_SWAPOUT)
			printf("swapout_threads: no duds, try procp %x\n", p);
#endif
		if (p)
			swapout(p);
	}
}

swapout(p)
	register struct proc *p;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (swapdebug & SDB_SWAPOUT)
		printf("swapout: pid %d(%s)@%x, stat %x pri %d free %d\n",
		       p->p_pid, p->p_comm, p->p_addr, p->p_stat,
		       p->p_slptime, vm_page_free_count);
#endif
	size = round_page(ctob(UPAGES));
	addr = (vm_offset_t) p->p_addr;
	vm_map_pageable(kernel_map, addr, addr+size, TRUE);
	pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
	(void) splhigh();
	p->p_flag &= ~SLOAD;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();
	p->p_time = 0;
}

/*
 * The rest of these routines fake thread handling.
 */

void
assert_wait(event, ruptible)
	int event;
	boolean_t ruptible;
{
#ifdef lint
	ruptible++;
#endif
	curproc->p_thread = event;
}

void
thread_block()
{
	int s = splhigh();

	if (curproc->p_thread)
		sleep((caddr_t)curproc->p_thread, PVM);
	splx(s);
}

thread_sleep(event, lock, ruptible)
	int event;
	simple_lock_t lock;
	boolean_t ruptible;
{
	int s = splhigh();

#ifdef lint
	ruptible++;
#endif
	curproc->p_thread = event;
	simple_unlock(lock);
	if (curproc->p_thread)
		sleep((caddr_t)event, PVM);
	splx(s);
}

thread_wakeup(event)
	int event;
{
	int s = splhigh();

	wakeup((caddr_t)event);
	splx(s);
}
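
/*
 * Illustrative example (not part of the original file): VM code waits
 * for an event by pairing assert_wait() with thread_block(), and some
 * other context later posts the event with thread_wakeup().  With a
 * hypothetical event:
 *
 *	assert_wait((int)&some_event, FALSE);
 *	thread_block();
 *
 * and, from the waking side:
 *
 *	thread_wakeup((int)&some_event);
 */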

/*
 * DEBUG stuff
 */

int indent = 0;

/*ARGSUSED2*/
iprintf(a, b, c, d, e, f, g, h)
	char *a;
{
	register int i;

	for (i = indent; i > 0; ) {
		if (i >= 8) {
			putchar('\t', 1, (caddr_t)0);
			i -= 8;
		} else {
			putchar(' ', 1, (caddr_t)0);
			i--;
		}
	}
	printf(a, b, c, d, e, f, g, h);
}
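
/*
 * Illustrative example (not part of the original file): debug routines
 * use the global indent to nest their output, e.g.
 *
 *	indent += 2;
 *	iprintf("map %x: pmap %x\n", map, map->pmap);
 *	indent -= 2;
 *
 * where map is a hypothetical vm_map pointer being dumped.
 */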