/*
 * Copyright (c) 1988 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Computer Consoles Inc.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)vm_machdep.c	7.2 (Berkeley) 07/09/88
 */

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "cmap.h"
#include "mount.h"
#include "vm.h"
#include "text.h"
#include "kernel.h"

#include "pte.h"
#include "cpu.h"
#include "mtpr.h"

/*
 * Set a red zone in the kernel stack after the u. area.
 */
setredzone(pte, vaddr)
	register struct pte *pte;
	caddr_t vaddr;
{

	pte += (sizeof (struct user) + NBPG - 1) / NBPG;
	*(int *)pte &= ~PG_PROT;
	*(int *)pte |= PG_URKR;
	if (vaddr)
		mtpr(TBIS, vaddr + sizeof (struct user) + NBPG - 1);
}

/*
 * Check for valid program size.
 * NB - Check data and data growth separately as they may overflow
 * when summed together.
 */
chksize(ts, ids, uds, ss)
	register unsigned ts, ids, uds, ss;
{
	extern unsigned maxtsize;

	if (ctob(ts) > maxtsize ||
	    ctob(ids) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(ids + uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(ss) > u.u_rlimit[RLIMIT_STACK].rlim_cur) {
		u.u_error = ENOMEM;
		return (1);
	}
	return (0);
}

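/*
 * To see why the checks above are made separately as well as summed:
 * ctob() shifts a page count left by PGSHIFT, so with 32-bit unsigneds
 * the byte count of the sum can wrap to a small number even though each
 * operand alone is enormous.  A minimal sketch of the failure mode,
 * assuming a 1K page (the constants and names here are illustrative,
 * not Tahoe's):
 */
#ifdef notdef
#define	EX_PGSHIFT	10
#define	ex_ctob(x)	((unsigned)(x) << EX_PGSHIFT)

exoverflow()
{
	unsigned ids = 0x200000, uds = 0x200000;	/* 2M pages each */

	/* ex_ctob(ids + uds) wraps past 2^32 and compares as 0 ... */
	printf("summed: %u\n", ex_ctob(ids + uds));
	/* ... while each term alone is caught at 0x80000000 bytes. */
	printf("separate: %u %u\n", ex_ctob(ids), ex_ctob(uds));
}
#endif
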
/*ARGSUSED*/
newptes(pte, v, size)
	register struct pte *pte;
	u_int v;
	register int size;
{
	register caddr_t a = ptob(v);

#ifdef lint
	pte = pte;
#endif
	if (size >= 8) {
		mtpr(TBIA, 0);
		return;
	}
	while (size > 0) {
		mtpr(TBIS, a);
		a += NBPG;
		size--;
	}
}

/*
 * Change protection codes of the text segment.
 * Have to flush the translation buffer since this
 * affects the virtual memory mapping of the current process.
 */
chgprot(addr, tprot)
	caddr_t addr;
	long tprot;
{
	unsigned v;
	int tp;
	register struct pte *pte;
	register struct cmap *c;

	v = clbase(btop(addr));
	if (!isatsv(u.u_procp, v)) {
		u.u_error = EFAULT;
		return (0);
	}
	tp = vtotp(u.u_procp, v);
	pte = tptopte(u.u_procp, tp);
	if (pte->pg_fod == 0 && pte->pg_pfnum) {
		c = &cmap[pgtocm(pte->pg_pfnum)];
		if (c->c_blkno && c->c_mdev != MSWAPX)
			munhash(mount[c->c_mdev].m_dev,
			    (daddr_t)(u_long)c->c_blkno);
	}
	*(int *)pte &= ~PG_PROT;
	*(int *)pte |= tprot;
	distcl(pte);
	tbiscl(v);
	return (1);
}

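/*
 * A typical caller is ptrace-style text patching: the (normally
 * read-only, shared) text page is made writable, the word is stored,
 * and the old protection is restored.  A hedged sketch; the protection
 * codes PG_UW/PG_URKW and the word-store helper exwrite() are
 * assumptions here, not definitions from this file.
 */
#ifdef notdef
expoke(addr, val)
	caddr_t addr;
	int val;
{
	if (chgprot(addr, PG_UW) == 0)		/* make the page writable */
		return (-1);
	exwrite(addr, val);			/* hypothetical word store */
	(void) chgprot(addr, PG_URKW);		/* back to read-only text */
	return (0);
}
#endif
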
settprot(tprot)
	long tprot;
{
	register int *ptaddr, i;

	ptaddr = (int *)mfpr(P0BR);
	for (i = 0; i < u.u_tsize; i++) {
		ptaddr[i] &= ~PG_PROT;
		ptaddr[i] |= tprot;
	}
	mtpr(TBIA, 0);
}

#ifdef notdef
/*
 * Rest are machine-dependent
 */
getmemc(addr)
	caddr_t addr;
{
	register int c;
	struct pte savemap;

	savemap = mmap[0];
	*(int *)mmap = PG_V | PG_KR | btop(addr);
	mtpr(TBIS, vmmap);
	uncache(&vmmap[(int)addr & PGOFSET]);
	c = *(char *)&vmmap[(int)addr & PGOFSET];
	mmap[0] = savemap;
	mtpr(TBIS, vmmap);
	return (c & 0377);
}

putmemc(addr, val)
	caddr_t addr;
	int val;
{
	struct pte savemap;

	savemap = mmap[0];
	*(int *)mmap = PG_V | PG_KW | btop(addr);
	mtpr(TBIS, vmmap);
	*(char *)&vmmap[(int)addr & PGOFSET] = val;

	mtpr(PADC, 0);
	mtpr(PACC, 0);

	mmap[0] = savemap;
	mtpr(TBIS, vmmap);
}
#endif

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLBYTES.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
	while (size > 0) {
		*tpte++ = *fpte;
		*(int *)fpte++ = 0;
		mtpr(TBIS, from);
		mtpr(TBIS, to);
		mtpr(P1DC, to);		/* purge !! */
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
}

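/*
 * pagemove() is the kind of primitive a buffer cache uses to shuffle
 * memory between buffers without copying.  A hedged sketch of a caller,
 * assuming 4.3BSD-style struct buf fields (the routine itself is
 * hypothetical):
 */
#ifdef notdef
exshuffle(bp1, bp2)
	register struct buf *bp1, *bp2;
{

	/* hand the first cluster of bp1's memory over to bp2 */
	pagemove(bp1->b_un.b_addr, bp2->b_un.b_addr, CLBYTES);
}
#endif
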
/*
 * Code and data key management routines.
 *
 * The array ckey_cnt maintains the count of processes currently
 * sharing each code key.  The array ckey_cache maintains a record
 * of all code keys used since the last flush of the code cache.
 * Such keys may not be reused, even if unreferenced, until
 * the cache is flushed.  The data cache key handling is analogous.
 * The arrays ckey_cnt and ckey_cache are always kept in such a way
 * that the following invariant holds:
 *	ckey_cnt > 0	implies	ckey_cache == 1
 * meaning that as long as a code key is used by at least one process,
 * it is marked as being 'in the cache'.  Of course, the following
 * invariant also holds:
 *	ckey_cache == 0	implies	ckey_cnt == 0
 * which is just the contrapositive of the first invariant.
 * Equivalent invariants hold for the data key arrays.
 */
struct	keystats ckeystats = { NCKEY - 1 };
struct	keystats dkeystats = { NDKEY - 1 };

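/*
 * The invariant above is cheap to audit.  A hedged sketch of a
 * debugging check (the routine is hypothetical; ckey_cnt, ckey_cache
 * and MAXCKEY are the arrays and limit used by the allocators below):
 */
#ifdef notdef
ckeycheck()
{
	register int i;

	for (i = 1; i <= MAXCKEY; i++)
		if (ckey_cnt[i] > 0 && ckey_cache[i] == 0)
			panic("ckeycheck");
}
#endif
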
/*
 * Release a code key.
 */
ckeyrelease(key)
	int key;
{
	register int s;

	s = spl8();
	if (--ckey_cnt[key] < 0) {
		printf("ckeyrelease: key = %d\n", key);
		ckey_cnt[key] = 0;
	}
	if (ckey_cnt[key] == 0)
		ckeystats.ks_dirty++;
	splx(s);
}

/*
 * Release a data key.
 */
dkeyrelease(key)
	int key;
{
	register int s;

	s = spl8();
	if (--dkey_cnt[key] != 0) {
		printf("dkeyrelease: key = %d\n", key);
		dkey_cnt[key] = 0;
	}
	splx(s);
	dkeystats.ks_dirty++;
}

/*
 * Invalidate the data cache for a process
 * by exchanging cache keys.
 */
dkeyinval(p)
	register struct proc *p;
{
	int s;

	dkeystats.ks_inval++;
	s = spl8();
	if (--dkey_cnt[p->p_dkey] != 0)
		dkey_cnt[p->p_dkey] = 0;
	if (p == u.u_procp && !noproc) {
		p->p_dkey = getdatakey();
		mtpr(DCK, p->p_dkey);
	} else
		p->p_dkey = 0;
	splx(s);
}

/*
 * Get a code key.
 * Strategy: try each of the following in turn
 * until a key is allocated.
 *
 * 1) Find an unreferenced key not yet in the cache.
 *    If this fails, a code cache purge will be necessary.
 * 2) Find an unreferenced key.  Mark all unreferenced keys
 *    as available and purge the cache.
 * 3) Free the keys from all processes not sharing keys.
 * 4) Free the keys from all processes.
 */
getcodekey()
{
	register int i, s, freekey;
	register struct proc *p;
	int desperate = 0;
	static int lastkey = MAXCKEY;

	ckeystats.ks_allocs++;
	s = spl8();
	freekey = 0;
	for (i = lastkey + 1; ; i++) {
		if (i > MAXCKEY)
			i = 1;
		if ((int)ckey_cache[i] == 0) {	/* free key, take it */
			ckey_cache[i] = 1, ckey_cnt[i] = 1;
			splx(s);
			ckeystats.ks_allocfree++;
			ckeystats.ks_avail--;
			lastkey = i;
			return (i);
		}
		if (ckey_cnt[i] == 0)		/* save for potential use */
			freekey = i;
		if (i == lastkey)
			break;
	}
	/*
	 * All code keys were marked as being in cache.
	 * If a key was in the cache, but not in use, grab it.
	 */
	if (freekey != 0) {
purge:
		/*
		 * If we've run out of free keys,
		 * try and free up some other keys to avoid
		 * future cache purges.
		 */
		ckey_cnt[freekey] = 1, ckey_cache[freekey] = 1;
		for (i = 1; i <= MAXCKEY; i++)
			if (ckey_cnt[i] == 0) {
				ckey_cache[i] = 0;
				ckeystats.ks_avail++;
			}
		mtpr(PACC, 0);
		splx(s);
		ckeystats.ks_dirty = 0;
		ckeystats.ks_norefs++;
		return (freekey);
	}

	/*
	 * All keys are marked as in the cache and in use.
	 * Release all unshared keys, or, on second pass,
	 * release all keys.
	 */
steal:
	for (p = allproc; p; p = p->p_nxt)
		if (p->p_ckey != 0 && (p->p_flag & SSYS) == 0) {
			i = p->p_ckey;
			if (ckey_cnt[i] == 1 || desperate) {
				p->p_ckey = 0;
				if (--ckey_cnt[i] == 0) {
					freekey = i;
					if (p->p_textp)
						p->p_textp->x_ckey = 0;
				}
			}
		}

	if (freekey) {
		ckeystats.ks_taken++;
		goto purge;
	} else {
		desperate++;
		goto steal;
	}
}

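/*
 * Allocation and release pair up over a process' lifetime: a fresh
 * image takes new keys, and teardown gives them back.  A hedged sketch
 * of the pairing (the exec/exit context and the CCK register name are
 * assumptions; DCK is the register used by dkeyinval() above):
 */
#ifdef notdef
exnewimage(p)
	register struct proc *p;
{
	p->p_ckey = getcodekey();	/* fresh code key for new text */
	p->p_dkey = getdatakey();	/* fresh data key for new data */
	mtpr(CCK, p->p_ckey);		/* assumed code-cache key register */
	mtpr(DCK, p->p_dkey);
}

exteardown(p)
	register struct proc *p;
{
	ckeyrelease(p->p_ckey);
	dkeyrelease(p->p_dkey);
	p->p_ckey = p->p_dkey = 0;
}
#endif
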
/*
 * Get a data key.
 *
 * General strategy:
 * 1) Try to find a data key that isn't in the cache. Allocate it.
 * 2) If all data keys are in the cache, find one which isn't
 *    allocated.  Mark all unallocated keys as not in cache,
 *    purge the cache, and allocate this one.
 * 3) If all of them are allocated, free all processes' keys
 *    and let them reclaim them as they run.
 */
getdatakey()
{
	register int i, freekey;
	register struct proc *p;
	int s;
	static int lastkey = MAXDKEY;

	dkeystats.ks_allocs++;
	s = spl8();
	freekey = 0;
	for (i = lastkey + 1; ; i++) {
		if (i > MAXDKEY)
			i = 1;
		if ((int)dkey_cache[i] == 0) {	/* free key, take it */
			dkey_cache[i] = 1, dkey_cnt[i] = 1;
			splx(s);
			dkeystats.ks_allocfree++;
			dkeystats.ks_avail--;
			lastkey = i;
			return (i);
		}
		if (dkey_cnt[i] == 0)
			freekey = i;
		if (i == lastkey)
			break;
	}
purge:
	if (freekey) {
		/*
		 * Try and free up some more keys to avoid
		 * future allocations causing a cache purge.
		 */
		dkey_cnt[freekey] = 1, dkey_cache[freekey] = 1;
		for (i = 1; i <= MAXDKEY; i++)
			if (dkey_cnt[i] == 0) {
				dkey_cache[i] = 0;
				dkeystats.ks_avail++;
			}
		mtpr(PADC, 0);
		splx(s);
		dkeystats.ks_norefs++;
		dkeystats.ks_dirty = 0;
		return (freekey);
	}

	/*
	 * Now, we have to take a key from someone.
	 * May as well take them all, so we get them
	 * from all of the idle procs.
	 */
	for (p = allproc; p; p = p->p_nxt)
		if (p->p_dkey != 0 && (p->p_flag & SSYS) == 0) {
			freekey = p->p_dkey;
			dkey_cnt[freekey] = 0;
			p->p_dkey = 0;
		}
	dkeystats.ks_taken++;
	goto purge;
}

/*VARARGS1*/
vtoph(p, v)
	register struct proc *p;
	unsigned v;
{
	register struct pte *pte;
	register unsigned pg;

	pg = btop(v);
	if (pg >= BTOPKERNBASE)
		pte = &Sysmap[pg - BTOPKERNBASE];
	else
		pte = vtopte(p, pg);
	return ((pte->pg_pfnum << PGSHIFT) + (v & PGOFSET));
}

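/*
 * vtoph() splices the page frame number from the governing pte together
 * with the byte offset within the page:
 *	phys = (pg_pfnum << PGSHIFT) + (v & PGOFSET)
 * A hedged, hypothetical example of a driver computing a DMA address
 * for a buffer in the current process:
 */
#ifdef notdef
exdmaddr(addr)
	caddr_t addr;
{
	return (vtoph(u.u_procp, (unsigned)addr));
}
#endif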
461