xref: /original-bsd/sys/tahoe/tahoe/vm_machdep.c (revision fa921481)
/*
 * Copyright (c) 1988 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Computer Consoles Inc.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)vm_machdep.c	7.7 (Berkeley) 06/21/90
 */

#include "param.h"
#include "systm.h"
#include "user.h"
#include "proc.h"
#include "cmap.h"
#include "vm.h"
#include "text.h"
#include "kernel.h"

#include "pte.h"
#include "cpu.h"
#include "mtpr.h"

/*
 * Set a red zone in the kernel stack after the u. area.
 */
setredzone(pte, vaddr)
	register struct pte *pte;
	caddr_t vaddr;
{

	pte += (sizeof (struct user) + NBPG - 1) / NBPG;
	*(int *)pte &= ~PG_PROT;
	*(int *)pte |= PG_URKR;
	if (vaddr)
		mtpr(TBIS, vaddr + sizeof (struct user) + NBPG - 1);
}

/*
 * Check for valid program size
 * NB - Check data and data growth separately as they may overflow
 * when summed together.
 */
chksize(ts, ids, uds, ss)
	register unsigned ts, ids, uds, ss;
{
	extern unsigned maxtsize;

	if (ctob(ts) > maxtsize ||
	    ctob(ids) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(ids + uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(ss) > u.u_rlimit[RLIMIT_STACK].rlim_cur) {
		return (ENOMEM);
	}
	return (0);
}
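
/*
 * A minimal sketch (never compiled in) of the overflow the note above
 * guards against.  It assumes 32-bit unsigned arithmetic and,
 * hypothetically, PGSHIFT == 10 so that ctob(x) is x << 10; the click
 * counts are invented for illustration.
 */
#ifdef notdef
showoverflow()
{
	unsigned ids = 0x00300000;	/* ctob(ids) = 0xC0000000, over any sane limit */
	unsigned uds = 0x00100000;	/* ctob(uds) = 0x40000000, likewise */

	/*
	 * ids + uds = 0x00400000 clicks, so ctob(ids + uds) shifts to
	 * 0x100000000 and wraps to 0 in 32 bits: the summed check alone
	 * would pass.  The separate ctob(ids) and ctob(uds) comparisons
	 * in chksize() reject these sizes before the sum can wrap.
	 */
	printf("ctob(ids + uds) wraps to %x\n", ctob(ids + uds));
}
#endif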

/*
 * Invalidate translation buffer entries after a run of PTEs
 * has been changed; for a large run a single TBIA flush is
 * cheaper than many individual TBIS operations.
 */
/*ARGSUSED*/
newptes(pte, v, size)
	register struct pte *pte;
	u_int v;
	register int size;
{
	register caddr_t a = ptob(v);

#ifdef lint
	pte = pte;
#endif
	if (size >= 8) {
		mtpr(TBIA, 0);
		return;
	}
	while (size > 0) {
		mtpr(TBIS, a);
		a += NBPG;
		size--;
	}
}
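
/*
 * Usage sketch (not compiled in): callers hand newptes() the first
 * changed PTE, the starting virtual page number, and the page count.
 * The process pointer and page numbers here are hypothetical.
 */
#ifdef notdef
	newptes(vtopte(p, v), v, 4);	/* small run: four TBIS operations */
	newptes(vtopte(p, v), v, 16);	/* large run: one TBIA flush */
#endif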

/*
 * Change protection codes of text segment.
 * Have to flush translation buffer since this affects the
 * virtual memory mapping of the current process.
 */
chgprot(addr, tprot)
	caddr_t addr;
	long tprot;
{
	unsigned v;
	int tp;
	register struct pte *pte;
	register struct cmap *c;

	v = clbase(btop(addr));
	if (!isatsv(u.u_procp, v))
		return (EFAULT);
	tp = vtotp(u.u_procp, v);
	pte = tptopte(u.u_procp, tp);
	if (pte->pg_fod == 0 && pte->pg_pfnum) {
		c = &cmap[pgtocm(pte->pg_pfnum)];
		if (c->c_blkno)
			munhash(c->c_vp, (daddr_t)(u_long)c->c_blkno);
	}
	*(int *)pte &= ~PG_PROT;
	*(int *)pte |= tprot;
	distcl(pte);
	tbiscl(v);
	return (0);
}

/*
 * Change the protection code of every page in the current
 * process's text segment, then flush the translation buffer.
 */
settprot(tprot)
	long tprot;
{
	register int *ptaddr, i;

	ptaddr = (int *)mfpr(P0BR);
	for (i = 0; i < u.u_tsize; i++) {
		ptaddr[i] &= ~PG_PROT;
		ptaddr[i] |= tprot;
	}
	mtpr(TBIA, 0);
}
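
/*
 * Usage sketch (not compiled in): a ptrace-style caller might make
 * the text writable before patching in a breakpoint, then restore
 * read-only protection.  PG_UW is assumed here to be the user-write
 * protection code on this machine; addr is hypothetical.
 */
#ifdef notdef
	if (chgprot(addr, PG_UW) == 0) {
		/* ... deposit the breakpoint instruction at addr ... */
		(void) chgprot(addr, PG_URKR);
	}
#endif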

#ifdef notdef
/*
 * Rest are machine-dependent
 */
getmemc(addr)
	caddr_t addr;
{
	register int c;
	struct pte savemap;

	savemap = mmap[0];
	*(int *)mmap = PG_V | PG_KR | btop(addr);
	mtpr(TBIS, vmmap);
	uncache(&vmmap[(int)addr & PGOFSET]);
	c = *(char *)&vmmap[(int)addr & PGOFSET];
	mmap[0] = savemap;
	mtpr(TBIS, vmmap);
	return (c & 0377);
}

putmemc(addr, val)
	caddr_t addr;
	int val;
{
	struct pte savemap;

	savemap = mmap[0];
	*(int *)mmap = PG_V | PG_KW | btop(addr);
	mtpr(TBIS, vmmap);
	*(char *)&vmmap[(int)addr & PGOFSET] = val;

	mtpr(PADC, 0);
	mtpr(PACC, 0);

	mmap[0] = savemap;
	mtpr(TBIS, vmmap);
}
#endif

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLBYTES.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
	while (size > 0) {
		*tpte++ = *fpte;
		*(int *)fpte++ = 0;
		mtpr(TBIS, from);
		mtpr(TBIS, to);
		mtpr(P1DC, to);		/* purge !! */
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
}
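
/*
 * Usage sketch (not compiled in): the buffer cache grows one buffer
 * at another's expense by remapping whole clicks of kernel memory
 * this way.  The buffer pointers below are hypothetical.
 */
#ifdef notdef
	pagemove(frombp->b_un.b_addr, tobp->b_un.b_addr, 2 * CLBYTES);
#endif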

/*
 * Code and data key management routines.
 *
 * The array ckey_cnt maintains the count of processes currently
 * sharing each code key.  The array ckey_cache maintains a record
 * of all code keys used since the last flush of the code cache.
 * Such keys may not be reused, even if unreferenced, until
 * the cache is flushed.  The data cache key handling is analogous.
 * The arrays ckey_cnt and ckey_cache are always kept in such a way
 * that the following invariant holds:
 *	ckey_cnt > 0	=>'s	ckey_cache == 1
 * meaning as long as a code key is used by at least one process, it's
 * marked as being 'in the cache'. Of course, the following invariant
 * also holds:
 *	ckey_cache == 0	=>'s	ckey_cnt == 0
 * which is just the contrapositive of the first invariant.
 * Equivalent invariants hold for the data key arrays.
 */
struct	keystats ckeystats = { NCKEY - 1 };
struct	keystats dkeystats = { NDKEY - 1 };
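
/*
 * A minimal consistency-check sketch (not compiled in) of the
 * invariants stated above: a referenced key must be marked as in
 * the cache, equivalently an uncached key must be unreferenced.
 * The function name ckeychk is hypothetical.
 */
#ifdef notdef
ckeychk()
{
	register int i;

	for (i = 1; i <= MAXCKEY; i++)
		/* "cnt > 0 implies cache == 1" and its contrapositive */
		if (ckey_cnt[i] > 0 && ckey_cache[i] == 0)
			panic("ckeychk");
}
#endif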

/*
 * Release a code key.
 */
ckeyrelease(key)
	int key;
{
	register int s;

	s = spl8();
	if (--ckey_cnt[key] < 0) {
		printf("ckeyrelease: key = %d\n", key);
		ckey_cnt[key] = 0;
	}
	if (ckey_cnt[key] == 0)
		ckeystats.ks_dirty++;
	splx(s);
}

/*
 * Release a data key.
 */
dkeyrelease(key)
	int key;
{
	register int s;

	s = spl8();
	if (--dkey_cnt[key] != 0) {
		printf("dkeyrelease: key = %d\n", key);
		dkey_cnt[key] = 0;
	}
	splx(s);
	dkeystats.ks_dirty++;
}

/*
 * Invalidate the data cache for a process
 * by exchanging cache keys.
 */
dkeyinval(p)
	register struct proc *p;
{
	int s;

	dkeystats.ks_inval++;
	s = spl8();
	if (--dkey_cnt[p->p_dkey] != 0)
		dkey_cnt[p->p_dkey] = 0;
	if (p == u.u_procp && !noproc) {
		p->p_dkey = getdatakey();
		mtpr(DCK, p->p_dkey);
	} else
		p->p_dkey = 0;
	splx(s);
}

/*
 * Get a code key.
 * Strategy: try each of the following in turn
 * until a key is allocated.
 *
 * 1) Find an unreferenced key not yet in the cache.
 *    If this fails, a code cache purge will be necessary.
 * 2) Find an unreferenced key.  Mark all unreferenced keys
 *    as available and purge the cache.
 * 3) Free the keys from all processes not sharing keys.
 * 4) Free the keys from all processes.
 */
getcodekey()
{
	register int i, s, freekey;
	register struct proc *p;
	int desperate = 0;
	static int lastkey = MAXCKEY;

	ckeystats.ks_allocs++;
	s = spl8();
	freekey = 0;
	for (i = lastkey + 1; ; i++) {
		if (i > MAXCKEY)
			i = 1;
		if ((int)ckey_cache[i] == 0) {	/* free key, take it */
			ckey_cache[i] = 1, ckey_cnt[i] = 1;
			splx(s);
			ckeystats.ks_allocfree++;
			ckeystats.ks_avail--;
			lastkey = i;
			return (i);
		}
		if (ckey_cnt[i] == 0)		/* save for potential use */
			freekey = i;
		if (i == lastkey)
			break;
	}
	/*
	 * All code keys were marked as being in cache.
	 * If a key was in the cache, but not in use, grab it.
	 */
	if (freekey != 0) {
purge:
		/*
		 * If we've run out of free keys,
		 * try and free up some other keys to avoid
		 * future cache purges.
		 */
		ckey_cnt[freekey] = 1, ckey_cache[freekey] = 1;
		for (i = 1; i <= MAXCKEY; i++)
			if (ckey_cnt[i] == 0) {
				ckey_cache[i] = 0;
				ckeystats.ks_avail++;
			}
		mtpr(PACC, 0);
		splx(s);
		ckeystats.ks_dirty = 0;
		ckeystats.ks_norefs++;
		return (freekey);
	}

	/*
	 * All keys are marked as in the cache and in use.
	 * Release all unshared keys, or, on second pass,
	 * release all keys.
	 */
steal:
	for (p = allproc; p; p = p->p_nxt)
		if (p->p_ckey != 0 && (p->p_flag & SSYS) == 0) {
			i = p->p_ckey;
			if (ckey_cnt[i] == 1 || desperate) {
				p->p_ckey = 0;
				if (--ckey_cnt[i] == 0) {
					freekey = i;
					if (p->p_textp)
						p->p_textp->x_ckey = 0;
				}
			}
		}

	if (freekey) {
		ckeystats.ks_taken++;
		goto purge;
	} else {
		desperate++;
		goto steal;
	}
}
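
/*
 * Usage sketch (not compiled in): a code key is allocated when a
 * process gains a new text image and handed back through
 * ckeyrelease() when the image is torn down.  This pairing is
 * illustrative, not the actual exec/exit path.
 */
#ifdef notdef
	p->p_ckey = getcodekey();
	/* ... process runs with the key ... */
	ckeyrelease(p->p_ckey);
	p->p_ckey = 0;
#endif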

/*
 * Get a data key.
 *
 * General strategy:
 * 1) Try to find a data key that isn't in the cache. Allocate it.
 * 2) If all data keys are in the cache, find one which isn't
 *    allocated.  Mark all unallocated keys as not in cache,
 *    purge the cache, and allocate this one.
 * 3) If all of them are allocated, free all processes' keys
 *    and let them reclaim them as they run.
 */
getdatakey()
{
	register int i, freekey;
	register struct proc *p;
	int s;
	static int lastkey = MAXDKEY;

	dkeystats.ks_allocs++;
	s = spl8();
	freekey = 0;
	for (i = lastkey + 1; ; i++) {
		if (i > MAXDKEY)
			i = 1;
		if ((int)dkey_cache[i] == 0) {	/* free key, take it */
			dkey_cache[i] = 1, dkey_cnt[i] = 1;
			splx(s);
			dkeystats.ks_allocfree++;
			dkeystats.ks_avail--;
			lastkey = i;
			return (i);
		}
		if (dkey_cnt[i] == 0)
			freekey = i;
		if (i == lastkey)
			break;
	}
purge:
	if (freekey) {
		/*
		 * Try and free up some more keys to avoid
		 * future allocations causing a cache purge.
		 */
		dkey_cnt[freekey] = 1, dkey_cache[freekey] = 1;
		for (i = 1; i <= MAXDKEY; i++)
			if (dkey_cnt[i] == 0) {
				dkey_cache[i] = 0;
				dkeystats.ks_avail++;
			}
		mtpr(PADC, 0);
		splx(s);
		dkeystats.ks_norefs++;
		dkeystats.ks_dirty = 0;
		return (freekey);
	}

	/*
	 * Now, we have to take a key from someone.
	 * May as well take them all, so we get them
	 * from all of the idle procs.
	 */
	for (p = allproc; p; p = p->p_nxt)
		if (p->p_dkey != 0 && (p->p_flag & SSYS) == 0) {
			freekey = p->p_dkey;
			dkey_cnt[freekey] = 0;
			p->p_dkey = 0;
		}
	dkeystats.ks_taken++;
	goto purge;
}

/*
 * Translate a (process, virtual address) pair to a physical
 * address, indexing the system map for kernel addresses and
 * the process page tables otherwise.
 */
/*VARARGS1*/
vtoph(p, v)
	register struct proc *p;
	unsigned v;
{
	register struct pte *pte;
	register unsigned pg;

	pg = btop(v);
	if (pg >= BTOPKERNBASE)
		pte = &Sysmap[pg - BTOPKERNBASE];
	else
		pte = vtopte(p, pg);
	return ((pte->pg_pfnum << PGSHIFT) + (v & PGOFSET));
}
455