xref: /original-bsd/sys/tahoe/tahoe/vm_machdep.c (revision bff07831)
/*
 * Copyright (c) 1988 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Computer Consoles Inc.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_machdep.c	7.10 (Berkeley) 09/23/93
 */

#include "sys/param.h"
#include "sys/systm.h"
#include "sys/user.h"
#include "sys/proc.h"
#include "sys/cmap.h"
#include "sys/vm.h"
#include "sys/text.h"
#include "sys/kernel.h"

#include "../include/pte.h"
#include "../include/cpu.h"
#include "../include/mtpr.h"

/*
 * Set a red zone in the kernel stack after the u. area.
 */
setredzone(pte, vaddr)
	register struct pte *pte;
	caddr_t vaddr;
{

	pte += (sizeof (struct user) + NBPG - 1) / NBPG;
	*(int *)pte &= ~PG_PROT;
	*(int *)pte |= PG_URKR;
	if (vaddr)
		mtpr(TBIS, vaddr + sizeof (struct user) + NBPG - 1);
}
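
/*
 * The pte increment above rounds sizeof (struct user) up to whole
 * pages, so the entry rewritten maps the first page beyond the u.
 * area.  Forcing its protection to PG_URKR (read-only) turns a kernel
 * stack overrun into a protection fault instead of silent corruption;
 * the TBIS purges any stale translation for that page.
 */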

/*
 * Check for valid program size
 * NB - Check data and data growth separately as they may overflow
 * when summed together.
 */
chksize(ts, ids, uds, ss)
	register unsigned ts, ids, uds, ss;
{
	extern unsigned maxtsize;

	if (ctob(ts) > maxtsize ||
	    ctob(ids) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(ids + uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(ss) > u.u_rlimit[RLIMIT_STACK].rlim_cur) {
		return (ENOMEM);
	}
	return (0);
}
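
/*
 * Why ids and uds are checked both separately and summed: ctob()
 * converts clicks to bytes by shifting, and in 32-bit unsigned
 * arithmetic the converted sum can wrap.  A hypothetical illustration
 * (not from the original source), assuming PGSHIFT == 10:
 *
 *	ctob(0x300000) == 0xc0000000, still representable;
 *	ctob(0x300000 + 0x300000) == 0x600000 << 10, wraps to 0x80000000
 *
 * so the summed test alone could pass a wrapped value; the separate
 * tests catch each component first.
 */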

/*ARGSUSED*/
newptes(pte, v, size)
	register struct pte *pte;
	u_int v;
	register int size;
{
	register caddr_t a = ptob(v);

#ifdef lint
	pte = pte;
#endif
	if (size >= 8) {
		mtpr(TBIA, 0);
		return;
	}
	while (size > 0) {
		mtpr(TBIS, a);
		a += NBPG;
		size--;
	}
}
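
/*
 * The cutoff above is a cost trade-off: at eight or more pages, one
 * whole-buffer purge (TBIA) is assumed cheaper than issuing a TBIS
 * per page, at the price of discarding unrelated translations that
 * are then reloaded on demand.
 */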

/*
 * Change protection codes of text segment.
 * Have to flush translation buffer since this
 * affects the virtual memory mapping of the current process.
 */
chgprot(addr, tprot)
	caddr_t addr;
	long tprot;
{
	unsigned v;
	int tp;
	register struct pte *pte;
	register struct cmap *c;

	v = clbase(btop(addr));
	if (!isatsv(u.u_procp, v))
		return (EFAULT);
	tp = vtotp(u.u_procp, v);
	pte = tptopte(u.u_procp, tp);
	if (pte->pg_fod == 0 && pte->pg_pfnum) {
		c = &cmap[pgtocm(pte->pg_pfnum)];
		if (c->c_blkno)
			munhash(c->c_vp, (daddr_t)(u_long)c->c_blkno);
	}
	*(int *)pte &= ~PG_PROT;
	*(int *)pte |= tprot;
	distcl(pte);
	tbiscl(v);
	return (0);
}
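
/*
 * A sketch of the expected calling pattern (hypothetical, not taken
 * from this file): a ptrace-style text write would make the page
 * writable with chgprot(addr, tprot) using a writable protection code
 * from pte.h, patch the instruction, then restore the read-only code
 * the same way; the distcl()/tbiscl() pair above keeps the cluster's
 * ptes and translations consistent at each step.
 */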

settprot(tprot)
	long tprot;
{
	register int *ptaddr, i;

	ptaddr = (int *)mfpr(P0BR);
	for (i = 0; i < u.u_tsize; i++) {
		ptaddr[i] &= ~PG_PROT;
		ptaddr[i] |= tprot;
	}
	mtpr(TBIA, 0);
}
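
/*
 * Unlike chgprot() above, which touches a single click, settprot()
 * rewrites the protection code of every text-segment pte, walking the
 * page table from the P0 base register, so one whole-buffer purge
 * (TBIA) stands in for u.u_tsize individual TBIS operations.
 */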

#ifdef notdef
/*
 * Rest are machine-dependent
 */
getmemc(addr)
	caddr_t addr;
{
	register int c;
	struct pte savemap;

	savemap = mmap[0];
	*(int *)mmap = PG_V | PG_KR | btop(addr);
	mtpr(TBIS, vmmap);
	uncache(&vmmap[(int)addr & PGOFSET]);
	c = *(char *)&vmmap[(int)addr & PGOFSET];
	mmap[0] = savemap;
	mtpr(TBIS, vmmap);
	return (c & 0377);
}

putmemc(addr, val)
	caddr_t addr;
{
	struct pte savemap;

	savemap = mmap[0];
	*(int *)mmap = PG_V | PG_KW | btop(addr);
	mtpr(TBIS, vmmap);
	*(char *)&vmmap[(int)addr & PGOFSET] = val;

	mtpr(PADC, 0);
	mtpr(PACC, 0);

	mmap[0] = savemap;
	mtpr(TBIS, vmmap);
}
#endif
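
/*
 * The disabled pair above reads and writes single bytes of physical
 * memory through the temporary vmmap window: a pte naming the target
 * page frame is installed in mmap[0], the stale translation is purged,
 * and the byte is accessed through vmmap.  The read path bypasses the
 * cache with uncache(); the write path purges the data and code caches
 * (PADC, PACC) so neither can serve stale contents afterwards.
 */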

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLBYTES.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
	while (size > 0) {
		*tpte++ = *fpte;
		*(int *)fpte++ = 0;
		mtpr(TBIS, from);
		mtpr(TBIS, to);
		mtpr(P1DC, to);		/* purge !! */
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
}
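
/*
 * pagemove() transfers mappings, not contents: each source pte is
 * copied to the destination slot and then zeroed, so afterwards `to'
 * maps the physical pages formerly mapped at `from', which becomes
 * invalid.  Both translations are purged page by page, and the data
 * cache purge (P1DC) keeps lines tagged under the old mapping from
 * being served at the new address.
 */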

/*
 * Code and data key management routines.
 *
 * The array ckey_cnt maintains the count of processes currently
 * sharing each code key.  The array ckey_cache maintains a record
 * of all code keys used since the last flush of the code cache.
 * Such keys may not be reused, even if unreferenced, until
 * the cache is flushed.  The data cache key handling is analogous.
 * The arrays ckey_cnt and ckey_cache are always kept in such a way
 * that the following invariant holds:
 *	ckey_cnt > 0	=>'s	ckey_cache == 1
 * meaning as long as a code key is used by at least one process, it's
 * marked as being 'in the cache'. Of course, the following invariant
 * also holds:
 *	ckey_cache == 0	=>'s	ckey_cnt == 0
 * which is just the contrapositive of the first invariant.
 * Equivalent invariants hold for the data key arrays.
 */
struct	keystats ckeystats = { NCKEY - 1 };
struct	keystats dkeystats = { NDKEY - 1 };
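
/*
 * A minimal consistency-check sketch (hypothetical, not part of the
 * original source), assuming ckey_cnt[] and ckey_cache[] are visible
 * here; it simply asserts the first invariant stated above.
 */
#ifdef notdef
ckeyverify()
{
	register int i;

	for (i = 1; i <= MAXCKEY; i++)
		if (ckey_cnt[i] > 0 && ckey_cache[i] == 0)
			panic("ckey invariant");
}
#endif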

/*
 * Release a code key.
 */
ckeyrelease(key)
	int key;
{
	register int s;

	s = spl8();
	if (--ckey_cnt[key] < 0) {
		printf("ckeyrelease: key = %d\n", key);
		ckey_cnt[key] = 0;
	}
	if (ckey_cnt[key] == 0)
		ckeystats.ks_dirty++;
	splx(s);
}

/*
 * Release a data key.
 */
dkeyrelease(key)
	int key;
{
	register int s;

	s = spl8();
	if (--dkey_cnt[key] != 0) {
		printf("dkeyrelease: key = %d\n", key);
		dkey_cnt[key] = 0;
	}
	splx(s);
	dkeystats.ks_dirty++;
}

/*
 * Invalidate the data cache for a process
 * by exchanging cache keys.
 */
dkeyinval(p)
	register struct proc *p;
{
	int s;

	dkeystats.ks_inval++;
	s = spl8();
	if (--dkey_cnt[p->p_dkey] != 0)
		dkey_cnt[p->p_dkey] = 0;
	if (p == u.u_procp && !noproc) {
		p->p_dkey = getdatakey();
		mtpr(DCK, p->p_dkey);
	} else
		p->p_dkey = 0;
	splx(s);
}
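
/*
 * Exchanging keys invalidates the old lines logically, with no
 * hardware purge: if the victim is the current process a fresh key
 * is loaded into the DCK register immediately, otherwise key 0 marks
 * the process as needing a new key when it next runs.
 */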

/*
 * Get a code key.
 * Strategy: try each of the following in turn
 * until a key is allocated.
 *
 * 1) Find an unreferenced key not yet in the cache.
 *    If this fails, a code cache purge will be necessary.
 * 2) Find an unreferenced key.  Mark all unreferenced keys
 *    as available and purge the cache.
 * 3) Free the keys from all processes not sharing keys.
 * 4) Free the keys from all processes.
 */
getcodekey()
{
	register int i, s, freekey;
	register struct proc *p;
	int desperate = 0;
	static int lastkey = MAXCKEY;

	ckeystats.ks_allocs++;
	s = spl8();
	freekey = 0;
	for (i = lastkey + 1; ; i++) {
		if (i > MAXCKEY)
			i = 1;
		if ((int)ckey_cache[i] == 0) {	/* free key, take it */
			ckey_cache[i] = 1, ckey_cnt[i] = 1;
			splx(s);
			ckeystats.ks_allocfree++;
			ckeystats.ks_avail--;
			lastkey = i;
			return (i);
		}
		if (ckey_cnt[i] == 0)		/* save for potential use */
			freekey = i;
		if (i == lastkey)
			break;
	}
	/*
	 * All code keys were marked as being in cache.
	 * If a key was in the cache, but not in use, grab it.
	 */
	if (freekey != 0) {
purge:
		/*
		 * If we've run out of free keys,
		 * try and free up some other keys to avoid
		 * future cache purges.
		 */
		ckey_cnt[freekey] = 1, ckey_cache[freekey] = 1;
		for (i = 1; i <= MAXCKEY; i++)
			if (ckey_cnt[i] == 0) {
				ckey_cache[i] = 0;
				ckeystats.ks_avail++;
			}
		mtpr(PACC, 0);
		splx(s);
		ckeystats.ks_dirty = 0;
		ckeystats.ks_norefs++;
		return (freekey);
	}

	/*
	 * All keys are marked as in the cache and in use.
	 * Release all unshared keys, or, on second pass,
	 * release all keys.
	 */
steal:
	for (p = allproc; p; p = p->p_next)
		if (p->p_ckey != 0 && (p->p_flag & P_SYSTEM) == 0) {
			i = p->p_ckey;
			if (ckey_cnt[i] == 1 || desperate) {
				p->p_ckey = 0;
				if (--ckey_cnt[i] == 0) {
					freekey = i;
					if (p->p_textp)
						p->p_textp->x_ckey = 0;
				}
			}
		}

	if (freekey) {
		ckeystats.ks_taken++;
		goto purge;
	} else {
		desperate++;
		goto steal;
	}
}

/*
 * Get a data key.
 *
 * General strategy:
 * 1) Try to find a data key that isn't in the cache. Allocate it.
 * 2) If all data keys are in the cache, find one which isn't
 *    allocated.  Mark all unallocated keys as not in cache,
 *    purge the cache, and allocate this one.
 * 3) If all of them are allocated, free all processes' keys
 *    and let them reclaim them as they run.
 */
getdatakey()
{
	register int i, freekey;
	register struct proc *p;
	int s;
	static int lastkey = MAXDKEY;

	dkeystats.ks_allocs++;
	s = spl8();
	freekey = 0;
	for (i = lastkey + 1; ; i++) {
		if (i > MAXDKEY)
			i = 1;
		if ((int)dkey_cache[i] == 0) {	/* free key, take it */
			dkey_cache[i] = 1, dkey_cnt[i] = 1;
			splx(s);
			dkeystats.ks_allocfree++;
			dkeystats.ks_avail--;
			lastkey = i;
			return (i);
		}
		if (dkey_cnt[i] == 0)
			freekey = i;
		if (i == lastkey)
			break;
	}
purge:
	if (freekey) {
		/*
		 * Try and free up some more keys to avoid
		 * future allocations causing a cache purge.
		 */
		dkey_cnt[freekey] = 1, dkey_cache[freekey] = 1;
		for (i = 1; i <= MAXDKEY; i++)
			if (dkey_cnt[i] == 0) {
				dkey_cache[i] = 0;
				dkeystats.ks_avail++;
			}
		mtpr(PADC, 0);
		splx(s);
		dkeystats.ks_norefs++;
		dkeystats.ks_dirty = 0;
		return (freekey);
	}

	/*
	 * Now, we have to take a key from someone.
	 * May as well take them all, so we get them
	 * from all of the idle procs.
	 */
	for (p = allproc; p; p = p->p_next)
		if (p->p_dkey != 0 && (p->p_flag & P_SYSTEM) == 0) {
			freekey = p->p_dkey;
			dkey_cnt[freekey] = 0;
			p->p_dkey = 0;
		}
	dkeystats.ks_taken++;
	goto purge;
}

/*VARARGS1*/
vtoph(p, v)
	register struct proc *p;
	unsigned v;
{
	register struct pte *pte;
	register unsigned pg;

	pg = btop(v);
	if (pg >= BTOPKERNBASE)
		pte = &Sysmap[pg - BTOPKERNBASE];
	else
		pte = vtopte(p, pg);
	return ((pte->pg_pfnum << PGSHIFT) + (v & PGOFSET));
}
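
/*
 * The return expression composes a physical address from page frame
 * number and byte offset.  A worked example (hypothetical values,
 * assuming PGSHIFT == 10 and PGOFSET == 0x3ff): pg_pfnum == 0x1234
 * and v == 0x80000456 give (0x1234 << 10) + (0x80000456 & 0x3ff),
 * i.e. physical address 0x48d056.
 */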