xref: /original-bsd/sys/tahoe/tahoe/vm_machdep.c (revision 092d9b4e)
1 /*
2  * Copyright (c) 1988 Regents of the University of California.
3  * All rights reserved.  The Berkeley software License Agreement
4  * specifies the terms and conditions for redistribution.
5  *
6  *	@(#)vm_machdep.c	7.1 (Berkeley) 05/26/88
7  */
8 
9 #include "param.h"
10 #include "systm.h"
11 #include "dir.h"
12 #include "user.h"
13 #include "proc.h"
14 #include "cmap.h"
15 #include "mount.h"
16 #include "vm.h"
17 #include "text.h"
18 #include "kernel.h"
19 
20 #include "pte.h"
21 #include "cpu.h"
22 #include "mtpr.h"
23 
24 /*
25  * Set a red zone in the kernel stack after the u. area.
26  */
setredzone(pte, vaddr)
	register struct pte *pte;	/* base pte of the u. area / kernel stack */
	caddr_t vaddr;			/* matching virtual address, or 0 for none */
{

	/* Step to the pte of the first page past the u. area. */
	pte += (sizeof (struct user) + NBPG - 1) / NBPG;
	/*
	 * Rewrite that pte's protection field so a kernel-stack
	 * overrun past the u. area faults instead of corrupting
	 * adjacent memory.  (PG_URKR is presumably a read-only
	 * protection code -- confirm against pte.h.)
	 */
	*(int *)pte &= ~PG_PROT;
	*(int *)pte |= PG_URKR;
	/* If the mapping may be live, invalidate its TB entry. */
	if (vaddr)
		mtpr(TBIS, vaddr + sizeof (struct user) + NBPG - 1);
}
38 
39 /*
40  * Check for valid program size
41  * NB - Check data and data growth separately as they may overflow
42  * when summed together.
43  */
44 chksize(ts, ids, uds, ss)
45 	register unsigned ts, ids, uds, ss;
46 {
47 	extern unsigned maxtsize;
48 
49 	if (ctob(ts) > maxtsize ||
50 	    ctob(ids) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
51 	    ctob(uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
52 	    ctob(ids + uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
53 	    ctob(ss) > u.u_rlimit[RLIMIT_STACK].rlim_cur) {
54 		u.u_error = ENOMEM;
55 		return (1);
56 	}
57 	return (0);
58 }
59 
60 /*ARGSUSED*/
61 newptes(pte, v, size)
62 	register struct pte *pte;
63 	u_int v;
64 	register int size;
65 {
66 	register caddr_t a = ptob(v);
67 
68 #ifdef lint
69 	pte = pte;
70 #endif
71 	if (size >= 8) {
72 		mtpr(TBIA, 0);
73 		return;
74 	}
75 	while (size > 0) {
76 		mtpr(TBIS, a);
77 		a += NBPG;
78 		size--;
79 	}
80 }
81 
82 /*
83  * Change protection codes of text segment.
84  * Have to flush translation buffer since this
 * affects the virtual memory mapping of the current process.
86  */
chgprot(addr, tprot)
	caddr_t addr;	/* user virtual address within the text segment */
	long tprot;	/* new protection bits for the pte PG_PROT field */
{
	unsigned v;
	int tp;
	register struct pte *pte;
	register struct cmap *c;

	v = clbase(btop(addr));
	/* The address must lie in the current process's text segment. */
	if (!isatsv(u.u_procp, v)) {
		u.u_error = EFAULT;
		return (0);
	}
	tp = vtotp(u.u_procp, v);
	pte = tptopte(u.u_procp, tp);
	/*
	 * If the page is resident (not fill-on-demand) and its core-map
	 * entry is hashed by a filesystem block, unhash it first --
	 * except for swap-device blocks (MSWAPX).
	 */
	if (pte->pg_fod == 0 && pte->pg_pfnum) {
		c = &cmap[pgtocm(pte->pg_pfnum)];
		if (c->c_blkno && c->c_mdev != MSWAPX)
			munhash(mount[c->c_mdev].m_dev,
			    (daddr_t)(u_long)c->c_blkno);
	}
	/* Install the new protection code. */
	*(int *)pte &= ~PG_PROT;
	*(int *)pte |= tprot;
	distcl(pte);	/* propagate the pte change across the cluster */
	tbiscl(v);	/* and flush the cluster's TB entries */
	return (1);
}
115 
116 settprot(tprot)
117 	long tprot;
118 {
119 	register int *ptaddr, i;
120 
121 	ptaddr = (int *)mfpr(P0BR);
122 	for (i = 0; i < u.u_tsize; i++) {
123 		ptaddr[i] &= ~PG_PROT;
124 		ptaddr[i] |= tprot;
125 	}
126 	mtpr(TBIA, 0);
127 }
128 
129 #ifdef notdef
130 /*
131  * Rest are machine-dependent
132  */
getmemc(addr)
	caddr_t addr;	/* address interpreted by btop() -- presumably physical */
{
	register int c;
	struct pte savemap;

	/* Borrow the scratch map entry to get a window onto the page. */
	savemap = mmap[0];
	*(int *)mmap = PG_V | PG_KR | btop(addr);
	mtpr(TBIS, vmmap);
	/* Force the read past any stale cached copy. */
	uncache(&vmmap[(int)addr & PGOFSET]);
	c = *(char *)&vmmap[(int)addr & PGOFSET];
	/* Restore the previous mapping and flush the window's TB entry. */
	mmap[0] = savemap;
	mtpr(TBIS, vmmap);
	return (c & 0377);	/* byte, as an unsigned value */
}
148 
putmemc(addr, val)
	caddr_t addr;	/* address interpreted by btop() -- presumably physical */
{
	struct pte savemap;

	/* Borrow the scratch map entry, this time writable. */
	savemap = mmap[0];
	*(int *)mmap = PG_V | PG_KW | btop(addr);
	mtpr(TBIS, vmmap);
	*(char *)&vmmap[(int)addr & PGOFSET] = val;

	/* Purge the caches so the store is visible everywhere. */
	mtpr(PADC, 0);
	mtpr(PACC, 0);

	/* Restore the previous mapping and flush the window's TB entry. */
	mmap[0] = savemap;
	mtpr(TBIS, vmmap);
}
165 #endif
166 
167 /*
168  * Move pages from one kernel virtual address to another.
169  * Both addresses are assumed to reside in the Sysmap,
170  * and size must be a multiple of CLSIZE.
171  */
172 pagemove(from, to, size)
173 	register caddr_t from, to;
174 	int size;
175 {
176 	register struct pte *fpte, *tpte;
177 
178 	if (size % CLBYTES)
179 		panic("pagemove");
180 	fpte = kvtopte(from);
181 	tpte = kvtopte(to);
182 	while (size > 0) {
183 		*tpte++ = *fpte;
184 		*(int *)fpte++ = 0;
185 		mtpr(TBIS, from);
186 		mtpr(TBIS, to);
187 		mtpr(P1DC, to);		/* purge !! */
188 		from += NBPG;
189 		to += NBPG;
190 		size -= NBPG;
191 	}
192 }
193 
194 /*
195  * Code and data key management routines.
196  *
197  * The array ckey_cnt maintains the count of processes currently
198  * sharing each code key.  The array ckey_cache maintains a record
199  * of all code keys used since the last flush of the code cache.
200  * Such keys may not be reused, even if unreferenced, until
201  * the cache is flushed.  The data cache key handling is analogous.
 * The arrays ckey_cnt and ckey_cache are always kept in such a way
203  * that the following invariant holds:
204  *	ckey_cnt > 0	=>'s	ckey_cache == 1
205  * meaning as long as a code key is used by at least one process, it's
206  * marked as being 'in the cache'. Of course, the following invariant
207  * also holds:
208  *	ckey_cache == 0	=>'s	ckey_cnt == 0
 * which is just the contrapositive of the first invariant.
210  * Equivalent invariants hold for the data key arrays.
211  */
/*
 * Key 0 is reserved to mean "no key", so only NCKEY-1/NDKEY-1 keys
 * are allocatable; the initializer presumably sets the first member
 * of struct keystats (the available-key count) -- confirm against
 * the keystats declaration.
 */
struct	keystats ckeystats = { NCKEY - 1 };
struct	keystats dkeystats = { NDKEY - 1 };
214 
215 /*
216  * Release a code key.
217  */
218 ckeyrelease(key)
219 	int key;
220 {
221 	register int s;
222 
223 	s = spl8();
224 	if (--ckey_cnt[key] < 0) {
225 		printf("ckeyrelease: key = %d\n", key);
226 		ckey_cnt[key] = 0;
227 	}
228 	if (ckey_cnt[key] == 0)
229 		ckeystats.ks_dirty++;
230 	splx(s);
231 }
232 
233 /*
234  * Release a data key.
235  */
236 dkeyrelease(key)
237 	int key;
238 {
239 	register int s;
240 
241 	s = spl8();
242 	if (--dkey_cnt[key] != 0) {
243 		printf("dkeyrelease: key = %d\n", key);
244 		dkey_cnt[key] = 0;
245 	}
246 	splx(s);
247 	dkeystats.ks_dirty++;
248 }
249 
250 /*
251  * Invalidate the data cache for a process
252  * by exchanging cache keys.
253  */
dkeyinval(p)
	register struct proc *p;
{
	int s;

	dkeystats.ks_inval++;
	s = spl8();
	/*
	 * Data keys are private to one process, so the decrement
	 * should leave exactly 0; clear defensively otherwise.
	 */
	if (--dkey_cnt[p->p_dkey] != 0)
		dkey_cnt[p->p_dkey] = 0;
	if (p == u.u_procp && !noproc) {
		/* Currently running process: load a fresh key now. */
		p->p_dkey = getdatakey();
		mtpr(DCK, p->p_dkey);
	} else
		/* Not running: 0 means "no key"; reallocated when run. */
		p->p_dkey = 0;
	splx(s);
}
270 
271 /*
272  * Get a code key.
273  * Strategy: try each of the following in turn
274  * until a key is allocated.
275  *
276  * 1) Find an unreferenced key not yet in the cache.
277  *    If this fails, a code cache purge will be necessary.
278  * 2) Find an unreferenced key.  Mark all unreferenced keys
279  *    as available and purge the cache.
280  * 3) Free the keys from all processes not sharing keys.
281  * 4) Free the keys from all processes.
282  */
getcodekey()
{
	register int i, s, freekey;
	register struct proc *p;
	int desparate = 0;	/* [sic] set on 2nd steal pass: take shared keys too */
	static int lastkey = MAXCKEY;	/* search rotor: where the last scan stopped */

	ckeystats.ks_allocs++;
	s = spl8();
	freekey = 0;
	/*
	 * Pass over keys 1..MAXCKEY starting just past the rotor.
	 * A key not marked in the cache may be taken outright;
	 * otherwise remember any unreferenced (but cached) key.
	 */
	for (i = lastkey + 1; ; i++) {
		if (i > MAXCKEY)
			i = 1;	/* key 0 means "no key" and is never allocated */
		if ((int)ckey_cache[i] == 0) {	/* free key, take it */
			ckey_cache[i] = 1, ckey_cnt[i] = 1;
			splx(s);
			ckeystats.ks_allocfree++;
			ckeystats.ks_avail--;
			lastkey = i;
			return (i);
		}
		if (ckey_cnt[i] == 0)		/* save for potential use */
			freekey = i;
		if (i == lastkey)
			break;
	}
	/*
	 * All code keys were marked as being in cache.
	 * If a key was in the cache, but not in use, grab it.
	 */
	if (freekey != 0) {
purge:
		/*
		 * If we've run out of free keys,
		 * try and free up some other keys to avoid
		 * future cache purges.
		 */
		ckey_cnt[freekey] = 1, ckey_cache[freekey] = 1;
		for (i = 1; i <= MAXCKEY; i++)
			if (ckey_cnt[i] == 0) {	/* unreferenced: recycle */
				ckey_cache[i] = 0;
				ckeystats.ks_avail++;
			}
		mtpr(PACC, 0);	/* purge the code cache */
		splx(s);
		ckeystats.ks_dirty = 0;
		ckeystats.ks_norefs++;
		return (freekey);
	}

	/*
	 * All keys are marked as in the cache and in use.
	 * Release all unshared keys, or, on second pass,
	 * release all keys.
	 */
steal:
	for (p = allproc; p; p = p->p_nxt)
		if (p->p_ckey != 0 && (p->p_flag & SSYS) == 0) {
			i = p->p_ckey;
			if (ckey_cnt[i] == 1 || desparate) {
				p->p_ckey = 0;
				if (--ckey_cnt[i] == 0) {
					freekey = i;	/* last reference gone */
					if (p->p_textp)
						p->p_textp->x_ckey = 0;
				}
			}
		}

	if (freekey) {
		ckeystats.ks_taken++;
		goto purge;
	} else {
		desparate++;	/* escalate: next pass steals shared keys too */
		goto steal;
	}
}
360 
361 /*
362  * Get a data key.
363  *
364  * General strategy:
365  * 1) Try to find a data key that isn't in the cache. Allocate it.
366  * 2) If all data keys are in the cache, find one which isn't
367  *    allocated.  Mark all unallocated keys as not in cache,
368  *    purge the cache, and allocate this one.
369  * 3) If all of them are allocated, free all process' keys
 *    and let them reclaim them as they run.
371  */
getdatakey()
{
	register int i, freekey;
	register struct proc *p;
	int s;
	static int lastkey = MAXDKEY;	/* search rotor: where the last scan stopped */

	dkeystats.ks_allocs++;
	s = spl8();
	freekey = 0;
	/*
	 * Pass over keys 1..MAXDKEY starting just past the rotor.
	 * A key not marked in the cache may be taken outright;
	 * otherwise remember any unreferenced (but cached) key.
	 */
	for (i = lastkey + 1; ; i++) {
		if (i > MAXDKEY)
			i = 1;	/* key 0 means "no key" and is never allocated */
		if ((int)dkey_cache[i] == 0) {	/* free key, take it */
			dkey_cache[i] = 1, dkey_cnt[i] = 1;
			splx(s);
			dkeystats.ks_allocfree++;
			dkeystats.ks_avail--;
			lastkey = i;
			return (i);
		}
		if (dkey_cnt[i] == 0)
			freekey = i;
		if (i == lastkey)
			break;
	}
purge:
	if (freekey) {
		/*
		 * Try and free up some more keys to avoid
		 * future allocations causing a cache purge.
		 */
		dkey_cnt[freekey] = 1, dkey_cache[freekey] = 1;
		for (i = 1; i <= MAXDKEY; i++)
			if (dkey_cnt[i] == 0) {	/* unreferenced: recycle */
				dkey_cache[i] = 0;
				dkeystats.ks_avail++;
			}
		mtpr(PADC, 0);	/* purge the data cache */
		splx(s);
		dkeystats.ks_norefs++;
		dkeystats.ks_dirty = 0;
		return (freekey);
	}

	/*
	 * Now, we have to take a key from someone.
	 * May as well take them all, so we get them
	 * from all of the idle procs.
	 */
	for (p = allproc; p; p = p->p_nxt)
		if (p->p_dkey != 0 && (p->p_flag & SSYS) == 0) {
			freekey = p->p_dkey;
			dkey_cnt[freekey] = 0;
			p->p_dkey = 0;
		}
	dkeystats.ks_taken++;
	/* Assumes some process held a key, so freekey is now nonzero. */
	goto purge;
}
431 
/*VARARGS1*/
433 vtoph(p, v)
434 	register struct proc *p;
435 	unsigned v;
436 {
437 	register struct pte *pte;
438 	register unsigned pg;
439 
440 	pg = btop(v);
441 	if (pg >= BTOPKERNBASE)
442 		pte = &Sysmap[pg - BTOPKERNBASE];
443 	else
444 		pte = vtopte(p, pg);
445 	return ((pte->pg_pfnum << PGSHIFT) + (v & PGOFSET));
446 }
447