/*	vm_machdep.c	1.10	88/01/07	*/

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "cmap.h"
#include "mount.h"
#include "vm.h"
#include "text.h"
#include "kernel.h"

#include "pte.h"
#include "cpu.h"
#include "mtpr.h"

/*
 * Set a red zone in the kernel stack after the u. area.
 */
setredzone(pte, vaddr)
	register struct pte *pte;
	caddr_t vaddr;
{

	pte += (sizeof (struct user) + NBPG - 1) / NBPG;
	*(int *)pte &= ~PG_PROT;
	*(int *)pte |= PG_URKR;
	if (vaddr)
		mtpr(TBIS, vaddr + sizeof (struct user) + NBPG - 1);
}

/*
 * Check for valid program size
 * NB - Check data and data growth separately as they may overflow
 * when summed together.
 */
chksize(ts, ids, uds, ss)
	register unsigned ts, ids, uds, ss;
{
	extern unsigned maxtsize;

	if (ctob(ts) > maxtsize ||
	    ctob(ids) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(ids + uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(ss) > u.u_rlimit[RLIMIT_STACK].rlim_cur) {
		u.u_error = ENOMEM;
		return (1);
	}
	return (0);
}

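/*
 * Invalidate translation buffer entries for size new ptes
 * starting at virtual page v.  For 8 or more pages the whole
 * translation buffer is flushed (TBIA), on the assumption that
 * a full flush is cheaper than that many single-entry
 * invalidates; otherwise each page is invalidated (TBIS).
 */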
/*ARGSUSED*/
newptes(pte, v, size)
	register struct pte *pte;
	u_int v;
	register int size;
{
	register caddr_t a = ptob(v);

#ifdef lint
	pte = pte;
#endif
	if (size >= 8) {
		mtpr(TBIA, 0);
		return;
	}
	while (size > 0) {
		mtpr(TBIS, a);
		a += NBPG;
		size--;
	}
}

/*
 * Change protection codes of text segment.
 * Have to flush translation buffer since this
 * affects the virtual memory mapping of the current process.
 */
chgprot(addr, tprot)
	caddr_t addr;
	long tprot;
{
	unsigned v;
	int tp;
	register struct pte *pte;
	register struct cmap *c;

	v = clbase(btop(addr));
	if (!isatsv(u.u_procp, v)) {
		u.u_error = EFAULT;
		return (0);
	}
	tp = vtotp(u.u_procp, v);
	pte = tptopte(u.u_procp, tp);
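	/*
	 * If the page is resident and backed by a block on a
	 * non-swap device, remove any buffer-cache entry for
	 * that block so a stale copy is not picked up later.
	 */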
	if (pte->pg_fod == 0 && pte->pg_pfnum) {
		c = &cmap[pgtocm(pte->pg_pfnum)];
		if (c->c_blkno && c->c_mdev != MSWAPX)
			munhash(mount[c->c_mdev].m_dev,
			    (daddr_t)(u_long)c->c_blkno);
	}
	*(int *)pte &= ~PG_PROT;
	*(int *)pte |= tprot;
	distcl(pte);
	tbiscl(v);
	return (1);
}

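/*
 * Set the protection code of every pte mapping the current
 * process's text segment, which starts at the base of P0
 * space, then flush the entire translation buffer.
 */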
settprot(tprot)
	long tprot;
{
	register int *ptaddr, i;

	ptaddr = (int *)mfpr(P0BR);
	for (i = 0; i < u.u_tsize; i++) {
		ptaddr[i] &= ~PG_PROT;
		ptaddr[i] |= tprot;
	}
	mtpr(TBIA, 0);
}

#ifdef notdef
/*
 * Rest are machine-dependent
 */
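/*
 * Fetch a byte of physical memory: temporarily map the page
 * through the vmmap window, bypass the cache for the read,
 * then restore the previous mapping.
 */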
getmemc(addr)
	caddr_t addr;
{
	register int c;
	struct pte savemap;

	savemap = mmap[0];
	*(int *)mmap = PG_V | PG_KR | btop(addr);
	mtpr(TBIS, vmmap);
	uncache(&vmmap[(int)addr & PGOFSET]);
	c = *(char *)&vmmap[(int)addr & PGOFSET];
	mmap[0] = savemap;
	mtpr(TBIS, vmmap);
	return (c & 0377);
}

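/*
 * Store a byte of physical memory through the vmmap window,
 * then purge the data and code caches (PADC, PACC) so no
 * stale copy of the old contents survives.
 */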
putmemc(addr, val)
	caddr_t addr;
{
	struct pte savemap;

	savemap = mmap[0];
	*(int *)mmap = PG_V | PG_KW | btop(addr);
	mtpr(TBIS, vmmap);
	*(char *)&vmmap[(int)addr & PGOFSET] = val;

	mtpr(PADC, 0);
	mtpr(PACC, 0);

	mmap[0] = savemap;
	mtpr(TBIS, vmmap);
}
#endif

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLBYTES.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = &Sysmap[btop(from - KERNBASE)];
	tpte = &Sysmap[btop(to - KERNBASE)];
	while (size > 0) {
		*tpte++ = *fpte;
		*(int *)fpte++ = 0;
		mtpr(TBIS, from);
		mtpr(TBIS, to);
		mtpr(P1DC, to);		/* purge !! */
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
}

/*
 * Code and data key management routines.
 *
 * The array ckey_cnt maintains the count of processes currently
 * sharing each code key.  The array ckey_cache maintains a record
 * of all code keys used since the last flush of the code cache.
 * Such keys may not be reused, even if unreferenced, until
 * the cache is flushed.  The data cache key handling is analogous.
 * The arrays ckey_cnt and ckey_cache are always kept in such a way
 * that the following invariant holds:
 *	ckey_cnt > 0	=>'s	ckey_cache == 1
 * meaning that as long as a code key is used by at least one process,
 * it is marked as being 'in the cache'.  Of course, the following
 * invariant also holds:
 *	ckey_cache == 0	=>'s	ckey_cnt == 0
 * which is just the contrapositive of the first invariant.
 * Equivalent invariants hold for the data key arrays.
 */
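/*
 * The one-element initializers below evidently set ks_avail,
 * the first member, to the number of usable keys; key 0 is
 * reserved to mean ``no key''.
 */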
struct	keystats ckeystats = { NCKEY - 1 };
struct	keystats dkeystats = { NDKEY - 1 };

/*
 * Release a code key.
 */
ckeyrelease(key)
	int key;
{
	register int s;

	s = spl8();
	if (--ckey_cnt[key] < 0) {
		printf("ckeyrelease: key = %d\n", key);
		ckey_cnt[key] = 0;
	}
	if (ckey_cnt[key] == 0)
		ckeystats.ks_dirty++;
	splx(s);
}

/*
 * Release a data key.
 */
dkeyrelease(key)
	int key;
{
	register int s;

	s = spl8();
	if (--dkey_cnt[key] != 0) {
		printf("dkeyrelease: key = %d\n", key);
		dkey_cnt[key] = 0;
	}
	splx(s);
	dkeystats.ks_dirty++;
}

/*
 * Invalidate the data cache for a process
 * by exchanging cache keys.
 */
dkeyinval(p)
	register struct proc *p;
{
	register int key;
	int s;

	dkeystats.ks_inval++;
	s = spl8();
	if (--dkey_cnt[p->p_dkey] != 0)
		dkey_cnt[p->p_dkey] = 0;
	if (p == u.u_procp && !noproc) {
		p->p_dkey = getdatakey();
		mtpr(DCK, p->p_dkey);
	} else
		p->p_dkey = 0;
	splx(s);
}

/*
 * Get a code key.
 * Strategy: try each of the following in turn
 * until a key is allocated.
 *
 * 1) Find an unreferenced key not yet in the cache.
 *    If this fails, a code cache purge will be necessary.
 * 2) Find an unreferenced key.  Mark all unreferenced keys
 *    as available and purge the cache.
 * 3) Free the keys from all processes not sharing keys.
 * 4) Free the keys from all processes.
 */
getcodekey()
{
	register int i, s, freekey;
	register struct proc *p;
	int desperate = 0;
	static int lastkey = MAXCKEY;

	ckeystats.ks_allocs++;
	s = spl8();
	freekey = 0;
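	/*
	 * Round-robin scan of all keys, starting just past the
	 * last key handed out and wrapping from MAXCKEY to 1.
	 */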
	for (i = lastkey + 1; ; i++) {
		if (i > MAXCKEY)
			i = 1;
		if ((int)ckey_cache[i] == 0) {	/* free key, take it */
			ckey_cache[i] = 1, ckey_cnt[i] = 1;
			splx(s);
			ckeystats.ks_allocfree++;
			ckeystats.ks_avail--;
			lastkey = i;
			return (i);
		}
		if (ckey_cnt[i] == 0)		/* save for potential use */
			freekey = i;
		if (i == lastkey)
			break;
	}
	/*
	 * All code keys were marked as being in cache.
	 * If a key was in the cache, but not in use, grab it.
	 */
	if (freekey != 0) {
purge:
		/*
		 * A cache purge is needed to reuse this key.
		 * While at it, mark every other unreferenced
		 * key as available, to avoid future purges.
		 */
		ckey_cnt[freekey] = 1, ckey_cache[freekey] = 1;
		for (i = 1; i <= MAXCKEY; i++)
			if (ckey_cnt[i] == 0) {
				ckey_cache[i] = 0;
				ckeystats.ks_avail++;
			}
		mtpr(PACC, 0);
		splx(s);
		ckeystats.ks_dirty = 0;
		ckeystats.ks_norefs++;
		return (freekey);
	}

	/*
	 * All keys are marked as in the cache and in use.
	 * Release all unshared keys, or, on second pass,
	 * release all keys.
	 */
steal:
	for (p = allproc; p; p = p->p_nxt)
		if (p->p_ckey != 0 && (p->p_flag & SSYS) == 0) {
			i = p->p_ckey;
			if (ckey_cnt[i] == 1 || desperate) {
				p->p_ckey = 0;
				if (--ckey_cnt[i] == 0) {
					freekey = i;
					if (p->p_textp)
						p->p_textp->x_ckey = 0;
				}
			}
		}

	if (freekey) {
		ckeystats.ks_taken++;
		goto purge;
	} else {
		desperate++;
		goto steal;
	}
}

/*
 * Get a data key.
 *
 * General strategy:
 * 1) Try to find a data key that isn't in the cache. Allocate it.
 * 2) If all data keys are in the cache, find one which isn't
 *    allocated.  Mark all unallocated keys as not in cache,
 *    purge the cache, and allocate this one.
 * 3) If all of them are allocated, free all processes' keys
 *    and let them reclaim them as they run.
 */
getdatakey()
{
	register int i, freekey;
	register struct proc *p;
	int s;
	static int lastkey = MAXDKEY;

	dkeystats.ks_allocs++;
	s = spl8();
	freekey = 0;
	for (i = lastkey + 1; ; i++) {
		if (i > MAXDKEY)
			i = 1;
		if ((int)dkey_cache[i] == 0) {	/* free key, take it */
			dkey_cache[i] = 1, dkey_cnt[i] = 1;
			splx(s);
			dkeystats.ks_allocfree++;
			dkeystats.ks_avail--;
			lastkey = i;
			return (i);
		}
		if (dkey_cnt[i] == 0)
			freekey = i;
		if (i == lastkey)
			break;
	}
purge:
	if (freekey) {
		/*
		 * Try to free up some more keys to avoid
		 * future allocations causing a cache purge.
		 */
		dkey_cnt[freekey] = 1, dkey_cache[freekey] = 1;
		for (i = 1; i <= MAXDKEY; i++)
			if (dkey_cnt[i] == 0) {
				dkey_cache[i] = 0;
				dkeystats.ks_avail++;
			}
		mtpr(PADC, 0);
		splx(s);
		dkeystats.ks_norefs++;
		dkeystats.ks_dirty = 0;
		return (freekey);
	}

	/*
	 * Now, we have to take a key from someone.
	 * May as well take them all, so we get them
	 * from all of the non-system procs.
	 */
	for (p = allproc; p; p = p->p_nxt)
		if (p->p_dkey != 0 && (p->p_flag & SSYS) == 0) {
			freekey = p->p_dkey;
			dkey_cnt[freekey] = 0;
			p->p_dkey = 0;
		}
	dkeystats.ks_taken++;
	goto purge;
}

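/*
 * Convert a virtual address in process p, or a kernel virtual
 * address mapped by the Sysmap, to a physical address.
 */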
/*VARARGS1*/
vtoph(p, v)
	register struct proc *p;
	unsigned v;
{
	register struct pte *pte;
	register unsigned pg;

	pg = btop(v);
	if (pg >= BTOPKERNBASE)
		pte = &Sysmap[pg - BTOPKERNBASE];
	else
		pte = vtopte(p, pg);
	return ((pte->pg_pfnum << PGSHIFT) + (v & PGOFSET));
}