/*
 * Copyright (c) 1988 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Computer Consoles Inc.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)vm_machdep.c	7.4 (Berkeley) 04/27/90
 */

#include "param.h"
#include "systm.h"
#include "user.h"
#include "proc.h"
#include "cmap.h"
#include "mount.h"
#include "vm.h"
#include "text.h"
#include "kernel.h"

#include "pte.h"
#include "cpu.h"
#include "mtpr.h"
/*
 * Set a red zone in the kernel stack after the u. area:
 * the page just beyond the u. area is made kernel read-only,
 * so that a runaway kernel stack faults instead of silently
 * corrupting adjacent memory.
 */
setredzone(pte, vaddr)
	register struct pte *pte;
	caddr_t vaddr;
{

	pte += (sizeof (struct user) + NBPG - 1) / NBPG;
	*(int *)pte &= ~PG_PROT;
	*(int *)pte |= PG_URKR;
	if (vaddr)
		mtpr(TBIS, vaddr + sizeof (struct user) + NBPG - 1);
}

/*
 * Check for a valid program size.
 * N.B.: the data size and data growth are checked separately
 * as well as summed, since ctob() of the sum may overflow.
 */
chksize(ts, ids, uds, ss)
	register unsigned ts, ids, uds, ss;
{
	extern unsigned maxtsize;

	if (ctob(ts) > maxtsize ||
	    ctob(ids) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(ids + uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(ss) > u.u_rlimit[RLIMIT_STACK].rlim_cur) {
		u.u_error = ENOMEM;
		return (1);
	}
	return (0);
}
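
/*
 * Worked example of the overflow hazard noted above (illustrative
 * numbers only, assuming 32-bit unsigned arithmetic and 1K pages,
 * i.e. ctob(x) == x << 10): with ids == 0x300000 and uds == 0x100000,
 * ctob(ids + uds) wraps to 0 and would pass the limit check by
 * itself, while ctob(ids) == 0xc0000000 is still caught by the
 * separate check; hence ids and uds are checked both individually
 * and summed.
 */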

/*
 * New ptes have been installed for size pages starting at
 * virtual page v; flush the stale translations.
 */
/*ARGSUSED*/
newptes(pte, v, size)
	register struct pte *pte;
	u_int v;
	register int size;
{
	register caddr_t a = ptob(v);

#ifdef lint
	pte = pte;
#endif
	if (size >= 8) {
		/* beyond a handful of pages, a full flush is cheaper */
		mtpr(TBIA, 0);
		return;
	}
	while (size > 0) {
		mtpr(TBIS, a);
		a += NBPG;
		size--;
	}
}

/*
 * Change protection codes of the text segment.
 * The translation buffer has to be flushed, since this
 * affects the virtual memory mapping of the current process.
 */
chgprot(addr, tprot)
	caddr_t addr;
	long tprot;
{
	unsigned v;
	int tp;
	register struct pte *pte;
	register struct cmap *c;

	v = clbase(btop(addr));
	if (!isatsv(u.u_procp, v)) {
		u.u_error = EFAULT;
		return (0);
	}
	tp = vtotp(u.u_procp, v);
	pte = tptopte(u.u_procp, tp);
	if (pte->pg_fod == 0 && pte->pg_pfnum) {
		c = &cmap[pgtocm(pte->pg_pfnum)];
		if (c->c_blkno)
			munhash(c->c_vp, (daddr_t)(u_long)c->c_blkno);
	}
	*(int *)pte &= ~PG_PROT;
	*(int *)pte |= tprot;
	distcl(pte);
	tbiscl(v);
	return (1);
}

/*
 * Set the given protection on every text page of the
 * current process.
 */
settprot(tprot)
	long tprot;
{
	register int *ptaddr, i;

	ptaddr = (int *)mfpr(P0BR);
	for (i = 0; i < u.u_tsize; i++) {
		ptaddr[i] &= ~PG_PROT;
		ptaddr[i] |= tprot;
	}
	mtpr(TBIA, 0);
}

#ifdef notdef
/*
 * The rest are machine-dependent.
 */
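/*
 * Fetch the byte at physical address addr: map its page at the
 * vmmap window (kernel read-only), invalidate the old translation,
 * and read through the window.
 */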
getmemc(addr)
	caddr_t addr;
{
	register int c;
	struct pte savemap;

	savemap = mmap[0];
	*(int *)mmap = PG_V | PG_KR | btop(addr);
	mtpr(TBIS, vmmap);
	uncache(&vmmap[(int)addr & PGOFSET]);
	c = *(char *)&vmmap[(int)addr & PGOFSET];
	mmap[0] = savemap;
	mtpr(TBIS, vmmap);
	return (c & 0377);
}

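/*
 * Store a byte at physical address addr through the same window
 * (mapped kernel-writable), then purge the data and code caches
 * so the new contents are seen everywhere.
 */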
putmemc(addr, val)
	caddr_t addr;
	int val;
{
	struct pte savemap;

	savemap = mmap[0];
	*(int *)mmap = PG_V | PG_KW | btop(addr);
	mtpr(TBIS, vmmap);
	*(char *)&vmmap[(int)addr & PGOFSET] = val;

	mtpr(PADC, 0);		/* purge data cache */
	mtpr(PACC, 0);		/* purge code cache */

	mmap[0] = savemap;
	mtpr(TBIS, vmmap);
}
#endif

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLBYTES.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
	while (size > 0) {
		*tpte++ = *fpte;
		*(int *)fpte++ = 0;
		mtpr(TBIS, from);
		mtpr(TBIS, to);
		mtpr(P1DC, to);		/* purge !! */
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
}

/*
 * Code and data key management routines.
 *
 * The array ckey_cnt maintains the count of processes currently
 * sharing each code key.  The array ckey_cache maintains a record
 * of all code keys used since the last flush of the code cache.
 * Such keys may not be reused, even if unreferenced, until
 * the cache is flushed.  The data cache key handling is analogous.
 * The arrays ckey_cnt and ckey_cache are always kept in such a way
 * that the following invariant holds:
 *	ckey_cnt > 0	implies	ckey_cache == 1
 * meaning that as long as a code key is used by at least one
 * process, it is marked as being 'in the cache'.  Of course, the
 * following invariant also holds:
 *	ckey_cache == 0	implies	ckey_cnt == 0
 * which is just the contrapositive of the first invariant.
 * Equivalent invariants hold for the data key arrays.
 */
struct	keystats ckeystats = { NCKEY - 1 };
struct	keystats dkeystats = { NDKEY - 1 };
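
/*
 * A minimal consistency check for the invariants above; purely
 * illustrative (the name keyassert is hypothetical and nothing
 * in the system calls it).
 */
#ifdef notdef
keyassert()
{
	register int i;

	for (i = 1; i <= MAXCKEY; i++)
		if (ckey_cnt[i] > 0 && ckey_cache[i] == 0)
			panic("keyassert ckey");
	for (i = 1; i <= MAXDKEY; i++)
		if (dkey_cnt[i] > 0 && dkey_cache[i] == 0)
			panic("keyassert dkey");
}
#endif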

/*
 * Release a code key.
 */
ckeyrelease(key)
	int key;
{
	register int s;

	s = spl8();
	if (--ckey_cnt[key] < 0) {
		printf("ckeyrelease: key = %d\n", key);
		ckey_cnt[key] = 0;
	}
	if (ckey_cnt[key] == 0)
		ckeystats.ks_dirty++;
	splx(s);
}

/*
 * Release a data key.
 */
dkeyrelease(key)
	int key;
{
	register int s;

	s = spl8();
	if (--dkey_cnt[key] != 0) {
		printf("dkeyrelease: key = %d\n", key);
		dkey_cnt[key] = 0;
	}
	splx(s);
	dkeystats.ks_dirty++;
}

/*
 * Invalidate the data cache for a process
 * by exchanging cache keys.
 */
dkeyinval(p)
	register struct proc *p;
{
	int s;

	dkeystats.ks_inval++;
	s = spl8();
	if (--dkey_cnt[p->p_dkey] != 0)
		dkey_cnt[p->p_dkey] = 0;
	if (p == u.u_procp && !noproc) {
		p->p_dkey = getdatakey();
		mtpr(DCK, p->p_dkey);
	} else
		p->p_dkey = 0;
	splx(s);
}

/*
 * Get a code key.
 * Strategy: try each of the following in turn
 * until a key is allocated.
 *
 * 1) Find an unreferenced key not yet in the cache.
 *    If this fails, a code cache purge will be necessary.
 * 2) Find an unreferenced key.  Mark all unreferenced keys
 *    as available and purge the cache.
 * 3) Free the keys from all processes not sharing keys.
 * 4) Free the keys from all processes.
 */
getcodekey()
{
	register int i, s, freekey;
	register struct proc *p;
	int desperate = 0;
	static int lastkey = MAXCKEY;

	ckeystats.ks_allocs++;
	s = spl8();
	freekey = 0;
	for (i = lastkey + 1; ; i++) {
		if (i > MAXCKEY)
			i = 1;
		if ((int)ckey_cache[i] == 0) {	/* free key, take it */
			ckey_cache[i] = 1, ckey_cnt[i] = 1;
			splx(s);
			ckeystats.ks_allocfree++;
			ckeystats.ks_avail--;
			lastkey = i;
			return (i);
		}
		if (ckey_cnt[i] == 0)		/* save for potential use */
			freekey = i;
		if (i == lastkey)
			break;
	}
	/*
	 * All code keys were marked as being in cache.
	 * If a key was in the cache, but not in use, grab it.
	 */
	if (freekey != 0) {
purge:
		/*
		 * If we've run out of free keys,
		 * try to free up some other keys to avoid
		 * future cache purges.
		 */
		ckey_cnt[freekey] = 1, ckey_cache[freekey] = 1;
		for (i = 1; i <= MAXCKEY; i++)
			if (ckey_cnt[i] == 0) {
				ckey_cache[i] = 0;
				ckeystats.ks_avail++;
			}
		mtpr(PACC, 0);
		splx(s);
		ckeystats.ks_dirty = 0;
		ckeystats.ks_norefs++;
		return (freekey);
	}

	/*
	 * All keys are marked as in the cache and in use.
	 * Release all unshared keys, or, on the second pass,
	 * release all keys.
	 */
steal:
	for (p = allproc; p; p = p->p_nxt)
		if (p->p_ckey != 0 && (p->p_flag & SSYS) == 0) {
			i = p->p_ckey;
			if (ckey_cnt[i] == 1 || desperate) {
				p->p_ckey = 0;
				if (--ckey_cnt[i] == 0) {
					freekey = i;
					if (p->p_textp)
						p->p_textp->x_ckey = 0;
				}
			}
		}

	if (freekey) {
		ckeystats.ks_taken++;
		goto purge;
	} else {
		desperate++;
		goto steal;
	}
}
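
/*
 * Sketch of the expected code-key life cycle (illustrative only:
 * the function name below is hypothetical, and CCK is assumed to
 * be the code-cache key register analogous to DCK above).  A key
 * is allocated when a text segment is set up and returned through
 * ckeyrelease() when the text is freed.
 */
#ifdef notdef
ckeydemo(p)
	register struct proc *p;
{

	p->p_ckey = getcodekey();	/* ckey_cnt[key] becomes 1 */
	mtpr(CCK, p->p_ckey);		/* activate the key */
	ckeyrelease(p->p_ckey);		/* drop the reference again */
	p->p_ckey = 0;
}
#endif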

/*
 * Get a data key.
 *
 * General strategy:
 * 1) Try to find a data key that isn't in the cache.  Allocate it.
 * 2) If all data keys are in the cache, find one which isn't
 *    allocated.  Mark all unallocated keys as not in cache,
 *    purge the cache, and allocate this one.
 * 3) If all of them are allocated, free all processes' keys
 *    and let them reclaim them as they run.
 */
getdatakey()
{
	register int i, freekey;
	register struct proc *p;
	int s;
	static int lastkey = MAXDKEY;

	dkeystats.ks_allocs++;
	s = spl8();
	freekey = 0;
	for (i = lastkey + 1; ; i++) {
		if (i > MAXDKEY)
			i = 1;
		if ((int)dkey_cache[i] == 0) {	/* free key, take it */
			dkey_cache[i] = 1, dkey_cnt[i] = 1;
			splx(s);
			dkeystats.ks_allocfree++;
			dkeystats.ks_avail--;
			lastkey = i;
			return (i);
		}
		if (dkey_cnt[i] == 0)
			freekey = i;
		if (i == lastkey)
			break;
	}
purge:
	if (freekey) {
		/*
		 * Try to free up some more keys to avoid
		 * future allocations causing a cache purge.
		 */
		dkey_cnt[freekey] = 1, dkey_cache[freekey] = 1;
		for (i = 1; i <= MAXDKEY; i++)
			if (dkey_cnt[i] == 0) {
				dkey_cache[i] = 0;
				dkeystats.ks_avail++;
			}
		mtpr(PADC, 0);
		splx(s);
		dkeystats.ks_norefs++;
		dkeystats.ks_dirty = 0;
		return (freekey);
	}

	/*
	 * Now we have to take a key from someone.
	 * May as well take them all, so we get them
	 * from all of the idle procs.
	 */
	for (p = allproc; p; p = p->p_nxt)
		if (p->p_dkey != 0 && (p->p_flag & SSYS) == 0) {
			freekey = p->p_dkey;
			dkey_cnt[freekey] = 0;
			p->p_dkey = 0;
		}
	dkeystats.ks_taken++;
	goto purge;
}
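
/*
 * Companion sketch for data keys (again illustrative; the function
 * name is hypothetical).  A key is allocated lazily, loaded into
 * DCK for the running process, and eventually given back through
 * dkeyrelease(), or recycled wholesale by dkeyinval() above.
 */
#ifdef notdef
dkeydemo(p)
	register struct proc *p;
{

	if (p->p_dkey == 0)
		p->p_dkey = getdatakey();	/* lazy allocation */
	mtpr(DCK, p->p_dkey);			/* activate the key */
	dkeyrelease(p->p_dkey);			/* give it back at exit */
	p->p_dkey = 0;
}
#endif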

/*
 * Translate virtual address v in process p to a physical address.
 * Kernel virtual addresses are looked up directly in the Sysmap;
 * user addresses go through the process page tables.
 */
/*VARARGS1*/
vtoph(p, v)
	register struct proc *p;
	unsigned v;
{
	register struct pte *pte;
	register unsigned pg;

	pg = btop(v);
	if (pg >= BTOPKERNBASE)
		pte = &Sysmap[pg - BTOPKERNBASE];
	else
		pte = vtopte(p, pg);
	return ((pte->pg_pfnum << PGSHIFT) + (v & PGOFSET));
}
459