xref: /original-bsd/sys/hp300/hp300/vm_machdep.c (revision 89a39cb6)
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_machdep.c 1.18 89/08/23$
 *
 *	@(#)vm_machdep.c	7.5 (Berkeley) 06/21/90
 */

#include "param.h"
#include "systm.h"
#include "user.h"
#include "proc.h"
#include "cmap.h"
#include "vm.h"
#include "text.h"
#include "malloc.h"
#include "buf.h"

#include "cpu.h"
#include "pte.h"

/*
 * Set a red zone in the kernel stack after the u. area.
 * We don't support a redzone right now.  It really isn't clear
 * that it is a good idea since, if the kernel stack were to roll
 * into a write protected page, the processor would lock up (since
 * it cannot create an exception frame) and we would get no useful
 * post-mortem info.  Currently, under the DEBUG option, we just
 * check at every clock interrupt to see if the current k-stack has
 * gone too far (i.e. into the "redzone" page) and if so, panic.
 * Look at _lev6intr in locore.s for more details.
 */
/*ARGSUSED*/
setredzone(pte, vaddr)
	struct pte *pte;
	caddr_t vaddr;
{
}

/*
 * Check for valid program size
 * NB - Check data and data growth separately as they may overflow
 * when summed together.
 */
chksize(ts, ids, uds, ss)
	unsigned ts, ids, uds, ss;
{
	extern unsigned maxtsize;

	if (ctob(ts) > maxtsize ||
	    ctob(ids) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(ids + uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(ss) > u.u_rlimit[RLIMIT_STACK].rlim_cur) {
		return (ENOMEM);
	}
	return (0);
}

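/*
 * Flush translation buffer entries for `size' pages starting at
 * virtual page `v' after new PTEs have been installed.  For larger
 * changes it is cheaper to flush all user-side entries (TBIAU) than
 * to flush each page individually (TBIS); the user data cache is
 * purged in either case.
 */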
/*ARGSUSED*/
newptes(pte, v, size)
	struct pte *pte;
	u_int v;
	register int size;
{
	register caddr_t a;

#ifdef lint
	pte = pte;
#endif
	if (size >= 8)
		TBIAU();
	else {
		a = ptob(v);
		while (size > 0) {
			TBIS(a);
			a += NBPG;
			size--;
		}
	}
	DCIU();
}

/*
 * Change protection codes of text segment.
 * Have to flush translation buffer since this affects the virtual
 * memory mapping of the current process.
 */
chgprot(addr, tprot)
	caddr_t addr;
	long tprot;
{
	unsigned v;
	int tp;
	register struct pte *pte;
	register struct cmap *c;

	v = clbase(btop(addr));
	if (!isatsv(u.u_procp, v))
		return (EFAULT);
	tp = vtotp(u.u_procp, v);
	pte = tptopte(u.u_procp, tp);
	if (pte->pg_fod == 0 && pte->pg_pfnum) {
		c = &cmap[pgtocm(pte->pg_pfnum)];
		if (c->c_blkno)
			munhash(c->c_vp, (daddr_t)(u_long)c->c_blkno);
	}
	*(u_int *)pte &= ~PG_PROT;
	*(u_int *)pte |= tprot;
	TBIS(addr);
	return (0);
}

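/*
 * Change the protection of every text segment PTE of the current
 * process to `tprot', then flush the user translation buffer so the
 * new protection takes effect.
 */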
settprot(tprot)
	long tprot;
{
	register u_int *pte, i;

	pte = (u_int *)u.u_procp->p_p0br;
	for (i = 0; i < u.u_tsize; i++, pte++) {
		*pte &= ~PG_PROT;
		*pte |= tprot;
	}
	TBIAU();
}

/*
 * Simulate effect of VAX region length registers.
 * The one case where we must do anything is if a region has shrunk.
 * In that case we must invalidate all the PTEs for the no longer valid VAs.
 */
setptlr(region, nlen)
	int region, nlen;
{
	register struct pte *pte;
	register int change;
	int olen;

	if (region == 0) {
		olen = u.u_pcb.pcb_p0lr;
		u.u_pcb.pcb_p0lr = nlen;
	} else {
		olen = P1PAGES - u.u_pcb.pcb_p1lr;
		u.u_pcb.pcb_p1lr = nlen;
		nlen = P1PAGES - nlen;
	}
	if ((change = olen - nlen) <= 0)
		return;
	if (region == 0)
		pte = u.u_pcb.pcb_p0br + u.u_pcb.pcb_p0lr;
	else
		pte = u.u_pcb.pcb_p1br + u.u_pcb.pcb_p1lr - change;
	do {
		*(u_int *)pte++ = PG_NV;
	} while (--change);
	/* short cut newptes */
	TBIAU();
	DCIU();
}

/*
 * Map `size' bytes of physical memory starting at `paddr' into
 * kernel VA space using PTEs starting at `pte'.  Read/write and
 * cache-inhibit status are specified by `prot'.
 */
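/*
 * Illustrative (hypothetical) use, sketched only: a driver wanting a
 * cache-inhibited, read/write mapping of a device register block of
 * `regsize' bytes at physical address `regpa' might call
 *
 *	physaccess(pte, (caddr_t)regpa, regsize, PG_RW|PG_CI);
 *
 * where `pte' points at enough free kernel PTEs to cover the range.
 * The names regpa, regsize and pte are made up for the example.
 */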
physaccess(pte, paddr, size, prot)
	register struct pte *pte;
	caddr_t paddr;
	register int size;
	int prot;
{
	register u_int page;

	page = (u_int)paddr & PG_FRAME;
	for (size = btoc(size); size; size--) {
		*(int *)pte = PG_V | prot | page;
		page += NBPG;
		pte++;
	}
	TBIAS();
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLBYTES.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
	while (size > 0) {
		*tpte++ = *fpte;
		*(int *)fpte++ = PG_NV;
		TBIS(from);
		TBIS(to);
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+size
 * (presumably so debugger can plant a breakpoint).
 * All addresses are assumed to reside in the Sysmap.
 */
chgkprot(addr, size, rw)
	register caddr_t addr;
	int size, rw;
{
	register struct pte *pte;

	pte = &Sysmap[btop(addr)];
	while (size > 0) {
		pte->pg_prot = rw == B_WRITE? 0 : 1;
		TBIS(addr);
		addr += NBPG;
		size -= NBPG;
		pte++;
	}
}
#endif

/*
 * The probe[rw] routines should probably be redone in assembler
 * for efficiency.
 */
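/*
 * prober returns non-zero if the page containing user virtual address
 * `addr' lies below the end of the data segment or within the stack
 * region (or, under MAPMEM, in a valid mapped-memory page), i.e. the
 * address can safely be read.
 */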
prober(addr)
	register u_int addr;
{
	register int page;
	register struct proc *p;

	if (addr >= USRSTACK)
		return(0);
#ifdef HPUXCOMPAT
	if (ISHPMMADDR(addr))
		addr = HPMMBASEADDR(addr);
#endif
	page = btop(addr);
	p = u.u_procp;
	if (page < dptov(p, p->p_dsize) || page > sptov(p, p->p_ssize))
		return(1);
#ifdef MAPMEM
	if (page < dptov(p, p->p_dsize+p->p_mmsize) &&
	    (*(int *)vtopte(p, page) & (PG_FOD|PG_V)) == (PG_FOD|PG_V))
		return(1);
#endif
	return(0);
}

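/*
 * probew is the write-access counterpart of prober: in addition to the
 * region checks it requires the page's protection to be PG_RW.
 */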
probew(addr)
	register u_int addr;
{
	register int page;
	register struct proc *p;

	if (addr >= USRSTACK)
		return(0);
#ifdef HPUXCOMPAT
	if (ISHPMMADDR(addr))
		addr = HPMMBASEADDR(addr);
#endif
	page = btop(addr);
	p = u.u_procp;
	if (page < dptov(p, p->p_dsize) || page > sptov(p, p->p_ssize))
		return((*(int *)vtopte(p, page) & PG_PROT) == PG_RW);
#ifdef MAPMEM
	if (page < dptov(p, p->p_dsize+p->p_mmsize))
		return((*(int *)vtopte(p, page) & (PG_FOD|PG_V|PG_PROT))
		       == (PG_FOD|PG_V|PG_RW));
#endif
	return(0);
}

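/*
 * kernacc checks whether the kernel can access `count' bytes starting
 * at `addr' for read (B_READ) or write (B_WRITE): first that the
 * segment table entries covering the range are valid, then that each
 * PTE is valid and, for writes, not write protected.
 */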
/*
 * NB: assumes a physically contiguous kernel page table
 *     (makes life a LOT simpler).
 */
kernacc(addr, count, rw)
	register caddr_t addr;
	int count, rw;
{
	register struct ste *ste;
	register struct pte *pte;
	register u_int ix, cnt;
	extern long Syssize;

	if (count <= 0)
		return(0);
	ix = ((int)addr & SG_IMASK) >> SG_ISHIFT;
	cnt = (((int)addr + count + (1<<SG_ISHIFT)-1) & SG_IMASK) >> SG_ISHIFT;
	cnt -= ix;
	for (ste = &Sysseg[ix]; cnt; cnt--, ste++)
		/* should check SG_PROT, but we have no RO segments now */
		if (ste->sg_v == 0)
			return(0);
	ix = btop(addr);
	cnt = btop(addr+count+NBPG-1);
	if (cnt > (u_int)&Syssize)
		return(0);
	cnt -= ix;
	for (pte = &Sysmap[ix]; cnt; cnt--, pte++)
		if (pte->pg_v == 0 || (rw == B_WRITE && pte->pg_prot == 1))
			return(0);
	return(1);
}

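/*
 * useracc checks whether the current user process can access `count'
 * bytes starting at `addr' for read (B_READ) or write (B_WRITE) by
 * probing each page in the range with prober/probew.
 */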
useracc(addr, count, rw)
	register caddr_t addr;
	unsigned count;
	int rw;
{
	register int (*func)();
	register u_int addr2;
	extern int prober(), probew();

	if (count <= 0)
		return(0);
	addr2 = (u_int) addr;
	addr += count;
	func = (rw == B_READ) ? prober : probew;
	do {
		if ((*func)(addr2) == 0)
			return(0);
		addr2 = (addr2 + NBPG) & ~PGOFSET;
	} while (addr2 < (u_int)addr);
	return(1);
}

/*
 * Convert kernel VA to physical address
 */
kvtop(addr)
	register caddr_t addr;
{
	register int pf;

	pf = Sysmap[btop(addr)].pg_pfnum;
	if (pf == 0)
		panic("kvtop: zero page frame");
	return((u_int)ptob(pf) + ((int)addr & PGOFSET));
}

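/*
 * Return a pointer to the segment table entry mapping user virtual
 * address `va' in process `p'.  The segment table immediately follows
 * the process page tables, p_szpt pages past p_p0br.
 */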
struct ste *
vtoste(p, va)
	register struct proc *p;
	register u_int va;
{
	register struct ste *ste;

	ste = (struct ste *)((u_int)p->p_p0br + p->p_szpt * NBPG);
	return(ste + ((va & SG_IMASK) >> SG_ISHIFT));
}

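/*
 * Return the page frame number of the process segment table (the
 * Usrptmap page just past its page tables), suitable for loading into
 * the user segment table pointer.
 */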
initustp(p)
	register struct proc *p;
{
	return((int)Usrptmap[btokmx(p->p_p0br) + p->p_szpt].pg_pfnum);
}

/*
 * Initialize segment table to reflect PTEs in Usrptmap.
 * The segment table itself resides just past the process page tables,
 * at Usrptmap index p_szpt relative to p_p0br.
 */
initsegt(p)
	register struct proc *p;
{
	register int i, k, sz;
	register struct ste *ste;
	extern struct ste *vtoste();

	k = btokmx(p->p_p0br);
	ste = vtoste(p, 0);
	/* text and data */
	sz = ctopt(p->p_tsize + p->p_dsize + p->p_mmsize);
	for (i = 0; i < sz; i++, ste++) {
		*(int *)ste = SG_RW | SG_V;
		ste->sg_pfnum = Usrptmap[k++].pg_pfnum;
	}
	/*
	 * Bogus!  The kernelmap may map unused PT pages
	 * (since we don't shrink PTs) so we need to skip over
	 * those STEs.  We should really free the unused PT
	 * pages in expand().
	 */
	sz += ctopt(p->p_ssize + HIGHPAGES);
	if (sz < p->p_szpt)
		k += p->p_szpt - sz;
	/* hole */
	sz = NPTEPG - ctopt(p->p_ssize + HIGHPAGES);
	for ( ; i < sz; i++, ste++)
		*(int *)ste = SG_NV;
	/* stack and u-area */
	sz = NPTEPG;
	for ( ; i < sz; i++, ste++) {
		*(int *)ste = SG_RW | SG_V;
		ste->sg_pfnum = Usrptmap[k++].pg_pfnum;
	}
}

/*
 * Allocate/free cache-inhibited physical memory.
 * Assumes that malloc returns page aligned memory for requests which are
 * a multiple of the page size.  Hence, size must be such a multiple.
 */
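/*
 * Illustrative (hypothetical) use, sketched only: a driver needing an
 * uncached page for DMA descriptors might allocate it with
 *
 *	dp = cialloc(CLBYTES);
 *
 * check the return for NULL (the allocation can fail), and later
 * release it with cifree(dp, CLBYTES).  The size must be a multiple
 * of the cluster size as noted above.
 */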
caddr_t
cialloc(sz)
	int sz;
{
	caddr_t kva;
	register int npg, *pte;

	if (sz & CLOFSET)
		return(NULL);
	kva = (caddr_t)malloc(sz, M_DEVBUF, M_NOWAIT);
	if (kva) {
		if (!claligned(kva))
			panic("cialloc");
		pte = (int *)kvtopte(kva);
		npg = btoc(sz);
		while (--npg >= 0)
			*pte++ |= PG_CI;
		TBIAS();
	}
	return(kva);
}

cifree(kva, sz)
	caddr_t kva;
	int sz;
{
	register int npg, *pte;

	if (sz & CLOFSET)
		panic("cifree");
	pte = (int *)kvtopte(kva);
	npg = btoc(sz);
	while (--npg >= 0)
		*pte++ &= ~PG_CI;
	TBIAS();
	free(kva, M_DEVBUF);
}

extern char usrio[];
extern struct pte Usriomap[];
struct map *useriomap;
int usriowanted;

/*
 * Map an IO request into kernel virtual address space.  Requests fall into
 * one of five categories:
 *
 *	B_PHYS|B_UAREA:	User u-area swap.
 *			Address is relative to start of u-area (p_addr).
 *	B_PHYS|B_PAGET:	User page table swap.
 *			Address is a kernel VA in usrpt (Usrptmap).
 *	B_PHYS|B_DIRTY:	Dirty page push.
 *			Address is a VA in proc2's address space.
 *	B_PHYS|B_PGIN:	Kernel pagein of user pages.
 *			Address is VA in user's address space.
 *	B_PHYS:		User "raw" IO request.
 *			Address is VA in user's address space.
 *
 * All requests are (re)mapped into kernel VA space via the useriomap
 * (a name with only slightly more meaning than "kernelmap").
 */
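/*
 * A typical (hypothetical) caller, sketched only: for a raw IO request
 * the transfer would be bracketed roughly as
 *
 *	vmapbuf(bp);
 *	(*strat)(bp);
 *	wait for the transfer to complete;
 *	vunmapbuf(bp);
 *
 * where `strat' is the driver strategy routine; the names are made up
 * for the example.
 */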
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf, a;
	register caddr_t addr;
	register struct pte *pte, *iopte;
	register long flags = bp->b_flags;
	struct proc *p;
	int off, s;

	if ((flags & B_PHYS) == 0)
		panic("vmapbuf");
	/*
	 * Find PTEs for the area to be mapped
	 */
	p = flags&B_DIRTY ? &proc[2] : bp->b_proc;
	addr = bp->b_un.b_addr;
	if (flags & B_UAREA)
		pte = &p->p_addr[btop(addr)];
	else if (flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)addr)];
	else
		pte = vtopte(p, btop(addr));
	/*
	 * Allocate some kernel PTEs and load them
	 */
	off = (int)addr & PGOFSET;
	npf = btoc(bp->b_bcount + off);
	s = splbio();
	while ((a = rmalloc(useriomap, npf)) == 0) {
		usriowanted = 1;
		sleep((caddr_t)useriomap, PSWP);
	}
	splx(s);
	iopte = &Usriomap[a];
	bp->b_saveaddr = bp->b_un.b_addr;
	addr = bp->b_un.b_addr = (caddr_t)(usrio + (a << PGSHIFT)) + off;
	while (npf--) {
		mapin(iopte, (u_int)addr, pte->pg_pfnum, PG_CI|PG_RW|PG_V);
		iopte++, pte++;
		addr += NBPG;
	}
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
vunmapbuf(bp)
	register struct buf *bp;
{
	register int a, npf;
	register caddr_t addr = bp->b_un.b_addr;
	register struct pte *pte;
	int s;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	a = (int)(addr - usrio) >> PGSHIFT;
	npf = btoc(bp->b_bcount + ((int)addr & PGOFSET));
	s = splbio();
	rmfree(useriomap, npf, a);
	if (usriowanted) {
		usriowanted = 0;
		wakeup((caddr_t)useriomap);
	}
	splx(s);
	pte = &Usriomap[a];
	while (npf--) {
		*(int *)pte = PG_NV;
		TBIS((caddr_t)addr);
		addr += NBPG;
		pte++;
	}
	bp->b_un.b_addr = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
559