xref: /original-bsd/sys/vax/uba/uba.c (revision e188a54c)
1 /*
2  * Copyright (c) 1982, 1986 Regents of the University of California.
3  * All rights reserved.  The Berkeley software License Agreement
4  * specifies the terms and conditions for redistribution.
5  *
6  *	@(#)uba.c	7.5 (Berkeley) 05/14/88
7  */
8 
9 #include "../machine/pte.h"
10 
11 #include "param.h"
12 #include "systm.h"
13 #include "map.h"
14 #include "buf.h"
15 #include "vm.h"
16 #include "dir.h"
17 #include "user.h"
18 #include "proc.h"
19 #include "conf.h"
20 #include "dkstat.h"
21 #include "kernel.h"
22 
23 #include "../vax/cpu.h"
24 #include "../vax/mtpr.h"
25 #include "../vax/nexus.h"
26 #include "ubareg.h"
27 #include "ubavar.h"
28 
29 #ifdef DW780
/* %b-format bit-name string for the DW780 status register (see ubaerror printf's) */
30 char	ubasr_bits[] = UBASR_BITS;
31 #endif
32 
/* raise to UNIBUS interrupt priority while touching uba resource state */
33 #define	spluba	spl7		/* IPL 17 */
34 
/* BDP number field of a ubinfo word (bits 28-31) */
35 #define	BDPMASK	0xf0000000	/* see ubavar.h */
36 
37 /*
38  * Do transfer on device argument.  The controller
39  * and uba involved are implied by the device.
40  * We queue for resource wait in the uba code if necessary.
41  * We return 1 if the transfer was started, 0 if it was not.
42  *
43  * The onq argument must be zero iff the device is not on the
44  * queue for this UBA.  If onq is set, the device must be at the
45  * head of the queue.  In any case, if the transfer is started,
46  * the device will be off the queue, and if not, it will be on.
47  *
48  * Drivers that allocate one BDP and hold it for some time should
49  * set ud_keepbdp.  In this case um_bdp tells which BDP is allocated
50  * to the controller, unless it is zero, indicating that the controller
51  * does not now have a BDP.
52  */
53 ubaqueue(ui, onq)
54 	register struct uba_device *ui;
55 	int onq;
56 {
57 	register struct uba_ctlr *um = ui->ui_mi;
58 	register struct uba_hd *uh;
59 	register struct uba_driver *ud;
60 	register int s, unit;
61 
62 	uh = &uba_hd[um->um_ubanum];
63 	ud = um->um_driver;
	/* block UNIBUS interrupts while allocating adapter resources */
64 	s = spluba();
65 	/*
66 	 * Honor exclusive BDP use requests.
67 	 */
68 	if (ud->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
69 		goto rwait;
70 	if (ud->ud_keepbdp) {
71 		/*
72 		 * First get just a BDP (though in fact it comes with
73 		 * one map register too).
74 		 */
75 		if (um->um_bdp == 0) {
76 			um->um_bdp = uballoc(um->um_ubanum,
77 				(caddr_t)0, 0, UBA_NEEDBDP|UBA_CANTWAIT);
78 			if (um->um_bdp == 0)
79 				goto rwait;
80 		}
81 		/* now share it with this transfer */
82 		um->um_ubinfo = ubasetup(um->um_ubanum,
83 			um->um_tab.b_actf->b_actf,
84 			um->um_bdp|UBA_HAVEBDP|UBA_CANTWAIT);
85 	} else
86 		um->um_ubinfo = ubasetup(um->um_ubanum,
87 			um->um_tab.b_actf->b_actf, UBA_NEEDBDP|UBA_CANTWAIT);
88 	if (um->um_ubinfo == 0)
89 		goto rwait;
90 	uh->uh_users++;
91 	if (ud->ud_xclu)
92 		uh->uh_xclu = 1;
93 	splx(s);
	/* charge the transfer to the device's iostat slot, if it has one */
94 	if (ui->ui_dk >= 0) {
95 		unit = ui->ui_dk;
96 		dk_busy |= 1<<unit;
97 		dk_xfer[unit]++;
		/* b_bcount>>6 converts bytes to the dk_wds unit (64-byte words) */
98 		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
99 	}
	/* transfer started: unlink the device from the head of the wait queue */
100 	if (onq)
101 		uh->uh_actf = ui->ui_forw;
	/* fire the driver's "data go" routine to start the transfer */
102 	(*ud->ud_dgo)(um);
103 	return (1);
104 rwait:
	/* resources unavailable: append the device to the uba wait queue, unless
	 * it is already on it (onq), in which case leave it at the head */
105 	if (!onq) {
106 		ui->ui_forw = NULL;
107 		if (uh->uh_actf == NULL)
108 			uh->uh_actf = ui;
109 		else
110 			uh->uh_actl->ui_forw = ui;
111 		uh->uh_actl = ui;
112 	}
113 	splx(s);
114 	return (0);
115 }
116 
/*
 * Release the UNIBUS resources held by controller um for the
 * transfer just completed: drop any exclusive-use claim, decrement
 * the adapter user count, and free the map registers via ubarelse().
 * If the driver holds onto its BDP (ud_keepbdp), mask the BDP number
 * out of um_ubinfo first so ubarelse() does not give the BDP back.
 */
117 ubadone(um)
118 	register struct uba_ctlr *um;
119 {
120 	register struct uba_hd *uh = &uba_hd[um->um_ubanum];
121 
122 	if (um->um_driver->ud_xclu)
123 		uh->uh_xclu = 0;
124 	uh->uh_users--;
125 	if (um->um_driver->ud_keepbdp)
126 		um->um_ubinfo &= ~BDPMASK;	/* keep BDP for misers */
127 	ubarelse(um->um_ubanum, &um->um_ubinfo);
128 }
129 
130 /*
131  * Allocate and setup UBA map registers, and bdp's
132  * Flags says whether bdp is needed, whether the caller can't
133  * wait (e.g. if the caller is at interrupt level).
134  *
135  * Return value:
136  *	Bits 0-8	Byte offset
137  *	Bits 9-17	Start map reg. no.
138  *	Bits 18-27	No. mapping reg's
139  *	Bits 28-31	BDP no.
140  */
141 ubasetup(uban, bp, flags)
142 	int uban;
143 	register struct buf *bp;
144 	register int flags;
145 {
146 	register struct uba_hd *uh = &uba_hd[uban];
147 	register struct pte *pte, *io;
148 	register int npf;
149 	int pfnum, temp;
150 	int reg, bdp;
151 	unsigned v;
152 	struct proc *rp;
153 	int a, o, ubinfo;
154 
	/* DW730 and QBA adapters have no buffered data paths */
155 #ifdef DW730
156 	if (uh->uh_type == DW730)
157 		flags &= ~UBA_NEEDBDP;
158 #endif
159 #ifdef QBA
160 	if (uh->uh_type == QBA)
161 		flags &= ~UBA_NEEDBDP;
162 #endif
	/* byte offset within the first page, and page count;
	 * the extra +1 page is for the terminating invalid map register */
163 	o = (int)bp->b_un.b_addr & PGOFSET;
164 	npf = btoc(bp->b_bcount + o) + 1;
165 	a = spluba();
	/* allocate contiguous map registers; sleep for them unless
	 * the caller cannot wait (e.g. at interrupt level) */
166 	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
167 		if (flags & UBA_CANTWAIT) {
168 			splx(a);
169 			return (0);
170 		}
171 		uh->uh_mrwant++;
172 		sleep((caddr_t)&uh->uh_mrwant, PSWP);
173 	}
	/* UBA_NEED16: device has only 16 address bits, so the transfer
	 * must lie within the first 128 map registers (64Kb) */
174 	if ((flags & UBA_NEED16) && reg + npf > 128) {
175 		/*
176 		 * Could hang around and try again (if we can ever succeed).
177 		 * Won't help any current device...
178 		 */
179 		rmfree(uh->uh_map, (long)npf, (long)reg);
180 		splx(a);
181 		return (0);
182 	}
183 	bdp = 0;
184 	if (flags & UBA_NEEDBDP) {
		/* grab the lowest free BDP from the free bit mask, sleeping
		 * if none are free and the caller can wait */
185 		while ((bdp = ffs((long)uh->uh_bdpfree)) == 0) {
186 			if (flags & UBA_CANTWAIT) {
187 				rmfree(uh->uh_map, (long)npf, (long)reg);
188 				splx(a);
189 				return (0);
190 			}
191 			uh->uh_bdpwant++;
192 			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
193 		}
194 		uh->uh_bdpfree &= ~(1 << (bdp-1));
195 	} else if (flags & UBA_HAVEBDP)
		/* caller already owns a BDP; its number rides in flags<31:28> */
196 		bdp = (flags >> 28) & 0xf;
197 	splx(a);
	/* rmalloc returns 1-origin; map register indices are 0-origin */
198 	reg--;
	/* pack the return value: see the bit layout in the comment above */
199 	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	/* template for each map register: BDP number plus the valid bit */
200 	temp = (bdp << 21) | UBAMR_MRV;
201 	if (bdp && (o & 01))
202 		temp |= UBAMR_BO;
	/* locate the ptes describing the buffer: kernel address,
	 * page-table pages, u-area, or ordinary user/process pages */
203 	if ((bp->b_flags & B_PHYS) == 0)
204 		pte = kvtopte(bp->b_un.b_addr);
205 	else if (bp->b_flags & B_PAGET)
206 		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
207 	else {
		/* NOTE(review): proc[2] appears to own dirty pageout buffers
		 * here (presumably the pageout daemon) -- confirm */
208 		rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
209 		v = btop(bp->b_un.b_addr);
210 		if (bp->b_flags & B_UAREA)
211 			pte = &rp->p_addr[v];
212 		else
213 			pte = vtopte(rp, v);
214 	}
	/* copy page frame numbers into the adapter's map registers */
215 	io = &uh->uh_mr[reg];
216 	while (--npf > 0) {
217 		pfnum = pte->pg_pfnum;
218 		if (pfnum == 0)
219 			panic("uba zero uentry");
220 		pte++;
221 		*(int *)io++ = pfnum | temp;
222 	}
	/* terminate with an invalid map register to fence the transfer */
223 	*(int *)io = 0;
224 	return (ubinfo);
225 }
226 
227 /*
228  * Non buffer setup interface... set up a buffer and call ubasetup.
229  */
/*
 * Map an arbitrary kernel (addr, bcnt) region onto uba uban by
 * faking up a buffer header and handing it to ubasetup().
 * Returns the same packed ubinfo word as ubasetup(), 0 on failure.
 */
230 uballoc(uban, addr, bcnt, flags)
231 	int uban;
232 	caddr_t addr;
233 	int bcnt, flags;
234 {
235 	struct buf ubabuf;
236 
	/* B_BUSY (and no B_PHYS) makes ubasetup treat addr as kernel virtual */
237 	ubabuf.b_un.b_addr = addr;
238 	ubabuf.b_flags = B_BUSY;
239 	ubabuf.b_bcount = bcnt;
240 	/* that's all the fields ubasetup() needs */
241 	return (ubasetup(uban, &ubabuf, flags));
242 }
243 
244 /*
245  * Release resources on uba uban, and then unblock resource waiters.
246  * The map register parameter is by value since we need to block
247  * against uba resets on 11/780's.
248  */
249 ubarelse(uban, amr)
	/* uban is undeclared and so defaults to int (K&R) */
250 	int *amr;
251 {
252 	register struct uba_hd *uh = &uba_hd[uban];
253 	register int bdp, reg, npf, s;
254 	int mr;
255 
256 	/*
257 	 * Carefully see if we should release the space, since
258 	 * it may be released asynchronously at uba reset time.
259 	 */
260 	s = spluba();
261 	mr = *amr;
262 	if (mr == 0) {
263 		/*
264 		 * A ubareset() occurred before we got around
265 		 * to releasing the space... no need to bother.
266 		 */
267 		splx(s);
268 		return;
269 	}
	/* claim the registers: clear the caller's copy while still at spluba */
270 	*amr = 0;
271 	bdp = (mr >> 28) & 0x0f;
272 	if (bdp) {
		/* purge/flush the buffered data path before giving it back;
		 * the register write needed is adapter-type specific */
273 		switch (uh->uh_type) {
274 #ifdef DWBUA
275 		case DWBUA:
276 			BUA(uh->uh_uba)->bua_dpr[bdp] |= BUADPR_PURGE;
277 			break;
278 #endif
279 #ifdef DW780
280 		case DW780:
281 			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
282 			break;
283 #endif
284 #ifdef DW750
285 		case DW750:
286 			uh->uh_uba->uba_dpr[bdp] |=
287 			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
288 			break;
289 #endif
290 		default:
291 			break;
292 		}
293 		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
294 		if (uh->uh_bdpwant) {
295 			uh->uh_bdpwant = 0;
296 			wakeup((caddr_t)&uh->uh_bdpwant);
297 		}
298 	}
299 	/*
300 	 * Put back the registers in the resource map.
301 	 * The map code must not be reentered,
302 	 * nor can the registers be freed twice.
303 	 * Unblock interrupts once this is done.
304 	 */
305 	npf = (mr >> 18) & 0x3ff;
	/* +1 converts the 0-origin register index back to rmfree's 1-origin */
306 	reg = ((mr >> 9) & 0x1ff) + 1;
307 	rmfree(uh->uh_map, (long)npf, (long)reg);
308 	splx(s);
309 
310 	/*
311 	 * Wakeup sleepers for map registers,
312 	 * and also, if there are processes blocked in dgo(),
313 	 * give them a chance at the UNIBUS.
314 	 */
315 	if (uh->uh_mrwant) {
316 		uh->uh_mrwant = 0;
317 		wakeup((caddr_t)&uh->uh_mrwant);
318 	}
	/* restart queued transfers until one fails to get resources */
319 	while (uh->uh_actf && ubaqueue(uh->uh_actf, 1))
320 		;
321 }
322 
/*
 * Purge (flush) the buffered data path in use by controller um,
 * forcing any bytes buffered in the adapter out to memory.
 * The BDP number is extracted from um_ubinfo; the register write
 * required is adapter-type specific.  Adapters without BDPs
 * (DW730, QBA) fall through the default case and do nothing.
 */
323 ubapurge(um)
324 	register struct uba_ctlr *um;
325 {
326 	register struct uba_hd *uh = um->um_hd;
327 	register int bdp = (um->um_ubinfo >> 28) & 0x0f;
328 
329 	switch (uh->uh_type) {
330 #ifdef DWBUA
331 	case DWBUA:
332 		BUA(uh->uh_uba)->bua_dpr[bdp] |= BUADPR_PURGE;
333 		break;
334 #endif
335 #ifdef DW780
336 	case DW780:
337 		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
338 		break;
339 #endif
340 #ifdef DW750
341 	case DW750:
342 		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
343 		break;
344 #endif
345 	default:
346 		break;
347 	}
348 }
349 
/*
 * Initialize the resource maps for uba uhp: the map-register
 * resource map (sized by the adapter's usable UNIBUS memory) and
 * the free-BDP bit mask, whose width depends on the adapter type.
 * Called at autoconfiguration time and again from ubareset().
 */
350 ubainitmaps(uhp)
351 	register struct uba_hd *uhp;
352 {
353 
354 	rminit(uhp->uh_map, (long)uhp->uh_memsize, (long)1, "uba", UAMSIZ);
355 	switch (uhp->uh_type) {
356 #ifdef DWBUA
357 	case DWBUA:
358 		uhp->uh_bdpfree = (1<<NBDPBUA) - 1;
359 		break;
360 #endif
361 #ifdef DW780
362 	case DW780:
363 		uhp->uh_bdpfree = (1<<NBDP780) - 1;
364 		break;
365 #endif
366 #ifdef DW750
367 	case DW750:
368 		uhp->uh_bdpfree = (1<<NBDP750) - 1;
369 		break;
370 #endif
	/* DW730/QBA have no BDPs: leave uh_bdpfree alone */
371 	default:
372 		break;
373 	}
374 }
375 
376 /*
377  * Generate a reset on uba number uban.  Then
378  * call each device in the character device table,
379  * giving it a chance to clean up so as to be able to continue.
380  */
381 ubareset(uban)
382 	int uban;
383 {
384 	register struct cdevsw *cdp;
385 	register struct uba_hd *uh = &uba_hd[uban];
386 	int s;
387 
388 	s = spluba();
	/* discard all resource state: in-flight ubinfo words become stale,
	 * which ubarelse() detects via its *amr == 0 check */
389 	uh->uh_users = 0;
390 	uh->uh_zvcnt = 0;
391 	uh->uh_xclu = 0;
392 	uh->uh_actf = uh->uh_actl = 0;
393 	uh->uh_bdpwant = 0;
394 	uh->uh_mrwant = 0;
395 	ubainitmaps(uh);
	/* release anyone sleeping on the (now reinitialized) resources */
396 	wakeup((caddr_t)&uh->uh_bdpwant);
397 	wakeup((caddr_t)&uh->uh_mrwant);
398 	printf("uba%d: reset", uban);
	/* reinitialize the adapter hardware and any unibus memory devices */
399 	ubainit(uh->uh_uba);
400 	ubameminit(uban);
	/* let every character device driver recover its UNIBUS state;
	 * drivers print their names on the "uba%d: reset" line */
401 	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
402 		(*cdp->d_reset)(uban);
403 	ifubareset(uban);
404 	printf("\n");
405 	splx(s);
406 }
407 
408 /*
409  * Init a uba.  This is called with a pointer
410  * rather than a virtual address since it is called
411  * by code which runs with memory mapping disabled.
412  * In these cases we really don't need the interrupts
413  * enabled, but since we run with ipl high, we don't care
414  * if they are, they will never happen anyways.
415  * SHOULD GET POINTER TO UBA_HD INSTEAD OF UBA.
416  */
417 ubainit(uba)
418 	register struct uba_regs *uba;
419 {
420 	register struct uba_hd *uhp;
421 #ifdef QBA
422 	int isphys = 0;
423 #endif
424 
	/* find the uba_hd for this register pointer; it may be passed as
	 * either the mapped (uh_uba) or physical (uh_physuba) address,
	 * the latter when called with memory mapping disabled */
425 	for (uhp = uba_hd; uhp < uba_hd + numuba; uhp++) {
426 		if (uhp->uh_uba == uba)
427 			break;
428 		if (uhp->uh_physuba == uba) {
429 #ifdef QBA
430 			isphys++;
431 #endif
432 			break;
433 		}
434 	}
435 	if (uhp >= uba_hd + numuba) {
436 		printf("init unknown uba\n");
437 		return;
438 	}
439 
440 	switch (uhp->uh_type) {
441 #ifdef DWBUA
442 	case DWBUA:
443 		BUA(uba)->bua_csr |= BUACSR_UPI;
444 		/* give devices time to recover from power fail */
445 		DELAY(500000);
446 		break;
447 #endif
448 #ifdef DW780
449 	case DW780:
		/* adapter init, then enable interrupt fielding and error
		 * interrupts; spin until the adapter init completes */
450 		uba->uba_cr = UBACR_ADINIT;
451 		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
452 		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
453 			;
454 		break;
455 #endif
456 #ifdef DW750
457 	case DW750:
458 #endif
459 #ifdef DW730
460 	case DW730:
461 #endif
462 #ifdef QBA
463 	case QBA:
464 #endif
465 #if DW750 || DW730 || QBA
		/* these adapters are reset with the UNIBUS-init register */
466 		mtpr(IUR, 0);
467 		/* give devices time to recover from power fail */
468 /* THIS IS PROBABLY UNNECESSARY */
469 		DELAY(500000);
470 /* END PROBABLY UNNECESSARY */
471 #ifdef QBA
472 		/*
473 		 * Re-enable local memory access
474 		 * from the Q-bus.
475 		 */
476 		if (uhp->uh_type == QBA) {
			/* use the physical I/O page address if we were
			 * entered with mapping disabled */
477 			if (isphys)
478 				*((char *)QIOPAGE630 + QIPCR) = Q_LMEAE;
479 			else
480 				*(uhp->uh_iopage + QIPCR) = Q_LMEAE;
481 		}
482 #endif QBA
483 		break;
484 #endif DW750 || DW730 || QBA
485 	}
486 }
487 
488 #ifdef DW780
/* error-rate tunables used by ubaerror() below */
489 int	ubawedgecnt = 10;	/* reset the uba after this many errors */
490 int	ubacrazy = 500;		/* give up (panic) past this many errors */
491 int	zvcnt_max = 5000;	/* in 8 sec */
492 /*
493  * This routine is called by the locore code to process a UBA
494  * error on an 11/780 or 8600.  The arguments are passed
495  * on the stack, and value-result (through some trickery).
496  * In particular, the uvec argument is used for further
497  * uba processing so the result aspect of it is very important.
498  * It must not be declared register.
499  */
500 /*ARGSUSED*/
501 ubaerror(uban, uh, ipl, uvec, uba)
502 	register int uban;
503 	register struct uba_hd *uh;
504 	int ipl, uvec;
505 	register struct uba_regs *uba;
506 {
	/* implicit int (K&R): saved status register and spl level */
507 	register sr, s;
508 
	/* uvec == 0: a passive release ("zero vector") interrupt;
	 * count them and reset the uba if they come too fast */
509 	if (uvec == 0) {
510 		/*
511 		 * Declare dt as unsigned so that negative values
512 		 * are handled as >8 below, in case time was set back.
513 		 */
514 		u_long	dt = time.tv_sec - uh->uh_zvtime;
515 
516 		uh->uh_zvtotal++;
		/* restart the 8-second counting window */
517 		if (dt > 8) {
518 			uh->uh_zvtime = time.tv_sec;
519 			uh->uh_zvcnt = 0;
520 		}
521 		if (++uh->uh_zvcnt > zvcnt_max) {
522 			printf("uba%d: too many zero vectors (%d in <%d sec)\n",
523 				uban, uh->uh_zvcnt, dt + 1);
524 			printf("\tIPL 0x%x\n\tcnfgr: %b  Adapter Code: 0x%x\n",
525 				ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
526 				uba->uba_cnfgr&0xff);
527 			printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
528 				uba->uba_sr, ubasr_bits, uba->uba_dcr,
529 				(uba->uba_dcr&0x8000000)?"":"NOT ");
530 			ubareset(uban);
531 		}
532 		return;
533 	}
	/* SBI-level configuration fault: reset and drop the vector */
534 	if (uba->uba_cnfgr & NEX_CFGFLT) {
535 		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
536 		    uban, uba->uba_sr, ubasr_bits,
537 		    uba->uba_cnfgr, NEXFLT_BITS);
538 		ubareset(uban);
		/* uvec is value-result to locore: 0 suppresses dispatch */
539 		uvec = 0;
540 		return;
541 	}
542 	sr = uba->uba_sr;
543 	s = spluba();
544 	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
545 	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
546 	splx(s);
	/* writing the saved value back clears the error bits (write-1-to-clear) */
547 	uba->uba_sr = sr;
548 	uvec &= UBABRRVR_DIV;
	/* after every ubawedgecnt errors reset the uba; past ubacrazy, panic */
549 	if (++uh->uh_errcnt % ubawedgecnt == 0) {
550 		if (uh->uh_errcnt > ubacrazy)
551 			panic("uba crazy");
552 		printf("ERROR LIMIT ");
553 		ubareset(uban);
554 		uvec = 0;
555 		return;
556 	}
557 	return;
558 }
559 #endif
560 
561 /*
562  * Look for devices with unibus memory, allow them to configure, then disable
563  * map registers as necessary.  Called during autoconfiguration and ubareset.
564  * The device ubamem routine returns 0 on success, 1 on success if it is fully
565  * configured (has no csr or interrupt, so doesn't need to be probed),
566  * and -1 on failure.
567  */
568 ubameminit(uban)
	/* uban is undeclared and so defaults to int (K&R) */
569 {
570 	register struct uba_device *ui;
571 	register struct uba_hd *uh = &uba_hd[uban];
	/* base of the 8Kb unibus-memory region at the top of uba space */
572 	caddr_t umembase = umem[uban] + 0x3e000, addr;
	/* offset of a unibus address within that 8Kb region */
573 #define	ubaoff(off)	((int)(off) & 0x1fff)
574 
575 	uh->uh_lastmem = 0;
	/* walk the configuration table for devices with a ubamem routine */
576 	for (ui = ubdinit; ui->ui_driver; ui++) {
577 		if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
578 			continue;
579 		if (ui->ui_driver->ud_ubamem) {
580 			/*
581 			 * During autoconfiguration, need to fudge ui_addr.
582 			 */
583 			addr = ui->ui_addr;
584 			ui->ui_addr = umembase + ubaoff(addr);
			/* ud_ubamem returns 1 = fully configured, 0 = needs
			 * probing, -1 = failure (see comment above) */
585 			switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
586 			case 1:
587 				ui->ui_alive = 1;
588 				/* FALLTHROUGH */
589 			case 0:
590 				ui->ui_ubanum = uban;
591 				break;
592 			}
593 			ui->ui_addr = addr;
594 		}
595 	}
596 #ifdef DW780
597 	/*
598 	 * On a DW780, throw away any map registers disabled by rounding
599 	 * the map disable in the configuration register
600 	 * up to the next 8K boundary, or below the last unibus memory.
601 	 */
602 	if (uh->uh_type == DW780) {
603 		register i;
604 
605 		i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
		/* claim registers one at a time, high to low, so they can
		 * never be handed out for DMA mapping */
606 		while (i)
607 			(void) rmget(uh->uh_map, 1, i--);
608 	}
609 #endif
610 }
611 
612 /*
613  * Allocate UNIBUS memory.  Allocates and initializes
614  * sufficient mapping registers for access.  On a 780,
615  * the configuration register is setup to disable UBA
616  * response on DMA transfers to addresses controlled
617  * by the disabled mapping registers.
618  * On a DW780, should only be called from ubameminit, or in ascending order
619  * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
620  * the last unibus memory would free unusable map registers.
621  * Doalloc is 1 to allocate, 0 to deallocate.
622  */
623 ubamem(uban, addr, npg, doalloc)
624 	int uban, addr, npg, doalloc;
625 {
626 	register struct uba_hd *uh = &uba_hd[uban];
627 	register int a;
628 	int s;
629 
	/* convert the unibus byte address to a 1-origin map register index */
630 	a = (addr >> 9) + 1;
631 	s = spluba();
632 	if (doalloc)
633 		a = rmget(uh->uh_map, npg, a);
634 	else
635 		rmfree(uh->uh_map, (long)npg, (long)a);
636 	splx(s);
	/* note: after rmfree a is still nonzero, so this runs on free too */
637 	if (a) {
638 		register int i, *m;
639 
		/* invalidate the map registers covering the region */
640 		m = (int *)&uh->uh_mr[a - 1];
641 		for (i = 0; i < npg; i++)
642 			*m++ = 0;	/* All off, especially 'valid' */
		/* track the top of unibus memory (512-byte pages) */
643 		i = addr + npg * 512;
644 		if (doalloc && i > uh->uh_lastmem)
645 			uh->uh_lastmem = i;
646 		else if (doalloc == 0 && i == uh->uh_lastmem)
647 			uh->uh_lastmem = addr;
648 #ifdef DW780
649 		/*
650 		 * On a 780, set up the map register disable
651 		 * field in the configuration register.  Beware
652 		 * of callers that request memory ``out of order''
653 		 * or in sections other than 8K multiples.
654 		 * Ubameminit handles such requests properly, however.
655 		 */
656 		if (uh->uh_type == DW780) {
657 			i = uh->uh_uba->uba_cr &~ 0x7c000000;
658 			i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
659 			uh->uh_uba->uba_cr = i;
660 		}
661 #endif
662 	}
663 	return (a);
664 }
665 
666 #include "ik.h"
667 #include "vs.h"
668 #if NIK > 0 || NVS > 0
669 /*
670  * Map a virtual address into users address space. Actually all we
671  * do is turn on the user mode write protection bits for the particular
672  * page of memory involved.
673  */
674 maptouser(vaddress)
675 	caddr_t vaddress;
676 {
677 
	/* set the page's protection to user-writable (PG_UW field value) */
678 	kvtopte(vaddress)->pg_prot = (PG_UW >> 27);
679 }
680 
/* Undo maptouser(): restore the page to kernel-write-only protection. */
681 unmaptouser(vaddress)
682 	caddr_t vaddress;
683 {
684 
685 	kvtopte(vaddress)->pg_prot = (PG_KW >> 27);
686 }
687 #endif
688