xref: /original-bsd/sys/vax/uba/uba.c (revision 9c59a687)
1 /*	uba.c	4.41	82/03/29	*/
2 
3 #include "../h/param.h"
4 #include "../h/systm.h"
5 #include "../h/cpu.h"
6 #include "../h/map.h"
7 #include "../h/pte.h"
8 #include "../h/buf.h"
9 #include "../h/vm.h"
10 #include "../h/ubareg.h"
11 #include "../h/ubavar.h"
12 #include "../h/dir.h"
13 #include "../h/user.h"
14 #include "../h/proc.h"
15 #include "../h/conf.h"
16 #include "../h/mtpr.h"
17 #include "../h/nexus.h"
18 #include "../h/dk.h"
19 
#if VAX780
/* Printable names of the UBA status-register bits, for %b-style messages. */
char	ubasr_bits[] = UBASR_BITS;
#endif
23 
/*
 * Do transfer on device argument.  The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 * If you call this routine with the head of the queue for a
 * UBA, it will automatically remove the device from the UBA
 * queue before it returns.  If some other device is given
 * as argument, it will be added to the request queue if the
 * request cannot be started immediately.  This means that
 * passing a device which is on the queue but not at the head
 * of the request queue is likely to be a disaster.
 */
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spl6();
	/*
	 * Can't start now if an exclusive-use driver wants the UBA while
	 * it is busy, or if another driver already holds it exclusively.
	 */
	if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	/*
	 * Try to grab map registers (and a buffered data path) without
	 * sleeping; on failure, queue for a retry from ubarelse().
	 */
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	if (ui->ui_dk >= 0) {
		/* iostat accounting: busy bit, transfer count, word count */
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	/* If this device headed the resource-wait queue, dequeue it. */
	if (uh->uh_actf == ui)
		uh->uh_actf = ui->ui_forw;
	(*um->um_driver->ud_dgo)(um);		/* start the transfer */
	return (1);
rwait:
	/* Append to the UBA resource-wait queue unless already at its head. */
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}
78 
79 ubadone(um)
80 	register struct uba_ctlr *um;
81 {
82 	register struct uba_hd *uh = &uba_hd[um->um_ubanum];
83 
84 	if (um->um_driver->ud_xclu)
85 		uh->uh_xclu = 0;
86 	uh->uh_users--;
87 	ubarelse(um->um_ubanum, &um->um_ubinfo);
88 }
89 
/*
 * Allocate and setup UBA map registers, and bdp's
 * Flags says whether bdp is needed, whether the caller can't
 * wait (e.g. if the caller is at interrupt level).
 *
 * Return value:
 *	Bits 0-8	Byte offset
 *	Bits 9-17	Start map reg. no.
 *	Bits 18-27	No. mapping reg's
 *	Bits 28-31	BDP no.
 */
ubasetup(uban, bp, flags)
	struct buf *bp;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int temp, i;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if VAX7ZZ
	/* presumably the 7ZZ has no buffered data paths -- confirm */
	if (cpu == VAX_7ZZ)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);
	o = (int)bp->b_un.b_addr & PGOFSET;
	/* One extra map register is taken for the invalid guard entry. */
	npf = btoc(bp->b_bcount + o) + 1;
	a = spl6();
	/* Allocate contiguous map registers; sleep unless UBA_CANTWAIT. */
	while ((reg = rmalloc(uh->uh_map, npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)uh->uh_map, PSWP);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		/* Claim a free buffered data path (1-origin bit number). */
		while ((bdp = ffs(uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				/* back out the map registers before failing */
				rmfree(uh->uh_map, npf, reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)uh->uh_map, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		/* Caller already owns a bdp; its number rides in flags<31:28>. */
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	io = &uh->uh_uba->uba_map[reg];
	temp = (bdp << 21) | UBAMR_MRV;		/* data path no. + valid bit */
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;		/* transfer starts on odd byte */
	if (bp->b_flags & B_UAREA) {
		/* Map the tail of the u. area through the proc's p_addr ptes. */
		for (i = UPAGES - bp->b_bcount / NBPG; i < UPAGES; i++) {
			if (rp->p_addr[i].pg_pfnum == 0)
				panic("uba: zero upage");
			*(int *)io++ = rp->p_addr[i].pg_pfnum | temp;
		}
	} else if ((bp->b_flags & B_PHYS) == 0) {
		/* Kernel virtual address: translate through Sysmap. */
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
		while (--npf != 0)
			*(int *)io++ = pte++->pg_pfnum | temp;
	} else {
		/* Raw I/O: map user pages or page-table pages. */
		if (bp->b_flags & B_PAGET)
			pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
		else
			pte = vtopte(rp, v);
		while (--npf != 0) {
			if (pte->pg_pfnum == 0)
				panic("uba zero uentry");
			*(int *)io++ = pte++->pg_pfnum | temp;
		}
	}
	/* Invalid guard register to stop runaway UNIBUS transfers. */
	*(int *)io++ = 0;
	return (ubinfo);
}
174 
175 /*
176  * Non buffer setup interface... set up a buffer and call ubasetup.
177  */
178 uballoc(uban, addr, bcnt, flags)
179 	int uban;
180 	caddr_t addr;
181 	int bcnt, flags;
182 {
183 	struct buf ubabuf;
184 
185 	ubabuf.b_un.b_addr = addr;
186 	ubabuf.b_flags = B_BUSY;
187 	ubabuf.b_bcount = bcnt;
188 	/* that's all the fields ubasetup() needs */
189 	return (ubasetup(uban, &ubabuf, flags));
190 }
191 
/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map register parameter is by value since we need to block
 * against uba resets on 11/780's.
 */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spl6();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;		/* claim the release, atomically w.r.t. resets */
	splx(s);		/* let interrupts in, we're safe for a while */
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		/* Purge the buffered data path before handing it back. */
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)uh->uh_map);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered, so we do this
	 * at high ipl.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;	/* rmalloc numbers registers from 1 */
	s = spl6();
	rmfree(uh->uh_map, npf, reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)uh->uh_map);
	}
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}
264 
265 ubapurge(um)
266 	register struct uba_ctlr *um;
267 {
268 	register struct uba_hd *uh = um->um_hd;
269 	register int bdp = (um->um_ubinfo >> 28) & 0x0f;
270 
271 	switch (cpu) {
272 #if VAX780
273 	case VAX_780:
274 		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
275 		break;
276 #endif
277 #if VAX750
278 	case VAX_750:
279 		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
280 		break;
281 #endif
282 	}
283 }
284 
285 /*
286  * Generate a reset on uba number uban.  Then
287  * call each device in the character device table,
288  * giving it a chance to clean up so as to be able to continue.
289  */
290 ubareset(uban)
291 	int uban;
292 {
293 	register struct cdevsw *cdp;
294 	register struct uba_hd *uh = &uba_hd[uban];
295 	int s;
296 
297 	s = spl6();
298 	uh->uh_users = 0;
299 	uh->uh_zvcnt = 0;
300 	uh->uh_xclu = 0;
301 	uh->uh_hangcnt = 0;
302 	uh->uh_actf = uh->uh_actl = 0;
303 	uh->uh_bdpwant = 0;
304 	uh->uh_mrwant = 0;
305 	wakeup((caddr_t)&uh->uh_bdpwant);
306 	wakeup((caddr_t)&uh->uh_mrwant);
307 	printf("uba%d: reset", uban);
308 	ubainit(uh->uh_uba);
309 	for (cdp = cdevsw; cdp->d_open; cdp++)
310 		(*cdp->d_reset)(uban);
311 #ifdef INET
312 	ifubareset(uban);
313 #endif
314 	printf("\n");
315 	splx(s);
316 }
317 
/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high, we don't care
 * if they are, they will never happen anyways.
 */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if VAX780
	case VAX_780:
		/*
		 * Adapter init, then re-enable interrupt fielding and
		 * error interrupts; busy-wait until the adapter signals
		 * initialization complete in the configuration register.
		 */
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX7ZZ
	case VAX_7ZZ:
#endif
#if defined(VAX750) || defined(VAX7ZZ)
		/* On these machines a UNIBUS init is an internal register write. */
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}
355 
356 #if VAX780
/*
 * Check to make sure the UNIBUS adaptor is not hung,
 * with an interrupt in the register to be presented,
 * but not presenting it for an extended period (5 seconds).
 */
unhang()
{
	register int uban;

	for (uban = 0; uban < numuba; uban++) {
		register struct uba_hd *uh = &uba_hd[uban];
		register struct uba_regs *up = uh->uh_uba;

		/*
		 * NOTE(review): a clean status register makes us return,
		 * skipping the check of any remaining adapters rather than
		 * continuing the loop -- verify this is intended.
		 */
		if (up->uba_sr == 0)
			return;
		/* presumably write-one-to-clear of CRD/LEB -- confirm */
		up->uba_sr = UBASR_CRD|UBASR_LEB;
		uh->uh_hangcnt++;
		if (uh->uh_hangcnt > 5*hz) {
			/* No interrupt presented for ~5 sec: declare it hung. */
			uh->uh_hangcnt = 0;
			printf("uba%d: hung\n", uban);
			ubareset(uban);
		}
	}
}
381 
382 /*
383  * This is a timeout routine which decrements the ``i forgot to
384  * interrupt'' counts, on an 11/780.  This prevents slowly growing
385  * counts from causing a UBA reset since we are interested only
386  * in hang situations.
387  */
388 ubawatch()
389 {
390 	register struct uba_hd *uh;
391 	register int uban;
392 
393 	if (panicstr)
394 		return;
395 	for (uban = 0; uban < numuba; uban++) {
396 		uh = &uba_hd[uban];
397 		if (uh->uh_hangcnt)
398 			uh->uh_hangcnt--;
399 	}
400 }
401 
int	ubawedgecnt = 10;	/* reset the UBA after every this-many errors */
int	ubacrazy = 500;		/* total error count at which we panic instead */
404 /*
405  * This routine is called by the locore code to
406  * process a UBA error on an 11/780.  The arguments are passed
407  * on the stack, and value-result (through some trickery).
408  * In particular, the uvec argument is used for further
409  * uba processing so the result aspect of it is very important.
410  * It must not be declared register.
411  */
412 /*ARGSUSED*/
413 ubaerror(uban, uh, xx, uvec, uba)
414 	register int uban;
415 	register struct uba_hd *uh;
416 	int uvec;
417 	register struct uba_regs *uba;
418 {
419 	register sr, s;
420 
421 	if (uvec == 0) {
422 		uh->uh_zvcnt++;
423 		if (uh->uh_zvcnt > 250000) {
424 			printf("uba%d: too many zero vectors\n");
425 			ubareset(uban);
426 		}
427 		uvec = 0;
428 		return;
429 	}
430 	if (uba->uba_cnfgr & NEX_CFGFLT) {
431 		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
432 		    uban, uba->uba_sr, ubasr_bits,
433 		    uba->uba_cnfgr, NEXFLT_BITS);
434 		ubareset(uban);
435 		uvec = 0;
436 		return;
437 	}
438 	sr = uba->uba_sr;
439 	s = spl7();
440 	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
441 	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
442 	splx(s);
443 	uba->uba_sr = sr;
444 	uvec &= UBABRRVR_DIV;
445 	if (++uh->uh_errcnt % ubawedgecnt == 0) {
446 		if (uh->uh_errcnt > ubacrazy)
447 			panic("uba crazy");
448 		printf("ERROR LIMIT ");
449 		ubareset(uban);
450 		uvec = 0;
451 		return;
452 	}
453 	return;
454 }
455 #endif
456 
457 #ifdef notdef
/*
 * This routine allows remapping of previously
 * allocated UNIBUS bdp and map resources
 * onto different memory addresses.
 * It should only be used by routines which need
 * small fixed length mappings for long periods of time
 * (like the ARPANET ACC IMP interface).
 * It only maps kernel addresses.
 */
ubaremap(uban, ubinfo, addr)
	int uban;
	register unsigned ubinfo;
	caddr_t addr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register struct pte *pte, *io;
	register int temp, bdp;
	int npf, o;

	o = (int)addr & PGOFSET;	/* byte offset within first page */
	/* Unpack bdp number, register count and first map register. */
	bdp = (ubinfo >> 28) & 0xf;
	npf = (ubinfo >> 18) & 0x3ff;
	io = &uh->uh_uba->uba_map[(ubinfo >> 9) & 0x1ff];
	temp = (bdp << 21) | UBAMR_MRV;

	/*
	 * If using buffered data path initiate purge
	 * of old data and set byte offset bit if next
	 * transfer will be from odd address.
	 */
	if (bdp) {
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		if (o & 1)
			temp |= UBAMR_BO;
	}

	/*
	 * Set up the map registers, leaving an invalid reg
	 * at the end to guard against wild unibus transfers.
	 */
	pte = &Sysmap[btop(((int)addr)&0x7fffffff)];
	while (--npf != 0)
		*(int *)io++ = pte++->pg_pfnum | temp;
	*(int *)io = 0;

	/*
	 * Return effective UNIBUS address.
	 * NOTE(review): the low 9 bits of ubinfo still hold the old
	 * byte offset; OR-ing in the new o assumes they were equal or
	 * zero -- verify against callers.
	 */
	return (ubinfo | o);
}
520 #endif
521