xref: /original-bsd/sys/vax/uba/uba.c (revision 1f3a482a)
1 /*	uba.c	4.34	81/07/08	*/
2 
3 #include "../h/param.h"
4 #include "../h/systm.h"
5 #include "../h/cpu.h"
6 #include "../h/map.h"
7 #include "../h/pte.h"
8 #include "../h/buf.h"
9 #include "../h/vm.h"
10 #include "../h/ubareg.h"
11 #include "../h/ubavar.h"
12 #include "../h/dir.h"
13 #include "../h/user.h"
14 #include "../h/proc.h"
15 #include "../h/conf.h"
16 #include "../h/mtpr.h"
17 #include "../h/nexus.h"
18 #include "../h/dk.h"
19 
#if VAX780
/* Printable names for the 11/780 UBA status register bits (used with %b). */
char	ubasr_bits[] = UBASR_BITS;
#endif
23 
/*
 * Do transfer on device argument.  The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 * If you call this routine with the head of the queue for a
 * UBA, it will automatically remove the device from the UBA
 * queue before it returns.  If some other device is given
 * as argument, it will be added to the request queue if the
 * request cannot be started immediately.  This means that
 * passing a device which is on the queue but not at the head
 * of the request queue is likely to be a disaster.
 */
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spl6();		/* keep UBA resource state consistent */
	/*
	 * An exclusive-use driver may not start while other users are
	 * active, and nothing may start while an exclusive user holds
	 * the UBA.
	 */
	if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)		/* map regs or bdp not available now */
		goto rwait;
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	if (ui->ui_dk >= 0) {
		/* mark the drive busy for iostat-style accounting */
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
	}
	if (uh->uh_actf == ui)		/* dequeue if at head of resource queue */
		uh->uh_actf = ui->ui_forw;
	(*um->um_driver->ud_dgo)(um);	/* fire off the transfer */
	if (ui->ui_dk >= 0) {
		dk_xfer[unit]++;
		/* accumulate transfer size in 64-byte units (bcount >> 6) */
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	return (1);
rwait:
	/*
	 * Could not start: append the device to the UBA's resource-wait
	 * queue (unless it is already at the head); ubarelse() will retry.
	 */
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}
80 
81 ubadone(um)
82 	register struct uba_ctlr *um;
83 {
84 	register struct uba_hd *uh = &uba_hd[um->um_ubanum];
85 
86 	if (um->um_driver->ud_xclu)
87 		uh->uh_xclu = 0;
88 	uh->uh_users--;
89 	ubarelse(um->um_ubanum, &um->um_ubinfo);
90 }
91 
/*
 * Allocate and setup UBA map registers, and bdp's
 * Flags says whether bdp is needed, whether the caller can't
 * wait (e.g. if the caller is at interrupt level).
 *
 * Return value:
 *	Bits 0-8	Byte offset
 *	Bits 9-17	Start map reg. no.
 *	Bits 18-27	No. mapping reg's
 *	Bits 28-31	BDP no.
 *
 * Returns 0 only when UBA_CANTWAIT is set and a resource is
 * unavailable; otherwise we sleep until the resources appear.
 */
ubasetup(uban, bp, flags)
	struct buf *bp;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int temp, i;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if VAX7ZZ
	/* the 7ZZ never uses a buffered data path */
	if (cpu == VAX_7ZZ)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);		/* first virtual page frame */
	o = (int)bp->b_un.b_addr & PGOFSET;	/* byte offset within the page */
	npf = btoc(bp->b_bcount + o) + 1;	/* +1 for trailing invalid guard entry */
	a = spl6();
	/* grab a contiguous run of map registers, sleeping if allowed */
	while ((reg = rmalloc(uh->uh_map, npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)uh->uh_map, PSWP);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		/* pick a buffered data path from the free bitmask */
		while ((bdp = ffs(uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, npf, reg);	/* undo map alloc */
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)uh->uh_map, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	}
	splx(a);
	reg--;			/* rmalloc/rmfree are 1-based; hardware is 0-based */
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	io = &uh->uh_uba->uba_map[reg];
	temp = (bdp << 21) | UBAMR_MRV;		/* common bits: bdp no. + map valid */
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;	/* odd start address: byte-offset mode */
	if (bp->b_flags & B_UAREA) {
		/* u. area transfer: map the last b_bcount bytes of the u pages */
		for (i = UPAGES - bp->b_bcount / NBPG; i < UPAGES; i++) {
			if (rp->p_addr[i].pg_pfnum == 0)
				panic("uba: zero upage");
			*(int *)io++ = rp->p_addr[i].pg_pfnum | temp;
		}
	} else if ((bp->b_flags & B_PHYS) == 0) {
		/* kernel virtual address: page frames come from Sysmap */
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
		while (--npf != 0)
			*(int *)io++ = pte++->pg_pfnum | temp;
	} else {
		/* physical (raw) I/O: ptes from the page tables or user map */
		if (bp->b_flags & B_PAGET)
			pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
		else
			pte = vtopte(rp, v);
		while (--npf != 0) {
			if (pte->pg_pfnum == 0)
				panic("uba zero uentry");
			*(int *)io++ = pte++->pg_pfnum | temp;
		}
	}
	*(int *)io++ = 0;	/* invalid guard entry stops runaway transfers */
	return (ubinfo);
}
175 
176 /*
177  * Non buffer setup interface... set up a buffer and call ubasetup.
178  */
179 uballoc(uban, addr, bcnt, flags)
180 	int uban;
181 	caddr_t addr;
182 	int bcnt, flags;
183 {
184 	struct buf ubabuf;
185 
186 	ubabuf.b_un.b_addr = addr;
187 	ubabuf.b_flags = B_BUSY;
188 	ubabuf.b_bcount = bcnt;
189 	/* that's all the fields ubasetup() needs */
190 	return (ubasetup(uban, &ubabuf, flags));
191 }
192 
/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map register parameter is by value since we need to block
 * against uba resets on 11/780's.
 */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spl6();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;		/* claim the info word before dropping ipl */
	splx(s);		/* let interrupts in, we're safe for a while */
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		/* flush/purge the buffered data path before freeing it */
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)uh->uh_map);	/* bdp sleepers wait on uh_map */
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered, so we do this
	 * at high ipl.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;	/* back to rmfree's 1-based numbering */
	s = spl6();
	rmfree(uh->uh_map, npf, reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)uh->uh_map);
	}
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}
265 
/*
 * Initiate a purge of the buffered data path in use by controller
 * um, so that any data still held in the BDP reaches memory.
 */
ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	switch (cpu) {
#if VAX780
	case VAX_780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#if VAX750
	case VAX_750:
		/* presumably also clears pending NXM/UCE error status, per bit names */
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	}
}
285 
286 /*
287  * Generate a reset on uba number uban.  Then
288  * call each device in the character device table,
289  * giving it a chance to clean up so as to be able to continue.
290  */
291 ubareset(uban)
292 	int uban;
293 {
294 	register struct cdevsw *cdp;
295 	register struct uba_hd *uh = &uba_hd[uban];
296 	int s;
297 
298 	s = spl6();
299 	uh->uh_users = 0;
300 	uh->uh_zvcnt = 0;
301 	uh->uh_xclu = 0;
302 	uh->uh_hangcnt = 0;
303 	uh->uh_actf = uh->uh_actl = 0;
304 	uh->uh_bdpwant = 0;
305 	uh->uh_mrwant = 0;
306 	wakeup((caddr_t)&uh->uh_bdpwant);
307 	wakeup((caddr_t)&uh->uh_mrwant);
308 	printf("uba%d: reset", uban);
309 	ubainit(uh->uh_uba);
310 	for (cdp = cdevsw; cdp->d_open; cdp++)
311 		(*cdp->d_reset)(uban);
312 	printf("\n");
313 	splx(s);
314 }
315 
/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high, we don't care
 * if they are, they will never happen anyways.
 */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if VAX780
	case VAX_780:
		/* adapter init, then re-enable interrupt fielding and error interrupts */
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		/* busy-wait until the adapter signals init complete */
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX7ZZ
	case VAX_7ZZ:
#endif
#if defined(VAX750) || defined(VAX7ZZ)
		mtpr(IUR, 0);		/* UNIBUS init via internal processor register */
		/* give devices time to recover from power fail */
/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}
353 
354 #if VAX780
/*
 * Check to make sure the UNIBUS adaptor is not hung,
 * with an interrupt in the register to be presented,
 * but not presenting it for an extended period (5 seconds).
 */
unhang()
{
	register int uban;

	for (uban = 0; uban < numuba; uban++) {
		register struct uba_hd *uh = &uba_hd[uban];
		register struct uba_regs *up = uh->uh_uba;

		/*
		 * NOTE(review): a clear status register returns from the
		 * whole routine, so adapters past the first idle one are
		 * never examined; verify this is intended rather than
		 * a `continue' before changing anything here.
		 */
		if (up->uba_sr == 0)
			return;
		/* presumably write-1-to-clear of the benign CRD/LEB status bits */
		up->uba_sr = UBASR_CRD|UBASR_LEB;
		uh->uh_hangcnt++;
		/*
		 * hangcnt is decayed by ubawatch(); only sustained growth
		 * (roughly 5 seconds' worth, cf. comment above) forces a reset.
		 */
		if (uh->uh_hangcnt > 5*hz) {
			uh->uh_hangcnt = 0;
			printf("uba%d: hung\n", uban);
			ubareset(uban);
		}
	}
}
379 
380 /*
381  * This is a timeout routine which decrements the ``i forgot to
382  * interrupt'' counts, on an 11/780.  This prevents slowly growing
383  * counts from causing a UBA reset since we are interested only
384  * in hang situations.
385  */
386 ubawatch()
387 {
388 	register struct uba_hd *uh;
389 	register int uban;
390 
391 	if (panicstr)
392 		return;
393 	for (uban = 0; uban < numuba; uban++) {
394 		uh = &uba_hd[uban];
395 		if (uh->uh_hangcnt)
396 			uh->uh_hangcnt--;
397 	}
398 }
399 
400 /*
401  * This routine is called by the locore code to
402  * process a UBA error on an 11/780.  The arguments are passed
403  * on the stack, and value-result (through some trickery).
404  * In particular, the uvec argument is used for further
405  * uba processing so the result aspect of it is very important.
406  * It must not be declared register.
407  */
408 /*ARGSUSED*/
409 ubaerror(uban, uh, xx, uvec, uba)
410 	register int uban;
411 	register struct uba_hd *uh;
412 	int uvec;
413 	register struct uba_regs *uba;
414 {
415 	register sr, s;
416 
417 	if (uvec == 0) {
418 		uh->uh_zvcnt++;
419 		if (uh->uh_zvcnt > 250000) {
420 			printf("uba%d: too many zero vectors\n");
421 			ubareset(uban);
422 		}
423 		uvec = 0;
424 		return;
425 	}
426 	if (uba->uba_cnfgr & NEX_CFGFLT) {
427 		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
428 		    uban, uba->uba_sr, ubasr_bits,
429 		    uba->uba_cnfgr, NEXFLT_BITS);
430 		ubareset(uban);
431 		uvec = 0;
432 		return;
433 	}
434 	sr = uba->uba_sr;
435 	s = spl7();
436 	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
437 	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
438 	splx(s);
439 	uba->uba_sr = sr;
440 	uvec &= UBABRRVR_DIV;
441 	return;
442 }
443 #endif
444 
/*
 * This routine allows remapping of previously
 * allocated UNIBUS bdp and map resources
 * onto different memory addresses.
 * It should only be used by routines which need
 * small fixed length mappings for long periods of time
 * (like the ARPANET ACC IMP interface).
 * It only maps kernel addresses.
 */
ubaremap(uban, ubinfo, addr)
	int uban;
	register unsigned ubinfo;
	caddr_t addr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register struct pte *pte, *io;
	register int temp, bdp;
	int npf, o;

	o = (int)addr & PGOFSET;	/* byte offset of new address in its page */
	/* unpack the existing allocation (same layout as ubasetup's return) */
	bdp = (ubinfo >> 28) & 0xf;
	npf = (ubinfo >> 18) & 0x3ff;
	io = &uh->uh_uba->uba_map[(ubinfo >> 9) & 0x1ff];
	temp = (bdp << 21) | UBAMR_MRV;		/* common bits: bdp no. + map valid */

	/*
	 * If using buffered data path initiate purge
	 * of old data and set byte offset bit if next
	 * transfer will be from odd address.
	 */
	if (bdp) {
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		if (o & 1)
			temp |= UBAMR_BO;
	}

	/*
	 * Set up the map registers, leaving an invalid reg
	 * at the end to guard against wild unibus transfers.
	 */
	pte = &Sysmap[btop(((int)addr)&0x7fffffff)];	/* kernel addresses only */
	while (--npf != 0)
		*(int *)io++ = pte++->pg_pfnum | temp;
	*(int *)io = 0;

	/*
	 * Return effective UNIBUS address.
	 */
	return (ubinfo | o);
}
507