/*
 * Copyright (c) 1982 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)uba.c	6.5 (Berkeley) 06/08/85
 */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "map.h"
#include "buf.h"
#include "vm.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "conf.h"
#include "dk.h"
#include "kernel.h"

#include "../vax/cpu.h"
#include "../vax/mtpr.h"
#include "../vax/nexus.h"
#include "ubareg.h"
#include "ubavar.h"

#if VAX780
char	ubasr_bits[] = UBASR_BITS;
#endif

/*
 * Do a transfer on the device given as argument.  The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 * If this routine is called with the device at the head of the
 * queue for its UBA, it automatically removes the device from the
 * UBA queue before it returns.  If some other device is given
 * as argument, it is added to the request queue if the
 * request cannot be started immediately.  This means that
 * passing a device which is on the queue but not at the head
 * of the request queue is likely to be a disaster.
 */
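/*
 * A hedged usage sketch, not part of the original source: a driver
 * that has just queued a transfer for device ui kicks off the
 * resource dance with
 *
 *	(void) ubago(ui);
 *
 * If the resources are busy, ui is queued here and ubago is retried
 * from ubarelse() when another transfer releases its resources.
 */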
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spl6();
	if ((um->um_driver->ud_xclu && uh->uh_users > 0) || uh->uh_xclu)
		goto rwait;
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	if (uh->uh_actf == ui)
		uh->uh_actf = ui->ui_forw;
	(*um->um_driver->ud_dgo)(um);
	return (1);
rwait:
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}

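/*
 * A transfer on controller um is complete: release any exclusive-use
 * claim on the UBA, drop the user count, and free the map registers
 * and buffered data path via ubarelse().
 */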
ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}

/*
 * Allocate and set up UBA map registers, and a BDP if one is needed.
 * The flags say whether a BDP is required and whether the caller
 * can't wait (e.g. if the caller is at interrupt level).
 *
 * Return value:
 *	Bits 0-8	Byte offset
 *	Bits 9-17	Start map reg. no.
 *	Bits 18-27	No. mapping reg's
 *	Bits 28-31	BDP no.
 */
ubasetup(uban, bp, flags)
	int uban;
	struct buf *bp;
	int flags;
{
	register struct uba_hd *uh = &uba_hd[uban];
	int pfnum, temp;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if VAX730
	if (cpu == VAX_730)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);
	o = (int)bp->b_un.b_addr & PGOFSET;
	npf = btoc(bp->b_bcount + o) + 1;	/* one extra for an invalid ``fence'' entry */
	a = spl6();
	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)&uh->uh_mrwant, PSWP);
	}
	if ((flags & UBA_NEED16) && reg + npf > 128) {
		/*
		 * A 16-bit device can address only the first
		 * 128 map registers (64K bytes).
		 * Could hang around and try again (if we can ever succeed).
		 * Won't help any current device...
		 */
		rmfree(uh->uh_map, (long)npf, (long)reg);
		splx(a);
		return (0);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		while ((bdp = ffs(uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, (long)npf, (long)reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;		/* resource maps are 1-origin; map registers are 0-origin */
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	temp = (bdp << 21) | UBAMR_MRV;	/* map register template: BDP no. + valid bit */
	if (bdp && (o & 01))
		temp |= UBAMR_BO;	/* byte offset for odd-aligned transfers */
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	if ((bp->b_flags & B_PHYS) == 0)
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
	else if (bp->b_flags & B_UAREA)
		pte = &rp->p_addr[v];
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else
		pte = vtopte(rp, v);
	io = &uh->uh_uba->uba_map[reg];
	while (--npf != 0) {
		pfnum = pte->pg_pfnum;
		if (pfnum == 0)
			panic("uba zero uentry");
		pte++;
		*(int *)io++ = pfnum | temp;
	}
	*(int *)io++ = 0;	/* invalid entry fences the end of the transfer */
	return (ubinfo);
}

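/*
 * Illustrative only, not part of the original source: the packed
 * ubinfo word built above (and decoded by hand in ubarelse() and
 * ubapurge() below) could be picked apart with macros like these.
 */
#define	UBAI_OFF(i)	((i) & 0x1ff)		/* byte offset within first page */
#define	UBAI_MR(i)	(((i) >> 9) & 0x1ff)	/* first map register number */
#define	UBAI_NMR(i)	(((i) >> 18) & 0x3ff)	/* number of map registers */
#define	UBAI_BDP(i)	(((i) >> 28) & 0xf)	/* buffered data path number */
#define	UBAI_ADDR(i)	((i) & 0x3ffff)		/* 18-bit UNIBUS start address */
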
/*
 * Non-buffer setup interface... set up a dummy buffer and call ubasetup.
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}

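/*
 * A hedged usage sketch, not part of the original source: a driver
 * with a private transfer area can map it in and release it later.
 * The low 18 bits of the returned info word form the UNIBUS address
 * of the start of the transfer.
 *
 *	int info;
 *
 *	info = uballoc(uban, addr, cnt, UBA_NEEDBDP);
 *	if (info == 0)
 *		return;			(* resources busy; try later *)
 *	... start DMA at UNIBUS address (info & 0x3ffff) ...
 *	ubarelse(uban, &info);
 */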
/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map-register info is passed by reference so that it can be
 * sampled and cleared atomically here: a uba reset on an 11/780 may
 * release the space asynchronously, and it must not be freed twice.
 */
ubarelse(uban, amr)
	int uban;
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spl6();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)&uh->uh_bdpwant);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered,
	 * nor can the registers be freed twice.
	 * Unblock interrupts once this is done.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;	/* 1-origin for the resource map */
	rmfree(uh->uh_map, (long)npf, (long)reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)&uh->uh_mrwant);
	}
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}

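/*
 * Purge the buffered data path held by controller um, forcing any
 * partially assembled data in the BDP out to memory so that the
 * transfer is complete before the driver looks at it.
 */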
ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	switch (cpu) {
#if VAX780
	case VAX_780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#if VAX750
	case VAX_750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	}
}

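/*
 * Initialize the map-register resource map and the free-BDP mask
 * for the uba described by uhp.  The 730 has no buffered data paths,
 * so its free mask is left empty.
 */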
ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	rminit(uhp->uh_map, (long)NUBMREG, (long)1, "uba", UAMSIZ);
	switch (cpu) {
#if VAX780
	case VAX_780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#if VAX750
	case VAX_750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
#if VAX730
	case VAX_730:
		break;
#endif
	}
}

/*
 * Generate a reset on uba number uban.  Then
 * call each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spl6();
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	ubameminit(uban);
	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
		(*cdp->d_reset)(uban);
#ifdef INET
	ifubareset(uban);
#endif
	printf("\n");
	splx(s);
}

/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high we don't care
 * if they are; they will never happen anyway.
 */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if VAX780
	case VAX_780:
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX730
	case VAX_730:
#endif
#if defined(VAX750) || defined(VAX730)
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}

#ifdef VAX780
int	ubawedgecnt = 10;
int	ubacrazy = 500;
int	zvcnt_max = 5000;	/* in 8 sec */
int	zvcnt_total;
long	zvcnt_time;
/*
 * This routine is called by the locore code to
 * process a UBA error on an 11/780.  The arguments are passed
 * on the stack and are value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing, so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, ipl, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int ipl, uvec;
	register struct uba_regs *uba;
{
	register int sr, s;

	if (uvec == 0) {
		/*
		 * Zero vector: count them, and reset the UBA if more
		 * than zvcnt_max arrive within an 8 second window.
		 */
		long	dt = time.tv_sec - zvcnt_time;
		zvcnt_total++;
		if (dt > 8) {
			zvcnt_time = time.tv_sec;
			uh->uh_zvcnt = 0;
		}
		if (++uh->uh_zvcnt > zvcnt_max) {
			printf("uba%d: too many zero vectors (%d in <%d sec)\n",
				uban, uh->uh_zvcnt, dt + 1);
			printf("\tIPL 0x%x\n\tcnfgr: %b  Adapter Code: 0x%x\n",
				ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
				uba->uba_cnfgr&0xff);
			printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
				uba->uba_sr, ubasr_bits, uba->uba_dcr,
				(uba->uba_dcr&0x8000000)?"":"NOT ");
			ubareset(uban);
		}
		return;
	}
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;
		return;
	}
	sr = uba->uba_sr;
	s = spl7();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;	/* write the bits back to clear the error */
	uvec &= UBABRRVR_DIV;
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif

/*
 * Look for devices with unibus memory, allow them to configure, then disable
 * map registers as necessary.  Called during autoconfiguration and ubareset.
 * The device ubamem routine returns 0 on success, 1 on success if it is fully
 * configured (has no csr or interrupt, so doesn't need to be probed),
 * and -1 on failure.
 */
ubameminit(uban)
	int uban;
{
	register struct uba_device *ui;
	register struct uba_hd *uh = &uba_hd[uban];
	caddr_t umembase = umem[uban] + 0x3e000, addr;
#define	ubaoff(off)	((int)(off) & 0x1fff)

	uh->uh_lastmem = 0;
	for (ui = ubdinit; ui->ui_driver; ui++) {
		if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
			continue;
		if (ui->ui_driver->ud_ubamem) {
			/*
			 * During autoconfiguration, need to fudge ui_addr.
			 */
			addr = ui->ui_addr;
			ui->ui_addr = umembase + ubaoff(addr);
			switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
			case 1:
				ui->ui_alive = 1;
				/* FALLTHROUGH */
			case 0:
				ui->ui_ubanum = uban;
				break;
			}
			ui->ui_addr = addr;
		}
	}
#if VAX780
	/*
	 * On a 780, throw away any map registers disabled by rounding
	 * the map disable in the configuration register
	 * up to the next 8K boundary, or below the last unibus memory.
	 */
	if (cpu == VAX_780) {
		register int i;

		i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
		while (i)
			(void) rmget(uh->uh_map, 1, i--);
	}
#endif
}

/*
 * Allocate UNIBUS memory.  Allocates and initializes
 * sufficient mapping registers for access.  On a 780,
 * the configuration register is set up to disable UBA
 * response on DMA transfers to addresses controlled
 * by the disabled mapping registers.
 * On a 780, this should only be called from ubameminit, or in ascending
 * order from 0 with 8K-sized and -aligned addresses; freeing memory that
 * isn't the last unibus memory would free unusable map registers.
 * Doalloc is 1 to allocate, 0 to deallocate.
 */
ubamem(uban, addr, npg, doalloc)
	int uban, addr, npg, doalloc;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int a;
	int s;

	a = (addr >> 9) + 1;	/* map register index, 1-origin for the resource map */
	s = spl6();
	if (doalloc)
		a = rmget(uh->uh_map, npg, a);
	else
		rmfree(uh->uh_map, (long)npg, (long)a);
	splx(s);
	if (a) {
		register int i, *m;

		m = (int *)&uh->uh_uba->uba_map[a - 1];
		for (i = 0; i < npg; i++)
			*m++ = 0;	/* All off, especially 'valid' */
		i = addr + npg * 512;
		if (doalloc && i > uh->uh_lastmem)
			uh->uh_lastmem = i;
		else if (doalloc == 0 && i == uh->uh_lastmem)
			uh->uh_lastmem = addr;
#if VAX780
		/*
		 * On a 780, set up the map register disable
		 * field in the configuration register.  Beware
		 * of callers that request memory ``out of order''
		 * or in sections other than 8K multiples.
		 * Ubameminit handles such requests properly, however.
		 */
		if (cpu == VAX_780) {
			i = uh->uh_uba->uba_cr &~ 0x7c000000;
			i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
			uh->uh_uba->uba_cr = i;
		}
#endif
	}
	return (a);
}

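/*
 * A hedged usage sketch, not part of the original source: a driver
 * whose board responds to 8K bytes of UNIBUS address space at 0x3e000
 * might claim the corresponding map registers (16 pages of 512 bytes)
 * and release them again with
 *
 *	if (ubamem(uban, 0x3e000, 16, 1) == 0)
 *		return;			(* space already in use *)
 *	...
 *	(void) ubamem(uban, 0x3e000, 16, 0);
 */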
#include "ik.h"
#if NIK > 0
/*
 * Map a virtual address into the user's address space.  Actually all
 * we do is turn on user-mode write access in the protection bits for
 * the particular page of memory involved.
 */
maptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
}

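/*
 * Undo maptouser(): restore the normal kernel-only write protection
 * on the page.
 */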
unmaptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
}
#endif
587