xref: /original-bsd/sys/vax/uba/uba.c (revision 92d3de31)
/*	uba.c	4.61	83/03/25	*/

#include "../machine/pte.h"

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/map.h"
#include "../h/buf.h"
#include "../h/vm.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/conf.h"
#include "../h/dk.h"
#include "../h/kernel.h"

#include "../vax/cpu.h"
#include "../vax/mtpr.h"
#include "../vax/nexus.h"
#include "../vaxuba/ubareg.h"
#include "../vaxuba/ubavar.h"

#if VAX780
char	ubasr_bits[] = UBASR_BITS;
#endif

/*
 * Do a transfer on the device given as argument.  The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 * If this routine is called with the device at the head of the
 * queue for its UBA, it will automatically remove the device
 * from the UBA queue before it returns.  If some other device is
 * given as argument, it will be added to the request queue if the
 * request cannot be started immediately.  This means that
 * passing a device which is on the queue but not at the head
 * of the request queue is likely to be a disaster.
 */
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spl6();
	if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;	/* 64-byte units */
	}
	if (uh->uh_actf == ui)
		uh->uh_actf = ui->ui_forw;
	(*um->um_driver->ud_dgo)(um);
	return (1);
rwait:
	/* couldn't get resources; queue ui so ubarelse() will retry it */
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}
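
/*
 * Illustrative sketch (not part of the original source): a driver's
 * start routine typically hands its current request to ubago() and
 * relies on the resource-wait queue when the UBA is busy:
 *
 *	if (ubago(ui))
 *		return;		(transfer started; ud_dgo was called)
 *
 * Otherwise ui is now queued on uh_actf and ubago() will be retried
 * from ubarelse() when map registers or a BDP free up.  The matching
 * ubadone() call is made from the driver's interrupt routine once
 * the transfer completes.
 */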

/*
 * Release the UNIBUS resources acquired by ubago() when a
 * transfer completes.
 */
ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}

/*
 * Allocate and set up UBA map registers and a BDP.
 * The flags say whether a BDP is needed and whether the caller
 * can't wait (e.g. if the caller is at interrupt level).
 *
 * Return value:
 *	Bits 0-8	Byte offset
 *	Bits 9-17	Start map reg. no.
 *	Bits 18-27	No. mapping reg's
 *	Bits 28-31	BDP no.
 */
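/*
 * For example (illustrative only): a request assigned BDP 1, three
 * map registers starting at map register 5, and a byte offset of 0x10
 * packs up as (1<<28)|(3<<18)|(5<<9)|0x10.  The low 18 bits,
 * (5<<9)|0x10, form the UNIBUS address at which the transfer starts.
 */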
ubasetup(uban, bp, flags)
	struct buf *bp;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int temp;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if VAX730
	if (cpu == VAX_730)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);
	o = (int)bp->b_un.b_addr & PGOFSET;
	/* one extra map register is allocated as an invalid terminator */
	npf = btoc(bp->b_bcount + o) + 1;
	a = spl6();
	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)&uh->uh_mrwant, PSWP);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		while ((bdp = ffs(uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, (long)npf, (long)reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	if ((bp->b_flags & B_PHYS) == 0)
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
	else if (bp->b_flags & B_UAREA)
		pte = &rp->p_addr[v];
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else
		pte = vtopte(rp, v);
	io = &uh->uh_uba->uba_map[reg];
	while (--npf != 0) {
		if (pte->pg_pfnum == 0)
			panic("uba zero uentry");
		*(int *)io++ = pte++->pg_pfnum | temp;
	}
	*(int *)io++ = 0;	/* invalid map register terminates the transfer */
	return (ubinfo);
}

/*
 * Non-buffer setup interface: set up a dummy buffer header
 * and call ubasetup().
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}
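
/*
 * Illustrative sketch (not in the original source): a driver mapping
 * a private buffer rather than a struct buf would typically do
 *
 *	info = uballoc(ui->ui_ubanum, addr, count, 0);
 *	... start the device at the UNIBUS address given by the
 *	    low 18 bits of info ...
 *	ubarelse(ui->ui_ubanum, &info);
 *
 * where ui is the driver's uba_device pointer and info is an int.
 */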

/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map register info is grabbed by value (and the caller's copy
 * cleared) at high ipl, since the space may be released asynchronously
 * by a uba reset on 11/780's.
 */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spl6();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;
	splx(s);		/* let interrupts in, we're safe for a while */
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)&uh->uh_bdpwant);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered, so we do this
	 * at high ipl.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;
	s = spl6();
	rmfree(uh->uh_map, (long)npf, (long)reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are devices waiting in the ubago()
	 * queue, give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)&uh->uh_mrwant);
	}
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}

/*
 * Purge the buffered data path (BDP) in use by controller um,
 * forcing any partially buffered data out to memory.
 */
ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	switch (cpu) {
#if VAX780
	case VAX_780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#if VAX750
	case VAX_750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	}
}

/*
 * Initialize the map register and buffered data path resource
 * maps for the uba described by uhp.  The 730 has no BDP's.
 */
ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	rminit(uhp->uh_map, (long)NUBMREG, (long)1, "uba", UAMSIZ);
	switch (cpu) {
#if VAX780
	case VAX_780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#if VAX750
	case VAX_750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
#if VAX730
	case VAX_730:
		break;
#endif
	}
}

/*
 * Generate a reset on uba number uban.  Then call the reset
 * routine of each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spl6();
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
		(*cdp->d_reset)(uban);
#ifdef INET
	ifubareset(uban);
#endif
	printf("\n");
	splx(s);
}
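
/*
 * Illustrative note (not in the original source): a driver's d_reset
 * entry conventionally checks whether any of its controllers live on
 * this uba, forgets any um_ubinfo it was holding (the resources were
 * just reclaimed by ubainitmaps() above), and restarts its pending
 * transfers, re-acquiring map registers through ubago()/ubasetup().
 */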

/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high, we don't care
 * if they are; they will never happen anyway.
 */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if VAX780
	case VAX_780:
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX730
	case VAX_730:
#endif
#if defined(VAX750) || defined(VAX730)
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}

#ifdef VAX780
int	ubawedgecnt = 10;
int	ubacrazy = 500;
/*
 * This routine is called by the locore code to
 * process a UBA error on an 11/780.  The arguments are passed
 * on the stack and are value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, xx, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int uvec;
	register struct uba_regs *uba;
{
	register sr, s;

	if (uvec == 0) {
		uh->uh_zvcnt++;
		if (uh->uh_zvcnt > 250000) {
			printf("uba%d: too many zero vectors\n", uban);
			ubareset(uban);
		}
		uvec = 0;
		return;
	}
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;
		return;
	}
	sr = uba->uba_sr;
	s = spl7();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;
	uvec &= UBABRRVR_DIV;
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif

/*
 * This routine is called by a driver for a device with on-board Unibus
 * memory.  It removes the memory block from the Unibus resource map
 * and clears the map registers for the block.
 *
 * Arguments are the Unibus number, the Unibus address of the memory
 * block, its size in blocks of 512 bytes, and a flag indicating whether
 * to allocate the Unibus space from the resource map or whether it
 * already has been.
 *
 * Returns > 0 if successful, 0 if not.
 */
ubamem(uban, addr, size, doalloc)
	int uban, addr, size, doalloc;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int *m;
	register int i, a, s;

	if (doalloc) {
		s = spl6();
		a = rmget(uh->uh_map, size, (addr>>9)+1); /* starts at ONE! */
		splx(s);
	} else
		a = (addr>>9)+1;
	if (a) {
		m = (int *) &uh->uh_uba->uba_map[a-1];
		for (i=0; i<size; i++)
			*m++ = 0;	/* All off, especially 'valid' */
#if VAX780
		if (cpu == VAX_780) {		/* map disable */
			i = (addr+size*512+8191)/8192;
			uh->uh_uba->uba_cr |= i<<26;
		}
#endif
	}
	return(a);
}
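
/*
 * Illustrative sketch (not in the original source): an attach routine
 * for a device with 4Kb of on-board Unibus memory at Unibus address
 * umaddr might reserve and disable the corresponding map registers with
 *
 *	if (ubamem(ui->ui_ubanum, umaddr, 4096/512, 1) == 0)
 *		... the space was already in use ...
 */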

#include "ik.h"
#if NIK > 0
/*
 * Map a virtual address into the user's address space.  Actually
 * all we do is set the protection on the particular page of
 * memory involved to allow user-mode writes.
 */
maptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
}

/*
 * Restore the normal kernel-only protection on the page.
 */
unmaptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
}
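
/*
 * Illustrative note (not in the original source): a driver using these
 * would bracket the interval during which a user process may write the
 * page directly, calling maptouser(addr) before handing the page out
 * and unmaptouser(addr) when reclaiming it.
 */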
#endif