12f008b6eSmckusick /*
2a92930acSbostic * Copyright (c) 1982, 1986 The Regents of the University of California.
3a92930acSbostic * All rights reserved.
42f008b6eSmckusick *
516a9ee17Sbostic * %sccs.include.redist.c%
6a92930acSbostic *
7*880d6a2dSbostic * @(#)uba.c 7.10 (Berkeley) 12/16/90
82f008b6eSmckusick */
9dc85860cSsam
10*880d6a2dSbostic #include "sys/param.h"
11*880d6a2dSbostic #include "sys/systm.h"
12*880d6a2dSbostic #include "sys/map.h"
13*880d6a2dSbostic #include "sys/buf.h"
14*880d6a2dSbostic #include "sys/vm.h"
15*880d6a2dSbostic #include "sys/user.h"
16*880d6a2dSbostic #include "sys/proc.h"
17*880d6a2dSbostic #include "sys/conf.h"
18*880d6a2dSbostic #include "sys/dkstat.h"
19*880d6a2dSbostic #include "sys/kernel.h"
20afeb666cSwnj
21*880d6a2dSbostic #include "../include/pte.h"
22*880d6a2dSbostic #include "../include/cpu.h"
23*880d6a2dSbostic #include "../include/mtpr.h"
24a072ee87Sroot #include "../vax/nexus.h"
25737b275eSbloom #include "ubareg.h"
26737b275eSbloom #include "ubavar.h"
27a072ee87Sroot
28a2f43acbSkarels #ifdef DW780
29977e5eabSwnj char ubasr_bits[] = UBASR_BITS;
30977e5eabSwnj #endif
31977e5eabSwnj
32fc920406Skarels #define spluba spl7 /* IPL 17 */
33fc920406Skarels
34afeb666cSwnj /*
35afeb666cSwnj * Do transfer on device argument. The controller
36afeb666cSwnj * and uba involved are implied by the device.
37afeb666cSwnj * We queue for resource wait in the uba code if necessary.
38afeb666cSwnj * We return 1 if the transfer was started, 0 if it was not.
39c907f4b5Sbostic *
40c907f4b5Sbostic * The onq argument must be zero iff the device is not on the
41c907f4b5Sbostic * queue for this UBA. If onq is set, the device must be at the
42c907f4b5Sbostic * head of the queue. In any case, if the transfer is started,
43c907f4b5Sbostic * the device will be off the queue, and if not, it will be on.
44c907f4b5Sbostic *
45c907f4b5Sbostic * Drivers that allocate one BDP and hold it for some time should
46c907f4b5Sbostic * set ud_keepbdp. In this case um_bdp tells which BDP is allocated
47c907f4b5Sbostic * to the controller, unless it is zero, indicating that the controller
48c907f4b5Sbostic * does not now have a BDP.
49afeb666cSwnj */
ubaqueue(ui,onq)50c907f4b5Sbostic ubaqueue(ui, onq)
51f8444eedSwnj register struct uba_device *ui;
52c907f4b5Sbostic int onq;
53afeb666cSwnj {
54f8444eedSwnj register struct uba_ctlr *um = ui->ui_mi;
55afeb666cSwnj register struct uba_hd *uh;
56c907f4b5Sbostic register struct uba_driver *ud;
57afeb666cSwnj register int s, unit;
58afeb666cSwnj
59afeb666cSwnj uh = &uba_hd[um->um_ubanum];
60c907f4b5Sbostic ud = um->um_driver;
	/* Block UBA interrupts while manipulating shared adapter state. */
61fc920406Skarels s = spluba();
62c907f4b5Sbostic /*
63c907f4b5Sbostic * Honor exclusive BDP use requests.
64c907f4b5Sbostic */
65c907f4b5Sbostic if (ud->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
664b6dd631Swnj goto rwait;
67c907f4b5Sbostic if (ud->ud_keepbdp) {
68c907f4b5Sbostic /*
69c907f4b5Sbostic * First get just a BDP (though in fact it comes with
70c907f4b5Sbostic * one map register too).
71c907f4b5Sbostic */
72c907f4b5Sbostic if (um->um_bdp == 0) {
73c907f4b5Sbostic um->um_bdp = uballoc(um->um_ubanum,
74c907f4b5Sbostic (caddr_t)0, 0, UBA_NEEDBDP|UBA_CANTWAIT);
75c907f4b5Sbostic if (um->um_bdp == 0)
76c907f4b5Sbostic goto rwait;
77c907f4b5Sbostic }
78c907f4b5Sbostic /* now share it with this transfer */
79c907f4b5Sbostic um->um_ubinfo = ubasetup(um->um_ubanum,
80c907f4b5Sbostic um->um_tab.b_actf->b_actf,
81c907f4b5Sbostic um->um_bdp|UBA_HAVEBDP|UBA_CANTWAIT);
82c907f4b5Sbostic } else
83c907f4b5Sbostic um->um_ubinfo = ubasetup(um->um_ubanum,
84c907f4b5Sbostic um->um_tab.b_actf->b_actf, UBA_NEEDBDP|UBA_CANTWAIT);
	/* ubasetup() returned 0: no map registers/BDP free now; queue and wait. */
854b6dd631Swnj if (um->um_ubinfo == 0)
864b6dd631Swnj goto rwait;
874b6dd631Swnj uh->uh_users++;
88c907f4b5Sbostic if (ud->ud_xclu)
894b6dd631Swnj uh->uh_xclu = 1;
90afeb666cSwnj splx(s);
	/* Charge the transfer to the device's disk-statistics slot, if it has one. */
91afeb666cSwnj if (ui->ui_dk >= 0) {
92afeb666cSwnj unit = ui->ui_dk;
93afeb666cSwnj dk_busy |= 1<<unit;
9425c822f3Swnj dk_xfer[unit]++;
9525c822f3Swnj dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
96afeb666cSwnj }
	/* Transfer started: if the device was at the queue head, unlink it. */
97c907f4b5Sbostic if (onq)
98afeb666cSwnj uh->uh_actf = ui->ui_forw;
99c907f4b5Sbostic (*ud->ud_dgo)(um);
100afeb666cSwnj return (1);
	/*
	 * Resource wait: append the device to the UBA's queue (unless it is
	 * already on it, per the onq contract above) and report failure.
	 */
1014b6dd631Swnj rwait:
102c907f4b5Sbostic if (!onq) {
1034b6dd631Swnj ui->ui_forw = NULL;
1044b6dd631Swnj if (uh->uh_actf == NULL)
1054b6dd631Swnj uh->uh_actf = ui;
1064b6dd631Swnj else
1074b6dd631Swnj uh->uh_actl->ui_forw = ui;
1084b6dd631Swnj uh->uh_actl = ui;
1094b6dd631Swnj }
1104b6dd631Swnj splx(s);
1114b6dd631Swnj return (0);
1124b6dd631Swnj }
1134b6dd631Swnj
/*
 * Release the UBA resources held by controller um after a transfer:
 * clear exclusive use if this driver claimed it, drop the user count,
 * and free the mapping info (masking out the BDP bits first when the
 * driver keeps its BDP across transfers).
 */
ubadone(um)1144b6dd631Swnj ubadone(um)
115f8444eedSwnj register struct uba_ctlr *um;
1164b6dd631Swnj {
1174b6dd631Swnj register struct uba_hd *uh = &uba_hd[um->um_ubanum];
1184b6dd631Swnj
1194d685d01Swnj if (um->um_driver->ud_xclu)
1204b6dd631Swnj uh->uh_xclu = 0;
1214b6dd631Swnj uh->uh_users--;
122c907f4b5Sbostic if (um->um_driver->ud_keepbdp)
123c907f4b5Sbostic um->um_ubinfo &= ~BDPMASK; /* keep BDP for misers */
1244b6dd631Swnj ubarelse(um->um_ubanum, &um->um_ubinfo);
125afeb666cSwnj }
126f07134f6Sbill
127f07134f6Sbill /*
1283197845dSwnj * Allocate and setup UBA map registers, and bdp's
1293197845dSwnj * Flags says whether bdp is needed, whether the caller can't
1303197845dSwnj * wait (e.g. if the caller is at interrupt level).
131ae717e01Skarels * Return value encodes map register plus page offset,
132ae717e01Skarels * bdp number and number of map registers.
133f07134f6Sbill */
ubasetup(uban,bp,flags)1343197845dSwnj ubasetup(uban, bp, flags)
135a8407c55Skarels int uban;
136a8407c55Skarels register struct buf *bp;
137a8407c55Skarels register int flags;
138f07134f6Sbill {
1393197845dSwnj register struct uba_hd *uh = &uba_hd[uban];
140f07134f6Sbill register struct pte *pte, *io;
141a8407c55Skarels register int npf;
142a8407c55Skarels int pfnum, temp;
143a8407c55Skarels int reg, bdp;
144a8407c55Skarels unsigned v;
145f07134f6Sbill struct proc *rp;
146f07134f6Sbill int a, o, ubinfo;
147f07134f6Sbill
	/* DW730 and QBA adapters have no buffered data paths; ignore the request. */
148a2f43acbSkarels #ifdef DW730
149a2f43acbSkarels if (uh->uh_type == DW730)
150a2f43acbSkarels flags &= ~UBA_NEEDBDP;
151a2f43acbSkarels #endif
152a2f43acbSkarels #ifdef QBA
153a2f43acbSkarels if (uh->uh_type == QBA)
1542d2574b2Swnj flags &= ~UBA_NEEDBDP;
1552d2574b2Swnj #endif
	/*
	 * npf counts the pages spanned by the transfer plus one extra
	 * map register, which is zeroed below as an invalid terminator.
	 */
156f07134f6Sbill o = (int)bp->b_un.b_addr & PGOFSET;
157f07134f6Sbill npf = btoc(bp->b_bcount + o) + 1;
158ae717e01Skarels if (npf > UBA_MAXNMR)
159ae717e01Skarels panic("uba xfer too big");
160fc920406Skarels a = spluba();
	/* Allocate contiguous map registers, sleeping unless UBA_CANTWAIT. */
16188f3e5d6Sroot while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
16265e64c48Swnj if (flags & UBA_CANTWAIT) {
16365e64c48Swnj splx(a);
1643197845dSwnj return (0);
16565e64c48Swnj }
1663197845dSwnj uh->uh_mrwant++;
16730bcd34cSsam sleep((caddr_t)&uh->uh_mrwant, PSWP);
168f07134f6Sbill }
1699d0e0165Skarels if ((flags & UBA_NEED16) && reg + npf > 128) {
1709d0e0165Skarels /*
1719d0e0165Skarels * Could hang around and try again (if we can ever succeed).
1729d0e0165Skarels * Won't help any current device...
1739d0e0165Skarels */
1749d0e0165Skarels rmfree(uh->uh_map, (long)npf, (long)reg);
1759d0e0165Skarels splx(a);
1769d0e0165Skarels return (0);
1779d0e0165Skarels }
178f07134f6Sbill bdp = 0;
	/* Grab a free buffered data path (1-based bit index from ffs). */
1793197845dSwnj if (flags & UBA_NEEDBDP) {
180fc920406Skarels while ((bdp = ffs((long)uh->uh_bdpfree)) == 0) {
1813197845dSwnj if (flags & UBA_CANTWAIT) {
18288f3e5d6Sroot rmfree(uh->uh_map, (long)npf, (long)reg);
18365e64c48Swnj splx(a);
1843197845dSwnj return (0);
1853197845dSwnj }
1863197845dSwnj uh->uh_bdpwant++;
18730bcd34cSsam sleep((caddr_t)&uh->uh_bdpwant, PSWP);
1883197845dSwnj }
18919ba0913Swnj uh->uh_bdpfree &= ~(1 << (bdp-1));
190265965f0Swnj } else if (flags & UBA_HAVEBDP)
191265965f0Swnj bdp = (flags >> 28) & 0xf;
192f07134f6Sbill splx(a);
193 reg--;
	/* Pack offset, first map register, count and BDP into the return value. */
194ae717e01Skarels ubinfo = UBAI_INFO(o, reg, npf, bdp);
195f8444eedSwnj temp = (bdp << 21) | UBAMR_MRV;
196f07134f6Sbill if (bdp && (o & 01))
197f8444eedSwnj temp |= UBAMR_BO;
	/*
	 * Locate the page table entries describing the buffer: kernel
	 * virtual, page-table pages, u-area, or process virtual memory.
	 */
198d04d7cc5Swnj if ((bp->b_flags & B_PHYS) == 0)
199a8407c55Skarels pte = kvtopte(bp->b_un.b_addr);
200d04d7cc5Swnj else if (bp->b_flags & B_PAGET)
201f07134f6Sbill pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
202a8407c55Skarels else {
203a8407c55Skarels rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
204a8407c55Skarels v = btop(bp->b_un.b_addr);
205a8407c55Skarels if (bp->b_flags & B_UAREA)
206a8407c55Skarels pte = &rp->p_addr[v];
207f07134f6Sbill else
208f07134f6Sbill pte = vtopte(rp, v);
209a8407c55Skarels }
	/* Load the adapter map registers; terminate with an invalid entry. */
210a2f43acbSkarels io = &uh->uh_mr[reg];
211a8407c55Skarels while (--npf > 0) {
21280ada572Smckusick pfnum = pte->pg_pfnum;
21380ada572Smckusick if (pfnum == 0)
214f07134f6Sbill panic("uba zero uentry");
21580ada572Smckusick pte++;
21680ada572Smckusick *(int *)io++ = pfnum | temp;
217f07134f6Sbill }
218a8407c55Skarels *(int *)io = 0;
219f07134f6Sbill return (ubinfo);
220f07134f6Sbill }
221f07134f6Sbill
222f07134f6Sbill /*
223afeb666cSwnj * Non buffer setup interface... set up a buffer and call ubasetup.
224f07134f6Sbill */
/*
 * Map an arbitrary kernel buffer (addr, bcnt) on uba uban by wrapping
 * it in a throwaway struct buf and handing it to ubasetup().  Returns
 * ubasetup()'s packed map info, or 0 on failure.
 */
uballoc(uban,addr,bcnt,flags)2253197845dSwnj uballoc(uban, addr, bcnt, flags)
2262bb52ab1Swnj int uban;
227f07134f6Sbill caddr_t addr;
2282bb52ab1Swnj int bcnt, flags;
229f07134f6Sbill {
230fb44fa03Sbill struct buf ubabuf;
231f07134f6Sbill
232f07134f6Sbill ubabuf.b_un.b_addr = addr;
233f07134f6Sbill ubabuf.b_flags = B_BUSY;
234f07134f6Sbill ubabuf.b_bcount = bcnt;
235fb44fa03Sbill /* that's all the fields ubasetup() needs */
2363197845dSwnj return (ubasetup(uban, &ubabuf, flags));
237f07134f6Sbill }
238f07134f6Sbill
23940c69d8cSwnj /*
240afeb666cSwnj * Release resources on uba uban, and then unblock resource waiters.
241afeb666cSwnj * The map register parameter is by value since we need to block
242afeb666cSwnj * against uba resets on 11/780's.
24340c69d8cSwnj */
ubarelse(uban,amr)2443197845dSwnj ubarelse(uban, amr)
24540c69d8cSwnj int *amr;
246f07134f6Sbill {
2473197845dSwnj register struct uba_hd *uh = &uba_hd[uban];
248afeb666cSwnj register int bdp, reg, npf, s;
24940c69d8cSwnj int mr;
250f07134f6Sbill
251afeb666cSwnj /*
252afeb666cSwnj * Carefully see if we should release the space, since
253afeb666cSwnj * it may be released asynchronously at uba reset time.
254afeb666cSwnj */
255fc920406Skarels s = spluba();
25640c69d8cSwnj mr = *amr;
25740c69d8cSwnj if (mr == 0) {
258afeb666cSwnj /*
259afeb666cSwnj * A ubareset() occurred before we got around
260afeb666cSwnj * to releasing the space... no need to bother.
261afeb666cSwnj */
262afeb666cSwnj splx(s);
26340c69d8cSwnj return;
26440c69d8cSwnj }
	/* Claim the info word before releasing anything, to close the race. */
265b13156d0Swnj *amr = 0;
266ae717e01Skarels bdp = UBAI_BDP(mr);
	/* Purge and free the buffered data path, per adapter type. */
267f07134f6Sbill if (bdp) {
268a2f43acbSkarels switch (uh->uh_type) {
269a8407c55Skarels #ifdef DWBUA
270a8407c55Skarels case DWBUA:
271a8407c55Skarels BUA(uh->uh_uba)->bua_dpr[bdp] |= BUADPR_PURGE;
272a8407c55Skarels break;
273a8407c55Skarels #endif
274a2f43acbSkarels #ifdef DW780
275a2f43acbSkarels case DW780:
276f8444eedSwnj uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
277ae6a5a61Skre break;
278ae6a5a61Skre #endif
279a2f43acbSkarels #ifdef DW750
280a2f43acbSkarels case DW750:
281f8444eedSwnj uh->uh_uba->uba_dpr[bdp] |=
282f8444eedSwnj UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
283ae6a5a61Skre break;
284ae6a5a61Skre #endif
285a2f43acbSkarels default:
286a2f43acbSkarels break;
287ae6a5a61Skre }
288afeb666cSwnj uh->uh_bdpfree |= 1 << (bdp-1); /* atomic */
2893197845dSwnj if (uh->uh_bdpwant) {
2903197845dSwnj uh->uh_bdpwant = 0;
29130bcd34cSsam wakeup((caddr_t)&uh->uh_bdpwant);
292f07134f6Sbill }
293f07134f6Sbill }
294afeb666cSwnj /*
295afeb666cSwnj * Put back the registers in the resource map.
2969d0e0165Skarels * The map code must not be reentered,
2979d0e0165Skarels * nor can the registers be freed twice.
2989d0e0165Skarels * Unblock interrupts once this is done.
299afeb666cSwnj */
300ae717e01Skarels npf = UBAI_NMR(mr);
301ae717e01Skarels reg = UBAI_MR(mr) + 1;
30288f3e5d6Sroot rmfree(uh->uh_map, (long)npf, (long)reg);
303afeb666cSwnj splx(s);
304afeb666cSwnj
305afeb666cSwnj /*
306afeb666cSwnj * Wakeup sleepers for map registers,
307afeb666cSwnj * and also, if there are processes blocked in dgo(),
308afeb666cSwnj * give them a chance at the UNIBUS.
309afeb666cSwnj */
3103197845dSwnj if (uh->uh_mrwant) {
3113197845dSwnj uh->uh_mrwant = 0;
31230bcd34cSsam wakeup((caddr_t)&uh->uh_mrwant);
313f07134f6Sbill }
	/* Restart queued devices until one fails to get resources again. */
314c907f4b5Sbostic while (uh->uh_actf && ubaqueue(uh->uh_actf, 1))
315afeb666cSwnj ;
316f07134f6Sbill }
317f07134f6Sbill
/*
 * Flush (purge) the buffered data path currently assigned to
 * controller um, using the purge mechanism appropriate to the
 * adapter type.  No-op for adapter types without BDPs.
 */
ubapurge(um)31845644aa9Swnj ubapurge(um)
319f8444eedSwnj register struct uba_ctlr *um;
32045644aa9Swnj {
32145644aa9Swnj register struct uba_hd *uh = um->um_hd;
322ae717e01Skarels register int bdp = UBAI_BDP(um->um_ubinfo);
32345644aa9Swnj
324a2f43acbSkarels switch (uh->uh_type) {
325a8407c55Skarels #ifdef DWBUA
326a8407c55Skarels case DWBUA:
327a8407c55Skarels BUA(uh->uh_uba)->bua_dpr[bdp] |= BUADPR_PURGE;
328a8407c55Skarels break;
329a8407c55Skarels #endif
330a2f43acbSkarels #ifdef DW780
331a2f43acbSkarels case DW780:
332f8444eedSwnj uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
33345644aa9Swnj break;
33445644aa9Swnj #endif
335a2f43acbSkarels #ifdef DW750
336a2f43acbSkarels case DW750:
337f8444eedSwnj uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
33845644aa9Swnj break;
33945644aa9Swnj #endif
340a2f43acbSkarels default:
341a2f43acbSkarels break;
34245644aa9Swnj }
34345644aa9Swnj }
34445644aa9Swnj
/*
 * Initialize the resource maps for a UBA: set up the map-register
 * resource map (clamped to UBA_MAXMR registers) and mark all of the
 * adapter's buffered data paths free, per adapter type.
 */
ubainitmaps(uhp)345a0b3e3b5Swnj ubainitmaps(uhp)
346a0b3e3b5Swnj register struct uba_hd *uhp;
347a0b3e3b5Swnj {
348a0b3e3b5Swnj
349ae717e01Skarels if (uhp->uh_memsize > UBA_MAXMR)
350ae717e01Skarels uhp->uh_memsize = UBA_MAXMR;
351a2f43acbSkarels rminit(uhp->uh_map, (long)uhp->uh_memsize, (long)1, "uba", UAMSIZ);
352a2f43acbSkarels switch (uhp->uh_type) {
353a8407c55Skarels #ifdef DWBUA
354a8407c55Skarels case DWBUA:
355a8407c55Skarels uhp->uh_bdpfree = (1<<NBDPBUA) - 1;
356a8407c55Skarels break;
357a8407c55Skarels #endif
358a2f43acbSkarels #ifdef DW780
359a2f43acbSkarels case DW780:
360a0b3e3b5Swnj uhp->uh_bdpfree = (1<<NBDP780) - 1;
361a0b3e3b5Swnj break;
362a0b3e3b5Swnj #endif
363a2f43acbSkarels #ifdef DW750
364a2f43acbSkarels case DW750:
365a0b3e3b5Swnj uhp->uh_bdpfree = (1<<NBDP750) - 1;
366a0b3e3b5Swnj break;
367a0b3e3b5Swnj #endif
368a2f43acbSkarels default:
369a0b3e3b5Swnj break;
370a0b3e3b5Swnj }
371a0b3e3b5Swnj }
372a0b3e3b5Swnj
373afeb666cSwnj /*
374afeb666cSwnj * Generate a reset on uba number uban. Then
375afeb666cSwnj * call each device in the character device table,
376afeb666cSwnj * giving it a chance to clean up so as to be able to continue.
377afeb666cSwnj */
ubareset(uban)3783197845dSwnj ubareset(uban)
379afeb666cSwnj int uban;
380dddbc201Sbill {
381dddbc201Sbill register struct cdevsw *cdp;
382686485b2Swnj register struct uba_hd *uh = &uba_hd[uban];
3836015de8dSbill int s;
384dddbc201Sbill
	/* Discard all outstanding resource state for this adapter... */
385fc920406Skarels s = spluba();
386686485b2Swnj uh->uh_users = 0;
387686485b2Swnj uh->uh_zvcnt = 0;
388686485b2Swnj uh->uh_xclu = 0;
389686485b2Swnj uh->uh_actf = uh->uh_actl = 0;
390686485b2Swnj uh->uh_bdpwant = 0;
391686485b2Swnj uh->uh_mrwant = 0;
392a0b3e3b5Swnj ubainitmaps(uh);
	/* ...and release anyone sleeping on the now-reinitialized resources. */
393686485b2Swnj wakeup((caddr_t)&uh->uh_bdpwant);
394686485b2Swnj wakeup((caddr_t)&uh->uh_mrwant);
395977e5eabSwnj printf("uba%d: reset", uban);
396686485b2Swnj ubainit(uh->uh_uba);
3979d0e0165Skarels ubameminit(uban);
	/* Give every character device driver a chance to recover. */
3986bb7e6a5Ssam for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
3993197845dSwnj (*cdp->d_reset)(uban);
400fe1af64cSwnj ifubareset(uban);
401dddbc201Sbill printf("\n");
40243ebcfccSbill splx(s);
403dddbc201Sbill }
4043197845dSwnj
405afeb666cSwnj /*
406afeb666cSwnj * Init a uba. This is called with a pointer
407afeb666cSwnj * rather than a virtual address since it is called
408afeb666cSwnj * by code which runs with memory mapping disabled.
409afeb666cSwnj * In these cases we really don't need the interrupts
410afeb666cSwnj * enabled, but since we run with ipl high, we don't care
411afeb666cSwnj * if they are, they will never happen anyways.
412a2f43acbSkarels * SHOULD GET POINTER TO UBA_HD INSTEAD OF UBA.
413afeb666cSwnj */
ubainit(uba)414ae6a5a61Skre ubainit(uba)
415ae6a5a61Skre register struct uba_regs *uba;
4163197845dSwnj {
417a2f43acbSkarels register struct uba_hd *uhp;
418a8407c55Skarels #ifdef QBA
419a2f43acbSkarels int isphys = 0;
420a8407c55Skarels #endif
4213197845dSwnj
	/*
	 * Find the uba_hd for this adapter; the caller may pass either
	 * the mapped register address or (early in boot, before mapping)
	 * the physical one, hence the two comparisons.
	 */
422a2f43acbSkarels for (uhp = uba_hd; uhp < uba_hd + numuba; uhp++) {
423a2f43acbSkarels if (uhp->uh_uba == uba)
424a2f43acbSkarels break;
425a2f43acbSkarels if (uhp->uh_physuba == uba) {
426a8407c55Skarels #ifdef QBA
427a2f43acbSkarels isphys++;
428a8407c55Skarels #endif
429a2f43acbSkarels break;
430a2f43acbSkarels }
431a2f43acbSkarels }
432a2f43acbSkarels if (uhp >= uba_hd + numuba) {
433a2f43acbSkarels printf("init unknown uba\n");
434a2f43acbSkarels return;
435a2f43acbSkarels }
436a2f43acbSkarels
	/* Issue the adapter-type-specific bus initialization sequence. */
437a2f43acbSkarels switch (uhp->uh_type) {
438a8407c55Skarels #ifdef DWBUA
439a8407c55Skarels case DWBUA:
440a8407c55Skarels BUA(uba)->bua_csr |= BUACSR_UPI;
441a8407c55Skarels /* give devices time to recover from power fail */
442a8407c55Skarels DELAY(500000);
443a8407c55Skarels break;
444a8407c55Skarels #endif
445a2f43acbSkarels #ifdef DW780
446a2f43acbSkarels case DW780:
447f8444eedSwnj uba->uba_cr = UBACR_ADINIT;
448f8444eedSwnj uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		/* busy-wait for the adapter's init-complete bit */
449f8444eedSwnj while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
4503197845dSwnj ;
451f8444eedSwnj break;
452f8444eedSwnj #endif
453a2f43acbSkarels #ifdef DW750
454a2f43acbSkarels case DW750:
4552d2574b2Swnj #endif
456a2f43acbSkarels #ifdef DW730
457a2f43acbSkarels case DW730:
45800a5eb10Swnj #endif
459a2f43acbSkarels #ifdef QBA
460a2f43acbSkarels case QBA:
461f235151bSkridle #endif
462a2f43acbSkarels #if DW750 || DW730 || QBA
46300a5eb10Swnj mtpr(IUR, 0);
46400a5eb10Swnj /* give devices time to recover from power fail */
46500a5eb10Swnj /* THIS IS PROBABLY UNNECESSARY */
46600a5eb10Swnj DELAY(500000);
46700a5eb10Swnj /* END PROBABLY UNNECESSARY */
468a2f43acbSkarels #ifdef QBA
469a2f43acbSkarels /*
470a2f43acbSkarels * Re-enable local memory access
471a2f43acbSkarels * from the Q-bus.
472a2f43acbSkarels */
473a2f43acbSkarels if (uhp->uh_type == QBA) {
474a2f43acbSkarels if (isphys)
475a2f43acbSkarels *((char *)QIOPAGE630 + QIPCR) = Q_LMEAE;
476a2f43acbSkarels else
477a2f43acbSkarels *(uhp->uh_iopage + QIPCR) = Q_LMEAE;
478a2f43acbSkarels }
479a2f43acbSkarels #endif QBA
480f8444eedSwnj break;
481a2f43acbSkarels #endif DW750 || DW730 || QBA
482f8444eedSwnj }
4833197845dSwnj }
4843197845dSwnj
485ae717e01Skarels #ifdef QBA
486ae717e01Skarels /*
487ae717e01Skarels * Determine the interrupt priority of a Q-bus
488ae717e01Skarels * peripheral. The device probe routine must spl6(),
489ae717e01Skarels * attempt to make the device request an interrupt,
490ae717e01Skarels * delaying as necessary, then call this routine
491ae717e01Skarels * before resetting the device.
492ae717e01Skarels */
qbgetpri()493ae717e01Skarels qbgetpri()
494ae717e01Skarels {
495ae717e01Skarels int pri;
496ae717e01Skarels extern int cvec;
497ae717e01Skarels
	/*
	 * Step the IPL down from 0x17 toward 0x15; the first level at
	 * which the pending interrupt gets through (cvec set to a real
	 * vector, not the 0x200 "none" sentinel) is the device's priority.
	 */
498ae717e01Skarels for (pri = 0x17; pri > 0x14; ) {
499ae717e01Skarels if (cvec && cvec != 0x200) /* interrupted at pri */
500ae717e01Skarels break;
501ae717e01Skarels pri--;
502ae717e01Skarels splx(pri - 1);
503ae717e01Skarels }
504ae717e01Skarels (void) spl0();
505ae717e01Skarels return (pri);
506ae717e01Skarels }
507ae717e01Skarels #endif
508ae717e01Skarels
509a2f43acbSkarels #ifdef DW780
5105f7d8a26Swnj int ubawedgecnt = 10;
5115f7d8a26Swnj int ubacrazy = 500;
5129d0e0165Skarels int zvcnt_max = 5000; /* in 8 sec */
513afeb666cSwnj /*
514fdb67d91Sbloom * This routine is called by the locore code to process a UBA
515fdb67d91Sbloom * error on an 11/780 or 8600. The arguments are passed
516afeb666cSwnj * on the stack, and value-result (through some trickery).
517afeb666cSwnj * In particular, the uvec argument is used for further
518afeb666cSwnj * uba processing so the result aspect of it is very important.
519afeb666cSwnj * It must not be declared register.
520afeb666cSwnj */
521ae6a5a61Skre /*ARGSUSED*/
ubaerror(uban,uh,ipl,uvec,uba)5229d0e0165Skarels ubaerror(uban, uh, ipl, uvec, uba)
5233197845dSwnj register int uban;
5243197845dSwnj register struct uba_hd *uh;
5259d0e0165Skarels int ipl, uvec;
5263197845dSwnj register struct uba_regs *uba;
5273197845dSwnj {
5283197845dSwnj register sr, s;
5293197845dSwnj
	/*
	 * Zero vector: count it, and if we exceed zvcnt_max within the
	 * 8-second window, assume the adapter is wedged and reset it.
	 */
5303197845dSwnj if (uvec == 0) {
531d30b22dcSkarels /*
532d30b22dcSkarels * Declare dt as unsigned so that negative values
533d30b22dcSkarels * are handled as >8 below, in case time was set back.
534d30b22dcSkarels */
535d30b22dcSkarels u_long dt = time.tv_sec - uh->uh_zvtime;
536d30b22dcSkarels
537d30b22dcSkarels uh->uh_zvtotal++;
5389d0e0165Skarels if (dt > 8) {
539d30b22dcSkarels uh->uh_zvtime = time.tv_sec;
5409d0e0165Skarels uh->uh_zvcnt = 0;
5419d0e0165Skarels }
5429d0e0165Skarels if (++uh->uh_zvcnt > zvcnt_max) {
5439d0e0165Skarels printf("uba%d: too many zero vectors (%d in <%d sec)\n",
5449d0e0165Skarels uban, uh->uh_zvcnt, dt + 1);
5459d0e0165Skarels printf("\tIPL 0x%x\n\tcnfgr: %b Adapter Code: 0x%x\n",
5469d0e0165Skarels ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
5479d0e0165Skarels uba->uba_cnfgr&0xff);
5489d0e0165Skarels printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
5499d0e0165Skarels uba->uba_sr, ubasr_bits, uba->uba_dcr,
5509d0e0165Skarels (uba->uba_dcr&0x8000000)?"":"NOT ");
5513197845dSwnj ubareset(uban);
5523197845dSwnj }
5533197845dSwnj return;
5543197845dSwnj }
	/* SBI configuration fault: report, reset, and suppress the vector. */
5553197845dSwnj if (uba->uba_cnfgr & NEX_CFGFLT) {
556977e5eabSwnj printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
557977e5eabSwnj uban, uba->uba_sr, ubasr_bits,
558147995f1Swnj uba->uba_cnfgr, NEXFLT_BITS);
5593197845dSwnj ubareset(uban);
5603197845dSwnj uvec = 0;
5613197845dSwnj return;
5623197845dSwnj }
	/* Ordinary UBA error: log it and write the status back to clear it. */
5633197845dSwnj sr = uba->uba_sr;
564fc920406Skarels s = spluba();
5654d3b0019Swnj printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
5664d3b0019Swnj uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
5673197845dSwnj splx(s);
5683197845dSwnj uba->uba_sr = sr;
	/* uvec is value-result (see header comment); pass back only the vector. */
569f8444eedSwnj uvec &= UBABRRVR_DIV;
5705f7d8a26Swnj if (++uh->uh_errcnt % ubawedgecnt == 0) {
5715f7d8a26Swnj if (uh->uh_errcnt > ubacrazy)
5725f7d8a26Swnj panic("uba crazy");
5735f7d8a26Swnj printf("ERROR LIMIT ");
5745f7d8a26Swnj ubareset(uban);
5755f7d8a26Swnj uvec = 0;
5765f7d8a26Swnj return;
5775f7d8a26Swnj }
5783197845dSwnj return;
5793197845dSwnj }
5803197845dSwnj #endif
5818894282fSfeldman
5828894282fSfeldman /*
5839d0e0165Skarels * Look for devices with unibus memory, allow them to configure, then disable
5849d0e0165Skarels * map registers as necessary. Called during autoconfiguration and ubareset.
5859d0e0165Skarels * The device ubamem routine returns 0 on success, 1 on success if it is fully
5869d0e0165Skarels * configured (has no csr or interrupt, so doesn't need to be probed),
5879d0e0165Skarels * and -1 on failure.
5889d0e0165Skarels */
ubameminit(uban)5899d0e0165Skarels ubameminit(uban)
5909d0e0165Skarels {
5919d0e0165Skarels register struct uba_device *ui;
5929d0e0165Skarels register struct uba_hd *uh = &uba_hd[uban];
5939d0e0165Skarels caddr_t umembase = umem[uban] + 0x3e000, addr;
5949d0e0165Skarels #define ubaoff(off) ((int)(off) & 0x1fff)
5959d0e0165Skarels
596 uh->uh_lastmem = 0;
	/*
	 * Offer each configured device with a ubamem routine a chance to
	 * claim its unibus memory ('?' means "any uba" in the config table).
	 */
5979d0e0165Skarels for (ui = ubdinit; ui->ui_driver; ui++) {
5989d0e0165Skarels if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
5999d0e0165Skarels continue;
6009d0e0165Skarels if (ui->ui_driver->ud_ubamem) {
6019d0e0165Skarels /*
6029d0e0165Skarels * During autoconfiguration, need to fudge ui_addr.
6039d0e0165Skarels */
6049d0e0165Skarels addr = ui->ui_addr;
6059d0e0165Skarels ui->ui_addr = umembase + ubaoff(addr);
6069d0e0165Skarels switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
6079d0e0165Skarels case 1:
6089d0e0165Skarels ui->ui_alive = 1;
6099d0e0165Skarels /* FALLTHROUGH */
6109d0e0165Skarels case 0:
6119d0e0165Skarels ui->ui_ubanum = uban;
6129d0e0165Skarels break;
6139d0e0165Skarels }
6149d0e0165Skarels ui->ui_addr = addr;
6159d0e0165Skarels }
6169d0e0165Skarels }
617a2f43acbSkarels #ifdef DW780
6189d0e0165Skarels /*
619a2f43acbSkarels * On a DW780, throw away any map registers disabled by rounding
6209d0e0165Skarels * the map disable in the configuration register
6219d0e0165Skarels * up to the next 8K boundary, or below the last unibus memory.
6229d0e0165Skarels */
623a2f43acbSkarels if (uh->uh_type == DW780) {
6249d0e0165Skarels register i;
6259d0e0165Skarels
6269d0e0165Skarels i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
6279d0e0165Skarels while (i)
6289d0e0165Skarels (void) rmget(uh->uh_map, 1, i--);
6299d0e0165Skarels }
6309d0e0165Skarels #endif
6319d0e0165Skarels }
6329d0e0165Skarels
6339d0e0165Skarels /*
6345f1fbc05Ssam * Allocate UNIBUS memory. Allocates and initializes
6355f1fbc05Ssam * sufficient mapping registers for access. On a 780,
6365f1fbc05Ssam * the configuration register is setup to disable UBA
6375f1fbc05Ssam * response on DMA transfers to addresses controlled
6385f1fbc05Ssam * by the disabled mapping registers.
639a2f43acbSkarels * On a DW780, should only be called from ubameminit, or in ascending order
6409d0e0165Skarels * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
6419d0e0165Skarels * the last unibus memory would free unusable map registers.
6429d0e0165Skarels * Doalloc is 1 to allocate, 0 to deallocate.
6438894282fSfeldman */
ubamem(uban,addr,npg,doalloc)6445f1fbc05Ssam ubamem(uban, addr, npg, doalloc)
6455f1fbc05Ssam int uban, addr, npg, doalloc;
6468894282fSfeldman {
6478894282fSfeldman register struct uba_hd *uh = &uba_hd[uban];
6485f1fbc05Ssam register int a;
6499d0e0165Skarels int s;
6508894282fSfeldman
	/* Convert the unibus byte address to a 1-based map register index. */
651e4876169Sfeldman a = (addr >> 9) + 1;
652fc920406Skarels s = spluba();
6539d0e0165Skarels if (doalloc)
6549d0e0165Skarels a = rmget(uh->uh_map, npg, a);
6559d0e0165Skarels else
6569d0e0165Skarels rmfree(uh->uh_map, (long)npg, (long)a);
6579d0e0165Skarels splx(s);
6588894282fSfeldman if (a) {
6595f1fbc05Ssam register int i, *m;
6605f1fbc05Ssam
		/* Invalidate the map registers covering this unibus memory. */
661a2f43acbSkarels m = (int *)&uh->uh_mr[a - 1];
6625f1fbc05Ssam for (i = 0; i < npg; i++)
6638894282fSfeldman *m++ = 0; /* All off, especially 'valid' */
		/* Track the high-water mark of allocated unibus memory. */
6649d0e0165Skarels i = addr + npg * 512;
6659d0e0165Skarels if (doalloc && i > uh->uh_lastmem)
6669d0e0165Skarels uh->uh_lastmem = i;
6679d0e0165Skarels else if (doalloc == 0 && i == uh->uh_lastmem)
6689d0e0165Skarels uh->uh_lastmem = addr;
669a2f43acbSkarels #ifdef DW780
6705f1fbc05Ssam /*
6715f1fbc05Ssam * On a 780, set up the map register disable
6725f1fbc05Ssam * field in the configuration register. Beware
6739d0e0165Skarels * of callers that request memory ``out of order''
6749d0e0165Skarels * or in sections other than 8K multiples.
6759d0e0165Skarels * Ubameminit handles such requests properly, however.
6765f1fbc05Ssam */
677a2f43acbSkarels if (uh->uh_type == DW780) {
6789d0e0165Skarels i = uh->uh_uba->uba_cr &~ 0x7c000000;
6799d0e0165Skarels i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
6809d0e0165Skarels uh->uh_uba->uba_cr = i;
681e4876169Sfeldman }
682e4876169Sfeldman #endif
6838894282fSfeldman }
6848894282fSfeldman return (a);
6858894282fSfeldman }
6863959efb7Ssam
6876b9453d7Ssam #include "ik.h"
68803677a12Sjg #include "vs.h"
68903677a12Sjg #if NIK > 0 || NVS > 0
6903959efb7Ssam /*
6913959efb7Ssam * Map a virtual address into users address space. Actually all we
6923959efb7Ssam * do is turn on the user mode write protection bits for the particular
6933959efb7Ssam * page of memory involved.
6943959efb7Ssam */
/*
 * Grant user-mode write access to the kernel page containing
 * vaddress by setting its PTE protection field to PG_UW.
 */
maptouser(vaddress)6953959efb7Ssam maptouser(vaddress)
6963959efb7Ssam caddr_t vaddress;
6973959efb7Ssam {
6983959efb7Ssam
699a8407c55Skarels kvtopte(vaddress)->pg_prot = (PG_UW >> 27);
7003959efb7Ssam }
7013959efb7Ssam
/*
 * Undo maptouser(): restore kernel-only (PG_KW) protection on the
 * page containing vaddress.
 */
unmaptouser(vaddress)7023959efb7Ssam unmaptouser(vaddress)
7033959efb7Ssam caddr_t vaddress;
7043959efb7Ssam {
7053959efb7Ssam
706a8407c55Skarels kvtopte(vaddress)->pg_prot = (PG_KW >> 27);
7073959efb7Ssam }
7084d87cda7Ssam #endif
709